| code | apis | extract_api |
|---|---|---|
# -*- coding: utf-8 -*-
from .provider import MendeleyCitationsProvider
from website.citations.views import GenericCitationViews
mendeley_views = GenericCitationViews('mendeley', MendeleyCitationsProvider)
|
[
"website.citations.views.GenericCitationViews"
] |
[((147, 206), 'website.citations.views.GenericCitationViews', 'GenericCitationViews', (['"""mendeley"""', 'MendeleyCitationsProvider'], {}), "('mendeley', MendeleyCitationsProvider)\n", (167, 206), False, 'from website.citations.views import GenericCitationViews\n')]
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from google.cloud import datastore
from google.cloud import resource_manager
from googleapiclient import discovery
from googleapiclient import errors
import httplib2
from oauth2client import client
import webapp2
def resource_iterator(next_page_function):
"""Loop through resources from a Google API.
    An iterator that returns all of the resources from a Google API 'list'
    operation, paging through each result set.
Args:
next_page_function: A function that when called will return the next
page of results.
Yields:
        Individual resources, which are typically dictionaries.
"""
next_page_token = None
more_results = True
while more_results:
resource_response = None
try:
resource_response = next_page_function(next_page_token).execute()
except errors.HttpError:
            # Some projects throw a 403 (e.g. Compute Engine isn't enabled);
            # just ignore those resources.
logging.debug('skipping resources.', exc_info=True)
return
for items_field in ['items', 'rrsets', 'managedZones']:
items = resource_response.get(items_field, {})
if items and (type(items) == dict):
for item in items.iteritems():
yield item
if items and (type(items) == list):
for item in items:
yield item
if 'nextPageToken' in resource_response:
next_page_token = resource_response['nextPageToken']
else:
more_results = False
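# Illustrative sketch (not part of the original module): driving the iterator
# with a Compute Engine instances.list call. 'project' and 'zone' are
# hypothetical values. resource_iterator calls .execute() itself, so the page
# function must return the un-executed request object.
def example_iterate_instances(compute, project, zone):
    def next_page(page_token):
        return compute.instances().list(
            project=project, zone=zone, pageToken=page_token)
    return list(resource_iterator(next_page))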
class ThreadsafeClientLocal(object):
"""A thread local Google API client descriptor.
    Httplib2 is not threadsafe, so each request thread requires its own
    thread-local client object, which this class creates.
Attributes:
service: String name of the API to create the client for.
version: String version of the API client.
"""
_class_thread_local = threading.local()
def __init__(self, service, version):
"""Create a thread local API client.
        Will create the underlying httplib2.Http object on construction, but
        the underlying API client is lazily constructed.
Args:
            service: Name of the API.
            version: Version of the API.
"""
self.service = service
self.version = version
self.http = httplib2.Http(timeout=60)
self.cache_discovery = True
def __get__(self, instance, instance_type):
"""Construct the API client."""
if instance is None:
return self
thread_local = None
try:
app = webapp2.get_app()
# Python Google API clients aren't threadsafe as they use httplib2
# which isn't threadsafe.
thread_local = app.registry.get(self)
if thread_local is None:
thread_local = threading.local()
app.registry[self] = thread_local
except AssertionError:
# When not in a request context, use class thread local.
thread_local = ThreadsafeClientLocal._class_thread_local
cached_client = getattr(thread_local, 'api', None)
if cached_client is None:
credentials = client.GoogleCredentials.get_application_default()
if credentials.create_scoped_required():
credentials = credentials.create_scoped(
'https://www.googleapis.com/auth/cloud-platform')
cached_client = discovery.build(
self.service,
self.version,
http=credentials.authorize(self.http),
cache_discovery=self.cache_discovery)
thread_local.api = cached_client
return cached_client
class Clients(object):
"""Holds API clients.
    For Google API clients, we use thread-local descriptors, which create the
    client on first access. The "google.cloud" clients are threadsafe and are
simple properties.
"""
metrics = ThreadsafeClientLocal('monitoring', 'v3')
compute = ThreadsafeClientLocal('compute', 'v1')
dns = ThreadsafeClientLocal('dns', 'v1')
iam = ThreadsafeClientLocal('cloudresourcemanager', 'v1')
def __init__(self):
self.datastore = datastore.Client()
self.crm = resource_manager.Client()
CLIENTS = Clients()
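# Example (sketch, hypothetical project id): the descriptor builds a client
# lazily per thread, so this is safe from any request-handling thread.
#   zones = CLIENTS.compute.zones().list(project='my-project').execute()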
|
[
"httplib2.Http",
"google.cloud.datastore.Client",
"logging.debug",
"webapp2.get_app",
"google.cloud.resource_manager.Client",
"oauth2client.client.GoogleCredentials.get_application_default",
"threading.local"
] |
[((2574, 2591), 'threading.local', 'threading.local', ([], {}), '()\n', (2589, 2591), False, 'import threading\n'), ((2997, 3022), 'httplib2.Http', 'httplib2.Http', ([], {'timeout': '(60)'}), '(timeout=60)\n', (3010, 3022), False, 'import httplib2\n'), ((4895, 4913), 'google.cloud.datastore.Client', 'datastore.Client', ([], {}), '()\n', (4911, 4913), False, 'from google.cloud import datastore\n'), ((4933, 4958), 'google.cloud.resource_manager.Client', 'resource_manager.Client', ([], {}), '()\n', (4956, 4958), False, 'from google.cloud import resource_manager\n'), ((3261, 3278), 'webapp2.get_app', 'webapp2.get_app', ([], {}), '()\n', (3276, 3278), False, 'import webapp2\n'), ((3871, 3921), 'oauth2client.client.GoogleCredentials.get_application_default', 'client.GoogleCredentials.get_application_default', ([], {}), '()\n', (3919, 3921), False, 'from oauth2client import client\n'), ((1595, 1646), 'logging.debug', 'logging.debug', (['"""skipping resources."""'], {'exc_info': '(True)'}), "('skipping resources.', exc_info=True)\n", (1608, 1646), False, 'import logging\n'), ((3514, 3531), 'threading.local', 'threading.local', ([], {}), '()\n', (3529, 3531), False, 'import threading\n')]
|
# -*- coding: utf-8 -*-
import os
from io import StringIO
import json
import pytest
from unittest.mock import patch
from bdea.client import BDEAClient, URLError
from bdea.client import is_disposable_domain, is_disposable_email
class TestBDEAClientRequest(object):
def test_urlerror_returns_empty(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.side_effect = URLError('No luck!')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {}
def test_invalid_json_returns_empty(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('invalid json')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {}
def test_valid_json(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{"blah": "blah"}')
cl = BDEAClient('apikey')
assert cl.request('http://www.rottentomatoes.com/') == {'blah': 'blah'}
def test_do_not_accept_email(self):
cl = BDEAClient('apikey')
with pytest.raises(ValueError):
cl.get_domain_status('<EMAIL>')
class TestBDEAClient(object):
def test_status_urlopen_args(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{}')
cl = BDEAClient('apikey')
cl.get_api_status()
url = 'http://status.block-disposable-email.com/status/?apikey=apikey'
urlopen_mock.assert_called_with(url, timeout=5)
def test_domain_urlopen_args(self):
with patch('bdea.client.urlopen') as urlopen_mock:
urlopen_mock.return_value = StringIO('{}')
cl = BDEAClient('apikey')
cl.get_domain_status('example.com')
url = 'http://check.block-disposable-email.com/easyapi/json/apikey/example.com'
urlopen_mock.assert_called_with(url, timeout=5)
class TestBDEAClientLive(object):
APIKEY_INVALID = 'invalid-unittest-apikey'
def _get_api_key(self):
return os.environ.get('BDEA_APIKEY', self.APIKEY_INVALID)
def test_invalid_apikey_domain_ok(self):
res = BDEAClient(self.APIKEY_INVALID).get_domain_status(BDEAClient.TEST_DOMAIN_OK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'fail_key'
def test_invalid_apikey_domain_block(self):
res = BDEAClient(self.APIKEY_INVALID).get_domain_status(BDEAClient.TEST_DOMAIN_BLOCK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'fail_key'
def test_invalid_apikey_api_status(self):
res = BDEAClient(self.APIKEY_INVALID).get_api_status()
assert res.response['request_status'] == 'ok'
assert res.response['apikeystatus'] == 'inactive'
@pytest.mark.xfail
def test_valid_apikey_api_status(self):
res = BDEAClient(self._get_api_key()).get_api_status()
assert res.response['request_status'] == 'ok'
assert res.response['apikeystatus'] == 'active'
@pytest.mark.xfail
def test_valid_apikey_domain_ok(self):
res = BDEAClient(self._get_api_key()).get_domain_status(BDEAClient.TEST_DOMAIN_OK)
assert res.response['domain_status'] == 'ok'
assert res.response['request_status'] == 'success'
@pytest.mark.xfail
def test_valid_apikey_domain_block(self):
res = BDEAClient(self._get_api_key()).get_domain_status(BDEAClient.TEST_DOMAIN_BLOCK)
assert res.response['domain_status'] == 'block'
assert res.response['request_status'] == 'success'
class TestShortcut(object):
RESPONSE = {
'domain_status': 'ok',
'execution_time': 0.0052359104156494,
'request_status': 'success',
'server_id': 'mirror5_vienna',
'servertime': '2015-10-25 5:25:54',
'version': '0.2'
}
def test_domain_shortcut_function(self):
with patch('bdea.client.urlopen') as urlopen_mock:
res = self.RESPONSE.copy()
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_domain('google.com', 'apikey') == False
res.update({
'domain_status': 'block'
})
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_domain('mailinator.com', 'apikey') == True
def test_email_shortcut_function(self):
with patch('bdea.client.urlopen') as urlopen_mock:
res = self.RESPONSE.copy()
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_email('<EMAIL>', 'apikey') == False
res.update({
'domain_status': 'block'
})
urlopen_mock.return_value = StringIO('{}'.format(json.dumps(res)))
assert is_disposable_email('<EMAIL>', 'apikey') == True
|
[
"io.StringIO",
"bdea.client.is_disposable_email",
"json.dumps",
"os.environ.get",
"unittest.mock.patch",
"pytest.raises",
"bdea.client.is_disposable_domain",
"bdea.client.URLError",
"bdea.client.BDEAClient"
] |
[((1155, 1175), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (1165, 1175), False, 'from bdea.client import BDEAClient, URLError\n'), ((2180, 2230), 'os.environ.get', 'os.environ.get', (['"""BDEA_APIKEY"""', 'self.APIKEY_INVALID'], {}), "('BDEA_APIKEY', self.APIKEY_INVALID)\n", (2194, 2230), False, 'import os\n'), ((325, 353), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (330, 353), False, 'from unittest.mock import patch\n'), ((410, 430), 'bdea.client.URLError', 'URLError', (['"""No luck!"""'], {}), "('No luck!')\n", (418, 430), False, 'from bdea.client import BDEAClient, URLError\n'), ((448, 468), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (458, 468), False, 'from bdea.client import BDEAClient, URLError\n'), ((600, 628), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (605, 628), False, 'from unittest.mock import patch\n'), ((686, 710), 'io.StringIO', 'StringIO', (['"""invalid json"""'], {}), "('invalid json')\n", (694, 710), False, 'from io import StringIO\n'), ((728, 748), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (738, 748), False, 'from bdea.client import BDEAClient, URLError\n'), ((864, 892), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (869, 892), False, 'from unittest.mock import patch\n'), ((950, 978), 'io.StringIO', 'StringIO', (['"""{"blah": "blah"}"""'], {}), '(\'{"blah": "blah"}\')\n', (958, 978), False, 'from io import StringIO\n'), ((996, 1016), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (1006, 1016), False, 'from bdea.client import BDEAClient, URLError\n'), ((1189, 1214), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1202, 1214), False, 'import pytest\n'), ((1346, 1374), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (1351, 1374), False, 'from unittest.mock import patch\n'), ((1432, 1446), 'io.StringIO', 'StringIO', (['"""{}"""'], {}), "('{}')\n", (1440, 1446), False, 'from io import StringIO\n'), ((1464, 1484), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (1474, 1484), False, 'from bdea.client import BDEAClient, URLError\n'), ((1714, 1742), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (1719, 1742), False, 'from unittest.mock import patch\n'), ((1800, 1814), 'io.StringIO', 'StringIO', (['"""{}"""'], {}), "('{}')\n", (1808, 1814), False, 'from io import StringIO\n'), ((1832, 1852), 'bdea.client.BDEAClient', 'BDEAClient', (['"""apikey"""'], {}), "('apikey')\n", (1842, 1852), False, 'from bdea.client import BDEAClient, URLError\n'), ((4083, 4111), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (4088, 4111), False, 'from unittest.mock import patch\n'), ((4615, 4643), 'unittest.mock.patch', 'patch', (['"""bdea.client.urlopen"""'], {}), "('bdea.client.urlopen')\n", (4620, 4643), False, 'from unittest.mock import patch\n'), ((2291, 2322), 'bdea.client.BDEAClient', 'BDEAClient', (['self.APIKEY_INVALID'], {}), '(self.APIKEY_INVALID)\n', (2301, 2322), False, 'from bdea.client import BDEAClient, URLError\n'), ((2544, 2575), 'bdea.client.BDEAClient', 'BDEAClient', (['self.APIKEY_INVALID'], {}), '(self.APIKEY_INVALID)\n', (2554, 2575), False, 'from bdea.client import 
BDEAClient, URLError\n'), ((2798, 2829), 'bdea.client.BDEAClient', 'BDEAClient', (['self.APIKEY_INVALID'], {}), '(self.APIKEY_INVALID)\n', (2808, 2829), False, 'from bdea.client import BDEAClient, URLError\n'), ((4266, 4310), 'bdea.client.is_disposable_domain', 'is_disposable_domain', (['"""google.com"""', '"""apikey"""'], {}), "('google.com', 'apikey')\n", (4286, 4310), False, 'from bdea.client import is_disposable_domain, is_disposable_email\n'), ((4500, 4548), 'bdea.client.is_disposable_domain', 'is_disposable_domain', (['"""mailinator.com"""', '"""apikey"""'], {}), "('mailinator.com', 'apikey')\n", (4520, 4548), False, 'from bdea.client import is_disposable_domain, is_disposable_email\n'), ((4798, 4838), 'bdea.client.is_disposable_email', 'is_disposable_email', (['"""<EMAIL>"""', '"""apikey"""'], {}), "('<EMAIL>', 'apikey')\n", (4817, 4838), False, 'from bdea.client import is_disposable_domain, is_disposable_email\n'), ((5028, 5068), 'bdea.client.is_disposable_email', 'is_disposable_email', (['"""<EMAIL>"""', '"""apikey"""'], {}), "('<EMAIL>', 'apikey')\n", (5047, 5068), False, 'from bdea.client import is_disposable_domain, is_disposable_email\n'), ((4229, 4244), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (4239, 4244), False, 'import json\n'), ((4463, 4478), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (4473, 4478), False, 'import json\n'), ((4761, 4776), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (4771, 4776), False, 'import json\n'), ((4991, 5006), 'json.dumps', 'json.dumps', (['res'], {}), '(res)\n', (5001, 5006), False, 'import json\n')]
|
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree.
from __future__ import print_function
import httplib
import logging
import os
import re
import socket
import ssl
import subprocess
import sys
logger = logging.getLogger('utils')
# Run the process silently without stdout and stderr.
# On success, return stdout. Otherwise, raise CalledProcessError
# with combined stdout and stderr.
def check_output_silent(args, cwd=None, env=None):
    # Use Popen here. check_output is not available in Python 2.6.
# cwd=None means don't change cwd.
# env=None means inheriting the current process' environment.
process = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env)
out, err = process.communicate()
if process.returncode != 0:
error = subprocess.CalledProcessError(process.returncode, args)
error.output = out + err
raise error
else:
return out
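# Example (sketch): capture the output of a simple command.
#   listing = check_output_silent(['ls', '-la'])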
def darwin_path_helper():
try:
out = check_output_silent(['/usr/libexec/path_helper', '-s'])
path = re.search(r'PATH=\"([^\"]+)\"', out).group(1)
return path
except Exception as e:
logger.warn('Failed to get additional PATH info (%s)', e.message)
return ''
# It supports https if key_file, cert_file and ca_cert are all given.
def http_get(host, port, method, url, key_file=None, cert_file=None, ca_cert=None, timeout=1):
try:
conn = None
if key_file is not None and cert_file is not None and ca_cert is not None:
if sys.version_info < (2, 7, 9):
conn = httplib.HTTPSConnection(
host,
port,
key_file=key_file,
cert_file=cert_file,
timeout=timeout)
else:
ctx = ssl.create_default_context(cafile=ca_cert)
# We disable host name validation here so we can ping the server endpoint
# using localhost.
ctx.check_hostname = False
conn = httplib.HTTPSConnection(
host,
port,
key_file=key_file,
cert_file=cert_file,
timeout=timeout,
context=ctx)
else:
conn = httplib.HTTPConnection(host, port, timeout=timeout)
conn.request(method, url)
response = conn.getresponse()
if response.status == 200:
ret = response.read()
return ret
else:
return None
except ssl.SSLError as e:
if sys.version_info < (2, 7, 9):
logger.error("An SSL Error occurred")
else:
logger.error("An SSL Error occurred: %s" % e.reason)
return None
except socket.error:
return None
except:
logger.error("Unexpected error: %s" % sys.exc_info()[0])
return None
finally:
if conn:
conn.close()
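# Example (sketch): plain-HTTP health check against a local endpoint;
# returns the response body on HTTP 200 and None on any error.
#   body = http_get('localhost', 8080, 'GET', '/health')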
def is_ip_address(addr):
try:
# Check ipv4 address.
socket.inet_aton(addr)
return True
except socket.error:
pass
try:
# Check ipv6 address.
socket.inet_pton(socket.AF_INET6, addr)
return True
except socket.error:
return False
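# Examples (sketch): is_ip_address('127.0.0.1') -> True,
# is_ip_address('::1') -> True, is_ip_address('example.com') -> False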
# Read the resource and write it to a given dir using the resource name as file name.
# Return the file path.
def write_resource_to_file(name, dir):
target_path = os.path.join(dir, os.path.basename(name))
with open(name, 'r') as res_file:
content = res_file.read()
with open(target_path, 'w') as f:
f.write(content)
return target_path
|
[
"subprocess.Popen",
"os.path.basename",
"ssl.create_default_context",
"httplib.HTTPConnection",
"subprocess.CalledProcessError",
"socket.inet_aton",
"sys.exc_info",
"httplib.HTTPSConnection",
"socket.inet_pton",
"re.search",
"logging.getLogger"
] |
[((344, 370), 'logging.getLogger', 'logging.getLogger', (['"""utils"""'], {}), "('utils')\n", (361, 370), False, 'import logging\n'), ((764, 857), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'cwd', 'env': 'env'}), '(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=\n cwd, env=env)\n', (780, 857), False, 'import subprocess\n'), ((979, 1034), 'subprocess.CalledProcessError', 'subprocess.CalledProcessError', (['process.returncode', 'args'], {}), '(process.returncode, args)\n', (1008, 1034), False, 'import subprocess\n'), ((3231, 3253), 'socket.inet_aton', 'socket.inet_aton', (['addr'], {}), '(addr)\n', (3247, 3253), False, 'import socket\n'), ((3360, 3399), 'socket.inet_pton', 'socket.inet_pton', (['socket.AF_INET6', 'addr'], {}), '(socket.AF_INET6, addr)\n', (3376, 3399), False, 'import socket\n'), ((3653, 3675), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', (3669, 3675), False, 'import os\n'), ((2486, 2537), 'httplib.HTTPConnection', 'httplib.HTTPConnection', (['host', 'port'], {'timeout': 'timeout'}), '(host, port, timeout=timeout)\n', (2508, 2537), False, 'import httplib\n'), ((1239, 1277), 're.search', 're.search', (['"""PATH=\\\\"([^\\\\"]+)\\\\\\""""', 'out'], {}), '(\'PATH=\\\\"([^\\\\"]+)\\\\"\', out)\n', (1248, 1277), False, 'import re\n'), ((1758, 1854), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['host', 'port'], {'key_file': 'key_file', 'cert_file': 'cert_file', 'timeout': 'timeout'}), '(host, port, key_file=key_file, cert_file=cert_file,\n timeout=timeout)\n', (1781, 1854), False, 'import httplib\n'), ((1992, 2034), 'ssl.create_default_context', 'ssl.create_default_context', ([], {'cafile': 'ca_cert'}), '(cafile=ca_cert)\n', (2018, 2034), False, 'import ssl\n'), ((2226, 2335), 'httplib.HTTPSConnection', 'httplib.HTTPSConnection', (['host', 'port'], {'key_file': 'key_file', 'cert_file': 'cert_file', 'timeout': 'timeout', 'context': 'ctx'}), '(host, port, key_file=key_file, cert_file=cert_file,\n timeout=timeout, context=ctx)\n', (2249, 2335), False, 'import httplib\n'), ((3063, 3077), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (3075, 3077), False, 'import sys\n')]
|
import pytest
import reinas.queens as queens
import numpy as np
import models.consultas as consultas
def test_numero_reinas(numero):
n = int(numero)
lista_soluciones = []
session = consultas.loadSession()
tablero = np.zeros(shape=(n,n),dtype=int)
queens.n_reinas(tablero,0,lista_soluciones)
num_soluciones = consultas.num_soluciones(n, session)
assert len(lista_soluciones) == num_soluciones
|
[
"models.consultas.num_soluciones",
"models.consultas.loadSession",
"numpy.zeros",
"reinas.queens.n_reinas"
] |
[((194, 217), 'models.consultas.loadSession', 'consultas.loadSession', ([], {}), '()\n', (215, 217), True, 'import models.consultas as consultas\n'), ((232, 265), 'numpy.zeros', 'np.zeros', ([], {'shape': '(n, n)', 'dtype': 'int'}), '(shape=(n, n), dtype=int)\n', (240, 265), True, 'import numpy as np\n'), ((272, 317), 'reinas.queens.n_reinas', 'queens.n_reinas', (['tablero', '(0)', 'lista_soluciones'], {}), '(tablero, 0, lista_soluciones)\n', (287, 317), True, 'import reinas.queens as queens\n'), ((341, 377), 'models.consultas.num_soluciones', 'consultas.num_soluciones', (['n', 'session'], {}), '(n, session)\n', (365, 377), True, 'import models.consultas as consultas\n')]
|
from django.urls import path, re_path
from . import views
urlpatterns = [
path('category/<str:category>/', views.article_list, name='article-list'),
path('tag/<str:tag_name>/', views.article_tag_list, name='article-tag-list'),
path('<int:pk>/', views.article_detail, name='article-detail-id'),
#path('<int:pk>/<slug:slug>/', views.article_detail, name='article-detail-slug'),
]
|
[
"django.urls.path"
] |
[((80, 153), 'django.urls.path', 'path', (['"""category/<str:category>/"""', 'views.article_list'], {'name': '"""article-list"""'}), "('category/<str:category>/', views.article_list, name='article-list')\n", (84, 153), False, 'from django.urls import path, re_path\n'), ((159, 235), 'django.urls.path', 'path', (['"""tag/<str:tag_name>/"""', 'views.article_tag_list'], {'name': '"""article-tag-list"""'}), "('tag/<str:tag_name>/', views.article_tag_list, name='article-tag-list')\n", (163, 235), False, 'from django.urls import path, re_path\n'), ((241, 306), 'django.urls.path', 'path', (['"""<int:pk>/"""', 'views.article_detail'], {'name': '"""article-detail-id"""'}), "('<int:pk>/', views.article_detail, name='article-detail-id')\n", (245, 306), False, 'from django.urls import path, re_path\n')]
|
import logging
from shapely.geometry.multipolygon import MultiPolygon
from shapely.geometry.polygon import Polygon
from snapshottest import TestCase
from graphic_coloring_engine.core import (
Color,
ColorChoice,
ColoringEngine,
ColoringEngineConstants,
Coordinate,
DominantColor,
Layer,
Layout,
)
logger = logging.getLogger(__name__)
class TestLayout(TestCase):
seed = 42
constants = ColoringEngineConstants()
default_allowed_color_set = set(
[ColorChoice(rgb_string="#000"), ColorChoice(rgb_string="#fff")]
)
bg_coord = Coordinate(
xmin=0,
xmax=100,
ymin=0,
ymax=200,
)
img_coord = Coordinate(
xmin=0,
xmax=30,
ymin=0,
ymax=30,
)
    # Top-left
text_coord = Coordinate(
xmin=0,
xmax=50,
ymin=0,
ymax=50,
)
    # Bottom-right
text_coord_2 = Coordinate(
xmin=50,
xmax=100,
ymin=150,
ymax=200,
)
def build_layout(self):
return Layout(
width=100,
height=200,
layers=[
                # Background
Layer(
order=3,
bbox_coordinate=self.bg_coord,
dominant_colors=[DominantColor(rgb_string="#1f1f1f", ratio=0.8)],
type="image",
),
                # Image
Layer(
order=2,
bbox_coordinate=self.img_coord,
dominant_colors=[DominantColor(rgb_string="#88F", ratio=0.8)],
type="image",
),
                # Text
Layer(
order=1,
bbox_coordinate=self.text_coord,
polygon=MultiPolygon(
[
Polygon(
[
(10, 20),
(40, 20),
(40, 30),
(10, 30),
]
)
]
),
color_mutable=True,
type="text",
),
                # Text
Layer(
order=0,
bbox_coordinate=self.text_coord_2,
color_mutable=True,
type="text",
),
],
)
def test_coloring_engine_init(self):
        # No extra constraints configured
layout = self.build_layout()
engine = ColoringEngine(layout=layout, seed=self.seed, constants=self.constants)
assert engine.get_layer_color_filters(0) is None
assert engine.get_layer_color_constraint(0) is None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_extra_color(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
)
assert engine.get_layer_color_filters(0) is None
assert engine.get_layer_color_constraint(0) is None
color_schemes = engine.colorize()
assert len(color_schemes) > 0
        # Expect each text color to have enough contrast with every intersecting element
for color_scheme in color_schemes:
for layer_order in [
layer.order for layer in layout.layers if layer.type == "text"
]:
new_text_color = color_scheme[layer_order]
for bg_order in layout.layer_collision_map[layer_order]:
bg_layer = layout.layer_map[bg_order]
assert (
bg_layer.color.contrast(new_text_color)
> engine.constants.文字与背景的最小对比度
)
def test_coloring_engine_init_with_filter(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_filter_map={1: [lambda color, layout: False]},
)
assert engine.get_layer_color_filters(1) is not None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_constraint(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_constraint_map={1: [lambda color, layout: False]},
)
assert engine.get_layer_color_constraint(1) is not None
color_schemes = engine.colorize()
assert len(color_schemes) == 0
def test_coloring_engine_init_with_constraint_ctx(self):
layout = self.build_layout()
other_node_colorized = set()
def build_flag_constraint(layer_order: int):
def flag_constraint(color: Color, coloring_engine: ColoringEngine):
nonlocal other_node_colorized
{
other_node_colorized.add(bool(layer.color))
for layer in coloring_engine.layout.layers
if layer.order != layer_order
}
return True
return flag_constraint
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
layer_color_constraint_map={
1: [build_flag_constraint(1)],
2: [build_flag_constraint(0)],
},
)
color_schemes = engine.colorize()
assert len(color_schemes) != 0
assert True in other_node_colorized
assert False in other_node_colorized
def test_coloring_engine_init_with_global_constraint(self):
layout = self.build_layout()
engine = ColoringEngine(
layout=layout,
seed=self.seed,
constants=self.constants,
extra_usable_colors=self.default_allowed_color_set,
global_color_constraint=[
lambda layout: Color(rgb_string="#000") == layout.layers[2].color
],
)
color_schemes = engine.colorize()
assert len(color_schemes) != 0
|
[
"graphic_coloring_engine.core.ColoringEngine",
"graphic_coloring_engine.core.DominantColor",
"graphic_coloring_engine.core.ColorChoice",
"graphic_coloring_engine.core.Layer",
"shapely.geometry.polygon.Polygon",
"graphic_coloring_engine.core.Color",
"graphic_coloring_engine.core.ColoringEngineConstants",
"graphic_coloring_engine.core.Coordinate",
"logging.getLogger"
] |
[((341, 368), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (358, 368), False, 'import logging\n'), ((429, 454), 'graphic_coloring_engine.core.ColoringEngineConstants', 'ColoringEngineConstants', ([], {}), '()\n', (452, 454), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((586, 632), 'graphic_coloring_engine.core.Coordinate', 'Coordinate', ([], {'xmin': '(0)', 'xmax': '(100)', 'ymin': '(0)', 'ymax': '(200)'}), '(xmin=0, xmax=100, ymin=0, ymax=200)\n', (596, 632), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((688, 732), 'graphic_coloring_engine.core.Coordinate', 'Coordinate', ([], {'xmin': '(0)', 'xmax': '(30)', 'ymin': '(0)', 'ymax': '(30)'}), '(xmin=0, xmax=30, ymin=0, ymax=30)\n', (698, 732), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((798, 842), 'graphic_coloring_engine.core.Coordinate', 'Coordinate', ([], {'xmin': '(0)', 'xmax': '(50)', 'ymin': '(0)', 'ymax': '(50)'}), '(xmin=0, xmax=50, ymin=0, ymax=50)\n', (808, 842), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((910, 959), 'graphic_coloring_engine.core.Coordinate', 'Coordinate', ([], {'xmin': '(50)', 'xmax': '(100)', 'ymin': '(150)', 'ymax': '(200)'}), '(xmin=50, xmax=100, ymin=150, ymax=200)\n', (920, 959), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((2658, 2729), 'graphic_coloring_engine.core.ColoringEngine', 'ColoringEngine', ([], {'layout': 'layout', 'seed': 'self.seed', 'constants': 'self.constants'}), '(layout=layout, seed=self.seed, constants=self.constants)\n', (2672, 2729), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((3042, 3169), 'graphic_coloring_engine.core.ColoringEngine', 'ColoringEngine', ([], {'layout': 'layout', 'seed': 'self.seed', 'constants': 'self.constants', 'extra_usable_colors': 'self.default_allowed_color_set'}), '(layout=layout, seed=self.seed, constants=self.constants,\n extra_usable_colors=self.default_allowed_color_set)\n', (3056, 3169), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((4092, 4284), 'graphic_coloring_engine.core.ColoringEngine', 'ColoringEngine', ([], {'layout': 'layout', 'seed': 'self.seed', 'constants': 'self.constants', 'extra_usable_colors': 'self.default_allowed_color_set', 'layer_color_filter_map': '{(1): [lambda color, layout: False]}'}), '(layout=layout, seed=self.seed, constants=self.constants,\n extra_usable_colors=self.default_allowed_color_set,\n layer_color_filter_map={(1): [lambda color, layout: False]})\n', (4106, 4284), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((4601, 4797), 'graphic_coloring_engine.core.ColoringEngine', 'ColoringEngine', ([], {'layout': 'layout', 'seed': 'self.seed', 'constants': 'self.constants', 'extra_usable_colors': 
'self.default_allowed_color_set', 'layer_color_constraint_map': '{(1): [lambda color, layout: False]}'}), '(layout=layout, seed=self.seed, constants=self.constants,\n extra_usable_colors=self.default_allowed_color_set,\n layer_color_constraint_map={(1): [lambda color, layout: False]})\n', (4615, 4797), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((501, 531), 'graphic_coloring_engine.core.ColorChoice', 'ColorChoice', ([], {'rgb_string': '"""#000"""'}), "(rgb_string='#000')\n", (512, 531), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((533, 563), 'graphic_coloring_engine.core.ColorChoice', 'ColorChoice', ([], {'rgb_string': '"""#fff"""'}), "(rgb_string='#fff')\n", (544, 563), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((2334, 2421), 'graphic_coloring_engine.core.Layer', 'Layer', ([], {'order': '(0)', 'bbox_coordinate': 'self.text_coord_2', 'color_mutable': '(True)', 'type': '"""text"""'}), "(order=0, bbox_coordinate=self.text_coord_2, color_mutable=True, type=\n 'text')\n", (2339, 2421), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((6480, 6504), 'graphic_coloring_engine.core.Color', 'Color', ([], {'rgb_string': '"""#000"""'}), "(rgb_string='#000')\n", (6485, 6504), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((1280, 1326), 'graphic_coloring_engine.core.DominantColor', 'DominantColor', ([], {'rgb_string': '"""#1f1f1f"""', 'ratio': '(0.8)'}), "(rgb_string='#1f1f1f', ratio=0.8)\n", (1293, 1326), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((1544, 1587), 'graphic_coloring_engine.core.DominantColor', 'DominantColor', ([], {'rgb_string': '"""#88F"""', 'ratio': '(0.8)'}), "(rgb_string='#88F', ratio=0.8)\n", (1557, 1587), False, 'from graphic_coloring_engine.core import Color, ColorChoice, ColoringEngine, ColoringEngineConstants, Coordinate, DominantColor, Layer, Layout\n'), ((1865, 1914), 'shapely.geometry.polygon.Polygon', 'Polygon', (['[(10, 20), (40, 20), (40, 30), (10, 30)]'], {}), '([(10, 20), (40, 20), (40, 30), (10, 30)])\n', (1872, 1914), False, 'from shapely.geometry.polygon import Polygon\n')]
|
'''
Hooks for using TensorRT with the object detection program.
Names and parameters are defined as required by the detect.py infrastructure.
'''
import tensorflow as tf
import tensorflow.contrib.tensorrt as trt
def load_graph_tensorrt(params):
graph_def = tf.compat.v1.GraphDef()
with tf.compat.v1.gfile.GFile(params["FROZEN_GRAPH"], 'rb') as f:
graph_def.ParseFromString(f.read())
tf.import_graph_def(graph_def, name='')
trt_graph = trt.create_inference_graph(
input_graph_def=graph_def,
outputs=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'],
max_batch_size=params["BATCH_SIZE"],
max_workspace_size_bytes=4000000000,
is_dynamic_op=True if params["TENSORRT_DYNAMIC"]==1 else False,
precision_mode=params["TENSORRT_PRECISION"]
)
tf.import_graph_def(
trt_graph,
return_elements=['detection_boxes:0','detection_scores:0','detection_classes:0','num_detections:0'])
## No longer needed.
def convert_from_tensorrt(tmp_output_dict ):
return tmp_output_dict
### Tensor names differ from the normal TF names, but they can be retrieved and a dict with the same shape as the original one can be formed, avoiding a conversion step after the postprocessing.
# Note that for the TF session the names are enough; there is no real need to fetch the tensors themselves.
def get_handles_to_tensors_RT():
graph = tf.get_default_graph()
tensor_dict = {}
tensor_dict['num_detections'] = graph.get_tensor_by_name('import/num_detections:0')
    tensor_dict['detection_classes'] = graph.get_tensor_by_name('import/detection_classes:0')
tensor_dict['detection_boxes'] = graph.get_tensor_by_name('import/detection_boxes:0')
tensor_dict['detection_scores'] = graph.get_tensor_by_name('import/detection_scores:0')
    image_tensor = graph.get_tensor_by_name('import/image_tensor:0')
return tensor_dict, image_tensor
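# Sketch of the expected call pattern inside a tf.compat.v1.Session
# (assumption; detect.py itself is not shown in this sample):
#   tensor_dict, image_tensor = get_handles_to_tensors_RT()
#   output = sess.run(tensor_dict, feed_dict={image_tensor: batch})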
|
[
"tensorflow.compat.v1.gfile.GFile",
"tensorflow.contrib.tensorrt.create_inference_graph",
"tensorflow.import_graph_def",
"tensorflow.get_default_graph",
"tensorflow.compat.v1.GraphDef"
] |
[((262, 285), 'tensorflow.compat.v1.GraphDef', 'tf.compat.v1.GraphDef', ([], {}), '()\n', (283, 285), True, 'import tensorflow as tf\n'), ((452, 809), 'tensorflow.contrib.tensorrt.create_inference_graph', 'trt.create_inference_graph', ([], {'input_graph_def': 'graph_def', 'outputs': "['detection_boxes:0', 'detection_scores:0', 'detection_classes:0',\n 'num_detections:0']", 'max_batch_size': "params['BATCH_SIZE']", 'max_workspace_size_bytes': '(4000000000)', 'is_dynamic_op': "(True if params['TENSORRT_DYNAMIC'] == 1 else False)", 'precision_mode': "params['TENSORRT_PRECISION']"}), "(input_graph_def=graph_def, outputs=[\n 'detection_boxes:0', 'detection_scores:0', 'detection_classes:0',\n 'num_detections:0'], max_batch_size=params['BATCH_SIZE'],\n max_workspace_size_bytes=4000000000, is_dynamic_op=True if params[\n 'TENSORRT_DYNAMIC'] == 1 else False, precision_mode=params[\n 'TENSORRT_PRECISION'])\n", (478, 809), True, 'import tensorflow.contrib.tensorrt as trt\n'), ((842, 980), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['trt_graph'], {'return_elements': "['detection_boxes:0', 'detection_scores:0', 'detection_classes:0',\n 'num_detections:0']"}), "(trt_graph, return_elements=['detection_boxes:0',\n 'detection_scores:0', 'detection_classes:0', 'num_detections:0'])\n", (861, 980), True, 'import tensorflow as tf\n'), ((1424, 1446), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (1444, 1446), True, 'import tensorflow as tf\n'), ((293, 347), 'tensorflow.compat.v1.gfile.GFile', 'tf.compat.v1.gfile.GFile', (["params['FROZEN_GRAPH']", '"""rb"""'], {}), "(params['FROZEN_GRAPH'], 'rb')\n", (317, 347), True, 'import tensorflow as tf\n'), ((398, 437), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (417, 437), True, 'import tensorflow as tf\n')]
|
import unittest
import os
import sys
FILE_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(FILE_PATH)
from ZTF_correction import *
if __name__ == "__main__":
unittest.main()
|
[
"sys.path.append",
"os.path.abspath",
"unittest.main"
] |
[((92, 118), 'sys.path.append', 'sys.path.append', (['FILE_PATH'], {}), '(FILE_PATH)\n', (107, 118), False, 'import sys\n'), ((65, 90), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (80, 90), False, 'import os\n'), ((181, 196), 'unittest.main', 'unittest.main', ([], {}), '()\n', (194, 196), False, 'import unittest\n')]
|
import os
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or os.urandom(32)
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or 'postgresql://superiorkid:root@localhost/todo'
SQLALCHEMY_TRACK_MODIFICATIONS = False
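# Typical wiring (assumption; the sample itself doesn't show the app factory):
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object(Config)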
|
[
"os.environ.get",
"os.urandom"
] |
[((48, 76), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (62, 76), False, 'import os\n'), ((80, 94), 'os.urandom', 'os.urandom', (['(32)'], {}), '(32)\n', (90, 94), False, 'import os\n'), ((123, 153), 'os.environ.get', 'os.environ.get', (['"""DATABASE_URL"""'], {}), "('DATABASE_URL')\n", (137, 153), False, 'import os\n')]
|
from pyllist import dllist
# Classical implementation; requires manipulating indexes
def ins_sort(array):
for i in range(1, len(array)):
for k in range(i, 0, -1):
if array[k] < array[k - 1]:
array[k], array[k - 1] = array[k - 1], array[k]
return array
# Linked-list implementation; demonstrates iteration starting from a given node
def ins_sort_llist(data):
for card in data.first.next.iternext(): # Start iterating from the second!
for left_card in card.iterprev():
if left_card.prev is not None and left_card.value < left_card.prev.value:
left_card.value, left_card.prev.value = left_card.prev.value, left_card.value
return data
# Linked-list implementation; demonstrates other types of iteration
# and moves nodes instead of their values, which isn't really efficient
def ins_sort_llist2(data):
for card in data.first.next.iternext():
for left_card in data.iternodes(to=card):
if left_card.value > card.value:
data.remove(card)
data.insert(card, before=left_card)
break
return data
data = [6, 5, 32, 8, 234, 5, 1, 9, 0, 33]
print(ins_sort(data))
data_llist = dllist([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])
print(ins_sort_llist(data_llist))
data_llist = dllist([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])
print(ins_sort_llist2(data_llist))
|
[
"pyllist.dllist"
] |
[((1238, 1280), 'pyllist.dllist', 'dllist', (['[6, 5, 32, 8, 234, 5, 1, 9, 0, 33]'], {}), '([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])\n', (1244, 1280), False, 'from pyllist import dllist\n'), ((1329, 1371), 'pyllist.dllist', 'dllist', (['[6, 5, 32, 8, 234, 5, 1, 9, 0, 33]'], {}), '([6, 5, 32, 8, 234, 5, 1, 9, 0, 33])\n', (1335, 1371), False, 'from pyllist import dllist\n')]
|
"""
Script for generating a .csv.cuck file with a few parameters.
"""
import sys, os, random
def getPathFromFileName(fname):
    # Resolve bare file names relative to this script's directory.
    if os.sep not in fname:
        fname = os.path.join(os.path.dirname(__file__), fname)
    return fname
if __name__ == "__main__":
### arg parsing
    if len(sys.argv) < 6 + 1:
print("This script will generate a .csv.cuck file that describes a cuckoo filter with the given amount of added keys of random length between a minimum and a maximum")
print("Usage: python3 generate_testfile.py cuckootestfile 50 4 100 6 20")
        print("arg 0: outputfilename (.csv.cuck will be appended)")
print("arg 1: cuckoo num buckets log 2")
print("arg 2: cuckoo num items per bucket")
print("arg 3: number of items to generate")
print("arg 4: min length of an item")
print("arg 5: max length of an item")
quit()
out_fname = sys.argv[0 + 1] + ".csv.cuck"
num_bucks = int(sys.argv[1 + 1])
num_nests = int(sys.argv[2 + 1])
num_items = int(sys.argv[3 + 1])
min_len = int(sys.argv[4 + 1])
max_len = int(sys.argv[5 + 1])
F_out = open(getPathFromFileName(out_fname), "w+")
    print(f"# testparameters num_items:{num_items}, min_len:{min_len}, max_len:{max_len}", file=F_out)
print(",".join(["cuckoofilter"]+[f"{num_bucks:#0{4}x}",f"{num_nests:#0{4}x}"]), file=F_out)
for i in range(num_items):
print(",".join(
["add"]+
[f"{random.getrandbits(8):#0{4}x}" for i in range(random.randint(min_len,max_len))] ),
file=F_out)
|
[
"os.path.dirname",
"random.randint",
"random.getrandbits"
] |
[((195, 220), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'import sys, os, random\n'), ((1503, 1524), 'random.getrandbits', 'random.getrandbits', (['(8)'], {}), '(8)\n', (1521, 1524), False, 'import sys, os, random\n'), ((1549, 1581), 'random.randint', 'random.randint', (['min_len', 'max_len'], {}), '(min_len, max_len)\n', (1563, 1581), False, 'import sys, os, random\n')]
|
import pcbnew
import os
from .pcbnew2boardview import convert
class Pcbnew2Boardview(pcbnew.ActionPlugin):
def defaults(self):
self.name = "Pcbnew to Boardview"
self.category = "Read PCB"
self.description = "Generate Boardview file from KiCad pcb."
def Run(self):
kicad_pcb = pcbnew.GetBoard()
with open(kicad_pcb.GetFileName().replace('.kicad_pcb', '.brd'), 'wt') as brd_file:
convert(kicad_pcb, brd_file)
plugin = Pcbnew2Boardview()
plugin.register()
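# Flow sketch (assumption; KiCad specifics vary by version): KiCad imports
# this module as an action plugin and calls Run() when the action is invoked,
# writing the .brd boardview next to the opened .kicad_pcb file.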
|
[
"pcbnew.GetBoard"
] |
[((321, 338), 'pcbnew.GetBoard', 'pcbnew.GetBoard', ([], {}), '()\n', (336, 338), False, 'import pcbnew\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-12 02:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('monitor', '0003_auto_20170912_1018'),
]
operations = [
migrations.AddField(
model_name='exchangehistory',
name='btc_cny',
field=models.DecimalField(decimal_places=20, default=0, max_digits=30),
preserve_default=False,
),
migrations.AddField(
model_name='exchangehistory',
name='xmr_btc',
field=models.DecimalField(decimal_places=20, default=0, max_digits=30),
preserve_default=False,
),
]
|
[
"django.db.models.DecimalField"
] |
[((409, 473), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(20)', 'default': '(0)', 'max_digits': '(30)'}), '(decimal_places=20, default=0, max_digits=30)\n', (428, 473), False, 'from django.db import migrations, models\n'), ((639, 703), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'decimal_places': '(20)', 'default': '(0)', 'max_digits': '(30)'}), '(decimal_places=20, default=0, max_digits=30)\n', (658, 703), False, 'from django.db import migrations, models\n')]
|
import argparse
import datetime
DESCRIPTION = "Convert markdown to hugo post page"
HUGO_POST_FORMAT = """---
title: "{}"
date: {}
---
<!--more-->
"""
def convert(source, dest, img_dest, time):
    assert hasattr(source, "read")
    assert hasattr(dest, "write")
# create heading
title = source.readline().split("# ")[1].replace('\n', '')
if time is None:
time_now = datetime.datetime.now()
time = "{}-{}-{}".format(time_now.year, time_now.month, time_now.day)
head = HUGO_POST_FORMAT.format(title, time)
dest.write(head)
data = ""
for l in source.readlines():
line = l.replace("img/", "/img/" + img_dest)
data += line
dest.write(data)
def main():
parser = argparse.ArgumentParser(description=DESCRIPTION)
# arguments list
parser.add_argument(dest='src',
metavar="src_file",
type=argparse.FileType("r"),
help="Markdown file")
parser.add_argument(dest='dst',
metavar="dst_file",
type=argparse.FileType("w"),
help="Hugo-converted file")
parser.add_argument('-d', dest='date', metavar='DATE', type=str,
help="Post creation date in format YYYY-MM-DD", default=None)
parser.add_argument('-i', dest='img', metavar='SUBDIR', type=str,
help="Subdirectory of '/img'", default="")
args = parser.parse_args()
convert(args.src, args.dst, '{}/'.format(args.img), args.date)
if __name__ == "__main__":
main()
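# Example invocation (sketch; script and file names are hypothetical):
#   python md2hugo.py notes.md content/post.md -d 2017-09-12 -i posts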
|
[
"datetime.datetime.now",
"argparse.ArgumentParser",
"argparse.FileType"
] |
[((745, 793), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION'}), '(description=DESCRIPTION)\n', (768, 793), False, 'import argparse\n'), ((394, 417), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (415, 417), False, 'import datetime\n'), ((931, 953), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (948, 953), False, 'import argparse\n'), ((1112, 1134), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (1129, 1134), False, 'import argparse\n')]
|
import inspect
import pytest
import acconeer.exptool.structs.configbase as cb
from acconeer.exptool import configs
from acconeer.exptool.clients.reg import regmap
from acconeer.exptool.modes import Mode
BO = regmap.BYTEORDER
def test_full_names_unique():
unique_names = set([r.full_name for r in regmap.REGISTERS])
assert len(regmap.REGISTERS) == len(unique_names)
def test_get_reg_status():
reg = regmap.get_reg("status")
assert reg.full_name == "status"
assert reg == regmap.STATUS_REG
assert reg.bitset_flags == regmap.STATUS_FLAGS
assert reg.bitset_masks == regmap.STATUS_MASKS
assert regmap.get_reg(reg) == reg
assert regmap.get_reg(reg.addr) == reg
def test_get_reg():
with pytest.raises(ValueError):
regmap.get_reg("does-not-exist")
assert regmap.get_reg("iq_sampling_mode").full_name == "iq_sampling_mode"
assert regmap.get_reg("iq_sampling_mode", "iq").full_name == "iq_sampling_mode"
assert regmap.get_reg("sampling_mode", "iq").full_name == "iq_sampling_mode"
with pytest.raises(ValueError):
regmap.get_reg("iq_sampling_mode", "sparse")
with pytest.raises(ValueError):
regmap.get_reg("sampling_mode") # ambiguous
reg = regmap.get_reg("sp_start")
with pytest.raises(ValueError):
regmap.get_reg(reg.addr) # ambiguous
assert regmap.get_reg(reg.addr, reg.modes[0]) == reg
def test_config_to_reg_map_completeness():
m = regmap.CONFIG_TO_STRIPPED_REG_NAME_MAP
assert len(m) == len(set(m))
all_config_attrs = set()
for mode, config_class in configs.MODE_TO_CONFIG_CLASS_MAP.items():
attrs = [k for k, v in inspect.getmembers(config_class) if isinstance(v, cb.Parameter)]
all_config_attrs.update(attrs)
for attr in attrs:
reg_name = m[attr]
if reg_name is None:
continue
reg = regmap.get_reg(reg_name, mode)
assert reg.category in [regmap.Category.CONFIG, regmap.Category.GENERAL]
assert all_config_attrs == set(m.keys())
def test_encode_bitset():
reg = regmap.STATUS_REG
assert reg.data_type == regmap.DataType.BITSET
created = regmap.STATUS_FLAGS.CREATED
activated = regmap.STATUS_FLAGS.ACTIVATED
truth = int(activated).to_bytes(4, BO)
assert reg.encode(activated) == truth
assert reg.encode(int(activated)) == truth
assert reg.encode("activated") == truth
assert reg.encode("ACTIVATED") == truth
assert reg.encode(["activated"]) == truth
truth = int(0).to_bytes(4, BO)
assert reg.encode([]) == truth
assert reg.encode(0) == truth
truth = int(created | activated).to_bytes(4, BO)
assert reg.encode(created | activated) == truth
assert reg.encode(["created", "activated"]) == truth
def test_decode_bitset():
reg = regmap.STATUS_REG
created = regmap.STATUS_FLAGS.CREATED
activated = regmap.STATUS_FLAGS.ACTIVATED
assert reg.decode(reg.encode(created)) == created
assert reg.decode(reg.encode(created | activated)) == created | activated
def test_encode_enum():
reg = regmap.get_reg("mode_selection")
assert reg.data_type == regmap.DataType.ENUM
envelope = reg.enum.ENVELOPE
truth = int(envelope).to_bytes(4, BO)
assert reg.encode(envelope) == truth
assert reg.encode(int(envelope)) == truth
assert reg.encode("envelope") == truth
assert reg.encode("ENVELOPE") == truth
# Implicit remapping
assert reg.encode(Mode.ENVELOPE) == truth
# Explicit remapping
reg = regmap.get_reg("repetition_mode")
truth = int(reg.enum.STREAMING).to_bytes(4, BO)
assert reg.encode(configs.BaseServiceConfig.RepetitionMode.SENSOR_DRIVEN) == truth
def test_decode_enum():
reg = regmap.get_reg("mode_selection")
envelope = reg.enum.ENVELOPE
assert reg.decode(reg.encode(envelope)) == envelope
def test_encode_bool():
reg = regmap.get_reg("tx_disable")
assert reg.data_type == regmap.DataType.BOOL
assert reg.encode(False) == int(0).to_bytes(4, BO)
assert reg.encode(True) == int(1).to_bytes(4, BO)
assert reg.encode(0) == int(0).to_bytes(4, BO)
assert reg.encode(1) == int(1).to_bytes(4, BO)
assert reg.encode(123) == int(1).to_bytes(4, BO)
def test_decode_bool():
reg = regmap.get_reg("tx_disable")
assert reg.decode(reg.encode(True)) is True
def test_encode_int():
pass # tested in float
def test_decode_int():
pass
def test_encode_uint():
reg = regmap.get_reg("downsampling_factor")
assert reg.data_type == regmap.DataType.UINT32
assert reg.encode(0) == int(0).to_bytes(4, BO, signed=True)
assert reg.encode(1234) == int(1234).to_bytes(4, BO, signed=True)
with pytest.raises(ValueError):
reg.encode(-123)
def test_decode_uint():
reg = regmap.get_reg("downsampling_factor")
assert reg.decode(reg.encode(1234)) == 1234
def test_encode_float():
reg = regmap.get_reg("range_start")
assert reg.full_name == "range_start"
assert reg.float_scale == pytest.approx(1000)
assert reg.data_type == regmap.DataType.INT32
assert reg.encode(0) == int(0).to_bytes(4, BO, signed=True)
assert reg.encode(0.123) == int(123).to_bytes(4, BO, signed=True)
assert reg.encode(-0.123) == int(-123).to_bytes(4, BO, signed=True)
def test_decode_float():
reg = regmap.get_reg("range_start")
assert reg.decode(reg.encode(0.123)) == pytest.approx(0.123)
|
[
"acconeer.exptool.clients.reg.regmap.get_reg",
"pytest.raises",
"pytest.approx",
"acconeer.exptool.configs.MODE_TO_CONFIG_CLASS_MAP.items",
"inspect.getmembers"
] |
[((418, 442), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""status"""'], {}), "('status')\n", (432, 442), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1233, 1259), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""sp_start"""'], {}), "('sp_start')\n", (1247, 1259), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1587, 1627), 'acconeer.exptool.configs.MODE_TO_CONFIG_CLASS_MAP.items', 'configs.MODE_TO_CONFIG_CLASS_MAP.items', ([], {}), '()\n', (1625, 1627), False, 'from acconeer.exptool import configs\n'), ((3107, 3139), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""mode_selection"""'], {}), "('mode_selection')\n", (3121, 3139), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((3546, 3579), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""repetition_mode"""'], {}), "('repetition_mode')\n", (3560, 3579), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((3755, 3787), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""mode_selection"""'], {}), "('mode_selection')\n", (3769, 3787), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((3914, 3942), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""tx_disable"""'], {}), "('tx_disable')\n", (3928, 3942), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((4293, 4321), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""tx_disable"""'], {}), "('tx_disable')\n", (4307, 4321), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((4494, 4531), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""downsampling_factor"""'], {}), "('downsampling_factor')\n", (4508, 4531), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((4816, 4853), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""downsampling_factor"""'], {}), "('downsampling_factor')\n", (4830, 4853), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((4940, 4969), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""range_start"""'], {}), "('range_start')\n", (4954, 4969), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((5356, 5385), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""range_start"""'], {}), "('range_start')\n", (5370, 5385), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((629, 648), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['reg'], {}), '(reg)\n', (643, 648), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((667, 691), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['reg.addr'], {}), '(reg.addr)\n', (681, 691), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((730, 755), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (743, 755), False, 'import pytest\n'), ((765, 797), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""does-not-exist"""'], {}), "('does-not-exist')\n", (779, 797), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1052, 1077), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1065, 1077), False, 'import pytest\n'), ((1087, 1131), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""iq_sampling_mode"""', '"""sparse"""'], {}), "('iq_sampling_mode', 'sparse')\n", (1101, 1131), False, 'from acconeer.exptool.clients.reg import regmap\n'), 
((1142, 1167), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1155, 1167), False, 'import pytest\n'), ((1177, 1208), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""sampling_mode"""'], {}), "('sampling_mode')\n", (1191, 1208), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1270, 1295), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1283, 1295), False, 'import pytest\n'), ((1305, 1329), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['reg.addr'], {}), '(reg.addr)\n', (1319, 1329), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1355, 1393), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['reg.addr', 'reg.modes[0]'], {}), '(reg.addr, reg.modes[0])\n', (1369, 1393), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((4728, 4753), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4741, 4753), False, 'import pytest\n'), ((5042, 5061), 'pytest.approx', 'pytest.approx', (['(1000)'], {}), '(1000)\n', (5055, 5061), False, 'import pytest\n'), ((5431, 5451), 'pytest.approx', 'pytest.approx', (['(0.123)'], {}), '(0.123)\n', (5444, 5451), False, 'import pytest\n'), ((810, 844), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""iq_sampling_mode"""'], {}), "('iq_sampling_mode')\n", (824, 844), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((888, 928), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""iq_sampling_mode"""', '"""iq"""'], {}), "('iq_sampling_mode', 'iq')\n", (902, 928), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((972, 1009), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['"""sampling_mode"""', '"""iq"""'], {}), "('sampling_mode', 'iq')\n", (986, 1009), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1901, 1931), 'acconeer.exptool.clients.reg.regmap.get_reg', 'regmap.get_reg', (['reg_name', 'mode'], {}), '(reg_name, mode)\n', (1915, 1931), False, 'from acconeer.exptool.clients.reg import regmap\n'), ((1660, 1692), 'inspect.getmembers', 'inspect.getmembers', (['config_class'], {}), '(config_class)\n', (1678, 1692), False, 'import inspect\n')]
|
"""Align face images given landmarks."""
# MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
import os
import warnings
import argparse
import random
import cv2
from align.mtcnntf import detector
from align.matlab_cp2tform import get_similarity_transform_for_cv2
def align(src_img, src_pts, ref_pts, image_size, scale=1.0, transpose_input=False):
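    """Warp `src_img` with the similarity transform that maps `src_pts` onto `ref_pts`."""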
w, h = image_size = tuple(image_size)
# Actual offset = new center - old center (scaled)
scale_ = max(w,h) * scale
cx_ref = cy_ref = 0.
offset_x = 0.5 * w - cx_ref * scale_
offset_y = 0.5 * h - cy_ref * scale_
s = np.array(src_pts).astype(np.float32).reshape([-1,2])
r = np.array(ref_pts).astype(np.float32) * scale_ + np.array([[offset_x, offset_y]])
if transpose_input:
s = s.reshape([2,-1]).T
tfm = get_similarity_transform_for_cv2(s, r)
dst_img = cv2.warpAffine(src_img, tfm, image_size)
s_new = np.concatenate([s.reshape([2,-1]), np.ones((1, s.shape[0]))])
s_new = np.matmul(tfm, s_new)
s_new = s_new.reshape([-1]) if transpose_input else s_new.T.reshape([-1])
# tfm = tfm.reshape([-1])
return dst_img, s_new, tfm
def detect_align(image, image_size=(256,256), scale=0.7, transpose_input=False):
bboxes, landmarks = detector.detect(image)
if len(bboxes) == 0 : return None
elif len(bboxes) > 1:
        img_size = np.asarray(image.shape[:2])  # as an array so img_size / 2 works elementwise
bbox_size = bboxes[:,2] * bboxes[:,3]
img_center = img_size / 2
offsets = np.vstack([ bboxes[:,0]+0.5*bboxes[:,2]-img_center[1], bboxes[:,1]+0.5*bboxes[:,3]-img_center[0] ])
offset_dist_squared = np.sum(np.power(offsets,2.0),0)
        index = np.argmax(bbox_size - offset_dist_squared * 2.0) # prefer large faces, with some extra weight on the centering
bboxes = bboxes[index][None]
landmarks = landmarks[index][None]
src_pts = landmarks[0]
ref_pts = np.array( [[ -1.58083929e-01, -3.84258929e-02],
[ 1.56533929e-01, -4.01660714e-02],
[ 2.25000000e-04, 1.40505357e-01],
[ -1.29024107e-01, 3.24691964e-01],
[ 1.31516964e-01, 3.23250893e-01]])
img_new, new_pts, tfm = align(image, src_pts, ref_pts, image_size, scale, transpose_input)
return img_new, tfm
|
[
"align.matlab_cp2tform.get_similarity_transform_for_cv2",
"numpy.argmax",
"numpy.power",
"numpy.ones",
"align.mtcnntf.detector.detect",
"cv2.warpAffine",
"numpy.array",
"numpy.matmul",
"numpy.vstack"
] |
[((1998, 2036), 'align.matlab_cp2tform.get_similarity_transform_for_cv2', 'get_similarity_transform_for_cv2', (['s', 'r'], {}), '(s, r)\n', (2030, 2036), False, 'from align.matlab_cp2tform import get_similarity_transform_for_cv2\n'), ((2051, 2091), 'cv2.warpAffine', 'cv2.warpAffine', (['src_img', 'tfm', 'image_size'], {}), '(src_img, tfm, image_size)\n', (2065, 2091), False, 'import cv2\n'), ((2179, 2200), 'numpy.matmul', 'np.matmul', (['tfm', 's_new'], {}), '(tfm, s_new)\n', (2188, 2200), True, 'import numpy as np\n'), ((2453, 2475), 'align.mtcnntf.detector.detect', 'detector.detect', (['image'], {}), '(image)\n', (2468, 2475), False, 'from align.mtcnntf import detector\n'), ((3048, 3211), 'numpy.array', 'np.array', (['[[-0.158083929, -0.0384258929], [0.156533929, -0.0401660714], [0.000225, \n 0.140505357], [-0.129024107, 0.324691964], [0.131516964, 0.323250893]]'], {}), '([[-0.158083929, -0.0384258929], [0.156533929, -0.0401660714], [\n 0.000225, 0.140505357], [-0.129024107, 0.324691964], [0.131516964, \n 0.323250893]])\n', (3056, 3211), True, 'import numpy as np\n'), ((1897, 1929), 'numpy.array', 'np.array', (['[[offset_x, offset_y]]'], {}), '([[offset_x, offset_y]])\n', (1905, 1929), True, 'import numpy as np\n'), ((2140, 2164), 'numpy.ones', 'np.ones', (['(1, s.shape[0])'], {}), '((1, s.shape[0]))\n', (2147, 2164), True, 'import numpy as np\n'), ((2673, 2790), 'numpy.vstack', 'np.vstack', (['[bboxes[:, 0] + 0.5 * bboxes[:, 2] - img_center[1], bboxes[:, 1] + 0.5 *\n bboxes[:, 3] - img_center[0]]'], {}), '([bboxes[:, 0] + 0.5 * bboxes[:, 2] - img_center[1], bboxes[:, 1] +\n 0.5 * bboxes[:, 3] - img_center[0]])\n', (2682, 2790), True, 'import numpy as np\n'), ((2851, 2887), 'numpy.argmax', 'np.argmax', (['(offset_dist_squared * 2.0)'], {}), '(offset_dist_squared * 2.0)\n', (2860, 2887), True, 'import numpy as np\n'), ((2810, 2832), 'numpy.power', 'np.power', (['offsets', '(2.0)'], {}), '(offsets, 2.0)\n', (2818, 2832), True, 'import numpy as np\n'), ((1788, 1805), 'numpy.array', 'np.array', (['src_pts'], {}), '(src_pts)\n', (1796, 1805), True, 'import numpy as np\n'), ((1849, 1866), 'numpy.array', 'np.array', (['ref_pts'], {}), '(ref_pts)\n', (1857, 1866), True, 'import numpy as np\n')]
|
import pyttsx3
class Speak:
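    """Minimal text-to-speech wrapper around a pyttsx3 engine (Windows SAPI5 driver)."""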
def __init__(self):
self.speaker = pyttsx3.init(driverName='sapi5')
# self.speaker.setProperty('voice', 'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_EN-US_ZIRA_11.0')
def speak(self, text):
self.speaker.say(text)
self.speaker.runAndWait()
|
[
"pyttsx3.init"
] |
[((77, 109), 'pyttsx3.init', 'pyttsx3.init', ([], {'driverName': '"""sapi5"""'}), "(driverName='sapi5')\n", (89, 109), False, 'import pyttsx3\n')]
|
from flask import render_template, request, redirect, url_for, session
from app import app
from model import *
@app.route('/', methods=["GET"])
def home():
if "username" in session:
return render_template('index.html')
else:
return render_template('login.html')
# Register new user
@app.route('/register', methods=["GET", "POST"])
def register():
if request.method == "GET":
return render_template("register.html")
elif request.method == "POST":
registerUser()
return redirect(url_for("login"))
# Check if the email already exists on the registration page
@app.route('/checkusername', methods=["POST"])
def check():
return checkusername()
# Everything login: routes to render the page, check whether the username exists, and verify the password via jQuery AJAX requests
@app.route('/login', methods=["GET"])
def login():
if request.method == "GET":
if "username" not in session:
return render_template("login.html")
else:
return redirect(url_for("home"))
@app.route('/checkloginusername', methods=["POST"])
def checkUserlogin():
return checkloginusername()
@app.route('/checkloginpassword', methods=["POST"])
def checkUserpassword():
return checkloginpassword()
#The admin logout
@app.route('/logout', methods=["GET"]) # URL for logout
def logout(): # logout function
session.pop('username', None) # remove user session
return redirect(url_for("home")) # redirect to home page with message
#Forgot Password
@app.route('/forgot-password', methods=["GET"])
def forgotpassword():
return render_template('forgot-password.html')
#404 Page
@app.route('/404', methods=["GET"])
def errorpage():
return render_template("404.html")
#Blank Page
@app.route('/blank', methods=["GET"])
def blank():
return render_template('blank.html')
#Buttons Page
@app.route('/buttons', methods=["GET"])
def buttons():
return render_template("buttons.html")
#Cards Page
@app.route('/cards', methods=["GET"])
def cards():
return render_template('cards.html')
#Charts Page
@app.route('/charts', methods=["GET"])
def charts():
return render_template("charts.html")
#Tables Page
@app.route('/tables', methods=["GET"])
def tables():
return render_template("tables.html")
#Utilities-animation
@app.route('/utilities-animation', methods=["GET"])
def utilitiesanimation():
return render_template("utilities-animation.html")
#Utilities-border
@app.route('/utilities-border', methods=["GET"])
def utilitiesborder():
return render_template("utilities-border.html")
#Utilities-color
@app.route('/utilities-color', methods=["GET"])
def utilitiescolor():
return render_template("utilities-color.html")
#utilities-other
@app.route('/utilities-other', methods=["GET"])
def utilitiesother():
return render_template("utilities-other.html")
|
[
"app.app.route",
"flask.url_for",
"flask.session.pop",
"flask.render_template"
] |
[((113, 144), 'app.app.route', 'app.route', (['"""/"""'], {'methods': "['GET']"}), "('/', methods=['GET'])\n", (122, 144), False, 'from app import app\n'), ((309, 356), 'app.app.route', 'app.route', (['"""/register"""'], {'methods': "['GET', 'POST']"}), "('/register', methods=['GET', 'POST'])\n", (318, 356), False, 'from app import app\n'), ((612, 657), 'app.app.route', 'app.route', (['"""/checkusername"""'], {'methods': "['POST']"}), "('/checkusername', methods=['POST'])\n", (621, 657), False, 'from app import app\n'), ((819, 855), 'app.app.route', 'app.route', (['"""/login"""'], {'methods': "['GET']"}), "('/login', methods=['GET'])\n", (828, 855), False, 'from app import app\n'), ((1050, 1100), 'app.app.route', 'app.route', (['"""/checkloginusername"""'], {'methods': "['POST']"}), "('/checkloginusername', methods=['POST'])\n", (1059, 1100), False, 'from app import app\n'), ((1157, 1207), 'app.app.route', 'app.route', (['"""/checkloginpassword"""'], {'methods': "['POST']"}), "('/checkloginpassword', methods=['POST'])\n", (1166, 1207), False, 'from app import app\n'), ((1285, 1322), 'app.app.route', 'app.route', (['"""/logout"""'], {'methods': "['GET']"}), "('/logout', methods=['GET'])\n", (1294, 1322), False, 'from app import app\n'), ((1525, 1571), 'app.app.route', 'app.route', (['"""/forgot-password"""'], {'methods': "['GET']"}), "('/forgot-password', methods=['GET'])\n", (1534, 1571), False, 'from app import app\n'), ((1657, 1691), 'app.app.route', 'app.route', (['"""/404"""'], {'methods': "['GET']"}), "('/404', methods=['GET'])\n", (1666, 1691), False, 'from app import app\n'), ((1762, 1798), 'app.app.route', 'app.route', (['"""/blank"""'], {'methods': "['GET']"}), "('/blank', methods=['GET'])\n", (1771, 1798), False, 'from app import app\n'), ((1869, 1907), 'app.app.route', 'app.route', (['"""/buttons"""'], {'methods': "['GET']"}), "('/buttons', methods=['GET'])\n", (1878, 1907), False, 'from app import app\n'), ((1980, 2016), 'app.app.route', 'app.route', (['"""/cards"""'], {'methods': "['GET']"}), "('/cards', methods=['GET'])\n", (1989, 2016), False, 'from app import app\n'), ((2086, 2123), 'app.app.route', 'app.route', (['"""/charts"""'], {'methods': "['GET']"}), "('/charts', methods=['GET'])\n", (2095, 2123), False, 'from app import app\n'), ((2195, 2232), 'app.app.route', 'app.route', (['"""/tables"""'], {'methods': "['GET']"}), "('/tables', methods=['GET'])\n", (2204, 2232), False, 'from app import app\n'), ((2312, 2362), 'app.app.route', 'app.route', (['"""/utilities-animation"""'], {'methods': "['GET']"}), "('/utilities-animation', methods=['GET'])\n", (2321, 2362), False, 'from app import app\n'), ((2464, 2511), 'app.app.route', 'app.route', (['"""/utilities-border"""'], {'methods': "['GET']"}), "('/utilities-border', methods=['GET'])\n", (2473, 2511), False, 'from app import app\n'), ((2606, 2652), 'app.app.route', 'app.route', (['"""/utilities-color"""'], {'methods': "['GET']"}), "('/utilities-color', methods=['GET'])\n", (2615, 2652), False, 'from app import app\n'), ((2745, 2791), 'app.app.route', 'app.route', (['"""/utilities-other"""'], {'methods': "['GET']"}), "('/utilities-other', methods=['GET'])\n", (2754, 2791), False, 'from app import app\n'), ((1378, 1407), 'flask.session.pop', 'session.pop', (['"""username"""', 'None'], {}), "('username', None)\n", (1389, 1407), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1605, 1644), 'flask.render_template', 'render_template', (['"""forgot-password.html"""'], {}), "('forgot-password.html')\n", (1620, 1644), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1720, 1747), 'flask.render_template', 'render_template', (['"""404.html"""'], {}), "('404.html')\n", (1735, 1747), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1823, 1852), 'flask.render_template', 'render_template', (['"""blank.html"""'], {}), "('blank.html')\n", (1838, 1852), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1934, 1965), 'flask.render_template', 'render_template', (['"""buttons.html"""'], {}), "('buttons.html')\n", (1949, 1965), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2041, 2070), 'flask.render_template', 'render_template', (['"""cards.html"""'], {}), "('cards.html')\n", (2056, 2070), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2149, 2179), 'flask.render_template', 'render_template', (['"""charts.html"""'], {}), "('charts.html')\n", (2164, 2179), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2258, 2288), 'flask.render_template', 'render_template', (['"""tables.html"""'], {}), "('tables.html')\n", (2273, 2288), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2400, 2443), 'flask.render_template', 'render_template', (['"""utilities-animation.html"""'], {}), "('utilities-animation.html')\n", (2415, 2443), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2546, 2586), 'flask.render_template', 'render_template', (['"""utilities-border.html"""'], {}), "('utilities-border.html')\n", (2561, 2586), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2686, 2725), 'flask.render_template', 'render_template', (['"""utilities-color.html"""'], {}), "('utilities-color.html')\n", (2701, 2725), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((2825, 2864), 'flask.render_template', 'render_template', (['"""utilities-other.html"""'], {}), "('utilities-other.html')\n", (2840, 2864), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((202, 231), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (217, 231), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((257, 286), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (272, 286), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((420, 452), 'flask.render_template', 'render_template', (['"""register.html"""'], {}), "('register.html')\n", (435, 452), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1451, 1466), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (1458, 1466), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((958, 987), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (973, 987), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((535, 551), 'flask.url_for', 'url_for', (['"""login"""'], {}), "('login')\n", (542, 551), False, 'from flask import render_template, request, redirect, url_for, session\n'), ((1030, 1045), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (1037, 1045), False, 'from flask import render_template, request, redirect, url_for, session\n')]
|
import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp
import math
import MinkowskiEngine as ME
from torch.utils.data.sampler import Sampler
import os,sys
MAX_POINTS=3000000
SEM_COLOR_MAP = {
0: (0., 0., 0.),
1: (174., 199., 232.),
2: (152., 223., 138.),
3: (31., 119., 180.),
4: (255., 187., 120.),
5: (188., 189., 34.),
6: (140., 86., 75.),
7: (255., 152., 150.),
8: (214., 39., 40.),
9: (197., 176., 213.),
10: (148., 103., 189.),
11: (196., 156., 148.),
12: (23., 190., 207.),
14: (247., 182., 210.),
15: (66., 188., 102.),
16: (219., 219., 141.),
17: (140., 57., 197.),
18: (202., 185., 52.),
19: (51., 176., 203.),
20: (200., 54., 131.),
21: (92., 193., 61.),
22: (78., 71., 183.),
23: (172., 114., 82.),
24: (255., 127., 14.),
25: (91., 163., 138.),
26: (153., 98., 156.),
27: (140., 153., 101.),
28: (158., 218., 229.),
29: (100., 125., 154.),
30: (178., 127., 135.),
32: (146., 111., 194.),
33: (44., 160., 44.),
34: (112., 128., 144.),
35: (96., 207., 209.),
36: (227., 119., 194.),
37: (213., 92., 176.),
38: (94., 106., 211.),
39: (82., 84., 163.),
40: (100., 85., 144.),
}
# semantic label remapper
SEM_CLASS_LABELS = ('wall', 'floor', 'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
'bookshelf', 'picture', 'counter', 'desk', 'curtain', 'refrigerator',
'shower curtain', 'toilet', 'sink', 'bathtub', 'otherfurniture')
SEM_VALID_CLASS_IDS = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39]
SEM_REMAPPER=np.ones(150)*(20)
for i,x in enumerate(SEM_VALID_CLASS_IDS):
SEM_REMAPPER[x]=i
# scene type remapper
TYPE_CLASS_LABELS=('aparment','bathroom','bedroom','conference room','copy','hallway','kitchen','laundry room','living room','office','storage','misc')
TYPE_VALID_CLASS_IDS=[1,2,3,4,8,9,13,14,15,16,18,20,21]
TYPE_REMAPPER=np.ones(22)*(12)
for i,x in enumerate(TYPE_VALID_CLASS_IDS):
TYPE_REMAPPER[x]=i
'''
ScanNet dataset
'''
class ScanNetDataset(torch.utils.data.Dataset):
def __init__(self,path,augment=False,voxel_size=0.02,leave_rate=None,
crop_rate=None,skip_rate=1,ind_remove=None):
torch.utils.data.Dataset.__init__(self)
self.voxel_size=voxel_size
self.augment=augment
self.leave_rate=leave_rate
self.crop_rate=crop_rate
self.skip_rate=skip_rate
self.ind_remove=ind_remove
# load data
self.data=[]
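        # (ab)use a DataLoader purely as a parallel file loader:
        # each worker torch.load()s one preprocessed scene file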
for x in torch.utils.data.DataLoader(
glob.glob(path), collate_fn=lambda x: torch.load(x[0]),num_workers=mp.cpu_count()):
self.data.append(x)
        # preprocess train/val/test data
for i in range(len(self.data)):
# normalize colors
self.data[i]['feats']/=255
self.data[i]['feats']-=0.5
# scene type label
# self.data[i]['scene_label']=TYPE_REMAPPER[self.data[i]['scene_label']]
self.data[i]['scene_label']-=1
# semantic label
self.data[i]['sem_label']=SEM_REMAPPER[self.data[i]['sem_label'].astype('int')]
def __getitem__(self,n):
crn_sample=self.data[n]
xyz=crn_sample['coords']
feats=crn_sample['feats']
sem_labels=crn_sample['sem_label']
scene_type=crn_sample['scene_label']
scene_name=crn_sample['scene_name']
# filter by semantic index
ind_left=sem_labels!=self.ind_remove
xyz,feats,sem_labels=xyz[ind_left],feats[ind_left],sem_labels[ind_left]
# voxelization
sel = ME.utils.sparse_quantize(xyz / self.voxel_size, return_index=True)
down_xyz, down_feat,down_labels = xyz[sel],feats[sel],sem_labels[sel]
# Get coords, shift to center
coords = np.floor(down_xyz / self.voxel_size)
coords-=coords.min(0)
return (coords,down_feat,down_labels,scene_type,scene_name)
def __len__(self):
return len(self.data)
'''
collate data for each batch
'''
def collate_fn(list_data):
new_list_data = []
num_removed = 0
for data in list_data:
if data is not None:
new_list_data.append(data)
else:
num_removed += 1
list_data = new_list_data
if len(list_data) == 0:
raise ValueError('No data in the batch')
coords, feats, labels,scene_types,scene_names = list(zip(*list_data))
eff_num_batch = len(coords)
assert len(labels) == eff_num_batch
lens = [len(c) for c in coords]
    # drop trailing samples so the batch stays below MAX_POINTS total points
cum_len=np.cumsum(lens)
n_samples=(cum_len<MAX_POINTS).sum()
feats=feats[:n_samples]
labels=labels[:n_samples]
coords=coords[:n_samples]
scene_types=scene_types[:n_samples]
scene_names=scene_names[:n_samples]
# Concatenate all lists
curr_ptr = 0
num_tot_pts = sum(lens[:n_samples])
coords_batch = torch.zeros(num_tot_pts, 4)
feats_batch = torch.from_numpy(np.vstack(feats)).float()
labels_batch=torch.from_numpy(np.hstack(labels)).long()
scene_types_batch=torch.from_numpy(np.hstack(scene_types)).long()
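    # coords_batch layout: columns 0-2 hold voxel coordinates, column 3 the batch index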
for batch_id in range(n_samples):
coords_batch[curr_ptr:curr_ptr + lens[batch_id], :3] = torch.from_numpy(
coords[batch_id])
coords_batch[curr_ptr:curr_ptr + lens[batch_id], 3] = batch_id
curr_ptr += len(coords[batch_id])
return {
'coords': coords_batch,
'feats': feats_batch,
'sem_labels': labels_batch,
'clf_labels':scene_types_batch,
'scene_names':scene_names
}
class InfSampler(Sampler):
"""Samples elements randomly, without replacement.
Arguments:
data_source (Dataset): dataset to sample from
"""
def __init__(self, data_source, shuffle=True):
self.data_source = data_source
self.shuffle = shuffle
self.reset_permutation()
    def reset_permutation(self):
        n = len(self.data_source)
        if self.shuffle:
            self._perm = torch.randperm(n).tolist()
        else:
            # identity order when shuffling is disabled (an int has no .tolist())
            self._perm = list(range(n))
def __iter__(self):
return self
def __next__(self):
if len(self._perm) == 0:
self.reset_permutation()
return self._perm.pop()
def __len__(self):
return len(self.data_source)
def get_iterators(path_train,path_val,config):
# train loader
train_set=ScanNetDataset(path_train,augment=True,voxel_size=config['voxel_size'])
train_args = {
'batch_size': config['train_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'sampler':InfSampler(train_set),
'pin_memory': False,
'drop_last': False
}
train_loader = torch.utils.data.DataLoader(train_set, **train_args)
# val loader
val_set=ScanNetDataset(path_val,augment=False,voxel_size=config['voxel_size'])
val_args = {
'batch_size': config['val_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
val_loader = torch.utils.data.DataLoader(val_set,**val_args)
return {
'train': train_loader,
'val': val_loader
}
def get_testdataset(path_test,config):
test_set=ScanNetDataset(path_test,augment=False,voxel_size=config['voxel_size'])
val_args = {
'batch_size': config['test_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
test_loader = torch.utils.data.DataLoader(test_set,**val_args)
return test_loader
def get_valdataset(path_val,config):
# val loader
val_set=ScanNetDataset(path_val,augment=False,voxel_size=config['voxel_size'],
leave_rate=config['leave_rate'],crop_rate=config['crop_rate'],
skip_rate=config['skip_rate'],ind_remove=config['ind_remove'])
val_args = {
'batch_size': config['val_batch_size'],
'num_workers': config['num_workers'],
'collate_fn': collate_fn,
'pin_memory': False,
'drop_last': False
}
val_loader = torch.utils.data.DataLoader(val_set,**val_args)
return val_loader
|
[
"torch.utils.data.DataLoader",
"MinkowskiEngine.utils.sparse_quantize",
"numpy.floor",
"torch.load",
"numpy.ones",
"numpy.hstack",
"multiprocessing.cpu_count",
"numpy.cumsum",
"torch.utils.data.Dataset.__init__",
"torch.randperm",
"glob.glob",
"torch.zeros",
"numpy.vstack",
"torch.from_numpy"
] |
[((1684, 1696), 'numpy.ones', 'np.ones', (['(150)'], {}), '(150)\n', (1691, 1696), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2012, 2023), 'numpy.ones', 'np.ones', (['(22)'], {}), '(22)\n', (2019, 2023), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((4765, 4780), 'numpy.cumsum', 'np.cumsum', (['lens'], {}), '(lens)\n', (4774, 4780), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5104, 5131), 'torch.zeros', 'torch.zeros', (['num_tot_pts', '(4)'], {}), '(num_tot_pts, 4)\n', (5115, 5131), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((6937, 6989), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_set'], {}), '(train_set, **train_args)\n', (6964, 6989), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((7319, 7367), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_set'], {}), '(val_set, **val_args)\n', (7346, 7367), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((7805, 7854), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test_set'], {}), '(test_set, **val_args)\n', (7832, 7854), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((8421, 8469), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_set'], {}), '(val_set, **val_args)\n', (8448, 8469), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2317, 2356), 'torch.utils.data.Dataset.__init__', 'torch.utils.data.Dataset.__init__', (['self'], {}), '(self)\n', (2350, 2356), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((3784, 3850), 'MinkowskiEngine.utils.sparse_quantize', 'ME.utils.sparse_quantize', (['(xyz / self.voxel_size)'], {'return_index': '(True)'}), '(xyz / self.voxel_size, return_index=True)\n', (3808, 3850), True, 'import MinkowskiEngine as ME\n'), ((3985, 4021), 'numpy.floor', 'np.floor', (['(down_xyz / self.voxel_size)'], {}), '(down_xyz / self.voxel_size)\n', (3993, 4021), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5430, 5464), 'torch.from_numpy', 'torch.from_numpy', (['coords[batch_id]'], {}), '(coords[batch_id])\n', (5446, 5464), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2666, 2681), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (2675, 2681), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((6223, 6243), 'torch.randperm', 'torch.randperm', (['perm'], {}), '(perm)\n', (6237, 6243), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2733, 2747), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (2745, 2747), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5167, 5183), 'numpy.vstack', 'np.vstack', (['feats'], {}), '(feats)\n', (5176, 5183), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5227, 5244), 'numpy.hstack', 'np.hstack', (['labels'], {}), '(labels)\n', (5236, 5244), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((5292, 5314), 'numpy.hstack', 'np.hstack', (['scene_types'], {}), '(scene_types)\n', (5301, 5314), True, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n'), ((2704, 2720), 'torch.load', 'torch.load', (['x[0]'], {}), '(x[0])\n', (2714, 2720), False, 'import torch, numpy as np, glob, math, torch.utils.data, scipy.ndimage, multiprocessing as mp\n')]
|
# Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An implementation of the FedAvg algorithm with learning rate schedules.
This is intended to be a somewhat minimal implementation of Federated
Averaging that allows for client and server learning rate scheduling.
The original FedAvg is based on the paper:
Communication-Efficient Learning of Deep Networks from Decentralized Data
H. <NAME>, <NAME>, <NAME>,
<NAME>, <NAME>. AISTATS 2017.
https://arxiv.org/abs/1602.05629
"""
import collections
from typing import Callable, Optional, Union
import attr
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_federated.python.tensorflow_libs import tensor_utils
# Convenience type aliases.
ModelBuilder = Callable[[], tff.learning.Model]
OptimizerBuilder = Callable[[float], tf.keras.optimizers.Optimizer]
ClientWeightFn = Callable[..., float]
LRScheduleFn = Callable[[Union[int, tf.Tensor]], Union[tf.Tensor, float]]
def _initialize_optimizer_vars(model: tff.learning.Model,
optimizer: tf.keras.optimizers.Optimizer):
"""Ensures variables holding the state of `optimizer` are created."""
delta = tf.nest.map_structure(tf.zeros_like, _get_weights(model).trainable)
model_weights = _get_weights(model)
grads_and_vars = tf.nest.map_structure(lambda x, v: (x, v), delta,
model_weights.trainable)
optimizer.apply_gradients(grads_and_vars, name='server_update')
assert optimizer.variables()
def _get_weights(model: tff.learning.Model) -> tff.learning.ModelWeights:
return tff.learning.ModelWeights.from_model(model)
@attr.s(eq=False, order=False, frozen=True)
class ServerState(object):
"""Structure for state on the server.
Fields:
  - `model`: The primal model weights (a `tff.learning.ModelWeights`), consistent with FedAvg.
  - `dual_model_weights`: The dual model weights (`tff.learning.ModelWeights`).
  - `optimizer_state`: The server optimizer variables.
  - `elapsed_lr`: The cumulative server learning rate applied so far.
  - `round_num`: The current training round, as a float.
"""
model = attr.ib()
dual_model_weights = attr.ib() # actually model_weights
optimizer_state = attr.ib()
elapsed_lr = attr.ib()
round_num = attr.ib()
# This is a float to avoid type incompatibility when calculating learning rate
# schedules.
@tf.function
def server_update(primal_model, dual_model, server_optimizer, server_mirror,
server_state, weights_delta, elapsed_lr_delta):
"""Updates `server_state` based on `weights_delta`, increase the round number.
Args:
model: A `tff.learning.Model`.
dual_model: A `tff.learning.Model` for dual weights.
server_optimizer: A `tf.keras.optimizers.Optimizer`.
server_state: A `ServerState`, the state to be updated.
weights_delta: An update to the trainable variables of the model.
Returns:
An updated `ServerState`.
"""
dual_model_weights = _get_weights(dual_model)
  # the server state holds the dual model weights
tff.utils.assign(dual_model_weights, server_state.dual_model_weights)
# Server optimizer variables must be initialized prior to invoking this
tff.utils.assign(server_optimizer.variables(), server_state.optimizer_state)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
return server_state
# Apply the update to the model. We must multiply weights_delta by -1.0 to
# view it as a gradient that should be applied to the server_optimizer.
grads_and_vars = [
(-1.0 * x, v) for x, v in zip(weights_delta, dual_model_weights.trainable)
]
server_optimizer.apply_gradients(grads_and_vars)
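  # accumulate the total learning rate applied so far; it parameterizes the mirror step below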
elapsed_lr = server_state.elapsed_lr + elapsed_lr_delta * server_optimizer.lr
primal_model_weights = _get_weights(primal_model)
tff.utils.assign(primal_model_weights, dual_model_weights)
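  # map the dual iterate back to a primal iterate via the (in-place) mirror map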
server_mirror(primal_model_weights.trainable, lr=elapsed_lr)
# Create a new state based on the updated model.
return tff.utils.update_state(
server_state,
model=primal_model_weights,
dual_model_weights=dual_model_weights,
optimizer_state=server_optimizer.variables(),
elapsed_lr = elapsed_lr,
round_num=server_state.round_num + 1.0)
@attr.s(eq=False, order=False, frozen=True)
class ClientOutput(object):
"""Structure for outputs returned from clients during federated optimization.
Fields:
- `weights_delta`: A dictionary of updates to the model's trainable
variables.
- `client_weight`: Weight to be used in a weighted mean when
aggregating `weights_delta`.
- `model_output`: A structure matching
`tff.learning.Model.report_local_outputs`, reflecting the results of
training on the input dataset.
- `optimizer_output`: Additional metrics or other outputs defined by the
optimizer.
"""
weights_delta = attr.ib()
client_weight = attr.ib()
elapsed_lr_delta = attr.ib() # necessary for dual averaging
model_output = attr.ib()
optimizer_output = attr.ib()
def create_client_update_fn():
"""Returns a tf.function for the client_update.
This "create" fn is necesessary to prevent
"ValueError: Creating variables on a non-first call to a function decorated
with tf.function" errors due to the client optimizer creating variables. This
is really only needed because we test the client_update function directly.
"""
@tf.function
def client_update(primal_model,
dual_model,
dataset,
dual_initial_weights,
client_optimizer,
client_mirror,
elapsed_lr,
client_weight_fn=None,
client_weight_pow=1):
"""Updates client model.
    Args:
      primal_model: A `tff.learning.Model` holding the primal weights.
      dual_model: A `tff.learning.Model` holding the dual weights.
      dataset: A `tf.data.Dataset`.
      dual_initial_weights: A `tff.learning.Model.weights` from server.
      client_optimizer: A `tf.keras.optimizer.Optimizer` object.
      client_mirror: A callable that applies the mirror/projection map in place.
      elapsed_lr: The cumulative learning rate received from the server.
      client_weight_fn: Optional function that takes the output of
        `model.report_local_outputs` and returns a tensor that provides the
        weight in the federated average of model deltas. If not provided, the
        default is the total number of examples processed on device.
      client_weight_pow: Power applied to the example count when computing
        the client weight.
    Returns:
      A `ClientOutput`.
"""
primal_model_weights = _get_weights(primal_model)
dual_model_weights = _get_weights(dual_model)
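    # running total of the client learning rate; the increase is returned to
    # the server as `elapsed_lr_delta` for the dual-averaging update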
new_elapsed_lr = elapsed_lr
tff.utils.assign(dual_model_weights, dual_initial_weights)
num_examples = tf.constant(0, dtype=tf.int32)
for batch in dataset:
# assign dual to primal
tff.utils.assign(primal_model_weights, dual_model_weights)
# apply (in place) projector to primal model
client_mirror(primal_model_weights.trainable, lr=new_elapsed_lr)
# tape gradients
with tf.GradientTape() as tape:
output = primal_model.forward_pass(batch)
grads = tape.gradient(output.loss, primal_model_weights.trainable)
# zip gradient with DUAL trainable
grads_and_vars = zip(grads, dual_model_weights.trainable)
# apply gradients (to dual)
client_optimizer.apply_gradients(grads_and_vars)
num_examples += tf.shape(output.predictions)[0]
new_elapsed_lr += client_optimizer.lr
aggregated_outputs = primal_model.report_local_outputs()
weights_delta = tf.nest.map_structure(lambda a, b: a - b,
dual_model_weights.trainable,
dual_initial_weights.trainable)
weights_delta, has_non_finite_weight = (
tensor_utils.zero_all_if_any_non_finite(weights_delta))
if has_non_finite_weight > 0:
client_weight = tf.constant(0, dtype=tf.float32)
elif client_weight_fn is None:
      # num_examples is a symbolic Tensor inside tf.function, so use tf.cast/tf.pow
      # rather than Python float()
      client_weight = tf.pow(tf.cast(num_examples, tf.float32), float(client_weight_pow))
else:
client_weight = client_weight_fn(aggregated_outputs)
return ClientOutput(
weights_delta, client_weight,
new_elapsed_lr - elapsed_lr, aggregated_outputs,
collections.OrderedDict([('num_examples', num_examples)]))
return client_update
def build_server_init_fn(
model_fn: ModelBuilder,
server_optimizer_fn: Callable[[], tf.keras.optimizers.Optimizer]):
"""Builds a `tff.tf_computation` that returns the initial `ServerState`.
  The attributes `ServerState.dual_model_weights` and `ServerState.optimizer_state` are
initialized via their constructor functions. The attribute
`ServerState.round_num` is set to 0.0.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
Returns:
A `tff.tf_computation` that returns initial `ServerState`.
"""
@tff.tf_computation
def server_init_tf():
server_optimizer = server_optimizer_fn()
primal_model = model_fn()
dual_model = model_fn()
_initialize_optimizer_vars(dual_model, server_optimizer)
return ServerState(
model=_get_weights(primal_model),
dual_model_weights=_get_weights(dual_model),
optimizer_state=server_optimizer.variables(),
elapsed_lr=0.0,
round_num=0.0)
return server_init_tf
def build_fed_dual_avg_process(
model_fn: ModelBuilder,
client_optimizer_fn: OptimizerBuilder,
client_lr: Union[float, LRScheduleFn] = 0.1,
    client_mirror=(lambda weights, lr=None: None),  # no-op mirror by default
server_optimizer_fn: OptimizerBuilder = tf.keras.optimizers.SGD,
server_lr: Union[float, LRScheduleFn] = 1.0,
    server_mirror=(lambda weights, lr=None: None),  # no-op mirror by default
client_weight_fn: Optional[ClientWeightFn] = None,
client_weight_pow=1,
) -> tff.templates.IterativeProcess:
"""Builds the TFF computations for optimization using federated averaging.
Args:
model_fn: A no-arg function that returns a `tff.learning.Model`.
client_optimizer_fn: A function that accepts a `learning_rate` keyword
argument and returns a `tf.keras.optimizers.Optimizer` instance.
client_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
server_optimizer_fn: A function that accepts a `learning_rate` argument and
returns a `tf.keras.optimizers.Optimizer` instance.
server_lr: A scalar learning rate or a function that accepts a float
`round_num` argument and returns a learning rate.
client_weight_fn: Optional function that takes the output of
`model.report_local_outputs` and returns a tensor that provides the weight
in the federated average of model deltas. If not provided, the default is
the total number of examples processed on device.
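    client_mirror: A callable applied in place to the trainable client weights
      (the mirror/projection map). Defaults to a no-op.
    server_mirror: As `client_mirror`, but applied to the server weights.
    client_weight_pow: Power applied to the per-client example count when
      computing averaging weights.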
Returns:
A `tff.templates.IterativeProcess`.
"""
client_lr_schedule = client_lr
if not callable(client_lr_schedule):
client_lr_schedule = lambda round_num: client_lr
server_lr_schedule = server_lr
if not callable(server_lr_schedule):
server_lr_schedule = lambda round_num: server_lr
dummy_model = model_fn()
server_init_tf = build_server_init_fn(
model_fn,
# Initialize with the learning rate for round zero.
lambda: server_optimizer_fn(server_lr_schedule(0)))
server_state_type = server_init_tf.type_signature.result
model_weights_type = server_state_type.model
round_num_type = server_state_type.round_num
elapsed_lr_type = server_state_type.elapsed_lr
tf_dataset_type = tff.SequenceType(dummy_model.input_spec)
model_input_type = tff.SequenceType(dummy_model.input_spec)
@tff.tf_computation(model_input_type, model_weights_type, round_num_type, elapsed_lr_type)
def client_update_fn(tf_dataset, initial_model_weights, round_num, elapsed_lr):
client_lr = client_lr_schedule(round_num)
client_optimizer = client_optimizer_fn(client_lr)
client_update = create_client_update_fn()
    # client_update consumes two fresh dummy models
return client_update(model_fn(), model_fn(), tf_dataset, initial_model_weights,
client_optimizer, client_mirror, elapsed_lr,
client_weight_fn, client_weight_pow)
@tff.tf_computation(server_state_type, model_weights_type.trainable, elapsed_lr_type)
def server_update_fn(server_state, model_delta, elapsed_lr_delta):
primal_model = model_fn()
dual_model = model_fn()
server_lr = server_lr_schedule(server_state.round_num)
server_optimizer = server_optimizer_fn(server_lr)
# We initialize the server optimizer variables to avoid creating them
# within the scope of the tf.function server_update.
_initialize_optimizer_vars(primal_model, server_optimizer)
_initialize_optimizer_vars(dual_model, server_optimizer)
return server_update(primal_model, dual_model, server_optimizer,
server_mirror, server_state,
model_delta, elapsed_lr_delta)
@tff.federated_computation(
tff.FederatedType(server_state_type, tff.SERVER),
tff.FederatedType(tf_dataset_type, tff.CLIENTS))
def run_one_round(server_state, federated_dataset):
"""Orchestration logic for one round of computation.
Args:
server_state: A `ServerState`.
federated_dataset: A federated `tf.Dataset` with placement `tff.CLIENTS`.
Returns:
A tuple of updated `ServerState` and the result of
`tff.learning.Model.federated_output_computation`.
"""
client_dual_model_weights = tff.federated_broadcast(server_state.dual_model_weights)
client_round_num = tff.federated_broadcast(server_state.round_num)
client_elapsed_lr = tff.federated_broadcast(server_state.elapsed_lr)
client_outputs = tff.federated_map(
client_update_fn,
(federated_dataset, client_dual_model_weights,
client_round_num, client_elapsed_lr))
client_weight = client_outputs.client_weight
model_delta = tff.federated_mean(
client_outputs.weights_delta, weight=client_weight)
elapsed_lr_delta = tff.federated_mean(
client_outputs.elapsed_lr_delta, weight=client_weight)
server_state = tff.federated_map(server_update_fn,
(server_state, model_delta, elapsed_lr_delta))
aggregated_outputs = dummy_model.federated_output_computation(
client_outputs.model_output)
if aggregated_outputs.type_signature.is_struct():
aggregated_outputs = tff.federated_zip(aggregated_outputs)
return server_state, aggregated_outputs
@tff.federated_computation
def initialize_fn():
return tff.federated_value(server_init_tf(), tff.SERVER)
return tff.templates.IterativeProcess(
initialize_fn=initialize_fn, next_fn=run_one_round)
|
[
"tensorflow_federated.SequenceType",
"tensorflow_federated.federated_zip",
"tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite",
"collections.OrderedDict",
"tensorflow_federated.templates.IterativeProcess",
"attr.s",
"attr.ib",
"tensorflow_federated.federated_map",
"tensorflow.constant",
"tensorflow_federated.tf_computation",
"tensorflow.shape",
"tensorflow.nest.map_structure",
"tensorflow.GradientTape",
"tensorflow_federated.learning.ModelWeights.from_model",
"tensorflow_federated.utils.assign",
"tensorflow_federated.federated_broadcast",
"tensorflow_federated.FederatedType",
"tensorflow_federated.federated_mean"
] |
[((2168, 2210), 'attr.s', 'attr.s', ([], {'eq': '(False)', 'order': '(False)', 'frozen': '(True)'}), '(eq=False, order=False, frozen=True)\n', (2174, 2210), False, 'import attr\n'), ((4822, 4864), 'attr.s', 'attr.s', ([], {'eq': '(False)', 'order': '(False)', 'frozen': '(True)'}), '(eq=False, order=False, frozen=True)\n', (4828, 4864), False, 'import attr\n'), ((1823, 1897), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda x, v: (x, v))', 'delta', 'model_weights.trainable'], {}), '(lambda x, v: (x, v), delta, model_weights.trainable)\n', (1844, 1897), True, 'import tensorflow as tf\n'), ((2121, 2164), 'tensorflow_federated.learning.ModelWeights.from_model', 'tff.learning.ModelWeights.from_model', (['model'], {}), '(model)\n', (2157, 2164), True, 'import tensorflow_federated as tff\n'), ((2655, 2664), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2662, 2664), False, 'import attr\n'), ((2688, 2697), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2695, 2697), False, 'import attr\n'), ((2743, 2752), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2750, 2752), False, 'import attr\n'), ((2768, 2777), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2775, 2777), False, 'import attr\n'), ((2792, 2801), 'attr.ib', 'attr.ib', ([], {}), '()\n', (2799, 2801), False, 'import attr\n'), ((3554, 3623), 'tensorflow_federated.utils.assign', 'tff.utils.assign', (['dual_model_weights', 'server_state.dual_model_weights'], {}), '(dual_model_weights, server_state.dual_model_weights)\n', (3570, 3623), True, 'import tensorflow_federated as tff\n'), ((3827, 3881), 'tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite', 'tensor_utils.zero_all_if_any_non_finite', (['weights_delta'], {}), '(weights_delta)\n', (3866, 3881), False, 'from tensorflow_federated.python.tensorflow_libs import tensor_utils\n'), ((4384, 4442), 'tensorflow_federated.utils.assign', 'tff.utils.assign', (['primal_model_weights', 'dual_model_weights'], {}), '(primal_model_weights, dual_model_weights)\n', (4400, 4442), True, 'import tensorflow_federated as tff\n'), ((5446, 5455), 'attr.ib', 'attr.ib', ([], {}), '()\n', (5453, 5455), False, 'import attr\n'), ((5474, 5483), 'attr.ib', 'attr.ib', ([], {}), '()\n', (5481, 5483), False, 'import attr\n'), ((5505, 5514), 'attr.ib', 'attr.ib', ([], {}), '()\n', (5512, 5514), False, 'import attr\n'), ((5563, 5572), 'attr.ib', 'attr.ib', ([], {}), '()\n', (5570, 5572), False, 'import attr\n'), ((5594, 5603), 'attr.ib', 'attr.ib', ([], {}), '()\n', (5601, 5603), False, 'import attr\n'), ((12036, 12076), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['dummy_model.input_spec'], {}), '(dummy_model.input_spec)\n', (12052, 12076), True, 'import tensorflow_federated as tff\n'), ((12098, 12138), 'tensorflow_federated.SequenceType', 'tff.SequenceType', (['dummy_model.input_spec'], {}), '(dummy_model.input_spec)\n', (12114, 12138), True, 'import tensorflow_federated as tff\n'), ((12143, 12236), 'tensorflow_federated.tf_computation', 'tff.tf_computation', (['model_input_type', 'model_weights_type', 'round_num_type', 'elapsed_lr_type'], {}), '(model_input_type, model_weights_type, round_num_type,\n elapsed_lr_type)\n', (12161, 12236), True, 'import tensorflow_federated as tff\n'), ((12726, 12814), 'tensorflow_federated.tf_computation', 'tff.tf_computation', (['server_state_type', 'model_weights_type.trainable', 'elapsed_lr_type'], {}), '(server_state_type, model_weights_type.trainable,\n elapsed_lr_type)\n', (12744, 12814), True, 'import tensorflow_federated as tff\n'), ((15173, 15260), 'tensorflow_federated.templates.IterativeProcess', 'tff.templates.IterativeProcess', ([], {'initialize_fn': 'initialize_fn', 'next_fn': 'run_one_round'}), '(initialize_fn=initialize_fn, next_fn=\n run_one_round)\n', (15203, 15260), True, 'import tensorflow_federated as tff\n'), ((7045, 7103), 'tensorflow_federated.utils.assign', 'tff.utils.assign', (['dual_model_weights', 'dual_initial_weights'], {}), '(dual_model_weights, dual_initial_weights)\n', (7061, 7103), True, 'import tensorflow_federated as tff\n'), ((7123, 7153), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.int32'}), '(0, dtype=tf.int32)\n', (7134, 7153), True, 'import tensorflow as tf\n'), ((7968, 8075), 'tensorflow.nest.map_structure', 'tf.nest.map_structure', (['(lambda a, b: a - b)', 'dual_model_weights.trainable', 'dual_initial_weights.trainable'], {}), '(lambda a, b: a - b, dual_model_weights.trainable,\n dual_initial_weights.trainable)\n', (7989, 8075), True, 'import tensorflow as tf\n'), ((8209, 8263), 'tensorflow_federated.python.tensorflow_libs.tensor_utils.zero_all_if_any_non_finite', 'tensor_utils.zero_all_if_any_non_finite', (['weights_delta'], {}), '(weights_delta)\n', (8248, 8263), False, 'from tensorflow_federated.python.tensorflow_libs import tensor_utils\n'), ((14033, 14089), 'tensorflow_federated.federated_broadcast', 'tff.federated_broadcast', (['server_state.dual_model_weights'], {}), '(server_state.dual_model_weights)\n', (14056, 14089), True, 'import tensorflow_federated as tff\n'), ((14113, 14160), 'tensorflow_federated.federated_broadcast', 'tff.federated_broadcast', (['server_state.round_num'], {}), '(server_state.round_num)\n', (14136, 14160), True, 'import tensorflow_federated as tff\n'), ((14185, 14233), 'tensorflow_federated.federated_broadcast', 'tff.federated_broadcast', (['server_state.elapsed_lr'], {}), '(server_state.elapsed_lr)\n', (14208, 14233), True, 'import tensorflow_federated as tff\n'), ((14255, 14379), 'tensorflow_federated.federated_map', 'tff.federated_map', (['client_update_fn', '(federated_dataset, client_dual_model_weights, client_round_num,\n client_elapsed_lr)'], {}), '(client_update_fn, (federated_dataset,\n client_dual_model_weights, client_round_num, client_elapsed_lr))\n', (14272, 14379), True, 'import tensorflow_federated as tff\n'), ((14471, 14541), 'tensorflow_federated.federated_mean', 'tff.federated_mean', (['client_outputs.weights_delta'], {'weight': 'client_weight'}), '(client_outputs.weights_delta, weight=client_weight)\n', (14489, 14541), True, 'import tensorflow_federated as tff\n'), ((14575, 14648), 'tensorflow_federated.federated_mean', 'tff.federated_mean', (['client_outputs.elapsed_lr_delta'], {'weight': 'client_weight'}), '(client_outputs.elapsed_lr_delta, weight=client_weight)\n', (14593, 14648), True, 'import tensorflow_federated as tff\n'), ((14678, 14764), 'tensorflow_federated.federated_map', 'tff.federated_map', (['server_update_fn', '(server_state, model_delta, elapsed_lr_delta)'], {}), '(server_update_fn, (server_state, model_delta,\n elapsed_lr_delta))\n', (14695, 14764), True, 'import tensorflow_federated as tff\n'), ((13521, 13569), 'tensorflow_federated.FederatedType', 'tff.FederatedType', (['server_state_type', 'tff.SERVER'], {}), '(server_state_type, tff.SERVER)\n', (13538, 13569), True, 'import tensorflow_federated as tff\n'), ((13577, 13624), 'tensorflow_federated.FederatedType', 'tff.FederatedType', (['tf_dataset_type', 'tff.CLIENTS'], {}), '(tf_dataset_type, tff.CLIENTS)\n', (13594, 13624), True, 'import tensorflow_federated as tff\n'), ((7217, 7275), 'tensorflow_federated.utils.assign', 'tff.utils.assign', (['primal_model_weights', 'dual_model_weights'], {}), '(primal_model_weights, dual_model_weights)\n', (7233, 7275), True, 'import tensorflow_federated as tff\n'), ((8322, 8354), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (8333, 8354), True, 'import tensorflow as tf\n'), ((8679, 8736), 'collections.OrderedDict', 'collections.OrderedDict', (["[('num_examples', num_examples)]"], {}), "([('num_examples', num_examples)])\n", (8702, 8736), False, 'import collections\n'), ((14966, 15003), 'tensorflow_federated.federated_zip', 'tff.federated_zip', (['aggregated_outputs'], {}), '(aggregated_outputs)\n', (14983, 15003), True, 'import tensorflow_federated as tff\n'), ((7434, 7451), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (7449, 7451), True, 'import tensorflow as tf\n'), ((7810, 7838), 'tensorflow.shape', 'tf.shape', (['output.predictions'], {}), '(output.predictions)\n', (7818, 7838), True, 'import tensorflow as tf\n')]
|
#! /usr/bin/python3.7
from common import test_input_integer, \
handle_age_input, \
num_denum, \
week_day
# test user input and raise an exception on bad input
test_input_integer()
# test age value
handle_age_input()
# test two input values and divide
num_denum()
# print week day
week_day()
|
[
"common.num_denum",
"common.week_day",
"common.test_input_integer",
"common.handle_age_input"
] |
[((208, 228), 'common.test_input_integer', 'test_input_integer', ([], {}), '()\n', (226, 228), False, 'from common import test_input_integer, handle_age_input, num_denum, week_day\n'), ((247, 265), 'common.handle_age_input', 'handle_age_input', ([], {}), '()\n', (263, 265), False, 'from common import test_input_integer, handle_age_input, num_denum, week_day\n'), ((302, 313), 'common.num_denum', 'num_denum', ([], {}), '()\n', (311, 313), False, 'from common import test_input_integer, handle_age_input, num_denum, week_day\n'), ((332, 342), 'common.week_day', 'week_day', ([], {}), '()\n', (340, 342), False, 'from common import test_input_integer, handle_age_input, num_denum, week_day\n')]
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import webbrowser
import requests
import time
import datetime
from bs4 import BeautifulSoup
import re
from googlesearch import *
import requests, json
#import pafy
def scrape(phrase):
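    """Print a short Wikipedia summary for `phrase`; otherwise open the Google results page."""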
flag=0
ext="https://www.google.com/search?q="
links=search(phrase, num=5, stop=5, pause=2)
msg=phrase.replace(" ","+")
url=ext+msg
for link in links:
if 'wikipedia' in link:
flag=1
l=link
break
if flag==1:
wiki = requests.get(l)
wiki_c = wiki.content
soup = BeautifulSoup(wiki_c, 'html.parser')
data=soup.find_all('p')
print("Source:wikipedia")
print(data[0].get_text())
print(data[1].get_text())
print(data[2].get_text())
print(data[3].get_text())
else:
print("wikipedia Source not available")
print("Providing search results")
webbrowser.open(url,new=1)
time.sleep(3)
scrape("What is internet")
|
[
"bs4.BeautifulSoup",
"webbrowser.open",
"requests.get",
"time.sleep"
] |
[((1164, 1191), 'webbrowser.open', 'webbrowser.open', (['url'], {'new': '(1)'}), '(url, new=1)\n', (1179, 1191), False, 'import webbrowser\n'), ((1195, 1208), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1205, 1208), False, 'import time\n'), ((752, 767), 'requests.get', 'requests.get', (['l'], {}), '(l)\n', (764, 767), False, 'import requests, json\n'), ((814, 850), 'bs4.BeautifulSoup', 'BeautifulSoup', (['wiki_c', '"""html.parser"""'], {}), "(wiki_c, 'html.parser')\n", (827, 850), False, 'from bs4 import BeautifulSoup\n')]
|
import time
from pathlib import Path
import scipy.io
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
from nilearn import datasets
from nilearn.input_data import MultiNiftiMasker
from nilearn.image import get_data
from nilearn.mass_univariate import permuted_ols
from sklearn.feature_selection import VarianceThreshold
n_subjects = 2136
var_threshold = 0.001
smoothness = 12
permutations = 5000
jobs = 15
t0 = time.perf_counter()
print('loading and preprocessing data ...')
data_dir = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4')
files = sorted(list(data_dir.glob('*nii')))
keys = [f.stem[:6] for f in files]
tiv = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/report/TIV_test4.txt')
tiv = np.array([float(l.split('\t')[0]) for l in tiv.open('r').readlines()])
info = pd.read_csv('/mnt/qdata/raheppt1/data/brainage/nako/interim/nako_age_labels.csv').astype({'key': str, 'age': np.float64})
info = info.set_index('key')
metadata = pd.merge(info.loc[keys]['age'], pd.DataFrame.from_dict({'key': keys, 'tiv': tiv}), how='inner', on='key')
data_dir = Path('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/mri')
proc_files = sorted(list(data_dir.glob('mwp1*nii')))
proc_keys = [f.stem[4:10] for f in proc_files]
proc_metadata = metadata.set_index('key').loc[proc_keys]
print(len(proc_metadata))
gray_matter_map_filenames = proc_files
gray_matter_map_filenames = sorted([str(f) for f in gray_matter_map_filenames])[:n_subjects]
age = np.array(proc_metadata['age'].tolist())[:n_subjects]
tiv = np.array(proc_metadata['tiv'].tolist())[:n_subjects]
tiv[np.isnan(tiv)] = 0
tiv = tiv[:, np.newaxis]
nifti_masker = MultiNiftiMasker(standardize=False, smoothing_fwhm=smoothness, memory=None, n_jobs=jobs, verbose=1) #, cache options
gm_maps_masked = nifti_masker.fit_transform(gray_matter_map_filenames)
gm_maps_masked = np.concatenate(gm_maps_masked, axis=0)
n_samples, n_features = gm_maps_masked.shape
print('%d samples, %d features' % (n_subjects, n_features))
print(f'{time.perf_counter() - t0} s')
### Inference with massively univariate model ###
print("Massively univariate model")
# Remove features with too low between-subject variance
variance_threshold = VarianceThreshold(threshold=var_threshold)
# Statistical inference
data = variance_threshold.fit_transform(gm_maps_masked)
#data = gm_maps_masked
neg_log_pvals, t_scores_original_data, _ = permuted_ols(
age, data, # + intercept as a covariate by default
confounding_vars=tiv,
    n_perm=permutations,  # 5,000 permutations; more yields more stable p-values
n_jobs=jobs) # CPUs
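# attach the sign of the t-statistics so the map separates positive and negative age effects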
signed_neg_log_pvals = neg_log_pvals * np.sign(t_scores_original_data)
signed_neg_log_pvals_unmasked = nifti_masker.inverse_transform(
variance_threshold.inverse_transform(signed_neg_log_pvals))
print(f'{time.perf_counter() - t0} s')
nib.save(signed_neg_log_pvals_unmasked, 'test.nii.gz')
|
[
"nilearn.input_data.MultiNiftiMasker",
"numpy.concatenate",
"pandas.DataFrame.from_dict",
"pandas.read_csv",
"nilearn.mass_univariate.permuted_ols",
"time.perf_counter",
"numpy.isnan",
"nibabel.save",
"pathlib.Path",
"numpy.sign",
"sklearn.feature_selection.VarianceThreshold"
] |
[((459, 478), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (476, 478), False, 'import time\n'), ((531, 595), 'pathlib.Path', 'Path', (['"""/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4"""'], {}), "('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4')\n", (535, 595), False, 'from pathlib import Path\n'), ((676, 771), 'pathlib.Path', 'Path', (['"""/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/report/TIV_test4.txt"""'], {}), "(\n '/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/report/TIV_test4.txt'\n )\n", (680, 771), False, 'from pathlib import Path\n'), ((1147, 1215), 'pathlib.Path', 'Path', (['"""/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/mri"""'], {}), "('/mnt/qdata/raheppt1/data/brainage/nako/interim/vbm/test4/mri')\n", (1151, 1215), False, 'from pathlib import Path\n'), ((1708, 1811), 'nilearn.input_data.MultiNiftiMasker', 'MultiNiftiMasker', ([], {'standardize': '(False)', 'smoothing_fwhm': 'smoothness', 'memory': 'None', 'n_jobs': 'jobs', 'verbose': '(1)'}), '(standardize=False, smoothing_fwhm=smoothness, memory=None,\n n_jobs=jobs, verbose=1)\n', (1724, 1811), False, 'from nilearn.input_data import MultiNiftiMasker\n'), ((1914, 1952), 'numpy.concatenate', 'np.concatenate', (['gm_maps_masked'], {'axis': '(0)'}), '(gm_maps_masked, axis=0)\n', (1928, 1952), True, 'import numpy as np\n'), ((2263, 2305), 'sklearn.feature_selection.VarianceThreshold', 'VarianceThreshold', ([], {'threshold': 'var_threshold'}), '(threshold=var_threshold)\n', (2280, 2305), False, 'from sklearn.feature_selection import VarianceThreshold\n'), ((2454, 2533), 'nilearn.mass_univariate.permuted_ols', 'permuted_ols', (['age', 'data'], {'confounding_vars': 'tiv', 'n_perm': 'permutations', 'n_jobs': 'jobs'}), '(age, data, confounding_vars=tiv, n_perm=permutations, n_jobs=jobs)\n', (2466, 2533), False, 'from nilearn.mass_univariate import permuted_ols\n'), ((2894, 2948), 'nibabel.save', 'nib.save', (['signed_neg_log_pvals_unmasked', '"""test.nii.gz"""'], {}), "(signed_neg_log_pvals_unmasked, 'test.nii.gz')\n", (2902, 2948), True, 'import nibabel as nib\n'), ((1041, 1090), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (["{'key': keys, 'tiv': tiv}"], {}), "({'key': keys, 'tiv': tiv})\n", (1063, 1090), True, 'import pandas as pd\n'), ((1649, 1662), 'numpy.isnan', 'np.isnan', (['tiv'], {}), '(tiv)\n', (1657, 1662), True, 'import numpy as np\n'), ((2695, 2726), 'numpy.sign', 'np.sign', (['t_scores_original_data'], {}), '(t_scores_original_data)\n', (2702, 2726), True, 'import numpy as np\n'), ((847, 933), 'pandas.read_csv', 'pd.read_csv', (['"""/mnt/qdata/raheppt1/data/brainage/nako/interim/nako_age_labels.csv"""'], {}), "(\n '/mnt/qdata/raheppt1/data/brainage/nako/interim/nako_age_labels.csv')\n", (858, 933), True, 'import pandas as pd\n'), ((2068, 2087), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2085, 2087), False, 'import time\n'), ((2864, 2883), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2881, 2883), False, 'import time\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from distutils.core import Command
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
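        # Configure a minimal stand-alone Django environment with an in-memory
        # SQLite database so the app's test suite can run without a settings module.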
settings.configure(
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'
}
},
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=('yamlfield',)
)
from django.core.management import call_command
import django
django.setup()
call_command('test', 'yamlfield')
setup(
name='django-yamlfield',
version='1.0.3',
description='A Django database field for storing YAML data',
author='The Los Angeles Times Data Desk',
author_email='<EMAIL>',
url="http://django-yamlfield.readthedocs.io/",
packages=find_packages(),
include_package_data=True,
license="MIT",
install_requires=(
'PyYAML>=3.10',
'six>=1.4.1'
),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.9',
'Framework :: Django :: 1.10',
'License :: OSI Approved :: MIT License',
],
cmdclass={'test': TestCommand,}
)
|
[
"django.conf.settings.configure",
"django.core.management.call_command",
"django.setup",
"setuptools.find_packages"
] |
[((340, 506), 'django.conf.settings.configure', 'settings.configure', ([], {'DATABASES': "{'default': {'NAME': ':memory:', 'ENGINE': 'django.db.backends.sqlite3'}}", 'MIDDLEWARE_CLASSES': '()', 'INSTALLED_APPS': "('yamlfield',)"}), "(DATABASES={'default': {'NAME': ':memory:', 'ENGINE':\n 'django.db.backends.sqlite3'}}, MIDDLEWARE_CLASSES=(), INSTALLED_APPS=(\n 'yamlfield',))\n", (358, 506), False, 'from django.conf import settings\n'), ((718, 732), 'django.setup', 'django.setup', ([], {}), '()\n', (730, 732), False, 'import django\n'), ((741, 774), 'django.core.management.call_command', 'call_command', (['"""test"""', '"""yamlfield"""'], {}), "('test', 'yamlfield')\n", (753, 774), False, 'from django.core.management import call_command\n'), ((1037, 1052), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1050, 1052), False, 'from setuptools import setup, find_packages\n')]
|
import pytest
from app import app
@pytest.fixture
def client():
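    # Flask's built-in test client lets tests issue requests to the app without running a server.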
client = app.test_client()
yield client
def test_empty_db(client):
"""Start with a blank database."""
rv = client.get("/")
assert b"Hello World!" in rv.data
|
[
"app.app.test_client"
] |
[((81, 98), 'app.app.test_client', 'app.test_client', ([], {}), '()\n', (96, 98), False, 'from app import app\n')]
|
from os import mkdir, remove
from os.path import dirname, isdir, isfile, join
import pytest
from geoalchemy2.shape import to_shape
from geoalchemy2.types import WKTElement
from numpy.testing import assert_almost_equal
from rasterio.crs import CRS
from snowexsql.projection import *
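# NOTE: the star import above is assumed to provide rasterio and the projection
# helpers used below (reproject_point_in_dict, add_geom, reproject_raster_by_epsg).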
@pytest.mark.parametrize('info, expected', [
# Test we add UTM info when its not provided
({'latitude': 39.039, 'longitude': -108.003}, {'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}),
# Test we add lat long when its not provided
({'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}, {'latitude': 39.039, 'longitude': -108.003}),
# Test ignoring easting in another projection
({'latitude': 39.008078, 'longitude': -108.184794, 'utm_wgs84_easting': 743766.4795, 'utm_wgs84_northing': 4321444.155},
{'easting': 743766.480, 'northing': 4321444.155}),
# Confirm we force the zone to zone 12
({'latitude':39.097464, 'longitude':-107.862476}, {'northing':4332280.1658, 'easting':771338.607})
])
def test_reproject_point_in_dict(info, expected):
"""
Test adding point projection information
"""
result = reproject_point_in_dict(info)
for k, v in expected.items():
assert k in result
if type(v) == float:
assert_almost_equal(v, result[k], 3)
else:
assert v == result[k]
def test_add_geom():
"""
    Test add_geom adds a WKT element to a dictionary containing easting/northing info
"""
info = {'easting': 759397.644, 'northing': 4325379.675, 'utm_zone': 12}
result = add_geom(info, 26912)
# Ensure we added a geom key and value that is WKTE
assert 'geom' in result.keys()
assert type(result['geom']) == WKTElement
# Convert it to pyshapely for testing/ data integrity
p = to_shape(result['geom'])
assert p.x == info['easting']
assert p.y == info['northing']
assert result['geom'].srid == 26912
class TestReprojectRasterByEPSG():
output_f = join(dirname(__file__), 'test.tif')
    @classmethod
    def teardown_method(self):
        """Remove our output file if the test produced one."""
        if isfile(self.output_f):
            remove(self.output_f)
@pytest.mark.parametrize("input_f, epsg, bounds", [
('uavsar_latlon.amp1.real.tif', 26912,
(748446.1945536422, 4325651.650770078, 751909.2857505103, 4328702.971977075)),
])
def test_reproject(self, input_f, epsg, bounds):
"""
        Test reprojecting a raster from one EPSG to another
"""
d = dirname(__file__)
f = join(d, 'data', input_f)
reproject_raster_by_epsg(f, self.output_f, epsg)
with rasterio.open(self.output_f) as dataset:
dbounds = dataset.bounds
dcrs = dataset.crs
# Test our epsg was assigned
assert CRS.from_epsg(epsg) == dataset.crs
# Assert bounds
for i, v in enumerate(bounds):
assert_almost_equal(v, dataset.bounds[i], 3)
|
[
"os.remove",
"numpy.testing.assert_almost_equal",
"os.path.dirname",
"geoalchemy2.shape.to_shape",
"pytest.mark.parametrize",
"os.path.join",
"rasterio.crs.CRS.from_epsg"
] |
[((301, 876), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""info, expected"""', "[({'latitude': 39.039, 'longitude': -108.003}, {'easting': 759397.644,\n 'northing': 4325379.675, 'utm_zone': 12}), ({'easting': 759397.644,\n 'northing': 4325379.675, 'utm_zone': 12}, {'latitude': 39.039,\n 'longitude': -108.003}), ({'latitude': 39.008078, 'longitude': -\n 108.184794, 'utm_wgs84_easting': 743766.4795, 'utm_wgs84_northing': \n 4321444.155}, {'easting': 743766.48, 'northing': 4321444.155}), ({\n 'latitude': 39.097464, 'longitude': -107.862476}, {'northing': \n 4332280.1658, 'easting': 771338.607})]"], {}), "('info, expected', [({'latitude': 39.039,\n 'longitude': -108.003}, {'easting': 759397.644, 'northing': 4325379.675,\n 'utm_zone': 12}), ({'easting': 759397.644, 'northing': 4325379.675,\n 'utm_zone': 12}, {'latitude': 39.039, 'longitude': -108.003}), ({\n 'latitude': 39.008078, 'longitude': -108.184794, 'utm_wgs84_easting': \n 743766.4795, 'utm_wgs84_northing': 4321444.155}, {'easting': 743766.48,\n 'northing': 4321444.155}), ({'latitude': 39.097464, 'longitude': -\n 107.862476}, {'northing': 4332280.1658, 'easting': 771338.607})])\n", (324, 876), False, 'import pytest\n'), ((1840, 1864), 'geoalchemy2.shape.to_shape', 'to_shape', (["result['geom']"], {}), "(result['geom'])\n", (1848, 1864), False, 'from geoalchemy2.shape import to_shape\n'), ((2313, 2491), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""input_f, epsg, bounds"""', "[('uavsar_latlon.amp1.real.tif', 26912, (748446.1945536422, \n 4325651.650770078, 751909.2857505103, 4328702.971977075))]"], {}), "('input_f, epsg, bounds', [(\n 'uavsar_latlon.amp1.real.tif', 26912, (748446.1945536422, \n 4325651.650770078, 751909.2857505103, 4328702.971977075))])\n", (2336, 2491), False, 'import pytest\n'), ((2031, 2048), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2038, 2048), False, 'from os.path import dirname, isdir, isfile, join\n'), ((2285, 2306), 'os.remove', 'remove', (['self.output_f'], {}), '(self.output_f)\n', (2291, 2306), False, 'from os import mkdir, remove\n'), ((2651, 2668), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (2658, 2668), False, 'from os.path import dirname, isdir, isfile, join\n'), ((2681, 2705), 'os.path.join', 'join', (['d', '"""data"""', 'input_f'], {}), "(d, 'data', input_f)\n", (2685, 2705), False, 'from os.path import dirname, isdir, isfile, join\n'), ((1314, 1350), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['v', 'result[k]', '(3)'], {}), '(v, result[k], 3)\n', (1333, 1350), False, 'from numpy.testing import assert_almost_equal\n'), ((2940, 2959), 'rasterio.crs.CRS.from_epsg', 'CRS.from_epsg', (['epsg'], {}), '(epsg)\n', (2953, 2959), False, 'from rasterio.crs import CRS\n'), ((3051, 3095), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['v', 'dataset.bounds[i]', '(3)'], {}), '(v, dataset.bounds[i], 3)\n', (3070, 3095), False, 'from numpy.testing import assert_almost_equal\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# ------------------------------------------------------------------------------
#
# Copyright (C) IBM Corporation 2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
s_write_unit.py:
- Implementation of the :py:class:`WriteUnit` for the ``S-MAC`` network (simplified MAC).
- Cf https://arxiv.org/abs/1803.03067 for the reference MAC paper (Hudson and Manning, ICLR 2018).
"""
__author__ = "<NAME> & <NAME>"
from torch.nn import Module
from vqa_experiments.s_mac.utils_mac import linear
class WriteUnit(Module):
"""
Implementation of the :py:class:`WriteUnit` for the ``S-MAC`` model.
.. note::
This implementation is part of a simplified version of the MAC network, where modifications regarding \
the different units have been done to reduce the number of linear layers (and thus number of parameters).
This is part of a submission to the ViGIL workshop for NIPS 2018. Feel free to use this model and refer to it \
with the following BibTex:
::
@article{marois2018transfer,
title={On transfer learning using a MAC model variant},
author={<NAME> <NAME> <NAME> <NAME>},
journal={arXiv preprint arXiv:1811.06529},
year={2018}
}
"""
def __init__(self, dim):
"""
Constructor for the :py:class:`WriteUnit` of the ``S-MAC`` model.
:param dim: global 'd' hidden dimension.
:type dim: int
"""
# call base constructor
super(WriteUnit, self).__init__()
# linear layer to create the new memory state from the current read vector (coming from the read unit)
self.concat_layer = linear(dim, dim, bias=True)
def forward(self, read_vector):
"""
Forward pass of the :py:class:`WriteUnit` for the ``S-MAC`` model.
:param read_vector: current read vector (output of the :py:class:`ReadUnit`), shape `[batch_size x dim]`.
:type read_vector: :py:class:`torch.Tensor`
:return: current memory state, shape [batch_size x mem_dim] (:py:class:`torch.Tensor`).
"""
return self.concat_layer(read_vector)
|
[
"vqa_experiments.s_mac.utils_mac.linear"
] |
[((3384, 3411), 'vqa_experiments.s_mac.utils_mac.linear', 'linear', (['dim', 'dim'], {'bias': '(True)'}), '(dim, dim, bias=True)\n', (3390, 3411), False, 'from vqa_experiments.s_mac.utils_mac import linear\n')]
|
import urllib.request
from bs4 import BeautifulSoup
from random import randint
# fetch the full html
fp = urllib.request.urlopen("https://www.python.org/dev/peps/pep-0020/")
mybytes = fp.read()
mystr = mybytes.decode("utf8")
fp.close()
# fetch the zen of python
soup = BeautifulSoup(mystr, 'html.parser')
txt = soup.pre.string
list_lines = txt.splitlines()
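# line 0 is the title ("The Zen of Python, by Tim Peters"); pick one of the 19 aphorisms at random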
index_one = randint(1, 19)
print(list_lines[index_one])
|
[
"bs4.BeautifulSoup",
"random.randint"
] |
[((271, 306), 'bs4.BeautifulSoup', 'BeautifulSoup', (['mystr', '"""html.parser"""'], {}), "(mystr, 'html.parser')\n", (284, 306), False, 'from bs4 import BeautifulSoup\n'), ((371, 385), 'random.randint', 'randint', (['(1)', '(19)'], {}), '(1, 19)\n', (378, 385), False, 'from random import randint\n')]
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes
from pyflink.table.udf import udf
from test_utils import PyFlinkStreamTableTestCase, TestAppendSink, results
class TableTests(PyFlinkStreamTableTestCase):
def get_results(self, table_name):
gateway = get_gateway()
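        # Reach into the JVM through the Py4J gateway to read back what the 'values' connector sink collected.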
TestValuesTableFactory = gateway.jvm.org.apache.flink.table.planner.factories.TestValuesTableFactory
return TestValuesTableFactory.getResults(table_name)
def test_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
table_sink = TestAppendSink(
['a', 'b'],
[DataTypes.BIGINT(), DataTypes.BIGINT()])
self.t_env.register_table_sink("Results", table_sink)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select(t.a, add_one(t.a)) \
.execute_insert("Results").wait()
actual = results()
self.assert_equals(actual, ["+I[1, 2]", "+I[2, 3]", "+I[3, 4]"])
def test_sink_ddl(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.t_env.execute_sql("""
CREATE TABLE Results(
a BIGINT,
b BIGINT
) with (
'connector' = 'values'
)
""")
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select(t.a, add_one(t.a)) \
.execute_insert("Results").wait()
actual = self.get_results("Results")
self.assert_equals(actual, ["+I[1, 2]", "+I[2, 3]", "+I[3, 4]"])
|
[
"test_utils.results",
"pyflink.java_gateway.get_gateway",
"pyflink.table.DataTypes.BIGINT"
] |
[((1255, 1268), 'pyflink.java_gateway.get_gateway', 'get_gateway', ([], {}), '()\n', (1266, 1268), False, 'from pyflink.java_gateway import get_gateway\n'), ((1916, 1925), 'test_utils.results', 'results', ([], {}), '()\n', (1923, 1925), False, 'from test_utils import PyFlinkStreamTableTestCase, TestAppendSink, results\n'), ((1527, 1545), 'pyflink.table.DataTypes.BIGINT', 'DataTypes.BIGINT', ([], {}), '()\n', (1543, 1545), False, 'from pyflink.table import DataTypes\n'), ((1622, 1640), 'pyflink.table.DataTypes.BIGINT', 'DataTypes.BIGINT', ([], {}), '()\n', (1638, 1640), False, 'from pyflink.table import DataTypes\n'), ((1642, 1660), 'pyflink.table.DataTypes.BIGINT', 'DataTypes.BIGINT', ([], {}), '()\n', (1658, 1660), False, 'from pyflink.table import DataTypes\n'), ((2080, 2098), 'pyflink.table.DataTypes.BIGINT', 'DataTypes.BIGINT', ([], {}), '()\n', (2096, 2098), False, 'from pyflink.table import DataTypes\n')]
|
#!/usr/bin/env python
import os
# Force pure-python implementation instead of C++, otherwise imports
# break things because we can't properly reset the symbol database.
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import importlib
import json
import subprocess
import sys
from typing import Generator, Tuple
from google.protobuf import symbol_database
from google.protobuf.descriptor_pool import DescriptorPool
from google.protobuf.json_format import MessageToJson, Parse
root = os.path.dirname(os.path.realpath(__file__))
def get_files(end: str) -> Generator[str, None, None]:
for r, dirs, files in os.walk(root):
for filename in [f for f in files if f.endswith(end)]:
yield os.path.join(r, filename)
def get_base(filename: str) -> str:
return os.path.splitext(os.path.basename(filename))[0]
def ensure_ext(filename: str, ext: str) -> str:
if not filename.endswith(ext):
return filename + ext
return filename
if __name__ == "__main__":
os.chdir(root)
if len(sys.argv) > 1:
proto_files = [ensure_ext(f, ".proto") for f in sys.argv[1:]]
bases = {get_base(f) for f in proto_files}
json_files = [
f for f in get_files(".json") if get_base(f).split("-")[0] in bases
]
else:
proto_files = get_files(".proto")
json_files = get_files(".json")
for filename in proto_files:
print(f"Generating code for {os.path.basename(filename)}")
subprocess.run(
f"protoc --python_out=. {os.path.basename(filename)}", shell=True
)
subprocess.run(
f"protoc --plugin=protoc-gen-custom=../plugin.py --custom_out=. {os.path.basename(filename)}",
shell=True,
)
for filename in json_files:
# Reset the internal symbol database so we can import the `Test` message
# multiple times. Ugh.
sym = symbol_database.Default()
sym.pool = DescriptorPool()
parts = get_base(filename).split("-")
out = filename.replace(".json", ".bin")
print(f"Using {parts[0]}_pb2 to generate {os.path.basename(out)}")
imported = importlib.import_module(f"{parts[0]}_pb2")
input_json = open(filename).read()
parsed = Parse(input_json, imported.Test())
serialized = parsed.SerializeToString()
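        # Test files with "casing" in their name expect camelCase JSON output;
        # all others keep the original proto field names.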
preserve = "casing" not in filename
serialized_json = MessageToJson(parsed, preserving_proto_field_name=preserve)
s_loaded = json.loads(serialized_json)
in_loaded = json.loads(input_json)
if s_loaded != in_loaded:
raise AssertionError("Expected JSON to be equal:", s_loaded, in_loaded)
open(out, "wb").write(serialized)
|
[
"json.loads",
"importlib.import_module",
"os.path.basename",
"google.protobuf.symbol_database.Default",
"os.path.realpath",
"os.walk",
"google.protobuf.json_format.MessageToJson",
"google.protobuf.descriptor_pool.DescriptorPool",
"os.path.join",
"os.chdir"
] |
[((519, 545), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (535, 545), False, 'import os\n'), ((630, 643), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (637, 643), False, 'import os\n'), ((1017, 1031), 'os.chdir', 'os.chdir', (['root'], {}), '(root)\n', (1025, 1031), False, 'import os\n'), ((1922, 1947), 'google.protobuf.symbol_database.Default', 'symbol_database.Default', ([], {}), '()\n', (1945, 1947), False, 'from google.protobuf import symbol_database\n'), ((1967, 1983), 'google.protobuf.descriptor_pool.DescriptorPool', 'DescriptorPool', ([], {}), '()\n', (1981, 1983), False, 'from google.protobuf.descriptor_pool import DescriptorPool\n'), ((2174, 2216), 'importlib.import_module', 'importlib.import_module', (['f"""{parts[0]}_pb2"""'], {}), "(f'{parts[0]}_pb2')\n", (2197, 2216), False, 'import importlib\n'), ((2430, 2489), 'google.protobuf.json_format.MessageToJson', 'MessageToJson', (['parsed'], {'preserving_proto_field_name': 'preserve'}), '(parsed, preserving_proto_field_name=preserve)\n', (2443, 2489), False, 'from google.protobuf.json_format import MessageToJson, Parse\n'), ((2510, 2537), 'json.loads', 'json.loads', (['serialized_json'], {}), '(serialized_json)\n', (2520, 2537), False, 'import json\n'), ((2558, 2580), 'json.loads', 'json.loads', (['input_json'], {}), '(input_json)\n', (2568, 2580), False, 'import json\n'), ((818, 844), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (834, 844), False, 'import os\n'), ((726, 751), 'os.path.join', 'os.path.join', (['r', 'filename'], {}), '(r, filename)\n', (738, 751), False, 'import os\n'), ((1456, 1482), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1472, 1482), False, 'import os\n'), ((1547, 1573), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1563, 1573), False, 'import os\n'), ((1699, 1725), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1715, 1725), False, 'import os\n'), ((2129, 2150), 'os.path.basename', 'os.path.basename', (['out'], {}), '(out)\n', (2145, 2150), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-15 04:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('polls', '0004_auto_20170811_0444'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='demo_1',
),
migrations.RemoveField(
model_name='question',
name='demo_2',
),
migrations.RemoveField(
model_name='question',
name='demo_3',
),
migrations.AddField(
model_name='question',
name='integer',
field=models.IntegerField(blank=True, default='321', null=True),
),
migrations.AddField(
model_name='question',
name='select',
field=models.CharField(choices=[(0, 'Option 1'), (1, 'Option two'), (2, 'Option Teemo')], max_length=1, null=True),
),
migrations.AddField(
model_name='question',
name='textarea',
field=models.TextField(default='Very long text, isnt it?', max_length=200),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.IntegerField",
"django.db.models.TextField",
"django.db.models.CharField"
] |
[((298, 358), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""question"""', 'name': '"""demo_1"""'}), "(model_name='question', name='demo_1')\n", (320, 358), False, 'from django.db import migrations, models\n'), ((403, 463), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""question"""', 'name': '"""demo_2"""'}), "(model_name='question', name='demo_2')\n", (425, 463), False, 'from django.db import migrations, models\n'), ((508, 568), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""question"""', 'name': '"""demo_3"""'}), "(model_name='question', name='demo_3')\n", (530, 568), False, 'from django.db import migrations, models\n'), ((715, 772), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'default': '"""321"""', 'null': '(True)'}), "(blank=True, default='321', null=True)\n", (734, 772), False, 'from django.db import migrations, models\n'), ((894, 1006), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[(0, 'Option 1'), (1, 'Option two'), (2, 'Option Teemo')]", 'max_length': '(1)', 'null': '(True)'}), "(choices=[(0, 'Option 1'), (1, 'Option two'), (2,\n 'Option Teemo')], max_length=1, null=True)\n", (910, 1006), False, 'from django.db import migrations, models\n'), ((1126, 1194), 'django.db.models.TextField', 'models.TextField', ([], {'default': '"""Very long text, isnt it?"""', 'max_length': '(200)'}), "(default='Very long text, isnt it?', max_length=200)\n", (1142, 1194), False, 'from django.db import migrations, models\n')]
|
import json
from ranked.datasets import Matchup
from ranked.models import Batch, Match
class ReplayMatchup(Matchup):
"""Returns a batch of matchups, each batch have each players once.
The matches are sorted by ascending timestamp.
This means that the first batch represent the first match for each player.
second batch second match, etc...
Parameters
----------
ranker:
Ranker object used to create teams
pool:
        Pool of players
matchupfs:
Name of the file containing the replay data
"""
def __init__(self, ranker, pool, matchupfs: str) -> None:
self.ranker = ranker
        self._matches = []  # stored privately so the matches() generator below is not shadowed
self.batches = []
self.pool = pool
self.step = 0
with open(matchupfs, "r") as data:
            for line in data:
                # each line holds one JSON-encoded match record
match = json.loads(line)
batch = match.get("batch")
teams = match.get("teams")
leaderboard = []
for team in teams:
players = team["players"]
score = team["score"]
t1 = self.ranker.new_team(
*[self.pool[player_id] for player_id in players]
)
leaderboard.append((t1, score))
m = Match(*leaderboard)
if batch is not None:
self.batches.append((batch, m))
                self._matches.append(m)
self.batches.sort(key=lambda item: item[0])
def matches(self) -> Batch:
for b in self.batches:
yield b
|
[
"json.loads",
"ranked.models.Match"
] |
[((876, 892), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (886, 892), False, 'import json\n'), ((1353, 1372), 'ranked.models.Match', 'Match', (['*leaderboard'], {}), '(*leaderboard)\n', (1358, 1372), False, 'from ranked.models import Batch, Match\n')]
|
# USAGE
# python mixed_training.py --dataset Houses-dataset/Houses\ Dataset/
# import the necessary packages
from pyimagesearch import datasets
from pyimagesearch import models
from sklearn.model_selection import train_test_split
from keras.layers.core import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import concatenate
import numpy as np
import argparse
import locale
import os
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", type=str, required=True,
help="path to input dataset of house images")
args = vars(ap.parse_args())
# construct the path to the input .txt file that contains information
# on each house in the dataset and then load the dataset
print("[INFO] loading house attributes...")
inputPath = os.path.sep.join([args["dataset"], "HousesInfo.txt"])
df = datasets.load_house_attributes(inputPath)
# load the house images and then scale the pixel intensities to the
# range [0, 1]
print("[INFO] loading house images...")
images = datasets.load_house_images(df, args["dataset"])
images = images / 255.0
# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
print("[INFO] processing data...")
split = train_test_split(df, images, test_size=0.25, random_state=42)
(trainAttrX, testAttrX, trainImagesX, testImagesX) = split
# find the largest house price in the training set and use it to
# scale our house prices to the range [0, 1] (will lead to better
# training and convergence)
maxPrice = trainAttrX["price"].max()
trainY = trainAttrX["price"] / maxPrice
testY = testAttrX["price"] / maxPrice
# process the house attributes data by performing min-max scaling
# on continuous features, one-hot encoding on categorical features,
# and then finally concatenating them together
(trainAttrX, testAttrX) = datasets.process_house_attributes(df,
trainAttrX, testAttrX)
# create the MLP and CNN models
mlp = models.create_mlp(trainAttrX.shape[1], regress=False)
cnn = models.create_cnn(64, 64, 3, regress=False)
# create the input to our final set of layers as the *output* of both
# the MLP and CNN
combinedInput = concatenate([mlp.output, cnn.output])
# our final FC layer head will have two dense layers, the final one
# being our regression head
x = Dense(4, activation="relu")(combinedInput)
x = Dense(1, activation="linear")(x)
# our final model will accept categorical/numerical data on the MLP
# input and images on the CNN input, outputting a single value (the
# predicted price of the house)
model = Model(inputs=[mlp.input, cnn.input], outputs=x)
# compile the model using mean absolute percentage error as our loss,
# implying that we seek to minimize the absolute percentage difference
# between our price *predictions* and the *actual prices*
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)
# train the model
print("[INFO] training model...")
model.fit(
[trainAttrX, trainImagesX], trainY,
validation_data=([testAttrX, testImagesX], testY),
epochs=200, batch_size=8)
# make predictions on the testing data
print("[INFO] predicting house prices...")
preds = model.predict([testAttrX, testImagesX])
# compute the difference between the *predicted* house prices and the
# *actual* house prices, then compute the percentage difference and
# the absolute percentage difference
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)
# compute the mean and standard deviation of the absolute percentage
# difference
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)
# finally, show some statistics on our model
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
locale.currency(df["price"].mean(), grouping=True),
locale.currency(df["price"].std(), grouping=True)))
print("[INFO] mean: {:.2f}%, std: {:.2f}%".format(mean, std))
|
[
"keras.layers.core.Dense",
"pyimagesearch.models.create_mlp",
"numpy.abs",
"argparse.ArgumentParser",
"numpy.std",
"sklearn.model_selection.train_test_split",
"pyimagesearch.models.create_cnn",
"keras.optimizers.Adam",
"keras.models.Model",
"pyimagesearch.datasets.process_house_attributes",
"numpy.mean",
"locale.setlocale",
"pyimagesearch.datasets.load_house_attributes",
"os.path.sep.join",
"keras.layers.concatenate",
"pyimagesearch.datasets.load_house_images"
] |
[((490, 515), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (513, 515), False, 'import argparse\n'), ((836, 889), 'os.path.sep.join', 'os.path.sep.join', (["[args['dataset'], 'HousesInfo.txt']"], {}), "([args['dataset'], 'HousesInfo.txt'])\n", (852, 889), False, 'import os\n'), ((895, 936), 'pyimagesearch.datasets.load_house_attributes', 'datasets.load_house_attributes', (['inputPath'], {}), '(inputPath)\n', (925, 936), False, 'from pyimagesearch import datasets\n'), ((1070, 1117), 'pyimagesearch.datasets.load_house_images', 'datasets.load_house_images', (['df', "args['dataset']"], {}), "(df, args['dataset'])\n", (1096, 1117), False, 'from pyimagesearch import datasets\n'), ((1311, 1372), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df', 'images'], {'test_size': '(0.25)', 'random_state': '(42)'}), '(df, images, test_size=0.25, random_state=42)\n', (1327, 1372), False, 'from sklearn.model_selection import train_test_split\n'), ((1915, 1975), 'pyimagesearch.datasets.process_house_attributes', 'datasets.process_house_attributes', (['df', 'trainAttrX', 'testAttrX'], {}), '(df, trainAttrX, testAttrX)\n', (1948, 1975), False, 'from pyimagesearch import datasets\n'), ((2016, 2069), 'pyimagesearch.models.create_mlp', 'models.create_mlp', (['trainAttrX.shape[1]'], {'regress': '(False)'}), '(trainAttrX.shape[1], regress=False)\n', (2033, 2069), False, 'from pyimagesearch import models\n'), ((2076, 2119), 'pyimagesearch.models.create_cnn', 'models.create_cnn', (['(64)', '(64)', '(3)'], {'regress': '(False)'}), '(64, 64, 3, regress=False)\n', (2093, 2119), False, 'from pyimagesearch import models\n'), ((2225, 2262), 'keras.layers.concatenate', 'concatenate', (['[mlp.output, cnn.output]'], {}), '([mlp.output, cnn.output])\n', (2236, 2262), False, 'from keras.layers import concatenate\n'), ((2621, 2668), 'keras.models.Model', 'Model', ([], {'inputs': '[mlp.input, cnn.input]', 'outputs': 'x'}), '(inputs=[mlp.input, cnn.input], outputs=x)\n', (2626, 2668), False, 'from keras.models import Model\n'), ((2875, 2908), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'decay': '(0.001 / 200)'}), '(lr=0.001, decay=0.001 / 200)\n', (2879, 2908), False, 'from keras.optimizers import Adam\n'), ((3545, 3564), 'numpy.abs', 'np.abs', (['percentDiff'], {}), '(percentDiff)\n', (3551, 3564), True, 'import numpy as np\n'), ((3655, 3678), 'numpy.mean', 'np.mean', (['absPercentDiff'], {}), '(absPercentDiff)\n', (3662, 3678), True, 'import numpy as np\n'), ((3685, 3707), 'numpy.std', 'np.std', (['absPercentDiff'], {}), '(absPercentDiff)\n', (3691, 3707), True, 'import numpy as np\n'), ((3754, 3800), 'locale.setlocale', 'locale.setlocale', (['locale.LC_ALL', '"""en_US.UTF-8"""'], {}), "(locale.LC_ALL, 'en_US.UTF-8')\n", (3770, 3800), False, 'import locale\n'), ((2364, 2391), 'keras.layers.core.Dense', 'Dense', (['(4)'], {'activation': '"""relu"""'}), "(4, activation='relu')\n", (2369, 2391), False, 'from keras.layers.core import Dense\n'), ((2411, 2440), 'keras.layers.core.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2416, 2440), False, 'from keras.layers.core import Dense\n')]
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import csv
from collections import defaultdict
import webapp2
from webapp2_extras import json
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
import config
from tripit_facade import TripItFacade
AIRPORTS_ID = 1
MATRIX_ID = 2
class BlobModel(ndb.Model):
payload = ndb.PickleProperty(compressed=True)
@classmethod
def by_name(cls, name_value):
return cls.query(name=name_value)
class HomeHandler(webapp2.RequestHandler):
def get(self):
template_values = {}
template = config.JINJA_ENVIRONMENT.get_template('views/home.html')
self.response.write(template.render(template_values))
class AirportListHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/csv'
airports = BlobModel.get_by_id(AIRPORTS_ID)
colors = ['#AF81C9', '#F89A7E', '#F2CA85', '#54D1F1', '#7C71AD', '#445569']
writer = csv.writer(self.response.out)
writer.writerow(['name', 'color'])
for i, value in enumerate(airports.payload):
writer.writerow([value, colors[i % len(colors)]])
class AirportMatrixHandler(webapp2.RequestHandler):
def get(self):
self.response.content_type = 'application/json'
matrix = BlobModel.get_by_id(MATRIX_ID)
self.response.write(json.encode(matrix.payload))
class RawHandler(webapp2.RequestHandler):
def get(self):
tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
flight_segments = tripit.list_flight_segments()
if len(flight_segments) > 0:
self.response.content_type = 'application/json'
self.response.write(json.encode(flight_segments))
class TripItHandler(webapp2.RequestHandler):
def get(self):
logging.info('Scheduling tripit fetch')
taskqueue.add(url='/tripit/worker')
def post(self):
tripit = TripItFacade(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)
flight_segments = tripit.list_flight_segments()
logging.info('Flight segments retrieved!')
airports = set()
matrix = defaultdict(int)
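        # Count the number of flights between each (origin, destination) airport pair.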
for s in flight_segments:
origin, destination = s['start_airport_code'], s['end_airport_code']
matrix[origin, destination] += 1
airports.add(origin)
airports.add(destination)
airports = list(airports) # to guarantee order
weights = []
for i in airports:
current_line = [0] * len(airports)
for j, value in enumerate(airports):
current_line[j] = matrix[i, value]
weights.append(current_line)
if len(weights) > 0:
tripit_airport = BlobModel(id=AIRPORTS_ID, payload=airports)
tripit_airport.put()
tripit_matrix = BlobModel(id=MATRIX_ID, payload=weights)
tripit_matrix.put()
logging.info('Updated datastore entries with matrix and airport information')
else:
logging.error('Ignoring datastore update due to missing information, check log for errors')
app = webapp2.WSGIApplication([
('/', HomeHandler),
('/airports/matrix.json', AirportMatrixHandler),
('/airports/list.csv', AirportListHandler),
('/tripit/schedule', TripItHandler),
('/tripit/worker', TripItHandler),
('/tripit/raw', RawHandler)
], debug=True)
|
[
"logging.error",
"csv.writer",
"google.appengine.api.taskqueue.add",
"config.JINJA_ENVIRONMENT.get_template",
"tripit_facade.TripItFacade",
"collections.defaultdict",
"logging.info",
"google.appengine.ext.ndb.PickleProperty",
"webapp2_extras.json.encode",
"webapp2.WSGIApplication"
] |
[((3756, 4020), 'webapp2.WSGIApplication', 'webapp2.WSGIApplication', (["[('/', HomeHandler), ('/airports/matrix.json', AirportMatrixHandler), (\n '/airports/list.csv', AirportListHandler), ('/tripit/schedule',\n TripItHandler), ('/tripit/worker', TripItHandler), ('/tripit/raw',\n RawHandler)]"], {'debug': '(True)'}), "([('/', HomeHandler), ('/airports/matrix.json',\n AirportMatrixHandler), ('/airports/list.csv', AirportListHandler), (\n '/tripit/schedule', TripItHandler), ('/tripit/worker', TripItHandler),\n ('/tripit/raw', RawHandler)], debug=True)\n", (3779, 4020), False, 'import webapp2\n'), ((920, 955), 'google.appengine.ext.ndb.PickleProperty', 'ndb.PickleProperty', ([], {'compressed': '(True)'}), '(compressed=True)\n', (938, 955), False, 'from google.appengine.ext import ndb\n'), ((1163, 1219), 'config.JINJA_ENVIRONMENT.get_template', 'config.JINJA_ENVIRONMENT.get_template', (['"""views/home.html"""'], {}), "('views/home.html')\n", (1200, 1219), False, 'import config\n'), ((1573, 1602), 'csv.writer', 'csv.writer', (['self.response.out'], {}), '(self.response.out)\n', (1583, 1602), False, 'import csv\n'), ((2077, 2137), 'tripit_facade.TripItFacade', 'TripItFacade', (['config.TRIPIT_USERNAME', 'config.TRIPIT_PASSWORD'], {}), '(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)\n', (2089, 2137), False, 'from tripit_facade import TripItFacade\n'), ((2428, 2467), 'logging.info', 'logging.info', (['"""Scheduling tripit fetch"""'], {}), "('Scheduling tripit fetch')\n", (2440, 2467), False, 'import logging\n'), ((2476, 2511), 'google.appengine.api.taskqueue.add', 'taskqueue.add', ([], {'url': '"""/tripit/worker"""'}), "(url='/tripit/worker')\n", (2489, 2511), False, 'from google.appengine.api import taskqueue\n'), ((2550, 2610), 'tripit_facade.TripItFacade', 'TripItFacade', (['config.TRIPIT_USERNAME', 'config.TRIPIT_PASSWORD'], {}), '(config.TRIPIT_USERNAME, config.TRIPIT_PASSWORD)\n', (2562, 2610), False, 'from tripit_facade import TripItFacade\n'), ((2675, 2717), 'logging.info', 'logging.info', (['"""Flight segments retrieved!"""'], {}), "('Flight segments retrieved!')\n", (2687, 2717), False, 'import logging\n'), ((2761, 2777), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2772, 2777), False, 'from collections import defaultdict\n'), ((1967, 1994), 'webapp2_extras.json.encode', 'json.encode', (['matrix.payload'], {}), '(matrix.payload)\n', (1978, 1994), False, 'from webapp2_extras import json\n'), ((3551, 3628), 'logging.info', 'logging.info', (['"""Updated datastore entries with matrix and airport information"""'], {}), "('Updated datastore entries with matrix and airport information')\n", (3563, 3628), False, 'import logging\n'), ((3655, 3756), 'logging.error', 'logging.error', (['"""Ignoring datastore update due to missing information, check log for errors"""'], {}), "(\n 'Ignoring datastore update due to missing information, check log for errors'\n )\n", (3668, 3756), False, 'import logging\n'), ((2323, 2351), 'webapp2_extras.json.encode', 'json.encode', (['flight_segments'], {}), '(flight_segments)\n', (2334, 2351), False, 'from webapp2_extras import json\n')]
|
# antioch
# Copyright (c) 1999-2019 <NAME>
#
#
# See LICENSE for details
"""
Client-side prompt support.
"""
from zope.interface import provider
from antioch import IPlugin
def ask(p, question, callback, *args, **kwargs):
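	# Push an 'ask' command to the caller's client; the answer is routed back to the given callback verb.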
details = dict(
question = question,
)
p.exchange.send_message(p.caller.get_id(), dict(
command = 'ask',
details = details,
callback = dict(
origin_id = callback.get_origin().get_id(),
verb_name = callback.get_names()[0],
args = args,
kwargs = kwargs,
)
))
@provider(IPlugin)
class AskPlugin(object):
script_url = 'js/ask-plugin.js'
def get_environment(self):
return dict(
ask = ask,
)
|
[
"zope.interface.provider"
] |
[((628, 645), 'zope.interface.provider', 'provider', (['IPlugin'], {}), '(IPlugin)\n', (636, 645), False, 'from zope.interface import provider\n')]
|
"""
Script to preprocess and save the annotated queries and the annotations.
Usage:
tests:
python preprocess_annotations.py --no_save
regular usage:
python preprocess_annotations.py
"""
from database_creation.annotation_task import AnnotationTask
from toolbox.parsers import standard_parser, add_annotations_arguments
from collections import defaultdict
from pickle import dump
from os import makedirs
from os.path import exists
def parse_arguments():
""" Use arparse to parse the input arguments and return it as a argparse.ArgumentParser. """
ap = standard_parser()
add_annotations_arguments(ap)
return ap.parse_args()
def filter_annotations(annotations, args):
"""
    Remove the annotations which don't meet the two criteria (annotations with not enough answers, and answers from
    workers that didn't do enough assignments) and return the remaining annotations.
Args:
annotations: dict of list of Annotations, Annotations from the MT workers.
args: argparse.ArgumentParser, parser object that contains the options of a script.
"""
min_assignments = args.min_assignments
min_answers = args.min_answers
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Filtering the annotations; annotations answered: %i, n/a: %i..." % (length1, length2))
workers_count = defaultdict(list)
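    # Map each worker to the ids of the annotations they answered, so low-activity workers can be filtered out.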
for annotation_id_, annotation_list in annotations.items():
for annotation in annotation_list:
workers_count[annotation.worker_id].append(annotation_id_)
worker_cmpt = 0
for worker_id, annotation_ids in workers_count.items():
if len(annotation_ids) < min_assignments:
worker_cmpt += 1
for annotation_id_ in annotation_ids:
annotations[annotation_id_] = [annotation for annotation in annotations[annotation_id_]
if annotation.worker_id != worker_id]
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Number of workers discarded: %i" % worker_cmpt)
print("First filter done (number of assignments); annotations answered: %i, n/a: %i..." % (length1, length2))
annotations = {id_: annotation_list for id_, annotation_list in annotations.items()
if len([annotation for annotation in annotation_list if not annotation.bug]) >= min_answers}
length1 = sum([len([annotation for annotation in annotation_list if annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
length2 = sum([len([annotation for annotation in annotation_list if not annotation.preprocessed_answers])
for _, annotation_list in annotations.items()])
if not args.silent:
print("Second filter done (number of answers); annotations answered: %i, n/a %i.\n" % (length1, length2))
return annotations
def save_pkl(annotations, queries, args):
"""
Saves the annotations and the queries using pickle.
Args:
annotations: dict of list of Annotations, Annotations from the MT workers.
queries: dict of Queries, Queries of the annotations.
args: argparse.ArgumentParser, parser object that contains the options of a script.
"""
path = args.annotations_path + "annotations/"
annotations_fname = path + "annotations.pkl"
queries_fname = path + "queries.pkl"
if not args.no_save:
if not exists(path):
makedirs(path)
if not args.silent:
print("Folder(s) created at %s." % path)
with open(annotations_fname, 'wb') as annotations_file, open(queries_fname, 'wb') as queries_file:
dump(obj=annotations, file=annotations_file, protocol=-1)
dump(obj=queries, file=queries_file, protocol=-1)
if not args.silent:
print("Files annotations.pkl & queries.pkl saved at %s." % path)
elif not args.silent:
print("Files annotations.pkl & queries.pkl not saved at %s (not in save mode)." % path)
def main():
""" Save in a .pkl the annotated queries and the annotations. """
args = parse_arguments()
annotation_task = AnnotationTask(silent=args.silent,
results_path=args.annotations_path,
years=None,
max_tuple_size=None,
short=None,
short_size=None,
random=None,
debug=None,
random_seed=None,
save=None,
corpus_path=None)
annotation_task.process_task(exclude_pilot=args.exclude_pilot)
queries = annotation_task.queries
annotations = annotation_task.annotations
annotations = filter_annotations(annotations, args=args)
save_pkl(queries=queries, annotations=annotations, args=args)
if __name__ == '__main__':
main()
|
[
"pickle.dump",
"os.makedirs",
"toolbox.parsers.add_annotations_arguments",
"database_creation.annotation_task.AnnotationTask",
"os.path.exists",
"collections.defaultdict",
"toolbox.parsers.standard_parser"
] |
[((589, 606), 'toolbox.parsers.standard_parser', 'standard_parser', ([], {}), '()\n', (604, 606), False, 'from toolbox.parsers import standard_parser, add_annotations_arguments\n'), ((611, 640), 'toolbox.parsers.add_annotations_arguments', 'add_annotations_arguments', (['ap'], {}), '(ap)\n', (636, 640), False, 'from toolbox.parsers import standard_parser, add_annotations_arguments\n'), ((1673, 1690), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1684, 1690), False, 'from collections import defaultdict\n'), ((4831, 5044), 'database_creation.annotation_task.AnnotationTask', 'AnnotationTask', ([], {'silent': 'args.silent', 'results_path': 'args.annotations_path', 'years': 'None', 'max_tuple_size': 'None', 'short': 'None', 'short_size': 'None', 'random': 'None', 'debug': 'None', 'random_seed': 'None', 'save': 'None', 'corpus_path': 'None'}), '(silent=args.silent, results_path=args.annotations_path,\n years=None, max_tuple_size=None, short=None, short_size=None, random=\n None, debug=None, random_seed=None, save=None, corpus_path=None)\n', (4845, 5044), False, 'from database_creation.annotation_task import AnnotationTask\n'), ((4087, 4099), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (4093, 4099), False, 'from os.path import exists\n'), ((4113, 4127), 'os.makedirs', 'makedirs', (['path'], {}), '(path)\n', (4121, 4127), False, 'from os import makedirs\n'), ((4337, 4394), 'pickle.dump', 'dump', ([], {'obj': 'annotations', 'file': 'annotations_file', 'protocol': '(-1)'}), '(obj=annotations, file=annotations_file, protocol=-1)\n', (4341, 4394), False, 'from pickle import dump\n'), ((4407, 4456), 'pickle.dump', 'dump', ([], {'obj': 'queries', 'file': 'queries_file', 'protocol': '(-1)'}), '(obj=queries, file=queries_file, protocol=-1)\n', (4411, 4456), False, 'from pickle import dump\n')]
|
import unittest
import flask
import mongoengine
class FlaskMongoEngineTestCase(unittest.TestCase):
"""Parent class of all test cases"""
def setUp(self):
self.app = flask.Flask(__name__)
self.app.config['MONGODB_DB'] = 'test_db'
self.app.config['TESTING'] = True
self.ctx = self.app.app_context()
self.ctx.push()
        # Mongoengine keeps a global registry of connections that must be
        # reset before each test.
        # Since it doesn't expose any method to list the registered
        # connections, we have to do the cleanup by hand.
mongoengine.connection._connection_settings.clear()
mongoengine.connection._connections.clear()
mongoengine.connection._dbs.clear()
def tearDown(self):
self.ctx.pop()
|
[
"flask.Flask",
"mongoengine.connection._connection_settings.clear",
"mongoengine.connection._connections.clear",
"mongoengine.connection._dbs.clear"
] |
[((183, 204), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (194, 204), False, 'import flask\n'), ((615, 666), 'mongoengine.connection._connection_settings.clear', 'mongoengine.connection._connection_settings.clear', ([], {}), '()\n', (664, 666), False, 'import mongoengine\n'), ((675, 718), 'mongoengine.connection._connections.clear', 'mongoengine.connection._connections.clear', ([], {}), '()\n', (716, 718), False, 'import mongoengine\n'), ((727, 762), 'mongoengine.connection._dbs.clear', 'mongoengine.connection._dbs.clear', ([], {}), '()\n', (760, 762), False, 'import mongoengine\n')]
|
from google_api_helpers import auth
from google_api_helpers import drive
from test.utils import drive as test_drive
from test.utils import auth as test_auth
import pytest
@pytest.fixture(scope="session", autouse=True)
def getTestCredentials():
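    # Session-scoped and autouse: authentication happens once, before any Drive test runs.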
return test_auth.getTestCredentials()
class TestDrive(test_drive.WithDriveCleaningFixture):
def test_list_empty(self):
contents = drive.list()
assert contents == []
def test_createBlank(self):
testName = "testing_created_blank"
createdSheetId = drive.createBlank(testName, [], drive.MimeTypes.sheet)
contents = drive.list()
createdSheet = {'id': createdSheetId, 'name': testName}
assert contents == [createdSheet]
def test_createBlankSheet(self):
testName = "testing_created_blank_sheet"
createdSheetId = drive.createBlankSheet(testName, [])
contents = drive.list()
createdSheet = {'id': createdSheetId, 'name': testName}
assert contents == [createdSheet]
def test_list_filtered(self):
testName1 = "testing_created_1"
createdSheetId1 = drive.createBlank(
testName1, [], drive.MimeTypes.sheet)
testName2 = "testing_created_2"
createdSheetId2 = drive.createBlank(
testName2, [], drive.MimeTypes.sheet)
testName3 = "testing_created_3"
createdSheetId3 = drive.createBlank(
testName3, [], drive.MimeTypes.sheet)
matchedContents = drive.list("name = 'testing_created_2'")
assert matchedContents == [
{'id': createdSheetId2, 'name': testName2}
]
|
[
"pytest.fixture",
"google_api_helpers.drive.createBlankSheet",
"google_api_helpers.drive.createBlank",
"test.utils.auth.getTestCredentials",
"google_api_helpers.drive.list"
] |
[((175, 220), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""', 'autouse': '(True)'}), "(scope='session', autouse=True)\n", (189, 220), False, 'import pytest\n'), ((258, 288), 'test.utils.auth.getTestCredentials', 'test_auth.getTestCredentials', ([], {}), '()\n', (286, 288), True, 'from test.utils import auth as test_auth\n'), ((396, 408), 'google_api_helpers.drive.list', 'drive.list', ([], {}), '()\n', (406, 408), False, 'from google_api_helpers import drive\n'), ((540, 594), 'google_api_helpers.drive.createBlank', 'drive.createBlank', (['testName', '[]', 'drive.MimeTypes.sheet'], {}), '(testName, [], drive.MimeTypes.sheet)\n', (557, 594), False, 'from google_api_helpers import drive\n'), ((615, 627), 'google_api_helpers.drive.list', 'drive.list', ([], {}), '()\n', (625, 627), False, 'from google_api_helpers import drive\n'), ((847, 883), 'google_api_helpers.drive.createBlankSheet', 'drive.createBlankSheet', (['testName', '[]'], {}), '(testName, [])\n', (869, 883), False, 'from google_api_helpers import drive\n'), ((904, 916), 'google_api_helpers.drive.list', 'drive.list', ([], {}), '()\n', (914, 916), False, 'from google_api_helpers import drive\n'), ((1125, 1180), 'google_api_helpers.drive.createBlank', 'drive.createBlank', (['testName1', '[]', 'drive.MimeTypes.sheet'], {}), '(testName1, [], drive.MimeTypes.sheet)\n', (1142, 1180), False, 'from google_api_helpers import drive\n'), ((1261, 1316), 'google_api_helpers.drive.createBlank', 'drive.createBlank', (['testName2', '[]', 'drive.MimeTypes.sheet'], {}), '(testName2, [], drive.MimeTypes.sheet)\n', (1278, 1316), False, 'from google_api_helpers import drive\n'), ((1397, 1452), 'google_api_helpers.drive.createBlank', 'drive.createBlank', (['testName3', '[]', 'drive.MimeTypes.sheet'], {}), '(testName3, [], drive.MimeTypes.sheet)\n', (1414, 1452), False, 'from google_api_helpers import drive\n'), ((1493, 1533), 'google_api_helpers.drive.list', 'drive.list', (['"""name = \'testing_created_2\'"""'], {}), '("name = \'testing_created_2\'")\n', (1503, 1533), False, 'from google_api_helpers import drive\n')]
|
import words
words_database = words.words()
yellow_letters = []
my_letters = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
defaults = ["BRICK","JUMPY","VOZHD","GLENT","WAQFS"]
defaults_2 = [['b','r','i','c','k'],['j','u','m','p','y'],['v','o','z','h','d'],['g','l','e','n','t'],['w','a','q','f','s']]
fixed_letters = {}
valid_letters = ['x']
original_letters = []
expected_word = ['_','_','_','_','_']
print('---------- Welcome to wordle solver! ----------')
it = 0
for i in defaults_2:
print(f'---> Use {defaults[it]} as input {it+1}')
g = list(input('[ ] Enter green letters: ').split())
for j in g:
if j in i:
valid_letters.append(j)
original_letters.append(j)
for k in range(len(g)):
if g[k] in i:
fixed_letters[k] = g[k]
expected_word[k] = g[k]
it += 1
y = list(input('[ ] Enter yellow letters: ').split())
for j in y:
if j in i:
valid_letters.append(j)
yellow_letters.append(j)
original_letters.append(j)
final_word = ['_','_','_','_','_']
temp = []
positions = []
for keys in fixed_letters.keys():
positions.append(keys)
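# keep only words whose letters match every green (fixed-position) slot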
for wrd in words_database:
    wrd = wrd.strip()
    if positions and all(wrd[p] == fixed_letters[p] for p in positions):
        temp.append(wrd)
        for p in positions:
            final_word[p] = wrd[p]
temp = sorted(temp)
last_filter = []
# fall back to the full word database when nothing matched the green letters
candidates = temp if temp else words_database
for word in candidates:
    count = 0
    for i in word:
        if i in valid_letters:
            count += 1
    if count == 5:
        last_filter.append(word)
if len(last_filter) == 0:
print('Sorry, no words found')
exit()
else:
result = {}
original_letters = sorted(original_letters)
last_filter = sorted(last_filter)
for word in last_filter:
w = word
word = list(word)
word = set(word)
score = len(word)
for i in word:
if i in yellow_letters:
score += 1
elif i not in original_letters:
score -= 1
result[w] = score
result = sorted(result.items(), key=lambda kv: kv[1], reverse=True)
output = result[0][0]
print('The word is:',output.upper())
|
[
"words.words"
] |
[((30, 43), 'words.words', 'words.words', ([], {}), '()\n', (41, 43), False, 'import words\n')]
|
"""Query mode"""
import os
import re
import csv
import sys
import argparse
from bson import json_util
from dataclasses import dataclass
from pymongo.mongo_client import MongoClient
from typing import Dict, List, Pattern, TextIO
from dbutils import constants
from dbutils.utils import get_comma_separated_fields, str2bool
@dataclass
class QueryModeOptions:
# Database name
database: str
# Collection name
collection: str
    # Specify comma-separated column names; the given fields will be projected.
    # If no value is specified, all columns will be returned in the output.
columns: List[str]
# Limit number of records
limit: int
# provide batch size for file-chunks mode
# script will also use batch_size to fetch record in batches
batch_size: int
    # Specify output mode (stdout, file, file-chunks)
output_mode: str
# Specify output file type (json, csv)
output_file_type: str
    # Specify bool to include the header in the csv file format
include_header: bool
# Output path. For file-chunks mode provide dir path, for file mode provide
# file path
output_path: str
# For file-chunks mode, provide file prefix
output_file_prefix: str
# For file-chunks mode, provide file extension
output_file_extension: str
# Provide queries
queries: Dict[str, Pattern]
# MongoDB client
mongodb_client: MongoClient
# MongoDB collection
mongodb_collection: any
# default mode
mode: str = constants.Modes.QUERY
def parse_arguments() -> argparse.Namespace:
"""Parse arguments for query mode."""
parser = argparse.ArgumentParser(description="DbUtils - Query mode")
parser.add_argument("mode", help="Operation mode", choices=[constants.Modes.QUERY])
parser.add_argument("-database", help="Mongodb Database Name", required=True)
parser.add_argument("-collection", help="Mongodb Collection Name", required=True)
parser.add_argument(
"-columns",
help="Given comma-separated values will be projected in the output (default=all columns)",
default="",
)
parser.add_argument("-batch-size", help="Batch size", default=500, type=int)
parser.add_argument("-limit", help="Limit number of records.", default=-1, type=int)
parser.add_argument(
"-output-mode",
help=(
"'stdout' output mode will print output in stdout. "
"'file' output mode will write output to a file. "
"'file-chunks' output mode will write output in smaller file chunks. "
"Use batch-mode argument to specify batch size."
),
required=True,
choices=[
constants.OutputMode.FILE,
constants.OutputMode.FILE_CHUNKS,
constants.OutputMode.STDOUT,
],
)
parser.add_argument(
"-output-file-type",
help="Output file type",
required=True,
choices=[
constants.FileTypes.CSV,
constants.FileTypes.JSON,
],
)
parser.add_argument(
"-include-header",
help="Include header for CSV file",
default=False,
type=str2bool,
)
parser.add_argument(
"-output-path",
help="If output-mode is file, provide file name. If output-mode is file-chunks, provide directory name",
default="",
)
parser.add_argument(
"-output-file-prefix",
help="Output file prefix for file-chunks mode",
default="",
)
parser.add_argument(
"-output-file-extension",
help="Output file extension for file-chunks mode",
default="txt",
)
parser.add_argument(
"-queries",
help="Provide regex queries in this format - '-queries KEY_NAME_1=REGEX_1 KEY_NAME_2=REGEX-2'",
nargs="*",
)
args = parser.parse_args()
# output-path is required when output-mode is file or file-chunks
if args.output_mode != constants.OutputMode.STDOUT and args.output_path == "":
parser.print_usage()
raise argparse.ArgumentTypeError(
"output-path is required when output-mode is file or file-chunks."
)
# output-file-prefix is required when output-mode is file-chunks
if (
args.output_mode == constants.OutputMode.FILE_CHUNKS
and args.output_file_prefix == ""
):
parser.print_usage()
raise argparse.ArgumentTypeError(
"output-file-prefix is required when output-mode is file-chunks."
)
args.queries = {
query[: query.index("=")]: re.compile(query[query.index("=") + 1 :])
for query in args.queries or []
}
args.columns = get_comma_separated_fields(args.columns)
return args
def create_options_from_args() -> QueryModeOptions:
"""Parses arguments and returns QueryModeOptions."""
    args = vars(parse_arguments())
# FIXME: get mongodb connection string from environment variable
mongo_client = MongoClient()
args["mongodb_client"] = mongo_client
args["mongodb_collection"] = mongo_client[args["database"]][args["collection"]]
return QueryModeOptions(**args)
def _write_json(records: List[Dict], output_file: TextIO):
"""Write records to json file."""
    # json_util.dumps serialises MongoDB records (including ObjectId fields)
    # to JSON strings; one record is written per line.
output_file.writelines([json_util.dumps(record) + "\n" for record in records])
# Close output stream
if output_file != sys.stdout:
        # Never close sys.stdout; closing it would break later print() calls.
output_file.close()
def _write_csv(records: List[Dict], output_file: TextIO, write_header: bool = False):
"""Write output to csv file."""
# Find unique column names
columns = set()
for record in records:
columns.update(record.keys())
csv_writer = csv.DictWriter(output_file, fieldnames=sorted(columns))
# Write columns
    if write_header:
        csv_writer.writeheader()
for record in records:
csv_writer.writerow(record)
# Close output stream
if output_file != sys.stdout:
        # Never close sys.stdout; closing it would break later print() calls.
output_file.close()
def output(options: QueryModeOptions, batch_id: int, records: List[Dict]):
"""Write output"""
output_mode = options.output_mode
if output_mode == constants.OutputMode.STDOUT:
output_path = ""
output_stream = sys.stdout
elif output_mode == constants.OutputMode.FILE:
output_path = options.output_path
output_stream = open(output_path, "a")
elif output_mode == constants.OutputMode.FILE_CHUNKS:
output_file_name = (
f"{options.output_file_prefix}-{batch_id}.{options.output_file_extension}"
)
output_path = os.path.join(options.output_path, output_file_name)
output_stream = open(output_path, "w")
# Write output to csv/json file
if options.output_file_type == constants.FileTypes.CSV:
_write_csv(records, output_stream, options.include_header)
elif options.output_file_type == constants.FileTypes.JSON:
_write_json(records, output_stream)
def run(options: QueryModeOptions) -> None:
"""Runs query mode."""
if options.output_mode == constants.OutputMode.FILE_CHUNKS:
# Creates directory for file-chunks mode
os.makedirs(options.output_path, exist_ok=True)
elif options.output_mode == constants.OutputMode.FILE:
# If output-mode is `file` and if output-path is '/some/path/file.csv'
# and if directory '/some/path/' does not exist, it will be created
dir_path = os.path.dirname(os.path.abspath(options.output_path))
os.makedirs(dir_path, exist_ok=True)
# TODO: raise error if output-path exists, add new argument to
# overwrite file if it exists.
# If output_mode is file and output-path exists, this will delete it
if os.path.exists(options.output_path) and os.path.isfile(options.output_path):
os.unlink(options.output_path)
elif options.output_mode == constants.OutputMode.FILE_CHUNKS:
# TODO: if files with file-name matching output-mode-prefix and output-mode-extension
# exists, raise error
pass
db = options.mongodb_collection
last_id = ""
current_batch = 0
# Fetch records in batches
records = db.find(options.queries).limit(options.batch_size)
while records:
# Records to output will be stored in this list
output_records = []
for record in records:
last_id = record["_id"]
# Create dict containing the fields specified using 'columns' argument
output_record = {
key: value
for key, value in record.items()
if len(options.columns) == 0 or key in options.columns
}
output_records.append(output_record)
# If no records are found, exit
if len(output_records) == 0:
return
output(options, current_batch, output_records)
current_batch += 1
        # Keyset pagination: fetch the next batch as documents whose _id is
        # greater than the last one seen, instead of using skip().
next_query = {**options.queries, "_id": {"$gt": last_id}}
records = db.find(next_query).limit(options.batch_size)
# If number of fetched records is >= limit provided, exit
if options.limit > 0 and current_batch * options.batch_size >= options.limit:
return
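# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal entry point wiring the helpers above together; the __main__ guard
# and the explicit client close are assumptions, everything else is defined
# in this file.
if __name__ == "__main__":
    opts = create_options_from_args()
    try:
        run(opts)
    finally:
        # MongoClient owns sockets/threads; release them when query mode ends.
        opts.mongodb_client.close()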
|
[
"os.path.abspath",
"os.makedirs",
"argparse.ArgumentParser",
"os.unlink",
"pymongo.mongo_client.MongoClient",
"os.path.exists",
"os.path.isfile",
"dbutils.utils.get_comma_separated_fields",
"bson.json_util.dumps",
"os.path.join",
"argparse.ArgumentTypeError"
] |
[((1606, 1665), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""DbUtils - Query mode"""'}), "(description='DbUtils - Query mode')\n", (1629, 1665), False, 'import argparse\n'), ((4671, 4711), 'dbutils.utils.get_comma_separated_fields', 'get_comma_separated_fields', (['args.columns'], {}), '(args.columns)\n', (4697, 4711), False, 'from dbutils.utils import get_comma_separated_fields, str2bool\n'), ((4964, 4977), 'pymongo.mongo_client.MongoClient', 'MongoClient', ([], {}), '()\n', (4975, 4977), False, 'from pymongo.mongo_client import MongoClient\n'), ((4040, 4139), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""output-path is required when output-mode is file or file-chunks."""'], {}), "(\n 'output-path is required when output-mode is file or file-chunks.')\n", (4066, 4139), False, 'import argparse\n'), ((4390, 4488), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""output-file-prefix is required when output-mode is file-chunks."""'], {}), "(\n 'output-file-prefix is required when output-mode is file-chunks.')\n", (4416, 4488), False, 'import argparse\n'), ((7383, 7430), 'os.makedirs', 'os.makedirs', (['options.output_path'], {'exist_ok': '(True)'}), '(options.output_path, exist_ok=True)\n', (7394, 7430), False, 'import os\n'), ((7726, 7762), 'os.makedirs', 'os.makedirs', (['dir_path'], {'exist_ok': '(True)'}), '(dir_path, exist_ok=True)\n', (7737, 7762), False, 'import os\n'), ((5378, 5401), 'bson.json_util.dumps', 'json_util.dumps', (['record'], {}), '(record)\n', (5393, 5401), False, 'from bson import json_util\n'), ((6817, 6868), 'os.path.join', 'os.path.join', (['options.output_path', 'output_file_name'], {}), '(options.output_path, output_file_name)\n', (6829, 6868), False, 'import os\n'), ((7680, 7716), 'os.path.abspath', 'os.path.abspath', (['options.output_path'], {}), '(options.output_path)\n', (7695, 7716), False, 'import os\n'), ((7963, 7998), 'os.path.exists', 'os.path.exists', (['options.output_path'], {}), '(options.output_path)\n', (7977, 7998), False, 'import os\n'), ((8003, 8038), 'os.path.isfile', 'os.path.isfile', (['options.output_path'], {}), '(options.output_path)\n', (8017, 8038), False, 'import os\n'), ((8052, 8082), 'os.unlink', 'os.unlink', (['options.output_path'], {}), '(options.output_path)\n', (8061, 8082), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import os
import tornado.web
from zipfile import ZipFile
from tools import ListingFiles
folder_path = 'data/'
zip_file = './export.zip'
def list_all_export_file():
"""
    Build the list of 'data' files that should be exported.
    :return: list of file paths to export
    """
    return list_high_level_files()
def list_high_level_files():
return [os.path.normpath(os.path.join(folder_path, file))
for file in ListingFiles.list_file_root_folder(folder_path)
if file.endswith('.json')]
def create_zip(file_list: list):
"""
Create a zip from list_export_file() returned file list.
"""
with ZipFile(zip_file, 'w') as myzip:
for file in file_list:
myzip.write(file, arcname=file[len(folder_path):])
class DataAPIHandler(tornado.web.RequestHandler):
"""
Class to handle '/data' endpoint.
"""
def get(self, path_request):
"""
Handle GET requests.
:param path_request: request path ( < URI)
"""
if path_request == 'all_export.zip':
create_zip(list_all_export_file())
with open(zip_file, mode='rb') as file:
c = file.read()
self.set_header('content-type', 'application/zip')
self.write(c)
else:
self.send_error(status_code=400, reason='bad request')
return
def post(self, path_request):
"""
Handle POST requests.
:param path_request: request path ( < URI)
"""
if path_request == 'import.zip':
try:
fileinfo = self.request.files['file'][0]
                # fname = fileinfo['filename']  # the name of the received file
with open(os.path.join(folder_path, 'imported.zip'), 'wb') as fh:
fh.write(fileinfo['body'])
zip_2_extract = ZipFile(os.path.join(folder_path, 'imported.zip'), 'r')
zip_2_extract.extractall(folder_path)
zip_2_extract.close()
os.remove(os.path.join(folder_path, 'imported.zip'))
            except KeyError:  # the form did not use 'file' as the field name for the uploaded file
self.send_error(status_code=400, reason='bad request')
else:
self.send_error(status_code=400, reason='bad request')
return
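# --- Hypothetical wiring sketch (not part of the original module) ---
# How a handler like this is typically mounted in a Tornado application; the
# port and the '/data/(.*)' route are assumptions, only DataAPIHandler comes
# from this file. The capture group becomes the handler's `path_request`.
if __name__ == '__main__':
    import tornado.ioloop
    app = tornado.web.Application([(r'/data/(.*)', DataAPIHandler)])
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()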
|
[
"tools.ListingFiles.list_file_root_folder",
"zipfile.ZipFile",
"os.path.join"
] |
[((689, 711), 'zipfile.ZipFile', 'ZipFile', (['zip_file', '"""w"""'], {}), "(zip_file, 'w')\n", (696, 711), False, 'from zipfile import ZipFile\n'), ((424, 455), 'os.path.join', 'os.path.join', (['folder_path', 'file'], {}), '(folder_path, file)\n', (436, 455), False, 'import os\n'), ((481, 528), 'tools.ListingFiles.list_file_root_folder', 'ListingFiles.list_file_root_folder', (['folder_path'], {}), '(folder_path)\n', (515, 528), False, 'from tools import ListingFiles\n'), ((1928, 1969), 'os.path.join', 'os.path.join', (['folder_path', '"""imported.zip"""'], {}), "(folder_path, 'imported.zip')\n", (1940, 1969), False, 'import os\n'), ((2094, 2135), 'os.path.join', 'os.path.join', (['folder_path', '"""imported.zip"""'], {}), "(folder_path, 'imported.zip')\n", (2106, 2135), False, 'import os\n'), ((1785, 1826), 'os.path.join', 'os.path.join', (['folder_path', '"""imported.zip"""'], {}), "(folder_path, 'imported.zip')\n", (1797, 1826), False, 'import os\n')]
|
import numpy as np
import tensorflow as tf
from gym_ds3.schedulers.deepsocs.average_reward import AveragePerStepReward
from gym_ds3.schedulers.deepsocs.compute_baselines import get_piecewise_linear_fit_baseline
from gym_ds3.schedulers.deepsocs.deepsocs_scheduler import Deepsocs
from gym_ds3.schedulers.models.deepsocs_model import create_deepsocs_model, create_deepsocs_graph
from gym_ds3.envs.utils.helper_deepsocs import suppress_tf_warning, discount
class ParameterServer(object):
def __init__(self, args):
self.args = args
self.seed = args.seed
suppress_tf_warning() # suppress TF warnings
        # Deepsocs model and training graph
self.model, self.sess = create_deepsocs_model(args)
self.graph = create_deepsocs_graph(args=args, model=self.model)
# Deepsocs Scheduler
self.deepsocs = Deepsocs(args, self.model, self.sess)
self.avg_reward_calculator = AveragePerStepReward(size=100000)
# Initialize model
tf.set_random_seed(self.seed)
np.random.seed(self.seed)
self.sess.run(tf.global_variables_initializer())
# Flag to initialize assign operations for 'set_weights()'
self.FIRST_SET_FLAG = True
def get_weights(self):
weight_vals = self.sess.run(self.model['all_vars'])
return weight_vals
def set_weights(self, weight_vals):
"""
        Set weights without leaking graph memory: the assign placeholders and
        ops are created once on the first call and reused afterwards, so
        repeated calls do not keep adding nodes to the TensorFlow graph.
"""
if self.FIRST_SET_FLAG:
self.FIRST_SET_FLAG = False
self.assign_placeholders = []
self.assign_ops = []
for w_idx, weight_tf_var in enumerate(self.model['all_vars']):
a = weight_tf_var
assign_placeholder = tf.placeholder(a.dtype, shape=a.get_shape())
assign_op = a.assign(assign_placeholder)
self.assign_placeholders.append(assign_placeholder)
self.assign_ops.append(assign_op)
for w_idx, weight_tf_var in enumerate(self.model['all_vars']):
self.sess.run(self.assign_ops[w_idx],
{self.assign_placeholders[w_idx]: weight_vals[w_idx]})
def apply_gradients(self, gradients):
self.sess.run(self.graph['apply_grads'], feed_dict={
i: d for i, d in zip(self.graph['gradients'], gradients)
})
def compute_advantages(self, ops_vals):
# calculate advantages (input-dependent baselines)
all_times, all_diff_times, all_rewards, last_returns = [], [], [], []
results = {}
for ops_val in ops_vals:
rollout_val = ops_val[0]
stat = ops_val[1]
diff_time = np.array(rollout_val['wall_time'][1:]) - np.array(rollout_val['wall_time'][:-1])
self.avg_reward_calculator.add_list_filter_zero(rollout_val['reward'], diff_time)
all_diff_times.append(diff_time)
all_times.append(rollout_val['wall_time'][1:])
all_rewards.append(rollout_val['reward'])
for k, v in stat.items():
                results.setdefault(k, []).append(v)
adv, all_cum_reward = compute_advantage(
self.args, self.avg_reward_calculator, all_rewards, all_diff_times, all_times)
for cum_reward in all_cum_reward:
last_returns.append(cum_reward[-1])
return results, adv
def compute_advantage(args, reward_calculator, all_rewards, all_diff_times, all_times):
# compute differential reward
all_cum_reward = []
avg_per_step_reward = reward_calculator.get_avg_per_step_reward()
for i in range(args.num_agents):
# differential reward mode on
rewards = np.array([r - avg_per_step_reward * t for \
(r, t) in zip(all_rewards[i], all_diff_times[i])])
cum_reward = discount(rewards, args.gamma)
all_cum_reward.append(cum_reward)
baselines = get_piecewise_linear_fit_baseline(all_cum_reward, all_times)
# give worker back the advantage
advs = []
for i in range(args.num_agents):
batch_adv = all_cum_reward[i] - baselines[i]
batch_adv = np.reshape(batch_adv, [len(batch_adv), 1])
advs.append(batch_adv)
return advs, all_cum_reward
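# --- Hypothetical usage sketch (not part of the original module) ---
# compute_advantage only needs `num_agents` and `gamma` on args, plus an
# object exposing get_avg_per_step_reward(); the stub calculator and the
# synthetic rollouts below are assumptions for illustration, and running it
# still requires the gym_ds3 helpers imported at the top of this file.
if __name__ == '__main__':
    from types import SimpleNamespace

    class _StubCalculator:
        def get_avg_per_step_reward(self):
            return 0.1

    args = SimpleNamespace(num_agents=2, gamma=0.99)
    rewards = [np.ones(5), np.zeros(5)]
    diff_times = [np.ones(5), np.ones(5)]
    times = [list(range(5)), list(range(5))]
    advs, cum_rewards = compute_advantage(args, _StubCalculator(), rewards, diff_times, times)
    print([a.shape for a in advs])  # two (5, 1) advantage arrays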
|
[
"gym_ds3.schedulers.deepsocs.average_reward.AveragePerStepReward",
"gym_ds3.schedulers.deepsocs.compute_baselines.get_piecewise_linear_fit_baseline",
"gym_ds3.schedulers.models.deepsocs_model.create_deepsocs_graph",
"numpy.random.seed",
"gym_ds3.schedulers.models.deepsocs_model.create_deepsocs_model",
"tensorflow.global_variables_initializer",
"gym_ds3.schedulers.deepsocs.deepsocs_scheduler.Deepsocs",
"gym_ds3.envs.utils.helper_deepsocs.suppress_tf_warning",
"tensorflow.set_random_seed",
"numpy.array",
"gym_ds3.envs.utils.helper_deepsocs.discount"
] |
[((4083, 4143), 'gym_ds3.schedulers.deepsocs.compute_baselines.get_piecewise_linear_fit_baseline', 'get_piecewise_linear_fit_baseline', (['all_cum_reward', 'all_times'], {}), '(all_cum_reward, all_times)\n', (4116, 4143), False, 'from gym_ds3.schedulers.deepsocs.compute_baselines import get_piecewise_linear_fit_baseline\n'), ((583, 604), 'gym_ds3.envs.utils.helper_deepsocs.suppress_tf_warning', 'suppress_tf_warning', ([], {}), '()\n', (602, 604), False, 'from gym_ds3.envs.utils.helper_deepsocs import suppress_tf_warning, discount\n'), ((682, 709), 'gym_ds3.schedulers.models.deepsocs_model.create_deepsocs_model', 'create_deepsocs_model', (['args'], {}), '(args)\n', (703, 709), False, 'from gym_ds3.schedulers.models.deepsocs_model import create_deepsocs_model, create_deepsocs_graph\n'), ((731, 781), 'gym_ds3.schedulers.models.deepsocs_model.create_deepsocs_graph', 'create_deepsocs_graph', ([], {'args': 'args', 'model': 'self.model'}), '(args=args, model=self.model)\n', (752, 781), False, 'from gym_ds3.schedulers.models.deepsocs_model import create_deepsocs_model, create_deepsocs_graph\n'), ((836, 873), 'gym_ds3.schedulers.deepsocs.deepsocs_scheduler.Deepsocs', 'Deepsocs', (['args', 'self.model', 'self.sess'], {}), '(args, self.model, self.sess)\n', (844, 873), False, 'from gym_ds3.schedulers.deepsocs.deepsocs_scheduler import Deepsocs\n'), ((920, 953), 'gym_ds3.schedulers.deepsocs.average_reward.AveragePerStepReward', 'AveragePerStepReward', ([], {'size': '(100000)'}), '(size=100000)\n', (940, 953), False, 'from gym_ds3.schedulers.deepsocs.average_reward import AveragePerStepReward\n'), ((990, 1019), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['self.seed'], {}), '(self.seed)\n', (1008, 1019), True, 'import tensorflow as tf\n'), ((1028, 1053), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1042, 1053), True, 'import numpy as np\n'), ((3993, 4022), 'gym_ds3.envs.utils.helper_deepsocs.discount', 'discount', (['rewards', 'args.gamma'], {}), '(rewards, args.gamma)\n', (4001, 4022), False, 'from gym_ds3.envs.utils.helper_deepsocs import suppress_tf_warning, discount\n'), ((1076, 1109), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1107, 1109), True, 'import tensorflow as tf\n'), ((2685, 2723), 'numpy.array', 'np.array', (["rollout_val['wall_time'][1:]"], {}), "(rollout_val['wall_time'][1:])\n", (2693, 2723), True, 'import numpy as np\n'), ((2726, 2765), 'numpy.array', 'np.array', (["rollout_val['wall_time'][:-1]"], {}), "(rollout_val['wall_time'][:-1])\n", (2734, 2765), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
#################################################################################
# File Name : ./layer.py
# Created By : yang
# Creation Date : [2017-11-15 12:51]
# Last Modified : [2017-11-15 13:09]
# Description : some layers definition
#################################################################################
import lasagne, theano
import numpy as np
import theano.tensor as T
DTYPE = "float32"
class FeatureCombineLayer(lasagne.layers.MergeLayer):
def __init__(self, incomings, **kwargs):
super(FeatureCombineLayer, self).__init__(incomings, **kwargs)
max_size = self.output_shape[2]
self.one = T.ones((1, max_size), dtype=DTYPE)
def get_output_shape_for(self, input_shapes, **kwargs):
return (input_shapes[0][0], input_shapes[0][1] + input_shapes[1][1] * 2, input_shapes[0][2], input_shapes[0][3])
def get_output_for(self, input,**kwargs):
feature2d = input[0]
feature1d = input[1]
feature1d_h = feature1d.dimshuffle(0, 1, 2, 'x')
feature1d_h = T.tensordot(feature1d_h, self.one, [[3], [0]])
feature1d_v = feature1d_h.dimshuffle(0, 1, 3, 2)
return T.concatenate([feature2d, feature1d_h, feature1d_v], axis = 1)
class Feature2dBiasLayer(lasagne.layers.Layer):
def __init__(self, incoming = None, **kwargs):
super(Feature2dBiasLayer,self).__init__(incoming, **kwargs)
self.max_size = self.output_shape[2]
        # Pre-compute one-hot "distance band" bias planes keyed on |i - j|.
self.bias = np.zeros((7, self.max_size, self.max_size), dtype = DTYPE)
        for i in range(self.max_size):  # range works on both Python 2 and 3 (xrange does not)
            for j in range(self.max_size):
delta = abs(i - j)
if delta < 14:
t = 0
elif delta < 18:
t = 1
elif delta < 23:
t = 2
elif delta < 28:
t = 3
elif delta < 38:
t = 4
elif delta < 48:
t = 5
else:
t = 6
self.bias[t, i, j] = 1.0
self.bias = theano.shared(self.bias)
self.bias = self.bias.dimshuffle('x', 0, 1, 2)
def get_output_shape_for(self, input_shape, **kwargs):
return (input_shape[0], input_shape[1] + 7, input_shape[2], input_shape[3])
def get_output_for(self, input, **kwargs):
batch_size = input.shape[0]
one = T.ones((batch_size, 1), dtype=DTYPE)
tmp = T.tensordot(one, self.bias, [[1], [0]])
return T.concatenate([input, tmp], axis = 1)
class LinearLayer(lasagne.layers.Layer):
def __init__(self, incoming = None, max_size = 256, deepth = 25, W = lasagne.init.GlorotUniform(), b = lasagne.init.Constant(0.0),num_output = 1,**kwargs):
super(LinearLayer, self).__init__(incoming, **kwargs)
self.max_size = max_size
self.deepth = deepth
self.num_output = num_output
self.W = self.add_param(W,(self.deepth,num_output), name = "W")
self.b = self.add_param(b, (num_output,), name = 'b')
def get_output_shape_for(self, input_shape, **kwargs):
return (input_shape[0], self.num_output, input_shape[2], input_shape[3])
def get_output_for(self, input, **kwargs):
tmp = T.tensordot(input, self.W, [[1],[0]]).dimshuffle(0, 3, 1, 2)
return tmp + self.b[None,:,None,None]
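# --- Hypothetical shape check (not part of the original module) ---
# Combining a (batch, 4, 64, 64) pairwise map with (batch, 8, 64) per-position
# features should give 4 + 2*8 = 20 output channels; the layer sizes here are
# assumptions for illustration.
if __name__ == '__main__':
    l_2d = lasagne.layers.InputLayer((None, 4, 64, 64))
    l_1d = lasagne.layers.InputLayer((None, 8, 64))
    l_comb = FeatureCombineLayer([l_2d, l_1d])
    print(l_comb.output_shape)  # (None, 20, 64, 64)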
|
[
"theano.tensor.concatenate",
"lasagne.init.GlorotUniform",
"lasagne.init.Constant",
"numpy.zeros",
"theano.tensor.tensordot",
"theano.shared",
"theano.tensor.ones"
] |
[((741, 775), 'theano.tensor.ones', 'T.ones', (['(1, max_size)'], {'dtype': 'DTYPE'}), '((1, max_size), dtype=DTYPE)\n', (747, 775), True, 'import theano.tensor as T\n'), ((1146, 1192), 'theano.tensor.tensordot', 'T.tensordot', (['feature1d_h', 'self.one', '[[3], [0]]'], {}), '(feature1d_h, self.one, [[3], [0]])\n', (1157, 1192), True, 'import theano.tensor as T\n'), ((1266, 1326), 'theano.tensor.concatenate', 'T.concatenate', (['[feature2d, feature1d_h, feature1d_v]'], {'axis': '(1)'}), '([feature2d, feature1d_h, feature1d_v], axis=1)\n', (1279, 1326), True, 'import theano.tensor as T\n'), ((1588, 1644), 'numpy.zeros', 'np.zeros', (['(7, self.max_size, self.max_size)'], {'dtype': 'DTYPE'}), '((7, self.max_size, self.max_size), dtype=DTYPE)\n', (1596, 1644), True, 'import numpy as np\n'), ((2228, 2252), 'theano.shared', 'theano.shared', (['self.bias'], {}), '(self.bias)\n', (2241, 2252), False, 'import lasagne, theano\n'), ((2554, 2590), 'theano.tensor.ones', 'T.ones', (['(batch_size, 1)'], {'dtype': 'DTYPE'}), '((batch_size, 1), dtype=DTYPE)\n', (2560, 2590), True, 'import theano.tensor as T\n'), ((2605, 2644), 'theano.tensor.tensordot', 'T.tensordot', (['one', 'self.bias', '[[1], [0]]'], {}), '(one, self.bias, [[1], [0]])\n', (2616, 2644), True, 'import theano.tensor as T\n'), ((2660, 2695), 'theano.tensor.concatenate', 'T.concatenate', (['[input, tmp]'], {'axis': '(1)'}), '([input, tmp], axis=1)\n', (2673, 2695), True, 'import theano.tensor as T\n'), ((2813, 2841), 'lasagne.init.GlorotUniform', 'lasagne.init.GlorotUniform', ([], {}), '()\n', (2839, 2841), False, 'import lasagne, theano\n'), ((2847, 2873), 'lasagne.init.Constant', 'lasagne.init.Constant', (['(0.0)'], {}), '(0.0)\n', (2868, 2873), False, 'import lasagne, theano\n'), ((3398, 3436), 'theano.tensor.tensordot', 'T.tensordot', (['input', 'self.W', '[[1], [0]]'], {}), '(input, self.W, [[1], [0]])\n', (3409, 3436), True, 'import theano.tensor as T\n')]
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from tensorflow.python.keras import Model, layers
from tensorflow.python.keras.initializers import RandomNormal
from fastestimator.layers import InstanceNormalization, ReflectionPadding2D
def _resblock(x0, num_filter=256, kernel_size=3):
x = ReflectionPadding2D()(x0)
x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=num_filter, kernel_size=kernel_size, kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.Add()([x, x0])
return x
def build_discriminator(input_shape=(256, 256, 3)):
"""Returns the discriminator network of the GAN.
Args:
input_shape (tuple, optional): shape of the input image. Defaults to (256, 256, 3).
Returns:
'Model' object: GAN discriminator.
"""
x0 = layers.Input(input_shape)
x = layers.Conv2D(filters=64,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x0)
x = layers.LeakyReLU(0.2)(x)
x = layers.Conv2D(filters=128,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = layers.Conv2D(filters=256,
kernel_size=4,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=512, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.LeakyReLU(0.2)(x)
x = ReflectionPadding2D()(x)
x = layers.Conv2D(filters=1, kernel_size=4, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
return Model(inputs=x0, outputs=x)
def build_generator(input_shape=(256, 256, 3), num_blocks=9):
"""Returns the generator of the GAN.
Args:
input_shape (tuple, optional): shape of the input image. Defaults to (256, 256, 3).
num_blocks (int, optional): number of resblocks for the generator. Defaults to 9.
Returns:
'Model' object: GAN generator.
"""
x0 = layers.Input(input_shape)
x = ReflectionPadding2D(padding=(3, 3))(x0)
x = layers.Conv2D(filters=64, kernel_size=7, strides=1, kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# downsample
x = layers.Conv2D(filters=128,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2D(filters=256,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# residual
for _ in range(num_blocks):
x = _resblock(x)
# upsample
x = layers.Conv2DTranspose(filters=128,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
x = layers.Conv2DTranspose(filters=64,
kernel_size=3,
strides=2,
padding='same',
kernel_initializer=RandomNormal(mean=0, stddev=0.02))(x)
x = InstanceNormalization()(x)
x = layers.ReLU()(x)
# final
x = ReflectionPadding2D(padding=(3, 3))(x)
x = layers.Conv2D(filters=3, kernel_size=7, activation='tanh', kernel_initializer=RandomNormal(mean=0,
stddev=0.02))(x)
return Model(inputs=x0, outputs=x)
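# --- Hypothetical smoke test (not part of the original module) ---
# Instantiating both networks with the default 256x256x3 input; the expected
# discriminator output shape is an estimate based on the strides above.
if __name__ == '__main__':
    generator = build_generator()
    discriminator = build_discriminator()
    print(generator.output_shape)      # (None, 256, 256, 3)
    print(discriminator.output_shape)  # patch-level logits, e.g. (None, 30, 30, 1)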
|
[
"tensorflow.python.keras.layers.Add",
"fastestimator.layers.InstanceNormalization",
"tensorflow.python.keras.layers.LeakyReLU",
"fastestimator.layers.ReflectionPadding2D",
"tensorflow.python.keras.layers.Input",
"tensorflow.python.keras.initializers.RandomNormal",
"tensorflow.python.keras.layers.ReLU",
"tensorflow.python.keras.Model"
] |
[((1876, 1901), 'tensorflow.python.keras.layers.Input', 'layers.Input', (['input_shape'], {}), '(input_shape)\n', (1888, 1901), False, 'from tensorflow.python.keras import Model, layers\n'), ((3124, 3151), 'tensorflow.python.keras.Model', 'Model', ([], {'inputs': 'x0', 'outputs': 'x'}), '(inputs=x0, outputs=x)\n', (3129, 3151), False, 'from tensorflow.python.keras import Model, layers\n'), ((3528, 3553), 'tensorflow.python.keras.layers.Input', 'layers.Input', (['input_shape'], {}), '(input_shape)\n', (3540, 3553), False, 'from tensorflow.python.keras import Model, layers\n'), ((5402, 5429), 'tensorflow.python.keras.Model', 'Model', ([], {'inputs': 'x0', 'outputs': 'x'}), '(inputs=x0, outputs=x)\n', (5407, 5429), False, 'from tensorflow.python.keras import Model, layers\n'), ((940, 961), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {}), '()\n', (959, 961), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((1197, 1220), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (1218, 1220), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((1232, 1245), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (1243, 1245), False, 'from tensorflow.python.keras import Model, layers\n'), ((1258, 1279), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {}), '()\n', (1277, 1279), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((1514, 1537), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (1535, 1537), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((1549, 1561), 'tensorflow.python.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (1559, 1561), False, 'from tensorflow.python.keras import Model, layers\n'), ((2132, 2153), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2148, 2153), False, 'from tensorflow.python.keras import Model, layers\n'), ((2389, 2412), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (2410, 2412), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((2424, 2445), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2440, 2445), False, 'from tensorflow.python.keras import Model, layers\n'), ((2681, 2704), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (2702, 2704), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((2716, 2737), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2732, 2737), False, 'from tensorflow.python.keras import Model, layers\n'), ((2750, 2771), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {}), '()\n', (2769, 2771), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((2902, 2925), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (2923, 2925), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((2937, 2958), 'tensorflow.python.keras.layers.LeakyReLU', 'layers.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (2953, 2958), False, 'from tensorflow.python.keras import Model, layers\n'), ((2971, 2992), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {}), '()\n', (2990, 2992), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((3563, 3598), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {'padding': '(3, 3)'}), '(padding=(3, 3))\n', (3582, 3598), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((3729, 3752), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (3750, 3752), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((3764, 3777), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (3775, 3777), False, 'from tensorflow.python.keras import Model, layers\n'), ((4029, 4052), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (4050, 4052), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((4064, 4077), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (4075, 4077), False, 'from tensorflow.python.keras import Model, layers\n'), ((4312, 4335), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (4333, 4335), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((4347, 4360), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (4358, 4360), False, 'from tensorflow.python.keras import Model, layers\n'), ((4728, 4751), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (4749, 4751), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((4763, 4776), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (4774, 4776), False, 'from tensorflow.python.keras import Model, layers\n'), ((5055, 5078), 'fastestimator.layers.InstanceNormalization', 'InstanceNormalization', ([], {}), '()\n', (5076, 5078), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((5090, 5103), 'tensorflow.python.keras.layers.ReLU', 'layers.ReLU', ([], {}), '()\n', (5101, 5103), False, 'from tensorflow.python.keras import Model, layers\n'), ((5128, 5163), 'fastestimator.layers.ReflectionPadding2D', 'ReflectionPadding2D', ([], {'padding': '(3, 3)'}), '(padding=(3, 3))\n', (5147, 5163), False, 'from fastestimator.layers import InstanceNormalization, ReflectionPadding2D\n'), ((1052, 1085), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (1064, 1085), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((1369, 1402), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (1381, 1402), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((2085, 2118), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (2097, 2118), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((2342, 2375), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (2354, 2375), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((2634, 2667), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (2646, 2667), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((2855, 2888), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (2867, 2888), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((3074, 3107), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (3086, 3107), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((3682, 3715), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (3694, 3715), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((3983, 4016), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (3995, 4016), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((4266, 4299), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (4278, 4299), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((4682, 4715), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (4694, 4715), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((5009, 5042), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (5021, 5042), False, 'from tensorflow.python.keras.initializers import RandomNormal\n'), ((5253, 5286), 'tensorflow.python.keras.initializers.RandomNormal', 'RandomNormal', ([], {'mean': '(0)', 'stddev': '(0.02)'}), '(mean=0, stddev=0.02)\n', (5265, 5286), False, 'from tensorflow.python.keras.initializers import RandomNormal\n')]
|
from typing import Union
import re
from phonemizer.phonemize import phonemize
from data.text.symbols import all_phonemes, _punctuations
class Tokenizer:
def __init__(self, start_token='>', end_token='<', pad_token='/', add_start_end=True, alphabet=None,
model_breathing=True):
if not alphabet:
self.alphabet = all_phonemes
else:
self.alphabet = sorted(list(set(alphabet))) # for testing
self.idx_to_token = {i: s for i, s in enumerate(self.alphabet, start=1)}
self.idx_to_token[0] = pad_token
self.token_to_idx = {s: [i] for i, s in self.idx_to_token.items()}
self.vocab_size = len(self.alphabet) + 1
self.add_start_end = add_start_end
if add_start_end:
self.start_token_index = len(self.alphabet) + 1
self.end_token_index = len(self.alphabet) + 2
self.vocab_size += 2
self.idx_to_token[self.start_token_index] = start_token
self.idx_to_token[self.end_token_index] = end_token
self.model_breathing = model_breathing
if model_breathing:
self.breathing_token_index = self.vocab_size
self.token_to_idx[' '] = self.token_to_idx[' '] + [self.breathing_token_index]
self.vocab_size += 1
self.breathing_token = '@'
self.idx_to_token[self.breathing_token_index] = self.breathing_token
self.token_to_idx[self.breathing_token] = [self.breathing_token_index]
def __call__(self, sentence: str) -> list:
sequence = [self.token_to_idx[c] for c in sentence] # No filtering: text should only contain known chars.
sequence = [item for items in sequence for item in items]
if self.model_breathing:
sequence = [self.breathing_token_index] + sequence
if self.add_start_end:
sequence = [self.start_token_index] + sequence + [self.end_token_index]
return sequence
def decode(self, sequence: list) -> str:
return ''.join([self.idx_to_token[int(t)] for t in sequence])
class Phonemizer:
def __init__(self, language: str, with_stress: bool, njobs=4):
self.language = language
self.njobs = njobs
self.with_stress = with_stress
self.special_hyphen = '—'
self.punctuation = ';:,.!?¡¿—…"«»“”'
self._whitespace_re = re.compile(r'\s+')
        self._whitespace_punctuation_re = re.compile(rf'\s*([{_punctuations}])\s*')  # raw f-string avoids invalid escape warnings
def __call__(self, text: Union[str, list], with_stress=None, njobs=None, language=None) -> Union[str, list]:
language = language or self.language
njobs = njobs or self.njobs
with_stress = with_stress or self.with_stress
# phonemizer does not like hyphens.
text = self._preprocess(text)
phonemes = phonemize(text,
language=language,
backend='espeak',
strip=True,
preserve_punctuation=True,
with_stress=with_stress,
punctuation_marks=self.punctuation,
njobs=njobs,
language_switch='remove-flags')
return self._postprocess(phonemes)
def _preprocess_string(self, text: str):
text = text.replace('-', self.special_hyphen)
return text
def _preprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._preprocess_string(t) for t in text]
elif isinstance(text, str):
return self._preprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
def _collapse_whitespace(self, text: str) -> str:
text = re.sub(self._whitespace_re, ' ', text)
return re.sub(self._whitespace_punctuation_re, r'\1', text)
def _postprocess_string(self, text: str) -> str:
text = text.replace(self.special_hyphen, '-')
text = ''.join([c for c in text if c in all_phonemes])
text = self._collapse_whitespace(text)
text = text.strip()
return text
def _postprocess(self, text: Union[str, list]) -> Union[str, list]:
if isinstance(text, list):
return [self._postprocess_string(t) for t in text]
elif isinstance(text, str):
return self._postprocess_string(text)
else:
raise TypeError(f'{self} input must be list or str, not {type(text)}')
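# --- Hypothetical usage sketch (not part of the original module) ---
# Round-tripping a sentence through the Tokenizer; the toy alphabet is an
# assumption so the example does not depend on the data.text.symbols content.
if __name__ == '__main__':
    tok = Tokenizer(alphabet=list('helo wrd'), model_breathing=False)
    ids = tok('hello world')
    print(ids)
    print(tok.decode(ids))  # '>hello world<' including start/end tokens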
|
[
"phonemizer.phonemize.phonemize",
"re.sub",
"re.compile"
] |
[((2398, 2416), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (2408, 2416), False, 'import re\n'), ((2459, 2501), 're.compile', 're.compile', (['f"""\\\\s*([{_punctuations}])\\\\s*"""'], {}), "(f'\\\\s*([{_punctuations}])\\\\s*')\n", (2469, 2501), False, 'import re\n'), ((2854, 3060), 'phonemizer.phonemize.phonemize', 'phonemize', (['text'], {'language': 'language', 'backend': '"""espeak"""', 'strip': '(True)', 'preserve_punctuation': '(True)', 'with_stress': 'with_stress', 'punctuation_marks': 'self.punctuation', 'njobs': 'njobs', 'language_switch': '"""remove-flags"""'}), "(text, language=language, backend='espeak', strip=True,\n preserve_punctuation=True, with_stress=with_stress, punctuation_marks=\n self.punctuation, njobs=njobs, language_switch='remove-flags')\n", (2863, 3060), False, 'from phonemizer.phonemize import phonemize\n'), ((3880, 3918), 're.sub', 're.sub', (['self._whitespace_re', '""" """', 'text'], {}), "(self._whitespace_re, ' ', text)\n", (3886, 3918), False, 'import re\n'), ((3934, 3986), 're.sub', 're.sub', (['self._whitespace_punctuation_re', '"""\\\\1"""', 'text'], {}), "(self._whitespace_punctuation_re, '\\\\1', text)\n", (3940, 3986), False, 'import re\n')]
|
from typing import Dict, Optional, Union
from fastapi.responses import Response, JSONResponse, PlainTextResponse
from rdflib import Graph
from rdflib.namespace import DCAT, DCTERMS, RDFS
from connegp import MEDIATYPE_NAMES
from config import *
from renderers import Renderer
from profiles.spaceprez_profiles import oai, geo
from models.spaceprez import SpacePrezFeatureCollection
from utils import templates
class SpacePrezFeatureCollectionRenderer(Renderer):
profiles = {"oai": oai, "geo": geo}
default_profile_token = "oai"
def __init__(self, request: object, instance_uri: str) -> None:
super().__init__(
request,
SpacePrezFeatureCollectionRenderer.profiles,
SpacePrezFeatureCollectionRenderer.default_profile_token,
instance_uri,
)
def set_collection(self, collection: SpacePrezFeatureCollection) -> None:
self.collection = collection
def _render_oai_html(
self, template_context: Union[Dict, None]
) -> templates.TemplateResponse:
"""Renders the HTML representation of the DCAT profile for a feature collection"""
_template_context = {
"request": self.request,
"collection": self.collection.to_dict(),
"uri": self.instance_uri,
"profiles": self.profiles,
"default_profile": self.default_profile_token,
"mediatype_names": dict(MEDIATYPE_NAMES, **{"application/geo+json": "GeoJSON"}),
}
if template_context is not None:
_template_context.update(template_context)
return templates.TemplateResponse(
"spaceprez/spaceprez_feature_collection.html",
context=_template_context,
headers=self.headers,
)
# def _render_oai_json(self) -> JSONResponse:
# """Renders the JSON representation of the OAI profile for a feature collection"""
# return JSONResponse(
# content={"test": "test"},
# media_type="application/json",
# headers=self.headers,
# )
def _render_oai_geojson(self) -> JSONResponse:
"""Renders the GeoJSON representation of the OAI profile for a feature collection"""
content = self.collection.to_geojson()
content["links"] = [
{
"href": str(self.request.url),
"rel": "self",
"type": self.mediatype,
"title": "this document",
},
{
"href": str(self.request.base_url)[:-1] + str(self.request.url.path),
"rel": "alternate",
"type": "text/html",
"title": "this document as HTML",
},
]
return JSONResponse(
content=content,
media_type="application/geo+json",
headers=self.headers,
)
def _render_oai(self, template_context: Union[Dict, None]):
"""Renders the OAI profile for a feature collection"""
if self.mediatype == "text/html":
return self._render_oai_html(template_context)
else: # else return GeoJSON
return self._render_oai_geojson()
def _generate_geo_rdf(self) -> Graph:
"""Generates a Graph of the GeoSPARQL representation"""
r = self.collection.graph.query(f"""
PREFIX dcat: <{DCAT}>
PREFIX dcterms: <{DCTERMS}>
PREFIX geo: <{GEO}>
PREFIX rdfs: <{RDFS}>
CONSTRUCT {{
?fc a geo:FeatureCollection ;
?fc_pred ?fc_o ;
geo:hasBoundingBox ?geom ;
rdfs:member ?mem .
?geom ?geom_p ?geom_o .
?d a dcat:Dataset ;
rdfs:member ?fc .
}}
WHERE {{
BIND (<{self.collection.uri}> AS ?fc)
?fc a geo:FeatureCollection ;
?fc_pred ?fc_o ;
rdfs:member ?mem .
FILTER (STRSTARTS(STR(?fc_pred), STR(geo:)))
OPTIONAL {{
?fc geo:hasBoundingBox ?geom .
?geom ?geom_p ?geom_o .
}}
?d a dcat:Dataset ;
rdfs:member ?fc .
}}
""")
g = r.graph
g.bind("dcat", DCAT)
g.bind("dcterms", DCTERMS)
g.bind("geo", GEO)
g.bind("rdfs", RDFS)
return g
def _render_geo_rdf(self) -> Response:
"""Renders the RDF representation of the GeoSPAQRL profile for a feature collection"""
g = self._generate_geo_rdf()
return self._make_rdf_response(g)
def _render_geo(self):
"""Renders the GeoSPARQL profile for a feature collection"""
return self._render_geo_rdf()
def render(
self, template_context: Optional[Dict] = None
) -> Union[
PlainTextResponse, templates.TemplateResponse, Response, JSONResponse, None
]:
if self.error is not None:
return PlainTextResponse(self.error, status_code=400)
elif self.profile == "alt":
return self._render_alt(template_context)
elif self.profile == "oai":
return self._render_oai(template_context)
elif self.profile == "geo":
return self._render_geo()
else:
return None
|
[
"fastapi.responses.PlainTextResponse",
"utils.templates.TemplateResponse",
"fastapi.responses.JSONResponse"
] |
[((1608, 1734), 'utils.templates.TemplateResponse', 'templates.TemplateResponse', (['"""spaceprez/spaceprez_feature_collection.html"""'], {'context': '_template_context', 'headers': 'self.headers'}), "('spaceprez/spaceprez_feature_collection.html',\n context=_template_context, headers=self.headers)\n", (1634, 1734), False, 'from utils import templates\n'), ((2768, 2859), 'fastapi.responses.JSONResponse', 'JSONResponse', ([], {'content': 'content', 'media_type': '"""application/geo+json"""', 'headers': 'self.headers'}), "(content=content, media_type='application/geo+json', headers=\n self.headers)\n", (2780, 2859), False, 'from fastapi.responses import Response, JSONResponse, PlainTextResponse\n'), ((4975, 5021), 'fastapi.responses.PlainTextResponse', 'PlainTextResponse', (['self.error'], {'status_code': '(400)'}), '(self.error, status_code=400)\n', (4992, 5021), False, 'from fastapi.responses import Response, JSONResponse, PlainTextResponse\n')]
|
#!/home/ec2-user/WEBARCH/env/bin/python3
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
from time import sleep
prefix = "https://webarchive.nationalarchives.gov.uk/"
#20121204113457/
page = "https://www.gov.uk/government/how-government-works"
def crawl_versions(url, url_file, skip_list=None):
    # Avoid a mutable default argument: create a fresh set per call.
    skip_list = skip_list if skip_list is not None else set()
version_list = []
try:
html = urlopen(url)
except Exception as e:
print("Error with URL:",url)
print(e)
return
soup = BeautifulSoup(html, 'html.parser')
#print(soup)
if url[len(prefix)-1:len(prefix)+2] != "/*/":
print("Different format:",url,url[len(prefix)-1:len(prefix)+2])
return
domain = url[len(prefix)+2:]
#out_file = open(url_file,"a")
accordions = soup.findAll("div", {"class": "accordion"})
print("Dom:",domain)
print("Url:",url,"Accordions:",len(accordions))
for acc in accordions:
year = acc.find("span", {"class" : "year"})
#print("Acc:",acc)
print("\tYear", year, year.text,domain)
versions = acc.findAll("a", href=re.compile(".[1-2]*" + domain, re.IGNORECASE))
for v in versions:
print("\t\t",v['href'])
version_list.append(v['href'])
#out_file.write(domain + "|" + year.text + "|" + v['href'] + "\n")
#out_file.close()
return version_list
url = prefix + "*/" + page
crawl_versions(url,url.replace("/","_") + ".txt")
|
[
"bs4.BeautifulSoup",
"urllib.request.urlopen",
"re.compile"
] |
[((493, 527), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (506, 527), False, 'from bs4 import BeautifulSoup\n'), ((373, 385), 'urllib.request.urlopen', 'urlopen', (['url'], {}), '(url)\n', (380, 385), False, 'from urllib.request import urlopen\n'), ((1085, 1130), 're.compile', 're.compile', (["('.[1-2]*' + domain)", 're.IGNORECASE'], {}), "('.[1-2]*' + domain, re.IGNORECASE)\n", (1095, 1130), False, 'import re\n')]
|
from datetime import datetime
import time
import traceback
import urllib
import urllib2
from xml.dom import minidom
from django.core import mail
from travelist import utils
import settings
def call(method, **params):
response = urllib2.urlopen("http://flickr.com/services/rest?api_key=%s&method=%s&%s"
% (settings.FLICKR_KEY, method, urllib.urlencode(params)))
try:
return minidom.parse(response)
finally:
response.close()
def flickr_machinetags_getRecentValues(namespace, predicate, added_since):
dom = call('flickr.machinetags.getRecentValues', namespace=namespace, predicate=predicate, added_since=added_since)
return [{'value': node.childNodes[0].nodeValue,
'last_added': int(node.getAttribute('last_added'))}
for node in dom.getElementsByTagName('value')]
def flickr_photos_search(user_id, tags):
dom = call('flickr.photos.search', user_id=user_id, tags=tags)
return [{'id': int(node.getAttribute('id')),
'owner': node.getAttribute('owner')}
for node in dom.getElementsByTagName('photo')]
def flickr_photos_getInfo(photo_id):
dom = call('flickr.photos.getInfo', photo_id=photo_id)
try:
return [{'title': node.getElementsByTagName('title')[0].childNodes[0].nodeValue,
'date': datetime.strptime(node.getElementsByTagName('dates')[0].getAttribute('taken'), "%Y-%m-%d %H:%M:%S"),
'url': utils.find(node.getElementsByTagName('url'), lambda n: n.getAttribute('type') == 'photopage').childNodes[0].nodeValue}
for node in dom.getElementsByTagName('photo')][0]
except IndexError:
return None
def flickr_photos_getSizes(photo_id):
dom = call('flickr.photos.getSizes', photo_id=photo_id)
return dict((node.getAttribute('label'), node.getAttribute('source'))
for node in dom.getElementsByTagName('size'))
def track(namespace, predicate, callback):
wait_time = 60
last_added_max = 0
while True:
try:
values = flickr_machinetags_getRecentValues(namespace, predicate, last_added_max + 1)
wait_time = 60
for value in values:
callback(value['value'])
if value['last_added'] > last_added_max:
last_added_max = value['last_added']
time.sleep(60)
except Exception:
traceback.print_exc()
            # Exponential backoff between retries, capped at ten minutes below.
            time.sleep(wait_time)
wait_time *= 2
if wait_time > 10 * 60:
mail.mail_admins("Flickr tracking error", traceback.format_exc(), fail_silently=True)
wait_time = 10 * 60
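# --- Hypothetical usage sketch (not part of the original module) ---
# track() polls Flickr machinetags forever and invokes the callback for each
# new value; the namespace/predicate below are placeholders, not values taken
# from the original code.
if __name__ == '__main__':
    def handle_value(value):
        print('new machinetag value: %s' % value)

    track('example', 'id', handle_value)  # blocks; loops until interrupted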
|
[
"traceback.print_exc",
"time.sleep",
"xml.dom.minidom.parse",
"traceback.format_exc",
"urllib.urlencode"
] |
[((424, 447), 'xml.dom.minidom.parse', 'minidom.parse', (['response'], {}), '(response)\n', (437, 447), False, 'from xml.dom import minidom\n'), ((2373, 2387), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2383, 2387), False, 'import time\n'), ((373, 397), 'urllib.urlencode', 'urllib.urlencode', (['params'], {}), '(params)\n', (389, 397), False, 'import urllib\n'), ((2426, 2447), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (2445, 2447), False, 'import traceback\n'), ((2460, 2481), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (2470, 2481), False, 'import time\n'), ((2603, 2625), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (2623, 2625), False, 'import traceback\n')]
|
from unittest import TestCase
from mock import Mock
from openelex.base.transform import registry
class TestTransformRegistry(TestCase):
def test_register_with_validators(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
mock_validator1 = Mock(return_value=None)
mock_validator1.__name__ = 'mock_validator1'
mock_validator2 = Mock(return_value=None)
mock_validator2.__name__ = 'mock_validator2'
validators = [mock_validator1, mock_validator2]
registry.register("XX", mock_transform, validators)
transform = registry.get("XX", "mock_transform")
self.assertEqual(list(transform.validators.values()), validators)
transform()
mock_transform.assert_called_once_with()
def test_register_raw(self):
mock_transform = Mock(return_value=None)
mock_transform.__name__ = 'mock_transform'
registry.register("XX", mock_transform, raw=True)
transform = registry.get("XX", "mock_transform", raw=True)
transform()
mock_transform.assert_called_once_with()
|
[
"openelex.base.transform.registry.get",
"mock.Mock",
"openelex.base.transform.registry.register"
] |
[((209, 232), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (213, 232), False, 'from mock import Mock\n'), ((310, 333), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (314, 333), False, 'from mock import Mock\n'), ((413, 436), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (417, 436), False, 'from mock import Mock\n'), ((556, 607), 'openelex.base.transform.registry.register', 'registry.register', (['"""XX"""', 'mock_transform', 'validators'], {}), "('XX', mock_transform, validators)\n", (573, 607), False, 'from openelex.base.transform import registry\n'), ((630, 666), 'openelex.base.transform.registry.get', 'registry.get', (['"""XX"""', '"""mock_transform"""'], {}), "('XX', 'mock_transform')\n", (642, 666), False, 'from openelex.base.transform import registry\n'), ((870, 893), 'mock.Mock', 'Mock', ([], {'return_value': 'None'}), '(return_value=None)\n', (874, 893), False, 'from mock import Mock\n'), ((954, 1003), 'openelex.base.transform.registry.register', 'registry.register', (['"""XX"""', 'mock_transform'], {'raw': '(True)'}), "('XX', mock_transform, raw=True)\n", (971, 1003), False, 'from openelex.base.transform import registry\n'), ((1024, 1070), 'openelex.base.transform.registry.get', 'registry.get', (['"""XX"""', '"""mock_transform"""'], {'raw': '(True)'}), "('XX', 'mock_transform', raw=True)\n", (1036, 1070), False, 'from openelex.base.transform import registry\n')]
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
from copy import copy
from unittest import TestCase
import numpy as np
import torch
import torch.optim
from torch import nn as nn
from torch.distributions import constraints
import pyro
from tests.common import assert_equal
class ParamStoreDictTests(TestCase):
def setUp(self):
pyro.clear_param_store()
self.linear_module = nn.Linear(3, 2)
self.linear_module2 = nn.Linear(3, 2)
self.linear_module3 = nn.Linear(3, 2)
def test_save_and_load(self):
lin = pyro.module("mymodule", self.linear_module)
pyro.module("mymodule2", self.linear_module2)
x = torch.randn(1, 3)
myparam = pyro.param("myparam", 1.234 * torch.ones(1))
cost = torch.sum(torch.pow(lin(x), 2.0)) * torch.pow(myparam, 4.0)
cost.backward()
params = list(self.linear_module.parameters()) + [myparam]
optim = torch.optim.Adam(params, lr=0.01)
myparam_copy_stale = copy(pyro.param("myparam").detach().cpu().numpy())
optim.step()
myparam_copy = copy(pyro.param("myparam").detach().cpu().numpy())
param_store_params = copy(pyro.get_param_store()._params)
param_store_param_to_name = copy(pyro.get_param_store()._param_to_name)
assert len(list(param_store_params.keys())) == 5
assert len(list(param_store_param_to_name.values())) == 5
pyro.get_param_store().save("paramstore.unittest.out")
pyro.clear_param_store()
assert len(list(pyro.get_param_store()._params)) == 0
assert len(list(pyro.get_param_store()._param_to_name)) == 0
pyro.get_param_store().load("paramstore.unittest.out")
def modules_are_equal():
weights_equal = (
np.sum(
np.fabs(
self.linear_module3.weight.detach().cpu().numpy()
- self.linear_module.weight.detach().cpu().numpy()
)
)
== 0.0
)
bias_equal = (
np.sum(
np.fabs(
self.linear_module3.bias.detach().cpu().numpy()
- self.linear_module.bias.detach().cpu().numpy()
)
)
== 0.0
)
return weights_equal and bias_equal
assert not modules_are_equal()
pyro.module("mymodule", self.linear_module3, update_module_params=False)
assert id(self.linear_module3.weight) != id(pyro.param("mymodule$$$weight"))
assert not modules_are_equal()
pyro.module("mymodule", self.linear_module3, update_module_params=True)
assert id(self.linear_module3.weight) == id(pyro.param("mymodule$$$weight"))
assert modules_are_equal()
myparam = pyro.param("myparam")
store = pyro.get_param_store()
assert myparam_copy_stale != myparam.detach().cpu().numpy()
assert myparam_copy == myparam.detach().cpu().numpy()
assert sorted(param_store_params.keys()) == sorted(store._params.keys())
assert sorted(param_store_param_to_name.values()) == sorted(
store._param_to_name.values()
)
assert sorted(store._params.keys()) == sorted(store._param_to_name.values())
def test_dict_interface():
param_store = pyro.get_param_store()
# start empty
param_store.clear()
assert not param_store
assert len(param_store) == 0
assert "x" not in param_store
assert "y" not in param_store
assert list(param_store.items()) == []
assert list(param_store.keys()) == []
assert list(param_store.values()) == []
# add x
param_store["x"] = torch.zeros(1, 2, 3)
assert param_store
assert len(param_store) == 1
assert "x" in param_store
assert "y" not in param_store
assert list(param_store.keys()) == ["x"]
assert [key for key, value in param_store.items()] == ["x"]
assert len(list(param_store.values())) == 1
assert param_store["x"].shape == (1, 2, 3)
assert_equal(param_store.setdefault("x", torch.ones(1, 2, 3)), torch.zeros(1, 2, 3))
assert param_store["x"].unconstrained() is param_store["x"]
# add y
param_store.setdefault("y", torch.ones(4, 5), constraint=constraints.positive)
assert param_store
assert len(param_store) == 2
assert "x" in param_store
assert "y" in param_store
assert sorted(param_store.keys()) == ["x", "y"]
assert sorted(key for key, value in param_store.items()) == ["x", "y"]
assert len(list(param_store.values())) == 2
assert param_store["x"].shape == (1, 2, 3)
assert param_store["y"].shape == (4, 5)
assert_equal(param_store.setdefault("y", torch.zeros(4, 5)), torch.ones(4, 5))
assert_equal(param_store["y"].unconstrained(), torch.zeros(4, 5))
# remove x
del param_store["x"]
assert param_store
assert len(param_store) == 1
assert "x" not in param_store
assert "y" in param_store
assert list(param_store.keys()) == ["y"]
assert list(key for key, value in param_store.items()) == ["y"]
assert len(list(param_store.values())) == 1
assert param_store["y"].shape == (4, 5)
assert_equal(param_store.setdefault("y", torch.zeros(4, 5)), torch.ones(4, 5))
assert_equal(param_store["y"].unconstrained(), torch.zeros(4, 5))
# remove y
del param_store["y"]
assert not param_store
assert len(param_store) == 0
assert "x" not in param_store
assert "y" not in param_store
assert list(param_store.keys()) == []
assert list(key for key, value in param_store.items()) == []
assert len(list(param_store.values())) == 0
|
[
"torch.ones",
"pyro.get_param_store",
"pyro.param",
"torch.randn",
"pyro.module",
"torch.optim.Adam",
"torch.pow",
"torch.nn.Linear",
"torch.zeros",
"pyro.clear_param_store"
] |
[((3421, 3443), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (3441, 3443), False, 'import pyro\n'), ((3780, 3800), 'torch.zeros', 'torch.zeros', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (3791, 3800), False, 'import torch\n'), ((383, 407), 'pyro.clear_param_store', 'pyro.clear_param_store', ([], {}), '()\n', (405, 407), False, 'import pyro\n'), ((437, 452), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (446, 452), True, 'from torch import nn as nn\n'), ((483, 498), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (492, 498), True, 'from torch import nn as nn\n'), ((529, 544), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (538, 544), True, 'from torch import nn as nn\n'), ((594, 637), 'pyro.module', 'pyro.module', (['"""mymodule"""', 'self.linear_module'], {}), "('mymodule', self.linear_module)\n", (605, 637), False, 'import pyro\n'), ((646, 691), 'pyro.module', 'pyro.module', (['"""mymodule2"""', 'self.linear_module2'], {}), "('mymodule2', self.linear_module2)\n", (657, 691), False, 'import pyro\n'), ((704, 721), 'torch.randn', 'torch.randn', (['(1)', '(3)'], {}), '(1, 3)\n', (715, 721), False, 'import torch\n'), ((968, 1001), 'torch.optim.Adam', 'torch.optim.Adam', (['params'], {'lr': '(0.01)'}), '(params, lr=0.01)\n', (984, 1001), False, 'import torch\n'), ((1520, 1544), 'pyro.clear_param_store', 'pyro.clear_param_store', ([], {}), '()\n', (1542, 1544), False, 'import pyro\n'), ((2480, 2552), 'pyro.module', 'pyro.module', (['"""mymodule"""', 'self.linear_module3'], {'update_module_params': '(False)'}), "('mymodule', self.linear_module3, update_module_params=False)\n", (2491, 2552), False, 'import pyro\n'), ((2685, 2756), 'pyro.module', 'pyro.module', (['"""mymodule"""', 'self.linear_module3'], {'update_module_params': '(True)'}), "('mymodule', self.linear_module3, update_module_params=True)\n", (2696, 2756), False, 'import pyro\n'), ((2896, 2917), 'pyro.param', 'pyro.param', (['"""myparam"""'], {}), "('myparam')\n", (2906, 2917), False, 'import pyro\n'), ((2934, 2956), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (2954, 2956), False, 'import pyro\n'), ((4192, 4212), 'torch.zeros', 'torch.zeros', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (4203, 4212), False, 'import torch\n'), ((4323, 4339), 'torch.ones', 'torch.ones', (['(4)', '(5)'], {}), '(4, 5)\n', (4333, 4339), False, 'import torch\n'), ((4821, 4837), 'torch.ones', 'torch.ones', (['(4)', '(5)'], {}), '(4, 5)\n', (4831, 4837), False, 'import torch\n'), ((4890, 4907), 'torch.zeros', 'torch.zeros', (['(4)', '(5)'], {}), '(4, 5)\n', (4901, 4907), False, 'import torch\n'), ((5340, 5356), 'torch.ones', 'torch.ones', (['(4)', '(5)'], {}), '(4, 5)\n', (5350, 5356), False, 'import torch\n'), ((5409, 5426), 'torch.zeros', 'torch.zeros', (['(4)', '(5)'], {}), '(4, 5)\n', (5420, 5426), False, 'import torch\n'), ((837, 860), 'torch.pow', 'torch.pow', (['myparam', '(4.0)'], {}), '(myparam, 4.0)\n', (846, 860), False, 'import torch\n'), ((4170, 4189), 'torch.ones', 'torch.ones', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (4180, 4189), False, 'import torch\n'), ((4801, 4818), 'torch.zeros', 'torch.zeros', (['(4)', '(5)'], {}), '(4, 5)\n', (4812, 4818), False, 'import torch\n'), ((5320, 5337), 'torch.zeros', 'torch.zeros', (['(4)', '(5)'], {}), '(4, 5)\n', (5331, 5337), False, 'import torch\n'), ((770, 783), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (780, 783), False, 'import torch\n'), ((1213, 1235), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1233, 1235), False, 'import pyro\n'), ((1286, 1308), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1306, 1308), False, 'import pyro\n'), ((1457, 1479), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1477, 1479), False, 'import pyro\n'), ((1684, 1706), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1704, 1706), False, 'import pyro\n'), ((2605, 2636), 'pyro.param', 'pyro.param', (['"""mymodule$$$weight"""'], {}), "('mymodule$$$weight')\n", (2615, 2636), False, 'import pyro\n'), ((2809, 2840), 'pyro.param', 'pyro.param', (['"""mymodule$$$weight"""'], {}), "('mymodule$$$weight')\n", (2819, 2840), False, 'import pyro\n'), ((1569, 1591), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1589, 1591), False, 'import pyro\n'), ((1631, 1653), 'pyro.get_param_store', 'pyro.get_param_store', ([], {}), '()\n', (1651, 1653), False, 'import pyro\n'), ((1036, 1057), 'pyro.param', 'pyro.param', (['"""myparam"""'], {}), "('myparam')\n", (1046, 1057), False, 'import pyro\n'), ((1133, 1154), 'pyro.param', 'pyro.param', (['"""myparam"""'], {}), "('myparam')\n", (1143, 1154), False, 'import pyro\n')]
|
import os, cv2
import torch
from torch import nn
import numpy as np
def weights_path(_file_, _root_num, dirname):
basepath = os.path.dirname(_file_)
backs = [".."]*_root_num
model_dir = os.path.abspath(os.path.join(basepath, *backs, dirname))
return model_dir
def _check_ins(name, val, cls, allow_none=False, default=None):
if allow_none and val is None:
return default
if not isinstance(val, cls):
err = 'Argument \'{}\' must be {}, but got {}'
if isinstance(cls, (tuple, list)):
types = [c.__name__ for c in cls]
err = err.format(name, types, type(val).__name__)
raise ValueError(err)
else:
err = err.format(name, cls.__name__, type(val).__name__)
raise ValueError(err)
return val
def _check_retval(funcname, val, cls):
if not isinstance(val, cls):
err = '\'{}\' must return {}, but got {}'
if isinstance(cls, (tuple, list)):
types = [c.__name__ for c in cls]
err = err.format(funcname, types, type(val).__name__)
raise ValueError(err)
else:
err = err.format(funcname, cls.__name__, type(val).__name__)
raise ValueError(err)
return val
def _check_norm(name, val):
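    # accept an int/float scalar, a list/tuple, or an existing Tensor; scalars and
    # sequences are converted to float tensors with requires_grad=False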
if isinstance(val, (float, int)):
val = torch.tensor([float(val)], requires_grad=False)
elif isinstance(val, (list, tuple)):
val = torch.tensor(val, requires_grad=False).float()
elif not isinstance(val, torch.Tensor):
raise ValueError('{} must be int, float, list, tuple, Tensor, but got {}'.format(name, type(val).__name__))
return val
def _initialize_xavier_uniform(layers):
from .models.layers import ConvRelu
for module in layers.modules():
if isinstance(module, nn.Conv2d):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.constant_(module.bias, 0)
elif isinstance(module, ConvRelu):
nn.init.xavier_uniform_(module.conv.weight)
if module.conv.bias is not None:
nn.init.constant_(module.conv.bias, 0)
def _get_model_url(name):
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
return model_urls[name]
def _check_image(image, device, size=None):
"""
    :param image: ndarray or Tensor of list or tuple, or ndarray, or Tensor. Note that each type will be handled as:
ndarray of list or tuple, ndarray: (?, h, w, c). channel order will be handled as RGB
Tensor of list or tuple, Tensor: (?, c, h, w). channel order will be handled as RGB
:param device: torch.device
:param size: None or tuple, if None is passed, check will not be done
Note that size = (w, h)
:return:
img: Tensor, shape = (b, c, h, w)
orig_imgs: list of Tensor, shape = (c, h, w) these images may be used for visualization
"""
orig_imgs = []
def __check(_tim, _cim, cfirst):
"""
Note that 2d or 3d image is resizable
:param _tim: tensor, shape = (h, w, ?) or (?, h, w)
:param _cim: ndarray, shape = (h, w, ?) or (?, h, w)
:return:
tims: tensor, shape = (c, h, w)
cims: ndarray, shape = (h, w, c)
"""
#### check size of tensor ####
if size:
h, w = _tim.shape[-2:] if cfirst else _tim.shape[:2]
wcond = size[0] if size[0] is not None else w
hcond = size[1] if size[1] is not None else h
if not (h == hcond and w == wcond):
# do resize
if cfirst and _cim.ndim == 3:
# note that _cim's shape must be (c, h, w)
_cim = _cim.transpose((1, 2, 0))
# _cim's shape = (h, w, ?)
resized_cim = cv2.resize(_cim, (wcond, hcond))
return __check(torch.tensor(resized_cim, requires_grad=False), _cim, cfirst=False)
#### check tensor ####
assert isinstance(_tim, torch.Tensor)
if _tim.ndim == 2:
tim = _tim.unsqueeze(2)
elif _tim.ndim == 3:
tim = _tim
else:
raise ValueError('Invalid image found. image must be 2d or 3d, but got {}'.format(_tim.ndim))
if not cfirst:
# note that tim's shape must be (h, w, c)
tim = tim.permute((2, 0, 1))
#### check cvimg ####
assert isinstance(_cim, np.ndarray)
if _cim.ndim == 2:
cim = np.broadcast_to(np.expand_dims(_cim, 2), (_cim.shape[0], _cim.shape[1], 3)).copy()
elif _cim.ndim == 3:
cim = _cim
else:
raise ValueError('Invalid image found. image must be 2d or 3d, but got {}'.format(_cim.ndim))
if cfirst:
# note that cim's shape must be (c, h, w)
cim = cim.transpose((1, 2, 0))
return tim, cim
if isinstance(image, (list, tuple)):
img = []
for im in image:
if isinstance(im, np.ndarray):
tim = torch.tensor(im, requires_grad=False)
# im and tim's shape = (h, w, ?)
tim, cim = __check(tim, im, cfirst=False)
elif isinstance(im, torch.Tensor):
cim = im.cpu().numpy()
# im and tim's shape = (?, h, w)
tim, cim = __check(im, cim, cfirst=True)
else:
raise ValueError('Invalid image type. list or tuple\'s element must be ndarray, but got \'{}\''.format(type(im).__name__))
img += [tim]
orig_imgs += [cim]
# (b, c, h, w)
img = torch.stack(img)
elif isinstance(image, np.ndarray):
if image.ndim == 2:
tim, cim = __check(torch.tensor(image, requires_grad=False), image, cfirst=False)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 3:
tim, cim = __check(torch.tensor(image, requires_grad=False), image, cfirst=False)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 4:
img = []
for i in range(image.shape[0]):
tim, cim = __check(torch.tensor(image[i], requires_grad=False), image[i], cfirst=False)
img += [tim]
orig_imgs += [cim]
img = torch.stack(img)
else:
raise ValueError('Invalid image found. image must be from 2d to 4d, but got {}'.format(image.ndim))
elif isinstance(image, torch.Tensor):
if image.ndim == 2:
tim, cim = __check(image, image.cpu().numpy(), cfirst=True)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 3:
tim, cim = __check(image, image.cpu().numpy(), cfirst=True)
img = tim.unsqueeze(0)
orig_imgs += [cim]
elif image.ndim == 4:
img = []
for i in range(image.shape[0]):
tim, cim = __check(image[i], image[i].cpu().numpy(), cfirst=True)
img += [tim]
orig_imgs += [cim]
img = torch.stack(img)
else:
raise ValueError('Invalid image found. image must be from 2d to 4d, but got {}'.format(image.ndim))
else:
raise ValueError('Invalid image type. list or tuple\'s element must be'
'\'list\', \'tuple\', \'ndarray\' or \'Tensor\', but got \'{}\''.format(type(image).__name__))
assert img.ndim == 4, "may forget checking..."
return img.to(device), orig_imgs
def _check_shape(desired_shape, input_shape):
"""
Note that desired_shape is allowed to have None, which means whatever input size is ok
:param desired_shape: array-like
:param input_shape: array-like
:return:
"""
if len(desired_shape) != len(input_shape):
raise ValueError("shape dim was not same, got {} and {}".format(len(desired_shape), len(input_shape)))
for i, (des_d, inp_d) in enumerate(zip(desired_shape, input_shape)):
if des_d is None:
continue
if des_d != inp_d:
raise ValueError('dim:{} is invalid size, desired one: {}, but got {}'.format(i, des_d, inp_d))
def _get_normed_and_origin_img(img, orig_imgs, rgb_means, rgb_stds, toNorm, device):
"""
:param img: Tensor, shape = (b, c, h, w)
:param orig_imgs: list of ndarray, shape = (h, w, c)
:param rgb_means: tuple or float
:param rgb_stds: tuple or float
:param toNorm: Bool
:param device: torch.device
:return:
normed_img: Tensor, shape = (b, c, h, w)
orig_img: Tensor, shape = (b, c, h, w). Order is rgb
"""
rgb_means = _check_norm('rgb_means', rgb_means)
rgb_stds = _check_norm('rgb_stds', rgb_stds)
img = img.to(device)
if toNorm:
# shape = (1, 3, 1, 1)
rgb_means = rgb_means.unsqueeze(0).unsqueeze(-1).unsqueeze(-1).to(device)
rgb_stds = rgb_stds.unsqueeze(0).unsqueeze(-1).unsqueeze(-1).to(device)
normed_img = (img / 255. - rgb_means) / rgb_stds
orig_imgs = orig_imgs
else:
normed_img = img
# shape = (1, 1, 3)
rgb_means = rgb_means.unsqueeze(0).unsqueeze(0).cpu().numpy()
rgb_stds = rgb_stds.unsqueeze(0).unsqueeze(0).cpu().numpy()
orig_imgs = [oim * rgb_stds + rgb_means for oim in orig_imgs]
return normed_img, orig_imgs
|
[
"torch.stack",
"torch.nn.init.xavier_uniform_",
"os.path.dirname",
"numpy.expand_dims",
"torch.nn.init.constant_",
"torch.tensor",
"os.path.join",
"cv2.resize"
] |
[((130, 153), 'os.path.dirname', 'os.path.dirname', (['_file_'], {}), '(_file_)\n', (145, 153), False, 'import os, cv2\n'), ((215, 254), 'os.path.join', 'os.path.join', (['basepath', '*backs', 'dirname'], {}), '(basepath, *backs, dirname)\n', (227, 254), False, 'import os, cv2\n'), ((6262, 6278), 'torch.stack', 'torch.stack', (['img'], {}), '(img)\n', (6273, 6278), False, 'import torch\n'), ((1835, 1873), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['module.weight'], {}), '(module.weight)\n', (1858, 1873), False, 'from torch import nn\n'), ((1930, 1963), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.bias', '(0)'], {}), '(module.bias, 0)\n', (1947, 1963), False, 'from torch import nn\n'), ((2019, 2062), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['module.conv.weight'], {}), '(module.conv.weight)\n', (2042, 2062), False, 'from torch import nn\n'), ((4441, 4473), 'cv2.resize', 'cv2.resize', (['_cim', '(wcond, hcond)'], {}), '(_cim, (wcond, hcond))\n', (4451, 4473), False, 'import os, cv2\n'), ((5672, 5709), 'torch.tensor', 'torch.tensor', (['im'], {'requires_grad': '(False)'}), '(im, requires_grad=False)\n', (5684, 5709), False, 'import torch\n'), ((1440, 1478), 'torch.tensor', 'torch.tensor', (['val'], {'requires_grad': '(False)'}), '(val, requires_grad=False)\n', (1452, 1478), False, 'import torch\n'), ((2124, 2162), 'torch.nn.init.constant_', 'nn.init.constant_', (['module.conv.bias', '(0)'], {}), '(module.conv.bias, 0)\n', (2141, 2162), False, 'from torch import nn\n'), ((4505, 4551), 'torch.tensor', 'torch.tensor', (['resized_cim'], {'requires_grad': '(False)'}), '(resized_cim, requires_grad=False)\n', (4517, 4551), False, 'import torch\n'), ((6379, 6419), 'torch.tensor', 'torch.tensor', (['image'], {'requires_grad': '(False)'}), '(image, requires_grad=False)\n', (6391, 6419), False, 'import torch\n'), ((5141, 5164), 'numpy.expand_dims', 'np.expand_dims', (['_cim', '(2)'], {}), '(_cim, 2)\n', (5155, 5164), True, 'import numpy as np\n'), ((6569, 6609), 'torch.tensor', 'torch.tensor', (['image'], {'requires_grad': '(False)'}), '(image, requires_grad=False)\n', (6581, 6609), False, 'import torch\n'), ((6979, 6995), 'torch.stack', 'torch.stack', (['img'], {}), '(img)\n', (6990, 6995), False, 'import torch\n'), ((7758, 7774), 'torch.stack', 'torch.stack', (['img'], {}), '(img)\n', (7769, 7774), False, 'import torch\n'), ((6828, 6871), 'torch.tensor', 'torch.tensor', (['image[i]'], {'requires_grad': '(False)'}), '(image[i], requires_grad=False)\n', (6840, 6871), False, 'import torch\n')]
|
from django.db import models
class OverallTotals(models.Model):
id = models.AutoField(primary_key=True)
create_date = models.DateTimeField(auto_now_add=True, blank=True, null=True)
update_date = models.DateTimeField(auto_now=True, null=True)
fiscal_year = models.IntegerField(blank=True, null=True)
total_budget_authority = models.DecimalField(max_digits=23, decimal_places=2, blank=True, null=True)
class Meta:
managed = True
db_table = "overall_totals"
|
[
"django.db.models.DecimalField",
"django.db.models.DateTimeField",
"django.db.models.IntegerField",
"django.db.models.AutoField"
] |
[((75, 109), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)'}), '(primary_key=True)\n', (91, 109), False, 'from django.db import models\n'), ((128, 190), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'blank': '(True)', 'null': '(True)'}), '(auto_now_add=True, blank=True, null=True)\n', (148, 190), False, 'from django.db import models\n'), ((209, 255), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (229, 255), False, 'from django.db import models\n'), ((274, 316), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (293, 316), False, 'from django.db import models\n'), ((346, 421), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(23)', 'decimal_places': '(2)', 'blank': '(True)', 'null': '(True)'}), '(max_digits=23, decimal_places=2, blank=True, null=True)\n', (365, 421), False, 'from django.db import models\n')]
|
from __future__ import print_function
# The following comments couldn't be translated into the new config version:
#! /bin/env cmsRun
import FWCore.ParameterSet.Config as cms
process = cms.Process("validation")
import FWCore.ParameterSet.VarParsing as VarParsing
options = VarParsing.VarParsing ('analysis')
# load the full reconstraction configuration, to make sure we're getting all needed dependencies
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("Configuration.StandardSequences.Reconstruction_cff")
options.register ('jets',
"ak4PFJetsCHS", # default value, examples : "ak4PFJets", "ak4PFJetsCHS"
VarParsing.VarParsing.multiplicity.singleton,
VarParsing.VarParsing.varType.string,
"jet collection to use")
options.parseArguments()
whichJets = options.jets
applyJEC = True
corrLabel = "ak4PFCHS"
from Configuration.AlCa.GlobalTag import GlobalTag
tag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')
useTrigger = False
triggerPath = "HLT_PFJet80_v*"
runOnMC = True
#Flavour plots for MC: "all" = plots for all jets ; "dusg" = plots for d, u, s, dus, g independently ; not mandatory and any combinations are possible
#b, c, light (dusg), non-identified (NI), PU jets plots are always produced
flavPlots = "allbcldusg"
###prints###
print("jet collcetion asked : ", whichJets)
print("JEC applied?", applyJEC, ", correction:", corrLabel)
print("trigger will be used ? : ", useTrigger, ", Trigger paths:", triggerPath)
print("is it MC ? : ", runOnMC, ", Flavours:", flavPlots)
print("Global Tag : ", tag.globaltag)
############
process.load("DQMServices.Components.DQMEnvironment_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.load("JetMETCorrections.Configuration.JetCorrectors_cff")
process.load("CommonTools.ParticleFlow.goodOfflinePrimaryVertices_cfi")
process.load("RecoJets.JetAssociationProducers.ak4JTA_cff")
process.load("RecoBTag.Configuration.RecoBTag_cff")
process.load("PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi")
process.load("PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi")
process.load("PhysicsTools.JetMCAlgos.CaloJetsMCFlavour_cfi")
process.load("PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi")
process.JECseq = cms.Sequence(getattr(process,corrLabel+"L1FastL2L3CorrectorChain"))
newjetID=cms.InputTag(whichJets)
process.ak4JetFlavourInfos.jets = newjetID
process.ak4JetFlavourInfos.hadronFlavourHasPriority = cms.bool(True)
process.AK4byRef.jets = newjetID
if not "ak4PFJetsCHS" in whichJets:
process.ak4JetTracksAssociatorAtVertexPF.jets = newjetID
process.pfImpactParameterTagInfos.jets = newjetID
process.softPFMuonsTagInfos.jets = newjetID
process.softPFElectronsTagInfos.jets = newjetID
process.patJetGenJetMatch.src = newjetID
process.btagSequence = cms.Sequence(
process.ak4JetTracksAssociatorAtVertexPF *
process.btagging
)
process.jetSequences = cms.Sequence(process.goodOfflinePrimaryVertices * process.btagSequence)
###
print("inputTag : ", process.ak4JetTracksAssociatorAtVertexPF.jets)
###
if runOnMC:
process.flavourSeq = cms.Sequence(
process.selectedHadronsAndPartons *
process.ak4JetFlavourInfos
)
process.load("Validation.RecoB.bTagAnalysis_cfi")
process.bTagValidation.jetMCSrc = 'ak4JetFlavourInfos'
if "Calo" in whichJets:
process.bTagValidation.caloJetMCSrc = 'AK4byValAlgo'
process.bTagValidation.useOldFlavourTool = True
process.flavourSeq = cms.Sequence(
process.myPartons *
process.AK4Flavour
)
process.bTagValidation.applyPtHatWeight = False
process.bTagValidation.doJetID = True
process.bTagValidation.doJEC = applyJEC
process.bTagValidation.JECsourceMC = cms.InputTag(corrLabel+"L1FastL2L3Corrector")
process.bTagValidation.flavPlots = flavPlots
process.bTagHarvestMC.flavPlots = flavPlots
#process.bTagValidation.ptRecJetMin = cms.double(20.)
process.bTagValidation.genJetsMatched = cms.InputTag("patJetGenJetMatch")
process.bTagValidation.doPUid = cms.bool(True)
process.ak4GenJetsForPUid = cms.EDFilter("GenJetSelector",
src = cms.InputTag("ak4GenJets"),
cut = cms.string('pt > 8.'),
filter = cms.bool(False)
)
process.patJetGenJetMatch.matched = cms.InputTag("ak4GenJetsForPUid")
process.patJetGenJetMatch.maxDeltaR = cms.double(0.25)
process.patJetGenJetMatch.resolveAmbiguities = cms.bool(True)
else:
process.load("DQMOffline.RecoB.bTagAnalysisData_cfi")
process.bTagAnalysis.doJEC = applyJEC
process.bTagAnalysis.JECsourceData = cms.InputTag(corrLabel+"L1FastL2L3ResidualCorrector")
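    #data needs the residual JEC applied on top of L1FastL2L3, so extend the correction sequence accordingly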
process.JECseq *= (getattr(process,corrLabel+"ResidualCorrector") * getattr(process,corrLabel+"L1FastL2L3ResidualCorrector"))
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring()
)
from HLTrigger.HLTfilters.hltHighLevel_cfi import *
if useTrigger:
process.bTagHLT = hltHighLevel.clone(TriggerResultsTag = "TriggerResults::HLT", HLTPaths = ["HLT_PFJet40_v*"])
process.bTagHLT.HLTPaths = [triggerPath]
if runOnMC:
process.dqmSeq = cms.Sequence(process.ak4GenJetsForPUid * process.patJetGenJetMatch * process.flavourSeq * process.bTagValidation * process.bTagHarvestMC * process.dqmSaver)
else:
process.dqmSeq = cms.Sequence(process.bTagAnalysis * process.bTagHarvest * process.dqmSaver)
if useTrigger:
process.plots = cms.Path(process.bTagHLT * process.JECseq * process.jetSequences * process.dqmSeq)
else:
process.plots = cms.Path(process.JECseq * process.jetSequences * process.dqmSeq)
process.dqmEnv.subSystemFolder = 'BTAG'
process.dqmSaver.producer = 'DQM'
process.dqmSaver.workflow = '/POG/BTAG/BJET'
process.dqmSaver.convention = 'Offline'
process.dqmSaver.saveByRun = cms.untracked.int32(-1)
process.dqmSaver.saveAtJobEnd =cms.untracked.bool(True)
process.dqmSaver.forceRunNumber = cms.untracked.int32(1)
process.PoolSource.fileNames = [
]
#keep the logging output to a nice level
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.GlobalTag = tag
|
[
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.int32",
"FWCore.ParameterSet.Config.double",
"FWCore.ParameterSet.Config.Sequence",
"FWCore.ParameterSet.Config.untracked.vstring",
"Configuration.AlCa.GlobalTag.GlobalTag",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.VarParsing.VarParsing",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.InputTag",
"FWCore.ParameterSet.Config.Path"
] |
[((186, 211), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""validation"""'], {}), "('validation')\n", (197, 211), True, 'import FWCore.ParameterSet.Config as cms\n'), ((275, 308), 'FWCore.ParameterSet.VarParsing.VarParsing', 'VarParsing.VarParsing', (['"""analysis"""'], {}), "('analysis')\n", (296, 308), True, 'import FWCore.ParameterSet.VarParsing as VarParsing\n'), ((1121, 1169), 'Configuration.AlCa.GlobalTag.GlobalTag', 'GlobalTag', (['process.GlobalTag', '"""auto:run2_mc"""', '""""""'], {}), "(process.GlobalTag, 'auto:run2_mc', '')\n", (1130, 1169), False, 'from Configuration.AlCa.GlobalTag import GlobalTag\n'), ((2544, 2567), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['whichJets'], {}), '(whichJets)\n', (2556, 2567), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2679, 2693), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2687, 2693), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3116, 3189), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.ak4JetTracksAssociatorAtVertexPF * process.btagging)'], {}), '(process.ak4JetTracksAssociatorAtVertexPF * process.btagging)\n', (3128, 3189), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3227, 3298), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.goodOfflinePrimaryVertices * process.btagSequence)'], {}), '(process.goodOfflinePrimaryVertices * process.btagSequence)\n', (3239, 3298), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6357, 6380), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(-1)'], {}), '(-1)\n', (6376, 6380), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6412, 6436), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (6430, 6436), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6472, 6494), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(1)'], {}), '(1)\n', (6491, 6494), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3414, 3490), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.selectedHadronsAndPartons * process.ak4JetFlavourInfos)'], {}), '(process.selectedHadronsAndPartons * process.ak4JetFlavourInfos)\n', (3426, 3490), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4074, 4121), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (["(corrLabel + 'L1FastL2L3Corrector')"], {}), "(corrLabel + 'L1FastL2L3Corrector')\n", (4086, 4121), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4319, 4352), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""patJetGenJetMatch"""'], {}), "('patJetGenJetMatch')\n", (4331, 4352), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4389, 4403), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (4397, 4403), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4777, 4810), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""ak4GenJetsForPUid"""'], {}), "('ak4GenJetsForPUid')\n", (4789, 4810), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4853, 4869), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.25)'], {}), '(0.25)\n', (4863, 4869), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4921, 4935), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (4929, 4935), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5083, 5138), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (["(corrLabel + 'L1FastL2L3ResidualCorrector')"], {}), "(corrLabel + 'L1FastL2L3ResidualCorrector')\n", (5095, 5138), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5694, 5858), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.ak4GenJetsForPUid * process.patJetGenJetMatch * process.flavourSeq *\n process.bTagValidation * process.bTagHarvestMC * process.dqmSaver)'], {}), '(process.ak4GenJetsForPUid * process.patJetGenJetMatch *\n process.flavourSeq * process.bTagValidation * process.bTagHarvestMC *\n process.dqmSaver)\n', (5706, 5858), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5878, 5953), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.bTagAnalysis * process.bTagHarvest * process.dqmSaver)'], {}), '(process.bTagAnalysis * process.bTagHarvest * process.dqmSaver)\n', (5890, 5953), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5990, 6077), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(process.bTagHLT * process.JECseq * process.jetSequences * process.dqmSeq)'], {}), '(process.bTagHLT * process.JECseq * process.jetSequences * process.\n dqmSeq)\n', (5998, 6077), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6099, 6163), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['(process.JECseq * process.jetSequences * process.dqmSeq)'], {}), '(process.JECseq * process.jetSequences * process.dqmSeq)\n', (6107, 6163), True, 'import FWCore.ParameterSet.Config as cms\n'), ((3804, 3856), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', (['(process.myPartons * process.AK4Flavour)'], {}), '(process.myPartons * process.AK4Flavour)\n', (3816, 3856), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5320, 5343), 'FWCore.ParameterSet.Config.untracked.int32', 'cms.untracked.int32', (['(-1)'], {}), '(-1)\n', (5339, 5343), True, 'import FWCore.ParameterSet.Config as cms\n'), ((5404, 5427), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', ([], {}), '()\n', (5425, 5427), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4518, 4544), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['"""ak4GenJets"""'], {}), "('ak4GenJets')\n", (4530, 4544), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4597, 4618), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""pt > 8."""'], {}), "('pt > 8.')\n", (4607, 4618), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4674, 4689), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (4682, 4689), True, 'import FWCore.ParameterSet.Config as cms\n')]
|
import sentencepiece as spm
import argparse
def main():
print("ciao")
parser = argparse.ArgumentParser()
parser.add_argument("-input", "--input", type=str, default="data/train.txt",
help="tokenizer input file")
parser.add_argument("-model_prefix", "--model_prefix", type=str, default="m",
help="prefix for the model")
parser.add_argument("-vocab_size", "--vocab_size", type=int, default=32000,
help="the size of the vocabulary")
parser.add_argument("-character_coverage", "--character_coverage", type=float, default=0.995,
help="amount of characters covered by the model, good defaults are: 0.9995 for languages with rich character set like Japanse or Chinese and 1.0 for other languages with small character set")
parser.add_argument("-bos_id", "--bos_id", type=int, default=-1,
help="begin of sentence id")
parser.add_argument("-eos_id", "--eos_id", type=int, default=1,
help="end of sentence id")
parser.add_argument("-unk_id", "--unk_id", type=int, default=2,
help="unknown id")
parser.add_argument("-pad_id", "--pad_id", type=int, default=0,
help="padding id")
args = parser.parse_args()
# spm.SentencePieceTrainer.train('--input=train_pretraining_clean.txt --model_prefix=dl4se --vocab_size=32000 --bos_id=-1 --eos_id=1 --unk_id=2 --pad_id=0')
spm.SentencePieceTrainer.train(input=args.input, model_prefix=args.model_prefix, vocab_size=args.vocab_size, character_coverage=args.character_coverage,
bos_id=args.bos_id, eos_id=args.eos_id, unk_id=args.unk_id, pad_id=args.pad_id)
if __name__=="__main__":
main()
|
[
"sentencepiece.SentencePieceTrainer.train",
"argparse.ArgumentParser"
] |
[((90, 115), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (113, 115), False, 'import argparse\n'), ((1507, 1754), 'sentencepiece.SentencePieceTrainer.train', 'spm.SentencePieceTrainer.train', ([], {'input': 'args.input', 'model_prefix': 'args.model_prefix', 'vocab_size': 'args.vocab_size', 'character_coverage': 'args.character_coverage', 'bos_id': 'args.bos_id', 'eos_id': 'args.eos_id', 'unk_id': 'args.unk_id', 'pad_id': 'args.pad_id'}), '(input=args.input, model_prefix=args.\n model_prefix, vocab_size=args.vocab_size, character_coverage=args.\n character_coverage, bos_id=args.bos_id, eos_id=args.eos_id, unk_id=args\n .unk_id, pad_id=args.pad_id)\n', (1537, 1754), True, 'import sentencepiece as spm\n')]
|
import pandas as pd
from datetime import date, timedelta
from django.conf import settings
from django.core.exceptions import ValidationError
from django.test import TestCase
from research.models.choices import DominantHand, Sex, Gender
from ..factories import SubjectFactory
class SubjectModelTestCase(TestCase):
def setUp(self):
self.test_subject = SubjectFactory()
self.test_subject.save()
df = pd.read_excel(
settings.RAW_SUBJECT_TABLE_PATH,
sheet_name="Subjects",
header=[0, 1],
index_col=0,
)
subject_details = {
("Anonymized", "Patient ID"): "ABC123",
("Anonymized", "First Name"): "Noam",
("Anonymized", "Last Name"): "Aharony",
("Raw", "Patient ID"): "11111",
("Raw", "First Name"): "Name",
("Raw", "Last Name"): "Last",
}
for item in subject_details:
df[item].iloc[0] = subject_details[item]
def test_not_future_birthdate_validator(self):
self.test_subject.date_of_birth = date.today() + timedelta(days=1)
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_null_char_field(self):
subject_one = SubjectFactory(id_number=None)
subject_one.save()
subject_two = SubjectFactory(id_number=None)
subject_two.save()
self.assertIsNone(subject_one.id_number)
self.assertIsNone(subject_two.id_number)
def test_dominant_hand_choices(self):
for choice in DominantHand:
self.test_subject.dominant_hand = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set dominant hand to {choice.value}")
def test_invalid_dominant_hand_choice(self):
self.test_subject.dominant_hand = "Right"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_sex_choices(self):
for choice in Sex:
self.test_subject.sex = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set sex to {choice.value}")
def test_invalid_sex_choice(self):
self.test_subject.sex = "Z"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_gender_choices(self):
for choice in Gender:
self.test_subject.gender = choice.name
try:
self.test_subject.full_clean()
except ValidationError:
self.fail(f"Failed to set gender to {choice.value}")
def test_invalid_gender_choice(self):
self.test_subject.gender = "Z"
with self.assertRaises(ValidationError):
self.test_subject.full_clean()
def test_get_full_name(self):
s = self.test_subject
expected = f"{s.first_name} {s.last_name}"
self.assertEqual(self.test_subject.get_full_name(), expected)
def test_str(self):
subject_id = self.test_subject.id
expected = f"Subject #{subject_id}"
self.assertEqual(str(self.test_subject), expected)
def test_get_personal_information(self):
# @TODO: Finish the personal information test.
# result = self.test_subject.get_personal_information()
# result = result[[item for item in ]]
# excpected = {
# ("Anonymized", "Patient ID"): "ABC123",
# ("Anonymized", "First Name"): "Noam",
# ("Anonymized", "Last Name"): "Aharony",
# ("Raw", "Patient ID"): "11111",
# ("Raw", "First Name"): "Name",
# ("Raw", "Last Name"): "Last",
# }
pass
def test_get_raw_information(self):
pass
def test_get_questionnaire_data(self):
pass
|
[
"pandas.read_excel",
"datetime.timedelta",
"datetime.date.today"
] |
[((429, 530), 'pandas.read_excel', 'pd.read_excel', (['settings.RAW_SUBJECT_TABLE_PATH'], {'sheet_name': '"""Subjects"""', 'header': '[0, 1]', 'index_col': '(0)'}), "(settings.RAW_SUBJECT_TABLE_PATH, sheet_name='Subjects',\n header=[0, 1], index_col=0)\n", (442, 530), True, 'import pandas as pd\n'), ((1093, 1105), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1103, 1105), False, 'from datetime import date, timedelta\n'), ((1108, 1125), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1117, 1125), False, 'from datetime import date, timedelta\n')]
|
import pandas as pd
filename = "dictionary.csv"
df_vocab = pd.read_csv(filename)
print("Duplicates:")
df_duplicated = df_vocab[df_vocab[["Chinese", "PinYin"]].duplicated(keep=False)][["Chinese", "PinYin"]]
if df_duplicated.shape[0] == 0:
print("===== [OK] No duplicates found =====")
else:
print(df_duplicated)
|
[
"pandas.read_csv"
] |
[((60, 81), 'pandas.read_csv', 'pd.read_csv', (['filename'], {}), '(filename)\n', (71, 81), True, 'import pandas as pd\n')]
|
from flask import render_template, request
from app import app
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
def index():
info = dict(title='DigiLabel')
files = request.files.getlist("file")
for file in files:
print("Content: ", file.filename)
return render_template('index.html', **info)
|
[
"app.app.route",
"flask.render_template",
"flask.request.files.getlist"
] |
[((66, 105), 'app.app.route', 'app.route', (['"""/"""'], {'methods': "['GET', 'POST']"}), "('/', methods=['GET', 'POST'])\n", (75, 105), False, 'from app import app\n'), ((107, 151), 'app.app.route', 'app.route', (['"""/index"""'], {'methods': "['GET', 'POST']"}), "('/index', methods=['GET', 'POST'])\n", (116, 151), False, 'from app import app\n'), ((212, 241), 'flask.request.files.getlist', 'request.files.getlist', (['"""file"""'], {}), "('file')\n", (233, 241), False, 'from flask import render_template, request\n'), ((318, 355), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html', **info)\n", (333, 355), False, 'from flask import render_template, request\n')]
|
''' Query Search SRA tables for 1K Genomes data, access files via SRA DRS ids'''
# IMPORTS
import sys
from fasp.search import DiscoverySearchClient
def main(argv):
searchClient = DiscoverySearchClient('https://ga4gh-search-adapter-presto-public.prod.dnastack.com', debug=False)
query = """SELECT s.su_submitter_id, drs_id
FROM thousand_genomes.onek_genomes.ssd_drs s
join thousand_genomes.onek_genomes.sra_drs_files f on f.sample_name = s.su_submitter_id
where filetype = 'bam' and mapped = 'mapped' and sequencing_type ='exome' and population = 'JPT' LIMIT 3"""
searchClient.runQuery(query)
if __name__ == "__main__":
main(sys.argv[1:])
|
[
"fasp.search.DiscoverySearchClient"
] |
[((186, 294), 'fasp.search.DiscoverySearchClient', 'DiscoverySearchClient', (['"""https://ga4gh-search-adapter-presto-public.prod.dnastack.com"""'], {'debug': '(False)'}), "(\n 'https://ga4gh-search-adapter-presto-public.prod.dnastack.com', debug=False\n )\n", (207, 294), False, 'from fasp.search import DiscoverySearchClient\n')]
|
from django.shortcuts import render
import urllib.request
import json
from datetime import datetime
from .city_weather import CityWeather
#locations given by Uni assignment
loc1_details = CityWeather("Lake District National Park", "54.4609", "-3.0886")
loc2_details = CityWeather("Corfe Castle", "50.6395", "-2.0566")
loc3_details = CityWeather("The Cotswolds", "51.8330", "-1.8433")
loc4_details = CityWeather("Cambridge", "52.2053", "0.1218")
loc5_details = CityWeather("Bristol", "51.4545", "-2.5879")
loc6_details = CityWeather("Oxford", "51.7520", "-1.2577")
loc7_details = CityWeather("Norwich", "52.6309", "1.2974")
loc8_details = CityWeather("Stonehenge", "51.1789", "-1.8262")
loc9_details = CityWeather("Watergate Bay", "50.4429", "-5.0553")
loc10_details = CityWeather("Birmingham", "52.4862", "-1.8904")
def get_displayed_cities_weather(cities_list):
#for each location in list, run method
for city in cities_list:
city.get_city_weather()
def weatherapp(request):
#list of assigned cities, user assigned to be inserted later in position 0
displayed_cities = [loc1_details, loc2_details, loc3_details, loc4_details, loc5_details, loc6_details, loc7_details, loc8_details, loc9_details, loc10_details]
if request.method == "POST":
#add user defined location to cities list
select_box_json = request.POST['cityselectbox']
select_city = None
if select_box_json != '':
select_city = json.loads(select_box_json)
if request.POST['latitude'] != '' and request.POST['longitude'] != '':
#if user inputs customer lat/lon
input_city = CityWeather(request.POST['city'], request.POST['latitude'], request.POST['longitude'])
displayed_cities.insert(0, input_city)
elif select_city != None:
#if user selects city from list
input_city = CityWeather(select_city['city'], select_city['lat'], select_city['lon'])
displayed_cities.insert(0, input_city)
#pulls weather information for cities in list
get_displayed_cities_weather(displayed_cities)
#displays updated cities weather information on weatherapp template
return render(request, 'weather/weatherapp.html', {'displayed_cities': displayed_cities})
|
[
"django.shortcuts.render",
"json.loads"
] |
[((2198, 2284), 'django.shortcuts.render', 'render', (['request', '"""weather/weatherapp.html"""', "{'displayed_cities': displayed_cities}"], {}), "(request, 'weather/weatherapp.html', {'displayed_cities':\n displayed_cities})\n", (2204, 2284), False, 'from django.shortcuts import render\n'), ((1470, 1497), 'json.loads', 'json.loads', (['select_box_json'], {}), '(select_box_json)\n', (1480, 1497), False, 'import json\n')]
|
"""
Parser for cechmate format of simplicial complex
"""
from itertools import chain, combinations
from cechmate import Cech, Rips, Alpha
import numpy as np
from scipy.sparse import coo_matrix
from dmt.morse_complex import MorseComplex
from dmt.perseus import save_points_perseus_brips, load_points_perseus_brips
def parse_cechmate(cechmate_complex):
""" Parses the Cechmate format for simplicial complexes
:param cechmate_complex: [(simplex_as_index_tuple, filtration)]
    :return: dict with 'cell_dimensions': np.ndarray, 'filtration': np.ndarray,
'boundary_matrix': scipy.sparse.coo_matrix, 'cechmate_complex': cechmate complex for testing
:Example:
>>> cechmate_cplx = [([0], 0), ([1], 0), ([2], 0), ((0, 1, 2), 1.760962625882297), ((1, 2), 1.760962625882297), ((0, 2), 0.30122587679897417), ((0, 1), 0.2489387964292784)]
>>> MorseComplex(**parse_cechmate(cechmate_cplx)
"""
simplices, filtration = zip(*cechmate_complex)
simplices = list(map(tuple, simplices)) # All should be tuples, so they can be in a dict
size = len(simplices)
index_map = {splx: ix for splx, ix in zip(simplices, range(size))}
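    # Pair each simplex's column index with the row index of every codimension-1
    # face (its boundary); these (column, row) pairs populate the sparse boundary matrix below.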
columns_rows = chain.from_iterable([[(index_map[splx], index_map[bdry])
for bdry in combinations(splx, len(splx) - 1) if bdry]
for splx in simplices])
columns, rows = zip(*columns_rows)
columns, rows = list(columns), list(rows)
data = [True] * len(columns)
boundary = coo_matrix((data, (rows, columns)), shape=(size, size), dtype=bool)
filtration = list(filtration)
cell_dimensions = np.array(list(map(len, simplices))) - 1
return dict(boundary_matrix=boundary,
cell_dimensions=cell_dimensions,
filtration=filtration,
cechmate_complex=cechmate_complex)
class VietorisRips(MorseComplex):
default_max_dim = 3
def __init__(self, points, max_dimension=default_max_dim):
points = np.array(points)
self.max_dimension = max_dimension
super().__init__(points=points, **parse_cechmate(Rips(maxdim=self.max_dimension).build(points)))
def save_brips(self, filepath):
save_points_perseus_brips(filepath, self.points)
@classmethod
def load_brips(cls, filepath, max_dimension=default_max_dim):
return cls(load_points_perseus_brips(filepath), max_dimension)
class CechComplex(MorseComplex):
default_max_dim = 3
def __init__(self, points, max_dimension=default_max_dim):
points = np.array(points, dtype=float)
self.max_dimension = max_dimension
super().__init__(points=points, **parse_cechmate(Cech(maxdim=self.max_dimension).build(points)))
class AlphaComplex(MorseComplex):
def __init__(self, points):
points = np.array(points, dtype=float)
super().__init__(points=points, **parse_cechmate(Alpha().build(points)))
|
[
"cechmate.Rips",
"cechmate.Cech",
"dmt.perseus.load_points_perseus_brips",
"scipy.sparse.coo_matrix",
"numpy.array",
"cechmate.Alpha",
"dmt.perseus.save_points_perseus_brips"
] |
[((1520, 1587), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(data, (rows, columns))'], {'shape': '(size, size)', 'dtype': 'bool'}), '((data, (rows, columns)), shape=(size, size), dtype=bool)\n', (1530, 1587), False, 'from scipy.sparse import coo_matrix\n'), ((2007, 2023), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2015, 2023), True, 'import numpy as np\n'), ((2217, 2265), 'dmt.perseus.save_points_perseus_brips', 'save_points_perseus_brips', (['filepath', 'self.points'], {}), '(filepath, self.points)\n', (2242, 2265), False, 'from dmt.perseus import save_points_perseus_brips, load_points_perseus_brips\n'), ((2562, 2591), 'numpy.array', 'np.array', (['points'], {'dtype': 'float'}), '(points, dtype=float)\n', (2570, 2591), True, 'import numpy as np\n'), ((2826, 2855), 'numpy.array', 'np.array', (['points'], {'dtype': 'float'}), '(points, dtype=float)\n', (2834, 2855), True, 'import numpy as np\n'), ((2369, 2404), 'dmt.perseus.load_points_perseus_brips', 'load_points_perseus_brips', (['filepath'], {}), '(filepath)\n', (2394, 2404), False, 'from dmt.perseus import save_points_perseus_brips, load_points_perseus_brips\n'), ((2124, 2155), 'cechmate.Rips', 'Rips', ([], {'maxdim': 'self.max_dimension'}), '(maxdim=self.max_dimension)\n', (2128, 2155), False, 'from cechmate import Cech, Rips, Alpha\n'), ((2692, 2723), 'cechmate.Cech', 'Cech', ([], {'maxdim': 'self.max_dimension'}), '(maxdim=self.max_dimension)\n', (2696, 2723), False, 'from cechmate import Cech, Rips, Alpha\n'), ((2913, 2920), 'cechmate.Alpha', 'Alpha', ([], {}), '()\n', (2918, 2920), False, 'from cechmate import Cech, Rips, Alpha\n')]
|
from __future__ import print_function
import json
import os
import re
import subprocess
import maya.cmds
import maya.mel
# The version in which Redshift fixed the render layer render setup override locking issue
# Prior versions will need to use the workaround in the unlockRenderSetupOverrides function
REDSHIFT_RENDER_SETUP_FIX_VERSION = (2, 5, 64)
def getCurrentRenderLayer():
return maya.cmds.editRenderLayerGlobals( query=True, currentRenderLayer=True )
# A method mimicing the built-in mel function: 'renderLayerDisplayName', but first tries to see if it exists
def getRenderLayerDisplayName( layer_name ):
if maya.mel.eval( 'exists renderLayerDisplayName' ):
layer_name = maya.mel.eval( 'renderLayerDisplayName ' + layer_name )
else:
# renderLayerDisplayName doesn't exist, so we try to do it ourselves
if layer_name == 'masterLayer':
return layer_name
if maya.cmds.objExists(layer_name) and maya.cmds.nodeType( layer_name ) == 'renderLayer':
# Display name for default render layer
if maya.cmds.getAttr( layer_name + '.identification' ) == 0:
return 'masterLayer'
# If Render Setup is used the corresponding Render Setup layer name should be used instead of the legacy render layer name.
result = maya.cmds.listConnections( layer_name + '.msg', type='renderSetupLayer' )
if result:
return result[0]
return layer_name
# remove_override_json_string is a json string consisting of a node as a key, with a list of attributes we want to unlock as the value
# ie. remove_override_json_string = '{ "defaultRenderGlobals": [ "animation", "startFrame", "endFrame" ] }'
def unlockRenderSetupOverrides( remove_overrides_json_string ):
try:
# Ensure we're in a version that HAS render setups
import maya.app.renderSetup.model.renderSetup as renderSetup
except ImportError:
return
# Ensure that the scene is actively using render setups and not the legacy layers
if not maya.mel.eval( 'exists mayaHasRenderSetup' ) or not maya.mel.eval( 'mayaHasRenderSetup();' ):
return
# If the version of Redshift has the bug fix, bypass the overrides
if not redshiftRequiresWorkaround():
return
remove_overrides = json.loads( remove_overrides_json_string )
render_setup = renderSetup.instance()
layers = render_setup.getRenderLayers()
layers_to_unlock = [ layer for layer in layers if layer.name() != 'defaultRenderLayer' ]
for render_layer in layers_to_unlock:
print('Disabling Render Setup Overrides in "%s"' % render_layer.name())
for collection in render_layer.getCollections():
if type(collection) == maya.app.renderSetup.model.collection.RenderSettingsCollection:
for override in collection.getOverrides():
if override.targetNodeName() in remove_overrides and override.attributeName() in remove_overrides[ override.targetNodeName() ]:
print( ' Disabling Override: %s.%s' % ( override.targetNodeName(), override.attributeName() ) )
override.setSelfEnabled( False )
def redshiftRequiresWorkaround():
# Get the version of Redshift
redshiftVersion = maya.cmds.pluginInfo( 'redshift4maya', query=True, version=True )
redshiftVersion = tuple( int(version) for version in redshiftVersion.split('.') )
# Check if the Redshift version is prior to the bug fix
return redshiftVersion < REDSHIFT_RENDER_SETUP_FIX_VERSION
def performArnoldPathmapping( startFrame, endFrame, tempLocation=None ):
"""
Performs pathmapping on all arnold standin files that are need for the current task
:param startFrame: Start frame of the task
:param endFrame: End frame of the task
:param tempLocation: The temporary location where all pathmapped files will be copied to. Only needs to be provided the first time this function is called.
:return: Nothing
"""
if tempLocation:
performArnoldPathmapping.tempLocation = tempLocation
else:
if not performArnoldPathmapping.tempLocation:
raise ValueError( "The first call made to performArnoldPathmapping must provided a tempLocation" )
#a simple regex for finding frame numbers
frameRE = re.compile( r'#+' )
# Define a function that will be used when looping to replace padding with a 0 padded string.
def __replaceHashesWithZeroPaddedFrame( frameNum, origFileName ):
return frameRE.sub( lambda matchObj: str( frameNum ).zfill( len(matchObj.group(0)) ), origFileName )
standInObjects = maya.cmds.ls( type="aiStandIn" )
for standIn in standInObjects:
try:
# If we have already seen this node before then grab the settings that we need
origDir, origFileName = performArnoldPathmapping.originalProperties[ standIn ]
except KeyError:
            # If we have not seen this node before then store its original path and update the path in the node to where we will be pathmapping the file.
standinFile = maya.cmds.getAttr( standIn + ".dso" )
if not standinFile or os.path.splitext( standinFile )[ 1 ].lower() != ".ass":
# If the standinFile isn't set or isn't .ass file then we cannot pathmap it.
continue
origDir, origFileName = os.path.split( standinFile )
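            # Redirect the standin's .dso to a per-node temp directory; pathmapped copies of its .ass files will be written there.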
standinTempLocation = os.path.join( performArnoldPathmapping.tempLocation, standIn )
maya.cmds.setAttr( "%s.dso" % standIn, os.path.join( standinTempLocation, origFileName ), type="string" )
#Create the Temp directory the first time we see a new standin
if not os.path.isdir( standinTempLocation ):
os.makedirs( standinTempLocation )
performArnoldPathmapping.originalProperties[ standIn ] = (origDir, origFileName)
for frame in range( startFrame, endFrame + 1 ):
# evaluate the frame that the node is using (Normally it will be the same as the scene but it can be different)
evalFrame = maya.cmds.getAttr( "%s.frameNumber" % standIn, time=frame )
fileNameWithFrame = __replaceHashesWithZeroPaddedFrame( evalFrame, origFileName )
# If we have already mapped this file then continue.
if not ( standIn, fileNameWithFrame ) in performArnoldPathmapping.mappedFiles:
#Perform pathmapping
runPathmappingOnFile(
os.path.join( origDir, fileNameWithFrame ),
os.path.join( performArnoldPathmapping.tempLocation, standIn, fileNameWithFrame )
)
performArnoldPathmapping.mappedFiles.add( ( standIn, fileNameWithFrame ) )
performArnoldPathmapping.tempLocation = ""
#State property which contains mappings of standin objects to their original fileproperties
performArnoldPathmapping.originalProperties = {}
#State property which contains unique identifier for each file that we have already mapped in the form of ( standin, filename )
performArnoldPathmapping.mappedFiles=set()
def runPathmappingOnFile( originalLocation, pathmappedLocation ):
print( 'Running PathMapping on "%s" and copying to "%s"' % (originalLocation, pathmappedLocation) )
arguments = [ "-CheckPathMappingInFile", originalLocation, pathmappedLocation ]
print( CallDeadlineCommand( arguments ) )
def GetDeadlineCommand():
deadlineBin = ""
try:
deadlineBin = os.environ['DEADLINE_PATH']
except KeyError:
        #if the error is a key error it means that DEADLINE_PATH is not set. However, Deadline command may be in the PATH, or on OSX it could be in the file /Users/Shared/Thinkbox/DEADLINE_PATH
pass
# On OSX, we look for the DEADLINE_PATH file if the environment variable does not exist.
if deadlineBin == "" and os.path.exists( "/Users/Shared/Thinkbox/DEADLINE_PATH" ):
with open( "/Users/Shared/Thinkbox/DEADLINE_PATH" ) as f:
deadlineBin = f.read().strip()
deadlineCommand = os.path.join(deadlineBin, "deadlinecommand")
return deadlineCommand
def CallDeadlineCommand(arguments, hideWindow=True):
deadlineCommand = GetDeadlineCommand()
startupinfo = None
creationflags = 0
if os.name == 'nt':
if hideWindow:
# Python 2.6 has subprocess.STARTF_USESHOWWINDOW, and Python 2.7 has subprocess._subprocess.STARTF_USESHOWWINDOW, so check for both.
if hasattr( subprocess, '_subprocess' ) and hasattr( subprocess._subprocess, 'STARTF_USESHOWWINDOW' ):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess._subprocess.STARTF_USESHOWWINDOW
elif hasattr( subprocess, 'STARTF_USESHOWWINDOW' ):
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
# still show top-level windows, but don't show a console window
CREATE_NO_WINDOW = 0x08000000 #MSDN process creation flag
creationflags = CREATE_NO_WINDOW
arguments.insert( 0, deadlineCommand )
    # Specifying PIPE for all handles to work around a Python bug on Windows. The unused handles are then closed immediately afterwards.
proc = subprocess.Popen(arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=creationflags)
output, errors = proc.communicate()
return output
def OutputPluginVersions():
print("================== PLUGINS ===================\n")
plugins = sorted(maya.cmds.pluginInfo(query=True, listPlugins=True), key=lambda p: p.lower())
for plugin in plugins:
version = maya.cmds.pluginInfo(plugin, query=True, version=True)
print("%s (v%s)" % (plugin, version))
print("==============================================\n")
def ForceLoadPlugins():
"""
Force load an explicit set of plug-ins with known issues. There are bugs in Maya where these plug-ins are not
automatically loaded when required in a scene.
When a scene contains an Alembic reference node (backed by an external .abc file), Maya does not embed "requires"
statements into the scene to indicate that the "AbcImport" and "fbxmaya" plug-ins are dependencies of the scene.
This can be changed for the current Maya session with the following MEL commands:
pluginInfo -edit -writeRequires AbcImport
pluginInfo -edit -writeRequires fbxmaya
However, there is a secondary bug where the "requires" statements are inserted in the scene after already trying to
load the references.
    Our work-around is to always force-load these plug-ins before loading the job scene. Both plug-ins ship with
    Maya and are fairly lightweight.
"""
PLUGINS_TO_LOAD = (
'AbcImport', # For Maya 2017 on Windows this is 5MB and takes 15 ms to load
'fbxmaya' # For Maya 2017 on Windows this is 12MB and takes 141ms to load
)
for plugin in PLUGINS_TO_LOAD:
plugin_loaded = maya.cmds.pluginInfo(plugin, query=True, loaded=True)
if not plugin_loaded:
try:
print( "Loading %s..." % plugin, end="" )
maya.cmds.loadPlugin( plugin )
except RuntimeError as e:
# Maya raises this exception when it cannot find the plugin. The message is formatted as:
#
# Plug-in, "pluginName", was not found on MAYA_PLUG_IN_PATH
#
# This seems reasonable enough to forward on to the user. The try-except only serves the purpose of
# continuing to attempt additional plug-ins. This is a best-effort work-around.
print( 'Error: %s' % e)
else:
print( "ok" )
|
[
"maya.app.renderSetup.model.renderSetup.instance",
"subprocess.Popen",
"json.loads",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"subprocess.STARTUPINFO",
"os.path.splitext",
"os.path.split",
"os.path.join",
"re.compile"
] |
[((2324, 2364), 'json.loads', 'json.loads', (['remove_overrides_json_string'], {}), '(remove_overrides_json_string)\n', (2334, 2364), False, 'import json\n'), ((2387, 2409), 'maya.app.renderSetup.model.renderSetup.instance', 'renderSetup.instance', ([], {}), '()\n', (2407, 2409), True, 'import maya.app.renderSetup.model.renderSetup as renderSetup\n'), ((4352, 4368), 're.compile', 're.compile', (['"""#+"""'], {}), "('#+')\n", (4362, 4368), False, 'import re\n'), ((8153, 8197), 'os.path.join', 'os.path.join', (['deadlineBin', '"""deadlinecommand"""'], {}), "(deadlineBin, 'deadlinecommand')\n", (8165, 8197), False, 'import os\n'), ((9412, 9573), 'subprocess.Popen', 'subprocess.Popen', (['arguments'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'startupinfo': 'startupinfo', 'creationflags': 'creationflags'}), '(arguments, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, startupinfo=startupinfo, creationflags=\n creationflags)\n', (9428, 9573), False, 'import subprocess\n'), ((7963, 8017), 'os.path.exists', 'os.path.exists', (['"""/Users/Shared/Thinkbox/DEADLINE_PATH"""'], {}), "('/Users/Shared/Thinkbox/DEADLINE_PATH')\n", (7977, 8017), False, 'import os\n'), ((5429, 5455), 'os.path.split', 'os.path.split', (['standinFile'], {}), '(standinFile)\n', (5442, 5455), False, 'import os\n'), ((5492, 5552), 'os.path.join', 'os.path.join', (['performArnoldPathmapping.tempLocation', 'standIn'], {}), '(performArnoldPathmapping.tempLocation, standIn)\n', (5504, 5552), False, 'import os\n'), ((8709, 8733), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (8731, 8733), False, 'import subprocess\n'), ((5607, 5654), 'os.path.join', 'os.path.join', (['standinTempLocation', 'origFileName'], {}), '(standinTempLocation, origFileName)\n', (5619, 5654), False, 'import os\n'), ((5768, 5802), 'os.path.isdir', 'os.path.isdir', (['standinTempLocation'], {}), '(standinTempLocation)\n', (5781, 5802), False, 'import os\n'), ((5822, 5854), 'os.makedirs', 'os.makedirs', (['standinTempLocation'], {}), '(standinTempLocation)\n', (5833, 5854), False, 'import os\n'), ((6566, 6606), 'os.path.join', 'os.path.join', (['origDir', 'fileNameWithFrame'], {}), '(origDir, fileNameWithFrame)\n', (6578, 6606), False, 'import os\n'), ((6630, 6709), 'os.path.join', 'os.path.join', (['performArnoldPathmapping.tempLocation', 'standIn', 'fileNameWithFrame'], {}), '(performArnoldPathmapping.tempLocation, standIn, fileNameWithFrame)\n', (6642, 6709), False, 'import os\n'), ((8911, 8935), 'subprocess.STARTUPINFO', 'subprocess.STARTUPINFO', ([], {}), '()\n', (8933, 8935), False, 'import subprocess\n'), ((5218, 5247), 'os.path.splitext', 'os.path.splitext', (['standinFile'], {}), '(standinFile)\n', (5234, 5247), False, 'import os\n')]
|
# conflicts with isort because of local non-relative import
# pylint: disable=wrong-import-order
import unittest
from fastapi.testclient import TestClient
from models.tortoise_models.fleet import Fleet, Robot
from models.tortoise_models.fleet_state import FleetState, RobotStateEnum
from rest_server.app import get_app
from rest_server.repositories.report.fleet_state import get_fleet_state
from rest_server.test_utils import start_test_database
from tortoise import Tortoise
app = get_app()
class TestReportFleetState(unittest.IsolatedAsyncioTestCase):
async def asyncSetUp(self):
await start_test_database()
self.client = TestClient(app)
robot = await Robot.create(name="Robot 1")
fleet = await Fleet.create(name="Fleet 1")
await FleetState.create(
fleet=fleet,
robot=robot,
robot_battery_percent="100",
robot_location="1",
robot_mode=RobotStateEnum.MODE_WAITING,
robot_seq=1,
robot_task_id="test",
)
await FleetState.create(
fleet=fleet,
robot=robot,
robot_battery_percent="100",
robot_location="1",
robot_mode=RobotStateEnum.MODE_WAITING,
robot_seq=2,
robot_task_id="test",
)
async def asyncTearDown(self):
await Tortoise.close_connections()
async def test_get_fleet_states(self):
fleet_list = await get_fleet_state(0, 10)
self.assertEqual(len(fleet_list), 2)
|
[
"models.tortoise_models.fleet.Robot.create",
"models.tortoise_models.fleet.Fleet.create",
"rest_server.repositories.report.fleet_state.get_fleet_state",
"tortoise.Tortoise.close_connections",
"rest_server.test_utils.start_test_database",
"models.tortoise_models.fleet_state.FleetState.create",
"rest_server.app.get_app",
"fastapi.testclient.TestClient"
] |
[((484, 493), 'rest_server.app.get_app', 'get_app', ([], {}), '()\n', (491, 493), False, 'from rest_server.app import get_app\n'), ((648, 663), 'fastapi.testclient.TestClient', 'TestClient', (['app'], {}), '(app)\n', (658, 663), False, 'from fastapi.testclient import TestClient\n'), ((604, 625), 'rest_server.test_utils.start_test_database', 'start_test_database', ([], {}), '()\n', (623, 625), False, 'from rest_server.test_utils import start_test_database\n'), ((687, 715), 'models.tortoise_models.fleet.Robot.create', 'Robot.create', ([], {'name': '"""Robot 1"""'}), "(name='Robot 1')\n", (699, 715), False, 'from models.tortoise_models.fleet import Fleet, Robot\n'), ((738, 766), 'models.tortoise_models.fleet.Fleet.create', 'Fleet.create', ([], {'name': '"""Fleet 1"""'}), "(name='Fleet 1')\n", (750, 766), False, 'from models.tortoise_models.fleet import Fleet, Robot\n'), ((782, 957), 'models.tortoise_models.fleet_state.FleetState.create', 'FleetState.create', ([], {'fleet': 'fleet', 'robot': 'robot', 'robot_battery_percent': '"""100"""', 'robot_location': '"""1"""', 'robot_mode': 'RobotStateEnum.MODE_WAITING', 'robot_seq': '(1)', 'robot_task_id': '"""test"""'}), "(fleet=fleet, robot=robot, robot_battery_percent='100',\n robot_location='1', robot_mode=RobotStateEnum.MODE_WAITING, robot_seq=1,\n robot_task_id='test')\n", (799, 957), False, 'from models.tortoise_models.fleet_state import FleetState, RobotStateEnum\n'), ((1059, 1234), 'models.tortoise_models.fleet_state.FleetState.create', 'FleetState.create', ([], {'fleet': 'fleet', 'robot': 'robot', 'robot_battery_percent': '"""100"""', 'robot_location': '"""1"""', 'robot_mode': 'RobotStateEnum.MODE_WAITING', 'robot_seq': '(2)', 'robot_task_id': '"""test"""'}), "(fleet=fleet, robot=robot, robot_battery_percent='100',\n robot_location='1', robot_mode=RobotStateEnum.MODE_WAITING, robot_seq=2,\n robot_task_id='test')\n", (1076, 1234), False, 'from models.tortoise_models.fleet_state import FleetState, RobotStateEnum\n'), ((1372, 1400), 'tortoise.Tortoise.close_connections', 'Tortoise.close_connections', ([], {}), '()\n', (1398, 1400), False, 'from tortoise import Tortoise\n'), ((1472, 1494), 'rest_server.repositories.report.fleet_state.get_fleet_state', 'get_fleet_state', (['(0)', '(10)'], {}), '(0, 10)\n', (1487, 1494), False, 'from rest_server.repositories.report.fleet_state import get_fleet_state\n')]
|
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from utils import get_most_recent_checkpoint,get_test_set,get_training_set, set_seed
from math import log10
from model.srcnn_upconv7 import Upconv
from model.rdn import RDN
import argparse
import os
from os.path import exists, join, basename
from os import makedirs, remove
import urllib
import tarfile
def download_bsd300(dest):
output_image_dir = join(dest, "BSDS300/images")
if not exists(output_image_dir):
makedirs(dest)
url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
print("downloading url ", url)
data = urllib.request.urlopen(url)
file_path = join(dest, basename(url))
with open(file_path, 'wb') as f:
f.write(data.read())
print("Extracting data")
with tarfile.open(file_path) as tar:
for item in tar:
tar.extract(item, dest)
remove(file_path)
else:
print("BSDS300 dataset already exists")
return output_image_dir
'''
Training Settings
'''
def str2bool(v):
return str(v).lower() in ("y", "yes", "true", "t", "1")
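# e.g. str2bool("Yes") -> True, str2bool("0") -> False; unrecognized strings fall back to False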
parser = argparse.ArgumentParser(description='Pytorch Image/Video Super-Resolution')
parser.add_argument('--upscale_factor',type=int,default=2, help="Super-resolution upscale factor")
parser.add_argument('--datapath',type=str,default="data/", help="Path to Original data")
parser.add_argument('--model',type=str,default="RDN",help="Choose which SR model to use")
parser.add_argument('--threads',type=int,default=4,help='Number of thread for DataLoader')
parser.add_argument('--lr',type=float,default=0.001,help='Learning rate')
parser.add_argument('--nEpochs',type=int,default=1000,help='Number of epochs')
parser.add_argument('--batchSize',type=int,default=8,help='Training batch size')
parser.add_argument('--testBatchSize',type=int,default=4,help='Test batch size')
parser.add_argument('--isCuda',type=str2bool,default=True,help='Cuda Usage')
opt = parser.parse_args()
print(opt)
lr = opt.lr
nEpochs = opt.nEpochs
batchSize = opt.batchSize
testBatchSize = opt.testBatchSize
isCuda = opt.isCuda
set_seed(0)
if isCuda and not torch.cuda.is_available():
    raise Exception("No GPU available, please set isCuda to False")
device = torch.device("cuda" if isCuda else "cpu")
print('===> Loading datasets')
dataset_path = download_bsd300(opt.datapath)
train_set = get_training_set(opt.upscale_factor,dataset_path)
test_set = get_test_set(opt.upscale_factor,dataset_path)
training_data_loader = DataLoader(dataset=train_set, num_workers=opt.threads, batch_size=batchSize, shuffle=True)
testing_data_loader = DataLoader(dataset=test_set, num_workers=opt.threads, batch_size=testBatchSize, shuffle=False)
print('===> Datasets Loading Complete')
print('===> Model Initialize')
if opt.model == "Upconv":
model = Upconv(upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/Upconv',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
#scheduler = model.scheduler
if len(next(os.walk('ckpt/Upconv'))[2]) != 0:
min_iter = 1
last_ckpt, min_iter = get_most_recent_checkpoint('ckpt/Upconv')
model = torch.load(last_ckpt)
else :
min_iter = 1
elif opt.model == "RDN":
model = RDN(channel = 1,growth_rate = 64,rdb_number = 3,upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/RDN',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
scheduler = model.scheduler
min_iter = 1
print('===> Model Initialize Complete')
'''
Model Implementation
elif opt.model == "Model_name":
model = Model_name(upscale_factor=opt.upscale_factor).to(device)
os.makedirs('ckpt/Model_name',exist_ok=True)
criterion = model.criterion
optimizer = model.optimizer
scheduler = model.scheduler
if len(next(os.walk('ckpt/Model_name'))[2]) != 0:
min_iter = 1
last_ckpt, min_iter = get_most_recent_checkpoint('ckpt/Model_name')
model = torch.load(last_ckpt)
else :
min_iter = 1
'''
print('===> Training Initialize')
if torch.cuda.is_available():
cudnn.benchmark = True
criterion.cuda()
print('===> Training Initialize Complete')
def train(epoch):
print('===> Training # %d epoch'%(epoch))
epoch_loss = 0
for iteration, batch in enumerate(training_data_loader, 1):
input, target = batch[0].to(device), batch[1].to(device)
optimizer.zero_grad()
loss = criterion(model(input), target)
epoch_loss += loss.item()
loss.backward()
optimizer.step()
print("===> Epoch[{}]({}/{}): Loss: {:.6f}".format(epoch, iteration, len(training_data_loader), loss.item()))
print("===> Epoch {} Complete: Avg. Loss: {:.6f}".format(epoch, epoch_loss / len(training_data_loader)))
def test():
print('===> Testing # %d epoch'%(epoch))
avg_psnr = 0
with torch.no_grad():
for batch in testing_data_loader:
input, target = batch[0].to(device), batch[1].to(device)
prediction = model(input)
mse = criterion(prediction, target)
psnr = 10 * log10(1 / mse.item())
avg_psnr += psnr
print("===> Avg. PSNR: {:.6f} dB".format(avg_psnr / len(testing_data_loader)))
def checkpoint(epoch):
if opt.model == "Upconv":
model_out_path = "ckpt/" + "Upconv" + "/model_epoch_{}.pth".format(epoch)
elif opt.model == "RDN":
model_out_path = "ckpt/" + "RDN" + "/model_epoch_{}.pth".format(epoch)
'''
Model Implementation
elif opt.model == "Model_Name":
model_out_path = "ckpt/" + "Model_Name" + "/model_epoch_{}.pth".format(epoch)
'''
print(model_out_path)
torch.save(model, model_out_path)
print("Checkpoint saved to {}".format(model_out_path))
if __name__ == '__main__':
for epoch in range(min_iter, nEpochs + 1):
print("=====> Training %d epochs"%(epoch))
train(epoch)
print("=====> Training %d epochs completed"%(epoch))
print("=====> Testing %d epochs"%(epoch))
test()
print("=====> Testing %d epochs completed"%(epoch))
print("=====> lr scheduler activated in %d epochs"%(epoch))
scheduler.step(epoch)
print("=====> lr scheduler activated in %d epochs completed"%(epoch))
print("=====> Save checkpoint %d epochs"%(epoch))
checkpoint(epoch)
print("=====> Save checkpoint %d epochs completed"%(epoch))
|
[
"os.remove",
"argparse.ArgumentParser",
"utils.set_seed",
"model.srcnn_upconv7.Upconv",
"os.walk",
"torch.device",
"torch.no_grad",
"os.path.join",
"torch.utils.data.DataLoader",
"torch.load",
"os.path.exists",
"urllib.request.urlopen",
"tarfile.open",
"model.rdn.RDN",
"os.path.basename",
"utils.get_training_set",
"utils.get_most_recent_checkpoint",
"utils.get_test_set",
"torch.cuda.is_available",
"os.makedirs",
"torch.save"
] |
[((1282, 1357), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Pytorch Image/Video Super-Resolution"""'}), "(description='Pytorch Image/Video Super-Resolution')\n", (1305, 1357), False, 'import argparse\n'), ((2275, 2286), 'utils.set_seed', 'set_seed', (['(0)'], {}), '(0)\n', (2283, 2286), False, 'from utils import get_most_recent_checkpoint, get_test_set, get_training_set, set_seed\n'), ((2401, 2442), 'torch.device', 'torch.device', (["('cuda' if isCuda else 'cpu')"], {}), "('cuda' if isCuda else 'cpu')\n", (2413, 2442), False, 'import torch\n'), ((2533, 2583), 'utils.get_training_set', 'get_training_set', (['opt.upscale_factor', 'dataset_path'], {}), '(opt.upscale_factor, dataset_path)\n', (2549, 2583), False, 'from utils import get_most_recent_checkpoint, get_test_set, get_training_set, set_seed\n'), ((2594, 2640), 'utils.get_test_set', 'get_test_set', (['opt.upscale_factor', 'dataset_path'], {}), '(opt.upscale_factor, dataset_path)\n', (2606, 2640), False, 'from utils import get_most_recent_checkpoint, get_test_set, get_training_set, set_seed\n'), ((2664, 2758), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'train_set', 'num_workers': 'opt.threads', 'batch_size': 'batchSize', 'shuffle': '(True)'}), '(dataset=train_set, num_workers=opt.threads, batch_size=batchSize,\n shuffle=True)\n', (2674, 2758), False, 'from torch.utils.data import DataLoader\n'), ((2777, 2876), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'test_set', 'num_workers': 'opt.threads', 'batch_size': 'testBatchSize', 'shuffle': '(False)'}), '(dataset=test_set, num_workers=opt.threads, batch_size=\n testBatchSize, shuffle=False)\n', (2787, 2876), False, 'from torch.utils.data import DataLoader\n'), ((4265, 4290), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4288, 4290), False, 'import torch\n'), ((515, 543), 'os.path.join', 'join', (['dest', '"""BSDS300/images"""'], {}), "(dest, 'BSDS300/images')\n", (519, 543), False, 'from os.path import exists, join, basename\n'), ((3043, 3084), 'os.makedirs', 'os.makedirs', (['"""ckpt/Upconv"""'], {'exist_ok': '(True)'}), "('ckpt/Upconv', exist_ok=True)\n", (3054, 3084), False, 'import os\n'), ((5888, 5921), 'torch.save', 'torch.save', (['model', 'model_out_path'], {}), '(model, model_out_path)\n', (5898, 5921), False, 'import torch\n'), ((556, 580), 'os.path.exists', 'exists', (['output_image_dir'], {}), '(output_image_dir)\n', (562, 580), False, 'from os.path import exists, join, basename\n'), ((590, 604), 'os.makedirs', 'makedirs', (['dest'], {}), '(dest)\n', (598, 604), False, 'from os import makedirs, remove\n'), ((758, 785), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (780, 785), False, 'import urllib\n'), ((1064, 1081), 'os.remove', 'remove', (['file_path'], {}), '(file_path)\n', (1070, 1081), False, 'from os import makedirs, remove\n'), ((2306, 2331), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2329, 2331), False, 'import torch\n'), ((3282, 3323), 'utils.get_most_recent_checkpoint', 'get_most_recent_checkpoint', (['"""ckpt/Upconv"""'], {}), "('ckpt/Upconv')\n", (3308, 3323), False, 'from utils import get_most_recent_checkpoint, get_test_set, get_training_set, set_seed\n'), ((3340, 3361), 'torch.load', 'torch.load', (['last_ckpt'], {}), '(last_ckpt)\n', (3350, 3361), False, 'import torch\n'), ((3530, 3568), 'os.makedirs', 'os.makedirs', (['"""ckpt/RDN"""'], {'exist_ok': '(True)'}), "('ckpt/RDN', exist_ok=True)\n", (3541, 
3568), False, 'import os\n'), ((5073, 5088), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5086, 5088), False, 'import torch\n'), ((818, 831), 'os.path.basename', 'basename', (['url'], {}), '(url)\n', (826, 831), False, 'from os.path import exists, join, basename\n'), ((954, 977), 'tarfile.open', 'tarfile.open', (['file_path'], {}), '(file_path)\n', (966, 977), False, 'import tarfile\n'), ((2986, 3027), 'model.srcnn_upconv7.Upconv', 'Upconv', ([], {'upscale_factor': 'opt.upscale_factor'}), '(upscale_factor=opt.upscale_factor)\n', (2992, 3027), False, 'from model.srcnn_upconv7 import Upconv\n'), ((3432, 3511), 'model.rdn.RDN', 'RDN', ([], {'channel': '(1)', 'growth_rate': '(64)', 'rdb_number': '(3)', 'upscale_factor': 'opt.upscale_factor'}), '(channel=1, growth_rate=64, rdb_number=3, upscale_factor=opt.upscale_factor)\n', (3435, 3511), False, 'from model.rdn import RDN\n'), ((3197, 3219), 'os.walk', 'os.walk', (['"""ckpt/Upconv"""'], {}), "('ckpt/Upconv')\n", (3204, 3219), False, 'import os\n')]
|
from django.urls import path,include
from . import views
from rate import views as user_views
from django.conf.urls.static import static
from django.conf import settings
from django.conf.urls import url
urlpatterns=[
path('',views.home,name = 'home'),
path('accounts/register/', views.register, name='register'),
path('profile/', views.profile,name = 'profile'),
path('update_profile/', user_views.update_profile,name = 'update_profile'),
path('new_project/', views.new_project,name ='new_project'),
path('search/', views.search_results, name = 'search_results'),
url(r'^singleproject/(\d+)',views.single_project,name='singleproject'),
path('rate/<int:id>/',views.rate,name='rates'),
]
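# Note: django.conf.urls.url was removed in Django 4.0; django.urls.re_path is
# the drop-in replacement for the regex-based singleproject route above.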
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"django.conf.urls.static.static",
"django.conf.urls.url",
"django.urls.path"
] |
[((222, 255), 'django.urls.path', 'path', (['""""""', 'views.home'], {'name': '"""home"""'}), "('', views.home, name='home')\n", (226, 255), False, 'from django.urls import path, include\n'), ((261, 320), 'django.urls.path', 'path', (['"""accounts/register/"""', 'views.register'], {'name': '"""register"""'}), "('accounts/register/', views.register, name='register')\n", (265, 320), False, 'from django.urls import path, include\n'), ((326, 373), 'django.urls.path', 'path', (['"""profile/"""', 'views.profile'], {'name': '"""profile"""'}), "('profile/', views.profile, name='profile')\n", (330, 373), False, 'from django.urls import path, include\n'), ((380, 453), 'django.urls.path', 'path', (['"""update_profile/"""', 'user_views.update_profile'], {'name': '"""update_profile"""'}), "('update_profile/', user_views.update_profile, name='update_profile')\n", (384, 453), False, 'from django.urls import path, include\n'), ((460, 519), 'django.urls.path', 'path', (['"""new_project/"""', 'views.new_project'], {'name': '"""new_project"""'}), "('new_project/', views.new_project, name='new_project')\n", (464, 519), False, 'from django.urls import path, include\n'), ((525, 585), 'django.urls.path', 'path', (['"""search/"""', 'views.search_results'], {'name': '"""search_results"""'}), "('search/', views.search_results, name='search_results')\n", (529, 585), False, 'from django.urls import path, include\n'), ((593, 665), 'django.conf.urls.url', 'url', (['"""^singleproject/(\\\\d+)"""', 'views.single_project'], {'name': '"""singleproject"""'}), "('^singleproject/(\\\\d+)', views.single_project, name='singleproject')\n", (596, 665), False, 'from django.conf.urls import url\n'), ((669, 717), 'django.urls.path', 'path', (['"""rate/<int:id>/"""', 'views.rate'], {'name': '"""rates"""'}), "('rate/<int:id>/', views.rate, name='rates')\n", (673, 717), False, 'from django.urls import path, include\n'), ((765, 826), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (771, 826), False, 'from django.conf.urls.static import static\n')]
|
import boto.dynamodb2
from boto.dynamodb2.table import Table
from boto.dynamodb2.fields import HashKey
from boto.regioninfo import RegionInfo
from boto.dynamodb2.layer1 import DynamoDBConnection
from faker import Factory
import uuid
import time
try:
sessions = Table(
table_name='usertable',
schema=[HashKey('id')],
connection=DynamoDBConnection(
region=RegionInfo(name='eu-west-1',
endpoint='dynamodb.eu-west-1.amazonaws.com')
))
except Exception:
    print("connection not successful")
def create_session():
id = str(uuid.uuid4())
timestamp = time.strftime("%Y%m%d%H%M%S")
    fake = Factory.create()  # reuse one Faker instance for all generated fields
    ipv4 = fake.ipv4()
    users_id = fake.slug()
    users_name = fake.first_name()
    users_surname = fake.last_name()
res = sessions.put_item(data={
        'id': id,
'data': {
'user_id': users_id,
'name' : users_name,
'surname' : users_surname,
'ip': str(ipv4),
'datetime': timestamp
}
})
print('Created: ' + str(res))
if __name__ == '__main__':
for x in range(20):
create_session()
|
[
"uuid.uuid4",
"boto.dynamodb2.fields.HashKey",
"time.strftime",
"boto.regioninfo.RegionInfo",
"faker.Factory.create"
] |
[((616, 645), 'time.strftime', 'time.strftime', (['"""%Y%m%d%H%M%S"""'], {}), "('%Y%m%d%H%M%S')\n", (629, 645), False, 'import time\n'), ((586, 598), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (596, 598), False, 'import uuid\n'), ((657, 673), 'faker.Factory.create', 'Factory.create', ([], {}), '()\n', (671, 673), False, 'from faker import Factory\n'), ((696, 712), 'faker.Factory.create', 'Factory.create', ([], {}), '()\n', (710, 712), False, 'from faker import Factory\n'), ((737, 753), 'faker.Factory.create', 'Factory.create', ([], {}), '()\n', (751, 753), False, 'from faker import Factory\n'), ((787, 803), 'faker.Factory.create', 'Factory.create', ([], {}), '()\n', (801, 803), False, 'from faker import Factory\n'), ((321, 334), 'boto.dynamodb2.fields.HashKey', 'HashKey', (['"""id"""'], {}), "('id')\n", (328, 334), False, 'from boto.dynamodb2.fields import HashKey\n'), ((391, 464), 'boto.regioninfo.RegionInfo', 'RegionInfo', ([], {'name': '"""eu-west-1"""', 'endpoint': '"""dynamodb.eu-west-1.amazonaws.com"""'}), "(name='eu-west-1', endpoint='dynamodb.eu-west-1.amazonaws.com')\n", (401, 464), False, 'from boto.regioninfo import RegionInfo\n')]
|
##############################################################################
# Copyright 2020 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import pandas as pd
from . import DFPBase
import numpy as np
import onnx
from onnx import helper
from onnx import AttributeProto, TensorProto, GraphProto
class DateTransformer(DFPBase):
"""
Create time features.
Parameters
----------
column : string
Column name holding the time data. Each element of the column must be a string representing a date such as '2018-02-02 18:31' or an int value representing the time in seconds. When the time is represented as seconds, the origin argument needs to be specified to calculate the date from the time data. From this column, the following six features (columns) are created. The names of the created columns have this column name as a prefix.
- MY (months in a year)
- WY (weeks in a year)
- DY (days in a year)
- DM (days in a month)
- DW (days in a week)
- HD (hours in a day)
origin: string (default is 1970-01-01)
        An origin of the time to calculate dates. This is needed when a column holds time values in seconds; it is not needed when the column holds string values representing dates.
Examples:
----------
>>> df = pd.DataFrame({'DT': ['2018-02-02 18:31', '2018-02-03 11:15', '2018-02-03 13:11']})
    >>> tf1 = DateTransformer(column='DT')
"""
def __init__(
self,
column=None,
origin=None
):
super().__init__()
self.column = column
self.origin = origin
self.date_fields = ['MY', 'WY', 'DY', 'DM', 'DW', 'HD']
def transform(self, df):
if self.origin is not None:
df[self.column] = pd.to_datetime(df[self.column], origin=self.origin, unit='s')
else:
df[self.column] = pd.to_datetime(df[self.column])
for f in self.date_fields:
output_column = self.column + '_' + f
if f == 'MY':
df[output_column] = df[self.column].dt.month
elif f == 'WY':
df[output_column] = df[self.column].dt.isocalendar().week.astype(np.int64)
elif f == 'DY':
df[output_column] = df[self.column].dt.dayofyear
elif f == 'DM':
df[output_column] = df[self.column].dt.day
elif f == 'DW':
df[output_column] = df[self.column].dt.dayofweek
elif f == 'HD':
df[output_column] = df[self.column].dt.hour
else:
                assert False, 'Unknown date field ' + f
return df
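    # A minimal usage sketch (values are illustrative):
    #   >>> df = pd.DataFrame({'DT': ['2018-02-02 18:31']})
    #   >>> DateTransformer(column='DT').transform(df)
    # adds DT_MY=2, DT_DW=4 (Friday), DT_HD=18, etc. for the first row.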
def to_onnx_operator(self, graph):
input_tensor = graph.get_current_tensor(self.column)
output_tensors = []
output_tensor_names = []
for f in self.date_fields:
output_column = self.column + '_' + f
output_tensor = graph.get_next_tensor(output_column, TensorProto.INT32)
output_tensors.append(output_tensor)
output_tensor_names.append(output_tensor.name)
kwargs = {}
kwargs['format'] = '%Y-%m-%d'
op = helper.make_node('Date', [input_tensor.name], output_tensor_names, graph.get_node_name('Date'), domain='ai.onnx.ml', **kwargs)
graph.add([input_tensor], output_tensors, [op])
|
[
"pandas.to_datetime"
] |
[((2416, 2477), 'pandas.to_datetime', 'pd.to_datetime', (['df[self.column]'], {'origin': 'self.origin', 'unit': '"""s"""'}), "(df[self.column], origin=self.origin, unit='s')\n", (2430, 2477), True, 'import pandas as pd\n'), ((2522, 2553), 'pandas.to_datetime', 'pd.to_datetime', (['df[self.column]'], {}), '(df[self.column])\n', (2536, 2553), True, 'import pandas as pd\n')]
|
"""Kraken - objects.operators.kl_operator module.
Classes:
KLOperator - KL operator object.
"""
import pprint
import re
from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3
from kraken.core.objects.object_3d import Object3D
from kraken.core.objects.operators.operator import Operator
from kraken.core.objects.attributes.attribute import Attribute
from kraken.core.kraken_system import ks
from kraken.log import getLogger
logger = getLogger('kraken')
class KLOperator(Operator):
"""KL Operator representation."""
def __init__(self, name, solverTypeName, extension):
super(KLOperator, self).__init__(name)
self.solverTypeName = solverTypeName
self.extension = extension
# Load the Fabric Engine client and construct the RTVal for the Solver
ks.loadCoreClient()
ks.loadExtension('Kraken')
if self.extension != 'Kraken':
ks.loadExtension(self.extension)
self.solverRTVal = ks.constructRTVal(self.solverTypeName)
# logger.debug("Creating kl operator object [%s] of type [%s] from extension [%s]:" % (self.getName(), self.solverTypeName, self.extension))
self.args = self.solverRTVal.getArguments('KrakenSolverArg[]')
# Initialize the inputs and outputs based on the given args.
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
# Note, do not create empty arrays here as we need to know later whether or not
# to create default values if input/output is None
if argConnectionType == 'In':
self.inputs[argName] = None
else:
self.outputs[argName] = None
def getSolverTypeName(self):
"""Returns the solver type name for this operator.
Returns:
str: Name of the solver type this operator uses.
"""
return self.solverTypeName
def getExtension(self):
"""Returns the extention this operator uses.
Returns:
str: Name of the extension this solver uses.
"""
return self.extension
def getSolverArgs(self):
"""Returns the args array defined by the KL Operator.
Returns:
RTValArray: Args array defined by the KL Operator.
"""
return self.args
def getInputType(self, name):
"""Returns the type of input with the specified name."""
for arg in self.args:
if arg.connectionType.getSimpleType() == "In" and arg.name.getSimpleType() == name:
return arg.dataType.getSimpleType()
raise Exception("Could not find input argument %s in kl operator %s" % (name, self.getName()))
def getOutputType(self, name):
"""Returns the type of output with the specified name."""
for arg in self.args:
if arg.connectionType.getSimpleType() == "Out" and arg.name.getSimpleType() == name:
return arg.dataType.getSimpleType()
raise Exception("Could not find output argument %s in kl operator %s" % (name, self.getName()))
def getDefaultValue(self, name, RTValDataType, mode="arg"):
"""Returns the default RTVal value for this argument
Only print debug if setting default inputs. Don't care about outputs, really
Args:
name (str): Name of the input to get.
mode (str): "inputs" or "outputs"
Returns:
RTVal
"""
def isFixedArrayType(string):
return bool(re.search(r'\[\d', string))
# If attribute has a default value
if self.solverRTVal.defaultValues.has("Boolean", name).getSimpleType():
RTVal = ks.convertFromRTVal(self.solverRTVal.defaultValues[name])
if RTVal.isArray():
# If RTValDataType is variable array, but default value is fixed array, convert it
if isFixedArrayType(RTVal.getTypeName().getSimpleType()) and not isFixedArrayType(RTValDataType):
RTValArray = ks.rtVal(RTValDataType)
if len(RTVal):
RTValArray.resize(len(RTVal))
for i in range(len(RTVal)):
RTValArray[i] = RTVal[i]
RTVal = RTValArray
else:
                # Not totally sure why we need to do this, but we get None from getSimpleType from the RTVal
                # when we run it on its own, so we use the type that we query. Gotta investigate this further...
RTVal = ks.convertFromRTVal(self.solverRTVal.defaultValues[name], RTTypeName=RTValDataType)
logger.debug("Using default value for %s.%s.%s(%s) --> %s" % (self.solverTypeName, self.getName(), mode, name, RTVal))
return RTVal
else:
if True: #mode == "arg": #Only report a warning if default value is not provided for arg
logger.warn("No default value for %s.%s.%s[%s]." % (self.solverTypeName, self.getName(), mode, name))
defaultValue = ks.rtVal(RTValDataType)
if True: #mode == "arg":
logger.warn(" Creating default value by generating new RTVal object of type: %s. You should set default values for %s.%s(%s) in your KL Operator." %
(RTValDataType, self.solverTypeName, mode, name,))
return defaultValue
def getInput(self, name):
"""Returns the input with the specified name.
If there is no input value, it get the default RTVal and converts to
python data
Args:
name (str): Name of the input to get.
Returns:
object: Input object.
"""
if name in self.inputs and self.inputs[name] is not None:
return self.inputs[name]
def rt2Py(rtVal, rtType):
if "[" in rtType:
return []
if rtType == "Xfo":
return Xfo(rtVal)
if rtType == "Mat44":
return Mat44(rtVal)
if rtType == "Vec2":
return Vec2(rtVal)
if rtType == "Vec3":
return Vec3(rtVal)
else:
return rtVal.getSimpleType()
#raise ValueError("Cannot convert rtval %s from %s" (rtVal, rtType))
argDataType = None
for arg in self.args:
if arg.name.getSimpleType() == name:
argDataType = arg.dataType.getSimpleType()
break
if argDataType is None:
raise Exception("Cannot find arg %s for object %s" (arg, self.getName()))
defaultVal = self.getDefaultValue(name, argDataType, mode="arg")
pyVal = rt2Py(defaultVal, argDataType)
return pyVal
def generateSourceCode(self):
"""Returns the source code for a stub operator that will invoke the KL operator
Returns:
str: The source code for the stub operator.
"""
# Start constructing the source code.
opSourceCode = "dfgEntry {\n"
# In SpliceMaya, output arrays are not resized by the system prior to
# calling into Splice, so we explicily resize the arrays in the
# generated operator stub code.
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argDataType.endswith('[]') and argConnectionType == 'Out':
arraySize = len(self.getOutput(argName))
opSourceCode += " " + argName + ".resize(" + str(arraySize) + \
");\n"
# guard
if argDataType.endswith('[]') and argConnectionType == 'In':
arraySize = len(self.getInput(argName))
opSourceCode += " if({}.size() != {}){{\n".format(argName, str(arraySize))
opSourceCode += " return;\n"
opSourceCode += " }\n"
opSourceCode += " if(solver == null)\n"
opSourceCode += " solver = " + self.solverTypeName + "();\n"
opSourceCode += " solver.solve(\n"
for i in xrange(len(self.args)):
argName = self.args[i].name.getSimpleType()
if i == len(self.args) - 1:
opSourceCode += " " + argName + "\n"
else:
opSourceCode += " " + argName + ",\n"
opSourceCode += " );\n"
opSourceCode += "}\n"
return opSourceCode
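    # A sketch of the stub produced for a hypothetical solver "MySolver" with an
    # In array arg "pose" of size 2 and an Out array arg "result" of size 2:
    #
    #   dfgEntry {
    #     if(pose.size() != 2){
    #       return;
    #     }
    #     result.resize(2);
    #     if(solver == null)
    #       solver = MySolver();
    #     solver.solve(
    #       pose,
    #       result
    #     );
    #   }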
def evaluate(self):
"""Invokes the KL operator causing the output values to be computed.
Returns:
bool: True if successful.
"""
# logger.debug("\nEvaluating kl operator [%s] of type [%s] from extension [%s]..." % (self.getName(), self.solverTypeName, self.extension))
super(KLOperator, self).evaluate()
def getRTVal(obj, asInput=True):
if isinstance(obj, Object3D):
if asInput:
return obj.globalXfo.getRTVal().toMat44('Mat44')
else:
return obj.xfo.getRTVal().toMat44('Mat44')
elif isinstance(obj, Xfo):
return obj.getRTVal().toMat44('Mat44')
elif isinstance(obj, MathObject):
return obj.getRTVal()
elif isinstance(obj, Attribute):
return obj.getRTVal()
elif type(obj) is bool:
return ks.rtVal('Boolean', obj)
elif type(obj) is int:
return ks.rtVal('Integer', obj)
elif type(obj) is float:
return ks.rtVal('Scalar', obj)
elif type(obj) is str:
return ks.rtVal('String', obj)
else:
                return obj
def validateArg(rtVal, argName, argDataType):
"""Validate argument types when passing built in Python types.
Args:
rtVal (RTVal): rtValue object.
argName (str): Name of the argument being validated.
argDataType (str): Type of the argument being validated.
"""
# Validate types when passing a built in Python type
if type(rtVal) in (bool, str, int, float):
if argDataType in ('Scalar', 'Float32', 'UInt32', 'Integer'):
if type(rtVal) not in (float, int):
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
elif argDataType == 'Boolean':
if type(rtVal) != bool:
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
elif argDataType == 'String':
if type(rtVal) != str:
raise TypeError(self.getName() + ".evaluate(): Invalid Argument Value: " + str(rtVal) + " (" + type(rtVal).__name__ + "), for Argument: " + argName + " (" + argDataType + ")")
argVals = []
debug = []
for i in xrange(len(self.args)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argDataType == 'EvalContext':
argVals.append(ks.constructRTVal(argDataType))
continue
if argName == 'time':
argVals.append(ks.constructRTVal(argDataType))
continue
if argName == 'frame':
argVals.append(ks.constructRTVal(argDataType))
continue
if argConnectionType == 'In':
if str(argDataType).endswith('[]'):
if argName in self.inputs and self.inputs[argName] is not None:
rtValArray = ks.rtVal(argDataType)
rtValArray.resize(len(self.inputs[argName]))
for j in xrange(len(self.inputs[argName])):
if self.inputs[argName][j] is None:
continue
rtVal = getRTVal(self.inputs[argName][j])
validateArg(rtVal, argName, argDataType[:-2])
rtValArray[j] = rtVal
else:
rtValArray = self.getDefaultValue(argName, argDataType, mode="arg")
argVals.append(rtValArray)
else:
if argName in self.inputs and self.inputs[argName] is not None:
rtVal = getRTVal(self.inputs[argName])
else:
rtVal = self.getDefaultValue(argName, argDataType, mode="arg")
validateArg(rtVal, argName, argDataType)
argVals.append(rtVal)
elif argConnectionType in ('IO', 'Out'):
if str(argDataType).endswith('[]'):
if argName in self.outputs and self.outputs[argName] is not None:
rtValArray = ks.rtVal(argDataType)
rtValArray.resize(len(self.outputs[argName]))
for j in xrange(len(self.outputs[argName])):
if self.outputs[argName][j] is None:
continue
rtVal = getRTVal(self.outputs[argName][j], asInput=False)
validateArg(rtVal, argName, argDataType[:-2])
rtValArray[j] = rtVal
else:
rtValArray = self.getDefaultValue(argName, argDataType, mode="output")
argVals.append(rtValArray)
else:
if argName in self.outputs and self.outputs[argName] is not None:
rtVal = getRTVal(self.outputs[argName], asInput=False)
else:
rtVal = self.getDefaultValue(argName, argDataType, mode="output")
validateArg(rtVal, argName, argDataType)
argVals.append(rtVal)
else:
raise Exception("Operator:'" + self.getName() + " has an invalid 'argConnectionType': " + argConnectionType)
debug.append(
{
argName: [
{
"dataType": argDataType,
"connectionType": argConnectionType
},
argVals[-1]
]
})
try:
# argstr = [str(arg) for arg in argVals]
# logger.debug("%s.solve('', %s)" % (self.solverTypeName, ", ".join(argstr)))
self.solverRTVal.solve('', *argVals)
except Exception as e:
errorMsg = "\nPossible problem with KL operator [%s]. Arguments:\n" % self.getName()
errorMsg += pprint.pformat(debug, indent=4, width=800)
logger.error(errorMsg)
raise e
# Now put the computed values out to the connected output objects.
def setRTVal(obj, rtval):
if isinstance(obj, Object3D):
obj.xfo.setFromMat44(Mat44(rtval))
elif isinstance(obj, Xfo):
obj.setFromMat44(Mat44(rtval))
elif isinstance(obj, Mat44):
obj.setFromMat44(rtval)
elif isinstance(obj, Attribute):
if ks.isRTVal(rtval):
obj.setValue(rtval.getSimpleType())
else:
obj.setValue(rtval)
else:
if hasattr(obj, '__iter__'):
logger.warning("Warning: Trying to set a KL port with an array directly.")
logger.warning("Not setting rtval: %s\n\tfor output object: %s\n\tof KL object: %s\n." % \
(rtval, obj.getName(), self.getName()))
for i in xrange(len(argVals)):
arg = self.args[i]
argName = arg.name.getSimpleType()
argDataType = arg.dataType.getSimpleType()
argConnectionType = arg.connectionType.getSimpleType()
if argConnectionType != 'In':
if argName in self.outputs and self.outputs[argName] is not None:
if str(argDataType).endswith('[]'):
for j in xrange(len(argVals[i])):
if len(self.outputs[argName]) > j and self.outputs[argName][j] is not None:
setRTVal(self.outputs[argName][j], argVals[i][j])
else:
setRTVal(self.outputs[argName], argVals[i])
return True
|
[
"kraken.core.kraken_system.ks.constructRTVal",
"kraken.core.kraken_system.ks.loadCoreClient",
"kraken.log.getLogger",
"pprint.pformat",
"kraken.core.maths.Xfo",
"kraken.core.maths.Vec3",
"kraken.core.maths.Mat44",
"kraken.core.kraken_system.ks.convertFromRTVal",
"kraken.core.kraken_system.ks.loadExtension",
"kraken.core.maths.Vec2",
"kraken.core.kraken_system.ks.rtVal",
"re.search",
"kraken.core.kraken_system.ks.isRTVal"
] |
[((451, 470), 'kraken.log.getLogger', 'getLogger', (['"""kraken"""'], {}), "('kraken')\n", (460, 470), False, 'from kraken.log import getLogger\n'), ((813, 832), 'kraken.core.kraken_system.ks.loadCoreClient', 'ks.loadCoreClient', ([], {}), '()\n', (830, 832), False, 'from kraken.core.kraken_system import ks\n'), ((841, 867), 'kraken.core.kraken_system.ks.loadExtension', 'ks.loadExtension', (['"""Kraken"""'], {}), "('Kraken')\n", (857, 867), False, 'from kraken.core.kraken_system import ks\n'), ((979, 1017), 'kraken.core.kraken_system.ks.constructRTVal', 'ks.constructRTVal', (['self.solverTypeName'], {}), '(self.solverTypeName)\n', (996, 1017), False, 'from kraken.core.kraken_system import ks\n'), ((5218, 5241), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['RTValDataType'], {}), '(RTValDataType)\n', (5226, 5241), False, 'from kraken.core.kraken_system import ks\n'), ((919, 951), 'kraken.core.kraken_system.ks.loadExtension', 'ks.loadExtension', (['self.extension'], {}), '(self.extension)\n', (935, 951), False, 'from kraken.core.kraken_system import ks\n'), ((3867, 3924), 'kraken.core.kraken_system.ks.convertFromRTVal', 'ks.convertFromRTVal', (['self.solverRTVal.defaultValues[name]'], {}), '(self.solverRTVal.defaultValues[name])\n', (3886, 3924), False, 'from kraken.core.kraken_system import ks\n'), ((3694, 3721), 're.search', 're.search', (['"""\\\\[\\\\d"""', 'string'], {}), "('\\\\[\\\\d', string)\n", (3703, 3721), False, 'import re\n'), ((4718, 4806), 'kraken.core.kraken_system.ks.convertFromRTVal', 'ks.convertFromRTVal', (['self.solverRTVal.defaultValues[name]'], {'RTTypeName': 'RTValDataType'}), '(self.solverRTVal.defaultValues[name], RTTypeName=\n RTValDataType)\n', (4737, 4806), False, 'from kraken.core.kraken_system import ks\n'), ((6099, 6109), 'kraken.core.maths.Xfo', 'Xfo', (['rtVal'], {}), '(rtVal)\n', (6102, 6109), False, 'from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((6167, 6179), 'kraken.core.maths.Mat44', 'Mat44', (['rtVal'], {}), '(rtVal)\n', (6172, 6179), False, 'from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((6236, 6247), 'kraken.core.maths.Vec2', 'Vec2', (['rtVal'], {}), '(rtVal)\n', (6240, 6247), False, 'from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((6304, 6315), 'kraken.core.maths.Vec3', 'Vec3', (['rtVal'], {}), '(rtVal)\n', (6308, 6315), False, 'from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((15396, 15438), 'pprint.pformat', 'pprint.pformat', (['debug'], {'indent': '(4)', 'width': '(800)'}), '(debug, indent=4, width=800)\n', (15410, 15438), False, 'import pprint\n'), ((4204, 4227), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['RTValDataType'], {}), '(RTValDataType)\n', (4212, 4227), False, 'from kraken.core.kraken_system import ks\n'), ((11756, 11786), 'kraken.core.kraken_system.ks.constructRTVal', 'ks.constructRTVal', (['argDataType'], {}), '(argDataType)\n', (11773, 11786), False, 'from kraken.core.kraken_system import ks\n'), ((11878, 11908), 'kraken.core.kraken_system.ks.constructRTVal', 'ks.constructRTVal', (['argDataType'], {}), '(argDataType)\n', (11895, 11908), False, 'from kraken.core.kraken_system import ks\n'), ((12001, 12031), 'kraken.core.kraken_system.ks.constructRTVal', 'ks.constructRTVal', (['argDataType'], {}), '(argDataType)\n', (12018, 12031), False, 'from kraken.core.kraken_system import ks\n'), ((15684, 15696), 'kraken.core.maths.Mat44', 'Mat44', (['rtval'], {}), '(rtval)\n', (15689, 15696), False, 'from kraken.core.maths import 
MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((12274, 12295), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['argDataType'], {}), '(argDataType)\n', (12282, 12295), False, 'from kraken.core.kraken_system import ks\n'), ((15770, 15782), 'kraken.core.maths.Mat44', 'Mat44', (['rtval'], {}), '(rtval)\n', (15775, 15782), False, 'from kraken.core.maths import MathObject, Mat44, Xfo, Vec2, Vec3\n'), ((13515, 13536), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['argDataType'], {}), '(argDataType)\n', (13523, 13536), False, 'from kraken.core.kraken_system import ks\n'), ((15929, 15946), 'kraken.core.kraken_system.ks.isRTVal', 'ks.isRTVal', (['rtval'], {}), '(rtval)\n', (15939, 15946), False, 'from kraken.core.kraken_system import ks\n'), ((9684, 9708), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['"""Boolean"""', 'obj'], {}), "('Boolean', obj)\n", (9692, 9708), False, 'from kraken.core.kraken_system import ks\n'), ((9767, 9791), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['"""Integer"""', 'obj'], {}), "('Integer', obj)\n", (9775, 9791), False, 'from kraken.core.kraken_system import ks\n'), ((9852, 9875), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['"""Scalar"""', 'obj'], {}), "('Scalar', obj)\n", (9860, 9875), False, 'from kraken.core.kraken_system import ks\n'), ((9934, 9957), 'kraken.core.kraken_system.ks.rtVal', 'ks.rtVal', (['"""String"""', 'obj'], {}), "('String', obj)\n", (9942, 9957), False, 'from kraken.core.kraken_system import ks\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from torch.autograd import Variable
import os
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
import thermodrift_model
def load_data():
# Load data
X = torch.load('/gscratch/stf/jgershon/tensor_x.pt')
Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt')
return X, Y
def split_data(X, Y):
if 'X_train.pt' not in os.listdir('/gscratch/stf/jgershon/'):
# Convert y back from one hot encoding
Y = torch.argmax(Y, dim=1)
print('new Y: ', Y[:10])
print('X load: ', X.size())
print('Y load: ', Y.size())
# Split data tensors into dev and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.20, random_state=42)
print('X_train: ', X_train.size())
print('X_test: ', X_test.size())
print('y_train: ', y_train.size())
print('y_test: ', y_test.size())
torch.save(X_train, '/gscratch/stf/jgershon/X_train.pt')
torch.save(X_test, '/gscratch/stf/jgershon/X_test.pt')
torch.save(y_train, '/gscratch/stf/jgershon/y_train.pt')
torch.save(y_test, '/gscratch/stf/jgershon/y_test.pt')
else:
X_train = torch.load('/gscratch/stf/jgershon/X_train.pt')
X_test = torch.load('/gscratch/stf/jgershon/X_test.pt')
y_train = torch.load('/gscratch/stf/jgershon/y_train.pt')
y_test = torch.load('/gscratch/stf/jgershon/y_test.pt')
return X_train, X_test, y_train, y_test
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-indir', type=str, required=False, default=None)
parser.add_argument('-outdir', type=str, required=True, default=None)
args = parser.parse_args()
return args
args = get_args()
indir = args.indir
outdir = args.outdir
# Loading and processing the data:
X, Y = load_data()
X_train, X_test, y_train, y_test = split_data(X, Y)
# One-hot encoded tensors are already in [0, 1], so no extra normalization is applied.
# Generate train and test datasets
trainset = TensorDataset(X_train, y_train)
testset = TensorDataset(X_test, y_test)
# Prepare train and test loaders
train_loader = torch.utils.data.DataLoader(trainset,
batch_size=100,
shuffle=True,
num_workers=2)
test_loader = torch.utils.data.DataLoader(testset,
batch_size=100,
shuffle=True,
num_workers=2)
# Instantiate the network
model = thermodrift_model.Net()
# Load model from previous state if indir arg is specified
if indir is not None:
if len(indir) > 0:
model.load_state_dict(torch.load(indir))
model.eval()
print('Model loaded from: ', indir)
# Instantiate the cross-entropy loss
criterion = nn.CrossEntropyLoss()
# Instantiate the Adam optimizer
optimizer = optim.Adam(model.parameters(),
lr=3e-4,
weight_decay=0.001)
# Moving tensors over to gpu if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('Device chosen: ', device)
X_train = X_train.to(device)
X_test = X_test.to(device)
y_train = y_train.to(device)
y_test = y_test.to(device)
model = model.to(device)
# batch_size, epoch and iteration
batch_size = 100
features_train = X.size()[0]
n_iters = 100000
num_epochs = int(n_iters/(features_train/batch_size))
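# Each epoch yields features_train/batch_size optimizer steps, so this epoch
# count targets roughly n_iters iterations in total.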
num_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('Number of parameters: ', num_parameters)
# CNN model training
count = 0
loss_list = []
iteration_list = []
accuracy_list = []
output_dict = {}
# Number of iterations between validation cycles
n_run_valid = 500
for epoch in range(num_epochs):
for i, data in enumerate(train_loader, 0):
train, labels = data
# Clear gradients
optimizer.zero_grad()
# Forward propagation
outputs = model(train.unsqueeze(1))
# Calculate relu and cross entropy loss
loss = criterion(outputs, labels)
# Calculating gradients
loss.backward()
# Update weights
optimizer.step()
count += 1
print('Train - example: '+str(i)+' loss: '+str(float(loss.data)))
if count % n_run_valid == 0:
# Calculate Accuracy
correct = 0
total = 0
valid_loss = 0
# Iterate through test dataset
for j, data in enumerate(test_loader, 0):
test, labels = data
# Forward propagation
outputs = model(test.unsqueeze(1))
loss_valid = criterion(outputs, labels)
# Get predictions from the maximum value
predicted = torch.max(outputs.data, 1)[1]
# Total number of labels
total += len(labels)
correct += (predicted == labels).sum()
valid_loss += float(loss_valid.data)
#print('valid_loss: ', valid_loss)
accuracy = 100 * correct / float(total)
print('Valid - iter: '+str(count/n_run_valid) +
' loss: '+str(float(valid_loss/(j+1))))
if count % 500 == 0:
# Print Loss
print('Iteration: {} Train Loss: {} Test Accuracy: {} %'.format(
count, loss.data, accuracy))
path = outdir+'save_model/model_'+str(count)+'.pt'
torch.save(model.state_dict(), path)
print('Model '+str(count)+' was saved.')
|
[
"thermodrift_model.Net",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"torch.argmax",
"torch.load",
"sklearn.model_selection.train_test_split",
"torch.nn.CrossEntropyLoss",
"torch.save",
"torch.cuda.is_available",
"torch.utils.data.TensorDataset",
"torch.max",
"os.listdir"
] |
[((2265, 2296), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_train', 'y_train'], {}), '(X_train, y_train)\n', (2278, 2296), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((2307, 2336), 'torch.utils.data.TensorDataset', 'TensorDataset', (['X_test', 'y_test'], {}), '(X_test, y_test)\n', (2320, 2336), False, 'from torch.utils.data import TensorDataset, DataLoader\n'), ((2386, 2472), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': '(100)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(trainset, batch_size=100, shuffle=True,\n num_workers=2)\n', (2413, 2472), False, 'import torch\n'), ((2612, 2697), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': '(100)', 'shuffle': '(True)', 'num_workers': '(2)'}), '(testset, batch_size=100, shuffle=True,\n num_workers=2)\n', (2639, 2697), False, 'import torch\n'), ((2856, 2879), 'thermodrift_model.Net', 'thermodrift_model.Net', ([], {}), '()\n', (2877, 2879), False, 'import thermodrift_model\n'), ((3148, 3169), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3167, 3169), True, 'import torch.nn as nn\n'), ((374, 422), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/tensor_x.pt"""'], {}), "('/gscratch/stf/jgershon/tensor_x.pt')\n", (384, 422), False, 'import torch\n'), ((431, 479), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/tensor_y.pt"""'], {}), "('/gscratch/stf/jgershon/tensor_y.pt')\n", (441, 479), False, 'import torch\n'), ((1709, 1779), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawTextHelpFormatter'}), '(formatter_class=argparse.RawTextHelpFormatter)\n', (1732, 1779), False, 'import argparse\n'), ((547, 584), 'os.listdir', 'os.listdir', (['"""/gscratch/stf/jgershon/"""'], {}), "('/gscratch/stf/jgershon/')\n", (557, 584), False, 'import os\n'), ((645, 667), 'torch.argmax', 'torch.argmax', (['Y'], {'dim': '(1)'}), '(Y, dim=1)\n', (657, 667), False, 'import torch\n'), ((870, 924), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'Y'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(X, Y, test_size=0.2, random_state=42)\n', (886, 924), False, 'from sklearn.model_selection import train_test_split\n'), ((1115, 1171), 'torch.save', 'torch.save', (['X_train', '"""/gscratch/stf/jgershon/X_train.pt"""'], {}), "(X_train, '/gscratch/stf/jgershon/X_train.pt')\n", (1125, 1171), False, 'import torch\n'), ((1180, 1234), 'torch.save', 'torch.save', (['X_test', '"""/gscratch/stf/jgershon/X_test.pt"""'], {}), "(X_test, '/gscratch/stf/jgershon/X_test.pt')\n", (1190, 1234), False, 'import torch\n'), ((1243, 1299), 'torch.save', 'torch.save', (['y_train', '"""/gscratch/stf/jgershon/y_train.pt"""'], {}), "(y_train, '/gscratch/stf/jgershon/y_train.pt')\n", (1253, 1299), False, 'import torch\n'), ((1308, 1362), 'torch.save', 'torch.save', (['y_test', '"""/gscratch/stf/jgershon/y_test.pt"""'], {}), "(y_test, '/gscratch/stf/jgershon/y_test.pt')\n", (1318, 1362), False, 'import torch\n'), ((1391, 1438), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/X_train.pt"""'], {}), "('/gscratch/stf/jgershon/X_train.pt')\n", (1401, 1438), False, 'import torch\n'), ((1456, 1502), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/X_test.pt"""'], {}), "('/gscratch/stf/jgershon/X_test.pt')\n", (1466, 1502), False, 'import torch\n'), ((1521, 1568), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/y_train.pt"""'], {}), 
"('/gscratch/stf/jgershon/y_train.pt')\n", (1531, 1568), False, 'import torch\n'), ((1586, 1632), 'torch.load', 'torch.load', (['"""/gscratch/stf/jgershon/y_test.pt"""'], {}), "('/gscratch/stf/jgershon/y_test.pt')\n", (1596, 1632), False, 'import torch\n'), ((3399, 3424), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3422, 3424), False, 'import torch\n'), ((3014, 3031), 'torch.load', 'torch.load', (['indir'], {}), '(indir)\n', (3024, 3031), False, 'import torch\n'), ((5106, 5132), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5115, 5132), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
import frappe
from toolz.curried import (
compose,
merge,
unique,
concat,
valmap,
groupby,
first,
excepts,
keyfilter,
map,
filter,
)
import html
from erpnext.portal.product_configurator.utils import (
get_products_for_website,
get_product_settings,
get_item_codes_by_attributes,
get_conditions,
)
from erpnext.shopping_cart.product_info import get_product_info_for_website
from erpnext.accounts.doctype.sales_invoice.pos import get_child_nodes
from erpnext.utilities.product import get_price, get_qty_in_stock
from cm_custom.api.utils import handle_error, transform_route
@frappe.whitelist(allow_guest=True)
@handle_error
def get_list(page="1", field_filters=None, attribute_filters=None, search=None):
other_fieldnames = ["item_group", "thumbnail", "has_variants"]
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
products_settings = get_product_settings()
products_per_page = products_settings.products_per_page
get_other_fields = compose(
valmap(excepts(StopIteration, first, lambda _: {})),
groupby("name"),
lambda item_codes: frappe.db.sql(
"""
SELECT name, {other_fieldnames}
FROM `tabItem`
WHERE name IN %(item_codes)s
""".format(
other_fieldnames=", ".join(other_fieldnames)
),
values={"item_codes": item_codes},
as_dict=1,
),
lambda items: [x.get("name") for x in items],
)
frappe.form_dict.start = (frappe.utils.cint(page) - 1) * products_per_page
kwargs = _get_args(field_filters, attribute_filters, search)
items = get_products_for_website(**kwargs)
other_fields = get_other_fields(items) if items else {}
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{k: other_fields.get(x.get("name"), {}).get(k) for k in other_fieldnames},
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
@handle_error
def get_count(field_filters=None, attribute_filters=None, search=None):
products_settings = get_product_settings()
products_per_page = products_settings.products_per_page
def get_pages(count):
return frappe.utils.ceil(count / products_per_page)
kwargs = _get_args(field_filters, attribute_filters, search)
def get_field_filters():
if not field_filters:
return []
meta = frappe.get_meta("Item")
def get_filter(fieldname, values):
df = meta.get_field(fieldname)
if df.fieldtype == "Table MultiSelect":
child_meta = frappe.get_meta(df.options)
fields = child_meta.get(
"fields", {"fieldtype": "Link", "in_list_view": 1}
)
if fields:
return [df.options, fields[0].fieldname, "in", values]
return ["Item", fieldname, "in", values]
return [get_filter(k, v) for k, v in kwargs.get("field_filters").items() if v]
def get_attribute_conditions():
if not attribute_filters:
return None
return get_conditions(
[
[
"Item",
"name",
"in",
get_item_codes_by_attributes(kwargs.get("attribute_filters")),
]
]
)
def get_default_conditions():
return get_conditions([["Item", "disabled", "=", 0]])
def get_variant_conditions():
if products_settings.hide_variants:
return get_conditions([["Item", "show_in_website", "=", 1]])
return get_conditions(
[
["Item", "show_in_website", "=", 1],
["Item", "show_variant_in_website", "=", 1],
],
"or",
)
def get_search_conditions():
if not search:
return None
meta = frappe.get_meta("Item")
        search_fields = set(meta.get_search_fields()) | {
            "name", "item_name", "description", "item_group",
        }
return get_conditions(
[["Item", field, "like", "%(search)s"] for field in search_fields], "or"
)
_field_filters = get_field_filters()
conditions = " and ".join(
[
c
for c in [
get_attribute_conditions(),
get_conditions(_field_filters, "and"),
get_default_conditions(),
get_variant_conditions(),
get_search_conditions(),
]
if c
]
)
left_joins = " ".join(
[
"LEFT JOIN `tab{0}` ON `tab{}`.parent = `tabItem`.name".format(f[0])
for f in _field_filters
if f[0] != "Item"
]
)
count = frappe.db.sql(
"""
SELECT COUNT(`tabItem`.name) FROM `tabItem` {left_joins}
WHERE {conditions}
""".format(
left_joins=left_joins, conditions=conditions
)
)[0][0]
return {"count": count, "pages": get_pages(count)}
@frappe.whitelist(allow_guest=True)
@handle_error
def get(name=None, route=None):
item_code = _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
doc = frappe.get_cached_value(
"Item",
item_code,
fieldname=[
"name",
"item_name",
"item_group",
"has_variants",
"description",
"web_long_description",
"image",
"website_image",
],
as_dict=1,
)
price_list = frappe.get_cached_value("Shopping Cart Settings", None, "price_list")
item_prices = _get_item_prices(price_list, [doc])
get_rate = _rate_getter(price_list, item_prices)
return merge({"route": route}, doc, get_rate(doc.get("name")))
@frappe.whitelist(allow_guest=True)
@handle_error
def get_product_info(name=None, item_code=None, route=None, token=None):
# todo: first set user from token
frappe.set_user(
frappe.get_cached_value("Ahong eCommerce Settings", None, "webapp_user")
)
item_code = item_code or _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
item_for_website = get_product_info_for_website(
item_code, skip_quotation_creation=True
)
stock_qtys_by_item = _get_stock_by_item([{"name": item_code}])
return {
"price": keyfilter(
lambda x: x in ["currency", "price_list_rate"],
item_for_website.get("product_info", {}).get("price", {}),
),
"stock_qty": stock_qtys_by_item.get(item_code, 0),
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_media(name=None, route=None):
item_code = _get_name(name, route)
def get_values(name):
return frappe.get_cached_value(
"Item",
name,
["thumbnail", "image", "website_image", "slideshow"],
as_dict=1,
)
def get_slideshows(slideshow):
if not slideshow:
return None
doc = frappe.get_cached_doc("Website Slideshow", slideshow)
if not doc:
return None
return [x.get("image") for x in doc.slideshow_items if x.get("image")]
variant_of = frappe.get_cached_value("Item", item_code, "variant_of")
images = get_values(item_code)
template_images = get_values(variant_of) if variant_of else {}
def get_image(field):
return images.get(field) or template_images.get(field)
return {
"thumbnail": get_image("thumbnail"),
"image": get_image("image"),
"website_image": get_image("website_image"),
"slideshow": get_slideshows(get_image("slideshow")),
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_related_items(name=None, route=None):
item_code = _get_name(name, route)
if not item_code:
frappe.throw(frappe._("Item does not exist at this route"))
item_group = frappe.get_cached_value("Item", item_code, "item_group")
result = get_list(field_filters={"item_group": [item_group]})
return [x for x in result if x.get("name") != item_code]
def _get_name(name=None, route=None):
if name:
return html.unescape(name)
if route:
return frappe.db.exists("Item", {"route": (route or "").replace("__", "/")})
return None
_get_item_prices = compose(
valmap(excepts(StopIteration, first, lambda _: {})),
groupby("item_code"),
lambda price_list, items: frappe.db.sql(
"""
SELECT item_code, price_list_rate
FROM `tabItem Price`
WHERE price_list = %(price_list)s AND item_code IN %(item_codes)s
""",
values={"price_list": price_list, "item_codes": [x.get("name") for x in items]},
as_dict=1,
)
if price_list
else {},
)
def _rate_getter(price_list, item_prices):
def fn(item_code):
price_obj = (
get_price(
item_code,
price_list,
customer_group=frappe.get_cached_value(
"Selling Settings", None, "customer_group"
),
company=frappe.defaults.get_global_default("company"),
)
or {}
)
price_list_rate = item_prices.get(item_code, {}).get("price_list_rate")
item_price = price_obj.get("price_list_rate") or price_list_rate
return {
"price_list_rate": item_price,
"slashed_rate": price_list_rate if price_list_rate != item_price else None,
}
return fn
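
# e.g. with a price-list rate of 100 and a customer-group rate of 80, fn returns
# {"price_list_rate": 80, "slashed_rate": 100}; slashed_rate is None when equal.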
def _get_args(field_filters=None, attribute_filters=None, search=None):
get_item_groups = compose(
list,
unique,
map(lambda x: x.get("name")),
concat,
map(lambda x: get_child_nodes("Item Group", x) if x else []),
)
field_dict = (
frappe.parse_json(field_filters)
if isinstance(field_filters, str)
else field_filters
) or {}
item_groups = (
get_item_groups(field_dict.get("item_group"))
if field_dict.get("item_group")
else None
)
return {
"field_filters": merge(
field_dict, {"item_group": item_groups} if item_groups else {}
),
"attribute_filters": frappe.parse_json(attribute_filters),
"search": search,
}
@frappe.whitelist(allow_guest=True)
@handle_error
def get_recent_items():
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
products_per_page = frappe.db.get_single_value(
"Products Settings", "products_per_page"
)
items = frappe.db.sql(
"""
SELECT
name, item_name, item_group, route, has_variants,
thumbnail, image, website_image,
description, web_long_description
FROM `tabItem`
WHERE show_in_website = 1
ORDER BY modified DESC
LIMIT %(products_per_page)s
""",
values={"products_per_page": products_per_page},
as_dict=1,
)
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
@handle_error
def get_featured_items():
homepage = frappe.get_single("Homepage")
if not homepage.products:
return []
price_list = frappe.db.get_single_value("Shopping Cart Settings", "price_list")
items = frappe.db.sql(
"""
SELECT
name, item_name, item_group, route, has_variants,
thumbnail, image, website_image,
description, web_long_description
FROM `tabItem`
WHERE show_in_website = 1 AND name IN %(featured)s
ORDER BY modified DESC
""",
values={"featured": [x.item_code for x in homepage.products]},
as_dict=1,
)
item_prices = _get_item_prices(price_list, items) if items else {}
get_rates = _rate_getter(price_list, item_prices)
stock_qtys_by_item = _get_stock_by_item(items) if items else {}
return [
merge(
x,
get_rates(x.get("name")),
{
"route": transform_route(x),
"description": frappe.utils.strip_html_tags(x.get("description") or ""),
"stock_qty": stock_qtys_by_item.get(x.get("name"), 0),
},
)
for x in items
]
@frappe.whitelist(allow_guest=True)
def get_next_attribute_and_values(item_code, selected_attributes):
from erpnext.portal.product_configurator.utils import get_next_attribute_and_values
session_user = frappe.session.user
webapp_user = frappe.get_cached_value(
"Ahong eCommerce Settings", None, "webapp_user"
)
if not webapp_user:
frappe.throw(frappe._("Site setup not complete"))
frappe.set_user(webapp_user)
result = get_next_attribute_and_values(item_code, selected_attributes)
frappe.set_user(session_user)
return result
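
# _get_stock_by_item: available qty = GREATEST(actual - reserved - reserved for
# production - reserved for subcontract, 0), converted from the stock UOM to the
# item's sales UOM via `tabUOM Conversion Detail` (factor defaults to 1).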
def _get_stock_by_item(items):
warehouses = [
x.get("name")
for x in get_child_nodes(
"Warehouse",
frappe.db.get_single_value("Ahong eCommerce Settings", "warehouse"),
)
]
if not warehouses:
return {}
return {
item_code: stock_qty
for item_code, stock_qty in frappe.db.sql(
"""
SELECT b.item_code,
GREATEST(
b.actual_qty - b.reserved_qty - b.reserved_qty_for_production - b.reserved_qty_for_sub_contract,
0
) / IFNULL(C.conversion_factor, 1)
FROM `tabBin` AS b
INNER JOIN `tabItem` AS i
ON b.item_code = i.item_code
LEFT JOIN `tabUOM Conversion Detail` C
ON i.sales_uom = C.uom AND C.parent = i.item_code
WHERE b.item_code IN %(item_codes)s AND b.warehouse in %(warehouses)s
""",
values={
"item_codes": [x.get("name") for x in items],
"warehouses": warehouses,
},
as_list=1,
)
}
|
[
"erpnext.accounts.doctype.sales_invoice.pos.get_child_nodes",
"frappe.parse_json",
"erpnext.portal.product_configurator.utils.get_next_attribute_and_values",
"frappe.get_cached_value",
"frappe.get_meta",
"frappe.db.sql",
"erpnext.shopping_cart.product_info.get_product_info_for_website",
"frappe.defaults.get_global_default",
"frappe.utils.ceil",
"erpnext.portal.product_configurator.utils.get_conditions",
"toolz.curried.merge",
"frappe.get_cached_doc",
"frappe.get_single",
"frappe._",
"erpnext.portal.product_configurator.utils.get_product_settings",
"toolz.curried.groupby",
"frappe.whitelist",
"frappe.set_user",
"frappe.utils.cint",
"frappe.db.get_single_value",
"html.unescape",
"toolz.curried.excepts",
"cm_custom.api.utils.transform_route",
"erpnext.portal.product_configurator.utils.get_products_for_website"
] |
[((660, 694), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (676, 694), False, 'import frappe\n'), ((2480, 2514), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (2496, 2514), False, 'import frappe\n'), ((5644, 5678), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (5660, 5678), False, 'import frappe\n'), ((6459, 6493), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (6475, 6493), False, 'import frappe\n'), ((7298, 7332), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (7314, 7332), False, 'import frappe\n'), ((8391, 8425), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (8407, 8425), False, 'import frappe\n'), ((11029, 11063), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (11045, 11063), False, 'import frappe\n'), ((12302, 12336), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (12318, 12336), False, 'import frappe\n'), ((13564, 13598), 'frappe.whitelist', 'frappe.whitelist', ([], {'allow_guest': '(True)'}), '(allow_guest=True)\n', (13580, 13598), False, 'import frappe\n'), ((874, 940), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Shopping Cart Settings"""', '"""price_list"""'], {}), "('Shopping Cart Settings', 'price_list')\n", (900, 940), False, 'import frappe\n'), ((965, 987), 'erpnext.portal.product_configurator.utils.get_product_settings', 'get_product_settings', ([], {}), '()\n', (985, 987), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((1747, 1781), 'erpnext.portal.product_configurator.utils.get_products_for_website', 'get_products_for_website', ([], {}), '(**kwargs)\n', (1771, 1781), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((2625, 2647), 'erpnext.portal.product_configurator.utils.get_product_settings', 'get_product_settings', ([], {}), '()\n', (2645, 2647), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((5865, 6054), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Item"""', 'item_code'], {'fieldname': "['name', 'item_name', 'item_group', 'has_variants', 'description',\n 'web_long_description', 'image', 'website_image']", 'as_dict': '(1)'}), "('Item', item_code, fieldname=['name', 'item_name',\n 'item_group', 'has_variants', 'description', 'web_long_description',\n 'image', 'website_image'], as_dict=1)\n", (5888, 6054), False, 'import frappe\n'), ((6211, 6280), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Shopping Cart Settings"""', 'None', '"""price_list"""'], {}), "('Shopping Cart Settings', None, 'price_list')\n", (6234, 6280), False, 'import frappe\n'), ((6895, 6964), 'erpnext.shopping_cart.product_info.get_product_info_for_website', 'get_product_info_for_website', (['item_code'], {'skip_quotation_creation': '(True)'}), '(item_code, skip_quotation_creation=True)\n', (6923, 6964), False, 'from erpnext.shopping_cart.product_info import get_product_info_for_website\n'), ((7923, 7979), 
'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Item"""', 'item_code', '"""variant_of"""'], {}), "('Item', item_code, 'variant_of')\n", (7946, 7979), False, 'import frappe\n'), ((8633, 8689), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Item"""', 'item_code', '"""item_group"""'], {}), "('Item', item_code, 'item_group')\n", (8656, 8689), False, 'import frappe\n'), ((9111, 9131), 'toolz.curried.groupby', 'groupby', (['"""item_code"""'], {}), "('item_code')\n", (9118, 9131), False, 'from toolz.curried import compose, merge, unique, concat, valmap, groupby, first, excepts, keyfilter, map, filter\n'), ((11119, 11185), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Shopping Cart Settings"""', '"""price_list"""'], {}), "('Shopping Cart Settings', 'price_list')\n", (11145, 11185), False, 'import frappe\n'), ((11210, 11278), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Products Settings"""', '"""products_per_page"""'], {}), "('Products Settings', 'products_per_page')\n", (11236, 11278), False, 'import frappe\n'), ((11305, 11729), 'frappe.db.sql', 'frappe.db.sql', (['"""\n SELECT\n name, item_name, item_group, route, has_variants,\n thumbnail, image, website_image,\n description, web_long_description\n FROM `tabItem`\n WHERE show_in_website = 1\n ORDER BY modified DESC\n LIMIT %(products_per_page)s\n """'], {'values': "{'products_per_page': products_per_page}", 'as_dict': '(1)'}), '(\n """\n SELECT\n name, item_name, item_group, route, has_variants,\n thumbnail, image, website_image,\n description, web_long_description\n FROM `tabItem`\n WHERE show_in_website = 1\n ORDER BY modified DESC\n LIMIT %(products_per_page)s\n """\n , values={\'products_per_page\': products_per_page}, as_dict=1)\n', (11318, 11729), False, 'import frappe\n'), ((12392, 12421), 'frappe.get_single', 'frappe.get_single', (['"""Homepage"""'], {}), "('Homepage')\n", (12409, 12421), False, 'import frappe\n'), ((12489, 12555), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Shopping Cart Settings"""', '"""price_list"""'], {}), "('Shopping Cart Settings', 'price_list')\n", (12515, 12555), False, 'import frappe\n'), ((12568, 12991), 'frappe.db.sql', 'frappe.db.sql', (['"""\n SELECT\n name, item_name, item_group, route, has_variants,\n thumbnail, image, website_image,\n description, web_long_description\n FROM `tabItem`\n WHERE show_in_website = 1 AND name IN %(featured)s\n ORDER BY modified DESC\n """'], {'values': "{'featured': [x.item_code for x in homepage.products]}", 'as_dict': '(1)'}), '(\n """\n SELECT\n name, item_name, item_group, route, has_variants,\n thumbnail, image, website_image,\n description, web_long_description\n FROM `tabItem`\n WHERE show_in_website = 1 AND name IN %(featured)s\n ORDER BY modified DESC\n """\n , values={\'featured\': [x.item_code for x in homepage.products]}, as_dict=1)\n', (12581, 12991), False, 'import frappe\n'), ((13812, 13884), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Ahong eCommerce Settings"""', 'None', '"""webapp_user"""'], {}), "('Ahong eCommerce Settings', None, 'webapp_user')\n", (13835, 13884), False, 'import frappe\n'), ((13985, 14013), 'frappe.set_user', 'frappe.set_user', (['webapp_user'], {}), '(webapp_user)\n', (14000, 14013), False, 'import frappe\n'), ((14028, 14089), 'erpnext.portal.product_configurator.utils.get_next_attribute_and_values', 'get_next_attribute_and_values', (['item_code', 'selected_attributes'], {}), '(item_code, selected_attributes)\n', (14057, 14089), False, 
'from erpnext.portal.product_configurator.utils import get_next_attribute_and_values\n'), ((14095, 14124), 'frappe.set_user', 'frappe.set_user', (['session_user'], {}), '(session_user)\n', (14110, 14124), False, 'import frappe\n'), ((1150, 1165), 'toolz.curried.groupby', 'groupby', (['"""name"""'], {}), "('name')\n", (1157, 1165), False, 'from toolz.curried import compose, merge, unique, concat, valmap, groupby, first, excepts, keyfilter, map, filter\n'), ((2750, 2794), 'frappe.utils.ceil', 'frappe.utils.ceil', (['(count / products_per_page)'], {}), '(count / products_per_page)\n', (2767, 2794), False, 'import frappe\n'), ((2959, 2982), 'frappe.get_meta', 'frappe.get_meta', (['"""Item"""'], {}), "('Item')\n", (2974, 2982), False, 'import frappe\n'), ((3967, 4013), 'erpnext.portal.product_configurator.utils.get_conditions', 'get_conditions', (["[['Item', 'disabled', '=', 0]]"], {}), "([['Item', 'disabled', '=', 0]])\n", (3981, 4013), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((4181, 4289), 'erpnext.portal.product_configurator.utils.get_conditions', 'get_conditions', (["[['Item', 'show_in_website', '=', 1], ['Item', 'show_variant_in_website',\n '=', 1]]", '"""or"""'], {}), "([['Item', 'show_in_website', '=', 1], ['Item',\n 'show_variant_in_website', '=', 1]], 'or')\n", (4195, 4289), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((4464, 4487), 'frappe.get_meta', 'frappe.get_meta', (['"""Item"""'], {}), "('Item')\n", (4479, 4487), False, 'import frappe\n'), ((4644, 4736), 'erpnext.portal.product_configurator.utils.get_conditions', 'get_conditions', (["[['Item', field, 'like', '%(search)s'] for field in search_fields]", '"""or"""'], {}), "([['Item', field, 'like', '%(search)s'] for field in\n search_fields], 'or')\n", (4658, 4736), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((6648, 6720), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Ahong eCommerce Settings"""', 'None', '"""webapp_user"""'], {}), "('Ahong eCommerce Settings', None, 'webapp_user')\n", (6671, 6720), False, 'import frappe\n'), ((7466, 7572), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Item"""', 'name', "['thumbnail', 'image', 'website_image', 'slideshow']"], {'as_dict': '(1)'}), "('Item', name, ['thumbnail', 'image',\n 'website_image', 'slideshow'], as_dict=1)\n", (7489, 7572), False, 'import frappe\n'), ((7728, 7781), 'frappe.get_cached_doc', 'frappe.get_cached_doc', (['"""Website Slideshow"""', 'slideshow'], {}), "('Website Slideshow', slideshow)\n", (7749, 7781), False, 'import frappe\n'), ((8885, 8904), 'html.unescape', 'html.unescape', (['name'], {}), '(name)\n', (8898, 8904), False, 'import html\n'), ((9061, 9104), 'toolz.curried.excepts', 'excepts', (['StopIteration', 'first', '(lambda _: {})'], {}), '(StopIteration, first, lambda _: {})\n', (9068, 9104), False, 'from toolz.curried import compose, merge, unique, concat, valmap, groupby, first, excepts, keyfilter, map, filter\n'), ((10834, 10903), 'toolz.curried.merge', 'merge', (['field_dict', "({'item_group': item_groups} if item_groups else {})"], {}), "(field_dict, {'item_group': item_groups} if item_groups else {})\n", (10839, 10903), False, 'from toolz.curried import compose, merge, unique, 
concat, valmap, groupby, first, excepts, keyfilter, map, filter\n'), ((10956, 10992), 'frappe.parse_json', 'frappe.parse_json', (['attribute_filters'], {}), '(attribute_filters)\n', (10973, 10992), False, 'import frappe\n'), ((1096, 1139), 'toolz.curried.excepts', 'excepts', (['StopIteration', 'first', '(lambda _: {})'], {}), '(StopIteration, first, lambda _: {})\n', (1103, 1139), False, 'from toolz.curried import compose, merge, unique, concat, valmap, groupby, first, excepts, keyfilter, map, filter\n'), ((1621, 1644), 'frappe.utils.cint', 'frappe.utils.cint', (['page'], {}), '(page)\n', (1638, 1644), False, 'import frappe\n'), ((4112, 4165), 'erpnext.portal.product_configurator.utils.get_conditions', 'get_conditions', (["[['Item', 'show_in_website', '=', 1]]"], {}), "([['Item', 'show_in_website', '=', 1]])\n", (4126, 4165), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((5807, 5852), 'frappe._', 'frappe._', (['"""Item does not exist at this route"""'], {}), "('Item does not exist at this route')\n", (5815, 5852), False, 'import frappe\n'), ((6824, 6869), 'frappe._', 'frappe._', (['"""Item does not exist at this route"""'], {}), "('Item does not exist at this route')\n", (6832, 6869), False, 'import frappe\n'), ((8568, 8613), 'frappe._', 'frappe._', (['"""Item does not exist at this route"""'], {}), "('Item does not exist at this route')\n", (8576, 8613), False, 'import frappe\n'), ((10544, 10576), 'frappe.parse_json', 'frappe.parse_json', (['field_filters'], {}), '(field_filters)\n', (10561, 10576), False, 'import frappe\n'), ((13944, 13979), 'frappe._', 'frappe._', (['"""Site setup not complete"""'], {}), "('Site setup not complete')\n", (13952, 13979), False, 'import frappe\n'), ((2243, 2261), 'cm_custom.api.utils.transform_route', 'transform_route', (['x'], {}), '(x)\n', (2258, 2261), False, 'from cm_custom.api.utils import handle_error, transform_route\n'), ((3151, 3178), 'frappe.get_meta', 'frappe.get_meta', (['df.options'], {}), '(df.options)\n', (3166, 3178), False, 'import frappe\n'), ((12065, 12083), 'cm_custom.api.utils.transform_route', 'transform_route', (['x'], {}), '(x)\n', (12080, 12083), False, 'from cm_custom.api.utils import handle_error, transform_route\n'), ((13327, 13345), 'cm_custom.api.utils.transform_route', 'transform_route', (['x'], {}), '(x)\n', (13342, 13345), False, 'from cm_custom.api.utils import handle_error, transform_route\n'), ((14289, 14356), 'frappe.db.get_single_value', 'frappe.db.get_single_value', (['"""Ahong eCommerce Settings"""', '"""warehouse"""'], {}), "('Ahong eCommerce Settings', 'warehouse')\n", (14315, 14356), False, 'import frappe\n'), ((4935, 4972), 'erpnext.portal.product_configurator.utils.get_conditions', 'get_conditions', (['_field_filters', '"""and"""'], {}), "(_field_filters, 'and')\n", (4949, 4972), False, 'from erpnext.portal.product_configurator.utils import get_products_for_website, get_product_settings, get_item_codes_by_attributes, get_conditions\n'), ((9706, 9773), 'frappe.get_cached_value', 'frappe.get_cached_value', (['"""Selling Settings"""', 'None', '"""customer_group"""'], {}), "('Selling Settings', None, 'customer_group')\n", (9729, 9773), False, 'import frappe\n'), ((9837, 9882), 'frappe.defaults.get_global_default', 'frappe.defaults.get_global_default', (['"""company"""'], {}), "('company')\n", (9871, 9882), False, 'import frappe\n'), ((10463, 10495), 
'erpnext.accounts.doctype.sales_invoice.pos.get_child_nodes', 'get_child_nodes', (['"""Item Group"""', 'x'], {}), "('Item Group', x)\n", (10478, 10495), False, 'from erpnext.accounts.doctype.sales_invoice.pos import get_child_nodes\n')]
|
"""Module for SHA1 hashing algorithm"""
__all__ = ['SHA1HashingAlgorithm']
import hashlib
from entities.hashing_algorithms import IHashingAlgorithm
class SHA1HashingAlgorithm(IHashingAlgorithm):
"""SHA1 hashing algorithm
Attributes:
name: Algorithm name
bits: Amount of checksum bits
is_secure: Can algorithm be used for securing purposes
"""
name: str = "sha1"
bits: int = 160
is_secure: bool = False
def hash(self, data: str) -> str:
"""Hash data with SHA1 hashing algorithm
Args:
data: Data to hash
Returns:
Checksum
"""
encoded_data: bytes = data.encode('utf-8')
return hashlib.sha1(encoded_data).hexdigest()
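
# Quick sanity check against the standard SHA-1 test vector:
#   SHA1HashingAlgorithm().hash('abc') == 'a9993e364706816aba3e25717850c26c9cd0d89d'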
|
[
"hashlib.sha1"
] |
[((709, 735), 'hashlib.sha1', 'hashlib.sha1', (['encoded_data'], {}), '(encoded_data)\n', (721, 735), False, 'import hashlib\n')]
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: import_workload_src_check
short_description: Export OpenStack instance information
extends_documentation_fragment: openstack
version_added: "2.9.0"
author: "OpenStack tenant migration tools (@os-migrate)"
description:
- "Check OpenStack workload in source cloud"
options:
auth:
description:
- Dictionary with parameters for chosen auth type.
required: true
type: dict
auth_type:
description:
- Auth type plugin for OpenStack. Can be omitted if using password authentication.
required: false
type: str
validate_certs:
description:
- Validate HTTPS certificates when logging in to OpenStack.
required: false
type: bool
region_name:
description:
- OpenStack region name. Can be omitted if using default region.
required: false
type: str
name:
description:
- Name (or ID) of an instance to check.
required: true
type: str
availability_zone:
description:
- Availability zone.
required: false
type: str
cloud:
description:
- Ignored. Present for backwards compatibility.
required: false
type: raw
'''
EXAMPLES = '''
- name: ensure workload in source cloud is ready to continue
os_migrate.os_migrate.import_workload_src_check:
auth: "{{ os_migrate_src_auth }}"
auth_type: "{{ os_migrate_src_auth_type|default(omit) }}"
region_name: "{{ os_migrate_src_region_name|default(omit) }}"
validate_certs: "{{ os_migrate_src_validate_certs|default(omit) }}"
ca_cert: "{{ os_migrate_src_ca_cert|default(omit) }}"
client_cert: "{{ os_migrate_src_client_cert|default(omit) }}"
client_key: "{{ os_migrate_src_client_key|default(omit) }}"
name: migration-vm
when: prelim.changed
'''
RETURN = '''
'''
from ansible.module_utils.basic import AnsibleModule
# Import openstack module utils from ansible_collections.openstack.cloud.plugins as per ansible 3+
try:
from ansible_collections.openstack.cloud.plugins.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
except ImportError:
# If this fails fall back to ansible < 3 imports
from ansible.module_utils.openstack \
import openstack_full_argument_spec, openstack_cloud_from_module
from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server
def run_module():
argument_spec = openstack_full_argument_spec(
auth=dict(type='dict', no_log=True, required=True),
name=dict(type='str', required=True),
)
# TODO: check the del
# del argument_spec['cloud']
result = dict(
changed=False,
)
module = AnsibleModule(
argument_spec=argument_spec,
# TODO: Consider check mode. We'd fetch the resource and check
# if the file representation matches it.
# supports_check_mode=True,
)
sdk, conn = openstack_cloud_from_module(module)
sdk_server_nodetails = conn.compute.find_server(module.params['name'], ignore_missing=False)
sdk_server = conn.compute.get_server(sdk_server_nodetails['id'])
srv = server.Server.from_sdk(conn, sdk_server)
params, info = srv.params_and_info()
result['server_name'] = params['name']
# Checks
# below this area add a block for each check required on a source workload
# prior to migration. If the check fails, exit the module with a
# descriptive message of why the check failed.
# Status Check
#: The state this server is in. Valid values include ``ACTIVE``,
#: ``BUILDING``, ``DELETED``, ``ERROR``, ``HARD_REBOOT``, ``PASSWORD``,
#: ``PAUSED``, ``REBOOT``, ``REBUILD``, ``RESCUED``, ``RESIZED``,
#: ``REVERT_RESIZE``, ``SHUTOFF``, ``SOFT_DELETED``, ``STOPPED``,
#: ``SUSPENDED``, ``UNKNOWN``, or ``VERIFY_RESIZE``.
# Make sure source instance is shutdown before proceeding.
if info['status'] != 'SHUTOFF':
msg = "Cannot migrate instance {} because it is not in state SHUTOFF! Currently in state {}."
module.fail_json(msg=msg.format(params['name'], info['status']), **result)
module.exit_json(**result)
def main():
run_module()
if __name__ == '__main__':
main()
|
[
"ansible.module_utils.basic.AnsibleModule",
"ansible.module_utils.openstack.openstack_cloud_from_module",
"ansible_collections.os_migrate.os_migrate.plugins.module_utils.server.Server.from_sdk"
] |
[((2904, 2946), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'argument_spec'}), '(argument_spec=argument_spec)\n', (2917, 2946), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((3135, 3170), 'ansible.module_utils.openstack.openstack_cloud_from_module', 'openstack_cloud_from_module', (['module'], {}), '(module)\n', (3162, 3170), False, 'from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_cloud_from_module\n'), ((3347, 3387), 'ansible_collections.os_migrate.os_migrate.plugins.module_utils.server.Server.from_sdk', 'server.Server.from_sdk', (['conn', 'sdk_server'], {}), '(conn, sdk_server)\n', (3369, 3387), False, 'from ansible_collections.os_migrate.os_migrate.plugins.module_utils import server\n')]
|
# Generated by Django 3.1 on 2020-08-27 06:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Share', '0003_auto_20200827_1134'),
]
operations = [
migrations.RemoveField(
model_name='group',
name='uid',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((223, 277), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""group"""', 'name': '"""uid"""'}), "(model_name='group', name='uid')\n", (245, 277), False, 'from django.db import migrations\n')]
|
from __future__ import print_function
import pyxb.bundles.opengis.gml as gml
dv = gml.DegreesType(32, direction='N')
print(dv.toDOM(element_name='degrees').toxml("utf-8"))
|
[
"pyxb.bundles.opengis.gml.DegreesType"
] |
[((82, 116), 'pyxb.bundles.opengis.gml.DegreesType', 'gml.DegreesType', (['(32)'], {'direction': '"""N"""'}), "(32, direction='N')\n", (97, 116), True, 'import pyxb.bundles.opengis.gml as gml\n')]
|
import colortrans, sys, arcpy
from GeMS_utilityFunctions import *
gdb = sys.argv[1]
dmu = gdb+'/DescriptionOfMapUnits'
fields = ('Symbol','AreaFillRGB')
with arcpy.da.UpdateCursor(dmu, fields) as cursor:
for row in cursor:
        if row[0] is not None:
try:
rgb = colortrans.wpg2rgb(row[0])
r,g,b = rgb.split(',')
rr = r.zfill(3)
gg = g.zfill(3)
bb = b.zfill(3)
rrggbb = rr+','+gg+','+bb
addMsgAndPrint(str(row)+', '+rgb+', '+rrggbb)
cursor.updateRow([row[0],rrggbb])
            except Exception:
addMsgAndPrint('Symbol = '+str(row[0])+': failed to assign RGB value')
else:
addMsgAndPrint('No Symbol value')
|
[
"colortrans.wpg2rgb",
"arcpy.da.UpdateCursor"
] |
[((162, 196), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['dmu', 'fields'], {}), '(dmu, fields)\n', (183, 196), False, 'import colortrans, sys, arcpy\n'), ((297, 323), 'colortrans.wpg2rgb', 'colortrans.wpg2rgb', (['row[0]'], {}), '(row[0])\n', (315, 323), False, 'import colortrans, sys, arcpy\n')]
|
from django.contrib import admin
from taggit.models import Tag, TaggedItem, TagTransform
class TaggedItemInline(admin.StackedInline):
model = TaggedItem
extra = 0
class TagAdmin(admin.ModelAdmin):
inlines = [
TaggedItemInline
]
ordering = ['name']
search_fields = ['name']
class TagTransformAdmin(admin.ModelAdmin):
model = TagTransform
    ordering = ('rule',)
search_fields = ('name',)
list_per_page = 50
list_display = ('type', 'rule', 'transform')
admin.site.register(Tag, TagAdmin)
admin.site.register(TagTransform, TagTransformAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((503, 537), 'django.contrib.admin.site.register', 'admin.site.register', (['Tag', 'TagAdmin'], {}), '(Tag, TagAdmin)\n', (522, 537), False, 'from django.contrib import admin\n'), ((538, 590), 'django.contrib.admin.site.register', 'admin.site.register', (['TagTransform', 'TagTransformAdmin'], {}), '(TagTransform, TagTransformAdmin)\n', (557, 590), False, 'from django.contrib import admin\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 12 11:39:56 2019
@author: autol
"""
#%%
import numpy as np
import time
from gfun import StepClass,ConvClass,JClass,Hessian
from gupdate import UpdateClass
#%% method function
#@accepts(w=np.ndarray)
def gradient_descent_f(var,
X=0,y=0,w=0,n_iters=1,n_b=10,
sgd=0,method='mm10',isStep=0,
trace=1,doplot=1,ŋ=0,ŋ_a=1,skipConv=1,
**kwargs):
records = []
# Shuffle X,y
# r_index = np.random.RandomState(seed=43).permutation(len(y))
# X1 = X[r_index,:]
# w = var.w
# y1 = y[r_index]
time1 = time.time()
He = Hessian(var)
var.set(dict(A=He.A_(),H=He.H_()))
Jc = JClass(var,method)
var.set(dict(gJ=Jc.gJ,J=Jc.Loss,e0=Jc.Loss(w)))
var.set(dict(θ=w.copy(),
m=np.zeros(len(w)),v=np.zeros(len(w)),
t=1,))
Uc = UpdateClass(var)
Cc = ConvClass(var)
Sc = StepClass(var)
if isStep : #and not method in ['mm52','mm26']
ŋ = Sc.armijo_i(w,ŋ_a)
e1 = var.J(w)
ratio = 0
n_w,n_y=len(w),len(y)
records.append([-1,w.copy(),e1,ratio])
for i in range(n_iters):
if sgd == 0:
#if isStep : #and not method in ['mm52','mm26']
# ŋ = Sc.armijo_i(w,ŋ_a)
w = Uc.update_w(w,ŋ=ŋ,i=i)
# w += -ŋ*2./len(y)*X.T.dot(X.dot(w)-y)
e1 = var.J(w)
# e1 = np.mean((X.dot(w)-y)**2)
isConv,ratio = Cc.Conv(w,e1,ŋ,skipConv)
elif sgd == 1:
bb = range(0,n_y,n_b)
ws = np.zeros(n_w)
e1s = 0
for k in bb:
X_b = X[k:k + n_b]
y_b = y[k:k + n_b]
# print('each batch:',len(y_b))
                if len(y_b) == 0: break  # no data left in this batch, stop early
w = Uc.update_w(w,ŋ=ŋ,i=i,X=X_b,y=y_b)
e1s += var.J(w)
ws += w
e1 = e1s/len(bb)
w = ws/len(bb)
isConv,ratio = Cc.Conv(w,e1,ŋ,skipConv)
else:
print('None...');return None
records.append([i,w.copy(),e1,ratio])
ret = dict(ik=i,w=w,e1=e1,ratio=ratio)
# print(ret)
if isConv>0:break
# if trace:pass
print('last: \n',ret)
if not doplot: print('There\'s no method:',method)
time2 = time.time()
print('All Running time: %s Seconds'%(time2-time1))
rets = dict(wh=np.stack(records),finals=ret,method=method)
return rets
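# Each row stacked into rets['wh'] is [iteration, weights, loss, convergence ratio].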
#%%
|
[
"numpy.stack",
"gfun.JClass",
"gupdate.UpdateClass",
"gfun.ConvClass",
"numpy.zeros",
"time.time",
"gfun.Hessian",
"gfun.StepClass"
] |
[((651, 662), 'time.time', 'time.time', ([], {}), '()\n', (660, 662), False, 'import time\n'), ((673, 685), 'gfun.Hessian', 'Hessian', (['var'], {}), '(var)\n', (680, 685), False, 'from gfun import StepClass, ConvClass, JClass, Hessian\n'), ((735, 754), 'gfun.JClass', 'JClass', (['var', 'method'], {}), '(var, method)\n', (741, 754), False, 'from gfun import StepClass, ConvClass, JClass, Hessian\n'), ((924, 940), 'gupdate.UpdateClass', 'UpdateClass', (['var'], {}), '(var)\n', (935, 940), False, 'from gupdate import UpdateClass\n'), ((950, 964), 'gfun.ConvClass', 'ConvClass', (['var'], {}), '(var)\n', (959, 964), False, 'from gfun import StepClass, ConvClass, JClass, Hessian\n'), ((974, 988), 'gfun.StepClass', 'StepClass', (['var'], {}), '(var)\n', (983, 988), False, 'from gfun import StepClass, ConvClass, JClass, Hessian\n'), ((2361, 2372), 'time.time', 'time.time', ([], {}), '()\n', (2370, 2372), False, 'import time\n'), ((2448, 2465), 'numpy.stack', 'np.stack', (['records'], {}), '(records)\n', (2456, 2465), True, 'import numpy as np\n'), ((1608, 1621), 'numpy.zeros', 'np.zeros', (['n_w'], {}), '(n_w)\n', (1616, 1621), True, 'import numpy as np\n')]
|
from random import randint
def sumNum(x):
x -= 1
if x == 1:
return x
x1 = sumNum(x)
x = x * x1
return x
def coinFlip(n):
n -= 1
if n == 0:
return n
n = coinFlip(n)
n += randint(0,1)
return n
def main():
y = 0
#x = int(input("Number: "))
x = 10
y = coinFlip(x + 1)
print(y)
main()
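
# Worked traces of the recursions above: sumNum(5) -> 4 * 3 * 2 * 1 = 24,
# i.e. (x - 1)! for the original argument x, and coinFlip(n) -> the number of
# heads seen in n - 1 simulated fair coin flips.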
|
[
"random.randint"
] |
[((238, 251), 'random.randint', 'randint', (['(0)', '(1)'], {}), '(0, 1)\n', (245, 251), False, 'from random import randint\n')]
|
import logging
import multiprocessing
import select
import socketserver
import struct
import shared.config as config
from rpi.network.messagehandler import MessageHandler
class NetworkError(Exception):
pass
class NetworkReadingTimeoutError(Exception):
pass
class NetworkWritingTimeoutError(Exception):
pass
message_handler = None
class RequestHandler(socketserver.StreamRequestHandler):
"""
Handles an incoming request. It expects the request to have a header of type
unsigned long indicating the size of the following body. Body should be in JSON with
utf-8 encoding. If the message can successfully be parsed (but not necessarily processed),
a "OK" will be sent back to the client. If there is an error while parsing the message,
the connection will be closed without anything being sent.
"""
def read_chunk(self, size):
buf = b""
while len(buf) != size:
read, _, _ = select.select([self.request], [], [],
1) # Make sure we can read from client. If not, we wait up to 1 sec before timing out
if len(read) == 0:
raise NetworkReadingTimeoutError()
data = read[0].recv(size - len(buf))
if not data:
raise NetworkError()
buf += data
return buf
def send_data(self, data):
_, write, _ = select.select([], [self.request], [],
1) # Make sure we can write to client. If not, we wait up to 1 sec before timing out
if len(write) == 0:
raise NetworkWritingTimeoutError()
write[0].sendall(data)
def handle(self):
logger = logging.getLogger(__name__)
self.request.setblocking(0)
try:
            header = self.read_chunk(struct.calcsize("!L"))  # 4 bytes, matching the "!L" unpack below
bodySize = struct.unpack("!L", header)[0]
body = self.read_chunk(bodySize).decode("utf-8")
logger.debug("Received {} from {}".format(body, self.client_address))
self.send_data("OK".encode("utf-8"))
except NetworkReadingTimeoutError:
logger.error("Timed out while reading from client")
except NetworkWritingTimeoutError:
logger.error("Timed out while writing to client")
except NetworkError:
logger.error("Error while reading from client. Is the message in the correct format?")
except:
logger.exception("Major error while handling client connection")
else:
try:
# noinspection PyUnresolvedReferences
message_handler.process_message(body, self.client_address)
except Exception as e:
logger.error(f"Error processing client message: {e}")
class Server(multiprocessing.Process):
def run(self):
"""
Starts a server to listen and handle incoming requests. This will run until the heat
death of the universe, or until the program is interrupted, whichever comes first.
"""
logger = logging.getLogger(__name__)
logger.info("Starting server and listening to incoming connections")
RPIConfig = config.get_config('rpi')
socketserver.TCPServer.allow_reuse_address = True
with socketserver.TCPServer((RPIConfig['rpi_listening_ip'], RPIConfig.getint('rpi_port')),
RequestHandler) as server:
global message_handler
message_handler = MessageHandler()
server.serve_forever()
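
# A minimal client sketch matching the framing handled above (hypothetical
# helper, not part of the original module): length-prefixed UTF-8 JSON body,
# then read back the "OK" acknowledgement.
def _example_client(host, port, payload):
    import json
    import socket
    body = json.dumps(payload).encode("utf-8")
    header = struct.pack("!L", len(body))  # network-order unsigned long
    with socket.create_connection((host, port), timeout=1) as sock:
        sock.sendall(header + body)
        return sock.recv(2).decode("utf-8")  # server replies "OK"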
|
[
"rpi.network.messagehandler.MessageHandler",
"struct.unpack",
"struct.calcsize",
"shared.config.get_config",
"select.select",
"logging.getLogger"
] |
[((1413, 1453), 'select.select', 'select.select', (['[]', '[self.request]', '[]', '(1)'], {}), '([], [self.request], [], 1)\n', (1426, 1453), False, 'import select\n'), ((1721, 1748), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1738, 1748), False, 'import logging\n'), ((3089, 3116), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (3106, 3116), False, 'import logging\n'), ((3215, 3239), 'shared.config.get_config', 'config.get_config', (['"""rpi"""'], {}), "('rpi')\n", (3232, 3239), True, 'import shared.config as config\n'), ((955, 995), 'select.select', 'select.select', (['[self.request]', '[]', '[]', '(1)'], {}), '([self.request], [], [], 1)\n', (968, 995), False, 'import select\n'), ((3525, 3541), 'rpi.network.messagehandler.MessageHandler', 'MessageHandler', ([], {}), '()\n', (3539, 3541), False, 'from rpi.network.messagehandler import MessageHandler\n'), ((1836, 1856), 'struct.calcsize', 'struct.calcsize', (['"""L"""'], {}), "('L')\n", (1851, 1856), False, 'import struct\n'), ((1881, 1908), 'struct.unpack', 'struct.unpack', (['"""!L"""', 'header'], {}), "('!L', header)\n", (1894, 1908), False, 'import struct\n')]
|
"""Run inline python code when beets events are fired."""
from __future__ import division, absolute_import, print_function
import ast
import confuse
from beets.plugins import BeetsPlugin
def _syntaxerror_offset(value, lineoffset):
"""Adjust the line number in a SyntaxError exception."""
if lineoffset:
msg, (efname, elineno, eoffset, badline) = value.args
value.args = (msg, (efname, elineno + lineoffset, eoffset, badline))
value.lineno = elineno + lineoffset
def compile_offset(source, filename='<string>', lineoffset=0):
"""Compile the python source and adjust its line numbers by lineoffset."""
try:
compiled = compile(source, filename, 'exec', ast.PyCF_ONLY_AST)
except SyntaxError as exc:
_syntaxerror_offset(exc, lineoffset)
raise
if lineoffset:
ast.increment_lineno(compiled, lineoffset)
return compile(compiled, filename, 'exec', dont_inherit=True)
def compile_func(source, name, argspec='', filename='<string>', lineoffset=0,
env=None):
"""Compile the python source, wrapped in a function definition."""
# Adjust for 'def' line
lineoffset -= 1
code = source.rstrip().replace('\t', ' ')
lines = (' ' + line for line in code.split('\n'))
code = '\n'.join(lines)
defined = 'def {name}({argspec}):\n{body}'.format(name=name,
argspec=argspec,
body=code)
compiled = compile_offset(defined, filename, lineoffset)
if env is None:
env = {}
tmpenv = {}
exec(compiled, env, tmpenv)
return eval(name, env, tmpenv)
class InlineHookPlugin(BeetsPlugin):
"""Run inline python code when beets events are fired."""
argspecs = {
'after_write': 'item, path',
'album_imported': 'lib, album',
'albuminfo_received': 'info',
'art_set': 'album',
'before_item_moved': 'item, source',
'cli_exit': 'lib',
'database_change': 'lib, model',
'import': 'lib, paths',
'import_begin': 'session',
'import_task_apply': 'session, task',
'import_task_choice': 'session, task',
'import_task_created': 'session, task',
'import_task_files': 'session, task',
'import_task_start': 'session, task',
'item_copied': 'item, source',
'item_hardlinked': 'item, source',
'item_imported': 'lib, item',
'item_linked': 'item, source',
'item_moved': 'item, source',
'item_removed': 'item',
'library_opened': 'lib',
'trackinfo_received': 'info',
'write': 'item, path, tags',
}
def __init__(self):
super(InlineHookPlugin, self).__init__()
self.config.add({
'hooks': [],
'argspecs': {}
})
self.argspecs = dict(InlineHookPlugin.argspecs)
self.argspecs.update(self.config['argspecs'].get())
inline_hooks = self.config['hooks'].get(list)
for hook_index in range(len(inline_hooks)):
hook = self.config['hooks'][hook_index]
event = hook['event'].as_str()
if event not in self.argspecs:
raise confuse.ConfigError('inline_hook.hooks[{0}].event: `{1}` is not a handled event'.format(hook_index, event))
handler = hook['handler'].as_str()
function = compile_func(handler, 'inline_hook_' + event, self.argspecs.get(event) or '')
self.register_listener(event, function)
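
# A hedged configuration sketch (the `inline_hook` key and the
# event/handler fields follow the plugin above; the handler body is
# hypothetical) showing how a hook would be declared in beets' config.yaml:
#
#   inline_hook:
#     hooks:
#       - event: album_imported
#         handler: |
#           print(u'imported: {}'.format(album))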
|
[
"ast.increment_lineno"
] |
[((842, 884), 'ast.increment_lineno', 'ast.increment_lineno', (['compiled', 'lineoffset'], {}), '(compiled, lineoffset)\n', (862, 884), False, 'import ast\n')]
|
#!/usr/bin/python
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import common.storage_helpers as storage_helpers
import common.image_helpers as image_helpers
def correct_form(form, vision_key, vision_region):
# Get form data
form_data = image_helpers.get_form_data(form, vision_key, vision_region)
# Fix orientation
if form_data:
angle_to_fix = form_data['orientation']
logging.info("Fixing orientation of %d"%angle_to_fix)
corrected_form = image_helpers.rotate_image(form, angle_to_fix, form_data['width'], form_data['height'])
return corrected_form
return None
def create_response_single(storage_name, storage_key, vision_key, vision_region, form_path, output_form_path):
# get original form
blob_service = storage_helpers.create_blob_service(storage_name, storage_key)
path = form_path.split('/')
blob_name = path[1]
container_name = path[0]
blob = storage_helpers.get_blob(blob_service, container_name, blob_name)
form = image_helpers.blob_to_image(blob)
if form:
# correct form and save
corrected_form = correct_form(form, vision_key, vision_region)
if corrected_form:
output_path = output_form_path.split('/')
output_name = output_path[1]
output_container = output_path[0]
storage_helpers.upload_blob(corrected_form, blob_service, output_name, output_container)
# Create json response
response = {
"name": blob_name,
"output_path": output_form_path
}
else:
response = {
"name": blob_name,
"status":"failed"
}
return response
else:
logging.error("Could not create response.")
return None
def create_response_batch(storage_name, storage_key, vision_key, vision_region, container_name, output_container=''):
blob_service = storage_helpers.create_blob_service(storage_name, storage_key)
generator = storage_helpers.list_blobs(blob_service, container_name)
corrected_forms = []
if(generator != None):
for blob in generator:
# get form
form = image_helpers.blob_to_image(storage_helpers.get_blob(blob_service, container_name, blob.name))
if(form != None):
# correct form and save
output_name = "corrected_" + blob.name
output_path = output_container + "/" + output_name
corrected_form = correct_form(form, vision_key, vision_region)
if(corrected_form != None):
storage_helpers.upload_blob(corrected_form, blob_service, output_name, output_container)
# create json
corrected_form_json = {
"name": blob.name,
"outputPath": output_path
}
else:
corrected_form_json = {
"name": blob.name,
"status": "failed"
}
corrected_forms.append(corrected_form_json)
else:
logging.error("Error creating response.")
# Create final json response
response = {
"correctedForms": corrected_forms
}
return response
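
# Shape of the batch response assembled above (names illustrative):
#   {"correctedForms": [{"name": "a.png", "outputPath": "out/corrected_a.png"},
#                       {"name": "b.png", "status": "failed"}]}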
|
[
"logging.error",
"common.storage_helpers.create_blob_service",
"common.storage_helpers.list_blobs",
"common.image_helpers.get_form_data",
"common.image_helpers.rotate_image",
"logging.info",
"common.storage_helpers.get_blob",
"common.storage_helpers.upload_blob",
"common.image_helpers.blob_to_image"
] |
[((312, 372), 'common.image_helpers.get_form_data', 'image_helpers.get_form_data', (['form', 'vision_key', 'vision_region'], {}), '(form, vision_key, vision_region)\n', (339, 372), True, 'import common.image_helpers as image_helpers\n'), ((844, 906), 'common.storage_helpers.create_blob_service', 'storage_helpers.create_blob_service', (['storage_name', 'storage_key'], {}), '(storage_name, storage_key)\n', (879, 906), True, 'import common.storage_helpers as storage_helpers\n'), ((1003, 1068), 'common.storage_helpers.get_blob', 'storage_helpers.get_blob', (['blob_service', 'container_name', 'blob_name'], {}), '(blob_service, container_name, blob_name)\n', (1027, 1068), True, 'import common.storage_helpers as storage_helpers\n'), ((1080, 1113), 'common.image_helpers.blob_to_image', 'image_helpers.blob_to_image', (['blob'], {}), '(blob)\n', (1107, 1113), True, 'import common.image_helpers as image_helpers\n'), ((2035, 2097), 'common.storage_helpers.create_blob_service', 'storage_helpers.create_blob_service', (['storage_name', 'storage_key'], {}), '(storage_name, storage_key)\n', (2070, 2097), True, 'import common.storage_helpers as storage_helpers\n'), ((2114, 2170), 'common.storage_helpers.list_blobs', 'storage_helpers.list_blobs', (['blob_service', 'container_name'], {}), '(blob_service, container_name)\n', (2140, 2170), True, 'import common.storage_helpers as storage_helpers\n'), ((470, 525), 'logging.info', 'logging.info', (["('Fixing orientation of %d' % angle_to_fix)"], {}), "('Fixing orientation of %d' % angle_to_fix)\n", (482, 525), False, 'import logging\n'), ((549, 640), 'common.image_helpers.rotate_image', 'image_helpers.rotate_image', (['form', 'angle_to_fix', "form_data['width']", "form_data['height']"], {}), "(form, angle_to_fix, form_data['width'],\n form_data['height'])\n", (575, 640), True, 'import common.image_helpers as image_helpers\n'), ((1828, 1871), 'logging.error', 'logging.error', (['"""Could not create response."""'], {}), "('Could not create response.')\n", (1841, 1871), False, 'import logging\n'), ((1413, 1505), 'common.storage_helpers.upload_blob', 'storage_helpers.upload_blob', (['corrected_form', 'blob_service', 'output_name', 'output_container'], {}), '(corrected_form, blob_service, output_name,\n output_container)\n', (1440, 1505), True, 'import common.storage_helpers as storage_helpers\n'), ((2328, 2393), 'common.storage_helpers.get_blob', 'storage_helpers.get_blob', (['blob_service', 'container_name', 'blob.name'], {}), '(blob_service, container_name, blob.name)\n', (2352, 2393), True, 'import common.storage_helpers as storage_helpers\n'), ((3295, 3336), 'logging.error', 'logging.error', (['"""Error creating response."""'], {}), "('Error creating response.')\n", (3308, 3336), False, 'import logging\n'), ((2744, 2836), 'common.storage_helpers.upload_blob', 'storage_helpers.upload_blob', (['corrected_form', 'blob_service', 'output_name', 'output_container'], {}), '(corrected_form, blob_service, output_name,\n output_container)\n', (2771, 2836), True, 'import common.storage_helpers as storage_helpers\n')]
|
from ipykernel.kernelapp import IPKernelApp
from ipykernel.kernelbase import Kernel
from pexpect.replwrap import REPLWrapper
from pexpect.exceptions import EOF
class TuppenceKernel(Kernel):
implementation = 'Tuppence'
implementation_version = '1.0'
language = 'tuppence'
language_version = '0.1'
language_info = {'mimetype': 'text/plain', 'name':'tuppence'}
banner = "Tuppence kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self._start_tuppence()
def _start_tuppence(self):
self.replwrapper = REPLWrapper("tuppence", ">>> ", None)
def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
        try:
            if not silent:
                output = self.replwrapper.run_command(code)
                stream_content = {'name': 'stdout', 'text': output}
                self.send_response(self.iopub_socket, 'stream', stream_content)
            status = 'ok'
        except (EOF, OSError):
            # The REPL process has died; report an abort instead of crashing.
            if not silent:
                stream_content = {'name': 'stdout', 'text': 'killed'}
                self.send_response(self.iopub_socket, 'stream', stream_content)
            status = 'abort'
        return {'status': status,
                # The base class increments the execution count
                'execution_count': self.execution_count,
                'payload': [],
                'user_expressions': {},
                }
def do_shutdown(self, restart):
        try:
            self.replwrapper.run_command('exit()')
        except (EOF, OSError):
            pass
if __name__ == '__main__':
IPKernelApp.launch_instance(kernel_class=TuppenceKernel)
|
[
"ipykernel.kernelapp.IPKernelApp.launch_instance",
"ipykernel.kernelbase.Kernel.__init__",
"pexpect.replwrap.REPLWrapper"
] |
[((2441, 2497), 'ipykernel.kernelapp.IPKernelApp.launch_instance', 'IPKernelApp.launch_instance', ([], {'kernel_class': 'TuppenceKernel'}), '(kernel_class=TuppenceKernel)\n', (2468, 2497), False, 'from ipykernel.kernelapp import IPKernelApp\n'), ((453, 484), 'ipykernel.kernelbase.Kernel.__init__', 'Kernel.__init__', (['self'], {}), '(self, **kwargs)\n', (468, 484), False, 'from ipykernel.kernelbase import Kernel\n'), ((575, 612), 'pexpect.replwrap.REPLWrapper', 'REPLWrapper', (['"""tuppence"""', '""">>> """', 'None'], {}), "('tuppence', '>>> ', None)\n", (586, 612), False, 'from pexpect.replwrap import REPLWrapper\n')]
|
import requests
from bs4 import BeautifulSoup
from datetime import datetime as dt
from gzip import GzipFile
from urllib.request import urlopen
import re
TOP_URL = "https://ftp.acc.umu.se/mirror/wikimedia.org/dumps/enwiktionary/{}"
FILENAME = "/enwiktionary-{}-all-titles-in-ns0.gz"
NON_ALPHA_PATTERN = re.compile(rb'[\W]+')
NON_BRACKET_PATTERN = re.compile(rb"[\(\[].*?[\)\]]")
def find_latest_wikidump():
'''Identify the date (in the wikimedia dumps format)
of the most recent wiktionary dump. Actually returns the secondmost
recent date, as this is found to be more stable (e.g. if we make the
request during the upload)
Returns:
wikidate (str): The most recent date (in the wikimedia dumps format)
'''
r = requests.get(TOP_URL.format(""))
r.raise_for_status()
soup = BeautifulSoup(r.text, "lxml")
max_date, max_date_str = None, None
second_max_date_str = None
for anchor in soup.find_all("a", href=True):
raw_date = anchor.text.rstrip("/")
try:
date = dt.strptime(raw_date, '%Y%m%d')
except ValueError:
continue
if max_date is None or date > max_date:
second_max_date_str = max_date_str
max_date = date
max_date_str = raw_date
if second_max_date_str is not None:
return second_max_date_str
return max_date_str
def extract_ngrams(date):
'''Extract and reformat n-grams from wiktionary titles.
Terms in parentheses are removed, and hyphens are converted
to the standard n-gram separator (underscore). All other
non-alphanumeric characters are then removed, and leading/trailing
    underscores are removed. Unigrams, n-grams longer than six tokens or
    50 characters, and terms starting with a digit are then excluded.
Args:
date (str): A date string (in the wikimedia dumps format)
Returns:
ngrams (set): The set of n-grams from wiktionary
'''
r = urlopen((TOP_URL+FILENAME).format(date, date))
ngrams = set()
with GzipFile(fileobj=r) as gzio:
for line in gzio:
line = line.rstrip(b'\n')
line = line.replace(b'-', b'_')
line = NON_BRACKET_PATTERN.sub(b'', line)
line = NON_ALPHA_PATTERN.sub(b'', line)
if line.startswith(b'_'):
line = line[1:]
if line.endswith(b'_'):
line = line[:-1]
size = len(line.split(b'_'))
if size == 1 or size > 6:
continue
if len(line) > 50:
continue
if line.decode('utf-8')[0].isnumeric():
continue
ngrams.add(line.lower())
return ngrams
if __name__ == "__main__":
wiki_date = find_latest_wikidump()
ngrams = extract_ngrams(wiki_date)
print(f"Found {len(ngrams)} n-grams")
|
[
"bs4.BeautifulSoup",
"datetime.datetime.strptime",
"gzip.GzipFile",
"re.compile"
] |
[((303, 324), 're.compile', 're.compile', (["b'[\\\\W]+'"], {}), "(b'[\\\\W]+')\n", (313, 324), False, 'import re\n'), ((347, 381), 're.compile', 're.compile', (["b'[\\\\(\\\\[].*?[\\\\)\\\\]]'"], {}), "(b'[\\\\(\\\\[].*?[\\\\)\\\\]]')\n", (357, 381), False, 'import re\n'), ((818, 847), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""lxml"""'], {}), "(r.text, 'lxml')\n", (831, 847), False, 'from bs4 import BeautifulSoup\n'), ((1961, 1980), 'gzip.GzipFile', 'GzipFile', ([], {'fileobj': 'r'}), '(fileobj=r)\n', (1969, 1980), False, 'from gzip import GzipFile\n'), ((1043, 1074), 'datetime.datetime.strptime', 'dt.strptime', (['raw_date', '"""%Y%m%d"""'], {}), "(raw_date, '%Y%m%d')\n", (1054, 1074), True, 'from datetime import datetime as dt\n')]
|
#!/usr/bin/env python
from __future__ import absolute_import
import sys
# from distutils.core import setup
from setuptools import setup, find_packages
from os import path
import io
from os.path import join, dirname
sys.path.append(join(dirname(__file__), "EyesLibraryExtended"))
exec(compile(open("EyesLibraryExtended/version.py").read(), "EyesLibraryExtended/version.py", "exec"))
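# a common setup.py idiom: execute version.py in this namespace so that
# __version__ is defined here without importing the package itself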
with io.open("README.md", encoding="utf-8") as f:
long_description = f.read()
setup(
name="robotframework-eyeslibraryextended",
version=__version__,
description="Visual verification testing library for Robot Framework using Applitool python SDK eye-selenium",
long_description=long_description,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<<EMAIL>>",
url="https://github.com/JisThomas14/EyesLibraryExtended",
license="Apache License 2.0",
keywords="robotframework testing testautomation eyes-selenium selenium appium visual-verification ultrafastgrid classicrunner applitool",
platforms="any",
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.7",
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Framework :: Robot Framework :: Library",
"Topic :: Software Development :: Testing",
"Topic :: Software Development :: Quality Assurance",
],
install_requires=[
"robotframework > 3.0, < 4",
"eyes-selenium >= 4.1.25",
"six > 1.0.0, < 2",
"robotframework-seleniumlibrary",
"robotframework-appiumlibrary",
],
packages=find_packages(exclude=["tests", "docs"]),
)
|
[
"os.path.dirname",
"setuptools.find_packages",
"io.open"
] |
[((392, 430), 'io.open', 'io.open', (['"""README.md"""'], {'encoding': '"""utf-8"""'}), "('README.md', encoding='utf-8')\n", (399, 430), False, 'import io\n'), ((239, 256), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (246, 256), False, 'from os.path import join, dirname\n'), ((1737, 1777), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['tests', 'docs']"}), "(exclude=['tests', 'docs'])\n", (1750, 1777), False, 'from setuptools import setup, find_packages\n')]
|
import os
import re
from typing import List
from setuptools import find_packages, setup
def get_version(package: str) -> str:
"""
Return package version as listed in `__version__` in `__main__.py`.
"""
path = os.path.join(package, "__main__.py")
main_py = open(path, "r", encoding="utf8").read()
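    # matches __version__ = "x.y.z" written with either quote style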
match = re.search("__version__ = ['\"]([^'\"]+)['\"]", main_py)
if match is None:
return "0.0.0"
return match.group(1)
def get_long_description() -> str:
"""
Return the README.
"""
return open("README.md", "r", encoding="utf8").read()
def get_install_requires() -> List[str]:
return open("requirements.txt").read().splitlines()
setup(
name="joint-teapot",
version=get_version("joint_teapot"),
url="https://github.com/BoYanZh/joint-teapot",
license="MIT",
description="A handy tool for TAs in JI to handle stuffs through Gitea, Canvas, and JOJ.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="BoYanZh",
author_email="<EMAIL>",
maintainer="BoYanZh",
maintainer_email="<EMAIL>",
packages=find_packages(),
python_requires=">=3.6",
entry_points={"console_scripts": ["joint-teapot=joint_teapot:main"]},
install_requires=get_install_requires(),
)
|
[
"os.path.join",
"setuptools.find_packages",
"re.search"
] |
[((228, 264), 'os.path.join', 'os.path.join', (['package', '"""__main__.py"""'], {}), "(package, '__main__.py')\n", (240, 264), False, 'import os\n'), ((331, 386), 're.search', 're.search', (['"""__version__ = [\'"]([^\'"]+)[\'"]"""', 'main_py'], {}), '(\'__version__ = [\\\'"]([^\\\'"]+)[\\\'"]\', main_py)\n', (340, 386), False, 'import re\n'), ((1148, 1163), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (1161, 1163), False, 'from setuptools import find_packages, setup\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Name: streamStatus.py
# Purpose: functionality for reporting on the notational status of streams
#
# Authors: <NAME>
#
# Copyright: Copyright © 2013 <NAME> and the music21
# Project
# License: BSD, see license.txt
# -----------------------------------------------------------------------------
import unittest
from music21 import environment
from music21 import common
from music21.common.objects import SlottedObjectMixin
environLocal = environment.Environment(__file__)
# -----------------------------------------------------------------------------
class StreamStatus(SlottedObjectMixin):
'''
An object that stores the current notation state for the client stream.
Separates out tasks such as whether notation has been made, etc.
>>> s = stream.Stream()
>>> ss = s.streamStatus
>>> ss
<music21.stream.streamStatus.StreamStatus object at 0x...>
>>> s.streamStatus.client is s
True
Copying of StreamStatus and surrounding Streams
>>> import copy
>>> ss2 = copy.deepcopy(ss)
>>> ss2.client is None
True
>>> s2 = copy.deepcopy(s)
>>> s2.streamStatus
<music21.stream.streamStatus.StreamStatus object at 0x...>
>>> s2.streamStatus is ss
False
>>> s2.streamStatus.client is s2
True
'''
# CLASS VARIABLES #
__slots__ = (
'_accidentals',
'_beams',
'_client',
'_concertPitch',
'_dirty',
'_enharmonics',
'_measures',
'_ornaments',
'_rests',
'_ties',
'_tuplets',
)
# INITIALIZER #
def __init__(self, client=None):
self._client = None
self._accidentals = None
self._beams = None
self._concertPitch = None
self._dirty = None
self._enharmonics = None
self._measures = None
self._ornaments = None
self._rests = None
self._ties = None
self._tuplets = None
self.client = client
# SPECIAL METHODS #
def __deepcopy__(self, memo=None):
'''
        Manage deepcopying by creating a new StreamStatus that shares the
        same attribute values, leaving out the client.
'''
new = type(self)()
for x in self.__slots__:
if x == '_client':
new._client = None
else:
setattr(new, x, getattr(self, x))
return new
# unwrap weakref for pickling
def __getstate__(self):
self._client = common.unwrapWeakref(self._client)
return SlottedObjectMixin.__getstate__(self)
def __setstate__(self, state):
SlottedObjectMixin.__setstate__(self, state)
self._client = common.wrapWeakref(self._client)
# PUBLIC METHODS #
def haveAccidentalsBeenMade(self):
'''
        If Accidentals.displayStatus is None for all contained pitches, it is
assumed that accidentals have not been set for display and/or
makeAccidentals has not been run. If any Accidental has displayStatus
other than None, this method returns True, regardless of if
makeAccidentals has actually been run.
'''
for p in self.client.pitches:
if p.accidental is not None:
if p.accidental.displayStatus is not None:
return True
return False
def haveBeamsBeenMade(self):
'''
        If no Note in this Stream has .beams defined, it is assumed that Beams
        have not been set and/or makeBeams has not been run. If any Beams
        exist, this method returns True, regardless of whether makeBeams has
actually been run.
'''
for n in self.client.recurse(classFilter=('NotRest',), restoreActiveSites=False):
if n.beams is not None and n.beams.beamsList:
return True
return False
def haveTupletBracketsBeenMade(self):
'''
        If any GeneralNote in this Stream is a tuplet, check whether any of
        them has a first Tuplet whose type is not None; if so, return True,
        otherwise False. Return None if the Stream contains no tuplets at all.
>>> s = stream.Stream()
>>> s.streamStatus.haveTupletBracketsBeenMade() is None
True
>>> s.append(note.Note())
>>> s.streamStatus.haveTupletBracketsBeenMade() is None
True
>>> n = note.Note(quarterLength=1/3)
>>> s.append(n)
>>> s.streamStatus.haveTupletBracketsBeenMade()
False
>>> n.duration.tuplets[0].type = 'start'
>>> s.streamStatus.haveTupletBracketsBeenMade()
True
'''
foundTuplet = False
for n in self.client.recurse(classFilter='GeneralNote', restoreActiveSites=False):
if n.duration.tuplets:
foundTuplet = True
if n.duration.tuplets[0].type is not None:
return True
if foundTuplet:
return False
else:
return None
# PUBLIC PROPERTIES #
@property
def client(self):
return common.unwrapWeakref(self._client)
@client.setter
def client(self, client):
# client is the Stream that this status lives on
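        # only a weak reference is stored, avoiding a reference cycle
        # between the Stream and its StreamStatus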
self._client = common.wrapWeakref(client)
@property
def accidentals(self):
if self._accidentals is None:
self._accidentals = self.haveAccidentalsBeenMade()
return self._accidentals
@accidentals.setter
def accidentals(self, expr):
if expr is not None:
self._accidentals = bool(expr)
else:
self._accidentals = None
@property
def beams(self):
if self._beams is None:
self._beams = self.haveBeamsBeenMade()
return self._beams
@beams.setter
def beams(self, expr):
if expr is not None:
self._beams = bool(expr)
else:
self._beams = None
@property
def tuplets(self):
if self._tuplets is None:
self._tuplets = self.haveTupletBracketsBeenMade()
# If there were no tuplet durations,
# tuplet brackets don't need to be made.
if self._tuplets is None:
self._tuplets = True
return self._tuplets
@tuplets.setter
def tuplets(self, expr):
if expr is not None:
self._tuplets = bool(expr)
else:
self._tuplets = None
# -----------------------------------------------------------------------------
class Test(unittest.TestCase):
'''
Note: most Stream tests are found in stream.tests
'''
def testHaveBeamsBeenMadeAfterDeepcopy(self):
import copy
from music21 import stream
from music21 import note
m = stream.Measure()
c = note.Note('C4', type='quarter')
m.append(c)
d1 = note.Note('D4', type='eighth')
d2 = note.Note('D4', type='eighth')
m.append([d1, d2])
e3 = note.Note('E4', type='eighth')
e4 = note.Note('E4', type='eighth')
m.append([e3, e4])
d1.beams.append('start')
d2.beams.append('stop')
self.assertTrue(m.streamStatus.haveBeamsBeenMade())
mm = copy.deepcopy(m)
self.assertTrue(mm.streamStatus.haveBeamsBeenMade())
mm.streamStatus.beams = False
mmm = copy.deepcopy(mm)
self.assertFalse(mmm.streamStatus.beams)
# m.show()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import music21
music21.mainTest(Test)
|
[
"copy.deepcopy",
"music21.stream.Measure",
"music21.environment.Environment",
"music21.common.wrapWeakref",
"music21.common.objects.SlottedObjectMixin.__setstate__",
"music21.mainTest",
"music21.common.objects.SlottedObjectMixin.__getstate__",
"music21.note.Note",
"music21.common.unwrapWeakref"
] |
[((587, 620), 'music21.environment.Environment', 'environment.Environment', (['__file__'], {}), '(__file__)\n', (610, 620), False, 'from music21 import environment\n'), ((7698, 7720), 'music21.mainTest', 'music21.mainTest', (['Test'], {}), '(Test)\n', (7714, 7720), False, 'import music21\n'), ((2613, 2647), 'music21.common.unwrapWeakref', 'common.unwrapWeakref', (['self._client'], {}), '(self._client)\n', (2633, 2647), False, 'from music21 import common\n'), ((2663, 2700), 'music21.common.objects.SlottedObjectMixin.__getstate__', 'SlottedObjectMixin.__getstate__', (['self'], {}), '(self)\n', (2694, 2700), False, 'from music21.common.objects import SlottedObjectMixin\n'), ((2745, 2789), 'music21.common.objects.SlottedObjectMixin.__setstate__', 'SlottedObjectMixin.__setstate__', (['self', 'state'], {}), '(self, state)\n', (2776, 2789), False, 'from music21.common.objects import SlottedObjectMixin\n'), ((2813, 2845), 'music21.common.wrapWeakref', 'common.wrapWeakref', (['self._client'], {}), '(self._client)\n', (2831, 2845), False, 'from music21 import common\n'), ((5206, 5240), 'music21.common.unwrapWeakref', 'common.unwrapWeakref', (['self._client'], {}), '(self._client)\n', (5226, 5240), False, 'from music21 import common\n'), ((5371, 5397), 'music21.common.wrapWeakref', 'common.wrapWeakref', (['client'], {}), '(client)\n', (5389, 5397), False, 'from music21 import common\n'), ((6899, 6915), 'music21.stream.Measure', 'stream.Measure', ([], {}), '()\n', (6913, 6915), False, 'from music21 import stream\n'), ((6928, 6959), 'music21.note.Note', 'note.Note', (['"""C4"""'], {'type': '"""quarter"""'}), "('C4', type='quarter')\n", (6937, 6959), False, 'from music21 import note\n'), ((6993, 7023), 'music21.note.Note', 'note.Note', (['"""D4"""'], {'type': '"""eighth"""'}), "('D4', type='eighth')\n", (7002, 7023), False, 'from music21 import note\n'), ((7037, 7067), 'music21.note.Note', 'note.Note', (['"""D4"""'], {'type': '"""eighth"""'}), "('D4', type='eighth')\n", (7046, 7067), False, 'from music21 import note\n'), ((7108, 7138), 'music21.note.Note', 'note.Note', (['"""E4"""'], {'type': '"""eighth"""'}), "('E4', type='eighth')\n", (7117, 7138), False, 'from music21 import note\n'), ((7152, 7182), 'music21.note.Note', 'note.Note', (['"""E4"""'], {'type': '"""eighth"""'}), "('E4', type='eighth')\n", (7161, 7182), False, 'from music21 import note\n'), ((7348, 7364), 'copy.deepcopy', 'copy.deepcopy', (['m'], {}), '(m)\n', (7361, 7364), False, 'import copy\n'), ((7478, 7495), 'copy.deepcopy', 'copy.deepcopy', (['mm'], {}), '(mm)\n', (7491, 7495), False, 'import copy\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from six import iteritems
import os
import csv
from dcase_util.containers import DictContainer
from dcase_util.utils import FileFormat
class OneToOneMappingContainer(DictContainer):
"""Mapping container class for 1:1 data mapping, inherited from DictContainer class."""
valid_formats = [FileFormat.CSV, FileFormat.TXT, FileFormat.CPICKLE] #: Valid file formats
def __init__(self, *args, **kwargs):
# Run DictContainer init
DictContainer.__init__(self, *args, **kwargs)
super(OneToOneMappingContainer, self).__init__(*args, **kwargs)
def load(self, filename=None):
"""Load file
Parameters
----------
filename : str, optional
File path
Default value filename given to class constructor
Raises
------
ImportError:
Error if file format specific module cannot be imported
IOError:
            File does not exist or has an unknown file format
Returns
-------
self
"""
if filename:
self.filename = filename
self.detect_file_format()
self.validate_format()
dict.clear(self)
if self.exists():
from dcase_util.files import Serializer
if self.format == FileFormat.TXT or self.format == FileFormat.CSV:
map_data = {}
with open(self.filename, 'rtU') as f:
for row in csv.reader(f, delimiter=self.delimiter()):
if len(row) == 2:
map_data[row[0]] = row[1]
dict.update(self, map_data)
elif self.format == FileFormat.CPICKLE:
dict.update(self, Serializer.load_cpickle(filename=self.filename))
else:
message = '{name}: Unknown format [{format}]'.format(name=self.__class__.__name__, format=self.filename)
self.logger.exception(message)
raise IOError(message)
else:
            message = '{name}: File does not exist [{file}]'.format(name=self.__class__.__name__, file=self.filename)
self.logger.exception(message)
raise IOError(message)
# Check if after load function is defined, call if found
if hasattr(self, '_after_load'):
self._after_load()
return self
def save(self, filename=None):
"""Save file
Parameters
----------
filename : str, optional
File path
Default value filename given to class constructor
Raises
------
ImportError:
Error if file format specific module cannot be imported
IOError:
            File has an unknown file format
Returns
-------
self
"""
if filename:
self.filename = filename
self.detect_file_format()
self.validate_format()
if self.filename is None or self.filename == '':
message = '{name}: Filename is empty [{filename}]'.format(
name=self.__class__.__name__,
filename=self.filename
)
self.logger.exception(message)
raise IOError(message)
try:
from dcase_util.files import Serializer
if self.format == FileFormat.CSV or self.format == FileFormat.TXT:
delimiter = ','
with open(self.filename, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=delimiter)
for key, value in iteritems(self):
if key not in ['filename']:
csv_writer.writerow((key, value))
elif self.format == FileFormat.CPICKLE:
Serializer.save_cpickle(filename=self.filename, data=dict(self))
else:
message = '{name}: Unknown format [{format}]'.format(name=self.__class__.__name__, format=self.filename)
self.logger.exception(message)
raise IOError(message)
except KeyboardInterrupt:
os.remove(self.filename) # Delete the file, since most likely it was not saved fully
raise
# Check if after save function is defined, call if found
if hasattr(self, '_after_save'):
self._after_save()
return self
@property
def flipped(self):
"""Exchange map key and value pairs.
Returns
-------
OneToOneMappingContainer
flipped map
"""
return OneToOneMappingContainer(dict((v, k) for k, v in iteritems(self)))
def map(self, key, default=None):
"""Map with a key.
Parameters
----------
key : str or number
Mapping key
default : str or number
Default value to be returned if key does not exists in the mapping container.
Returns
-------
OneToOneMappingContainer
flipped map
"""
if key in self:
return self[key]
else:
return default
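# A minimal usage sketch (hypothetical values; assumes DictContainer accepts
# a plain dict in its constructor):
#
#     mapping = OneToOneMappingContainer({'car': 'vehicle'})
#     mapping.map('car')              # -> 'vehicle'
#     mapping.flipped.map('vehicle')  # -> 'car'
#     mapping.map('bike', 'n/a')      # -> 'n/a' (missing keys fall back)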
|
[
"os.remove",
"csv.writer",
"dcase_util.containers.DictContainer.__init__",
"dcase_util.files.Serializer.load_cpickle",
"six.iteritems"
] |
[((556, 601), 'dcase_util.containers.DictContainer.__init__', 'DictContainer.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (578, 601), False, 'from dcase_util.containers import DictContainer\n'), ((4269, 4293), 'os.remove', 'os.remove', (['self.filename'], {}), '(self.filename)\n', (4278, 4293), False, 'import os\n'), ((3651, 3692), 'csv.writer', 'csv.writer', (['csv_file'], {'delimiter': 'delimiter'}), '(csv_file, delimiter=delimiter)\n', (3661, 3692), False, 'import csv\n'), ((3731, 3746), 'six.iteritems', 'iteritems', (['self'], {}), '(self)\n', (3740, 3746), False, 'from six import iteritems\n'), ((1846, 1893), 'dcase_util.files.Serializer.load_cpickle', 'Serializer.load_cpickle', ([], {'filename': 'self.filename'}), '(filename=self.filename)\n', (1869, 1893), False, 'from dcase_util.files import Serializer\n'), ((4789, 4804), 'six.iteritems', 'iteritems', (['self'], {}), '(self)\n', (4798, 4804), False, 'from six import iteritems\n')]
|
import os
from base64 import b64encode
from urllib.parse import quote
def g_b64(data):
    # decode back to str so both encoders return the same type
    return b"".join([b"data:image/svg+xml;base64,", b64encode(data.encode("utf-8"))]).decode("ascii")
def g_uri(data):
return "".join(["data:image/svg+xml;charset=UTF-8,", quote(data)])
def svg_to_data(data):
d_b64 = g_b64(data)
d_uri = g_uri(data)
if len(d_b64) > len(d_uri):
return d_uri
return d_b64
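# Example: svg_to_data('<svg/>') returns the shorter of the two encodings;
# for this input the base64 form, 'data:image/svg+xml;base64,PHN2Zy8+', wins.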
def svg_to_data_uri(file_path, include_paths):
for path in include_paths:
try:
with open(os.path.join(path, file_path)) as f:
return svg_to_data(f.read())
except FileNotFoundError:
pass
raise FileNotFoundError(file_path)
|
[
"urllib.parse.quote",
"os.path.join"
] |
[((251, 262), 'urllib.parse.quote', 'quote', (['data'], {}), '(data)\n', (256, 262), False, 'from urllib.parse import quote\n'), ((523, 552), 'os.path.join', 'os.path.join', (['path', 'file_path'], {}), '(path, file_path)\n', (535, 552), False, 'import os\n')]
|
import xml.etree.ElementTree as ET, sys, random
from graph import Graph
from time import time
e = []
varS = []
N = None
bn = Graph()
if len(sys.argv) < 4:
print("Invalid argv: Less Than 2 argvs!")
sys.exit()
if len(sys.argv) > 4:
if len(sys.argv) % 2 != 0:
print("Invalid argv: Wrong Format!")
sys.exit()
file = sys.argv[2]
X = sys.argv[3]
try:
N = int(sys.argv[1])
except ValueError:
print("Invalid argv: Wrong Sample Number")
sys.exit()
varS.append(sys.argv[3])
try:
tree = ET.parse(file)
except (OSError, ET.ParseError):
    print("File does not exist or cannot be parsed!")
sys.exit()
if len(sys.argv) != 4:
i = 4
while i < len(sys.argv):
if sys.argv[i+1].lower() == "true":
varS.append(sys.argv[i])
e.append(sys.argv[i])
i += 2
elif sys.argv[i+1].lower() == "false":
varS.append(sys.argv[i])
e.append("!"+sys.argv[i])
i += 2
else:
print("Invalid argv: Wrong Format!")
sys.exit()
root = tree.getroot()
varList = []
fgList = []
defList = []
for i in range(len(root[0])):
tem = root[0][i]
if tem.tag == 'VARIABLE':
varList += tem[0].text.split(' ')
elif tem.tag == 'DEFINITION':
for stuff in tem:
if stuff.tag == 'FOR':
node = stuff.text
temList_1 = [stuff.text]
elif stuff.tag == 'GIVEN':
temList_1 += [stuff.text]
bn.addEdge((node, stuff.text))
elif stuff.tag == 'TABLE':
temList = stuff.text.replace('\n','').replace('\t','').strip().split(' ')
j = 0
while j < len(temList):
if not temList[j]:
temList.pop(j)
else:
temList[j] = float(temList[j])
j += 1
defList.append(temList)
fgList.append(temList_1)
j = 0
while j < len(varS):
tem = bn.findParent(varS[j])
for var in list(tem):
if var in varS:
tem.remove(var)
if len(tem) > 0:
for stuff in tem:
varS.insert(1,stuff)
j = 0
j += 1
# sort the list topologically
i = 0
while True:
flag = 0
while i < len(varS) - 1:
var = bn.findParent(varS[i])
if var:
for stuff in var:
if stuff not in varS[:i]:
varS[i], varS[i+1] = varS[i+1], varS[i]
flag += 1
break
i += 1
else:
i += 1
if flag == 0:
break
newDict = dict()
for i in range(len(fgList)):
counter = 2 ** len(fgList[i])
j = 0
while j < counter:
k = 0
List = []
a = "" if j % 2 == 0 else "!"
b = "" if j < 0.5 * counter else "!"
c = "" if j % 4 == 0 or (j - 1) % 4 == 0 else "!"
while k < len(fgList[i]):
if k == 0:
List.append(a+fgList[i][k])
elif k == 1:
List.append(b+fgList[i][k])
elif k == 2:
List.append(c+fgList[i][k])
k += 1
newDict[frozenset(List)] = defList[i][j]
j += 1
i += 1
def priorSample(sortedGraph):
sample = []
while sortedGraph:
Y = sortedGraph.pop(0)
parent = findParent(list(bn.findParent(Y)), sample)
parent += [Y]
if random.random() <= newDict[frozenset(parent)]:
sample.append(Y)
else:
sample.append("!"+Y)
return sample
def findParent(parent, e):
i = 0
while i < len(parent):
if parent[i] not in e:
parent[i] = "!" + parent[i]
i += 1
return parent
def consistent(sample, e):
for var in sample:
if "!" + var in e:
return False
for evidence in e:
if "!" + evidence in sample:
return False
return True
def normalize(Q):
List1 = []
List2 = []
for key, val in Q.items():
List1.append(key)
List2.append(val)
if len(List1) == 1:
Q[List1[0]] = 1
if len(List1[0]) == 1:
Q["!"+List1[0]] = 0
elif len(List1[0]) == 2:
Q[List1[0][1:]] = 0
return "Sample Not Enough"
elif len(List1) == 0:
return Q
alpha = 1/(List2[0]+List2[1])
Q[List1[0]] = alpha*List2[0]
Q[List1[1]] = alpha*List2[1]
return Q
def rejectionSampling(X, e, sortedGraph, N):
Q = {}
reject = 0
accept = 0
for i in range(1, N + 1):
sample = priorSample(list(sortedGraph))
if not consistent(sample, e):
reject += 1
continue
if X in sample:
Q[X] = Q.get(X,0) + 1
accept += 1
elif "!"+X in sample:
Q["!"+X] = Q.get("!"+X,0) + 1
accept += 1
return normalize(Q), accept, reject
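# Rejection sampling: draw N full assignments from the prior (following the
# topological order in varS), discard samples inconsistent with the evidence
# e, and estimate P(X | e) from the surviving samples.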
t = time()
result = rejectionSampling(X, e, list(varS), N)
print("")
print("Result:", result[0])
rate = result[1] / (result[1] + result[2])
print("Accept:", result[1], "Reject:", result[2])
print("Acception Rate:", rate)
print("Calculated in %.1fs" % (time() - t))
print("")
|
[
"xml.etree.ElementTree.parse",
"graph.Graph",
"time.time",
"random.random",
"sys.exit"
] |
[((126, 133), 'graph.Graph', 'Graph', ([], {}), '()\n', (131, 133), False, 'from graph import Graph\n'), ((4930, 4936), 'time.time', 'time', ([], {}), '()\n', (4934, 4936), False, 'from time import time\n'), ((207, 217), 'sys.exit', 'sys.exit', ([], {}), '()\n', (215, 217), False, 'import xml.etree.ElementTree as ET, sys, random\n'), ((512, 526), 'xml.etree.ElementTree.parse', 'ET.parse', (['file'], {}), '(file)\n', (520, 526), True, 'import xml.etree.ElementTree as ET, sys, random\n'), ((324, 334), 'sys.exit', 'sys.exit', ([], {}), '()\n', (332, 334), False, 'import xml.etree.ElementTree as ET, sys, random\n'), ((460, 470), 'sys.exit', 'sys.exit', ([], {}), '()\n', (468, 470), False, 'import xml.etree.ElementTree as ET, sys, random\n'), ((574, 584), 'sys.exit', 'sys.exit', ([], {}), '()\n', (582, 584), False, 'import xml.etree.ElementTree as ET, sys, random\n'), ((3438, 3453), 'random.random', 'random.random', ([], {}), '()\n', (3451, 3453), False, 'import xml.etree.ElementTree as ET, sys, random\n'), ((5178, 5184), 'time.time', 'time', ([], {}), '()\n', (5182, 5184), False, 'from time import time\n'), ((997, 1007), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1005, 1007), False, 'import xml.etree.ElementTree as ET, sys, random\n')]
|
'''
File: main.py
Project: sketchKeras
File Created: Sunday, 7th October 2018 5:51:22 pm
Author: xiaofeng (<EMAIL>)
-----
Last Modified: Sunday, 7th October 2018 7:09:45 pm
Modified By: xiaofeng (<EMAIL>>)
-----
Copyright 2018.06 - 2018 onion Math, onion Math
'''
from keras.models import load_model
import keras.backend.tensorflow_backend as K
import tensorflow as tf
from keras.utils import plot_model
import datetime
import cv2
import os
import numpy as np
import pickle
from helper_sketch import *
class Sketch:
def __init__(self, gpu=0):
print("start")
self.root = "./images/"
self.batchsize = 1
self.outdir = self.root + "sketch/"
self.gpu = gpu
self._dtype = np.float32
if not os.path.isfile("./sketchKeras/mod.h5"):
print("/sketchKeras/mod.h5 not found. Please download them from github")
print("load model")
if self.gpu >= 0:
self.gpu_option = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
self.model_config = tf.ConfigProto(device_count={"CPU": 7},
gpu_options=self.gpu_option,
intra_op_parallelism_threads=0,
inter_op_parallelism_threads=0)
else:
self.model_config = tf.ConfigProto(device_count={"CPU": 2, "GPU": 0},
intra_op_parallelism_threads=0,
inter_op_parallelism_threads=0)
self.model = load_model('./sketchKeras/mod.h5')
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
def tosketch(self, id_str):
path = os.path.join(self.root, 'line', id_str + '.png')
saved_path = os.path.join(self.outdir, id_str+'.jpg')
from_mat = cv2.imread(path)
width = float(from_mat.shape[1])
height = float(from_mat.shape[0])
new_width = 0
new_height = 0
if (width > height):
from_mat = cv2.resize(
from_mat, (512, int(512 / width * height)),
interpolation=cv2.INTER_AREA)
new_width = 512
new_height = int(512 / width * height)
else:
from_mat = cv2.resize(from_mat, (int(512 / height * width), 512),
interpolation=cv2.INTER_AREA)
new_width = int(512 / height * width)
new_height = 512
from_mat = from_mat.transpose((2, 0, 1))
light_map = np.zeros(from_mat.shape, dtype=np.float)
for channel in range(3):
light_map[channel] = get_light_map_single(from_mat[channel])
light_map = normalize_pic(light_map)
light_map = resize_img_512_3d(light_map)
line_mat = self.model.predict(light_map, batch_size=self.batchsize)
line_mat = line_mat.transpose((3, 1, 2, 0))[0]
line_mat = line_mat[0:int(new_height), 0:int(new_width), :]
# show_active_img_and_save('sketchKeras_colored', line_mat, saved_path)
line_mat = np.amax(line_mat, 2)
# show_active_img_and_save_denoise_filter2('sketchKeras_enhanced', line_mat, saved_path)
show_active_img_and_save_denoise_filter('sketchKeras_pured', line_mat, saved_path)
# show_active_img_and_save_denoise('sketchKeras', line_mat, saved_path)
# cv2.waitKey(0)
if __name__ == '__main__':
for n in range(1):
s = Sketch()
s.tosketch(n * s.batchsize)
|
[
"keras.models.load_model",
"os.makedirs",
"numpy.zeros",
"os.path.exists",
"numpy.amax",
"cv2.imread",
"os.path.isfile",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"os.path.join"
] |
[((1593, 1627), 'keras.models.load_model', 'load_model', (['"""./sketchKeras/mod.h5"""'], {}), "('./sketchKeras/mod.h5')\n", (1603, 1627), False, 'from keras.models import load_model\n'), ((1757, 1805), 'os.path.join', 'os.path.join', (['self.root', '"""line"""', "(id_str + '.png')"], {}), "(self.root, 'line', id_str + '.png')\n", (1769, 1805), False, 'import os\n'), ((1827, 1869), 'os.path.join', 'os.path.join', (['self.outdir', "(id_str + '.jpg')"], {}), "(self.outdir, id_str + '.jpg')\n", (1839, 1869), False, 'import os\n'), ((1887, 1903), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (1897, 1903), False, 'import cv2\n'), ((2585, 2625), 'numpy.zeros', 'np.zeros', (['from_mat.shape'], {'dtype': 'np.float'}), '(from_mat.shape, dtype=np.float)\n', (2593, 2625), True, 'import numpy as np\n'), ((3124, 3144), 'numpy.amax', 'np.amax', (['line_mat', '(2)'], {}), '(line_mat, 2)\n', (3131, 3144), True, 'import numpy as np\n'), ((749, 787), 'os.path.isfile', 'os.path.isfile', (['"""./sketchKeras/mod.h5"""'], {}), "('./sketchKeras/mod.h5')\n", (763, 787), False, 'import os\n'), ((959, 1009), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.9)'}), '(per_process_gpu_memory_fraction=0.9)\n', (972, 1009), True, 'import tensorflow as tf\n'), ((1042, 1178), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 7}", 'gpu_options': 'self.gpu_option', 'intra_op_parallelism_threads': '(0)', 'inter_op_parallelism_threads': '(0)'}), "(device_count={'CPU': 7}, gpu_options=self.gpu_option,\n intra_op_parallelism_threads=0, inter_op_parallelism_threads=0)\n", (1056, 1178), True, 'import tensorflow as tf\n'), ((1363, 1480), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'device_count': "{'CPU': 2, 'GPU': 0}", 'intra_op_parallelism_threads': '(0)', 'inter_op_parallelism_threads': '(0)'}), "(device_count={'CPU': 2, 'GPU': 0},\n intra_op_parallelism_threads=0, inter_op_parallelism_threads=0)\n", (1377, 1480), True, 'import tensorflow as tf\n'), ((1643, 1670), 'os.path.exists', 'os.path.exists', (['self.outdir'], {}), '(self.outdir)\n', (1657, 1670), False, 'import os\n'), ((1684, 1708), 'os.makedirs', 'os.makedirs', (['self.outdir'], {}), '(self.outdir)\n', (1695, 1708), False, 'import os\n')]
|
import sys, getpass, getopt, requests, random, time
from math import *
from datetime import datetime
import os
print('Number of arguments : ', len(sys.argv))
print('#########################################')
print('')
print('Reddit image scraping script')
print('')
print('#########################################')
## subreddit to scrape
## --subReddit -s
## number of images to fetch
## --number -n
## destination folder
## --folder -f
## help
## --help -h
# Print iterations progress
def printProgressBar (iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '█', printEnd = "\r"):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
printEnd - Optional : end character (e.g. "\r", "\r\n") (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print(f'\r{prefix} |{bar}| {percent}% {suffix}', end = printEnd)
# Print New Line on Complete
if iteration == total:
print()
def checkFolder(folder):
if os.path.exists(folder):
print("Le fichier existe déjà")
else:
os.mkdir(folder)
def download(dataRequest, folder):
l = len(dataRequest['data']['children'])
i=0
printProgressBar(0, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
for images in dataRequest['data']['children']:
image = images['data']['url']
titre = image.split('/')
response = requests.get(image)
file_image = folder + str(titre[-1])
        if os.path.exists(file_image):
            now = datetime.now()
            random.seed(str(now.strftime("%S")))
            file_image = folder + str(random.randint(0, 99)) + str(titre[-1])
file = open(file_image, "wb")
file.write(response.content)
file.close()
printProgressBar(i + 1, l, prefix = 'Progress:', suffix = 'Complete', length = 50)
i=i+1
def getDataJson(link, folder, number):
print("Download images from https://www.reddit.com/r/" + link )
limit = 100
after = ''
iteration = ceil(int(number)/limit)
rest = int(number)%limit
print(iteration)
print(rest)
for i in range(0, iteration):
if i == 0 and int(number) > 100:
print("First request with max limit")
url = 'https://www.reddit.com/r/' + link + '.json?limit=' + str(limit)
elif i == 0 and int(number) <= 100:
print("First request with " + str(number) + " in limit")
url = 'https://www.reddit.com/r/' + link + '.json?limit=' + str(number)
elif i == iteration-1:
print("request with limit parameter and after parameter" + after + " " + str(rest))
url = 'https://www.reddit.com/r/' + link + '.json?limit=' + str(rest) + "&after=" + after
else:
print("request with after in parameter and max limit " + after + " " + str(limit))
url = 'https://www.reddit.com/r/' + link + '.json?limit=' + str(limit) + "&after=" + after
## Make the request
r = requests.get(url, headers = {'User-agent': 'Zbi 1'})
##parse request in json format
data = r.json()
## Download all image from json
checkFolder(folder)
download(data, folder)
after = data['data']['after']
print("After: " + after)
def main(argv):
link = ''
username = getpass.getuser()
folder = ''
number = '25'
try:
opts, args = getopt.getopt(argv, "hs:f:n:", ["help", "subReddit=", "folder=", "number="])
except getopt.GetoptError:
        print('You must call the script with these arguments \".\EP_Script_v2.py -s <sub> -f <folder> -n <number>\"')
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
            print('You must call the script with these arguments \".\EP_Script_v2.py -s <sub> -f <folder> -n <number>\"')
print ('Example : .\\EP_Script_v2.py -s EarthPorn -f D:\\Users\\username\\Documents\\ImageReddit\\ -n 50')
sys.exit()
elif opt in ("-s", "--sub"):
link = arg
elif opt in ("-f", "--folder"):
folder = arg
elif opt in ("-n", "--number"):
number = arg
if (link == ''):
        print('You must specify a subreddit!')
sys.exit(2)
elif (folder == ''):
folder = 'C:\\Users\\' + username + '\\Documents\\Images_SubReddit_test\\'
print("You havn't specified a file. The default file is", folder)
print("Sub: ", link, " folder: ", folder, " number: ", number)
getDataJson(link, folder, number)
##getDataJson('pp', 'mm', '06')
main(sys.argv[1:])
|
[
"os.mkdir",
"getpass.getuser",
"getopt.getopt",
"random.randint",
"os.path.exists",
"requests.get",
"datetime.datetime.now",
"sys.exit"
] |
[((1594, 1616), 'os.path.exists', 'os.path.exists', (['folder'], {}), '(folder)\n', (1608, 1616), False, 'import os\n'), ((4018, 4035), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4033, 4035), False, 'import sys, getpass, getopt, requests, random, time\n'), ((4085, 4102), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (4100, 4102), False, 'import sys, getpass, getopt, requests, random, time\n'), ((1676, 1692), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (1684, 1692), False, 'import os\n'), ((2014, 2033), 'requests.get', 'requests.get', (['image'], {}), '(image)\n', (2026, 2033), False, 'import sys, getpass, getopt, requests, random, time\n'), ((2095, 2121), 'os.path.exists', 'os.path.exists', (['file_image'], {}), '(file_image)\n', (2109, 2121), False, 'import os\n'), ((3650, 3700), 'requests.get', 'requests.get', (['url'], {'headers': "{'User-agent': 'Zbi 1'}"}), "(url, headers={'User-agent': 'Zbi 1'})\n", (3662, 3700), False, 'import sys, getpass, getopt, requests, random, time\n'), ((4134, 4210), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hs:f:n:"""', "['help', 'subReddit=', 'folder=', 'number=']"], {}), "(argv, 'hs:f:n:', ['help', 'subReddit=', 'folder=', 'number='])\n", (4147, 4210), False, 'import sys, getpass, getopt, requests, random, time\n'), ((4980, 4991), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4988, 4991), False, 'import sys, getpass, getopt, requests, random, time\n'), ((2141, 2155), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2153, 2155), False, 'from datetime import datetime\n'), ((4368, 4379), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4376, 4379), False, 'import sys, getpass, getopt, requests, random, time\n'), ((4695, 4705), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4703, 4705), False, 'import sys, getpass, getopt, requests, random, time\n'), ((2243, 2264), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (2257, 2264), False, 'import sys, getpass, getopt, requests, random, time\n')]
|
import pandas as pd
import numpy as np
from collections import namedtuple
from xbbg import const
from xbbg.io import logs, param
Session = namedtuple('Session', ['start_time', 'end_time'])
SessNA = Session(None, None)
def get_interval(ticker, session) -> Session:
"""
Get interval from defined session
Args:
ticker: ticker
session: session
Returns:
Session of start_time and end_time
Examples:
>>> get_interval('005490 KS Equity', 'day_open_30')
Session(start_time='09:00', end_time='09:30')
>>> get_interval('005490 KS Equity', 'day_normal_30_20')
Session(start_time='09:31', end_time='15:00')
>>> get_interval('005490 KS Equity', 'day_close_20')
Session(start_time='15:01', end_time='15:20')
>>> get_interval('700 HK Equity', 'am_open_30')
Session(start_time='09:30', end_time='10:00')
>>> get_interval('700 HK Equity', 'am_normal_30_30')
Session(start_time='10:01', end_time='11:30')
>>> get_interval('700 HK Equity', 'am_close_30')
Session(start_time='11:31', end_time='12:00')
>>> get_interval('ES1 Index', 'day_exact_2130_2230')
Session(start_time=None, end_time=None)
>>> get_interval('ES1 Index', 'allday_exact_2130_2230')
Session(start_time='21:30', end_time='22:30')
>>> get_interval('ES1 Index', 'allday_exact_2130_0230')
Session(start_time='21:30', end_time='02:30')
>>> get_interval('AMLP US', 'day_open_30')
Session(start_time=None, end_time=None)
>>> get_interval('7974 JP Equity', 'day_normal_180_300') is SessNA
True
>>> get_interval('Z 1 Index', 'allday_normal_30_30')
Session(start_time='01:31', end_time='20:30')
>>> get_interval('GBP Curncy', 'day')
Session(start_time='17:02', end_time='17:00')
"""
if '_' not in session:
session = f'{session}_normal_0_0'
interval = Intervals(ticker=ticker)
ss_info = session.split('_')
return getattr(interval, f'market_{ss_info.pop(1)}')(*ss_info)
def shift_time(start_time, mins) -> str:
"""
Shift start time by mins
Args:
start_time: start time in terms of HH:MM string
mins: number of minutes (+ / -)
Returns:
end time in terms of HH:MM string
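    Examples:
        >>> shift_time('09:30', 30)
        '10:00'
        >>> shift_time('09:30', -30)
        '09:00'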
"""
s_time = pd.Timestamp(start_time)
e_time = s_time + np.sign(mins) * pd.Timedelta(f'00:{abs(mins)}:00')
return e_time.strftime('%H:%M')
class Intervals(object):
def __init__(self, ticker):
"""
Args:
ticker: ticker
"""
self.ticker = ticker
self.exch = const.exch_info(ticker=ticker)
def market_open(self, session, mins) -> Session:
"""
Time intervals for market open
Args:
session: [allday, day, am, pm, night]
            mins: minutes after open
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
start_time = self.exch[session][0]
return Session(start_time, shift_time(start_time, int(mins)))
def market_close(self, session, mins) -> Session:
"""
Time intervals for market close
Args:
session: [allday, day, am, pm, night]
            mins: minutes before close
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
end_time = self.exch[session][-1]
return Session(shift_time(end_time, -int(mins) + 1), end_time)
def market_normal(self, session, after_open, before_close) -> Session:
"""
        Time intervals within the market session, excluding the given
        buffers after the open and before the close
Args:
session: [allday, day, am, pm, night]
after_open: mins after open
before_close: mins before close
Returns:
Session of start_time and end_time
"""
logger = logs.get_logger(self.market_normal)
if session not in self.exch: return SessNA
ss = self.exch[session]
s_time = shift_time(ss[0], int(after_open) + 1)
e_time = shift_time(ss[-1], -int(before_close))
request_cross = pd.Timestamp(s_time) >= pd.Timestamp(e_time)
session_cross = pd.Timestamp(ss[0]) >= pd.Timestamp(ss[1])
if request_cross and (not session_cross):
logger.warning(f'end time {e_time} is earlier than {s_time} ...')
return SessNA
return Session(s_time, e_time)
def market_exact(self, session, start_time: str, end_time: str) -> Session:
"""
Explicitly specify start time and end time
Args:
session: predefined session
start_time: start time in terms of HHMM string
end_time: end time in terms of HHMM string
Returns:
Session of start_time and end_time
"""
if session not in self.exch: return SessNA
ss = self.exch[session]
same_day = ss[0] < ss[-1]
if not start_time: s_time = ss[0]
else:
s_time = param.to_hour(start_time)
if same_day: s_time = max(s_time, ss[0])
if not end_time: e_time = ss[-1]
else:
e_time = param.to_hour(end_time)
if same_day: e_time = min(e_time, ss[-1])
if same_day and (s_time > e_time): return SessNA
return Session(start_time=s_time, end_time=e_time)
|
[
"xbbg.io.logs.get_logger",
"pandas.Timestamp",
"xbbg.io.param.to_hour",
"collections.namedtuple",
"numpy.sign",
"xbbg.const.exch_info"
] |
[((142, 191), 'collections.namedtuple', 'namedtuple', (['"""Session"""', "['start_time', 'end_time']"], {}), "('Session', ['start_time', 'end_time'])\n", (152, 191), False, 'from collections import namedtuple\n'), ((2358, 2382), 'pandas.Timestamp', 'pd.Timestamp', (['start_time'], {}), '(start_time)\n', (2370, 2382), True, 'import pandas as pd\n'), ((2666, 2696), 'xbbg.const.exch_info', 'const.exch_info', ([], {'ticker': 'ticker'}), '(ticker=ticker)\n', (2681, 2696), False, 'from xbbg import const\n'), ((3966, 4001), 'xbbg.io.logs.get_logger', 'logs.get_logger', (['self.market_normal'], {}), '(self.market_normal)\n', (3981, 4001), False, 'from xbbg.io import logs, param\n'), ((2405, 2418), 'numpy.sign', 'np.sign', (['mins'], {}), '(mins)\n', (2412, 2418), True, 'import numpy as np\n'), ((4224, 4244), 'pandas.Timestamp', 'pd.Timestamp', (['s_time'], {}), '(s_time)\n', (4236, 4244), True, 'import pandas as pd\n'), ((4248, 4268), 'pandas.Timestamp', 'pd.Timestamp', (['e_time'], {}), '(e_time)\n', (4260, 4268), True, 'import pandas as pd\n'), ((4293, 4312), 'pandas.Timestamp', 'pd.Timestamp', (['ss[0]'], {}), '(ss[0])\n', (4305, 4312), True, 'import pandas as pd\n'), ((4316, 4335), 'pandas.Timestamp', 'pd.Timestamp', (['ss[1]'], {}), '(ss[1])\n', (4328, 4335), True, 'import pandas as pd\n'), ((5116, 5141), 'xbbg.io.param.to_hour', 'param.to_hour', (['start_time'], {}), '(start_time)\n', (5129, 5141), False, 'from xbbg.io import logs, param\n'), ((5272, 5295), 'xbbg.io.param.to_hour', 'param.to_hour', (['end_time'], {}), '(end_time)\n', (5285, 5295), False, 'from xbbg.io import logs, param\n')]
|
# -*- coding: utf-8 -*-
"""
Decorators for tracing and timing function calls, plus a no-op jit stand-in.
"""
import time
from functools import wraps
__all__ = [
"indicator_enter_leave_func",
"trivial_jit",
]
def indicator_enter_leave_func(verbose:int=0):
"""
"""
def dec_outer(fn:callable):
@wraps(fn)
def dec_inner(*args, **kwargs):
if verbose >= 1:
print("\n"+"*"*10+" entering function {} ".format(fn.__name__)+"*"*10)
start = time.time()
response = fn(*args, **kwargs)
if verbose >= 1:
print("\n"+"*"*10+" execution of function {} used {} second(s) ".format(fn.__name__, time.time()-start)+"*"*10)
print("\n"+"*"*10+" leaving function {} ".format(fn.__name__)+"*"*10+"\n")
return response
return dec_inner
return dec_outer
def trivial_jit(signature_or_function=None, locals={}, target='cpu', cache=False, pipeline_class=None, **options):
"""
"""
def dec(fn:callable):
return fn
return dec
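# A minimal usage sketch (hypothetical function):
#
#     @indicator_enter_leave_func(verbose=1)
#     def work(x):
#         return x * 2
#
#     work(21)  # prints enter / timing / leave banners around the call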
|
[
"functools.wraps"
] |
[((256, 265), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (261, 265), False, 'from functools import wraps\n')]
|
from custom import Airbyte, Dbt, Superset
from diagrams import Diagram
from diagrams.aws.security import SecretsManager
from diagrams.aws.storage import S3
from diagrams.onprem.database import Postgresql
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.workflow import Airflow
with Diagram(
filename="kind-data-platform",
show=False,
graph_attr={
"bgcolor": "#272935", # snazzy theme
"dpi": "48.0",
"pad": "0.5",
},
edge_attr={
"color": "#eff0ea", # snazzy theme
},
):
airbyte = Airbyte()
airflow = Airflow()
dbt = Dbt()
postgresql = Postgresql()
grafana = Grafana()
prometheus = Prometheus()
superset = Superset()
s3 = S3()
secretsmanager = SecretsManager()
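    # ">>" draws a directed edge; pointing at a list fans the edge out to
    # every node in it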
airflow >> airbyte >> [s3, postgresql]
airflow >> dbt >> postgresql
airflow >> secretsmanager
grafana >> prometheus
superset >> postgresql
prometheus >> airflow
|
[
"diagrams.onprem.workflow.Airflow",
"diagrams.onprem.monitoring.Prometheus",
"diagrams.aws.security.SecretsManager",
"custom.Dbt",
"custom.Superset",
"diagrams.aws.storage.S3",
"custom.Airbyte",
"diagrams.Diagram",
"diagrams.onprem.database.Postgresql",
"diagrams.onprem.monitoring.Grafana"
] |
[((315, 465), 'diagrams.Diagram', 'Diagram', ([], {'filename': '"""kind-data-platform"""', 'show': '(False)', 'graph_attr': "{'bgcolor': '#272935', 'dpi': '48.0', 'pad': '0.5'}", 'edge_attr': "{'color': '#eff0ea'}"}), "(filename='kind-data-platform', show=False, graph_attr={'bgcolor':\n '#272935', 'dpi': '48.0', 'pad': '0.5'}, edge_attr={'color': '#eff0ea'})\n", (322, 465), False, 'from diagrams import Diagram\n'), ((574, 583), 'custom.Airbyte', 'Airbyte', ([], {}), '()\n', (581, 583), False, 'from custom import Airbyte, Dbt, Superset\n'), ((598, 607), 'diagrams.onprem.workflow.Airflow', 'Airflow', ([], {}), '()\n', (605, 607), False, 'from diagrams.onprem.workflow import Airflow\n'), ((618, 623), 'custom.Dbt', 'Dbt', ([], {}), '()\n', (621, 623), False, 'from custom import Airbyte, Dbt, Superset\n'), ((641, 653), 'diagrams.onprem.database.Postgresql', 'Postgresql', ([], {}), '()\n', (651, 653), False, 'from diagrams.onprem.database import Postgresql\n'), ((668, 677), 'diagrams.onprem.monitoring.Grafana', 'Grafana', ([], {}), '()\n', (675, 677), False, 'from diagrams.onprem.monitoring import Grafana, Prometheus\n'), ((695, 707), 'diagrams.onprem.monitoring.Prometheus', 'Prometheus', ([], {}), '()\n', (705, 707), False, 'from diagrams.onprem.monitoring import Grafana, Prometheus\n'), ((723, 733), 'custom.Superset', 'Superset', ([], {}), '()\n', (731, 733), False, 'from custom import Airbyte, Dbt, Superset\n'), ((743, 747), 'diagrams.aws.storage.S3', 'S3', ([], {}), '()\n', (745, 747), False, 'from diagrams.aws.storage import S3\n'), ((769, 785), 'diagrams.aws.security.SecretsManager', 'SecretsManager', ([], {}), '()\n', (783, 785), False, 'from diagrams.aws.security import SecretsManager\n')]
|
#system modules
from os import path
# dynamic load modules
from os import listdir
from imp import load_source
# local modules
from mat.utils.utils import Utils, Log
from mat.utils import settings
class CordovaAnalysis(object):
LATEST_VERSION_URL = 'https://dist.apache.org/repos/dist/release/cordova/platforms/'
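    # known-good defaults; prepare_analysis() refreshes these from the
    # Apache dist listing at runtime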
LATEST_VERSION = {
'ios': '4.4.0',
'android': '6.2.3',
}
LOCATIONS = {
'config': ['config.xml', 'res/xml/config.xml'],
'cordova': ['cordova.js', 'assets/www/cordova.js'],
'www': ['www', 'assets/www']
}
def __init__(self, root=None, data=None, atype=None, config=None, cordova=None):
self.ASSESSMENT_TYPE = atype
self.ROOT = root
self.CONFIG_FILE = config
self.CORDOVA_FILE = cordova
if self.ROOT and not self.CONFIG_FILE:
for location in CordovaAnalysis.LOCATIONS['config']:
if path.exists('{root}/{loc}'.format(root=self.ROOT, loc=location)):
self.CONFIG_FILE = '{root}/{loc}'.format(root=self.ROOT, loc=location)
break
if self.ROOT and not self.CORDOVA_FILE:
for location in CordovaAnalysis.LOCATIONS['cordova']:
if path.exists('{root}/{loc}'.format(root=self.ROOT, loc=location)):
self.CORDOVA_FILE = '{root}/{loc}'.format(root=self.ROOT, loc=location)
break
if not self.CORDOVA_FILE and data:
self.CORDOVA_FILE = Utils.run('find {data} -name cordova.js'.format(data=data))[0].split('\n')[0].strip()
if not self.CONFIG_FILE and self.ROOT:
self.CONFIG_FILE = Utils.run('find {root} -name config.xml'.format(root=self.ROOT))[0].split('\n')[0].strip()
Log.d('Root: {fpath}'.format(fpath=self.ROOT))
Log.d('cordova.js: {fpath}'.format(fpath=self.CORDOVA_FILE))
Log.d('config.xml: {fpath}'.format(fpath=self.CONFIG_FILE))
def found(self):
return self.CONFIG_FILE or self.CORDOVA_FILE
def prepare_analysis(self):
Log.w('Getting latest cordova versions')
import urllib2
response = urllib2.urlopen(CordovaAnalysis.LATEST_VERSION_URL)
html = response.read()
for os in CordovaAnalysis.LATEST_VERSION:
self.LATEST_VERSION[os] = html.split('-{os}-'.format(os=os))[1].rsplit('.', 1)[0]
def get_custom_modules(self, modules_types=['modules/cordova/static', 'modules/cordova/dynamic']):
found_modules = []
for module_type in modules_types:
modules = [m.replace('.py', '') for m in listdir('{local}/{type}'.format(local=settings.LOCAL_SETTINGS, type=module_type)) if not m.endswith('.pyc')]
for m in modules:
found_modules += [load_source(m, '{local}/{type}/{check}.py'.format(local=settings.LOCAL_SETTINGS, type=module_type, check=m))]
return found_modules
def _run_custom_modules(self, module_type):
issues = []
modules = self.get_custom_modules([module_type])
for m in modules:
Log.d('Running Static {check}'.format(check=m.__name__))
issue = m.Issue(self)
if issue.dependencies():
issue.run()
else:
Log.e('Error: Dependencies not met.')
if issue.REPORT:
issues += [issue]
return issues
def _run_custom_static_analysis(self):
module_type = 'modules/cordova/static'
return self._run_custom_modules(module_type)
def run_analysis(self):
Log.w('Starting Analysis.')
self.prepare_analysis()
if not self.CONFIG_FILE and not self.CORDOVA_FILE:
Log.w('No cordova files found.')
return []
issues = []
import mat.modules.cordova.static
static_checks = [m.replace('.py', '') for m in listdir(mat.modules.cordova.static.__path__[0]) if not m.endswith('.pyc') and not m.startswith('__')]
for check in static_checks:
Log.d('Running Static {check}'.format(check=check))
check_module = __import__('mat.modules.cordova.static.{check}'.format(check=check), fromlist=['Issue'])
issue = check_module.Issue(self)
if issue.dependencies():
issue.run()
else:
Log.e('Error: Dependencies not met.')
if issue.REPORT:
issues += [issue]
issues += self._run_custom_static_analysis()
return issues
|
[
"os.listdir",
"mat.utils.utils.Log.e",
"urllib2.urlopen",
"mat.utils.utils.Log.w"
] |
[((2111, 2151), 'mat.utils.utils.Log.w', 'Log.w', (['"""Getting latest cordova versions"""'], {}), "('Getting latest cordova versions')\n", (2116, 2151), False, 'from mat.utils.utils import Utils, Log\n'), ((2195, 2246), 'urllib2.urlopen', 'urllib2.urlopen', (['CordovaAnalysis.LATEST_VERSION_URL'], {}), '(CordovaAnalysis.LATEST_VERSION_URL)\n', (2210, 2246), False, 'import urllib2\n'), ((3619, 3646), 'mat.utils.utils.Log.w', 'Log.w', (['"""Starting Analysis."""'], {}), "('Starting Analysis.')\n", (3624, 3646), False, 'from mat.utils.utils import Utils, Log\n'), ((3751, 3783), 'mat.utils.utils.Log.w', 'Log.w', (['"""No cordova files found."""'], {}), "('No cordova files found.')\n", (3756, 3783), False, 'from mat.utils.utils import Utils, Log\n'), ((3314, 3351), 'mat.utils.utils.Log.e', 'Log.e', (['"""Error: Dependencies not met."""'], {}), "('Error: Dependencies not met.')\n", (3319, 3351), False, 'from mat.utils.utils import Utils, Log\n'), ((3925, 3972), 'os.listdir', 'listdir', (['mat.modules.cordova.static.__path__[0]'], {}), '(mat.modules.cordova.static.__path__[0])\n', (3932, 3972), False, 'from os import listdir\n'), ((4388, 4425), 'mat.utils.utils.Log.e', 'Log.e', (['"""Error: Dependencies not met."""'], {}), "('Error: Dependencies not met.')\n", (4393, 4425), False, 'from mat.utils.utils import Utils, Log\n')]
|
#!/usr/bin/python
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
import heapq
import itertools
def _safe_coerce(cls):
def _do_safe_coerce(value):
if isinstance(value, (str, dict)):
return cls((value,))
try:
return cls(value)
except TypeError:
return cls((value,))
    _do_safe_coerce.__doc__ = """Takes a value which may be a single object, or an iterable
and returns the content wrapped in a {0}. In the case of strings,
and dictionaries the original string object is returned in a {0},
and not as a {0} of chars. A TypeError is raised if this is not
possible (e.g. dict in frozenset).""".format(
cls.__name__
)
_do_safe_coerce.__name__ = "safe_coerce_to_{0}".format(cls.__name__)
return _do_safe_coerce
safe_coerce_to_tuple = _safe_coerce(tuple)
safe_coerce_to_frozenset = _safe_coerce(frozenset)
def try_cast(value, cast_to):
try:
return cast_to(value)
except (ValueError, TypeError):
return value
def set_in(dictionary, keys, value):
"""Traverses a set of nested dictionaries using the given keys,
and assigns the specified value to the inner-most
dictionary (obtained from the second-to-last key), using
    the last key in keys. Thus calling set_in(d, [X, Y, Z], v)
is equivalent to calling
d.setdefault(X, {}).setdefault(Y, {})[Z] = v
Behavior on non-dictionaries is undefined."""
keys = list(keys)
if not keys:
raise ValueError("No keys passed to 'set_in'!")
for key in keys[:-1]:
try:
dictionary = dictionary[key]
except KeyError:
new_dict = {}
dictionary[key] = new_dict
dictionary = new_dict
dictionary[keys[-1]] = value
def get_in(dictionary, keys, default=None):
"""Traverses a set of nested dictionaries using the keys in
    'keys', and returns the value assigned to the final key
in the innermost dictionary. Calling get_in(d, [X, Y])
is equivalent to calling d.get(X).get(Y), with the
difference that any missing keys causes the default value
to be returned.
    Behavior on non-dictionaries is undefined."""
keys = list(keys)
for key in keys[:-1]:
try:
dictionary = dictionary[key]
except KeyError:
return default
return dictionary.get(keys[-1], default)
def split_before(iterable, pred):
"""Takes a sequence and splits it before every value where pred(v) is true.
    Thus split_before(range(10), pred = lambda x: x % 2 == 0) would return the
    sequence [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]"""
items = []
for value in iterable:
if pred(value) and items:
yield items
items = []
items.append(value)
if items:
yield items
# Copied from the Python 'itertools' module documentation
def grouper(size, iterable, fillvalue=None):
"grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * size
return itertools.zip_longest(fillvalue=fillvalue, *args)
def group_by_pred(pred, iterable):
"""Splits items in a sequence into two lists, one containing
items matching the predicate, and another containing those that
do not."""
is_true, is_false = [], []
for item in iterable:
if pred(item):
is_true.append(item)
else:
is_false.append(item)
return is_true, is_false
def fragment(size, lstlike):
"""Faster alternative to grouper for lists/strings."""
return (lstlike[i : i + size] for i in range(0, len(lstlike), size))
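# Illustrative usage (added for clarity, not part of the original module):
#   list(fragment(3, "ABCDEFG"))  # -> ['ABC', 'DEF', 'G']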
def cumsum(lst, initial=0):
"""Yields the cummulative sums of the values in a
iterable, starting with the specified initial value."""
for item in lst:
initial += item
yield initial
def fill_dict(destination, source):
"""Returns a copy of 'destination' after setting missing key-
pairs with copies of those of 'source' recursively."""
if not isinstance(destination, dict) or not isinstance(source, dict):
raise TypeError("Non-dictionary parameters in 'fill_dict'")
def _fill_dict(cur_dest, cur_src):
for key in cur_src:
if isinstance(cur_src[key], dict) and isinstance(cur_dest.get(key), dict):
_fill_dict(cur_dest[key], cur_src[key])
elif key not in cur_dest:
cur_dest[key] = cur_src[key]
return cur_dest
return _fill_dict(copy.deepcopy(destination), copy.deepcopy(source))
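# Illustrative usage (added for clarity, not part of the original module):
# keys missing from 'destination' are copied from 'source' recursively,
# while keys already present in 'destination' are left untouched.
#   fill_dict({"a": 1}, {"a": 2, "b": {"c": 3}})
#   # -> {"a": 1, "b": {"c": 3}}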
def chain_sorted(*sequences, **kwargs):
"""Chains together sorted sequences, and yields the contents
in the same order, such that the result is also a sorted sequence.
The function accepts a 'key'-function keyword, following sort().
chain_sorted is intended for a few long sequences, and not many short
sequences. Behavior is undefined if the sequences are not sorted.
Example:
>>> tuple(chain_sorted((1, 3, 5), (0, 2, 4)))
(0, 1, 2, 3, 4, 5)
"""
key = kwargs.pop("key", None)
if kwargs:
raise TypeError(
"chain_sorted expected keyword 'key', got %r" % (", ".join(kwargs))
)
iterators = []
for index, sequence_iter in enumerate(map(iter, sequences)):
try:
current = next(sequence_iter)
key_value = current if key is None else key(current)
iterators.append((key_value, index, current, sequence_iter))
except StopIteration:
pass
heapq.heapify(iterators)
_len, _heappop, _heapreplace = len, heapq.heappop, heapq.heapreplace
while _len(iterators) > 1:
last_key_value, index, current, sequence_iter = iterators[0]
yield current
for current in sequence_iter:
key_value = current if key is None else key(current)
# Optimization for runs of repeated values
if key_value != last_key_value:
_heapreplace(iterators, (key_value, index, current, sequence_iter))
break
else:
yield current
else:
# No items remaining in top iterator
_heappop(iterators)
if _len(iterators) == 1:
_, _, current, sequence_iter = iterators[0]
yield current
for current in sequence_iter:
yield current
class Immutable:
"""Mixin implementing a immutable class; member variables are specified in
the init function, cannot be changed afterwards; note that this does not
prevent changes to the member variables themselves (if not immutable)."""
def __init__(self, **kwargs):
object.__init__(self)
for (key, value) in kwargs.items():
object.__setattr__(self, key, value)
def __setattr__(self, _name, _value):
raise NotImplementedError("Object is immutable")
def __delattr__(self, _name):
raise NotImplementedError("Object is immutable")
class TotallyOrdered:
"""Mixin implementing a rich-comparison interface, provided
that the subclass implements the less-than operator (__lt__).
The __lt__ function should return NotImplemented if the other
object is not the same type.
The implementation assumes total order:
http://en.wikipedia.org/wiki/Total_order
"""
def __lt__(self, other):
raise NotImplementedError("__lt__ must be implemented!")
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return not ((self < other) or (other < self))
def __ne__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return not (self == other)
def __le__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return not (other < self)
def __ge__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return not (self < other)
def __gt__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return other < self
# Shut up warning; if hashable, then the subclass will have
# to implement the __hash__ member function.
__hash__ = None
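# Minimal illustrative subclass (added for clarity, not part of the original
# module): only __lt__ needs to be implemented; the mixin derives the
# remaining rich comparisons from it.
class _ExampleVersion(TotallyOrdered):
    def __init__(self, number):
        self.number = number
    def __lt__(self, other):
        if not isinstance(other, _ExampleVersion):
            return NotImplemented
        return self.number < other.number
assert _ExampleVersion(1) < _ExampleVersion(2)
assert _ExampleVersion(2) >= _ExampleVersion(2)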
|
[
"copy.deepcopy",
"itertools.zip_longest",
"heapq.heapify"
] |
[((4156, 4205), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (4177, 4205), False, 'import itertools\n'), ((6639, 6663), 'heapq.heapify', 'heapq.heapify', (['iterators'], {}), '(iterators)\n', (6652, 6663), False, 'import heapq\n'), ((5602, 5628), 'copy.deepcopy', 'copy.deepcopy', (['destination'], {}), '(destination)\n', (5615, 5628), False, 'import copy\n'), ((5630, 5651), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (5643, 5651), False, 'import copy\n')]
|
import unittest
from vessel.preprocess import FileFeeder
from vessel.preprocess import DICOMFileIterator
class TestFileFeeder(unittest.TestCase):
def test_link_building(self):
feeder = FileFeeder('data')
self.assertEqual(feeder._patient_contours['SCD0000401'], 'SC-HF-I-5')
print(len(feeder))
def test_patient_files(self):
feeder = FileFeeder('data')
self.assertEqual(len(feeder._patient_files['SCD0000401']['dicoms']), 220)
self.assertEqual(len(feeder._patient_files['SCD0000401']['i_contours']), 18)
self.assertEqual(len(feeder._patient_files['SCD0000401']['o_contours']), 9)
    def test_simple_iterator(self):
feeder = FileFeeder('data')
for image, mask in feeder:
if mask is None:
y = 'None'
else:
y = mask.shape
print(image.shape, y)
def test_DICOMFileIterator(self):
feeder = FileFeeder('data')
itert = DICOMFileIterator(x=feeder.files(), batch_size=8)
print("Total sample: {}, batches: {}".format(len(feeder), len(itert)))
        # generate 20 batches as a smoke test
        n = 20
        while n > 0:
            batch_x, batch_y = next(itert)
            print(batch_x.shape, batch_y.shape)
            n -= 1
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"vessel.preprocess.FileFeeder"
] |
[((1330, 1345), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1343, 1345), False, 'import unittest\n'), ((198, 216), 'vessel.preprocess.FileFeeder', 'FileFeeder', (['"""data"""'], {}), "('data')\n", (208, 216), False, 'from vessel.preprocess import FileFeeder\n'), ((378, 396), 'vessel.preprocess.FileFeeder', 'FileFeeder', (['"""data"""'], {}), "('data')\n", (388, 396), False, 'from vessel.preprocess import FileFeeder\n'), ((702, 720), 'vessel.preprocess.FileFeeder', 'FileFeeder', (['"""data"""'], {}), "('data')\n", (712, 720), False, 'from vessel.preprocess import FileFeeder\n'), ((951, 969), 'vessel.preprocess.FileFeeder', 'FileFeeder', (['"""data"""'], {}), "('data')\n", (961, 969), False, 'from vessel.preprocess import FileFeeder\n')]
|
import logging
import os, re
from threading import Thread
from .utils import boolstr, walkfiles
from .items import FileItem
from .processors import ProcessSequence
from .providers import BaseProvider, register_producer, register_property
log = logging.getLogger(__name__)
class BaseProducer(BaseProvider, Thread):
"""
    All producer classes inherit from this
"""
process_sequence = None
def __init__(self, thread=False, **kwargs):
Thread.__init__(self)
BaseProvider.__init__(self, **kwargs)
self.is_thread = boolstr(thread)
self._process_sequence = ProcessSequence()
@property
def process_sequence(self):
return self._process_sequence
def add_process(self, processor):
self._process_sequence.add_process(processor)
def get_items(self):
"""
Override this method to write your own producer.
Return a list of produced items.
"""
return []
def run(self):
self.process_sequence.run(self.generator)
def list(self):
self.process_sequence.list()
@register_property('file_name', 'File name to read', str, True, "")
@register_producer('read', 'Reads file')
class Read(BaseProducer):
def __init__(self, **kwargs):
super(Read, self).__init__(**kwargs)
def generator(self):
pass
@register_property('basedir', 'Directory to monitor', str, True, "")
@register_property('recursive', 'Is monitor recursive', boolstr, False, "True")
@register_property('filter', 'RegEx filter to filenames', str, False, ".*")
@register_property('mtime', 'Filter files with modified TS', int, False, "0")
@register_property('atime', 'Filter files with accessed TS', int, False, "0")
@register_property('ctime', 'Filter files with creation TS', int, False, "0")
@register_producer('dir_mon', 'Monitors directory changes between runs')
class DirMon(BaseProducer):
"""
    Producer that recursively walks a directory structure
    and collects files to deliver to a process sequence
"""
def __init__(self, **kwargs):
super(DirMon, self).__init__(**kwargs)
def generator(self):
        if not os.path.exists(self.basedir):
            log.error("Path does not exist {0}".format(self.basedir))
            # Yielding an empty list would hand a bogus item to the process
            # sequence; simply stop the generator instead.
            return
if not self.recursive:
level = 0
else:
level = -1
for file_name in walkfiles(self.basedir, self.filter, level):
# filter file name
if FileItem.check_mtime(file_name, self.mtime) and \
FileItem.check_atime(file_name, self.atime) and \
FileItem.check_ctime(file_name, self.ctime):
yield FileItem(file_name, self.basedir)
def __repr__(self):
return "Base Dir:{0}, Recursive: {1}, Filter: {2}".format(self.basedir, self.recursive, self.filter)
|
[
"threading.Thread.__init__",
"os.path.exists",
"logging.getLogger"
] |
[((245, 272), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (262, 272), False, 'import logging\n'), ((473, 494), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (488, 494), False, 'from threading import Thread\n'), ((2191, 2219), 'os.path.exists', 'os.path.exists', (['self.basedir'], {}), '(self.basedir)\n', (2205, 2219), False, 'import os, re\n')]
|
import asyncio
import logging
from functools import update_wrapper
import inject
from click import ClickException
from mycloud.drive.filesync.progress import ProgressTracker
from mycloud.mycloudapi import MyCloudRequestExecutor
from mycloud.mycloudapi.auth import AuthMode, MyCloudAuthenticator
def authenticated(func):
def wrapper(*args, **kwargs):
@inject.params(mycloud_authenticator=MyCloudAuthenticator)
def inject_wrap(mycloud_authenticator: MyCloudAuthenticator):
logging.debug(
'Checking whether user can be authenticated for given command.')
            if mycloud_authenticator.auth_mode is None:
raise ClickException(
'Run "mycloud auth login" to authenticate yourself first, or specify a token')
else:
func(*args, **kwargs)
inject_wrap()
return update_wrapper(wrapper, func)
def async_click(func):
func = asyncio.coroutine(func)
def wrapper(*args, **kwargs):
loop = asyncio.get_event_loop()
logging.debug('Running asynchronous click action...')
return loop.run_until_complete(func(*args, **kwargs))
return update_wrapper(wrapper, func)
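# Illustrative usage (assumed, not part of the original module): stacking the
# decorators on a click command; 'authenticated' sits above 'async_click' so
# the authentication check runs before the event loop is entered.
#   @click.command()
#   @authenticated
#   @async_click
#   async def status():
#       ...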
|
[
"logging.debug",
"asyncio.get_event_loop",
"asyncio.coroutine",
"click.ClickException",
"functools.update_wrapper",
"inject.params"
] |
[((886, 915), 'functools.update_wrapper', 'update_wrapper', (['wrapper', 'func'], {}), '(wrapper, func)\n', (900, 915), False, 'from functools import update_wrapper\n'), ((952, 975), 'asyncio.coroutine', 'asyncio.coroutine', (['func'], {}), '(func)\n', (969, 975), False, 'import asyncio\n'), ((1186, 1215), 'functools.update_wrapper', 'update_wrapper', (['wrapper', 'func'], {}), '(wrapper, func)\n', (1200, 1215), False, 'from functools import update_wrapper\n'), ((367, 424), 'inject.params', 'inject.params', ([], {'mycloud_authenticator': 'MyCloudAuthenticator'}), '(mycloud_authenticator=MyCloudAuthenticator)\n', (380, 424), False, 'import inject\n'), ((1026, 1050), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1048, 1050), False, 'import asyncio\n'), ((1059, 1112), 'logging.debug', 'logging.debug', (['"""Running asynchronous click action..."""'], {}), "('Running asynchronous click action...')\n", (1072, 1112), False, 'import logging\n'), ((507, 585), 'logging.debug', 'logging.debug', (['"""Checking whether user can be authenticated for given command."""'], {}), "('Checking whether user can be authenticated for given command.')\n", (520, 585), False, 'import logging\n'), ((681, 784), 'click.ClickException', 'ClickException', (['"""Run "mycloud auth login" to authenticate yourself first, or specify a token"""'], {}), '(\n \'Run "mycloud auth login" to authenticate yourself first, or specify a token\'\n )\n', (695, 784), False, 'from click import ClickException\n')]
|
"""
Item 50: Annotate Class Attributes with __set_name__
"""
#!/usr/bin/env PYTHONHASHSEED=1234 python3
# Reproduce book environment
import random
random.seed(1234)
import logging
from pprint import pprint
from sys import stdout as STDOUT
# Write all output to a temporary directory
import atexit
import gc
import io
import os
import tempfile
TEST_DIR = tempfile.TemporaryDirectory()
atexit.register(TEST_DIR.cleanup)
# Make sure Windows processes exit cleanly
OLD_CWD = os.getcwd()
atexit.register(lambda: os.chdir(OLD_CWD))
os.chdir(TEST_DIR.name)
def close_open_files():
everything = gc.get_objects()
for obj in everything:
if isinstance(obj, io.IOBase):
obj.close()
atexit.register(close_open_files)
"""
A useful feature of metaclasses is that metaclasses allow one to modify or annotate properties of a class after
the class is defined but before it is used. For this approach we used descriptors. A descriptor class can
provide __get__ and __set__ methods.
Metaclasses allow you to modify class attributes before the class is fully defined.
Descriptors and metaclasses allow for declarative behavorior and runtime inttrospection.
Define __set_name__ on your descriptor classs to allow them to take into account their surrounding
class and property names.
Avoid memory leaks with the weakref built-in module by having descriptors store data they manipulation
directly within a class's instance dictionary.
"""
# Example 1:
class Field:
def __init__(self, name):
self.name = name
self.internal_name = '_' + self.name
def __get__(self, instance, instance_type):
if instance is None:
return self
return getattr(instance, self.internal_name, '')
def __set__(self, instance, value):
setattr(instance, self.internal_name, value)
# Example 2
class Customer:
# Class attributes
first_name = Field('first_name')
last_name = Field('last_name')
prefix = Field('prefix')
suffix = Field('suffix')
# Example 3
cust = Customer()
print(f'Before: {cust.first_name!r} {cust.__dict__}')
cust.first_name = 'Euclid'
print(f'After: {cust.first_name!r} {cust.__dict__}')
# Example 4
class Customer:
# Left side is redundant with right side
first_name = Field('first_name')
last_name = Field('last_name')
prefix = Field('prefix')
suffix = Field('suffix')
print(20*'*')
# Example 5: The above is redundant since the attribute name on the left already repeats the string passed to Field.
"""
We use a metaclass to avoid this problem. The metaclass will allow us to hook the class statement directly.
"""
class Meta(type):
def __new__(meta, name, bases, class_dict):
for key, value in class_dict.items():
if isinstance(value, Field):
value.name = key
value.internal_name = '_' + key
cls = type.__new__(meta, name, bases, class_dict)
return cls
# Example 6: Database rows should inherit from DatabaseRow, which uses Meta as its metaclass.
class DatabaseRow(metaclass=Meta):
pass
# Example 7: We adjust the Field class so that the name can be assigned when a field instance is created
class Field:
def __init__(self):
# These will be assigned by the metaclass.
self.name = None
self.internal_name = None
def __get__(self, instance, instance_type):
if instance is None:
return self
return getattr(instance, self.internal_name, '')
def __set__(self, instance, value):
setattr(instance, self.internal_name, value)
# Example 8
class BetterCustomer(DatabaseRow):
first_name = Field()
last_name = Field()
prefix = Field()
suffix = Field()
# Example 9
cust = BetterCustomer()
print(f'Before: {cust.first_name!r} {cust.__dict__}')
cust.first_name = 'Euler'
print(f'After: {cust.first_name!r} {cust.__dict__}')
# Example 10: Must inherit from DatabaseRow or code will break.
try:
class BrokenCustomer:
first_name = Field()
last_name = Field()
prefix = Field()
suffix = Field()
cust = BrokenCustomer()
cust.first_name = 'Mersenne'
except:
logging.exception('Expected')
else:
assert False
# Example 11: The solution is to use the __set_name__ special method for descriptors.
class Field:
def __init__(self):
self.name = None
self.internal_name = None
def __set_name__(self, owner, name):
# Called on class creation for each descriptor
self.name = name
self.internal_name = '_' + name
def __get__(self, instance, instance_type):
if instance is None:
return self
return getattr(instance, self.internal_name, '')
def __set__(self, instance, value):
setattr(instance, self.internal_name, value)
# Example 12: Now it works without having to inherit from a specific parent class or having to use a metaclass.
class FixedCustomer:
first_name = Field()
last_name = Field()
prefix = Field()
suffix = Field()
cust = FixedCustomer()
print(f'Before: {cust.first_name!r} {cust.__dict__}')
cust.first_name = 'Mersenne'
print(f'After: {cust.first_name!r} {cust.__dict__}')
|
[
"atexit.register",
"logging.exception",
"tempfile.TemporaryDirectory",
"os.getcwd",
"gc.get_objects",
"random.seed",
"os.chdir"
] |
[((150, 167), 'random.seed', 'random.seed', (['(1234)'], {}), '(1234)\n', (161, 167), False, 'import random\n'), ((360, 389), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (387, 389), False, 'import tempfile\n'), ((390, 423), 'atexit.register', 'atexit.register', (['TEST_DIR.cleanup'], {}), '(TEST_DIR.cleanup)\n', (405, 423), False, 'import atexit\n'), ((478, 489), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (487, 489), False, 'import os\n'), ((533, 556), 'os.chdir', 'os.chdir', (['TEST_DIR.name'], {}), '(TEST_DIR.name)\n', (541, 556), False, 'import os\n'), ((707, 740), 'atexit.register', 'atexit.register', (['close_open_files'], {}), '(close_open_files)\n', (722, 740), False, 'import atexit\n'), ((599, 615), 'gc.get_objects', 'gc.get_objects', ([], {}), '()\n', (613, 615), False, 'import gc\n'), ((514, 531), 'os.chdir', 'os.chdir', (['OLD_CWD'], {}), '(OLD_CWD)\n', (522, 531), False, 'import os\n'), ((4162, 4191), 'logging.exception', 'logging.exception', (['"""Expected"""'], {}), "('Expected')\n", (4179, 4191), False, 'import logging\n')]
|