code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from abc import ABC, abstractmethod
import numpy as np
from .activations import Identity
class Layer(ABC):
    """Abstract base for a network layer: holds shape bookkeeping and
    declares the forward/backward interface subclasses must implement.
    """

    def __init__(self, size, input_shape=(None, None)):
        """
        Params
        ------
        size : int
            number of neurons (size) of output layer
        input_shape : (int, int)
            (number of input features, number of samples) *only required for first layer*
        """
        self.size = size
        self.input_shape = input_shape
        # IN = feature count, n = sample count (may be None until compile()).
        self.IN, self.n = input_shape

    def compile(self, input_shape, output_shape):
        """Record the concrete shapes once they are known.

        Notes
        -----
        IN - number of neurons (size) of input layer
        OUT - number of neurons (size) of output layer
        n - number of samples
        """
        assert len(input_shape) == 2
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.IN, self.n = input_shape
        self.OUT = output_shape[0]

    @abstractmethod
    def forward(self, X):
        pass

    @abstractmethod
    def backward(self, dA):
        pass
class Dense(Layer):
    """Fully-connected layer: A = activation(W @ X), trained with plain SGD."""

    def __init__(self, size, input_shape=(None, None), activation=Identity):
        super().__init__(size, input_shape)
        # activation is a class; instantiate it once per layer.
        self.activation = activation()

    def compile(self, input_shape, output_shape=(1,)):
        ''' W is the weights matrix | [out x in]
        Z is Sum(w_i * x_i)     | [out x n]
        A is activation.apply(Z)| [out x n]
        '''
        super().compile(input_shape, output_shape)
        #self.W = np.random.rand(self.OUT, self.IN)
        # Xavier/Glorot-style scaled init.
        self.W = np.random.randn(self.OUT, self.IN) * np.sqrt(2 / (self.IN + self.OUT))
        # Important note: for tanh: 1/self.IN, Relu: 2/self.IN. Instead, I'm using new theory
        self.alpha = 0.1 # Place holder for optimizer

    def forward(self, X):
        '''Applies forward propagation to inputs X, i.e.
        self.Z = W * X
        self.A = a(Z)
        '''
        # X must be (features, samples) matching the compiled input shape.
        assert X.ndim == 2 and X.shape[0] == self.input_shape[0]
        self.X = X
        self.Z = np.dot(self.W, self.X)
        self.A = self.activation.apply(self.Z)
        assert self.A.shape == self.Z.shape # Sanity check
        return self.A

    def backward(self, dA):
        '''Given derivatives of next layer, adjust the weights
        Math:
        dZ = dA .* a'(Z), .* - element wise multiplication
        dW = dZ dot X.T
        dX = W.T dot dZ
        Params:
        dA := partial derivative dJ / dA
        Notes:
        dX is dA of left layer
        '''
        assert dA.shape == self.Z.shape
        dZ = dA * self.activation.derivative(self.Z, A=self.A)
        assert dZ.shape == dA.shape
        # Average the gradient over the n samples.
        dW = np.dot(dZ, self.X.transpose()) / self.n
        assert dW.shape == self.W.shape
        dX = np.dot(self.W.transpose(), dZ)
        # In-place SGD step; alpha is a fixed placeholder learning rate.
        self.W = self.W - self.alpha * dW
        return dX, dW
class Lambda(Layer):
    '''Wraps an arbitrary function as a layer.

    NOTE(review): incomplete stub — __init__ never calls super().__init__,
    compile() ignores the base-class signature, and forward() returns None.
    '''
    def __init__(self, function):
        self.function = function
    def compile(self):
        pass
    def forward(self, X):
        # TODO: should presumably apply self.function(X) — confirm intent.
        return
    def backward(self, dA):
pass | [
"numpy.sqrt",
"numpy.dot",
"numpy.random.randn"
] | [((2107, 2129), 'numpy.dot', 'np.dot', (['self.W', 'self.X'], {}), '(self.W, self.X)\n', (2113, 2129), True, 'import numpy as np\n'), ((1633, 1667), 'numpy.random.randn', 'np.random.randn', (['self.OUT', 'self.IN'], {}), '(self.OUT, self.IN)\n', (1648, 1667), True, 'import numpy as np\n'), ((1670, 1703), 'numpy.sqrt', 'np.sqrt', (['(2 / (self.IN + self.OUT))'], {}), '(2 / (self.IN + self.OUT))\n', (1677, 1703), True, 'import numpy as np\n')] |
from transliterate import translit
from parameterized import parameterized
from cinemanio.core.tests.base import BaseTestCase
class TranslitTest(BaseTestCase):
    """Checks Russian-to-Latin transliteration against expected outputs.

    NOTE(review): many expected values were anonymized to '<NAME>' in this
    dump, so those rows compare placeholder to placeholder.
    """
    @parameterized.expand([
        ('Ирония судьбы, или с легким паром!', "Ironiya sudby, ili s legkim parom!"),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('Илья', 'Ilya'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
        ('<NAME>', '<NAME>'),
    ])
    def test_ru_transliteration(self, text, result):
        # reversed=True transliterates Cyrillic -> Latin.
        self.assertEqual(translit(text, 'ru', reversed=True), result)
| [
"transliterate.translit",
"parameterized.parameterized.expand"
] | [((168, 501), 'parameterized.parameterized.expand', 'parameterized.expand', (["[('Ирония судьбы, или с легким паром!',\n 'Ironiya sudby, ili s legkim parom!'), ('<NAME>', '<NAME>'), ('<NAME>',\n '<NAME>'), ('<NAME>', '<NAME>'), ('Илья', 'Ilya'), ('<NAME>', '<NAME>'),\n ('<NAME>', '<NAME>'), ('<NAME>', '<NAME>'), ('<NAME>', '<NAME>'), (\n '<NAME>', '<NAME>'), ('<NAME>', '<NAME>')]"], {}), "([('Ирония судьбы, или с легким паром!',\n 'Ironiya sudby, ili s legkim parom!'), ('<NAME>', '<NAME>'), ('<NAME>',\n '<NAME>'), ('<NAME>', '<NAME>'), ('Илья', 'Ilya'), ('<NAME>', '<NAME>'),\n ('<NAME>', '<NAME>'), ('<NAME>', '<NAME>'), ('<NAME>', '<NAME>'), (\n '<NAME>', '<NAME>'), ('<NAME>', '<NAME>')])\n", (188, 501), False, 'from parameterized import parameterized\n'), ((658, 693), 'transliterate.translit', 'translit', (['text', '"""ru"""'], {'reversed': '(True)'}), "(text, 'ru', reversed=True)\n", (666, 693), False, 'from transliterate import translit\n')] |
__copyright__ = """
Copyright (c) 2018 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import numbers
import threading
from queue import Queue
import numpy as np
import math
import pickle
import os
class SharedNoiseTable(object):
    """A large, read-only table of Gaussian noise kept in shared memory so
    worker processes can index into it without copying."""

    def __init__(self):
        import ctypes, multiprocessing
        seed = 123
        count = 250000000 # 1 gigabyte of 32-bit numbers. Will actually sample 2 gigabytes below.
        print('Sampling {} random numbers with seed {}'.format(count, seed))
        # Backing store lives in a multiprocessing shared-memory array.
        self._shared_mem = multiprocessing.Array(ctypes.c_float, count)
        self.noise = np.ctypeslib.as_array(self._shared_mem.get_obj())
        assert self.noise.dtype == np.float32
        sharednoisetablefile = "/tmp/sharednoisetable"
        # NOTE(review): the cache-load branch is deliberately disabled
        # ("if False"); the table is always regenerated and re-saved.
        if False: #os.path.isfile(sharednoisetablefile):
            print("Loading shared noise from {}".format(sharednoisetablefile))
            with open(sharednoisetablefile, 'rb') as fh:
                self.noise[:] = pickle.load(fh)
        else:
            self.noise[:] = np.random.RandomState(seed).randn(count) # 64-bit to 32-bit conversion here
            print('Sampled {} bytes'.format(self.noise.size * 4))
            with open(sharednoisetablefile, 'wb') as fh:
                print("Saving shared noise table to {}".format(sharednoisetablefile))
                pickle.dump(self.noise, fh)

    def get(self, i, dim):
        # Return a length-dim view starting at index i (no copy).
        return self.noise[i:i + dim]

    def sample_index(self, stream, dim):
        # Random valid start index such that get(i, dim) stays in bounds.
        return stream.randint(0, len(self.noise) - dim + 1)
class ConstantSchedule(object):
    """Schedule whose value never changes."""

    def __init__(self, value):
        self._constant = value

    def value(self, **kwargs):
        """Return the constant; all keyword arguments are ignored."""
        return self._constant
class LinearSchedule(object):
    """Linearly anneal from initial_p to final_p as kwargs[field] goes
    from 0 to `schedule`; clamps at final_p afterwards."""

    def __init__(self, schedule, final_p, initial_p, field):
        self.schedule = schedule
        self.field = field
        self.final_p = final_p
        self.initial_p = initial_p

    def value(self, **kwargs):
        """Return the interpolated value for the progress in kwargs[field]."""
        assert self.field in kwargs, "Argument {} not provided to scheduler Available: {}".format(self.field, kwargs)
        progress = float(kwargs[self.field]) / self.schedule
        fraction = min(progress, 1.0)
        span = self.final_p - self.initial_p
        return self.initial_p + fraction * span
class ExponentialSchedule(object):
    """Exponentially anneal from initial_p to final_p.

    Implemented as a LinearSchedule over the logarithms of the endpoints,
    exponentiated on the way out; both endpoints must be positive.
    """

    def __init__(self, initial_p, final_p, schedule, field):
        self.initial_p = initial_p
        self.final_p = final_p
        self.schedule = schedule
        self.field = field
        # Interpolate linearly in log-space.
        self.linear = LinearSchedule(
            initial_p=math.log(self.initial_p),
            final_p=math.log(self.final_p),
            schedule=self.schedule,
            field=self.field)

    def value(self, **kwargs):
        """Return exp() of the log-space linear interpolation.

        Bug fix: self.linear is a LinearSchedule *instance*, which has no
        __call__; the old ``self.linear(**kwargs)`` raised TypeError on
        every call. The method to invoke is ``value()``.
        """
        return math.exp(self.linear.value(**kwargs))
def make_schedule(args):
    """Build a schedule from either a bare number or a config dict.

    A number yields a ConstantSchedule. A dict must carry a 'type' key
    naming a schedule class defined in this module; the remaining keys
    are passed to that class's constructor as keyword arguments.
    """
    if isinstance(args, numbers.Number):
        return ConstantSchedule(args)
    else:
        # Resolve the class by name from this module's global namespace.
        return globals()[args['type']](**{key: value for key, value in args.items() if key != 'type'})
| [
"pickle.dump",
"multiprocessing.Array",
"pickle.load",
"math.log",
"numpy.random.RandomState"
] | [((1518, 1562), 'multiprocessing.Array', 'multiprocessing.Array', (['ctypes.c_float', 'count'], {}), '(ctypes.c_float, count)\n', (1539, 1562), False, 'import ctypes, multiprocessing\n'), ((1960, 1975), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (1971, 1975), False, 'import pickle\n'), ((2320, 2347), 'pickle.dump', 'pickle.dump', (['self.noise', 'fh'], {}), '(self.noise, fh)\n', (2331, 2347), False, 'import pickle\n'), ((3472, 3496), 'math.log', 'math.log', (['self.initial_p'], {}), '(self.initial_p)\n', (3480, 3496), False, 'import math\n'), ((3522, 3544), 'math.log', 'math.log', (['self.final_p'], {}), '(self.final_p)\n', (3530, 3544), False, 'import math\n'), ((2018, 2045), 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), '(seed)\n', (2039, 2045), True, 'import numpy as np\n')] |
import pandas as pd
if __name__ == '__main__':
    # Parse an auto-sklearn run log: collect, per evaluated configuration,
    # which preprocessor / regressor / rescaling choice was made, then scan
    # for configurations that repeat the same triple.
    target_file = '/home/jarto/work/wine-quality-predictor/b87fe6d2-ab92-4d36-8946-111850523f12/AutoML(1):wine-quality.log'
    with open(target_file, 'r') as file_h:
        content = file_h.readlines()
    preprocessors = []
    regressors = []
    rescalers = []
    idx = 0  # NOTE(review): unused; shadowed by the loop below.
    for line in content:
        # Each matched line looks like "...Value: 'choice_name'".
        if 'preprocessor:__choice__' in line:
            preprocessors.append(line.split('Value: ')[1].replace("'", '').strip())
        elif 'regressor:__choice__' in line:
            regressors.append(line.split('Value: ')[1].replace("'", '').strip())
        elif 'rescaling:__choice__' in line:
            rescalers.append(line.split('Value: ')[1].replace("'", '').strip())
    # just a quick check
    df = pd.DataFrame({'preprocessor': preprocessors, 'regressor': regressors, 'rescaling': rescalers})
    # NOTE(review): O(n^2) pairwise scan that prints each duplicate pair
    # twice; df[df.duplicated(keep=False)] would do this in one pass.
    for idx, row in df.iterrows():
        for idx2, row2 in df.iterrows():
            if idx == idx2:
                continue
            if row['preprocessor'] == row2['preprocessor'] and row['regressor'] == row2['regressor'] and row['rescaling'] == row2['rescaling']:
                print(row)
                print(row2)
| [
"pandas.DataFrame"
] | [((769, 867), 'pandas.DataFrame', 'pd.DataFrame', (["{'preprocessor': preprocessors, 'regressor': regressors, 'rescaling': rescalers\n }"], {}), "({'preprocessor': preprocessors, 'regressor': regressors,\n 'rescaling': rescalers})\n", (781, 867), True, 'import pandas as pd\n')] |
from pprint import pprint
import asyncio
import netdev
import yaml
r1 = {
"device_type": "cisco_ios",
"host": "192.168.100.1",
"username": "cisco",
"password": "<PASSWORD>",
"secret": "cisco",
}
async def send_show(device, commands):
    """Run show commands on one device over SSH.

    Params:
        device: dict of netdev.create() connection parameters.
        commands: a single command string or a list of commands.
    Returns:
        dict mapping each command to its output, or None if the
        connection timed out / was dropped (errors are only printed).
    """
    result = {}
    if type(commands) == str:
        commands = [commands]
    try:
        async with netdev.create(**device) as ssh:
            for cmd in commands:
                output = await ssh.send_command(cmd)
                result[cmd] = output
        return result
    except netdev.exceptions.TimeoutError as error:
        print(error)
    except netdev.exceptions.DisconnectError as error:
        print(error)
if __name__ == "__main__":
    # Demo: run the coroutine against R1 and pretty-print per-command output.
    output = asyncio.run(send_show(r1, ["sh ip int br", "sh clock"]))
    pprint(output, width=120)
| [
"netdev.create",
"pprint.pprint"
] | [((796, 821), 'pprint.pprint', 'pprint', (['output'], {'width': '(120)'}), '(output, width=120)\n', (802, 821), False, 'from pprint import pprint\n'), ((363, 386), 'netdev.create', 'netdev.create', ([], {}), '(**device)\n', (376, 386), False, 'import netdev\n')] |
from flask import session
from werkzeug.security import generate_password_hash
from . import database
USERNAME_MAX_LEN = 15
EMAIL_MAX_LEN = 320
PASSWORD_MAX_LEN = 100
DEFAULT_RATING = 1000
MIN_RATING = 1
MAX_RATING = 3000
PUBLIC_USER_ID = 0
DRAW_USER_ID = 0
USER_SESSION = 'user_id'
def get_data_by_id(userid, fields=('*',)):
    """Retrieve one user's row by id.

    Args:
        userid: primary-key value to look up.
        fields: iterable of column names to select; defaults to all columns.
            (Consistency fix: the old default was the plain string '*',
            which only worked because ",".join('*') happens to equal '*';
            get_data_by_name already uses the tuple form, and any other
            string default would have been split character-by-character.)

    Returns:
        Whatever database.sql_exec returns for a single-row query.
    """
    query = f'SELECT {",".join(fields)} FROM users WHERE id = ? LIMIT 1'
    query_args = [userid]
    return database.sql_exec(database.DATABASE_FILE, query, query_args, False)
def get_data_by_name(username, fields=('*',), case_sensitive=False):
    """Retrieves the data of a user with the given name.

    By default the lookup is case-insensitive (compares LOWER(username)).
    """
    columns = ",".join(fields)
    if case_sensitive:
        query = (f'SELECT {columns} FROM users WHERE '
                 'username = ? LIMIT 1')
        query_args = [username]
    else:
        query = (f'SELECT {columns} FROM users WHERE '
                 'LOWER(username) = ? LIMIT 1')
        query_args = [username.lower()]
    return database.sql_exec(database.DATABASE_FILE, query, query_args, False)
def logged_in():
    """Return True if the current session has a logged-in user.

    Idiom fix: ``True if x else False`` collapsed to ``bool(x)`` —
    identical truth semantics, one evaluation.
    """
    return bool(session.get(USER_SESSION))
def create(username, password, email, rating, notifications):
    """Insert a new user row; the password is stored hashed."""
    query = ('INSERT INTO users (username, password, email, rating, '
             'notifications) VALUES(?, ?, ?, ?, ?)')
    hashed = generate_password_hash(password)
    query_args = [username, hashed, email, rating, notifications]
    database.sql_exec(database.DATABASE_FILE, query, query_args, False)
def update_settings(user_id, notify):
    """Persist a user's notification preference."""
    database.sql_exec(
        database.DATABASE_FILE,
        'UPDATE users SET notifications = ? WHERE id = ?',
        [notify, user_id],
        False,
    )
def auto_login(username):
    """Automatically logs in a user given a username.

    No password check is performed here; callers are responsible for
    having already verified the user's identity.
    """
    query = 'SELECT id FROM users WHERE username=?'
    query_args = [username]
    user_id = database.sql_exec(database.DATABASE_FILE, query, query_args,
                              False)
    # sql_exec returns a row-like mapping; store its id in the session.
    create_session(user_id['id'])
def get_logged_in_id():
    """Gets the id of the currently logged-in user.

    Raises KeyError if no user session exists — check logged_in() first.
    """
    return session[USER_SESSION]
def create_session(userid):
    """Creates a session for the given user id."""
    session[USER_SESSION] = userid
def delete_session():
    """Deletes the current user session (clears ALL session keys)."""
    session.clear()
def set_rating(rating):
    """Coerce a user-supplied starting rating to an int.

    Falls back to DEFAULT_RATING when the value is empty/None/0 or cannot
    be converted to an integer. Robustness fix: also catches TypeError so
    non-numeric objects (e.g. lists) fall back instead of raising.

    NOTE(review): MIN_RATING / MAX_RATING are defined in this module but
    never enforced here — confirm whether clamping was intended.
    """
    if not rating:
        return DEFAULT_RATING
    try:
        return int(rating)
    except (ValueError, TypeError):
        return DEFAULT_RATING
| [
"flask.session.clear",
"werkzeug.security.generate_password_hash",
"flask.session.get"
] | [((2508, 2523), 'flask.session.clear', 'session.clear', ([], {}), '()\n', (2521, 2523), False, 'from flask import session\n'), ((1186, 1211), 'flask.session.get', 'session.get', (['USER_SESSION'], {}), '(USER_SESSION)\n', (1197, 1211), False, 'from flask import session\n'), ((1485, 1517), 'werkzeug.security.generate_password_hash', 'generate_password_hash', (['password'], {}), '(password)\n', (1507, 1517), False, 'from werkzeug.security import generate_password_hash\n')] |
# Copyright 2020 Petuum, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import kubernetes_asyncio as kubernetes
from aiohttp import web
import logging
from adaptdl.sched_hints import SCHED_HINTS
from adaptdl_sched.config import get_supervisor_port
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
class Supervisor:
    """
    Supervisor provides a simple REST interface for several functionalities.
    Currently, it has two endpoints:
    1. /hints for jobs to send scheduling hints.
    2. /discover for finding the pod IPs of a job.
    """

    def __init__(self, port, host='0.0.0.0'):
        self._host = host
        self._port = port
        self._core_api = kubernetes.client.CoreV1Api()
        self._objs_api = kubernetes.client.CustomObjectsApi()

    async def _handle_healthz(self, request):
        # Health check.
        return web.Response()

    async def _handle_discover(self, request):
        # Long-polling endpoint used for discovering pod IPs for a given job.
        namespace = request.match_info["namespace"]
        name = request.match_info["name"]
        group = request.match_info["group"]
        timeout = int(request.query.get("timeout", "30"))
        pod_ip_list = None
        async with kubernetes.watch.Watch() as w:
            # Watch pods of this job; the field selector keeps only pods
            # whose podIP is already assigned (podIP != "").
            stream = w.stream(self._core_api.list_namespaced_pod, namespace,
                              label_selector="adaptdl/job={}".format(name),
                              field_selector="status.podIP!=",
                              timeout_seconds=timeout)
            async for event in stream:
                pod = event["object"]
                replicas = int(pod.metadata.annotations["adaptdl/replicas"])
                rank = int(pod.metadata.annotations["adaptdl/rank"])
                if pod.metadata.annotations["adaptdl/group"] == group:
                    if pod_ip_list is None:
                        pod_ip_list = [None] * replicas
                    pod_ip_list[rank] = pod.status.pod_ip
                    # Respond as soon as every rank has reported an IP.
                    if all(pod_ip is not None for pod_ip in pod_ip_list):
                        return web.json_response(pod_ip_list)
        return web.json_response(status=408)  # Timeout.

    async def _handle_report(self, request):
        # Receive scheduling hints from a job and patch them into its CRD.
        namespace = request.match_info['namespace']
        name = request.match_info['name']
        hints = await request.json()
        # Drop all unrecognized fields. TODO: validate each client-sent field.
        hints = {k: hints[k] for k in SCHED_HINTS if k in hints}
        # Patch only the train field to avoid conflicts with controller.
        patch = {"status": {"train": hints}}
        LOG.info("Patch AdaptDLJob %s/%s: %s", namespace, name, patch)
        await self._objs_api.patch_namespaced_custom_object_status(
            "adaptdl.petuum.com", "v1", namespace, "adaptdljobs", name, patch)
        return web.Response()

    def run(self):
        # Build the aiohttp app and block serving it.
        self.app = web.Application()
        self.app.add_routes([
            web.get('/healthz', self._handle_healthz),
            web.get('/discover/{namespace}/{name}/{group}',
                    self._handle_discover),
            web.put('/hints/{namespace}/{name}', self._handle_report),
        ])
        LOG.info("%s %s", self._host, self._port)
        web.run_app(self.app, host=self._host, port=self._port)
if __name__ == "__main__":
    logging.basicConfig()
    # Only works when running inside a cluster pod (in-cluster kubeconfig).
    kubernetes.config.load_incluster_config()
    supervisor = Supervisor(get_supervisor_port())
    supervisor.run()
| [
"logging.getLogger",
"kubernetes_asyncio.client.CustomObjectsApi",
"logging.basicConfig",
"aiohttp.web.run_app",
"kubernetes_asyncio.watch.Watch",
"aiohttp.web.put",
"aiohttp.web.Response",
"aiohttp.web.Application",
"adaptdl_sched.config.get_supervisor_port",
"kubernetes_asyncio.client.CoreV1Api"... | [((784, 811), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (801, 811), False, 'import logging\n'), ((3885, 3906), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (3904, 3906), False, 'import logging\n'), ((3911, 3952), 'kubernetes_asyncio.config.load_incluster_config', 'kubernetes.config.load_incluster_config', ([], {}), '()\n', (3950, 3952), True, 'import kubernetes_asyncio as kubernetes\n'), ((1215, 1244), 'kubernetes_asyncio.client.CoreV1Api', 'kubernetes.client.CoreV1Api', ([], {}), '()\n', (1242, 1244), True, 'import kubernetes_asyncio as kubernetes\n'), ((1270, 1306), 'kubernetes_asyncio.client.CustomObjectsApi', 'kubernetes.client.CustomObjectsApi', ([], {}), '()\n', (1304, 1306), True, 'import kubernetes_asyncio as kubernetes\n'), ((1393, 1407), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (1405, 1407), False, 'from aiohttp import web\n'), ((2681, 2710), 'aiohttp.web.json_response', 'web.json_response', ([], {'status': '(408)'}), '(status=408)\n', (2698, 2710), False, 'from aiohttp import web\n'), ((3395, 3409), 'aiohttp.web.Response', 'web.Response', ([], {}), '()\n', (3407, 3409), False, 'from aiohttp import web\n'), ((3449, 3466), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (3464, 3466), False, 'from aiohttp import web\n'), ((3796, 3851), 'aiohttp.web.run_app', 'web.run_app', (['self.app'], {'host': 'self._host', 'port': 'self._port'}), '(self.app, host=self._host, port=self._port)\n', (3807, 3851), False, 'from aiohttp import web\n'), ((3982, 4003), 'adaptdl_sched.config.get_supervisor_port', 'get_supervisor_port', ([], {}), '()\n', (4001, 4003), False, 'from adaptdl_sched.config import get_supervisor_port\n'), ((1776, 1800), 'kubernetes_asyncio.watch.Watch', 'kubernetes.watch.Watch', ([], {}), '()\n', (1798, 1800), True, 'import kubernetes_asyncio as kubernetes\n'), ((3509, 3550), 'aiohttp.web.get', 'web.get', (['"""/healthz"""', 
'self._handle_healthz'], {}), "('/healthz', self._handle_healthz)\n", (3516, 3550), False, 'from aiohttp import web\n'), ((3564, 3634), 'aiohttp.web.get', 'web.get', (['"""/discover/{namespace}/{name}/{group}"""', 'self._handle_discover'], {}), "('/discover/{namespace}/{name}/{group}', self._handle_discover)\n", (3571, 3634), False, 'from aiohttp import web\n'), ((3668, 3725), 'aiohttp.web.put', 'web.put', (['"""/hints/{namespace}/{name}"""', 'self._handle_report'], {}), "('/hints/{namespace}/{name}', self._handle_report)\n", (3675, 3725), False, 'from aiohttp import web\n'), ((2635, 2665), 'aiohttp.web.json_response', 'web.json_response', (['pod_ip_list'], {}), '(pod_ip_list)\n', (2652, 2665), False, 'from aiohttp import web\n')] |
import django
import os
import sys
sys.path.append("/home/ubuntu/edtech-backend")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'superteacher.settings')
django.setup()
from exams.models import Exam
from subjects.models import Subject
from chapters.models import Chapter
from questions.models import QuestionChapterMapping
if __name__ == '__main__':
    # Recompute the cached question_count for every chapter of the exam
    # identified by the exam code given as the first CLI argument.
    exam = Exam.objects.get(exam_code=sys.argv[1])
    subjects = Subject.objects.filter(exam=exam)
    for subject in subjects:
        chapters = Chapter.objects.filter(subject=subject)
        for chapter in chapters:
            # NOTE(review): one COUNT query per chapter (N+1 pattern); an
            # annotate(Count(...)) aggregate would do this in one query.
            chapter.question_count = QuestionChapterMapping.objects.filter(chapter=chapter).count()
            chapter.save()
| [
"os.environ.setdefault",
"django.setup",
"chapters.models.Chapter.objects.filter",
"questions.models.QuestionChapterMapping.objects.filter",
"subjects.models.Subject.objects.filter",
"exams.models.Exam.objects.get",
"sys.path.append"
] | [((36, 82), 'sys.path.append', 'sys.path.append', (['"""/home/ubuntu/edtech-backend"""'], {}), "('/home/ubuntu/edtech-backend')\n", (51, 82), False, 'import sys\n'), ((83, 155), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""superteacher.settings"""'], {}), "('DJANGO_SETTINGS_MODULE', 'superteacher.settings')\n", (104, 155), False, 'import os\n'), ((156, 170), 'django.setup', 'django.setup', ([], {}), '()\n', (168, 170), False, 'import django\n'), ((365, 404), 'exams.models.Exam.objects.get', 'Exam.objects.get', ([], {'exam_code': 'sys.argv[1]'}), '(exam_code=sys.argv[1])\n', (381, 404), False, 'from exams.models import Exam\n'), ((420, 453), 'subjects.models.Subject.objects.filter', 'Subject.objects.filter', ([], {'exam': 'exam'}), '(exam=exam)\n', (442, 453), False, 'from subjects.models import Subject\n'), ((503, 542), 'chapters.models.Chapter.objects.filter', 'Chapter.objects.filter', ([], {'subject': 'subject'}), '(subject=subject)\n', (525, 542), False, 'from chapters.models import Chapter\n'), ((613, 667), 'questions.models.QuestionChapterMapping.objects.filter', 'QuestionChapterMapping.objects.filter', ([], {'chapter': 'chapter'}), '(chapter=chapter)\n', (650, 667), False, 'from questions.models import QuestionChapterMapping\n')] |
import os
import metricbeat
import unittest
import time
class Test(metricbeat.BaseTest):
    """Integration tests for the Metricbeat logstash module.

    Skipped entirely unless INTEGRATION_TESTS is enabled; expects the
    'logstash' docker-compose service to be running.
    """
    COMPOSE_SERVICES = ['logstash']
    FIELDS = ['logstash']

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_node(self):
        """
        logstash node metricset test
        """
        self.check_metricset("logstash", "node", self.get_hosts(), self.FIELDS + ["process"])

    @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test")
    def test_node_stats(self):
        """
        logstash node_stats metricset test
        """
        self.check_metricset("logstash", "node_stats", self.get_hosts(), self.FIELDS)
| [
"unittest.skipUnless"
] | [((160, 229), 'unittest.skipUnless', 'unittest.skipUnless', (['metricbeat.INTEGRATION_TESTS', '"""integration test"""'], {}), "(metricbeat.INTEGRATION_TESTS, 'integration test')\n", (179, 229), False, 'import unittest\n'), ((416, 485), 'unittest.skipUnless', 'unittest.skipUnless', (['metricbeat.INTEGRATION_TESTS', '"""integration test"""'], {}), "(metricbeat.INTEGRATION_TESTS, 'integration test')\n", (435, 485), False, 'import unittest\n')] |
import base64
import json
import os
from BeautifulSoup import BeautifulSoup
import requests
import local_settings
import utils
NATIONAL_BRACKET = ('http://games.espn.com/tournament-challenge-bracket/'
'2017/en/entry?entryID=115703')
HTML_FILENAME = os.path.join(
local_settings.YEAR,
base64.b64encode(NATIONAL_BRACKET) + '.html')
SLOT_KEY = 'data-slotindex'
def get_national_bracket():
    """Return the national-bracket HTML, downloading and caching it on
    first use at HTML_FILENAME.

    NOTE(review): response.content is written in text mode ('w'); this is
    Python-2-era code (BeautifulSoup 3 import) — would need 'wb' on py3.
    """
    if not os.path.exists(HTML_FILENAME):
        response = requests.get(NATIONAL_BRACKET)
        with open(HTML_FILENAME, 'w') as fh:
            fh.write(response.content)
        response.close()
    with open(HTML_FILENAME, 'r') as fh:
        return fh.read()
def get_team_info(data_tag):
    """Returns ({teamID}, {team name}) from a node."""
    # Exactly one name <span> is expected under the tag; the unpacking
    # deliberately raises if there are zero or several.
    name_span, = data_tag.findAll('span', {'class': 'name'})
    team_name = name_span.text
    slot_id = int(data_tag[SLOT_KEY])
    # NOTE: Assumes the team ID is 1 more than the slot ID.
    team_id = slot_id + 1
    return team_id, team_name
def get_data_slot_tags(tag):
    """BeautifulSoup filter: match <div> tags carrying the slot-index attribute."""
    return tag.name == 'div' and tag.has_key(SLOT_KEY)
def parse_teams():
    """Scrape the bracket page and write the {team_id: name} map to disk."""
    bracket_html = get_national_bracket()
    soup = BeautifulSoup(bracket_html)
    data_tags = soup.findAll(get_data_slot_tags)
    # A 64-team single-elimination bracket has 127 slots total.
    assert len(data_tags) == 127
    # Slots 0-63 are the opening-round entries (the 64 teams).
    opening_round_tags = [tag for tag in data_tags
                          if int(tag[SLOT_KEY]) < 64]
    assert len(opening_round_tags) == 64
    team_info = [get_team_info(data_tag) for data_tag in
                 opening_round_tags]
    team_info = dict(set(team_info))
    with open(utils.TEAM_MAP_FILENAME, 'w') as fh:
        json.dump(team_info, fh, indent=2, sort_keys=True,
                  separators=(',', ': '))
if __name__ == '__main__':
    # Ensure the per-year output directory exists, then scrape.
    utils.prepare_directory(local_settings.YEAR)
    parse_teams()
| [
"os.path.exists",
"utils.prepare_directory",
"base64.b64encode",
"requests.get",
"BeautifulSoup.BeautifulSoup",
"json.dump"
] | [((1204, 1231), 'BeautifulSoup.BeautifulSoup', 'BeautifulSoup', (['bracket_html'], {}), '(bracket_html)\n', (1217, 1231), False, 'from BeautifulSoup import BeautifulSoup\n'), ((1778, 1822), 'utils.prepare_directory', 'utils.prepare_directory', (['local_settings.YEAR'], {}), '(local_settings.YEAR)\n', (1801, 1822), False, 'import utils\n'), ((316, 350), 'base64.b64encode', 'base64.b64encode', (['NATIONAL_BRACKET'], {}), '(NATIONAL_BRACKET)\n', (332, 350), False, 'import base64\n'), ((431, 460), 'os.path.exists', 'os.path.exists', (['HTML_FILENAME'], {}), '(HTML_FILENAME)\n', (445, 460), False, 'import os\n'), ((481, 511), 'requests.get', 'requests.get', (['NATIONAL_BRACKET'], {}), '(NATIONAL_BRACKET)\n', (493, 511), False, 'import requests\n'), ((1652, 1726), 'json.dump', 'json.dump', (['team_info', 'fh'], {'indent': '(2)', 'sort_keys': '(True)', 'separators': "(',', ': ')"}), "(team_info, fh, indent=2, sort_keys=True, separators=(',', ': '))\n", (1661, 1726), False, 'import json\n')] |
import csv
from flask import (
Blueprint, flash, redirect, render_template, request, url_for
)
bp = Blueprint('catalogue', __name__)
@bp.route('/getCategories', methods=['GET'])
def index():
    """Return all the categories present in the excel sheet.

    Bug fix: the old body branched on ``request.method == 'POST'``, but
    this route only accepts GET (plus Flask's implicit HEAD/OPTIONS), so
    that branch was unreachable dead code and has been removed.
    """
    # TODO: actually read and return the categories.
    return "the method is get"
@bp.route('/getProducts/<category>', methods=['GET'])
def delete(category):
    """Get products for the given category.

    Bug fix: the old code did ``.format(id)``, interpolating the *builtin*
    ``id`` function (its repr) instead of the ``category`` path parameter.
    """
    # gets product based on category
    return "the method is delete {}".format(category)
@bp.route('/getProductDetails/<productId>', methods=['GET'])
def getProductDetails(productId):
    # TODO: unimplemented stub — should return details for productId.
    pass
@bp.route('/getProductInventory/<productId>', methods=['GET'])
def getProductInventory(productId):
    # TODO: unimplemented stub — should return inventory for productId.
pass | [
"flask.Blueprint"
] | [((105, 137), 'flask.Blueprint', 'Blueprint', (['"""catalogue"""', '__name__'], {}), "('catalogue', __name__)\n", (114, 137), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n')] |
# -*- coding: utf-8 -*-
from copy import copy
from re import compile, escape
from templex.exceptions import KeyNotFound, MustUseString
import difflib
import sys
if sys.version_info[0] == 3:
unicode = str
def escape_to_regex(text):
    """Regex-escape *text* character by character, but leave spaces as
    plain spaces instead of escaping them."""
    pieces = [" " if ch == " " else escape(ch) for ch in text]
    return "".join(pieces)
# Matches a "{{ name }}" placeholder; group 1 captures the inner text.
DELIMETER_REGEX = compile(escape("{{") + r"(.*?)" + escape("}}"))
class TemplexMatch(object):
    """Read-only mapping over the named groups captured by a template match."""

    def __init__(self, **variables):
        self._variables = dict(variables)

    def __getitem__(self, key):
        return self._variables[key]
class Templex(object):
    """A plain-text template with {{ name }} placeholders.

    Placeholders are bound to regex patterns via with_vars(); match()
    checks a string against the assembled regex, and assert_match()
    raises an AssertionError carrying a readable diff on mismatch.
    """
    def __init__(self, template):
        if not isinstance(template, (unicode, str)) or isinstance(template, bytes):
            raise MustUseString("Must use string with templex (e.g. not bytes).")
        self._template = template
        self._variables = {}
    def with_vars(self, **variables):
        """Return a copy of this Templex with placeholder regexes bound."""
        for name, variable in variables.items():
            if not isinstance(variable, (unicode, str)):
                raise MustUseString("Must use string with templex (e.g. not bytes).")
        # Shallow copy so the original Templex keeps its own bindings.
        new_templex = copy(self)
        new_templex._variables = variables
        return new_templex
    def assert_match(self, string):
        """
        Raises informative exception when string does not match the templex.
        """
        if self.match(string) is not None:
            return
        else:
            # Rebuild the regex chunk by chunk so the failing parts can be
            # substituted back into a human-readable diff.
            is_plain_text = True
            compiled_regex = r""
            list_of_chunks = []
            list_of_unescaped_chunks = []
            # DELIMETER_REGEX.split alternates literal text and placeholder
            # names, starting with literal text.
            for chunk in DELIMETER_REGEX.split(self._template):
                if is_plain_text:
                    compiled_regex = compiled_regex + escape_to_regex(chunk)
                    list_of_chunks.append(escape_to_regex(chunk))
                    list_of_unescaped_chunks.append(chunk)
                else:
                    stripped_chunk = chunk.strip()
                    if stripped_chunk in self._variables.keys():
                        compiled_regex = compiled_regex + unicode(
                            r"(?P<{0}>{1})".format(
                                stripped_chunk,
                                self._variables[stripped_chunk],
                            )
                        )
                        list_of_chunks.append(
                            r"(?P<{0}>{1})".format(stripped_chunk, self._variables[stripped_chunk])
                        )
                        list_of_unescaped_chunks.append(r"{{ " + stripped_chunk + r" }}")
                    else:
                        raise KeyNotFound((
                            "'{0}' not found in variables. "
                            "Specify with with_vars(var=regex).\n".format(
                                stripped_chunk
                            )
                        ))
                is_plain_text = not is_plain_text
            # Substitute the matched portions of `string` back in, so the
            # diff only highlights the chunks that failed to match.
            to_diff = r""
            to_compare = string
            for chunk, unescaped_chunk in zip(list_of_chunks, list_of_unescaped_chunks):
                match = compile(chunk).search(string)
                if match is not None:
                    to_diff += to_compare[0:match.end() - match.start()]
                    to_compare = to_compare[match.end():]
                else:
                    to_diff += unescaped_chunk
            diff = ''.join(difflib.ndiff(
                string.splitlines(1),
                to_diff.splitlines(1)
            ))
            raise AssertionError(
                u"ACTUAL:\n{0}\n\nEXPECTED:\n{1}\n\nDIFF:\n{2}".format(
                    string,
                    self._template,
                    diff,
                )
            )
    def match(self, string):
        """
        Returns TemplexMatch object if there is a match or None if there isn't.
        """
        if not isinstance(string, (unicode, str)) or isinstance(string, bytes):
            raise MustUseString("Must use string with templex (e.g. not bytes).")
        is_plain_text = True
        compiled_regex = r"^"
        for chunk in DELIMETER_REGEX.split(self._template):
            if is_plain_text:
                compiled_regex = compiled_regex + escape_to_regex(chunk)
            else:
                stripped_chunk = chunk.strip()
                if stripped_chunk in self._variables.keys():
                    # Each placeholder becomes a named capture group.
                    compiled_regex = compiled_regex + unicode(
                        r"(?P<{0}>{1})".format(
                            stripped_chunk,
                            self._variables[stripped_chunk]
                        )
                    )
                else:
                    raise KeyNotFound(
                        "'{0}' not found in variables. Specify with with_vars(var=regex).".format(
                            stripped_chunk
                        )
                    )
            is_plain_text = not is_plain_text
        compiled_regex = compiled_regex + r"$"
        match_obj = compile(compiled_regex).match(string)
        return TemplexMatch(**match_obj.groupdict()) if match_obj else None
| [
"re.compile",
"re.escape",
"templex.exceptions.MustUseString",
"copy.copy"
] | [((475, 487), 're.escape', 'escape', (['"""}}"""'], {}), "('}}')\n", (481, 487), False, 'from re import compile, escape\n'), ((1203, 1213), 'copy.copy', 'copy', (['self'], {}), '(self)\n', (1207, 1213), False, 'from copy import copy\n'), ((379, 396), 're.escape', 'escape', (['character'], {}), '(character)\n', (385, 396), False, 'from re import compile, escape\n'), ((449, 461), 're.escape', 'escape', (['"""{{"""'], {}), "('{{')\n", (455, 461), False, 'from re import compile, escape\n'), ((823, 886), 'templex.exceptions.MustUseString', 'MustUseString', (['"""Must use string with templex (e.g. not bytes)."""'], {}), "('Must use string with templex (e.g. not bytes).')\n", (836, 886), False, 'from templex.exceptions import KeyNotFound, MustUseString\n'), ((4024, 4087), 'templex.exceptions.MustUseString', 'MustUseString', (['"""Must use string with templex (e.g. not bytes)."""'], {}), "('Must use string with templex (e.g. not bytes).')\n", (4037, 4087), False, 'from templex.exceptions import KeyNotFound, MustUseString\n'), ((1117, 1180), 'templex.exceptions.MustUseString', 'MustUseString', (['"""Must use string with templex (e.g. not bytes)."""'], {}), "('Must use string with templex (e.g. not bytes).')\n", (1130, 1180), False, 'from templex.exceptions import KeyNotFound, MustUseString\n'), ((5064, 5087), 're.compile', 'compile', (['compiled_regex'], {}), '(compiled_regex)\n', (5071, 5087), False, 'from re import compile, escape\n'), ((3161, 3175), 're.compile', 'compile', (['chunk'], {}), '(chunk)\n', (3168, 3175), False, 'from re import compile, escape\n')] |
import mysql.connector
from asyncore import read
from PyQt5 import uic
from PyQt5 import QtWidgets
from numpy import save
from reportlab.pdfgen import canvas
# Id of the product currently being edited (set by edit(), read by save()).
c = 0
# Connect to the MySQL product-inventory database.
con = mysql.connector.connect(
    host='localhost', database='cadastro_estoque', user='andre2', password='<PASSWORD>')
# Insert the values typed into the form fields into the SQL database.
def insert():
    """Read the form fields, resolve the checked category checkbox and
    INSERT one product row into the `produtos` table, then clear the form."""
    linha1 = formulario.lineEdit.text()    # product code
    linha2 = formulario.lineEdit_2.text()  # description
    linha3 = formulario.lineEdit_3.text()  # price
    linha4 = formulario.lineEdit_4.text()  # quantity
    # First checked box wins, mirroring the original if/elif chain.
    checkbox_categories = [
        (formulario.checkBox, "Informática"),
        (formulario.checkBox_2, "Alimentos"),
        (formulario.checkBox_3, "Eletrodomésticos"),
        (formulario.checkBox_4, "Cama, Mesa e Banho"),
        (formulario.checkBox_5, "Brinquedos"),
        (formulario.checkBox_6, "Produtos de Limpeza"),
        (formulario.checkBox_7, "Higiene Pessoal"),
    ]
    categoria = ""
    for checkbox, nome in checkbox_categories:
        if checkbox.isChecked():
            print("Item adicionado à categoria {}.".format(nome))
            categoria = nome
            break
    # Debug echo of the inserted values. Bug fix: the original printed the
    # quantity under the "Categoria" label and never printed the category.
    print("Codigo ", linha1)
    print("Descrição ", linha2)
    print("Preço ", linha3)
    print("Categoria ", categoria)
    print("Quantidade ", linha4)
    # Parameterized INSERT keeps the values properly escaped.
    cursor = con.cursor()
    query = (
        "INSERT INTO produtos (codigo, descricao, preco, categoria, quantidade) VALUES (%s,%s,%s,%s,%s)")
    dados = (str(linha1), str(linha2), str(linha3), categoria, str(linha4))
    cursor.execute(query, dados)
    con.commit()
    # Clear the text fields after each insert.
    formulario.lineEdit.setText("")
    formulario.lineEdit_2.setText("")
    formulario.lineEdit_3.setText("")
    formulario.lineEdit_4.setText("")
# Query every product row and display it in the consult window's table.
def consult():
    """Show the consult window populated with all rows of `produtos`."""
    consultar.show()
    cursor = con.cursor()
    cursor.execute(
        "SELECT codigo, descricao, preco, categoria, quantidade FROM produtos;")
    rows = cursor.fetchall()
    table = consultar.tableWidget
    table.setRowCount(len(rows))
    table.setColumnCount(5)
    for row_idx, row in enumerate(rows):
        for col_idx in range(5):
            table.setItem(
                row_idx, col_idx, QtWidgets.QTableWidgetItem(str(row[col_idx])))
# Export every registered product to a PDF file.
def export():
    """Write all product rows to cadastro_produtos.pdf using reportlab."""
    cursor = con.cursor()
    cursor.execute("SELECT * FROM produtos")
    rows = cursor.fetchall()
    pdf = canvas.Canvas("cadastro_produtos.pdf")
    pdf.setFont("Times-Bold", 20)
    pdf.drawString(200, 800, "Produtos Cadastrados:")
    pdf.setFont("Times-Bold", 12)
    # Column headers: the x position spaces the titles on the same line.
    headers = ((10, "CÓD"), (50, "PRODUTO"), (280, "PREÇO"),
               (330, "CATEGORIA"), (480, "QTD"))
    for x_pos, title in headers:
        pdf.drawString(x_pos, 750, title)
    # One data row every 15 points below the header line, same x columns.
    columns = (10, 50, 280, 330, 480)
    for row_idx, row in enumerate(rows):
        y_offset = (row_idx + 1) * 15
        for x_pos, value in zip(columns, row):
            pdf.drawString(x_pos, 750 - y_offset, str(value))
    pdf.save()
    print("Planilha gerada com sucesso.")
def delete():
    """Delete the product selected in the consult window, removing it from
    both the table widget and the database."""
    line = consultar.tableWidget.currentRow()
    consultar.tableWidget.removeRow(line)
    cursor = con.cursor()
    # NOTE(review): assumes the widget row order matches this SELECT's
    # natural order — verify after sorts/filters are added.
    cursor.execute("Select codigo FROM produtos")
    readed_data = cursor.fetchall()
    codigo = readed_data[line][0]
    print(codigo)
    cursor.close()
    cursor = con.cursor()
    # Parameterized query instead of string concatenation (avoids SQL
    # injection and quoting bugs).
    cursor.execute(
        "DELETE FROM cadastro_estoque.produtos WHERE codigo = %s;", (codigo,))
    con.commit()
    print("Item excluido da lista.")
# Edit any value stored in the DB for the selected product.
def edit():
    """Open the edit window pre-filled with the selected product's current
    values; remembers the product code in the global `c` for save()."""
    global c
    line = consultar.tableWidget.currentRow()
    cursor = con.cursor()
    cursor.execute("Select codigo FROM produtos")
    readed_data = cursor.fetchall()
    codigo = readed_data[line][0]
    cursor = con.cursor()
    # Parameterized query instead of string concatenation.
    cursor.execute("SELECT * FROM produtos WHERE codigo = %s;", (codigo,))
    produto = cursor.fetchall()
    editwindow.show()
    c = codigo  # save() updates this product
    editwindow.lineEdit.setText(str(produto[0][0]))
    editwindow.lineEdit_2.setText(str(produto[0][1]))
    editwindow.lineEdit_3.setText(str(produto[0][2]))
    editwindow.lineEdit_4.setText(str(produto[0][3]))
    editwindow.lineEdit_5.setText(str(produto[0][4]))
# Persist the values edited in the edit window.
def save():
    """UPDATE the product identified by the global `c` with the values
    currently typed into the edit window, then refresh the consult view."""
    # c holds the product code selected in edit().
    global c
    descricao = editwindow.lineEdit_2.text()
    preco = editwindow.lineEdit_3.text()
    categoria = editwindow.lineEdit_4.text()
    quantidade = editwindow.lineEdit_5.text()
    cursor = con.cursor()
    # Parameterized UPDATE: the previous str.format() version was vulnerable
    # to SQL injection through the free-text fields.
    cursor.execute(
        "UPDATE produtos SET descricao = %s, preco = %s, categoria = %s, quantidade = %s WHERE codigo = %s",
        (descricao, preco, categoria, quantidade, c))
    con.commit()
    print("Dados alterados com sucesso!")
    editwindow.close()
    consultar.close()
    consult()  # reopen the consult window with the updated data
# Application bootstrap: build the Qt app, load the three .ui windows and
# wire each button to its handler.
app = QtWidgets.QApplication([])
formulario = uic.loadUi(
    "/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/formulario.ui")
consultar = uic.loadUi(
    "/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/consultar.ui")
editwindow = uic.loadUi(
    "/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/editwindow.ui")
# Button wiring: register, consult, export, delete, edit and save handlers.
formulario.pushButton.clicked.connect(insert)
formulario.pushButton_2.clicked.connect(consult)
consultar.pushButton.clicked.connect(export)
consultar.pushButton_2.clicked.connect(delete)
consultar.pushButton_3.clicked.connect(edit)
editwindow.pushButton.clicked.connect(save)
formulario.show()
app.exec()
""" Finalmente, com o pouco tempo que eu tenho consegui finalizar essa parte, à partir daqui é que começa a parte de praticar, ainda assim, como estou iniciando tive muita dificuldade em conseguir que todas as funções funcionassem, pesquisei por conta própria, tirei dúvidas em grupos do Telegram, o GitHub me ajudou demais, várias das dúvidas e vários problemas que eu tive foram possíveis encontrar a solução lá.
A ideia agora, é criar uma tela de Login e com esse login se conectar ao banco de dados, delegando menos atribuições e deixando as funções de editar e excluir dados apenas para o administrador, além do mais, será acrescentada uma tela que será basicamente a tela de um caixa de comércio, que irá buscar o produto pelo código lido, inserir em um display e fazer a somatória dos valores de cada produto.""" | [
"PyQt5.uic.loadUi",
"PyQt5.QtWidgets.QApplication",
"reportlab.pdfgen.canvas.Canvas"
] | [((6145, 6171), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['[]'], {}), '([])\n', (6167, 6171), False, 'from PyQt5 import QtWidgets\n'), ((6185, 6275), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/formulario.ui"""'], {}), "(\n '/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/formulario.ui')\n", (6195, 6275), False, 'from PyQt5 import uic\n'), ((6288, 6377), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/consultar.ui"""'], {}), "(\n '/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/consultar.ui')\n", (6298, 6377), False, 'from PyQt5 import uic\n'), ((6391, 6481), 'PyQt5.uic.loadUi', 'uic.loadUi', (['"""/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/editwindow.ui"""'], {}), "(\n '/home/andre/Área de Trabalho/Estudos/Cadastro-de-Produtos/editwindow.ui')\n", (6401, 6481), False, 'from PyQt5 import uic\n'), ((3049, 3087), 'reportlab.pdfgen.canvas.Canvas', 'canvas.Canvas', (['"""cadastro_produtos.pdf"""'], {}), "('cadastro_produtos.pdf')\n", (3062, 3087), False, 'from reportlab.pdfgen import canvas\n')] |
import json
import os
import sys
import re
import numpy as np
from Preprocess import processed_answer
from subquestions import filter_Questions
def most_frequent(List, threshold=4):
    """Return the most common element of ``List`` if it occurs strictly more
    than ``threshold`` times, otherwise ``None``.

    ``threshold`` defaults to 4, preserving the original behavior (an answer
    is only accepted when at least five entries agree on it). Count ties are
    broken by first occurrence, matching the original linear scan.
    """
    from collections import Counter  # local import keeps module deps unchanged
    if not List:
        return None
    value, count = Counter(List).most_common(1)[0]
    return value if count > threshold else None
def count_unanswerable_vq(path):
    """Count records in the JSON file at *path* whose answer_type is
    'unanswerable'."""
    with open(path, encoding="utf-8") as json_file:
        records = json.load(json_file)
    return sum(1 for record in records
               if record["answer_type"] == "unanswerable")
def unanswerable_multiquestions_mostcommon(path,write_unanswerable_path,write_subquestion_path,final_path):
    """Filter a VQA-style JSON file down to answerable questions, generate
    sub-questions for them via filter_Questions, then keep only those whose
    processed answers have a dominant (most-frequent) answer.

    path: input JSON with records carrying answer_type / answers / image / question.
    write_unanswerable_path: intermediate file of retained records.
    write_subquestion_path: output of filter_Questions (project helper).
    final_path: final JSON of {answers, image, question, answer_type} records.
    """
    count=0
    filtered_mostcommon_count=0
    count_vq_answerable=0
    # NOTE(review): despite its name, this list collects records whose
    # answer_type is NOT "unanswerable" — confirm the intent with the author.
    unanswerable_datas=[]
    with open(path,encoding="utf-8") as json_file:
        with open(write_unanswerable_path,'w',encoding="utf-8") as json_file_w:
            datas = json.load(json_file)
            for data in datas:
                # keep answerable questions only
                if data["answer_type"]!="unanswerable":
                    count_vq_answerable+=1
                    unanswerable_datas.append(data)
            json.dump(unanswerable_datas,json_file_w)
    # Project helper: derives sub-questions from the filtered file.
    filter_Questions(write_unanswerable_path,write_subquestion_path)
    with open(write_subquestion_path,encoding="utf-8") as json_file_2:
        with open(final_path,'w',encoding="utf-8") as json_file_w2:
            datas = json.load(json_file_2)
            MostCommon_ann=[]
            for data in datas:
                tmp_ann={}
                tmp_answers=[i['answer'] for i in data["answers"]]
                # Drop the "unsuitable"/"unanswerable" sentinel answers.
                answerable_answers=list(filter(lambda x:x!="unsuitable" and x!="unsuitable image" and x!="unanswerable",tmp_answers))
                p_answerable_answers=processed_answer(answerable_answers)
                # filtering answers; keep the answer only when enough workers
                # agree on it (see most_frequent's threshold).
                most_freq_answer=most_frequent(p_answerable_answers)
                if most_freq_answer is None:
                    filtered_mostcommon_count+=1
                else:
                    count+=1
                    tmp_ann["answers"]=[most_freq_answer]
                    tmp_ann["image"]=data["image"]
                    tmp_ann["question"]=data["question"]
                    tmp_ann["answer_type"]=data["answer_type"]
                    MostCommon_ann.append(tmp_ann)
            json.dump(MostCommon_ann,json_file_w2)
    print("filtered most common count= (un_sub)-(left vq)="+str(filtered_mostcommon_count)+ " = "+str(len(datas))+"-"+str(count))
def unanswerable_MostCommon(path,mostcommonpath):
    """Write to *mostcommonpath* the answerable records of *path* that have a
    dominant (most-frequent) processed answer; print the running counts."""
    count=0
    count_vq_answerable=0
    MostCommon_ann=[]
    with open(path,encoding="utf-8") as json_file:
        with open(mostcommonpath,'w',encoding="utf-8") as json_file_w:
            datas = json.load(json_file)
            for data in datas:
                tmp_ann={}
                # keep answerable questions only
                if data["answer_type"]!="unanswerable":
                    count_vq_answerable+=1
                    tmp_answers=[i['answer'] for i in data["answers"]]
                    # Drop the "unsuitable"/"unanswerable" sentinel answers.
                    answerable_answers=list(filter(lambda x:x!="unsuitable" and x!="unsuitable image" and x!="unanswerable",tmp_answers))
                    p_answerable_answers=processed_answer(answerable_answers)
                    # filtering answers; keep the answer only when enough
                    # workers agree on it (see most_frequent's threshold).
                    most_freq_answer=most_frequent(p_answerable_answers)
                    if most_freq_answer is None:
                        pass
                    else:
                        count+=1
                        tmp_ann["answers"]=[most_freq_answer]
                        tmp_ann["image"]=data["image"]
                        tmp_ann["question"]=data["question"]
                        tmp_ann["answer_type"]=data["answer_type"]
                        MostCommon_ann.append(tmp_ann)
            json.dump(MostCommon_ann,json_file_w)
    print("count_vq_answerable: ",count_vq_answerable)
    print("count: ",count)
# Group a flat JSON list into consecutive chunks of five records.
def Random_Index(random_path,group_answers):
    """Read a JSON list from random_path, split it into chunks of 5 and
    write the list of chunks to group_answers; prints both lengths."""
    chunk_size = 5
    with open(random_path, encoding="utf-8") as source, \
            open(group_answers, 'w', encoding="utf-8") as sink:
        records = json.load(source)
        print(len(records))
        grouped = [records[start:start + chunk_size]
                   for start in range(0, len(records), chunk_size)]
        print(len(grouped))
        json.dump(grouped, sink)
if __name__ == "__main__":
"""===========================================================
Use for generating
============================================================="""
split="test"
# path=split+".json"
# mostcommonpath=split+"_most_common.json"
# Wpath=split+"_mostcommon_sub.json"
random_path=split+"_mostcommon_sub_rand.json"
# random_i_path=split+"_mostcommon_sub_rand_index.json"
group_answers=split+"_grouped.json"
# # process
# unanswerable_MostCommon(path,mostcommonpath)
# filter_Questions(mostcommonpath, Wpath, random_path,random_i_path, randomFlag=True)
Random_Index(random_path,group_answers)
"""=========================================================
Use for counting
============================================================"""
# # #2. counting unanswerable
# # counting_unanswerable_total=0
# for split in ["test","train","val"]:
# path=split+".json"
# # counting_unanswerable_total+=count_unanswerable_vq(path)
# # print("counting_unanswerable_total: ",counting_unanswerable_total)
# # 3. unanswerable->subquestions
# print(split,"\n")
# unanswerable_path = split+"_unanswerable.json"
# subquestion_path = split + "_unanswerable_subquestion.json"
# final_path = split+"_unanswerable_subquestion_mostcommon.json"
# unanswerable_multiquestions_mostcommon(path,unanswerable_path, subquestion_path,final_path)
| [
"json.load",
"Preprocess.processed_answer",
"subquestions.filter_Questions",
"json.dump"
] | [((1377, 1442), 'subquestions.filter_Questions', 'filter_Questions', (['write_unanswerable_path', 'write_subquestion_path'], {}), '(write_unanswerable_path, write_subquestion_path)\n', (1393, 1442), False, 'from subquestions import filter_Questions\n'), ((523, 543), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (532, 543), False, 'import json\n'), ((1066, 1086), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1075, 1086), False, 'import json\n'), ((1330, 1372), 'json.dump', 'json.dump', (['unanswerable_datas', 'json_file_w'], {}), '(unanswerable_datas, json_file_w)\n', (1339, 1372), False, 'import json\n'), ((1608, 1630), 'json.load', 'json.load', (['json_file_2'], {}), '(json_file_2)\n', (1617, 1630), False, 'import json\n'), ((2631, 2670), 'json.dump', 'json.dump', (['MostCommon_ann', 'json_file_w2'], {}), '(MostCommon_ann, json_file_w2)\n', (2640, 2670), False, 'import json\n'), ((3064, 3084), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3073, 3084), False, 'import json\n'), ((4233, 4271), 'json.dump', 'json.dump', (['MostCommon_ann', 'json_file_w'], {}), '(MostCommon_ann, json_file_w)\n', (4242, 4271), False, 'import json\n'), ((4582, 4605), 'json.load', 'json.load', (['json_file_gr'], {}), '(json_file_gr)\n', (4591, 4605), False, 'import json\n'), ((4748, 4778), 'json.dump', 'json.dump', (['group', 'json_file_gw'], {}), '(group, json_file_gw)\n', (4757, 4778), False, 'import json\n'), ((1963, 1999), 'Preprocess.processed_answer', 'processed_answer', (['answerable_answers'], {}), '(answerable_answers)\n', (1979, 1999), False, 'from Preprocess import processed_answer\n'), ((3541, 3577), 'Preprocess.processed_answer', 'processed_answer', (['answerable_answers'], {}), '(answerable_answers)\n', (3557, 3577), False, 'from Preprocess import processed_answer\n')] |
import unittest
from kivy3.loaders import OBJLoader
class OBJLoaderTest(unittest.TestCase):
    """Placeholder suite for kivy3's OBJLoader; no test cases written yet."""
    pass
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main"
] | [((134, 149), 'unittest.main', 'unittest.main', ([], {}), '()\n', (147, 149), False, 'import unittest\n')] |
import ast
import json
import os
import signal
import sys
import zmq
from tasks import *
# PULL socket: receives task messages from upstream producers.
context = zmq.Context()
receiver = context.socket(zmq.PULL)
receiver.bind('tcp://127.0.0.1:5588')
# PUSH socket: sends per-task status updates downstream.
sender = context.socket(zmq.PUSH)
sender.connect("tcp://127.0.0.1:5589")
# TODO: clean kill — wait until all in-flight tasks finish before exiting.
def signal_handler(signum, frame):
    """Mask further deliveries of *signum*, then terminate with exit status 0."""
    signal.signal(signum, signal.SIG_IGN)  # ignore additional signals
    raise SystemExit(0)  # same effect as sys.exit(0)
if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    # Worker loop: pull one task message, run it, report status transitions.
    while True:
        msg = receiver.recv()
        # Wire format: b"<queue> <tid> <func> <arg> <kwarg>"; maxsplit keeps
        # any spaces inside the kwarg literal intact.
        queue, tid, func, arg, kwarg = msg.split(b' ', maxsplit=4)
        # Positional args arrive comma-separated; every element stays a str.
        arg= tuple(arg.decode().split(','))
        # NOTE(review): presumably kwarg is a Python dict literal —
        # ast.literal_eval will raise on anything else; confirm the producer.
        kwargs= ast.literal_eval(kwarg.decode())
        sender.send(tid+ b' 1') # status running
        # Dispatch to the named method on Tasks (from `from tasks import *`).
        result = getattr(Tasks, func.decode())(*arg, **kwargs)
        sender.send(tid + b' 2') # status completed
| [
"signal.signal",
"zmq.Context",
"sys.exit"
] | [((100, 113), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (111, 113), False, 'import zmq\n'), ((389, 426), 'signal.signal', 'signal.signal', (['signum', 'signal.SIG_IGN'], {}), '(signum, signal.SIG_IGN)\n', (402, 426), False, 'import signal\n'), ((459, 470), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (467, 470), False, 'import sys\n'), ((505, 549), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal_handler'], {}), '(signal.SIGINT, signal_handler)\n', (518, 549), False, 'import signal\n')] |
#!/usr/bin/env python3
# Test libLF.Regex's for SL behavior using the vuln-regex-detector suite.
# This analysis can run single-node many-core.
# Import libLF
import os
import sys
import re
sys.path.append(os.path.join(os.environ['ECOSYSTEM_REGEXP_PROJECT_ROOT'], 'lib'))
import libLF
import json
import tempfile
import argparse
import traceback
##########
# Map each module registry to the language its regexes are SL-tested in.
reg2lang = {
  'npm': 'JavaScript',  # TypeScript is evaluated on a JS engine
  'crates.io': 'Rust',
  'packagist': 'PHP',
  'pypi': 'Python',
  'rubygems': 'Ruby',
  'cpan': 'Perl',
  'maven': 'Java',
  'godoc': 'Go',
}

def allSLTestLanguages():
  """All languages the SL test suite can exercise."""
  return reg2lang.values()

def remainingTestLanguages(langsAlready):
  """Languages from allSLTestLanguages() not present in langsAlready."""
  return [candidate for candidate in allSLTestLanguages()
          if candidate not in langsAlready]

def registryToSLTestLanguage(registry):
  """Language used to SL-test regexes that come from the given registry."""
  return reg2lang[registry]
##########
class MyTask(libLF.parallel.ParallelTask):
  """Parallel task: run the SL (super-linear) regex analysis for one libLF.Regex."""
  def __init__(self, regex, slTimeout, powerPumps):
    self.regex = regex
    self.slTimeout = slTimeout    # seconds before a match attempt counts as SL
    self.powerPumps = powerPumps  # pumps used to trigger power-law blowup
    self.slra = None              # populated by run()

  def run(self):
    """Run the analysis; return a libLF.SLRegexAnalysis, or the exception raised."""
    try:
      libLF.log('Working on regex: /{}/'.format(self.regex.pattern))
      # Run the analysis
      self.slra = self._testRegexForSLBehavior(self.regex)
      libLF.log('Completed regex /{}/'.format(self.regex.pattern))
      return self.slra
    except KeyboardInterrupt:
      raise
    except BaseException as err:
      # Bug fix: the original concatenated str + exception object, which
      # raises TypeError and masked the real error. Format it instead.
      libLF.log('Exception while testing regex /{}/: {}'.format(self.regex.pattern, err))
      return err

  def _testRegexForSLBehavior(self, regex):
    """Return an SLRegexAnalysis (or raise) with lang_pump2timedOut populated.

    The regex is validated in every available language, not just the ones it
    appears in; "real" vs. "what-if" results are distinguished later by
    inspecting slra.regex.
    """
    try:
      libLF.log('Testing regex: <{}>'.format(regex.pattern))
      slra = libLF.SLRegexAnalysis(regex, self.slTimeout, self.powerPumps)
      ## Query detectors
      slra.queryDetectors()
      ## Validate detector opinions in all available languages
      for lang in allSLTestLanguages():
        libLF.log('Validating detector opinions in {}'.format(lang))
        slra.validateDetectorOpinionsInLang(lang)
      return slra
    except BaseException as err:
      libLF.log('Exception while analyzing: err <{}> libLF.Regex {}'.format(err, regex.toNDJSON()))
      raise
################
def getTasks(regexFile, slTimeout, powerPumps):
  """Build one MyTask per regex parsed from regexFile."""
  taskList = [
    MyTask(rgx, slTimeout, powerPumps)
    for rgx in loadRegexFile(regexFile)
  ]
  libLF.log('Prepared {} tasks'.format(len(taskList)))
  return taskList
def loadRegexFile(regexFile):
  """Parse regexFile (NDJSON, one libLF.Regex per line) into a list of Regex.

  Blank lines are skipped; malformed lines are logged (with a traceback)
  and skipped rather than aborting the load."""
  libLF.log('Loading regexes from {}'.format(regexFile))
  regexes = []
  with open(regexFile, 'r') as inStream:
    for rawLine in inStream:
      stripped = rawLine.strip()
      if not stripped:
        continue
      try:
        parsed = libLF.Regex()
        parsed.initFromNDJSON(stripped)
        regexes.append(parsed)
      except KeyboardInterrupt:
        raise
      except BaseException as err:
        libLF.log('Exception parsing line:\n  {}\n  {}'.format(stripped, err))
        traceback.print_exc()
  libLF.log('Loaded {} regexes from {}'.format(len(regexes), regexFile))
  return regexes
################
def main(regexFile, outFile, slTimeout, powerPumps, parallelism):
  """Load regexes, run the SL analysis in parallel, emit NDJSON results to
  outFile, then log summary statistics about SL behavior across languages."""
  libLF.log('regexFile {} outFile {} slTimeout {} powerPumps {} parallelism {}' \
    .format(regexFile, outFile, slTimeout, powerPumps, parallelism))
  #### Load data
  tasks = getTasks(regexFile, slTimeout, powerPumps)
  nRegexes = len(tasks)
  #### Process data
  # CPU-bound, no rate limits
  libLF.log('Submitting to map')
  results = libLF.parallel.map(tasks, parallelism,
    libLF.parallel.RateLimitEnums.NO_RATE_LIMIT, libLF.parallel.RateLimitEnums.NO_RATE_LIMIT,
    jitter=False)
  #### Emit results
  # Tasks that raised return the exception object instead of an analysis.
  libLF.log('Writing results to {}'.format(outFile))
  nSuccesses = 0
  nExceptions = 0
  with open(outFile, 'w') as outStream:
    for slra in results:
      # Emit
      if type(slra) is libLF.SLRegexAnalysis:
        nSuccesses += 1
        outStream.write(slra.toNDJSON() + '\n')
      else:
        nExceptions += 1
  libLF.log('Successfully performed SLRegexAnalysis on {} regexes, {} exceptions'.format(nSuccesses, nExceptions))
  #### Analyze the successful SLRegexAnalysis's
  slras = [
    res
    for res in results
    if type(res) is libLF.SLRegexAnalysis
  ]
  # How many regexes exhibited SL behavior in any language?
  # TODO Must confirm whether this is testing full-match or partial-match semantics consistently,
  # or favor the most conservative behavior possible, or try both.
  slras_timedOut = list(
    filter(lambda slra: slra.everTimedOut(), slras)
  )
  libLF.log('{} of {} successful analyses timed out in some language'.format(len(slras_timedOut), len(slras)))
  # Did we find any differences in SL regex behavior across languages?
  # The answer to this is presumably always "yes" since we are including linear-time engines.
  slras_diffBehav = []
  for slra in slras:
    behaviors = set()
    for lang in allSLTestLanguages():
      behaviors.add(slra.predictedPerformanceInLang(lang))
    if len(behaviors) > 1:
      slras_diffBehav.append(slra)
  libLF.log('{} of the regexes had different performance in different languages'.format(len(slras_diffBehav)))
  # Did we find any differences in SL regex behavior across languages *for those they appeared in*?
  # This may be a more interesting metric.
  slras_diffBehav_real = []
  for slra in slras:
    behaviors = set()
    for registry in slra.regex.registriesUsedIn():
      lang = registryToSLTestLanguage(registry)
      behaviors.add(slra.predictedPerformanceInLang(lang))
    if len(behaviors) > 1:
      slras_diffBehav_real.append(slra)
  libLF.log('{} of the regexes had different performance in the languages they actually appeared in'.format(len(slras_diffBehav_real)))
#####################################################
# Parse args and invoke main().
parser = argparse.ArgumentParser(description='Test a set of libLF.Regex\'s for SL behavior. Regexes are tested in every supported language.')
parser.add_argument('--regex-file', type=str, help='In: File of libLF.Regex objects', required=True,
  dest='regexFile')
parser.add_argument('--out-file', type=str, help='Out: File of libLF.SLRegexAnalysis objects', required=True,
  dest='outFile')
parser.add_argument('--sl-timeout', type=int, help='Threshold used to determine super-linearity', required=False, default=5,
  dest='slTimeout')
parser.add_argument('--power-pumps', type=int, help='Number of pumps to trigger power-law SL behavior (e.g. quadratic)', required=False, default=500000,
  dest='powerPumps')
parser.add_argument('--parallelism', type=int, help='Maximum cores to use', required=False, default=libLF.parallel.CPUCount.CPU_BOUND,
  dest='parallelism')
args = parser.parse_args()
# Here we go!
main(args.regexFile, args.outFile, args.slTimeout, args.powerPumps, args.parallelism)
| [
"argparse.ArgumentParser",
"libLF.parallel.map",
"os.path.join",
"libLF.Regex",
"libLF.SLRegexAnalysis",
"libLF.log",
"traceback.print_exc"
] | [((6098, 6239), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test a set of libLF.Regex\'s for SL behavior. Regexes are tested in every supported language."""'}), '(description=\n "Test a set of libLF.Regex\'s for SL behavior. Regexes are tested in every supported language."\n )\n', (6121, 6239), False, 'import argparse\n'), ((207, 271), 'os.path.join', 'os.path.join', (["os.environ['ECOSYSTEM_REGEXP_PROJECT_ROOT']", '"""lib"""'], {}), "(os.environ['ECOSYSTEM_REGEXP_PROJECT_ROOT'], 'lib')\n", (219, 271), False, 'import os\n'), ((3718, 3748), 'libLF.log', 'libLF.log', (['"""Submitting to map"""'], {}), "('Submitting to map')\n", (3727, 3748), False, 'import libLF\n'), ((3761, 3908), 'libLF.parallel.map', 'libLF.parallel.map', (['tasks', 'parallelism', 'libLF.parallel.RateLimitEnums.NO_RATE_LIMIT', 'libLF.parallel.RateLimitEnums.NO_RATE_LIMIT'], {'jitter': '(False)'}), '(tasks, parallelism, libLF.parallel.RateLimitEnums.\n NO_RATE_LIMIT, libLF.parallel.RateLimitEnums.NO_RATE_LIMIT, jitter=False)\n', (3779, 3908), False, 'import libLF\n'), ((1786, 1847), 'libLF.SLRegexAnalysis', 'libLF.SLRegexAnalysis', (['regex', 'self.slTimeout', 'self.powerPumps'], {}), '(regex, self.slTimeout, self.powerPumps)\n', (1807, 1847), False, 'import libLF\n'), ((2978, 2991), 'libLF.Regex', 'libLF.Regex', ([], {}), '()\n', (2989, 2991), False, 'import libLF\n'), ((3222, 3243), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3241, 3243), False, 'import traceback\n')] |
import random
import glob
import torch
from torch import nn
import numpy as np
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from torchvision import transforms, set_image_backend
# set_image_backend('accimage')
from models.pytorch_biggan.datasets import default_loader
from pytorch_pretrained_biggan import one_hot_from_names
# Triangular weighting profile over a 512-wide axis (ramps 0->1 over 256
# samples, then 1->0); consumed by the scaled loss functions below.
alpha = np.concatenate([np.linspace(0, 1, 256), np.linspace(1, 0, 256)])
alpha = torch.from_numpy(alpha).to('cuda', dtype=torch.float32)
# Sobel kernels, shape (1, 1, 3, 3), used by sobel(). NOTE: requires CUDA.
Sx = torch.Tensor([[[[1, 0, -1], [2, 0, -2], [1, 0, -1]]]]).to('cuda', dtype=torch.float32)
# Bug fix: the vertical Sobel kernel is the spatial transpose of Sx.
# torch.transpose(Sx, 1, 0) swapped the two singleton leading dims, which
# left Sy numerically identical to Sx; dims 2 and 3 hold the 3x3 kernel.
Sy = torch.transpose(Sx, 2, 3)
data_dir = 'data/processed/vase_fragment_dataset/'

def full_img(img_id):
    """Path of the full vase image with the given id (PEP 8: def over
    assigned lambda; output string unchanged, including the double slash)."""
    return f'{data_dir}/full_{img_id}.jpg'

def frag_img(img_id, n_frag):
    """Path of fragment *n_frag* of image *img_id*."""
    return f'{data_dir}/frag_{img_id}_{n_frag}.jpg'
def vase_vector(batch_size):
    # One-hot class vector for 'vase', tiled batch_size times; consumed via
    # torch.from_numpy in BothGAN, so presumably a numpy array — confirm.
    return one_hot_from_names(['vase'], batch_size=batch_size)
def gather_ids():
    """Scan data_dir for full_<id>.jpg files; return (list of int ids,
    fragment count for the first id — assumed uniform across images)."""
    pattern = f'{data_dir}/full_*.jpg'
    img_ids = [int(path.split('_')[-1].split('.')[0])
               for path in glob.glob(pattern)]
    assert img_ids
    n_frags = len(glob.glob(f'{data_dir}/frag_{img_ids[0]}_*.jpg'))
    return img_ids, n_frags
def loss_fn_scaled_mse(x, y):
    """Squared error between x and y, weighted along the last two axes by the
    module-level triangular `alpha` profile (emphasizes the center), then
    averaged and normalized by the total element count."""
    loss = (x - y) ** 2
    # Bug fix: np.product was deprecated and removed in NumPy 2.0; np.prod
    # is the supported equivalent.
    n_terms = np.prod(loss.shape)
    # Weight the last axis (columns), then the second-to-last (rows).
    loss = torch.einsum('bcmn,n->bcm', loss, alpha)
    loss = torch.einsum('bcm,m->bc', loss, alpha)
    return torch.mean(loss) / n_terms
def loss_fn_scaled_mae(x, y):
    """Absolute error between x and y, weighted along the last two axes by the
    module-level triangular `alpha` profile, then averaged and normalized by
    the total element count."""
    loss = torch.abs(x - y)
    # Bug fix: np.product was deprecated and removed in NumPy 2.0; np.prod
    # is the supported equivalent.
    n_terms = np.prod(loss.shape)
    # Weight the last axis (columns), then the second-to-last (rows).
    loss = torch.einsum('bcmn,n->bcm', loss, alpha)
    loss = torch.einsum('bcm,m->bc', loss, alpha)
    return torch.mean(loss) / n_terms
def sobel(img):
    """Squared Sobel gradient magnitude of img, computed on the channel-summed
    grayscale image using the module-level Sx/Sy kernels (padding keeps the
    spatial size)."""
    grayscale = torch.sum(img, keepdim=True, dim=1)
    grad_x = torch.conv2d(grayscale, Sx, padding=1)
    grad_y = torch.conv2d(grayscale, Sy, padding=1)
    return grad_x ** 2 + grad_y ** 2
class FragmentDataset:
    """Yields random (fragment, full-image) tensor batches from data_dir."""
    def __init__(self):
        img_ids, n_frags = gather_ids()
        self.to_tensor = transforms.ToTensor()
        self.data_dir = data_dir
        self.img_ids = img_ids    # ids of available full images
        self.n_frags = n_frags    # fragments per image (assumed uniform)
        self.loader = default_loader
    def take(self, N, batch_size=1):
        """Generate N batches of (frags, imgs), each a (batch, C, H, W) tensor
        built from randomly chosen images and fragments."""
        for _ in range(N):
            imgs, frags = [], []
            for _ in range(batch_size):
                img_id = random.choice(self.img_ids)
                # print(img_id)
                n_frag = random.randint(0, self.n_frags-1)
                img = self.loader(full_img(img_id))
                frag = self.loader(frag_img(img_id, n_frag))
                imgs += [self.to_tensor(img).unsqueeze(0)]
                frags += [self.to_tensor(frag).unsqueeze(0)]
            imgs = torch.cat(imgs, axis=0)
            frags = torch.cat(frags, axis=0)
            yield frags, imgs
class PreGAN(nn.Module):
    """Encoder mapping a 3-channel fragment image to a 128-d latent vector
    (used by BothGAN as the BigGAN noise input)."""

    def __init__(self):
        super(PreGAN, self).__init__()
        # Two conv+pool stages, then flatten to 16*30*30 == 120*120 features.
        self.main = nn.Sequential(
            nn.Conv2d(3, 16, 3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(16, 16, 3),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(120 * 120, 128),
            nn.ReLU(),
            nn.Linear(128, 128),
            nn.ReLU(),
        )
        self.init = 'ortho'   # weight-init scheme consumed by init_weights()
        self.param_count = 0  # populated by init_weights()

    def forward(self, x):
        return self.main(x)

    def init_weights(self):
        """Re-initialize conv/linear/embedding weights per self.init and
        accumulate the number of parameters touched."""
        for module in self.modules():
            if not isinstance(module, (nn.Conv2d, nn.Linear, nn.Embedding)):
                continue
            if self.init == 'ortho':
                init.orthogonal_(module.weight)
            elif self.init == 'N02':
                init.normal_(module.weight, 0, 0.02)
            elif self.init in ['glorot', 'xavier']:
                init.xavier_uniform_(module.weight)
            else:
                print('Init style not recognized...')
            self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print('Param count for PreGAN initialized parameters: %d' % self.param_count)
class BothGAN(nn.Module):
    """PreGAN encoder chained into a BigGAN generator: fragment image ->
    latent noise -> generated vase image, with a joint Adam optimizer."""

    def __init__(self, pregan, biggan, lr=1e-4):
        super(BothGAN, self).__init__()
        self.pregan = pregan
        self.biggan = biggan
        # Fixed 'vase' class-conditioning vector, batch 1; tiled in forward().
        self.vase_vec = torch.from_numpy(vase_vector(1))
        # Attribute assignment already registers the submodules; these
        # explicit calls are redundant but harmless, kept for clarity.
        self.add_module('pregan', self.pregan)
        self.add_module('biggan', self.biggan)
        # Single optimizer over both sub-networks; constructed last so every
        # parameter is registered.
        self.optim = optim.Adam(self.parameters(), lr=lr)

    def forward(self, frag):
        noise = self.pregan(frag)
        # Tile the conditioning vector to the batch size of the noise.
        vase_vec = torch.cat([self.vase_vec] * noise.shape[0], dim=0)
        return self.biggan(noise, vase_vec, 1.0)

    def to(self, *args, **kwargs):
        super().to(*args, **kwargs)
        # vase_vec is a plain tensor attribute, so move it manually.
        self.vase_vec = self.vase_vec.to(*args, **kwargs)
        # Bug fix: nn.Module.to returns self; the original returned None,
        # breaking the common `model = model.to(device)` idiom.
        return self
class View(nn.Module):
    """Reshape incoming tensors to a fixed shape (usable inside nn.Sequential)."""

    def __init__(self, shape):
        super().__init__()
        self.shape = shape  # target shape, e.g. (-1, 4, 32, 32)

    def forward(self, x):
        return x.view(*self.shape)
class ScratchGAN(nn.Module):
    """From-scratch generator: flattens a 3x128x128 input, projects it to a
    4x32x32 feature map, then upsamples x16 through conv blocks to a
    3x512x512 image. Bundles its own Adam optimizer in self.optim."""

    def __init__(self):
        super(ScratchGAN, self).__init__()
        self.main = nn.Sequential(
            nn.Flatten(),
            nn.Linear(128 * 128 * 3, 32 * 32 * 4),
            View((-1, 4, 32, 32)),
            nn.Conv2d(4, 4, 3, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(4, 4, 3, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(4, 4, 3, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(4, 3, 3, padding=1),
            nn.ReLU(),
            nn.Upsample(scale_factor=2),
            nn.Conv2d(3, 3, 3, padding=1),
        )
        self.init = 'ortho'   # scheme consumed by init_weights()
        self.param_count = 0  # populated by init_weights()
        # Optimizer constructed last, after all submodules are registered.
        self.optim = optim.Adam(self.parameters())

    def forward(self, x):
        return self.main(x).view(-1, 3, 512, 512)

    def init_weights(self):
        """Re-initialize conv/linear/embedding weights per self.init and
        accumulate the number of parameters touched."""
        for module in self.modules():
            if not isinstance(module, (nn.Conv2d, nn.Linear, nn.Embedding)):
                continue
            if self.init == 'ortho':
                init.orthogonal_(module.weight)
            elif self.init == 'N02':
                init.normal_(module.weight, 0, 0.02)
            elif self.init in ['glorot', 'xavier']:
                init.xavier_uniform_(module.weight)
            else:
                print('Init style not recognized...')
            self.param_count += sum([p.data.nelement() for p in module.parameters()])
        # Message string kept verbatim (it says "PreGAN" in the original).
        print('Param count for PreGAN initialized parameters: %d' % self.param_count)
| [
"numpy.product",
"torch.nn.ReLU",
"torch.from_numpy",
"torch.nn.init.orthogonal_",
"torch.sum",
"torch.conv2d",
"torch.mean",
"torch.nn.Flatten",
"torch.nn.init.xavier_uniform_",
"numpy.linspace",
"torchvision.transforms.ToTensor",
"random.randint",
"glob.glob",
"torch.abs",
"random.choi... | [((641, 666), 'torch.transpose', 'torch.transpose', (['Sx', '(1)', '(0)'], {}), '(Sx, 1, 0)\n', (656, 666), False, 'import torch\n'), ((894, 945), 'pytorch_pretrained_biggan.one_hot_from_names', 'one_hot_from_names', (["['vase']"], {'batch_size': 'batch_size'}), "(['vase'], batch_size=batch_size)\n", (912, 945), False, 'from pytorch_pretrained_biggan import one_hot_from_names\n'), ((1000, 1035), 'glob.glob', 'glob.glob', (['f"""{data_dir}/full_*.jpg"""'], {}), "(f'{data_dir}/full_*.jpg')\n", (1009, 1035), False, 'import glob\n'), ((1401, 1423), 'numpy.product', 'np.product', (['loss.shape'], {}), '(loss.shape)\n', (1411, 1423), True, 'import numpy as np\n'), ((1459, 1499), 'torch.einsum', 'torch.einsum', (['"""bcmn,n->bcm"""', 'loss', 'alpha'], {}), "('bcmn,n->bcm', loss, alpha)\n", (1471, 1499), False, 'import torch\n'), ((1535, 1573), 'torch.einsum', 'torch.einsum', (['"""bcm,m->bc"""', 'loss', 'alpha'], {}), "('bcm,m->bc', loss, alpha)\n", (1547, 1573), False, 'import torch\n'), ((1733, 1749), 'torch.abs', 'torch.abs', (['(x - y)'], {}), '(x - y)\n', (1742, 1749), False, 'import torch\n'), ((1762, 1784), 'numpy.product', 'np.product', (['loss.shape'], {}), '(loss.shape)\n', (1772, 1784), True, 'import numpy as np\n'), ((1820, 1860), 'torch.einsum', 'torch.einsum', (['"""bcmn,n->bcm"""', 'loss', 'alpha'], {}), "('bcmn,n->bcm', loss, alpha)\n", (1832, 1860), False, 'import torch\n'), ((1896, 1934), 'torch.einsum', 'torch.einsum', (['"""bcm,m->bc"""', 'loss', 'alpha'], {}), "('bcm,m->bc', loss, alpha)\n", (1908, 1934), False, 'import torch\n'), ((2103, 2138), 'torch.sum', 'torch.sum', (['img'], {'keepdim': '(True)', 'dim': '(1)'}), '(img, keepdim=True, dim=1)\n', (2112, 2138), False, 'import torch\n'), ((2176, 2209), 'torch.conv2d', 'torch.conv2d', (['gray', 'Sx'], {'padding': '(1)'}), '(gray, Sx, padding=1)\n', (2188, 2209), False, 'import torch\n'), ((2249, 2282), 'torch.conv2d', 'torch.conv2d', (['gray', 'Sy'], {'padding': '(1)'}), '(gray, Sy, 
padding=1)\n', (2261, 2282), False, 'import torch\n'), ((431, 453), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(256)'], {}), '(0, 1, 256)\n', (442, 453), True, 'import numpy as np\n'), ((455, 477), 'numpy.linspace', 'np.linspace', (['(1)', '(0)', '(256)'], {}), '(1, 0, 256)\n', (466, 477), True, 'import numpy as np\n'), ((488, 511), 'torch.from_numpy', 'torch.from_numpy', (['alpha'], {}), '(alpha)\n', (504, 511), False, 'import torch\n'), ((549, 603), 'torch.Tensor', 'torch.Tensor', (['[[[[1, 0, -1], [2, 0, -2], [1, 0, -1]]]]'], {}), '([[[[1, 0, -1], [2, 0, -2], [1, 0, -1]]]])\n', (561, 603), False, 'import torch\n'), ((1257, 1305), 'glob.glob', 'glob.glob', (['f"""{data_dir}/frag_{img_ids[0]}_*.jpg"""'], {}), "(f'{data_dir}/frag_{img_ids[0]}_*.jpg')\n", (1266, 1305), False, 'import glob\n'), ((1609, 1625), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (1619, 1625), False, 'import torch\n'), ((1970, 1986), 'torch.mean', 'torch.mean', (['loss'], {}), '(loss)\n', (1980, 1986), False, 'import torch\n'), ((2491, 2512), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2510, 2512), False, 'from torchvision import transforms, set_image_backend\n'), ((6476, 6526), 'torch.cat', 'torch.cat', (['([self.vase_vec] * noise.shape[0])'], {'dim': '(0)'}), '([self.vase_vec] * noise.shape[0], dim=0)\n', (6485, 6526), False, 'import torch\n'), ((3179, 3202), 'torch.cat', 'torch.cat', (['imgs'], {'axis': '(0)'}), '(imgs, axis=0)\n', (3188, 3202), False, 'import torch\n'), ((3223, 3247), 'torch.cat', 'torch.cat', (['frags'], {'axis': '(0)'}), '(frags, axis=0)\n', (3232, 3247), False, 'import torch\n'), ((4393, 4412), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(16)', '(3)'], {}), '(3, 16, 3)\n', (4402, 4412), False, 'from torch import nn\n'), ((4426, 4435), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4433, 4435), False, 'from torch import nn\n'), ((4449, 4464), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (4461, 4464), 
False, 'from torch import nn\n'), ((4479, 4499), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(16)', '(3)'], {}), '(16, 16, 3)\n', (4488, 4499), False, 'from torch import nn\n'), ((4513, 4522), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4520, 4522), False, 'from torch import nn\n'), ((4536, 4551), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {}), '(2)\n', (4548, 4551), False, 'from torch import nn\n'), ((4565, 4577), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (4575, 4577), False, 'from torch import nn\n'), ((4619, 4644), 'torch.nn.Linear', 'nn.Linear', (['(120 * 120)', '(128)'], {}), '(120 * 120, 128)\n', (4628, 4644), False, 'from torch import nn\n'), ((4658, 4667), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4665, 4667), False, 'from torch import nn\n'), ((4681, 4700), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (4690, 4700), False, 'from torch import nn\n'), ((4714, 4723), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4721, 4723), False, 'from torch import nn\n'), ((8079, 8091), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (8089, 8091), False, 'from torch import nn\n'), ((8105, 8142), 'torch.nn.Linear', 'nn.Linear', (['(128 * 128 * 3)', '(32 * 32 * 4)'], {}), '(128 * 128 * 3, 32 * 32 * 4)\n', (8114, 8142), False, 'from torch import nn\n'), ((8183, 8212), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(4)', '(3)'], {'padding': '(1)'}), '(4, 4, 3, padding=1)\n', (8192, 8212), False, 'from torch import nn\n'), ((8226, 8235), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8233, 8235), False, 'from torch import nn\n'), ((8249, 8276), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (8260, 8276), False, 'from torch import nn\n'), ((8290, 8319), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(4)', '(3)'], {'padding': '(1)'}), '(4, 4, 3, padding=1)\n', (8299, 8319), False, 'from torch import nn\n'), ((8333, 8342), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8340, 8342), False, 'from 
torch import nn\n'), ((8356, 8383), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (8367, 8383), False, 'from torch import nn\n'), ((8499, 8528), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(4)', '(3)'], {'padding': '(1)'}), '(4, 4, 3, padding=1)\n', (8508, 8528), False, 'from torch import nn\n'), ((8542, 8551), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8549, 8551), False, 'from torch import nn\n'), ((8565, 8592), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (8576, 8592), False, 'from torch import nn\n'), ((8676, 8705), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(3)', '(3)'], {'padding': '(1)'}), '(4, 3, 3, padding=1)\n', (8685, 8705), False, 'from torch import nn\n'), ((8719, 8728), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8726, 8728), False, 'from torch import nn\n'), ((8742, 8769), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)'}), '(scale_factor=2)\n', (8753, 8769), False, 'from torch import nn\n'), ((8783, 8812), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(3)', '(3)'], {'padding': '(1)'}), '(3, 3, 3, padding=1)\n', (8792, 8812), False, 'from torch import nn\n'), ((2808, 2835), 'random.choice', 'random.choice', (['self.img_ids'], {}), '(self.img_ids)\n', (2821, 2835), False, 'import random\n'), ((2893, 2928), 'random.randint', 'random.randint', (['(0)', '(self.n_frags - 1)'], {}), '(0, self.n_frags - 1)\n', (2907, 2928), False, 'import random\n'), ((5191, 5222), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['module.weight'], {}), '(module.weight)\n', (5207, 5222), False, 'from torch.nn import init\n'), ((9820, 9851), 'torch.nn.init.orthogonal_', 'init.orthogonal_', (['module.weight'], {}), '(module.weight)\n', (9836, 9851), False, 'from torch.nn import init\n'), ((5284, 5320), 'torch.nn.init.normal_', 'init.normal_', (['module.weight', '(0)', '(0.02)'], {}), '(module.weight, 0, 0.02)\n', (5296, 5320), False, 'from torch.nn import init\n'), ((9913, 
9949), 'torch.nn.init.normal_', 'init.normal_', (['module.weight', '(0)', '(0.02)'], {}), '(module.weight, 0, 0.02)\n', (9925, 9949), False, 'from torch.nn import init\n'), ((5397, 5432), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['module.weight'], {}), '(module.weight)\n', (5417, 5432), False, 'from torch.nn import init\n'), ((10026, 10061), 'torch.nn.init.xavier_uniform_', 'init.xavier_uniform_', (['module.weight'], {}), '(module.weight)\n', (10046, 10061), False, 'from torch.nn import init\n')] |
from django.contrib import admin
from chat_users.models import ChatUser
# Register ChatUser so it can be created/edited/deleted via the Django admin UI.
admin.site.register(ChatUser)
"django.contrib.admin.site.register"
] | [((102, 131), 'django.contrib.admin.site.register', 'admin.site.register', (['ChatUser'], {}), '(ChatUser)\n', (121, 131), False, 'from django.contrib import admin\n')] |
import pytest
from newsreadercli import NewsScraper
from newsreadercli import Article
def test_web_scraper():
    """Scrape a saved VnExpress HTML fixture and type-check the results.

    Every scraped article must be an Article instance and every category a str.
    """
    scraper = NewsScraper()
    # Context manager ensures the fixture file is closed even if parsing fails
    # (the original opened it and never closed it).
    with open("tests/test_data/vnexpress-test.html", 'r', encoding='utf-8') as test_page:
        scraper.page_scrape(None, test_page.read())
    for article in scraper.articles_current_page:
        # Bug fix: the original `assert isinstance(article) is Article` raised
        # TypeError -- isinstance() requires (obj, classinfo).
        assert isinstance(article, Article)
    for category in scraper.categories:
        assert type(category) is str
| [
"newsreadercli.NewsScraper"
] | [((131, 144), 'newsreadercli.NewsScraper', 'NewsScraper', ([], {}), '()\n', (142, 144), False, 'from newsreadercli import NewsScraper\n')] |
#!/usr/bin/python3
import aioros
from rospy_tutorials.srv import AddTwoInts
from rospy_tutorials.srv import AddTwoIntsResponse
async def setup(nh: aioros.NodeHandle):
    """Register the 'add_two_ints' service on the given node handle."""
    def handle_request(request):
        # The response carries the sum of the two request operands.
        return AddTwoIntsResponse(request.a + request.b)
    await nh.create_service('add_two_ints', AddTwoInts, handle_request)
if __name__ == '__main__':
    # Start node 'add_two_ints_server', running `setup` once the node is up.
    aioros.run_forever(setup, 'add_two_ints_server')
| [
"aioros.run_forever",
"rospy_tutorials.srv.AddTwoIntsResponse"
] | [((344, 392), 'aioros.run_forever', 'aioros.run_forever', (['setup', '"""add_two_ints_server"""'], {}), "(setup, 'add_two_ints_server')\n", (362, 392), False, 'import aioros\n'), ((268, 309), 'rospy_tutorials.srv.AddTwoIntsResponse', 'AddTwoIntsResponse', (['(request.a + request.b)'], {}), '(request.a + request.b)\n', (286, 309), False, 'from rospy_tutorials.srv import AddTwoIntsResponse\n')] |
#
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2016
#
import unittest, sys
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class Wrapper(str):
  """A str subclass that announces its own destruction via __del__.

  Used to observe exactly when the DB driver releases a bound parameter.
  """
  def __del__(self):
    # Concatenation works directly because `self` IS a str.
    message = "Wrapper(" + self + ") being deleted"
    print(message)
class IbmDbTestCase(unittest.TestCase):
  def test_312_CacheBoundParameters(self):
    # Entry point: runs run_test_312 and compares its printed output
    # against the per-platform expected-output blocks at file bottom.
    obj = IbmDbTestFunctions()
    obj.assert_expect(self.run_test_312)
  def run_test_312(self):
    """Check that bound parameters stay referenced (cached) by the statement.

    Wrapper objects (str subclass with a noisy __del__) are bound; per the
    expected output they must only be garbage-collected after plain strings
    are bound over them -- i.e. the driver holds references until re-bind.
    """
    conn = ibm_db.connect(config.database, config.user, config.password)
    # Autocommit off so the INSERT below can be rolled back at the end.
    ibm_db.autocommit(conn, ibm_db.SQL_AUTOCOMMIT_OFF)
    query = "INSERT INTO department (deptno, deptname, mgrno, admrdept, location) VALUES (?, ?, ?, ?, ?)"
    if conn:
      stmt = ibm_db.prepare(conn, query)
      params = ['STG', 'Systems & Technology', '123456', 'RSF', 'Fiji']
      print("Binding parameters")
      for i,p in enumerate(params, 1):
        # Bind Wrapper instances so deletions become observable via stdout.
        ibm_db.bind_param(stmt, i, Wrapper(p))
      if ibm_db.execute(stmt):
        print("Executing statement")
        ibm_db.execute(stmt)
        # force the cache to be unbound
        for i,p in enumerate(params, 1):
          ibm_db.bind_param(stmt, i, p)
        ibm_db.rollback(conn)
    else:
      print("Connection failed.")
#__END__
#__LUW_EXPECTED__
#Binding parameters
#Executing statement
#Wrapper(STG) being deleted
#Wrapper(Systems & Technology) being deleted
#Wrapper(123456) being deleted
#Wrapper(RSF) being deleted
#Wrapper(Fiji) being deleted
#__ZOS_EXPECTED__
#Binding parameters
#Executing statement
#Wrapper(STG) being deleted
#Wrapper(Systems & Technology) being deleted
#Wrapper(123456) being deleted
#Wrapper(RSF) being deleted
#Wrapper(Fiji) being deleted
#__SYSTEMI_EXPECTED__
#Binding parameters
#Executing statement
#Wrapper(STG) being deleted
#Wrapper(Systems & Technology) being deleted
#Wrapper(123456) being deleted
#Wrapper(RSF) being deleted
#Wrapper(Fiji) being deleted
#__IDS_EXPECTED__
#Binding parameters
#Executing statement
#Wrapper(STG) being deleted
#Wrapper(Systems & Technology) being deleted
#Wrapper(123456) being deleted
#Wrapper(RSF) being deleted
#Wrapper(Fiji) being deleted
| [
"ibm_db.connect",
"ibm_db.autocommit",
"ibm_db.prepare",
"ibm_db.bind_param",
"ibm_db.execute",
"testfunctions.IbmDbTestFunctions",
"ibm_db.rollback"
] | [((360, 380), 'testfunctions.IbmDbTestFunctions', 'IbmDbTestFunctions', ([], {}), '()\n', (378, 380), False, 'from testfunctions import IbmDbTestFunctions\n'), ((462, 523), 'ibm_db.connect', 'ibm_db.connect', (['config.database', 'config.user', 'config.password'], {}), '(config.database, config.user, config.password)\n', (476, 523), False, 'import ibm_db\n'), ((529, 579), 'ibm_db.autocommit', 'ibm_db.autocommit', (['conn', 'ibm_db.SQL_AUTOCOMMIT_OFF'], {}), '(conn, ibm_db.SQL_AUTOCOMMIT_OFF)\n', (546, 579), False, 'import ibm_db\n'), ((722, 749), 'ibm_db.prepare', 'ibm_db.prepare', (['conn', 'query'], {}), '(conn, query)\n', (736, 749), False, 'import ibm_db\n'), ((959, 979), 'ibm_db.execute', 'ibm_db.execute', (['stmt'], {}), '(stmt)\n', (973, 979), False, 'import ibm_db\n'), ((1026, 1046), 'ibm_db.execute', 'ibm_db.execute', (['stmt'], {}), '(stmt)\n', (1040, 1046), False, 'import ibm_db\n'), ((1186, 1207), 'ibm_db.rollback', 'ibm_db.rollback', (['conn'], {}), '(conn)\n', (1201, 1207), False, 'import ibm_db\n'), ((1139, 1168), 'ibm_db.bind_param', 'ibm_db.bind_param', (['stmt', 'i', 'p'], {}), '(stmt, i, p)\n', (1156, 1168), False, 'import ibm_db\n')] |
#!/usr/bin/python3
import math
import numpy as np
import pdb
import time
import torch
from mseg_semantic.domain_generalization.ccsa_utils import (
contrastive_loss,
paired_euclidean_distance,
downsample_label_map,
sample_pair_indices,
find_matching_pairs,
remove_pairs_from_same_domain,
get_merged_pair_embeddings,
pytorch_random_choice,
shuffle_pytorch_tensor,
get_pair_embedding,
count_per_domain_statistics,
sample_px_locations_uniformly,
sample_crossdomain_pos_neg_pairs,
form_pair_info_tensor
)
"""
For sake of unit tests, pretend we have the following categories:
Let 0 = Sky
1 = Mountain
2 = Road
3 = Person
4 = Vegetation
"""
def test_contrastive_loss1():
	"""Zero-loss case: matched pairs at distance 0, mismatched pairs beyond margin."""
	# Pair labels: 1 = pair shares a semantic class, 0 = pair does not.
	same_class = torch.tensor([1., 0., 0., 0., 1.], dtype=torch.float32)
	# Matched pairs sit at distance 0 (no pull loss); mismatched ones are
	# past the margin (no push loss).
	distances = torch.tensor([0, 1.1, 1.1, 1.1, 0], dtype=torch.float32)
	expected = torch.tensor([0])
	assert torch.allclose(contrastive_loss(same_class, distances), expected)
def test_contrastive_loss2():
	"""Push-term-only loss: mismatched pairs sit well inside the margin."""
	# Pair labels: 1 = pair shares a semantic class, 0 = pair does not.
	same_class = torch.tensor([1., 0., 0., 0., 1.], dtype=torch.float32)
	# The three mismatched pairs are too close together -> margin penalty.
	distances = torch.tensor([0, 0.2, 0.3, 0.1, 0], dtype=torch.float32)
	expected = torch.tensor([0.3880])
	assert torch.allclose(contrastive_loss(same_class, distances), expected, atol=1e-3)
def test_contrastive_loss3():
	"""Largest loss: pull term (separated matches) plus push term (close mismatches)."""
	# Pair labels: 1 = pair shares a semantic class, 0 = pair does not.
	same_class = torch.tensor([1., 0., 0., 0., 1.], dtype=torch.float32)
	# Matched pairs are far apart AND mismatched pairs are inside the margin.
	distances = torch.tensor([2.0, 0.2, 0.3, 0.1, 4.0], dtype=torch.float32)
	expected = torch.tensor([4.3880])
	assert torch.allclose(contrastive_loss(same_class, distances), expected, atol=1e-3)
def test_paired_euclidean_distance():
	"""Row-wise Euclidean distances between corresponding rows of X and Y."""
	X = torch.tensor(
		[
			[3,0],
			[4,0],
			[1,1]
		], dtype=torch.float32)
	Y = torch.tensor(
		[
			[1,1],
			[0,3],
			[0,4]
		], dtype=torch.float32)
	dists = paired_euclidean_distance(X, Y)
	gt_dists = torch.tensor(
		[
			[ math.sqrt(2*2 + 1) ], # (3,0) vs. (1,1)
			[ math.sqrt(3*3 + 4*4) ], # (4,0) vs. (0,3)
			[ math.sqrt(3*3 + 1) ]  # (1,1) vs. (0,4)
		])
	# Bug fix: the allclose result was previously discarded (no `assert`),
	# so this test could never fail.
	assert torch.allclose(gt_dists.squeeze(), dists, atol=1e-3)
def test_downsample_label_map():
	"""
	Downsample two stacked label maps by factors of 2 and 4 and verify
	both the shapes and the subsampled values.
	"""
	labelmap_1 = torch.tensor(
		[
			[0,0,0,0,0,0,0,0],
			[4,4,0,0,0,0,4,4],
			[4,3,2,2,2,2,3,4],
			[4,2,2,2,2,2,2,4]
		])
	labelmap_2 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,2,2,2,4],
			[4,4,4,4,2,2,2,4],
			[4,4,4,3,2,2,2,4]
		])
	Y = torch.stack([labelmap_1, labelmap_2])
	Y = Y.type(torch.float32)
	assert Y.shape == (2,4,8)
	dY = downsample_label_map(Y, d=2)
	assert dY.shape == (2,2,4)
	gt_dY = torch.tensor(
		[
			[[0., 0., 0., 0.],
			 [4., 2., 2., 3.]],
			[[1., 1., 0., 0.],
			 [4., 4., 2., 2.]]
		])
	# Bug fix: gt_dY was previously computed but never compared against dY.
	assert torch.allclose(dY, gt_dY)
	dY = downsample_label_map(Y, d=4)
	gt_dY = torch.tensor(
		[
			[[0., 0.]],
			[[1., 0.]]
		])
	assert dY.shape == (2,1,2)
	# Bug fix: likewise assert the values for the d=4 case.
	assert torch.allclose(dY, gt_dY)
def test_sample_pair_indices1():
	"""
	Given labels for 3 images, sample corresponding pixels that
	are known positives and that are known negatives.
	Suppose images 0 and 2 come from Domain-0, and image 1 comes
	from Domain-1. No downsampling here (downsample_factor=1).
	"""
	labelmap_0 = torch.tensor(
		[
			[0,0,0,0,0,0,0,0],
			[4,4,0,0,0,0,4,4],
			[4,3,2,2,2,2,3,4],
			[4,2,2,2,2,2,2,4]
		], dtype=torch.float32)
	labelmap_1 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,2,2,2,4],
			[4,4,4,4,2,2,2,4],
			[4,4,4,3,2,2,2,4]
		], dtype=torch.float32)
	labelmap_2 = torch.tensor(
		[
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4]
		], dtype=torch.float32)
	Y = torch.stack([labelmap_0, labelmap_1, labelmap_2])
	assert Y.shape == (3,4,8)
	batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int32)
	pos_pair_info, neg_pair_info = sample_pair_indices(Y, batch_domain_indices, num_pos_pairs=30000, neg_to_pos_ratio=3, downsample_factor=1)
	# Each returned row is (batch_i, row_i, col_i, batch_j, row_j, col_j).
	for (bi, hi, wi, bj, hj, wj) in pos_pair_info:
		assert Y[bi,hi,wi] == Y[bj,hj,wj] # is same class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
	for (bi, hi, wi, bj, hj, wj) in neg_pair_info:
		assert Y[bi,hi,wi] != Y[bj,hj,wj] # is different class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
def test_sample_pair_indices2():
	"""
	Given labels for 3 images, sample corresponding pixels that
	are known positives and that are known negatives.
	Suppose images 0 and 2 come from Domain-0, and image 1 comes
	from Domain-1. Here labels are first downsampled by a factor of 2.
	"""
	labelmap_0 = torch.tensor(
		[
			[0,0,0,0,1,1,1,1],
			[0,0,0,0,1,1,1,1],
			[2,2,2,2,4,4,4,4],
			[2,2,2,2,4,4,4,4]
		], dtype=torch.float32)
	labelmap_1 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,0,0,0,0],
			[4,4,4,4,2,2,2,2],
			[4,4,4,4,2,2,2,2]
		], dtype=torch.float32)
	labelmap_2 = torch.tensor(
		[
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4]
		], dtype=torch.float32)
	Y = torch.stack([labelmap_0, labelmap_1, labelmap_2])
	assert Y.shape == (3,4,8)
	batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int32)
	pos_pair_info, neg_pair_info = sample_pair_indices(Y, batch_domain_indices, num_pos_pairs=3000, neg_to_pos_ratio=3, downsample_factor=2)
	# downsample_factor=2: returned indices refer to the subsampled maps Y[:,::2,::2].
	for (bi, hi, wi, bj, hj, wj) in pos_pair_info:
		assert Y[:,::2,::2][bi,hi,wi] == Y[:,::2,::2][bj,hj,wj] # is same class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
	for (bi, hi, wi, bj, hj, wj) in neg_pair_info:
		assert Y[:,::2,::2][bi,hi,wi] != Y[:,::2,::2][bj,hj,wj] # is different class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
def test_remove_pairs_from_same_domain():
	"""
	Filtering sampled pixel pairs: keep only pairs whose two minibatch
	examples come from DIFFERENT domains.
	Five minibatch examples map to domains [0,1,2,1,0]; of the four
	candidate pairs below, two are intra-domain and must be dropped.
	(The number of sampled pairs need not match the minibatch size.)
	"""
	# Domain id of each of the 5 minibatch examples.
	batch_domain_indices = torch.tensor([0, 1, 2, 1, 0])
	# Candidate pairs (a, b); each row is (batch_idx, row, col).
	a_info_ = torch.tensor([
		[0, 1, 2],  # domain 0 -- paired with domain 0 below: dropped
		[0, 1, 2],  # domain 0
		[2, 1, 2],  # domain 2
		[3, 1, 2],  # domain 1 -- paired with domain 1 below: dropped
	])
	b_info_ = torch.tensor([
		[4, 3, 4],  # domain 0
		[1, 3, 4],  # domain 1
		[3, 3, 4],  # domain 1
		[1, 3, 4],  # domain 1
	])
	a_pair_info, b_pair_info = remove_pairs_from_same_domain(batch_domain_indices, a_info_, b_info_)
	expected_a = torch.tensor([
		[0, 1, 2],
		[2, 1, 2],
	])
	expected_b = torch.tensor([
		[1, 3, 4],
		[3, 3, 4],
	])
	assert torch.allclose(a_pair_info, expected_a)
	assert torch.allclose(b_pair_info, expected_b)
def test_form_pair_info_tensor():
	"""
	Hstacking three length-5 1-D index tensors (minibatch index, pixel row,
	pixel column) must produce a (5, 3) tensor with one tuple per row.
	"""
	batch_dim_idxs = torch.tensor([5, 6, 7, 8, 9], dtype=torch.int32)
	px_1d_y = torch.tensor([4, 3, 2, 1, 0], dtype=torch.int32)
	px_1d_x = torch.tensor([0, 2, 4, 6, 8], dtype=torch.int32)
	pair_info = form_pair_info_tensor(batch_dim_idxs, px_1d_y, px_1d_x)
	# Row k must equal (batch_dim_idxs[k], px_1d_y[k], px_1d_x[k]).
	expected = torch.tensor([
		[5, 4, 0],
		[6, 3, 2],
		[7, 2, 4],
		[8, 1, 6],
		[9, 0, 8],
	], dtype=torch.int32)
	assert torch.allclose(pair_info, expected)
def test_find_matching_pairs():
	"""
	Given a batch of ground truth label maps, and sampled pixel
	pair locations (pairs are across label maps), identify which
	pairs are matching vs. non-matching and return corresponding metadata
	(basically, partition them).
	Get back pos_pair_info -- Pytorch tensor containing info about each positive pair (a,b). Contains
	(a batch_idx, a row, a col, b batch_idx, b row, b col)
	Also get back neg_pair_info -- same as above, but for negative pairs.
	"""
	labelmap_0 = torch.tensor(
		[
			[0,0,0,0,0,0,0,0],
			[4,4,0,0,0,0,4,4],
			[4,3,2,2,2,2,3,4],
			[4,2,2,2,2,2,2,4]
		])
	labelmap_1 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,2,2,2,4],
			[4,4,4,4,2,2,2,4],
			[4,4,4,3,2,2,2,4]
		])
	labelmap_2 = torch.tensor(
		[
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4]
		])
	Y = torch.stack([labelmap_0, labelmap_1, labelmap_2])
	assert Y.shape == (3,4,8)
	# Candidate pair endpoints; each row is (batch_idx, row, col).
	a_pair_info = torch.tensor(
		[
			[0,1,1], # pos
			[2,1,4], # neg
			[1,1,7], # pos
			[0,2,2]  # neg
		])
	b_pair_info = torch.tensor(
		[
			[2,3,7], # pos
			[0,1,4], # neg
			[2,3,0], # pos
			[1,3,3]  # neg
		])
	pos_pair_info, neg_pair_info = find_matching_pairs(Y, a_pair_info, b_pair_info)
	# Positive pairs: same label at both endpoints (e.g. Y[0,1,1]=4 == Y[2,3,7]=4).
	gt_pos_pair_info = torch.tensor(
		[
			[0, 1, 1, 2, 3, 7], # pos pairs
			[1, 1, 7, 2, 3, 0]
		])
	assert torch.allclose(pos_pair_info, gt_pos_pair_info)
	gt_neg_pair_info = torch.tensor(
		[
			[2, 1, 4, 0, 1, 4], # neg pairs
			[0, 2, 2, 1, 3, 3]
		])
	assert torch.allclose(neg_pair_info, gt_neg_pair_info)
def test_sample_crossdomain_pos_neg_pairs():
	"""Sampled pos/neg pixel pairs must be cross-domain and correctly matched/mismatched."""
	labelmap_0 = torch.tensor(
		[
			[0,0,0,0,0,0,0,0],
			[4,4,0,0,0,0,4,4],
			[4,3,2,2,2,2,3,4],
			[4,2,2,2,2,2,2,4]
		])
	labelmap_1 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,2,2,2,4],
			[4,4,4,4,2,2,2,4],
			[4,4,4,3,2,2,2,4]
		])
	labelmap_2 = torch.tensor(
		[
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4]
		])
	Y = torch.stack([labelmap_0, labelmap_1, labelmap_2])
	assert Y.shape == (3,4,8)
	# here, domain 1 would be sampled more than others
	batch_domain_indices = torch.tensor([0,1,0], dtype=torch.int64)
	_, unique_domain_idxs = count_per_domain_statistics(batch_domain_indices)
	b, h, w = Y.shape
	INITIAL_SAMPLE_NUM = int(1e4)
	pos_pair_info, neg_pair_info = sample_crossdomain_pos_neg_pairs(Y, batch_domain_indices, unique_domain_idxs, w, h, INITIAL_SAMPLE_NUM)
	# Each returned row is (batch_i, row_i, col_i, batch_j, row_j, col_j).
	for (bi, hi, wi, bj, hj, wj) in pos_pair_info:
		assert Y[bi,hi,wi] == Y[bj,hj,wj] # is same class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
	for (bi, hi, wi, bj, hj, wj) in neg_pair_info:
		assert Y[bi,hi,wi] != Y[bj,hj,wj] # is different class
		assert batch_domain_indices[bi] != batch_domain_indices[bj] # must be cross-domain
def test_count_per_domain_statistics():
	"""Count how many minibatch examples fall into each domain id."""
	domain_idxs = torch.tensor([0, 1, 0, 1, 4])
	examples_per_domain, unique_domain_idxs = count_per_domain_statistics(domain_idxs)
	# Domains 0 and 1 appear twice each, domain 4 once; 2 and 3 never.
	assert np.allclose(examples_per_domain, np.array([2., 2., 0., 0., 1.], dtype=np.int32))
	assert np.allclose(unique_domain_idxs, np.array([0, 1, 4]))
	# Counts come back as integer type, not float.
	assert examples_per_domain.dtype == np.int64
def test_sample_px_locations_uniformly():
	"""
	Let 0 = Sky
	1 = Mountain
	2 = Road
	3 = Person
	4 = Vegetation
	In expectation, minibatch examples from less common domains should be
	sampled more often, if domains sampled uniformly.
	"""
	labelmap_1 = torch.tensor(
		[
			[0,0,0,0,0,0,0,0],
			[4,4,0,0,0,0,4,4],
			[4,3,2,2,2,2,3,4],
			[4,2,2,2,2,2,2,4]
		])
	labelmap_2 = torch.tensor(
		[
			[1,1,1,1,0,0,0,0],
			[1,1,1,1,2,2,2,4],
			[4,4,4,4,2,2,2,4],
			[4,4,4,3,2,2,2,4]
		])
	labelmap_3 = torch.tensor(
		[
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4],
			[4,4,4,4,4,4,4,4]
		])
	Y = torch.stack([labelmap_1, labelmap_2, labelmap_3])
	assert Y.shape == (3,4,8)
	# here, domain 1 would be sampled more than others (sampled twice as often)
	domain_indices = torch.tensor([0,1,0], dtype=torch.int64)
	# unique domain indices would be [0,1]
	_, unique_domain_idxs = count_per_domain_statistics(domain_indices)
	b, h, w = Y.shape
	INITIAL_SAMPLE_NUM = int(1e6)
	b_idxs, w_idxs, h_idxs = sample_px_locations_uniformly(
		domain_indices,
		unique_domain_idxs,
		w,
		h,
		INITIAL_SAMPLE_NUM
	)
	# Verify expected value vs. empirical. Allow for some margin of error.
	# Less common domain (minibatch example 1) should be sampled roughly
	# 2x as often, since it appears less often.
	assert 245000 < (b_idxs == 0).sum() and (b_idxs == 0).sum() < 255000
	assert 495000 < (b_idxs == 1).sum() and (b_idxs == 1).sum() < 505000
	assert 245000 < (b_idxs == 2).sum() and (b_idxs == 2).sum() < 255000
	# Sample minibatch indices should lie in [0,b)
	assert (b_idxs >= 0).sum() == INITIAL_SAMPLE_NUM
	assert (b_idxs < b).sum() == INITIAL_SAMPLE_NUM
	# Sampled pixel rows should lie in [0,h)
	assert (h_idxs >= 0).sum() == INITIAL_SAMPLE_NUM
	assert (h_idxs < h).sum() == INITIAL_SAMPLE_NUM
	# Sampled pixel columns should lie in [0,w)
	assert (w_idxs >= 0).sum() == INITIAL_SAMPLE_NUM
	assert (w_idxs < w).sum() == INITIAL_SAMPLE_NUM
def test_shuffle_pytorch_tensor():
	"""
	A shuffle of a 3-row tensor must equal exactly one of the 3! = 6
	possible row orderings of the input.
	"""
	t = torch.tensor(
		[
			[1,2],
			[3,4],
			[5,6]
		])
	shuffled = shuffle_pytorch_tensor(t)
	# All 6 row orderings of a 3-row tensor, as index lists.
	row_orders = [
		[0, 1, 2], [0, 2, 1], [1, 0, 2],
		[1, 2, 0], [2, 0, 1], [2, 1, 0],
	]
	assert any(torch.allclose(t[order], shuffled) for order in row_orders)
def test_pytorch_random_choice():
	"""
	Sampling with replacement must return values drawn only from the source
	array, with the requested output shape.
	"""
	# (source array, number of samples) -- covers more and fewer samples
	# than source elements, and a tiny source.
	cases = [
		(np.array([0, 2, 4, 5, 6]), 10),
		(np.array([0, 2, 4, 5, 6]), 3),
		(np.array([0, 2]), 10),
	]
	for x, num_samples in cases:
		vals = pytorch_random_choice(x, num_samples=num_samples)
		for val in list(torch.unique(vals).cpu().numpy()):
			assert val in list(x)
		assert vals.shape == (num_samples,)
def test_get_merged_pair_embeddings():
	"""
	Merge positive and negative pairs: y_c is 1 for positive pairs and 0 for
	negative pairs, and per-pair (a, b) embedding vectors are gathered from
	the feature map.
	"""
	# Each row: (a_batch, a_row, a_col, b_batch, b_row, b_col).
	pos_pair_info = torch.tensor(
		[
			[0,1,1,1,2,2],
			[1,3,4,2,0,0]
		])
	neg_pair_info = torch.tensor(
		[
			[0,1,1,1,2,2],
			[1,3,4,2,0,0]
		])
	# Feature map of shape (batch=3, channels=2, height=4, width=5).
	resnet_embedding = torch.arange(2*3*4*5).reshape(3,2,4,5)
	y_c, a_embedding, b_embedding = get_merged_pair_embeddings(
		pos_pair_info,
		neg_pair_info,
		resnet_embedding
	)
	# First two entries come from pos pairs (label 1), last two from neg (label 0).
	gt_y_c = torch.tensor([1,1,0,0], dtype=torch.float32)
	gt_a_embedding = torch.tensor(
		[
			[ 6, 26],
			[59, 79],
			[ 6, 26],
			[59, 79]
		])
	gt_b_embedding = torch.tensor(
		[
			[ 52, 72],
			[ 80, 100],
			[ 52, 72],
			[ 80, 100]
		])
	assert torch.allclose(a_embedding, gt_a_embedding)
	assert torch.allclose(b_embedding, gt_b_embedding)
	assert torch.allclose(y_c, gt_y_c)
def test_get_pair_embedding():
	"""
	Gather the (a, b) per-pair channel vectors from a (batch=3, channels=2,
	height=4, width=5) feature map at the listed pixel locations.
	"""
	pair_info = torch.tensor(
		[
			# (bi,hi,wi,bj,hj,wj)
			[0, 1, 1, 1, 2, 2],
			[1, 3, 4, 2, 0, 0]
		])
	embedding = torch.arange(2*3*4*5).reshape(3,2,4,5)
	a_embedding, b_embedding = get_pair_embedding(pair_info, embedding)
	# e.g. row 0 of gt_a is embedding[0, :, 1, 1] = (6, 26).
	gt_a_embedding = torch.tensor(
		[
			[ 6, 26],
			[59, 79]
		])
	gt_b_embedding = torch.tensor(
		[
			[ 52, 72],
			[ 80, 100]
		])
	assert torch.allclose(a_embedding, gt_a_embedding)
	assert torch.allclose(b_embedding, gt_b_embedding)
def time_sample_pair_indices():
	"""
	Benchmark pair-index sampling on a realistic workload: batch of 128
	label maps (201x201), 194 possible classes, 7 domains.
	Takes around 0.5 sec on Macbook Pro to sample pair indices each time.
	"""
	for _ in range(10):
		# Fresh random domains and labels per trial.
		batch_domain_idxs = torch.randint(low=0, high=7, size=(128,))
		Y = torch.randint(low=0, high=194, size=(128,201,201))
		start = time.time()
		out = sample_pair_indices(
			Y.type(torch.float32),
			batch_domain_idxs,
			num_pos_pairs=int(1e3),
			neg_to_pos_ratio=3,
			downsample_factor=8
		)
		duration = time.time() - start
		print(f'Duration was {duration}')
if __name__ == '__main__':
	""" """
	# Run the whole suite directly (without pytest), then the timing benchmark.
	test_contrastive_loss1()
	test_contrastive_loss2()
	test_contrastive_loss3()
	test_paired_euclidean_distance()
	test_downsample_label_map()
	test_shuffle_pytorch_tensor()
	test_pytorch_random_choice()
	test_count_per_domain_statistics()
	test_sample_px_locations_uniformly()
	test_form_pair_info_tensor()
	test_remove_pairs_from_same_domain()
	test_find_matching_pairs()
	test_sample_crossdomain_pos_neg_pairs()
	test_sample_pair_indices1()
	test_sample_pair_indices2()
	test_get_pair_embedding()
	test_get_merged_pair_embeddings()
	time_sample_pair_indices()
| [
"mseg_semantic.domain_generalization.ccsa_utils.sample_pair_indices",
"math.sqrt",
"mseg_semantic.domain_generalization.ccsa_utils.pytorch_random_choice",
"numpy.array",
"torch.arange",
"mseg_semantic.domain_generalization.ccsa_utils.paired_euclidean_distance",
"torch.unique",
"mseg_semantic.domain_ge... | [((854, 914), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 0.0, 0.0, 1.0]'], {'dtype': 'torch.float32'}), '([1.0, 0.0, 0.0, 0.0, 1.0], dtype=torch.float32)\n', (866, 914), False, 'import torch\n'), ((959, 1015), 'torch.tensor', 'torch.tensor', (['[0, 1.1, 1.1, 1.1, 0]'], {'dtype': 'torch.float32'}), '([0, 1.1, 1.1, 1.1, 0], dtype=torch.float32)\n', (971, 1015), False, 'import torch\n'), ((1028, 1061), 'mseg_semantic.domain_generalization.ccsa_utils.contrastive_loss', 'contrastive_loss', (['y_c', 'pred_dists'], {}), '(y_c, pred_dists)\n', (1044, 1061), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((1076, 1093), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (1088, 1093), False, 'import torch\n'), ((1106, 1135), 'torch.allclose', 'torch.allclose', (['loss', 'gt_loss'], {}), '(loss, gt_loss)\n', (1120, 1135), False, 'import torch\n'), ((1313, 1373), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 0.0, 0.0, 1.0]'], {'dtype': 'torch.float32'}), '([1.0, 0.0, 0.0, 0.0, 1.0], dtype=torch.float32)\n', (1325, 1373), False, 'import torch\n'), ((1418, 1474), 'torch.tensor', 'torch.tensor', (['[0, 0.2, 0.3, 0.1, 0]'], {'dtype': 'torch.float32'}), '([0, 0.2, 0.3, 0.1, 0], dtype=torch.float32)\n', (1430, 1474), False, 'import torch\n'), ((1487, 1520), 'mseg_semantic.domain_generalization.ccsa_utils.contrastive_loss', 'contrastive_loss', (['y_c', 'pred_dists'], {}), '(y_c, pred_dists)\n', (1503, 1520), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, 
find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((1535, 1556), 'torch.tensor', 'torch.tensor', (['[0.388]'], {}), '([0.388])\n', (1547, 1556), False, 'import torch\n'), ((1570, 1611), 'torch.allclose', 'torch.allclose', (['loss', 'gt_loss'], {'atol': '(0.001)'}), '(loss, gt_loss, atol=0.001)\n', (1584, 1611), False, 'import torch\n'), ((1814, 1874), 'torch.tensor', 'torch.tensor', (['[1.0, 0.0, 0.0, 0.0, 1.0]'], {'dtype': 'torch.float32'}), '([1.0, 0.0, 0.0, 0.0, 1.0], dtype=torch.float32)\n', (1826, 1874), False, 'import torch\n'), ((1919, 1979), 'torch.tensor', 'torch.tensor', (['[2.0, 0.2, 0.3, 0.1, 4.0]'], {'dtype': 'torch.float32'}), '([2.0, 0.2, 0.3, 0.1, 4.0], dtype=torch.float32)\n', (1931, 1979), False, 'import torch\n'), ((1992, 2025), 'mseg_semantic.domain_generalization.ccsa_utils.contrastive_loss', 'contrastive_loss', (['y_c', 'pred_dists'], {}), '(y_c, pred_dists)\n', (2008, 2025), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((2040, 2061), 'torch.tensor', 'torch.tensor', (['[4.388]'], {}), '([4.388])\n', (2052, 2061), False, 'import torch\n'), ((2075, 2116), 'torch.allclose', 'torch.allclose', (['loss', 'gt_loss'], {'atol': '(0.001)'}), '(loss, gt_loss, atol=0.001)\n', (2089, 2116), False, 'import torch\n'), ((2176, 2235), 'torch.tensor', 'torch.tensor', (['[[3, 0], [4, 0], [1, 1]]'], {'dtype': 'torch.float32'}), '([[3, 0], [4, 0], [1, 1]], 
dtype=torch.float32)\n', (2188, 2235), False, 'import torch\n'), ((2296, 2355), 'torch.tensor', 'torch.tensor', (['[[1, 1], [0, 3], [0, 4]]'], {'dtype': 'torch.float32'}), '([[1, 1], [0, 3], [0, 4]], dtype=torch.float32)\n', (2308, 2355), False, 'import torch\n'), ((2420, 2451), 'mseg_semantic.domain_generalization.ccsa_utils.paired_euclidean_distance', 'paired_euclidean_distance', (['X', 'Y'], {}), '(X, Y)\n', (2445, 2451), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((2826, 2948), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2, 2, 2, 2, 3, \n 4], [4, 2, 2, 2, 2, 2, 2, 4]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2,\n 2, 2, 2, 3, 4], [4, 2, 2, 2, 2, 2, 2, 4]])\n', (2838, 2948), False, 'import torch\n'), ((3002, 3124), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4, 4, 2, 2, 2, \n 4], [4, 4, 4, 3, 2, 2, 2, 4]]'], {}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4,\n 4, 2, 2, 2, 4], [4, 4, 4, 3, 2, 2, 2, 4]])\n', (3014, 3124), False, 'import torch\n'), ((3168, 3205), 'torch.stack', 'torch.stack', (['[labelmap_1, labelmap_2]'], {}), '([labelmap_1, labelmap_2])\n', (3179, 3205), False, 'import torch\n'), ((3276, 3304), 'mseg_semantic.domain_generalization.ccsa_utils.downsample_label_map', 'downsample_label_map', (['Y'], {'d': '(2)'}), '(Y, d=2)\n', (3296, 3304), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, 
remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((3348, 3458), 'torch.tensor', 'torch.tensor', (['[[[0.0, 0.0, 0.0, 0.0], [4.0, 2.0, 2.0, 3.0]], [[1.0, 1.0, 0.0, 0.0], [4.0,\n 4.0, 2.0, 2.0]]]'], {}), '([[[0.0, 0.0, 0.0, 0.0], [4.0, 2.0, 2.0, 3.0]], [[1.0, 1.0, 0.0,\n 0.0], [4.0, 4.0, 2.0, 2.0]]])\n', (3360, 3458), False, 'import torch\n'), ((3517, 3545), 'mseg_semantic.domain_generalization.ccsa_utils.downsample_label_map', 'downsample_label_map', (['Y'], {'d': '(4)'}), '(Y, d=4)\n', (3537, 3545), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((3558, 3600), 'torch.tensor', 'torch.tensor', (['[[[0.0, 0.0]], [[1.0, 0.0]]]'], {}), '([[[0.0, 0.0]], [[1.0, 0.0]]])\n', (3570, 3600), False, 'import torch\n'), ((3942, 4085), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2, 2, 2, 2, 3, \n 4], [4, 2, 2, 2, 2, 2, 2, 4]]'], {'dtype': 'torch.float32'}), '([[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2,\n 2, 2, 2, 3, 4], [4, 2, 2, 2, 2, 2, 2, 4]], dtype=torch.float32)\n', (3954, 4085), False, 'import torch\n'), ((4139, 4282), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4, 4, 2, 2, 2, \n 4], [4, 4, 4, 3, 2, 2, 2, 4]]'], {'dtype': 'torch.float32'}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4,\n 4, 2, 2, 2, 4], [4, 4, 4, 3, 2, 2, 2, 4]], dtype=torch.float32)\n', (4151, 4282), False, 
'import torch\n'), ((4335, 4478), 'torch.tensor', 'torch.tensor', (['[[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, \n 4], [4, 4, 4, 4, 4, 4, 4, 4]]'], {'dtype': 'torch.float32'}), '([[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4,\n 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4]], dtype=torch.float32)\n', (4347, 4478), False, 'import torch\n'), ((4523, 4572), 'torch.stack', 'torch.stack', (['[labelmap_0, labelmap_1, labelmap_2]'], {}), '([labelmap_0, labelmap_1, labelmap_2])\n', (4534, 4572), False, 'import torch\n'), ((4631, 4673), 'torch.tensor', 'torch.tensor', (['[0, 1, 0]'], {'dtype': 'torch.int32'}), '([0, 1, 0], dtype=torch.int32)\n', (4643, 4673), False, 'import torch\n'), ((4708, 4818), 'mseg_semantic.domain_generalization.ccsa_utils.sample_pair_indices', 'sample_pair_indices', (['Y', 'batch_domain_indices'], {'num_pos_pairs': '(30000)', 'neg_to_pos_ratio': '(3)', 'downsample_factor': '(1)'}), '(Y, batch_domain_indices, num_pos_pairs=30000,\n neg_to_pos_ratio=3, downsample_factor=1)\n', (4727, 4818), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((5492, 5635), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1], [2, 2, 2, 2, 4, 4, 4, \n 4], [2, 2, 2, 2, 4, 4, 4, 4]]'], {'dtype': 'torch.float32'}), '([[0, 0, 0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1], [2, 2, 2,\n 2, 4, 4, 4, 4], [2, 2, 2, 2, 4, 4, 4, 4]], dtype=torch.float32)\n', (5504, 5635), False, 'import torch\n'), ((5689, 5832), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [4, 4, 4, 4, 2, 2, 2, \n 2], [4, 4, 4, 4, 2, 
2, 2, 2]]'], {'dtype': 'torch.float32'}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0, 0, 0], [4, 4, 4,\n 4, 2, 2, 2, 2], [4, 4, 4, 4, 2, 2, 2, 2]], dtype=torch.float32)\n', (5701, 5832), False, 'import torch\n'), ((5885, 6028), 'torch.tensor', 'torch.tensor', (['[[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, \n 4], [4, 4, 4, 4, 4, 4, 4, 4]]'], {'dtype': 'torch.float32'}), '([[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4,\n 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4]], dtype=torch.float32)\n', (5897, 6028), False, 'import torch\n'), ((6073, 6122), 'torch.stack', 'torch.stack', (['[labelmap_0, labelmap_1, labelmap_2]'], {}), '([labelmap_0, labelmap_1, labelmap_2])\n', (6084, 6122), False, 'import torch\n'), ((6181, 6223), 'torch.tensor', 'torch.tensor', (['[0, 1, 0]'], {'dtype': 'torch.int32'}), '([0, 1, 0], dtype=torch.int32)\n', (6193, 6223), False, 'import torch\n'), ((6258, 6367), 'mseg_semantic.domain_generalization.ccsa_utils.sample_pair_indices', 'sample_pair_indices', (['Y', 'batch_domain_indices'], {'num_pos_pairs': '(3000)', 'neg_to_pos_ratio': '(3)', 'downsample_factor': '(2)'}), '(Y, batch_domain_indices, num_pos_pairs=3000,\n neg_to_pos_ratio=3, downsample_factor=2)\n', (6277, 6367), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((7360, 7389), 'torch.tensor', 'torch.tensor', (['[0, 1, 2, 1, 0]'], {}), '([0, 1, 2, 1, 0])\n', (7372, 7389), False, 'import torch\n'), ((7447, 7505), 'torch.tensor', 'torch.tensor', (['[[0, 1, 2], [0, 1, 2], [2, 1, 2], [3, 1, 2]]'], {}), '([[0, 1, 2], [0, 1, 2], [2, 1, 2], [3, 1, 2]])\n', (7459, 7505), False, 
'import torch\n'), ((7712, 7770), 'torch.tensor', 'torch.tensor', (['[[4, 3, 4], [1, 3, 4], [3, 3, 4], [1, 3, 4]]'], {}), '([[4, 3, 4], [1, 3, 4], [3, 3, 4], [1, 3, 4]])\n', (7724, 7770), False, 'import torch\n'), ((7994, 8063), 'mseg_semantic.domain_generalization.ccsa_utils.remove_pairs_from_same_domain', 'remove_pairs_from_same_domain', (['batch_domain_indices', 'a_info_', 'b_info_'], {}), '(batch_domain_indices, a_info_, b_info_)\n', (8023, 8063), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((8085, 8121), 'torch.tensor', 'torch.tensor', (['[[0, 1, 2], [2, 1, 2]]'], {}), '([[0, 1, 2], [2, 1, 2]])\n', (8097, 8121), False, 'import torch\n'), ((8176, 8219), 'torch.allclose', 'torch.allclose', (['gt_a_pair_info', 'a_pair_info'], {}), '(gt_a_pair_info, a_pair_info)\n', (8190, 8219), False, 'import torch\n'), ((8241, 8277), 'torch.tensor', 'torch.tensor', (['[[1, 3, 4], [3, 3, 4]]'], {}), '([[1, 3, 4], [3, 3, 4]])\n', (8253, 8277), False, 'import torch\n'), ((8332, 8375), 'torch.allclose', 'torch.allclose', (['gt_b_pair_info', 'b_pair_info'], {}), '(gt_b_pair_info, b_pair_info)\n', (8346, 8375), False, 'import torch\n'), ((8740, 8788), 'torch.tensor', 'torch.tensor', (['[5, 6, 7, 8, 9]'], {'dtype': 'torch.int32'}), '([5, 6, 7, 8, 9], dtype=torch.int32)\n', (8752, 8788), False, 'import torch\n'), ((8799, 8847), 'torch.tensor', 'torch.tensor', (['[4, 3, 2, 1, 0]'], {'dtype': 'torch.int32'}), '([4, 3, 2, 1, 0], dtype=torch.int32)\n', (8811, 8847), False, 'import torch\n'), ((8858, 8906), 'torch.tensor', 'torch.tensor', (['[0, 2, 4, 6, 8]'], {'dtype': 'torch.int32'}), '([0, 2, 4, 6, 8], 
dtype=torch.int32)\n', (8870, 8906), False, 'import torch\n'), ((8920, 8975), 'mseg_semantic.domain_generalization.ccsa_utils.form_pair_info_tensor', 'form_pair_info_tensor', (['batch_dim_idxs', 'px_1d_y', 'px_1d_x'], {}), '(batch_dim_idxs, px_1d_y, px_1d_x)\n', (8941, 8975), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((8995, 9088), 'torch.tensor', 'torch.tensor', (['[[5, 4, 0], [6, 3, 2], [7, 2, 4], [8, 1, 6], [9, 0, 8]]'], {'dtype': 'torch.int32'}), '([[5, 4, 0], [6, 3, 2], [7, 2, 4], [8, 1, 6], [9, 0, 8]], dtype\n =torch.int32)\n', (9007, 9088), False, 'import torch\n'), ((9164, 9203), 'torch.allclose', 'torch.allclose', (['pair_info', 'gt_pair_info'], {}), '(pair_info, gt_pair_info)\n', (9178, 9203), False, 'import torch\n'), ((9757, 9879), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2, 2, 2, 2, 3, \n 4], [4, 2, 2, 2, 2, 2, 2, 4]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2,\n 2, 2, 2, 3, 4], [4, 2, 2, 2, 2, 2, 2, 4]])\n', (9769, 9879), False, 'import torch\n'), ((9933, 10055), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4, 4, 2, 2, 2, \n 4], [4, 4, 4, 3, 2, 2, 2, 4]]'], {}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4,\n 4, 2, 2, 2, 4], [4, 4, 4, 3, 2, 2, 2, 4]])\n', (9945, 10055), False, 'import torch\n'), ((10108, 10230), 'torch.tensor', 'torch.tensor', (['[[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, \n 4], [4, 4, 4, 4, 4, 4, 4, 4]]'], {}), '([[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4,\n 4, 
4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4]])\n', (10120, 10230), False, 'import torch\n'), ((10275, 10324), 'torch.stack', 'torch.stack', (['[labelmap_0, labelmap_1, labelmap_2]'], {}), '([labelmap_0, labelmap_1, labelmap_2])\n', (10286, 10324), False, 'import torch\n'), ((10378, 10436), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1], [2, 1, 4], [1, 1, 7], [0, 2, 2]]'], {}), '([[0, 1, 1], [2, 1, 4], [1, 1, 7], [0, 2, 2]])\n', (10390, 10436), False, 'import torch\n'), ((10538, 10596), 'torch.tensor', 'torch.tensor', (['[[2, 3, 7], [0, 1, 4], [2, 3, 0], [1, 3, 3]]'], {}), '([[2, 3, 7], [0, 1, 4], [2, 3, 0], [1, 3, 3]])\n', (10550, 10596), False, 'import torch\n'), ((10715, 10763), 'mseg_semantic.domain_generalization.ccsa_utils.find_matching_pairs', 'find_matching_pairs', (['Y', 'a_pair_info', 'b_pair_info'], {}), '(Y, a_pair_info, b_pair_info)\n', (10734, 10763), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((10787, 10841), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1, 2, 3, 7], [1, 1, 7, 2, 3, 0]]'], {}), '([[0, 1, 1, 2, 3, 7], [1, 1, 7, 2, 3, 0]])\n', (10799, 10841), False, 'import torch\n'), ((10908, 10955), 'torch.allclose', 'torch.allclose', (['pos_pair_info', 'gt_pos_pair_info'], {}), '(pos_pair_info, gt_pos_pair_info)\n', (10922, 10955), False, 'import torch\n'), ((10979, 11033), 'torch.tensor', 'torch.tensor', (['[[2, 1, 4, 0, 1, 4], [0, 2, 2, 1, 3, 3]]'], {}), '([[2, 1, 4, 0, 1, 4], [0, 2, 2, 1, 3, 3]])\n', (10991, 11033), False, 'import torch\n'), ((11100, 11147), 'torch.allclose', 'torch.allclose', (['neg_pair_info', 'gt_neg_pair_info'], {}), '(neg_pair_info, gt_neg_pair_info)\n', (11114, 
11147), False, 'import torch\n'), ((11224, 11346), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2, 2, 2, 2, 3, \n 4], [4, 2, 2, 2, 2, 2, 2, 4]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2,\n 2, 2, 2, 3, 4], [4, 2, 2, 2, 2, 2, 2, 4]])\n', (11236, 11346), False, 'import torch\n'), ((11400, 11522), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4, 4, 2, 2, 2, \n 4], [4, 4, 4, 3, 2, 2, 2, 4]]'], {}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4,\n 4, 2, 2, 2, 4], [4, 4, 4, 3, 2, 2, 2, 4]])\n', (11412, 11522), False, 'import torch\n'), ((11575, 11697), 'torch.tensor', 'torch.tensor', (['[[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, \n 4], [4, 4, 4, 4, 4, 4, 4, 4]]'], {}), '([[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4,\n 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4]])\n', (11587, 11697), False, 'import torch\n'), ((11742, 11791), 'torch.stack', 'torch.stack', (['[labelmap_0, labelmap_1, labelmap_2]'], {}), '([labelmap_0, labelmap_1, labelmap_2])\n', (11753, 11791), False, 'import torch\n'), ((11909, 11951), 'torch.tensor', 'torch.tensor', (['[0, 1, 0]'], {'dtype': 'torch.int64'}), '([0, 1, 0], dtype=torch.int64)\n', (11921, 11951), False, 'import torch\n'), ((11979, 12028), 'mseg_semantic.domain_generalization.ccsa_utils.count_per_domain_statistics', 'count_per_domain_statistics', (['batch_domain_indices'], {}), '(batch_domain_indices)\n', (12006, 12028), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((12121, 12228), 
'mseg_semantic.domain_generalization.ccsa_utils.sample_crossdomain_pos_neg_pairs', 'sample_crossdomain_pos_neg_pairs', (['Y', 'batch_domain_indices', 'unique_domain_idxs', 'w', 'h', 'INITIAL_SAMPLE_NUM'], {}), '(Y, batch_domain_indices,\n unique_domain_idxs, w, h, INITIAL_SAMPLE_NUM)\n', (12153, 12228), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((12707, 12736), 'torch.tensor', 'torch.tensor', (['[0, 1, 0, 1, 4]'], {}), '([0, 1, 0, 1, 4])\n', (12719, 12736), False, 'import torch\n'), ((12779, 12819), 'mseg_semantic.domain_generalization.ccsa_utils.count_per_domain_statistics', 'count_per_domain_statistics', (['domain_idxs'], {}), '(domain_idxs)\n', (12806, 12819), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((12849, 12900), 'numpy.array', 'np.array', (['[2.0, 2.0, 0.0, 0.0, 1.0]'], {'dtype': 'np.int32'}), '([2.0, 2.0, 0.0, 0.0, 1.0], dtype=np.int32)\n', (12857, 12900), True, 'import numpy as np\n'), ((12924, 12943), 'numpy.array', 'np.array', (['[0, 1, 4]'], {}), '([0, 1, 4])\n', (12932, 12943), True, 'import numpy as np\n'), ((12955, 13011), 'numpy.allclose', 'np.allclose', (['examples_per_domain', 'gt_examples_per_domain'], {}), '(examples_per_domain, gt_examples_per_domain)\n', (12966, 13011), True, 'import numpy as 
np\n'), ((13023, 13077), 'numpy.allclose', 'np.allclose', (['unique_domain_idxs', 'gt_unique_domain_idxs'], {}), '(unique_domain_idxs, gt_unique_domain_idxs)\n', (13034, 13077), True, 'import numpy as np\n'), ((13449, 13571), 'torch.tensor', 'torch.tensor', (['[[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2, 2, 2, 2, 3, \n 4], [4, 2, 2, 2, 2, 2, 2, 4]]'], {}), '([[0, 0, 0, 0, 0, 0, 0, 0], [4, 4, 0, 0, 0, 0, 4, 4], [4, 3, 2,\n 2, 2, 2, 3, 4], [4, 2, 2, 2, 2, 2, 2, 4]])\n', (13461, 13571), False, 'import torch\n'), ((13625, 13747), 'torch.tensor', 'torch.tensor', (['[[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4, 4, 2, 2, 2, \n 4], [4, 4, 4, 3, 2, 2, 2, 4]]'], {}), '([[1, 1, 1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 2, 2, 2, 4], [4, 4, 4,\n 4, 2, 2, 2, 4], [4, 4, 4, 3, 2, 2, 2, 4]])\n', (13637, 13747), False, 'import torch\n'), ((13800, 13922), 'torch.tensor', 'torch.tensor', (['[[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, \n 4], [4, 4, 4, 4, 4, 4, 4, 4]]'], {}), '([[4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4,\n 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4]])\n', (13812, 13922), False, 'import torch\n'), ((13967, 14016), 'torch.stack', 'torch.stack', (['[labelmap_1, labelmap_2, labelmap_3]'], {}), '([labelmap_1, labelmap_2, labelmap_3])\n', (13978, 14016), False, 'import torch\n'), ((14153, 14195), 'torch.tensor', 'torch.tensor', (['[0, 1, 0]'], {'dtype': 'torch.int64'}), '([0, 1, 0], dtype=torch.int64)\n', (14165, 14195), False, 'import torch\n'), ((14266, 14309), 'mseg_semantic.domain_generalization.ccsa_utils.count_per_domain_statistics', 'count_per_domain_statistics', (['domain_indices'], {}), '(domain_indices)\n', (14293, 14309), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, 
shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((14396, 14491), 'mseg_semantic.domain_generalization.ccsa_utils.sample_px_locations_uniformly', 'sample_px_locations_uniformly', (['domain_indices', 'unique_domain_idxs', 'w', 'h', 'INITIAL_SAMPLE_NUM'], {}), '(domain_indices, unique_domain_idxs, w, h,\n INITIAL_SAMPLE_NUM)\n', (14425, 14491), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((15595, 15633), 'torch.tensor', 'torch.tensor', (['[[1, 2], [3, 4], [5, 6]]'], {}), '([[1, 2], [3, 4], [5, 6]])\n', (15607, 15633), False, 'import torch\n'), ((15702, 15727), 'mseg_semantic.domain_generalization.ccsa_utils.shuffle_pytorch_tensor', 'shuffle_pytorch_tensor', (['t'], {}), '(t)\n', (15724, 15727), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((15751, 15930), 'torch.tensor', 'torch.tensor', (['[[[1, 2], [3, 4], [5, 6]], [[1, 2], [5, 6], [3, 4]], [[3, 4], [5, 6], [1, 2\n ]], [[5, 6], [3, 4], [1, 2]], [[3, 4], [1, 2], [5, 6]], [[5, 6], [1, 2],\n [3, 4]]]'], {}), '([[[1, 2], [3, 4], [5, 6]], [[1, 2], [5, 6], [3, 4]], [[3, 4],\n [5, 6], [1, 2]], [[5, 6], [3, 4], [1, 2]], [[3, 4], [1, 2], [5, 6]], [[\n 5, 6], [1, 2], 
[3, 4]]])\n', (15763, 15930), False, 'import torch\n'), ((16403, 16428), 'numpy.array', 'np.array', (['[0, 2, 4, 5, 6]'], {}), '([0, 2, 4, 5, 6])\n', (16411, 16428), True, 'import numpy as np\n'), ((16436, 16476), 'mseg_semantic.domain_generalization.ccsa_utils.pytorch_random_choice', 'pytorch_random_choice', (['x'], {'num_samples': '(10)'}), '(x, num_samples=10)\n', (16457, 16476), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((16602, 16627), 'numpy.array', 'np.array', (['[0, 2, 4, 5, 6]'], {}), '([0, 2, 4, 5, 6])\n', (16610, 16627), True, 'import numpy as np\n'), ((16635, 16674), 'mseg_semantic.domain_generalization.ccsa_utils.pytorch_random_choice', 'pytorch_random_choice', (['x'], {'num_samples': '(3)'}), '(x, num_samples=3)\n', (16656, 16674), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((16799, 16815), 'numpy.array', 'np.array', (['[0, 2]'], {}), '([0, 2])\n', (16807, 16815), True, 'import numpy as np\n'), ((16826, 16866), 'mseg_semantic.domain_generalization.ccsa_utils.pytorch_random_choice', 'pytorch_random_choice', (['x'], {'num_samples': '(10)'}), '(x, num_samples=10)\n', (16847, 16866), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, 
downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((17060, 17114), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]]'], {}), '([[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]])\n', (17072, 17114), False, 'import torch\n'), ((17168, 17222), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]]'], {}), '([[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]])\n', (17180, 17222), False, 'import torch\n'), ((17355, 17429), 'mseg_semantic.domain_generalization.ccsa_utils.get_merged_pair_embeddings', 'get_merged_pair_embeddings', (['pos_pair_info', 'neg_pair_info', 'resnet_embedding'], {}), '(pos_pair_info, neg_pair_info, resnet_embedding)\n', (17381, 17429), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((17473, 17520), 'torch.tensor', 'torch.tensor', (['[1, 1, 0, 0]'], {'dtype': 'torch.float32'}), '([1, 1, 0, 0], dtype=torch.float32)\n', (17485, 17520), False, 'import torch\n'), ((17539, 17591), 'torch.tensor', 'torch.tensor', (['[[6, 26], [59, 79], [6, 26], [59, 79]]'], {}), '([[6, 26], [59, 79], [6, 26], [59, 79]])\n', (17551, 17591), False, 'import torch\n'), ((17682, 17738), 'torch.tensor', 'torch.tensor', (['[[52, 72], [80, 100], [52, 72], [80, 100]]'], {}), '([[52, 72], [80, 100], [52, 72], [80, 100]])\n', (17694, 17738), False, 'import torch\n'), ((17823, 17866), 'torch.allclose', 'torch.allclose', 
(['a_embedding', 'gt_a_embedding'], {}), '(a_embedding, gt_a_embedding)\n', (17837, 17866), False, 'import torch\n'), ((17878, 17921), 'torch.allclose', 'torch.allclose', (['b_embedding', 'gt_b_embedding'], {}), '(b_embedding, gt_b_embedding)\n', (17892, 17921), False, 'import torch\n'), ((17933, 17960), 'torch.allclose', 'torch.allclose', (['y_c', 'gt_y_c'], {}), '(y_c, gt_y_c)\n', (17947, 17960), False, 'import torch\n'), ((18025, 18079), 'torch.tensor', 'torch.tensor', (['[[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]]'], {}), '([[0, 1, 1, 1, 2, 2], [1, 3, 4, 2, 0, 0]])\n', (18037, 18079), False, 'import torch\n'), ((18235, 18275), 'mseg_semantic.domain_generalization.ccsa_utils.get_pair_embedding', 'get_pair_embedding', (['pair_info', 'embedding'], {}), '(pair_info, embedding)\n', (18253, 18275), False, 'from mseg_semantic.domain_generalization.ccsa_utils import contrastive_loss, paired_euclidean_distance, downsample_label_map, sample_pair_indices, find_matching_pairs, remove_pairs_from_same_domain, get_merged_pair_embeddings, pytorch_random_choice, shuffle_pytorch_tensor, get_pair_embedding, count_per_domain_statistics, sample_px_locations_uniformly, sample_crossdomain_pos_neg_pairs, form_pair_info_tensor\n'), ((18298, 18331), 'torch.tensor', 'torch.tensor', (['[[6, 26], [59, 79]]'], {}), '([[6, 26], [59, 79]])\n', (18310, 18331), False, 'import torch\n'), ((18397, 18432), 'torch.tensor', 'torch.tensor', (['[[52, 72], [80, 100]]'], {}), '([[52, 72], [80, 100]])\n', (18409, 18432), False, 'import torch\n'), ((18491, 18534), 'torch.allclose', 'torch.allclose', (['a_embedding', 'gt_a_embedding'], {}), '(a_embedding, gt_a_embedding)\n', (18505, 18534), False, 'import torch\n'), ((18546, 18589), 'torch.allclose', 'torch.allclose', (['b_embedding', 'gt_b_embedding'], {}), '(b_embedding, gt_b_embedding)\n', (18560, 18589), False, 'import torch\n'), ((18960, 19001), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(7)', 'size': '(128,)'}), '(low=0, high=7, 
size=(128,))\n', (18973, 19001), False, 'import torch\n'), ((19014, 19066), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(194)', 'size': '(128, 201, 201)'}), '(low=0, high=194, size=(128, 201, 201))\n', (19027, 19066), False, 'import torch\n'), ((19082, 19093), 'time.time', 'time.time', ([], {}), '()\n', (19091, 19093), False, 'import time\n'), ((19319, 19330), 'time.time', 'time.time', ([], {}), '()\n', (19328, 19330), False, 'import time\n'), ((16160, 16204), 'torch.allclose', 'torch.allclose', (['gt_permutations[i]', 'shuffled'], {}), '(gt_permutations[i], shuffled)\n', (16174, 16204), False, 'import torch\n'), ((17279, 17306), 'torch.arange', 'torch.arange', (['(2 * 3 * 4 * 5)'], {}), '(2 * 3 * 4 * 5)\n', (17291, 17306), False, 'import torch\n'), ((18165, 18192), 'torch.arange', 'torch.arange', (['(2 * 3 * 4 * 5)'], {}), '(2 * 3 * 4 * 5)\n', (18177, 18192), False, 'import torch\n'), ((2505, 2525), 'math.sqrt', 'math.sqrt', (['(2 * 2 + 1)'], {}), '(2 * 2 + 1)\n', (2514, 2525), False, 'import math\n'), ((2559, 2583), 'math.sqrt', 'math.sqrt', (['(3 * 3 + 4 * 4)'], {}), '(3 * 3 + 4 * 4)\n', (2568, 2583), False, 'import math\n'), ((2616, 2636), 'math.sqrt', 'math.sqrt', (['(3 * 3 + 1)'], {}), '(3 * 3 + 1)\n', (2625, 2636), False, 'import math\n'), ((16497, 16515), 'torch.unique', 'torch.unique', (['vals'], {}), '(vals)\n', (16509, 16515), False, 'import torch\n'), ((16695, 16713), 'torch.unique', 'torch.unique', (['vals'], {}), '(vals)\n', (16707, 16713), False, 'import torch\n'), ((16887, 16905), 'torch.unique', 'torch.unique', (['vals'], {}), '(vals)\n', (16899, 16905), False, 'import torch\n')] |
# coding=utf-8
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from time import time
from oslo_log import log as logging
from neutron.plugins.common import constants as plugin_const
from neutron_lbaas.services.loadbalancer import constants as lb_const
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service
from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service
from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service
from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address
from f5_openstack_agent.lbaasv2.drivers.bigip import utils
from requests import HTTPError
#import pdb
LOG = logging.getLogger(__name__)
class LBaaSBuilder(object):
# F5 LBaaS Driver using iControl for BIG-IP to
# create objects (vips, pools) - not using an iApp."""
def __init__(self, conf, driver, l2_service=None):
self.conf = conf
self.driver = driver
self.l2_service = l2_service
self.service_adapter = driver.service_adapter
self.listener_builder = listener_service.ListenerServiceBuilder(self,
self.service_adapter,
driver.cert_manager,
conf.f5_parent_ssl_profile)
self.pool_builder = pool_service.PoolServiceBuilder(
self.service_adapter,
conf.f5_parent_https_monitor)
self.l7service = l7policy_service.L7PolicyService(self, conf)
self.esd = None
@utils.instrument_execution_time
def assure_service(self, service, traffic_group, all_subnet_hints, delete_event=False):
"""Assure that a service is configured on the BIGIP."""
start_time = time()
LOG.debug("Starting assure_service")
# Needed also for delete events because of subnet hints
self._assure_loadbalancer_created(service, all_subnet_hints)
# Create and update
if not delete_event:
self._assure_pools_created(service)
self._assure_listeners_created(service)
self._assure_monitors_created(service)
self._assure_members_created(service, all_subnet_hints)
self._assure_pools_configured(service)
self._assure_l7policies_created(service)
self._assure_l7rules_created(service)
else: # delete
self._assure_monitors_deleted(service)
self._assure_members_deleted(service, all_subnet_hints)
self._assure_l7rules_deleted(service)
self._assure_l7policies_deleted(service)
self._assure_pools_deleted(service)
self._assure_listeners_deleted(service)
self._assure_loadbalancer_deleted(service)
LOG.debug(" _assure_service took %.5f secs" %
(time() - start_time))
return all_subnet_hints
@utils.instrument_execution_time
def _assure_loadbalancer_created(self, service, all_subnet_hints):
if 'loadbalancer' not in service:
return
bigips = self.driver.get_config_bigips()
loadbalancer = service["loadbalancer"]
vip_address = virtual_address.VirtualAddress(
self.service_adapter,
loadbalancer)
for bigip in bigips:
vip_address.assure(bigip)
if self.driver.l3_binding:
loadbalancer = service["loadbalancer"]
self.driver.l3_binding.bind_address(
subnet_id=loadbalancer["vip_subnet_id"],
ip_address=loadbalancer["vip_address"])
self._update_subnet_hints(loadbalancer["provisioning_status"],
loadbalancer["vip_subnet_id"],
loadbalancer["network_id"],
all_subnet_hints,
False)
if loadbalancer['provisioning_status'] != plugin_const.PENDING_DELETE:
loadbalancer['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_listeners_created(self, service):
if 'listeners' not in service:
return
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
networks = service["networks"]
bigips = self.driver.get_config_bigips()
old_listener = service.get('old_listener')
for listener in listeners:
if (old_listener != None and old_listener.get('id') == listener.get('id')):
svc = {"loadbalancer": loadbalancer,
"listener": listener,
"old_listener": old_listener,
"networks": networks}
else:
svc = {"loadbalancer": loadbalancer,
"listener": listener,
"networks": networks}
default_pool_id = listener.get('default_pool_id', '')
if default_pool_id:
pool = self.get_pool_by_id(service, default_pool_id)
if pool:
svc['pool'] = pool
if listener['provisioning_status'] == plugin_const.PENDING_UPDATE:
try:
self.listener_builder.update_listener(svc, bigips)
except Exception as err:
loadbalancer['provisioning_status'] = plugin_const.ERROR
listener['provisioning_status'] = plugin_const.ERROR
LOG.exception(err)
raise f5_ex.VirtualServerUpdateException(err.message)
elif listener['provisioning_status'] != \
plugin_const.PENDING_DELETE:
try:
# create_listener() will do an update if VS exists
self.listener_builder.create_listener(svc, bigips)
listener['operating_status'] = \
svc['listener']['operating_status']
except Exception as err:
loadbalancer['provisioning_status'] = plugin_const.ERROR
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerCreationException(err.message)
if listener['provisioning_status'] != plugin_const.PENDING_DELETE:
listener['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_pools_created(self, service):
if "pools" not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
if pool['provisioning_status'] != plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer, "pool": pool}
svc['members'] = self._get_pool_members(service, pool['id'])
try:
# create or update pool
if pool['provisioning_status'] == \
plugin_const.PENDING_CREATE:
self.pool_builder.create_pool(svc, bigips)
else:
try:
self.pool_builder.update_pool(svc, bigips)
except HTTPError as err:
if err.response.status_code == 404:
self.pool_builder.create_pool(svc, bigips)
except HTTPError as err:
if err.response.status_code != 409:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR)
raise f5_ex.PoolCreationException(err.message)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = plugin_const.ERROR
raise f5_ex.PoolCreationException(err.message)
pool['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_pools_configured(self, service):
if "pools" not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
if pool['provisioning_status'] != plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer, "pool": pool}
svc['members'] = self._get_pool_members(service, pool['id'])
try:
# assign pool name to virtual
pool_name = self.service_adapter.init_pool_name(
loadbalancer, pool)
# get associated listeners for pool
for listener in pool['listeners']:
listener = self.get_listener_by_id(service, listener['id'])
if listener:
svc['listener'] = listener
self.listener_builder.update_listener_pool(
svc, pool_name["name"], bigips)
# update virtual sever pool name, session persistence
self.listener_builder.update_session_persistence(
svc, bigips)
# ccloud: update pool to set lb_method right
self.pool_builder.update_pool(svc, bigips)
pool['provisioning_status'] = plugin_const.ACTIVE
except HTTPError as err:
if err.response.status_code != 409:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR)
LOG.exception(err)
raise f5_ex.PoolCreationException("ccloud: Error #1" + err.message)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = plugin_const.ERROR
LOG.exception(err)
raise f5_ex.PoolCreationException("ccloud: Error #2" + err.message)
@utils.instrument_execution_time
def _get_pool_members(self, service, pool_id):
'''Return a list of members associated with given pool.'''
members = []
for member in service['members']:
if member['pool_id'] == pool_id:
members.append(member)
return members
@utils.instrument_execution_time
def _update_listener_pool(self, service, listener_id, pool_name, bigips):
listener = self.get_listener_by_id(service, listener_id)
if listener is not None:
try:
listener["pool"] = pool_name
svc = {"loadbalancer": service["loadbalancer"],
"listener": listener}
self.listener_builder.update_listener(svc, bigips)
except Exception as err:
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerUpdateException(err.message)
@utils.instrument_execution_time
def _assure_monitors_deleted(self, service):
if not (("pools" in service) and ("healthmonitors" in service)):
return
monitors = service["healthmonitors"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for monitor in monitors:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": monitor,
"pool": self.get_pool_by_id(service, monitor["pool_id"])}
if monitor['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
self.pool_builder.delete_healthmonitor(svc, bigips)
except Exception as err:
monitor['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MonitorDeleteException(err.message)
@utils.instrument_execution_time
def _assure_monitors_created(self, service):
if not (("pools" in service) and ("healthmonitors" in service)):
return
monitors = service["healthmonitors"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for monitor in monitors:
svc = {"loadbalancer": loadbalancer,
"healthmonitor": monitor,
"pool": self.get_pool_by_id(service, monitor["pool_id"])}
if monitor['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
self.pool_builder.create_healthmonitor(svc, bigips)
except HTTPError as err:
if err.response.status_code != 409:
# pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR
)
raise f5_ex.MonitorCreationException(err.message)
else:
self.pool_builder.update_healthmonitor(svc, bigips)
except Exception as err:
monitor['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MonitorCreationException(err.message)
monitor['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_members_created(self, service, all_subnet_hints):
if not (("pools" in service) and ("members" in service)):
return
members = service["members"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for member in members:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if 'port' not in member and \
member['provisioning_status'] != plugin_const.PENDING_DELETE:
LOG.warning("Member definition does not include Neutron port")
# delete member if pool is being deleted
if not (member['provisioning_status'] == plugin_const.PENDING_DELETE or \
pool['provisioning_status'] == plugin_const.PENDING_DELETE):
try:
self.pool_builder.create_member(svc, bigips)
member['provisioning_status'] = plugin_const.ACTIVE
except HTTPError as err:
if err.response.status_code != 409:
# FIXME(RB)
# pool['provisioning_status'] = plugin_const.ERROR
loadbalancer['provisioning_status'] = (
plugin_const.ERROR
)
raise f5_ex.MemberCreationException(err.message)
else:
try:
self.pool_builder.update_member(svc, bigips)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberUpdateException(err.message)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberCreationException(err.message)
self._update_subnet_hints(member["provisioning_status"],
member["subnet_id"],
member["network_id"],
all_subnet_hints,
True)
@utils.instrument_execution_time
def _assure_members_deleted(self, service, all_subnet_hints):
if not (("pools" in service) and ("members" in service)):
return
members = service["members"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for member in members:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
if 'port' not in member and \
member['provisioning_status'] != plugin_const.PENDING_DELETE:
LOG.warning("Member definition does not include Neutron port")
# delete member if pool is being deleted
if member['provisioning_status'] == plugin_const.PENDING_DELETE or \
pool['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
self.pool_builder.delete_member(svc, bigips)
except Exception as err:
member['provisioning_status'] = plugin_const.ERROR
raise f5_ex.MemberDeleteException(err.message)
self._update_subnet_hints(member["provisioning_status"],
member["subnet_id"],
member["network_id"],
all_subnet_hints,
True)
@utils.instrument_execution_time
def _assure_loadbalancer_deleted(self, service):
if (service['loadbalancer']['provisioning_status'] !=
plugin_const.PENDING_DELETE):
return
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
if self.driver.l3_binding:
self.driver.l3_binding.unbind_address(
subnet_id=loadbalancer["vip_subnet_id"],
ip_address=loadbalancer["vip_address"])
vip_address = virtual_address.VirtualAddress(
self.service_adapter,
loadbalancer)
for bigip in bigips:
vip_address.assure(bigip, delete=True)
@utils.instrument_execution_time
def _assure_pools_deleted(self, service):
if 'pools' not in service:
return
pools = service["pools"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for pool in pools:
# Is the pool being deleted?
if pool['provisioning_status'] == plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer,
"pool": pool}
try:
# update listeners for pool
for listener in pool['listeners']:
svc['listener'] = \
self.get_listener_by_id(service, listener['id'])
# remove pool name from virtual before deleting pool
self.listener_builder.update_listener_pool(
svc, "", bigips)
self.listener_builder.remove_session_persistence(
svc, bigips)
# delete pool
self.pool_builder.delete_pool(svc, bigips)
except Exception as err:
pool['provisioning_status'] = plugin_const.ERROR
raise f5_ex.PoolDeleteException(err.message)
@utils.instrument_execution_time
def _assure_listeners_deleted(self, service):
if 'listeners' not in service:
return
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
for listener in listeners:
if listener['provisioning_status'] == plugin_const.PENDING_DELETE:
svc = {"loadbalancer": loadbalancer,
"listener": listener}
# ccloud: try to delete persistence which might be attached to listener
# ignore errors, persistence might be used somewhere else if pool is used more than once as default
try:
self.listener_builder.remove_session_persistence(
svc, bigips)
except Exception:
pass
# delete the listener
try:
self.listener_builder.delete_listener(svc, bigips)
except Exception as err:
listener['provisioning_status'] = plugin_const.ERROR
raise f5_ex.VirtualServerDeleteException(err.message)
@staticmethod
def _check_monitor_delete(service):
# If the pool is being deleted, then delete related objects
if service['pool']['status'] == plugin_const.PENDING_DELETE:
# Everything needs to be go with the pool, so overwrite
# service state to appropriately remove all elements
service['vip']['status'] = plugin_const.PENDING_DELETE
for member in service['members']:
member['status'] = plugin_const.PENDING_DELETE
for monitor in service['pool']['health_monitors_status']:
monitor['status'] = plugin_const.PENDING_DELETE
@staticmethod
def get_pool_by_id(service, pool_id):
if pool_id and "pools" in service:
pools = service["pools"]
for pool in pools:
if pool["id"] == pool_id:
return pool
return None
@staticmethod
def get_listener_by_id(service, listener_id):
if "listeners" in service:
listeners = service["listeners"]
for listener in listeners:
if listener["id"] == listener_id:
return listener
return None
def _update_subnet_hints(self, status, subnet_id,
network_id, all_subnet_hints, is_member):
bigips = self.driver.get_config_bigips()
for bigip in bigips:
subnet_hints = all_subnet_hints[bigip.device_name]
if status == plugin_const.PENDING_CREATE or \
status == plugin_const.PENDING_UPDATE:
if subnet_id in subnet_hints['check_for_delete_subnets']:
del subnet_hints['check_for_delete_subnets'][subnet_id]
if subnet_id not in subnet_hints['do_not_delete_subnets']:
subnet_hints['do_not_delete_subnets'].append(subnet_id)
elif status == plugin_const.PENDING_DELETE:
if subnet_id not in subnet_hints['do_not_delete_subnets']:
subnet_hints['check_for_delete_subnets'][subnet_id] = \
{'network_id': network_id,
'subnet_id': subnet_id,
'is_for_member': is_member}
@utils.instrument_execution_time
def listener_exists(self, bigip, service):
"""Test the existence of the listener defined by service."""
try:
# Throw an exception if the listener does not exist.
self.listener_builder.get_listener(service, bigip)
except HTTPError as err:
LOG.debug("Virtual service service discovery error, %s." %
err.message)
return False
return True
@utils.instrument_execution_time
def _assure_l7policies_created(self, service):
if 'l7policies' not in service:
return
bigips = self.driver.get_config_bigips()
l7policies = service['l7policies']
for l7policy in l7policies:
if l7policy['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
else:
self.l7service.create_l7policy(
l7policy, service, bigips)
except Exception as err:
l7policy['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyCreationException(err.message)
l7policy['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_l7policies_deleted(self, service):
if 'l7policies' not in service:
return
bigips = self.driver.get_config_bigips()
l7policies = service['l7policies']
for l7policy in l7policies:
if l7policy['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
else:
# Note: use update_l7policy because a listener can have
# multiple policies
self.l7service.update_l7policy(
l7policy, service, bigips)
except Exception as err:
l7policy['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyDeleteException(err.message)
@utils.instrument_execution_time
def _assure_l7rules_created(self, service):
if 'l7policy_rules' not in service:
return
bigips = self.driver.get_config_bigips()
l7rules = service['l7policy_rules']
for l7rule in l7rules:
if l7rule['provisioning_status'] != plugin_const.PENDING_DELETE:
try:
# ignore L7 rule if its policy is really an ESD
l7policy = self.get_l7policy_for_rule(
service['l7policies'], l7rule)
name = l7policy.get('name', None)
if name and self.is_esd(name):
LOG.error("L7 policy {0} is an ESD. Cannot add "
"an L7 rule to and ESD.".format(name))
continue
self.l7service.create_l7rule(l7rule, service, bigips)
except Exception as err:
l7rule['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyCreationException(err.message)
l7rule['provisioning_status'] = plugin_const.ACTIVE
@utils.instrument_execution_time
def _assure_l7rules_deleted(self, service):
if 'l7policy_rules' not in service:
return
bigips = self.driver.get_config_bigips()
l7rules = service['l7policy_rules']
for l7rule in l7rules:
if l7rule['provisioning_status'] == plugin_const.PENDING_DELETE:
try:
# ignore L7 rule if its policy is really an ESD
l7policy = self.get_l7policy_for_rule(
service['l7policies'], l7rule)
name = l7policy.get('name', None)
if name and self.is_esd(name):
continue
self.l7service.bigips = self.driver.get_config_bigips()
self.l7service.delete_l7rule(l7rule, service, bigips)
except Exception as err:
l7rule['provisioning_status'] = plugin_const.ERROR
service['loadbalancer']['provisioning_status'] = \
plugin_const.ERROR
raise f5_ex.L7PolicyDeleteException(err.message)
@utils.instrument_execution_time
def get_listener_stats(self, service, stats):
"""Get statistics for a loadbalancer service.
Sums values for stats defined in stats dictionary for all listeners
defined in service object. For example, if loadbalancer has two
listeners and stats defines a stat 'clientside.bitsIn' as a key, the
sum of all pools' clientside.bitsIn will be returned in stats.
Provisioning status is ignored -- PENDING_DELETE objects are
included.
:param service: defines loadbalancer and set of pools.
:param stats: a dictionary that defines which stats to get.
Should be initialized by caller with 0 values.
:return: stats are appended to input stats dict (i.e., contains
the sum of given stats for all BIG-IPs).
"""
listeners = service["listeners"]
loadbalancer = service["loadbalancer"]
bigips = self.driver.get_config_bigips()
collected_stats = {}
for stat in stats:
collected_stats[stat] = 0
for listener in listeners:
svc = {"loadbalancer": loadbalancer, "listener": listener}
vs_stats = self.listener_builder.get_stats(svc, bigips, stats)
for stat in stats:
collected_stats[stat] += vs_stats[stat]
return collected_stats
@utils.instrument_execution_time
def update_operating_status(self, service):
bigip = self.driver.get_active_bigip()
loadbalancer = service["loadbalancer"]
status_keys = ['status.availabilityState',
'status.enabledState']
members = service["members"]
for member in members:
if member['provisioning_status'] == plugin_const.ACTIVE:
pool = self.get_pool_by_id(service, member["pool_id"])
svc = {"loadbalancer": loadbalancer,
"member": member,
"pool": pool}
status = self.pool_builder.get_member_status(
svc, bigip, status_keys)
member['operating_status'] = self.convert_operating_status(
status)
@staticmethod
def convert_operating_status(status):
"""Convert object status to LBaaS operating status.
status.availabilityState and status.enabledState = Operating Status
available enabled ONLINE
available disabled DISABLED
offline - OFFLINE
unknown - NO_MONITOR
"""
op_status = None
available = status.get('status.availabilityState', '')
if available == 'available':
enabled = status.get('status.enabledState', '')
if enabled == 'enabled':
op_status = lb_const.ONLINE
elif enabled == 'disabled':
op_status = lb_const.DISABLED
else:
LOG.warning('Unexpected value %s for status.enabledState',
enabled)
elif available == 'offline':
op_status = lb_const.OFFLINE
elif available == 'unknown':
op_status = lb_const.NO_MONITOR
return op_status
def get_l7policy_for_rule(self, l7policies, l7rule):
policy_id = l7rule['policy_id']
for policy in l7policies:
if policy_id == policy['id']:
return policy
return None
def init_esd(self, esd):
self.esd = esd
def get_esd(self, name):
if self.esd:
return self.esd.get_esd(name)
return None
def is_esd(self, name):
return self.esd.get_esd(name) is not None | [
"f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolDeleteException",
"f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MonitorDeleteException",
"f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.VirtualServerUpdateException",
"f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolCreationExceptio... | [((1254, 1281), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1271, 1281), True, 'from oslo_log import log as logging\n'), ((1655, 1776), 'f5_openstack_agent.lbaasv2.drivers.bigip.listener_service.ListenerServiceBuilder', 'listener_service.ListenerServiceBuilder', (['self', 'self.service_adapter', 'driver.cert_manager', 'conf.f5_parent_ssl_profile'], {}), '(self, self.service_adapter, driver.\n cert_manager, conf.f5_parent_ssl_profile)\n', (1694, 1776), False, 'from f5_openstack_agent.lbaasv2.drivers.bigip import listener_service\n'), ((2016, 2104), 'f5_openstack_agent.lbaasv2.drivers.bigip.pool_service.PoolServiceBuilder', 'pool_service.PoolServiceBuilder', (['self.service_adapter', 'conf.f5_parent_https_monitor'], {}), '(self.service_adapter, conf.\n f5_parent_https_monitor)\n', (2047, 2104), False, 'from f5_openstack_agent.lbaasv2.drivers.bigip import pool_service\n'), ((2150, 2194), 'f5_openstack_agent.lbaasv2.drivers.bigip.l7policy_service.L7PolicyService', 'l7policy_service.L7PolicyService', (['self', 'conf'], {}), '(self, conf)\n', (2182, 2194), False, 'from f5_openstack_agent.lbaasv2.drivers.bigip import l7policy_service\n'), ((2434, 2440), 'time.time', 'time', ([], {}), '()\n', (2438, 2440), False, 'from time import time\n'), ((3882, 3948), 'f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address.VirtualAddress', 'virtual_address.VirtualAddress', (['self.service_adapter', 'loadbalancer'], {}), '(self.service_adapter, loadbalancer)\n', (3912, 3948), False, 'from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address\n'), ((18802, 18868), 'f5_openstack_agent.lbaasv2.drivers.bigip.virtual_address.VirtualAddress', 'virtual_address.VirtualAddress', (['self.service_adapter', 'loadbalancer'], {}), '(self.service_adapter, loadbalancer)\n', (18832, 18868), False, 'from f5_openstack_agent.lbaasv2.drivers.bigip import virtual_address\n'), 
((3539, 3545), 'time.time', 'time', ([], {}), '()\n', (3543, 3545), False, 'from time import time\n'), ((12017, 12064), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.VirtualServerUpdateException', 'f5_ex.VirtualServerUpdateException', (['err.message'], {}), '(err.message)\n', (12051, 12064), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((6245, 6292), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.VirtualServerUpdateException', 'f5_ex.VirtualServerUpdateException', (['err.message'], {}), '(err.message)\n', (6279, 6292), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((8734, 8774), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolCreationException', 'f5_ex.PoolCreationException', (['err.message'], {}), '(err.message)\n', (8761, 8774), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((11047, 11108), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolCreationException', 'f5_ex.PoolCreationException', (["('ccloud: Error #2' + err.message)"], {}), "('ccloud: Error #2' + err.message)\n", (11074, 11108), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((12902, 12943), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MonitorDeleteException', 'f5_ex.MonitorDeleteException', (['err.message'], {}), '(err.message)\n', (12930, 12943), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((14266, 14309), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MonitorCreationException', 'f5_ex.MonitorCreationException', (['err.message'], {}), '(err.message)\n', (14296, 14309), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((16385, 16427), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MemberCreationException', 'f5_ex.MemberCreationException', (['err.message'], {}), '(err.message)\n', (16414, 16427), True, 'from 
f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((17913, 17953), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MemberDeleteException', 'f5_ex.MemberDeleteException', (['err.message'], {}), '(err.message)\n', (17940, 17953), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((20265, 20303), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolDeleteException', 'f5_ex.PoolDeleteException', (['err.message'], {}), '(err.message)\n', (20290, 20303), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((21462, 21509), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.VirtualServerDeleteException', 'f5_ex.VirtualServerDeleteException', (['err.message'], {}), '(err.message)\n', (21496, 21509), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((25142, 25186), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.L7PolicyCreationException', 'f5_ex.L7PolicyCreationException', (['err.message'], {}), '(err.message)\n', (25173, 25186), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((26288, 26330), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.L7PolicyDeleteException', 'f5_ex.L7PolicyDeleteException', (['err.message'], {}), '(err.message)\n', (26317, 26330), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((27496, 27540), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.L7PolicyCreationException', 'f5_ex.L7PolicyCreationException', (['err.message'], {}), '(err.message)\n', (27527, 27540), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((28703, 28745), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.L7PolicyDeleteException', 'f5_ex.L7PolicyDeleteException', (['err.message'], {}), '(err.message)\n', (28732, 28745), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((6890, 6939), 
'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.VirtualServerCreationException', 'f5_ex.VirtualServerCreationException', (['err.message'], {}), '(err.message)\n', (6926, 6939), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((8479, 8519), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolCreationException', 'f5_ex.PoolCreationException', (['err.message'], {}), '(err.message)\n', (8506, 8519), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((10732, 10793), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.PoolCreationException', 'f5_ex.PoolCreationException', (["('ccloud: Error #1' + err.message)"], {}), "('ccloud: Error #1' + err.message)\n", (10759, 10793), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((13981, 14024), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MonitorCreationException', 'f5_ex.MonitorCreationException', (['err.message'], {}), '(err.message)\n', (14011, 14024), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((15871, 15913), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MemberCreationException', 'f5_ex.MemberCreationException', (['err.message'], {}), '(err.message)\n', (15900, 15913), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n'), ((16204, 16244), 'f5_openstack_agent.lbaasv2.drivers.bigip.exceptions.MemberUpdateException', 'f5_ex.MemberUpdateException', (['err.message'], {}), '(err.message)\n', (16231, 16244), True, 'from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex\n')] |
import cherrypy
from gspread import CellNotFound
from python_utility.spreadsheet.simple_spreadsheet import SimpleSpreadsheet
class SpreadsheetService:
@staticmethod
def read_status() -> str:
try:
from python_utility.build import Build
except ImportError:
# TODO: Understand the best practice.
from python_utility.build_undefined import Build # type: ignore
return 'Version: ' + Build.GIT_TAG + '\n' \
+ 'Git hash: ' + Build.GIT_HASH + '\n' \
+ 'Build date: ' + Build.BUILD_DATE + '\n'
@cherrypy.expose
def index(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return 'Hello friend.\n'
@cherrypy.expose
@cherrypy.tools.json_out()
@cherrypy.tools.json_in()
def spreadsheet(self):
request = cherrypy.request.json
if 'search' not in request:
response = 'search missing'
elif 'replace' not in request:
response = 'replace missing'
elif 'x-offset' not in request:
response = 'x-offset missing'
else:
search = request['search']
replace = request['replace']
x_offset = request['x-offset']
spreadsheet = SimpleSpreadsheet()
spreadsheet.connect()
try:
cell = spreadsheet.search(search)
spreadsheet.edit_coordinates(
cell.row,
cell.col + int(x_offset),
replace
)
response = 'Success'
except CellNotFound as e:
response = 'Not found: ' + str(e)
return response
@cherrypy.expose
def status(self):
cherrypy.response.headers['Content-Type'] = 'text/plain'
return SpreadsheetService.read_status()
| [
"cherrypy.tools.json_in",
"python_utility.spreadsheet.simple_spreadsheet.SimpleSpreadsheet",
"cherrypy.tools.json_out"
] | [((757, 782), 'cherrypy.tools.json_out', 'cherrypy.tools.json_out', ([], {}), '()\n', (780, 782), False, 'import cherrypy\n'), ((788, 812), 'cherrypy.tools.json_in', 'cherrypy.tools.json_in', ([], {}), '()\n', (810, 812), False, 'import cherrypy\n'), ((1282, 1301), 'python_utility.spreadsheet.simple_spreadsheet.SimpleSpreadsheet', 'SimpleSpreadsheet', ([], {}), '()\n', (1299, 1301), False, 'from python_utility.spreadsheet.simple_spreadsheet import SimpleSpreadsheet\n')] |
from src.modeling import util
import numpy as np
from dskc import dskc_modeling
def test(model):
    """Evaluate *model* on the held-out test set and return an EvaluationReport."""
    x_test, y_test, _ = util.read_test_data()

    # Predict, then flatten the (n, 1) network output into a 1-D vector.
    predictions = model.predict(x_test)
    predictions = np.asarray([row[0] for row in predictions])

    return dskc_modeling.EvaluationReport(y_test, predictions, name="Neural Network")
def predict(model, data):
    """Run *model* on a single sample and return the scalar prediction."""
    sample = np.array(data, ndmin=2)  # guarantee a 2-D batch of one row
    batch_prediction = model.predict(sample)
    return batch_prediction[0][0]
| [
"dskc.dskc_modeling.EvaluationReport",
"numpy.array",
"src.modeling.util.read_test_data",
"numpy.asarray"
] | [((123, 144), 'src.modeling.util.read_test_data', 'util.read_test_data', ([], {}), '()\n', (142, 144), False, 'from src.modeling import util\n'), ((221, 255), 'numpy.asarray', 'np.asarray', (['[x[0] for x in y_pred]'], {}), '([x[0] for x in y_pred])\n', (231, 255), True, 'import numpy as np\n'), ((285, 354), 'dskc.dskc_modeling.EvaluationReport', 'dskc_modeling.EvaluationReport', (['y_test', 'y_pred'], {'name': '"""Neural Network"""'}), "(y_test, y_pred, name='Neural Network')\n", (315, 354), False, 'from dskc import dskc_modeling\n'), ((411, 434), 'numpy.array', 'np.array', (['data'], {'ndmin': '(2)'}), '(data, ndmin=2)\n', (419, 434), True, 'import numpy as np\n')] |
# added specifically to make floating point division apply to code in bar position calculation
from __future__ import division
import libtcodpy as libtcod
import xp_loader
import gzip
from vec2d import Vec2d
from model.attribute import AttributeTag
from ui.frame import Frame
from ui.ui_event import UIEvent, UIEventType
# Displays remaining and queued actions.
class FrameLibraries(Frame):
    """UI frame that renders the player's library slots as named horizontal bars.

    The background .xp image carries two position keys on layer 1: a red key
    marking where the library list starts and a green key marking the
    right-hand extent of each line.
    """
    def __init__(self, root_console_width, root_console_height, frame_manager):
        self.entity_manager = frame_manager.parent_menu.entity_manager
        # load xp for bg
        console_bg_xp = gzip.open('assets\\ui\\ui_frame_libraries_bg.xp')
        self.bg_data = xp_loader.load_xp_string(console_bg_xp.read())
        Frame.__init__(self, root_console_width, root_console_height, self.bg_data['width'], self.bg_data['height'], frame_manager)
        # Red position key: top-left of the first library line.
        library_start_xy = xp_loader.get_position_key_xy(self.bg_data['layer_data'][1], xp_loader.poskey_color_red)
        self.library_start_xy = Vec2d(library_start_xy[0], library_start_xy[1])
        # Green position key: right-hand end of each library line.
        self.library_line_extent = xp_loader.get_position_key_xy(self.bg_data['layer_data'][1], xp_loader.poskey_color_green)
        #TODO put these in config somewhere
        self.line_char = chr(196)  # CP437 horizontal box-drawing character
        self.line_bg = libtcod.Color(2, 22, 12)
        self.line_fg = libtcod.Color(6, 130, 60)
        self.libname_fg = libtcod.Color(102, 255, 178)
        libtcod.console_set_default_background(self.console,self.line_bg)
        libtcod.console_set_default_foreground(self.console,self.libname_fg)
        libtcod.console_set_alignment(self.console, libtcod.LEFT)
        xp_loader.load_layer_to_console(self.console, self.bg_data['layer_data'][0])
    def handle_ui_event(self, event):
        # Display-only frame: UI events are intentionally ignored.
        pass
    def draw(self):
        """Redraw the background, four library bars and their names, then blit to root."""
        libtcod.console_clear(self.console)
        xp_loader.load_layer_to_console(self.console, self.bg_data['layer_data'][0])
        player_libraries = self.entity_manager.get_entity_by_id(self.entity_manager.player_id).get_attribute(AttributeTag.Libraries).data['value']
        # Four fixed slots; empty slots render as 'lib_missing' in the bar color.
        for lib in range(4):
            #+1 here because range will go up to but not including the final screen tile needed
            for x in range(self.library_line_extent[0] - self.library_start_xy[0] + 1):
                libtcod.console_put_char_ex(self.console, self.library_start_xy[0] + x, self.library_start_xy[1] + lib, self.line_char, self.line_fg, self.line_bg)
            libname_xy = Vec2d(self.library_start_xy[0], self.library_start_xy[1] + lib)
            #TODO: move to config strings
            libname = 'lib_missing'
            print_color = self.line_fg
            if len(player_libraries) > lib:
                print_color = self.libname_fg
                libname = player_libraries[lib].name
            libtcod.console_set_default_foreground(self.console, print_color)
            libtcod.console_print(self.console, libname_xy[0], libname_xy[1], libname)
        libtcod.console_blit(self.console, 0, 0, self.width, self.height, 0, 0, 0)
"vec2d.Vec2d",
"xp_loader.get_position_key_xy",
"gzip.open",
"libtcodpy.console_set_default_foreground",
"libtcodpy.console_set_default_background",
"xp_loader.load_layer_to_console",
"libtcodpy.console_clear",
"libtcodpy.console_blit",
"libtcodpy.console_set_alignment",
"libtcodpy.console_put_cha... | [((577, 626), 'gzip.open', 'gzip.open', (['"""assets\\\\ui\\\\ui_frame_libraries_bg.xp"""'], {}), "('assets\\\\ui\\\\ui_frame_libraries_bg.xp')\n", (586, 626), False, 'import gzip\n'), ((694, 822), 'ui.frame.Frame.__init__', 'Frame.__init__', (['self', 'root_console_width', 'root_console_height', "self.bg_data['width']", "self.bg_data['height']", 'frame_manager'], {}), "(self, root_console_width, root_console_height, self.bg_data[\n 'width'], self.bg_data['height'], frame_manager)\n", (708, 822), False, 'from ui.frame import Frame\n'), ((840, 933), 'xp_loader.get_position_key_xy', 'xp_loader.get_position_key_xy', (["self.bg_data['layer_data'][1]", 'xp_loader.poskey_color_red'], {}), "(self.bg_data['layer_data'][1], xp_loader.\n poskey_color_red)\n", (869, 933), False, 'import xp_loader\n'), ((956, 1003), 'vec2d.Vec2d', 'Vec2d', (['library_start_xy[0]', 'library_start_xy[1]'], {}), '(library_start_xy[0], library_start_xy[1])\n', (961, 1003), False, 'from vec2d import Vec2d\n'), ((1033, 1128), 'xp_loader.get_position_key_xy', 'xp_loader.get_position_key_xy', (["self.bg_data['layer_data'][1]", 'xp_loader.poskey_color_green'], {}), "(self.bg_data['layer_data'][1], xp_loader.\n poskey_color_green)\n", (1062, 1128), False, 'import xp_loader\n'), ((1208, 1232), 'libtcodpy.Color', 'libtcod.Color', (['(2)', '(22)', '(12)'], {}), '(2, 22, 12)\n', (1221, 1232), True, 'import libtcodpy as libtcod\n'), ((1250, 1275), 'libtcodpy.Color', 'libtcod.Color', (['(6)', '(130)', '(60)'], {}), '(6, 130, 60)\n', (1263, 1275), True, 'import libtcodpy as libtcod\n'), ((1296, 1324), 'libtcodpy.Color', 'libtcod.Color', (['(102)', '(255)', '(178)'], {}), '(102, 255, 178)\n', (1309, 1324), True, 'import libtcodpy as libtcod\n'), ((1328, 1394), 'libtcodpy.console_set_default_background', 'libtcod.console_set_default_background', (['self.console', 'self.line_bg'], {}), '(self.console, self.line_bg)\n', (1366, 1394), True, 'import libtcodpy as libtcod\n'), ((1396, 
1465), 'libtcodpy.console_set_default_foreground', 'libtcod.console_set_default_foreground', (['self.console', 'self.libname_fg'], {}), '(self.console, self.libname_fg)\n', (1434, 1465), True, 'import libtcodpy as libtcod\n'), ((1467, 1524), 'libtcodpy.console_set_alignment', 'libtcod.console_set_alignment', (['self.console', 'libtcod.LEFT'], {}), '(self.console, libtcod.LEFT)\n', (1496, 1524), True, 'import libtcodpy as libtcod\n'), ((1528, 1604), 'xp_loader.load_layer_to_console', 'xp_loader.load_layer_to_console', (['self.console', "self.bg_data['layer_data'][0]"], {}), "(self.console, self.bg_data['layer_data'][0])\n", (1559, 1604), False, 'import xp_loader\n'), ((1669, 1704), 'libtcodpy.console_clear', 'libtcod.console_clear', (['self.console'], {}), '(self.console)\n', (1690, 1704), True, 'import libtcodpy as libtcod\n'), ((1707, 1783), 'xp_loader.load_layer_to_console', 'xp_loader.load_layer_to_console', (['self.console', "self.bg_data['layer_data'][0]"], {}), "(self.console, self.bg_data['layer_data'][0])\n", (1738, 1783), False, 'import xp_loader\n'), ((2700, 2774), 'libtcodpy.console_blit', 'libtcod.console_blit', (['self.console', '(0)', '(0)', 'self.width', 'self.height', '(0)', '(0)', '(0)'], {}), '(self.console, 0, 0, self.width, self.height, 0, 0, 0)\n', (2720, 2774), True, 'import libtcodpy as libtcod\n'), ((2284, 2347), 'vec2d.Vec2d', 'Vec2d', (['self.library_start_xy[0]', '(self.library_start_xy[1] + lib)'], {}), '(self.library_start_xy[0], self.library_start_xy[1] + lib)\n', (2289, 2347), False, 'from vec2d import Vec2d\n'), ((2553, 2618), 'libtcodpy.console_set_default_foreground', 'libtcod.console_set_default_foreground', (['self.console', 'print_color'], {}), '(self.console, print_color)\n', (2591, 2618), True, 'import libtcodpy as libtcod\n'), ((2622, 2696), 'libtcodpy.console_print', 'libtcod.console_print', (['self.console', 'libname_xy[0]', 'libname_xy[1]', 'libname'], {}), '(self.console, libname_xy[0], libname_xy[1], libname)\n', (2643, 
2696), True, 'import libtcodpy as libtcod\n'), ((2120, 2272), 'libtcodpy.console_put_char_ex', 'libtcod.console_put_char_ex', (['self.console', '(self.library_start_xy[0] + x)', '(self.library_start_xy[1] + lib)', 'self.line_char', 'self.line_fg', 'self.line_bg'], {}), '(self.console, self.library_start_xy[0] + x, \n self.library_start_xy[1] + lib, self.line_char, self.line_fg, self.line_bg)\n', (2147, 2272), True, 'import libtcodpy as libtcod\n')] |
from random import choice
# Alphanumeric alphabet used for transaction IDs.
TRID_CSET = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'


def gen_trid(length=12):
    """Return a random transaction ID of *length* characters drawn from TRID_CSET."""
    chars = [choice(TRID_CSET) for _ in range(length)]
    return ''.join(chars)
| [
"random.choice"
] | [((151, 168), 'random.choice', 'choice', (['TRID_CSET'], {}), '(TRID_CSET)\n', (157, 168), False, 'from random import choice\n')] |
"""
Derived from keras-yolo3 train.py (https://github.com/qqwweee/keras-yolo3),
with additions from https://github.com/AntonMu/TrainYourOwnYOLO.
"""
import os
import sys
import argparse
import pickle
import numpy as np
import keras.backend as K
from keras.layers import Input, Lambda
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from PIL import Image
from time import time
from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss
from yolo3.utils import get_random_data
def get_curr_dir():
    """Absolute path of the directory containing this file."""
    return os.path.dirname(os.path.abspath(__file__))


def get_parent_dir(n=1):
    """Return the n-th parent directory of the directory containing this file."""
    path = get_curr_dir()
    for _ in range(n):
        path = os.path.dirname(path)
    return path
# --- global constants
# Paths into the VoTT export produced alongside this repo.
EXPORT_DIR = os.path.join(get_parent_dir(), 'for_yolo', 'vott', 'vott-export')
ANNOT_FILE = os.path.join(EXPORT_DIR, 'yolo_annotations.txt')  # one image + boxes per line
WEIGHTS_DIR = os.path.join(get_curr_dir(), 'model_data')
YOLO_CLASSES = os.path.join(EXPORT_DIR, 'classes.names')  # one class name per line
LOG_DIR = 'logs/000/'  # TensorBoard logs, checkpoints and saved weights
ANCHORS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_anchors.txt')
WEIGHTS_PATH = os.path.join(WEIGHTS_DIR, 'yolo_weights.h5')  # pretrained weights for transfer learning
VAL_SPLIT = 0.1 # 10% validation data
EPOCHS = 102 # number of epochs to train; 50% transfer, 50% fine-tuning
def _main():
    """Run two-stage YOLOv3 training.

    Stage 1 trains with a (mostly) frozen backbone to obtain a stable loss;
    stage 2 unfreezes every layer and fine-tunes with LR reduction and early
    stopping. Checkpoints, final weights and per-epoch loss logs are all
    written under LOG_DIR.
    """
    class_names = get_classes(YOLO_CLASSES)
    num_classes = len(class_names)
    anchors = get_anchors(ANCHORS_PATH)

    input_shape = (416, 416)  # multiple of 32, (height, width)
    epoch1, epoch2 = EPOCHS // 2, EPOCHS // 2  # epochs per stage

    model = create_model(input_shape, anchors, num_classes,
                         freeze_body=2, weights_path=WEIGHTS_PATH)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=LOG_DIR)
    checkpoint = ModelCheckpoint(LOG_DIR + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)

    # BUGFIX: the loss-log writes below referenced an undefined `log_dir_time`
    # (NameError at runtime). The logs now go to LOG_DIR, created up front.
    log_dir_time = LOG_DIR
    os.makedirs(log_dir_time, exist_ok=True)

    with open(ANNOT_FILE) as f:
        lines = f.readlines()

    # Deterministic shuffle so the train/validation split is reproducible.
    np.random.seed(10101)
    np.random.shuffle(lines)
    num_val = int(len(lines) * VAL_SPLIT)
    num_train = len(lines) - num_val

    # Stage 1: train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a decent model.
    if True:
        # The real loss is computed inside the custom yolo_loss Lambda layer,
        # so the Keras loss simply passes y_pred through.
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        model.save_weights(os.path.join(LOG_DIR, 'trained_weights_stage_1.h5'))

        _dump_losses(history, log_dir_time, 'step1')

    # Stage 2: unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for layer in model.layers:
            layer.trainable = True
        # recompile to apply the trainability change
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        print('Unfreeze all layers.')

        batch_size = 4  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size, input_shape, anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=epoch1 + epoch2,
            initial_epoch=epoch1,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(os.path.join(LOG_DIR, 'trained_weights_final.h5'))

        _dump_losses(history, log_dir_time, 'step2')


def _dump_losses(history, out_dir, prefix):
    """Write per-epoch train/val losses from *history* as text files, one value per line.

    File names keep the original '<prefix>_loss.npy' / '<prefix>_val_loss.npy'
    convention (plain text despite the .npy suffix).
    """
    train_loss = history.history['loss']
    with open(os.path.join(out_dir, prefix + '_loss.npy'), 'w') as f:
        for item in train_loss:
            f.write("%s\n" % item)
    val_loss = np.array(history.history['val_loss'])
    with open(os.path.join(out_dir, prefix + '_val_loss.npy'), 'w') as f:
        for item in val_loss:
            f.write("%s\n" % item)
# --- HELPER FUNCS
def get_classes(classes_path):
    """Read one class name per line from *classes_path*, stripped of whitespace."""
    with open(classes_path) as handle:
        return [line.strip() for line in handle]
def get_anchors(anchors_path):
    """Load comma-separated anchor sizes from *anchors_path* as an (N, 2) float array."""
    with open(anchors_path) as handle:
        first_line = handle.readline()
    values = [float(token) for token in first_line.split(',')]
    return np.array(values).reshape(-1, 2)
def create_model(input_shape, anchors, num_classes, load_pretrained=True, freeze_body=2,
            weights_path='keras_yolo3/model_data/yolo_weights.h5'):
    '''Create the trainable YOLOv3 model.

    The returned Model takes [image, y_true_scale1..3] as inputs and outputs
    the scalar yolo_loss, so it can be trained with a pass-through Keras loss.

    freeze_body: 1 freezes the darknet53 backbone (first 185 layers),
    2 freezes everything except the 3 output layers.
    '''
    K.clear_session() # get a new session
    image_input = Input(shape=(None, None, 3))
    h, w = input_shape
    num_anchors = len(anchors)
    # One y_true input per detection scale (strides 32, 16 and 8).
    y_true = [Input(shape=(h//{0:32, 1:16, 2:8}[l], w//{0:32, 1:16, 2:8}[l], \
        num_anchors//3, num_classes+5)) for l in range(3)]
    model_body = yolo_body(image_input, num_anchors//3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(num_anchors, num_classes))
    if load_pretrained:
        # skip_mismatch lets the head layers differ when num_classes changes.
        model_body.load_weights(weights_path, by_name=True, skip_mismatch=True)
        print('Load weights {}.'.format(weights_path))
        if freeze_body in [1, 2]:
            # Freeze darknet53 body or freeze all but 3 output layers.
            num = (185, len(model_body.layers)-3)[freeze_body-1]
            for i in range(num): model_body.layers[i].trainable = False
            print('Freeze the first {} layers of total {} layers.'.format(num, len(model_body.layers)))
    # Wrap yolo_loss in a Lambda layer so the loss is part of the graph.
    model_loss = Lambda(yolo_loss, output_shape=(1,), name='yolo_loss',
        arguments={'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})(
        [*model_body.output, *y_true])
    model = Model([model_body.input, *y_true], model_loss)
    return model
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''Infinite batch generator for fit_generator.

    Yields ([image_batch, *y_true_per_scale], dummy_targets); the dummy zeros
    exist only because Keras requires a target array — the real loss is
    computed by the yolo_loss layer inside the model.
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i==0:
                # Reshuffle at the start of every pass over the data.
                np.random.shuffle(annotation_lines)
            # random=True enables augmentation (jitter, flip, hue/sat shifts).
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i+1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    """Return a batch generator, or None when there is nothing to generate."""
    if not annotation_lines or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)
# ----
if __name__ == '__main__':
    # Entry point: run the two-stage YOLOv3 training pipeline.
    _main()
| [
"yolo3.model.preprocess_true_boxes",
"numpy.array",
"yolo3.model.yolo_body",
"numpy.random.seed",
"keras.callbacks.EarlyStopping",
"keras.backend.clear_session",
"keras.models.Model",
"keras.optimizers.Adam",
"keras.callbacks.ReduceLROnPlateau",
"os.path.dirname",
"yolo3.utils.get_random_data",
... | [((1060, 1108), 'os.path.join', 'os.path.join', (['EXPORT_DIR', '"""yolo_annotations.txt"""'], {}), "(EXPORT_DIR, 'yolo_annotations.txt')\n", (1072, 1108), False, 'import os\n'), ((1182, 1223), 'os.path.join', 'os.path.join', (['EXPORT_DIR', '"""classes.names"""'], {}), "(EXPORT_DIR, 'classes.names')\n", (1194, 1223), False, 'import os\n'), ((1262, 1307), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""yolo_anchors.txt"""'], {}), "(WEIGHTS_DIR, 'yolo_anchors.txt')\n", (1274, 1307), False, 'import os\n'), ((1323, 1367), 'os.path.join', 'os.path.join', (['WEIGHTS_DIR', '"""yolo_weights.h5"""'], {}), "(WEIGHTS_DIR, 'yolo_weights.h5')\n", (1335, 1367), False, 'import os\n'), ((1911, 1939), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'LOG_DIR'}), '(log_dir=LOG_DIR)\n', (1922, 1939), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((1957, 2124), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (["(LOG_DIR + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5')"], {'monitor': '"""val_loss"""', 'save_weights_only': '(True)', 'save_best_only': '(True)', 'period': '(3)'}), "(LOG_DIR +\n 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5', monitor=\n 'val_loss', save_weights_only=True, save_best_only=True, period=3)\n", (1972, 2124), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2140, 2212), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.1)', 'patience': '(3)', 'verbose': '(1)'}), "(monitor='val_loss', factor=0.1, patience=3, verbose=1)\n", (2157, 2212), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2234, 2304), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(10)', 'verbose': '(1)'}), "(monitor='val_loss', min_delta=0, patience=10, 
verbose=1)\n", (2247, 2304), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau, EarlyStopping\n'), ((2373, 2394), 'numpy.random.seed', 'np.random.seed', (['(10101)'], {}), '(10101)\n', (2387, 2394), True, 'import numpy as np\n'), ((2399, 2423), 'numpy.random.shuffle', 'np.random.shuffle', (['lines'], {}), '(lines)\n', (2416, 2423), True, 'import numpy as np\n'), ((6205, 6222), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (6220, 6222), True, 'import keras.backend as K\n'), ((6261, 6289), 'keras.layers.Input', 'Input', ([], {'shape': '(None, None, 3)'}), '(shape=(None, None, 3))\n', (6266, 6289), False, 'from keras.layers import Input, Lambda\n'), ((6501, 6554), 'yolo3.model.yolo_body', 'yolo_body', (['image_input', '(num_anchors // 3)', 'num_classes'], {}), '(image_input, num_anchors // 3, num_classes)\n', (6510, 6554), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((7372, 7418), 'keras.models.Model', 'Model', (['[model_body.input, *y_true]', 'model_loss'], {}), '([model_body.input, *y_true], model_loss)\n', (7377, 7418), False, 'from keras.models import Model\n'), ((662, 687), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (677, 687), False, 'import os\n'), ((889, 918), 'os.path.dirname', 'os.path.dirname', (['current_path'], {}), '(current_path)\n', (904, 918), False, 'import os\n'), ((3767, 3804), 'numpy.array', 'np.array', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (3775, 3804), True, 'import numpy as np\n'), ((5351, 5388), 'numpy.array', 'np.array', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (5359, 5388), True, 'import numpy as np\n'), ((6359, 6484), 'keras.layers.Input', 'Input', ([], {'shape': '(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, (1): 16, (2): 8}[l], \n num_anchors // 3, num_classes + 5)'}), '(shape=(h // {(0): 32, (1): 16, (2): 8}[l], w // {(0): 32, 
(1): 16, (2\n ): 8}[l], num_anchors // 3, num_classes + 5))\n', (6364, 6484), False, 'from keras.layers import Input, Lambda\n'), ((7175, 7315), 'keras.layers.Lambda', 'Lambda', (['yolo_loss'], {'output_shape': '(1,)', 'name': '"""yolo_loss"""', 'arguments': "{'anchors': anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5}"}), "(yolo_loss, output_shape=(1,), name='yolo_loss', arguments={'anchors':\n anchors, 'num_classes': num_classes, 'ignore_thresh': 0.5})\n", (7181, 7315), False, 'from keras.layers import Input, Lambda\n'), ((7982, 8002), 'numpy.array', 'np.array', (['image_data'], {}), '(image_data)\n', (7990, 8002), True, 'import numpy as np\n'), ((8022, 8040), 'numpy.array', 'np.array', (['box_data'], {}), '(box_data)\n', (8030, 8040), True, 'import numpy as np\n'), ((8058, 8124), 'yolo3.model.preprocess_true_boxes', 'preprocess_true_boxes', (['box_data', 'input_shape', 'anchors', 'num_classes'], {}), '(box_data, input_shape, anchors, num_classes)\n', (8079, 8124), False, 'from yolo3.model import preprocess_true_boxes, yolo_body, tiny_yolo_body, yolo_loss\n'), ((3480, 3531), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""trained_weights_stage_1.h5"""'], {}), "(LOG_DIR, 'trained_weights_stage_1.h5')\n", (3492, 3531), False, 'import os\n'), ((5066, 5115), 'os.path.join', 'os.path.join', (['LOG_DIR', '"""trained_weights_final.h5"""'], {}), "(LOG_DIR, 'trained_weights_final.h5')\n", (5078, 5115), False, 'import os\n'), ((5974, 5991), 'numpy.array', 'np.array', (['anchors'], {}), '(anchors)\n', (5982, 5991), True, 'import numpy as np\n'), ((7802, 7864), 'yolo3.utils.get_random_data', 'get_random_data', (['annotation_lines[i]', 'input_shape'], {'random': '(True)'}), '(annotation_lines[i], input_shape, random=True)\n', (7817, 7864), False, 'from yolo3.utils import get_random_data\n'), ((2696, 2710), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (2700, 2710), False, 'from keras.optimizers import Adam\n'), ((3603, 3647), 
'os.path.join', 'os.path.join', (['log_dir_time', '"""step1_loss.npy"""'], {}), "(log_dir_time, 'step1_loss.npy')\n", (3615, 3647), False, 'import os\n'), ((3823, 3871), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step1_val_loss.npy"""'], {}), "(log_dir_time, 'step1_val_loss.npy')\n", (3835, 3871), False, 'import os\n'), ((4195, 4210), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0001)'}), '(lr=0.0001)\n', (4199, 4210), False, 'from keras.optimizers import Adam\n'), ((5187, 5231), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step2_loss.npy"""'], {}), "(log_dir_time, 'step2_loss.npy')\n", (5199, 5231), False, 'import os\n'), ((5407, 5455), 'os.path.join', 'os.path.join', (['log_dir_time', '"""step2_val_loss.npy"""'], {}), "(log_dir_time, 'step2_val_loss.npy')\n", (5419, 5455), False, 'import os\n'), ((7741, 7776), 'numpy.random.shuffle', 'np.random.shuffle', (['annotation_lines'], {}), '(annotation_lines)\n', (7758, 7776), True, 'import numpy as np\n'), ((8162, 8182), 'numpy.zeros', 'np.zeros', (['batch_size'], {}), '(batch_size)\n', (8170, 8182), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# 2020 February 15 - <NAME>
#
# I dedicate any and all copyright interest in this software to the
# public domain. I make this dedication for the benefit of the public at
# large and to the detriment of my heirs and successors. I intend this
# dedication to be an overt act of relinquishment in perpetuity of all
# present and future rights to this software under copyright law.
#
import argparse
import pathlib
import json
import yaml
__prgdesc__ = "Simplified YAML Ansible inventory"
__version__ = "0.1"
class SimplifiedAnsibleInventory(object):
    """Ansible dynamic-inventory script backed by a simplified YAML file.

    Reads <scriptname>.yml, builds group/host indexes, and answers Ansible's
    --list / --host protocol by printing JSON and exiting.
    """

    def __init__(self):
        arg_parser = argparse.ArgumentParser(description=__prgdesc__)
        arg_parser.add_argument("-v", "--version", action="version",
                                version="%(prog)s " + __version__)
        arg_parser.add_argument("-l", "--list", action="store_true",
                                help="output inventory")
        arg_parser.add_argument("-H", "--host", action="store",
                                help="output host vars")
        options = arg_parser.parse_args()
        self.parse_yaml()
        if options.list:
            self.output_list()
        if options.host:
            self.output_host(options.host)

    def print_json(self, data):
        """Pretty-print *data* (or an empty mapping when falsy) as JSON."""
        payload = data or {}
        print(json.dumps(payload, indent=4, sort_keys=False))

    def parse_yaml(self):
        """Load <scriptname>.yml and index its entries into self.groups / self.hosts."""
        self.groups = {}
        self.hosts = {}
        filename = pathlib.Path(__file__).stem + ".yml"
        with open(filename) as stream:
            entries = yaml.load(stream, Loader=yaml.FullLoader)
        # First pass: index named groups and declared hosts.
        for entry in entries:
            if entry.get("name"):
                self.groups[entry.get("name")] = entry
            if entry.get("host"):
                self.hosts[entry.get("host")] = entry
        # Second pass: attach every host to each of its tag groups
        # (hosts without tags fall into 'ungrouped').
        for host_name, host_entry in self.hosts.items():
            for tag in host_entry.get("tags") or ["ungrouped"]:
                group = self.groups.setdefault(tag, {})
                group.setdefault("hosts", []).append(host_name)

    def output_list(self):
        """Print the whole inventory (--list) as JSON, then exit."""
        inventory = {}
        for group_name, group in self.groups.items():
            entry = {}
            # Ansible's implicit "all" group must not list hosts explicitly.
            if group_name != "all":
                entry["hosts"] = group["hosts"]
            if "vars" in group:
                entry["vars"] = group["vars"]
            inventory[group_name] = entry
        self.print_json(inventory)
        exit(0)

    def output_host(self, name):
        """Print one host's variables (--host NAME) as JSON, then exit."""
        details = self.hosts.get(name)
        hostvars = details.get("vars") if details is not None else {}
        self.print_json(hostvars)
        exit(0)
if __name__ == "__main__":
SimplifiedAnsibleInventory()
| [
"json.dumps",
"yaml.load",
"argparse.ArgumentParser",
"pathlib.Path"
] | [((627, 675), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__prgdesc__'}), '(description=__prgdesc__)\n', (650, 675), False, 'import argparse\n'), ((1265, 1314), 'json.dumps', 'json.dumps', (['(data or {})'], {'indent': '(4)', 'sort_keys': '(False)'}), '(data or {}, indent=4, sort_keys=False)\n', (1275, 1314), False, 'import json\n'), ((1506, 1545), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'yaml.FullLoader'}), '(file, Loader=yaml.FullLoader)\n', (1515, 1545), False, 'import yaml\n'), ((1412, 1434), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (1424, 1434), False, 'import pathlib\n')] |
import pandas as pd
import csv
import numpy as np
def parse_val(val):
    """Convert a cell string into a value RuleSet functions can manipulate.

    Accepted formats: 'false'/'true' (case insensitive), 'nan', '', '*',
    and string representations of pandas.Interval.
    """
    lowered = val.lower()
    if val in ('', '*') or lowered == 'nan':
        return float('nan')
    if val[0] in ('(', '[', ']'):
        return parse_interval(val)
    if lowered == 'false':
        return False
    if lowered == 'true':
        return True
    try:
        return float(val)
    except ValueError:
        raise ValueError("Value given doesn't have appropriate format: " + str(val))
def parse_interval(val):
    """Parse a string such as '(1, 2]' or ']0, 5[' into a pandas.Interval.

    Square brackets denote closed bounds; parentheses or reversed square
    brackets (French notation, e.g. ']0, 1[') denote open bounds.

    Raises
    ------
    ValueError
        If a delimiter, the comma separator or either bound is malformed.

    Notes
    -----
    BUGFIX: the original scanned for the comma with ``val[i] is not ','`` —
    an identity comparison that only worked through CPython string interning
    (and warns on Python 3.8+). It now uses value comparison via partition.
    """
    error = "Value given doesn't have the proper format for a pandas.Interval: " + str(val)

    # Determine on which side(s) the interval is closed.
    left_closed = val[0] == '['
    if not left_closed and val[0] not in ('(', ']'):
        raise ValueError(error)
    right_closed = val[-1] == ']'
    if not right_closed and val[-1] not in (')', '['):
        raise ValueError(error)
    if left_closed and right_closed:
        closed = 'both'
    elif left_closed:
        closed = 'left'
    elif right_closed:
        closed = 'right'
    else:
        closed = 'neither'

    # Split the body on the first comma; float() tolerates surrounding spaces.
    left_str, sep, right_str = val[1:-1].partition(',')
    if not sep:
        raise ValueError(error + ". A comma is missing.")
    try:
        left_val = float(left_str)
    except ValueError:
        raise ValueError(error)
    try:
        right_val = float(right_str)
    except ValueError:
        raise ValueError(error)

    return pd.Interval(left_val, right_val, closed)
def parse_csv(csv_name):
    """Parse the rule csv and return a list of dicts, one per rule.

    Every cell outside the 'Rec'/'Recommendation' columns is converted to a
    float when possible, otherwise handed to parse_val().
    """
    rules = []
    with open(csv_name, 'r') as csv_file:
        for row in csv.DictReader(csv_file):
            for column, cell in row.items():
                if column in ("Rec", "Recommendation"):
                    continue
                if isinstance(cell, str):
                    try:
                        row[column] = float(cell)
                    except ValueError:
                        row[column] = parse_val(cell)
            rules.append(row)
    return rules
| [
"pandas.Interval",
"csv.DictReader"
] | [((2329, 2369), 'pandas.Interval', 'pd.Interval', (['left_val', 'right_val', 'closed'], {}), '(left_val, right_val, closed)\n', (2340, 2369), True, 'import pandas as pd\n'), ((2648, 2672), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (2662, 2672), False, 'import csv\n')] |
from django.urls import path
from .views import *
# URL routes: public pages, auth (register/login/logout) and master details.
urlpatterns = [
    path('',homepage,name='home'),
    path('services/',services, name='services'),
    path('masters/',masters, name='masters'),
    path('register/',register_page,name='register'),
    path('login/',login_page, name='login'),
    path('master_detail/<int:master_id>/',master_detail,name='master_detail'),
    path('logout/',logout_page,name='logout'),
] | [
"django.urls.path"
] | [((71, 102), 'django.urls.path', 'path', (['""""""', 'homepage'], {'name': '"""home"""'}), "('', homepage, name='home')\n", (75, 102), False, 'from django.urls import path\n'), ((106, 150), 'django.urls.path', 'path', (['"""services/"""', 'services'], {'name': '"""services"""'}), "('services/', services, name='services')\n", (110, 150), False, 'from django.urls import path\n'), ((155, 196), 'django.urls.path', 'path', (['"""masters/"""', 'masters'], {'name': '"""masters"""'}), "('masters/', masters, name='masters')\n", (159, 196), False, 'from django.urls import path\n'), ((201, 250), 'django.urls.path', 'path', (['"""register/"""', 'register_page'], {'name': '"""register"""'}), "('register/', register_page, name='register')\n", (205, 250), False, 'from django.urls import path\n'), ((254, 294), 'django.urls.path', 'path', (['"""login/"""', 'login_page'], {'name': '"""login"""'}), "('login/', login_page, name='login')\n", (258, 294), False, 'from django.urls import path\n'), ((299, 374), 'django.urls.path', 'path', (['"""master_detail/<int:master_id>/"""', 'master_detail'], {'name': '"""master_detail"""'}), "('master_detail/<int:master_id>/', master_detail, name='master_detail')\n", (303, 374), False, 'from django.urls import path\n'), ((378, 421), 'django.urls.path', 'path', (['"""logout/"""', 'logout_page'], {'name': '"""logout"""'}), "('logout/', logout_page, name='logout')\n", (382, 421), False, 'from django.urls import path\n')] |
#!/usr/bin/env python
'''
Created by Seria at 02/11/2018 3:38 PM
Email: <EMAIL>
_ooOoo_
o888888888o
o88`_ . _`88o
(| 0 0 |)
O \ 。 / O
_____/`-----‘\_____
.’ \|| _ _ ||/ `.
| _ ||| | ||| _ |
| | \\ // | |
| | \-----/ | |
\ .\ ___/- -\___ /. /
,--- / ___\<|>/___ \ ---,
| |: \ \ / / :| |
`\--\_ -. ___ .- _/--/‘
=========== \__ NOBUG __/ ===========
'''
# -*- coding:utf-8 -*-
import numpy as np
import random as rand
from collections import abc
from torchvision.transforms import *
F = functional
from PIL import Image
from ..toolkit import byte2arr
# names exported by this augmentation module
__all__ = ('Comburant', 'HWC2CHW', 'Random',
           'NEAREST', 'LINEAR', 'CUBIC', 'HORIZONTAL', 'VERTICAL',
           'Resize', 'Crop', 'Flip', 'Rotate',
           'Brighten', 'Contrast', 'Saturate', 'Hue')
# interpolation mode keys and their corresponding PIL resampling filters
NEAREST = 0
LINEAR = 1
CUBIC = 2
PIL_INTERP = {NEAREST: Image.NEAREST, LINEAR: Image.BILINEAR, CUBIC: Image.BICUBIC}
# flip-axis identifiers consumed by Flip
HORIZONTAL = 10
VERTICAL = 11
class Comburant(object):
    """Composes augmentation transforms and normalizes outputs to float arrays.

    Args:
        *args: transform instances applied in order. If the last one is an
            HWC2CHW, it is held back from the composed pipeline and applied
            after the uint8 -> [0, 1] float conversion instead.
        is_encoded: if True, inputs are raw image bytes and are decoded with
            byte2arr before the transforms run.
    """

    def __init__(self, *args, is_encoded=False):
        if isinstance(args[-1], HWC2CHW):
            # keep the layout conversion out of the composed pipeline;
            # it must run after the float conversion in __call__
            ls_args = list(args[:-1])
            self.cvt_form = args[-1]
        else:
            ls_args = list(args)
        self.comburant = Compose(ls_args)
        self.is_encoded = is_encoded

    def _to_float(self, img):
        # shared post-processing: image -> float32 array in [0, 1],
        # then the optional channel-layout conversion
        arr = np.array(img)
        arr = arr.astype(np.float32) / 255
        if hasattr(self, 'cvt_form'):
            arr = self.cvt_form(arr)
        return arr

    def __call__(self, imgs):
        if self.is_encoded:
            # decode byte strings into images before augmentation
            if isinstance(imgs, abc.Sequence):
                img = []
                for i in imgs:
                    img.append(byte2arr(i, as_np=False))
            else:
                img = byte2arr(imgs, as_np=False)
            imgs = img
        imgs = self.comburant(imgs)
        # normalize every augmented image with the same post-processing
        if isinstance(imgs, abc.Sequence):
            img = [self._to_float(i) for i in imgs]
        else:
            img = self._to_float(imgs)
        return img
class ABC(object):
    """Base class for augmentation ops: maps ``call`` over the input.

    Subclasses implement ``call`` for a single image; ``__call__`` applies it
    to each element of a sequence, or directly to a lone image.
    """

    def __init__(self):
        pass

    def call(self, *args, **kwargs):
        raise NotImplementedError

    def __call__(self, imgs):
        if isinstance(imgs, abc.Sequence):
            return [self.call(i) for i in imgs]
        return self.call(imgs)
class HWC2CHW(ABC):
    """Reorders an array from channel-last (H, W, C) to channel-first (C, H, W)."""

    def __init__(self):
        super(HWC2CHW, self).__init__()

    def __call__(self, img):
        # overrides ABC.__call__, so this op handles one image at a time
        channel_first = (2, 0, 1)
        return np.transpose(img, channel_first)
class Random(ABC):
    """Applies a wrapped comburant with probability ``p``.

    The coin is flipped once per invocation, so every image in a sequence
    receives the same decision.
    """

    def __init__(self, p, comburant):
        super(Random, self).__init__()
        self.p = p
        self.cbr = comburant

    def call(self, img, conduct):
        # run the wrapped op only when the coin flip said so
        return self.cbr(img) if conduct else img

    def __call__(self, imgs):
        conduct = rand.random() < self.p
        if isinstance(imgs, abc.Sequence):
            return [self.call(i, conduct) for i in imgs]
        return self.call(imgs, conduct)
class Resize(ABC):
    """Resizes a PIL image to a fixed (height, width) with a chosen filter."""

    def __init__(self, size, interp=LINEAR):
        # size: (height, width)
        super(Resize, self).__init__()
        self.size = size
        self.interp = interp

    def call(self, img):
        h, w = self.size[0], self.size[1]
        # PIL's resize expects (width, height)
        return img.resize((w, h), PIL_INTERP[self.interp])
class Crop(ABC):
    """Random (optionally resized) crop shared across a sequence of images.

    Crop parameters are sampled once per invocation from the first image, so
    every image in a sequence receives the same relative crop; per-image
    ``scale`` factors rescale the window for images of different resolutions.
    """
    def __init__(self, size, padding=(0, 0, 0, 0), area_ratio=(1, 1), aspect_ratio=(1, 1), interp=LINEAR, scale=()):
        # size: (height, width)
        # padding: (left, top, right, bottom)
        super(Crop, self).__init__()
        self.size = size
        self.padding = padding
        self.area_ratio = area_ratio
        self.aspect_ratio = aspect_ratio
        self.scale = scale
        # plain random crop when no resizing is requested, otherwise a
        # torchvision RandomResizedCrop with the given area/aspect ranges
        if area_ratio == aspect_ratio == (1,1):
            self.reshape = False
            self.comburant = RandomCrop(size)
        else:
            self.reshape = True
            self.comburant = RandomResizedCrop(size, area_ratio, aspect_ratio, PIL_INTERP[interp])
    def call(self, img, param, t=1):
        # t rescales the sampled window, padding and target size so the same
        # relative crop applies to an image t times larger
        param = [p * t for p in param]
        y, x, h, w = param
        padding = tuple([p * t for p in self.padding])
        size = tuple([s * t for s in self.size])
        img = F.pad(img, padding, 0, 'constant')
        # pad the width if needed
        if img.size[0] < size[1]:
            img = F.pad(img, (size[1] - img.size[0], 0), 0, 'constant')
        # pad the height if needed
        if img.size[1] < size[0]:
            img = F.pad(img, (0, size[0] - img.size[1]), 0, 'constant')
        if self.reshape:
            return F.resized_crop(img, y, x, h, w, size, self.comburant.interpolation)
        else:
            return F.crop(img, y, x, h, w)
    def __call__(self, imgs):
        # NOTE(review): self.scale is mutated here, so an instance built with
        # scale=() becomes tied to the length of the first sequence it sees
        if len(self.scale) == 0:
            self.scale = len(imgs) * [1]
        # sample crop parameters from the first image, padded the same way
        # call() will pad it
        img = F.pad(imgs[0], self.padding, 0, 'constant')
        # pad the width if needed
        if img.size[0] < self.size[1]:
            img = F.pad(img, (self.size[1] - img.size[0], 0), 0, 'constant')
        # pad the height if needed
        if img.size[1] < self.size[0]:
            img = F.pad(img, (0, self.size[0] - img.size[1]), 0, 'constant')
        if self.reshape:
            param = self.comburant.get_params(img, self.comburant.scale, self.comburant.ratio)
        else:
            param = self.comburant.get_params(img, self.comburant.size)
        if isinstance(imgs, abc.Sequence):
            ret = []
            for i, v in enumerate(imgs):
                ret.append(self.call(v, param, self.scale[i]))
        else:
            ret = self.call(imgs, param)
        return ret
class Flip(ABC):
    """Deterministic flip (probability 1) along the requested axis."""
    def __init__(self, axial):
        super(Flip, self).__init__()
        # NOTE(review): HORIZONTAL maps to RandomVerticalFlip and VERTICAL to
        # RandomHorizontalFlip. This reads as the constants naming the axis
        # flipped *about* rather than the flip direction — confirm the intent
        # before "fixing" the apparent swap.
        if axial == HORIZONTAL:
            self.comburant = RandomVerticalFlip(1)
        elif axial == VERTICAL:
            self.comburant = RandomHorizontalFlip(1)
        else:
            raise Exception('NEBULAE ERROR ⨷ the invoked flip type is not defined or supported.')
    def call(self, img):
        return self.comburant(img)
class Rotate(ABC):
    """Rotates image(s) by one random angle sampled per invocation."""
    def __init__(self, degree, intact=False, interp=NEAREST):
        '''
        Args
        degree: rotation range handed to torchvision's RandomRotation
        intact: whether to keep image intact which might enlarge the output size
        interp: interpolation mode key into PIL_INTERP
        '''
        super(Rotate, self).__init__()
        self.comburant = RandomRotation(degree, PIL_INTERP[interp], intact)
    def call(self, img, angle):
        # apply the pre-sampled angle using the settings stored on the
        # RandomRotation instance (resample/expand/center/fill)
        return F.rotate(img, angle, self.comburant.resample, self.comburant.expand,
                        self.comburant.center, self.comburant.fill)
    def __call__(self, imgs):
        # sample a single angle so every image in a sequence rotates identically
        angle = self.comburant.get_params(self.comburant.degrees)
        if isinstance(imgs, abc.Sequence):
            ret = []
            for i in imgs:
                ret.append(self.call(i, angle))
        else:
            ret = self.call(imgs, angle)
        return ret
class Brighten(ABC):
    """Adjusts brightness by a factor drawn uniformly from ``range``."""

    def __init__(self, range):
        super(Brighten, self).__init__()
        # the ColorJitter instance is only used for its validated bounds
        self.comburant = ColorJitter(brightness=range)

    def call(self, img, factor):
        return F.adjust_brightness(img, factor)

    def __call__(self, imgs):
        # one factor per invocation: a sequence is jittered uniformly
        bounds = self.comburant.brightness
        factor = rand.uniform(bounds[0], bounds[1])
        if isinstance(imgs, abc.Sequence):
            return [self.call(i, factor) for i in imgs]
        return self.call(imgs, factor)
class Contrast(ABC):
    """Adjusts contrast by a factor drawn uniformly from ``range``."""

    def __init__(self, range):
        super(Contrast, self).__init__()
        # the ColorJitter instance is only used for its validated bounds
        self.comburant = ColorJitter(contrast=range)

    def call(self, img, factor):
        return F.adjust_contrast(img, factor)

    def __call__(self, imgs):
        # one factor per invocation: a sequence is jittered uniformly
        bounds = self.comburant.contrast
        factor = rand.uniform(bounds[0], bounds[1])
        if isinstance(imgs, abc.Sequence):
            return [self.call(i, factor) for i in imgs]
        return self.call(imgs, factor)
class Saturate(ABC):
    """Adjusts saturation by a factor drawn uniformly from ``range``."""

    def __init__(self, range):
        super(Saturate, self).__init__()
        # the ColorJitter instance is only used for its validated bounds
        self.comburant = ColorJitter(saturation=range)

    def call(self, img, factor):
        return F.adjust_saturation(img, factor)

    def __call__(self, imgs):
        # one factor per invocation: a sequence is jittered uniformly
        bounds = self.comburant.saturation
        factor = rand.uniform(bounds[0], bounds[1])
        if isinstance(imgs, abc.Sequence):
            return [self.call(i, factor) for i in imgs]
        return self.call(imgs, factor)
class Hue(ABC):
    """Shifts image hue by a factor drawn uniformly from ``range``."""
    def __init__(self, range):
        super(Hue, self).__init__()
        # NOTE: ``range`` shadows the builtin; kept for interface compatibility
        self.comburant = ColorJitter(hue=range)
    def call(self, img, factor):
        return F.adjust_hue(img, factor)
    def __call__(self, imgs):
        # one hue shift per invocation: a sequence is jittered uniformly
        factor = rand.uniform(self.comburant.hue[0], self.comburant.hue[1])
        if isinstance(imgs, abc.Sequence):
            ret = []
            for i in imgs:
                ret.append(self.call(i, factor))
        else:
            ret = self.call(imgs, factor)
return ret | [
"random.random",
"numpy.array",
"random.uniform",
"numpy.transpose"
] | [((2822, 2850), 'numpy.transpose', 'np.transpose', (['img', '(2, 0, 1)'], {}), '(img, (2, 0, 1))\n', (2834, 2850), True, 'import numpy as np\n'), ((7607, 7679), 'random.uniform', 'rand.uniform', (['self.comburant.brightness[0]', 'self.comburant.brightness[1]'], {}), '(self.comburant.brightness[0], self.comburant.brightness[1])\n', (7619, 7679), True, 'import random as rand\n'), ((8173, 8241), 'random.uniform', 'rand.uniform', (['self.comburant.contrast[0]', 'self.comburant.contrast[1]'], {}), '(self.comburant.contrast[0], self.comburant.contrast[1])\n', (8185, 8241), True, 'import random as rand\n'), ((8739, 8811), 'random.uniform', 'rand.uniform', (['self.comburant.saturation[0]', 'self.comburant.saturation[1]'], {}), '(self.comburant.saturation[0], self.comburant.saturation[1])\n', (8751, 8811), True, 'import random as rand\n'), ((9285, 9343), 'random.uniform', 'rand.uniform', (['self.comburant.hue[0]', 'self.comburant.hue[1]'], {}), '(self.comburant.hue[0], self.comburant.hue[1])\n', (9297, 9343), True, 'import random as rand\n'), ((2165, 2179), 'numpy.array', 'np.array', (['imgs'], {}), '(imgs)\n', (2173, 2179), True, 'import numpy as np\n'), ((3165, 3178), 'random.random', 'rand.random', ([], {}), '()\n', (3176, 3178), True, 'import random as rand\n'), ((1957, 1968), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (1965, 1968), True, 'import numpy as np\n')] |
import obspy
import unittest
import numpy as np
import madpy.checks as ch
import madpy.tests.testdata.config as cfg
class TestChecks(unittest.TestCase):
    """Unit tests for the madpy.checks validation helpers.

    Several tests mutate attributes of the shared ``cfg`` module (and the
    classes it defines) and restore them afterwards so later tests see the
    defaults again. The waveform tests read MiniSEED fixtures from testdata/.
    """

    def test_check_config(self):
        # an object lacking the expected config attributes is rejected outright
        class Measurements: pass
        self.assertRaises(AttributeError, ch.check_config, Measurements())
        # pristine Amplitude config passes; each invalid field then trips an assert
        self.assertIsNone(ch.check_config(cfg.Amplitude()))
        cfg.Amplitude.noise_phase = 'Pn'
        self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude())
        cfg.Amplitude.noise_phase = 'P'
        # NOTE(review): amp_factor is set to -2. here and never restored, so the
        # following Amplitude cases may fail on it rather than the field under test
        cfg.Amplitude.amp_factor = -2.
        self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude())
        cfg.Amplitude.plot = 'Yes'
        self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude())
        cfg.Amplitude.plot = False
        cfg.Amplitude.signal_window_begin = 50.
        self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude())
        cfg.Amplitude.signal_window_begin = -1
        cfg.Amplitude.save_figure = True
        self.assertRaises(AssertionError, ch.check_config, cfg.Amplitude())
        cfg.Amplitude.save_figure = False
        # same drill for the Duration config
        self.assertIsNone(ch.check_config(cfg.Duration()))
        cfg.Duration.signal_phase = 'Sg'
        self.assertRaises(AssertionError, ch.check_config, cfg.Duration())
        cfg.Duration.signal_phase = 'S'
        cfg.Duration.moving_average_window = -2
        self.assertRaises(AssertionError, ch.check_config, cfg.Duration())
        # NOTE(review): the lines below set attributes on the cfg *module*, not
        # on cfg.Duration — presumably cfg.Duration.<attr> was intended; confirm
        cfg.moving_average_window = 2
        cfg.threshold_type = 'pre-p noise'
        self.assertRaises(AssertionError, ch.check_config, cfg.Duration())
        cfg.threshold_type = 'noise'
        cfg.plot = True
        cfg.save_figure = True
        cfg.figure_path = ''
        self.assertRaises(AssertionError, ch.check_config, cfg.Duration())
        cfg.plot = False
        cfg.save_figure = False

    def test_check_waveform(self):
        # trace stats must carry origin (o), P pick (p) and S pick (s) times
        st = obspy.read('testdata/*.mseed')
        for tr in st:
            tr.stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')
            tr.stats.p = 10.
            # NOTE(review): this calls ch.check_config on a trace — ch.check_stats
            # looks intended, matching the assertions below; confirm
            self.assertRaises(AttributeError, ch.check_config, tr)
            tr.stats.s = 20.
            self.assertIsNone(ch.check_stats(tr))
            # a string origin time (instead of UTCDateTime) must be rejected
            tr.stats.o = '2020-10-10T13:05:00.00'
            self.assertRaises(AssertionError, ch.check_stats, tr)
            tr.stats.o = obspy.UTCDateTime('2020-10-10T13:05:00.00')

    def test_check_datagaps(self):
        # truncating a trace to 25% of its samples must register as a data gap
        st = obspy.read('testdata/*.mseed')
        for tr in st:
            self.assertIsNone(ch.check_datagaps(tr))
            n = int(len(tr.data) * 0.25)
            tr.data = tr.data[0:n]
            self.assertRaises(AssertionError, ch.check_datagaps, tr)

    def test_check_window(self):
        # the requested window must lie inside the trace's time span
        st = obspy.read('testdata/*.mseed')
        for tr in st:
            starttime = obspy.UTCDateTime('2020-10-10T13:05:00.00')
            endtime = obspy.UTCDateTime('2020-10-10T13:07:00.00')
            self.assertIsNone(ch.check_window(tr, starttime, endtime))
            starttime = obspy.UTCDateTime('2020-10-10T13:04:00.00')
            self.assertRaises(AssertionError, ch.check_window, tr, starttime, endtime)
            endtime = obspy.UTCDateTime('2020-10-10T13:08:00.00')
            self.assertRaises(AssertionError, ch.check_window, tr, starttime, endtime)

    def test_check_amplitude(self):
        # only finite, positive, numeric scalars are valid amplitudes
        self.assertIsNone(ch.check_amplitude(0.5))
        self.assertRaises(ValueError, ch.check_amplitude, np.nan)
        self.assertRaises(ValueError, ch.check_amplitude, np.inf)
        self.assertRaises(ValueError, ch.check_amplitude, -np.inf)
        self.assertRaises(ValueError, ch.check_amplitude, -0.5)
        self.assertRaises(ValueError, ch.check_amplitude, None)
        self.assertRaises(ValueError, ch.check_amplitude, True)
        self.assertRaises(ValueError, ch.check_amplitude, {'test': 'dict'})
        self.assertRaises(ValueError, ch.check_amplitude, ['list', 5])

    def test_check_fitting_window_end(self):
        # fitting-window indices must be non-empty and end before the maximum
        i_max0 = 20000
        i_end0 = np.arange(500, 5005)
        dt = 0.01
        sp = 10
        self.assertIsNone(ch.check_fitting_window_end(i_end0, i_max0, dt, sp))
        i_end1 = []
        self.assertRaises(AssertionError, ch.check_fitting_window_end, i_end1, i_max0, dt, sp)
        i_max1 = 2
        self.assertRaises(AssertionError, ch.check_fitting_window_end, i_end0, i_max1, dt, sp)

    def test_check_plottype(self):
        # only the literal strings 'linear' and 'log' are accepted
        self.assertIsNone(ch.check_plottype('linear'))
        self.assertIsNone(ch.check_plottype('log'))
        self.assertRaises(AssertionError, ch.check_plottype, 2)
        self.assertRaises(AssertionError, ch.check_plottype, 'fourier')

    def test_check_duration_index(self):
        # the threshold-crossing index array must not be empty
        cross = np.arange(0, 10, dtype=float)
        self.assertIsNone(ch.check_duration_index(cross))
        self.assertRaises(AssertionError, ch.check_duration_index, [])

    def test_check_cc(self):
        # cross-correlation matrix must be square, float, finite and in a
        # valid range at the inspected element
        cc = np.array([
            [0.1, 0.8, 0.5, 0.9],
            [0.9, 0.1, 0.8, 0.5],
            [0.5, 0.9, 0.1, 0.8],
            [0.8, 0.5, 0.9, 0.1]
        ])
        self.assertIsNone(ch.check_cc(cc, 1, 2))
        self.assertRaises(AssertionError, ch.check_cc, cc.astype(int), 1, 2)
        self.assertRaises(AssertionError, ch.check_cc, cc[0:3, :], 1, 2)
        self.assertRaises(AssertionError, ch.check_cc, cc[:, 0:3], 1, 2)
        cc[1, 2] = np.nan
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)
        cc[1, 2] = np.inf
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)
        cc[1, 2] = -np.inf
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)
        cc[1, 2] = 0
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)
        cc[1, 2] = -10
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)
        cc[1, 2] = 25
        self.assertRaises(AssertionError, ch.check_cc, cc, 1, 2)

    def test_check_coda(self):
        # both coda vectors must be float; NaNs in y are dropped pairwise,
        # NaNs in x are rejected
        x0 = np.arange(0, 100)
        y0 = np.arange(0, 100)
        x1 = np.arange(0, 100, dtype=float)
        y1 = np.arange(0, 100, dtype=float)
        self.assertRaises(AssertionError, ch.check_coda, x0, y1)
        self.assertRaises(AssertionError, ch.check_coda, x1, y0)
        self.assertRaises(AssertionError, ch.check_coda, x0, y0)
        x2, y2 = ch.check_coda(x1, y1)
        self.assertEqual(len(x2), 100)
        self.assertEqual(len(y2), 100)
        x2[5:10] = np.nan
        self.assertRaises(AssertionError, ch.check_coda, x2, y1)
        y2[60:72] = np.nan
        x3, y3 = ch.check_coda(x1, y2)
        self.assertEqual(len(x3), 88)
        self.assertEqual(len(y3), 88)
if __name__ == '__main__':
unittest.main() | [
"obspy.read",
"madpy.checks.check_amplitude",
"numpy.arange",
"madpy.checks.check_stats",
"madpy.checks.check_duration_index",
"madpy.checks.check_cc",
"obspy.UTCDateTime",
"madpy.checks.check_window",
"madpy.checks.check_datagaps",
"numpy.array",
"madpy.checks.check_plottype",
"unittest.main"... | [((6909, 6924), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6922, 6924), False, 'import unittest\n'), ((1949, 1979), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (1959, 1979), False, 'import obspy\n'), ((2506, 2536), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (2516, 2536), False, 'import obspy\n'), ((2827, 2857), 'obspy.read', 'obspy.read', (['"""testdata/*.mseed"""'], {}), "('testdata/*.mseed')\n", (2837, 2857), False, 'import obspy\n'), ((4165, 4185), 'numpy.arange', 'np.arange', (['(500)', '(5005)'], {}), '(500, 5005)\n', (4174, 4185), True, 'import numpy as np\n'), ((4909, 4938), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'dtype': 'float'}), '(0, 10, dtype=float)\n', (4918, 4938), True, 'import numpy as np\n'), ((5135, 5237), 'numpy.array', 'np.array', (['[[0.1, 0.8, 0.5, 0.9], [0.9, 0.1, 0.8, 0.5], [0.5, 0.9, 0.1, 0.8], [0.8, \n 0.5, 0.9, 0.1]]'], {}), '([[0.1, 0.8, 0.5, 0.9], [0.9, 0.1, 0.8, 0.5], [0.5, 0.9, 0.1, 0.8],\n [0.8, 0.5, 0.9, 0.1]])\n', (5143, 5237), True, 'import numpy as np\n'), ((6170, 6187), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (6179, 6187), True, 'import numpy as np\n'), ((6201, 6218), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {}), '(0, 100)\n', (6210, 6218), True, 'import numpy as np\n'), ((6232, 6262), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {'dtype': 'float'}), '(0, 100, dtype=float)\n', (6241, 6262), True, 'import numpy as np\n'), ((6276, 6306), 'numpy.arange', 'np.arange', (['(0)', '(100)'], {'dtype': 'float'}), '(0, 100, dtype=float)\n', (6285, 6306), True, 'import numpy as np\n'), ((6519, 6540), 'madpy.checks.check_coda', 'ch.check_coda', (['x1', 'y1'], {}), '(x1, y1)\n', (6532, 6540), True, 'import madpy.checks as ch\n'), ((6754, 6775), 'madpy.checks.check_coda', 'ch.check_coda', (['x1', 'y2'], {}), '(x1, y2)\n', (6767, 6775), True, 'import madpy.checks as ch\n'), ((478, 
493), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (491, 493), True, 'import madpy.tests.testdata.config as cfg\n'), ((633, 648), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (646, 648), True, 'import madpy.tests.testdata.config as cfg\n'), ((744, 759), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (757, 759), True, 'import madpy.tests.testdata.config as cfg\n'), ((903, 918), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (916, 918), True, 'import madpy.tests.testdata.config as cfg\n'), ((1067, 1082), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (1080, 1082), True, 'import madpy.tests.testdata.config as cfg\n'), ((1294, 1308), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1306, 1308), True, 'import madpy.tests.testdata.config as cfg\n'), ((1457, 1471), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1469, 1471), True, 'import madpy.tests.testdata.config as cfg\n'), ((1613, 1627), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1625, 1627), True, 'import madpy.tests.testdata.config as cfg\n'), ((1809, 1823), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1821, 1823), True, 'import madpy.tests.testdata.config as cfg\n'), ((2027, 2070), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2044, 2070), False, 'import obspy\n'), ((2387, 2430), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2404, 2430), False, 'import obspy\n'), ((2904, 2947), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:05:00.00"""'], {}), "('2020-10-10T13:05:00.00')\n", (2921, 2947), False, 'import obspy\n'), ((2970, 3013), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:07:00.00"""'], 
{}), "('2020-10-10T13:07:00.00')\n", (2987, 3013), False, 'import obspy\n'), ((3109, 3152), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:04:00.00"""'], {}), "('2020-10-10T13:04:00.00')\n", (3126, 3152), False, 'import obspy\n'), ((3262, 3305), 'obspy.UTCDateTime', 'obspy.UTCDateTime', (['"""2020-10-10T13:08:00.00"""'], {}), "('2020-10-10T13:08:00.00')\n", (3279, 3305), False, 'import obspy\n'), ((3482, 3505), 'madpy.checks.check_amplitude', 'ch.check_amplitude', (['(0.5)'], {}), '(0.5)\n', (3500, 3505), True, 'import madpy.checks as ch\n'), ((4246, 4297), 'madpy.checks.check_fitting_window_end', 'ch.check_fitting_window_end', (['i_end0', 'i_max0', 'dt', 'sp'], {}), '(i_end0, i_max0, dt, sp)\n', (4273, 4297), True, 'import madpy.checks as ch\n'), ((4608, 4635), 'madpy.checks.check_plottype', 'ch.check_plottype', (['"""linear"""'], {}), "('linear')\n", (4625, 4635), True, 'import madpy.checks as ch\n'), ((4663, 4687), 'madpy.checks.check_plottype', 'ch.check_plottype', (['"""log"""'], {}), "('log')\n", (4680, 4687), True, 'import madpy.checks as ch\n'), ((4965, 4995), 'madpy.checks.check_duration_index', 'ch.check_duration_index', (['cross'], {}), '(cross)\n', (4988, 4995), True, 'import madpy.checks as ch\n'), ((5318, 5339), 'madpy.checks.check_cc', 'ch.check_cc', (['cc', '(1)', '(2)'], {}), '(cc, 1, 2)\n', (5329, 5339), True, 'import madpy.checks as ch\n'), ((360, 375), 'madpy.tests.testdata.config.Amplitude', 'cfg.Amplitude', ([], {}), '()\n', (373, 375), True, 'import madpy.tests.testdata.config as cfg\n'), ((1177, 1191), 'madpy.tests.testdata.config.Duration', 'cfg.Duration', ([], {}), '()\n', (1189, 1191), True, 'import madpy.tests.testdata.config as cfg\n'), ((2226, 2244), 'madpy.checks.check_stats', 'ch.check_stats', (['tr'], {}), '(tr)\n', (2240, 2244), True, 'import madpy.checks as ch\n'), ((2589, 2610), 'madpy.checks.check_datagaps', 'ch.check_datagaps', (['tr'], {}), '(tr)\n', (2606, 2610), True, 'import madpy.checks as ch\n'), ((3044, 
3083), 'madpy.checks.check_window', 'ch.check_window', (['tr', 'starttime', 'endtime'], {}), '(tr, starttime, endtime)\n', (3059, 3083), True, 'import madpy.checks as ch\n')] |
import taichi as ti
from tests import test_utils
@test_utils.test()
def test_simplify_bug():
    # Regression test named after a bug in the IR simplification pass: the
    # store guarded by ``i == 3`` inside the ti.static-unrolled inner loop
    # must survive simplification, leaving every component equal to 1.
    @ti.kernel
    def foo() -> ti.types.vector(4, dtype=ti.i32):
        a = ti.Vector([0, 0, 0, 0])
        for i in range(5):
            for k in ti.static(range(4)):
                if i == 3:
                    a[k] = 1
        return a
    a = foo()
    assert (a == ti.Vector([1, 1, 1, 1])).all() == 1
| [
"tests.test_utils.test",
"taichi.Vector",
"taichi.types.vector"
] | [((52, 69), 'tests.test_utils.test', 'test_utils.test', ([], {}), '()\n', (67, 69), False, 'from tests import test_utils\n'), ((127, 159), 'taichi.types.vector', 'ti.types.vector', (['(4)'], {'dtype': 'ti.i32'}), '(4, dtype=ti.i32)\n', (142, 159), True, 'import taichi as ti\n'), ((173, 196), 'taichi.Vector', 'ti.Vector', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (182, 196), True, 'import taichi as ti\n'), ((372, 395), 'taichi.Vector', 'ti.Vector', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (381, 395), True, 'import taichi as ti\n')] |
import torch
import torch.nn as nn
from model.fpn import *
from model.backbone.shufflenetv2 import *
class Detector(nn.Module):
    """Lightweight anchor-based detector.

    A ShuffleNetV2 backbone feeds a light FPN; three 1x1 convolution heads
    (box regression, objectness, classification) are shared between the two
    pyramid levels.
    """

    def __init__(self, classes, anchor_num, load_param):
        super(Detector, self).__init__()

        head_channels = 72
        backbone_channels = [-1, 24, 48, 96, 192]

        self.backbone = ShuffleNetV2(backbone_channels, load_param)
        self.fpn = LightFPN(backbone_channels[-2] + backbone_channels[-1],
                            backbone_channels[-1], head_channels)

        # 1x1 prediction heads, shared across both feature scales
        self.output_reg_layers = nn.Conv2d(head_channels, 4 * anchor_num, 1, 1, 0, bias=True)
        self.output_obj_layers = nn.Conv2d(head_channels, anchor_num, 1, 1, 0, bias=True)
        self.output_cls_layers = nn.Conv2d(head_channels, classes, 1, 1, 0, bias=True)

    def forward(self, x):
        C2, C3 = self.backbone(x)
        cls_2, obj_2, reg_2, cls_3, obj_3, reg_3 = self.fpn(C2, C3)

        # run each shared head on both scales; order matches the original API
        return (self.output_reg_layers(reg_2),
                self.output_obj_layers(obj_2),
                self.output_cls_layers(cls_2),
                self.output_reg_layers(reg_3),
                self.output_obj_layers(obj_3),
                self.output_cls_layers(cls_3))
if __name__ == "__main__":
    # Build a detector (80 classes, 3 anchors, random init) and trace it with
    # a dummy 352x352 RGB batch to export an ONNX graph.
    net = Detector(80, 3, False)
    dummy_input = torch.rand(1, 3, 352, 352)
    torch.onnx.export(net,
                      dummy_input,
                      "test.onnx",               # output file path
                      export_params=True,        # embed the weights in the model file
                      opset_version=11,          # ONNX opset to target
                      do_constant_folding=True)  # fold constants for optimization
| [
"torch.nn.Conv2d",
"torch.rand",
"torch.onnx.export"
] | [((1351, 1377), 'torch.rand', 'torch.rand', (['(1)', '(3)', '(352)', '(352)'], {}), '(1, 3, 352, 352)\n', (1361, 1377), False, 'import torch\n'), ((1382, 1498), 'torch.onnx.export', 'torch.onnx.export', (['model', 'test_data', '"""test.onnx"""'], {'export_params': '(True)', 'opset_version': '(11)', 'do_constant_folding': '(True)'}), "(model, test_data, 'test.onnx', export_params=True,\n opset_version=11, do_constant_folding=True)\n", (1399, 1498), False, 'import torch\n'), ((518, 574), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_depth', '(4 * anchor_num)', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(out_depth, 4 * anchor_num, 1, 1, 0, bias=True)\n', (527, 574), True, 'import torch.nn as nn\n'), ((608, 660), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_depth', 'anchor_num', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(out_depth, anchor_num, 1, 1, 0, bias=True)\n', (617, 660), True, 'import torch.nn as nn\n'), ((694, 743), 'torch.nn.Conv2d', 'nn.Conv2d', (['out_depth', 'classes', '(1)', '(1)', '(0)'], {'bias': '(True)'}), '(out_depth, classes, 1, 1, 0, bias=True)\n', (703, 743), True, 'import torch.nn as nn\n')] |
#!/usr/bin/env python3
import pytest
from tools import play
# Scenarios unpacked by test_sample below:
# (number of players, last marble's point value, expected winning score)
samples = [
    (9, 25, 32),
    (10, 1618, 8317),
    (13, 7999, 146373),
    (17, 1104, 2764),
    (21, 6111, 54718),
    (30, 5807, 37305),
]
def ids(t):
    """Render a sample tuple as a readable pytest parameter id."""
    players, points, score = t
    return f'{players} players {points} points max. score {score}'
@pytest.fixture(params=samples, ids=ids)
def sample(request):
    # parametrized fixture: yields each scenario tuple from ``samples`` in turn
    return request.param
def test_sample(sample):
    """The game must reproduce the known high score for each scenario."""
    num_players, points, expected = sample
    scoreboard = play(num_players, points)
    # the winner's score is simply the maximum over all players' scores
    assert max(scoreboard.values()) == expected
| [
"pytest.fixture",
"tools.play"
] | [((289, 328), 'pytest.fixture', 'pytest.fixture', ([], {'params': 'samples', 'ids': 'ids'}), '(params=samples, ids=ids)\n', (303, 328), False, 'import pytest\n'), ((452, 477), 'tools.play', 'play', (['num_players', 'points'], {}), '(num_players, points)\n', (456, 477), False, 'from tools import play\n')] |
from sklearn.metrics import roc_auc_score, f1_score, accuracy_score
import scipy
import numpy as np
import pandas as pd
from pyarc.qcba import *
from pyarc.algorithms import createCARs, top_rules
from pyarc import TransactionDB
from pyids import IDS
from pyids.ids_cacher import IDSCacher
from pyids.ids_ruleset import IDSRuleSet
from pyids.ids_rule import IDSRule
from pyids.ids_classifier import IDSOneVsAll
from pyids.model_selection import encode_label, mode, KFoldCV
# Load ten pre-split iris folds from disk and evaluate a one-vs-all IDS
# classifier with 10-fold cross-validation scored by AUC.
dataframes = [ pd.read_csv("./data/iris{}.csv".format(i)) for i in range(10)]
kfold = KFoldCV(IDSOneVsAll(), dataframes, score_auc=True)
scores = kfold.fit(rule_cutoff=50)  # mine at most 50 candidate rules per fold
print(scores) | [
"pyids.ids_classifier.IDSOneVsAll"
] | [((573, 586), 'pyids.ids_classifier.IDSOneVsAll', 'IDSOneVsAll', ([], {}), '()\n', (584, 586), False, 'from pyids.ids_classifier import IDSOneVsAll\n')] |
"""
Plot gas, tar, char, water, and water vapor from primary and secondary
reactions based on Blasi / Chan / Liden kinetic schemes for biomass pyrolysis.
This combined scheme is referred to as the Cpc 2016 kinetic scheme. A similar
scheme but without water reaction was proposed in Papadikis 2010 paper.
References:
Blasi, 1993. Combustion Science and Technology, 90, pp 315–340.
<NAME>, Krieger, 1985. Fuel, 64, pp 1505–1513.
<NAME>, Scott, 1988. Chem. Eng. Comm., 65, pp 207-221.
<NAME>, 2010. Fuel Processing Technology, 91, pp 68–79.
"""
import numpy as np
import matplotlib.pyplot as py
# Parameters
# ------------------------------------------------------------------------------

T = 773      # temperature for rate constants, K
mc = 0.20    # moisture content as weight fraction, (-)
dt = 0.01    # time step, delta t
tmax = 25    # max time, s

# np.linspace requires an integer sample count; tmax/dt evaluates to a float
# (2500.0), which raises a TypeError on modern NumPy — cast it explicitly.
t = np.linspace(0, tmax, num=int(tmax / dt))   # time vector
nt = len(t)                                    # total number of time steps
# Function for Cpc 2016 Kinetic Scheme
# ------------------------------------------------------------------------------
def cpc(wood, gas, tar, char, water, vapor, T, dt, s=1):
    """
    Primary and secondary kinetic reactions for Cpc 2016 scheme based on
    Blasi 1993, Chan 1985, and Liden 1988 kinetics. Same scheme as presented in
    Papadikis 2010 but with the addition of the water reaction. Concentrations
    are advanced one explicit-Euler step.

    Parameters
    ----------
    wood = wood concentration, kg/m^3
    gas = gas concentation, kg/m^3
    tar = tar concentation, kg/m^3
    char = char concentation, kg/m^3
    water = water concentration based on moisture content, kg/m^3
    vapor = water vapor concentration, kg/m^3
    T = temperature, K
    dt = time step, s
    s = 1 primary reactions only, 2 primary and secondary reactions

    Returns
    -------
    nwood = new wood concentration, kg/m^3
    ngas = new gas concentration, kg/m^3
    ntar = new tar concentration, kg/m^3
    nchar = new char concentration, kg/m^3
    nwater = new water concentration, kg/m^3
    nvapor = new water vapor concentration, kg/m^3

    Raises
    ------
    ValueError if s is not 1 or 2 (previously this fell through to a NameError).
    """
    if s not in (1, 2):
        raise ValueError('s must be 1 (primary) or 2 (primary + secondary)')

    # A = pre-factor (1/s) and E = activation energy (kJ/mol)
    A1, E1 = 1.3e8, 140      # wood -> gas from Chan 1985
    A2, E2 = 2.0e8, 133      # wood -> tar from Chan 1985
    A3, E3 = 1.08e7, 121     # wood -> char from Chan 1985
    A4, E4 = 4.28e6, 107.5   # tar -> gas from Liden 1988
    A5, E5 = 1.0e6, 108      # tar -> char from Blasi 1993
    Aw, Ew = 5.13e6, 87.9    # water -> water vapor from Chan 1985
    R = 0.008314             # universal gas constant, kJ/mol*K

    # Arrhenius rate constant for each reaction, 1/s
    K1 = A1 * np.exp(-E1 / (R * T))   # wood -> gas
    K2 = A2 * np.exp(-E2 / (R * T))   # wood -> tar
    K3 = A3 * np.exp(-E3 / (R * T))   # wood -> char
    K4 = A4 * np.exp(-E4 / (R * T))   # tar -> gas
    K5 = A5 * np.exp(-E5 / (R * T))   # tar -> char
    Kw = Aw * np.exp(-Ew / (R * T))   # water -> vapor

    # primary reaction rates (always active)
    rw = -(K1 + K2 + K3) * wood   # wood rate
    rg = K1 * wood                # gas rate
    rt = K2 * wood                # tar rate
    rc = K3 * wood                # char rate
    rwt = -Kw * water             # moisture content rate
    rwv = Kw * water              # water vapor rate

    if s == 2:
        # secondary tar cracking: tar -> gas and tar -> char
        rg += K4 * tar
        rt -= (K4 + K5) * tar
        rc += K5 * tar

    # explicit Euler update of each concentration
    nwood = wood + rw * dt
    ngas = gas + rg * dt
    ntar = tar + rt * dt
    nchar = char + rc * dt
    nwater = water + rwt * dt
    nvapor = vapor + rwv * dt

    # return new mass concentrations of products, kg/m^3
    return nwood, ngas, ntar, nchar, nwater, nvapor
# Products from Kinetic Scheme
# ------------------------------------------------------------------------------

# March the primary-only scheme forward in time. Concentrations are stored on
# a mass basis (kg/m^3); wood and moisture start from the (1-mc)/mc split.
wood = np.ones(nt) * (1 - mc)   # wood concentration vector
gas = np.zeros(nt)              # gas concentration vector
tar = np.zeros(nt)              # tar concentration vector
char = np.zeros(nt)             # char concentration vector
water = np.ones(nt) * mc        # water concentration vector
vapor = np.zeros(nt)            # water vapor concentration vector

for i in range(1, nt):
    state = cpc(wood[i-1], gas[i-1], tar[i-1], char[i-1], water[i-1], vapor[i-1], T, dt)
    wood[i], gas[i], tar[i], char[i], water[i], vapor[i] = state

# Repeat the march with secondary tar-cracking reactions enabled (s=2).
wood2 = np.ones(nt) * (1 - mc)  # wood concentration vector
gas2 = np.zeros(nt)             # gas concentration vector
tar2 = np.zeros(nt)             # tar concentration vector
char2 = np.zeros(nt)            # char concentration vector
water2 = np.ones(nt) * mc       # water concentration vector
vapor2 = np.zeros(nt)           # water vapor concentration vector

for i in range(1, nt):
    state = cpc(wood2[i-1], gas2[i-1], tar2[i-1], char2[i-1], water2[i-1], vapor2[i-1], T, dt, s=2)
    wood2[i], gas2[i], tar2[i], char2[i], water2[i], vapor2[i] = state
# Print Mass Balances
# ------------------------------------------------------------------------------
# check mass balance at each time step: the scheme only redistributes mass, so
# every entry of each total vector should equal the initial mass fraction (1.0)
tot1 = wood + gas + tar + char + water + vapor
print('total mass fraction (primary) \n', tot1)
tot2 = wood2 + gas2 + tar2 + char2 + water2 + vapor2
print('total mass fraction (pri+sec) \n', tot2)
# Plot Results
# ------------------------------------------------------------------------------

py.ion()
py.close('all')

# labels shared by both figures, in plotting order
labels = ('wood', 'gas', 'tar', 'char', 'water', 'vapor')

# Figure 1: primary reactions only
py.figure(1)
for conc, label in zip((wood, gas, tar, char, water, vapor), labels):
    py.plot(t, conc, lw=2, label=label)
py.title('Cpc 2016 primary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Concentration (m.f. basis)')
py.legend(loc='best', numpoints=1, fontsize=12)
py.grid()

# Figure 2: primary + secondary reactions
py.figure(2)
for conc, label in zip((wood2, gas2, tar2, char2, water2, vapor2), labels):
    py.plot(t, conc, lw=2, label=label)
py.title('Cpc 2016 primary and secondary reactions at T = {} K'.format(T))
py.xlabel('Time (s)')
py.ylabel('Concentration (m.f. basis)')
py.legend(loc='best', numpoints=1, fontsize=12)
py.grid()
| [
"matplotlib.pyplot.grid",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.zeros",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.legend"
] | [((913, 948), 'numpy.linspace', 'np.linspace', (['(0)', 'tmax'], {'num': '(tmax / dt)'}), '(0, tmax, num=tmax / dt)\n', (924, 948), True, 'import numpy as np\n'), ((4914, 4926), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (4922, 4926), True, 'import numpy as np\n'), ((4971, 4983), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (4979, 4983), True, 'import numpy as np\n'), ((5029, 5041), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5037, 5041), True, 'import numpy as np\n'), ((5147, 5159), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5155, 5159), True, 'import numpy as np\n'), ((5606, 5618), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5614, 5618), True, 'import numpy as np\n'), ((5663, 5675), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5671, 5675), True, 'import numpy as np\n'), ((5721, 5733), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5729, 5733), True, 'import numpy as np\n'), ((5839, 5851), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (5847, 5851), True, 'import numpy as np\n'), ((6564, 6572), 'matplotlib.pyplot.ion', 'py.ion', ([], {}), '()\n', (6570, 6572), True, 'import matplotlib.pyplot as py\n'), ((6573, 6588), 'matplotlib.pyplot.close', 'py.close', (['"""all"""'], {}), "('all')\n", (6581, 6588), True, 'import matplotlib.pyplot as py\n'), ((6590, 6602), 'matplotlib.pyplot.figure', 'py.figure', (['(1)'], {}), '(1)\n', (6599, 6602), True, 'import matplotlib.pyplot as py\n'), ((6603, 6639), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'wood'], {'lw': '(2)', 'label': '"""wood"""'}), "(t, wood, lw=2, label='wood')\n", (6610, 6639), True, 'import matplotlib.pyplot as py\n'), ((6640, 6674), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'gas'], {'lw': '(2)', 'label': '"""gas"""'}), "(t, gas, lw=2, label='gas')\n", (6647, 6674), True, 'import matplotlib.pyplot as py\n'), ((6675, 6709), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'tar'], {'lw': '(2)', 'label': '"""tar"""'}), "(t, tar, lw=2, label='tar')\n", 
(6682, 6709), True, 'import matplotlib.pyplot as py\n'), ((6710, 6746), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'char'], {'lw': '(2)', 'label': '"""char"""'}), "(t, char, lw=2, label='char')\n", (6717, 6746), True, 'import matplotlib.pyplot as py\n'), ((6747, 6785), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'water'], {'lw': '(2)', 'label': '"""water"""'}), "(t, water, lw=2, label='water')\n", (6754, 6785), True, 'import matplotlib.pyplot as py\n'), ((6786, 6824), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'vapor'], {'lw': '(2)', 'label': '"""vapor"""'}), "(t, vapor, lw=2, label='vapor')\n", (6793, 6824), True, 'import matplotlib.pyplot as py\n'), ((6886, 6907), 'matplotlib.pyplot.xlabel', 'py.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (6895, 6907), True, 'import matplotlib.pyplot as py\n'), ((6908, 6947), 'matplotlib.pyplot.ylabel', 'py.ylabel', (['"""Concentration (m.f. basis)"""'], {}), "('Concentration (m.f. basis)')\n", (6917, 6947), True, 'import matplotlib.pyplot as py\n'), ((6948, 6995), 'matplotlib.pyplot.legend', 'py.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fontsize': '(12)'}), "(loc='best', numpoints=1, fontsize=12)\n", (6957, 6995), True, 'import matplotlib.pyplot as py\n'), ((6996, 7005), 'matplotlib.pyplot.grid', 'py.grid', ([], {}), '()\n', (7003, 7005), True, 'import matplotlib.pyplot as py\n'), ((7007, 7019), 'matplotlib.pyplot.figure', 'py.figure', (['(2)'], {}), '(2)\n', (7016, 7019), True, 'import matplotlib.pyplot as py\n'), ((7020, 7057), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'wood2'], {'lw': '(2)', 'label': '"""wood"""'}), "(t, wood2, lw=2, label='wood')\n", (7027, 7057), True, 'import matplotlib.pyplot as py\n'), ((7058, 7093), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'gas2'], {'lw': '(2)', 'label': '"""gas"""'}), "(t, gas2, lw=2, label='gas')\n", (7065, 7093), True, 'import matplotlib.pyplot as py\n'), ((7094, 7129), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'tar2'], {'lw': '(2)', 'label': 
'"""tar"""'}), "(t, tar2, lw=2, label='tar')\n", (7101, 7129), True, 'import matplotlib.pyplot as py\n'), ((7130, 7167), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'char2'], {'lw': '(2)', 'label': '"""char"""'}), "(t, char2, lw=2, label='char')\n", (7137, 7167), True, 'import matplotlib.pyplot as py\n'), ((7168, 7207), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'water2'], {'lw': '(2)', 'label': '"""water"""'}), "(t, water2, lw=2, label='water')\n", (7175, 7207), True, 'import matplotlib.pyplot as py\n'), ((7208, 7247), 'matplotlib.pyplot.plot', 'py.plot', (['t', 'vapor2'], {'lw': '(2)', 'label': '"""vapor"""'}), "(t, vapor2, lw=2, label='vapor')\n", (7215, 7247), True, 'import matplotlib.pyplot as py\n'), ((7323, 7344), 'matplotlib.pyplot.xlabel', 'py.xlabel', (['"""Time (s)"""'], {}), "('Time (s)')\n", (7332, 7344), True, 'import matplotlib.pyplot as py\n'), ((7345, 7384), 'matplotlib.pyplot.ylabel', 'py.ylabel', (['"""Concentration (m.f. basis)"""'], {}), "('Concentration (m.f. basis)')\n", (7354, 7384), True, 'import matplotlib.pyplot as py\n'), ((7385, 7432), 'matplotlib.pyplot.legend', 'py.legend', ([], {'loc': '"""best"""', 'numpoints': '(1)', 'fontsize': '(12)'}), "(loc='best', numpoints=1, fontsize=12)\n", (7394, 7432), True, 'import matplotlib.pyplot as py\n'), ((7433, 7442), 'matplotlib.pyplot.grid', 'py.grid', ([], {}), '()\n', (7440, 7442), True, 'import matplotlib.pyplot as py\n'), ((4857, 4868), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (4864, 4868), True, 'import numpy as np\n'), ((5088, 5099), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5095, 5099), True, 'import numpy as np\n'), ((5549, 5560), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5556, 5560), True, 'import numpy as np\n'), ((5780, 5791), 'numpy.ones', 'np.ones', (['nt'], {}), '(nt)\n', (5787, 5791), True, 'import numpy as np\n'), ((2706, 2727), 'numpy.exp', 'np.exp', (['(-E1 / (R * T))'], {}), '(-E1 / (R * T))\n', (2712, 2727), True, 'import numpy as np\n'), ((2758, 
2779), 'numpy.exp', 'np.exp', (['(-E2 / (R * T))'], {}), '(-E2 / (R * T))\n', (2764, 2779), True, 'import numpy as np\n'), ((2810, 2831), 'numpy.exp', 'np.exp', (['(-E3 / (R * T))'], {}), '(-E3 / (R * T))\n', (2816, 2831), True, 'import numpy as np\n'), ((2863, 2884), 'numpy.exp', 'np.exp', (['(-E4 / (R * T))'], {}), '(-E4 / (R * T))\n', (2869, 2884), True, 'import numpy as np\n'), ((2914, 2935), 'numpy.exp', 'np.exp', (['(-E5 / (R * T))'], {}), '(-E5 / (R * T))\n', (2920, 2935), True, 'import numpy as np\n'), ((2966, 2987), 'numpy.exp', 'np.exp', (['(-Ew / (R * T))'], {}), '(-Ew / (R * T))\n', (2972, 2987), True, 'import numpy as np\n')] |
from pathlib import Path
from boucanpy.core import logger
from boucanpy.cli.base import BaseCommand
from boucanpy.db.models import models
class DbTruncate(BaseCommand):
    """Command that deletes every row of every registered model.

    Destructive: requires the --confirm flag; failures are collected per item
    and reported at the end instead of aborting the whole run.
    """

    name = "db-truncate"
    aliases = ["truncate"]
    description = "truncate db"
    add_log_level = True
    add_debug = True

    @classmethod
    def parser(cls, parser):
        """Register the --confirm opt-in flag on the argument parser."""
        # Bug fix: help text was a copy-paste leftover ("seed data").
        parser.add_argument("-c", "--confirm", action="store_true", help="confirm truncation of all data")
        return parser

    async def run(self):
        """Delete all rows for every model; collect and report failures."""
        self.db_register()
        failed = []
        if self.option("confirm"):
            for class_name, model in models.items():
                for item in self.session().query(model).all():
                    logger.warning(f"run@db_truncate.py - Deleting {item}")
                    try:
                        self.session().delete(item)
                        self.session().commit()
                    except Exception as e:
                        # remember (item, error) so one bad row doesn't stop the run
                        failed.append((item, e))
        else:
            logger.warning("run@db_truncate.py - You must confirm to drop data")
        if len(failed) > 0:
            logger.warning("run@db_truncate.py - Encountered errors")
            for f in failed:
                # Bug fix: previously printed the stale inner-loop variable
                # `item` instead of the failed (item, error) tuple `f`.
                print("Failed:", f[0])
                print("Error", f[1])
| [
"boucanpy.db.models.models.items",
"boucanpy.core.logger.warning"
] | [((601, 615), 'boucanpy.db.models.models.items', 'models.items', ([], {}), '()\n', (613, 615), False, 'from boucanpy.db.models import models\n'), ((999, 1067), 'boucanpy.core.logger.warning', 'logger.warning', (['"""run@db_truncate.py - You must confirm to drop data"""'], {}), "('run@db_truncate.py - You must confirm to drop data')\n", (1013, 1067), False, 'from boucanpy.core import logger\n'), ((1109, 1166), 'boucanpy.core.logger.warning', 'logger.warning', (['"""run@db_truncate.py - Encountered errors"""'], {}), "('run@db_truncate.py - Encountered errors')\n", (1123, 1166), False, 'from boucanpy.core import logger\n'), ((700, 755), 'boucanpy.core.logger.warning', 'logger.warning', (['f"""run@db_truncate.py - Deleting {item}"""'], {}), "(f'run@db_truncate.py - Deleting {item}')\n", (714, 755), False, 'from boucanpy.core import logger\n')] |
import random
from bit_vector import BitVector
import fault
import common
from fault.actions import Poke, Expect, Eval, Step, Print
from fault.array import Array
from fault.vector_builder import VectorBuilder
def test_tester_basic():
    """A poke/expect pair forms one vector; Eval starts a fresh row."""
    circuit = common.TestBasicCircuit
    builder = VectorBuilder(circuit)
    zero = BitVector(0, 1)
    builder.process(Poke(circuit.I, zero))
    builder.process(Expect(circuit.O, zero))
    assert builder.vectors == [[zero, zero]]
    builder.process(Eval())
    assert builder.vectors == [
        [zero, zero],
        [zero, fault.AnyValue],
    ]
def test_tester_clock():
    """Clock pokes fill the CLK column; Step appends a new vector row."""
    circuit = common.TestBasicClkCircuit
    builder = VectorBuilder(circuit)
    zero = BitVector(0, 1)
    one = BitVector(1, 1)
    builder.process(Poke(circuit.I, zero))
    builder.process(Print(circuit.O))
    builder.process(Expect(circuit.O, zero))
    assert builder.vectors == [[zero, zero, fault.AnyValue]]
    builder.process(Poke(circuit.CLK, zero))
    assert builder.vectors == [[zero, zero, zero]]
    builder.process(Step(circuit.CLK, 1))
    assert builder.vectors == [
        [zero, zero, zero],
        [zero, fault.AnyValue, one],
    ]
def test_tester_nested_arrays():
    """Element-wise pokes/expects on array ports collapse into Array values."""
    circuit = common.TestNestedArraysCircuit
    builder = VectorBuilder(circuit)
    # Same number and order of randint calls as before (3 draws of 4-bit values).
    values = [random.randint(0, (1 << 4) - 1) for _ in range(3)]
    for index, value in enumerate(values):
        builder.process(Poke(circuit.I[index], BitVector(value, 4)))
        builder.process(Expect(circuit.O[index], BitVector(value, 4)))
    assert builder.vectors == [[Array(values, 3), Array(values, 3)]]
| [
"fault.vector_builder.VectorBuilder",
"fault.actions.Eval",
"fault.actions.Print",
"bit_vector.BitVector",
"fault.array.Array",
"fault.actions.Step",
"random.randint"
] | [((285, 304), 'fault.vector_builder.VectorBuilder', 'VectorBuilder', (['circ'], {}), '(circ)\n', (298, 304), False, 'from fault.vector_builder import VectorBuilder\n'), ((716, 735), 'fault.vector_builder.VectorBuilder', 'VectorBuilder', (['circ'], {}), '(circ)\n', (729, 735), False, 'from fault.vector_builder import VectorBuilder\n'), ((1411, 1430), 'fault.vector_builder.VectorBuilder', 'VectorBuilder', (['circ'], {}), '(circ)\n', (1424, 1430), False, 'from fault.vector_builder import VectorBuilder\n'), ((496, 502), 'fault.actions.Eval', 'Eval', ([], {}), '()\n', (500, 502), False, 'from fault.actions import Poke, Expect, Eval, Step, Print\n'), ((807, 820), 'fault.actions.Print', 'Print', (['circ.O'], {}), '(circ.O)\n', (812, 820), False, 'from fault.actions import Poke, Expect, Eval, Step, Print\n'), ((1143, 1160), 'fault.actions.Step', 'Step', (['circ.CLK', '(1)'], {}), '(circ.CLK, 1)\n', (1147, 1160), False, 'from fault.actions import Poke, Expect, Eval, Step, Print\n'), ((1486, 1517), 'random.randint', 'random.randint', (['(0)', '((1 << 4) - 1)'], {}), '(0, (1 << 4) - 1)\n', (1500, 1517), False, 'import random\n'), ((338, 353), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (347, 353), False, 'from bit_vector import BitVector\n'), ((391, 406), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (400, 406), False, 'from bit_vector import BitVector\n'), ((769, 784), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (778, 784), False, 'from bit_vector import BitVector\n'), ((857, 872), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (866, 872), False, 'from bit_vector import BitVector\n'), ((1007, 1022), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1016, 1022), False, 'from bit_vector import BitVector\n'), ((441, 456), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (450, 456), False, 'from bit_vector import BitVector\n'), 
((458, 473), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (467, 473), False, 'from bit_vector import BitVector\n'), ((536, 551), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (545, 551), False, 'from bit_vector import BitVector\n'), ((553, 568), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (562, 568), False, 'from bit_vector import BitVector\n'), ((603, 618), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (612, 618), False, 'from bit_vector import BitVector\n'), ((916, 931), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (925, 931), False, 'from bit_vector import BitVector\n'), ((933, 948), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (942, 948), False, 'from bit_vector import BitVector\n'), ((1066, 1081), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1075, 1081), False, 'from bit_vector import BitVector\n'), ((1083, 1098), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1092, 1098), False, 'from bit_vector import BitVector\n'), ((1100, 1115), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1109, 1115), False, 'from bit_vector import BitVector\n'), ((1203, 1218), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1212, 1218), False, 'from bit_vector import BitVector\n'), ((1220, 1235), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1229, 1235), False, 'from bit_vector import BitVector\n'), ((1237, 1252), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1246, 1252), False, 'from bit_vector import BitVector\n'), ((1264, 1279), 'bit_vector.BitVector', 'BitVector', (['(0)', '(1)'], {}), '(0, 1)\n', (1273, 1279), False, 'from bit_vector import BitVector\n'), ((1297, 1312), 'bit_vector.BitVector', 'BitVector', (['(1)', '(1)'], {}), '(1, 1)\n', (1306, 1312), False, 'from bit_vector 
import BitVector\n'), ((1558, 1575), 'bit_vector.BitVector', 'BitVector', (['val', '(4)'], {}), '(val, 4)\n', (1567, 1575), False, 'from bit_vector import BitVector\n'), ((1620, 1637), 'bit_vector.BitVector', 'BitVector', (['val', '(4)'], {}), '(val, 4)\n', (1629, 1637), False, 'from bit_vector import BitVector\n'), ((1701, 1719), 'fault.array.Array', 'Array', (['expected', '(3)'], {}), '(expected, 3)\n', (1706, 1719), False, 'from fault.array import Array\n'), ((1721, 1739), 'fault.array.Array', 'Array', (['expected', '(3)'], {}), '(expected, 3)\n', (1726, 1739), False, 'from fault.array import Array\n')] |
import os


def rename_readmes(path):
    """Walk *path* recursively and rename every 'readme.md' to 'README.md'.

    :param path: root directory to walk.
    """
    for root, dirs, files in os.walk(path):
        for fname in files:
            if fname == 'readme.md':
                src = os.path.join(root, fname)
                dst = os.path.join(root, 'README.md')
                os.rename(src, dst)


if __name__ == '__main__':
    path = ''
    if not path:
        # Bug fix: original called the misspelled name `eixt(1)`, which
        # raised NameError instead of exiting cleanly.
        raise SystemExit(1)
    rename_readmes(path)
| [
"os.rename",
"os.path.join",
"os.walk"
] | [((73, 86), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (80, 86), False, 'import os\n'), ((155, 176), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (167, 176), False, 'import os\n'), ((220, 251), 'os.path.join', 'os.path.join', (['root', '"""README.md"""'], {}), "(root, 'README.md')\n", (232, 251), False, 'import os\n'), ((264, 283), 'os.rename', 'os.rename', (['src', 'dst'], {}), '(src, dst)\n', (273, 283), False, 'import os\n')] |
import mysql.connector
from utils.dataIO import dataIO
# Module-level MySQL setup: credentials come from a JSON config file; the
# connection and cursor are shared by every helper below.
db_cfg = dataIO.load_json("data/mysql.config.json")
# autocommit=True so each execute() is persisted without explicit commit()
db = mysql.connector.connect(host=db_cfg["host"], autocommit=True,
                             user=db_cfg["user"], password=db_cfg["pass"], database=db_cfg["db"])
dbc = db.cursor()
def sanitize(val):
    """Strip every non-alphanumeric character from *val* (guards table names
    that are interpolated into SQL strings)."""
    return "".join(filter(str.isalnum, val))
def check():
    """Ensure the shared MySQL connection is alive, reconnecting if dropped."""
    if db.is_connected():
        return
    db.reconnect()
def create_table(name: str):
    """Create the (sid, name, value) table *name* if it does not exist.

    The table name is sanitized before interpolation into the SQL string.
    """
    check()
    safe_name = sanitize(name)
    query = f"CREATE TABLE IF NOT EXISTS {safe_name}(sid BIGINT UNSIGNED, name TEXT, value TEXT)"
    dbc.execute(query)
def delete_table(name: str):
    """Drop table *name* (sanitized), temporarily disabling FK checks so the
    drop succeeds even when other tables reference it."""
    check()
    safe_name = sanitize(name)
    dbc.execute("SET foreign_key_checks = 0")
    dbc.execute(f"DROP TABLE IF EXISTS {safe_name}")
    dbc.execute("SET foreign_key_checks = 1")
def read(table: str, sid: int, name: str):
    """Return the stored value for (sid, name) in *table*, or None if absent."""
    check()
    table = sanitize(table)
    dbc.execute(f"SELECT value FROM {table} WHERE sid=%s AND name=%s", (sid, name))
    rows = dbc.fetchall()
    return rows[0][0] if rows else None
def write(table: str, sid: int, name: str, value: str):
    """Upsert *value* for the key (sid, name) in *table*."""
    check()
    table = sanitize(table)
    exists = read(table, sid, name) is not None
    if exists:
        dbc.execute(f"UPDATE {table} SET value=%s WHERE sid=%s AND name=%s", (value, sid, name))
    else:
        dbc.execute(f"INSERT INTO {table}(sid, name, value) VALUES (%s, %s, %s)", (sid, name, value))
def delete(table: str, sid: int, name: str):
    """Remove the row matching (sid, name) from *table*."""
    check()
    safe_table = sanitize(table)
    dbc.execute(f"DELETE FROM {safe_table} WHERE sid=%s AND name=%s", (sid, name))
| [
"utils.dataIO.dataIO.load_json"
] | [((65, 107), 'utils.dataIO.dataIO.load_json', 'dataIO.load_json', (['"""data/mysql.config.json"""'], {}), "('data/mysql.config.json')\n", (81, 107), False, 'from utils.dataIO import dataIO\n')] |
# Generated by Django 3.0 on 2020-04-02 13:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a nullable 'order' IntegerField to the
    # SocialMedia model so links can be displayed in a user-defined order.
    dependencies = [
        ('accounts', '0024_auto_20200330_1441'),
    ]
    operations = [
        migrations.AddField(
            model_name='socialmedia',
            name='order',
            field=models.IntegerField(blank=True, help_text='The order to display the links in, if any', null=True, verbose_name='order'),
        ),
    ]
| [
"django.db.models.IntegerField"
] | [((337, 466), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'help_text': '"""The order to display the links in, if any"""', 'null': '(True)', 'verbose_name': '"""order"""'}), "(blank=True, help_text=\n 'The order to display the links in, if any', null=True, verbose_name=\n 'order')\n", (356, 466), False, 'from django.db import migrations, models\n')] |
from components.base.ecu.types.impl_ecu_simple import SimpleECU
from components.base.ecu.software.ecu_software import ECUSoftware
from components.security.ecu.software.impl_app_layer_secure import SecureApplicationLayer
from layers.impl_comm_module_my_protocol import MyProtocolCommModule
class MyProtocolECU(SimpleECU):
    """ECU variant wired to the custom 'MyProtocol' communication module and a
    secure application layer."""

    def __init__(self, sim_env=None, ecu_id=None, data_rate=None, size_sending_buffer=None, size_receive_buffer=None):
        ''' Constructor
            Input:  sim_env                simpy.Environment        environment in which this ECU lives
                    ecu_id                 string                   id of this ECU component
                    data_rate              integer                  data_rate of the connected bus
                    size_sending_buffer    float                    size of the sending buffer of this ECU
                    size_receive_buffer    float                    size of the receiving buffer of this ECU
            Output: -
        '''
        # set settings
        self.set_settings()

        # Allow creation of an uninitialized template instance.
        # Bug fix: compare to None with identity ('is'), not equality ('==').
        if sim_env is None:
            return

        # set SW and HW
        SimpleECU.__init__(self, sim_env, ecu_id, data_rate, size_sending_buffer, size_receive_buffer)
        self.ecuSW = ECUSoftware(sim_env, MyProtocolCommModule(sim_env, ecu_id), SecureApplicationLayer(sim_env, ecu_id))

        # connect
        self._connect_hw_sw()

    def add_sending(self, start_time, interval, message_id, data, data_len):
        ''' this method adds a new sending action to the application layer of this
            ECU. Then the message will start sending messages in the defined interval
            starting at the specified start_time

            Input:  start_time    float        time at which the first message is sent
                    interval      float        period within which the messages are sent
                    message_id    integer      message identifier of the messages that are sent
                    data          object/..    content of the messages that are sent
                    data_len      float        size of one message
            Output: -
        '''
        self.ecuSW.app_lay.add_sending(start_time, interval, message_id, data, data_len)

    def get_type_id(self):
        ''' returns the id of this ECU type

            Input:    -
            Output:   ecu_type    string    type of this ECU; e.g.'TLSECU'
        '''
        return "MyProtocolECU"

    def add_stream(self, new_stream):
        ''' this method adds a new stream that is allowed to the TESLA environment.
            This stream will then be legal and the ECUs will send according to those
            streams.

            Input:    new_stream    MessageStream    message stream that is added to the environment
            Output:   -
        '''
        # push to communication module
        self.ecuSW.comm_mod.add_stream(new_stream)

        # add HW filter: only receivers of the stream accept its message id
        if self.ecu_id in new_stream.receivers and \
           new_stream.message_id not in self._allowed_streams:
            self._allowed_streams += [new_stream.message_id]
            self.ecuHW.transceiver.install_filter(self._allowed_streams)

    def set_max_message_number(self, nr_messages):
        ''' sets the number of messages that are sent by this ecu per
            stream

            Input:    nr_messages    int    number of messages sent
            Output:   -
        '''
        self.ecuSW.app_lay.set_max_message_number(nr_messages)

    def set_settings(self):
        ''' sets the initial setting association between the settings variables
            and the actual parameter

            Input:   -
            Output:  -
        '''
        self.settings = {}
        return self.settings

    def monitor_update(self):
        ''' returns a list of monitor inputs

            Input:    -
            Output:   list    list    list of MonitorInput objects
        '''
        return self.ecuSW.comm_mod.monitor_update()
'''class StdTLSECUTimingFunctions(object):
def __init__(self, main_library_tag='CyaSSL'):
self.available_tags = ['CyaSSL', 'Crypto_Lib_HW', 'Crypto_Lib_SW']
self.library_tag = main_library_tag # e.g. CyaSSL, or CryptoLib
self.function_map = {}
# Record Layer
self.function_map['t_tls_record_compression'] = self.c_t_tls_record_compression
self.function_map['t_tls_record_decompression'] = self.c_t_tls_record_decompression
def get_function_map(self):
return self.function_map
def c_t_timing_function_1(self, msg_size, compr_alg):
if compr_alg == CompressionMethod.NULL:
return 0
return 0
def c_t_timing_function_2(self, compressed_msg_size, compr_alg):
if compr_alg == CompressionMethod.NULL:
return 0
return 0
''' | [
"components.base.ecu.types.impl_ecu_simple.SimpleECU.__init__",
"components.security.ecu.software.impl_app_layer_secure.SecureApplicationLayer",
"layers.impl_comm_module_my_protocol.MyProtocolCommModule"
] | [((1163, 1261), 'components.base.ecu.types.impl_ecu_simple.SimpleECU.__init__', 'SimpleECU.__init__', (['self', 'sim_env', 'ecu_id', 'data_rate', 'size_sending_buffer', 'size_receive_buffer'], {}), '(self, sim_env, ecu_id, data_rate, size_sending_buffer,\n size_receive_buffer)\n', (1181, 1261), False, 'from components.base.ecu.types.impl_ecu_simple import SimpleECU\n'), ((1316, 1353), 'layers.impl_comm_module_my_protocol.MyProtocolCommModule', 'MyProtocolCommModule', (['sim_env', 'ecu_id'], {}), '(sim_env, ecu_id)\n', (1336, 1353), False, 'from layers.impl_comm_module_my_protocol import MyProtocolCommModule\n'), ((1355, 1394), 'components.security.ecu.software.impl_app_layer_secure.SecureApplicationLayer', 'SecureApplicationLayer', (['sim_env', 'ecu_id'], {}), '(sim_env, ecu_id)\n', (1377, 1394), False, 'from components.security.ecu.software.impl_app_layer_secure import SecureApplicationLayer\n')] |
from molecules.dna_sequence import DNASequence
class Primer:
    """
    Represent a Primer DNA Sequence which can be attached to DNA sequence in order to mark specific
    segment in the DNA sequence.
    """

    def __init__(self, searched_sequence):
        """
        Initialize Primer sequence.
        :param searched_sequence: DNA Sequence to search.
        """
        self.__searched_sequence = searched_sequence

    @property
    def searched_sequence(self):
        """The sequence this primer searches for (read-only)."""
        return self.__searched_sequence

    def try_attach_to_dna_sequence(self, dna_sequence):
        """
        Try attach to a given DNA Sequence.
        :param dna_sequence: DNA Sequence to try attach to.
        :return: The index in the DNA Sequence where the attachment occurred, Otherwise None.
        :raises ValueError: if dna_sequence is not a DNASequence.
        """
        if not isinstance(dna_sequence, DNASequence):
            raise ValueError('DNA Sequence given is not instance of DNASequence.')
        # The primer binds where the DNA contains the complement of the searched sequence.
        complement_search_sequence = [DNASequence.complement_table.get(base) for base in self.__searched_sequence]
        window = len(self.__searched_sequence)
        # Slide a window over the DNA sequence; return the first matching index.
        # (range is empty when the DNA is shorter than the searched sequence.)
        for current_index in range(dna_sequence.length - window + 1):
            if dna_sequence.bases[current_index: current_index + window] == complement_search_sequence:
                return current_index
        return None

    def __eq__(self, other):
        """
        Equality checking of 2 Primers.
        :param other: Primer object.
        :return: True if both Primers represents the same search sequence, Otherwise False.
        """
        return isinstance(other, self.__class__) and self.searched_sequence == other.searched_sequence

    def __hash__(self):
        """
        Hash consistent with __eq__.
        Bug fix: defining __eq__ alone implicitly set __hash__ to None, making
        Primer instances unusable in sets and as dict keys.
        """
        return hash(tuple(self.__searched_sequence))
| [
"molecules.dna_sequence.DNASequence.complement_table.get"
] | [((1137, 1175), 'molecules.dna_sequence.DNASequence.complement_table.get', 'DNASequence.complement_table.get', (['base'], {}), '(base)\n', (1169, 1175), False, 'from molecules.dna_sequence import DNASequence\n')] |
# Copyright (c) 2021 Huawei Technologies Co.,Ltd. All rights reserved.
#
# StratoVirt is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan
# PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
# http:#license.coscl.org.cn/MulanPSL2
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
"""Create session"""
import threading
import time
import aexpect
from utils.utils_logging import TestLog
from utils.exception import ConsoleBusyError
from utils.exception import NoConsoleError
from utils.exception import LoginAuthenticationError
from utils.exception import LoginTimeoutError
from utils.exception import LoginProcessTerminatedError
LOG = TestLog.get_global_log()
def lock(function):
    """
    Get the ConsoleManager lock, run the function, then release the lock.

    The lock is acquired non-blockingly; if it is already held,
    ConsoleBusyError is raised instead of waiting.

    Args:
        function: Function to package.
    """
    def package(*args, **kwargs):
        console_manager = args[0]
        # Bug fix: Lock.acquire_lock/release_lock were deprecated aliases
        # removed in Python 3.9 — use the canonical acquire/release.
        if console_manager.console_lock.acquire(False) is False:
            raise ConsoleBusyError
        try:
            return function(*args, **kwargs)
        finally:
            console_manager.console_lock.release()
    return package
class ConsoleManager():
    """A class for console session communication pipeline."""
    def __init__(self):
        # The single console this manager wraps; set via config_console().
        self._console = None
        self.status_test_command = None
        # Serializes console access; taken non-blockingly by the @lock decorator.
        self.console_lock = threading.Lock()
    @lock
    def login_session(self, status_test_command, prompt, username, password, timeout):
        """Login session by handle_session()"""
        self._console.set_status_test_command(status_test_command)
        self.handle_session(self._console, username, password, prompt, timeout, True)
    def create_session(self, status_test_command,
                       prompt, username, password, timeout):
        """Return a console session with itself as the manager."""
        if self._console is None:
            raise NoConsoleError
        self.login_session(status_test_command, prompt, username, password, timeout)
        return ConsoleSession(self)
    def config_console(self, console):
        """Attach a console object and adopt its status test command."""
        self._console = console
        self.status_test_command = self._console.status_test_command
    def close(self):
        """Close the underlying console."""
        self._console.close()
    @lock
    def get_func(self, func, *args, **kwargs):
        """
        Get the func provided by a Console.
        Looks up *func* by name on the console and calls it with the
        remaining arguments, holding the console lock for the duration.
        Args:
            func: function name
        """
        _func = getattr(self._console, func)
        return _func(*args, **kwargs)
    @staticmethod
    def handle_session(session, username, password, prompt, timeout=10,
                       debug=False):
        """
        Connect to a remote host (guest) using SSH or Telnet or else.
        Provide answers to each questions.
        Drives the interactive login state machine: answers host-key
        confirmations, sends username/password once each, and returns the
        accumulated console output once the shell prompt is reached.
        Raises LoginAuthenticationError on repeated credential prompts,
        LoginTimeoutError on timeout, LoginProcessTerminatedError if the
        session process dies.
        """
        # Guard counters: each credential may be sent only once; a second
        # prompt means authentication failed.
        password_prompt_count = 0
        login_prompt_count = 0
        last_chance = False
        # Patterns matched against the console's last line; the integer index
        # of the match is switched on below, so order matters.
        last_line = [r"[Aa]re you sure", # continue connect
                     r"[Pp]assword:\s*", # password:
                     r"(?<![Ll]ast )[Ll]ogin:\s*$", # login:
                     r"[Ee]nter.*username", # login:
                     r"[Ee]nter.*password", # password:
                     prompt, # prompt
                     r"[Ww]arning"] # Warning added RSA
        output = ""
        def _continue_connect(debug, session):
            # Answer the SSH host-key confirmation question.
            if debug:
                LOG.debug("Got 'Are you sure...', sending 'yes'")
            session.sendline("yes")
        def _send_passwd(debug, session, password):
            if debug:
                LOG.debug("Got password prompt, sending '%s'",
                          password)
            session.sendline(password)
        def _send_username(debug, session, username):
            if debug:
                LOG.debug("Got username prompt, sending '%s'",
                          username)
            session.send(username)
        while True:
            try:
                session.sendline()
                match, text = session.read_until_last_line_matches(last_line, timeout=timeout,
                                                                   internal_timeout=0.5, print_func=None)
                output += text
                if match == 0:
                    # host-key confirmation
                    _continue_connect(debug, session)
                    continue
                if match in (1, 4):
                    # password prompt (patterns 1 and 4)
                    if password_prompt_count == 0:
                        _send_passwd(debug, session, password)
                        password_prompt_count += 1
                        continue
                    raise LoginAuthenticationError("Got password prompt twice", text)
                if match in (2, 3):
                    # username prompt (patterns 2 and 3)
                    if login_prompt_count == 0 and password_prompt_count == 0:
                        _send_username(debug, session, username)
                        login_prompt_count += 1
                        continue
                    if login_prompt_count > 0:
                        raise LoginAuthenticationError("Got username prompt twice", text)
                    raise LoginAuthenticationError("Got username prompt after password prompt", text)
                if match == 5:
                    # shell prompt: login complete
                    if debug:
                        LOG.debug("Got shell prompt, logged successfully")
                    break
                if match == 6:
                    if debug:
                        LOG.debug("Got 'Warning added RSA to known host list")
                    continue
            except aexpect.ExpectTimeoutError as err:
                # send a empty line to avoid unexpected login timeout
                # because some message from linux kernel maybe impact match
                if not last_chance:
                    time.sleep(0.5)
                    session.sendline()
                    last_chance = True
                    continue
                raise LoginTimeoutError(err.output)
            except aexpect.ExpectProcessTerminatedError as err:
                raise LoginProcessTerminatedError(err.status, err.output)
        return output
return output
class ConsoleSession():
    """
    The wrapper of ShellSession from aexpect.
    Proxies console calls through its ConsoleManager; after "close" is run
    once, further "close" calls raise RuntimeError.
    """

    def __init__(self, manager):
        self._dead = False
        self._mgr = manager
        self.status_test_command = manager.status_test_command

    def __repr__(self):
        return "console session id <%s>" % id(self)

    def run_func(self, name, *args, **kwargs):
        """
        Execute console session function
        Args:
            name: function name. available name: is_responsive cmd_output cmd_output_safe
            cmd_status_output cmd_status cmd close send sendline sendcontrol send_ctrl set_linesep
            read_nonblocking read_until_output_matches read_until_last_line_matches
            read_until_any_line_matches read_up_to_prompt
        """
        # Non-close calls are simply forwarded to the manager.
        if name != "close":
            return self._mgr.get_func(name, *args, **kwargs)
        # "close" is special-cased: it may only succeed once.
        if self._dead:
            raise RuntimeError("%s is closed." % self)
        self._mgr.close()
        self._dead = True
        return None
| [
"utils.exception.LoginAuthenticationError",
"utils.exception.LoginTimeoutError",
"threading.Lock",
"utils.exception.LoginProcessTerminatedError",
"time.sleep",
"utils.utils_logging.TestLog.get_global_log"
] | [((903, 927), 'utils.utils_logging.TestLog.get_global_log', 'TestLog.get_global_log', ([], {}), '()\n', (925, 927), False, 'from utils.utils_logging import TestLog\n'), ((1622, 1638), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1636, 1638), False, 'import threading\n'), ((4845, 4904), 'utils.exception.LoginAuthenticationError', 'LoginAuthenticationError', (['"""Got password prompt twice"""', 'text'], {}), "('Got password prompt twice', text)\n", (4869, 4904), False, 'from utils.exception import LoginAuthenticationError\n'), ((5329, 5404), 'utils.exception.LoginAuthenticationError', 'LoginAuthenticationError', (['"""Got username prompt after password prompt"""', 'text'], {}), "('Got username prompt after password prompt', text)\n", (5353, 5404), False, 'from utils.exception import LoginAuthenticationError\n'), ((6137, 6166), 'utils.exception.LoginTimeoutError', 'LoginTimeoutError', (['err.output'], {}), '(err.output)\n', (6154, 6166), False, 'from utils.exception import LoginTimeoutError\n'), ((6253, 6304), 'utils.exception.LoginProcessTerminatedError', 'LoginProcessTerminatedError', (['err.status', 'err.output'], {}), '(err.status, err.output)\n', (6280, 6304), False, 'from utils.exception import LoginProcessTerminatedError\n'), ((5243, 5302), 'utils.exception.LoginAuthenticationError', 'LoginAuthenticationError', (['"""Got username prompt twice"""', 'text'], {}), "('Got username prompt twice', text)\n", (5267, 5302), False, 'from utils.exception import LoginAuthenticationError\n'), ((5992, 6007), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (6002, 6007), False, 'import time\n')] |
from neo4j import GraphDatabase
class AddressGraph:
    """Minimal wrapper around a Neo4j driver for running Cypher queries.

    Connects with the fixed user name ``"neo4j"``; only the URI and the
    password vary.
    """

    def __init__(self, address, password):
        # address: bolt/neo4j URI, e.g. "bolt://localhost:7687"
        self.connection = GraphDatabase.driver(address, auth=("neo4j", password))

    def write(self, query):
        """Run a Cypher statement for its side effects; any result is discarded."""
        with self.connection.session() as session:
            session.run(query)

    def read(self, query):
        """Run a Cypher query and return its records as a list.

        The records are materialized *before* the session closes: the
        previous implementation returned the live ``Result`` object, which
        becomes unusable once the ``with`` block exits and the session is
        closed.
        """
        with self.connection.session() as session:
            return list(session.run(query))

    def close(self):
        """Release the driver's underlying connection pool."""
        self.connection.close()
| [
"neo4j.GraphDatabase.driver"
] | [((117, 172), 'neo4j.GraphDatabase.driver', 'GraphDatabase.driver', (['address'], {'auth': "('neo4j', password)"}), "(address, auth=('neo4j', password))\n", (137, 172), False, 'from neo4j import GraphDatabase\n')] |
from vedacore.misc import build_from_cfg, registry
def build_loss(cfg):
    """Instantiate a loss object from a config dict via the vedacore registry."""
    loss_type = 'loss'
    return build_from_cfg(cfg, registry, loss_type)
| [
"vedacore.misc.build_from_cfg"
] | [((85, 122), 'vedacore.misc.build_from_cfg', 'build_from_cfg', (['cfg', 'registry', '"""loss"""'], {}), "(cfg, registry, 'loss')\n", (99, 122), False, 'from vedacore.misc import build_from_cfg, registry\n')] |
# Authors: <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import math
######### Linear Models Functions #########
def validator(x):
    """Clamp a model prediction to a usable integer hyper-parameter (>= 2)."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name.
    return max(2, int(x))


def predictor(t, m, x):
    """Evaluate a simple linear model: intercept ``t`` plus slope ``m`` times ``x``."""
    return t + m * x
######### Perplexity Parameters #########
# Fitted linear-model coefficients (log sample size -> perplexity), structural.
P_INTERCEPT_STRUCTURAL = -37.360438135651975
P_COEFFICIENT_STRUCTURAL = 8.578963490544542


def perplexity_structural(sample_length):
    """Predict the perplexity hyper-parameter for the structural setting.

    Evaluates the fitted linear model on log(sample_length) and clamps the
    result to an integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = predictor(P_INTERCEPT_STRUCTURAL, P_COEFFICIENT_STRUCTURAL, log_n)
    return validator(raw)
# Fitted linear-model coefficients (log sample size -> perplexity), tailored.
P_INTERCEPT_TAILORED = -2.1210847692307038
P_COEFFICIENT_TAILORED = 0.9442229439797486


def perplexity_tailored(sample_length):
    """Predict the perplexity hyper-parameter for the tailored setting.

    The fitted model's output is squared before being clamped to an
    integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = predictor(P_INTERCEPT_TAILORED, P_COEFFICIENT_TAILORED, log_n) ** 2
    return validator(raw)
# Fitted linear-model coefficients (log sample size -> perplexity), structural+PCA.
P_INTERCEPT_STRUCTURAL_PCA = -4.897067968319856
P_COEFFICIENT_STRUCTURAL_PCA = 1.415629186176671


def perplexity_structural_pca(sample_length):
    """Predict the perplexity hyper-parameter for the structural-PCA setting.

    The fitted model's output is squared before being clamped to an
    integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = predictor(P_INTERCEPT_STRUCTURAL_PCA,
                    P_COEFFICIENT_STRUCTURAL_PCA,
                    log_n) ** 2
    return validator(raw)
######### N_neighbors Parameters #########
# Fitted log-log model coefficients (log sample size -> log n_neighbors), structural.
N_INTERCEPT_STRUCTURAL = -2.050415832404518
N_COEFFICIENT_STRUCTURAL = 0.617757208655686


def n_neighbors_structural(sample_length):
    """Predict the n_neighbors hyper-parameter for the structural setting.

    The model is fitted in log-log space, so its output is exponentiated
    before being clamped to an integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = math.exp(predictor(N_INTERCEPT_STRUCTURAL,
                             N_COEFFICIENT_STRUCTURAL,
                             log_n))
    return validator(raw)
# Fitted linear-model coefficients (log sample size -> n_neighbors), tailored.
N_INTERCEPT_TAILORED = -12.268898898548853
N_COEFFICIENT_TAILORED = 3.516519699104097


def n_neighbors_tailored(sample_length):
    """Predict the n_neighbors hyper-parameter for the tailored setting.

    Evaluates the fitted linear model on log(sample_length) and clamps the
    result to an integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = predictor(N_INTERCEPT_TAILORED, N_COEFFICIENT_TAILORED, log_n)
    return validator(raw)
# Fitted log-log model coefficients (log sample size -> log n_neighbors), structural+PCA.
N_INTERCEPT_STRUCTURAL_PCA = -1.267586478241988
N_COEFFICIENT_STRUCTURAL_PCA = 0.49349366477471657


def n_neighbors_structural_pca(sample_length):
    """Predict the n_neighbors hyper-parameter for the structural-PCA setting.

    The model is fitted in log-log space, so its output is exponentiated
    before being clamped to an integer of at least 2 via ``validator``.
    """
    log_n = math.log(sample_length)
    raw = math.exp(predictor(N_INTERCEPT_STRUCTURAL_PCA,
                             N_COEFFICIENT_STRUCTURAL_PCA,
                             log_n))
    return validator(raw)
######### Min_dist Parameters #########
# Plain constants (no fitted model), one per embedding setting.
# NOTE(review): presumably UMAP's min_dist parameter -- confirm against the caller.
MIN_DIST_STRUCTURAL = 0.485
MIN_DIST_TAILORED = 0.47
MIN_DIST_STRUCTURAL_PCA = 0.36
######### Tooltips Parameters #########
# HTML hover-tooltip templates. "@imgs" and "@target" are column references
# substituted by the plotting library at hover time.
# NOTE(review): looks like Bokeh HoverTool template syntax -- confirm.
# TOOLTIPS_TARGET shows the thumbnail plus the sample's target value.
TOOLTIPS_TARGET = """
    <div>
        <div>
            <img
                src="@imgs" height="130" alt="@imgs" width="200"
                style="float: left; margin: 0px 15px 15px 0px;"
                border="2"
            ></img>
        </div>
        <div>
            <span style="font-size: 15px;">Target Value:</span>
            <span style="font-size: 13px; color: #696;">@target</span>
        </div>
    </div>
    """
# TOOLTIPS_NO_TARGET shows the thumbnail only (no target available).
TOOLTIPS_NO_TARGET = """
    <div>
        <div>
            <img
                src="@imgs" height="130" alt="@imgs" width="200"
                style="float: left; margin: 0px 15px 15px 0px;"
                border="2"
            ></img>
        </div>
    </div>
    """
"math.log"
] | [((510, 533), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (518, 533), False, 'import math\n'), ((1970, 1993), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (1978, 1993), False, 'import math\n'), ((851, 874), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (859, 874), False, 'import math\n'), ((1223, 1246), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (1231, 1246), False, 'import math\n'), ((1628, 1651), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (1636, 1651), False, 'import math\n'), ((2351, 2374), 'math.log', 'math.log', (['sample_length'], {}), '(sample_length)\n', (2359, 2374), False, 'import math\n')] |
#Essential Modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Dashboard Modules
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objects as go
from dash.dependencies import Input, Output
import dash_table
import plotly.express as px
import datetime as dt
from io import BytesIO
from wordcloud import WordCloud
from collections import deque
import pybase64
import os
import json
import sqlite3
from unidecode import unidecode
import time
from application import app
##from apps import live_twitter_sentiment_streaming
#Twitter Modules
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import random
import plotly
from navbar import Navbar
# Navigation bar component shared across pages.
navs=Navbar()
# Module-level sqlite handle to the database the streaming process writes
# sentiment rows into. NOTE(review): the callback below opens its own
# connection per invocation, so this connection and cursor appear unused
# -- confirm before removing (sqlite3 connections are not shareable
# across Dash worker threads anyway).
conn = sqlite3.connect('twitter.db')
c = conn.cursor()
# app=dash.Dash(__name__,external_stylesheets=[dbc.themes.BOOTSTRAP])
# Page layout: one card holding the title, the live-updating sentiment
# graph, and a 1-second interval timer that triggers the update callback.
_title = html.H4("Live Twitter Sentiment", className="card-title")
_graph = dcc.Graph(id="live-graph")
_timer = dcc.Interval(id="graph-update", interval=1000)
layout = dbc.Card([dbc.CardBody([_title, _graph, _timer])])
# Seed deques for plot history. NOTE(review): the callback below builds
# its own local X/Y arrays from the database query, shadowing these
# module-level names, so these deques appear unused -- confirm before
# removing.
X = deque(maxlen=20)
X.append(1)
Y = deque(maxlen=20)
Y.append(1)
@app.callback(Output('live-graph', 'figure'),
              [Input('graph-update', 'n_intervals')])
def update_graph_scatter(input_data):
    """Dash interval callback: redraw the live sentiment scatter plot.

    Reads the most recent 1000 sentiment rows from the sqlite database,
    smooths the compound sentiment with a rolling mean, and returns a
    Plotly figure dict with axis ranges fit to the data.

    Parameters
    ----------
    input_data :
        ``n_intervals`` tick count from the ``dcc.Interval`` component;
        used only as a trigger, its value is ignored.
    """
    # Open a fresh connection per tick and always close it: the previous
    # version leaked one connection every second because it never closed
    # them (and sqlite3 connections cannot be shared across threads).
    conn = sqlite3.connect('twitter.db')
    try:
        df = pd.read_sql("SELECT * FROM sentiment ORDER BY unix DESC LIMIT 1000", conn)
    finally:
        conn.close()
    # Rolling window of ~20% of the sample, but at least 1 so a nearly
    # empty table does not raise (rolling(0) is invalid in pandas).
    window = max(1, len(df) // 5)
    df['sentiment_smoothed'] = df['sentiment'].rolling(window).mean()
    df.dropna(inplace=True)
    X = df.unix.values[:]
    Y = df.sentiment_smoothed.values[:]
    data = plotly.graph_objs.Scatter(
            x=list(X),
            y=list(Y),
            name='Scatter',
            mode= 'lines+markers'
            )
    return {'data': [data],'layout' : go.Layout(dict(xaxis={'range':[min(X),max(X)],'title':'Timestamp(ms)'},
                                                   yaxis={'range':[min(Y),max(Y)],'title':'Compound Sentiment'}
                                                   )
                                                   )
                                                   }
# if __name__=="__main__":
# application.run_server(debug=True)
| [
"dash_core_components.Interval",
"collections.deque",
"sqlite3.connect",
"dash.dependencies.Output",
"dash.dependencies.Input",
"navbar.Navbar",
"dash_core_components.Graph",
"pandas.read_sql",
"dash_html_components.H4"
] | [((820, 828), 'navbar.Navbar', 'Navbar', ([], {}), '()\n', (826, 828), False, 'from navbar import Navbar\n'), ((837, 866), 'sqlite3.connect', 'sqlite3.connect', (['"""twitter.db"""'], {}), "('twitter.db')\n", (852, 866), False, 'import sqlite3\n'), ((1301, 1317), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (1306, 1317), False, 'from collections import deque\n'), ((1334, 1350), 'collections.deque', 'deque', ([], {'maxlen': '(20)'}), '(maxlen=20)\n', (1339, 1350), False, 'from collections import deque\n'), ((1513, 1542), 'sqlite3.connect', 'sqlite3.connect', (['"""twitter.db"""'], {}), "('twitter.db')\n", (1528, 1542), False, 'import sqlite3\n'), ((1574, 1648), 'pandas.read_sql', 'pd.read_sql', (['"""SELECT * FROM sentiment ORDER BY unix DESC LIMIT 1000"""', 'conn'], {}), "('SELECT * FROM sentiment ORDER BY unix DESC LIMIT 1000', conn)\n", (1585, 1648), True, 'import pandas as pd\n'), ((1378, 1408), 'dash.dependencies.Output', 'Output', (['"""live-graph"""', '"""figure"""'], {}), "('live-graph', 'figure')\n", (1384, 1408), False, 'from dash.dependencies import Input, Output\n'), ((1425, 1461), 'dash.dependencies.Input', 'Input', (['"""graph-update"""', '"""n_intervals"""'], {}), "('graph-update', 'n_intervals')\n", (1430, 1461), False, 'from dash.dependencies import Input, Output\n'), ((1038, 1095), 'dash_html_components.H4', 'html.H4', (['"""Live Twitter Sentiment"""'], {'className': '"""card-title"""'}), "('Live Twitter Sentiment', className='card-title')\n", (1045, 1095), True, 'import dash_html_components as html\n'), ((1113, 1139), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""live-graph"""'}), "(id='live-graph')\n", (1122, 1139), True, 'import dash_core_components as dcc\n'), ((1157, 1207), 'dash_core_components.Interval', 'dcc.Interval', ([], {'id': '"""graph-update"""', 'interval': '(1 * 1000)'}), "(id='graph-update', interval=1 * 1000)\n", (1169, 1207), True, 'import dash_core_components as dcc\n')] |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.oauth2.credentials import Credentials
from googleapiclient.discovery import build
class SheetsConfig:
    """Reads configuration values from a Google Sheets spreadsheet.

    The Sheets API client is built lazily on first use from the wrapped
    OAuth credentials, with read-only spreadsheet scope.
    """

    def __init__(self, oauth_credentials):
        self._oauth_credentials = oauth_credentials
        self._sheets_service = None  # created lazily by _get_sheets_service

    def _get_sheets_service(self):
        """Build (once) and return the Sheets API v4 service client."""
        if not self._sheets_service:
            credentials = Credentials(
                token=self._oauth_credentials.get_access_token(),
                refresh_token=self._oauth_credentials.get_refresh_token(),
                client_id=self._oauth_credentials.get_client_id(),
                client_secret=self._oauth_credentials.get_client_secret(),
                token_uri='https://accounts.google.com/o/oauth2/token',
                scopes=['https://www.googleapis.com/auth/spreadsheets.readonly'])
            self._sheets_service = build('sheets', 'v4', credentials=credentials)
        return self._sheets_service

    def to_dict(self, config):
        """Convert rows of (key, op, value, multiplier) into a config dict.

        Columns beyond the fourth are ignored, matching the previous
        ``dict(map(lambda ...))`` behavior, but written as a clearer dict
        comprehension.
        """
        return {row[0]: {"op": row[1], "value": row[2], "multiplier": row[3]}
                for row in config}

    def get_config(self, sheet_id, range):
        """Fetch the given A1 range and return it as a config dict."""
        config_range = self.get_range(sheet_id, range)
        return self.to_dict(config_range['values'])

    def get_range(self, sheet_id, range):
        """Return the raw Sheets API response for an A1 range.

        NOTE: the parameter name ``range`` shadows the builtin; it is kept
        for backward compatibility with keyword callers.
        """
        return self._get_sheets_service().spreadsheets().values().get(spreadsheetId=sheet_id, range=range).execute()

    def get_value(self, sheet_id, range):
        """Return the first cell of the given range, or None when it is empty."""
        # Local renamed from `range` so the parameter (and builtin) are not
        # clobbered by the API response.
        result = self.get_range(sheet_id, range)
        if result.get('values') is None:
            return None
        return result['values'][0][0]
| [
"googleapiclient.discovery.build"
] | [((1332, 1378), 'googleapiclient.discovery.build', 'build', (['"""sheets"""', '"""v4"""'], {'credentials': 'credentials'}), "('sheets', 'v4', credentials=credentials)\n", (1337, 1378), False, 'from googleapiclient.discovery import build\n')] |
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from pubtrack.pubs.api.views import (AuthorListCreateAPIView,
AuthorRUDAPIView,
MetaAuthorListCreateAPIView,
MetaAuthorRUDAPIView,
PublicationListCreateAPIView,
PublicationRUDAPIView,
PublicationStatusListCreateAPIView,
PublicationStatusRUDAPIView,
InstitutionListCreateAPIView,
InstitutionRUDAPIView,
AuthoringListCreateAPIVIew,
AuthoringRUDAPIView,
AffiliationListCreateAPIView,
AffiliationRUDAPIView,
BlacklistingListCreateAPIView,
BlacklistingRUDAPIView,
ConfigAPIView,
ContactAPIView,
ReadmeAPIView)
# NOTE(review): `router` is instantiated but no ViewSets are registered on
# it and it is not included in `urlpatterns`; it appears unused -- confirm
# before removing.
router = DefaultRouter()

urlpatterns = [
    # AUTHORS
    # -------
    # /authors/ GET POST
    path("authors/",
         AuthorListCreateAPIView.as_view(),
         name="authors-list"),
    # /authors/:slug GET PUT DELETE
    path("authors/<slug:slug>/",
         AuthorRUDAPIView.as_view(),
         name="authors-detail"),
    # PUBLICATION
    # -----------
    # /publications/ GET POST
    path("publications/",
         PublicationListCreateAPIView.as_view(),
         name="publications-list"),
    # /publications/:uuid/ GET PUT DELETE
    path("publications/<uuid:uuid>/",
         PublicationRUDAPIView.as_view(),
         name="publications-detail"),
    # PUBLICATION STATUS
    # ------------------
    # /publication-statuses/
    path("publication-statuses/",
         PublicationStatusListCreateAPIView.as_view(),
         name="publication-statuses-list"),
    # /publication-statuses/:publication_uuid/
    path("publication-statuses/<uuid:publication>/",
         PublicationStatusRUDAPIView.as_view(),
         name="publication-statuses-detail"),
    # INSTITUTIONS
    # ------------
    # /institutions/ GET POST
    path("institutions/",
         InstitutionListCreateAPIView.as_view(),
         name="institutions-list"),
    # /institutions/:slug/ GET PUT DELETE
    # Fixed: added the trailing slash for consistency with every other
    # detail route (was "institutions/<slug:slug>").
    path("institutions/<slug:slug>/",
         InstitutionRUDAPIView.as_view(),
         name="institutions-detail"),
    # META AUTHORS
    # ------------
    # /meta_authors/ GET POST
    path("meta-authors/",
         MetaAuthorListCreateAPIView.as_view(),
         name="meta-authors-list"),
    # /meta_authors/:slug/ GET PUT DELETE
    # Fixed: this route was named "meta-authors-list", duplicating the list
    # route's name and breaking reverse("meta-authors-detail").
    path("meta-authors/<slug:slug>/",
         MetaAuthorRUDAPIView.as_view(),
         name="meta-authors-detail"),
    # AUTHORINGS
    # ----------
    # /authorings/ GET POST
    path("authorings/",
         AuthoringListCreateAPIVIew.as_view(),
         name="authorings-list"),
    # /authorings/:author_slug/:publication_uuid/ GET PUT DELETE
    path("authorings/<slug:author_slug>/<uuid:publication_uuid>/",
         AuthoringRUDAPIView.as_view(),
         name="authorings-detail"),
    # AFFILIATIONS
    # ------------
    # /affiliations/ GET POST
    path("affiliations/",
         AffiliationListCreateAPIView.as_view(),
         name="affiliations-list"),
    # /affiliations/:author_slug/:institution_slug/ GET PUT DELETE
    path("affiliations/<slug:author_slug>/<slug:institution_slug>/",
         AffiliationRUDAPIView.as_view(),
         name="affiliations-detail"),
    # BLACKLISTINGS
    # -------------
    # /blacklistings/ GET POST
    path("blacklistings/",
         BlacklistingListCreateAPIView.as_view(),
         name="blacklistings-list"),
    # /blacklistings/:meta_author_slug/:institution_slug GET PUT DELETE
    path("blacklistings/<slug:meta_author_slug>/<slug:institution_slug>/",
         BlacklistingRUDAPIView.as_view(),
         name="blacklistings-detail"),
    # NON MODEL BASED ENDPOINTS
    # -------------------------
    # CONFIG
    path("config/",
         ConfigAPIView.as_view(),
         name="config"),
    # CONTACT
    path("contact/",
         ContactAPIView.as_view(),
         name='contact'),
    # README
    path("readme/",
         ReadmeAPIView.as_view(),
         name='readme')
]
| [
"pubtrack.pubs.api.views.PublicationRUDAPIView.as_view",
"pubtrack.pubs.api.views.AuthorListCreateAPIView.as_view",
"pubtrack.pubs.api.views.AuthoringRUDAPIView.as_view",
"pubtrack.pubs.api.views.AffiliationListCreateAPIView.as_view",
"pubtrack.pubs.api.views.BlacklistingListCreateAPIView.as_view",
"pubtr... | [((1271, 1286), 'rest_framework.routers.DefaultRouter', 'DefaultRouter', ([], {}), '()\n', (1284, 1286), False, 'from rest_framework.routers import DefaultRouter\n'), ((1448, 1481), 'pubtrack.pubs.api.views.AuthorListCreateAPIView.as_view', 'AuthorListCreateAPIView.as_view', ([], {}), '()\n', (1479, 1481), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((1648, 1674), 'pubtrack.pubs.api.views.AuthorRUDAPIView.as_view', 'AuthorRUDAPIView.as_view', ([], {}), '()\n', (1672, 1674), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((1866, 1904), 'pubtrack.pubs.api.views.PublicationListCreateAPIView.as_view', 'PublicationListCreateAPIView.as_view', ([], {}), '()\n', (1902, 1904), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, 
AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((2081, 2112), 'pubtrack.pubs.api.views.PublicationRUDAPIView.as_view', 'PublicationRUDAPIView.as_view', ([], {}), '()\n', (2110, 2112), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((2275, 2319), 'pubtrack.pubs.api.views.PublicationStatusListCreateAPIView.as_view', 'PublicationStatusListCreateAPIView.as_view', ([], {}), '()\n', (2317, 2319), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((2475, 2512), 'pubtrack.pubs.api.views.PublicationStatusRUDAPIView.as_view', 'PublicationStatusRUDAPIView.as_view', ([], {}), '()\n', (2510, 2512), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, 
AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((2718, 2756), 'pubtrack.pubs.api.views.InstitutionListCreateAPIView.as_view', 'InstitutionListCreateAPIView.as_view', ([], {}), '()\n', (2754, 2756), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((2932, 2963), 'pubtrack.pubs.api.views.InstitutionRUDAPIView.as_view', 'InstitutionRUDAPIView.as_view', ([], {}), '()\n', (2961, 2963), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((3162, 3199), 'pubtrack.pubs.api.views.MetaAuthorListCreateAPIView.as_view', 'MetaAuthorListCreateAPIView.as_view', ([], {}), '()\n', (3197, 3199), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, 
AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((3376, 3406), 'pubtrack.pubs.api.views.MetaAuthorRUDAPIView.as_view', 'MetaAuthorRUDAPIView.as_view', ([], {}), '()\n', (3404, 3406), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((3597, 3633), 'pubtrack.pubs.api.views.AuthoringListCreateAPIVIew.as_view', 'AuthoringListCreateAPIVIew.as_view', ([], {}), '()\n', (3631, 3633), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((3837, 3866), 'pubtrack.pubs.api.views.AuthoringRUDAPIView.as_view', 'AuthoringRUDAPIView.as_view', ([], {}), '()\n', (3864, 3866), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, 
AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((4063, 4101), 'pubtrack.pubs.api.views.AffiliationListCreateAPIView.as_view', 'AffiliationListCreateAPIView.as_view', ([], {}), '()\n', (4099, 4101), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((4309, 4340), 'pubtrack.pubs.api.views.AffiliationRUDAPIView.as_view', 'AffiliationRUDAPIView.as_view', ([], {}), '()\n', (4338, 4340), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((4542, 4581), 'pubtrack.pubs.api.views.BlacklistingListCreateAPIView.as_view', 'BlacklistingListCreateAPIView.as_view', ([], {}), '()\n', (4579, 4581), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, 
AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((4796, 4828), 'pubtrack.pubs.api.views.BlacklistingRUDAPIView.as_view', 'BlacklistingRUDAPIView.as_view', ([], {}), '()\n', (4826, 4828), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((4976, 4999), 'pubtrack.pubs.api.views.ConfigAPIView.as_view', 'ConfigAPIView.as_view', ([], {}), '()\n', (4997, 4999), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((5071, 5095), 'pubtrack.pubs.api.views.ContactAPIView.as_view', 'ContactAPIView.as_view', ([], {}), '()\n', (5093, 5095), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, 
BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n'), ((5166, 5189), 'pubtrack.pubs.api.views.ReadmeAPIView.as_view', 'ReadmeAPIView.as_view', ([], {}), '()\n', (5187, 5189), False, 'from pubtrack.pubs.api.views import AuthorListCreateAPIView, AuthorRUDAPIView, MetaAuthorListCreateAPIView, MetaAuthorRUDAPIView, PublicationListCreateAPIView, PublicationRUDAPIView, PublicationStatusListCreateAPIView, PublicationStatusRUDAPIView, InstitutionListCreateAPIView, InstitutionRUDAPIView, AuthoringListCreateAPIVIew, AuthoringRUDAPIView, AffiliationListCreateAPIView, AffiliationRUDAPIView, BlacklistingListCreateAPIView, BlacklistingRUDAPIView, ConfigAPIView, ContactAPIView, ReadmeAPIView\n')] |
'''
original implementation credit: https://github.com/openai/baselines
heavily adapted to suit our needs.
'''
import argparse
import tempfile
import os.path as osp
import gym
import logging
from tqdm import tqdm
import tensorflow as tf
import numpy as np
import os
import sys
import glob
# Make the project's src/ directory importable regardless of the current
# working directory: resolve this file's real location, derive the src/
# and repository roots, and put src/ at the front of sys.path so the
# `contrib` and `core` imports below resolve.
file_path = os.path.dirname(os.path.realpath(__file__))
src_path = os.path.abspath(os.path.join(file_path, os.pardir))
root_path = os.path.abspath(os.path.join(src_path, os.pardir))
sys.path.insert(0, src_path)
from contrib.baselines.gail import mlp_policy
from contrib.baselines import bench
from contrib.baselines import logger
from contrib.baselines.common import set_global_seeds, tf_util as U
from contrib.baselines.common.misc_util import boolean_flag
from contrib.baselines.common.mpi_adam import MpiAdam
from core.data_util import GymDataset, SepsisDataset
from core.run_gym import run_gym
def learn_original(pi, dataset, env_name, n_action, prefix, traj_lim, seed,
          optim_batch_size=128, max_iters=5e3,
          adam_epsilon=1e-4, optim_stepsize=1e-4,
          ckpt_dir=None, plot_dir=None, task_name=None,
          verbose=False):
    """
    Behavior-cloning pretraining WITHOUT the transition-regularization term.

    Fits the policy `pi` to the expert `dataset` by minimizing the mean
    squared error between expert actions and the policy's actions with
    MPI-synchronized Adam, then saves the policy variables to a checkpoint
    and returns the checkpoint path.

    NOTE(review): the `seed` and `max_iters` arguments are overridden by
    the hard-coded values below (seed=0, max_iters=5e4) -- confirm intent.
    """
    # custom hyperparams
    seed = 0
    max_iters = 5e4
    # Validate (and log loss history) 10 times over the whole run.
    val_per_iter = int(max_iters/10)
    # placeholder
    ob = U.get_placeholder_cached(name="ob")
    ac = pi.pdtype.sample_placeholder([None])
    stochastic = U.get_placeholder_cached(name="stochastic")
    # MSE between the expert action and the policy's sampled action.
    loss = tf.reduce_mean(tf.square(tf.to_float(ac-pi.ac)))
    var_list = pi.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, stochastic], [loss]+[U.flatgrad(loss, var_list)])
    U.initialize()
    adam.sync()  # broadcast optimizer state across MPI workers
    logger.log("Training a policy with Behavior Cloning")
    logger.log("with {} trajs, {} steps".format(dataset.num_traj, dataset.num_transition))
    loss_history = {}
    loss_history["train_action_loss"] = []
    loss_history["val_action_loss"] = []
    for iter_so_far in tqdm(range(int(max_iters))):
        ob_expert, ac_expert, _, _ = dataset.get_next_batch(optim_batch_size, 'train')
        train_loss, g = lossandgrad(ob_expert, ac_expert, True)
        adam.update(g, optim_stepsize)
        if verbose and iter_so_far % val_per_iter == 0:
            ob_expert, ac_expert, _, _ = dataset.get_next_batch(-1, 'val')
            val_loss, _ = lossandgrad(ob_expert, ac_expert, True)
            logger.log("Training loss: {}, Validation loss: {}".format(train_loss, val_loss))
            loss_history["train_action_loss"].append(train_loss)
            loss_history["val_action_loss"].append(val_loss)
        # NOTE(review): plot() runs on *every* iteration (it sits outside
        # the `if verbose` branch), redrawing the figure 5e4 times; it was
        # probably meant to live inside the validation branch -- confirm.
        plot(env_name, loss_history, traj_lim, plot_dir)
    # NOTE(review): os.makedirs(ckpt_dir) runs before the `ckpt_dir is None`
    # check, so passing ckpt_dir=None raises here and the tempfile fallback
    # below is unreachable -- confirm.
    os.makedirs(ckpt_dir, exist_ok=True)
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        ckpt_fname = "ckpt.bc.{}.{}".format(traj_lim, seed)
        savedir_fname = osp.join(ckpt_dir, ckpt_fname)
    U.save_state(savedir_fname, var_list=pi.get_variables())
    return savedir_fname
def learn(network, dataset, env_name, n_action, prefix, traj_lim, seed,
          optim_batch_size=32, max_iters=1e4,
          adam_epsilon=1e-4, optim_stepsize=3e-4,
          ckpt_dir=None, plot_dir=None, task_name=None,
          verbose=False):
    """
    Behavior-cloning pretraining WITH transition regularization.

    Jointly minimizes (a) the cross-entropy between expert actions and the
    policy head `network.pi` and (b) the squared error of the transition
    head `network.T`'s next-observation prediction, weighted alpha/beta,
    with MPI-synchronized Adam. Saves the network variables to a
    checkpoint and returns the checkpoint path.

    NOTE(review): the `seed` argument is overridden by the hard-coded
    value below (seed=0) -- confirm intent.
    """
    seed = 0
    alpha = 0.7  # weight of the action cross-entropy term
    beta = 1.0  # weight of the transition-regularization term
    pi = network.pi
    T = network.T
    # Validate (and log loss history) 20 times over the whole run.
    val_per_iter = int(max_iters/20)
    ob = U.get_placeholder_cached(name="ob")
    T_ac = U.get_placeholder_cached(name="T_ac")
    pi_stochastic = U.get_placeholder_cached(name="pi_stochastic")
    T_stochastic = U.get_placeholder_cached(name="T_stochastic")
    ac = network.pdtype.sample_placeholder([None])
    ob_next = network.ob_next_pdtype.sample_placeholder([None])
    onehot_ac = tf.one_hot(ac, depth=n_action)
    # Action loss: cross-entropy between expert action and policy logits.
    ce_loss = tf.losses.softmax_cross_entropy(logits=pi.logits,
                                             onehot_labels=onehot_ac)
    ce_loss = tf.reduce_mean(ce_loss)
    # Regularization loss: squared error of the predicted next observation.
    reg_loss = tf.reduce_mean(tf.square(tf.to_float(ob_next-network.ob_next)))
    losses = [ce_loss, reg_loss]
    total_loss = alpha * ce_loss + beta * reg_loss
    var_list = network.get_trainable_variables()
    adam = MpiAdam(var_list, epsilon=adam_epsilon)
    lossandgrad = U.function([ob, ac, T_ac, ob_next, pi_stochastic, T_stochastic],
            losses +[U.flatgrad(total_loss, var_list)])
    U.initialize()
    adam.sync()  # broadcast optimizer state across MPI workers
    logger.log("Training a policy with Behavior Cloning")
    logger.log("with {} trajs, {} steps".format(dataset.num_traj, dataset.num_transition))
    loss_history = {}
    loss_history["train_action_loss"] = []
    loss_history["train_transition_loss"] = []
    loss_history["val_action_loss"] = []
    loss_history["val_transition_loss"] = []
    for iter_so_far in tqdm(range(int(max_iters))):
        #ob_expert, ac_expert = dataset.get_next_batch(optim_batch_size, 'train')
        ob_expert, ac_expert, ob_next_expert, info = dataset.get_next_batch(optim_batch_size, 'train')
        train_loss_ce, train_loss_reg, g = lossandgrad(ob_expert, ac_expert, ac_expert, ob_next_expert, True, True)
        adam.update(g, optim_stepsize)
        if verbose and iter_so_far % val_per_iter == 0:
            #ob_expert, ac_expert = dataset.get_next_batch(-1, 'val')
            ob_expert, ac_expert, ob_next_expert, info = dataset.get_next_batch(-1, 'val')
            val_loss_ce, val_loss_reg, _ = lossandgrad(ob_expert, ac_expert, ac_expert, ob_next_expert, True, True)
            items = [train_loss_ce, train_loss_reg, val_loss_ce, val_loss_reg]
            logger.log("Training Action loss: {}\n" \
                       "Training Transition loss: {}\n" \
                       "Validation Action loss: {}\n" \
                       "Validation Transition Loss:{}\n".format(*items))
            loss_history["train_action_loss"].append(train_loss_ce)
            loss_history["train_transition_loss"].append(train_loss_reg)
            loss_history["val_action_loss"].append(val_loss_ce)
            loss_history["val_transition_loss"].append(val_loss_reg)
        #if len(loss_history["val_action_loss"]) > 1:
        #    val_loss_ce_delta = loss_history["val_action_loss"][-1] - val_loss_ce
        #    if np.abs(val_loss_ce_delta) < val_stop_threshold:
        #        logger.log("validation error seems to have converged.")
        #        break
        # NOTE(review): plot() runs on *every* iteration (it sits outside
        # the `if verbose` branch); it was probably meant to live inside
        # the validation branch -- confirm.
        plot(env_name, loss_history, traj_lim, plot_dir)
    # NOTE(review): os.makedirs(ckpt_dir) runs before the `ckpt_dir is None`
    # check, so passing ckpt_dir=None raises here and the tempfile fallback
    # below is unreachable -- confirm.
    os.makedirs(ckpt_dir, exist_ok=True)
    if ckpt_dir is None:
        savedir_fname = tempfile.TemporaryDirectory().name
    else:
        ckpt_fname = "ckpt.bc.{}.{}".format(traj_lim, seed)
        savedir_fname = osp.join(ckpt_dir, ckpt_fname)
    U.save_state(savedir_fname, var_list=network.get_variables())
    return savedir_fname
def plot(env_name, loss, traj_lim, save_path):
    """Plot train/validation loss curves and save them as a PNG.

    Parameters
    ----------
    env_name : str
        Environment id, used in the plot title and file name.
    loss : dict
        Maps loss names ("train_action_loss", "val_action_loss", and
        optionally "train_transition_loss"/"val_transition_loss") to lists
        of per-evaluation values.
    traj_lim : int
        Trajectory limit, embedded in the output file name.
    save_path : str
        Directory the PNG is written to (must already exist).
    """
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-whitegrid')
    num_data = len(loss["train_action_loss"])
    plt.ylabel('loss')
    plt.title('pretraining loss for {}'.format(env_name))
    # Dashed lines: training curves; solid lines: validation curves.
    plt.plot(np.arange(num_data), loss["train_action_loss"], c="r",
             linestyle="--")
    plt.plot(np.arange(num_data), loss["val_action_loss"], c="r")
    if "train_transition_loss" in loss:
        plt.plot(np.arange(num_data), loss["train_transition_loss"], c="b", linestyle="--")
        plt.plot(np.arange(num_data), loss["val_transition_loss"], c="b")
        plt.legend(['train_action', 'train_transition', 'val_action', 'val_transition'], loc='best')
    else:
        # BUGFIX: this legend was previously applied unconditionally, so it
        # always overwrote the four-entry legend above and mislabeled the
        # transition curves.
        plt.legend(['train_action', 'val_action'], loc='best')
    plt.savefig(os.path.join(save_path, "loss.{}.{}.png".format(env_name,
                                                                traj_lim)), format="png")
    plt.close()
def train_bc(task, params, ob_space, ac_space, args, env):
    """Behavior-cloning pre-training for a gym task.

    Loads the expert dataset, builds the policy network and, unless a
    checkpoint for this (traj_limitation, seed) pair already exists, trains
    one via ``learn``.  Returns the checkpoint path.
    """
    task_dir = os.path.join(root_path, "task", args.task)
    result_dir = os.path.join(task_dir, "result")
    expert_data = GymDataset(expert_path=args.expert_path,
                             traj_limitation=args.traj_limitation)
    U.make_session(num_cpu=1).__enter__()
    set_global_seeds(args.seed)

    def build_policy(name, ob_space, ac_space, reuse=False):
        # MLP policy with a learned feature map (phi) of configurable size.
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space,
                                    ac_space=ac_space, reuse=reuse,
                                    hid_size_phi=args.policy_hidden_size,
                                    num_hid_layers_phi=2,
                                    dim_phi=args.dim_phi)

    env_name = task["env_id"]
    policy_name = "pi.{}.{}".format(env_name.lower().split("-")[0], args.traj_limitation)
    pi = build_policy(policy_name, ob_space, ac_space)
    ckpt_name = "ckpt.bc.{}.{}".format(args.traj_limitation, args.seed)
    savedir_fname = osp.join(args.checkpoint_dir, ckpt_name, ckpt_name)
    # Only retrain when no saved checkpoint index is found on disk.
    if not os.path.exists(savedir_fname + ".index"):
        savedir_fname = learn(pi,
                              expert_data,
                              env_name,
                              env.action_space.n,
                              prefix="bc",
                              seed=args.seed,
                              traj_lim=args.traj_limitation,
                              max_iters=args.BC_max_iter,
                              ckpt_dir=osp.join(args.checkpoint_dir, ckpt_name),
                              plot_dir=result_dir,
                              task_name=task["env_id"],
                              verbose=True)
    logger.log(savedir_fname + "saved")
    return savedir_fname
def train_bc_sepsis(task, params, ob_space, ac_space, args):
    """Behavior-cloning pre-training on the sepsis dataset.

    Mirrors ``train_bc`` but reads expert data through ``SepsisDataset`` and
    takes the action count from ``ac_space`` (no live environment is used).
    Returns the checkpoint path.
    """
    task_dir = os.path.join(root_path, "task", args.task)
    result_dir = os.path.join(task_dir, "result")
    expert_data = SepsisDataset(expert_path=args.expert_path,
                                traj_limitation=args.traj_limitation)
    U.make_session(num_cpu=1).__enter__()
    set_global_seeds(args.seed)

    def build_policy(name, ob_space, ac_space, reuse=False):
        # Imitation + regularization policy with a learned feature map (phi).
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space,
                                    ac_space=ac_space, reuse=reuse,
                                    hid_size_phi=args.policy_hidden_size,
                                    num_hid_layers_phi=2,
                                    dim_phi=args.dim_phi)

    env_name = task["env_id"]
    policy_name = "pi.{}.{}".format(env_name.lower().split("-")[0], args.traj_limitation)
    pi = build_policy(policy_name, ob_space, ac_space)
    ckpt_name = "ckpt.bc.{}.{}".format(args.traj_limitation, args.seed)
    savedir_fname = osp.join(args.checkpoint_dir, ckpt_name, ckpt_name)
    # Only retrain when no saved checkpoint index is found on disk.
    if not os.path.exists(savedir_fname + ".index"):
        savedir_fname = learn(pi,
                              expert_data,
                              env_name,
                              ac_space.n,
                              prefix="bc",
                              seed=args.seed,
                              traj_lim=args.traj_limitation,
                              max_iters=args.BC_max_iter,
                              ckpt_dir=osp.join(args.checkpoint_dir, ckpt_name),
                              plot_dir=result_dir,
                              task_name=task["env_id"],
                              verbose=True)
    logger.log(savedir_fname + "saved")
    return savedir_fname
| [
"sys.path.insert",
"matplotlib.pyplot.ylabel",
"tensorflow.reduce_mean",
"numpy.arange",
"contrib.baselines.gail.mlp_policy.MlpPolicy",
"os.path.exists",
"contrib.baselines.common.set_global_seeds",
"matplotlib.pyplot.style.use",
"matplotlib.pyplot.close",
"contrib.baselines.common.tf_util.flatgra... | [((476, 504), 'sys.path.insert', 'sys.path.insert', (['(0)', 'src_path'], {}), '(0, src_path)\n', (491, 504), False, 'import sys\n'), ((322, 348), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (338, 348), False, 'import os\n'), ((377, 411), 'os.path.join', 'os.path.join', (['file_path', 'os.pardir'], {}), '(file_path, os.pardir)\n', (389, 411), False, 'import os\n'), ((441, 474), 'os.path.join', 'os.path.join', (['src_path', 'os.pardir'], {}), '(src_path, os.pardir)\n', (453, 474), False, 'import os\n'), ((1323, 1358), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""ob"""'}), "(name='ob')\n", (1347, 1358), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1422, 1465), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""stochastic"""'}), "(name='stochastic')\n", (1446, 1465), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1581, 1620), 'contrib.baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['var_list'], {'epsilon': 'adam_epsilon'}), '(var_list, epsilon=adam_epsilon)\n', (1588, 1620), False, 'from contrib.baselines.common.mpi_adam import MpiAdam\n'), ((1714, 1728), 'contrib.baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (1726, 1728), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1749, 1802), 'contrib.baselines.logger.log', 'logger.log', (['"""Training a policy with Behavior Cloning"""'], {}), "('Training a policy with Behavior Cloning')\n", (1759, 1802), False, 'from contrib.baselines import logger\n'), ((2723, 2759), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {'exist_ok': '(True)'}), '(ckpt_dir, exist_ok=True)\n', (2734, 2759), False, 'import os\n'), ((3484, 3519), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', 
([], {'name': '"""ob"""'}), "(name='ob')\n", (3508, 3519), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3531, 3568), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""T_ac"""'}), "(name='T_ac')\n", (3555, 3568), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3589, 3635), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""pi_stochastic"""'}), "(name='pi_stochastic')\n", (3613, 3635), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3655, 3700), 'contrib.baselines.common.tf_util.get_placeholder_cached', 'U.get_placeholder_cached', ([], {'name': '"""T_stochastic"""'}), "(name='T_stochastic')\n", (3679, 3700), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((3834, 3864), 'tensorflow.one_hot', 'tf.one_hot', (['ac'], {'depth': 'n_action'}), '(ac, depth=n_action)\n', (3844, 3864), True, 'import tensorflow as tf\n'), ((3879, 3953), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', ([], {'logits': 'pi.logits', 'onehot_labels': 'onehot_ac'}), '(logits=pi.logits, onehot_labels=onehot_ac)\n', (3910, 3953), True, 'import tensorflow as tf\n'), ((3981, 4004), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['ce_loss'], {}), '(ce_loss)\n', (3995, 4004), True, 'import tensorflow as tf\n'), ((4232, 4271), 'contrib.baselines.common.mpi_adam.MpiAdam', 'MpiAdam', (['var_list'], {'epsilon': 'adam_epsilon'}), '(var_list, epsilon=adam_epsilon)\n', (4239, 4271), False, 'from contrib.baselines.common.mpi_adam import MpiAdam\n'), ((4416, 4430), 'contrib.baselines.common.tf_util.initialize', 'U.initialize', ([], {}), '()\n', (4428, 4430), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((4451, 4504), 'contrib.baselines.logger.log', 'logger.log', (['"""Training a policy with Behavior 
Cloning"""'], {}), "('Training a policy with Behavior Cloning')\n", (4461, 4504), False, 'from contrib.baselines import logger\n'), ((6496, 6532), 'os.makedirs', 'os.makedirs', (['ckpt_dir'], {'exist_ok': '(True)'}), '(ckpt_dir, exist_ok=True)\n', (6507, 6532), False, 'import os\n'), ((7045, 7079), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-whitegrid"""'], {}), "('seaborn-whitegrid')\n", (7058, 7079), True, 'import matplotlib.pyplot as plt\n'), ((7153, 7171), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (7163, 7171), True, 'import matplotlib.pyplot as plt\n'), ((7699, 7753), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_action', 'val_action']"], {'loc': '"""best"""'}), "(['train_action', 'val_action'], loc='best')\n", (7709, 7753), True, 'import matplotlib.pyplot as plt\n'), ((7866, 7877), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (7875, 7877), True, 'import matplotlib.pyplot as plt\n'), ((7955, 7997), 'os.path.join', 'os.path.join', (['root_path', '"""task"""', 'args.task'], {}), "(root_path, 'task', args.task)\n", (7967, 7997), False, 'import os\n'), ((8014, 8047), 'os.path.join', 'os.path.join', (['task_path', '"""result"""'], {}), "(task_path, 'result')\n", (8026, 8047), False, 'import os\n'), ((8063, 8141), 'core.data_util.GymDataset', 'GymDataset', ([], {'expert_path': 'args.expert_path', 'traj_limitation': 'args.traj_limitation'}), '(expert_path=args.expert_path, traj_limitation=args.traj_limitation)\n', (8073, 8141), False, 'from core.data_util import GymDataset, SepsisDataset\n'), ((8212, 8239), 'contrib.baselines.common.set_global_seeds', 'set_global_seeds', (['args.seed'], {}), '(args.seed)\n', (8228, 8239), False, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((8979, 9022), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname', 'fname'], {}), '(args.checkpoint_dir, fname, fname)\n', (8987, 9022), True, 'import os.path as osp\n'), ((10235, 
10277), 'os.path.join', 'os.path.join', (['root_path', '"""task"""', 'args.task'], {}), "(root_path, 'task', args.task)\n", (10247, 10277), False, 'import os\n'), ((10294, 10327), 'os.path.join', 'os.path.join', (['task_path', '"""result"""'], {}), "(task_path, 'result')\n", (10306, 10327), False, 'import os\n'), ((10343, 10429), 'core.data_util.SepsisDataset', 'SepsisDataset', ([], {'expert_path': 'args.expert_path', 'traj_limitation': 'args.traj_limitation'}), '(expert_path=args.expert_path, traj_limitation=args.\n traj_limitation)\n', (10356, 10429), False, 'from core.data_util import GymDataset, SepsisDataset\n'), ((10501, 10528), 'contrib.baselines.common.set_global_seeds', 'set_global_seeds', (['args.seed'], {}), '(args.seed)\n', (10517, 10528), False, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((11698, 11741), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname', 'fname'], {}), '(args.checkpoint_dir, fname, fname)\n', (11706, 11741), True, 'import os.path as osp\n'), ((2938, 2968), 'os.path.join', 'osp.join', (['ckpt_dir', 'ckpt_fname'], {}), '(ckpt_dir, ckpt_fname)\n', (2946, 2968), True, 'import os.path as osp\n'), ((6711, 6741), 'os.path.join', 'osp.join', (['ckpt_dir', 'ckpt_fname'], {}), '(ckpt_dir, ckpt_fname)\n', (6719, 6741), True, 'import os.path as osp\n'), ((7243, 7262), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7252, 7262), True, 'import numpy as np\n'), ((7335, 7354), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7344, 7354), True, 'import numpy as np\n'), ((7602, 7698), 'matplotlib.pyplot.legend', 'plt.legend', (["['train_action', 'train_transition', 'val_action', 'val_transition']"], {'loc': '"""best"""'}), "(['train_action', 'train_transition', 'val_action',\n 'val_transition'], loc='best')\n", (7612, 7698), True, 'import matplotlib.pyplot as plt\n'), ((8314, 8487), 'contrib.baselines.gail.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': 'name', 
'ob_space': 'ob_space', 'ac_space': 'ac_space', 'reuse': 'reuse', 'hid_size_phi': 'args.policy_hidden_size', 'num_hid_layers_phi': '(2)', 'dim_phi': 'args.dim_phi'}), '(name=name, ob_space=ob_space, ac_space=ac_space, reuse\n =reuse, hid_size_phi=args.policy_hidden_size, num_hid_layers_phi=2,\n dim_phi=args.dim_phi)\n', (8334, 8487), False, 'from contrib.baselines.gail import mlp_policy\n'), ((9035, 9075), 'os.path.exists', 'os.path.exists', (["(savedir_fname + '.index')"], {}), "(savedir_fname + '.index')\n", (9049, 9075), False, 'import os\n'), ((9673, 9708), 'contrib.baselines.logger.log', 'logger.log', (["(savedir_fname + 'saved')"], {}), "(savedir_fname + 'saved')\n", (9683, 9708), False, 'from contrib.baselines import logger\n'), ((11038, 11211), 'contrib.baselines.gail.mlp_policy.MlpPolicy', 'mlp_policy.MlpPolicy', ([], {'name': 'name', 'ob_space': 'ob_space', 'ac_space': 'ac_space', 'reuse': 'reuse', 'hid_size_phi': 'args.policy_hidden_size', 'num_hid_layers_phi': '(2)', 'dim_phi': 'args.dim_phi'}), '(name=name, ob_space=ob_space, ac_space=ac_space, reuse\n =reuse, hid_size_phi=args.policy_hidden_size, num_hid_layers_phi=2,\n dim_phi=args.dim_phi)\n', (11058, 11211), False, 'from contrib.baselines.gail import mlp_policy\n'), ((11754, 11794), 'os.path.exists', 'os.path.exists', (["(savedir_fname + '.index')"], {}), "(savedir_fname + '.index')\n", (11768, 11794), False, 'import os\n'), ((13003, 13038), 'contrib.baselines.logger.log', 'logger.log', (["(savedir_fname + 'saved')"], {}), "(savedir_fname + 'saved')\n", (13013, 13038), False, 'from contrib.baselines import logger\n'), ((1502, 1525), 'tensorflow.to_float', 'tf.to_float', (['(ac - pi.ac)'], {}), '(ac - pi.ac)\n', (1513, 1525), True, 'import tensorflow as tf\n'), ((2809, 2838), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2836, 2838), False, 'import tempfile\n'), ((4046, 4084), 'tensorflow.to_float', 'tf.to_float', (['(ob_next - network.ob_next)'], {}), '(ob_next - 
network.ob_next)\n', (4057, 4084), True, 'import tensorflow as tf\n'), ((6582, 6611), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (6609, 6611), False, 'import tempfile\n'), ((7445, 7464), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7454, 7464), True, 'import numpy as np\n'), ((7537, 7556), 'numpy.arange', 'np.arange', (['num_data'], {}), '(num_data)\n', (7546, 7556), True, 'import numpy as np\n'), ((8170, 8195), 'contrib.baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(1)'}), '(num_cpu=1)\n', (8184, 8195), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((10459, 10484), 'contrib.baselines.common.tf_util.make_session', 'U.make_session', ([], {'num_cpu': '(1)'}), '(num_cpu=1)\n', (10473, 10484), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((1680, 1706), 'contrib.baselines.common.tf_util.flatgrad', 'U.flatgrad', (['loss', 'var_list'], {}), '(loss, var_list)\n', (1690, 1706), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((4376, 4408), 'contrib.baselines.common.tf_util.flatgrad', 'U.flatgrad', (['total_loss', 'var_list'], {}), '(total_loss, var_list)\n', (4386, 4408), True, 'from contrib.baselines.common import set_global_seeds, tf_util as U\n'), ((9477, 9513), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname'], {}), '(args.checkpoint_dir, fname)\n', (9485, 9513), True, 'import os.path as osp\n'), ((12807, 12843), 'os.path.join', 'osp.join', (['args.checkpoint_dir', 'fname'], {}), '(args.checkpoint_dir, fname)\n', (12815, 12843), True, 'import os.path as osp\n')] |
from __future__ import absolute_import
import errno
import logging
from ..codes import errorcodes
from ..util.utils import is_windows, range, get_temp_path, to_bytes, bytes, to_unicode, is_python3, is_callable
import struct
import sys
if is_windows():
    # we're going to have to do some ugly things, because Windows sucks
    import ctypes
    # Win32 file-access / named-pipe constants (values from winbase.h /
    # winerror.h), used with kernel32 via ctypes below.
    GENERIC_READ = 0x80000000
    GENERIC_WRITE = 0x40000000
    OPEN_EXISTING = 0x3
    INVALID_HANDLE_VALUE = -1
    PIPE_READMODE_MESSAGE = 0x2
    ERROR_FILE_NOT_FOUND = 0x2
    ERROR_PIPE_BUSY = 0xE7
    ERROR_MORE_DATA = 0xEA
    BUFSIZE = 512
else:
    # Suppress SIGPIPE on socket sends where the platform allows it:
    # Linux exposes the MSG_NOSIGNAL send flag, BSD/macOS the SO_NOSIGPIPE
    # socket option; fall back to neither when unavailable.
    try:
        from socket import MSG_NOSIGNAL
        _msg_flags = MSG_NOSIGNAL
    except ImportError:
        _msg_flags = 0
    try:
        from socket import SO_NOSIGPIPE
        _do_sock_opt = True
    except ImportError:
        _do_sock_opt = False
    import socket
    import fcntl
    from os import O_NONBLOCK
class BaseConnection(object):
"""Generate IPC Connection handler."""
# *nix specific
__sock = None
# Windows specific
__pipe = None
__open = False
__logger = None
__is_logging = False
def __init__(self, log=True, logger=None, log_file=None, log_level=logging.INFO):
if not isinstance(log, bool):
raise TypeError('log must be of bool type!')
if log:
if logger is not None:
# Panda3D notifies are similar, so we simply check if we can make the same calls as logger
if not hasattr(logger, 'debug'):
raise TypeError('logger must be of type logging!')
self.__logger = logger
else:
self.__logger = logging.getLogger(__name__)
log_fmt = logging.Formatter('[%(asctime)s][%(levelname)s] ' + '%(name)s - %(message)s')
if log_file is not None and hasattr(log_file, 'strip'):
fhandle = logging.FileHandler(log_file)
fhandle.setLevel(log_level)
fhandle.setFormatter(log_fmt)
self.__logger.addHandler(fhandle)
shandle = logging.StreamHandler(sys.stdout)
shandle.setLevel(log_level)
shandle.setFormatter(log_fmt)
self.__logger.addHandler(shandle)
self.__is_logging = True
def log(self, callback_name, *args):
if self.__logger is not None:
if hasattr(self.__logger, callback_name) and is_callable(self.__logger.__getattribute__(callback_name)):
self.__logger.__getattribute__(callback_name)(*args)
def __open_pipe(self, pipe_name, log_type='warning'):
"""
:param pipe_name: the named pipe string
:param log_type: the log type to use (default 'warning')
:return: opened(bool), try_again(bool)
"""
if not is_windows():
self.log('error', 'Attempted to call a Windows call on a non-Windows OS.')
return
pipe = ctypes.windll.kernel32.CreateFileW(pipe_name, GENERIC_READ | GENERIC_WRITE, 0, None, OPEN_EXISTING, 0,
None)
if pipe != INVALID_HANDLE_VALUE:
self.__pipe = pipe
return True, False
err = ctypes.windll.kernel32.GetLastError()
if err == ERROR_FILE_NOT_FOUND:
self.log(log_type, 'File not found.')
self.log(log_type, 'Pipe name: {}'.format(pipe_name))
return False, False
elif err == ERROR_PIPE_BUSY:
if ctypes.windll.kernel32.WaitNamedPipeW(pipe_name, 10000) == 0:
self.log(log_type, 'Pipe busy.')
return False, False
else:
# try again, should be free now
self.log('debug', 'Pipe was busy, but should be free now. Try again.')
return False, True
# some other error we don't care about
self.log('debug', 'Unknown error: {}'.format(err))
return False, False
def open(self, pipe_no=None):
if pipe_no is not None:
if not isinstance(pipe_no, int):
raise TypeError('pipe_no must be of type int!')
if pipe_no not in range(0, 10):
raise ValueError('pipe_no must be within range (0 <= pipe number < 10)!')
if is_windows():
# NOTE: don't forget to use a number after ipc-
pipe_name = u'\\\\.\\pipe\\discord-ipc-{}'
if pipe_no is not None:
# we only care about the first value if pipe_no isn't None
opened, try_again = self.__open_pipe(pipe_name.format(pipe_no))
if opened:
self.__open = True
self.log('info', 'Connected to pipe {}, as user requested.'.format(pipe_no))
return
elif try_again:
self.open(pipe_no=pipe_no)
return
else:
num = 0
while True:
if num >= 10:
break
opened, try_again = self.__open_pipe(pipe_name.format(num), log_type='debug')
if opened:
self.__open = True
self.log('debug', 'Automatically connected to pipe {}.'.format(num))
return
if try_again:
continue
num += 1
# we failed to get a pipe
self.__pipe = None
self.log('warning', 'Could not open a connection.')
else:
self.__sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if self.__sock is None or self.__sock == -1:
self.log('warning', 'Could not open socket.')
self.close()
return
try:
fcntl.fcntl(self.__sock, fcntl.F_SETFL, O_NONBLOCK)
except Exception as e:
self.log('warning', e)
self.close()
return
if _do_sock_opt:
try:
socket.setsockopt(socket.SOL_SOCKET, SO_NOSIGPIPE)
except Exception as e:
self.log('warning', e)
self.log('debug', 'Attempting to use sock as is. Notify a developer if an error occurs.')
sock_addr = get_temp_path()
if sock_addr.endswith('/'):
sock_addr = sock_addr[:-1]
sock_addr += '/discord-ipc-{}'
if pipe_no is not None:
ret_val = self.__sock.connect_ex(sock_addr.format(pipe_no))
if ret_val == 0:
self.__open = True
self.log('info', 'Connected to socket {}, as user requested.'.format(pipe_no))
return
else:
self.log('warning', 'Could not open socket {}.'.format(pipe_no))
self.close()
else:
for num in range(0, 10):
ret_val = self.__sock.connect_ex(sock_addr.format(num))
if ret_val == 0:
self.__open = True
self.log('debug', 'Automatically connected to socket {}.'.format(num))
return
self.log('warning', 'Could not open socket.')
self.close()
def write(self, data, opcode):
if not self.connected():
self.log('warning', 'Cannot write if we aren\'t connected yet!')
return False
if not isinstance(opcode, int):
raise TypeError('Opcode must be of int type!')
if data is None:
data = ''
try:
data = to_bytes(data)
except Exception as e:
self.log('warning', e)
return False
data_len = len(data)
# the following data must be little endian unsigned ints
# see: https://github.com/discordapp/discord-rpc/blob/master/documentation/hard-mode.md#notes
header = struct.pack('<II', opcode, data_len)
# append header to data
data = header + data
# get new data size
data_len = len(data)
if self.__pipe is not None:
written = ctypes.c_ulong(0)
success = ctypes.windll.kernel32.WriteFile(self.__pipe, ctypes.c_char_p(data), data_len,
ctypes.byref(written), None)
if (not success) or (data_len != written.value):
self.log('warning', 'Failed to write data onto pipe.')
return False
return True
elif self.__sock is not None:
data_sent = 0
while data_sent < data_len:
try:
sent = self.__sock.send(data[data_sent:], _msg_flags)
except Exception as e:
self.log('warning', e)
return False
if sent == 0:
self.log('warning', 'Socket connection broken!')
if data_sent == 0:
self.log('warning', 'No data sent; closing connection.')
self.close()
return False
data_sent += sent
return True
self.log('warning', 'write() executed code that shouldn\'t have run.')
return False
def read(self):
ret_val = [False, None, None]
if not self.connected():
self.log('warning', 'Cannot read if we haven\'t opened a connection!')
return ret_val
data = bytes()
header_size = struct.calcsize('<II')
# (is_successful_read, OpCode, data)
if self.__pipe is not None:
available = ctypes.c_ulong(0)
if not ctypes.windll.kernel32.PeekNamedPipe(self.__pipe, None, 0, None, ctypes.byref(available), None):
self.log('warning', 'Peek on pipe for header failed.')
self.close()
ret_val[2] = [errorcodes.PipeClosed, 'Pipe closed']
return ret_val
if available.value < header_size:
self.log('debug', 'Pipe doesn\'t have enough data to read in header.')
# assume this is like errno.EAGAIN
ret_val[2] = [errorcodes.PipeClosed, 'Pipe closed']
return ret_val
cb_read = ctypes.c_ulong(0)
buff = ctypes.create_string_buffer(header_size)
success = 0
while not success:
success = ctypes.windll.kernel32.ReadFile(self.__pipe, buff, header_size, ctypes.byref(cb_read), None)
if success == 1:
# we successfully read the HEADER :O
# Note: we use RAW here, otherwise it'll be a 1 byte kinda weird thing
header = buff.raw
break
elif ctypes.windll.kernel32.GetLastError() != ERROR_MORE_DATA:
# we don't have more data; close pipe
self.log('warning', 'Failed to read in header from pipe.')
self.close()
ret_val[2] = [errorcodes.PipeClosed, 'Pipe closed']
return ret_val
opcode, data_len = struct.unpack('<II', header)
cb_read = ctypes.c_ulong(0)
buff = ctypes.create_string_buffer(data_len)
success = 0
available = ctypes.c_ulong(0)
if not ctypes.windll.kernel32.PeekNamedPipe(self.__pipe, None, 0, None, ctypes.byref(available), None):
self.log('warning', 'Peek on pipe for data failed.')
self.close()
ret_val[2] = [errorcodes.ReadCorrupt, 'Partial data in frame']
return ret_val
if available.value < data_len:
self.log('warning', 'Pipe doesn\'t have enough data to read in data.')
# assume this is like errno.EAGAIN
ret_val[2] = [errorcodes.ReadCorrupt, 'Partial data in frame']
return ret_val
while not success:
success = ctypes.windll.kernel32.ReadFile(self.__pipe, buff, data_len, ctypes.byref(cb_read), None)
if success == 1:
# we successfully read the DATA :O
ret_val[0] = True
ret_val[1] = opcode
# value here actually works okay, so use that
# Note: raw also seems to work, but meh
data = buff.value
break
elif ctypes.windll.kernel32.GetLastError() != ERROR_MORE_DATA:
# we don't have more data; close pipe
self.log('warning', 'Failed to read in data from pipe.')
self.close()
ret_val[2] = [errorcodes.ReadCorrupt, 'Partial data in frame']
return ret_val
elif self.__sock is not None:
packets = list()
while len(bytes().join(packets)) < header_size:
try:
packet = self.__sock.recv(header_size - len(bytes().join(packets)))
except Exception as e:
ret_val[2] = [errorcodes.PipeClosed, 'Pipe closed']
if hasattr(e, 'errno'):
if e.errno == errno.EAGAIN:
self.log('debug', e)
self.log('debug', 'errno == EAGAIN')
return ret_val
self.log('warning', 'Failed to read in header!')
self.log('warning', e)
self.close()
if packet is None or len(packet) == 0:
self.log('warning', 'Socket connection broken!')
if len(bytes().join(packets)) == 0:
self.log('warning', 'No data sent; closing connection.')
self.close()
ret_val[2] = [errorcodes.PipeClosed, 'Pipe closed']
return ret_val
packets.append(packet)
header = bytes().join(packets)
packets = list()
opcode, data_len = struct.unpack('<II', header)
self.log('debug', 'Opcode: {}, data length: {}'.format(opcode, data_len))
while len(bytes().join(packets)) < data_len:
try:
packet = self.__sock.recv(data_len - len(bytes().join(packets)))
except Exception as e:
ret_val[2] = [errorcodes.ReadCorrupt, 'Partial data in frame']
if hasattr(e, 'errno'):
if e.errno == errno.EAGAIN:
self.log('debug', e)
self.log('debug', 'errno == EAGAIN')
return ret_val
self.log('warning', 'Failed to read in data!')
self.log('warning', e)
if packet is None or len(packet) == 0:
self.log('warning', 'Socket connection broken!')
if len(bytes().join(packets)) == 0:
self.log('warning', 'No data sent; closing connection.')
self.close()
ret_val[2] = [errorcodes.ReadCorrupt, 'Partial data in frame']
return ret_val
packets.append(packet)
data = bytes().join(packets)
ret_val[0] = True
ret_val[1] = opcode
if ret_val[0]:
if is_python3():
data = to_unicode(data)
ret_val[2] = data
self.log('debug', 'Return values: {}'.format(ret_val))
return ret_val
def close(self):
# ensure we're using Windows before trying to close a pipe
# Note: This should **never** execute on a non-Windows machine!
if self.__pipe is not None and is_windows():
ctypes.windll.kernel32.CloseHandle(self.__pipe)
self.__pipe = None
if self.__sock is not None:
try:
self.__sock.shutdown(socket.SHUT_RDWR)
self.__sock.close()
except Exception as e:
self.log('warning', e)
finally:
self.__sock = None
if self.__open:
self.__open = False
self.log('debug', 'Closed IPC connection.')
def destroy(self):
# make sure we close everything
self.close()
# if we automatically set our own logger, clean it up
if self.__is_logging:
for handle in self.__logger.handlers[:]:
handle.close()
self.__logger.removeHandler(handle)
self.__logger = None
@property
def is_open(self):
return self.__open
def connected(self):
    """Return whether the connection is open (method-style wrapper around is_open)."""
    return self.is_open
| [
"ctypes.windll.kernel32.CreateFileW",
"struct.calcsize",
"logging.getLogger",
"logging.StreamHandler",
"ctypes.byref",
"socket.socket",
"logging.Formatter",
"ctypes.c_ulong",
"struct.pack",
"ctypes.create_string_buffer",
"fcntl.fcntl",
"struct.unpack",
"ctypes.windll.kernel32.GetLastError",
... | [((3015, 3127), 'ctypes.windll.kernel32.CreateFileW', 'ctypes.windll.kernel32.CreateFileW', (['pipe_name', '(GENERIC_READ | GENERIC_WRITE)', '(0)', 'None', 'OPEN_EXISTING', '(0)', 'None'], {}), '(pipe_name, GENERIC_READ | GENERIC_WRITE,\n 0, None, OPEN_EXISTING, 0, None)\n', (3049, 3127), False, 'import ctypes\n'), ((3291, 3328), 'ctypes.windll.kernel32.GetLastError', 'ctypes.windll.kernel32.GetLastError', ([], {}), '()\n', (3326, 3328), False, 'import ctypes\n'), ((8135, 8171), 'struct.pack', 'struct.pack', (['"""<II"""', 'opcode', 'data_len'], {}), "('<II', opcode, data_len)\n", (8146, 8171), False, 'import struct\n'), ((9744, 9766), 'struct.calcsize', 'struct.calcsize', (['"""<II"""'], {}), "('<II')\n", (9759, 9766), False, 'import struct\n'), ((5674, 5723), 'socket.socket', 'socket.socket', (['socket.AF_UNIX', 'socket.SOCK_STREAM'], {}), '(socket.AF_UNIX, socket.SOCK_STREAM)\n', (5687, 5723), False, 'import socket\n'), ((8348, 8365), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (8362, 8365), False, 'import ctypes\n'), ((9872, 9889), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (9886, 9889), False, 'import ctypes\n'), ((10510, 10527), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (10524, 10527), False, 'import ctypes\n'), ((10547, 10587), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['header_size'], {}), '(header_size)\n', (10574, 10587), False, 'import ctypes\n'), ((11394, 11422), 'struct.unpack', 'struct.unpack', (['"""<II"""', 'header'], {}), "('<II', header)\n", (11407, 11422), False, 'import struct\n'), ((11445, 11462), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (11459, 11462), False, 'import ctypes\n'), ((11482, 11519), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['data_len'], {}), '(data_len)\n', (11509, 11519), False, 'import ctypes\n'), ((11568, 11585), 'ctypes.c_ulong', 'ctypes.c_ulong', (['(0)'], {}), '(0)\n', (11582, 11585), False, 'import 
ctypes\n'), ((16136, 16183), 'ctypes.windll.kernel32.CloseHandle', 'ctypes.windll.kernel32.CloseHandle', (['self.__pipe'], {}), '(self.__pipe)\n', (16170, 16183), False, 'import ctypes\n'), ((1691, 1718), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1708, 1718), False, 'import logging\n'), ((1745, 1822), 'logging.Formatter', 'logging.Formatter', (["('[%(asctime)s][%(levelname)s] ' + '%(name)s - %(message)s')"], {}), "('[%(asctime)s][%(levelname)s] ' + '%(name)s - %(message)s')\n", (1762, 1822), False, 'import logging\n'), ((2133, 2166), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (2154, 2166), False, 'import logging\n'), ((5928, 5979), 'fcntl.fcntl', 'fcntl.fcntl', (['self.__sock', 'fcntl.F_SETFL', 'O_NONBLOCK'], {}), '(self.__sock, fcntl.F_SETFL, O_NONBLOCK)\n', (5939, 5979), False, 'import fcntl\n'), ((8434, 8455), 'ctypes.c_char_p', 'ctypes.c_char_p', (['data'], {}), '(data)\n', (8449, 8455), False, 'import ctypes\n'), ((8522, 8543), 'ctypes.byref', 'ctypes.byref', (['written'], {}), '(written)\n', (8534, 8543), False, 'import ctypes\n'), ((14373, 14401), 'struct.unpack', 'struct.unpack', (['"""<II"""', 'header'], {}), "('<II', header)\n", (14386, 14401), False, 'import struct\n'), ((1925, 1954), 'logging.FileHandler', 'logging.FileHandler', (['log_file'], {}), '(log_file)\n', (1944, 1954), False, 'import logging\n'), ((3569, 3624), 'ctypes.windll.kernel32.WaitNamedPipeW', 'ctypes.windll.kernel32.WaitNamedPipeW', (['pipe_name', '(10000)'], {}), '(pipe_name, 10000)\n', (3606, 3624), False, 'import ctypes\n'), ((6176, 6226), 'socket.setsockopt', 'socket.setsockopt', (['socket.SOL_SOCKET', 'SO_NOSIGPIPE'], {}), '(socket.SOL_SOCKET, SO_NOSIGPIPE)\n', (6193, 6226), False, 'import socket\n'), ((9974, 9997), 'ctypes.byref', 'ctypes.byref', (['available'], {}), '(available)\n', (9986, 9997), False, 'import ctypes\n'), ((10733, 10754), 'ctypes.byref', 'ctypes.byref', (['cb_read'], {}), 
'(cb_read)\n', (10745, 10754), False, 'import ctypes\n'), ((11670, 11693), 'ctypes.byref', 'ctypes.byref', (['available'], {}), '(available)\n', (11682, 11693), False, 'import ctypes\n'), ((12319, 12340), 'ctypes.byref', 'ctypes.byref', (['cb_read'], {}), '(cb_read)\n', (12331, 12340), False, 'import ctypes\n'), ((11028, 11065), 'ctypes.windll.kernel32.GetLastError', 'ctypes.windll.kernel32.GetLastError', ([], {}), '()\n', (11063, 11065), False, 'import ctypes\n'), ((12725, 12762), 'ctypes.windll.kernel32.GetLastError', 'ctypes.windll.kernel32.GetLastError', ([], {}), '()\n', (12760, 12762), False, 'import ctypes\n')] |
# region HEADER
# !/usr/bin/env python
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, The MUDCake Project"
__credits__ = "<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>"
__license__ = """MIT License
Copyright (c) 2021 MUDCake Project
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
# endregion
import json
import uuid
import logging
import sys
from DatabaseHandler.DatabaseHandler import DatabaseHandler
from DungeonPackage.AccessList import AccessList
from DungeonPackage.ActiveDungeon import ActiveDungeon
from DungeonPackage.Character import Character
from DungeonPackage.Class import Class
from DungeonPackage.DungeonData import DungeonData
from DungeonPackage.Item import Item
from DungeonPackage.Npc import Npc
from DungeonPackage.Race import Race
from DungeonPackage.Room import Room
class DungeonManager:
""" Main class for handling everything dungeon based
"""
def __init__(self, data=None):
    """Create a dungeon manager and optionally hydrate it from a config dict.

    Args:
        data (dict, optional): full dungeon configuration as JSON-like dict:
            lists of rooms, races, classes, items, npcs (each a dict) plus
            the access list with the users saved on it.
    """
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    self.data = data
    self.managed_dungeon = DungeonData()
    self.db_handler = DatabaseHandler()
    # object lists filled while parsing the config data
    for attr in ('room_list', 'race_list', 'class_list',
                 'item_list', 'npc_list', 'accesslist'):
        setattr(self, attr, [])
    self.fill_attributes(data)
def fill_attributes(self, data):
    """Populate ``managed_dungeon`` from a dungeon configuration dict.

    Args:
        data (dict, optional): dungeon configuration containing
            'dungeonID', 'dungeonMasterID', 'maxPlayers', 'dungeonName',
            'dungeonDescription', 'private' and the object lists
            ('rooms', 'races', 'classes', 'items', 'npcs', 'accessList').

    If ``data`` is falsy the manager keeps its empty default state.
    """
    if data:
        # BUGFIX: the original read most fields from self.data while taking a
        # `data` parameter — calling it with any other dict mixed the two.
        # Use the argument consistently and keep self.data in sync for
        # parse_config_data(), which reads self.data.
        self.data = data
        self.managed_dungeon = DungeonData(dungeon_id=data['dungeonID'],
                                            dungeon_master_id=data['dungeonMasterID'],
                                            max_players=data['maxPlayers'],
                                            name=data['dungeonName'],
                                            description=data['dungeonDescription'],
                                            private=data['private'],
                                            access_list=AccessList(data['dungeonID']))
        self.check_for_dungeon_id()
        # lazy %-formatting: no string building unless DEBUG is enabled
        logging.debug("constructor: %s", self.managed_dungeon.dungeon_id)
        self.parse_config_data()
    else:
        logging.debug("data is none")
def parse_config_data(self):
    """Parse every JSON list in ``self.data`` into domain objects.

    Fills ``race_list``, ``item_list``, ``npc_list``, ``class_list`` and
    ``room_list`` and registers every access-list entry on the managed
    dungeon. Objects without a persisted id get a fresh UUID4 string.
    """
    race_data = self.data['races']
    items_data = self.data['items']
    npcs_data = self.data['npcs']
    room_data = self.data['rooms']
    class_data = self.data['classes']
    accesslist_data = self.data['accessList']
    for race in race_data:
        logging.debug(race)
        new_race = Race(name=race['name'], description=race['description'],
                        dungeon_id=self.managed_dungeon.dungeon_id)
        # reuse the saved id if present, otherwise mint a new one
        check_for_race_id = 'raceID' in race
        if check_for_race_id:
            new_race.race_id = race['raceID']
        else:
            new_race.race_id = str(uuid.uuid4())
        self.race_list.append(new_race)
    for item in items_data:
        logging.debug(item)
        new_item = Item(name=item['name'], description=item['description'])
        new_item.item_id = item['itemID'] if 'itemID' in item else str(uuid.uuid4())
        self.item_list.append(new_item)
    for npc in npcs_data:
        logging.debug(npc)
        new_npc = Npc(npc_id=npc['npcID'], name=npc['name'],
                      description=npc['description'], dungeon_id=self.managed_dungeon.dungeon_id)
        # an NPC's equipment is stored as the bare item id (or None)
        new_npc.item = None if npc['equipment'] is None else npc['equipment']['itemID']
        self.npc_list.append(new_npc)
    for classes in class_data:
        logging.debug(classes)
        new_class = Class(name=classes['name'], description=classes['description'],
                          dungeon_id=self.managed_dungeon.dungeon_id)
        check_for_class_id = 'classID' in classes
        if check_for_class_id:
            new_class.class_id = classes['classID']
        else:
            new_class.class_id = str(uuid.uuid4())
        if classes['equipment'] is None:
            new_class.item_id = None
        else:
            new_class.item_id = classes['equipment']['itemID']
            logging.debug(classes['equipment']['itemID'])
        self.class_list.append(new_class)
    for room in room_data:
        logging.debug(room)
        new_room = Room(coordinate_x=room['x'], coordinate_y=room['y'], north=room['north'], east=room['east'],
                        south=room['south'], west=room['west'], dungeon_id=self.managed_dungeon.dungeon_id)
        check_for_room_id = 'roomID' in room
        if check_for_room_id:
            new_room.room_id = room['roomID']
            logging.debug("roomID assigned")
        else:
            new_room.room_id = str(uuid.uuid4())
            logging.debug("roomID generated")
        # the remaining room fields are all optional in the payload
        check_for_name = 'name' in room
        if check_for_name:
            new_room.room_name = room['name']
        else:
            new_room.room_name = None
        check_for_description = 'description' in room
        if check_for_description:
            new_room.room_description = room['description']
        else:
            new_room.room_description = None
        check_for_start_room = 'isStartRoom' in room
        if check_for_start_room:
            new_room.is_start_room = room['isStartRoom']
        else:
            new_room.is_start_room = False
        check_for_npc = 'npc' in room
        if check_for_npc and room['npc']:
            new_room.npc_id = room['npc']['npcID']
        else:
            new_room.npc_id = None
        check_for_item = 'item' in room
        if check_for_item and room['item']:
            new_room.item_id = room['item']['itemID']
        else:
            new_room.item_id = None
        self.room_list.append(new_room)
    for ac_l in accesslist_data:
        self.managed_dungeon.access_list.add_user_to_access_list(user_name=ac_l['name'],
                                                                 is_allowed=ac_l['isAllowed'])
def write_dungeon_to_database(self):
    """Persist the managed dungeon and every parsed object list.

    Returns:
        str | None: the dungeon id on success, ``None`` when an IOError
        aborted the write (best-effort, matching the other writers).
    """
    active_dungeon = ActiveDungeon(rooms=self.room_list, classes=self.class_list, npcs=self.npc_list,
                                   items=self.item_list, dungeon_data=self.managed_dungeon, races=self.race_list,
                                   user_ids=None, character_ids=None)
    try:
        self.db_handler.save_or_update_dungeon(active_dungeon)
        logging.debug("Dungeon saved")
        self.__write_races_to_database()
        logging.debug("Races saved")
        self.__write_items_to_database()
        logging.debug("Item saved")
        self.__write_classes_to_database()
        logging.debug("Classes saved")
        self.__write_npcs_to_database()
        logging.debug("Npcs saved")
        self.__write_rooms_to_database()
        logging.debug("Rooms saved")
        # BUGFIX: the id was baked into the string literal instead of being
        # interpolated — log the actual value
        logging.debug("write dungeon to database: %s", self.managed_dungeon.dungeon_id)
        self.write_accesslist_to_database()
        return self.managed_dungeon.dungeon_id
    except IOError:
        pass
def check_for_dungeon_id(self):
    """Ensure the managed dungeon has an id, minting a UUID4 string if not."""
    dungeon = self.managed_dungeon
    if dungeon.dungeon_id is None:
        dungeon.dungeon_id = str(uuid.uuid4())
def __write_races_to_database(self):
    """Persist every parsed race; a failing write is skipped silently."""
    logging.debug(self.race_list)
    dungeon_id = self.managed_dungeon.dungeon_id
    for race in self.race_list:
        try:
            self.db_handler.write_race_to_database(race=race, dungeon_id=dungeon_id)
        except IOError:
            pass
def __write_classes_to_database(self):
    """Persist every parsed class; a failing write is skipped silently."""
    logging.debug(self.class_list)
    dungeon_id = self.managed_dungeon.dungeon_id
    for class_object in self.class_list:
        try:
            self.db_handler.write_class_to_database(class_object=class_object,
                                                    dungeon_id=dungeon_id)
        except IOError:
            pass
def __write_rooms_to_database(self):
    """Persist every parsed room; a failing write is skipped silently."""
    logging.debug(self.room_list)
    dungeon_id = self.managed_dungeon.dungeon_id
    for room in self.room_list:
        try:
            self.db_handler.write_room_to_database(room=room, dungeon_id=dungeon_id)
        except IOError:
            pass
def write_character_to_database(self, character: Character):
    """Persist a single character under its own dungeon.

    Args:
        character (Character): character to store; its ``dungeon_id``
            attribute decides which dungeon it is written to.
    """
    try:
        self.db_handler.write_character_to_database(character, character.dungeon_id)
    except IOError:
        # best-effort, consistent with the other writer methods
        pass
def __write_items_to_database(self):
    """Persist every parsed item; a failing write is skipped silently."""
    logging.debug(self.item_list)
    dungeon_id = self.managed_dungeon.dungeon_id
    for item in self.item_list:
        try:
            self.db_handler.write_item_to_database(item=item, dungeon_id=dungeon_id)
        except IOError:
            pass
def __write_npcs_to_database(self):
    """Persist every parsed NPC; a failing write is skipped silently."""
    logging.debug(self.npc_list)
    dungeon_id = self.managed_dungeon.dungeon_id
    for npc in self.npc_list:
        try:
            self.db_handler.write_npc_to_database(npc=npc, dungeon_id=dungeon_id)
        except IOError:
            pass
def write_accesslist_to_database(self):
    """Persist every access-list entry of the managed dungeon."""
    dungeon_id = self.managed_dungeon.dungeon_id
    for entry in self.managed_dungeon.access_list.access_list:
        # NOTE: `write_user_to_acceslist` is the handler's (misspelled) API name
        self.db_handler.write_user_to_acceslist(access_list_user=entry, dungeon_id=dungeon_id)
def get_dungeon_by_id(self, user_id_data):
    """Fetch the dungeons belonging to a user.

    Args:
        user_id_data (str): id of the user whose dungeons are looked up.

    Returns:
        The dungeons this user has created, as returned by the DB handler.
    """
    return self.db_handler.get_dungeon_by_id(user_id_data)
def get_dungeon_data_by_dungeon_id(self, dungeon_id):
    """Fetch first-glance data for one dungeon.

    Args:
        dungeon_id (str): id of the dungeon to inspect.

    Returns:
        Dungeon id, master id, name, description, private flag and max
        players, in the DB handler's row format.
    """
    return self.db_handler.get_dungeon_data_by_dungeon_id(dungeon_id)
def delete_dungeon(self, dungeon_id):
    """Delete a dungeon by id; an IOError from the handler is swallowed.

    Args:
        dungeon_id (str): id of the dungeon to remove.
    """
    try:
        self.db_handler.delete_dungeon_by_id(dungeon_id)
    except IOError:
        pass
def copy_dungeon(self, dungeon_id):
    """Duplicate an existing dungeon under a fresh id and persist the copy.

    Loads dungeon metadata, items, rooms, races, classes and NPCs from the
    source dungeon, rebinds them to a new UUID, appends " - copy" to the
    name and writes everything back. The access list is not copied yet.

    Args:
        dungeon_id (str): id of the dungeon to duplicate.
    """
    # reset the working lists so leftovers from a previous parse don't leak in
    self.room_list = []
    self.race_list = []
    self.class_list = []
    self.item_list = []
    self.npc_list = []
    try:
        dungeon = self.db_handler.get_dungeon_data_by_dungeon_id(dungeon_id)
        # BUGFIX: logging.debug("Dungeon: ", dungeon) passed an extra
        # positional arg with no %s placeholder, producing a format error
        logging.debug("Dungeon: %s", dungeon)
        self.managed_dungeon.dungeon_id = str(uuid.uuid4())
        self.managed_dungeon.dungeon_master_id = dungeon[0][1]
        self.managed_dungeon.name = dungeon[0][2] + " - copy"
        self.managed_dungeon.description = dungeon[0][3]
        self.managed_dungeon.private = dungeon[0][4]
        self.managed_dungeon.max_players = dungeon[0][5]
        items = self.db_handler.get_item_by_dungeon_id(dungeon_id)
        for item in items:
            copied_item = Item(item_id=item[0], name=item[1], description=item[2],
                               dungeon_id=self.managed_dungeon.dungeon_id)
            self.item_list.append(copied_item)
        logging.debug("Item List: ")
        logging.debug(self.item_list)
        rooms = self.db_handler.get_all_rooms_by_dungeon_id_as_dict(dungeon_id)
        for room in rooms:
            copied_room = Room(room_id=room['roomID'], room_name=room['roomName'],
                               room_description=room['roomDescription'],
                               coordinate_x=room['x'], coordinate_y=room['y'], north=room['north'],
                               east=room['east'], south=room['south'], west=room['west'],
                               is_start_room=room['isStartRoom'], npc_id=room['npcID'], item_id=room['roomItemID'],
                               dungeon_id=self.managed_dungeon.dungeon_id)
            self.room_list.append(copied_room)
        logging.debug("Room List:")
        logging.debug(self.room_list)
        races = self.db_handler.get_race_by_dungeon_id(dungeon_id)
        for race in races:
            copied_race = Race(race_id=race[0], name=race[1], description=race[2],
                               dungeon_id=self.managed_dungeon.dungeon_id)
            self.race_list.append(copied_race)
        logging.debug("Race List:")
        logging.debug(self.race_list)
        classes = self.db_handler.get_class_by_dungeon_id(dungeon_id)
        for class_tuple in classes:
            copied_class = Class(class_id=class_tuple[0], name=class_tuple[1], description=class_tuple[2],
                                 dungeon_id=self.managed_dungeon.dungeon_id)
            self.class_list.append(copied_class)
        logging.debug("Class List:")
        logging.debug(self.class_list)
        npcs = self.db_handler.get_npc_by_dungeon_id(dungeon_id)
        for npc in npcs:
            copied_npc = Npc(npc_id=npc[0], name=npc[1], description=npc[2], item=npc[3],
                             dungeon_id=self.managed_dungeon.dungeon_id)
            self.npc_list.append(copied_npc)
        logging.debug("NPC List:")
        logging.debug(self.npc_list)
        # TODO: AccessList
        self.write_dungeon_to_database()
    except IOError:
        pass
def get_start_rooms_in_dungeon(self, dungeon_id: str):
    """Return every room of the dungeon flagged as a start room.

    Args:
        dungeon_id (str): id of the dungeon to inspect.

    Returns:
        list: room dicts whose 'isStartRoom' flag is truthy.
    """
    return [room for room in self.get_all_from_room_as_json(dungeon_id)
            if room['isStartRoom']]
def get_all_from_room_as_json(self, data):
    """Assemble every room of a dungeon as a JSON-ready dict.

    Args:
        data (str): dungeon id.

    Returns:
        list: one dict per room, with nested 'npc' and 'item' objects.
    """
    room_list = []
    for row in self.db_handler.get_all_rooms_by_dungeon_id_as_dict(dungeon_id=data):
        # build the nested objects first, then the room record itself
        npc = {'npcID': row['npcID'], 'name': row['npcName'],
               'description': row['npcDescription'],
               'equipment': {'itemID': row['npcItemID'],
                             'name': row['npcItemName'],
                             'description': row['npcItemDesc']}}
        item = {'itemID': row['roomItemID'], 'name': row['roomItemName'],
                'description': row['roomItemDescription']}
        room_list.append({'roomID': row['roomID'], 'name': row['roomName'],
                          'isStartRoom': bool(row['isStartRoom']),
                          'description': row['roomDescription'], 'x': row['x'], 'y': row['y'],
                          'north': bool(row['north']), 'east': bool(row['east']),
                          'south': bool(row['south']), 'west': bool(row['west']),
                          'npc': npc, 'item': item})
    logging.debug(room_list)
    return room_list
def get_data_for_room_list(self, room_ids: [str], dungeon_id: str):
    """Fetch full room data for a specific list of room ids.

    Args:
        room_ids ([str]): ids of the rooms to fetch.
        dungeon_id (str): dungeon the rooms belong to.

    Returns:
        list: one JSON-ready room dict per id that could be resolved;
        unresolvable ids are skipped.
    """
    room_list = []
    # loop variable renamed from `id`, which shadowed the builtin
    for room_id in room_ids:
        try:
            room_dict = self.db_handler.get_room_by_room_id_as_dict(dungeon_id=dungeon_id, room_id=room_id)
            room = {'roomID': room_dict['roomID'], 'name': room_dict['roomName'],
                    'isStartRoom': bool(room_dict['isStartRoom']),
                    'description': room_dict['roomDescription'], 'x': room_dict['x'], 'y': room_dict['y'],
                    'north': bool(room_dict['north']), 'east': bool(room_dict['east']), 'south': bool(room_dict['south']),
                    'west': bool(room_dict['west']), 'npc': {'npcID': room_dict['npcID'], 'name': room_dict['npcName'],
                                                             'description': room_dict['npcDescription'],
                                                             'equipment': {'itemID': room_dict['npcItemID'],
                                                                           'name': room_dict['npcItemName'],
                                                                           'description': room_dict['npcItemDesc']}},
                    'item': {'itemID': room_dict['roomItemID'], 'name': room_dict['roomItemName'],
                             'description': room_dict['roomItemDescription']}}
            room_list.append(room)
        except TypeError:
            # presumably the handler returned None for an unknown id — TODO confirm
            # NOTE(review): should use logging like the rest of the module
            print("TypeError bei get_data_for_room_list")
    return room_list
def get_all_from_classes_as_json(self, data):
    """Assemble every class of a dungeon as a JSON-ready dict.

    Args:
        data (str): dungeon id.

    Returns:
        list: one dict per class, each with a nested 'equipment' object.
    """
    class_list = []
    for row in self.db_handler.get_all_classes_by_dungeon_id_as_dict(dungeon_id=data):
        equipment = {'itemID': row['itemID'], 'name': row['itemName'],
                     'description': row['itemDescription']}
        class_list.append({'classID': row['classID'], 'name': row['name'],
                           'description': row['description'],
                           'equipment': equipment})
    logging.debug(class_list)
    return class_list
def get_all_from_races_as_json(self, data):
    """Return all races of a dungeon in the handler's dict format.

    Args:
        data (str): dungeon id.
    """
    race_rows = self.db_handler.get_all_races_by_dungeon_id_as_dict(dungeon_id=data)
    logging.debug(race_rows)
    return race_rows
def get_all_from_items_as_json(self, data):
    """Return all items of a dungeon as UTF-8 encoded JSON bytes.

    Note: unlike the race/class getters, this returns encoded JSON rather
    than a Python list — callers depend on that.

    Args:
        data (str): dungeon id.
    """
    item_rows = self.db_handler.get_all_item_by_dungeon_id_as_dict(dungeon_id=data)
    logging.debug(item_rows)
    payload = json.dumps(item_rows)
    return payload.encode(encoding='utf_8')
def get_all_from_npcs_as_json(self, data):
    """Return all NPCs of a dungeon as UTF-8 encoded JSON bytes.

    Args:
        data (str): dungeon id.
    """
    npc_list = []
    for row in self.db_handler.get_all_npc_by_dungeon_id_as_dict(dungeon_id=data):
        equipment = {'itemID': row['itemID'], 'name': row['itemName'],
                     'description': row['itemDescription']}
        npc_list.append({'npcID': row['npcID'], 'name': row['name'],
                         'description': row['description'],
                         'equipment': equipment})
    logging.debug(npc_list)
    return json.dumps(npc_list).encode(encoding='utf_8')
def get_character_config(self, data):
    """Return [classes, races] of a dungeon for character creation.

    Args:
        data (str): dungeon id.

    Returns:
        list | None: two-element list [class dicts, race dicts], or ``None``
        when an IOError occurred.
    """
    try:
        classes = self.get_all_from_classes_as_json(data)
        races = self.get_all_from_races_as_json(data)
        return [classes, races]
    except IOError:
        pass
def get_accesslist(self, dungeon_id):
    """Return the access list of a dungeon with 'isAllowed' coerced to bool.

    Args:
        dungeon_id (str): id of the dungeon.

    Returns:
        Access-list entries, or ``None`` when an IOError occurred.
    """
    try:
        entries = self.db_handler.get_access_list_by_dungeon_id_as_dict(dungeon_id)
        for entry in entries:
            entry['isAllowed'] = bool(entry['isAllowed'])
        return entries
    except IOError:
        pass
def get_item_by_class_id(self, class_id: str):
    """Return the item assigned to a class as an ``Item`` object.

    Args:
        class_id (str): id of the class whose item is looked up.

    Returns:
        Item: built from the handler's row.
    """
    row = self.db_handler.get_item_by_class_id(class_id)
    return Item(item_id=row['itemID'], name=row['itemName'],
                description=row['itemDescription'])
def delete_user_from_accesslist(self, data):
    """Remove one user from a dungeon's access list; IOErrors are ignored.

    Args:
        data (dict): must contain 'userName' and 'dungeonID'.
    """
    try:
        self.db_handler.delete_user_from_accesslist(username=data['userName'], dungeon_id=data['dungeonID'])
    except IOError:
        pass
def remove_config_data(self, data):
    """Delete the rooms listed under ``data['deletedRooms']`` from the DB.

    Args:
        data (dict | None): config payload; may be ``None`` or lack the
            'deletedRooms' key, in which case nothing happens.
    """
    try:
        # guard against a None payload: `'x' in None` raises TypeError,
        # which the original `except AttributeError` did not catch
        if data and 'deletedRooms' in data:
            for room_id in data['deletedRooms']:
                self.db_handler.remove_room_by_room_id(room_id)
    except (AttributeError, TypeError):
        # AttributeError kept from the original; TypeError covers
        # non-mapping payloads
        pass
def get_userid_by_character_name(self, character_name: str, dungeon_id: str):
    """Look up the id of the user owning a character in a dungeon.

    Returns:
        The user id, or ``None`` when the handler's result was not
        subscriptable (TypeError path).
    """
    try:
        user = self.db_handler.get_userid_by_character_name(character_name=character_name, dungeon_id=dungeon_id)
        print("user: ", user)
        return user['userID']
    except TypeError:
        pass
def load_room_coordinates(self, room_id):
    """Return the coordinates stored for a room id (format defined by the DB handler)."""
    return self.db_handler.get_coordinates_by_room_id(room_id)
def delete_character(self, user_id: str, dungeon_id: str):
    """Delete the character a user owns in the given dungeon."""
    self.db_handler.delete_character(user_id, dungeon_id)
def delete_inventory(self, user_id, dungeon_id):
    """Delete the inventory a user owns in the given dungeon."""
    self.db_handler.delete_inventory(user_id, dungeon_id)
def delete_discovered_rooms(self, user_id: str, dungeon_id: str):
self.db_handler.delete_discovered_rooms(user_id, dungeon_id) | [
"logging.basicConfig",
"DungeonPackage.Class.Class",
"DungeonPackage.Room.Room",
"logging.debug",
"DungeonPackage.AccessList.AccessList",
"DungeonPackage.ActiveDungeon.ActiveDungeon",
"DungeonPackage.DungeonData.DungeonData",
"json.dumps",
"uuid.uuid4",
"DatabaseHandler.DatabaseHandler.DatabaseHan... | [((3144, 3157), 'DungeonPackage.DungeonData.DungeonData', 'DungeonData', ([], {}), '()\n', (3155, 3157), False, 'from DungeonPackage.DungeonData import DungeonData\n'), ((3166, 3225), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'logging.DEBUG'}), '(stream=sys.stderr, level=logging.DEBUG)\n', (3185, 3225), False, 'import logging\n'), ((3278, 3295), 'DatabaseHandler.DatabaseHandler.DatabaseHandler', 'DatabaseHandler', ([], {}), '()\n', (3293, 3295), False, 'from DatabaseHandler.DatabaseHandler import DatabaseHandler\n'), ((9432, 9635), 'DungeonPackage.ActiveDungeon.ActiveDungeon', 'ActiveDungeon', ([], {'rooms': 'self.room_list', 'classes': 'self.class_list', 'npcs': 'self.npc_list', 'items': 'self.item_list', 'dungeon_data': 'self.managed_dungeon', 'races': 'self.race_list', 'user_ids': 'None', 'character_ids': 'None'}), '(rooms=self.room_list, classes=self.class_list, npcs=self.\n npc_list, items=self.item_list, dungeon_data=self.managed_dungeon,\n races=self.race_list, user_ids=None, character_ids=None)\n', (9445, 9635), False, 'from DungeonPackage.ActiveDungeon import ActiveDungeon\n'), ((10954, 10983), 'logging.debug', 'logging.debug', (['self.race_list'], {}), '(self.race_list)\n', (10967, 10983), False, 'import logging\n'), ((11359, 11389), 'logging.debug', 'logging.debug', (['self.class_list'], {}), '(self.class_list)\n', (11372, 11389), False, 'import logging\n'), ((11833, 11862), 'logging.debug', 'logging.debug', (['self.room_list'], {}), '(self.room_list)\n', (11846, 11862), False, 'import logging\n'), ((12621, 12650), 'logging.debug', 'logging.debug', (['self.item_list'], {}), '(self.item_list)\n', (12634, 12650), False, 'import logging\n'), ((13020, 13048), 'logging.debug', 'logging.debug', (['self.npc_list'], {}), '(self.npc_list)\n', (13033, 13048), False, 'import logging\n'), ((20182, 20206), 'logging.debug', 'logging.debug', (['room_list'], {}), 
'(room_list)\n', (20195, 20206), False, 'import logging\n'), ((22780, 22805), 'logging.debug', 'logging.debug', (['class_list'], {}), '(class_list)\n', (22793, 22805), False, 'import logging\n'), ((23165, 23185), 'logging.debug', 'logging.debug', (['races'], {}), '(races)\n', (23178, 23185), False, 'import logging\n'), ((23539, 23559), 'logging.debug', 'logging.debug', (['items'], {}), '(items)\n', (23552, 23559), False, 'import logging\n'), ((24155, 24178), 'logging.debug', 'logging.debug', (['npc_list'], {}), '(npc_list)\n', (24168, 24178), False, 'import logging\n'), ((25521, 25629), 'DungeonPackage.Item.Item', 'Item', ([], {'item_id': "item_data['itemID']", 'name': "item_data['itemName']", 'description': "item_data['itemDescription']"}), "(item_id=item_data['itemID'], name=item_data['itemName'], description=\n item_data['itemDescription'])\n", (25525, 25629), False, 'from DungeonPackage.Item import Item\n'), ((4948, 5012), 'logging.debug', 'logging.debug', (["('constructor: ' + self.managed_dungeon.dungeon_id)"], {}), "('constructor: ' + self.managed_dungeon.dungeon_id)\n", (4961, 5012), False, 'import logging\n'), ((5076, 5105), 'logging.debug', 'logging.debug', (['"""data is none"""'], {}), "('data is none')\n", (5089, 5105), False, 'import logging\n'), ((5515, 5534), 'logging.debug', 'logging.debug', (['race'], {}), '(race)\n', (5528, 5534), False, 'import logging\n'), ((5559, 5664), 'DungeonPackage.Race.Race', 'Race', ([], {'name': "race['name']", 'description': "race['description']", 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), "(name=race['name'], description=race['description'], dungeon_id=self.\n managed_dungeon.dungeon_id)\n", (5563, 5664), False, 'from DungeonPackage.Race import Race\n'), ((5982, 6001), 'logging.debug', 'logging.debug', (['item'], {}), '(item)\n', (5995, 6001), False, 'import logging\n'), ((6025, 6081), 'DungeonPackage.Item.Item', 'Item', ([], {'name': "item['name']", 'description': "item['description']"}), "(name=item['name'], 
description=item['description'])\n", (6029, 6081), False, 'from DungeonPackage.Item import Item\n'), ((6258, 6276), 'logging.debug', 'logging.debug', (['npc'], {}), '(npc)\n', (6271, 6276), False, 'import logging\n'), ((6299, 6421), 'DungeonPackage.Npc.Npc', 'Npc', ([], {'npc_id': "npc['npcID']", 'name': "npc['name']", 'description': "npc['description']", 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), "(npc_id=npc['npcID'], name=npc['name'], description=npc['description'],\n dungeon_id=self.managed_dungeon.dungeon_id)\n", (6302, 6421), False, 'from DungeonPackage.Npc import Npc\n'), ((6626, 6648), 'logging.debug', 'logging.debug', (['classes'], {}), '(classes)\n', (6639, 6648), False, 'import logging\n'), ((6673, 6785), 'DungeonPackage.Class.Class', 'Class', ([], {'name': "classes['name']", 'description': "classes['description']", 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), "(name=classes['name'], description=classes['description'], dungeon_id=\n self.managed_dungeon.dungeon_id)\n", (6678, 6785), False, 'from DungeonPackage.Class import Class\n'), ((7354, 7373), 'logging.debug', 'logging.debug', (['room'], {}), '(room)\n', (7367, 7373), False, 'import logging\n'), ((7397, 7582), 'DungeonPackage.Room.Room', 'Room', ([], {'coordinate_x': "room['x']", 'coordinate_y': "room['y']", 'north': "room['north']", 'east': "room['east']", 'south': "room['south']", 'west': "room['west']", 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), "(coordinate_x=room['x'], coordinate_y=room['y'], north=room['north'],\n east=room['east'], south=room['south'], west=room['west'], dungeon_id=\n self.managed_dungeon.dungeon_id)\n", (7401, 7582), False, 'from DungeonPackage.Room import Room\n'), ((9797, 9827), 'logging.debug', 'logging.debug', (['"""Dungeon saved"""'], {}), "('Dungeon saved')\n", (9810, 9827), False, 'import logging\n'), ((9885, 9913), 'logging.debug', 'logging.debug', (['"""Races saved"""'], {}), "('Races saved')\n", (9898, 9913), False, 'import logging\n'), 
((9971, 9998), 'logging.debug', 'logging.debug', (['"""Item saved"""'], {}), "('Item saved')\n", (9984, 9998), False, 'import logging\n'), ((10058, 10088), 'logging.debug', 'logging.debug', (['"""Classes saved"""'], {}), "('Classes saved')\n", (10071, 10088), False, 'import logging\n'), ((10145, 10172), 'logging.debug', 'logging.debug', (['"""Npcs saved"""'], {}), "('Npcs saved')\n", (10158, 10172), False, 'import logging\n'), ((10230, 10258), 'logging.debug', 'logging.debug', (['"""Rooms saved"""'], {}), "('Rooms saved')\n", (10243, 10258), False, 'import logging\n'), ((10271, 10346), 'logging.debug', 'logging.debug', (['"""write dungeon to database: self.managed_dungeon.dungeon_id"""'], {}), "('write dungeon to database: self.managed_dungeon.dungeon_id')\n", (10284, 10346), False, 'import logging\n'), ((15162, 15197), 'logging.debug', 'logging.debug', (['"""Dungeon: """', 'dungeon'], {}), "('Dungeon: ', dungeon)\n", (15175, 15197), False, 'import logging\n'), ((15907, 15935), 'logging.debug', 'logging.debug', (['"""Item List: """'], {}), "('Item List: ')\n", (15920, 15935), False, 'import logging\n'), ((15948, 15977), 'logging.debug', 'logging.debug', (['self.item_list'], {}), '(self.item_list)\n', (15961, 15977), False, 'import logging\n'), ((16718, 16745), 'logging.debug', 'logging.debug', (['"""Room List:"""'], {}), "('Room List:')\n", (16731, 16745), False, 'import logging\n'), ((16758, 16787), 'logging.debug', 'logging.debug', (['self.room_list'], {}), '(self.room_list)\n', (16771, 16787), False, 'import logging\n'), ((17120, 17147), 'logging.debug', 'logging.debug', (['"""Race List:"""'], {}), "('Race List:')\n", (17133, 17147), False, 'import logging\n'), ((17160, 17189), 'logging.debug', 'logging.debug', (['self.race_list'], {}), '(self.race_list)\n', (17173, 17189), False, 'import logging\n'), ((17563, 17591), 'logging.debug', 'logging.debug', (['"""Class List:"""'], {}), "('Class List:')\n", (17576, 17591), False, 'import logging\n'), ((17604, 17634), 
'logging.debug', 'logging.debug', (['self.class_list'], {}), '(self.class_list)\n', (17617, 17634), False, 'import logging\n'), ((17967, 17993), 'logging.debug', 'logging.debug', (['"""NPC List:"""'], {}), "('NPC List:')\n", (17980, 17993), False, 'import logging\n'), ((18006, 18034), 'logging.debug', 'logging.debug', (['self.npc_list'], {}), '(self.npc_list)\n', (18019, 18034), False, 'import logging\n'), ((7218, 7263), 'logging.debug', 'logging.debug', (["classes['equipment']['itemID']"], {}), "(classes['equipment']['itemID'])\n", (7231, 7263), False, 'import logging\n'), ((7752, 7784), 'logging.debug', 'logging.debug', (['"""roomID assigned"""'], {}), "('roomID assigned')\n", (7765, 7784), False, 'import logging\n'), ((7872, 7905), 'logging.debug', 'logging.debug', (['"""roomID generated"""'], {}), "('roomID generated')\n", (7885, 7905), False, 'import logging\n'), ((10781, 10793), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10791, 10793), False, 'import uuid\n'), ((15248, 15260), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (15258, 15260), False, 'import uuid\n'), ((15707, 15812), 'DungeonPackage.Item.Item', 'Item', ([], {'item_id': 'item[0]', 'name': 'item[1]', 'description': 'item[2]', 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), '(item_id=item[0], name=item[1], description=item[2], dungeon_id=self.\n managed_dungeon.dungeon_id)\n', (15711, 15812), False, 'from DungeonPackage.Item import Item\n'), ((16124, 16497), 'DungeonPackage.Room.Room', 'Room', ([], {'room_id': "room['roomID']", 'room_name': "room['roomName']", 'room_description': "room['roomDescription']", 'coordinate_x': "room['x']", 'coordinate_y': "room['y']", 'north': "room['north']", 'east': "room['east']", 'south': "room['south']", 'west': "room['west']", 'is_start_room': "room['isStartRoom']", 'npc_id': "room['npcID']", 'item_id': "room['roomItemID']", 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), "(room_id=room['roomID'], room_name=room['roomName'], room_description=\n 
room['roomDescription'], coordinate_x=room['x'], coordinate_y=room['y'],\n north=room['north'], east=room['east'], south=room['south'], west=room[\n 'west'], is_start_room=room['isStartRoom'], npc_id=room['npcID'],\n item_id=room['roomItemID'], dungeon_id=self.managed_dungeon.dungeon_id)\n", (16128, 16497), False, 'from DungeonPackage.Room import Room\n'), ((16921, 17026), 'DungeonPackage.Race.Race', 'Race', ([], {'race_id': 'race[0]', 'name': 'race[1]', 'description': 'race[2]', 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), '(race_id=race[0], name=race[1], description=race[2], dungeon_id=self.\n managed_dungeon.dungeon_id)\n', (16925, 17026), False, 'from DungeonPackage.Race import Race\n'), ((17336, 17464), 'DungeonPackage.Class.Class', 'Class', ([], {'class_id': 'class_tuple[0]', 'name': 'class_tuple[1]', 'description': 'class_tuple[2]', 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), '(class_id=class_tuple[0], name=class_tuple[1], description=class_tuple\n [2], dungeon_id=self.managed_dungeon.dungeon_id)\n', (17341, 17464), False, 'from DungeonPackage.Class import Class\n'), ((17763, 17876), 'DungeonPackage.Npc.Npc', 'Npc', ([], {'npc_id': 'npc[0]', 'name': 'npc[1]', 'description': 'npc[2]', 'item': 'npc[3]', 'dungeon_id': 'self.managed_dungeon.dungeon_id'}), '(npc_id=npc[0], name=npc[1], description=npc[2], item=npc[3], dungeon_id\n =self.managed_dungeon.dungeon_id)\n', (17766, 17876), False, 'from DungeonPackage.Npc import Npc\n'), ((23575, 23592), 'json.dumps', 'json.dumps', (['items'], {}), '(items)\n', (23585, 23592), False, 'import json\n'), ((24194, 24214), 'json.dumps', 'json.dumps', (['npc_list'], {}), '(npc_list)\n', (24204, 24214), False, 'import json\n'), ((4864, 4893), 'DungeonPackage.AccessList.AccessList', 'AccessList', (["data['dungeonID']"], {}), "(data['dungeonID'])\n", (4874, 4893), False, 'from DungeonPackage.AccessList import AccessList\n'), ((5879, 5891), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5889, 5891), False, 'import 
uuid\n'), ((6157, 6169), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (6167, 6169), False, 'import uuid\n'), ((7016, 7028), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7026, 7028), False, 'import uuid\n'), ((7842, 7854), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7852, 7854), False, 'import uuid\n')] |
import logging
from mmc.mobilitytrace import MobilityTrace
class Djcluster (Cluster):
    """Density-joinable clustering (DJ-Cluster) of mobility traces.

    Every trace whose eps-neighbourhood contains at least ``_mintPts``
    traces seeds a cluster; clusters sharing a trace are merged.
    NOTE(review): ``_eps``, ``_mintPts`` and ``dict_clusters`` are not set
    here -- presumably the ``Cluster`` base class provides them; confirm.
    """

    def __init__(self, trialMobilityTraces, speed=0.1):
        self._trialMobilityTraces = trialMobilityTraces
        self._speed = speed
        # List of sets of MobilityTrace objects, one set per cluster.
        self._clusters = []
        # Candidate neighbourhoods that never reached the density threshold.
        self._noise = []
        self._userid = ""
        super(Djcluster, self).__init__()

    def preProcess(self):
        """Drop traces above the speed threshold and contiguous duplicates."""
        logging.info("Number of traces: {0}".format(len(self._trialMobilityTraces)))
        static_mts = MobilityTrace.filterSpeed(self._trialMobilityTraces, self._speed)
        logging.info("Number of traces after speed filter: {0}".format(len(static_mts)))
        static_spaceFiltered = MobilityTrace.spatial_filter(static_mts)
        logging.info("Number of traces after contiguos repeated: {0}".format(len(static_spaceFiltered)))
        return static_spaceFiltered
    #end preProcess

    def _buildClusters(self, traces):
        """Core DJ-Cluster loop shared by doCluster and doCluster2."""
        for mt in traces:
            new_cluster = set()
            new_cluster.add(mt)
            for mti in traces:
                if mt.distance(mti) <= self._eps:
                    new_cluster.add(mti)
            if len(new_cluster) >= self._mintPts:
                # Merge into the first existing cluster sharing a trace.
                merge = False
                for c in self._clusters:
                    if not new_cluster.isdisjoint(c):
                        merge = True
                        # BUG FIX: the original did `c = c.union(new_cluster)`,
                        # which rebinds the loop variable to a NEW set and
                        # silently discards the merge.  update() mutates the
                        # stored cluster in place.
                        c.update(new_cluster)
                        break
                if not merge:
                    self._clusters.append(new_cluster)
            else:
                self._noise.append(new_cluster)
        logging.info("Clusters: {0}".format(len(self._clusters)))
        logging.info("Noise: {0}".format(len(self._noise)))

    def doCluster(self, preprocess=True):
        """Cluster the traces, optionally pre-filtering them first."""
        if preprocess:
            traces = self.preProcess()
        else:
            traces = self._trialMobilityTraces
        self._buildClusters(traces)

    def doCluster2(self):
        """Cluster the raw traces without pre-processing.

        Kept for backward compatibility: the original body was a verbatim
        copy of doCluster(preprocess=False).
        """
        self.doCluster(preprocess=False)

    def post_proccessing(self):
        """Compute the medioid of each cluster and index clusters by position."""
        for index, c in enumerate(self._clusters):
            aux_medioid = MobilityTrace.computeMediod(list(c))
            self.dict_clusters[index] = [c, aux_medioid]

    def getClusters(self):
        return self.dict_clusters

    #######################################
    # Properties
    #######################################
    @property
    def userid(self):
        return self._userid

    @userid.setter
    def userid(self, value):
        self._userid = value

    #######################################
    # export clusters
    #######################################
    def getStops(self):
        """Return one 'userid,latitude,longitude' line per cluster medioid."""
        # BUG FIX: the original built the string with a trailing "\n" and
        # sliced [:-2], which removed the newline AND the final digit of the
        # last longitude.  Joining with "\n" keeps every digit intact.
        set_clusters = self.getClusters()
        lines = []
        for key in set_clusters:
            aux_medioid = (set_clusters[key])[1]
            lines.append(str(self.userid) + "," + str(aux_medioid.latitude) + "," + str(aux_medioid.longitude))
        return "\n".join(lines)
| [
"mmc.mobilitytrace.MobilityTrace.spatial_filter",
"mmc.mobilitytrace.MobilityTrace.filterSpeed"
] | [((744, 809), 'mmc.mobilitytrace.MobilityTrace.filterSpeed', 'MobilityTrace.filterSpeed', (['self._trialMobilityTraces', 'self._speed'], {}), '(self._trialMobilityTraces, self._speed)\n', (769, 809), False, 'from mmc.mobilitytrace import MobilityTrace\n'), ((930, 970), 'mmc.mobilitytrace.MobilityTrace.spatial_filter', 'MobilityTrace.spatial_filter', (['static_mts'], {}), '(static_mts)\n', (958, 970), False, 'from mmc.mobilitytrace import MobilityTrace\n')] |
import glob
import numpy as np
import sys
def group_memory_footprint(memory_list, th_size):
    """Collapse consecutive (timestamp, size) samples into plateau entries.

    A new segment starts whenever the size jumps by more than *th_size*
    between adjacent samples; the finished segment is emitted as
    ``[start_ts, mean(sizes)]`` unless its mean lies within *th_size* of the
    previously emitted entry.  The trailing segment after the last jump is
    not emitted (matches the original behaviour).
    """
    grouped = []
    seg_ts = 0
    seg_sizes = [memory_list[0][1]]
    for prev, cur in zip(memory_list, memory_list[1:]):
        if abs(cur[1] - prev[1]) > th_size:
            seg_mean = np.mean(seg_sizes)
            if not grouped or abs(seg_mean - grouped[-1][1]) > th_size:
                grouped.append([seg_ts, seg_mean])
            seg_ts = cur[0]
            seg_sizes = []
        seg_sizes.append(cur[1])
    return grouped
def pad_array(a, n):
    """Zero-pad a 2-column sequence to *n* rows.

    Returns *a* unchanged when it already has at least *n* rows; otherwise
    returns a fresh ``(n, 2)`` zero array with *a* copied into the top rows.
    """
    if len(a) >= n:
        return a
    padded = np.zeros((n, 2))
    if len(a):
        padded[:len(a), :] = a
    return padded
if __name__ == '__main__':
    # Merge per-run memory logs (files matching the given prefix) into a
    # single worst-case footprint, group near-constant plateaus, convert to
    # MB and write the result as CSV.
    if len(sys.argv) < 2:
        print("Usage: %s memory_file_prefix [output_file]" % (sys.argv[0]))
        exit()
    combined = []
    for path in glob.glob(sys.argv[1] + "*"):
        print(path)
        run_log = np.loadtxt(path, delimiter=',')
        longest = max(len(run_log), len(combined))
        combined = np.maximum(pad_array(run_log, longest),
                             pad_array(combined, longest))
    # threshold of 200 MB
    combined = group_memory_footprint(combined, 204800)
    # store data in MB format
    combined = np.array([[row[0], row[1] / 1024] for row in combined])
    out_path = sys.argv[2] if len(sys.argv) == 3 else "out_memory.log"
    np.savetxt(out_path, combined, delimiter=',', fmt='%.2f')
| [
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.savetxt",
"numpy.maximum",
"numpy.loadtxt",
"glob.glob"
] | [((742, 758), 'numpy.zeros', 'np.zeros', (['(n, 2)'], {}), '((n, 2))\n', (750, 758), True, 'import numpy as np\n'), ((1011, 1039), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '*')"], {}), "(sys.argv[1] + '*')\n", (1020, 1039), False, 'import glob\n'), ((1448, 1499), 'numpy.array', 'np.array', (['[[i[0], i[1] / 1024] for i in memory_log]'], {}), '([[i[0], i[1] / 1024] for i in memory_log])\n', (1456, 1499), True, 'import numpy as np\n'), ((1590, 1648), 'numpy.savetxt', 'np.savetxt', (['outfile', 'memory_log'], {'delimiter': '""","""', 'fmt': '"""%.2f"""'}), "(outfile, memory_log, delimiter=',', fmt='%.2f')\n", (1600, 1648), True, 'import numpy as np\n'), ((1079, 1115), 'numpy.loadtxt', 'np.loadtxt', (['file_name'], {'delimiter': '""","""'}), "(file_name, delimiter=',')\n", (1089, 1115), True, 'import numpy as np\n'), ((1276, 1304), 'numpy.maximum', 'np.maximum', (['data', 'memory_log'], {}), '(data, memory_log)\n', (1286, 1304), True, 'import numpy as np\n'), ((509, 527), 'numpy.mean', 'np.mean', (['size_list'], {}), '(size_list)\n', (516, 527), True, 'import numpy as np\n'), ((409, 427), 'numpy.mean', 'np.mean', (['size_list'], {}), '(size_list)\n', (416, 427), True, 'import numpy as np\n')] |
from __future__ import unicode_literals
import codecs
from collections import namedtuple
import sys
import six
from typing import Union, Optional, List
from namedlist import namedlist
class CoNLLNode(object):
    """Mixin base for CoNLL-style token rows.

    Subclasses are expected to be tuple-like (namedtuple/namedlist): the
    constructor must accept one positional argument per column, and
    instances must be iterable over their column values.
    """

    @classmethod
    def from_line(cls, line):
        """Parse one whitespace-separated line into a node instance."""
        columns = line.split()
        try:
            return cls(*columns)
        except TypeError:
            # Surface the offending line before propagating the error.
            print("Line: " + line)
            raise

    def to_line(self, sep='\t'):
        """Serialise the node's columns joined by *sep*."""
        parts = (six.text_type(col) for col in self)
        return sep.join(parts)
class CoNLLUNode(CoNLLNode,
                 namedlist("_", ["id_", "form", "lemma", "cpostag", "postag", "feats",
                                 "head", "deprel", "deps", "misc"])):
    """Mutable CoNLL-U token row; short lines are padded with '_'."""

    @classmethod
    def from_line(cls, line):
        """Parse a CoNLL-U line, padding missing trailing columns with '_'."""
        columns = line.split()
        missing = len(cls._fields) - len(columns)
        if missing > 0:
            columns.extend("_" for _ in range(missing))
        try:
            return cls(*columns)
        except TypeError:
            print("Line: " + line)
            raise
class CoNLL09Node(namedtuple("_", ["id_", "form", "lemma", "plemma", "postag", "ppostag", "feats",
                                  "pfeats", "head", "phead", "deprel", "pdeprel", "fillpred", "pred", "arg"])):
    """CoNLL-2009 token row; the trailing APRED columns are kept as a list."""

    @classmethod
    def from_line(cls, line):
        """Parse one CoNLL-09 line; short lines are padded to 14 columns with '-'."""
        columns = line.split()
        if len(columns) < 13:
            raise AttributeError("too few fields: {}".format(line))
        missing = 14 - len(columns)
        if missing > 0:
            columns += ["-"] * missing
        fixed = columns[:14]
        fixed.append(columns[14:])
        return cls(*fixed)

    def to_line(self, sep="\t"):
        """Serialise the fixed columns plus the APRED list joined by *sep*."""
        return sep.join(list(self[:-1]) + self[-1])
class CoNLL08Node(namedtuple("_", ["id_", "form", "lemma", "postag", "ppostag",
                                  "split_form", "split_lemma", "ppostags",
                                  "head", "deprel", "pred", "arg"])):
    """CoNLL-2008 token row; trailing argument columns are kept as a list."""

    @classmethod
    def from_line(cls, line):
        """Parse one line: eleven fixed columns plus a variable argument list."""
        columns = line.split()
        fixed = columns[:11]
        fixed.append(columns[11:])
        try:
            return cls(*fixed)
        except TypeError:
            print("Line: " + line)
            raise

    def to_line(self, sep="\t"):
        """Serialise the fixed columns plus the argument list joined by *sep*."""
        flat = list(self[:-1]) + self[-1]
        return sep.join(six.text_type(col) for col in flat)

    def __hash__(self):
        # Hash by token id; equal rows share the same id, so this stays
        # consistent with tuple equality.
        return int(self.id_)
class SDPNode(namedlist("_", ["id_", "form", "lemma", "postag",
                              "top", "pred", "sense", "arg"])):
    """Semantic dependency parsing (SDP) token row with an argument list."""

    @classmethod
    def from_line(cls, line):
        """Parse one line: seven fixed columns plus a variable argument list."""
        columns = line.split()
        fixed = columns[:7]
        fixed.append(columns[7:])
        try:
            return cls(*fixed)
        except TypeError:
            print("Line: " + line)
            raise

    def to_line(self, sep="\t"):
        """Serialise the fixed columns plus the argument list joined by *sep*."""
        flat = list(self[:-1]) + self[-1]
        return sep.join(six.text_type(col) for col in flat)

    def __hash__(self):
        # Hash by token id (namedlist instances are otherwise unhashable).
        return int(self.id_)
class OldSDPNode(namedlist("_", ["id_", "form", "lemma", "postag",
                                 "top", "pred", "arg"])):
    """Legacy SDP token row (no sense column)."""

    @classmethod
    def from_line(cls, line):
        """Parse one line: six fixed columns plus a variable argument list."""
        columns = line.split()
        fixed = columns[:6]
        if len(fixed) == 4:
            # Lines lacking the top/pred columns get placeholder dashes.
            fixed.extend(["-", "-"])
        fixed.append(columns[6:])
        try:
            return cls(*fixed)
        except TypeError:
            print("Line: " + line)
            raise

    def to_line(self, sep="\t"):
        """Serialise the fixed columns plus the argument list joined by *sep*."""
        flat = list(self[:-1]) + self[-1]
        return sep.join(six.text_type(col) for col in flat)

    def __hash__(self):
        # Hash by token id (namedlist instances are otherwise unhashable).
        return int(self.id_)
class TTNode(namedtuple("_", ["start", "end", "form", "lemma", "postag"])):
    """Token row with character span offsets plus word information."""

    @classmethod
    def from_line(cls, line):
        """Parse one whitespace-separated line into a TTNode."""
        return cls(*line.split())

    def to_line(self):
        # Serialisation is deliberately unsupported for this format.
        raise NotImplementedError
class SimpleNode(namedtuple("_", ["word", "postag", "head", "deprel"])):
    """Minimal dependency token: word, POS tag, head index and relation."""

    @classmethod
    def from_line(cls, line):
        """Parse one whitespace-separated line into a SimpleNode."""
        return cls(*line.split())

    def to_line(self, sep='\t'):
        """Serialise all four columns joined by *sep*."""
        return sep.join(six.text_type(col) for col in self)
class CoNLL06Node(CoNLLNode,
                  namedtuple("_", ["id_", "form", "lemma", "cpostag", "postag",
                                   "feats", "head", "deprel", "phead", "pdeprel"])):
    """CoNLL-X (2006) token row; parsing/serialisation come from CoNLLNode."""
    pass
class BaseConLLSentence(list):
    """A parsed sentence: a list of token nodes plus optional comments.

    Subclasses set ``NodeType`` to the node class used to parse each line.
    """
    NodeType = None

    def __init__(self, *args):
        super(BaseConLLSentence, self).__init__(*args)
        # None, a single comment string, or a list of comment strings.
        self.comment = None  # type: Optional[Union[str, List[str]]]

    @classmethod
    def get_sentence(cls, file_object):
        """Read one blank-line-terminated sentence from *file_object*.

        Leading '#' lines are accumulated into ``comment`` unless the '#'
        is immediately followed by a tab (which marks a data row whose
        first column happens to be '#').
        """
        sentence = cls()
        start = True
        for raw in file_object:
            line = raw.strip()
            if not line:
                break
            # BUG FIX: the original tested line[1], which raises IndexError
            # on a bare "#" line; the slice line[1:2] is safe and treats a
            # bare "#" as an empty comment.
            if start and line.startswith("#") and line[1:2] != "\t":
                comment = line[1:].strip()
                if sentence.comment is None:
                    sentence.comment = comment
                elif isinstance(sentence.comment, six.string_types):
                    sentence.comment = [sentence.comment, comment]
                else:
                    assert isinstance(sentence.comment, list)
                    sentence.comment.append(comment)
                continue
            sentence.append(cls.NodeType.from_line(line))
            start = False
        return sentence

    @classmethod
    def get_all_sentences(cls, file_object, limit=None):
        """Read sentences until EOF (or until *limit* sentences) and return them."""
        result = []
        while True:
            sentence = cls.get_sentence(file_object)
            if not sentence:
                break
            result.append(sentence)
            if limit is not None and len(result) == limit:
                break
        return result

    def get_comment_line(self):
        """Render ``comment`` back into '#'-prefixed line(s), or '' when absent."""
        if self.comment is None:
            return ""
        elif isinstance(self.comment, six.string_types):
            return "# {}".format(self.comment.strip()).replace("\n", " ") + "\n"
        else:
            assert isinstance(self.comment, (list, tuple))
            return "\n".join("# {}".format(i.strip()).replace("\n", " ") for i in self.comment) + "\n"

    def to_string(self, sep="\t"):
        """Serialise comments and nodes, terminated by a blank line."""
        return self.get_comment_line() + "\n".join(i.to_line(sep) for i in self) + "\n\n"
class CoNLL09Sentence(BaseConLLSentence):
    """Sentence of CoNLL-2009 token rows."""
    NodeType = CoNLL09Node
class CoNLLUSentence(BaseConLLSentence):
    """Sentence of CoNLL-U token rows."""
    NodeType = CoNLLUNode
class TTSentence(BaseConLLSentence):
    """Sentence of span-annotated TT token rows."""
    NodeType = TTNode
class CoNLL08Sentence(BaseConLLSentence):
    """Sentence of CoNLL-2008 token rows."""
    NodeType = CoNLL08Node
class SimpleSentence(BaseConLLSentence):
    """Sentence of minimal four-column token rows."""
    NodeType = SimpleNode
class CoNLL06Sentence(BaseConLLSentence):
    """Sentence of CoNLL-X (2006) token rows."""
    NodeType = CoNLL06Node
class SDPSentence(BaseConLLSentence):
    """Sentence of SDP rows; '#' lines set the comment, 'null' lines are skipped."""
    NodeType = SDPNode

    @classmethod
    def get_sentence(cls, file_object):
        """Read one blank-line-terminated sentence.

        Unlike the base class, comment lines may appear anywhere and only
        the last one is kept; literal 'null' lines are ignored.
        """
        sentence = cls()
        for raw in file_object:
            stripped = raw.strip()
            if stripped.startswith("#"):
                sentence.comment = stripped[1:].strip()
            elif stripped == "null":
                pass  # placeholder rows carry no data
            elif not stripped:
                break
            else:
                sentence.append(cls.NodeType.from_line(stripped))
        return sentence
class OldSDPSentence(BaseConLLSentence):
    """Sentence of legacy SDP rows; '#' lines set the comment, 'null' lines are skipped."""
    NodeType = OldSDPNode

    @classmethod
    def get_sentence(cls, file_object):
        """Read one blank-line-terminated sentence.

        Comment lines may appear anywhere and only the last one is kept;
        literal 'null' lines are ignored.
        """
        sentence = cls()
        for raw in file_object:
            stripped = raw.strip()
            if stripped.startswith("#"):
                sentence.comment = stripped[1:].strip()
            elif stripped == "null":
                pass  # placeholder rows carry no data
            elif not stripped:
                break
            else:
                sentence.append(cls.NodeType.from_line(stripped))
        return sentence
def sent_convert(sent):
    """Convert a CoNLL-09 sentence into a CoNLL-U sentence.

    Only the gold columns are carried over; cpostag, deps and misc are
    filled with the '_' placeholder.
    """
    result = CoNLLUSentence()
    for node in sent:
        # noinspection PyArgumentList
        # BUG FIX: the original passed an extra trailing [] argument --
        # eleven positionals for CoNLLUNode's ten fields -- which raises
        # TypeError at runtime.
        result.append(
            CoNLLUNode(node.id_, node.form, node.lemma, '_', node.postag,
                       node.feats, node.head, node.deprel, '_', '_'))
    return result
def main():
    """Convert the CoNLL-09 file at argv[1] to CoNLL-U, written to argv[2]."""
    with codecs.open(sys.argv[1]) as source:
        sentences = CoNLL09Sentence.get_all_sentences(source)
    with codecs.open(sys.argv[2], "w") as target:
        for sentence in sentences:
            target.write(sent_convert(sentence).to_string())
def make_converter(SourceSentence, TargetSentence, node_converter, sep="\t"):
    """Build a file-to-file converter between two sentence formats.

    *node_converter* maps a source node to a target node; *sep* is the
    column separator used when serialising the converted sentences.
    """
    def converter(source_file, output_file):
        with codecs.open(source_file) as src:
            parsed = SourceSentence.get_all_sentences(src)
        with codecs.open(output_file, "w") as dst:
            for sentence in parsed:
                target_sentence = TargetSentence(node_converter(node) for node in sentence)
                dst.write(target_sentence.to_string(sep))
    return converter
if __name__ == '__main__':
    # Script entry point: convert CoNLL-09 (argv[1]) to CoNLL-U (argv[2]).
    main()
| [
"namedlist.namedlist",
"codecs.open",
"collections.namedtuple",
"six.text_type"
] | [((554, 662), 'namedlist.namedlist', 'namedlist', (['"""_"""', "['id_', 'form', 'lemma', 'cpostag', 'postag', 'feats', 'head', 'deprel',\n 'deps', 'misc']"], {}), "('_', ['id_', 'form', 'lemma', 'cpostag', 'postag', 'feats',\n 'head', 'deprel', 'deps', 'misc'])\n", (563, 662), False, 'from namedlist import namedlist\n'), ((1038, 1202), 'collections.namedtuple', 'namedtuple', (['"""_"""', "['id_', 'form', 'lemma', 'plemma', 'postag', 'ppostag', 'feats', 'pfeats',\n 'head', 'phead', 'deprel', 'pdeprel', 'fillpred', 'pred', 'arg']"], {}), "('_', ['id_', 'form', 'lemma', 'plemma', 'postag', 'ppostag',\n 'feats', 'pfeats', 'head', 'phead', 'deprel', 'pdeprel', 'fillpred',\n 'pred', 'arg'])\n", (1048, 1202), False, 'from collections import namedtuple\n'), ((1683, 1823), 'collections.namedtuple', 'namedtuple', (['"""_"""', "['id_', 'form', 'lemma', 'postag', 'ppostag', 'split_form', 'split_lemma',\n 'ppostags', 'head', 'deprel', 'pred', 'arg']"], {}), "('_', ['id_', 'form', 'lemma', 'postag', 'ppostag', 'split_form',\n 'split_lemma', 'ppostags', 'head', 'deprel', 'pred', 'arg'])\n", (1693, 1823), False, 'from collections import namedtuple\n'), ((2335, 2420), 'namedlist.namedlist', 'namedlist', (['"""_"""', "['id_', 'form', 'lemma', 'postag', 'top', 'pred', 'sense', 'arg']"], {}), "('_', ['id_', 'form', 'lemma', 'postag', 'top', 'pred', 'sense',\n 'arg'])\n", (2344, 2420), False, 'from namedlist import namedlist\n'), ((2893, 2965), 'namedlist.namedlist', 'namedlist', (['"""_"""', "['id_', 'form', 'lemma', 'postag', 'top', 'pred', 'arg']"], {}), "('_', ['id_', 'form', 'lemma', 'postag', 'top', 'pred', 'arg'])\n", (2902, 2965), False, 'from namedlist import namedlist\n'), ((3504, 3564), 'collections.namedtuple', 'namedtuple', (['"""_"""', "['start', 'end', 'form', 'lemma', 'postag']"], {}), "('_', ['start', 'end', 'form', 'lemma', 'postag'])\n", (3514, 3564), False, 'from collections import namedtuple\n'), ((3749, 3802), 'collections.namedtuple', 'namedtuple', 
(['"""_"""', "['word', 'postag', 'head', 'deprel']"], {}), "('_', ['word', 'postag', 'head', 'deprel'])\n", (3759, 3802), False, 'from collections import namedtuple\n'), ((4049, 4162), 'collections.namedtuple', 'namedtuple', (['"""_"""', "['id_', 'form', 'lemma', 'cpostag', 'postag', 'feats', 'head', 'deprel',\n 'phead', 'pdeprel']"], {}), "('_', ['id_', 'form', 'lemma', 'cpostag', 'postag', 'feats',\n 'head', 'deprel', 'phead', 'pdeprel'])\n", (4059, 4162), False, 'from collections import namedtuple\n'), ((7877, 7901), 'codecs.open', 'codecs.open', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (7888, 7901), False, 'import codecs\n'), ((7974, 8003), 'codecs.open', 'codecs.open', (['sys.argv[2]', '"""w"""'], {}), "(sys.argv[2], 'w')\n", (7985, 8003), False, 'import codecs\n'), ((8233, 8257), 'codecs.open', 'codecs.open', (['source_file'], {}), '(source_file)\n', (8244, 8257), False, 'import codecs\n'), ((475, 491), 'six.text_type', 'six.text_type', (['i'], {}), '(i)\n', (488, 491), False, 'import six\n'), ((2210, 2226), 'six.text_type', 'six.text_type', (['i'], {}), '(i)\n', (2223, 2226), False, 'import six\n'), ((2765, 2781), 'six.text_type', 'six.text_type', (['i'], {}), '(i)\n', (2778, 2781), False, 'import six\n'), ((3380, 3396), 'six.text_type', 'six.text_type', (['i'], {}), '(i)\n', (3393, 3396), False, 'import six\n'), ((3968, 3984), 'six.text_type', 'six.text_type', (['i'], {}), '(i)\n', (3981, 3984), False, 'import six\n'), ((8337, 8366), 'codecs.open', 'codecs.open', (['output_file', '"""w"""'], {}), "(output_file, 'w')\n", (8348, 8366), False, 'import codecs\n')] |
"""Test for hydromt.gis_utils submodule"""
import pytest
import numpy as np
from hydromt import gis_utils as gu
from hydromt.raster import full_from_transform, RasterDataArray
from rasterio.transform import from_origin
def test_crs():
    """UTM zone detection and CRS axis attribute naming."""
    nl_bbox = [3, 51.5, 4, 52]  # NL
    assert gu.utm_crs(nl_bbox).to_epsg() == 32631
    assert gu.parse_crs("utm", nl_bbox).to_epsg() == 32631
    peru_bbox = [-77.5, -12.2, -77.0, -12.0]
    assert gu.utm_crs(peru_bbox).to_epsg() == 32718
    _, _, x_attrs, y_attrs = gu.axes_attrs(gu.parse_crs(4326))
    assert x_attrs["units"] == "degrees_east"
    assert y_attrs["units"] == "degrees_north"
    _, _, x_attrs, y_attrs = gu.axes_attrs(gu.utm_crs(peru_bbox))
    assert x_attrs["units"] == y_attrs["units"] == "m"
def test_transform():
    """affine_to_coords cell centers and meridian_offset shifting."""
    affine = from_origin(0, 90, 1, 1)
    dims = (180, 360)
    xs, ys = gu.affine_to_coords(affine, dims)
    assert np.all(ys == 90 - np.arange(0.5, dims[0]))
    assert np.all(xs == np.arange(0.5, dims[1]))
    # offset for geographic crs
    grid = full_from_transform(affine, dims, crs=4326)
    shifted = gu.meridian_offset(grid, x_name="x")
    assert shifted.raster.bounds[0] == -180
    east = gu.meridian_offset(shifted, x_name="x", bbox=[170, 0, 190, 10])
    assert east.raster.bounds[0] == 170
    west = gu.meridian_offset(shifted, x_name="x", bbox=[-190, 0, -170, 10])
    assert west.raster.bounds[2] == -170
assert da3.raster.bounds[2] == -170
def test_area_res():
# surface area of earth should be approx 510.100.000 km2
transform = from_origin(-180, 90, 1, 1)
shape = (180, 360)
da = full_from_transform(transform, shape, crs=4326)
assert np.isclose(da.raster.area_grid().sum() / 1e6, 510064511.156224)
assert gu.cellres(0) == (111319.458, 110574.2727)
def test_gdf(world):
    """filter_gdf by geometry and bbox; invalid mask type must raise."""
    first_country = world.iloc[[0], :].to_crs(3857)
    assert np.all(gu.filter_gdf(world, first_country) == 0)
    hit = gu.filter_gdf(world, bbox=[3, 51.5, 4, 52])[0]
    assert world.iloc[hit]["iso_a3"] == "NLD"
    with pytest.raises(ValueError, match="Unknown geometry mask type"):
        gu.filter_gdf(world, geom=[3, 51.5, 4, 52])
def test_nearest(world, geodf):
    """nearest / nearest_merge point-to-geometry matching."""
    idx, _ = gu.nearest(geodf, geodf)
    assert np.all(idx == geodf.index)
    idx, dist = gu.nearest(geodf, world)
    assert np.all(dist == 0)
    assert np.all(world.loc[idx, "name"].values == geodf["country"].values)
    gdf0 = geodf.copy()
    gdf0["iso_a3"] = ""
    merged = gu.nearest_merge(geodf, world.drop(idx), max_dist=1e6)
    far = merged["distance_right"] > 1e6
    assert np.all(merged.loc[far, "index_right"] == -1)
    assert np.all(merged.loc[far, "iso_a3"] != "")
def test_spread():
    """spread2d wrapper: source value/idx/dst outputs and nodata validation."""
    affine = from_origin(-15, 10, 1, 1)
    dims = (20, 30)
    obs = np.zeros(dims)
    obs[10, 10] = 1  # lin index 310
    friction = np.ones(dims)
    mask = np.ones(dims, dtype=bool)
    da_obs = RasterDataArray.from_numpy(obs, transform=affine, nodata=0, crs=4326)
    da_msk = RasterDataArray.from_numpy(mask, transform=affine, crs=4326)
    da_frc = RasterDataArray.from_numpy(friction, transform=affine, crs=4326)
    # only testing the wrapping of pyflwdir method, not the method itself
    result = gu.spread2d(da_obs, da_friction=da_frc, da_mask=da_msk)
    assert np.all(result["source_value"] == 1)
    assert np.all(result["source_idx"] == 310)
    assert result["source_dst"].values[10, 10] == 0
    with pytest.raises(ValueError, match='"nodata" must be a finite value'):
        gu.spread2d(da_obs, nodata=np.nan)
| [
"hydromt.raster.full_from_transform",
"numpy.ones",
"rasterio.transform.from_origin",
"hydromt.gis_utils.spread2d",
"hydromt.gis_utils.parse_crs",
"hydromt.gis_utils.filter_gdf",
"hydromt.gis_utils.affine_to_coords",
"hydromt.raster.RasterDataArray.from_numpy",
"hydromt.gis_utils.utm_crs",
"numpy.... | [((769, 793), 'rasterio.transform.from_origin', 'from_origin', (['(0)', '(90)', '(1)', '(1)'], {}), '(0, 90, 1, 1)\n', (780, 793), False, 'from rasterio.transform import from_origin\n'), ((830, 867), 'hydromt.gis_utils.affine_to_coords', 'gu.affine_to_coords', (['transform', 'shape'], {}), '(transform, shape)\n', (849, 867), True, 'from hydromt import gis_utils as gu\n'), ((1015, 1062), 'hydromt.raster.full_from_transform', 'full_from_transform', (['transform', 'shape'], {'crs': '(4326)'}), '(transform, shape, crs=4326)\n', (1034, 1062), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((1073, 1107), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da'], {'x_name': '"""x"""'}), "(da, x_name='x')\n", (1091, 1107), True, 'from hydromt import gis_utils as gu\n'), ((1158, 1217), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da1'], {'x_name': '"""x"""', 'bbox': '[170, 0, 190, 10]'}), "(da1, x_name='x', bbox=[170, 0, 190, 10])\n", (1176, 1217), True, 'from hydromt import gis_utils as gu\n'), ((1267, 1328), 'hydromt.gis_utils.meridian_offset', 'gu.meridian_offset', (['da1'], {'x_name': '"""x"""', 'bbox': '[-190, 0, -170, 10]'}), "(da1, x_name='x', bbox=[-190, 0, -170, 10])\n", (1285, 1328), True, 'from hydromt import gis_utils as gu\n'), ((1469, 1496), 'rasterio.transform.from_origin', 'from_origin', (['(-180)', '(90)', '(1)', '(1)'], {}), '(-180, 90, 1, 1)\n', (1480, 1496), False, 'from rasterio.transform import from_origin\n'), ((1529, 1576), 'hydromt.raster.full_from_transform', 'full_from_transform', (['transform', 'shape'], {'crs': '(4326)'}), '(transform, shape, crs=4326)\n', (1548, 1576), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((2152, 2176), 'hydromt.gis_utils.nearest', 'gu.nearest', (['geodf', 'geodf'], {}), '(geodf, geodf)\n', (2162, 2176), True, 'from hydromt import gis_utils as gu\n'), ((2188, 2214), 'numpy.all', 'np.all', (['(idx == geodf.index)'], {}), 
'(idx == geodf.index)\n', (2194, 2214), True, 'import numpy as np\n'), ((2230, 2254), 'hydromt.gis_utils.nearest', 'gu.nearest', (['geodf', 'world'], {}), '(geodf, world)\n', (2240, 2254), True, 'from hydromt import gis_utils as gu\n'), ((2266, 2282), 'numpy.all', 'np.all', (['(dst == 0)'], {}), '(dst == 0)\n', (2272, 2282), True, 'import numpy as np\n'), ((2294, 2358), 'numpy.all', 'np.all', (["(world.loc[idx, 'name'].values == geodf['country'].values)"], {}), "(world.loc[idx, 'name'].values == geodf['country'].values)\n", (2300, 2358), True, 'import numpy as np\n'), ((2484, 2557), 'numpy.all', 'np.all', (["(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'index_right'] == -1)"], {}), "(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'index_right'] == -1)\n", (2490, 2557), True, 'import numpy as np\n'), ((2563, 2631), 'numpy.all', 'np.all', (["(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'iso_a3'] != '')"], {}), "(gdf1.loc[gdf1['distance_right'] > 1000000.0, 'iso_a3'] != '')\n", (2569, 2631), True, 'import numpy as np\n'), ((2663, 2689), 'rasterio.transform.from_origin', 'from_origin', (['(-15)', '(10)', '(1)', '(1)'], {}), '(-15, 10, 1, 1)\n', (2674, 2689), False, 'from rasterio.transform import from_origin\n'), ((2722, 2737), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (2730, 2737), True, 'import numpy as np\n'), ((2786, 2800), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (2793, 2800), True, 'import numpy as np\n'), ((2811, 2837), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'bool'}), '(shape, dtype=bool)\n', (2818, 2837), True, 'import numpy as np\n'), ((2851, 2924), 'hydromt.raster.RasterDataArray.from_numpy', 'RasterDataArray.from_numpy', (['data'], {'transform': 'transform', 'nodata': '(0)', 'crs': '(4326)'}), '(data, transform=transform, nodata=0, crs=4326)\n', (2877, 2924), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((2938, 3000), 'hydromt.raster.RasterDataArray.from_numpy', 
'RasterDataArray.from_numpy', (['msk'], {'transform': 'transform', 'crs': '(4326)'}), '(msk, transform=transform, crs=4326)\n', (2964, 3000), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((3014, 3076), 'hydromt.raster.RasterDataArray.from_numpy', 'RasterDataArray.from_numpy', (['frc'], {'transform': 'transform', 'crs': '(4326)'}), '(frc, transform=transform, crs=4326)\n', (3040, 3076), False, 'from hydromt.raster import full_from_transform, RasterDataArray\n'), ((3164, 3219), 'hydromt.gis_utils.spread2d', 'gu.spread2d', (['da_obs'], {'da_friction': 'da_frc', 'da_mask': 'da_msk'}), '(da_obs, da_friction=da_frc, da_mask=da_msk)\n', (3175, 3219), True, 'from hydromt import gis_utils as gu\n'), ((3231, 3266), 'numpy.all', 'np.all', (["(ds_out['source_value'] == 1)"], {}), "(ds_out['source_value'] == 1)\n", (3237, 3266), True, 'import numpy as np\n'), ((3278, 3313), 'numpy.all', 'np.all', (["(ds_out['source_idx'] == 310)"], {}), "(ds_out['source_idx'] == 310)\n", (3284, 3313), True, 'import numpy as np\n'), ((505, 523), 'hydromt.gis_utils.parse_crs', 'gu.parse_crs', (['(4326)'], {}), '(4326)\n', (517, 523), True, 'from hydromt import gis_utils as gu\n'), ((657, 674), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox1'], {}), '(bbox1)\n', (667, 674), True, 'from hydromt import gis_utils as gu\n'), ((1663, 1676), 'hydromt.gis_utils.cellres', 'gu.cellres', (['(0)'], {}), '(0)\n', (1673, 1676), True, 'from hydromt import gis_utils as gu\n'), ((1840, 1883), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world'], {'bbox': '[3, 51.5, 4, 52]'}), '(world, bbox=[3, 51.5, 4, 52])\n', (1853, 1883), True, 'from hydromt import gis_utils as gu\n'), ((1990, 2051), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Unknown geometry mask type"""'}), "(ValueError, match='Unknown geometry mask type')\n", (2003, 2051), False, 'import pytest\n'), ((2061, 2104), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world'], {'geom': '[3, 51.5, 
4, 52]'}), '(world, geom=[3, 51.5, 4, 52])\n', (2074, 2104), True, 'from hydromt import gis_utils as gu\n'), ((3375, 3441), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '""""nodata" must be a finite value"""'}), '(ValueError, match=\'"nodata" must be a finite value\')\n', (3388, 3441), False, 'import pytest\n'), ((3451, 3485), 'hydromt.gis_utils.spread2d', 'gu.spread2d', (['da_obs'], {'nodata': 'np.nan'}), '(da_obs, nodata=np.nan)\n', (3462, 3485), True, 'from hydromt import gis_utils as gu\n'), ((947, 971), 'numpy.arange', 'np.arange', (['(0.5)', 'shape[1]'], {}), '(0.5, shape[1])\n', (956, 971), True, 'import numpy as np\n'), ((1793, 1822), 'hydromt.gis_utils.filter_gdf', 'gu.filter_gdf', (['world', 'country'], {}), '(world, country)\n', (1806, 1822), True, 'from hydromt import gis_utils as gu\n'), ((283, 299), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox'], {}), '(bbox)\n', (293, 299), True, 'from hydromt import gis_utils as gu\n'), ((330, 355), 'hydromt.gis_utils.parse_crs', 'gu.parse_crs', (['"""utm"""', 'bbox'], {}), "('utm', bbox)\n", (342, 355), True, 'from hydromt import gis_utils as gu\n'), ((427, 444), 'hydromt.gis_utils.utm_crs', 'gu.utm_crs', (['bbox1'], {}), '(bbox1)\n', (437, 444), True, 'from hydromt import gis_utils as gu\n'), ((897, 921), 'numpy.arange', 'np.arange', (['(0.5)', 'shape[0]'], {}), '(0.5, shape[0])\n', (906, 921), True, 'import numpy as np\n')] |
"""This module scrapes game information from wikipedia lists to compile into a database"""
import pandas as pd
import pymysql
def connectDatabase():
    """Open the shared MySQL connection.

    The connection is stored in the module-level ``db`` global so the
    scraping and insert helpers below can all reuse it.
    """
    global db
    db = pymysql.connect(
        host='localhost',
        user='root',
        password='',
        db='vg_dapi',
        cursorclass=pymysql.cursors.DictCursor,
        charset='utf8mb4',
    )
def scrape_ps4_games():
    """Scrape PS4 game info from the Wikipedia list pages and store it.

    Reads both list pages (the second covers titles M-Z), extracts
    title, genre, developer, publisher and the three regional release
    dates, then inserts each game, its platform link (platform id 1)
    and its genres into the database.
    """
    def _text_or_unknown(value):
        # pandas represents missing table cells as NaN (a float).
        return "Unknown" if type(value) is float else value

    def _clean_date(value):
        # Missing dates come through as NaN; long cells embed the ISO
        # date at positions 8:18 (Wikipedia sort-key prefix format).
        if type(value) is float:
            return "Unreleased"
        if len(value) > 14:
            return value[8:18]
        return value

    url = r'https://en.wikipedia.org/wiki/List_of_PlayStation_4_games'
    url2 = r'https://en.wikipedia.org/wiki/List_of_PlayStation_4_games_(M-Z)'
    tables = pd.read_html(url)    # Returns list of all tables on page
    tables2 = pd.read_html(url2)
    ps4games = pd.concat([tables[2], tables2[0]])  # Select table of interest
    titles = ps4games[ps4games.columns[0]].tolist()
    genres = ps4games[ps4games.columns[1]].tolist()
    developers = ps4games[ps4games.columns[2]].tolist()
    publishers = ps4games[ps4games.columns[3]].tolist()
    dateUS = ps4games[ps4games.columns[6]].tolist()
    dateJP = ps4games[ps4games.columns[5]].tolist()
    dateEU = ps4games[ps4games.columns[7]].tolist()
    for count in range(len(titles)):
        # Skip repeated header rows that pandas picks up mid-table.
        if titles[count] != "Title" and titles[count] != "JP":
            game_details = [
                titles[count],
                _text_or_unknown(publishers[count]),
                _text_or_unknown(developers[count]),
                _clean_date(dateUS[count]),
                _clean_date(dateJP[count]),
                _clean_date(dateEU[count]),
            ]
            newGameId = insertGame(game_details)  # id of the inserted game
            insertGamePlatform(newGameId, 1)      # platform id 1 = PS4
            GenreIDs = insertGenres(genres[count])
            insertGameGenres(newGameId, GenreIDs)
def scrape_xboxone_games():
    """Scrape Xbox One game info from the Wikipedia list page and store it.

    Extracts title, genre, developer, publisher and the three regional
    release dates, then inserts each game, its platform link
    (platform id 2) and its genres into the database.
    """
    def _text_or_unknown(value):
        # pandas represents missing table cells as NaN (a float).
        return "Unknown" if type(value) is float else value

    def _clean_date(value):
        # Missing dates come through as NaN; long cells embed the ISO
        # date at positions 8:18 (Wikipedia sort-key prefix format).
        if type(value) is float:
            return "Unreleased"
        if len(value) > 14:
            return value[8:18]
        return value

    url = r'https://en.wikipedia.org/wiki/List_of_Xbox_One_games'
    tables = pd.read_html(url)  # Returns list of all tables on page
    xboxgames = tables[2]
    titles = xboxgames[xboxgames.columns[0]].tolist()
    genres = xboxgames[xboxgames.columns[1]].tolist()
    developers = xboxgames[xboxgames.columns[2]].tolist()
    publishers = xboxgames[xboxgames.columns[3]].tolist()
    dateUS = xboxgames[xboxgames.columns[6]].tolist()
    dateJP = xboxgames[xboxgames.columns[5]].tolist()
    dateEU = xboxgames[xboxgames.columns[7]].tolist()
    for count in range(len(titles)):
        # Skip repeated header rows that pandas picks up mid-table.
        if titles[count] != "Title" and titles[count] != "JP":
            game_details = [
                titles[count],
                _text_or_unknown(publishers[count]),
                _text_or_unknown(developers[count]),
                _clean_date(dateUS[count]),
                _clean_date(dateJP[count]),
                _clean_date(dateEU[count]),
            ]
            newGameId = insertGame(game_details)  # id of the inserted game
            insertGamePlatform(newGameId, 2)      # platform id 2 = Xbox One
            GenreIDs = insertGenres(genres[count])
            insertGameGenres(newGameId, GenreIDs)
def scrape_switch_games():
    """Scrape Nintendo Switch game info from the Wikipedia list page and store it.

    Extracts title, genre, developer, publisher and the three regional
    release dates, then inserts each game, its platform link
    (platform id 3) and its genres into the database.
    """
    def _text_or_unknown(value):
        # pandas represents missing table cells as NaN (a float).
        return "Unknown" if type(value) is float else value

    def _clean_date(value):
        # Missing dates come through as NaN; long cells embed the ISO
        # date at positions 8:18 (Wikipedia sort-key prefix format).
        if type(value) is float:
            return "Unreleased"
        if len(value) > 14:
            return value[8:18]
        return value

    url = r'https://en.wikipedia.org/wiki/List_of_Nintendo_Switch_games'
    tables = pd.read_html(url)  # Returns list of all tables on page
    switchgames = tables[0]
    titles = switchgames[switchgames.columns[0]].tolist()
    genres = switchgames[switchgames.columns[1]].tolist()
    developers = switchgames[switchgames.columns[2]].tolist()
    publishers = switchgames[switchgames.columns[3]].tolist()
    dateUS = switchgames[switchgames.columns[6]].tolist()
    dateJP = switchgames[switchgames.columns[5]].tolist()
    dateEU = switchgames[switchgames.columns[7]].tolist()
    for count in range(len(titles)):
        # Skip repeated header rows that pandas picks up mid-table.
        if titles[count] != "Title" and titles[count] != "JP":
            game_details = [
                titles[count],
                _text_or_unknown(publishers[count]),
                _text_or_unknown(developers[count]),
                _clean_date(dateUS[count]),
                _clean_date(dateJP[count]),
                _clean_date(dateEU[count]),
            ]
            newGameId = insertGame(game_details)  # id of the inserted game
            insertGamePlatform(newGameId, 3)      # platform id 3 = Switch
            GenreIDs = insertGenres(genres[count])
            insertGameGenres(newGameId, GenreIDs)
def insertGenres(genres):
    """Insert each comma-separated genre and return the list of their ids.

    Returns None when the genre cell was missing (NaN float), or when a
    fuzzy-matching existing genre with a different name is found (see
    the NOTE below). Duplicate exact names reuse the existing row id.
    """
    idList = []
    # NOTE(review): currentId is assigned below but never used afterwards.
    currentId = 0
    # Missing table cells arrive as NaN (a float).
    if(type(genres) is float):
        return
    allGenres=genres.split(',')
    for count in range(0,len(allGenres)):
        try:
            with db.cursor() as cursor:
                # Fuzzy lookup: find an existing genre whose name is a
                # close Levenshtein match (custom SQL functions).
                sql = "SELECT `id`,`name` FROM `genre` WHERE LEVENSHTEIN_RATIO(`name`,%s)>75 AND LEVENSHTEIN(`name`,%s)<4"
                cursor.execute(sql,(allGenres[count],allGenres[count]))
                result = cursor.fetchone()
                if(result is None):
                    pass
                else:
                    for k, v in result.items():
                        if(k=="id"):
                            currentId=v
                        if(k=="name"):
                            if(v!=allGenres[count]):
                                # 2D vs 3D variants are treated as
                                # distinct genres despite the fuzzy match.
                                if(v.find('2D')!=-1 and allGenres[count].find('3D')!=-1):
                                    pass
                                elif(v.find('3D')!=-1 and allGenres[count].find('2D')!=-1):
                                    pass
                                else:
                                    # NOTE(review): this appends the genre NAME
                                    # (not its id) and then returns None,
                                    # discarding idList entirely — looks like a
                                    # bug; confirm intended behavior.
                                    idList.append(v)
                                    return
            cursor.close()
            with db.cursor() as cursor:
                # No close-enough match: create a new genre row.
                sql = "INSERT INTO `genre` (`name`) VALUES (%s)"
                cursor.execute(sql, allGenres[count])
                db.commit()
                idList.append(cursor.lastrowid)
        except pymysql.err.IntegrityError:
            # Exact name already exists (unique constraint): reuse its id.
            cursor.close()
            with db.cursor() as cursor:
                sql = "SELECT `id` FROM `genre` WHERE `name`=%s"
                cursor.execute(sql, allGenres[count])
                result = cursor.fetchone()
                # print("Integrity: Tried to insert duplicate row - Already exists at ID " + str(result['id']))
                idList.append(result['id'])
        except pymysql.err.InternalError as e:
            print(str(e))
            cursor.close()
    return idList
def insertGame(game_details_list):
    """Store one game row and return its database id.

    ``game_details_list`` holds name, publishers, developers and the
    US/JP/EU release dates, in the column order of the INSERT. If the
    game already exists (duplicate name), the existing row's id is
    returned instead. On an internal MySQL error the error is printed
    and None is returned.
    """
    insert_sql = (
        "INSERT INTO `game` (`name`, `publishers`, `developers`,"
        "`dateUS`,`dateJP`,`dateEU`) VALUES (%s, %s, %s, %s, %s, %s)"
    )
    try:
        with db.cursor() as cursor:
            cursor.execute(insert_sql, game_details_list)
            db.commit()
            return cursor.lastrowid
    except pymysql.err.IntegrityError:
        # Duplicate name: look up and reuse the existing row's id.
        cursor.close()
        with db.cursor() as cursor:
            cursor.execute("SELECT `id` FROM `game` WHERE `name`=%s",
                           game_details_list[0])
            result = cursor.fetchone()
            return result['id']
    except pymysql.err.InternalError as e:
        print(str(e))
def insertGamePlatform(gameId, platformId):
    """Link a game to a platform via the gameplatform join table.

    Integrity and internal MySQL errors are printed and swallowed so a
    single bad row does not abort the scrape.
    """
    link_sql = "INSERT INTO `gameplatform` (`platformID`,`gameID`) VALUES (%s, %s)"
    try:
        with db.cursor() as cursor:
            cursor.execute(link_sql, [platformId, gameId])
            db.commit()
    except pymysql.err.IntegrityError as e:
        print(str(e))
    except pymysql.err.InternalError as e:
        print(str(e))
def insertGameGenres(gameId, genreIDs):
    """Link a game to each genre id via the gamegenre join table.

    Does nothing when ``genreIDs`` is not a list (e.g. None for games
    whose genre cell was missing). Duplicate links are silently skipped.
    """
    if type(genreIDs) is not list:
        return
    for genreId in genreIDs:
        try:
            with db.cursor() as cursor:
                cursor.execute(
                    "INSERT INTO `gamegenre` (`genreID`,`gameID`) VALUES (%s, %s)",
                    [genreId, gameId])
                db.commit()
        except pymysql.err.IntegrityError:
            # Link already present; safe to ignore.
            pass
        except pymysql.err.InternalError as e:
            print(str(e))
        cursor.close()
def main():
    """Entry Point"""
    # Open the shared DB connection once, then scrape each platform's
    # Wikipedia list in turn.
    connectDatabase()
    scrape_ps4_games()
    scrape_xboxone_games()
    scrape_switch_games()
if __name__ == '__main__':
    main()
| [
"pandas.read_html",
"pymysql.connect",
"pandas.concat"
] | [((210, 346), 'pymysql.connect', 'pymysql.connect', ([], {'host': '"""localhost"""', 'user': '"""root"""', 'password': '""""""', 'db': '"""vg_dapi"""', 'cursorclass': 'pymysql.cursors.DictCursor', 'charset': '"""utf8mb4"""'}), "(host='localhost', user='root', password='', db='vg_dapi',\n cursorclass=pymysql.cursors.DictCursor, charset='utf8mb4')\n", (225, 346), False, 'import pymysql\n'), ((609, 626), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (621, 626), True, 'import pandas as pd\n'), ((678, 696), 'pandas.read_html', 'pd.read_html', (['url2'], {}), '(url2)\n', (690, 696), True, 'import pandas as pd\n'), ((712, 746), 'pandas.concat', 'pd.concat', (['[tables[2], tables2[0]]'], {}), '([tables[2], tables2[0]])\n', (721, 746), True, 'import pandas as pd\n'), ((3041, 3058), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (3053, 3058), True, 'import pandas as pd\n'), ((5412, 5429), 'pandas.read_html', 'pd.read_html', (['url'], {}), '(url)\n', (5424, 5429), True, 'import pandas as pd\n')] |
from main.save_and_get import Abrufen
import config
# Instantiate one retriever per RSS example stored in the configured
# bucket subdirectory.
example1 = Abrufen(config.bucket_name, config.subdir4rss, 'example1')
example2 = Abrufen(config.bucket_name, config.subdir4rss, 'example2')
| [
"main.save_and_get.Abrufen"
] | [((64, 122), 'main.save_and_get.Abrufen', 'Abrufen', (['config.bucket_name', 'config.subdir4rss', '"""example1"""'], {}), "(config.bucket_name, config.subdir4rss, 'example1')\n", (71, 122), False, 'from main.save_and_get import Abrufen\n'), ((134, 192), 'main.save_and_get.Abrufen', 'Abrufen', (['config.bucket_name', 'config.subdir4rss', '"""example2"""'], {}), "(config.bucket_name, config.subdir4rss, 'example2')\n", (141, 192), False, 'from main.save_and_get import Abrufen\n')] |
import pandas as pd
import dash
from dash.dependencies import Input, Output
import plotly.express as px
import dash_html_components as html
import dash_core_components as dcc
import dash_bootstrap_components as dbc
# self packages
from .data_generator import load_transactions, comparisons_df
from .nav_bar import nav_bar_template
# Accumulates map-click payloads across callbacks so the comparison
# table can show the most recent selections (the last four are used).
all_click = []

# takes in preprocessed click data from the maps and returns the html table rendered version
# note default is invisible table
def render_table(click_data = None, default=False, max_rows=26):
    """Build the HTML comparison table from accumulated map-click data.

    click_data : list of single-row DataFrames, one per clicked point;
        only the last four clicks are rendered.
    default : when True, return an empty placeholder table so the
        callback target exists before any click happens.
    max_rows : cap on the number of rendered table rows.
    """
    if default:
        return html.Table(
            id = 'comparison_table'
        )
    df = comparisons_df()
    # Keep only the four most recent clicks.
    click_data = click_data[-4:]
    for data in click_data:
        # NOTE(review): DataFrame.append was removed in pandas 2.0;
        # switch to pd.concat when upgrading.
        df = df.append(data)
    df.index = [[f'Property {i}' for i in range(len(df))]]
    rownames = df.columns
    # Transpose so each clicked property becomes a column and the
    # original field names become a leading 'info' column.
    df = df.T
    df['info'] = rownames
    # 'Property 0' is presumably the placeholder row produced by
    # comparisons_df() — confirm against that helper.
    df.drop(columns=['Property 0'], inplace=True)
    columns = list(df.columns)
    # Rotate so the 'info' column comes first.
    columns = columns[-1:] + columns[:-1]
    df = df[columns]
    return html.Table(
        # Header
        [html.Tr([html.Th(col) for col in df.columns]) ] +
        # Body
        [html.Tr([
            html.Td(df.iloc[i][col]) for col in df.columns
        ]) for i in range(min(len(df), max_rows))],
        id = 'comparison_table',
        className = 'table table-bordered active'
    )
# inits transactions dash app that links to flask app
def init_transactions(server):
    """Create the /transactions/ Dash app, attach it to *server*, and
    return the underlying Flask server.

    The app shows a scatter map of transactions; clicking points feeds
    a comparison table via the callbacks registered below.
    """
    dashApp = dash.Dash(
        server=server,
        routes_pathname_prefix='/transactions/',
        external_stylesheets=[dbc.themes.BOOTSTRAP]
    )
    # Wrap the Dash layout in the shared navigation bar template.
    dashApp.index_string = nav_bar_template
    # Create Layout
    dashApp.layout = html.Div([
        html.H1('Transactions Dashboard', style={'text-align': 'center'}),
        html.Div([
            html.H2(children='Transactions Map', style = {'text-align': 'left'}),
            html.Div(children=''' map to visualize transactions '''),
            html.Br(),
            dcc.Loading(
                id = 'loading-map',
                type = 'default',
                children = dcc.Graph(id='transactions_map', figure={})
            )
        ]),
        html.Div(
            children = render_table(default=True),
            id = 'comparison_table_div',
        )
    ], className = 'container')
    @dashApp.callback(
        Output(component_id='transactions_map', component_property='figure'),
        Input(component_id='transactions_map', component_property='figure'),
    )
    def make_map(figure):
        # Rebuild the scatter map from the latest transaction data.
        transactions = load_transactions()
        fig = px.scatter_mapbox(transactions, lat='x', lon='y', hover_name='project', hover_data=['price', 'noOfUnits', 'propertyType', 'floorRange', 'project', 'tenure', 'region', 'street', 'area'],
                        color_discrete_sequence=['fuchsia'], zoom=12, height=450)
        fig.update_layout(mapbox_style='open-street-map')
        fig.update_layout(clickmode='event+select')
        fig.update_layout(margin={'r':0,'t':0,'l':0,'b':0})
        return fig
    @dashApp.callback(
        Output(component_id='comparison_table_div', component_property='children'),
        Input(component_id='transactions_map', component_property='clickData'),
    )
    def display_click_data(click):
        if click is None:
            return render_table(default=True)
        # preprocess the click data from maps
        points = click['points'][0]
        customdata = points['customdata']
        # Field order must match the hover_data list passed to
        # px.scatter_mapbox above.
        data = {
            'x': points['lat'],
            'y': points['lon'],
            'price': customdata[0],
            'noOfUnits': customdata[1],
            'propertyType': customdata[2],
            'floorRange': customdata[3],
            'project': customdata[4],
            'tenure': customdata[5],
            'region': customdata[6],
            'street': customdata[7],
            'area': customdata[8]
        }
        # Stringify every value so the table renders uniformly.
        data = {k: [str(v)] for k, v in data.items()}
        data = pd.DataFrame.from_dict(data)
        # NOTE(review): clicks accumulate in module-level all_click for
        # the life of the process (shared across users) — grows unbounded.
        all_click.append(data)
        return render_table(click_data=all_click)
    return dashApp.server
| [
"dash_html_components.Table",
"dash.dependencies.Output",
"dash_html_components.Br",
"pandas.DataFrame.from_dict",
"dash.dependencies.Input",
"dash_html_components.Td",
"dash_html_components.Th",
"dash_html_components.Div",
"dash_html_components.H2",
"dash_html_components.H1",
"plotly.express.sc... | [((1448, 1562), 'dash.Dash', 'dash.Dash', ([], {'server': 'server', 'routes_pathname_prefix': '"""/transactions/"""', 'external_stylesheets': '[dbc.themes.BOOTSTRAP]'}), "(server=server, routes_pathname_prefix='/transactions/',\n external_stylesheets=[dbc.themes.BOOTSTRAP])\n", (1457, 1562), False, 'import dash\n'), ((572, 605), 'dash_html_components.Table', 'html.Table', ([], {'id': '"""comparison_table"""'}), "(id='comparison_table')\n", (582, 605), True, 'import dash_html_components as html\n'), ((2569, 2824), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['transactions'], {'lat': '"""x"""', 'lon': '"""y"""', 'hover_name': '"""project"""', 'hover_data': "['price', 'noOfUnits', 'propertyType', 'floorRange', 'project', 'tenure',\n 'region', 'street', 'area']", 'color_discrete_sequence': "['fuchsia']", 'zoom': '(12)', 'height': '(450)'}), "(transactions, lat='x', lon='y', hover_name='project',\n hover_data=['price', 'noOfUnits', 'propertyType', 'floorRange',\n 'project', 'tenure', 'region', 'street', 'area'],\n color_discrete_sequence=['fuchsia'], zoom=12, height=450)\n", (2586, 2824), True, 'import plotly.express as px\n'), ((2332, 2400), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""transactions_map"""', 'component_property': '"""figure"""'}), "(component_id='transactions_map', component_property='figure')\n", (2338, 2400), False, 'from dash.dependencies import Input, Output\n'), ((2410, 2477), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""transactions_map"""', 'component_property': '"""figure"""'}), "(component_id='transactions_map', component_property='figure')\n", (2415, 2477), False, 'from dash.dependencies import Input, Output\n'), ((4019, 4047), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['data'], {}), '(data)\n', (4041, 4047), True, 'import pandas as pd\n'), ((3067, 3141), 'dash.dependencies.Output', 'Output', ([], {'component_id': '"""comparison_table_div"""', 
'component_property': '"""children"""'}), "(component_id='comparison_table_div', component_property='children')\n", (3073, 3141), False, 'from dash.dependencies import Input, Output\n'), ((3151, 3221), 'dash.dependencies.Input', 'Input', ([], {'component_id': '"""transactions_map"""', 'component_property': '"""clickData"""'}), "(component_id='transactions_map', component_property='clickData')\n", (3156, 3221), False, 'from dash.dependencies import Input, Output\n'), ((1695, 1760), 'dash_html_components.H1', 'html.H1', (['"""Transactions Dashboard"""'], {'style': "{'text-align': 'center'}"}), "('Transactions Dashboard', style={'text-align': 'center'})\n", (1702, 1760), True, 'import dash_html_components as html\n'), ((1793, 1859), 'dash_html_components.H2', 'html.H2', ([], {'children': '"""Transactions Map"""', 'style': "{'text-align': 'left'}"}), "(children='Transactions Map', style={'text-align': 'left'})\n", (1800, 1859), True, 'import dash_html_components as html\n'), ((1875, 1927), 'dash_html_components.Div', 'html.Div', ([], {'children': '""" map to visualize transactions """'}), "(children=' map to visualize transactions ')\n", (1883, 1927), True, 'import dash_html_components as html\n'), ((1945, 1954), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (1952, 1954), True, 'import dash_html_components as html\n'), ((1073, 1085), 'dash_html_components.Th', 'html.Th', (['col'], {}), '(col)\n', (1080, 1085), True, 'import dash_html_components as html\n'), ((1160, 1184), 'dash_html_components.Td', 'html.Td', (['df.iloc[i][col]'], {}), '(df.iloc[i][col])\n', (1167, 1184), True, 'import dash_html_components as html\n'), ((2078, 2121), 'dash_core_components.Graph', 'dcc.Graph', ([], {'id': '"""transactions_map"""', 'figure': '{}'}), "(id='transactions_map', figure={})\n", (2087, 2121), True, 'import dash_core_components as dcc\n')] |
import os
from AppKit import NSApp, NSImage
import vanilla
from mojo.UI import CurrentGlyphWindow, UpdateCurrentGlyphView,\
StatusInteractivePopUpWindow
from fontParts.world import CurrentGlyph
# Walk two levels up from this file to the extension root, then into
# its resources/ directory (where the PDF icons live).
resourcesDirectory = os.path.dirname(__file__)
resourcesDirectory = os.path.dirname(resourcesDirectory)
resourcesDirectory = os.path.dirname(resourcesDirectory)
resourcesDirectory = os.path.join(resourcesDirectory, "resources")

# name -> NSImage cache so each PDF icon is loaded only once.
imageCache = {}
def getImage(name):
    """Return the template NSImage for *name*, loading it on first use.

    Images are PDFs in the resources directory; loaded images are
    cached in the module-level imageCache.
    """
    cached = imageCache.get(name)
    if cached is None:
        path = os.path.join(resourcesDirectory, name + ".pdf")
        cached = NSImage.alloc().initWithContentsOfFile_(path)
        cached.setTemplate_(True)
        imageCache[name] = cached
    return cached
def getActiveGlyphWindow():
    """Return the current glyph window, or None when it is not active.

    A window only counts as active when its glyph editor view is the
    first responder.
    """
    window = CurrentGlyphWindow()
    if window is not None and window.getGlyphView().isFirstResponder():
        return window
    return None
# ---------------
# Base Controller
# ---------------
class BaseActionWindowController(object):
    """Base controller for pop-up action windows over the glyph editor.

    Subclasses override buildInterface() to populate the window and may
    override the two callback hooks below.
    """

    def __init__(self):
        glyphWindow = getActiveGlyphWindow()
        # Bail out silently when no glyph editor currently has focus.
        if glyphWindow is None:
            return
        self.w = ActionWindow(
            (1, 1),
            centerInView=CurrentGlyphWindow().getGlyphView()
        )
        self.w.responderWillBecomeFirstCallback = self.responderWillBecomeFirstCallback
        # There is probably a better way to set
        # the escape key to close the window but
        # I am lazy so I'm using a hidden button.
        self.w._closeButton = vanilla.ImageButton(
            (0, 0, 0, 0),
            bordered=False,
            callback=self._closeButtonCallback
        )
        self.w._closeButton.bind("\u001B", [])
        # Shared layout constants for subclasses' auto-layout rules.
        self.metrics = dict(
            margin=15,
            iconPadding=5,
            iconButtonWidth=30,
            iconButtonHeight=30,
            groupPadding=15,
        )
        rules = self.buildInterface(self.w)
        if rules is not None:
            self.w.addAutoPosSizeRules(rules, self.metrics)
        # Bind close.
        self.w.bind("close", self.windowCloseCallback)
        # Go
        self.w.open()

    def _closeButtonCallback(self, sender):
        # Detach the responder callback before closing so it cannot
        # fire during teardown.
        self.w.responderWillBecomeFirstCallback = None
        self.w.close()

    def buildInterface(self, window):
        # BUG FIX: __init__ calls self.buildInterface(self.w) with an
        # argument, but the original default signature took none, so a
        # subclass that did not override this raised TypeError. The
        # window argument is now accepted (and ignored) by default.
        # Subclass hook: build controls on *window*; optionally return
        # auto-layout rules.
        pass

    def windowCloseCallback(self, sender):
        # Subclass hook: called when the window closes.
        pass

    def responderWillBecomeFirstCallback(self, responder):
        # Subclass hook: called when a responder is about to become first.
        pass
# ------
# Window
# ------
class TSActionNSWindow(StatusInteractivePopUpWindow.nsWindowClass):
    """NSWindow subclass that reports first-responder changes to its delegate."""

    def makeFirstResponder_(self, responder):
        value = super(TSActionNSWindow, self).makeFirstResponder_(responder)
        # Only notify the delegate when the responder change succeeded.
        if value:
            delegate = self.delegate()
            if delegate is not None and delegate.responderWillBecomeFirstCallback is not None:
                delegate.responderWillBecomeFirstCallback(responder)
        # NOTE(review): Cocoa's makeFirstResponder: normally returns a
        # BOOL; returning the responder object is truthy-compatible but
        # looks unintentional — confirm callers only test truthiness.
        return responder
class ActionWindow(StatusInteractivePopUpWindow):
    # Use the NSWindow subclass above so first-responder changes are
    # forwarded to the window delegate's callback.
    nsWindowClass = TSActionNSWindow
# -------------
# Action Button
# -------------
class IconButton(vanilla.ImageButton):
    """Toolbar-style icon button that performs an undoable glyph action."""

    def __init__(self, imageName, actionName="Quick Action", actionCallback=None, closesWindow=True):
        super(IconButton, self).__init__(
            "auto",
            callback=self.performAction,
            imageObject=getImage(imageName),
            bordered=False
        )
        # actionName doubles as the undo label and the tooltip text.
        self.actionName = actionName
        self.actionCallback = actionCallback
        self.closesWindow = closesWindow
        button = self.getNSButton()
        button.setToolTip_(actionName)

    def performAction(self, sender):
        if self.actionCallback is not None:
            glyph = CurrentGlyph()
            # Wrap the callback in an undo group named after the action.
            glyph.prepareUndo(self.actionName)
            self.actionCallback(glyph)
            glyph.performUndo()
            glyph.changed()
            UpdateCurrentGlyphView()
        if self.closesWindow:
            # The vanilla window object is the NSWindow's delegate.
            window = self.getNSButton().window().delegate()
            window.close()
| [
"vanilla.ImageButton",
"AppKit.NSImage.alloc",
"os.path.join",
"os.path.dirname",
"fontParts.world.CurrentGlyph",
"mojo.UI.CurrentGlyphWindow",
"mojo.UI.UpdateCurrentGlyphView"
] | [((220, 245), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (235, 245), False, 'import os\n'), ((267, 302), 'os.path.dirname', 'os.path.dirname', (['resourcesDirectory'], {}), '(resourcesDirectory)\n', (282, 302), False, 'import os\n'), ((324, 359), 'os.path.dirname', 'os.path.dirname', (['resourcesDirectory'], {}), '(resourcesDirectory)\n', (339, 359), False, 'import os\n'), ((381, 426), 'os.path.join', 'os.path.join', (['resourcesDirectory', '"""resources"""'], {}), "(resourcesDirectory, 'resources')\n", (393, 426), False, 'import os\n'), ((767, 787), 'mojo.UI.CurrentGlyphWindow', 'CurrentGlyphWindow', ([], {}), '()\n', (785, 787), False, 'from mojo.UI import CurrentGlyphWindow, UpdateCurrentGlyphView, StatusInteractivePopUpWindow\n'), ((516, 563), 'os.path.join', 'os.path.join', (['resourcesDirectory', "(name + '.pdf')"], {}), "(resourcesDirectory, name + '.pdf')\n", (528, 563), False, 'import os\n'), ((1604, 1694), 'vanilla.ImageButton', 'vanilla.ImageButton', (['(0, 0, 0, 0)'], {'bordered': '(False)', 'callback': 'self._closeButtonCallback'}), '((0, 0, 0, 0), bordered=False, callback=self.\n _closeButtonCallback)\n', (1623, 1694), False, 'import vanilla\n'), ((3776, 3790), 'fontParts.world.CurrentGlyph', 'CurrentGlyph', ([], {}), '()\n', (3788, 3790), False, 'from fontParts.world import CurrentGlyph\n'), ((3949, 3973), 'mojo.UI.UpdateCurrentGlyphView', 'UpdateCurrentGlyphView', ([], {}), '()\n', (3971, 3973), False, 'from mojo.UI import CurrentGlyphWindow, UpdateCurrentGlyphView, StatusInteractivePopUpWindow\n'), ((580, 595), 'AppKit.NSImage.alloc', 'NSImage.alloc', ([], {}), '()\n', (593, 595), False, 'from AppKit import NSApp, NSImage\n'), ((1293, 1313), 'mojo.UI.CurrentGlyphWindow', 'CurrentGlyphWindow', ([], {}), '()\n', (1311, 1313), False, 'from mojo.UI import CurrentGlyphWindow, UpdateCurrentGlyphView, StatusInteractivePopUpWindow\n')] |
import Winglets
import json
import math
## Prepare Data
dataDict = {}
dataArray = []
# Use a context manager so the JSON file handle is closed promptly
# (the original opened the file and never closed it).
with open('./testFile.json', 'r') as f:
    dataDict = json.loads(f.read())
# Flatten each track's point dicts into [x, y] pairs.
for curKey in dataDict['dots'].keys():
    curArrDictData = dataDict['dots'][curKey]
    curKeyArr = [[point['x'], point['y']] for point in curArrDictData]
    dataArray.append(curKeyArr)
# Small hand-written fixtures for the drawing routines below.
dataArrayTest = [
    [
        [2, 3],
        [4, 6],
        [4, 5],
        [3, 2],
        [6, 5],
        [5, 6],
        [1, 2],
        [2, 4]
    ]
]
dataArrayTest1 = [
    [
        [-1.8467827164233757, -0.32359462877408124],
        [-0.7646302727249497, -0.20169742403492624],
        [-1.9490266612960963, -0.06872107311219157],
        [-1.3985176033785436, -0.1394565639809605],
        [-1.3192884234119042, 0.10408209719246732],
        [-1.3008685617349127, -0.11898413174114462],
        [-1.6118388742131056, -0.0825317789879439],
        [-1.5480362947416044, 0.05477850586236834],
        [-0.5204018281010724, -0.0064287721502440396],
        [-1.3260328260134568, 0.0192675336368897],
        [-1.3673899592569305, 0.02269954670743994],
        [-1.8011236429571018, -0.2943896776054888],
        [-1.2543877120537488, 0.013826796210314464],
        [-1.7624360292903625, -0.13316404689552958],
        [-2.1375891691271023, -0.10481913067508605],
        [-1.0797929311491847, -0.006604367860836293],
        [-1.9291588152470303, -0.05091304686418803],
        [-2.070795065814931, 0.3587612219744936],
        [-1.930805837668947, 0.19656344116061403],
        [-1.7169013639633552, -0.19956534844023005],
        [-1.030997817981172, 0.019922204353402286]
    ],
    [
        [-0.5245786836857915, -0.009288441027319272],
        [-1.5975406720074177, -0.164711589213956],
        [-0.5238814441706277, -0.009185694875345709],
        [-0.5376809953080502, -0.020119170111257204],
        [-0.6552283992534788, -0.07678937700237982]
    ]
]
# Reflect every coordinate into the positive quadrant and scale by 100.
# abs() replaces the original manual `if v < 0: v *= -1` sign flips;
# the numeric result is identical.
for track in dataArrayTest1:
    for idx, (x, y) in enumerate(track):
        track[idx] = [abs(x) * 100, abs(y) * 100]
# Each section below exercises one rendering mode; only the Winglets
# call on the real data is currently enabled.
## Test Circle
# Winglets.drawCirlce(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawCirlce(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCirlce(dataDict['dots'], ['#d7191c', '#abdda4','#2b83ba'])
## Test Winglets
# Winglets.drawWinglets(dataArrayTest1, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawWinglets(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
Winglets.drawWinglets(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawWinglets(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'], False)
# Winglets.drawWinglets(dataArray, ['#d7191c', '#fdae61', '#abdda4'])
## Test CommonFate
# Winglets.drawCommonFate(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCommonFate(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawCommonFate(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4'])
## Test Proximity
# Winglets.drawProximity(dataArray, ['#d7191c', '#fdae61', '#abdda4','#2b83ba'])
# Winglets.drawProximity(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4','#2b83ba']) | [
"Winglets.drawWinglets"
] | [((3293, 3382), 'Winglets.drawWinglets', 'Winglets.drawWinglets', (["dataDict['dots']", "['#d7191c', '#fdae61', '#abdda4', '#2b83ba']"], {}), "(dataDict['dots'], ['#d7191c', '#fdae61', '#abdda4',\n '#2b83ba'])\n", (3314, 3382), False, 'import Winglets\n')] |
import unittest
from programy.processors.pre.toupper import ToUpperPreProcessor
from programy.processors.pre.normalize import NormalizePreProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
class PreProcessingTests(unittest.TestCase):
    """Checks the normalize and to-upper pre-processors in sequence."""

    def setUp(self):
        # A bot with default brain/bot configuration is all the
        # pre-processors need.
        brain = Brain(BrainConfiguration())
        self.bot = Bot(brain, config=BotConfiguration())

    def test_pre_cleanup(self):
        text = "Hello World"
        # Normalization leaves already-clean text untouched.
        normalized = NormalizePreProcessor().process(self.bot, "testid", text)
        self.assertEqual("Hello World", normalized)
        # Upper-casing is then applied to the normalized text.
        uppercased = ToUpperPreProcessor().process(self.bot, "testid", normalized)
        self.assertEqual("HELLO WORLD", uppercased)
| [
"programy.config.brain.BrainConfiguration",
"programy.processors.pre.normalize.NormalizePreProcessor",
"programy.processors.pre.toupper.ToUpperPreProcessor",
"programy.config.bot.BotConfiguration"
] | [((558, 581), 'programy.processors.pre.normalize.NormalizePreProcessor', 'NormalizePreProcessor', ([], {}), '()\n', (579, 581), False, 'from programy.processors.pre.normalize import NormalizePreProcessor\n'), ((740, 761), 'programy.processors.pre.toupper.ToUpperPreProcessor', 'ToUpperPreProcessor', ([], {}), '()\n', (759, 761), False, 'from programy.processors.pre.toupper import ToUpperPreProcessor\n'), ((410, 430), 'programy.config.brain.BrainConfiguration', 'BrainConfiguration', ([], {}), '()\n', (428, 430), False, 'from programy.config.brain import BrainConfiguration\n'), ((440, 458), 'programy.config.bot.BotConfiguration', 'BotConfiguration', ([], {}), '()\n', (456, 458), False, 'from programy.config.bot import BotConfiguration\n')] |
import os
import time
import datetime
import json
import threading
from consumer import Consumer
from kafka import KafkaProducer
def test_consumer_receives_objects(kafka_admin_client, postgres_client):
    """End-to-end check: a report published to Kafka ends up in Postgres.

    Fixtures supply a Kafka admin client and an open Postgres
    (connection, cursor) pair.
    """
    (db, db_cur) = postgres_client
    # Start from a clean table and a fresh topic.
    db_cur.execute("DELETE FROM website_status;")
    db.commit()
    kafka_admin_client.delete_topics(["reports"])
    consumer = Consumer()
    producer = KafkaProducer(
        bootstrap_servers=[f"{os.getenv('KAFKA_HOST', 'localhost:29092')}"],
        api_version=(0, 10),
    )
    def launch_consumer():
        consumer.connect()
        consumer.start()
    # Run the consumer loop in a background thread while we produce.
    launcher = threading.Thread(target=launch_consumer)
    launcher.start()
    time.sleep(3)  # give the consumer time to connect and subscribe
    url = "https://www.qwe.com"
    body_bytes = bytes(
        json.dumps(
            {
                "url": url,
                "status_code": 200,
                "content_check": True,
                "time": 2.56,
                "report_time": datetime.datetime.now().isoformat(),
            }
        ),
        encoding="utf-8",
    )
    producer.send("reports", key=bytes(url, encoding="utf-8"), value=body_bytes)
    records = []
    # NOTE(review): busy-wait with no timeout — the test hangs forever
    # if the message never arrives.
    while not len(records):
        db_cur.execute("SELECT * from website_status;")
        records = db_cur.fetchall()
    consumer.stop()
    assert len(records) == 1
    for row in records:
        assert row[1] == "https://www.qwe.com"
        assert row[2] == 200
        assert row[3] > 0
    # Allow background machinery to drain before fixture teardown.
    time.sleep(5)
| [
"os.getenv",
"time.sleep",
"consumer.Consumer",
"datetime.datetime.now",
"threading.Thread"
] | [((374, 384), 'consumer.Consumer', 'Consumer', ([], {}), '()\n', (382, 384), False, 'from consumer import Consumer\n'), ((624, 664), 'threading.Thread', 'threading.Thread', ([], {'target': 'launch_consumer'}), '(target=launch_consumer)\n', (640, 664), False, 'import threading\n'), ((691, 704), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (701, 704), False, 'import time\n'), ((1455, 1468), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (1465, 1468), False, 'import time\n'), ((446, 488), 'os.getenv', 'os.getenv', (['"""KAFKA_HOST"""', '"""localhost:29092"""'], {}), "('KAFKA_HOST', 'localhost:29092')\n", (455, 488), False, 'import os\n'), ((960, 983), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (981, 983), False, 'import datetime\n')] |
from mock import call
from nose.tools import eq_, raises
from simian import patch
from simian.test.my_package import internal_module
from simian.test.my_package import external_module
def test_patch_with_multiple_arguments():
    """External and internal targets are all mocked and recorded in call order."""
    @patch(
        module=internal_module,
        external=(
            'simian.test.my_package.external_module.external_fn_a',
            'simian.test.my_package.external_module.external_fn_b'),
        internal=(
            'internal_fn_a',
            'internal_fn_b'))
    def inner(master_mock):
        internal_module.my_fn()
        # my_fn calls the two external then the two internal functions.
        eq_(master_mock.mock_calls, [
            call.external_fn_a(),
            call.external_fn_b(),
            call.internal_fn_a(),
            call.internal_fn_b()])
    inner()  # pylint: disable=E1120
@raises(RuntimeError)
def test_patch_with_no_external():
    """Without external targets, the real external_fn_a runs and raises."""
    @patch(
        module=internal_module,
        internal=(
            'internal_fn_a',
            'internal_fn_b'))
    def inner(master_mock):
        try:
            internal_module.my_fn()
        except RuntimeError as e:
            # The unmocked external function raised before any mocked
            # call could be recorded.
            eq_(str(e), 'called external_fn_a()')
            eq_(master_mock.mock_calls, [])
            raise
    inner()  # pylint: disable=E1120
def test_patch_with_no_external_does_not_reload():
    """Internal names are swapped for mocks during the call and restored after.

    External names were never patched, so they must be untouched both
    during and after the patched call.
    """
    @patch(
        module=internal_module,
        internal=(
            'internal_fn_a',
            'internal_fn_b'))
    def inner(master_mock):
        assert master_mock
        # BUG FIX: the original called nose's ne_(), which is never
        # imported in this module (only eq_ and raises are) and raised
        # NameError at test time. Plain asserts express the same
        # "not equal" checks.
        assert internal_fn_a != internal_module.internal_fn_a
        assert internal_fn_b != internal_module.internal_fn_b
        eq_(external_fn_a, external_module.external_fn_a)
        eq_(external_fn_b, external_module.external_fn_b)
    # Snapshot the real functions before running the patched call.
    internal_fn_a = internal_module.internal_fn_a
    internal_fn_b = internal_module.internal_fn_b
    external_fn_a = external_module.external_fn_a
    external_fn_b = external_module.external_fn_b
    inner()  # pylint: disable=E1120
    # After the call every module attribute is the original again.
    eq_(internal_fn_a, internal_module.internal_fn_a)
    eq_(internal_fn_b, internal_module.internal_fn_b)
    eq_(external_fn_a, external_module.external_fn_a)
    eq_(external_fn_b, external_module.external_fn_b)
@raises(RuntimeError)
def test_patch_with_no_internal():
    """Without internal patches, the real internal_fn_a runs and raises."""
    @patch(
        module=internal_module,
        external=(
            'simian.test.my_package.external_module.external_fn_a',
            'simian.test.my_package.external_module.external_fn_b'))
    def inner(master_mock):
        try:
            internal_module.my_fn()
        except RuntimeError as e:
            # Externals were mocked (and recorded) before the internal raised.
            eq_(str(e), 'called internal_fn_a()')
            eq_(master_mock.mock_calls, [
                call.external_fn_a(),
                call.external_fn_b()])
            raise
    inner() # pylint: disable=E1120
def test_patch_with_internal_restores_targets():
    """After the patched call finishes, all targets raise again (unpatched)."""
    @patch(
        module=internal_module,
        external=(
            'simian.test.my_package.external_module.external_fn_a',
            'simian.test.my_package.external_module.external_fn_b'),
        internal=(
            'internal_fn_a',
            'internal_fn_b'))
    def inner(master_mock):
        internal_module.my_fn()
        eq_(master_mock.mock_calls, [
            call.external_fn_a(),
            call.external_fn_b(),
            call.internal_fn_a(),
            call.internal_fn_b()])
    inner() # pylint: disable=E1120
    # Each original function raises RuntimeError when called directly,
    # proving the patch was removed.
    @raises(RuntimeError)
    def ensure_target_unpatched(target):
        target()
    ensure_target_unpatched(external_module.external_fn_a)
    ensure_target_unpatched(external_module.external_fn_b)
    ensure_target_unpatched(internal_module.internal_fn_a)
    ensure_target_unpatched(internal_module.internal_fn_b)
def test_patch_with_test_generator_targets():
    """Yield the patched check twice so nose runs it as a test generator."""
    @patch(
        module=internal_module,
        external=(
            'simian.test.my_package.external_module.external_fn_a',
            'simian.test.my_package.external_module.external_fn_b'),
        internal=(
            'internal_fn_a',
            'internal_fn_b'))
    def inner(master_mock):
        internal_module.my_fn()
        eq_(master_mock.mock_calls, [
            call.external_fn_a(),
            call.external_fn_b(),
            call.internal_fn_a(),
            call.internal_fn_b()])
    yield inner
    yield inner
@raises(RuntimeError)
def test_patch_with_no_internal_no_external():
    """With no targets at all, nothing is mocked and the first call raises."""
    @patch(module=internal_module)
    def inner(master_mock):
        try:
            internal_module.my_fn()
        except RuntimeError as e:
            eq_(str(e), 'called external_fn_a()')
            eq_(master_mock.mock_calls, [])
            raise
    inner() # pylint: disable=E1120
def test_patch_with_generated_targets():
    """Target names may be supplied as generator expressions, not just tuples."""
    external_format = 'simian.test.my_package.external_module.external_fn_{c}'
    internal_format = 'internal_fn_{c}'
    # noinspection PyUnresolvedReferences
    @patch(
        module=internal_module,
        external=(external_format.format(c=c) for c in 'ab'),
        internal=(internal_format.format(c=c) for c in 'ab'))
    def inner(master_mock):
        internal_module.my_fn()
        eq_(master_mock.mock_calls, [
            call.external_fn_a(),
            call.external_fn_b(),
            call.internal_fn_a(),
            call.internal_fn_b()])
    inner() # pylint: disable=E1120
@raises(RuntimeError)
def test_no_patch():
    """Baseline: without any patching, my_fn() raises via the real external."""
    try:
        internal_module.my_fn()
    except RuntimeError as e:
        eq_(str(e), 'called external_fn_a()')
        raise
#
# Test Helpers
#
def ne_(a, b, msg=None):
    """Inverse of nose's eq_: raise AssertionError when *a* equals *b*.

    The error message is *msg* if given, otherwise a default showing
    both reprs.
    """
    if a == b:
        detail = msg or "{a!r} == {b!r}".format(a=a, b=b)
        raise AssertionError(detail)  # pragma: no cover
| [
"nose.tools.eq_",
"mock.call.internal_fn_b",
"simian.test.my_package.internal_module.my_fn",
"mock.call.internal_fn_a",
"simian.patch",
"nose.tools.raises",
"mock.call.external_fn_b",
"mock.call.external_fn_a"
] | [((781, 801), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (787, 801), False, 'from nose.tools import eq_, raises\n'), ((2140, 2160), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (2146, 2160), False, 'from nose.tools import eq_, raises\n'), ((4252, 4272), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (4258, 4272), False, 'from nose.tools import eq_, raises\n'), ((5263, 5283), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (5269, 5283), False, 'from nose.tools import eq_, raises\n'), ((233, 444), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'external': "('simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b')", 'internal': "('internal_fn_a', 'internal_fn_b')"}), "(module=internal_module, external=(\n 'simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b'), internal=(\n 'internal_fn_a', 'internal_fn_b'))\n", (238, 444), False, 'from simian import patch\n'), ((842, 916), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'internal': "('internal_fn_a', 'internal_fn_b')"}), "(module=internal_module, internal=('internal_fn_a', 'internal_fn_b'))\n", (847, 916), False, 'from simian import patch\n'), ((1277, 1351), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'internal': "('internal_fn_a', 'internal_fn_b')"}), "(module=internal_module, internal=('internal_fn_a', 'internal_fn_b'))\n", (1282, 1351), False, 'from simian import patch\n'), ((1925, 1974), 'nose.tools.eq_', 'eq_', (['internal_fn_a', 'internal_module.internal_fn_a'], {}), '(internal_fn_a, internal_module.internal_fn_a)\n', (1928, 1974), False, 'from nose.tools import eq_, raises\n'), ((1979, 2028), 'nose.tools.eq_', 'eq_', (['internal_fn_b', 'internal_module.internal_fn_b'], {}), '(internal_fn_b, internal_module.internal_fn_b)\n', (1982, 2028), 
False, 'from nose.tools import eq_, raises\n'), ((2033, 2082), 'nose.tools.eq_', 'eq_', (['external_fn_a', 'external_module.external_fn_a'], {}), '(external_fn_a, external_module.external_fn_a)\n', (2036, 2082), False, 'from nose.tools import eq_, raises\n'), ((2087, 2136), 'nose.tools.eq_', 'eq_', (['external_fn_b', 'external_module.external_fn_b'], {}), '(external_fn_b, external_module.external_fn_b)\n', (2090, 2136), False, 'from nose.tools import eq_, raises\n'), ((2201, 2362), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'external': "('simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b')"}), "(module=internal_module, external=(\n 'simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b'))\n", (2206, 2362), False, 'from simian import patch\n'), ((2787, 2998), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'external': "('simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b')", 'internal': "('internal_fn_a', 'internal_fn_b')"}), "(module=internal_module, external=(\n 'simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b'), internal=(\n 'internal_fn_a', 'internal_fn_b'))\n", (2792, 2998), False, 'from simian import patch\n'), ((3339, 3359), 'nose.tools.raises', 'raises', (['RuntimeError'], {}), '(RuntimeError)\n', (3345, 3359), False, 'from nose.tools import eq_, raises\n'), ((3708, 3919), 'simian.patch', 'patch', ([], {'module': 'internal_module', 'external': "('simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b')", 'internal': "('internal_fn_a', 'internal_fn_b')"}), "(module=internal_module, external=(\n 'simian.test.my_package.external_module.external_fn_a',\n 'simian.test.my_package.external_module.external_fn_b'), internal=(\n 'internal_fn_a', 
'internal_fn_b'))\n", (3713, 3919), False, 'from simian import patch\n'), ((4325, 4354), 'simian.patch', 'patch', ([], {'module': 'internal_module'}), '(module=internal_module)\n', (4330, 4354), False, 'from simian import patch\n'), ((542, 565), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (563, 565), False, 'from simian.test.my_package import internal_module\n'), ((1573, 1622), 'nose.tools.eq_', 'eq_', (['external_fn_a', 'external_module.external_fn_a'], {}), '(external_fn_a, external_module.external_fn_a)\n', (1576, 1622), False, 'from nose.tools import eq_, raises\n'), ((1631, 1680), 'nose.tools.eq_', 'eq_', (['external_fn_b', 'external_module.external_fn_b'], {}), '(external_fn_b, external_module.external_fn_b)\n', (1634, 1680), False, 'from nose.tools import eq_, raises\n'), ((3096, 3119), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (3117, 3119), False, 'from simian.test.my_package import internal_module\n'), ((4017, 4040), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (4038, 4040), False, 'from simian.test.my_package import internal_module\n'), ((5024, 5047), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (5045, 5047), False, 'from simian.test.my_package import internal_module\n'), ((5322, 5345), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (5343, 5345), False, 'from simian.test.my_package import internal_module\n'), ((1012, 1035), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (1033, 1035), False, 'from simian.test.my_package import internal_module\n'), ((2449, 2472), 'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (2470, 2472), False, 'from simian.test.my_package import internal_module\n'), ((4408, 4431), 
'simian.test.my_package.internal_module.my_fn', 'internal_module.my_fn', ([], {}), '()\n', (4429, 4431), False, 'from simian.test.my_package import internal_module\n'), ((616, 636), 'mock.call.external_fn_a', 'call.external_fn_a', ([], {}), '()\n', (634, 636), False, 'from mock import call\n'), ((650, 670), 'mock.call.external_fn_b', 'call.external_fn_b', ([], {}), '()\n', (668, 670), False, 'from mock import call\n'), ((684, 704), 'mock.call.internal_fn_a', 'call.internal_fn_a', ([], {}), '()\n', (702, 704), False, 'from mock import call\n'), ((718, 738), 'mock.call.internal_fn_b', 'call.internal_fn_b', ([], {}), '()\n', (736, 738), False, 'from mock import call\n'), ((1132, 1163), 'nose.tools.eq_', 'eq_', (['master_mock.mock_calls', '[]'], {}), '(master_mock.mock_calls, [])\n', (1135, 1163), False, 'from nose.tools import eq_, raises\n'), ((3170, 3190), 'mock.call.external_fn_a', 'call.external_fn_a', ([], {}), '()\n', (3188, 3190), False, 'from mock import call\n'), ((3204, 3224), 'mock.call.external_fn_b', 'call.external_fn_b', ([], {}), '()\n', (3222, 3224), False, 'from mock import call\n'), ((3238, 3258), 'mock.call.internal_fn_a', 'call.internal_fn_a', ([], {}), '()\n', (3256, 3258), False, 'from mock import call\n'), ((3272, 3292), 'mock.call.internal_fn_b', 'call.internal_fn_b', ([], {}), '()\n', (3290, 3292), False, 'from mock import call\n'), ((4091, 4111), 'mock.call.external_fn_a', 'call.external_fn_a', ([], {}), '()\n', (4109, 4111), False, 'from mock import call\n'), ((4125, 4145), 'mock.call.external_fn_b', 'call.external_fn_b', ([], {}), '()\n', (4143, 4145), False, 'from mock import call\n'), ((4159, 4179), 'mock.call.internal_fn_a', 'call.internal_fn_a', ([], {}), '()\n', (4177, 4179), False, 'from mock import call\n'), ((4193, 4213), 'mock.call.internal_fn_b', 'call.internal_fn_b', ([], {}), '()\n', (4211, 4213), False, 'from mock import call\n'), ((4528, 4559), 'nose.tools.eq_', 'eq_', (['master_mock.mock_calls', '[]'], {}), 
'(master_mock.mock_calls, [])\n', (4531, 4559), False, 'from nose.tools import eq_, raises\n'), ((5098, 5118), 'mock.call.external_fn_a', 'call.external_fn_a', ([], {}), '()\n', (5116, 5118), False, 'from mock import call\n'), ((5132, 5152), 'mock.call.external_fn_b', 'call.external_fn_b', ([], {}), '()\n', (5150, 5152), False, 'from mock import call\n'), ((5166, 5186), 'mock.call.internal_fn_a', 'call.internal_fn_a', ([], {}), '()\n', (5184, 5186), False, 'from mock import call\n'), ((5200, 5220), 'mock.call.internal_fn_b', 'call.internal_fn_b', ([], {}), '()\n', (5218, 5220), False, 'from mock import call\n'), ((2615, 2635), 'mock.call.external_fn_a', 'call.external_fn_a', ([], {}), '()\n', (2633, 2635), False, 'from mock import call\n'), ((2653, 2673), 'mock.call.external_fn_b', 'call.external_fn_b', ([], {}), '()\n', (2671, 2673), False, 'from mock import call\n')] |
#
# Shell methods.
#
import os
from os import path
import shutil
from shutil import make_archive
from zipfile import ZipFile
def main():
    """Demonstrate shell-style file operations (copy, archive, zip) on myFile.txt.

    Fixes vs. the original: removed a stray trailing semicolon, dropped the
    unused second value of ``path.split``, normalized spacing. The no-op
    rename is kept (behavior unchanged) but flagged.
    """
    # Duplicate of an existing file
    if path.exists("myFile.txt"):
        # Absolute path of the file in the current directory.
        src = path.realpath("myFile.txt")
        # Make a backup copy by appending ".bak" to the name.
        dst = src + ".bak"
        # Use the shell to make a copy of the file.
        shutil.copy(src, dst)
        # Copy over the permissions, modification times, and other info.
        shutil.copystat(src, dst)
        # NOTE(review): source and destination names are identical, so this
        # rename is a no-op; the originally intended new name is unknown.
        os.rename("myFile.txt", "myFile.txt")
        # Backup archive: zip the whole directory containing the file.
        root_dir, _ = path.split(src)
        shutil.make_archive("backup", "zip", root_dir)
        # Build a ZIP archive holding both the file and its backup.
        with ZipFile("Shell.zip", "w") as newzip:
            newzip.write("myFile.txt")
            newzip.write("myFile.txt.bak")
if __name__ == "__main__":
main()
| [
"os.path.exists",
"shutil.make_archive",
"zipfile.ZipFile",
"os.rename",
"os.path.split",
"os.path.realpath",
"shutil.copy",
"shutil.copystat"
] | [((182, 207), 'os.path.exists', 'path.exists', (['"""myFile.txt"""'], {}), "('myFile.txt')\n", (193, 207), False, 'from os import path\n'), ((275, 302), 'os.path.realpath', 'path.realpath', (['"""myFile.txt"""'], {}), "('myFile.txt')\n", (288, 302), False, 'from os import path\n'), ((458, 479), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (469, 479), False, 'import shutil\n'), ((558, 583), 'shutil.copystat', 'shutil.copystat', (['src', 'dst'], {}), '(src, dst)\n', (573, 583), False, 'import shutil\n'), ((625, 662), 'os.rename', 'os.rename', (['"""myFile.txt"""', '"""myFile.txt"""'], {}), "('myFile.txt', 'myFile.txt')\n", (634, 662), False, 'import os\n'), ((709, 724), 'os.path.split', 'path.split', (['src'], {}), '(src)\n', (719, 724), False, 'from os import path\n'), ((729, 775), 'shutil.make_archive', 'shutil.make_archive', (['"""backup"""', '"""zip"""', 'root_dir'], {}), "('backup', 'zip', root_dir)\n", (748, 775), False, 'import shutil\n'), ((808, 833), 'zipfile.ZipFile', 'ZipFile', (['"""Shell.zip"""', '"""w"""'], {}), "('Shell.zip', 'w')\n", (815, 833), False, 'from zipfile import ZipFile\n')] |
# 应用脚本
import os
from webapp import create_app
from dotenv import load_dotenv
# Load an optional .env file sitting next to this script, if present.
dotenv_path = os.path.join(os.path.dirname(__file__), '.env')
if os.path.exists(dotenv_path):
    load_dotenv(dotenv_path)
# Build the app; APP_CONFIG selects the configuration, defaulting to production.
app = create_app(os.getenv("APP_CONFIG") or "production")
| [
"os.path.dirname",
"os.path.exists",
"os.getenv",
"dotenv.load_dotenv"
] | [((144, 171), 'os.path.exists', 'os.path.exists', (['dotenv_path'], {}), '(dotenv_path)\n', (158, 171), False, 'import os\n'), ((106, 131), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'import os\n'), ((177, 201), 'dotenv.load_dotenv', 'load_dotenv', (['dotenv_path'], {}), '(dotenv_path)\n', (188, 201), False, 'from dotenv import load_dotenv\n'), ((221, 244), 'os.getenv', 'os.getenv', (['"""APP_CONFIG"""'], {}), "('APP_CONFIG')\n", (230, 244), False, 'import os\n')] |
from s1.parsers import S1Parser
def test_integration_tha(tha_lines, tha_encounter):
    """Feed the THA fixture lines through S1Parser and expect one matching record."""
    parser = S1Parser()
    for raw_line in tha_lines:
        parser.feed(raw_line)
    parser.finish()
    results = parser.flush()
    assert len(results) == 1
    assert results[0].as_dict() == tha_encounter
| [
"s1.parsers.S1Parser"
] | [((99, 109), 's1.parsers.S1Parser', 'S1Parser', ([], {}), '()\n', (107, 109), False, 'from s1.parsers import S1Parser\n')] |
# -*- coding: UTF-8 -*-
import os
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
def show_images(name):
    """Load the file called *name* from each class folder A..j and stack the
    RGB arrays into a single ndarray."""
    folders = {"A", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
    loaded = [
        np.asarray(Image.open("output/notMNIST_large/" + folder + "/" + name).convert('RGB'))
        for folder in folders
    ]
    return np.asarray(loaded)
def show_image(path):
    """Open the image at *path* and display it with PIL's default viewer."""
    img = Image.open(path)
    Image._show(img)
def gallery(array, ncols=10):
    """Tile an (n, h, w, c) image stack into one (h*nrows, w*ncols, c) mosaic.

    *n* must be an exact multiple of *ncols*; images fill the grid
    left-to-right, top-to-bottom.
    """
    nindex, height, width, intensity = array.shape
    nrows = nindex // ncols
    assert nindex == nrows * ncols
    # Interleave the row/column axes with the pixel axes, then flatten
    # so result.shape == (height*nrows, width*ncols, intensity).
    tiled = array.reshape(nrows, ncols, height, width, intensity)
    tiled = tiled.swapaxes(1, 2)
    return tiled.reshape(height * nrows, width * ncols, intensity)
def make_array(png_path):
    """Load every image path returned by recognize(png_path) into one RGB ndarray."""
    pic_paths = recognize(png_path)
    print("image size:", len(pic_paths))
    arrays = [np.asarray(Image.open(p).convert('RGB')) for p in pic_paths]
    return np.asarray(arrays)
def recognize(png_path, max=1000):
    """Collect up to *max* candidate paths for the PNG files listed in *png_path*.

    Every ``.png`` name found in *png_path* (except a known-broken blacklist)
    is mapped into each class folder A..j under ``output/notMNIST_large/``;
    the resulting paths accumulate until *max* entries are reached.

    Fixes vs. the original: the bare ``except:`` used as a membership test via
    ``list.index`` (which also swallowed unrelated errors) is replaced by a
    plain ``in`` check. The parameter name ``max`` shadows the builtin but is
    kept for backward compatibility.
    """
    image_files = os.listdir(png_path)
    folders = {"A", "b", "c", "d", "e", "f", "g", "h", "i", "j"}
    # Images known to be unreadable; skip them entirely.
    errorImages = ['RnJlaWdodERpc3BCb29rSXRhbGljLnR0Zg==.png',
                   'SG90IE11c3RhcmQgQlROIFBvc3Rlci50dGY=.png',
                   'Um9tYW5hIEJvbGQucGZi.png'
                   ]
    images = []
    for image in image_files:
        name = str(image)
        if name in errorImages:
            # Preserve the original diagnostic output (index + name).
            print(errorImages.index(name), name)
            continue
        if not name.endswith(".png"):
            continue
        for folder in folders:
            images.append("output/notMNIST_large/" + folder + "/" + name)
            if len(images) == max:
                return images
    return images
def show_filtered_dir():
    '''
    Use the files in a given folder as lookup keys: find the files with the
    same names inside the class folders a-j and display them as one mosaic.
    :return:
    '''
    array = make_array("output/notMNIST_large/A11")
    result = gallery(array)
    plt.imshow(result)
    plt.show()
#show_filtered_dir()
def showImageInAtoJByApath():
    '''
    Given a single file name, find that file inside each of the class
    folders a-j and display the matches as one mosaic.
    :return:
    '''
    result = gallery(show_images("RGV2aWwgQm9sZC50dGY=.png"))
    plt.imshow(result)
    plt.show()
#showImageInAtoJByApath()
def delFiles(files):
    """Delete every path in *files* via delFile()."""
    for target in files:
        delFile(file=target)
def delFile(file):
    """Remove *file* if it exists; otherwise print a diagnostic message."""
    if not os.path.exists(file):
        print('no such file:%s' % file)
        return
    os.remove(file)
#delFile("output/notMNIST_large/A12/a2Fua2FuYSBLLnR0Zg==.png")
def delFileByIndexFolder(indexFolder):
    '''
    Treat *indexFolder* as an "index folder": every file inside it marks a
    file to delete. Each file in indexFolder has counterparts in the a-j
    class folders, because they all belong to the same font, and fonts of
    the same family tend to share the same rendering defects for the
    letters a-j (at worst a human cannot read them as letters at all).
    This task therefore removes such "broken" fonts (some images merely
    hint at the letter — e.g. the digits 1-10 standing in for a-j — but
    visually the digit 1 is just a 1; we do not guess at meaning beyond
    what is visible).
    :param indexFolder:
    :return:
    '''
    pics = recognize(indexFolder, 65535)
    print("file size:(%s)" % len(pics))  # historically printed 18340
    delFiles(pics)
#delFileByIndexFolder("output/notMNIST_large/A11")
| [
"matplotlib.pyplot.imshow",
"os.path.exists",
"os.listdir",
"PIL.Image.open",
"PIL.Image._show",
"numpy.asarray",
"os.remove",
"matplotlib.pyplot.show"
] | [((397, 415), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (407, 415), True, 'import numpy as np\n'), ((451, 467), 'PIL.Image.open', 'Image.open', (['path'], {}), '(path)\n', (461, 467), False, 'from PIL import Image\n'), ((472, 490), 'PIL.Image._show', 'Image._show', (['image'], {}), '(image)\n', (483, 490), False, 'from PIL import Image\n'), ((1056, 1074), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (1066, 1074), True, 'import numpy as np\n'), ((1132, 1152), 'os.listdir', 'os.listdir', (['png_path'], {}), '(png_path)\n', (1142, 1152), False, 'import os\n'), ((2281, 2299), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (2291, 2299), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2314), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2312, 2314), True, 'import matplotlib.pyplot as plt\n'), ((2497, 2515), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (2507, 2515), True, 'import matplotlib.pyplot as plt\n'), ((2520, 2530), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2528, 2530), True, 'import matplotlib.pyplot as plt\n'), ((2658, 2678), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2672, 2678), False, 'import os\n'), ((2688, 2703), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (2697, 2703), False, 'import os\n'), ((344, 368), 'PIL.Image.open', 'Image.open', (['findInFolder'], {}), '(findInFolder)\n', (354, 368), False, 'from PIL import Image\n'), ((1012, 1027), 'PIL.Image.open', 'Image.open', (['pic'], {}), '(pic)\n', (1022, 1027), False, 'from PIL import Image\n')] |
from sqlalchemy import Column, String, Integer, Date, literal
try:
from db.driver import Base, Session, engine
except ModuleNotFoundError:
from puzzle.db.driver import Base, Session, engine
class SolutionModel:
    """Persists puzzle solutions via SQLAlchemy, skipping duplicate rows."""
    def __init__(self, **kwargs):
        """Ensure tables exist, open a session, and capture the solution.

        Expects ``N`` (board size), ``solution_number`` and ``positions``
        in *kwargs*.
        """
        Base.metadata.create_all(engine)
        self.session = Session()
        self.size = kwargs['N']
        self.solution_number = kwargs['solution_number']
        self.positions = kwargs['positions']
    def exists(self):
        """Return a truthy value if an identical solution row is already stored."""
        q = self.session.query(Solution).filter(
            Solution.size == self.size,
            Solution.number == self.solution_number,
            Solution.positions == self.positions
        )
        # SELECT literal(True) WHERE EXISTS(q): scalar() yields True or None.
        return self.session.query(literal(True)).filter(q.exists()).scalar()
    def save(self):
        """Insert the solution unless it already exists, then close the session."""
        if not self.exists():
            solution = Solution(self.size, self.solution_number, self.positions)
            self.session.add(solution)
            self.session.commit()
        self.session.close()
class Solution(Base):
    """ORM row of the 'solutions' table: one stored solution."""
    __tablename__ = 'solutions'
    id = Column(Integer, primary_key=True)
    size = Column(Integer)  # board size (N)
    number = Column(Integer)  # solution number within that size
    positions = Column(String)  # serialized positions string
    def __init__(self, size, number, positions):
        """Create a row for one solution."""
        self.size = size
        self.number = number
        self.positions = positions
| [
"puzzle.db.driver.Session",
"puzzle.db.driver.Base.metadata.create_all",
"sqlalchemy.Column",
"sqlalchemy.literal"
] | [((1066, 1099), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1072, 1099), False, 'from sqlalchemy import Column, String, Integer, Date, literal\n'), ((1111, 1126), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1117, 1126), False, 'from sqlalchemy import Column, String, Integer, Date, literal\n'), ((1140, 1155), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1146, 1155), False, 'from sqlalchemy import Column, String, Integer, Date, literal\n'), ((1172, 1186), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (1178, 1186), False, 'from sqlalchemy import Column, String, Integer, Date, literal\n'), ((264, 296), 'puzzle.db.driver.Base.metadata.create_all', 'Base.metadata.create_all', (['engine'], {}), '(engine)\n', (288, 296), False, 'from puzzle.db.driver import Base, Session, engine\n'), ((320, 329), 'puzzle.db.driver.Session', 'Session', ([], {}), '()\n', (327, 329), False, 'from puzzle.db.driver import Base, Session, engine\n'), ((723, 736), 'sqlalchemy.literal', 'literal', (['(True)'], {}), '(True)\n', (730, 736), False, 'from sqlalchemy import Column, String, Integer, Date, literal\n')] |
#!/usr/bin/env python
import io
import json
from copy import deepcopy
from on_demand_eval.pipeline import Pipeline, Action, ACTION_TYPE, Table, EfficientTable
from on_demand_eval.pipeline import ActionEncoder, ActionDecoder
from on_demand_eval.rule_dg import RuleDependencyGraph
from on_demand_eval.flow_space import FlowSpace
class SFPSpeaker():
    """
    An implementation of SFP speaker.

    Holds a forwarding pipeline plus per-peer flow-space subscriptions, and
    can derive an on-demand (ODI) pipeline for a given packet query.
    """
    def __init__(self, peer='0.0.0.0'):
        # subs maps peer -> subscribed FlowSpace.
        self.subs = {}
        self.peer = peer
    def config_pipeline(self, pipeline):
        """Attach an already-built Pipeline object."""
        self.pipeline = pipeline
    def config_pipeline_from_file(self, filename, table_cls=EfficientTable):
        """Load the pipeline from JSON, given a path or an open file object."""
        if isinstance(filename, io.IOBase):
            self.pipeline = Pipeline.from_dict(json.load(filename, cls=ActionDecoder), cls=table_cls)
            # NOTE(review): only this branch returns the pipeline; the
            # path branch below returns None.
            return self.pipeline
        with open(filename, 'r') as f:
            self.pipeline = Pipeline.from_dict(json.load(f, cls=ActionDecoder), cls=table_cls)
    def dump_pipeline(self):
        """Print the pipeline as JSON to stdout."""
        print(json.dumps(self.pipeline, cls=ActionEncoder))
    def dump_pipeline_to_file(self, filename):
        """Serialize the pipeline as JSON to a path or an open file object."""
        if isinstance(filename, io.IOBase):
            json.dump(self.pipeline, filename, cls=ActionEncoder)
            return
        with open(filename, 'w') as f:
            json.dump(self.pipeline, f, cls=ActionEncoder)
    def receive_sub(self, flow_space, peer):
        """Record *peer*'s subscribed flow space (overwriting any previous one)."""
        self.subs[peer] = flow_space
    def max_odi(self, pkt_query, peer):
        """Build an on-demand pipeline for *pkt_query*, restricted to *peer*'s
        subscribed flow space (empty FlowSpace if the peer never subscribed)."""
        flow_space = self.subs.get(peer, FlowSpace())
        # Per-table indices of the rules the query actually matched.
        _, execution_idx = self.pipeline.lookup(pkt_query, ret_index=True)
        odi_pipeline = Pipeline(self.pipeline.layout)
        for t, i in execution_idx:
            table = self.pipeline.tables[t]
            rule = table.rules[i]
            odi_table = odi_pipeline.tables[t]
            # Copy the matched rule itself into the ODI pipeline.
            odi_table.insert(deepcopy(rule))
            # Rules preceding it in the dependency graph projected onto the
            # peer's flow space are inserted as ON_DEMAND placeholders.
            rDAG = table.project(flow_space)
            for j in rDAG.predecessors(rule.id):
                od_rule = rDAG.node[j]['rule']
                odi_table.insert(od_rule.modify_action(
                    Action(action=ACTION_TYPE.ON_DEMAND)))
        return odi_pipeline
| [
"on_demand_eval.pipeline.Action",
"on_demand_eval.flow_space.FlowSpace",
"json.dumps",
"on_demand_eval.pipeline.Pipeline",
"copy.deepcopy",
"json.load",
"json.dump"
] | [((1601, 1631), 'on_demand_eval.pipeline.Pipeline', 'Pipeline', (['self.pipeline.layout'], {}), '(self.pipeline.layout)\n', (1609, 1631), False, 'from on_demand_eval.pipeline import Pipeline, Action, ACTION_TYPE, Table, EfficientTable\n'), ((1003, 1047), 'json.dumps', 'json.dumps', (['self.pipeline'], {'cls': 'ActionEncoder'}), '(self.pipeline, cls=ActionEncoder)\n', (1013, 1047), False, 'import json\n'), ((1153, 1206), 'json.dump', 'json.dump', (['self.pipeline', 'filename'], {'cls': 'ActionEncoder'}), '(self.pipeline, filename, cls=ActionEncoder)\n', (1162, 1206), False, 'import json\n'), ((1277, 1323), 'json.dump', 'json.dump', (['self.pipeline', 'f'], {'cls': 'ActionEncoder'}), '(self.pipeline, f, cls=ActionEncoder)\n', (1286, 1323), False, 'import json\n'), ((1489, 1500), 'on_demand_eval.flow_space.FlowSpace', 'FlowSpace', ([], {}), '()\n', (1498, 1500), False, 'from on_demand_eval.flow_space import FlowSpace\n'), ((737, 775), 'json.load', 'json.load', (['filename'], {'cls': 'ActionDecoder'}), '(filename, cls=ActionDecoder)\n', (746, 775), False, 'import json\n'), ((911, 942), 'json.load', 'json.load', (['f'], {'cls': 'ActionDecoder'}), '(f, cls=ActionDecoder)\n', (920, 942), False, 'import json\n'), ((1822, 1836), 'copy.deepcopy', 'deepcopy', (['rule'], {}), '(rule)\n', (1830, 1836), False, 'from copy import deepcopy\n'), ((2055, 2091), 'on_demand_eval.pipeline.Action', 'Action', ([], {'action': 'ACTION_TYPE.ON_DEMAND'}), '(action=ACTION_TYPE.ON_DEMAND)\n', (2061, 2091), False, 'from on_demand_eval.pipeline import Pipeline, Action, ACTION_TYPE, Table, EfficientTable\n')] |
import matplotlib.pyplot as plt
import rebalancer
from sp500_data_loader import load_data
import numpy as np
import itertools
import os
from multiprocessing import Process
def interpet_results(assets, rebalance_inv, bah_inv, data, condition, dir):
    """Print final values of both strategies and, when *condition* holds,
    save a price chart named ``<asset0>_<asset1>.png`` into *dir*.

    Parameters
    ----------
    assets : sequence of two ticker symbols (used only for the file name)
    rebalance_inv, bah_inv : investor objects exposing a ``history`` list
    data : mapping of ticker -> plottable price series
    condition : bool, whether to write the comparison chart
    dir : output directory prefix ending with '/'; the name shadows the
          builtin but is kept for backward compatibility

    Fix vs. the original: a ``prices`` list was computed on every call but
    was only consumed by commented-out ``rebalancer.writeResults`` calls,
    so that dead code has been removed.
    """
    print('rebalance: %f' % rebalance_inv.history[-1])
    print('b&h: %f' % bah_inv.history[-1])
    if condition:
        # Plot every series in black, hide the axes, and save the figure.
        for key in data.keys():
            plt.plot(data[key], color='black')
        plt.axis('off')
        plt.savefig(dir + assets[0] + '_' + assets[1] + '.png')
        plt.clf()
def chunkIt(seq, num):
    """Partition *seq* into *num* consecutive chunks of near-equal length.

    Chunk boundaries come from truncating a float cursor, so lengths may
    differ by one; an empty *seq* yields an empty list.
    """
    size = len(seq) / float(num)
    pieces = []
    cursor = 0.0
    while cursor < len(seq):
        start, stop = int(cursor), int(cursor + size)
        pieces.append(seq[start:stop])
        cursor += size
    return pieces
def process_stock_list(stock_list):
    """Simulate rebalancing vs. buy & hold for each stock pair and save a
    chart whenever one strategy beats the other by more than 50%."""
    start_date = '2010-01-01'
    end_date = '2017-12-12'
    for stock in stock_list:
        stock = list(stock)
        print('simulating: ' + str(stock))
        dir_reb = 'stock_results_50_perc_reb/'
        dir_bah = 'stock_results_50_perc_bah/'
        if not os.path.isdir(dir_reb):
            os.makedirs(dir_reb)
        if not os.path.isdir(dir_bah):
            os.makedirs(dir_bah)
        # Skip pairs already processed: either ordering, in either result dir.
        file = stock[0] + '_' + stock[1] + '.png'
        file2 = stock[1] + '_' + stock[0] + '.png'
        if os.path.isfile(dir_reb + file) or os.path.isfile(dir_reb + file2) or os.path.isfile(
                dir_bah + file) or os.path.isfile(dir_bah + file2):
            continue
        df_open, df_close, df_high, df_low, df_adj_close = load_data(stock, start_date, end_date)
        # Skip pairs where either symbol returned no price data.
        i0, = np.shape(df_adj_close[stock[0]])
        i1, = np.shape(df_adj_close[stock[1]])
        if i0 == 0 or i1 == 0:
            continue
        rebalance_inv, bah_inv = rebalancer.simulate(df_adj_close, df_high, df_low, crypto=False)
        # Rebalancing beats buy & hold by >50% -> chart into dir_reb ...
        condition = (rebalance_inv.history[-1] - bah_inv.history[-1]) / bah_inv.history[-1] > 0.5
        if condition:
            interpet_results(stock, rebalance_inv, bah_inv, df_adj_close, condition, dir_reb)
        else:
            # ... otherwise buy & hold beating rebalancing by >50% -> dir_bah.
            condition = (bah_inv.history[-1] - rebalance_inv.history[-1]) / rebalance_inv.history[-1] > 0.5
            if condition:
                interpet_results(stock, rebalance_inv, bah_inv, df_adj_close, condition, dir_bah)
def main():
    """Fan all S&P 500 pair combinations out over four worker processes."""
    with open('s&p500.txt', 'r') as fd:
        stocks = list(fd.read().splitlines())
    stock_list = list(itertools.combinations(stocks, 2))
    stock_lists = chunkIt(stock_list, 4)
    processes = []
    for stock_list in stock_lists:
        print(stock_list)
        # NOTE: args is a one-element list wrapping the chunk; the idiomatic
        # spelling would be args=(stock_list,), but this works identically.
        process = Process(target=process_stock_list, args=([stock_list]))
        process.start()
        processes.append(process)
    # Wait for all workers to finish.
    for process in processes:
        process.join()
if __name__ == '__main__':
main()
| [
"matplotlib.pyplot.savefig",
"os.makedirs",
"multiprocessing.Process",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.plot",
"itertools.combinations",
"os.path.isfile",
"sp500_data_loader.load_data",
"os.path.isdir",
"rebalancer.simulate",
"matplotlib.pyplot.axis",
"numpy.shape"
] | [((682, 697), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (690, 697), True, 'import matplotlib.pyplot as plt\n'), ((706, 761), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(dir + assets[0] + '_' + assets[1] + '.png')"], {}), "(dir + assets[0] + '_' + assets[1] + '.png')\n", (717, 761), True, 'import matplotlib.pyplot as plt\n'), ((770, 779), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (777, 779), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1809), 'sp500_data_loader.load_data', 'load_data', (['stock', 'start_date', 'end_date'], {}), '(stock, start_date, end_date)\n', (1780, 1809), False, 'from sp500_data_loader import load_data\n'), ((1824, 1856), 'numpy.shape', 'np.shape', (['df_adj_close[stock[0]]'], {}), '(df_adj_close[stock[0]])\n', (1832, 1856), True, 'import numpy as np\n'), ((1871, 1903), 'numpy.shape', 'np.shape', (['df_adj_close[stock[1]]'], {}), '(df_adj_close[stock[1]])\n', (1879, 1903), True, 'import numpy as np\n'), ((1989, 2053), 'rebalancer.simulate', 'rebalancer.simulate', (['df_adj_close', 'df_high', 'df_low'], {'crypto': '(False)'}), '(df_adj_close, df_high, df_low, crypto=False)\n', (2008, 2053), False, 'import rebalancer\n'), ((2639, 2672), 'itertools.combinations', 'itertools.combinations', (['stocks', '(2)'], {}), '(stocks, 2)\n', (2661, 2672), False, 'import itertools\n'), ((2814, 2867), 'multiprocessing.Process', 'Process', ([], {'target': 'process_stock_list', 'args': '[stock_list]'}), '(target=process_stock_list, args=[stock_list])\n', (2821, 2867), False, 'from multiprocessing import Process\n'), ((639, 673), 'matplotlib.pyplot.plot', 'plt.plot', (['data[key]'], {'color': '"""black"""'}), "(data[key], color='black')\n", (647, 673), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1309), 'os.path.isdir', 'os.path.isdir', (['dir_reb'], {}), '(dir_reb)\n', (1300, 1309), False, 'import os\n'), ((1323, 1343), 'os.makedirs', 'os.makedirs', (['dir_reb'], {}), '(dir_reb)\n', (1334, 1343), 
False, 'import os\n'), ((1360, 1382), 'os.path.isdir', 'os.path.isdir', (['dir_bah'], {}), '(dir_bah)\n', (1373, 1382), False, 'import os\n'), ((1396, 1416), 'os.makedirs', 'os.makedirs', (['dir_bah'], {}), '(dir_bah)\n', (1407, 1416), False, 'import os\n'), ((1530, 1560), 'os.path.isfile', 'os.path.isfile', (['(dir_reb + file)'], {}), '(dir_reb + file)\n', (1544, 1560), False, 'import os\n'), ((1564, 1595), 'os.path.isfile', 'os.path.isfile', (['(dir_reb + file2)'], {}), '(dir_reb + file2)\n', (1578, 1595), False, 'import os\n'), ((1599, 1629), 'os.path.isfile', 'os.path.isfile', (['(dir_bah + file)'], {}), '(dir_bah + file)\n', (1613, 1629), False, 'import os\n'), ((1658, 1689), 'os.path.isfile', 'os.path.isfile', (['(dir_bah + file2)'], {}), '(dir_bah + file2)\n', (1672, 1689), False, 'import os\n')] |
"""
===============
GTK Spreadsheet
===============
Example of embedding Matplotlib in an application and interacting with a
treeview to store data. Double click on an entry to update plot data.
"""
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk
from matplotlib.backends.backend_gtk3agg import FigureCanvas # or gtk3cairo.
from numpy.random import random
from matplotlib.figure import Figure
class DataManager(Gtk.Window):
    """Main window: a spreadsheet-like TreeView of random data plus a
    Matplotlib plot.  Double-clicking a row plots that row's values."""
    # Dimensions of the random data table shown in the TreeView.
    num_rows, num_cols = 20, 10
    # Class-level data shared by all instances (one window in this demo).
    data = random((num_rows, num_cols))
    def __init__(self):
        """Build the window: label, scrolled TreeView, and embedded canvas."""
        super().__init__()
        self.set_default_size(600, 600)
        # Quit the GTK main loop when the window is closed.
        self.connect('destroy', lambda win: Gtk.main_quit())
        self.set_title('GtkListStore demo')
        self.set_border_width(8)
        vbox = Gtk.VBox(homogeneous=False, spacing=8)
        self.add(vbox)
        label = Gtk.Label(label='Double click a row to plot the data')
        vbox.pack_start(label, False, False, 0)
        sw = Gtk.ScrolledWindow()
        sw.set_shadow_type(Gtk.ShadowType.ETCHED_IN)
        sw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        vbox.pack_start(sw, True, True, 0)
        model = self.create_model()
        self.treeview = Gtk.TreeView(model=model)
        # Matplotlib figure embedded as a GTK widget.
        fig = Figure(figsize=(6, 4))
        self.canvas = FigureCanvas(fig) # a Gtk.DrawingArea
        vbox.pack_start(self.canvas, True, True, 0)
        ax = fig.add_subplot(111)
        self.line, = ax.plot(self.data[0, :], 'go') # plot the first row
        # Row activation (double click / Enter) re-plots that row.
        self.treeview.connect('row-activated', self.plot_row)
        sw.add(self.treeview)
        self.add_columns()
        self.add_events(Gdk.EventMask.BUTTON_PRESS_MASK |
                        Gdk.EventMask.KEY_PRESS_MASK |
                        Gdk.EventMask.KEY_RELEASE_MASK)
    def plot_row(self, treeview, path, view_column):
        """Replace the plotted y-data with the values of the activated row."""
        ind, = path  # get the index into data
        points = self.data[ind, :]
        self.line.set_ydata(points)
        self.canvas.draw()
    def add_columns(self):
        """Add one text column per data column to the TreeView."""
        for i in range(self.num_cols):
            column = Gtk.TreeViewColumn(str(i), Gtk.CellRendererText(), text=i)
            self.treeview.append_column(column)
    def create_model(self):
        """Build a Gtk.ListStore of floats mirroring ``self.data``."""
        types = [float] * self.num_cols
        store = Gtk.ListStore(*types)
        for row in self.data:
            store.append(tuple(row))
        return store
# Build the window, show every widget, and enter the GTK main loop
# (blocks until the window is destroyed).
manager = DataManager()
manager.show_all()
Gtk.main()
| [
"gi.repository.Gtk.TreeView",
"gi.repository.Gtk.main_quit",
"numpy.random.random",
"matplotlib.figure.Figure",
"gi.repository.Gtk.ListStore",
"gi.require_version",
"gi.repository.Gtk.Label",
"matplotlib.backends.backend_gtk3agg.FigureCanvas",
"gi.repository.Gtk.CellRendererText",
"gi.repository.G... | [((212, 244), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (230, 244), False, 'import gi\n'), ((245, 277), 'gi.require_version', 'gi.require_version', (['"""Gdk"""', '"""3.0"""'], {}), "('Gdk', '3.0')\n", (263, 277), False, 'import gi\n'), ((2499, 2509), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (2507, 2509), False, 'from gi.repository import Gtk, Gdk\n'), ((539, 567), 'numpy.random.random', 'random', (['(num_rows, num_cols)'], {}), '((num_rows, num_cols))\n', (545, 567), False, 'from numpy.random import random\n'), ((815, 853), 'gi.repository.Gtk.VBox', 'Gtk.VBox', ([], {'homogeneous': '(False)', 'spacing': '(8)'}), '(homogeneous=False, spacing=8)\n', (823, 853), False, 'from gi.repository import Gtk, Gdk\n'), ((894, 948), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': '"""Double click a row to plot the data"""'}), "(label='Double click a row to plot the data')\n", (903, 948), False, 'from gi.repository import Gtk, Gdk\n'), ((1012, 1032), 'gi.repository.Gtk.ScrolledWindow', 'Gtk.ScrolledWindow', ([], {}), '()\n', (1030, 1032), False, 'from gi.repository import Gtk, Gdk\n'), ((1261, 1286), 'gi.repository.Gtk.TreeView', 'Gtk.TreeView', ([], {'model': 'model'}), '(model=model)\n', (1273, 1286), False, 'from gi.repository import Gtk, Gdk\n'), ((1329, 1351), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(6, 4)'}), '(figsize=(6, 4))\n', (1335, 1351), False, 'from matplotlib.figure import Figure\n'), ((1375, 1392), 'matplotlib.backends.backend_gtk3agg.FigureCanvas', 'FigureCanvas', (['fig'], {}), '(fig)\n', (1387, 1392), False, 'from matplotlib.backends.backend_gtk3agg import FigureCanvas\n'), ((2344, 2365), 'gi.repository.Gtk.ListStore', 'Gtk.ListStore', (['*types'], {}), '(*types)\n', (2357, 2365), False, 'from gi.repository import Gtk, Gdk\n'), ((704, 719), 'gi.repository.Gtk.main_quit', 'Gtk.main_quit', ([], {}), '()\n', (717, 719), False, 'from gi.repository 
import Gtk, Gdk\n'), ((2179, 2201), 'gi.repository.Gtk.CellRendererText', 'Gtk.CellRendererText', ([], {}), '()\n', (2199, 2201), False, 'from gi.repository import Gtk, Gdk\n')] |
# coding: utf-8
import cv2
import os,sys
import time
import os.path
import math
sys.path.insert(0, '../facealign')
sys.path.insert(0, '../util')
from fileutil import *
from MtcnnPycaffe import MtcnnDetector, draw_and_show
from alignment import *
from logfile import *
import json
import argparse
def IoU(bbox1, bbox2):
    """Intersection-over-Union of two axis-aligned boxes.

    Parameters
    ----------
    bbox1, bbox2 : sequence of 4 numbers
        Boxes in ``[x, y, width, height]`` form.

    Returns
    -------
    float
        Overlap area divided by union area, in ``[0, 1]``.  Returns 0.0
        for disjoint boxes.  (The original version did not clamp the
        intersection extents, so two negative lengths could multiply
        into a spurious positive overlap for non-intersecting boxes.)
    """
    # Intersection rectangle corners.
    ix0 = max(bbox1[0], bbox2[0])
    iy0 = max(bbox1[1], bbox2[1])
    ix1 = min(bbox1[0] + bbox1[2], bbox2[0] + bbox2[2])
    iy1 = min(bbox1[1] + bbox1[3], bbox2[1] + bbox2[3])
    # Clamp to zero so disjoint boxes yield zero intersection area.
    iw = max(0, ix1 - ix0)
    ih = max(0, iy1 - iy0)
    overlap = iw * ih
    union = bbox1[2] * bbox1[3] + bbox2[2] * bbox2[3] - overlap
    return overlap / union
def load_bbox_file(path, dict):
    """Parse a facescrub bbox file into *dict* and return it.

    Each data row is tab-separated; field 0 is the person name, field 2
    the face id, and field 4 a "x0,y0,x1,y1" box.  Entries are stored as
    ``dict["<name>_<face_id>"] = [x0, y0, width, height]``.  The first
    line (the header) is skipped.
    """
    lines = read_lines(path)
    for line in lines[1:]:  # skip the header row
        fields = line.split('\t')
        name, face_id = fields[0], fields[2]
        x0, y0, x1, y1 = (int(v) for v in fields[4].split(','))
        dict[name + '_' + face_id] = [x0, y0, x1 - x0, y1 - y0]
    return dict
class FacescrubAlignVisitor(object):
    """
    Megaface/facescrub alignment visitor.

    For each image path it detects faces with MTCNN, selects the face whose
    box best overlaps the labelled bbox, aligns it to 96x112, and writes the
    chip to the mirrored destination path.  Failed paths are appended to the
    current log file via ``log_write``.
    """
    def __init__(self,
                 src_prefix,
                 dst_prefix,
                 detector,
                 bbox,
                 skip_exist = False,
                 transform = 'sililarity',
                 pading = 0):
        # src_prefix/dst_prefix: directory roots used to mirror paths.
        self.src_prefix = src_prefix
        self.dst_prefix = dst_prefix
        self.skip_exist = skip_exist
        self.detector = detector
        # bbox: mapping "<name>_<face_id>" -> [x, y, w, h] ground-truth box.
        self.bbox = bbox
        self.transform = transform
        self.pading = pading
        # statistic
        self.done_count = 0
        self.fail_count = 0
    def process(self, path):
        """Align one image; returns True on success/skip, False on failure."""
        if not is_image_file(path):
            return True
        dst_path = translate_path(self.src_prefix, self.dst_prefix, path)
        if self.skip_exist and os.path.exists(dst_path):
            # print('skip:%s' % path)
            return True
        #print('%s -> %s' % (path, dst_path))
        img = cv2_imread(path)
        if img is None:
            print('load error:%s'%(path))
            log_write(path)
            self.fail_count += 1
            return False
        #print('run:%s/%s'%(subdir,filename))
        try:
            boxes, points = self.detector.detect_face(img)
        except:
            print('detect error:%s'%(path))
            log_write(path)
            self.fail_count += 1
            return False
        if points is None or len(points) == 0:
            log_write(path)
            self.fail_count += 1
            return False
        # find the one largest IoU
        dir, fname = os.path.split(path)
        key, _ = os.path.splitext(fname)
        target_box = self.bbox[key]
        max_idx = 0
        max_iou = 0
        for i, box in enumerate(boxes):
            # Detector boxes are [x0, y0, x1, y1]; convert to [x, y, w, h].
            box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
            iou = IoU(box, target_box)
            if iou > max_iou:
                max_iou = iou
                max_idx = i
        # check iou
        if max_iou < 0.3:
            #cv2.rectangle(img, (target_box[0],target_box[1]),
            #    (target_box[0] + target_box[2], target_box[1] + target_box[3]), (0,255,0), 2)
            #draw_and_show(img, boxes, points )
            #ch = cv2.waitKey(0)
            # NOTE(review): ch is hard-wired to 0, so the low-IoU branch never
            # fails here and alignment proceeds anyway — looks like a debug
            # leftover; confirm whether low-IoU images should be rejected.
            ch = 0
            if ch == 27:
                log_write(path)
                self.fail_count += 1
                return False
        max_chip = align_to_96x112(img, points[max_idx], self.pading, trans_type = self.transform)
        #draw_and_show(img,boxes, points )
        #cv2.imshow('chip', max_chip)
        #cv2.waitKey(0)
        makedirs(dst_path)
        ret = cv2_imwrite(dst_path, max_chip)
        if ret == False:
            print('imwrite error:%s'%(path))
            log_write(path)
            self.fail_count += 1
            return False
        # report progress every 100 processed images
        if self.done_count % 100 == 0:
            print('done:%05d, fail:%05d img:%s'%(self.done_count, self.fail_count, path))
        self.done_count += 1
        return True
def align_facescrub_uncropped(src_dir, dst_dir, templatelists_path, dict, gpu_id=0):
    """Align every uncropped facescrub image listed in the template JSON.

    The template file is a JSON object whose 'path' entry lists relative
    image paths.  Paths that fail detection are appended to
    '<templatelists_path>.detect-fail.txt'.
    """
    with open(templatelists_path, 'r') as f:
        rel_paths = json.load(f)['path']
    full_paths = [os.path.join(src_dir, rel) for rel in rel_paths]
    detector = MtcnnDetector(minsize=36, gpu_id=gpu_id)
    visitor = FacescrubAlignVisitor(src_dir, dst_dir, detector, dict)
    log_open(templatelists_path + '.detect-fail.txt')
    for full_path in full_paths:
        visitor.process(full_path)
    log_close()
def align_facescrub_fail(src_dir, dst_dir, templatelists_path, dict, gpu_id=0):
    """Retry alignment on the images that failed the first pass.

    Reads '<templatelists_path>.detect-fail.txt' and logs remaining
    failures to '<templatelists_path>.final-fail.txt'.
    """
    detector = MtcnnDetector(minsize=36, gpu_id=gpu_id)
    visitor = FacescrubAlignVisitor(src_dir, dst_dir, detector, dict)
    log_open(templatelists_path + '.final-fail.txt')
    list_walker(templatelists_path + '.detect-fail.txt', visitor)
    log_close()
def align_facescrub_fail_json(src_dir, dst_dir, templatelists_path, dict, json_path):
    """Align the final-fail images using hand-labelled landmarks.

    *json_path* maps "<name>_<face_id>" to interleaved landmark
    coordinates [x0, y0, ..., x4, y4]; they are reordered to the
    x-then-y layout expected by ``align_to_96x112``.
    """
    with open(json_path, 'r') as f:
        labelled = json.load(f)
    print(labelled)
    for path in read_lines(templatelists_path + '.final-fail.txt'):
        dst_path = translate_path(src_dir, dst_dir, path)
        _, fname = os.path.split(path)
        key, _ = os.path.splitext(fname)
        print(key)
        target_box = dict[key]  # raises KeyError for unknown images
        img = cv2_imread(path)
        point = labelled[key]
        # interleaved [x0,y0,...,x4,y4] -> [x0..x4, y0..y4]
        xxyy = [point[2 * i] for i in range(5)] + [point[2 * i + 1] for i in range(5)]
        print(xxyy)
        chip = align_to_96x112(img, xxyy)
        makedirs(dst_path)
        cv2_imwrite(dst_path, chip)
def detect_facescrub_landmarks(src_dir, templatelists_path, bbox, detector):
    """Detect 5-point landmarks for every image in the template list.

    For each image the detected face whose box best overlaps the
    labelled *bbox* entry is kept.

    Parameters
    ----------
    src_dir : str
        Root directory of the images.
    templatelists_path : str
        JSON file whose 'path' entry lists relative image paths.
    bbox : dict
        Mapping "<name>_<face_id>" -> [x, y, w, h] ground-truth box.
    detector : MtcnnDetector
        Face detector exposing ``detect_face(img) -> (boxes, points)``.

    Returns
    -------
    dict
        Mapping "<name>_<face_id>" -> landmark list of the best face.
    """
    with open(templatelists_path, 'r') as f:
        rel_list = json.load(f)['path']
    landmarks = {}
    for rel_path in rel_list:
        path = os.path.join(src_dir, rel_path)
        img = cv2_imread(path)
        try:
            boxes, points = detector.detect_face(img)
        except Exception:
            # BUGFIX: the original bare `except` fell through without
            # `continue`, reusing stale `boxes`/`points` from the previous
            # iteration (or raising NameError on the first one).
            print('detect error:%s' % (path))
            continue
        if points is None or len(points) == 0:
            continue
        # Select the detection with the largest IoU against the labelled box.
        dir, fname = os.path.split(path)
        key, _ = os.path.splitext(fname)
        target_box = bbox[key]
        max_idx = 0
        max_iou = 0
        for i, box in enumerate(boxes):
            # Detector boxes are [x0, y0, x1, y1]; convert to [x, y, w, h].
            box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
            iou = IoU(box, target_box)
            if iou > max_iou:
                max_iou = iou
                max_idx = i
        landmarks[key] = points[max_idx].tolist()
    return landmarks
def correct_facescrub_json(src_dir, dst_dir, dict, json_path):
    """Interactively review hand-labelled landmarks.

    For every entry of the labelled-landmark JSON this shows the image
    with its labelled box and points and the resulting aligned chip,
    waiting for a key press between images.  The actual chip write is
    commented out — this is a visual verification tool, not a batch job.
    """
    # load the hand-labelled landmark json
    with open(json_path, 'r') as f:
        data = json.load(f)
    print(data)
    for key, value in data.items():
        # key has the form "<name>_<image_id>"
        name, image_id = key.split('_')
        path = os.path.join(src_dir,name+'/'+key+'.jpg')
        dst_path = translate_path(src_dir, dst_dir, path)
        target_box = dict[key]
        img = cv2_imread(path)
        point = data[key]
        # interleaved [x0,y0,...,x4,y4] -> [x0..x4, y0..y4]
        xxyy = []
        for i in range(5):
            xxyy.append(point[i*2])
        for i in range(5):
            xxyy.append(point[i*2+1])
        print(xxyy)
        print(key)
        max_chip = align_to_96x112(img, xxyy)
        makedirs(dst_path)
        #cv2_imwrite(dst_path, max_chip)
        # Visual check: original image with box/landmarks, plus the chip.
        draw_and_show(img, [target_box], [xxyy] )
        cv2.imshow('chip', max_chip)
        ch = cv2.waitKey(0)
def merge_landmarks(labeled_json, detect_json, dst_json):
    """Merge hand-labelled landmarks into the detected-landmark JSON.

    Labelled points are stored interleaved [x0, y0, ..., x4, y4]; they
    are rewritten to the x-then-y layout [x0..x4, y0..y4] used by the
    detector output.  Labelled entries override detected ones.  The
    merged mapping is written to *dst_json* and its size is printed.
    """
    with open(labeled_json, 'r') as f:
        labelled = json.load(f)
    with open(detect_json, 'r') as f:
        merged = json.load(f)
    for key, point in labelled.items():
        xs = [point[2 * i] for i in range(5)]
        ys = [point[2 * i + 1] for i in range(5)]
        merged[key] = xs + ys
    with open(dst_json, 'w') as f:
        f.write(json.dumps(merged))
    print(len(merged))
def align_facescrub_by_landmark(src_dir, dst_dir, templatelists_path, landmarks_path):
    """Align every listed image using a precomputed landmark mapping.

    *templatelists_path* is a JSON object whose 'path' entry lists
    relative image paths; *landmarks_path* maps "<name>_<face_id>" to
    the 10-value landmark layout expected by ``align_to_96x112``.
    """
    with open(templatelists_path, 'r') as f:
        rel_paths = json.load(f)['path']
    with open(landmarks_path, 'r') as f:
        landmarks = json.load(f)
    for rel_path in rel_paths:
        src_path = os.path.join(src_dir, rel_path)
        img = cv2_imread(src_path)
        dst_path = translate_path(src_dir, dst_dir, src_path)
        _, fname = os.path.split(src_path)
        key, _ = os.path.splitext(fname)
        chip = align_to_96x112(img, landmarks[key])
        makedirs(dst_path)
        cv2_imwrite(dst_path, chip)
'''
wrong label:Richard Madden_48806
'''
if __name__ == '__main__':
    # Script entry point: align facescrub images using pre-merged landmarks.
    # Three positional arguments are required, so argv must hold at least
    # 4 entries.  (The original `< 3` check let a 2-argument invocation
    # through and then crashed on sys.argv[3].)
    if len(sys.argv) < 4:
        print('facescrub_image_dir aligned_dir features_list_json_path')
        exit()
    src_dir = sys.argv[1]
    dst_dir = sys.argv[2]
    templatelists_path = sys.argv[3]
    merged_json = './facescrub_80_landmark5.json'
    align_facescrub_by_landmark(src_dir, dst_dir, templatelists_path, merged_json)
| [
"MtcnnPycaffe.MtcnnDetector",
"os.path.exists",
"sys.path.insert",
"json.dumps",
"os.path.splitext",
"os.path.join",
"os.path.split",
"cv2.imshow",
"json.load",
"MtcnnPycaffe.draw_and_show",
"cv2.waitKey"
] | [((81, 115), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../facealign"""'], {}), "(0, '../facealign')\n", (96, 115), False, 'import os, sys\n'), ((116, 145), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../util"""'], {}), "(0, '../util')\n", (131, 145), False, 'import os, sys\n'), ((4701, 4741), 'MtcnnPycaffe.MtcnnDetector', 'MtcnnDetector', ([], {'minsize': '(36)', 'gpu_id': 'gpu_id'}), '(minsize=36, gpu_id=gpu_id)\n', (4714, 4741), False, 'from MtcnnPycaffe import MtcnnDetector, draw_and_show\n'), ((5252, 5292), 'MtcnnPycaffe.MtcnnDetector', 'MtcnnDetector', ([], {'minsize': '(36)', 'gpu_id': 'gpu_id'}), '(minsize=36, gpu_id=gpu_id)\n', (5265, 5292), False, 'from MtcnnPycaffe import MtcnnDetector, draw_and_show\n'), ((2904, 2923), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (2917, 2923), False, 'import os, sys\n'), ((2941, 2964), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (2957, 2964), False, 'import os, sys\n'), ((4544, 4556), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4553, 4556), False, 'import json\n'), ((4622, 4646), 'os.path.join', 'os.path.join', (['src_dir', 'p'], {}), '(src_dir, p)\n', (4634, 4646), False, 'import os, sys\n'), ((5721, 5733), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5730, 5733), False, 'import json\n'), ((5913, 5932), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (5926, 5932), False, 'import os, sys\n'), ((5950, 5973), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (5966, 5973), False, 'import os, sys\n'), ((6617, 6629), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6626, 6629), False, 'import json\n'), ((6745, 6776), 'os.path.join', 'os.path.join', (['src_dir', 'rel_path'], {}), '(src_dir, rel_path)\n', (6757, 6776), False, 'import os, sys\n'), ((7075, 7094), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (7088, 7094), False, 'import os, sys\n'), ((7112, 7135), 'os.path.splitext', 'os.path.splitext', 
(['fname'], {}), '(fname)\n', (7128, 7135), False, 'import os, sys\n'), ((7669, 7681), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7678, 7681), False, 'import json\n'), ((7789, 7837), 'os.path.join', 'os.path.join', (['src_dir', "(name + '/' + key + '.jpg')"], {}), "(src_dir, name + '/' + key + '.jpg')\n", (7801, 7837), False, 'import os, sys\n'), ((8284, 8324), 'MtcnnPycaffe.draw_and_show', 'draw_and_show', (['img', '[target_box]', '[xxyy]'], {}), '(img, [target_box], [xxyy])\n', (8297, 8324), False, 'from MtcnnPycaffe import MtcnnDetector, draw_and_show\n'), ((8334, 8362), 'cv2.imshow', 'cv2.imshow', (['"""chip"""', 'max_chip'], {}), "('chip', max_chip)\n", (8344, 8362), False, 'import cv2\n'), ((8376, 8390), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (8387, 8390), False, 'import cv2\n'), ((8522, 8534), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8531, 8534), False, 'import json\n'), ((8611, 8623), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8620, 8623), False, 'import json\n'), ((9153, 9165), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9162, 9165), False, 'import json\n'), ((9271, 9283), 'json.load', 'json.load', (['f'], {}), '(f)\n', (9280, 9283), False, 'import json\n'), ((9360, 9391), 'os.path.join', 'os.path.join', (['src_dir', 'rel_path'], {}), '(src_dir, rel_path)\n', (9372, 9391), False, 'import os, sys\n'), ((9502, 9521), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (9515, 9521), False, 'import os, sys\n'), ((9539, 9562), 'os.path.splitext', 'os.path.splitext', (['fname'], {}), '(fname)\n', (9555, 9562), False, 'import os, sys\n'), ((2102, 2126), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (2116, 2126), False, 'import os, sys\n'), ((8934, 8955), 'json.dumps', 'json.dumps', (['landmarks'], {}), '(landmarks)\n', (8944, 8955), False, 'import json\n')] |
#!/usr/bin/env python3
"""Decrypt the data needed for the next level"""
import base64
import binascii
import hashlib
import json
import numpy
import sys
from Crypto.Cipher import AES
# Reduction polynomial of the GCM field GF(2^128): x^128 + x^7 + x^2 + x + 1
GCM_POLY = (1 << 128) + (1 << 7) + (1 << 2) + (1 << 1) + (1 << 0)
def invert_poly(p):
    """Return the multiplicative inverse of *p* in GF(2^128).

    Binary extended Euclidean algorithm over GF(2) polynomials,
    maintaining m and l such that m * p + l * GCM_POLY = current f.
    """
    assert p > 0
    assert p.bit_length() <= 128
    e, f = GCM_POLY, p
    l, m = 0, 1
    while f != 1:
        shift = f.bit_length() - e.bit_length()
        if shift < 0:
            # Keep f as the longer polynomial before reducing.
            e, f = f, e
            l, m = m, l
            shift = -shift
        f ^= e << shift
        m ^= l << shift
    return m
def multiply_poly(x, y):
    """Carry-less multiply of *x* and *y*, reduced modulo GCM_POLY."""
    assert x.bit_length() <= 128
    assert y.bit_length() <= 128
    acc = 0
    for bit in range(128):
        if (y >> bit) & 1:
            acc ^= x
        x <<= 1
        if x.bit_length() > 128:
            x ^= GCM_POLY
    return acc
# Self-tests of the field arithmetic.
assert multiply_poly(2, 1 << 127) == 0x87
assert invert_poly(1) == 1
assert invert_poly(2) == 0x80000000000000000000000000000043
assert multiply_poly(3, invert_poly(3)) == 1
# Command-line: next-level output path, current data.json, and one or more
# passcode files (shares of the secret).
if len(sys.argv) < 4:
    print("Usage: {} path/to/next/data.json path/to/current/data.json path/to/passcode/1 ...".format(sys.argv[0]))
    sys.exit(1)
with open(sys.argv[2]) as data_file:
    jsondata = json.load(data_file)
# Secret-sharing points (x, y) recovered from the decrypted shares.
ssspoints = []
for passcode_path in sys.argv[3:]:
    # Read the code found after solving an enigma
    with open(passcode_path, 'r') as pass_file:
        passcode = binascii.unhexlify(pass_file.read().strip())
    # Decrypt the encrypted shares; shares are indexed by SHA-256(passcode).
    hpass = hashlib.sha256(passcode).hexdigest()
    assert hpass in jsondata['shares'], "Invalid code!"
    iv = binascii.unhexlify(jsondata['shares'][hpass]['iv'])
    data = base64.b64decode(jsondata['shares'][hpass]['data'])
    key = passcode
    decrypted = AES.new(key, AES.MODE_CBC, iv).decrypt(data)
    # Strip and verify PKCS#7 padding.
    padlen = decrypted[-1]
    assert all(x == padlen for x in decrypted[-padlen:])
    decrypted = decrypted[:-padlen]
    # Load the new points (JSON list of {"x": int, "y": hex-string}).
    for point in json.loads(decrypted.decode('ascii')):
        # Convert y to a field element (polynomial over GF(2))
        ssspoints.append((point['x'], int(point['y'], 16)))
# Interpolate the coefficients of "y = x^2 + a * x + b" curve over GF(2^128);
# two points suffice once the x^2 term is subtracted (XORed) out.
assert len(ssspoints) >= 2
x1, y1 = ssspoints[0]
x2, y2 = ssspoints[1]
y1 ^= multiply_poly(x1, x1)
y2 ^= multiply_poly(x2, x2)
coef_a = multiply_poly(y1 ^ y2, invert_poly(x1 ^ x2))
coef_b = y1 ^ multiply_poly(coef_a, x1)
# Sanity check: every recovered share must lie on the interpolated curve.
for x, y in ssspoints:
    assert y == multiply_poly(x, x) ^ multiply_poly(coef_a, x) ^ coef_b
# The key is the value of the curve at x=0
# NOTE(review): hex(coef_b)[2:] has odd length when the top nibble is < 0x10,
# which would make unhexlify fail — presumably coef_b always fills 128 bits.
key = binascii.unhexlify(hex(coef_b)[2:])
# Decrypt data of next level with the recovered key.
iv = binascii.unhexlify(jsondata['next_level']['iv'])
data = base64.b64decode(jsondata['next_level']['data'])
decrypted = AES.new(key, AES.MODE_CBC, iv).decrypt(data)
# Strip and verify PKCS#7 padding.
padlen = decrypted[-1]
assert all(x == padlen for x in decrypted[-padlen:])
decrypted = decrypted[:-padlen]
with open(sys.argv[1], 'wb') as next_file:
    next_file.write(decrypted)
| [
"hashlib.sha256",
"base64.b64decode",
"Crypto.Cipher.AES.new",
"sys.exit",
"json.load",
"binascii.unhexlify"
] | [((2804, 2852), 'binascii.unhexlify', 'binascii.unhexlify', (["jsondata['next_level']['iv']"], {}), "(jsondata['next_level']['iv'])\n", (2822, 2852), False, 'import binascii\n'), ((2860, 2908), 'base64.b64decode', 'base64.b64decode', (["jsondata['next_level']['data']"], {}), "(jsondata['next_level']['data'])\n", (2876, 2908), False, 'import base64\n'), ((1362, 1373), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1370, 1373), False, 'import sys\n'), ((1427, 1447), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (1436, 1447), False, 'import json\n'), ((1812, 1863), 'binascii.unhexlify', 'binascii.unhexlify', (["jsondata['shares'][hpass]['iv']"], {}), "(jsondata['shares'][hpass]['iv'])\n", (1830, 1863), False, 'import binascii\n'), ((1875, 1926), 'base64.b64decode', 'base64.b64decode', (["jsondata['shares'][hpass]['data']"], {}), "(jsondata['shares'][hpass]['data'])\n", (1891, 1926), False, 'import base64\n'), ((2921, 2951), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'iv'], {}), '(key, AES.MODE_CBC, iv)\n', (2928, 2951), False, 'from Crypto.Cipher import AES\n'), ((1710, 1734), 'hashlib.sha256', 'hashlib.sha256', (['passcode'], {}), '(passcode)\n', (1724, 1734), False, 'import hashlib\n'), ((1962, 1992), 'Crypto.Cipher.AES.new', 'AES.new', (['key', 'AES.MODE_CBC', 'iv'], {}), '(key, AES.MODE_CBC, iv)\n', (1969, 1992), False, 'from Crypto.Cipher import AES\n')] |
def ano(n=0):
    """Classify Brazilian voting obligation from the birth year *n*.

    Returns a colour-coded (ANSI bold red) message:
      * age 18-70        -> voting is mandatory ("OBRIGATORIO")
      * age 16-17 or 71+ -> voting is optional  ("OPCIONAL")
      * age 15 or less   -> cannot vote

    With the default ``n=0`` the "age" equals the current year itself.
    """
    # Importing inside the function loads the module only when needed.
    from datetime import date
    idade = date.today().year - n
    # BUGFIX: the original escapes used '\033[1:31m'; ANSI SGR parameters
    # are separated by ';', so the colour code is '\033[1;31m'.
    if 18 <= idade <= 70:
        return f'Com {idade} anos o voto é \033[1;31mOBRIGATORIO\033[m'
    if 16 <= idade <= 17 or idade >= 65:
        return f'Com {idade} anos o voto é \033[1;31mOPCIONAL\033[m'
    if idade <= 15:
        return f'Com {idade} anos \033[1;31mNão\033[m vota'
# Ask for the birth year and print the voting classification.
i = int(input('Em que ano você nasceu? '))
print(ano(i))
| [
"datetime.date.today"
] | [((139, 151), 'datetime.date.today', 'date.today', ([], {}), '()\n', (149, 151), False, 'from datetime import date\n')] |
#!/usr/bin/env python3
import time
from Message.CANMessage import MessageToCan
from Message.EnvironmentStatus import EnvironmentStatus
from Message.MotorStatus import MotorStatus
from Message.Parameters.Current import Current
from Message.Parameters.Humidity import Humidity
from Message.Parameters.Vibration import Vibration
from Message.Parameters.Voltage import Voltage
from Message.Parameters.WaterTemperature import WaterTemperature
from config import Config
from lib.pythoncan import can
from Message.Parameters.RPM import Rpm
tasks = {}
def main():
    """Open the CAN bus, register periodic status broadcasts, then idle."""
    # The Bus reference keeps the interface open for the process lifetime;
    # channel and bus type come from the application Config.
    bus = can.interface.Bus(channel=Config.getChannel(), bustype=Config.getBusType())
    print('start_transmit()')
    # Each call registers a periodic transmit task in the module-level `tasks`.
    sendMotorStatusMessage()
    sendEnvironmentStatusMessage()
    # Keep the process alive; send_periodic presumably transmits from
    # background threads managed by python-can — confirm with its docs.
    while True:
        time.sleep(20)
def sendMotorStatusMessage():
    """Broadcast a fixed MotorStatus frame periodically on the CAN bus."""
    status = MotorStatus(
        rpm=Rpm(5000),
        voltage=Voltage(12.5),
        current=Current(2),
        vibration=Vibration(0.2),
    )
    frame = MessageToCan(status, deviceId=Config.getDeviceId())
    # Keep the task referenced so the periodic transmission is not stopped.
    tasks['motorStatus'] = can.send_periodic(Config.getChannel(), frame, status.period)
def sendEnvironmentStatusMessage():
    """Broadcast a fixed EnvironmentStatus frame periodically on the CAN bus."""
    status = EnvironmentStatus(
        waterTemperature=WaterTemperature(5),
        humidity=Humidity(0.12),
    )
    frame = MessageToCan(status, deviceId=Config.getDeviceId())
    # Keep the task referenced so the periodic transmission is not stopped.
    tasks['EnvironmentStatus'] = can.send_periodic(Config.getChannel(), frame, status.period)
if __name__ == "__main__":
main() | [
"Message.Parameters.Humidity.Humidity",
"config.Config.getBusType",
"Message.Parameters.Current.Current",
"Message.Parameters.RPM.Rpm",
"Message.Parameters.WaterTemperature.WaterTemperature",
"time.sleep",
"Message.Parameters.Vibration.Vibration",
"config.Config.getChannel",
"Message.Parameters.Volt... | [((765, 779), 'time.sleep', 'time.sleep', (['(20)'], {}), '(20)\n', (775, 779), False, 'import time\n'), ((1025, 1044), 'config.Config.getChannel', 'Config.getChannel', ([], {}), '()\n', (1042, 1044), False, 'from config import Config\n'), ((1307, 1326), 'config.Config.getChannel', 'Config.getChannel', ([], {}), '()\n', (1324, 1326), False, 'from config import Config\n'), ((597, 616), 'config.Config.getChannel', 'Config.getChannel', ([], {}), '()\n', (614, 616), False, 'from config import Config\n'), ((626, 645), 'config.Config.getBusType', 'Config.getBusType', ([], {}), '()\n', (643, 645), False, 'from config import Config\n'), ((838, 847), 'Message.Parameters.RPM.Rpm', 'Rpm', (['(5000)'], {}), '(5000)\n', (841, 847), False, 'from Message.Parameters.RPM import Rpm\n'), ((857, 870), 'Message.Parameters.Voltage.Voltage', 'Voltage', (['(12.5)'], {}), '(12.5)\n', (864, 870), False, 'from Message.Parameters.Voltage import Voltage\n'), ((880, 890), 'Message.Parameters.Current.Current', 'Current', (['(2)'], {}), '(2)\n', (887, 890), False, 'from Message.Parameters.Current import Current\n'), ((902, 916), 'Message.Parameters.Vibration.Vibration', 'Vibration', (['(0.2)'], {}), '(0.2)\n', (911, 916), False, 'from Message.Parameters.Vibration import Vibration\n'), ((958, 978), 'config.Config.getDeviceId', 'Config.getDeviceId', ([], {}), '()\n', (976, 978), False, 'from config import Config\n'), ((1148, 1167), 'Message.Parameters.WaterTemperature.WaterTemperature', 'WaterTemperature', (['(5)'], {}), '(5)\n', (1164, 1167), False, 'from Message.Parameters.WaterTemperature import WaterTemperature\n'), ((1178, 1192), 'Message.Parameters.Humidity.Humidity', 'Humidity', (['(0.12)'], {}), '(0.12)\n', (1186, 1192), False, 'from Message.Parameters.Humidity import Humidity\n'), ((1234, 1254), 'config.Config.getDeviceId', 'Config.getDeviceId', ([], {}), '()\n', (1252, 1254), False, 'from config import Config\n')] |
"""Create a Dash app within a Flask app."""
import os
import json
import dash
import dash_table
# from dash_table.Format import Format, Scheme
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_core_components as dcc
# from .layout import html_layout
from application.utils.es import connect_es
from datetime import datetime, timedelta
import logging
logger = logging.getLogger(__name__)
# Elasticsearch connection settings, overridable via the environment.
ES_ENDPOINT = os.environ.get('ES_ENDPOINT', 'localhost')
ES_PORT = int(os.environ.get('ES_PORT', '9200'))
# SECURITY: the original used eval() on the environment value, which would
# execute arbitrary code placed in ES_VERIFY_CERTS.  Parse the boolean
# explicitly instead (accepts 'True'/'true'/'1'/'yes'; anything else False).
ES_VERIFY_CERTS = os.environ.get('ES_VERIFY_CERTS', 'False').strip().lower() in ('true', '1', 'yes')
APP_NAME = 'tcpdump'
# Per-app configuration: APP_CONFIG env var must be a JSON object keyed by
# app name (KeyError here means fail fast at import time).
APP_CONFIG = json.loads(os.environ['APP_CONFIG'])[APP_NAME]
def add_dash(server):
    """Create a Dash app mounted on the given Flask *server*.

    The layout is a periodically refreshed table of network-traffic
    statistics; returns the underlying Flask server so the caller can
    keep using it as the WSGI app.
    """
    external_stylesheets = [
        '/dash-apps/static/dist/css/style.css',
        'https://fonts.googleapis.com/css?family=Lato',
    ]
    dash_app = dash.Dash(server=server,
                         external_stylesheets=external_stylesheets,
                         routes_pathname_prefix=f'/dash-apps/{APP_NAME}/')
    # Override the underlying HTML template
    # dash_app.index_string = html_layout
    # Layout: a refresh timer plus the statistics DataTable it updates.
    dash_app.layout = html.Div([
        dcc.Interval(
            id='graph-update',
            # Refresh period in milliseconds, configurable per app.
            interval=APP_CONFIG.get('refresh_interval', 6000)
        ),
        html.Div(
            children=dash_table.DataTable(
                id='stats_table',
                columns=[
                    {"name": 'source_ip', "id": 'source_ip'},
                    {"name": 'source_port', "id": 'source_port'},
                    {"name": 'destination_ip', "id": 'destination_ip'},
                    {"name": 'destination_port', "id": 'destination_port'},
                    {"name": 'protocol', "id": 'protocol'},
                    {"name": 'count', "id": 'count'},
                    {"name": 'total_size', "id": 'total_size'},
                ],
            )
        )
    ])
    # Initialize callbacks after our app is loaded
    # Pass dash_app as a parameter
    init_callbacks(dash_app)
    return dash_app.server
def get_network_traffic(es, start_dt, end_dt):
    """Aggregate tcpdump traffic observed between *start_dt* and *end_dt*.

    Issues an OpenDistro SQL query grouping by the flow 5-tuple
    (source ip/port, destination ip/port, protocol) and returning packet
    count and total size for up to 20 groups.

    :param es: Elasticsearch client
    :type es: elasticsearch.client.Elasticsearch
    :param start_dt: window start
    :type start_dt: datetime.datetime
    :param end_dt: window end
    :type end_dt: datetime.datetime
    :return: raw response from the _opendistro/_sql endpoint
    """
    fmt = '%Y-%m-%dT%H:%M:%S'
    query = """
    SELECT
        source_ip.keyword
        ,source_port.keyword
        ,destination_ip.keyword
        ,destination_port.keyword
        ,protocol.keyword
        ,count(*) count
        ,sum(size) total_size
    FROM tcpdump-*
    WHERE ts between '{start_dt}' and '{end_dt}'
    GROUP BY 1, 2, 3, 4, 5
    LIMIT 20
    """.format(start_dt=start_dt.strftime(fmt), end_dt=end_dt.strftime(fmt))
    return es.transport.perform_request(
        method='POST',
        url='/_opendistro/_sql',
        body={'query': query},
    )
def init_callbacks(dash_app):
    """Register the periodic table-refresh callback on *dash_app*."""
    # Create an ES connection once; the callback closure reuses it.
    es = connect_es(ES_ENDPOINT, port=ES_PORT, verify_certs=ES_VERIFY_CERTS)
    @dash_app.callback(
        Output('stats_table', 'data'),
        [Input('graph-update', 'n_intervals')]
    )
    def update_stats_table(n):
        """
        Fetch traffic for the configured window and return the rows
        (top 20 by packet count) for the stats DataTable.
        """
        nonlocal es
        # Look back APP_CONFIG['window'] hours (default 1) from now (UTC).
        window = timedelta(hours=APP_CONFIG.get('window', 1))
        end_dt = datetime.utcnow()
        start_dt = end_dt - window
        response = get_network_traffic(es, start_dt, end_dt)
        # Parse the response: one dict per SQL result row.
        data = []
        if response.get('datarows'):
            for row in response['datarows']:
                data.append({
                    'source_ip': row[0],
                    'source_port': row[1],
                    'destination_ip': row[2],
                    'destination_port': row[3],
                    'protocol': row[4],
                    'count': row[5],
                    'total_size': row[6],
                    'last_update': str(end_dt)
                })
        # Show the 20 busiest flows first.
        data = sorted(data, key=lambda i: i['count'], reverse=True)[:20]
        return data
| [
"logging.getLogger",
"json.loads",
"datetime.datetime.utcnow",
"dash.dependencies.Output",
"os.environ.get",
"dash.dependencies.Input",
"application.utils.es.connect_es",
"dash_table.DataTable",
"dash.Dash"
] | [((402, 429), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (419, 429), False, 'import logging\n'), ((445, 487), 'os.environ.get', 'os.environ.get', (['"""ES_ENDPOINT"""', '"""localhost"""'], {}), "('ES_ENDPOINT', 'localhost')\n", (459, 487), False, 'import os\n'), ((502, 535), 'os.environ.get', 'os.environ.get', (['"""ES_PORT"""', '"""9200"""'], {}), "('ES_PORT', '9200')\n", (516, 535), False, 'import os\n'), ((560, 602), 'os.environ.get', 'os.environ.get', (['"""ES_VERIFY_CERTS"""', '"""False"""'], {}), "('ES_VERIFY_CERTS', 'False')\n", (574, 602), False, 'import os\n'), ((638, 674), 'json.loads', 'json.loads', (["os.environ['APP_CONFIG']"], {}), "(os.environ['APP_CONFIG'])\n", (648, 674), False, 'import json\n'), ((892, 1013), 'dash.Dash', 'dash.Dash', ([], {'server': 'server', 'external_stylesheets': 'external_stylesheets', 'routes_pathname_prefix': 'f"""/dash-apps/{APP_NAME}/"""'}), "(server=server, external_stylesheets=external_stylesheets,\n routes_pathname_prefix=f'/dash-apps/{APP_NAME}/')\n", (901, 1013), False, 'import dash\n'), ((3266, 3333), 'application.utils.es.connect_es', 'connect_es', (['ES_ENDPOINT'], {'port': 'ES_PORT', 'verify_certs': 'ES_VERIFY_CERTS'}), '(ES_ENDPOINT, port=ES_PORT, verify_certs=ES_VERIFY_CERTS)\n', (3276, 3333), False, 'from application.utils.es import connect_es\n'), ((3648, 3665), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (3663, 3665), False, 'from datetime import datetime, timedelta\n'), ((3367, 3396), 'dash.dependencies.Output', 'Output', (['"""stats_table"""', '"""data"""'], {}), "('stats_table', 'data')\n", (3373, 3396), False, 'from dash.dependencies import Input, Output\n'), ((3407, 3443), 'dash.dependencies.Input', 'Input', (['"""graph-update"""', '"""n_intervals"""'], {}), "('graph-update', 'n_intervals')\n", (3412, 3443), False, 'from dash.dependencies import Input, Output\n'), ((1396, 1779), 'dash_table.DataTable', 'dash_table.DataTable', ([], {'id': 
'"""stats_table"""', 'columns': "[{'name': 'source_ip', 'id': 'source_ip'}, {'name': 'source_port', 'id':\n 'source_port'}, {'name': 'destination_ip', 'id': 'destination_ip'}, {\n 'name': 'destination_port', 'id': 'destination_port'}, {'name':\n 'protocol', 'id': 'protocol'}, {'name': 'count', 'id': 'count'}, {\n 'name': 'total_size', 'id': 'total_size'}]"}), "(id='stats_table', columns=[{'name': 'source_ip', 'id':\n 'source_ip'}, {'name': 'source_port', 'id': 'source_port'}, {'name':\n 'destination_ip', 'id': 'destination_ip'}, {'name': 'destination_port',\n 'id': 'destination_port'}, {'name': 'protocol', 'id': 'protocol'}, {\n 'name': 'count', 'id': 'count'}, {'name': 'total_size', 'id':\n 'total_size'}])\n", (1416, 1779), False, 'import dash_table\n')] |
"""Test SSD model."""
import pytest
import torch
from pytorch_ssd.modeling.model import SSD
def test_model_setup(ssd_params):
    """The SSD model should build successfully from the fixture params."""
    model = SSD(**ssd_params)
    assert model
def test_model_forward(ssd_params):
    """A random 300x300 RGB batch of one should yield truthy detections."""
    model = SSD(**ssd_params)
    batch = torch.rand((1, 3, 300, 300))
    assert model(batch)
@pytest.mark.parametrize("n_predictions", [0, 1, 2])
def test_model_output_processing(n_predictions, ssd_params):
"""Test processing model output for using it."""
model = SSD(**ssd_params)
cls_logits = torch.rand(n_predictions, 2, 3)
bbox_pred = torch.rand(n_predictions, 2, 4)
processed = list(
model.process_model_output(
detections=(cls_logits, bbox_pred), confidence_threshold=0.1
)
)
assert 0 <= len(processed) <= n_predictions
| [
"pytorch_ssd.modeling.model.SSD",
"pytest.mark.parametrize",
"torch.rand"
] | [((406, 457), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""n_predictions"""', '[0, 1, 2]'], {}), "('n_predictions', [0, 1, 2])\n", (429, 457), False, 'import pytest\n'), ((172, 189), 'pytorch_ssd.modeling.model.SSD', 'SSD', ([], {}), '(**ssd_params)\n', (175, 189), False, 'from pytorch_ssd.modeling.model import SSD\n'), ((294, 311), 'pytorch_ssd.modeling.model.SSD', 'SSD', ([], {}), '(**ssd_params)\n', (297, 311), False, 'from pytorch_ssd.modeling.model import SSD\n'), ((323, 351), 'torch.rand', 'torch.rand', (['(1, 3, 300, 300)'], {}), '((1, 3, 300, 300))\n', (333, 351), False, 'import torch\n'), ((584, 601), 'pytorch_ssd.modeling.model.SSD', 'SSD', ([], {}), '(**ssd_params)\n', (587, 601), False, 'from pytorch_ssd.modeling.model import SSD\n'), ((619, 650), 'torch.rand', 'torch.rand', (['n_predictions', '(2)', '(3)'], {}), '(n_predictions, 2, 3)\n', (629, 650), False, 'import torch\n'), ((667, 698), 'torch.rand', 'torch.rand', (['n_predictions', '(2)', '(4)'], {}), '(n_predictions, 2, 4)\n', (677, 698), False, 'import torch\n')] |
import pytest
from django.urls import reverse
from django.test import override_settings
from django.contrib.auth import authenticate
@pytest.mark.django_db
def test_login(api_client, django_user_model):
    """Valid credentials redirect to the profile; bad or inactive ones give 400."""
    username = "user"
    password = "<PASSWORD>"
    user = django_user_model.objects.create_user(username=username, password=password)
    credentials = {"username": username, "password": password}

    # A correct login redirects to the user's own profile.
    resp = api_client.post(reverse("api:accounts-login"), credentials)
    assert resp.status_code == 302
    assert resp.url == reverse("api:accounts-detail", kwargs={"pk": user.pk})

    # A wrong password is rejected.
    resp = api_client.post(
        reverse("api:accounts-login"),
        {"username": username, "password": password + "_"},
    )
    assert resp.status_code == 400

    # A deactivated account cannot log in either.
    user.is_active = False
    user.save()
    resp = api_client.post(reverse("api:accounts-login"), credentials)
    assert resp.status_code == 400
@pytest.mark.django_db
def test_account_permissions(api_client, user, admin_user):
    """Profile details are visible to their owner and to admins only."""
    user_url = reverse("api:accounts-detail", kwargs={"pk": user.pk})
    admin_url = reverse("api:accounts-detail", kwargs={"pk": admin_user.pk})

    # Anonymous visitors are rejected outright.
    assert api_client.get(user_url).status_code == 403

    # A regular user may see their own profile ...
    api_client.force_login(user)
    assert api_client.get(user_url).status_code == 200
    # ... but not anybody else's.
    assert api_client.get(admin_url).status_code == 403

    # Admins can inspect any profile.
    api_client.force_login(admin_user)
    assert api_client.get(user_url).status_code == 200
@pytest.mark.django_db
def test_logout(api_client, user):
    """Logging out must drop the authenticated session."""
    api_client.force_login(user)
    assert "_auth_user_id" in api_client.session.keys()
    resp = api_client.post(reverse("api:accounts-logout"))
    assert resp.status_code == 200
    assert "_auth_user_id" not in api_client.session.keys()
@pytest.mark.django_db
def test_create_new_account(api_client, mailoutbox, django_user_model):
    """Tests creation of a new account."""
    response = api_client.post(
        reverse("api:accounts-signup"),
        {
            "password1": "password",
            "password2": "password",
            "username": "user",
            "email": "<EMAIL>",
        },
    )
    user = django_user_model.objects.get(username="user")
    # A confirmation email must have been sent; the account stays
    # inactive until the activation link inside it is visited.
    assert response.status_code == 200
    assert not user.is_active
    assert len(mailoutbox) == 1
    assert list(mailoutbox[0].to) == ["<EMAIL>"]
    # The activation link is assumed to sit on line 8 of the mail body
    # (NOTE(review): depends on the email template -- confirm).
    url = mailoutbox[0].body.splitlines()[7]
    # Activate account
    response = api_client.get(url)
    # Re-fetch: the activation view changed the user record.
    user = django_user_model.objects.get(username="user")
    # Successful activation redirects (to the home page).
    assert response.status_code == 302
    assert user.is_active
    # The one-time link must be invalid after use.
    response = api_client.get(url)
    assert response.status_code == 400
def test_invalid_account_links(api_client):
    """The activation endpoint must reject malformed confirmation links."""
    # Missing token/uid query parameters entirely.
    resp = api_client.get(reverse("api:accounts-signup-confirm"))
    assert resp.status_code == 400
    # Garbage token and uid values.
    bad_url = reverse("api:accounts-signup-confirm") + "?token=bs&uid=r23"
    resp = api_client.get(bad_url)
    assert resp.status_code == 400
@pytest.mark.django_db
@override_settings(SIGNUP_TIMEOUT_DAYS=-1)
def test_expired_account_link(api_client, django_user_model, mailoutbox):
    """Tests if account activation renews the activation link after timeout.

    ``SIGNUP_TIMEOUT_DAYS`` is forced to -1 so every freshly issued link is
    already expired the moment it is clicked.

    Fixed: the test creates a user via the signup POST but was missing the
    ``django_db`` marker every other database-touching test in this file
    carries.
    """
    response = api_client.post(
        reverse("api:accounts-signup"),
        {
            "password1": "password",
            "password2": "password",
            "username": "user",
            "email": "<EMAIL>",
        },
    )
    # The activation link is assumed to sit on line 8 of the mail body
    # (NOTE(review): depends on the email template -- confirm).
    url = mailoutbox[0].body.splitlines()[7]
    response = api_client.get(url)
    assert response.status_code == 400
    # Link should be sent again in an email after clicking on the expired link.
    assert len(mailoutbox) == 2
@pytest.mark.django_db
def test_password_reset(api_client, user, mailoutbox):
    """A user can reset their password via the emailed uid/token pair."""
    user.email = "<EMAIL>"
    user.save()

    # Request the reset email.
    resp = api_client.post(reverse("api:accounts-reset"), {"email": user.email})
    assert resp.status_code == 200
    assert len(mailoutbox) == 1

    # The reset link ends in .../<uid>/<token>; pull both out of the body.
    uid, token = mailoutbox[0].body.splitlines()[7].split("/")[-2:]
    password = "<PASSWORD>"
    resp = api_client.post(
        reverse("api:accounts-reset-confirm"),
        {
            "token": token,
            "uid": uid,
            "new_password1": password,
            "new_password2": password,
        },
    )
    assert resp.status_code == 200
    # The new credentials must authenticate.
    assert authenticate(username=user.username, password=password) is not None
| [
"django.test.override_settings",
"django.contrib.auth.authenticate",
"django.urls.reverse"
] | [((3831, 3872), 'django.test.override_settings', 'override_settings', ([], {'SIGNUP_TIMEOUT_DAYS': '(-1)'}), '(SIGNUP_TIMEOUT_DAYS=-1)\n', (3848, 3872), False, 'from django.test import override_settings\n'), ((423, 452), 'django.urls.reverse', 'reverse', (['"""api:accounts-login"""'], {}), "('api:accounts-login')\n", (430, 452), False, 'from django.urls import reverse\n'), ((619, 673), 'django.urls.reverse', 'reverse', (['"""api:accounts-detail"""'], {'kwargs': "{'pk': user.pk}"}), "('api:accounts-detail', kwargs={'pk': user.pk})\n", (626, 673), False, 'from django.urls import reverse\n'), ((740, 769), 'django.urls.reverse', 'reverse', (['"""api:accounts-login"""'], {}), "('api:accounts-login')\n", (747, 769), False, 'from django.urls import reverse\n'), ((979, 1008), 'django.urls.reverse', 'reverse', (['"""api:accounts-login"""'], {}), "('api:accounts-login')\n", (986, 1008), False, 'from django.urls import reverse\n'), ((2222, 2252), 'django.urls.reverse', 'reverse', (['"""api:accounts-logout"""'], {}), "('api:accounts-logout')\n", (2229, 2252), False, 'from django.urls import reverse\n'), ((2535, 2565), 'django.urls.reverse', 'reverse', (['"""api:accounts-signup"""'], {}), "('api:accounts-signup')\n", (2542, 2565), False, 'from django.urls import reverse\n'), ((3566, 3604), 'django.urls.reverse', 'reverse', (['"""api:accounts-signup-confirm"""'], {}), "('api:accounts-signup-confirm')\n", (3573, 3604), False, 'from django.urls import reverse\n'), ((4074, 4104), 'django.urls.reverse', 'reverse', (['"""api:accounts-signup"""'], {}), "('api:accounts-signup')\n", (4081, 4104), False, 'from django.urls import reverse\n'), ((4706, 4735), 'django.urls.reverse', 'reverse', (['"""api:accounts-reset"""'], {}), "('api:accounts-reset')\n", (4713, 4735), False, 'from django.urls import reverse\n'), ((4970, 5007), 'django.urls.reverse', 'reverse', (['"""api:accounts-reset-confirm"""'], {}), "('api:accounts-reset-confirm')\n", (4977, 5007), False, 'from django.urls import 
reverse\n'), ((5217, 5272), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'user.username', 'password': 'password'}), '(username=user.username, password=password)\n', (5229, 5272), False, 'from django.contrib.auth import authenticate\n'), ((3722, 3760), 'django.urls.reverse', 'reverse', (['"""api:accounts-signup-confirm"""'], {}), "('api:accounts-signup-confirm')\n", (3729, 3760), False, 'from django.urls import reverse\n'), ((1274, 1328), 'django.urls.reverse', 'reverse', (['"""api:accounts-detail"""'], {'kwargs': "{'pk': user.pk}"}), "('api:accounts-detail', kwargs={'pk': user.pk})\n", (1281, 1328), False, 'from django.urls import reverse\n'), ((1505, 1559), 'django.urls.reverse', 'reverse', (['"""api:accounts-detail"""'], {'kwargs': "{'pk': user.pk}"}), "('api:accounts-detail', kwargs={'pk': user.pk})\n", (1512, 1559), False, 'from django.urls import reverse\n'), ((1652, 1712), 'django.urls.reverse', 'reverse', (['"""api:accounts-detail"""'], {'kwargs': "{'pk': admin_user.pk}"}), "('api:accounts-detail', kwargs={'pk': admin_user.pk})\n", (1659, 1712), False, 'from django.urls import reverse\n'), ((1890, 1944), 'django.urls.reverse', 'reverse', (['"""api:accounts-detail"""'], {'kwargs': "{'pk': user.pk}"}), "('api:accounts-detail', kwargs={'pk': user.pk})\n", (1897, 1944), False, 'from django.urls import reverse\n')] |
from rest_framework import viewsets, permissions
from messier_objects import serializers, models
class MessierViewSet(viewsets.ModelViewSet):
    """Messier Object API View.

    Full CRUD endpoint over all Messier objects.
    """
    serializer_class = serializers.MessierSerializer
    queryset = models.MessierObject.objects.all()
    # Reads are public; any write verb requires authentication.
    permission_classes = (
        permissions.IsAuthenticatedOrReadOnly,
    )
| [
"messier_objects.models.MessierObject.objects.all"
] | [((247, 281), 'messier_objects.models.MessierObject.objects.all', 'models.MessierObject.objects.all', ([], {}), '()\n', (279, 281), False, 'from messier_objects import serializers, models\n')] |
"""Collection of loss and metric functions and classes.
A good overview and collection can be found e.g. here:
https://lars76.github.io/neural-networks/object-detection/losses-for-segmentation/
*(the contained code samples are quite instructive but in tensorflow, thus not
used here)*.
"""
# Copyright (c) 2020 Continental Automotive GmbH
import abc
import enum
from typing import Tuple, Callable, Union, Dict, Sequence, Any
import torch
# When overriding the forward method, the parameters should get more specific:
# pylint: disable=arguments-differ
def _settings_to_repr(obj, settings: Dict) -> str:
"""Given an object and a dict of its settings, return a representation str.
The object is just used to derive the class name."""
return "{}({})".format(str(obj.__class__.__name__),
', '.join(['='.join([str(k), str(v)])
for k, v in settings.items()]))
class BatchReduction(enum.Enum):
    """Aggregation types to reduce the 0th (meaning the batch) dimension of a
    tensor. The values are tuples of description and function."""
    # Each member's value is (human-readable description, reduction callable).
    mean = ("Reduce by mean", lambda t: t.mean())
    sum = ("Reduce by sum", lambda t: t.sum())
    def __call__(self, batch_tensor: torch.Tensor) -> torch.Tensor:
        """Reduce the given tensor according to the chosen aggregation method.
        """
        # value[0] is the description; value[1] is the reduction function.
        # pylint: disable=unsubscriptable-object
        return self.value[1](batch_tensor)
    # pylint: enable=unsubscriptable-object
class WeightedLossSum(torch.nn.Module):
    """Weighted sum of several loss functions applied to the same inputs."""

    def __init__(self,
                 losses: Sequence[Union[torch.nn.Module, Callable]],
                 weights: Sequence[float] = None):
        """Init.

        :param losses: list of losses to sum; all losses must return the same
            output format for sum
        :param weights: list of weights for the losses; defaults to equal
            weights summing up to 1
        :raises ValueError: if ``losses`` is empty or ``weights`` has a
            different length than ``losses``
        """
        # Value checks:
        if not losses:
            raise ValueError("Empty loss list")
        if weights is not None and len(weights) != len(losses):
            raise ValueError(("Lengths of losses ({}) and weights ({}) do not "
                              "coincide").format(len(losses), len(weights)))
        super(WeightedLossSum, self).__init__()
        self.losses: Sequence[Union[torch.nn.Module, Callable]] = losses
        """The losses the results of which to sum up."""
        self.weights: Sequence[float] = \
            weights or [1 / len(self.losses)] * len(self.losses)
        """Weights for each loss. Defaults to equal weights summing up to 1."""

    def __repr__(self) -> str:
        # Fixed: the representation previously missed the final closing
        # parenthesis.
        return (str(self.__class__.__name__) + '(' +
                'losses=[' + ', '.join(
                    [repr(loss_fn) for loss_fn in self.losses]) + '], ' +
                'weights=' + str(self.weights) + ')')

    def forward(self, *inp: Any) -> Any:
        """Forward method: Weighted sum of the loss values.
        All losses from :py:attr:`losses` are considered."""
        # pylint: disable=no-member
        return torch.stack([w * l(*inp)
                            for w, l in zip(self.weights, self.losses)]
                           ).sum(dim=0)
        # pylint: enable=no-member
class BalancedBCELoss(torch.nn.Module):
    r"""Balanced binary cross entropy loss.

    This is a wrapper around torch.nn.functional.binary_cross_entropy which
    allows to enter a class weighting factor :math:`b` to have for a batch
    :math:`B` of outputs and targets :math:`(x, y)` the formula

    .. math::
        \text{BalancedBCELoss}(B) = \text{reduction}(
            \sum_{(x,y)\in B} b \cdot y \cdot \log(x) + (1-b)(1-y)\log(1-x)
        )

    The reduction can be ``mean``, ``sum``, or ``none``.
    """

    def __init__(self, factor_pos_class: float, reduction: str = 'mean'):
        """Init.

        :param factor_pos_class: balancing factor b in [0,1] applied to the
            positive (1) class; (1-b) is applied to the zero class
        :param reduction: how to aggregate batch results; one of

            - ``none``: no reduction
            - ``mean``: mean over batch dimension 0;
            - ``sum``: sum over batch dimension 0
        :raises ValueError: if ``factor_pos_class`` is not in [0, 1]
        """
        # Fixed: the original check parsed as
        # (not factor_pos_class >= 0) and (factor_pos_class <= 1),
        # so values > 1 were silently accepted.
        if not 0 <= factor_pos_class <= 1:
            raise ValueError("factor_pos_class must be in [0,1], but was {}"
                             .format(factor_pos_class))
        super(BalancedBCELoss, self).__init__()
        self.factor_pos_class = factor_pos_class
        """Balancing factor b applied to the positive (1) class;
        (1-b) is applied to the zero class."""
        self.reduction: str = reduction
        """Reduction method to aggregate batch results.
        One of 'none', 'mean', 'sum'"""

    def forward(self, *inps_targets: torch.Tensor) -> torch.Tensor:
        """Calculate the balanced BCE of predictions against targets.

        :param inps_targets: exactly two tensors of the same shape: the
            predictions (values in [0, 1]) and the binary targets
        :return: the (reduced) balanced binary cross entropy
        :raises ValueError: if not exactly two tensors are given
        """
        if len(inps_targets) != 2:
            raise ValueError("Wrong number of arguments: Got {}, expected 2"
                             .format(len(inps_targets)))
        inputs: torch.Tensor = inps_targets[0]
        targets: torch.Tensor = inps_targets[1]
        # Per-element weights: b where the target is positive, (1-b) where
        # it is zero.
        weight = (self.factor_pos_class * (targets > 0).int()) + \
                 ((1 - self.factor_pos_class) * (targets == 0).int())
        bce = torch.nn.functional.binary_cross_entropy(inputs, targets,
                                                       weight=weight,
                                                       reduction=self.reduction)
        return bce

    @property
    def settings(self) -> Dict[str, Any]:
        """Settings dict to reproduce the instance"""
        return dict(factor_pos_class=self.factor_pos_class,
                    reduction=self.reduction)

    def __repr__(self) -> str:
        return _settings_to_repr(self, self.settings)

    def __str__(self) -> str:
        return repr(self)
class MaskRCNNLoss(torch.nn.Module):
    """Total loss for a standard (torchvision-style) Mask R-CNN model."""

    # noinspection PyMethodMayBeStatic
    def forward(self,
                *args: Dict[str, torch.Tensor]
                ):  # pylint: disable=no-self-use
        """Sum up the partial losses a standard pytorch Mask R-CNN returns
        in train mode.

        The model is assumed to provide the output format expected in the
        original
        `pytorch source code <https://github.com/pytorch/vision/blob/master/references/detection/engine.py>`_,
        i.e. a dict mapping each optimization aspect to its loss value.

        :param args: first argument is the output of pytorch Mask R-CNN
        :return: the sum of all loss values
        """
        partial_losses = args[0]
        return sum(partial_losses.values())
class AbstractIoULike(torch.nn.Module, metaclass=abc.ABCMeta):
    """Shared helpers for intersection-over-union style computations on
    binarized in- and output.

    See sub-classes for the concrete loss/metric definitions."""

    @staticmethod
    def _validate_dimensions(labels: torch.Tensor,
                             outputs: torch.Tensor) -> None:
        """Raise :py:exc:`ValueError` unless labels and outputs have
        identical sizes and are at least 1D."""
        if len(outputs.size()) != len(labels.size()):
            raise ValueError(("Outputs ({}) and labels ({}) have different "
                              "sizes!").format(outputs.size(), labels.size()))
        if len(outputs.size()) < 1:
            raise ValueError(("Output dimension ({}) too small; must be at "
                              "least 1D").format(outputs.size()))

    @staticmethod
    def get_area_axes(outputs: torch.Tensor
                      ) -> Union[Tuple[int, int], Tuple[int]]:
        """Get the axis indices spanning the IoU "area".

        For >=2D tensors these are the last two axes (a 2D area), for 1D
        tensors just the last axis (a 1D area).

        :return: tuple with the indices of the area axes
        """
        last_axis = len(outputs.size()) - 1
        if last_axis > 0:
            return last_axis, last_axis - 1
        return (last_axis,)

    @staticmethod
    def binarize(tensor: torch.Tensor, thresh: float) -> torch.Tensor:
        """Threshold ``tensor`` into an int tensor of zeros and ones.

        :return: int tensor holding 1 where ``tensor`` > ``thresh``, else 0.
        """
        return (tensor > thresh).int()

    def __repr__(self):
        return _settings_to_repr(self, self.settings)

    def __str__(self):
        return repr(self)

    @abc.abstractmethod
    def forward(self, *inp: Any, **kwargs: Any) -> Any:
        """Loss or metric function definition in sub-classes."""
        raise NotImplementedError()
class AbstractIoUMetric(AbstractIoULike):
    """Common properties of IoU calculation.

    In contrast to :py:class:`AbstractIoULoss`, metrics binarize *both*
    the labels and the outputs before comparing them.
    """
    def __init__(self, output_thresh: float = 0.5, label_thresh: float = 0.,
                 smooth: float = 1e-6):
        """Init.

        :param output_thresh: threshold for binarizing the output
        :param label_thresh: threshold for binarizing the labels
        :param smooth: summand to smooth the IoU value (evade division by 0)
        """
        super(AbstractIoUMetric, self).__init__()
        self.output_thresh: float = output_thresh
        """Threshold for binarizing the output; 1 if > output, 0 else."""
        self.label_thresh: float = label_thresh
        """Threshold for binarizing the labels; 1 if > output, 0 else."""
        self.smooth: float = smooth
        r"""Smoothening summand to avoid division by zero.
        Division :math:`\frac{a}{b}` is changed to
        :math:`\frac{a + \text{smooth}}{b + \text{smooth}}`."""
    @abc.abstractmethod
    def forward(self, *inp: Any, **kwargs: Any) -> Any:
        """Metric function definition in sub-classes."""
        raise NotImplementedError()
    @property
    def settings(self):
        """Dictionary with settings to reproduce instance."""
        return dict(output_thresh=self.output_thresh,
                    label_thresh=self.label_thresh,
                    smooth=self.smooth)
    def smooth_division(self, dividend, divisor):
        """Smoothed division using the smoothening summand to avoid
        division by 0.

        :return: (dividend + smooth) / (divisor + smooth)"""
        return (dividend + self.smooth) / (divisor + self.smooth)
class SetIoU(AbstractIoUMetric):
    r"""Set intersection over union (IoU) for a batch of outputs.

    Instead of averaging per-sample IoU values, the intersections and
    unions of *all* samples in the batch :math:`B` are accumulated first:

    .. math::
        \frac{\sum_B intersection} {\sum_B union}
        = \frac{\sum_B TP} {\sum_B TP + TN + FP + FN}

    with

    - FP / TP: false / true positives,
      i.e. in- / correctly predicted foreground pixels
    - FN / TN: false / true negatives,
      i.e. in- / correctly predicted background pixels

    Accepted tensor dimensions:

    - 1D: a single 1D sample without batch dimension.
    - 2D: a single 2D sample without batch dimension.
    - >2D: batch dimension 0, width dim. -1, height dim. -2.
    """

    def __init__(self, output_thresh: float = 0.5, label_thresh: float = 0.,
                 smooth: float = 1e-6):
        """Init.

        :param output_thresh: threshold for binarizing the output
        :param label_thresh: threshold for binarizing the labels
        :param smooth: summand to smooth the IoU value (evade division by 0)
        """
        super(SetIoU, self).__init__(output_thresh=output_thresh,
                                     label_thresh=label_thresh,
                                     smooth=smooth)

    def forward(self, outputs: torch.Tensor,
                labels: torch.Tensor) -> torch.Tensor:
        """Smooth set IoU between the binarized outputs and labels.

        :param outputs: output tensors of shape ``(BATCH x 1 x H x W)``
        :param labels: label tensors of shape ``(BATCH x H x W)``
        :return: scalar tensor with the smoothed set IoU
        """
        self._validate_dimensions(labels, outputs)
        bin_labels = self.binarize(labels, self.label_thresh)
        bin_outputs = self.binarize(outputs, self.output_thresh)
        # Accumulate intersection and union over the *whole* batch before
        # dividing (this distinguishes set IoU from mean per-sample IoU).
        total_intersection = (bin_outputs & bin_labels).float().sum()
        total_union = (bin_outputs | bin_labels).float().sum()
        return self.smooth_division(total_intersection, total_union)
class IoU(AbstractIoUMetric):
    r"""Per-sample intersection over union (IoU), reduced over the batch.

    For a single sample the IoU is

    .. math::
        \frac{intersection}{union} = \frac{TP} {(TP + TN + FP + FN)}

    with

    - FP / TP: false / true positives,
      i.e. in- / correctly predicted foreground pixels
    - FN / TN: false / true negatives,
      i.e. in- / correctly predicted background pixels

    Accepted tensor dimensions:

    - 1D: a single 1D sample without batch dimension.
    - 2D: a single 2D sample without batch dimension.
    - >2D: batch dimension 0, width dim. -1, height dim. -2.
    """

    def __init__(
            self,
            reduction: Union[
                BatchReduction, Callable[[torch.Tensor], torch.Tensor]
            ] = BatchReduction.mean,
            output_thresh: float = 0.5, label_thresh: float = 0.,
            smooth: float = 1e-6):
        """Init.

        :param reduction: callable reducing the 0th (batch) dimension of a
            tensor of per-sample IoU values; see :py:class:`BatchReduction`
            members for examples
        :param output_thresh: threshold for binarizing the output
        :param label_thresh: threshold for binarizing the labels
        :param smooth: summand to smooth the IoU value (evade division by 0)
        """
        super(IoU, self).__init__(output_thresh=output_thresh,
                                  label_thresh=label_thresh,
                                  smooth=smooth)
        self.reduction: Union[
            BatchReduction,
            Callable[[torch.Tensor], torch.Tensor]
        ] = reduction
        """Reduction method to aggregate the instance-wise results of the
        batch into one value."""

    def forward(self, outputs: torch.Tensor,
                labels: torch.Tensor) -> torch.Tensor:
        """Sample-wise IoU between binarized in- and output, reduced by
        :py:attr:`reduction`.

        :param outputs: output tensors of shape ``(BATCH x H x W)``;
            values must be in [0, 1]; a pixel > ``output_thresh`` counts
            as foreground
        :param labels: label tensors of shape ``(BATCH x H x W)``;
            values must be in [0, 1]; a pixel > ``label_thresh`` counts
            as foreground
        :return: per-sample IoU values along axis 0, reduced by the
            reduction scheme
        """
        self._validate_dimensions(labels, outputs)
        bin_labels = self.binarize(labels, self.label_thresh)
        bin_outputs = self.binarize(outputs, self.output_thresh)
        # Axes spanning each sample's (2D or 1D) area:
        area_axes = self.get_area_axes(bin_outputs)
        per_sample_intersection = \
            (bin_outputs & bin_labels).float().sum(area_axes)
        per_sample_union = (bin_outputs | bin_labels).float().sum(area_axes)
        per_sample_iou = self.smooth_division(per_sample_intersection,
                                              per_sample_union)
        return self.reduction(per_sample_iou)

    @property
    def settings(self):
        """Settings dict for reproduction of instance."""
        return dict(**super(IoU, self).settings, reduction=self.reduction)
class AbstractIoULoss(AbstractIoULike):
    """Shared settings for intersection over union based losses.

    The difference to IoU based metrics is that only the targets are binarized,
    not the outputs.
    Thus, the function on the DNN outputs stays smoothly differentiable.
    """
    def __init__(self,
                 reduction: Union[
                     BatchReduction, Callable[[torch.Tensor], torch.Tensor]
                 ] = BatchReduction.mean,
                 target_thresh: float = 0.):
        """Init.

        :param target_thresh: threshold to binarize targets
        :param reduction: reduction method to aggregate the instance-wise
            results of the batch;
            must be a callable on a tensor which reduces the 0th dimension;
            for examples see BatchReduction instances
        """
        super(AbstractIoULoss, self).__init__()
        self.target_thresh: float = target_thresh
        """Threshold to binarize the targets."""
        self.reduction: Union[BatchReduction,
                              Callable[[torch.Tensor], torch.Tensor]] = \
            reduction
        """Reduction method to aggregate the instance-wise results of the batch.
        """
    @property
    def settings(self) -> Dict[str, Any]:
        """Settings dict to reproduce instance."""
        return dict(target_thresh=self.target_thresh, reduction=self.reduction)
    @abc.abstractmethod
    def forward(self, *inp: Any, **kwargs: Any) -> Any:
        """Loss function definition in sub-classes."""
        raise NotImplementedError()
class TverskyLoss(AbstractIoULoss):
    # noinspection SpellCheckingInspection
    r"""Tversky loss (a balanced Dice loss) on prediction and target masks.

    The Tversky loss [Salehi2017]_ works on masks of prediction and ground
    truth (gt) indicating the foreground (fg) area.
    The masks may be binary, non-binary or mixed; the target masks get
    binarized. With a balancing factor b, the per-instance loss derives
    from the Tversky index

    .. math::
        :label: tversky

        \text{Tversky} = \frac{TP} {(TP + b\cdot FP + (1-b) \cdot FN)}

    with

    - TP: true positives,
      respectively the intersection of predicted fg area and gt fg area
    - FP: false positives,
      respectively the predicted fg area minus the gt fg area

    For b=0.5 this is regular Dice loss.

    Accepted tensor dimensions:

    - 1D: a single 1D sample without batch dimension.
    - 2D: a single 2D sample without batch dimension.
    - >2D: batch dimension 0, width dim. -1, height dim. -2.

    .. [Salehi2017] <NAME>, <NAME>, and <NAME>.
        Tversky loss function for image segmentation using 3D fully
        convolutional deep networks, 2017.
        https://arxiv.org/abs/1706.05721
    """

    def __init__(self,
                 factor_false_positives: float = 0.7,
                 reduction: Union[
                     BatchReduction, Callable[[torch.Tensor], torch.Tensor]
                 ] = BatchReduction.mean,
                 target_thresh: float = 0.):
        """Init.

        :param target_thresh: threshold to binarize targets
        :param factor_false_positives: factor in [0,1] applied to the false
            positives (see Tversky loss formula :math:numref:`tversky`)
        :param reduction: callable reducing the 0th (batch) dimension of a
            tensor; for examples see instances of
            :py:class:`~hybrid_learning.concepts.kpis.BatchReduction`.
        :raises ValueError: if ``factor_false_positives`` is outside [0,1]
        """
        if not 0 <= factor_false_positives <= 1:
            raise ValueError(("factor_false_positives must be in [0,1] but "
                              "was {}").format(factor_false_positives))
        super(TverskyLoss, self).__init__(target_thresh=target_thresh,
                                          reduction=reduction)
        self.factor_false_positives: float = factor_false_positives
        """Factor applied to the false positives"""

    @property
    def settings(self) -> Dict[str, Any]:
        """Settings to reproduce the instance."""
        return dict(factor_false_positives=self.factor_false_positives,
                    **super(TverskyLoss, self).settings)

    def forward(self, outputs: torch.Tensor,
                targets: torch.Tensor) -> torch.Tensor:
        """Tversky loss :math:numref:`tversky` calculation.

        :param outputs: input tensor (at least 1D); items must be floats
            in the range [0,1]
        :param targets: targets to compare outputs with (at least 1D;
            same dimension as input)
        :return: aggregated Tversky loss :math:numref:`tversky` of outputs
            for given targets
        """
        self._validate_dimensions(outputs, targets)
        bin_targets: torch.Tensor = self.binarize(targets, self.target_thresh)
        # Axes spanning the (1D or 2D) area of a single sample:
        area_axes: Tuple[int] = self.get_area_axes(outputs)

        fn_factor = 1.0 - self.factor_false_positives
        true_pos = (bin_targets * outputs).sum(area_axes)
        false_pos = ((1 - bin_targets) * outputs).sum(area_axes)
        false_neg = (bin_targets * (1 - outputs)).sum(area_axes)
        tversky_index = true_pos / (
            true_pos
            + self.factor_false_positives * false_pos
            + fn_factor * false_neg)
        # Turn the index (1 == perfect match) into a loss (0 == perfect):
        return self.reduction(1 - tversky_index)
class Net2VecLoss(AbstractIoULoss):
    # noinspection SpellCheckingInspection,SpellCheckingInspection
    r"""Simplified intersection over union as loss.

    This loss is the one used for the
    `original implementation <https://github.com/ruthcfong/net2vec>`_ of the
    Net2Vec framework [Fong2018]_
    *(even though this is a rewrite and no code is used from there)*.
    It works on masks of prediction and ground truth (gt) indicating the
    foreground (fg) area; the masks may be binary, non-binary or mixed,
    and the target masks get binarized. With a balancing factor b, a
    single instance scores

    .. math::
        :label: net2vec

        \text{Net2Vec}(instance) = b \cdot TP + (1-b) \cdot TN

    with

    - TP: true positives, resp. the intersection of predicted fg area and
      gt fg area
    - TN: true negatives, resp. the intersection of predicted background (bg)
      area and gt bg area

    Accepted tensor dimensions:

    - 1D: a single 1D sample without batch dimension.
    - 2D: a single 2D sample without batch dimension.
    - >2D: batch dimension 0, width dim. -1, height dim. -2.

    .. [Fong2018] <NAME> and <NAME>, “Net2Vec: Quantifying and explaining
        how concepts are encoded by filters in deep neural networks”
        in Proc. 2018 IEEE Conf. Comput. Vision and Pattern Recognition,
        Salt Lake City, UT, USA, 2018, pp. 8730–8738,
        https://arxiv.org/abs/1801.03454
    """

    def __init__(
            self,
            factor_pos_class: float = 0.5,
            reduction: Union[
                BatchReduction, Callable[[torch.Tensor], torch.Tensor]
            ] = BatchReduction.mean,
            target_thresh: float = 0.):
        """Init.

        :param target_thresh: threshold to binarize targets
        :param factor_pos_class: balancing factor :math:`b` in [0,1] applied
            to the foreground (1) class; defaults to 0.5 (i.e. equal
            weighting)
        :param reduction: callable reducing the 0th (batch) dimension of a
            tensor; for examples see instances of
            :py:class:`~hybrid_learning.concepts.kpis.BatchReduction`
        :raises ValueError: if ``factor_pos_class`` is outside [0,1]
        """
        if not 0 <= factor_pos_class <= 1:
            raise ValueError("factor_pos_class must be in [0,1] but was {}"
                             .format(factor_pos_class))
        super(Net2VecLoss, self).__init__(target_thresh=target_thresh,
                                          reduction=reduction)
        self.factor_pos_class: float = factor_pos_class
        """Balancing factor :math:`b` applied to the foreground (value 1) class.
        See loss formula :math:numref:`net2vec`."""

    @property
    def settings(self) -> Dict[str, Any]:
        """Settings to reproduce the instance."""
        return dict(factor_pos_class=self.factor_pos_class,
                    **super(Net2VecLoss, self).settings)

    def forward(self, outputs: torch.Tensor,
                targets: torch.Tensor) -> torch.Tensor:
        """Calculate Net2Vec loss :math:numref:`net2vec`.

        :param outputs: tensor of predicted masks (at least 1D); items must
            be floats in [0,1]
        :param targets: ground truth masks
        :return: net2vec loss for each instance; reduced to one value if
            batch
        """
        self._validate_dimensions(targets, outputs)
        bin_targets = self.binarize(targets, self.target_thresh)
        # Axes spanning the (1D or 2D) area of a single sample:
        area_axes: Tuple[int] = self.get_area_axes(outputs)
        # Weighted true positives plus weighted true negatives per sample:
        pos_part = self.factor_pos_class * bin_targets * outputs
        neg_part = (1 - self.factor_pos_class) * (1 - bin_targets) \
            * (1 - outputs)
        scores = (pos_part + neg_part).sum(area_axes)
        return self.reduction(scores)
| [
"torch.nn.functional.binary_cross_entropy"
] | [((5210, 5312), 'torch.nn.functional.binary_cross_entropy', 'torch.nn.functional.binary_cross_entropy', (['inputs', 'targets'], {'weight': 'weight', 'reduction': 'self.reduction'}), '(inputs, targets, weight=weight,\n reduction=self.reduction)\n', (5250, 5312), False, 'import torch\n')] |
"""
A prior can be any distribution in `scipy.stats`. You specify them in YAML
as follows:
.. code-block:: yaml
priors:
module: firecrown.priors
param1:
# here 'norm' is the name of the function/class in scipy.stats
kind: norm
# any keywors to this function are listd by name
# these are passed to the `logpdf` method
loc: 0.5
scale: 0.5
"""
import copy
import scipy.stats
def parse_config(analysis):
    """Parse priors for an analysis.

    Parameters
    ----------
    analysis : dict
        Dictionary containing the parsed YAML.

    Returns
    -------
    data : dict
        A deep copy of the input dictionary.
    """
    # Deep-copy so later mutations of the returned data never leak back
    # into the caller's analysis dict (and vice versa).
    return copy.deepcopy(analysis)
def compute_loglike(
        *,
        cosmo,
        parameters,
        data):
    """Compute the log-likelihood of the priors.

    Parameters
    ----------
    cosmo : a `pyccl.Cosmology` object
        A cosmology. (Not used by the priors; kept for interface
        compatibility with the other likelihood modules.)
    parameters : dict
        Dictionary mapping parameters to their values.
    data : dict
        The output of `parse_config` above.

    Returns
    -------
    loglike : float
        The computed log-likelihood.
    measured : array-like, shape (n,)
        Always None for the priors.
    predicted : array-like, shape (n,)
        Always None for the priors.
    covmat : array-like, shape (n, n)
        Always None for the priors.
    inv_covmat : array-like, shape (n, n)
        Always None for the priors.
    stats : dict or other data
        Always None for the priors.
    """
    loglike = 0.0
    for param in parameters:
        # 'module' is YAML bookkeeping, not a prior specification.
        if param in data and param != 'module':
            kind = data[param]['kind']
            if not hasattr(scipy.stats, kind):
                # Fixed: report the offending distribution name; previously
                # the whole config dict was interpolated into the message.
                raise ValueError("Prior dist %s not defined!" % kind)
            dist = getattr(scipy.stats, kind)
            # Everything except 'kind' is forwarded to logpdf as keywords.
            keys = {k: v for k, v in data[param].items() if k != 'kind'}
            loglike += dist.logpdf(parameters[param], **keys)
    return loglike, None, None, None, None, None
def write_stats(*, output_path, data, stats):
    """No-op: the priors produce no statistics to write (API compatibility)."""
    return None
| [
"copy.deepcopy"
] | [((786, 809), 'copy.deepcopy', 'copy.deepcopy', (['analysis'], {}), '(analysis)\n', (799, 809), False, 'import copy\n')] |
import subprocess
import os
import errno
import json
import datetime
from pymongo import MongoClient
import time
import logging
from dotenv import load_dotenv
from timeout import timeout
# Module-wide default; mutated by set_global_logging_level().
logging_level = logging.DEBUG
def get_module_logger(mod_name):
    """Return a stream logger for *mod_name* at the current global level.

    To use this, do ``logger = get_module_logger(__name__)``.
    """
    global logging_level
    logger = logging.getLogger(mod_name)
    # Drop handlers from any previous call so messages are not duplicated.
    logger.handlers = []
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(
        '%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s'))
    logger.addHandler(stream_handler)
    logger.setLevel(logging_level)
    return logger
def set_global_logging_level(logging_level_string):
    """Set the module-wide ``logging_level`` from its string name.

    Unknown names fall back to ``logging.DEBUG``.
    """
    global logging_level
    level_by_name = {
        'DEBUG': logging.DEBUG,
        'ERROR': logging.ERROR,
        'INFO': logging.INFO,
        'WARNING': logging.WARNING,
        'CRITICAL': logging.CRITICAL,
    }
    logging_level = level_by_name.get(logging_level_string, logging.DEBUG)
@timeout(120, os.strerror(errno.ETIMEDOUT))
def speedtest(mongo_uri, database, collection):
    """Run the Ookla `speedtest` CLI once and store the JSON result in MongoDB.

    Decorated with a 120 s timeout (local ``timeout`` helper) so a hung
    speedtest binary cannot block the polling loop.  Errors are logged,
    never raised, so the caller's loop keeps running.
    """
    try:
        get_module_logger(__name__).info("Performing speedtest...")
        # Run the speedtest binary and parse its JSON output; the license
        # flags keep the first run non-interactive.
        result = json.loads(subprocess.run(['speedtest', "--accept-license", "--accept-gdpr" , '-f', 'json'], stdout=subprocess.PIPE).stdout.decode('utf-8'))
        get_module_logger(__name__).debug("speedtest result: {0}".format(result))
        # Convert the ISO-8601 timestamp string into a datetime so MongoDB
        # stores it as a proper date.
        result['timestamp'] = datetime.datetime.strptime(result['timestamp'], "%Y-%m-%dT%H:%M:%S%z")
        # Insert the result document into the configured collection.
        client = MongoClient(mongo_uri)
        db = client[database]
        collection = db[collection]
        object_id = collection.insert_one(result).inserted_id
        client.close()
        get_module_logger(__name__).debug("object id: {0}".format(object_id))
        get_module_logger(__name__).debug("inserted obj: {0}".format(collection.find_one({"_id" : object_id})))
        get_module_logger(__name__).info("Speedtest completed!")
    except Exception as e:
        # Best-effort: log and swallow so the polling loop survives.
        get_module_logger(__name__).error("Error in speedtest function: {0}".format(e))
def create_collections(mongo_uri, database, collection):
    """Ensure the raw collection and a normalized read-only view exist.

    Creates ``collection`` if missing, plus a ``normalized_<collection>``
    view that converts bandwidths from bytes/s to Mbps and flattens the
    ping fields.  Idempotent: existing collections/views are left alone.
    Errors are logged, never raised.
    """
    try:
        client = MongoClient(mongo_uri)
        db = client[database]
        # List collections whose name contains `collection`, excluding
        # MongoDB's internal system.* collections.
        filter = {"name": {"$regex": rf"^(?!system\.)\b(\w*{collection}\w*)\b"}}
        collection_list = db.list_collection_names(filter=filter)
        get_module_logger(__name__).debug("Collection list filtered: {0}".format(collection_list))
        if collection not in collection_list :
            db.command('create', collection)
            get_module_logger(__name__).info("Created collection {0}".format(collection))
        else:
            get_module_logger(__name__).debug("Collection {0} already exists".format(collection))
        if 'normalized_' + collection not in collection_list :
            # 125000 bytes/s == 1 Mbps; project bandwidths into Mbps.
            pipeline=[{"$project": {
                "ts": '$timestamp',
                "downloadMbps": {
                    "$divide": [
                        "$download.bandwidth",
                        125000
                    ]
                },
                "uploadMbps": {
                    "$divide": [
                        "$upload.bandwidth",
                        125000
                    ]
                },
                "pingJitter": '$ping.jitter',
                "pingLatency": '$ping.latency',
                "packetLoss": 1
            }}]
            db.command('create', 'normalized_' + collection, viewOn=collection, pipeline=pipeline)
            get_module_logger(__name__).info("Created view {0}".format('normalized_' + collection))
        else:
            get_module_logger(__name__).debug("View {0} already exists".format('normalized_' + collection))
        client.close()
    except Exception as e:
        # Best-effort: log and swallow so the polling loop survives.
        get_module_logger(__name__).error("Error in create_collection function: {0}".format(e))
def main():
    """Read configuration from the environment and poll speedtest forever.

    Environment variables (with defaults): DELAY_SECONDS (60),
    LOGGING_LEVEL (DEBUG), MONGODB_URI, MONGODB_DB, MONGODB_COLLECTION.
    """
    try:
        # Load variables from a .env file, if present.
        load_dotenv()
        #get config main config
        delay_seconds = int(os.getenv("DELAY_SECONDS", 60))
        get_module_logger(__name__).debug("Delay between speetests is {0}".format(delay_seconds))
        logging_level_string = os.getenv("LOGGING_LEVEL", "DEBUG")
        get_module_logger(__name__).debug("Logging level is {0}".format(logging_level_string))
        #specific mongo config
        mongo_uri = os.getenv("MONGODB_URI", "mongodb://localhost:27017")
        get_module_logger(__name__).debug("MongoDB URI is {0}".format(mongo_uri))
        database = os.getenv("MONGODB_DB", "network_monitoring")
        get_module_logger(__name__).debug("MongoDB DB is {0}".format(database))
        collection = os.getenv("MONGODB_COLLECTION", "speedtest")
        get_module_logger(__name__).debug("MongoDB collection is {0}".format(collection))
        #set logging level
        set_global_logging_level(logging_level_string)
        starttime=time.time()
        while True:
            # Sleep until the next tick on a fixed schedule anchored at
            # starttime, so drift from run duration does not accumulate.
            time.sleep(delay_seconds - ((time.time() - starttime) % delay_seconds))
            get_module_logger(__name__).debug("Waiting for {0} seconds".format(delay_seconds))
            create_collections(mongo_uri, database, collection)
            speedtest(mongo_uri, database, collection)
    except Exception as e:
        get_module_logger(__name__).error("Error in main function: {0}".format(e))
if __name__ == '__main__':
    main()
"logging.getLogger",
"logging.StreamHandler",
"os.getenv",
"datetime.datetime.strptime",
"logging.Formatter",
"subprocess.run",
"dotenv.load_dotenv",
"time.time",
"pymongo.MongoClient",
"os.strerror"
] | [((365, 392), 'logging.getLogger', 'logging.getLogger', (['mod_name'], {}), '(mod_name)\n', (382, 392), False, 'import logging\n'), ((518, 541), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (539, 541), False, 'import logging\n'), ((558, 632), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s"""'], {}), "('%(asctime)s [%(name)-12s] %(levelname)-8s %(message)s')\n", (575, 632), False, 'import logging\n'), ((1164, 1192), 'os.strerror', 'os.strerror', (['errno.ETIMEDOUT'], {}), '(errno.ETIMEDOUT)\n', (1175, 1192), False, 'import os\n'), ((1642, 1712), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["result['timestamp']", '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(result['timestamp'], '%Y-%m-%dT%H:%M:%S%z')\n", (1668, 1712), False, 'import datetime\n'), ((1761, 1783), 'pymongo.MongoClient', 'MongoClient', (['mongo_uri'], {}), '(mongo_uri)\n', (1772, 1783), False, 'from pymongo import MongoClient\n'), ((2420, 2442), 'pymongo.MongoClient', 'MongoClient', (['mongo_uri'], {}), '(mongo_uri)\n', (2431, 2442), False, 'from pymongo import MongoClient\n'), ((4280, 4293), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (4291, 4293), False, 'from dotenv import load_dotenv\n'), ((4516, 4551), 'os.getenv', 'os.getenv', (['"""LOGGING_LEVEL"""', '"""DEBUG"""'], {}), "('LOGGING_LEVEL', 'DEBUG')\n", (4525, 4551), False, 'import os\n'), ((4700, 4753), 'os.getenv', 'os.getenv', (['"""MONGODB_URI"""', '"""mongodb://localhost:27017"""'], {}), "('MONGODB_URI', 'mongodb://localhost:27017')\n", (4709, 4753), False, 'import os\n'), ((4856, 4901), 'os.getenv', 'os.getenv', (['"""MONGODB_DB"""', '"""network_monitoring"""'], {}), "('MONGODB_DB', 'network_monitoring')\n", (4865, 4901), False, 'import os\n'), ((5004, 5048), 'os.getenv', 'os.getenv', (['"""MONGODB_COLLECTION"""', '"""speedtest"""'], {}), "('MONGODB_COLLECTION', 'speedtest')\n", (5013, 5048), False, 'import os\n'), ((5241, 5252), 'time.time', 
'time.time', ([], {}), '()\n', (5250, 5252), False, 'import time\n'), ((4354, 4384), 'os.getenv', 'os.getenv', (['"""DELAY_SECONDS"""', '(60)'], {}), "('DELAY_SECONDS', 60)\n", (4363, 4384), False, 'import os\n'), ((1371, 1479), 'subprocess.run', 'subprocess.run', (["['speedtest', '--accept-license', '--accept-gdpr', '-f', 'json']"], {'stdout': 'subprocess.PIPE'}), "(['speedtest', '--accept-license', '--accept-gdpr', '-f',\n 'json'], stdout=subprocess.PIPE)\n", (1385, 1479), False, 'import subprocess\n'), ((5314, 5325), 'time.time', 'time.time', ([], {}), '()\n', (5323, 5325), False, 'import time\n')] |
import sqlite3
# Open the local SQLite database file (created on first connect if absent).
con = sqlite3.connect("database.db")
c = con.cursor()
# Rewrite every row currently aged 20 with the new identity and salary.
c.execute("UPDATE names SET fname='OSAMA', lname='MOHAMED', age=22, salary=5000 WHERE age=20")
con.commit()
con.close()
| [
"sqlite3.connect"
] | [((21, 51), 'sqlite3.connect', 'sqlite3.connect', (['"""database.db"""'], {}), "('database.db')\n", (36, 51), False, 'import sqlite3\n')] |
import cv2
import numpy as np
def MaxPooling(_img, pool_size=8):
    """Max-pool an image over non-overlapping ``pool_size`` x ``pool_size`` tiles.

    Every pixel of a tile is replaced by the per-channel maximum of that
    tile, so the output has the same shape as the input.  Trailing rows or
    columns that do not fill a complete tile are left as zeros, matching
    the original behaviour.

    Parameters
    ----------
    _img : np.ndarray
        Input image, shape (H, W) or (H, W, C).  Not modified.
    pool_size : int, optional
        Side length of the square pooling window (default 8, the original
        hard-coded value).

    Returns
    -------
    np.ndarray
        Pooled image with the same shape and dtype as the input.
    """
    img = _img.copy()
    result = np.zeros_like(img)
    for i in range(img.shape[0] // pool_size):
        r0, r1 = i * pool_size, (i + 1) * pool_size
        for j in range(img.shape[1] // pool_size):
            c0, c1 = j * pool_size, (j + 1) * pool_size
            # One vectorized max over the tile handles any number of
            # channels (the original repeated this per channel 0..2).
            result[r0:r1, c0:c1] = img[r0:r1, c0:c1].max(axis=(0, 1))
    return result
# Demo: pool the sample image and write the result next to the script.
img = cv2.imread("imori.jpg")
result = MaxPooling(img)
cv2.imwrite("myans_08.jpg", result)
| [
"cv2.imwrite",
"numpy.zeros_like",
"cv2.imread",
"numpy.max"
] | [((618, 641), 'cv2.imread', 'cv2.imread', (['"""imori.jpg"""'], {}), "('imori.jpg')\n", (628, 641), False, 'import cv2\n'), ((668, 703), 'cv2.imwrite', 'cv2.imwrite', (['"""myans_08.jpg"""', 'result'], {}), "('myans_08.jpg', result)\n", (679, 703), False, 'import cv2\n'), ((88, 106), 'numpy.zeros_like', 'np.zeros_like', (['img'], {}), '(img)\n', (101, 106), True, 'import numpy as np\n'), ((349, 393), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 0]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 0])\n', (355, 393), True, 'import numpy as np\n'), ((448, 492), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 1]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 1])\n', (454, 492), True, 'import numpy as np\n'), ((547, 591), 'numpy.max', 'np.max', (['img[ind_11:ind_12, ind_21:ind_22, 2]'], {}), '(img[ind_11:ind_12, ind_21:ind_22, 2])\n', (553, 591), True, 'import numpy as np\n')] |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
from pathlib import Path
import torchaudio
import progressbar
import argparse
import torch
import tqdm
def findAllSeqs(dirName,
                extension='.flac',
                loadCache=False):
    r"""
    Lists all the sequences with the given extension in the dirName directory.
    Output:
        outSequences, speakers
        outSequence
        A list of tuples seq_path, speaker where:
            - seq_path is the relative path of each sequence relative to the
            parent directory
            - speaker is the corresponding speaker index
        outSpeakers
        The speaker labels (in order)
    The speaker labels are organized the following way
    \dirName
        \speaker_label
            \..
                ...
                seqName.extension
    """
    # Results are cached in the dataset directory so repeated scans of a
    # large tree are cheap; a failed load falls through to a full rebuild.
    cache_path = os.path.join(dirName, '_seqs_cache.txt')
    if loadCache:
        try:
            outSequences, speakers = torch.load(cache_path)
            print(f'Loaded from cache {cache_path} successfully')
            return outSequences, speakers
        except OSError as err:
            print(f'Ran in an error while loading {cache_path}: {err}')
            print('Could not load cache, rebuilding')
    # Ensure a trailing separator so the prefix strip below yields clean
    # relative paths.
    if dirName[-1] != os.sep:
        dirName += os.sep
    prefixSize = len(dirName)
    speakersTarget = {}
    outSequences = []
    for root, dirs, filenames in tqdm.tqdm(os.walk(dirName)):
        filtered_files = [f for f in filenames if f.endswith(extension)]
        if len(filtered_files) > 0:
            # The first path component below dirName is the speaker label;
            # assign speaker indices in discovery order.
            speakerStr = root[prefixSize:].split(os.sep)[0]
            if speakerStr not in speakersTarget:
                speakersTarget[speakerStr] = len(speakersTarget)
            speaker = speakersTarget[speakerStr]
            for filename in filtered_files:
                full_path = os.path.join(root[prefixSize:], filename)
                outSequences.append((speaker, full_path))
    # Invert the label->index map into an index-ordered label list.
    outSpeakers = [None for x in speakersTarget]
    for key, index in speakersTarget.items():
        outSpeakers[index] = key
    try:
        torch.save((outSequences, outSpeakers), cache_path)
        print(f'Saved cache file at {cache_path}')
    except OSError as err:
        # Cache write failure is non-fatal; results are still returned.
        print(f'Ran in an error while saving {cache_path}: {err}')
    return outSequences, outSpeakers
def get_file_duration_ms(path_file):
    """Return the duration of an audio file in milliseconds.

    NOTE(review): ``info.length // info.rate`` floors to whole seconds
    before multiplying by 1000, so sub-second precision is lost —
    presumably acceptable since callers cast to int, but worth confirming.
    """
    info = torchaudio.info(path_file)[0]
    return 1000*(info.length // (info.rate))
def get_lst(path_db, file_list):
    """Build (id, path, duration_ms) triples for every file in *file_list*.

    Paths are resolved relative to *path_db*; the absolute path is used
    both as the sequence id and the path field.  Progress is shown on a
    console progress bar.
    """
    bar = progressbar.ProgressBar(maxval=len(file_list))
    bar.start()
    path_db = Path(path_db)
    out = []
    for index, file_name in enumerate(file_list):
        bar.update(index)
        full_path = str(path_db / file_name)
        duration = get_file_duration_ms(full_path)
        out.append((full_path, full_path, int(duration)))
    bar.finish()
    return out
def save_lst(data, path_out):
    """Write (id, path, value) triples to *path_out*, one space-separated line each."""
    with open(path_out, 'w') as out_file:
        for seq_id, seq_path, value in data:
            out_file.write(f"{seq_id} {seq_path} {value}\n")
def reorder_vad(path_vad, lst):
    """Move VAD outputs from *path_vad* next to their source wav files.

    For each (id, wav_path, _) entry, ``<path_vad>/<id>.vad`` is renamed to
    ``<wav_path stem>.vad`` and the auxiliary .fwt/.tsc/.sts files are
    deleted if present.  Destructive: existing files are replaced.
    """
    path_vad = Path(path_vad)
    for id, full_path_wav, _ in lst:
        full_path_vad = (path_vad / id).with_suffix('.vad')
        full_path_out = Path(full_path_wav).with_suffix('.vad')
        full_path_vad.replace(full_path_out)
        # Remove leftover intermediate files; missing_ok needs Python 3.8+.
        full_path_vad.with_suffix('.fwt').unlink(missing_ok=True)
        full_path_vad.with_suffix('.tsc').unlink(missing_ok=True)
        full_path_vad.with_suffix('.sts').unlink(missing_ok=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Build the vad inputs")
parser.add_argument('path_db', type=str,
help="Path to the dataset directory")
parser.add_argument('path_out', type=str)
parser.add_argument('--ignore_cache', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--extension', type=str, default='.wav')
args = parser.parse_args()
seqList, _ = findAllSeqs(args.path_db, extension=args.extension,
loadCache=not args.ignore_cache)
if args.debug:
seqList = seqList[:10]
seqList = [i[1] for i in seqList]
vad_data = get_lst(args.path_db, seqList)
save_lst(vad_data, args.path_out)
| [
"argparse.ArgumentParser",
"pathlib.Path",
"torch.load",
"os.path.join",
"torchaudio.info",
"torch.save",
"os.walk"
] | [((891, 931), 'os.path.join', 'os.path.join', (['dirName', '"""_seqs_cache.txt"""'], {}), "(dirName, '_seqs_cache.txt')\n", (903, 931), False, 'import os\n'), ((2612, 2625), 'pathlib.Path', 'Path', (['path_db'], {}), '(path_db)\n', (2616, 2625), False, 'from pathlib import Path\n'), ((3122, 3136), 'pathlib.Path', 'Path', (['path_vad'], {}), '(path_vad)\n', (3126, 3136), False, 'from pathlib import Path\n'), ((3587, 3646), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Build the vad inputs"""'}), "(description='Build the vad inputs')\n", (3610, 3646), False, 'import argparse\n'), ((1460, 1476), 'os.walk', 'os.walk', (['dirName'], {}), '(dirName)\n', (1467, 1476), False, 'import os\n'), ((2129, 2180), 'torch.save', 'torch.save', (['(outSequences, outSpeakers)', 'cache_path'], {}), '((outSequences, outSpeakers), cache_path)\n', (2139, 2180), False, 'import torch\n'), ((2413, 2439), 'torchaudio.info', 'torchaudio.info', (['path_file'], {}), '(path_file)\n', (2428, 2439), False, 'import torchaudio\n'), ((1000, 1022), 'torch.load', 'torch.load', (['cache_path'], {}), '(cache_path)\n', (1010, 1022), False, 'import torch\n'), ((1884, 1925), 'os.path.join', 'os.path.join', (['root[prefixSize:]', 'filename'], {}), '(root[prefixSize:], filename)\n', (1896, 1925), False, 'import os\n'), ((3260, 3279), 'pathlib.Path', 'Path', (['full_path_wav'], {}), '(full_path_wav)\n', (3264, 3279), False, 'from pathlib import Path\n')] |
import click
from uumpa_ci_toolbox import common
from . import api
@click.group()
def kubectl():
    """Command group for kubectl-related operations."""
    pass
@kubectl.command()
@click.option('--version', required=True, help='Kubectl version to install, e.g. "v1.19.0"')
@click.option('--target-filename', default='/usr/local/bin/kubectl')
@click.option('--with-sudo', is_flag=True)
def install(**kwargs):
    """Install Kubectl in the given version"""
    # Forward all CLI options straight to the api layer, then report
    # success in the toolbox's standard format.
    api.install(**kwargs)
    common.cli_success()
| [
"click.group",
"click.option",
"uumpa_ci_toolbox.common.cli_success"
] | [((72, 85), 'click.group', 'click.group', ([], {}), '()\n', (83, 85), False, 'import click\n'), ((132, 228), 'click.option', 'click.option', (['"""--version"""'], {'required': '(True)', 'help': '"""Kubectl version to install, e.g. "v1.19.0\\""""'}), '(\'--version\', required=True, help=\n \'Kubectl version to install, e.g. "v1.19.0"\')\n', (144, 228), False, 'import click\n'), ((225, 292), 'click.option', 'click.option', (['"""--target-filename"""'], {'default': '"""/usr/local/bin/kubectl"""'}), "('--target-filename', default='/usr/local/bin/kubectl')\n", (237, 292), False, 'import click\n'), ((294, 335), 'click.option', 'click.option', (['"""--with-sudo"""'], {'is_flag': '(True)'}), "('--with-sudo', is_flag=True)\n", (306, 335), False, 'import click\n'), ((436, 456), 'uumpa_ci_toolbox.common.cli_success', 'common.cli_success', ([], {}), '()\n', (454, 456), False, 'from uumpa_ci_toolbox import common\n')] |
#!/usr/bin/python
# Copyright: (c) 2021, <NAME> <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: pvc_info
short_description: Get expand info
version_added: "2.9.0"
description: Get expand info of a mount/pvc.
options:
path:
description:
- Path to the mount point (e.g. C(/mnt/files))
required: true
type: str
recommend_size:
description:
- Whether size recommendation is done based on usage, 'increment_gib' and 'cap_gib'
default: false
type: bool
increment_gib:
description:
- Recommended GiB increments if expansion required
default: 5
type: int
cap_gib:
description:
- Cap / max size in GiB to recommend
default: 30
type: int
author:
- "<NAME> (@jobcespedes)"
'''
EXAMPLES = r'''
- name: Get expand info of mount/pvc
krestomatio.k8s.pvc_info:
path: /mypvc
recommend_size: true
increment_gib: 5
cap_gib: 30
register: pvc_info
'''
RETURN = r'''
status:
type: complex
description: A dictionary of mount/pvc expand status output
returned: only when release exists
contains:
size_available_gib:
description: Current available storage (GiB)
type: float
returned: success
sample: 0.5
size_total_gib:
description: Current total storage(GiB)
type: float
returned: success
sample: 5.0
expansion_required:
description: Whether expansion is required if storage available is below percentage
type: bool
returned: always
sample: false
recommended_size_gib:
description: Recommended size after checking available and total storage
type: int
returned: success
sample: 10
cap_reached:
description: Whether cap / max recommendation has been reached
type: bool
returned: success
sample: false
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.common.text.converters import to_native
from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import (
get_mount_info,
b_to_gib,
below_twenty_pct,
recommended_size_gib
)
def run_module():
    """Inspect a mount point and report size/expansion info via AnsibleModule.

    Reads the module params (path, recommend_size, increment_gib, cap_gib),
    queries the mount, and exits through module.exit_json with a `status`
    dict, or module.fail_json if the mount is missing or unreadable.
    """
    # define available arguments/parameters a user can pass to the module
    module_args = dict(
        path=dict(type='str', required=True),
        recommend_size=dict(type='bool', default=False),
        increment_gib=dict(type='int', default=5),
        cap_gib=dict(type='int', default=30)
    )
    # seed the result dict in the object
    # we primarily care about changed and state
    # changed is if this module effectively modified the target
    # state will include any data that you want your module to pass back
    # for consumption, for example, in a subsequent task
    expansion_required = False
    status = dict(
        expansion_required=expansion_required
    )
    result = dict(
        changed=False,
        status=status
    )
    # the AnsibleModule object will be our abstraction working with Ansible
    # this includes instantiation, a couple of common attr would be the
    # args/params passed to the execution, as well as if the module
    # supports check mode
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    # if the user is working with this module in only check mode we do not
    # want to make any changes to the environment, just return the current
    # state with no modifications
    # if module.check_mode:
    #     module.exit_json(**result)
    # manipulate or modify the state as needed (this is going to be the
    # part where your module will do what it needs to do)
    mount_info = {}
    try:
        mount_info = get_mount_info(module)
    except Exception as e:
        module.fail_json(msg=to_native(e), **result)
    if not mount_info:
        module.fail_json(msg="Mount path is not present", **result)
    recommend_size = module.params['recommend_size']
    increment_gib = module.params['increment_gib']
    cap_gib = module.params['cap_gib']
    size_available = mount_info['size_available']
    size_total = mount_info['size_total']
    size_available_gib = b_to_gib(size_available)
    size_total_gib = b_to_gib(size_total)
    # expansion required threshold: storage available should be below 20%
    # of total storage AND less GiB than 'increment_gib'; otherwise expansion
    # is not required
    if below_twenty_pct(size_available, size_total) and size_available_gib < increment_gib:
        expansion_required = True
    status['size_available_gib'] = b_to_gib(size_available)
    status['size_total_gib'] = b_to_gib(size_total)
    status['expansion_required'] = expansion_required
    if recommend_size:
        # Size recommendation is opt-in; cap_reached tells callers when no
        # further growth will be suggested.
        this_recommended_size_gib = recommended_size_gib(
            size_total_gib,
            increment_gib,
            cap_gib,
            expansion_required
        )
        status['recommended_size_gib'] = this_recommended_size_gib
        status['cap_reached'] = bool(this_recommended_size_gib >= cap_gib)
    result['status'] = status
    # in the event of a successful module execution, you will want to
    # simple AnsibleModule.exit_json(), passing the key/value results
    module.exit_json(**result)
def main():
    """Entry point when executed by Ansible."""
    run_module()
if __name__ == '__main__':
    main()
| [
"ansible.module_utils.basic.AnsibleModule",
"ansible_collections.krestomatio.k8s.plugins.module_utils.storage.b_to_gib",
"ansible_collections.krestomatio.k8s.plugins.module_utils.storage.recommended_size_gib",
"ansible_collections.krestomatio.k8s.plugins.module_utils.storage.below_twenty_pct",
"ansible.modu... | [((3372, 3438), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'module_args', 'supports_check_mode': '(True)'}), '(argument_spec=module_args, supports_check_mode=True)\n', (3385, 3438), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((4349, 4373), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.b_to_gib', 'b_to_gib', (['size_available'], {}), '(size_available)\n', (4357, 4373), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((4395, 4415), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.b_to_gib', 'b_to_gib', (['size_total'], {}), '(size_total)\n', (4403, 4415), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((4753, 4777), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.b_to_gib', 'b_to_gib', (['size_available'], {}), '(size_available)\n', (4761, 4777), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((4809, 4829), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.b_to_gib', 'b_to_gib', (['size_total'], {}), '(size_total)\n', (4817, 4829), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((3892, 3914), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.get_mount_info', 'get_mount_info', (['module'], {}), '(module)\n', (3906, 3914), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((4598, 4642), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.below_twenty_pct', 
'below_twenty_pct', (['size_available', 'size_total'], {}), '(size_available, size_total)\n', (4614, 4642), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((4944, 5029), 'ansible_collections.krestomatio.k8s.plugins.module_utils.storage.recommended_size_gib', 'recommended_size_gib', (['size_total_gib', 'increment_gib', 'cap_gib', 'expansion_required'], {}), '(size_total_gib, increment_gib, cap_gib, expansion_required\n )\n', (4964, 5029), False, 'from ansible_collections.krestomatio.k8s.plugins.module_utils.storage import get_mount_info, b_to_gib, below_twenty_pct, recommended_size_gib\n'), ((3971, 3983), 'ansible.module_utils.common.text.converters.to_native', 'to_native', (['e'], {}), '(e)\n', (3980, 3983), False, 'from ansible.module_utils.common.text.converters import to_native\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
from reporter import Reporter
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--orgname", help="Название организации")
parser.add_argument("--login", help="Логин на portal.rfc-revizor.ru")
parser.add_argument("--password", help="<PASSWORD> на portal.rfc-reviz<PASSWORD>")
parser.add_argument("--date", help="Дата в формате 01.01.2018")
parser.add_argument("--retry-count", help="Кол-во попыток.")
parser.add_argument("--notify", help="Куда отправлять (telegram/email)")
parser.add_argument("--contact", help="адрес (почта или tg-id)")
parser.add_argument("--config", help="Файл конфигурации.")
args = parser.parse_args()
argsdict = vars(args)
reporter = Reporter(**argsdict)
is_ok = reporter.get_report()
if is_ok:
reporter.parse_and_send()
| [
"argparse.ArgumentParser",
"reporter.Reporter"
] | [((137, 162), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (160, 162), False, 'import argparse\n'), ((806, 826), 'reporter.Reporter', 'Reporter', ([], {}), '(**argsdict)\n', (814, 826), False, 'from reporter import Reporter\n')] |
# Generated by Django 2.2.1 on 2019-06-02 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.1): clears Meta options on
    # PermissionsMixin/User, adds a required unique `username`, and makes
    # `avatar` optional.  Do not edit applied migrations.
    dependencies = [
        ('daauth', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='permissionsmixin',
            options={},
        ),
        migrations.AlterModelOptions(
            name='user',
            options={},
        ),
        migrations.AddField(
            model_name='user',
            name='username',
            # default='' backfills existing rows; preserve_default=False
            # drops that default from the final schema.
            field=models.CharField(default='', max_length=30, unique=True, verbose_name='username'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='user',
            name='avatar',
            field=models.ImageField(blank=True, null=True, upload_to='dapricot/avatars/'),
        ),
    ]
| [
"django.db.migrations.AlterModelOptions",
"django.db.models.ImageField",
"django.db.models.CharField"
] | [((223, 288), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""permissionsmixin"""', 'options': '{}'}), "(name='permissionsmixin', options={})\n", (251, 288), False, 'from django.db import migrations, models\n'), ((333, 386), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""user"""', 'options': '{}'}), "(name='user', options={})\n", (361, 386), False, 'from django.db import migrations, models\n'), ((530, 616), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(30)', 'unique': '(True)', 'verbose_name': '"""username"""'}), "(default='', max_length=30, unique=True, verbose_name=\n 'username')\n", (546, 616), False, 'from django.db import migrations, models\n'), ((767, 838), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""dapricot/avatars/"""'}), "(blank=True, null=True, upload_to='dapricot/avatars/')\n", (784, 838), False, 'from django.db import migrations, models\n')] |
"""
Render the models from 24 elevation angles, as in thesis NMR
Save as an image.
9. 17. 2020
created by <NAME>
9. 19. 2020
ALL RENDER ARE FINISHED WITHOUT TEXTURE
Run from anaconda console
NOTE:
RENDER FROM ORIGINAL SHOULD BE RANGE(360, 0, -15)
HERE RANGE(0, 360, 15)
SOLUTION: RENAME FILES OR GENERATE DATASETS IN FOLLOWING SEQUENCES:
0, 23, 22, ..., 1
"""
import matplotlib.pyplot as plt
import os
import tqdm
import numpy as np
import imageio
import soft_renderer as sr
os.environ["CUDA_VISIBLE_DEVICES"] = "7"
RENDER_IMAGE_NAME_RGB = 'RGB'
RENDER_IMAGE_NAME_D = 'depth'
RENDER_IMAGE_NAME_NORMAL = 'normal'
camera_distance = 10
elevation = 30
azimuth = 0
obj_file_i = os.path.join("/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj")
img_file_rgb = "/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj" + RENDER_IMAGE_NAME_RGB
img_file_depth = "/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj" + RENDER_IMAGE_NAME_D
img_file_normal = "/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj" + RENDER_IMAGE_NAME_NORMAL
mesh = sr.Mesh.from_obj(obj_file_i)
renderer = sr.SoftRenderer(camera_mode='look_at')
for azimuth in range(0, 360, 15):
count = azimuth // 15
# rest mesh to initial state
mesh.reset_()
renderer.transform.set_eyes_from_angles(camera_distance, elevation, azimuth)
images = renderer.render_mesh(mesh)
image_rgb = images[0].detach().cpu().numpy()[0]
imageio.imwrite(img_file_rgb + '_' + str(count) + '.png', (255 * image_rgb[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
image_d = images[1].detach().cpu().numpy()[0]
image_d[image_d != 0] = 2 * 1 / image_d[image_d != 0]
imageio.imwrite(img_file_depth + '_' + str(count) + '.png', (255 * image_d[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
image_normal = images[2].detach().cpu().numpy()[0]
imageio.imwrite(img_file_normal + '_' + str(count) + '.png', (255 * image_normal[:, 128 - 32: 128 + 32, 128 - 32: 128 + 32]).transpose((1, 2, 0)).astype(np.uint8))
| [
"soft_renderer.SoftRenderer",
"soft_renderer.Mesh.from_obj",
"os.path.join"
] | [((705, 794), 'os.path.join', 'os.path.join', (['"""/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj"""'], {}), "(\n '/mnt/zhengwen/model_synthesis/SF_temp/data/obj/sphere/sphere_642.obj')\n", (717, 794), False, 'import os\n'), ((1133, 1161), 'soft_renderer.Mesh.from_obj', 'sr.Mesh.from_obj', (['obj_file_i'], {}), '(obj_file_i)\n', (1149, 1161), True, 'import soft_renderer as sr\n'), ((1173, 1211), 'soft_renderer.SoftRenderer', 'sr.SoftRenderer', ([], {'camera_mode': '"""look_at"""'}), "(camera_mode='look_at')\n", (1188, 1211), True, 'import soft_renderer as sr\n')] |
#! /usr/bin/python
__author__="panos"
__date__ ="$Jun 29, 2016 6:10:32 PM$"
import pika, json
import os, threading, time, signal, sys
if __name__ == "__main__":
def fail():
sys.stdout.write("False")
sys.stdout.flush()
os._exit(0)
threading.Timer(120, fail).start()
credentials = pika.PlainCredentials('guest', 'guest')
connection = pika.BlockingConnection(pika.ConnectionParameters('sp.int3.sonata-nfv.eu',5672,'/',credentials))
channel = connection.channel()
channel.queue_declare(queue='son.monitoring')
def callback(ch, method, properties, body):
obj = json.loads(body)
if obj["exported_instance"] == 'INT_TEST_VM' and obj["id"] == '0123456789' and obj["alertname"] == 'mon_rule_vm_cpu_perc_test':
sys.stdout.write("True")
sys.stdout.flush()
os._exit(0)
channel.basic_consume(callback,
queue='son.monitoring',
no_ack=True)
channel.start_consuming()
| [
"json.loads",
"threading.Timer",
"pika.ConnectionParameters",
"pika.PlainCredentials",
"os._exit",
"sys.stdout.flush",
"sys.stdout.write"
] | [((329, 368), 'pika.PlainCredentials', 'pika.PlainCredentials', (['"""guest"""', '"""guest"""'], {}), "('guest', 'guest')\n", (350, 368), False, 'import pika, json\n'), ((190, 215), 'sys.stdout.write', 'sys.stdout.write', (['"""False"""'], {}), "('False')\n", (206, 215), False, 'import os, threading, time, signal, sys\n'), ((224, 242), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (240, 242), False, 'import os, threading, time, signal, sys\n'), ((251, 262), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (259, 262), False, 'import os, threading, time, signal, sys\n'), ((410, 484), 'pika.ConnectionParameters', 'pika.ConnectionParameters', (['"""sp.int3.sonata-nfv.eu"""', '(5672)', '"""/"""', 'credentials'], {}), "('sp.int3.sonata-nfv.eu', 5672, '/', credentials)\n", (435, 484), False, 'import pika, json\n'), ((635, 651), 'json.loads', 'json.loads', (['body'], {}), '(body)\n', (645, 651), False, 'import pika, json\n'), ((276, 302), 'threading.Timer', 'threading.Timer', (['(120)', 'fail'], {}), '(120, fail)\n', (291, 302), False, 'import os, threading, time, signal, sys\n'), ((800, 824), 'sys.stdout.write', 'sys.stdout.write', (['"""True"""'], {}), "('True')\n", (816, 824), False, 'import os, threading, time, signal, sys\n'), ((837, 855), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (853, 855), False, 'import os, threading, time, signal, sys\n'), ((868, 879), 'os._exit', 'os._exit', (['(0)'], {}), '(0)\n', (876, 879), False, 'import os, threading, time, signal, sys\n')] |
import datetime
import glob
import os.path
import tensorflow as tf
def make_summary_writer(exp_name="AIRL", graph=None):
summary_base = os.path.join("output/", exp_name, "summary/")
today_str = datetime.datetime.today().strftime('%Y-%m-%d')
dir_list = glob.glob(os.path.join(summary_base, today_str + "*/"))
i = 0
done = False
run_name = None
while not done:
run_name = today_str + "_run{}/".format(i)
run_dir = os.path.join(summary_base, run_name)
done = run_dir not in dir_list
i += 1
tf.logging.info("building summary directory at " + run_dir)
if not os.path.exists(run_dir):
os.makedirs(run_dir)
summary_writer = tf.summary.FileWriter(run_dir, graph=graph)
return summary_writer
| [
"datetime.datetime.today",
"tensorflow.logging.info",
"tensorflow.summary.FileWriter"
] | [((524, 583), 'tensorflow.logging.info', 'tf.logging.info', (["('building summary directory at ' + run_dir)"], {}), "('building summary directory at ' + run_dir)\n", (539, 583), True, 'import tensorflow as tf\n'), ((663, 706), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['run_dir'], {'graph': 'graph'}), '(run_dir, graph=graph)\n', (684, 706), True, 'import tensorflow as tf\n'), ((201, 226), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (224, 226), False, 'import datetime\n')] |
import math
from datetime import datetime
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
db = SQLAlchemy(app)
class Price(db.Model):
id = db.Column(db.Integer, primary_key=True)
type = db.Column(db.String(80), unique=True, nullable=False)
cost = db.Column(db.Integer, nullable=False)
def __repr__(self):
return '<Price %s: %d>' % (self.type, self.cost)
class Holiday(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), unique=True, nullable=False)
date = db.Column(db.Date, nullable=False)
def __repr__(self):
return '<Holiday %s: %s>' % (self.name, self.date)
@app.route('/prices')
def prices():
price = Price.query.filter_by(type=request.args.get('type')).first()
if int(request.args.get('age')) < 6:
return jsonify({'cost': 0})
else:
if request.args.get('type') != 'Night':
holidays = Holiday.query.all()
is_holiday = False
reduction = 0
for holiday in holidays:
if request.args.get('date'):
date = datetime.strptime(
request.args.get('date'), '%Y-%m-%d').date()
if date == holiday.date:
is_holiday = True
if not is_holiday and datetime.strptime(request.args.get('date'), '%Y-%m-%d').weekday() == 0:
reduction = 35
if int(request.args.get('age')) < 15:
return jsonify({'cost': math.ceil(price.cost * 0.7)})
else:
if not request.args.get('age'):
cost = price.cost * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
if int(request.args.get('age')) > 64:
cost = price.cost * 0.75 * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
cost = price.cost * (1 - reduction / 100)
return jsonify({'cost': math.ceil(cost)})
else:
if int(request.args.get('age')) >= 6:
if int(request.args.get('age')) > 64:
return jsonify({'cost': math.ceil(price.cost * 0.4)})
else:
return jsonify({'cost': price.cost})
else:
return jsonify({'cost': 0})
| [
"flask.request.args.get",
"math.ceil",
"flask.Flask",
"flask_sqlalchemy.SQLAlchemy",
"flask.jsonify"
] | [((132, 147), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (137, 147), False, 'from flask import Flask, jsonify, request\n'), ((213, 228), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (223, 228), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((938, 958), 'flask.jsonify', 'jsonify', (["{'cost': 0}"], {}), "({'cost': 0})\n", (945, 958), False, 'from flask import Flask, jsonify, request\n'), ((893, 916), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (909, 916), False, 'from flask import Flask, jsonify, request\n'), ((980, 1004), 'flask.request.args.get', 'request.args.get', (['"""type"""'], {}), "('type')\n", (996, 1004), False, 'from flask import Flask, jsonify, request\n'), ((1175, 1199), 'flask.request.args.get', 'request.args.get', (['"""date"""'], {}), "('date')\n", (1191, 1199), False, 'from flask import Flask, jsonify, request\n'), ((2541, 2561), 'flask.jsonify', 'jsonify', (["{'cost': 0}"], {}), "({'cost': 0})\n", (2548, 2561), False, 'from flask import Flask, jsonify, request\n'), ((847, 871), 'flask.request.args.get', 'request.args.get', (['"""type"""'], {}), "('type')\n", (863, 871), False, 'from flask import Flask, jsonify, request\n'), ((1561, 1584), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (1577, 1584), False, 'from flask import Flask, jsonify, request\n'), ((1703, 1726), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (1719, 1726), False, 'from flask import Flask, jsonify, request\n'), ((2262, 2285), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (2278, 2285), False, 'from flask import Flask, jsonify, request\n'), ((2470, 2499), 'flask.jsonify', 'jsonify', (["{'cost': price.cost}"], {}), "({'cost': price.cost})\n", (2477, 2499), False, 'from flask import Flask, jsonify, request\n'), ((1632, 1659), 'math.ceil', 'math.ceil', (['(price.cost * 0.7)'], {}), '(price.cost * 
0.7)\n', (1641, 1659), False, 'import math\n'), ((2316, 2339), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (2332, 2339), False, 'from flask import Flask, jsonify, request\n'), ((1834, 1849), 'math.ceil', 'math.ceil', (['cost'], {}), '(cost)\n', (1843, 1849), False, 'import math\n'), ((1901, 1924), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (1917, 1924), False, 'from flask import Flask, jsonify, request\n'), ((2391, 2418), 'math.ceil', 'math.ceil', (['(price.cost * 0.4)'], {}), '(price.cost * 0.4)\n', (2400, 2418), False, 'import math\n'), ((1271, 1295), 'flask.request.args.get', 'request.args.get', (['"""date"""'], {}), "('date')\n", (1287, 1295), False, 'from flask import Flask, jsonify, request\n'), ((1456, 1480), 'flask.request.args.get', 'request.args.get', (['"""date"""'], {}), "('date')\n", (1472, 1480), False, 'from flask import Flask, jsonify, request\n'), ((2053, 2068), 'math.ceil', 'math.ceil', (['cost'], {}), '(cost)\n', (2062, 2068), False, 'import math\n'), ((2211, 2226), 'math.ceil', 'math.ceil', (['cost'], {}), '(cost)\n', (2220, 2226), False, 'import math\n')] |
import numpy as np
import pandas as pd
import datetime
import pickle
data = pickle.load(open("./data.pkl",'rb'))
class get_data():
def __init__(self, data):
self.c_data = data
self.data = self.clean_data()
def clean_data(self):
for d in range(len(data)-1, -1, -1):
if data[d]['event_type'] != 'encounter':
data.pop(d)
data_clean = data
return data_clean
def time_in_range(self, start, end, xs,xe):
#Return true if x is in the range [start, end]
if start <= end:
return start<= xs and xe <= end
else:
return start<= xs or xe <= end
def max_time(self):
# Initialize maximum time
#print(self.data[0])
max_time1 = self.data[0]['time']
for d in self.data:
if d['time'] > max_time1:
#print(type(d['time']))
max_time1 = d['time']
return max_time1
def time_slice(self):
# The Time Span is from days chosen by user to current date time
# The date-time of encounter + duration should fall in the time slice
#self.clean_data()
time_slice_start = self.max_time() - datetime.timedelta(days = int(input("Enter Number of Days of Simulation")))
#print(time_slice_start)
time_slice_end = self.max_time()
#print(time_slice_end)
timestep = int(input("Enter timestep"))
dfinal=[]
interactions={}
cdata = self.data
while(time_slice_start < time_slice_end):
dlist=[]
for d in cdata:
interactions={}
time_interact_start = d['time']
if (d['time'] >= time_slice_start) and (d['time'] < (time_slice_start+datetime.timedelta(hours=timestep))):
interactions['lambda']=d['payload']['unobserved'].get('signal_strength')
interactions['human_id']=d['human_id']
interactions['other_human_id']=d['payload']['unobserved']['human2_id']
interactions['human1_isinfected']=d['payload']['unobserved']['human1']['is_infected']
interactions['human2_isinfected']=d['payload']['unobserved']['human2']['is_infected']
dlist.append(interactions)
dfinal.append(dlist)
time_slice_start+=datetime.timedelta(hours = int(timestep))
# print(len(dfinal))
# print(dfinal)
return dfinal | [
"datetime.timedelta"
] | [((1823, 1857), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'timestep'}), '(hours=timestep)\n', (1841, 1857), False, 'import datetime\n')] |
import tensorflow as tf
#让tf不要一次性分配所有显存(性能会有影响,但是不容易出现无内存错误)
# physical_devices = tf.config.list_physical_devices('GPU')
# try:
# tf.config.experimental.set_virtual_device_configuration(
# physical_devices[0],
# [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*6)])
# # tf.config.experimental.set_memory_growth(physical_devices[0], True)
# except:
# # Invalid device or cannot modify virtual devices once initialized.
# pass
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(x_train, y_train, epochs=5, verbose=2)
model.evaluate(x_test, y_test, verbose = 2)
| [
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Dropout"
] | [((659, 704), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), '(input_shape=(28, 28))\n', (682, 704), True, 'import tensorflow as tf\n'), ((710, 759), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(512)'], {'activation': 'tf.nn.relu'}), '(512, activation=tf.nn.relu)\n', (731, 759), True, 'import tensorflow as tf\n'), ((765, 793), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), '(0.2)\n', (788, 793), True, 'import tensorflow as tf\n'), ((799, 850), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), '(10, activation=tf.nn.softmax)\n', (820, 850), True, 'import tensorflow as tf\n')] |