| code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars) |
|---|---|---|
"""
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: <EMAIL>
"""
import os
from typing import AsyncIterator
import pytest
from asyncpg import Connection
from asyncpg.cursor import Cursor
from db.common import PGRestore
from inmanta.server.bootloader import InmantaBootloader
@pytest.fixture
@pytest.mark.slowtest
async def migrate_v6_to_v7(
hard_clean_db, hard_clean_db_post, postgresql_client: Connection, async_finalizer, server_config
) -> AsyncIterator[None]:
"""
Performs a v6 database restore and migrates to v7.
"""
    # Restore the v6 database dump (the old tables) into the test database
with open(os.path.join(os.path.dirname(__file__), "dumps/v6.sql"), "r") as fh:
await PGRestore(fh.readlines(), postgresql_client).run()
ibl = InmantaBootloader()
await ibl.start()
# When the bootloader is started, it also executes the migration to v7
yield
await ibl.stop()
@pytest.mark.asyncio(timeout=20)
async def test_unique_agent_instances(migrate_v6_to_v7: None, postgresql_client: Connection) -> None:
# assert that existing documents have been merged and expired state has been set correctly
async with postgresql_client.transaction():
records: Cursor = postgresql_client.cursor(
"""
SELECT COUNT(*)
FROM public.agentinstance
GROUP BY tid, process, name
;
"""
)
assert all([record["count"] == 1 async for record in records])
# assert unique constraint is present
constraints = await postgresql_client.fetch(
"""
SELECT pg_catalog.pg_get_constraintdef(r.oid, true) as condef
FROM pg_catalog.pg_constraint r
WHERE conname='agentinstance_unique'
"""
)
assert len(constraints) == 1
assert constraints[0]["condef"] == "UNIQUE (tid, process, name)"
|
[
"inmanta.server.bootloader.InmantaBootloader",
"pytest.mark.asyncio",
"os.path.dirname"
] |
[((1429, 1460), 'pytest.mark.asyncio', 'pytest.mark.asyncio', ([], {'timeout': '(20)'}), '(timeout=20)\n', (1448, 1460), False, 'import pytest\n'), ((1277, 1296), 'inmanta.server.bootloader.InmantaBootloader', 'InmantaBootloader', ([], {}), '()\n', (1294, 1296), False, 'from inmanta.server.bootloader import InmantaBootloader\n'), ((1145, 1170), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1160, 1170), False, 'import os\n')]
|
# -*- encoding: utf-8 -*-
'''Main module for the application.'''
# pylint: disable=invalid-name, wrong-import-position
# Hide pygame's support prompt.
from os import environ
environ['PYGAME_HIDE_SUPPORT_PROMPT'] = '1'
import sys
import pygame
from pygame.locals import KEYDOWN, QUIT, K_q, K_ESCAPE, MOUSEBUTTONDOWN # pylint: disable=no-name-in-module
from manager import Manager, TreeManager
from sounds import Sounds
# Initialize game
pygame.init() # pylint: disable=no-member
pygame.mixer.init()
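# The window caption below is the game's Chinese title, 开心消消乐 (roughly "Happy Elimination").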
pygame.display.set_caption('开心消消乐')
pygame.mouse.set_visible(False)
tree = TreeManager()
m = Manager(0, 0)
sound_sign = 0
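# sound_sign tracks the active background track: 1 = world-map music playing, 0 = level music (or nothing yet)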
world_bgm = pygame.mixer.Sound(Sounds.WORLD_BGM.value)
game_bgm = pygame.mixer.Sound(Sounds.GAME_BGM.value)
# Bind these functions to local names once so the main loop avoids repeated attribute lookups
get_events, update_window = pygame.event.get, pygame.display.flip
while True:
if m.level == 0:
if sound_sign == 0:
game_bgm.stop()
world_bgm.play(-1)
sound_sign = 1
else:
if sound_sign == 1:
world_bgm.stop()
game_bgm.play(-1)
sound_sign = 0
if m.level == 0:
tree.draw_tree(m.energy_num, m.money)
else:
m.set_level_mode(m.level)
sprite_group = m.draw()
if m.type == 0:
m.eliminate_animals()
m.death_map()
m.swap(sprite_group)
m.judge_level()
for event in get_events():
if event.type == KEYDOWN:
if event.key in (K_q, K_ESCAPE):
sys.exit()
elif event.type == QUIT:
sys.exit()
elif event.type == MOUSEBUTTONDOWN:
mousex, mousey = event.pos
if m.level == 0:
tree.mouse_select(m, mousex, mousey, m.level, m.energy_num, m.money)
m.mouse_select(mousex, mousey)
m.mouse_image()
update_window()
|
[
"pygame.mouse.set_visible",
"manager.Manager",
"pygame.mixer.init",
"manager.TreeManager",
"pygame.init",
"sys.exit",
"pygame.display.set_caption",
"pygame.mixer.Sound"
] |
[((439, 452), 'pygame.init', 'pygame.init', ([], {}), '()\n', (450, 452), False, 'import pygame\n'), ((481, 500), 'pygame.mixer.init', 'pygame.mixer.init', ([], {}), '()\n', (498, 500), False, 'import pygame\n'), ((501, 536), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""开心消消乐"""'], {}), "('开心消消乐')\n", (527, 536), False, 'import pygame\n'), ((537, 568), 'pygame.mouse.set_visible', 'pygame.mouse.set_visible', (['(False)'], {}), '(False)\n', (561, 568), False, 'import pygame\n'), ((577, 590), 'manager.TreeManager', 'TreeManager', ([], {}), '()\n', (588, 590), False, 'from manager import Manager, TreeManager\n'), ((595, 608), 'manager.Manager', 'Manager', (['(0)', '(0)'], {}), '(0, 0)\n', (602, 608), False, 'from manager import Manager, TreeManager\n'), ((636, 678), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['Sounds.WORLD_BGM.value'], {}), '(Sounds.WORLD_BGM.value)\n', (654, 678), False, 'import pygame\n'), ((690, 731), 'pygame.mixer.Sound', 'pygame.mixer.Sound', (['Sounds.GAME_BGM.value'], {}), '(Sounds.GAME_BGM.value)\n', (708, 731), False, 'import pygame\n'), ((1526, 1536), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1534, 1536), False, 'import sys\n'), ((1582, 1592), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1590, 1592), False, 'import sys\n')]
|
import os
from flask import request
from flask_restful import Resource, reqparse, url_for
from models.user import User, UserSchema
from marshmallow import ValidationError
from werkzeug import datastructures
from werkzeug.utils import secure_filename
from flask_jwt_extended import jwt_required
from uuid import uuid4
from datetime import datetime
from config import APP_ROOT, APP_STATIC
from utils.custom_response import ok, bad_request
from utils.jwt_custom_decorator import admin_required
from utils.utils import allowed_file, get_file_extension
from utils import db_helper
parser = reqparse.RequestParser()
parser.add_argument(
'file', type=datastructures.FileStorage, location='files')
class UsersResources(Resource):
def get(self):
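        """Return all users, with password fields excluded."""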
users_schema = UserSchema(many=True, exclude=['password'])
users = User.query.all()
users = users_schema.dump(users)
return ok(users, 200)
def post(self):
json_data = request.get_json(force=True)
if not json_data:
return bad_request('No input data provided', 400)
try:
data = UserSchema().load(json_data)
user_in_db = User.get_user_by_email(data.get('email'))
if user_in_db:
return bad_request('User already exist, please supply another email address', 422)
users = User(data)
db_helper.insert(users)
ser_data = UserSchema(exclude=['password']).dump(users)
return ok(ser_data, 201)
except ValidationError as e:
return bad_request(e.messages, 422)
except Exception:
return bad_request('Something went wrong', 500)
class UserResources(Resource):
def get(self, user_id):
user_schema = UserSchema(exclude=['password'])
users = User.query.get(user_id)
if not users:
return {'status': 'error', 'data': 'not found'}, 404
print(users.image)
users = user_schema.dump(users)
return {'status': 'ok', 'data': users}, 200
def put(self, user_id):
try:
req_data = request.get_json(force=True)
user_in_db = User.get_user_by_id(user_id)
if not user_in_db:
return bad_request('User not found!', 422)
data = UserSchema().load(req_data, partial=True)
db_helper.update(user_in_db, data)
data = UserSchema(exclude=['password']).dump(user_in_db)
return ok(data, 200)
except ValidationError as e:
return bad_request(e.messages, 422)
except Exception as e:
print(e)
return bad_request('Something went wrong', 500)
@admin_required
def delete(self, user_id):
try:
user_in_db = User.get_user_by_id(user_id)
if not user_in_db:
return bad_request('User not found!', 422)
if user_in_db.image:
os.remove(os.path.join(APP_STATIC, user_in_db.image))
db_helper.delete(user_in_db)
return ok('user deleted', 200)
except Exception as e:
return bad_request('Something went wrong', 500)
class UserImageResources(Resource):
@admin_required
def patch(self, user_id):
try:
data_parser = parser.parse_args()
users = User.get_user_by_id(user_id)
if not users:
return bad_request('User not found!', 422)
if data_parser['file'] == "":
return bad_request('File not found!', 422)
photo = data_parser['file']
if photo and allowed_file(photo.filename):
secure_name = secure_filename(photo.filename)
secure_name = get_file_extension(secure_name)
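                # Build a collision-resistant filename: uuid4 hex + timestamp + the original file extension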
secure_name = '{}{:-%Y%m%d%H%M%S}.{}'.format(
str(uuid4().hex), datetime.now(), secure_name)
photo.save(os.path.join(APP_STATIC, secure_name))
if users.image:
os.remove(os.path.join(APP_STATIC, users.image))
# photo_url = request.url_root + url_for(
# 'static', filename="image/" + secure_name)
users.image = secure_name
users.updated_at = datetime.now()
db_helper.only_save()
users = UserSchema(exclude=['password']).dump(users)
return ok(users, 200)
else:
return bad_request('File not allowed!', 422)
except Exception as e:
return bad_request('Something went wrong', 500)
|
[
"models.user.UserSchema",
"utils.db_helper.insert",
"utils.db_helper.only_save",
"utils.db_helper.delete",
"models.user.User.get_user_by_id",
"os.path.join",
"flask.request.get_json",
"utils.utils.allowed_file",
"utils.custom_response.ok",
"datetime.datetime.now",
"utils.utils.get_file_extension",
"flask_restful.reqparse.RequestParser",
"models.user.User.query.all",
"werkzeug.utils.secure_filename",
"models.user.User",
"utils.db_helper.update",
"utils.custom_response.bad_request",
"uuid.uuid4",
"models.user.User.query.get"
] |
[((588, 612), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (610, 612), False, 'from flask_restful import Resource, reqparse, url_for\n'), ((773, 816), 'models.user.UserSchema', 'UserSchema', ([], {'many': '(True)', 'exclude': "['password']"}), "(many=True, exclude=['password'])\n", (783, 816), False, 'from models.user import User, UserSchema\n'), ((833, 849), 'models.user.User.query.all', 'User.query.all', ([], {}), '()\n', (847, 849), False, 'from models.user import User, UserSchema\n'), ((906, 920), 'utils.custom_response.ok', 'ok', (['users', '(200)'], {}), '(users, 200)\n', (908, 920), False, 'from utils.custom_response import ok, bad_request\n'), ((962, 990), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (978, 990), False, 'from flask import request\n'), ((1759, 1791), 'models.user.UserSchema', 'UserSchema', ([], {'exclude': "['password']"}), "(exclude=['password'])\n", (1769, 1791), False, 'from models.user import User, UserSchema\n'), ((1808, 1831), 'models.user.User.query.get', 'User.query.get', (['user_id'], {}), '(user_id)\n', (1822, 1831), False, 'from models.user import User, UserSchema\n'), ((1036, 1078), 'utils.custom_response.bad_request', 'bad_request', (['"""No input data provided"""', '(400)'], {}), "('No input data provided', 400)\n", (1047, 1078), False, 'from utils.custom_response import ok, bad_request\n'), ((1353, 1363), 'models.user.User', 'User', (['data'], {}), '(data)\n', (1357, 1363), False, 'from models.user import User, UserSchema\n'), ((1376, 1399), 'utils.db_helper.insert', 'db_helper.insert', (['users'], {}), '(users)\n', (1392, 1399), False, 'from utils import db_helper\n'), ((1487, 1504), 'utils.custom_response.ok', 'ok', (['ser_data', '(201)'], {}), '(ser_data, 201)\n', (1489, 1504), False, 'from utils.custom_response import ok, bad_request\n'), ((2103, 2131), 'flask.request.get_json', 'request.get_json', ([], {'force': '(True)'}), '(force=True)\n', (2119, 2131), False, 'from flask import request\n'), ((2157, 2185), 'models.user.User.get_user_by_id', 'User.get_user_by_id', (['user_id'], {}), '(user_id)\n', (2176, 2185), False, 'from models.user import User, UserSchema\n'), ((2349, 2383), 'utils.db_helper.update', 'db_helper.update', (['user_in_db', 'data'], {}), '(user_in_db, data)\n', (2365, 2383), False, 'from utils import db_helper\n'), ((2472, 2485), 'utils.custom_response.ok', 'ok', (['data', '(200)'], {}), '(data, 200)\n', (2474, 2485), False, 'from utils.custom_response import ok, bad_request\n'), ((2773, 2801), 'models.user.User.get_user_by_id', 'User.get_user_by_id', (['user_id'], {}), '(user_id)\n', (2792, 2801), False, 'from models.user import User, UserSchema\n'), ((3007, 3035), 'utils.db_helper.delete', 'db_helper.delete', (['user_in_db'], {}), '(user_in_db)\n', (3023, 3035), False, 'from utils import db_helper\n'), ((3055, 3078), 'utils.custom_response.ok', 'ok', (['"""user deleted"""', '(200)'], {}), "('user deleted', 200)\n", (3057, 3078), False, 'from utils.custom_response import ok, bad_request\n'), ((3337, 3365), 'models.user.User.get_user_by_id', 'User.get_user_by_id', (['user_id'], {}), '(user_id)\n', (3356, 3365), False, 'from models.user import User, UserSchema\n'), ((1257, 1332), 'utils.custom_response.bad_request', 'bad_request', (['"""User already exist, please supply another email address"""', '(422)'], {}), "('User already exist, please supply another email address', 422)\n", (1268, 1332), False, 'from utils.custom_response import ok, bad_request\n'), ((1561, 1589), 'utils.custom_response.bad_request', 'bad_request', (['e.messages', '(422)'], {}), '(e.messages, 422)\n', (1572, 1589), False, 'from utils.custom_response import ok, bad_request\n'), ((1635, 1675), 'utils.custom_response.bad_request', 'bad_request', (['"""Something went wrong"""', '(500)'], {}), "('Something went wrong', 500)\n", (1646, 1675), False, 'from utils.custom_response import ok, bad_request\n'), ((2240, 2275), 'utils.custom_response.bad_request', 'bad_request', (['"""User not found!"""', '(422)'], {}), "('User not found!', 422)\n", (2251, 2275), False, 'from utils.custom_response import ok, bad_request\n'), ((2542, 2570), 'utils.custom_response.bad_request', 'bad_request', (['e.messages', '(422)'], {}), '(e.messages, 422)\n', (2553, 2570), False, 'from utils.custom_response import ok, bad_request\n'), ((2642, 2682), 'utils.custom_response.bad_request', 'bad_request', (['"""Something went wrong"""', '(500)'], {}), "('Something went wrong', 500)\n", (2653, 2682), False, 'from utils.custom_response import ok, bad_request\n'), ((2856, 2891), 'utils.custom_response.bad_request', 'bad_request', (['"""User not found!"""', '(422)'], {}), "('User not found!', 422)\n", (2867, 2891), False, 'from utils.custom_response import ok, bad_request\n'), ((3129, 3169), 'utils.custom_response.bad_request', 'bad_request', (['"""Something went wrong"""', '(500)'], {}), "('Something went wrong', 500)\n", (3140, 3169), False, 'from utils.custom_response import ok, bad_request\n'), ((3415, 3450), 'utils.custom_response.bad_request', 'bad_request', (['"""User not found!"""', '(422)'], {}), "('User not found!', 422)\n", (3426, 3450), False, 'from utils.custom_response import ok, bad_request\n'), ((3516, 3551), 'utils.custom_response.bad_request', 'bad_request', (['"""File not found!"""', '(422)'], {}), "('File not found!', 422)\n", (3527, 3551), False, 'from utils.custom_response import ok, bad_request\n'), ((3617, 3645), 'utils.utils.allowed_file', 'allowed_file', (['photo.filename'], {}), '(photo.filename)\n', (3629, 3645), False, 'from utils.utils import allowed_file, get_file_extension\n'), ((3677, 3708), 'werkzeug.utils.secure_filename', 'secure_filename', (['photo.filename'], {}), '(photo.filename)\n', (3692, 3708), False, 'from werkzeug.utils import secure_filename\n'), ((3739, 3770), 'utils.utils.get_file_extension', 'get_file_extension', (['secure_name'], {}), '(secure_name)\n', (3757, 3770), False, 'from utils.utils import allowed_file, get_file_extension\n'), ((4267, 4281), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4279, 4281), False, 'from datetime import datetime\n'), ((4298, 4319), 'utils.db_helper.only_save', 'db_helper.only_save', ([], {}), '()\n', (4317, 4319), False, 'from utils import db_helper\n'), ((4412, 4426), 'utils.custom_response.ok', 'ok', (['users', '(200)'], {}), '(users, 200)\n', (4414, 4426), False, 'from utils.custom_response import ok, bad_request\n'), ((4468, 4505), 'utils.custom_response.bad_request', 'bad_request', (['"""File not allowed!"""', '(422)'], {}), "('File not allowed!', 422)\n", (4479, 4505), False, 'from utils.custom_response import ok, bad_request\n'), ((4556, 4596), 'utils.custom_response.bad_request', 'bad_request', (['"""Something went wrong"""', '(500)'], {}), "('Something went wrong', 500)\n", (4567, 4596), False, 'from utils.custom_response import ok, bad_request\n'), ((1111, 1123), 'models.user.UserSchema', 'UserSchema', ([], {}), '()\n', (1121, 1123), False, 'from models.user import User, UserSchema\n'), ((1423, 1455), 'models.user.UserSchema', 'UserSchema', ([], {'exclude': "['password']"}), "(exclude=['password'])\n", (1433, 1455), False, 'from models.user import User, UserSchema\n'), ((2295, 2307), 'models.user.UserSchema', 'UserSchema', ([], {}), '()\n', (2305, 2307), False, 'from models.user import User, UserSchema\n'), ((2403, 2435), 'models.user.UserSchema', 'UserSchema', ([], {'exclude': "['password']"}), "(exclude=['password'])\n", (2413, 2435), False, 'from models.user import User, UserSchema\n'), ((2951, 2993), 'os.path.join', 'os.path.join', (['APP_STATIC', 'user_in_db.image'], {}), '(APP_STATIC, user_in_db.image)\n', (2963, 2993), False, 'import os\n'), ((3871, 3885), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3883, 3885), False, 'from datetime import datetime\n'), ((3927, 3964), 'os.path.join', 'os.path.join', (['APP_STATIC', 'secure_name'], {}), '(APP_STATIC, secure_name)\n', (3939, 3964), False, 'import os\n'), ((4028, 4065), 'os.path.join', 'os.path.join', (['APP_STATIC', 'users.image'], {}), '(APP_STATIC, users.image)\n', (4040, 4065), False, 'import os\n'), ((4344, 4376), 'models.user.UserSchema', 'UserSchema', ([], {'exclude': "['password']"}), "(exclude=['password'])\n", (4354, 4376), False, 'from models.user import User, UserSchema\n'), ((3857, 3864), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (3862, 3864), False, 'from uuid import uuid4\n')]
|
from hypothesis import given
from tests.binding_tests.utils import BoundPolygonKind
from tests.integration_tests.utils import (
BoundPortedFillKindsPair,
BoundPortedMultipolygonsPair,
BoundPortedWagyusPair,
are_bound_ported_multipolygons_equal,
are_bound_ported_wagyus_equal)
from tests.port_tests.utils import PortedPolygonKind
from . import strategies
@given(strategies.wagyus_pairs, strategies.multipolygons_pairs,
strategies.multipolygons_pairs, strategies.fill_kinds_pairs,
strategies.fill_kinds_pairs)
def test_basic(wagyus_pair: BoundPortedWagyusPair,
subjects_pair: BoundPortedMultipolygonsPair,
clips_pair: BoundPortedMultipolygonsPair,
subject_fill_kinds_pair: BoundPortedFillKindsPair,
clip_fill_kinds_pair: BoundPortedFillKindsPair) -> None:
bound, ported = wagyus_pair
bound_subject, ported_subject = subjects_pair
bound_clip, ported_clip = clips_pair
bound_subject_fill_kind, ported_subject_fill_kind = subject_fill_kinds_pair
bound_clip_fill_kind, ported_clip_fill_kind = clip_fill_kinds_pair
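    # Feed identical polygons to both implementations under test, then check
    # that the subtraction results and the resulting wagyu states agree.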
for bound_subject_polygon in bound_subject:
bound.add_polygon(bound_subject_polygon, BoundPolygonKind.SUBJECT)
for bound_clip_polygon in bound_clip:
bound.add_polygon(bound_clip_polygon, BoundPolygonKind.CLIP)
for ported_subject_polygon in ported_subject:
ported.add_polygon(ported_subject_polygon, PortedPolygonKind.SUBJECT)
for ported_clip_polygon in ported_clip:
ported.add_polygon(ported_clip_polygon, PortedPolygonKind.CLIP)
bound_result = bound.subtract(bound_subject_fill_kind,
bound_clip_fill_kind)
ported_result = ported.subtract(ported_subject_fill_kind,
ported_clip_fill_kind)
assert are_bound_ported_multipolygons_equal(bound_result, ported_result)
assert are_bound_ported_wagyus_equal(bound, ported)
|
[
"tests.integration_tests.utils.are_bound_ported_wagyus_equal",
"tests.integration_tests.utils.are_bound_ported_multipolygons_equal",
"hypothesis.given"
] |
[((378, 540), 'hypothesis.given', 'given', (['strategies.wagyus_pairs', 'strategies.multipolygons_pairs', 'strategies.multipolygons_pairs', 'strategies.fill_kinds_pairs', 'strategies.fill_kinds_pairs'], {}), '(strategies.wagyus_pairs, strategies.multipolygons_pairs, strategies.\n multipolygons_pairs, strategies.fill_kinds_pairs, strategies.\n fill_kinds_pairs)\n', (383, 540), False, 'from hypothesis import given\n'), ((1853, 1918), 'tests.integration_tests.utils.are_bound_ported_multipolygons_equal', 'are_bound_ported_multipolygons_equal', (['bound_result', 'ported_result'], {}), '(bound_result, ported_result)\n', (1889, 1918), False, 'from tests.integration_tests.utils import BoundPortedFillKindsPair, BoundPortedMultipolygonsPair, BoundPortedWagyusPair, are_bound_ported_multipolygons_equal, are_bound_ported_wagyus_equal\n'), ((1930, 1974), 'tests.integration_tests.utils.are_bound_ported_wagyus_equal', 'are_bound_ported_wagyus_equal', (['bound', 'ported'], {}), '(bound, ported)\n', (1959, 1974), False, 'from tests.integration_tests.utils import BoundPortedFillKindsPair, BoundPortedMultipolygonsPair, BoundPortedWagyusPair, are_bound_ported_multipolygons_equal, are_bound_ported_wagyus_equal\n')]
|
import io
import aiofiles
import aiohttp
from PIL import Image, ImageFont, ImageDraw
def GetMiddle(x, y):
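    """Return the offset that centers an element of size y inside a span of size x."""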
return (x - y) / 2
def GetBlendColor(Rarity):
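    """Map an item rarity to the RGB tint used for the card's category text."""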
if Rarity == "frozen":
blendColor = (148, 223, 255)
elif Rarity == "lava":
blendColor = (234, 141, 35)
elif Rarity == "legendary":
blendColor = (255, 255, 255)
elif Rarity == "dark":
blendColor = (251, 34, 223)
elif Rarity == "starwars":
blendColor = (231, 196, 19)
elif Rarity == "marvel":
blendColor = (197, 51, 52)
elif Rarity == "dc":
blendColor = (84, 117, 199)
elif Rarity == "icon":
blendColor = (54, 183, 183)
elif Rarity == "shadow":
blendColor = (113, 113, 113)
elif Rarity == "epic":
blendColor = (177, 91, 226)
elif Rarity == "rare":
blendColor = (73, 172, 242)
elif Rarity == "uncommon":
blendColor = (96, 170, 58)
elif Rarity == "common":
blendColor = (190, 190, 190)
elif Rarity == "slurp":
blendColor = (17, 189, 240)
elif Rarity == "gaminglegends":
blendColor = (84, 195, 246)
else:
blendColor = (255, 255, 255)
return blendColor
async def GenerateShopImage(Store: dict, background_user: str = "https://peely.de/api/background.jpg",
text: str = "Fortnite Item Shop"):
# Featured items
FeaturedItemsCount = len(Store["featured"]['entries'])
F_Lines = 1
F_Height = (545 * F_Lines) + 20
F_Width = (300 * FeaturedItemsCount) + 20
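    # Grow the featured grid one row at a time until it is at least as tall as
    # it is wide, then drop trailing rows that would be left completely empty.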
while F_Width > F_Height:
F_Lines += 1
F_ImagesPerLine = round((FeaturedItemsCount / F_Lines) + 0.49)
F_Height = (545 * F_Lines) + 20
F_Width = (300 * F_ImagesPerLine) + 20
    while ((F_Lines * F_ImagesPerLine) - FeaturedItemsCount) >= F_ImagesPerLine:
F_Lines -= 1
F_Height = (545 * F_Lines) + 20
F_Width = (300 * F_ImagesPerLine) + 20
# Daily items
try:
DailyItemsCount = len(Store["daily"]['entries'])
except KeyError:
DailyItemsCount = 0
D_Lines = 1
D_Height = (545 * D_Lines)
D_Width = (300 * DailyItemsCount)
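    # Same near-square layout for the daily grid, never using more rows than the featured grid.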
while D_Width > D_Height and D_Lines < F_Lines:
D_Lines += 1
D_ImagesPerLine = round(((DailyItemsCount) / D_Lines) + 0.49)
D_Height = (545 * D_Lines)
D_Width = (300 * D_ImagesPerLine)
    while ((D_Lines * D_ImagesPerLine) - DailyItemsCount) >= D_ImagesPerLine:
D_Lines -= 1
D_Width = (300 * D_ImagesPerLine)
# Open Background
async with aiohttp.ClientSession() as session:
async with session.get(background_user) as resp:
if resp.status == 200:
f = await aiofiles.open('assets/cache/temp.png', mode='wb')
await f.write(await resp.read())
await f.close()
Background = Image.open(
io.BytesIO(await (await aiofiles.open("assets/cache/temp.png", mode='rb')).read())).resize(
(int(F_Width + D_Width + 20 + 50), int(F_Height + 510)),
Image.ANTIALIAS)
Draw = ImageDraw.Draw(Background)
Burbank = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 100)
# Adspace
NewsAdpsace = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/T_newPVP_Texture.png", mode='rb')).read()))
AdspaceFont = ImageFont.truetype('assets/Fonts/BurbankBigCondensed-Black.otf', 32)
def Adspace(X, Y, SpaceText):
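        """Draw the banner ribbon (left cap, stretched middle, right cap) with SpaceText at (X, Y)."""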
X -= 14
Y -= 14
AdspaceLeft = NewsAdpsace.crop((0, 0, 23, 50))
AdspaceMiddle = NewsAdpsace.crop((23, 0, 66, 50)).resize((AdspaceFont.getsize(SpaceText)[0] - 15, 50),
Image.ANTIALIAS)
AdspaceRight = NewsAdpsace.crop((66, 0, 100, 50))
Background.paste(AdspaceLeft, (X, Y), AdspaceLeft)
Background.paste(AdspaceMiddle, (X + AdspaceLeft.width, Y), AdspaceMiddle)
Background.paste(AdspaceRight, (X + AdspaceLeft.width + AdspaceMiddle.width, Y), AdspaceRight)
AdspaceLeft = NewsAdpsace.crop((0, 0, 21, 50))
Draw.text((X + AdspaceLeft.width - 3, Y + 4), SpaceText, font=AdspaceFont)
# Pasting items
currentHeight = 510
currentWidth = 20
# Paste Featured
for Item in Store["featured"]['entries']:
card = await GenerateStoreCard(Item)
Background.paste(card, (currentWidth, currentHeight))
try:
if Item["banner"]:
Adspace(currentWidth, currentHeight, Item["banner"]['value'])
except KeyError:
pass
currentWidth += 300
if F_Width == currentWidth:
currentWidth = 20
currentHeight += 545
D_Width = Background.width - 20
dailyStarts = F_Width + 50
currentWidth = dailyStarts
currentHeight = 510
try:
# Paste Daily
for Item in Store["daily"]['entries']:
card = await GenerateStoreCard(Item)
Background.paste(card, (currentWidth, currentHeight))
try:
if Item["banner"]:
Adspace(currentWidth, currentHeight, Item["banner"]['value'])
except KeyError:
pass
currentWidth += 300
if D_Width == currentWidth:
currentWidth = dailyStarts
currentHeight += 545
# Draw Featured and Daily
FMiddle = GetMiddle(F_Width, Burbank.getsize(Store['featured']['name'])[0])
Draw.text((FMiddle + 20, 350), Store['featured']['name'], (255, 255, 255), font=Burbank)
DMiddle = GetMiddle(Background.width - 20 - dailyStarts, Burbank.getsize(Store['daily']['name'])[0])
Draw.text((DMiddle + dailyStarts, 350), Store['daily']['name'], (255, 255, 255), font=Burbank)
except KeyError:
pass
# Draw Fortnite Item Shop
size = 300
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
while Background.width <= BurbankBigCondensed.getsize(text)[0]:
size -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
size -= 15
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", size)
Middle = GetMiddle(Background.width, BurbankBigCondensed.getsize(text)[0])
Draw.text((Middle, (375 - BurbankBigCondensed.getsize(text)[1])/2), text, (255, 255, 255), font=BurbankBigCondensed)
return Background
async def GenerateCard(Item):
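    """Render a single 300x545 item card: rarity layers, item icon, category and name."""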
card = Image.new("RGBA", (300, 545))
Draw = ImageDraw.Draw(card)
Name = Item["name"]
Rarity = Item["rarity"]['value']
blendColor = GetBlendColor(Rarity.lower())
Category = Item["type"]['value']
if Item["images"]["featured"]:
Icon = Item["images"]["featured"]
elif Item["images"]["icon"]:
Icon = Item["images"]["icon"]
elif Item["images"]["smallIcon"]:
Icon = Item["images"]["smallIcon"]
else:
print(Item["name"] + " Image not found!")
return card
try:
layer = Image.open(
io.BytesIO(await (await aiofiles.open(f"assets/Images/card_inside_{Rarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/card_inside_common.png", mode='rb')).read()))
card.paste(layer)
# Download the Item icon
try:
async with aiohttp.ClientSession() as cs:
async with cs.get(Icon) as data:
Icon = Image.open(io.BytesIO(await data.read()))
except Exception as ex:
print("DOWNLOAD ITEM ICON ERROR", ex)
return
if (Category == "outfit") or (Category == "emote"):
ratio = max(285 / Icon.width, 365 / Icon.height)
elif Category == "wrap":
ratio = max(230 / Icon.width, 310 / Icon.height)
else:
ratio = max(310 / Icon.width, 390 / Icon.height)
Icon = Icon.resize((int(Icon.width * ratio), int(Icon.height * ratio)), Image.ANTIALIAS)
Icon = Icon.convert("RGBA")
Middle = int((card.width - Icon.width) / 2) # Get the middle of card and icon
# Paste the image
if (Category == "outfit") or (Category == "emote"):
card.paste(Icon, (Middle, 0), Icon)
else:
card.paste(Icon, (Middle, 15), Icon)
try:
layer = Image.open(
io.BytesIO(await (await aiofiles.open(f"assets/Images/card_faceplate_{Rarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/card_faceplate_common.png", mode='rb')).read()))
try:
card.paste(layer, layer)
except:
pass
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 30)
textWidth = BurbankBigCondensed.getsize(f"{Item['type']['displayValue']}")[0]
Middle = int((card.width - textWidth) / 2)
Draw.text((Middle, 385), f"{Item['type']['displayValue']}", blendColor, font=BurbankBigCondensed)
FontSize = 56
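    # Shrink the font until the item name fits within the card (at most 265 px wide)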
while ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize).getsize(Name)[0] > 265:
FontSize -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize)
textWidth = BurbankBigCondensed.getsize(Name)[0]
change = 56 - FontSize
Middle = int((card.width - textWidth) / 2)
Top = 425 + change / 2
Draw.text((Middle, Top), Name, (255, 255, 0), font=BurbankBigCondensed)
return card
async def GenerateStoreCard(Item):
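    """Render a shop entry: the first item's card plus corner boxes for bundled items and the V-Bucks price."""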
card = await GenerateCard(Item["items"][0])
Draw = ImageDraw.Draw(card)
Name = Item["items"][0]["name"]
if len(Item["items"]) > 1:
i = 0
for extra in Item["items"][1:]:
try:
extraRarity = extra["rarity"]
extraIcon = extra["images"]["smallIcon"]
except:
pass
try:
layer = Image.open(io.BytesIO(
await (await aiofiles.open(f"assets/Images/box_bottom_{extraRarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/box_bottom_common.png", mode='rb')).read()))
card.paste(layer, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (layer.height)))))
# Download the icon
try:
async with aiohttp.ClientSession() as cs:
async with cs.get(extraIcon) as data:
extraIcon = Image.open(io.BytesIO(await data.read()))
except Exception as ex:
print("ERROR BEIM NORMALEN ICON", ex)
pass
ratio = max(75 / extraIcon.width, 75 / extraIcon.height)
extraIcon = extraIcon.resize((int(extraIcon.width * ratio), int(extraIcon.height * ratio)), Image.ANTIALIAS)
# Paste icon
try:
layer = Image.open(io.BytesIO(
await (await aiofiles.open(f"assets/Images/box_faceplate_{extraRarity}.png", mode='rb')).read()))
except:
layer = Image.open(
io.BytesIO(await (await aiofiles.open("assets/Images/box_faceplate_common.png", mode='rb')).read()))
extraIcon = extraIcon.convert("RGBA")
card.paste(extraIcon, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (extraIcon.height))),), extraIcon)
card.paste(layer, ((card.width - (layer.width + 9)), (9 + ((i // 1) * (layer.height)))), layer)
i += 1
vbucks = Image.open(io.BytesIO(await (await aiofiles.open("assets/Images/vbucks.png", mode='rb')).read()))
if Item["finalPrice"] == 0:
price = "Free"
else:
price = str(Item["finalPrice"])
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", 30)
textWidth = BurbankBigCondensed.getsize(price)[0]
Middle = int((card.width - ((textWidth - 5) - vbucks.width)) / 2)
Draw.text((Middle, 490), price, (255, 255, 255), font=BurbankBigCondensed)
Middle = int((card.width - (vbucks.width + (textWidth + 5))) / 2)
card.paste(vbucks, (Middle, 495), vbucks)
FontSize = 56
while ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize).getsize(Name)[0] > 265:
FontSize -= 1
BurbankBigCondensed = ImageFont.truetype(f"assets/Fonts/BurbankBigCondensed-Black.otf", FontSize)
textWidth = BurbankBigCondensed.getsize(Name)[0]
change = 56 - FontSize
Middle = int((card.width - textWidth) / 2)
Top = 425 + change / 2
Draw.text((Middle, Top), Name, (255, 255, 255), font=BurbankBigCondensed)
return card
|
[
"PIL.Image.new",
"aiofiles.open",
"PIL.ImageFont.truetype",
"aiohttp.ClientSession",
"PIL.ImageDraw.Draw"
] |
[((3306, 3332), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['Background'], {}), '(Background)\n', (3320, 3332), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((3347, 3417), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', '(100)'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', 100)\n", (3365, 3417), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((3586, 3654), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['"""assets/Fonts/BurbankBigCondensed-Black.otf"""', '(32)'], {}), "('assets/Fonts/BurbankBigCondensed-Black.otf', 32)\n", (3604, 3654), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6133, 6204), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'size'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', size)\n", (6151, 6204), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6435, 6506), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'size'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', size)\n", (6453, 6506), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6773, 6802), 'PIL.Image.new', 'Image.new', (['"""RGBA"""', '(300, 545)'], {}), "('RGBA', (300, 545))\n", (6782, 6802), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6814, 6834), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['card'], {}), '(card)\n', (6828, 6834), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((8957, 9026), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', '(30)'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', 30)\n", (8975, 9026), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((9437, 9512), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'FontSize'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', FontSize)\n", (9455, 9512), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((9857, 9877), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['card'], {}), '(card)\n', (9871, 9877), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((12089, 12158), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', '(30)'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', 30)\n", (12107, 12158), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((12657, 12732), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'FontSize'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', FontSize)\n", (12675, 12732), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((2743, 2766), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (2764, 2766), False, 'import aiohttp\n'), ((6322, 6393), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'size'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', size)\n", (6340, 6393), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((7673, 7696), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (7694, 7696), False, 'import aiohttp\n'), ((2897, 2946), 'aiofiles.open', 'aiofiles.open', (['"""assets/cache/temp.png"""'], {'mode': '"""wb"""'}), "('assets/cache/temp.png', mode='wb')\n", (2910, 2946), False, 'import aiofiles\n'), ((9288, 9363), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'FontSize'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', FontSize)\n", (9306, 9363), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((10694, 10717), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (10715, 10717), False, 'import aiohttp\n'), ((12508, 12583), 'PIL.ImageFont.truetype', 'ImageFont.truetype', (['f"""assets/Fonts/BurbankBigCondensed-Black.otf"""', 'FontSize'], {}), "(f'assets/Fonts/BurbankBigCondensed-Black.otf', FontSize)\n", (12526, 12583), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((3495, 3557), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/T_newPVP_Texture.png"""'], {'mode': '"""rb"""'}), "('assets/Images/T_newPVP_Texture.png', mode='rb')\n", (3508, 3557), False, 'import aiofiles\n'), ((11893, 11945), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/vbucks.png"""'], {'mode': '"""rb"""'}), "('assets/Images/vbucks.png', mode='rb')\n", (11906, 11945), False, 'import aiofiles\n'), ((7364, 7431), 'aiofiles.open', 'aiofiles.open', (['f"""assets/Images/card_inside_{Rarity}.png"""'], {'mode': '"""rb"""'}), "(f'assets/Images/card_inside_{Rarity}.png', mode='rb')\n", (7377, 7431), False, 'import aiofiles\n'), ((8628, 8698), 'aiofiles.open', 'aiofiles.open', (['f"""assets/Images/card_faceplate_{Rarity}.png"""'], {'mode': '"""rb"""'}), "(f'assets/Images/card_faceplate_{Rarity}.png', mode='rb')\n", (8641, 8698), False, 'import aiofiles\n'), ((7518, 7582), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/card_inside_common.png"""'], {'mode': '"""rb"""'}), "('assets/Images/card_inside_common.png', mode='rb')\n", (7531, 7582), False, 'import aiofiles\n'), ((8785, 8852), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/card_faceplate_common.png"""'], {'mode': '"""rb"""'}), "('assets/Images/card_faceplate_common.png', mode='rb')\n", (8798, 8852), False, 'import aiofiles\n'), ((10260, 10331), 'aiofiles.open', 'aiofiles.open', (['f"""assets/Images/box_bottom_{extraRarity}.png"""'], {'mode': '"""rb"""'}), "(f'assets/Images/box_bottom_{extraRarity}.png', mode='rb')\n", (10273, 10331), False, 'import aiofiles\n'), ((11284, 11358), 'aiofiles.open', 'aiofiles.open', (['f"""assets/Images/box_faceplate_{extraRarity}.png"""'], {'mode': '"""rb"""'}), "(f'assets/Images/box_faceplate_{extraRarity}.png', mode='rb')\n", (11297, 11358), False, 'import aiofiles\n'), ((10442, 10505), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/box_bottom_common.png"""'], {'mode': '"""rb"""'}), "('assets/Images/box_bottom_common.png', mode='rb')\n", (10455, 10505), False, 'import aiofiles\n'), ((11469, 11535), 'aiofiles.open', 'aiofiles.open', (['"""assets/Images/box_faceplate_common.png"""'], {'mode': '"""rb"""'}), "('assets/Images/box_faceplate_common.png', mode='rb')\n", (11482, 11535), False, 'import aiofiles\n'), ((3113, 3162), 'aiofiles.open', 'aiofiles.open', (['"""assets/cache/temp.png"""'], {'mode': '"""rb"""'}), "('assets/cache/temp.png', mode='rb')\n", (3126, 3162), False, 'import aiofiles\n')]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aqt.jax.quantization."""
import itertools
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
import jax
from jax import random
import jax.numpy as jnp
import numpy as onp
from aqt.jax import fp_cast
from aqt.jax import get_bounds
from aqt.jax import primitives
from aqt.jax import quant_config
from aqt.jax import quantization
from aqt.jax import test_utils
from aqt.jax.get_bounds import GetBounds
from aqt.jax.quantization import QuantOps
from aqt.jax.quantization import QuantType
from aqt.jax.quantization import SCALE_DTYPE
fp32 = onp.float32
test_utils.configure_jax()
class QuantOpsTest(parameterized.TestCase):
def setUp(self):
super(QuantOpsTest, self).setUp()
quantization.DISABLE_EPSILON_IN_SCALE_FUN_FOR_TESTING = True
@parameterized.named_parameters(
dict(testcase_name='prec_2', bounds=6.0, prec=2),
dict(testcase_name='prec_4', bounds=6.0, prec=4),
dict(testcase_name='prec_8', bounds=6.0, prec=8),
dict(
testcase_name='2_features_prec_8',
bounds=[6., 12.],
prec=8),
)
def test_attributes_create_positive(self, bounds, prec):
bounds = jnp.array(bounds)
relu6 = QuantOps.create_positive(bounds=bounds, prec=prec)
onp.testing.assert_array_equal(relu6._scale, 2**prec / bounds)
self.assertEqual(relu6._symmetric, False)
self.assertEqual(relu6._prec, prec)
@parameterized.named_parameters(
dict(testcase_name='prec_2', bounds=6.0, prec=2),
dict(testcase_name='prec_4', bounds=6.0, prec=4),
dict(testcase_name='prec_8', bounds=6.0, prec=8),
dict(
testcase_name='2_features_prec_8',
bounds=[6., 12.],
prec=8),
)
def test_attributes_create_symmetric(self, bounds, prec):
bounds = jnp.array(bounds)
act_signed = QuantOps.create_symmetric(
bounds=bounds, prec=prec, half_shift=False)
onp.testing.assert_array_equal(act_signed._scale,
(2**(prec - 1) - 1) / bounds)
self.assertEqual(act_signed._symmetric, True)
self.assertEqual(act_signed._prec, prec)
@parameterized.named_parameters(
dict(
testcase_name='fp8_143',
weight_range=[2.0, 64.0],
weight_shape=(10, 1),
fp_quant=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='fp8_152',
weight_range=[2.0, 64.0],
weight_shape=(10, 1),
fp_quant=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-23,
exp_max=8,
sig_bits=2,
),
),
),
)
def test_attributes_create_weights_op_fp(
self,
weight_range,
weight_shape,
fp_quant,
):
weights = jnp.array(
fp32(onp.random.uniform(*weight_range, size=weight_shape)))
axis = None if weight_shape[1] == 1 else 0
weights_quant_op = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=fp_quant, axis=axis, half_shift=False))
max_weight = onp.max(abs(weights), axis=0)
onp.testing.assert_array_equal(
jnp.squeeze(weights_quant_op._scale),
jnp.exp2(-jnp.floor(jnp.log2(max_weight))))
self.assertEqual(weights_quant_op._symmetric, True)
self.assertIs(weights_quant_op._prec, fp_quant)
weights_scaled = (weights * weights_quant_op._scale).astype(weights.dtype)
weights_quant_expected = fp_cast.downcast_sat_ftz(
weights_scaled,
fp_quant.fp_spec.exp_min,
fp_quant.fp_spec.exp_max,
fp_quant.fp_spec.sig_bits,
)
weights_quant_calculated = weights_quant_op.to_quantized(
weights, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(weights_quant_expected,
weights_quant_calculated)
# Test the lower (23 - fp_quant.fp_spec.sig_bits) bits of the calculated
# quantized weights are zero.
sig_mask = jnp.int32((1 << (23 - fp_quant.fp_spec.sig_bits)) - 1)
onp.testing.assert_array_equal(
weights_quant_calculated.view(jnp.int32) & sig_mask,
jnp.zeros_like(weights))
@parameterized.named_parameters(
dict(
testcase_name='fp_act_symmetric',
act_distribution='symmetric',
use_hparams_bounds=False,
),
# TODO(b/193561347): FP quantization with positive input distribution is
# not supported yet
dict(
testcase_name='fp_act_positive',
act_distribution='positive',
use_hparams_bounds=False,
),
dict(
testcase_name='fp_act_symmetric_hyper_bounds',
act_distribution='symmetric',
use_hparams_bounds=True,
),
dict(
testcase_name='fp_act_positive_hyper_bounds',
act_distribution='positive',
use_hparams_bounds=True,
),
)
def test_attributes_create_acts_op_fp(
self,
act_distribution,
use_hparams_bounds,
):
inputs = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 4))))
fp_quant = QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-15,
exp_max=15,
sig_bits=2,
),
)
if use_hparams_bounds:
bounds = get_bounds.GetBounds.Hyper(
initial_bound=6.0,
stddev_coeff=1,
absdev_coeff=0,
mix_coeff=1,
reset_stats=True,
ema_coeff=None,
use_cams=False,
granularity=quant_config.QuantGranularity.per_tensor)
else:
bounds = 6.0
hparams = QuantOps.ActHParams(
input_distribution=act_distribution, bounds=bounds, prec=fp_quant,
half_shift=False)
class TestModule(nn.Module):
hparams: QuantOps.ActHParams
@nn.compact
def __call__(self, inputs):
return QuantOps.create_input_ops(
inputs,
hparams=hparams,
get_bounds_params=GetBounds.Params(
update_stats=False,
update_bounds=False))
test_module = TestModule(hparams=hparams)
state = test_module.init(jax.random.PRNGKey(0), inputs=inputs)
act_quant_op = test_module.apply(state, inputs=inputs)
act_scaled = (inputs * act_quant_op._scale).astype(inputs.dtype)
act_quant_expected = fp_cast.downcast_sat_ftz(
act_scaled,
fp_quant.fp_spec.exp_min,
fp_quant.fp_spec.exp_max,
fp_quant.fp_spec.sig_bits,
)
act_quant_calculated = act_quant_op.to_quantized(inputs, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(act_quant_expected, act_quant_calculated)
@parameterized.named_parameters(
dict(
testcase_name='pos_weight_prec_2',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=2),
dict(
testcase_name='pos_weight_prec_4',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=4),
dict(
testcase_name='pos_weight_prec_8',
weight_range=[2.0, 10.0],
weight_shape=(10, 1),
prec=8),
dict(
testcase_name='neg_weight_prec_8',
weight_range=[-12.0, 2.0],
weight_shape=(10, 1),
prec=8),
dict(
testcase_name='neg_weight_2_features_prec_8',
weight_range=[-12.0, 2.0],
weight_shape=(10, 2),
prec=8),
)
def test_attributes_create_weights_ops(self, weight_range, weight_shape,
prec):
weights = jnp.array(
fp32(
onp.random.uniform(
weight_range[0], weight_range[1], size=weight_shape)))
axis = 0 if weight_shape[1] != 1 else None
weights_quant = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=axis, half_shift=False))
max_weight = onp.max(abs(weights), axis=0)
onp.testing.assert_array_equal(
jnp.squeeze(weights_quant._scale), (2**(prec - 1) - 1) / max_weight)
self.assertEqual(weights_quant._symmetric, True)
self.assertEqual(weights_quant._prec, prec)
@parameterized.named_parameters(
dict(testcase_name='per_layer_quant', axis=None),
dict(testcase_name='per_channel_quant', axis=(0,)))
def test_weight_scale_shape_is_expected(self, axis):
# Tests if scale is as expected for weights quantization.
num_features = 4
expected_scale_shape = (1, 1) if axis is None else (1, num_features)
# Weight Quantization
weights = jnp.array(
fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, num_features))))
_ = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=8.0,
axis=axis,
expected_scale_shape=expected_scale_shape,
half_shift=False))
def test_inputs_scale_shape_is_expected(self):
# Inputs quantization
inputs = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 4))))
bounds = 6.0
expected_inputs_scale_shape = ()
_ = QuantOps.create_inputs_fake_quant(
inputs=inputs,
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=8.0,
half_shift=False),
get_bounds_params=GetBounds.Params(
update_stats=False,
update_bounds=False,
expected_bounds_shape=expected_inputs_scale_shape))
@parameterized.named_parameters(
dict(testcase_name='prec_2',
prec=2), dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_positive_activation_quantization_clips_outside_bounds(self, prec):
# Activation values less than 0 get clipped to 0, and values greater than
# upper_bound get clipped to upper_bound
relu6 = QuantOps.create_positive(bounds=6.0, prec=prec)
activation = jnp.array(fp32([-0.5, 6.2, 3.141]))
quantized_activations = relu6.to_quantized(activation, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(quantized_activations[0:2],
[0.0, 2**prec - 1])
activations = relu6.from_quantized(quantized_activations, dtype=jnp.float32)
max_clipped_val = (2**prec - 1) * (6.0 / 2**prec)
onp.testing.assert_array_equal(activations[0:2], [0.0, max_clipped_val])
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_unsigned_activation_quantization_clips_outside_bounds(
self, prec):
# Activation values less than -upper_bound get clipped to -upper_bound, and
# values greater than upper_bound get clipped to upper_bound
act_quant = QuantOps.create_symmetric(
bounds=jnp.array([[6.0, 8.0]]), prec=prec, half_shift=False)
activation = jnp.array(fp32([[-7, -8.9], [6.2, 9.4], [0, 0.]]))
quantized_activations = act_quant.to_quantized(
activation, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(
quantized_activations,
jnp.array([[-2**(prec - 1.0) + 1.0], [2**(prec - 1.0) - 1.0], [0.0]]) *
jnp.array([[1., 1.]]))
activations = act_quant.from_quantized(
quantized_activations, dtype=jnp.float32)
onp.testing.assert_array_equal(activations,
[[-6.0, -8.0], [6.0, 8.], [0, 0.]])
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_scale_invariance_signed_activation_quantization(self, prec):
# Scaling activation by power of 2 and bounds by same factor,
# should scale the output by the same scale.
activations = random.uniform(random.PRNGKey(0), (10, 1))
act_scale = 8.
scaled_activations = activations * act_scale
bounds = 6.
activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=prec,
half_shift=False))
scaled_activations = QuantOps.create_inputs_fake_quant(
inputs=scaled_activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds * act_scale,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations * act_scale, scaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_scale_invariance_pos_activation_quantization(
self, prec):
# Scaling each channel of activations by a different power of 2 and upper
# bound with same scale, should scale the respective channel of output by
# the same scale.
activations = random.uniform(random.PRNGKey(0), (3, 4))
act_scale = 2**jnp.arange(4)
scaled_activations = activations * act_scale[jnp.newaxis, :]
upper_bound = 6.0 * jnp.ones((3, 4), jnp.float32)
act_quant_ops = QuantOps.create_positive(bounds=upper_bound, prec=prec)
activations = act_quant_ops.fake_quant(
activations, quantized_type=SCALE_DTYPE)
scaled_act_quant_ops = QuantOps.create_positive(
bounds=upper_bound * act_scale[jnp.newaxis, :], prec=prec)
scaled_activations = scaled_act_quant_ops.fake_quant(
scaled_activations, quantized_type=SCALE_DTYPE)
onp.testing.assert_array_equal(activations * act_scale[jnp.newaxis, :],
scaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_int_positive_act_quantization(self, prec):
# Integer activations within upper_bound and upper_bound == 2^i s.t. i<prec
# quantizes correctly.
upper_bound = 2**(prec - 3)
activations = random.randint(random.PRNGKey(0), (10, 1), 0, upper_bound)
rescaled_activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.positive,
bounds=upper_bound,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations, rescaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_int_symmetric_act_quantization(self, prec):
# Integer activations within bounds and abs(bounds) == 2^(prec -1) - 1
# quantizes correctly.
bounds = 2**(prec - 1) - 1
activations = random.randint(random.PRNGKey(0), (10, 1), -bounds, bounds)
rescaled_activations = QuantOps.create_inputs_fake_quant(
inputs=activations,
get_bounds_params=GetBounds.Params(
update_stats=False, update_bounds=False),
hparams=QuantOps.ActHParams(
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
bounds=bounds,
prec=prec,
half_shift=False))
onp.testing.assert_array_equal(activations, rescaled_activations)
@parameterized.named_parameters(
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_float_weights_quantization(self, prec):
# Tests that quantized and rescaled float weights are close to original
# weights.
weights = jnp.array(fp32(2.0 * onp.random.uniform(0, 1.0, size=(10, 1))))
rescaled_weights = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
test_utils.assert_all_close_prec(weights, rescaled_weights, prec=prec)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_full_range_int_weight_quantization(self, prec):
# Integer weights in full range [-maxmin_signed_int, maxmin_signed_int]
# quantizes correctly.
minval = -2**(prec - 1) + 1
maxval = 2**(prec - 1) - 1
weights = random.randint(random.PRNGKey(0), (10, 1), minval, maxval + 1)
weights = jax.ops.index_update(weights, jax.ops.index[0, :], maxval)
weight_quant = QuantOps.create_weights_ops(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
quantized_weights = weight_quant.to_quantized(weights, dtype=SCALE_DTYPE)
onp.testing.assert_array_equal(quantized_weights[0],
(2**(prec - 1.0) - 1.0))
rescaled_weights = weight_quant.from_quantized(
quantized_weights, dtype=jnp.float32)
onp.testing.assert_array_equal(weights, rescaled_weights)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8))
def test_scale_invariance_weight_quantization(self, prec):
# Scaling weights by power of 2, should scale the output by the same scale.
weights = random.uniform(random.PRNGKey(0), (10, 1))
weight_scale = 16
scaled_weights = weights * weight_scale
weights = QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
scaled_weights = QuantOps.create_weights_fake_quant(
w=scaled_weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=None, half_shift=False))
onp.testing.assert_array_equal(weights * weight_scale, scaled_weights)
@parameterized.named_parameters(
dict(testcase_name='prec_2', prec=2),
dict(testcase_name='prec_4', prec=4),
dict(testcase_name='prec_8', prec=8)
)
def test_per_feature_dim_scale_invariance_weight_quantization(self, prec):
# Scaling each channel of weights by a different power of 2, should scale
# the respective channel of output by the same scale.
weights = random.uniform(random.PRNGKey(0), (3, 4))
weight_scale = 2**jnp.arange(4)[jnp.newaxis, :]
scaled_weights = weights * weight_scale
weights = quantization.QuantOps.create_weights_fake_quant(
w=weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=0, half_shift=False))
scaled_weights = quantization.QuantOps.create_weights_fake_quant(
w=scaled_weights,
weight_params=QuantOps.WeightParams(
prec=prec, axis=0, half_shift=False))
onp.testing.assert_array_equal(weights * weight_scale, scaled_weights)
@parameterized.named_parameters(
dict(
testcase_name='fp_prec_scaled',
prec=QuantOps.FloatQuant(
is_scaled=True,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='fp_prec_unscaled',
prec=QuantOps.FloatQuant(
is_scaled=False,
fp_spec=QuantOps.FloatQuant.FloatPrec(
exp_min=-11,
exp_max=4,
sig_bits=3,
),
),
),
dict(
testcase_name='int_prec',
prec=4.0,
),
)
def test_no_quantization(self, prec):
# If initial_bound==-1 when using GetBounds, then create_inputs_fake_quant
# should be a no-op.
inputs = jnp.array([[.3, 1.4], [-5.2, 4.0]])
bounds = get_bounds.GetBounds.Hyper(
initial_bound=-1,
stddev_coeff=1,
absdev_coeff=0,
mix_coeff=1,
reset_stats=True,
ema_coeff=None,
use_cams=False,
granularity=quant_config.QuantGranularity.per_tensor)
hparams = quantization.QuantOps.ActHParams(
input_distribution='symmetric',
bounds=bounds,
prec=prec,
half_shift=False)
# The call to create_inputs_fake_quant has to occur from within a Flax
# module since it calls GetBounds, which is itself a Flax module.
# Thus we create a wrapper module for testing.
class TestModule(nn.Module):
hparams: quantization.QuantOps.ActHParams
@nn.compact
def __call__(self, inputs):
return quantization.QuantOps.create_inputs_fake_quant(
inputs,
          hparams=self.hparams,
get_bounds_params=GetBounds.Params(
update_stats=True, update_bounds=False))
test_module = TestModule(hparams=hparams)
state = test_module.init(jax.random.PRNGKey(0), inputs=inputs)
inputs_after_fake_quant, _ = test_module.apply(
state, inputs=inputs, mutable=True)
onp.testing.assert_array_equal(inputs, inputs_after_fake_quant)
# TODO(shivaniagrawal): Add tests for auto clip activation quantizations.
class AQTTest(parameterized.TestCase):
def setUp(self):
super().setUp()
key1, key2 = jax.random.split(jax.random.PRNGKey(0), 2)
self.rhs = jax.random.normal(key1, (2, 4)) * 20
self.lhs = jax.random.normal(key2, (3, 2)) * 2 + 3
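  # Background for the tests below: fake_quant rounds operands to the
  # quantization grid but keeps them as floats and runs a float dot, while AQT
  # scales and rounds both operands to integers, runs the dot in the integer
  # domain, and divides the result by the product of scales afterwards. At
  # 16-bit precision the two paths should agree closely (rtol=1e-2 here).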
@parameterized.named_parameters(
dict(
testcase_name='per_layer_act_per_column_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=(0,),
),
dict(
testcase_name='per_column_act_per_column_weight',
act_bounds=[[3.0, 4.0]],
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='per_layer_act_per_layer_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_column_act_per_layer_weight',
act_bounds=[[3.0, 4.0]],
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_layer_act_no_weight_quant',
act_bounds=4.0,
weight_prec=None,
weight_axis=None),
dict(
testcase_name='per_column_act_no_weight_quant',
act_bounds=[[3.0, 4.0]],
weight_prec=None,
weight_axis=None),
dict(
testcase_name='no_act_quant_per_column_weight',
act_bounds=None,
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='no_act_quant_no_weight_quant',
act_bounds=None,
weight_prec=None,
weight_axis=None),)
def test_quantized_dot_aqt(self, act_bounds, weight_prec, weight_axis):
# With a high enough precision, we expect results from fakequant and AQT to
# be very similar.
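    # Bound shapes determine granularity here: a scalar bound is per-tensor,
    # while [[3.0, 4.0]] has shape (1, 2) and supplies one bound per column of
    # the (3, 2) activations.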
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=weight_axis, half_shift=False)
if act_bounds is None:
act_params = None
else:
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array(act_bounds),
prec=16,
half_shift=False)
def quantized_matmul(quant_type):
return quantization.quantized_dot(
w=self.rhs,
act=self.lhs,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
aqt_result = quantized_matmul(QuantType.aqt)
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
@parameterized.named_parameters(
dict(
testcase_name='per_layer_act_per_column_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=(0,),
),
dict(
testcase_name='per_column_act_per_column_weight',
act_bounds=[[[3.0, 4.0]]],
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='per_layer_act_per_layer_weight',
act_bounds=4.0,
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_column_act_per_layer_weight',
act_bounds=[[[3.0, 4.0]]],
weight_prec=16,
weight_axis=None),
dict(
testcase_name='per_layer_act_no_weight_quant',
act_bounds=4.0,
weight_prec=None,
weight_axis=None),
dict(
testcase_name='per_column_act_no_weight_quant',
act_bounds=[[[3.0, 4.0]]],
weight_prec=None,
weight_axis=None),
dict(
testcase_name='no_act_quant_per_column_weight',
act_bounds=None,
weight_prec=16,
weight_axis=(0,)),
dict(
testcase_name='no_act_quant_no_weight_quant',
act_bounds=None,
weight_prec=None,
weight_axis=None),
)
def test_quantized_dot_general_aqt(self, act_bounds, weight_prec,
weight_axis):
# With a high enough precision, we expect results from fakequant and AQT to
# be very similar.
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=weight_axis, half_shift=False)
if act_bounds is None:
act_params = None
else:
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array(act_bounds),
prec=16,
half_shift=False)
lhs_ndims_3 = jnp.array(
fp32(2.0 * onp.random.uniform(0, 1.0, size=(4, 3, 2))))
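    # The dimension numbers contract the last axis of the (4, 3, 2) lhs with
    # axis 0 of the (2, 4) rhs and use no batch dims, so the product has shape
    # (4, 3, 4), as asserted below.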
def quantized_matmul(quant_type):
return quantization.quantized_dot_general(
w=self.rhs,
act=lhs_ndims_3,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
dimension_numbers=(((lhs_ndims_3.ndim - 1,), (0,)), ((), ())),
prefer_int8_to_int32_dot=True)
aqt_result = quantized_matmul(QuantType.aqt)
self.assertEqual(aqt_result.shape, (4, 3, 4))
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
def assert_is_integer_in_range(self, x, *, prec, distribution):
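    # round_and_clip / floor_and_clip return x unchanged only when x already
    # consists of integers inside the prec-bit range for the given
    # distribution, so comparing against the original x verifies that the
    # mocked lax.dot received pre-quantized integer values.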
if distribution == 'symmetric':
x_clipped = primitives.round_and_clip_to_signed_int(
x, prec=prec, dtype=x.dtype, half_shift=False)
elif distribution == 'positive':
x_clipped = primitives.floor_and_clip_to_unsigned_int(
x, prec=prec, dtype=x.dtype, half_shift=False)
else:
raise ValueError(f'Invalid distribution {distribution}')
onp.testing.assert_array_equal(
x, x_clipped,
f'Array cannot be losslessly cast to integer with precision {prec} '
f'and {distribution} distribution.')
@parameterized.parameters(
dict(act_distribution='symmetric', prefer_int8_to_int32_dot=True, prec=4),
dict(act_distribution='symmetric', prefer_int8_to_int32_dot=True, prec=8),
dict(act_distribution='positive', prefer_int8_to_int32_dot=True, prec=4),
dict(act_distribution='positive', prefer_int8_to_int32_dot=True, prec=8),
dict(
act_distribution='symmetric', prefer_int8_to_int32_dot=False, prec=4))
@mock.patch.object(jax.lax, 'dot_general')
def test_lax_dot_has_integer_inputs_in_quantized_dot(self, mock_dot_general,
act_distribution,
prefer_int8_to_int32_dot,
prec):
weight_params = QuantOps.WeightParams(
prec=prec, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution=act_distribution,
bounds=jnp.array([[3.0, 1.5]]),
prec=prec,
half_shift=False)
act = self.lhs
if act_distribution == 'positive':
act = jnp.abs(act)
# We need this context manager to stop Jax from trying to compile the arms
# of the `lax.cond` call in `dot_general_aqt`. By default, Jax will always
# try to compile the functions passed to `lax.cond`, even if outside of a
# JITed context. JIT compilation is incompatible with using a mock for the
# call to 'dot_general' because during compilation Jax will expect
# 'dot_general' to return a tracer and will throw an error if it returns a
    # mock instead. By explicitly using jax.disable_jit, Jax will not try to
    # compile the arms of lax.cond and so using a mock will work fine.
with jax.disable_jit():
quantization.quantized_dot(
w=self.rhs,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=QuantType.aqt,
prefer_int8_to_int32_dot=prefer_int8_to_int32_dot)
act_inputs, weight_inputs = mock_dot_general.call_args[0]
self.assert_is_integer_in_range(
act_inputs, prec=prec, distribution=act_distribution)
self.assert_is_integer_in_range(
weight_inputs, prec=prec, distribution='symmetric')
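    # Symmetric prec<=8 values fit in the signed int8 range [-127, 127], but a
    # positive distribution at prec=8 uses unsigned levels up to
    # 2**8 - 1 = 255, which overflows int8; in that case the inputs stay
    # float32.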
if prefer_int8_to_int32_dot and not (act_distribution == 'positive' and
prec == 8):
expected_input_dtype = jnp.int8
else:
expected_input_dtype = jnp.float32
self.assertEqual(act_inputs.dtype, expected_input_dtype)
self.assertEqual(weight_inputs.dtype, expected_input_dtype)
@parameterized.parameters(
itertools.product(
(jnp.bfloat16, jnp.float32), (4, None),
(quantization.QuantType.aqt, quantization.QuantType.fake_quant)))
def test_quantized_dot_has_correct_dtype(self, input_dtype, act_prec,
quant_type):
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array([[3.0, 1.5]]),
prec=act_prec,
half_shift=False)
act = self.lhs.astype(input_dtype)
w = self.rhs.astype(input_dtype)
output = quantization.quantized_dot(
w=w,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
self.assertEqual(output.dtype, input_dtype)
@parameterized.parameters(
dict(quant_type=quantization.QuantType.aqt),
dict(quant_type=quantization.QuantType.fake_quant))
def test_quantized_dot_raises_with_mixed_dtype(self, quant_type):
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=jnp.array([[3.0, 1.5]]),
prec=4,
half_shift=False)
act = self.lhs.astype(jnp.bfloat16)
w = self.rhs.astype(jnp.float32)
with self.assertRaises(TypeError):
quantization.quantized_dot(
w=w,
act=act,
weight_params=weight_params,
act_hparams=act_params,
get_bounds_params=None,
quant_type=quant_type,
prefer_int8_to_int32_dot=True)
@parameterized.parameters(
itertools.product(
(jnp.bfloat16, jnp.float32), (4, None),
(quantization.QuantType.aqt, quantization.QuantType.fake_quant)))
def test_dynamic_quantized_dot_general_has_correct_dtype(
self, input_dtype, act_prec, quant_type):
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=2.0,
prec=act_prec,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=1.5,
prec=act_prec,
half_shift=False)
lhs_act = self.lhs.astype(input_dtype)
rhs_act = self.rhs.astype(input_dtype)
output = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=quant_type)
self.assertEqual(output.dtype, input_dtype)
def test_dynamic_quantized_dot_general_raises_with_mixed_dtype(self):
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric', bounds=2.0, prec=4, half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric', bounds=1.5, prec=4, half_shift=False)
lhs_act = self.lhs.astype(jnp.bfloat16)
rhs_act = self.rhs.astype(jnp.float32)
with self.assertRaises(TypeError):
quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt)
@parameterized.parameters(
dict(lhs_prec=16, rhs_prec=16), dict(lhs_prec=None, rhs_prec=16),
dict(lhs_prec=16, rhs_prec=None), dict(lhs_prec=None, rhs_prec=None))
def test_quantized_dynamic_dot_general(self, lhs_prec, rhs_prec):
lhs_bounds = 2.0
rhs_bounds = 1.5
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=lhs_bounds,
prec=lhs_prec,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=rhs_bounds,
prec=rhs_prec,
half_shift=False)
def quantized_matmul(quant_type):
return quantization.quantized_dynamic_dot_general(
lhs_act=self.lhs,
rhs_act=self.rhs,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=quant_type)
aqt_result = quantized_matmul(QuantType.aqt)
fakequant_result = quantized_matmul(QuantType.fake_quant)
onp.testing.assert_allclose(
aqt_result,
fakequant_result,
rtol=1e-2,
err_msg='AQT and fakequant significantly disagree')
def test_quantized_dynamic_dot_general_get_bounds(self):
class TestModule(nn.Module):
@nn.compact
def __call__(self, lhs, rhs):
lhs_get_bounds = GetBounds.Hyper(
initial_bound=10.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.per_tensor)
rhs_get_bounds = GetBounds.Hyper(
initial_bound=5.0,
stddev_coeff=0,
absdev_coeff=0,
mix_coeff=0,
granularity=quant_config.QuantGranularity.per_tensor)
lhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=lhs_get_bounds,
prec=8,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution='symmetric',
bounds=rhs_get_bounds,
prec=8,
half_shift=False)
lhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=True, update_bounds=False, module_name='lhs')
rhs_get_bounds_params = get_bounds.GetBounds.Params(
update_stats=True, update_bounds=False, module_name='rhs')
out = quantization.quantized_dynamic_dot_general(
lhs_act=lhs,
rhs_act=rhs,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt,
lhs_get_bounds_params=lhs_get_bounds_params,
rhs_get_bounds_params=rhs_get_bounds_params)
return out
lhs = jnp.array([[2.0]])
rhs = jnp.array([[3.0]])
module = TestModule()
state = module.init(jax.random.PRNGKey(0), lhs, rhs)
out, _ = module.apply(state, lhs, rhs, mutable=True)
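    # With all statistics coefficients at zero, GetBounds keeps each bound at
    # its initial value, so the symmetric 8-bit scales are (2**7 - 1) / bound.
    # AQT rounds each scaled operand to an integer and rescales the integer
    # product, which is exactly what expected_out reproduces below.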
lhs_scale = 127.0 / 10.0
rhs_scale = 127.0 / 5.0
expected_out = (round(lhs_scale * 2.0) * round(rhs_scale * 3.0)) / (
lhs_scale * rhs_scale)
onp.testing.assert_allclose(out, [[expected_out]])
@parameterized.parameters(
dict(lhs_distribution='symmetric', rhs_distribution='symmetric'),
dict(lhs_distribution='positive', rhs_distribution='symmetric'),
dict(lhs_distribution='symmetric', rhs_distribution='positive'),
dict(lhs_distribution='positive', rhs_distribution='positive'))
@mock.patch.object(jax.lax, 'dot_general')
def test_lax_dot_has_integer_inputs_in_dynamic_dot_general(
self, mock_dot_general, lhs_distribution, rhs_distribution):
lhs_params = QuantOps.ActHParams(
input_distribution=lhs_distribution,
bounds=2.0,
prec=4,
half_shift=False)
rhs_params = QuantOps.ActHParams(
input_distribution=rhs_distribution,
bounds=1.5,
prec=4,
half_shift=False)
lhs_act = self.lhs
if lhs_distribution == 'positive':
lhs_act = jnp.abs(lhs_act)
rhs_act = self.rhs
if rhs_distribution == 'positive':
rhs_act = jnp.abs(rhs_act)
quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
lhs_act_hparams=lhs_params,
rhs_act_hparams=rhs_params,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())),
quant_type=QuantType.aqt)
lhs_inputs, rhs_inputs = mock_dot_general.call_args[0]
self.assert_is_integer_in_range(
lhs_inputs, prec=4, distribution=lhs_distribution)
self.assert_is_integer_in_range(
rhs_inputs, prec=4, distribution=rhs_distribution)
def test_quantized_dot_no_quant(self):
act_hparams = QuantOps.ActHParams(
input_distribution='symmetric', bounds=-1.0, prec=4, half_shift=False)
weight_params = QuantOps.WeightParams(prec=4, axis=(0,), half_shift=False)
act = jnp.array([[-5.0]])
w = jnp.array([[-4.99]])
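    # bounds=-1.0 disables activation quantization (mirroring
    # test_no_quantization above), and -4.99 is the per-column maximum of w,
    # so it maps to exactly -(2**3 - 1) = -7 on the grid and dequantizes back
    # to -4.99; the product therefore matches the unquantized act * w up to
    # float rounding.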
res = quantization.quantized_dot(
w=w,
act=act,
quant_type=quantization.QuantType.aqt,
weight_params=weight_params,
act_hparams=act_hparams,
get_bounds_params=None,
prefer_int8_to_int32_dot=True)
onp.testing.assert_allclose(res, act * w)
def test_quantized_dynamic_dot_general_no_quant(self):
act_hparams = QuantOps.ActHParams(
input_distribution='symmetric', bounds=-1.0, prec=4, half_shift=False)
lhs_act = jnp.array([[-5.0]])
rhs_act = jnp.array([[-4.99]])
res = quantization.quantized_dynamic_dot_general(
lhs_act=lhs_act,
rhs_act=rhs_act,
quant_type=quantization.QuantType.aqt,
lhs_act_hparams=act_hparams,
rhs_act_hparams=act_hparams,
lhs_get_bounds_params=None,
rhs_get_bounds_params=None,
dot_dimension_numbers=(((1,), (0,)), ((), ())))
onp.testing.assert_allclose(res, lhs_act * rhs_act)
class QuantizedDotFakeQuantTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.act = jnp.ones((3, 7))
self.weight = jnp.ones((7, 4))
@parameterized.named_parameters(
dict(testcase_name='no_quantization', weight_prec=None, act_prec=None),
dict(testcase_name='weight_only_quant', weight_prec=8., act_prec=None),
dict(testcase_name='act_only_quant', weight_prec=None, act_prec=4),
dict(testcase_name='both_quantized', weight_prec=4, act_prec=8),
dict(
testcase_name='both_quantized_fq_int',
weight_prec=4,
act_prec=8,
strategy=QuantType.fake_quant_with_int),
)
@mock.patch.object(QuantOps, 'create_weights_fake_quant')
@mock.patch.object(QuantOps, 'create_inputs_fake_quant')
def test_quantized_dot_general_should_call_weights_and_inputs_quantization(
self,
mock_act_fq,
mock_w_fq,
weight_prec,
act_prec,
strategy=QuantType.fake_quant):
mock_w_fq.side_effect = lambda inputs, **_: inputs
mock_act_fq.side_effect = lambda inputs, **_: inputs
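    # Both fake-quant ops are mocked as identity, so this test only checks
    # that quantized_dot forwards the expected parameters to them; it does not
    # exercise the quantization numerics.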
weight_params = QuantOps.WeightParams(
prec=weight_prec, axis=None, half_shift=False)
act_hparams = QuantOps.ActHParams( # pylint: disable=g-long-ternary
bounds=6.,
prec=act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if act_prec else None
get_bounds_params = GetBounds.Params(
update_stats=False, update_bounds=False)
quantization.quantized_dot(
w=self.weight,
act=self.act,
quant_type=strategy,
weight_params=weight_params,
act_hparams=act_hparams,
get_bounds_params=get_bounds_params,
prefer_int8_to_int32_dot=True)
quantized_type = strategy.to_jax_type()
mock_w_fq.assert_called_with(
mock.ANY,
weight_params=weight_params,
quantized_type=quantized_type,
fake_dependency=mock.ANY)
if act_hparams:
mock_act_fq.assert_called_with(
mock.ANY, hparams=act_hparams, get_bounds_params=get_bounds_params)
else:
mock_act_fq.assert_not_called()
class QuantizedDynamicDotGeneralTest(parameterized.TestCase):
def setUp(self):
    super().setUp()
self.lhs_act = jnp.ones((4, 2, 3, 7))
self.rhs_act = jnp.ones((3, 7, 5, 6))
self.dimension_numbers = (((2, 3), (0, 1)), ((), ()))
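    # These dimension numbers contract lhs axes (2, 3) (sizes 3, 7) against
    # rhs axes (0, 1) with no batch dims, so the product has shape (4, 2, 5, 6).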
@parameterized.named_parameters(
dict(
testcase_name='no_quantization', lhs_act_prec=None,
rhs_act_prec=None),
dict(testcase_name='lhs_only_quant', lhs_act_prec=8., rhs_act_prec=None),
dict(testcase_name='rhs_only_quant', lhs_act_prec=None, rhs_act_prec=4),
dict(testcase_name='both_quantized', lhs_act_prec=4, rhs_act_prec=8),
dict(
testcase_name='both_quantized_fq_int',
lhs_act_prec=4,
rhs_act_prec=8,
strategy=QuantType.fake_quant_with_int),
)
@mock.patch.object(QuantOps, 'create_inputs_fake_quant')
def test_quantized_dynamic_dot_general_should_call_inputs_quantization(
self,
mock_act_fq,
lhs_act_prec,
rhs_act_prec,
strategy=QuantType.fake_quant):
mock_act_fq.side_effect = lambda inputs, hparams, get_bounds_params: inputs
# pylint: disable=g-long-ternary
lhs_act_hparams = QuantOps.ActHParams(
bounds=6.,
prec=lhs_act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if lhs_act_prec else None
rhs_act_hparams = QuantOps.ActHParams(
bounds=6.,
prec=rhs_act_prec,
input_distribution=QuantOps.ActHParams.InputDistribution.symmetric,
half_shift=False) if rhs_act_prec else None
# pylint: enable=g-long-ternary
get_bounds_params = GetBounds.Params(
update_stats=False, update_bounds=False)
quantization.quantized_dynamic_dot_general(
lhs_act=self.lhs_act,
rhs_act=self.rhs_act,
quant_type=strategy,
dot_dimension_numbers=self.dimension_numbers,
lhs_act_hparams=lhs_act_hparams,
lhs_get_bounds_params=get_bounds_params,
rhs_act_hparams=rhs_act_hparams,
rhs_get_bounds_params=get_bounds_params,
)
calls = []
for prec in [lhs_act_prec, rhs_act_prec]:
if prec is not None:
act_hparams = QuantOps.ActHParams(
bounds=6., prec=prec, input_distribution=mock.ANY, half_shift=False)
calls.append(
mock.call(
mock.ANY,
hparams=act_hparams,
get_bounds_params=get_bounds_params))
self.assertLen(calls, mock_act_fq.call_count)
mock_act_fq.assert_has_calls(calls, any_order=True)
class QuantizedSumTest(parameterized.TestCase):
@parameterized.parameters(
# This roughly corresponds to float32, so we expect no difference vs a
# float32 sum.
dict(exp_min=-2**7, exp_max=2**7, sig_bits=23, expected_result=100.001),
      # In this low-precision case, the addition of .001 to the accumulator
      # will have no effect after quantization.
dict(exp_min=-2**3, exp_max=2**3, sig_bits=1, expected_result=100.0))
def test_quantized_sum(self, exp_min, exp_max, sig_bits, expected_result):
x = jnp.array([0.001, 100.0])
prec = QuantOps.FloatQuant.FloatPrec(exp_min, exp_max, sig_bits)
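    # In the low-precision case, 0.001 lies below the smallest normal
    # magnitude of the format (2**-8 ~= 0.0039), so its contribution is
    # presumably flushed to zero during accumulation (cf.
    # fp_cast.downcast_sat_ftz), leaving exactly 100.0.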
x_quantized_sum, x_grad = jax.value_and_grad(quantization.quantized_sum)(
x, axis=0, keepdims=False, prec=prec)
onp.testing.assert_allclose(
x_quantized_sum, onp.array(expected_result), rtol=1e-6)
# This tests that the gradient is using the straight-through-estimator
onp.testing.assert_equal(x_grad, onp.array([1.0, 1.0]))
@parameterized.parameters(
dict(keepdims=True, axis=(0, 1), expected_shape=(1, 1)),
dict(keepdims=False, axis=(0, 1), expected_shape=()),
dict(keepdims=True, axis=(0,), expected_shape=(1, 2)),
dict(keepdims=False, axis=(1,), expected_shape=(3,)))
def test_keepdims_and_axis(self, keepdims, axis, expected_shape):
x = jnp.arange(6).reshape((3, 2)).astype(jnp.float32)
prec = QuantOps.FloatQuant.FloatPrec(-2**7, 2**7, 23)
x_quantized_sum = quantization.quantized_sum(
x, keepdims=keepdims, axis=axis, prec=prec)
self.assertEqual(x_quantized_sum.shape, expected_shape)
if __name__ == '__main__':
absltest.main()
|
[
"absl.testing.absltest.main",
"jax.disable_jit",
"jax.numpy.log2",
"aqt.jax.primitives.round_and_clip_to_signed_int",
"jax.random.PRNGKey",
"aqt.jax.quantization.quantized_dot_general",
"aqt.jax.test_utils.configure_jax",
"aqt.jax.quantization.quantized_dot",
"unittest.mock.patch.object",
"aqt.jax.quantization.QuantOps.create_positive",
"aqt.jax.quantization.quantized_sum",
"jax.numpy.squeeze",
"jax.random.normal",
"jax.numpy.int32",
"aqt.jax.test_utils.assert_all_close_prec",
"aqt.jax.quantization.QuantOps.create_symmetric",
"itertools.product",
"numpy.testing.assert_allclose",
"aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec",
"jax.numpy.abs",
"aqt.jax.get_bounds.GetBounds.Hyper",
"aqt.jax.quantization.QuantOps.WeightParams",
"numpy.testing.assert_array_equal",
"jax.numpy.zeros_like",
"aqt.jax.quantization.quantized_dynamic_dot_general",
"jax.numpy.ones",
"aqt.jax.quantization.QuantOps.ActHParams",
"jax.numpy.array",
"numpy.random.uniform",
"jax.numpy.arange",
"aqt.jax.fp_cast.downcast_sat_ftz",
"numpy.array",
"jax.value_and_grad",
"aqt.jax.get_bounds.GetBounds.Params",
"jax.ops.index_update",
"unittest.mock.call",
"aqt.jax.primitives.floor_and_clip_to_unsigned_int"
] |
[((1250, 1276), 'aqt.jax.test_utils.configure_jax', 'test_utils.configure_jax', ([], {}), '()\n', (1274, 1276), False, 'from aqt.jax import test_utils\n'), ((28850, 28891), 'unittest.mock.patch.object', 'mock.patch.object', (['jax.lax', '"""dot_general"""'], {}), "(jax.lax, 'dot_general')\n", (28867, 28891), False, 'from unittest import mock\n'), ((38225, 38266), 'unittest.mock.patch.object', 'mock.patch.object', (['jax.lax', '"""dot_general"""'], {}), "(jax.lax, 'dot_general')\n", (38242, 38266), False, 'from unittest import mock\n'), ((41382, 41438), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_weights_fake_quant"""'], {}), "(QuantOps, 'create_weights_fake_quant')\n", (41399, 41438), False, 'from unittest import mock\n'), ((41442, 41497), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_inputs_fake_quant"""'], {}), "(QuantOps, 'create_inputs_fake_quant')\n", (41459, 41497), False, 'from unittest import mock\n'), ((43719, 43774), 'unittest.mock.patch.object', 'mock.patch.object', (['QuantOps', '"""create_inputs_fake_quant"""'], {}), "(QuantOps, 'create_inputs_fake_quant')\n", (43736, 43774), False, 'from unittest import mock\n'), ((47139, 47154), 'absl.testing.absltest.main', 'absltest.main', ([], {}), '()\n', (47152, 47154), False, 'from absl.testing import absltest\n'), ((1830, 1847), 'jax.numpy.array', 'jnp.array', (['bounds'], {}), '(bounds)\n', (1839, 1847), True, 'import jax.numpy as jnp\n'), ((1860, 1910), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': 'bounds', 'prec': 'prec'}), '(bounds=bounds, prec=prec)\n', (1884, 1910), False, 'from aqt.jax.quantization import QuantOps\n'), ((1915, 1979), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['relu6._scale', '(2 ** prec / bounds)'], {}), '(relu6._scale, 2 ** prec / bounds)\n', (1945, 1979), True, 'import numpy as onp\n'), ((2449, 2466), 'jax.numpy.array', 'jnp.array', (['bounds'], {}), '(bounds)\n', (2458, 2466), True, 'import jax.numpy as jnp\n'), ((2484, 2553), 'aqt.jax.quantization.QuantOps.create_symmetric', 'QuantOps.create_symmetric', ([], {'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(bounds=bounds, prec=prec, half_shift=False)\n', (2509, 2553), False, 'from aqt.jax.quantization import QuantOps\n'), ((2567, 2652), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['act_signed._scale', '((2 ** (prec - 1) - 1) / bounds)'], {}), '(act_signed._scale, (2 ** (prec - 1) - 1) /\n bounds)\n', (2597, 2652), True, 'import numpy as onp\n'), ((4378, 4502), 'aqt.jax.fp_cast.downcast_sat_ftz', 'fp_cast.downcast_sat_ftz', (['weights_scaled', 'fp_quant.fp_spec.exp_min', 'fp_quant.fp_spec.exp_max', 'fp_quant.fp_spec.sig_bits'], {}), '(weights_scaled, fp_quant.fp_spec.exp_min, fp_quant\n .fp_spec.exp_max, fp_quant.fp_spec.sig_bits)\n', (4402, 4502), False, 'from aqt.jax import fp_cast\n'), ((4639, 4724), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['weights_quant_expected', 'weights_quant_calculated'], {}), '(weights_quant_expected, weights_quant_calculated\n )\n', (4669, 4724), True, 'import numpy as onp\n'), ((4881, 4933), 'jax.numpy.int32', 'jnp.int32', (['((1 << 23 - fp_quant.fp_spec.sig_bits) - 1)'], {}), '((1 << 23 - fp_quant.fp_spec.sig_bits) - 1)\n', (4890, 4933), True, 'import jax.numpy as jnp\n'), ((6539, 6647), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'act_distribution', 'bounds': 'bounds', 
'prec': 'fp_quant', 'half_shift': '(False)'}), '(input_distribution=act_distribution, bounds=bounds,\n prec=fp_quant, half_shift=False)\n', (6558, 6647), False, 'from aqt.jax.quantization import QuantOps\n'), ((7264, 7384), 'aqt.jax.fp_cast.downcast_sat_ftz', 'fp_cast.downcast_sat_ftz', (['act_scaled', 'fp_quant.fp_spec.exp_min', 'fp_quant.fp_spec.exp_max', 'fp_quant.fp_spec.sig_bits'], {}), '(act_scaled, fp_quant.fp_spec.exp_min, fp_quant.\n fp_spec.exp_max, fp_quant.fp_spec.sig_bits)\n', (7288, 7384), False, 'from aqt.jax import fp_cast\n'), ((7503, 7575), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['act_quant_expected', 'act_quant_calculated'], {}), '(act_quant_expected, act_quant_calculated)\n', (7533, 7575), True, 'import numpy as onp\n'), ((10844, 10891), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': '(6.0)', 'prec': 'prec'}), '(bounds=6.0, prec=prec)\n', (10868, 10891), False, 'from aqt.jax.quantization import QuantOps\n'), ((11027, 11112), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['quantized_activations[0:2]', '[0.0, 2 ** prec - 1]'], {}), '(quantized_activations[0:2], [0.0, 2 ** prec - 1]\n )\n', (11057, 11112), True, 'import numpy as onp\n'), ((11280, 11352), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations[0:2]', '[0.0, max_clipped_val]'], {}), '(activations[0:2], [0.0, max_clipped_val])\n', (11310, 11352), True, 'import numpy as onp\n'), ((12317, 12403), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', '[[-6.0, -8.0], [6.0, 8.0], [0, 0.0]]'], {}), '(activations, [[-6.0, -8.0], [6.0, 8.0], [0, \n 0.0]])\n', (12347, 12403), True, 'import numpy as onp\n'), ((13722, 13797), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(activations * act_scale)', 'scaled_activations'], {}), '(activations * act_scale, scaled_activations)\n', (13752, 13797), True, 'import numpy as onp\n'), ((14473, 14528), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': 'upper_bound', 'prec': 'prec'}), '(bounds=upper_bound, prec=prec)\n', (14497, 14528), False, 'from aqt.jax.quantization import QuantOps\n'), ((14650, 14737), 'aqt.jax.quantization.QuantOps.create_positive', 'QuantOps.create_positive', ([], {'bounds': '(upper_bound * act_scale[jnp.newaxis, :])', 'prec': 'prec'}), '(bounds=upper_bound * act_scale[jnp.newaxis, :],\n prec=prec)\n', (14674, 14737), False, 'from aqt.jax.quantization import QuantOps\n'), ((14861, 14956), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(activations * act_scale[jnp.newaxis, :])', 'scaled_activations'], {}), '(activations * act_scale[jnp.newaxis, :],\n scaled_activations)\n', (14891, 14956), True, 'import numpy as onp\n'), ((15777, 15842), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', 'rescaled_activations'], {}), '(activations, rescaled_activations)\n', (15807, 15842), True, 'import numpy as onp\n'), ((16671, 16736), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['activations', 'rescaled_activations'], {}), '(activations, rescaled_activations)\n', (16701, 16736), True, 'import numpy as onp\n'), ((17261, 17331), 'aqt.jax.test_utils.assert_all_close_prec', 'test_utils.assert_all_close_prec', (['weights', 'rescaled_weights'], {'prec': 'prec'}), '(weights, rescaled_weights, prec=prec)\n', (17293, 17331), False, 'from aqt.jax import 
test_utils\n'), ((17819, 17877), 'jax.ops.index_update', 'jax.ops.index_update', (['weights', 'jax.ops.index[0, :]', 'maxval'], {}), '(weights, jax.ops.index[0, :], maxval)\n', (17839, 17877), False, 'import jax\n'), ((18125, 18202), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['quantized_weights[0]', '(2 ** (prec - 1.0) - 1.0)'], {}), '(quantized_weights[0], 2 ** (prec - 1.0) - 1.0)\n', (18155, 18202), True, 'import numpy as onp\n'), ((18340, 18397), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['weights', 'rescaled_weights'], {}), '(weights, rescaled_weights)\n', (18370, 18397), True, 'import numpy as onp\n'), ((19185, 19255), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(weights * weight_scale)', 'scaled_weights'], {}), '(weights * weight_scale, scaled_weights)\n', (19215, 19255), True, 'import numpy as onp\n'), ((20167, 20237), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['(weights * weight_scale)', 'scaled_weights'], {}), '(weights * weight_scale, scaled_weights)\n', (20197, 20237), True, 'import numpy as onp\n'), ((21119, 21155), 'jax.numpy.array', 'jnp.array', (['[[0.3, 1.4], [-5.2, 4.0]]'], {}), '([[0.3, 1.4], [-5.2, 4.0]])\n', (21128, 21155), True, 'import jax.numpy as jnp\n'), ((21168, 21369), 'aqt.jax.get_bounds.GetBounds.Hyper', 'get_bounds.GetBounds.Hyper', ([], {'initial_bound': '(-1)', 'stddev_coeff': '(1)', 'absdev_coeff': '(0)', 'mix_coeff': '(1)', 'reset_stats': '(True)', 'ema_coeff': 'None', 'use_cams': '(False)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=-1, stddev_coeff=1, absdev_coeff=0,\n mix_coeff=1, reset_stats=True, ema_coeff=None, use_cams=False,\n granularity=quant_config.QuantGranularity.per_tensor)\n', (21194, 21369), False, 'from aqt.jax import get_bounds\n'), ((21441, 21554), 'aqt.jax.quantization.QuantOps.ActHParams', 'quantization.QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=\n bounds, prec=prec, half_shift=False)\n", (21473, 21554), False, 'from aqt.jax import quantization\n'), ((22346, 22409), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['inputs', 'inputs_after_fake_quant'], {}), '(inputs, inputs_after_fake_quant)\n', (22376, 22409), True, 'import numpy as onp\n'), ((24220, 24295), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'weight_axis', 'half_shift': '(False)'}), '(prec=weight_prec, axis=weight_axis, half_shift=False)\n', (24241, 24295), False, 'from aqt.jax.quantization import QuantOps\n'), ((24959, 25083), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (24986, 25083), True, 'import numpy as onp\n'), ((26653, 26728), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'weight_axis', 'half_shift': '(False)'}), '(prec=weight_prec, axis=weight_axis, half_shift=False)\n', (26674, 26728), False, 'from aqt.jax.quantization import QuantOps\n'), ((27621, 27745), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant 
significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (27648, 27745), True, 'import numpy as onp\n'), ((28226, 28381), 'numpy.testing.assert_array_equal', 'onp.testing.assert_array_equal', (['x', 'x_clipped', 'f"""Array cannot be losslessly cast to integer with precision {prec} and {distribution} distribution."""'], {}), "(x, x_clipped,\n f'Array cannot be losslessly cast to integer with precision {prec} and {distribution} distribution.'\n )\n", (28256, 28381), True, 'import numpy as onp\n'), ((29207, 29268), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=prec, axis=(0,), half_shift=False)\n', (29228, 29268), False, 'from aqt.jax.quantization import QuantOps\n'), ((31378, 31436), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (31399, 31436), False, 'from aqt.jax.quantization import QuantOps\n'), ((31693, 31872), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=quant_type,\n prefer_int8_to_int32_dot=True)\n', (31719, 31872), False, 'from aqt.jax import quantization\n'), ((31085, 31212), 'itertools.product', 'itertools.product', (['(jnp.bfloat16, jnp.float32)', '(4, None)', '(quantization.QuantType.aqt, quantization.QuantType.fake_quant)'], {}), '((jnp.bfloat16, jnp.float32), (4, None), (quantization.\n QuantType.aqt, quantization.QuantType.fake_quant))\n', (31102, 31212), False, 'import itertools\n'), ((32197, 32255), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (32218, 32255), False, 'from aqt.jax.quantization import QuantOps\n'), ((33087, 33188), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(2.0)', 'prec': 'act_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=2.0, prec=\n act_prec, half_shift=False)\n", (33106, 33188), False, 'from aqt.jax.quantization import QuantOps\n'), ((33234, 33335), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(1.5)', 'prec': 'act_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=1.5, prec=\n act_prec, half_shift=False)\n", (33253, 33335), False, 'from aqt.jax.quantization import QuantOps\n'), ((33463, 33734), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'quant_type'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=quant_type)\n', (33505, 33734), False, 'from 
aqt.jax import quantization\n'), ((32817, 32944), 'itertools.product', 'itertools.product', (['(jnp.bfloat16, jnp.float32)', '(4, None)', '(quantization.QuantType.aqt, quantization.QuantType.fake_quant)'], {}), '((jnp.bfloat16, jnp.float32), (4, None), (quantization.\n QuantType.aqt, quantization.QuantType.fake_quant))\n', (32834, 32944), False, 'import itertools\n'), ((33926, 34019), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(2.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=2.0, prec=4,\n half_shift=False)\n", (33945, 34019), False, 'from aqt.jax.quantization import QuantOps\n'), ((34042, 34135), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(1.5)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=1.5, prec=4,\n half_shift=False)\n", (34061, 34135), False, 'from aqt.jax.quantization import QuantOps\n'), ((34922, 35030), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'lhs_bounds', 'prec': 'lhs_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=lhs_bounds, prec\n =lhs_prec, half_shift=False)\n", (34941, 35030), False, 'from aqt.jax.quantization import QuantOps\n'), ((35076, 35184), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'rhs_bounds', 'prec': 'rhs_prec', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=rhs_bounds, prec\n =rhs_prec, half_shift=False)\n", (35095, 35184), False, 'from aqt.jax.quantization import QuantOps\n'), ((35724, 35848), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['aqt_result', 'fakequant_result'], {'rtol': '(0.01)', 'err_msg': '"""AQT and fakequant significantly disagree"""'}), "(aqt_result, fakequant_result, rtol=0.01,\n err_msg='AQT and fakequant significantly disagree')\n", (35751, 35848), True, 'import numpy as onp\n'), ((37504, 37522), 'jax.numpy.array', 'jnp.array', (['[[2.0]]'], {}), '([[2.0]])\n', (37513, 37522), True, 'import jax.numpy as jnp\n'), ((37533, 37551), 'jax.numpy.array', 'jnp.array', (['[[3.0]]'], {}), '([[3.0]])\n', (37542, 37551), True, 'import jax.numpy as jnp\n'), ((37857, 37907), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['out', '[[expected_out]]'], {}), '(out, [[expected_out]])\n', (37884, 37907), True, 'import numpy as onp\n'), ((38413, 38511), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'lhs_distribution', 'bounds': '(2.0)', 'prec': '(4)', 'half_shift': '(False)'}), '(input_distribution=lhs_distribution, bounds=2.0, prec=4,\n half_shift=False)\n', (38432, 38511), False, 'from aqt.jax.quantization import QuantOps\n'), ((38558, 38656), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'rhs_distribution', 'bounds': '(1.5)', 'prec': '(4)', 'half_shift': '(False)'}), '(input_distribution=rhs_distribution, bounds=1.5, prec=4,\n half_shift=False)\n', (38577, 38656), False, 'from aqt.jax.quantization import QuantOps\n'), ((38880, 39154), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 
'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.aqt)\n', (38922, 39154), False, 'from aqt.jax import quantization\n'), ((39519, 39613), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(-1.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=-1.0, prec=4,\n half_shift=False)\n", (39538, 39613), False, 'from aqt.jax.quantization import QuantOps\n'), ((39639, 39697), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(4)', 'axis': '(0,)', 'half_shift': '(False)'}), '(prec=4, axis=(0,), half_shift=False)\n', (39660, 39697), False, 'from aqt.jax.quantization import QuantOps\n'), ((39708, 39727), 'jax.numpy.array', 'jnp.array', (['[[-5.0]]'], {}), '([[-5.0]])\n', (39717, 39727), True, 'import jax.numpy as jnp\n'), ((39736, 39756), 'jax.numpy.array', 'jnp.array', (['[[-4.99]]'], {}), '([[-4.99]])\n', (39745, 39756), True, 'import jax.numpy as jnp\n'), ((39767, 39964), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'quant_type': 'quantization.QuantType.aqt', 'weight_params': 'weight_params', 'act_hparams': 'act_hparams', 'get_bounds_params': 'None', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, quant_type=quantization.QuantType.\n aqt, weight_params=weight_params, act_hparams=act_hparams,\n get_bounds_params=None, prefer_int8_to_int32_dot=True)\n', (39793, 39964), False, 'from aqt.jax import quantization\n'), ((40017, 40058), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['res', '(act * w)'], {}), '(res, act * w)\n', (40044, 40058), True, 'import numpy as onp\n'), ((40135, 40229), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': '(-1.0)', 'prec': '(4)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=-1.0, prec=4,\n half_shift=False)\n", (40154, 40229), False, 'from aqt.jax.quantization import QuantOps\n'), ((40249, 40268), 'jax.numpy.array', 'jnp.array', (['[[-5.0]]'], {}), '([[-5.0]])\n', (40258, 40268), True, 'import jax.numpy as jnp\n'), ((40283, 40303), 'jax.numpy.array', 'jnp.array', (['[[-4.99]]'], {}), '([[-4.99]])\n', (40292, 40303), True, 'import jax.numpy as jnp\n'), ((40314, 40603), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'quant_type': 'quantization.QuantType.aqt', 'lhs_act_hparams': 'act_hparams', 'rhs_act_hparams': 'act_hparams', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n quant_type=quantization.QuantType.aqt, lhs_act_hparams=act_hparams,\n rhs_act_hparams=act_hparams, lhs_get_bounds_params=None,\n rhs_get_bounds_params=None, dot_dimension_numbers=(((1,), (0,)), ((), ())))\n', (40356, 40603), False, 'from aqt.jax import quantization\n'), ((40661, 40712), 'numpy.testing.assert_allclose', 'onp.testing.assert_allclose', (['res', '(lhs_act * rhs_act)'], {}), '(res, lhs_act * rhs_act)\n', (40688, 40712), True, 'import numpy as onp\n'), ((40827, 40843), 
'jax.numpy.ones', 'jnp.ones', (['(3, 7)'], {}), '((3, 7))\n', (40835, 40843), True, 'import jax.numpy as jnp\n'), ((40862, 40878), 'jax.numpy.ones', 'jnp.ones', (['(7, 4)'], {}), '((7, 4))\n', (40870, 40878), True, 'import jax.numpy as jnp\n'), ((41830, 41898), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'weight_prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=weight_prec, axis=None, half_shift=False)\n', (41851, 41898), False, 'from aqt.jax.quantization import QuantOps\n'), ((42171, 42228), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (42187, 42228), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((42243, 42450), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.weight', 'act': 'self.act', 'quant_type': 'strategy', 'weight_params': 'weight_params', 'act_hparams': 'act_hparams', 'get_bounds_params': 'get_bounds_params', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.weight, act=self.act, quant_type=strategy,\n weight_params=weight_params, act_hparams=act_hparams, get_bounds_params\n =get_bounds_params, prefer_int8_to_int32_dot=True)\n', (42269, 42450), False, 'from aqt.jax import quantization\n'), ((43050, 43072), 'jax.numpy.ones', 'jnp.ones', (['(4, 2, 3, 7)'], {}), '((4, 2, 3, 7))\n', (43058, 43072), True, 'import jax.numpy as jnp\n'), ((43092, 43114), 'jax.numpy.ones', 'jnp.ones', (['(3, 7, 5, 6)'], {}), '((3, 7, 5, 6))\n', (43100, 43114), True, 'import jax.numpy as jnp\n'), ((44571, 44628), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (44587, 44628), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((44643, 44963), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'self.lhs_act', 'rhs_act': 'self.rhs_act', 'quant_type': 'strategy', 'dot_dimension_numbers': 'self.dimension_numbers', 'lhs_act_hparams': 'lhs_act_hparams', 'lhs_get_bounds_params': 'get_bounds_params', 'rhs_act_hparams': 'rhs_act_hparams', 'rhs_get_bounds_params': 'get_bounds_params'}), '(lhs_act=self.lhs_act, rhs_act=\n self.rhs_act, quant_type=strategy, dot_dimension_numbers=self.\n dimension_numbers, lhs_act_hparams=lhs_act_hparams,\n lhs_get_bounds_params=get_bounds_params, rhs_act_hparams=\n rhs_act_hparams, rhs_get_bounds_params=get_bounds_params)\n', (44685, 44963), False, 'from aqt.jax import quantization\n'), ((46037, 46062), 'jax.numpy.array', 'jnp.array', (['[0.001, 100.0]'], {}), '([0.001, 100.0])\n', (46046, 46062), True, 'import jax.numpy as jnp\n'), ((46074, 46131), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', (['exp_min', 'exp_max', 'sig_bits'], {}), '(exp_min, exp_max, sig_bits)\n', (46103, 46131), False, 'from aqt.jax.quantization import QuantOps\n'), ((46899, 46949), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', (['(-2 ** 7)', '(2 ** 7)', '(23)'], {}), '(-2 ** 7, 2 ** 7, 23)\n', (46928, 46949), False, 'from aqt.jax.quantization import QuantOps\n'), ((46968, 47038), 'aqt.jax.quantization.quantized_sum', 'quantization.quantized_sum', (['x'], {'keepdims': 'keepdims', 'axis': 'axis', 'prec': 'prec'}), '(x, keepdims=keepdims, axis=axis, prec=prec)\n', (46994, 47038), False, 'from aqt.jax import quantization\n'), ((4072, 
4108), 'jax.numpy.squeeze', 'jnp.squeeze', (['weights_quant_op._scale'], {}), '(weights_quant_op._scale)\n', (4083, 4108), True, 'import jax.numpy as jnp\n'), ((5041, 5064), 'jax.numpy.zeros_like', 'jnp.zeros_like', (['weights'], {}), '(weights)\n', (5055, 5064), True, 'import jax.numpy as jnp\n'), ((6219, 6422), 'aqt.jax.get_bounds.GetBounds.Hyper', 'get_bounds.GetBounds.Hyper', ([], {'initial_bound': '(6.0)', 'stddev_coeff': '(1)', 'absdev_coeff': '(0)', 'mix_coeff': '(1)', 'reset_stats': '(True)', 'ema_coeff': 'None', 'use_cams': '(False)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=6.0, stddev_coeff=1, absdev_coeff=\n 0, mix_coeff=1, reset_stats=True, ema_coeff=None, use_cams=False,\n granularity=quant_config.QuantGranularity.per_tensor)\n', (6245, 6422), False, 'from aqt.jax import get_bounds\n'), ((7072, 7093), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (7090, 7093), False, 'import jax\n'), ((8918, 8951), 'jax.numpy.squeeze', 'jnp.squeeze', (['weights_quant._scale'], {}), '(weights_quant._scale)\n', (8929, 8951), True, 'import jax.numpy as jnp\n'), ((12823, 12840), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (12837, 12840), False, 'from jax import random\n'), ((14272, 14289), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (14286, 14289), False, 'from jax import random\n'), ((14318, 14331), 'jax.numpy.arange', 'jnp.arange', (['(4)'], {}), '(4)\n', (14328, 14331), True, 'import jax.numpy as jnp\n'), ((14422, 14451), 'jax.numpy.ones', 'jnp.ones', (['(3, 4)', 'jnp.float32'], {}), '((3, 4), jnp.float32)\n', (14430, 14451), True, 'import jax.numpy as jnp\n'), ((15338, 15355), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (15352, 15355), False, 'from jax import random\n'), ((16235, 16252), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (16249, 16252), False, 'from jax import random\n'), ((17757, 17774), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (17771, 17774), False, 'from jax import random\n'), ((18736, 18753), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (18750, 18753), False, 'from jax import random\n'), ((19669, 19686), 'jax.random.PRNGKey', 'random.PRNGKey', (['(0)'], {}), '(0)\n', (19683, 19686), False, 'from jax import random\n'), ((22208, 22229), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (22226, 22229), False, 'import jax\n'), ((22601, 22622), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (22619, 22622), False, 'import jax\n'), ((22642, 22673), 'jax.random.normal', 'jax.random.normal', (['key1', '(2, 4)'], {}), '(key1, (2, 4))\n', (22659, 22673), False, 'import jax\n'), ((24588, 24780), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.rhs', 'act': 'self.lhs', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.rhs, act=self.lhs, weight_params=\n weight_params, act_hparams=act_params, get_bounds_params=None,\n quant_type=quant_type, prefer_int8_to_int32_dot=True)\n', (24614, 24780), False, 'from aqt.jax import quantization\n'), ((27115, 27386), 'aqt.jax.quantization.quantized_dot_general', 'quantization.quantized_dot_general', ([], {'w': 'self.rhs', 'act': 'lhs_ndims_3', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'dimension_numbers': 
'(((lhs_ndims_3.ndim - 1,), (0,)), ((), ()))', 'prefer_int8_to_int32_dot': '(True)'}), '(w=self.rhs, act=lhs_ndims_3,\n weight_params=weight_params, act_hparams=act_params, get_bounds_params=\n None, quant_type=quant_type, dimension_numbers=(((lhs_ndims_3.ndim - 1,\n ), (0,)), ((), ())), prefer_int8_to_int32_dot=True)\n', (27149, 27386), False, 'from aqt.jax import quantization\n'), ((27896, 27986), 'aqt.jax.primitives.round_and_clip_to_signed_int', 'primitives.round_and_clip_to_signed_int', (['x'], {'prec': 'prec', 'dtype': 'x.dtype', 'half_shift': '(False)'}), '(x, prec=prec, dtype=x.dtype,\n half_shift=False)\n', (27935, 27986), False, 'from aqt.jax import primitives\n'), ((29516, 29528), 'jax.numpy.abs', 'jnp.abs', (['act'], {}), '(act)\n', (29523, 29528), True, 'import jax.numpy as jnp\n'), ((30150, 30167), 'jax.disable_jit', 'jax.disable_jit', ([], {}), '()\n', (30165, 30167), False, 'import jax\n'), ((30175, 30385), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'self.rhs', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'QuantType.aqt', 'prefer_int8_to_int32_dot': 'prefer_int8_to_int32_dot'}), '(w=self.rhs, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=QuantType.\n aqt, prefer_int8_to_int32_dot=prefer_int8_to_int32_dot)\n', (30201, 30385), False, 'from aqt.jax import quantization\n'), ((32538, 32717), 'aqt.jax.quantization.quantized_dot', 'quantization.quantized_dot', ([], {'w': 'w', 'act': 'act', 'weight_params': 'weight_params', 'act_hparams': 'act_params', 'get_bounds_params': 'None', 'quant_type': 'quant_type', 'prefer_int8_to_int32_dot': '(True)'}), '(w=w, act=act, weight_params=weight_params,\n act_hparams=act_params, get_bounds_params=None, quant_type=quant_type,\n prefer_int8_to_int32_dot=True)\n', (32564, 32717), False, 'from aqt.jax import quantization\n'), ((34273, 34547), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs_act', 'rhs_act': 'rhs_act', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt'}), '(lhs_act=lhs_act, rhs_act=rhs_act,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.aqt)\n', (34315, 34547), False, 'from aqt.jax import quantization\n'), ((35265, 35539), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'self.lhs', 'rhs_act': 'self.rhs', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'lhs_get_bounds_params': 'None', 'rhs_get_bounds_params': 'None', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'quant_type'}), '(lhs_act=self.lhs, rhs_act=self.\n rhs, lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n lhs_get_bounds_params=None, rhs_get_bounds_params=None,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=quant_type)\n', (35307, 35539), False, 'from aqt.jax import quantization\n'), ((37602, 37623), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(0)'], {}), '(0)\n', (37620, 37623), False, 'import jax\n'), ((38764, 38780), 'jax.numpy.abs', 'jnp.abs', (['lhs_act'], {}), '(lhs_act)\n', (38771, 38780), True, 'import 
jax.numpy as jnp\n'), ((38859, 38875), 'jax.numpy.abs', 'jnp.abs', (['rhs_act'], {}), '(rhs_act)\n', (38866, 38875), True, 'import jax.numpy as jnp\n'), ((41926, 42063), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=act_prec, input_distribution=QuantOps.\n ActHParams.InputDistribution.symmetric, half_shift=False)\n', (41945, 42063), False, 'from aqt.jax.quantization import QuantOps\n'), ((44098, 44239), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'lhs_act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=lhs_act_prec, input_distribution=\n QuantOps.ActHParams.InputDistribution.symmetric, half_shift=False)\n', (44117, 44239), False, 'from aqt.jax.quantization import QuantOps\n'), ((44315, 44456), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'rhs_act_prec', 'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'half_shift': '(False)'}), '(bounds=6.0, prec=rhs_act_prec, input_distribution=\n QuantOps.ActHParams.InputDistribution.symmetric, half_shift=False)\n', (44334, 44456), False, 'from aqt.jax.quantization import QuantOps\n'), ((46162, 46208), 'jax.value_and_grad', 'jax.value_and_grad', (['quantization.quantized_sum'], {}), '(quantization.quantized_sum)\n', (46180, 46208), False, 'import jax\n'), ((46314, 46340), 'numpy.array', 'onp.array', (['expected_result'], {}), '(expected_result)\n', (46323, 46340), True, 'import numpy as onp\n'), ((46465, 46486), 'numpy.array', 'onp.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (46474, 46486), True, 'import numpy as onp\n'), ((3706, 3758), 'numpy.random.uniform', 'onp.random.uniform', (['*weight_range'], {'size': 'weight_shape'}), '(*weight_range, size=weight_shape)\n', (3724, 3758), True, 'import numpy as onp\n'), ((3901, 3966), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'fp_quant', 'axis': 'axis', 'half_shift': '(False)'}), '(prec=fp_quant, axis=axis, half_shift=False)\n', (3922, 3966), False, 'from aqt.jax.quantization import QuantOps\n'), ((6056, 6122), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-15)', 'exp_max': '(15)', 'sig_bits': '(2)'}), '(exp_min=-15, exp_max=15, sig_bits=2)\n', (6085, 6122), False, 'from aqt.jax.quantization import QuantOps\n'), ((8523, 8594), 'numpy.random.uniform', 'onp.random.uniform', (['weight_range[0]', 'weight_range[1]'], {'size': 'weight_shape'}), '(weight_range[0], weight_range[1], size=weight_shape)\n', (8541, 8594), True, 'import numpy as onp\n'), ((8751, 8812), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'axis', 'half_shift': '(False)'}), '(prec=prec, axis=axis, half_shift=False)\n', (8772, 8812), False, 'from aqt.jax.quantization import QuantOps\n'), ((9660, 9768), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': '(8.0)', 'axis': 'axis', 'expected_scale_shape': 'expected_scale_shape', 'half_shift': '(False)'}), '(prec=8.0, axis=axis, expected_scale_shape=\n expected_scale_shape, half_shift=False)\n', (9681, 9768), False, 'from aqt.jax.quantization import QuantOps\n'), ((10104, 10239), 'aqt.jax.quantization.QuantOps.ActHParams', 
'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': '(8.0)', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=8.0, half_shift=False)\n', (10123, 10239), False, 'from aqt.jax.quantization import QuantOps\n'), ((10311, 10423), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)', 'expected_bounds_shape': 'expected_inputs_scale_shape'}), '(update_stats=False, update_bounds=False,\n expected_bounds_shape=expected_inputs_scale_shape)\n', (10327, 10423), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((11828, 11851), 'jax.numpy.array', 'jnp.array', (['[[6.0, 8.0]]'], {}), '([[6.0, 8.0]])\n', (11837, 11851), True, 'import jax.numpy as jnp\n'), ((12116, 12189), 'jax.numpy.array', 'jnp.array', (['[[-2 ** (prec - 1.0) + 1.0], [2 ** (prec - 1.0) - 1.0], [0.0]]'], {}), '([[-2 ** (prec - 1.0) + 1.0], [2 ** (prec - 1.0) - 1.0], [0.0]])\n', (12125, 12189), True, 'import jax.numpy as jnp\n'), ((12196, 12219), 'jax.numpy.array', 'jnp.array', (['[[1.0, 1.0]]'], {}), '([[1.0, 1.0]])\n', (12205, 12219), True, 'import jax.numpy as jnp\n'), ((13044, 13101), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (13060, 13101), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((13132, 13268), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=prec, half_shift=False)\n', (13151, 13268), False, 'from aqt.jax.quantization import QuantOps\n'), ((13436, 13493), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (13452, 13493), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((13524, 13676), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': '(bounds * act_scale)', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds * act_scale, prec=prec,\n half_shift=False)\n', (13543, 13676), False, 'from aqt.jax.quantization import QuantOps\n'), ((15499, 15556), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (15515, 15556), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((15587, 15732), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.positive', 'bounds': 'upper_bound', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.positive, bounds=upper_bound, prec=prec, half_shift=False\n )\n', (15606, 15732), False, 'from aqt.jax.quantization import QuantOps\n'), ((16396, 16453), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (16412, 16453), False, 'from aqt.jax.get_bounds 
import GetBounds\n'), ((16484, 16620), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': 'QuantOps.ActHParams.InputDistribution.symmetric', 'bounds': 'bounds', 'prec': 'prec', 'half_shift': '(False)'}), '(input_distribution=QuantOps.ActHParams.\n InputDistribution.symmetric, bounds=bounds, prec=prec, half_shift=False)\n', (16503, 16620), False, 'from aqt.jax.quantization import QuantOps\n'), ((17181, 17242), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (17202, 17242), False, 'from aqt.jax.quantization import QuantOps\n'), ((17967, 18028), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (17988, 18028), False, 'from aqt.jax.quantization import QuantOps\n'), ((18922, 18983), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (18943, 18983), False, 'from aqt.jax.quantization import QuantOps\n'), ((19104, 19165), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': 'None', 'half_shift': '(False)'}), '(prec=prec, axis=None, half_shift=False)\n', (19125, 19165), False, 'from aqt.jax.quantization import QuantOps\n'), ((19718, 19731), 'jax.numpy.arange', 'jnp.arange', (['(4)'], {}), '(4)\n', (19728, 19731), True, 'import jax.numpy as jnp\n'), ((19897, 19955), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0)', 'half_shift': '(False)'}), '(prec=prec, axis=0, half_shift=False)\n', (19918, 19955), False, 'from aqt.jax.quantization import QuantOps\n'), ((20089, 20147), 'aqt.jax.quantization.QuantOps.WeightParams', 'QuantOps.WeightParams', ([], {'prec': 'prec', 'axis': '(0)', 'half_shift': '(False)'}), '(prec=prec, axis=0, half_shift=False)\n', (20110, 20147), False, 'from aqt.jax.quantization import QuantOps\n'), ((22694, 22725), 'jax.random.normal', 'jax.random.normal', (['key2', '(3, 2)'], {}), '(key2, (3, 2))\n', (22711, 22725), False, 'import jax\n'), ((28049, 28141), 'aqt.jax.primitives.floor_and_clip_to_unsigned_int', 'primitives.floor_and_clip_to_unsigned_int', (['x'], {'prec': 'prec', 'dtype': 'x.dtype', 'half_shift': '(False)'}), '(x, prec=prec, dtype=x.dtype,\n half_shift=False)\n', (28090, 28141), False, 'from aqt.jax import primitives\n'), ((29376, 29399), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (29385, 29399), True, 'import jax.numpy as jnp\n'), ((31530, 31553), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (31539, 31553), True, 'import jax.numpy as jnp\n'), ((32349, 32372), 'jax.numpy.array', 'jnp.array', (['[[3.0, 1.5]]'], {}), '([[3.0, 1.5]])\n', (32358, 32372), True, 'import jax.numpy as jnp\n'), ((36052, 36190), 'aqt.jax.get_bounds.GetBounds.Hyper', 'GetBounds.Hyper', ([], {'initial_bound': '(10.0)', 'stddev_coeff': '(0)', 'absdev_coeff': '(0)', 'mix_coeff': '(0)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=10.0, stddev_coeff=0, absdev_coeff=0,\n mix_coeff=0, granularity=quant_config.QuantGranularity.per_tensor)\n', (36067, 36190), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((36273, 36410), 'aqt.jax.get_bounds.GetBounds.Hyper', 
'GetBounds.Hyper', ([], {'initial_bound': '(5.0)', 'stddev_coeff': '(0)', 'absdev_coeff': '(0)', 'mix_coeff': '(0)', 'granularity': 'quant_config.QuantGranularity.per_tensor'}), '(initial_bound=5.0, stddev_coeff=0, absdev_coeff=0,\n mix_coeff=0, granularity=quant_config.QuantGranularity.per_tensor)\n', (36288, 36410), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((36489, 36593), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'lhs_get_bounds', 'prec': '(8)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=lhs_get_bounds,\n prec=8, half_shift=False)\n", (36508, 36593), False, 'from aqt.jax.quantization import QuantOps\n'), ((36660, 36764), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'input_distribution': '"""symmetric"""', 'bounds': 'rhs_get_bounds', 'prec': '(8)', 'half_shift': '(False)'}), "(input_distribution='symmetric', bounds=rhs_get_bounds,\n prec=8, half_shift=False)\n", (36679, 36764), False, 'from aqt.jax.quantization import QuantOps\n'), ((36842, 36932), 'aqt.jax.get_bounds.GetBounds.Params', 'get_bounds.GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)', 'module_name': '"""lhs"""'}), "(update_stats=True, update_bounds=False,\n module_name='lhs')\n", (36869, 36932), False, 'from aqt.jax import get_bounds\n'), ((36974, 37064), 'aqt.jax.get_bounds.GetBounds.Params', 'get_bounds.GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)', 'module_name': '"""rhs"""'}), "(update_stats=True, update_bounds=False,\n module_name='rhs')\n", (37001, 37064), False, 'from aqt.jax import get_bounds\n'), ((37088, 37394), 'aqt.jax.quantization.quantized_dynamic_dot_general', 'quantization.quantized_dynamic_dot_general', ([], {'lhs_act': 'lhs', 'rhs_act': 'rhs', 'lhs_act_hparams': 'lhs_params', 'rhs_act_hparams': 'rhs_params', 'dot_dimension_numbers': '(((1,), (0,)), ((), ()))', 'quant_type': 'QuantType.aqt', 'lhs_get_bounds_params': 'lhs_get_bounds_params', 'rhs_get_bounds_params': 'rhs_get_bounds_params'}), '(lhs_act=lhs, rhs_act=rhs,\n lhs_act_hparams=lhs_params, rhs_act_hparams=rhs_params,\n dot_dimension_numbers=(((1,), (0,)), ((), ())), quant_type=QuantType.\n aqt, lhs_get_bounds_params=lhs_get_bounds_params, rhs_get_bounds_params\n =rhs_get_bounds_params)\n', (37130, 37394), False, 'from aqt.jax import quantization\n'), ((45126, 45219), 'aqt.jax.quantization.QuantOps.ActHParams', 'QuantOps.ActHParams', ([], {'bounds': '(6.0)', 'prec': 'prec', 'input_distribution': 'mock.ANY', 'half_shift': '(False)'}), '(bounds=6.0, prec=prec, input_distribution=mock.ANY,\n half_shift=False)\n', (45145, 45219), False, 'from aqt.jax.quantization import QuantOps\n'), ((5937, 5977), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 4)'}), '(0, 1.0, size=(10, 4))\n', (5955, 5977), True, 'import numpy as onp\n'), ((9521, 9572), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, num_features)'}), '(0, 1.0, size=(10, num_features))\n', (9539, 9572), True, 'import numpy as onp\n'), ((9924, 9964), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 4)'}), '(0, 1.0, size=(10, 4))\n', (9942, 9964), True, 'import numpy as onp\n'), ((17038, 17078), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(10, 1)'}), '(0, 1.0, size=(10, 1))\n', (17056, 17078), True, 'import numpy as onp\n'), ((24466, 24487), 'jax.numpy.array', 'jnp.array', 
(['act_bounds'], {}), '(act_bounds)\n', (24475, 24487), True, 'import jax.numpy as jnp\n'), ((26899, 26920), 'jax.numpy.array', 'jnp.array', (['act_bounds'], {}), '(act_bounds)\n', (26908, 26920), True, 'import jax.numpy as jnp\n'), ((27018, 27060), 'numpy.random.uniform', 'onp.random.uniform', (['(0)', '(1.0)'], {'size': '(4, 3, 2)'}), '(0, 1.0, size=(4, 3, 2))\n', (27036, 27060), True, 'import numpy as onp\n'), ((45262, 45339), 'unittest.mock.call', 'mock.call', (['mock.ANY'], {'hparams': 'act_hparams', 'get_bounds_params': 'get_bounds_params'}), '(mock.ANY, hparams=act_hparams, get_bounds_params=get_bounds_params)\n', (45271, 45339), False, 'from unittest import mock\n'), ((4138, 4158), 'jax.numpy.log2', 'jnp.log2', (['max_weight'], {}), '(max_weight)\n', (4146, 4158), True, 'import jax.numpy as jnp\n'), ((3020, 3085), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (3049, 3085), False, 'from aqt.jax.quantization import QuantOps\n'), ((3387, 3452), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-23)', 'exp_max': '(8)', 'sig_bits': '(2)'}), '(exp_min=-23, exp_max=8, sig_bits=2)\n', (3416, 3452), False, 'from aqt.jax.quantization import QuantOps\n'), ((6904, 6961), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(False)', 'update_bounds': '(False)'}), '(update_stats=False, update_bounds=False)\n', (6920, 6961), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((22057, 22113), 'aqt.jax.get_bounds.GetBounds.Params', 'GetBounds.Params', ([], {'update_stats': '(True)', 'update_bounds': '(False)'}), '(update_stats=True, update_bounds=False)\n', (22073, 22113), False, 'from aqt.jax.get_bounds import GetBounds\n'), ((20416, 20481), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (20445, 20481), False, 'from aqt.jax.quantization import QuantOps\n'), ((20721, 20786), 'aqt.jax.quantization.QuantOps.FloatQuant.FloatPrec', 'QuantOps.FloatQuant.FloatPrec', ([], {'exp_min': '(-11)', 'exp_max': '(4)', 'sig_bits': '(3)'}), '(exp_min=-11, exp_max=4, sig_bits=3)\n', (20750, 20786), False, 'from aqt.jax.quantization import QuantOps\n'), ((46838, 46851), 'jax.numpy.arange', 'jnp.arange', (['(6)'], {}), '(6)\n', (46848, 46851), True, 'import jax.numpy as jnp\n')]
|
import codecs
import logging
import os
import sys
from pathlib import Path
import methodtools
import regex
import toml
from timebudget import timebudget
from jyotisha import custom_transliteration
from jyotisha.panchaanga.temporal import names
from sanskrit_data.schema import common
from indic_transliteration import xsanscript
def transliterate_quoted_text(text, script):
transliterated_text = text
pieces = transliterated_text.split('`')
if len(pieces) > 1:
if len(pieces) % 2 == 1:
      # We must have matching backquotes, the contents of which can be neatly transliterated
for i, piece in enumerate(pieces):
if (i % 2) == 1:
pieces[i] = custom_transliteration.tr(piece, script, titled=True)
transliterated_text = ''.join(pieces)
else:
logging.warning('Unmatched backquotes in string: %s' % transliterated_text)
return transliterated_text
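# Usage sketch (hypothetical input; assumes DEVANAGARI as the target script):
# only the text between matched backquotes is transliterated, the rest is kept.
#   transliterate_quoted_text('Also known as `haridinam`', xsanscript.DEVANAGARI)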
def clean_id(id):
id = id.replace('/','__').strip('{}')
id = regex.sub(" +", "_", id)
return id
class HinduCalendarEventTiming(common.JsonObject):
schema = common.recursively_merge_json_schemas(common.JsonObject.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["HinduCalendarEventTiming"]
},
"month_type": {
"type": "string",
"enum": ["lunar_month", "sidereal_solar_month", "tropical_month"],
"description": "",
},
"month_number": {
"type": "integer",
"description": "",
},
"anga_type": {
"type": "string",
"enum": ["tithi", "nakshatra", "yoga", "day"],
"description": "",
},
"anga_number": {
"type": "integer",
"description": "",
},
"kaala": {
"type": "string",
"description": "",
},
"priority": {
"type": "string",
"description": "",
},
"year_start": {
"type": "integer",
"description": "",
},
"anchor_festival_id": {
"type": "string",
"description": "A festival may be (say) 8 days before some other event xyz. The xyz is stored here.",
},
"offset": {
"type": "integer",
"description": "A festival may be 8 days before some other event xyz. The 8 is stored here.",
},
}
}))
def __init__(self, month_type, month_number, anga_type, anga_number, kaala, year_start):
self.month_type = month_type
self.month_number = month_number
self.anga_type = anga_type
self.anga_number = anga_number
self.kaala = kaala
self.year_start = year_start
self.anchor_festival_id = None
self.offset = None
self.julian_handling = None
def get_kaala(self):
return "सूर्योदयः" if self.kaala is None else self.kaala
def get_priority(self):
return "puurvaviddha" if self.priority is None else self.priority
def get_month_name_en(self, script):
return names.get_month_name_en(month_type=self.month_type, month_number=self.month_number, script=script)
# noinspection PyUnresolvedReferences
class HinduCalendarEvent(common.JsonObject):
schema = common.recursively_merge_json_schemas(common.JsonObject.schema, ({
"type": "object",
"properties": {
common.TYPE_FIELD: {
"enum": ["HinduCalendarEvent"]
},
"timing": HinduCalendarEventTiming.schema,
"tags": {
"type": "array",
"items": {
"type": "string"
},
"description": "",
},
"comments": {
"type": "string",
"description": "",
},
"image": {
"type": "string",
"description": "",
},
"description": {
"type": "object",
"description": "Language code to text mapping.",
},
"names": {
"type": "object",
"description": "Language code to text array mapping.",
},
"shlokas": {
"type": "array",
"items": {
"type": "string"
}
},
"references_primary": {
"type": "array",
"items": {
"type": "string"
}
},
"references_secondary": {
"type": "array",
"items": {
"type": "string"
}
},
}
}))
def __init__(self, id):
self.id = id
self.timing = None
self.tags = None
self.references_primary = None
self.references_secondary = None
self.names = None
self.description = None
self.image = None
self.path_actual = None
def get_storage_file_name(self, base_dir):
return self.get_storage_file_name_granular(base_dir=base_dir)
def get_storage_file_name_flat(self, base_dir):
return "%(base_dir)s/%(id)s.toml" % dict(
base_dir=base_dir,
id=self.id
)
def get_storage_file_name_granular(self, base_dir):
if self.timing.anchor_festival_id is not None:
path = "relative_event/%(anchor_festival_id)s/offset__%(offset)02d/%(id)s.toml" % dict(
anchor_festival_id=self.timing.anchor_festival_id.replace('/','__'),
offset=self.timing.offset,
id=self.id
)
elif self.timing is None or self.timing.month_number is None:
path = "description_only/%(id)s.toml" % dict(
id=self.id
)
else:
try:
path = "%(month_type)s/%(anga_type)s/%(month_number)02d/%(anga_number)02d/%(id)s.toml" % dict(
month_type=self.timing.month_type,
anga_type=self.timing.anga_type,
month_number=self.timing.month_number,
anga_number=self.timing.anga_number,
id=self.id
)
except Exception:
logging.error(str(self))
raise
if base_dir.startswith("http"):
from urllib.parse import quote
path = quote(path)
return "%s/%s" % (base_dir, path)
def get_url(self):
# encoded_url = "https://" + quote(self.path_actual.replace(self.repo.path, self.repo.base_url.replace("https://", "")))
encoded_url = self.get_storage_file_name(base_dir=self.repo.base_url)
# https://github.com/jyotisham/jyotisha/runs/1229399248?check_suite_focus=true shows that ~ is being replaced there, which breaks tests. Hence the below.
return encoded_url.replace("%7E", "~")
def get_description_string(self, script, include_url=False, include_images=False,
include_shlokas=False, is_brief=False, truncate=False, header_md="#####"):
from jyotisha.panchaanga.temporal.festival.rules import summary
final_description_string = summary.describe_fest(self, include_images, include_shlokas, include_url, is_brief, script,
truncate, header_md=header_md)
return final_description_string
def get_description_dict(self, script):
from jyotisha.panchaanga.temporal.festival.rules import summary
description_dict = {}
description_dict['blurb'] = summary.get_timing_summary(self)
description_dict['detailed'] = summary.get_description_str_with_shlokas(False, self, script)
if self.image is None:
description_dict['image'] = ''
else:
description_dict['image'] = self.image
description_dict['references'] = summary.get_references_md(self)
if self.shlokas is not None:
description_dict['shlokas'] = xsanscript.transliterate(self.shlokas.replace("\n", " \n"), xsanscript.DEVANAGARI, script)
else:
description_dict['shlokas'] = ''
return description_dict
def to_gregorian(self, julian_handling):
if self.timing.month_type != RulesRepo.JULIAN_MONTH_DIR:
return
if julian_handling == RulesCollection.JULIAN_TO_GREGORIAN:
from jyotisha.panchaanga.temporal import time
greg_date = time.Date.from_julian_date(year=self.timing.year_start, month=self.timing.month_number, day=self.timing.anga_number)
self.timing.julian_handling = julian_handling
self.timing.anga_number = greg_date.day
self.timing.month_number = greg_date.month
self.timing.month_type = RulesRepo.GREGORIAN_MONTH_DIR
elif julian_handling == RulesCollection.JULIAN_AS_GREGORIAN:
self.timing.julian_handling = julian_handling
self.timing.month_type = RulesRepo.GREGORIAN_MONTH_DIR
def get_festival_rules_map(dir_path, julian_handling, repo=None):
toml_file_paths = sorted(Path(dir_path).glob("**/*.toml"))
festival_rules = {}
if len(toml_file_paths) == 0:
logging.warning("No festival rule found at %s", dir_path)
return festival_rules
for file_path in toml_file_paths:
event = HinduCalendarEvent.read_from_file(filename=str(file_path))
event.path_actual = str(file_path)
event.repo = repo
event.to_gregorian(julian_handling=julian_handling)
festival_rules[event.id] = event
return festival_rules
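# Usage sketch (hypothetical directory): build the id -> HinduCalendarEvent map
# for one rules directory, leaving Julian dates untouched.
#   rules = get_festival_rules_map('data/general', julian_handling=None)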
DATA_ROOT = os.path.join(os.path.dirname(__file__), "../data")
_ADYATITHI_REPOS_PATH = os.path.join(DATA_ROOT, "repos.toml")
class RulesRepo(common.JsonObject):
LUNAR_MONTH_DIR = "lunar_month"
SIDEREAL_SOLAR_MONTH_DIR = "sidereal_solar_month"
TROPICAL_MONTH_DIR = "tropical"
GREGORIAN_MONTH_DIR = "gregorian"
ISLAMIC_MONTH_DIR = "islamic"
JULIAN_MONTH_DIR = "julian"
RELATIVE_EVENT_DIR = "relative_event"
DAY_DIR = "day"
TITHI_DIR = "tithi"
NAKSHATRA_DIR = "nakshatra"
YOGA_DIR = "yoga"
def __init__(self, name, path=None, base_url='https://github.com/jyotisham/adyatithi/tree/master'):
super().__init__()
self.name = name
self.path = path
self.base_url = os.path.join(base_url, name)
def get_path(self):
# We don't set the path in __init__ so as to avoid storing machine-specific paths for canonical repos_tuple.
return self.path if self.path is not None else os.path.join(DATA_ROOT, self.name)
class RulesCollection(common.JsonObject):
JULIAN_AS_GREGORIAN = "treated as Gregorian"
JULIAN_TO_GREGORIAN = "converted to Gregorian"
def __init__(self, repos, julian_handling=JULIAN_TO_GREGORIAN):
super().__init__()
self.repos = repos
self.name_to_rule = {}
self.tree = None
self.set_rule_dicts(julian_handling=julian_handling)
@methodtools.lru_cache() # the order is important!
@classmethod
def get_cached(cls, repos_tuple, julian_handling=JULIAN_TO_GREGORIAN):
return RulesCollection(repos=repos_tuple, julian_handling=julian_handling)
def fix_content(self):
for repo in self.repos:
base_dir = repo.get_path()
rules_map = get_festival_rules_map(
        os.path.join(DATA_ROOT, repo.get_path()), repo=repo, julian_handling=None)
for rule in rules_map.values():
if rule.shlokas is not None:
rule.shlokas = rule.shlokas.replace("\\n", " \n")
rule.path_actual = None
rule.repo = None
rule.dump_to_file(filename=rule.get_storage_file_name(base_dir=base_dir))
def fix_filenames(self):
for repo in self.repos:
base_dir = repo.get_path()
rules_map = get_festival_rules_map(
os.path.join(DATA_ROOT, repo.get_path()), repo=repo, julian_handling=None)
for rule in rules_map.values():
rule.id = clean_id(rule.id)
expected_path = rule.get_storage_file_name(base_dir=base_dir)
if rule.path_actual != expected_path:
logging.info(str((rule.path_actual, expected_path)))
os.makedirs(os.path.dirname(expected_path), exist_ok=True)
os.rename(rule.path_actual, expected_path)
@timebudget
def set_rule_dicts(self, julian_handling):
for repo in self.repos:
self.name_to_rule.update(get_festival_rules_map(
os.path.join(DATA_ROOT, repo.get_path()), repo=repo, julian_handling=julian_handling))
from sanskrit_data import collection_helper
self.tree = collection_helper.tree_maker(leaves=self.name_to_rule.values(), path_fn=lambda x: x.get_storage_file_name_granular(base_dir="").replace(".toml", ""))
def get_month_anga_fests(self, month_type, month, anga_type_id, anga):
if int(month) != month:
# Deal with adhika mAsas
month_str = "%02d.5" % month
else:
month_str = "%02d" % month
from jyotisha.panchaanga.temporal.zodiac import Anga
if isinstance(anga, Anga):
anga = anga.index
try:
return self.tree[month_type.lower()][anga_type_id.lower()][month_str]["%02d" % anga]
except KeyError:
return {}
def get_possibly_relevant_fests(self, month_type, month, anga_type_id, angas):
fest_dict = {}
for anga in angas:
from jyotisha.panchaanga.temporal.zodiac.angas import Tithi
if isinstance(anga, Tithi) and month_type == RulesRepo.LUNAR_MONTH_DIR:
month = anga.month.index
for m in [month, 0]:
fest_dict.update(self.get_month_anga_fests(month_type=month_type, month=m, anga_type_id=anga_type_id, anga=anga))
return fest_dict
# Essential for depickling to work.
common.update_json_class_index(sys.modules[__name__])
# logging.debug(common.json_class_index)
# The below is filled by load_repos() below.
rule_repos = ()
def dump_repos():
repos = [repo.to_json_map() for repo in rule_repos]
repos.sort(key=lambda x: x["name"])
with codecs.open(_ADYATITHI_REPOS_PATH, "w") as fp:
toml.dump({"data": repos}, fp)
def load_repos():
"""
common.update_json_class_index should be called before calling this.
:return:
"""
global rule_repos
with codecs.open(_ADYATITHI_REPOS_PATH, "r") as fp:
repos = toml.load(fp)
rule_repos = tuple(common.JsonObject.make_from_dict_list(repos["data"]))
load_repos()
if __name__ == '__main__':
# dump_repos()
rules_collection = RulesCollection.get_cached(repos_tuple=rule_repos, julian_handling=None)
# rules_collection = RulesCollection(repos=[RulesRepo(name="mahApuruSha/xatra-later")], julian_handling=None)
rules_collection.fix_filenames()
# rules_collection.fix_content()
|
[
"jyotisha.custom_transliteration.tr",
"pathlib.Path",
"jyotisha.panchaanga.temporal.festival.rules.summary.get_description_str_with_shlokas",
"jyotisha.panchaanga.temporal.time.Date.from_julian_date",
"os.path.join",
"codecs.open",
"logging.warning",
"os.path.dirname",
"jyotisha.panchaanga.temporal.festival.rules.summary.describe_fest",
"toml.load",
"sanskrit_data.schema.common.recursively_merge_json_schemas",
"jyotisha.panchaanga.temporal.festival.rules.summary.get_references_md",
"jyotisha.panchaanga.temporal.festival.rules.summary.get_timing_summary",
"os.rename",
"regex.sub",
"methodtools.lru_cache",
"sanskrit_data.schema.common.update_json_class_index",
"jyotisha.panchaanga.temporal.names.get_month_name_en",
"sanskrit_data.schema.common.JsonObject.make_from_dict_list",
"urllib.parse.quote",
"toml.dump"
] |
[((8806, 8843), 'os.path.join', 'os.path.join', (['DATA_ROOT', '"""repos.toml"""'], {}), "(DATA_ROOT, 'repos.toml')\n", (8818, 8843), False, 'import os\n'), ((12750, 12803), 'sanskrit_data.schema.common.update_json_class_index', 'common.update_json_class_index', (['sys.modules[__name__]'], {}), '(sys.modules[__name__])\n', (12780, 12803), False, 'from sanskrit_data.schema import common\n'), ((966, 990), 'regex.sub', 'regex.sub', (['""" +"""', '"""_"""', 'id'], {}), "(' +', '_', id)\n", (975, 990), False, 'import regex\n'), ((1067, 2037), 'sanskrit_data.schema.common.recursively_merge_json_schemas', 'common.recursively_merge_json_schemas', (['common.JsonObject.schema', "{'type': 'object', 'properties': {common.TYPE_FIELD: {'enum': [\n 'HinduCalendarEventTiming']}, 'month_type': {'type': 'string', 'enum':\n ['lunar_month', 'sidereal_solar_month', 'tropical_month'],\n 'description': ''}, 'month_number': {'type': 'integer', 'description':\n ''}, 'anga_type': {'type': 'string', 'enum': ['tithi', 'nakshatra',\n 'yoga', 'day'], 'description': ''}, 'anga_number': {'type': 'integer',\n 'description': ''}, 'kaala': {'type': 'string', 'description': ''},\n 'priority': {'type': 'string', 'description': ''}, 'year_start': {\n 'type': 'integer', 'description': ''}, 'anchor_festival_id': {'type':\n 'string', 'description':\n 'A festival may be (say) 8 days before some other event xyz. The xyz is stored here.'\n }, 'offset': {'type': 'integer', 'description':\n 'A festival may be 8 days before some other event xyz. The 8 is stored here.'\n }}}"], {}), "(common.JsonObject.schema, {'type':\n 'object', 'properties': {common.TYPE_FIELD: {'enum': [\n 'HinduCalendarEventTiming']}, 'month_type': {'type': 'string', 'enum':\n ['lunar_month', 'sidereal_solar_month', 'tropical_month'],\n 'description': ''}, 'month_number': {'type': 'integer', 'description':\n ''}, 'anga_type': {'type': 'string', 'enum': ['tithi', 'nakshatra',\n 'yoga', 'day'], 'description': ''}, 'anga_number': {'type': 'integer',\n 'description': ''}, 'kaala': {'type': 'string', 'description': ''},\n 'priority': {'type': 'string', 'description': ''}, 'year_start': {\n 'type': 'integer', 'description': ''}, 'anchor_festival_id': {'type':\n 'string', 'description':\n 'A festival may be (say) 8 days before some other event xyz. The xyz is stored here.'\n }, 'offset': {'type': 'integer', 'description':\n 'A festival may be 8 days before some other event xyz. 
The 8 is stored here.'\n }}})\n", (1104, 2037), False, 'from sanskrit_data.schema import common\n'), ((3121, 3901), 'sanskrit_data.schema.common.recursively_merge_json_schemas', 'common.recursively_merge_json_schemas', (['common.JsonObject.schema', "{'type': 'object', 'properties': {common.TYPE_FIELD: {'enum': [\n 'HinduCalendarEvent']}, 'timing': HinduCalendarEventTiming.schema,\n 'tags': {'type': 'array', 'items': {'type': 'string'}, 'description':\n ''}, 'comments': {'type': 'string', 'description': ''}, 'image': {\n 'type': 'string', 'description': ''}, 'description': {'type': 'object',\n 'description': 'Language code to text mapping.'}, 'names': {'type':\n 'object', 'description': 'Language code to text array mapping.'},\n 'shlokas': {'type': 'array', 'items': {'type': 'string'}},\n 'references_primary': {'type': 'array', 'items': {'type': 'string'}},\n 'references_secondary': {'type': 'array', 'items': {'type': 'string'}}}}"], {}), "(common.JsonObject.schema, {'type':\n 'object', 'properties': {common.TYPE_FIELD: {'enum': [\n 'HinduCalendarEvent']}, 'timing': HinduCalendarEventTiming.schema,\n 'tags': {'type': 'array', 'items': {'type': 'string'}, 'description':\n ''}, 'comments': {'type': 'string', 'description': ''}, 'image': {\n 'type': 'string', 'description': ''}, 'description': {'type': 'object',\n 'description': 'Language code to text mapping.'}, 'names': {'type':\n 'object', 'description': 'Language code to text array mapping.'},\n 'shlokas': {'type': 'array', 'items': {'type': 'string'}},\n 'references_primary': {'type': 'array', 'items': {'type': 'string'}},\n 'references_secondary': {'type': 'array', 'items': {'type': 'string'}}}})\n", (3158, 3901), False, 'from sanskrit_data.schema import common\n'), ((8744, 8769), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8759, 8769), False, 'import os\n'), ((10034, 10057), 'methodtools.lru_cache', 'methodtools.lru_cache', ([], {}), '()\n', (10055, 10057), False, 'import methodtools\n'), ((2927, 3030), 'jyotisha.panchaanga.temporal.names.get_month_name_en', 'names.get_month_name_en', ([], {'month_type': 'self.month_type', 'month_number': 'self.month_number', 'script': 'script'}), '(month_type=self.month_type, month_number=self.\n month_number, script=script)\n', (2950, 3030), False, 'from jyotisha.panchaanga.temporal import names\n'), ((6504, 6630), 'jyotisha.panchaanga.temporal.festival.rules.summary.describe_fest', 'summary.describe_fest', (['self', 'include_images', 'include_shlokas', 'include_url', 'is_brief', 'script', 'truncate'], {'header_md': 'header_md'}), '(self, include_images, include_shlokas, include_url,\n is_brief, script, truncate, header_md=header_md)\n', (6525, 6630), False, 'from jyotisha.panchaanga.temporal.festival.rules import summary\n'), ((6839, 6871), 'jyotisha.panchaanga.temporal.festival.rules.summary.get_timing_summary', 'summary.get_timing_summary', (['self'], {}), '(self)\n', (6865, 6871), False, 'from jyotisha.panchaanga.temporal.festival.rules import summary\n'), ((6907, 6968), 'jyotisha.panchaanga.temporal.festival.rules.summary.get_description_str_with_shlokas', 'summary.get_description_str_with_shlokas', (['(False)', 'self', 'script'], {}), '(False, self, script)\n', (6947, 6968), False, 'from jyotisha.panchaanga.temporal.festival.rules import summary\n'), ((7126, 7157), 'jyotisha.panchaanga.temporal.festival.rules.summary.get_references_md', 'summary.get_references_md', (['self'], {}), '(self)\n', (7151, 7157), False, 'from jyotisha.panchaanga.temporal.festival.rules 
import summary\n'), ((8348, 8405), 'logging.warning', 'logging.warning', (['"""No festival rule found at %s"""', 'dir_path'], {}), "('No festival rule found at %s', dir_path)\n", (8363, 8405), False, 'import logging\n'), ((9418, 9446), 'os.path.join', 'os.path.join', (['base_url', 'name'], {}), '(base_url, name)\n', (9430, 9446), False, 'import os\n'), ((13027, 13066), 'codecs.open', 'codecs.open', (['_ADYATITHI_REPOS_PATH', '"""w"""'], {}), "(_ADYATITHI_REPOS_PATH, 'w')\n", (13038, 13066), False, 'import codecs\n'), ((13078, 13108), 'toml.dump', 'toml.dump', (["{'data': repos}", 'fp'], {}), "({'data': repos}, fp)\n", (13087, 13108), False, 'import toml\n'), ((13255, 13294), 'codecs.open', 'codecs.open', (['_ADYATITHI_REPOS_PATH', '"""r"""'], {}), "(_ADYATITHI_REPOS_PATH, 'r')\n", (13266, 13294), False, 'import codecs\n'), ((13314, 13327), 'toml.load', 'toml.load', (['fp'], {}), '(fp)\n', (13323, 13327), False, 'import toml\n'), ((794, 869), 'logging.warning', 'logging.warning', (["('Unmatched backquotes in string: %s' % transliterated_text)"], {}), "('Unmatched backquotes in string: %s' % transliterated_text)\n", (809, 869), False, 'import logging\n'), ((5743, 5754), 'urllib.parse.quote', 'quote', (['path'], {}), '(path)\n', (5748, 5754), False, 'from urllib.parse import quote\n'), ((7651, 7772), 'jyotisha.panchaanga.temporal.time.Date.from_julian_date', 'time.Date.from_julian_date', ([], {'year': 'self.timing.year_start', 'month': 'self.timing.month_number', 'day': 'self.timing.anga_number'}), '(year=self.timing.year_start, month=self.timing.\n month_number, day=self.timing.anga_number)\n', (7677, 7772), False, 'from jyotisha.panchaanga.temporal import time\n'), ((9635, 9669), 'os.path.join', 'os.path.join', (['DATA_ROOT', 'self.name'], {}), '(DATA_ROOT, self.name)\n', (9647, 9669), False, 'import os\n'), ((13351, 13403), 'sanskrit_data.schema.common.JsonObject.make_from_dict_list', 'common.JsonObject.make_from_dict_list', (["repos['data']"], {}), "(repos['data'])\n", (13388, 13403), False, 'from sanskrit_data.schema import common\n'), ((8256, 8270), 'pathlib.Path', 'Path', (['dir_path'], {}), '(dir_path)\n', (8260, 8270), False, 'from pathlib import Path\n'), ((680, 733), 'jyotisha.custom_transliteration.tr', 'custom_transliteration.tr', (['piece', 'script'], {'titled': '(True)'}), '(piece, script, titled=True)\n', (705, 733), False, 'from jyotisha import custom_transliteration\n'), ((11285, 11327), 'os.rename', 'os.rename', (['rule.path_actual', 'expected_path'], {}), '(rule.path_actual, expected_path)\n', (11294, 11327), False, 'import os\n'), ((11228, 11258), 'os.path.dirname', 'os.path.dirname', (['expected_path'], {}), '(expected_path)\n', (11243, 11258), False, 'import os\n')]
|
import rNet
import numpy as np
def generate(net, seed_, num_to_gen):
    """Sample num_to_gen characters from net, starting from the character index seed_."""
net.reset()
x=np.zeros((1,1,vocab_size))
x[0,0,seed_]=1
out=index_to_char[seed_]
for t in range(0,num_to_gen):
p=net(x)[0,0,:]
ix = np.random.choice(range(vocab_size), p=p.ravel())
x=np.zeros(x.shape)
x[0,0,ix]=1
out+=index_to_char[ix]
return out
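# Usage sketch: sample 200 characters once the vocabulary maps below are built;
# 'T' is an arbitrary seed character (any key of char_to_index works).
#   print(generate(net, char_to_index['T'], 200))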
path_='data/tiny_shakespeare.txt'
# path_='data/tiny_nietzsche.txt'
raw=open(path_, 'r').read()
chars = list(set(raw))
chars.sort()
data_size, vocab_size = (len(raw), len(chars))
print('data has %d characters, %d unique.' % (data_size, vocab_size))
char_to_index = {ch: i for i, ch in enumerate(chars)}
index_to_char = {i: ch for i, ch in enumerate(chars)}
time_steps, batch_size, input_size, hidden_size, output_size = (100, 100, vocab_size, 512, vocab_size)
net = rNet.rNet()
net.add(rNet.LSTM([input_size,hidden_size]))
net.add(rNet.FC([hidden_size,output_size],activation=rNet.softmax()))
net.init()
# where to save the model
model_path='model/L'
cost = rNet.softmax_loss()
# settings for RMSprop + momentum
first_moment=[np.zeros_like(l.weights) for l in net.layers]
second_moment=[np.zeros_like(l.weights) for l in net.layers]
momentum=[np.zeros_like(l.weights) for l in net.layers]
smooth_loss = -np.log(1.0/vocab_size)*time_steps * batch_size # loss at iteration 0
cache0=None
count, count_t=(0,0)
epoch=0
text_pointers = np.random.randint(data_size-time_steps-1, size=batch_size)
learning_rate, nu, mom_decay,=(1e-3, 0.97, 0.9)
clip_range=(-5,5)
print('Learning rate: %f, nu: %f, mom_decay: %f'%(learning_rate,nu,mom_decay))
print('Clip range: ',clip_range)
while True:
# reset the state every 100 sequences (10000 characters) and save the model
if count % 100 == 0:
cache0 = None
print('Cache cleared')
net.save(model_path)
print('Model saved in %s'%(model_path))
for i in range(text_pointers.size):
if text_pointers[i] + time_steps + 1 >= data_size:
text_pointers[i] = 0
batch_in=np.zeros([time_steps, batch_size, vocab_size])
batch_out=np.zeros([time_steps, batch_size], dtype=np.uint8)
for i in range(batch_size):
b_=[char_to_index[c] for c in raw[text_pointers[i]:text_pointers[i] + time_steps + 1]]
batch_in[range(time_steps),i,b_[:-1]]=1
batch_out[:,i]=np.array(b_[1:])
loss,dW, cache0 = net.train_step(batch_in,batch_out, cache0=cache0, cost=cost)
smooth_loss = smooth_loss * 0.999 + loss * 0.001
if count % 10 == 0:
txt = generate(net, np.random.randint(vocab_size), 200)
print('----\n %s \n----' % (txt))
print('epoch: %d, iter %d, smooth loss: %f, loss: %f' % (epoch, count, smooth_loss/(time_steps*batch_size),loss/(time_steps*batch_size)))
# RMSprop + momentum parameter update
for param, dparam, mem, mem1, mom in zip(net.layers, dW, second_moment,first_moment,momentum):
        np.clip(dparam, clip_range[0], clip_range[1], dparam)
        # Update the moment buffers in place: rebinding (mem = nu*mem + ...)
        # creates new arrays and leaves the stored moments unchanged.
        mem *= nu
        mem += (1 - nu) * dparam * dparam
        mem1 *= nu
        mem1 += (1 - nu) * dparam
        mom *= mom_decay
        mom -= learning_rate * dparam / np.sqrt(mem - mem1 ** 2 + 1e-8)
        param.weights += mom
text_pointers += time_steps
count_t+=time_steps
count += 1
if count_t >= data_size:
epoch += 1
count_t=0
text_pointers = np.random.randint(data_size - time_steps - 1, size=batch_size)
|
[
"rNet.rNet",
"numpy.zeros_like",
"numpy.log",
"rNet.LSTM",
"numpy.zeros",
"numpy.clip",
"numpy.random.randint",
"numpy.array",
"rNet.softmax",
"rNet.softmax_loss",
"numpy.sqrt"
] |
[((859, 870), 'rNet.rNet', 'rNet.rNet', ([], {}), '()\n', (868, 870), True, 'import rNet as rNet\n'), ((1052, 1071), 'rNet.softmax_loss', 'rNet.softmax_loss', ([], {}), '()\n', (1069, 1071), True, 'import rNet as rNet\n'), ((1426, 1488), 'numpy.random.randint', 'np.random.randint', (['(data_size - time_steps - 1)'], {'size': 'batch_size'}), '(data_size - time_steps - 1, size=batch_size)\n', (1443, 1488), True, 'import numpy as np\n'), ((98, 126), 'numpy.zeros', 'np.zeros', (['(1, 1, vocab_size)'], {}), '((1, 1, vocab_size))\n', (106, 126), True, 'import numpy as np\n'), ((879, 915), 'rNet.LSTM', 'rNet.LSTM', (['[input_size, hidden_size]'], {}), '([input_size, hidden_size])\n', (888, 915), True, 'import rNet as rNet\n'), ((1121, 1145), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1134, 1145), True, 'import numpy as np\n'), ((1182, 1206), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1195, 1206), True, 'import numpy as np\n'), ((1238, 1262), 'numpy.zeros_like', 'np.zeros_like', (['l.weights'], {}), '(l.weights)\n', (1251, 1262), True, 'import numpy as np\n'), ((2055, 2101), 'numpy.zeros', 'np.zeros', (['[time_steps, batch_size, vocab_size]'], {}), '([time_steps, batch_size, vocab_size])\n', (2063, 2101), True, 'import numpy as np\n'), ((2116, 2166), 'numpy.zeros', 'np.zeros', (['[time_steps, batch_size]'], {'dtype': 'np.uint8'}), '([time_steps, batch_size], dtype=np.uint8)\n', (2124, 2166), True, 'import numpy as np\n'), ((303, 320), 'numpy.zeros', 'np.zeros', (['x.shape'], {}), '(x.shape)\n', (311, 320), True, 'import numpy as np\n'), ((2365, 2381), 'numpy.array', 'np.array', (['b_[1:]'], {}), '(b_[1:])\n', (2373, 2381), True, 'import numpy as np\n'), ((2943, 2996), 'numpy.clip', 'np.clip', (['dparam', 'clip_range[0]', 'clip_range[1]', 'dparam'], {}), '(dparam, clip_range[0], clip_range[1], dparam)\n', (2950, 2996), True, 'import numpy as np\n'), ((3354, 3416), 'numpy.random.randint', 'np.random.randint', (['(data_size - time_steps - 1)'], {'size': 'batch_size'}), '(data_size - time_steps - 1, size=batch_size)\n', (3371, 3416), True, 'import numpy as np\n'), ((969, 983), 'rNet.softmax', 'rNet.softmax', ([], {}), '()\n', (981, 983), True, 'import rNet as rNet\n'), ((1299, 1323), 'numpy.log', 'np.log', (['(1.0 / vocab_size)'], {}), '(1.0 / vocab_size)\n', (1305, 1323), True, 'import numpy as np\n'), ((2570, 2599), 'numpy.random.randint', 'np.random.randint', (['vocab_size'], {}), '(vocab_size)\n', (2587, 2599), True, 'import numpy as np\n'), ((3134, 3166), 'numpy.sqrt', 'np.sqrt', (['(mem - mem1 ** 2 + 1e-08)'], {}), '(mem - mem1 ** 2 + 1e-08)\n', (3141, 3166), True, 'import numpy as np\n')]
|
# Copyright 2004-2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
from renpy.compat import *
import renpy.translation
import re
import os
import time
import collections
import shutil
from renpy.translation import quote_unicode
from renpy.parser import elide_filename
################################################################################
# Translation Generation
################################################################################
def scan_comments(filename):
rv = [ ]
if filename not in renpy.config.translate_comments:
return rv
comment = [ ]
start = 0
with open(filename, "r", encoding="utf-8") as f:
lines = [ i.rstrip() for i in f.read().replace(u"\ufeff", "").split('\n') ]
for i, l in enumerate(lines):
if not comment:
start = i + 1
m = re.match(r'\s*## (.*)', l)
if m:
c = m.group(1)
if comment:
c = c.strip()
comment.append(c)
elif comment:
s = "## " + " ".join(comment)
if s.endswith("#"):
s = s.rstrip("# ")
comment = [ ]
rv.append((start, s))
return rv
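# For example, two consecutive source lines "## A translator" and "## note."
# are merged into a single entry (start_line, "## A translator note.").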
tl_file_cache = { }
# Should we write the TODO marker?
todo = True
def open_tl_file(fn):
if fn in tl_file_cache:
return tl_file_cache[fn]
if not os.path.exists(fn):
dn = os.path.dirname(fn)
try:
os.makedirs(dn)
except:
pass
f = open(fn, "a", encoding="utf-8")
f.write(u"\ufeff")
else:
f = open(fn, "a", encoding="utf-8")
if todo:
f.write(u"# TO" + "DO: Translation updated at {}\n".format(time.strftime("%Y-%m-%d %H:%M")))
f.write(u"\n")
tl_file_cache[fn] = f
return f
def close_tl_files():
for i in tl_file_cache.values():
i.close()
tl_file_cache.clear()
def shorten_filename(filename):
"""
Shortens a file name. Returns the shortened filename, and a flag that says
if the filename is in the common directory.
"""
commondir = os.path.normpath(renpy.config.commondir)
gamedir = os.path.normpath(renpy.config.gamedir)
if filename.startswith(commondir):
fn = os.path.relpath(filename, commondir)
common = True
elif filename.startswith(gamedir):
fn = os.path.relpath(filename, gamedir)
common = False
else:
fn = os.path.basename(filename)
common = False
return fn, common
def write_translates(filename, language, filter): # @ReservedAssignment
fn, common = shorten_filename(filename)
# The common directory should not have dialogue in it.
if common:
return
tl_filename = os.path.join(renpy.config.gamedir, renpy.config.tl_directory, language, fn)
if tl_filename[-1] == "m":
tl_filename = tl_filename[:-1]
if language == "None":
language = None
translator = renpy.game.script.translator
for label, t in translator.file_translates[filename]:
if (t.identifier, language) in translator.language_translates:
continue
if hasattr(t, "alternate"):
if (t.alternate, language) in translator.language_translates:
continue
f = open_tl_file(tl_filename)
if label is None:
label = ""
f.write(u"# {}:{}\n".format(t.filename, t.linenumber))
f.write(u"translate {} {}:\n".format(language, t.identifier.replace('.', '_')))
f.write(u"\n")
for n in t.block:
f.write(u" # " + n.get_code() + "\n")
for n in t.block:
f.write(u" " + n.get_code(filter) + "\n")
f.write(u"\n")
def translation_filename(s):
if renpy.config.translate_launcher:
return s.launcher_file
if s.common:
return "common.rpy"
filename = s.elided
if filename[-1] == "m":
filename = filename[:-1]
return filename
def write_strings(language, filter, min_priority, max_priority, common_only): # @ReservedAssignment
"""
Writes strings to the file.
"""
if language == "None":
stl = renpy.game.script.translator.strings[None] # @UndefinedVariable
else:
stl = renpy.game.script.translator.strings[language] # @UndefinedVariable
# If this function changes, count_missing may also need to
# change.
strings = renpy.translation.scanstrings.scan(min_priority, max_priority, common_only)
stringfiles = collections.defaultdict(list)
for s in strings:
tlfn = translation_filename(s)
if tlfn is None:
continue
# Already seen.
if s.text in stl.translations:
continue
if language == "None" and tlfn == "common.rpy":
tlfn = "common.rpym"
stringfiles[tlfn].append(s)
for tlfn, sl in stringfiles.items():
# sl.sort(key=lambda s : (s.filename, s.line))
tlfn = os.path.join(renpy.config.gamedir, renpy.config.tl_directory, language, tlfn)
f = open_tl_file(tlfn)
f.write(u"translate {} strings:\n".format(language))
f.write(u"\n")
for s in sl:
text = filter(s.text)
f.write(u" # {}:{}\n".format(elide_filename(s.filename), s.line))
f.write(u" old \"{}\"\n".format(quote_unicode(s.text)))
f.write(u" new \"{}\"\n".format(quote_unicode(text)))
f.write(u"\n")
def null_filter(s):
return s
def empty_filter(s):
return ""
def generic_filter(s, function):
"""
:doc: text_utility
Transforms `s`, while leaving text tags and interpolation the same.
`function`
A function that is called with strings corresponding to runs of
text, and should return a second string that replaces that run
of text.
::
init python:
def upper(s):
return s.upper()
$ upper_string = renpy.transform_text("{b}Not Upper{/b}", upper)
"""
def remove_special(s, start, end, process):
specials = 0
first = False
rv = ""
buf = ""
for i in s:
if i == start:
if first:
specials = 0
else:
rv += process(buf)
buf = ""
if specials == 0:
first = True
specials += 1
rv += start
elif i == end:
first = False
specials -= 1
if specials < 0:
specials += 1
rv += end
else:
if specials:
rv += i
else:
buf += i
if buf:
rv += process(buf)
return rv
def remove_braces(s):
return remove_special(s, "{", "}", function)
return remove_special(s, "[", "]", remove_braces)
def rot13_transform(s):
ROT13 = { }
for i, j in zip("ABCDEFGHIJKLM", "NOPQRSTUVWXYZ"):
ROT13[i] = j
ROT13[j] = i
i = i.lower()
j = j.lower()
ROT13[i] = j
ROT13[j] = i
return "".join(ROT13.get(i, i) for i in s)
def rot13_filter(s):
return generic_filter(s, rot13_transform)
def piglatin_transform(s):
# Based on http://stackoverflow.com/a/23177629/3549890
lst = ['sh', 'gl', 'ch', 'ph', 'tr', 'br', 'fr', 'bl', 'gr', 'st', 'sl', 'cl', 'pl', 'fl']
def replace(m):
i = m.group(0)
if i[0] in ['a', 'e', 'i', 'o', 'u']:
rv = i + 'ay'
elif i[:2] in lst:
rv = i[2:] + i[:2] + 'ay'
else:
rv = i[1:] + i[0] + 'ay'
if i[0].isupper():
rv = rv.capitalize()
return rv
return re.sub(r'\w+', replace, s)
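# For example, piglatin_transform("hello world") returns "ellohay orldway",
# and a word starting with a vowel, such as "apple", becomes "appleay".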
def piglatin_filter(s):
return generic_filter(s, piglatin_transform)
def translate_list_files():
"""
Returns a list of files that exist and should be scanned for translations.
"""
filenames = list(renpy.config.translate_files)
for dirname, filename in renpy.loader.listdirfiles():
if dirname is None:
continue
if filename.startswith("tl/"):
continue
filename = os.path.join(dirname, filename)
if not (filename.endswith(".rpy") or filename.endswith(".rpym")):
continue
filename = os.path.normpath(filename)
if not os.path.exists(filename):
continue
filenames.append(filename)
return filenames
def count_missing(language, min_priority, max_priority, common_only):
"""
Prints a count of missing translations for `language`.
"""
translator = renpy.game.script.translator
missing_translates = 0
for filename in translate_list_files():
for _, t in translator.file_translates[filename]:
if (t.identifier, language) not in translator.language_translates:
missing_translates += 1
missing_strings = 0
stl = renpy.game.script.translator.strings[language] # @UndefinedVariable
strings = renpy.translation.scanstrings.scan(min_priority, max_priority, common_only)
for s in strings:
tlfn = translation_filename(s)
if tlfn is None:
continue
if s.text in stl.translations:
continue
missing_strings += 1
print("{}: {} missing dialogue translations, {} missing string translations.".format(
language,
missing_translates,
missing_strings
))
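# Example output (illustrative counts only):
#   french: 3 missing dialogue translations, 51 missing string translations.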
def translate_command():
"""
The translate command. When called from the command line, this generates
the translations.
"""
ap = renpy.arguments.ArgumentParser(description="Generates or updates translations.")
ap.add_argument("language", help="The language to generate translations for.")
ap.add_argument("--rot13", help="Apply rot13 while generating translations.", dest="rot13", action="store_true")
ap.add_argument("--piglatin", help="Apply pig latin while generating translations.", dest="piglatin", action="store_true")
ap.add_argument("--empty", help="Produce empty strings while generating translations.", dest="empty", action="store_true")
ap.add_argument("--count", help="Instead of generating files, print a count of missing translations.", dest="count", action="store_true")
ap.add_argument("--min-priority", help="Translate strings with more than this priority.", dest="min_priority", default=0, type=int)
ap.add_argument("--max-priority", help="Translate strings with more than this priority.", dest="max_priority", default=0, type=int)
ap.add_argument("--strings-only", help="Only translate strings (not dialogue).", dest="strings_only", default=False, action="store_true")
ap.add_argument("--common-only", help="Only translate string from the common code.", dest="common_only", default=False, action="store_true")
ap.add_argument("--no-todo", help="Do not include the TODO flag.", dest="todo", default=True, action="store_false")
args = ap.parse_args()
global todo
todo = args.todo
if renpy.config.translate_launcher:
max_priority = args.max_priority or 499
else:
max_priority = args.max_priority or 299
if args.count:
count_missing(args.language, args.min_priority, max_priority, args.common_only)
return False
if args.rot13:
filter = rot13_filter # @ReservedAssignment
elif args.piglatin:
filter = piglatin_filter # @ReservedAssignment
elif args.empty:
filter = empty_filter # @ReservedAssignment
else:
filter = null_filter # @ReservedAssignment
if not args.strings_only:
for filename in translate_list_files():
write_translates(filename, args.language, filter)
write_strings(args.language, filter, args.min_priority, max_priority, args.common_only)
close_tl_files()
if renpy.config.translate_launcher and (not args.strings_only):
src = os.path.join(renpy.config.renpy_base, "gui", "game", "script.rpy")
dst = os.path.join(renpy.config.gamedir, "tl", args.language, "script.rpym")
if os.path.exists(src) and not os.path.exists(dst):
shutil.copy(src, dst)
return False
renpy.arguments.register_command("translate", translate_command)
|
[
"os.makedirs",
"os.path.basename",
"os.path.dirname",
"os.path.exists",
"re.match",
"time.strftime",
"renpy.parser.elide_filename",
"collections.defaultdict",
"renpy.translation.quote_unicode",
"os.path.normpath",
"os.path.relpath",
"shutil.copy",
"os.path.join",
"re.sub"
] |
[((3245, 3285), 'os.path.normpath', 'os.path.normpath', (['renpy.config.commondir'], {}), '(renpy.config.commondir)\n', (3261, 3285), False, 'import os\n'), ((3300, 3338), 'os.path.normpath', 'os.path.normpath', (['renpy.config.gamedir'], {}), '(renpy.config.gamedir)\n', (3316, 3338), False, 'import os\n'), ((3887, 3962), 'os.path.join', 'os.path.join', (['renpy.config.gamedir', 'renpy.config.tl_directory', 'language', 'fn'], {}), '(renpy.config.gamedir, renpy.config.tl_directory, language, fn)\n', (3899, 3962), False, 'import os\n'), ((5666, 5695), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (5689, 5695), False, 'import collections\n'), ((9022, 9048), 're.sub', 're.sub', (['"""\\\\w+"""', 'replace', 's'], {}), "('\\\\w+', replace, s)\n", (9028, 9048), False, 'import re\n'), ((1980, 2006), 're.match', 're.match', (['"""\\\\s*## (.*)"""', 'l'], {}), "('\\\\s*## (.*)', l)\n", (1988, 2006), False, 'import re\n'), ((2513, 2531), 'os.path.exists', 'os.path.exists', (['fn'], {}), '(fn)\n', (2527, 2531), False, 'import os\n'), ((2546, 2565), 'os.path.dirname', 'os.path.dirname', (['fn'], {}), '(fn)\n', (2561, 2565), False, 'import os\n'), ((3392, 3428), 'os.path.relpath', 'os.path.relpath', (['filename', 'commondir'], {}), '(filename, commondir)\n', (3407, 3428), False, 'import os\n'), ((6132, 6209), 'os.path.join', 'os.path.join', (['renpy.config.gamedir', 'renpy.config.tl_directory', 'language', 'tlfn'], {}), '(renpy.config.gamedir, renpy.config.tl_directory, language, tlfn)\n', (6144, 6209), False, 'import os\n'), ((9490, 9521), 'os.path.join', 'os.path.join', (['dirname', 'filename'], {}), '(dirname, filename)\n', (9502, 9521), False, 'import os\n'), ((9638, 9664), 'os.path.normpath', 'os.path.normpath', (['filename'], {}), '(filename)\n', (9654, 9664), False, 'import os\n'), ((13272, 13338), 'os.path.join', 'os.path.join', (['renpy.config.renpy_base', '"""gui"""', '"""game"""', '"""script.rpy"""'], {}), "(renpy.config.renpy_base, 'gui', 'game', 'script.rpy')\n", (13284, 13338), False, 'import os\n'), ((13353, 13423), 'os.path.join', 'os.path.join', (['renpy.config.gamedir', '"""tl"""', 'args.language', '"""script.rpym"""'], {}), "(renpy.config.gamedir, 'tl', args.language, 'script.rpym')\n", (13365, 13423), False, 'import os\n'), ((2592, 2607), 'os.makedirs', 'os.makedirs', (['dn'], {}), '(dn)\n', (2603, 2607), False, 'import os\n'), ((3504, 3538), 'os.path.relpath', 'os.path.relpath', (['filename', 'gamedir'], {}), '(filename, gamedir)\n', (3519, 3538), False, 'import os\n'), ((3586, 3612), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (3602, 3612), False, 'import os\n'), ((9681, 9705), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (9695, 9705), False, 'import os\n'), ((13436, 13455), 'os.path.exists', 'os.path.exists', (['src'], {}), '(src)\n', (13450, 13455), False, 'import os\n'), ((13497, 13518), 'shutil.copy', 'shutil.copy', (['src', 'dst'], {}), '(src, dst)\n', (13508, 13518), False, 'import shutil\n'), ((13464, 13483), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (13478, 13483), False, 'import os\n'), ((2849, 2880), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d %H:%M"""'], {}), "('%Y-%m-%d %H:%M')\n", (2862, 2880), False, 'import time\n'), ((6427, 6453), 'renpy.parser.elide_filename', 'elide_filename', (['s.filename'], {}), '(s.filename)\n', (6441, 6453), False, 'from renpy.parser import elide_filename\n'), ((6511, 6532), 'renpy.translation.quote_unicode', 
'quote_unicode', (['s.text'], {}), '(s.text)\n', (6524, 6532), False, 'from renpy.translation import quote_unicode\n'), ((6582, 6601), 'renpy.translation.quote_unicode', 'quote_unicode', (['text'], {}), '(text)\n', (6595, 6601), False, 'from renpy.translation import quote_unicode\n')]
|
# -*- coding: utf-8 -*-
"""
Convert ECoG to NWB.
:Author: <NAME>, <NAME>
Modified by <NAME> on May 30, 2020
"""
from __future__ import print_function
import os
from datetime import datetime
from os import path
from pathlib import Path
import numpy as np
import pandas as pd
from hdmf.backends.hdf5 import H5DataIO
from ndx_ecog import ECoGSubject
from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt
from pynwb.file import DynamicTableRegion
from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO
from pynwb.ecephys import ElectricalSeries, LFP
import scipy.io as sio
from scipy.io.wavfile import read as wavread
from tqdm import tqdm
from ecogvis.functions.htk_to_nwb.HTK import readHTK
# get_manager must come after dynamic imports
manager = get_manager()
def get_analog(anin_path, num=1):
"""
Load analog data. Try:
1) analog[num].wav
2) ANIN[num].htk
Parameters
----------
    anin_path: str
num: int
Returns
-------
fs, data
"""
wav_path = path.join(anin_path, 'analog' + str(num) + '.wav')
if os.path.isfile(wav_path):
rate, data = wavread(wav_path)
return float(rate), np.array(data, dtype=float)
htk_path = path.join(anin_path, 'ANIN' + str(num) + '.htk')
if os.path.isfile(htk_path):
htk_out = readHTK(htk_path, scale_s_rate=True)
return htk_out['sampling_rate'], htk_out['data'].ravel()
print('no analog path found for ' + str(num))
return None, None
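# A minimal usage sketch for the loader above (hypothetical path, not part of
# the original module; ANIN1 is the microphone channel per the htk_config
# convention documented in chang2nwb below):
def _example_get_analog(anin_path='/data/EC1_B1/Analog'):  # hypothetical path
    fs, mic = get_analog(anin_path, num=1)
    if fs is not None:
        print('microphone: %d samples at %.1f Hz' % (len(mic), fs))
    return fs, mic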
def readhtks(htk_path, elecs=None, use_tqdm=True):
# First fix the order of htk files
    all_files = np.array([f for f in Path(htk_path).glob('*.htk') if '._' not in str(f)])
numbers = [f.name.split('.')[0].split('Wav')[1] for f in Path(htk_path).glob('*.htk') if '._' not in str(f)]
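    # Two-digit 'Wav' numbers get a '0' inserted between the digits (e.g.
    # '18' -> '108') so the lexicographic argsort below orders them
    # consistently with any three-digit names.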
new_numbers = [n[0] + '0' + n[1] if len(n) == 2 else n for n in numbers]
sorted_index = np.argsort(new_numbers)
sorted_files = all_files[sorted_index]
# Load data from files in correct order
data = []
if use_tqdm:
this_iter = tqdm(sorted_files, desc='reading electrodes')
else:
this_iter = sorted_files
for i in this_iter:
htk = readHTK(i, scale_s_rate=True)
data.append(htk['data'])
data = np.stack(data)
if len(data.shape) == 3:
data = data.transpose([2, 0, 1])
rate = htk['sampling_rate']
return rate, data.squeeze()
def get_bad_elecs(blockpath):
bad_channels_file = os.path.join(blockpath, 'Artifacts', 'badChannels.txt')
# I think bad channels is 1-indexed but I'm not sure
if os.path.isfile(bad_channels_file) and os.stat(
bad_channels_file).st_size:
dat = pd.read_csv(bad_channels_file, header=None, delimiter=' ',
engine='python')
bad_elecs_inds = dat.values.ravel() - 1
bad_elecs_inds = bad_elecs_inds[np.isfinite(bad_elecs_inds)]
else:
bad_elecs_inds = []
return bad_elecs_inds
def elecs_to_electrode_table(nwbfile, elecspath):
"""
Takes an NWB file and the elecs .mat file path, loads the anatomical and
location information for each electrode,
and writes this information to the NWB file.
Parameters:
-----------
nwbfile : object
An NWB file object.
elecspath : str
Path to the TDT_elecs_all.mat file for this subject. First, second,
and third columns of the key 'elecmatrix'
should be x, y, and z coordinates, respectively. For the 'anatomy'
field, second column should be the full electrode label and the
fourth column should be the anatomical location name.
Returns:
--------
nwb_file : object
The edited NWB file with the added electrode information.
"""
# Get anatomical and location information for electrodes.
elec_mat = sio.loadmat(elecspath)
labels = elec_mat['anatomy'][:, 1]
location = elec_mat['anatomy'][:, 3]
x = elec_mat['elecmatrix'][:, 0]
y = elec_mat['elecmatrix'][:, 1]
z = elec_mat['elecmatrix'][:, 2]
# Get MNI warped electrode coordinates.
if Path(elecspath.as_posix().split('.')[0] + '_warped.mat').is_file():
        elec_mat_warped = sio.loadmat(elecspath.as_posix().split('.')[0] + '_warped.mat')
x_warped = elec_mat_warped['elecmatrix'][:, 0]
y_warped = elec_mat_warped['elecmatrix'][:, 1]
z_warped = elec_mat_warped['elecmatrix'][:, 2]
else:
print('No warped electrode information found...filling with zeros.')
x_warped = np.zeros_like(x)
y_warped = np.zeros_like(y)
z_warped = np.zeros_like(z)
# Define electrode device label names.
group_labels = []
for current_group in labels:
name = current_group[0].rstrip('0123456789')
# Replace 'NaN' for 'null'
if name == 'NaN':
name = 'null'
group_labels.append(name)
# Get the list of unique electrode device label names
unique_group_indexes = np.unique(group_labels, return_index=True)[1]
unique_group_labels = [group_labels[f] for f in sorted(unique_group_indexes)]
    # Add additional columns to the electrodes table.
nwbfile.add_electrode_column('label', 'label of electrode')
nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')
nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('null', 'if not connected to real electrode')
for group_label in unique_group_labels:
# Get region name and device label for the group.
if 'Depth' in group_label:
brain_area = group_label.split('Depth')[0]
elif 'Strip' in group_label:
brain_area = group_label.split('Strip')[0]
elif 'Grid' in group_label:
brain_area = group_label.split('Grid')[0]
elif 'Pole' in group_label:
brain_area = group_label.split('Pole')[0]
elif 'HeschlsGyrus' in group_label:
brain_area = 'HeschlsGyrus'
elif 'null' in group_label:
brain_area = 'null'
else:
brain_area = 'other'
# Create electrode device (same as the group).
device = nwbfile.create_device(group_label)
# Create electrode group with name, description, device object,
# and general location.
electrode_group = nwbfile.create_electrode_group(
name='{} electrodes'.format(group_label),
description='{}'.format(group_label),
device=device,
location=str(brain_area)
)
# Loop through the number of electrodes in this electrode group
elec_nums = np.where(np.array(group_labels) == group_label)[0]
for elec_num in elec_nums:
# Add the electrode information to the table.
elec_location = location[elec_num]
if len(elec_location) == 0:
# If no label is recorded for this electrode, set it to null
elec_location = 'null'
is_null = True
else:
elec_location = elec_location[0]
is_null = False
nwbfile.add_electrode(
id=elec_num,
x=x[elec_num],
y=y[elec_num],
z=z[elec_num],
imp=np.nan,
x_warped=x_warped[elec_num],
y_warped=y_warped[elec_num],
z_warped=z_warped[elec_num],
location=str(elec_location),
filtering='filtering',
group=electrode_group,
label=str(labels[elec_num][0]),
bad=False,
null=is_null,
)
return nwbfile
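# Shape sketch of the expected TDT_elecs_all.mat contents, inferred from the
# docstring above (hypothetical values; scipy.io.loadmat additionally wraps
# MATLAB cell strings in nested object arrays):
def _example_elecs_mat(n=4):
    return {
        'elecmatrix': np.zeros((n, 3)),               # columns 0-2: x, y, z
        'anatomy': np.empty((n, 4), dtype=object),  # col 1: label, col 3: location
    }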
def chang2nwb(blockpath, out_file_path=None, save_to_file=False, htk_config=None):
"""
Parameters
----------
blockpath: str
out_file_path: None | str
if None, output = [blockpath]/[blockname].nwb
save_to_file : bool
If True, saves to file. If False, just returns nwbfile object
htk_config : dict
        Dictionary containing HTK conversion paths and options. Example:
{
ecephys_path: 'path_to/ecephys_htk_files',
ecephys_type: 'raw', 'preprocessed' or 'high_gamma',
analog_path: 'path_to/analog_htk_files',
anin1: {present: True, name: 'microphone', type: 'acquisition'},
anin2: {present: True, name: 'speaker1', type: 'stimulus'},
anin3: {present: False, name: 'speaker2', type: 'stimulus'},
anin4: {present: False, name: 'custom', type: 'acquisition'},
metadata: metadata,
electrodes_file: electrodes_file,
bipolar_file: bipolar_file
}
Returns
-------
"""
metadata = {}
if htk_config is None:
blockpath = Path(blockpath)
else:
blockpath = Path(htk_config['ecephys_path'])
metadata = htk_config['metadata']
blockname = blockpath.parent.name
subject_id = blockpath.parent.parent.name[2:]
if out_file_path is None:
out_file_path = blockpath.resolve().parent / ''.join(['EC', subject_id, '_', blockname, '.nwb'])
# file paths
ecog_path = blockpath
anin_path = htk_config['analog_path']
bad_time_file = path.join(blockpath, 'Artifacts', 'badTimeSegments.mat')
# Create the NWB file object
nwbfile_dict = {
'session_description': blockname,
'identifier': blockname,
'session_start_time': datetime.now().astimezone(),
'institution': 'University of California, San Francisco',
'lab': 'Chang Lab'
}
if 'NWBFile' in metadata:
nwbfile_dict.update(metadata['NWBFile'])
nwbfile = NWBFile(**nwbfile_dict)
# Read electrophysiology data from HTK files
print('reading htk acquisition...', flush=True)
ecog_rate, data = readhtks(ecog_path)
data = data.squeeze()
print('done', flush=True)
# Get electrodes info from mat file
if htk_config['electrodes_file'] is not None:
nwbfile = elecs_to_electrode_table(
nwbfile=nwbfile,
elecspath=htk_config['electrodes_file'],
)
n_electrodes = nwbfile.electrodes[:].shape[0]
all_elecs = list(range(n_electrodes))
elecs_region = nwbfile.create_electrode_table_region(
region=all_elecs,
description='ECoG electrodes on brain'
)
else:
ecephys_dict = {
'Device': [{'name': 'auto_device'}],
'ElectricalSeries': [{'name': 'ECoG', 'description': 'description'}],
'ElectrodeGroup': [{'name': 'auto_group', 'description': 'auto_group',
'location': 'location', 'device': 'auto_device'}]
}
if 'Ecephys' in metadata:
ecephys_dict.update(metadata['Ecephys'])
# Create devices
for dev in ecephys_dict['Device']:
device = nwbfile.create_device(dev['name'])
# Electrode groups
for el_grp in ecephys_dict['ElectrodeGroup']:
device = nwbfile.devices[el_grp['device']]
electrode_group = nwbfile.create_electrode_group(
name=el_grp['name'],
description=el_grp['description'],
location=el_grp['location'],
device=device
)
# Electrodes table
n_electrodes = data.shape[1]
nwbfile.add_electrode_column('label', 'label of electrode')
nwbfile.add_electrode_column('bad', 'electrode identified as too noisy')
nwbfile.add_electrode_column('x_warped', 'x warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('y_warped', 'y warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('z_warped', 'z warped onto cvs_avg35_inMNI152')
nwbfile.add_electrode_column('null', 'if not connected to real electrode')
bad_elecs_inds = get_bad_elecs(blockpath)
for elec_counter in range(n_electrodes):
bad = elec_counter in bad_elecs_inds
nwbfile.add_electrode(
id=elec_counter,
x=np.nan,
y=np.nan,
z=np.nan,
imp=np.nan,
x_warped=np.nan,
y_warped=np.nan,
z_warped=np.nan,
location='',
filtering='none',
group=electrode_group,
label='',
bad=bad,
null=False,
)
all_elecs = list(range(n_electrodes))
elecs_region = nwbfile.create_electrode_table_region(
region=all_elecs,
description='ECoG electrodes on brain'
)
# Get Bipolar table from file
if htk_config['bipolar_file'] is not None:
df = pd.read_csv(htk_config['bipolar_file'], index_col='id', sep='\t')
# Create bipolar scheme table
bipolar_scheme_table = BipolarSchemeTable(
name='bipolar_scheme_table',
description='desc'
)
# Columns for bipolar scheme - all anodes and cathodes within the same
# bipolar row are considered to have the same group and location
bipolar_scheme_table.add_column(
name='group_name',
description='electrode group name'
)
bipolar_scheme_table.add_column(
name='location',
description='electrode location'
)
# Iterate over anode / cathode rows
for i, r in df.iterrows():
if isinstance(r['anodes'], str):
anodes = [int(a) for a in r['anodes'].split(',')]
else:
anodes = [int(r['anodes'])]
if isinstance(r['cathodes'], str):
cathodes = [int(a) for a in r['cathodes'].split(',')]
else:
cathodes = [int(r['cathodes'])]
bipolar_scheme_table.add_row(
anodes=anodes,
cathodes=cathodes,
group_name=nwbfile.electrodes['group_name'][anodes[0]],
location=nwbfile.electrodes['location'][anodes[0]]
)
bipolar_scheme_table.anodes.table = nwbfile.electrodes
bipolar_scheme_table.cathodes.table = nwbfile.electrodes
# Creates bipolar table region
elecs_region = DynamicTableRegion(
name='electrodes',
data=np.arange(0, df.shape[0]),
description='desc',
table=bipolar_scheme_table
)
ecephys_ext = EcephysExt(name='ecephys_ext')
ecephys_ext.bipolar_scheme_table = bipolar_scheme_table
nwbfile.add_lab_meta_data(ecephys_ext)
# Stores HTK electrophysiology data as raw, preprocessed or high gamma
if htk_config['ecephys_type'] == 'raw':
ecog_es = ElectricalSeries(name='ECoG',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
nwbfile.add_acquisition(ecog_es)
elif htk_config['ecephys_type'] == 'preprocessed':
lfp = LFP()
ecog_es = ElectricalSeries(name='preprocessed',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
lfp.add_electrical_series(ecog_es)
# Creates the ecephys processing module
ecephys_module = nwbfile.create_processing_module(
name='ecephys',
description='preprocessed electrophysiology data'
)
ecephys_module.add_data_interface(lfp)
elif htk_config['ecephys_type'] == 'high_gamma':
ecog_es = ElectricalSeries(name='high_gamma',
data=H5DataIO(data[:, 0:n_electrodes], compression='gzip'),
electrodes=elecs_region,
rate=ecog_rate,
description='all Wav data')
# Creates the ecephys processing module
ecephys_module = nwbfile.create_processing_module(
name='ecephys',
description='preprocessed electrophysiology data'
)
ecephys_module.add_data_interface(ecog_es)
# Add ANIN 1
if htk_config['anin1']['present']:
fs, data = get_analog(anin_path, 1)
ts = TimeSeries(
name=htk_config['anin1']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin1']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN1 saved with name "', htk_config['anin1']['name'], '" in ',
htk_config['anin1']['type'])
# Add ANIN 2
if htk_config['anin2']['present']:
fs, data = get_analog(anin_path, 2)
ts = TimeSeries(
name=htk_config['anin2']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin2']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN2 saved with name "', htk_config['anin2']['name'], '" in ',
htk_config['anin2']['type'])
# Add ANIN 3
if htk_config['anin3']['present']:
fs, data = get_analog(anin_path, 3)
ts = TimeSeries(
name=htk_config['anin3']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin3']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN3 saved with name "', htk_config['anin3']['name'], '" in ',
htk_config['anin3']['type'])
# Add ANIN 4
if htk_config['anin4']['present']:
fs, data = get_analog(anin_path, 4)
ts = TimeSeries(
name=htk_config['anin4']['name'],
data=data,
unit='NA',
rate=fs,
)
if htk_config['anin4']['type'] == 'acquisition':
nwbfile.add_acquisition(ts)
else:
nwbfile.add_stimulus(ts)
print('ANIN4 saved with name "', htk_config['anin4']['name'], '" in ',
htk_config['anin4']['type'])
# Add bad time segments
if os.path.exists(bad_time_file) and os.stat(bad_time_file).st_size:
bad_time = sio.loadmat(bad_time_file)['badTimeSegments']
for row in bad_time:
nwbfile.add_invalid_time_interval(start_time=row[0],
stop_time=row[1],
tags=('ECoG artifact',),
timeseries=ecog_es)
# Subject
subject_dict = {'subject_id': subject_id}
if 'Subject' in metadata:
subject_dict.update(metadata['Subject'])
subject = ECoGSubject(**subject_dict)
nwbfile.subject = subject
if save_to_file:
print('Saving HTK content to NWB file...')
# Export the NWB file
with NWBHDF5IO(str(out_file_path), manager=manager, mode='w') as io:
io.write(nwbfile)
# read check
with NWBHDF5IO(str(out_file_path), manager=manager, mode='r') as io:
io.read()
print('NWB file saved: ', str(out_file_path))
return nwbfile, out_file_path, subject_id, blockname
|
[
"scipy.io.loadmat",
"pandas.read_csv",
"hdmf.backends.hdf5.H5DataIO",
"scipy.io.wavfile.read",
"numpy.argsort",
"os.path.isfile",
"pathlib.Path",
"numpy.arange",
"pynwb.NWBFile",
"os.path.join",
"numpy.unique",
"numpy.zeros_like",
"os.path.exists",
"numpy.isfinite",
"ndx_bipolar_scheme.EcephysExt",
"ndx_ecog.ECoGSubject",
"datetime.datetime.now",
"numpy.stack",
"tqdm.tqdm",
"pynwb.ecephys.LFP",
"os.stat",
"ecogvis.functions.htk_to_nwb.HTK.readHTK",
"pynwb.TimeSeries",
"numpy.array",
"pynwb.get_manager",
"ndx_bipolar_scheme.BipolarSchemeTable"
] |
[((765, 778), 'pynwb.get_manager', 'get_manager', ([], {}), '()\n', (776, 778), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((1076, 1100), 'os.path.isfile', 'os.path.isfile', (['wav_path'], {}), '(wav_path)\n', (1090, 1100), False, 'import os\n'), ((1268, 1292), 'os.path.isfile', 'os.path.isfile', (['htk_path'], {}), '(htk_path)\n', (1282, 1292), False, 'import os\n'), ((1855, 1878), 'numpy.argsort', 'np.argsort', (['new_numbers'], {}), '(new_numbers)\n', (1865, 1878), True, 'import numpy as np\n'), ((2218, 2232), 'numpy.stack', 'np.stack', (['data'], {}), '(data)\n', (2226, 2232), True, 'import numpy as np\n'), ((2425, 2480), 'os.path.join', 'os.path.join', (['blockpath', '"""Artifacts"""', '"""badChannels.txt"""'], {}), "(blockpath, 'Artifacts', 'badChannels.txt')\n", (2437, 2480), False, 'import os\n'), ((3800, 3822), 'scipy.io.loadmat', 'sio.loadmat', (['elecspath'], {}), '(elecspath)\n', (3811, 3822), True, 'import scipy.io as sio\n'), ((9414, 9470), 'os.path.join', 'path.join', (['blockpath', '"""Artifacts"""', '"""badTimeSegments.mat"""'], {}), "(blockpath, 'Artifacts', 'badTimeSegments.mat')\n", (9423, 9470), False, 'from os import path\n'), ((9852, 9875), 'pynwb.NWBFile', 'NWBFile', ([], {}), '(**nwbfile_dict)\n', (9859, 9875), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((19324, 19351), 'ndx_ecog.ECoGSubject', 'ECoGSubject', ([], {}), '(**subject_dict)\n', (19335, 19351), False, 'from ndx_ecog import ECoGSubject\n'), ((1123, 1140), 'scipy.io.wavfile.read', 'wavread', (['wav_path'], {}), '(wav_path)\n', (1130, 1140), True, 'from scipy.io.wavfile import read as wavread\n'), ((1312, 1348), 'ecogvis.functions.htk_to_nwb.HTK.readHTK', 'readHTK', (['htk_path'], {'scale_s_rate': '(True)'}), '(htk_path, scale_s_rate=True)\n', (1319, 1348), False, 'from ecogvis.functions.htk_to_nwb.HTK import readHTK\n'), ((2017, 2062), 'tqdm.tqdm', 'tqdm', (['sorted_files'], {'desc': '"""reading electrodes"""'}), "(sorted_files, desc='reading electrodes')\n", (2021, 2062), False, 'from tqdm import tqdm\n'), ((2144, 2173), 'ecogvis.functions.htk_to_nwb.HTK.readHTK', 'readHTK', (['i'], {'scale_s_rate': '(True)'}), '(i, scale_s_rate=True)\n', (2151, 2173), False, 'from ecogvis.functions.htk_to_nwb.HTK import readHTK\n'), ((2546, 2579), 'os.path.isfile', 'os.path.isfile', (['bad_channels_file'], {}), '(bad_channels_file)\n', (2560, 2579), False, 'import os\n'), ((2647, 2722), 'pandas.read_csv', 'pd.read_csv', (['bad_channels_file'], {'header': 'None', 'delimiter': '""" """', 'engine': '"""python"""'}), "(bad_channels_file, header=None, delimiter=' ', engine='python')\n", (2658, 2722), True, 'import pandas as pd\n'), ((4484, 4500), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (4497, 4500), True, 'import numpy as np\n'), ((4520, 4536), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (4533, 4536), True, 'import numpy as np\n'), ((4556, 4572), 'numpy.zeros_like', 'np.zeros_like', (['z'], {}), '(z)\n', (4569, 4572), True, 'import numpy as np\n'), ((4932, 4974), 'numpy.unique', 'np.unique', (['group_labels'], {'return_index': '(True)'}), '(group_labels, return_index=True)\n', (4941, 4974), True, 'import numpy as np\n'), ((8963, 8978), 'pathlib.Path', 'Path', (['blockpath'], {}), '(blockpath)\n', (8967, 8978), False, 'from pathlib import Path\n'), ((9009, 9041), 'pathlib.Path', 'Path', (["htk_config['ecephys_path']"], {}), "(htk_config['ecephys_path'])\n", (9013, 9041), False, 'from pathlib import Path\n'), ((12948, 13013), 
'pandas.read_csv', 'pd.read_csv', (["htk_config['bipolar_file']"], {'index_col': '"""id"""', 'sep': '"""\t"""'}), "(htk_config['bipolar_file'], index_col='id', sep='\\t')\n", (12959, 13013), True, 'import pandas as pd\n'), ((13084, 13151), 'ndx_bipolar_scheme.BipolarSchemeTable', 'BipolarSchemeTable', ([], {'name': '"""bipolar_scheme_table"""', 'description': '"""desc"""'}), "(name='bipolar_scheme_table', description='desc')\n", (13102, 13151), False, 'from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\n'), ((14681, 14711), 'ndx_bipolar_scheme.EcephysExt', 'EcephysExt', ([], {'name': '"""ecephys_ext"""'}), "(name='ecephys_ext')\n", (14691, 14711), False, 'from ndx_bipolar_scheme import BipolarSchemeTable, EcephysExt\n'), ((16746, 16821), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin1']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin1']['name'], data=data, unit='NA', rate=fs)\n", (16756, 16821), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((17265, 17340), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin2']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin2']['name'], data=data, unit='NA', rate=fs)\n", (17275, 17340), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((17784, 17859), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin3']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin3']['name'], data=data, unit='NA', rate=fs)\n", (17794, 17859), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((18303, 18378), 'pynwb.TimeSeries', 'TimeSeries', ([], {'name': "htk_config['anin4']['name']", 'data': 'data', 'unit': '"""NA"""', 'rate': 'fs'}), "(name=htk_config['anin4']['name'], data=data, unit='NA', rate=fs)\n", (18313, 18378), False, 'from pynwb import NWBFile, TimeSeries, get_manager, NWBHDF5IO\n'), ((18744, 18773), 'os.path.exists', 'os.path.exists', (['bad_time_file'], {}), '(bad_time_file)\n', (18758, 18773), False, 'import os\n'), ((1169, 1196), 'numpy.array', 'np.array', (['data'], {'dtype': 'float'}), '(data, dtype=float)\n', (1177, 1196), True, 'import numpy as np\n'), ((2584, 2610), 'os.stat', 'os.stat', (['bad_channels_file'], {}), '(bad_channels_file)\n', (2591, 2610), False, 'import os\n'), ((2837, 2864), 'numpy.isfinite', 'np.isfinite', (['bad_elecs_inds'], {}), '(bad_elecs_inds)\n', (2848, 2864), True, 'import numpy as np\n'), ((15370, 15375), 'pynwb.ecephys.LFP', 'LFP', ([], {}), '()\n', (15373, 15375), False, 'from pynwb.ecephys import ElectricalSeries, LFP\n'), ((18778, 18800), 'os.stat', 'os.stat', (['bad_time_file'], {}), '(bad_time_file)\n', (18785, 18800), False, 'import os\n'), ((18829, 18855), 'scipy.io.loadmat', 'sio.loadmat', (['bad_time_file'], {}), '(bad_time_file)\n', (18840, 18855), True, 'import scipy.io as sio\n'), ((9631, 9645), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9643, 9645), False, 'from datetime import datetime\n'), ((14550, 14575), 'numpy.arange', 'np.arange', (['(0)', 'df.shape[0]'], {}), '(0, df.shape[0])\n', (14559, 14575), True, 'import numpy as np\n'), ((15031, 15084), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (15039, 15084), False, 'from hdmf.backends.hdf5 import H5DataIO\n'), ((1707, 1721), 'pathlib.Path', 'Path', (['htk_path'], {}), '(htk_path)\n', (1711, 1721), 
False, 'from pathlib import Path\n'), ((6792, 6814), 'numpy.array', 'np.array', (['group_labels'], {}), '(group_labels)\n', (6800, 6814), True, 'import numpy as np\n'), ((15472, 15525), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (15480, 15525), False, 'from hdmf.backends.hdf5 import H5DataIO\n'), ((1615, 1629), 'pathlib.Path', 'Path', (['htk_path'], {}), '(htk_path)\n', (1619, 1629), False, 'from pathlib import Path\n'), ((16145, 16198), 'hdmf.backends.hdf5.H5DataIO', 'H5DataIO', (['data[:, 0:n_electrodes]'], {'compression': '"""gzip"""'}), "(data[:, 0:n_electrodes], compression='gzip')\n", (16153, 16198), False, 'from hdmf.backends.hdf5 import H5DataIO\n')]
|
"""Fruit Basket CLI."""
import argparse
import sys
from fruit_basket import __version__, FruitBasket, report
from fruit_basket.exceptions import FruitBasketError
def _create_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description="Fruit Basket CLI Help", allow_abbrev=False)
parser.add_argument(
"-v", "--version", action="version", version=f"%(prog)s {__version__}",
)
parser.add_argument(
"file", metavar="FILE", type=argparse.FileType(), help="Fruit basket inventory file(csv)."
)
return parser
def main():
"""Entrypoint for console."""
parser = _create_parser()
if len(sys.argv) == 1:
parser.error("File required")
args = parser.parse_args()
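    # argparse (via FileType) has already opened FILE to validate that it
    # exists and is readable; close the handle and let FruitBasket reopen
    # the file by name.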
args.file.close()
try:
print(report(FruitBasket(args.file.name)))
except FruitBasketError as err:
sys.exit(err)
if __name__ == "__main__":
main()
|
[
"fruit_basket.FruitBasket",
"argparse.ArgumentParser",
"sys.exit",
"argparse.FileType"
] |
[((227, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fruit Basket CLI Help"""', 'allow_abbrev': '(False)'}), "(description='Fruit Basket CLI Help', allow_abbrev=False\n )\n", (250, 312), False, 'import argparse\n'), ((482, 501), 'argparse.FileType', 'argparse.FileType', ([], {}), '()\n', (499, 501), False, 'import argparse\n'), ((871, 884), 'sys.exit', 'sys.exit', (['err'], {}), '(err)\n', (879, 884), False, 'import sys\n'), ((797, 824), 'fruit_basket.FruitBasket', 'FruitBasket', (['args.file.name'], {}), '(args.file.name)\n', (808, 824), False, 'from fruit_basket import __version__, FruitBasket, report\n')]
|
from functools import partial
import colossalai
from colossalai.utils.cuda import get_current_device
import pytest
import torch
import torch.multiprocessing as mp
from colossalai.amp import convert_to_apex_amp
from colossalai.nn.optimizer import CPUAdam
from colossalai.testing import parameterize, rerun_on_exception
from colossalai.utils import free_port
from colossalai.zero.init_ctx import ZeroInitContext
from colossalai.zero.shard_utils import (BucketTensorShardStrategy, TensorShardStrategy)
from colossalai.zero.sharded_model import ShardedModelV2
from colossalai.zero.sharded_model.utils import col_model_deepcopy
from colossalai.zero.sharded_optim import ShardedOptimizerV2
from colossalai.zero.sharded_optim._utils import has_inf_or_nan
from colossalai.utils import get_current_device
from tests.components_to_test.registry import non_distributed_component_funcs
from colossalai.engine.gradient_handler import MoeGradientHandler
from colossalai.context import MOE_CONTEXT
from colossalai.testing import assert_equal_in_group
from tests.test_zero_data_parallel.common import CONFIG, check_sharded_model_params
from tests.test_moe.test_moe_zero_init import MoeModel
def _run_step(model, optimizer, data, label, criterion, grad_handler):
model.train()
optimizer.zero_grad()
if criterion:
y = model(data)
loss = criterion(y, label)
else:
loss = model(data, label)
loss = loss.float()
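    # ShardedModelV2 pairs with ShardedOptimizerV2, which owns loss scaling,
    # so backward must go through the optimizer; plain models call
    # loss.backward() directly.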
if isinstance(model, ShardedModelV2):
optimizer.backward(loss)
else:
loss.backward()
if grad_handler is not None:
grad_handler.handle_gradient()
optimizer.step()
@parameterize("cpu_offload", [True, False])
@parameterize("use_cpuadam", [True, False])
@parameterize("shard_strategy_class", [TensorShardStrategy, BucketTensorShardStrategy])
def _run_test_sharded_optim_v2(cpu_offload, shard_strategy_class, use_cpuadam, gpu_margin_mem_ratio=0.0):
MOE_CONTEXT.reset_loss()
shard_strategy = shard_strategy_class()
if use_cpuadam and cpu_offload is False:
return
get_components_func = non_distributed_component_funcs.get_callable('no_leaf_module')
_, train_dataloader, _, optimizer_class, criterion = get_components_func()
with ZeroInitContext(
target_device=torch.device('cpu') if cpu_offload else torch.device(f'cuda:{get_current_device()}'),
shard_strategy=shard_strategy,
shard_param=True,
rm_torch_payload_on_the_fly=False):
zero_model = MoeModel()
zero_model = ShardedModelV2(
zero_model,
shard_strategy,
offload_config=dict(device='cpu') if cpu_offload else None,
use_memory_tracer=gpu_margin_mem_ratio > 0.0,
reuse_fp16_shard=use_cpuadam,
)
# check whether parameters are identical in ddp
for name, p in zero_model.named_parameters():
if not p.colo_attr.param_is_sharded and p.is_replicated:
assert_equal_in_group(p.data.to(get_current_device()))
model = MoeModel().half()
col_model_deepcopy(zero_model, model)
model = model.cuda().float()
if use_cpuadam:
optimizer_class = CPUAdam
optim = optimizer_class(model.parameters(), lr=1e-3)
sharded_optim = optimizer_class(zero_model.parameters(), lr=1e-3)
sharded_optim = ShardedOptimizerV2(zero_model,
sharded_optim,
cpu_offload=cpu_offload,
initial_scale=2**5,
gpu_margin_mem_ratio=gpu_margin_mem_ratio,
keep_unsharded=True)
amp_config = dict(opt_level='O2', keep_batchnorm_fp32=False)
apex_model, apex_optimizer = convert_to_apex_amp(model, optim, amp_config)
apex_grad_handler = MoeGradientHandler(model)
# Since MOE is not compatible with apex_amp now, we need to convert gate weight to fp32
for (n, p), zp in zip(apex_model.named_parameters(), zero_model.parameters()):
if 'gate' in n:
p.data = p.float()
p.data.copy_(zp.data)
for i, (data, label) in enumerate(train_dataloader):
if i > 5:
break
data, label = data.cuda(), label.cuda()
_run_step(apex_model, apex_optimizer, data, label, criterion, apex_grad_handler)
_run_step(zero_model, sharded_optim, data, label, criterion, None)
check_sharded_model_params(model, zero_model, loose=True, reuse_fp16_shard=use_cpuadam)
for param in model.parameters():
assert not has_inf_or_nan(param)
def _run_dist(rank, world_size, port):
colossalai.launch(config=CONFIG, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
MOE_CONTEXT.setup(seed=42)
_run_test_sharded_optim_v2()
# use_cpuadam = True can be used with cpu_offload = False
@pytest.mark.dist
@pytest.mark.parametrize("world_size", [2])
@rerun_on_exception(exception_type=mp.ProcessRaisedException, pattern=".*Address already in use.*")
def test_moe_zero_optim(world_size):
run_func = partial(_run_dist, world_size=world_size, port=free_port())
mp.spawn(run_func, nprocs=world_size)
if __name__ == '__main__':
test_moe_zero_optim(world_size=2)
|
[
"colossalai.utils.free_port",
"colossalai.context.MOE_CONTEXT.reset_loss",
"torch.multiprocessing.spawn",
"tests.test_zero_data_parallel.common.check_sharded_model_params",
"colossalai.testing.parameterize",
"colossalai.zero.sharded_model.utils.col_model_deepcopy",
"colossalai.zero.sharded_optim.ShardedOptimizerV2",
"colossalai.amp.convert_to_apex_amp",
"colossalai.launch",
"colossalai.testing.rerun_on_exception",
"tests.components_to_test.registry.non_distributed_component_funcs.get_callable",
"colossalai.zero.sharded_optim._utils.has_inf_or_nan",
"torch.device",
"colossalai.utils.get_current_device",
"colossalai.engine.gradient_handler.MoeGradientHandler",
"tests.test_moe.test_moe_zero_init.MoeModel",
"pytest.mark.parametrize",
"colossalai.context.MOE_CONTEXT.setup"
] |
[((1697, 1739), 'colossalai.testing.parameterize', 'parameterize', (['"""cpu_offload"""', '[True, False]'], {}), "('cpu_offload', [True, False])\n", (1709, 1739), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((1742, 1784), 'colossalai.testing.parameterize', 'parameterize', (['"""use_cpuadam"""', '[True, False]'], {}), "('use_cpuadam', [True, False])\n", (1754, 1784), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((1787, 1877), 'colossalai.testing.parameterize', 'parameterize', (['"""shard_strategy_class"""', '[TensorShardStrategy, BucketTensorShardStrategy]'], {}), "('shard_strategy_class', [TensorShardStrategy,\n BucketTensorShardStrategy])\n", (1799, 1877), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((5033, 5075), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""world_size"""', '[2]'], {}), "('world_size', [2])\n", (5056, 5075), False, 'import pytest\n'), ((5078, 5181), 'colossalai.testing.rerun_on_exception', 'rerun_on_exception', ([], {'exception_type': 'mp.ProcessRaisedException', 'pattern': '""".*Address already in use.*"""'}), "(exception_type=mp.ProcessRaisedException, pattern=\n '.*Address already in use.*')\n", (5096, 5181), False, 'from colossalai.testing import parameterize, rerun_on_exception\n'), ((1986, 2010), 'colossalai.context.MOE_CONTEXT.reset_loss', 'MOE_CONTEXT.reset_loss', ([], {}), '()\n', (2008, 2010), False, 'from colossalai.context import MOE_CONTEXT\n'), ((2147, 2209), 'tests.components_to_test.registry.non_distributed_component_funcs.get_callable', 'non_distributed_component_funcs.get_callable', (['"""no_leaf_module"""'], {}), "('no_leaf_module')\n", (2191, 2209), False, 'from tests.components_to_test.registry import non_distributed_component_funcs\n'), ((3119, 3156), 'colossalai.zero.sharded_model.utils.col_model_deepcopy', 'col_model_deepcopy', (['zero_model', 'model'], {}), '(zero_model, model)\n', (3137, 3156), False, 'from colossalai.zero.sharded_model.utils import col_model_deepcopy\n'), ((3399, 3563), 'colossalai.zero.sharded_optim.ShardedOptimizerV2', 'ShardedOptimizerV2', (['zero_model', 'sharded_optim'], {'cpu_offload': 'cpu_offload', 'initial_scale': '(2 ** 5)', 'gpu_margin_mem_ratio': 'gpu_margin_mem_ratio', 'keep_unsharded': '(True)'}), '(zero_model, sharded_optim, cpu_offload=cpu_offload,\n initial_scale=2 ** 5, gpu_margin_mem_ratio=gpu_margin_mem_ratio,\n keep_unsharded=True)\n', (3417, 3563), False, 'from colossalai.zero.sharded_optim import ShardedOptimizerV2\n'), ((3856, 3901), 'colossalai.amp.convert_to_apex_amp', 'convert_to_apex_amp', (['model', 'optim', 'amp_config'], {}), '(model, optim, amp_config)\n', (3875, 3901), False, 'from colossalai.amp import convert_to_apex_amp\n'), ((3927, 3952), 'colossalai.engine.gradient_handler.MoeGradientHandler', 'MoeGradientHandler', (['model'], {}), '(model)\n', (3945, 3952), False, 'from colossalai.engine.gradient_handler import MoeGradientHandler\n'), ((4771, 4887), 'colossalai.launch', 'colossalai.launch', ([], {'config': 'CONFIG', 'rank': 'rank', 'world_size': 'world_size', 'host': '"""localhost"""', 'port': 'port', 'backend': '"""nccl"""'}), "(config=CONFIG, rank=rank, world_size=world_size, host=\n 'localhost', port=port, backend='nccl')\n", (4788, 4887), False, 'import colossalai\n'), ((4888, 4914), 'colossalai.context.MOE_CONTEXT.setup', 'MOE_CONTEXT.setup', ([], {'seed': '(42)'}), '(seed=42)\n', (4905, 4914), False, 'from colossalai.context import MOE_CONTEXT\n'), ((5296, 5333), 
'torch.multiprocessing.spawn', 'mp.spawn', (['run_func'], {'nprocs': 'world_size'}), '(run_func, nprocs=world_size)\n', (5304, 5333), True, 'import torch.multiprocessing as mp\n'), ((2578, 2588), 'tests.test_moe.test_moe_zero_init.MoeModel', 'MoeModel', ([], {}), '()\n', (2586, 2588), False, 'from tests.test_moe.test_moe_zero_init import MoeModel\n'), ((4546, 4638), 'tests.test_zero_data_parallel.common.check_sharded_model_params', 'check_sharded_model_params', (['model', 'zero_model'], {'loose': '(True)', 'reuse_fp16_shard': 'use_cpuadam'}), '(model, zero_model, loose=True, reuse_fp16_shard=\n use_cpuadam)\n', (4572, 4638), False, 'from tests.test_zero_data_parallel.common import CONFIG, check_sharded_model_params\n'), ((3096, 3106), 'tests.test_moe.test_moe_zero_init.MoeModel', 'MoeModel', ([], {}), '()\n', (3104, 3106), False, 'from tests.test_moe.test_moe_zero_init import MoeModel\n'), ((5278, 5289), 'colossalai.utils.free_port', 'free_port', ([], {}), '()\n', (5287, 5289), False, 'from colossalai.utils import free_port\n'), ((4700, 4721), 'colossalai.zero.sharded_optim._utils.has_inf_or_nan', 'has_inf_or_nan', (['param'], {}), '(param)\n', (4714, 4721), False, 'from colossalai.zero.sharded_optim._utils import has_inf_or_nan\n'), ((2346, 2365), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2358, 2365), False, 'import torch\n'), ((3058, 3078), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (3076, 3078), False, 'from colossalai.utils import get_current_device\n'), ((2407, 2427), 'colossalai.utils.get_current_device', 'get_current_device', ([], {}), '()\n', (2425, 2427), False, 'from colossalai.utils import get_current_device\n')]
|
from aula05082019.ListaDuplamenteLigada import ListaDuplamenteLigada
listaDuplamenteLigada = ListaDuplamenteLigada()
listaDuplamenteLigada.inserirInicio(2)
listaDuplamenteLigada.inserirInicio(1)
listaDuplamenteLigada.inserirFim(3)
listaDuplamenteLigada.inserir(100, 3)
print('---Print the list directly:')
listaDuplamenteLigada.imprimirLista()
print('---Removals:')
listaDuplamenteLigada.remover(1)
print('---Print the list after removals:')
listaDuplamenteLigada.imprimirLista()
print('Head key:')
print(listaDuplamenteLigada.inicio.chave)
print('Tail key:')
print(listaDuplamenteLigada.fim.chave)
print('---Print the list in reverse:')
listaDuplamenteLigada.imprimirListaReversa()
print('---Size:')
print(listaDuplamenteLigada.imprimirTamanho())
|
[
"aula05082019.ListaDuplamenteLigada.ListaDuplamenteLigada"
] |
[((94, 117), 'aula05082019.ListaDuplamenteLigada.ListaDuplamenteLigada', 'ListaDuplamenteLigada', ([], {}), '()\n', (115, 117), False, 'from aula05082019.ListaDuplamenteLigada import ListaDuplamenteLigada\n')]
|
import setuptools
def readme():
with open('README.md', encoding='utf8') as fp:
return fp.read()
def requirements():
with open('requirements.txt') as fp:
return fp.readlines()
setuptools.setup(
name = 'nr.dnn',
version = '1.0.0',
author = '<NAME>',
author_email = '<EMAIL>',
description = 'git',
long_description = readme(),
long_description_content_type = 'text/markdown',
url = 'https://github.com/NiklasRosenstein-Python/nr.dnn',
license = 'MIT',
install_requires = requirements(),
packages = setuptools.find_packages('src'),
package_dir = {'': 'src'}
)
|
[
"setuptools.find_packages"
] |
[((533, 564), 'setuptools.find_packages', 'setuptools.find_packages', (['"""src"""'], {}), "('src')\n", (557, 564), False, 'import setuptools\n')]
|
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME phenix.fake_f_obs
from cctbx import adptbx
from cctbx.array_family import flex
import random, math, sys, os
import iotbx.pdb
import mmtbx.utils
from libtbx import easy_run
import mmtbx.dynamics.cartesian_dynamics as cartesian_dynamics
from mmtbx import monomer_library
import mmtbx.monomer_library.pdb_interpretation
import mmtbx.monomer_library.server
from mmtbx.tls import ladp
from mmtbx.utils import run_reduce_with_timeout
import mmtbx.tls.tools
import mmtbx.f_model
import iotbx.phil
import mmtbx.masks
from libtbx.utils import Sorry
from six.moves import range
import mmtbx.model
if(1):
random.seed(0)
flex.set_random_seed(0)
master_params_str="""\
f_obs {
high_resolution = 2.0
.type = float
low_resolution = 15.0
.type = float
scattering_table = wk1995 it1992 *n_gaussian neutron
f_calc {
atomic_model {
ensemble_size = 20
.type = int
add_hydrogens = False
.type = bool
tls {
max_tl = 2
.type = float
min_tl = 0
.type = float
}
apply_cartesian_dynamics = True
.type = bool
regularize_geometry {
rmsd_bonds_target = 0.025
.type = float
rmsd_angles_target = 2.5
.type = float
}
ladp_angle = 3.0
.type = float
switch_rotamers = True
.type = bool
shake_sites_rmsd = 0.01
.type = float
rigid_body_shift {
rotation_angle = 1.0
.type = float
translation_length = 0.1
.type = float
}
stop_cartesian_dynamics_at_diff = 0.5
.type = float
use_ramachandran_plot_restraints = True
.type = bool
output_file_name = fake_model.pdb
.type = str
}
accuracy {
include scope mmtbx.f_model.sf_and_grads_accuracy_master_params
}
}
f_bulk {
k_sol = 0.35
.type = float
b_sol = 50.0
.type = float
mask {
include scope mmtbx.masks.mask_master_params
}
}
overall_scale = 1.0
overall_anisotropic_scale_matrix_b_cart {
max = 10
.type = float
min = 0
.type = float
}
experimental_noise {
add_random_error_to_amplitudes_percent = 5
.type = float
}
output_file_name = fake_f_obs.mtz
.type = str
}
"""
class show(object):
def __init__(self,
xrs,
xrs_start,
grm,
prefix=""):
esg = grm.energies_sites(
sites_cart = xrs.sites_cart(), compute_gradients = False).geometry
self.bond_rmsd = esg.bond_deviations()[2]
self.angle_rmsd = esg.angle_deviations()[2]
self.error = flex.mean(xrs.distances(other = xrs_start))
print(" %s err=%8.3f rmsd: bonds=%6.3f angles=%6.3f"%(prefix, self.error,
self.bond_rmsd, self.angle_rmsd))
def switch_rotamers(xray_structure, pdb_hierarchy):
x = xray_structure.deep_copy_scatterers()
p = pdb_hierarchy.deep_copy()
p.atoms().reset_i_seq()
p = mmtbx.utils.switch_rotamers(
pdb_hierarchy = p,
mode = "min_distant")
x.set_sites_cart(sites_cart = p.atoms().extract_xyz())
return x, p
def set_ladp(xray_structure, pdb_hierarchy, angle):
axes_and_atoms_i_seqs = ladp.get_axes_and_atoms_i_seqs(
pdb_hierarchy = pdb_hierarchy,
mon_lib_srv = monomer_library.server.server())
xray_structure = xray_structure.set_b_iso(value=random.randrange(5,10))
xray_structure.convert_to_isotropic()
xray_structure = ladp.set_ladp(
xray_structure = xray_structure,
axes_and_atoms_i_seqs = axes_and_atoms_i_seqs,
value = angle,
enable_recursion = True,
depth = 0)
return xray_structure
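# Sample a random anisotropic ADP: draw a random U_cart, convert to U*,
# average over the space group so the tensor obeys site symmetry, and
# convert back to Cartesian.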
def random_aniso_adp(space_group, unit_cell, u_scale=2, u_min=0):
return adptbx.u_star_as_u_cart(unit_cell, space_group.average_u_star(
u_star = adptbx.u_cart_as_u_star(unit_cell, adptbx.random_u_cart(
u_scale=u_scale, u_min=u_min))))
def apply_tls(xray_structure, params):
uc = xray_structure.unit_cell()
sg = xray_structure.space_group()
selections_1d = flex.bool(xray_structure.scatterers().size(),True)
selections = [selections_1d.iselection()]
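  # Treat the whole structure as a single TLS group with random T and L
  # tensors (S = 0); the TLS contribution is added on top of the existing
  # per-atom ADPs below.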
T=random_aniso_adp(space_group=sg, unit_cell=uc, u_scale=params.max_tl,
u_min=params.min_tl)
L=random_aniso_adp(space_group=sg, unit_cell=uc, u_scale=params.max_tl,
u_min=params.min_tl)
print(" T: %s"%",".join([("%7.3f"%i).strip() for i in T]))
print(" L: %s"%",".join([("%7.3f"%i).strip() for i in L]))
tlsos = mmtbx.tls.tools.generate_tlsos(
selections = selections,
xray_structure = xray_structure,
T=[T],
L=[L],
S=[[0,0,0,0,0,0,0,0,0]])
u_cart_from_tls = mmtbx.tls.tools.u_cart_from_tls(
sites_cart = xray_structure.sites_cart(),
selections = selections,
tlsos = tlsos)
xray_structure.convert_to_anisotropic()
u_cart = xray_structure.scatterers().extract_u_cart(uc)
utot = u_cart_from_tls+u_cart
xray_structure.set_u_cart(u_cart=utot, selection = selections_1d.iselection())
xray_structure.tidy_us()
return xray_structure
def apply_rigid_body_shift(xray_structure, params):
import scitbx.matrix
  mt = flex  # .mersenne_twister(seed=0)
rot_axis = scitbx.matrix.col(mt.random_double_point_on_sphere())
rot_matrix = scitbx.math.r3_rotation_axis_and_angle_as_matrix(
axis=rot_axis, angle=params.rotation_angle, deg=True)
run_away_counter = 0
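  # Re-draw the translation until it has a component that is not a
  # continuous allowed origin shift; a pure origin shift would leave the
  # structure factors unchanged.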
while True:
transl = mt.random_double_point_on_sphere()
transl_no_cont_sh = scitbx.matrix.col(xray_structure.crystal_symmetry()
.subtract_continuous_allowed_origin_shifts(translation_cart=transl))
l = abs(transl_no_cont_sh)
if(l > 0.1):
break
run_away_counter += 1
assert run_away_counter < 100
transl = transl_no_cont_sh * (params.translation_length/l)
sites_cart = xray_structure.sites_cart()
cm = xray_structure.center_of_mass()
ns = rot_matrix * (sites_cart-cm) + transl + cm
xray_structure.set_sites_cart(sites_cart =
rot_matrix * (sites_cart-cm) + transl + cm)
return xray_structure
def simulate_f_obs(root, crystal_symmetry, params):
f_calc_data = None
f_masks_data = []
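  # Sum F_calc and bulk-solvent mask contributions over all ensemble models
  # (occupancies were pre-scaled by 1/N in loop_1, so the sum is an average).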
for i_m, m in enumerate(root.models()):
raw_records = flex.std_string()
raw_records.append(
iotbx.pdb.format_cryst1_record(crystal_symmetry = crystal_symmetry))
for atom in m.atoms():
ra = atom.format_atom_record()
ru = atom.format_anisou_record()
raw_records.append(ra[:])
raw_records.append(ru[:])
xrs = iotbx.pdb.input(lines = raw_records,
source_info=None).xray_structure_simple()
if(i_m==0):
dummy = abs(xrs.structure_factors(
d_min=params.f_obs.high_resolution).f_calc())
dummy = dummy.resolution_filter(d_max = params.f_obs.low_resolution)
fmodel = mmtbx.f_model.manager(
f_obs = dummy,
xray_structure = xrs,
mask_params = params.f_obs.f_bulk.mask,
sf_and_grads_accuracy_params = params.f_obs.f_calc.accuracy)
fcd = fmodel.f_calc().data()
fms = fmodel.f_masks()
if(i_m==0):
f_calc_data = fcd
f_masks_data = []
for f in fms:
f_masks_data.append(f.data())
else:
f_calc_data += fcd
fmsks = fms
assert len(f_masks_data) == len(fmsks)
for ifmd in range(len(f_masks_data)):
f_masks_data[ifmd] += fmsks[ifmd].data()
fcalc_average = fmodel.f_obs().array(data = f_calc_data)
f_masks_data_average = []
for f in f_masks_data:
f_masks_data_average.append(fmodel.f_obs().array(data = f/len(root.models())))
b_cart = None
if([params.f_obs.overall_anisotropic_scale_matrix_b_cart.max,
params.f_obs.overall_anisotropic_scale_matrix_b_cart.min].count(None)==0):
b_cart = random_aniso_adp(
space_group=crystal_symmetry.space_group(),
unit_cell=crystal_symmetry.unit_cell(),
u_scale=params.f_obs.overall_anisotropic_scale_matrix_b_cart.max,
u_min=params.f_obs.overall_anisotropic_scale_matrix_b_cart.min)
print("\noverall_anisotropic_scale_matrix_b_cart: %s"%",".join(
[("%7.3f"%i).strip() for i in b_cart]))
fmodel = mmtbx.f_model.manager(
f_obs = dummy,
f_calc = fcalc_average,
f_mask = f_masks_data_average,
k_sol = params.f_obs.f_bulk.k_sol,
b_sol = params.f_obs.f_bulk.b_sol,
b_cart = b_cart)
#
f_obs = abs(fmodel.f_model())
f_obs.set_observation_type_xray_amplitude()
mtz_dataset = f_obs.as_mtz_dataset(column_root_label="F(ake)obs")
r_free_flags = f_obs.generate_r_free_flags()
mtz_dataset.add_miller_array(
miller_array=r_free_flags, column_root_label="R-free-flags")
mtz_object = mtz_dataset.mtz_object()
mtz_object.write(file_name=params.f_obs.output_file_name)
def regularize_geometry(xray_structure, restraints_manager, params):
from mmtbx.refinement import geometry_minimization as gm
import scitbx.lbfgs
sites_cart = xray_structure.sites_cart()
minimized = gm.lbfgs(
sites_cart = sites_cart,
correct_special_position_tolerance = 1.0,
geometry_restraints_manager = restraints_manager.geometry,
geometry_restraints_flags = gm.geometry_restraints.flags.flags(default=True),
rmsd_bonds_termination_cutoff=params.rmsd_bonds_target,
rmsd_angles_termination_cutoff=params.rmsd_angles_target,
lbfgs_termination_params=scitbx.lbfgs.termination_parameters(
max_iterations=500))
xray_structure = xray_structure.replace_sites_cart(new_sites = sites_cart)
return xray_structure
def cd(xray_structure, restraints_manager, params):
gradients_calculator=cartesian_dynamics.gradients_calculator_reciprocal_space(
restraints_manager = restraints_manager,
sites_cart = xray_structure.sites_cart(),
wc = 1)
cartesian_dynamics.run(
gradients_calculator = gradients_calculator,
xray_structure = xray_structure,
temperature = 3000,
n_steps = 500000,
time_step = 0.0005,
initial_velocities_zero_fraction = 0,
n_print = 100,
stop_cm_motion = True,
log = None,
stop_at_diff = params.stop_cartesian_dynamics_at_diff,
verbose = -1)
def loop_2(params, xray_structure, pdb_hierarchy, restraints_manager, root):
print("model:")
amp = params.f_obs.f_calc.atomic_model
grm = restraints_manager
xrs = xray_structure.deep_copy_scatterers()
show(xrs = xrs, xrs_start = xrs, grm = grm, prefix = "start:")
xrs_sh = xrs.deep_copy_scatterers()
if(amp.shake_sites_rmsd is not None):
xrs_sh.shake_sites_in_place(rms_difference = amp.shake_sites_rmsd)
if(amp.apply_cartesian_dynamics):
cd(xray_structure = xrs_sh, restraints_manager = grm, params = amp)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "cd: ")
if([amp.regularize_geometry.rmsd_bonds_target,
amp.regularize_geometry.rmsd_angles_target].count(None)==0):
xrs_sh = regularize_geometry(xray_structure = xrs_sh,
restraints_manager = grm, params = amp.regularize_geometry)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "min: ")
if(amp.ladp_angle is not None):
xrs_sh = set_ladp(xray_structure = xrs_sh, pdb_hierarchy = pdb_hierarchy,
angle = amp.ladp_angle)
if([amp.tls.max_tl, amp.tls.min_tl].count(None)==0):
xrs_sh = apply_tls(xray_structure = xrs_sh, params = amp.tls)
if([amp.rigid_body_shift.rotation_angle,
amp.rigid_body_shift.translation_length].count(None)==0):
xrs_sh = apply_rigid_body_shift(xray_structure = xrs_sh,
params = amp.rigid_body_shift)
show(xrs = xrs_sh, xrs_start = xrs, grm = grm, prefix = "rb: ")
#
h = pdb_hierarchy.deep_copy()
h.atoms().reset_i_seq() # XXX
h.atoms().set_xyz(xrs_sh.sites_cart().deep_copy())
h.atoms().set_uij(xrs_sh.scatterers().extract_u_cart(xrs_sh.unit_cell()))
h.atoms().set_b(xrs_sh.extract_u_iso_or_u_equiv()*adptbx.u_as_b(1.))
m = h.models()[0].detached_copy()
m.id = str(None)
root.append_model(m)
def loop_1(params, root, xray_structure, pdb_hierarchy, restraints_manager):
xh = [(xray_structure,pdb_hierarchy)]
if(params.f_obs.f_calc.atomic_model.switch_rotamers):
xh.append(switch_rotamers(
xray_structure = xray_structure.deep_copy_scatterers(),
pdb_hierarchy = pdb_hierarchy.deep_copy()))
counter = 0
size = int(math.ceil(params.f_obs.f_calc.atomic_model.ensemble_size/len(xh)))
for xh_ in xh:
x_, h_ = xh_
for mc in range(size):
loop_2(
params = params,
xray_structure = x_,
pdb_hierarchy = h_,
restraints_manager = restraints_manager,
root = root)
for i_model, model in enumerate(root.models()):
model.id = str(i_model)
root.atoms().set_occ(root.atoms().extract_occ()/len(root.models()))
def defaults(log):
print("Default params::\n", file=log)
parsed = iotbx.phil.parse(master_params_str, process_includes=True)
print(file=log)
return parsed
def run(args, log = sys.stdout):
if(len(args)==0):
parsed = defaults(log=log)
parsed.show(prefix=" ", out=log)
return
parsed = defaults(log=log)
processed_args = mmtbx.utils.process_command_line_args(args = args,
log = sys.stdout, master_params = parsed)
processed_args.params.show()
params = processed_args.params.extract()
if(len(processed_args.pdb_file_names)==0):
raise Sorry("No PDB file found.")
if(len(processed_args.pdb_file_names)>1):
raise Sorry("More than one PDB file found.")
pdb_file_name = processed_args.pdb_file_names[0]
if(params.f_obs.f_calc.atomic_model.add_hydrogens):
pdb_file_name_r = os.path.basename(pdb_file_name)+"_reduce"
# easy_run.go("phenix.reduce %s > %s"% (pdb_file_name, pdb_file_name_r))
run_reduce_with_timeout(file_name=pdb_file_name, parameters=" > %s" % pdb_file_name_r)
pdb_file_name = pdb_file_name_r
pdbi_params = mmtbx.model.manager.get_default_pdb_interpretation_params()
if(params.f_obs.f_calc.atomic_model.use_ramachandran_plot_restraints):
pdbi_params.pdb_interpretation.ramachandran_plot_restraints.enabled=True
model = mmtbx.model.manager(
model_input = iotbx.pdb.input(file_name = pdb_file_name))
model.process(make_restraints=True,
pdb_interpretation_params = pdbi_params)
root = iotbx.pdb.hierarchy.root()
loop_1(
params = params,
root = root,
xray_structure = model.get_xray_structure(),
pdb_hierarchy = model.get_hierarchy(),
restraints_manager = model.get_restraints_manager())
root.write_pdb_file(
file_name = params.f_obs.f_calc.atomic_model.output_file_name,
crystal_symmetry = model.crystal_symmetry())
simulate_f_obs(root=root, crystal_symmetry=model.crystal_symmetry(),
params = params)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
[
"cctbx.array_family.flex.set_random_seed",
"mmtbx.tls.ladp.set_ladp",
"six.moves.range",
"cctbx.array_family.flex.std_string",
"os.path.basename",
"cctbx.adptbx.random_u_cart",
"mmtbx.utils.run_reduce_with_timeout",
"cctbx.adptbx.u_as_b",
"mmtbx.monomer_library.server.server",
"random.seed",
"random.randrange",
"mmtbx.refinement.geometry_minimization.geometry_restraints.flags.flags",
"mmtbx.dynamics.cartesian_dynamics.run",
"libtbx.utils.Sorry"
] |
[((692, 706), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (703, 706), False, 'import random, math, sys, os\n'), ((709, 732), 'cctbx.array_family.flex.set_random_seed', 'flex.set_random_seed', (['(0)'], {}), '(0)\n', (729, 732), False, 'from cctbx.array_family import flex\n'), ((3527, 3666), 'mmtbx.tls.ladp.set_ladp', 'ladp.set_ladp', ([], {'xray_structure': 'xray_structure', 'axes_and_atoms_i_seqs': 'axes_and_atoms_i_seqs', 'value': 'angle', 'enable_recursion': '(True)', 'depth': '(0)'}), '(xray_structure=xray_structure, axes_and_atoms_i_seqs=\n axes_and_atoms_i_seqs, value=angle, enable_recursion=True, depth=0)\n', (3540, 3666), False, 'from mmtbx.tls import ladp\n'), ((9773, 10083), 'mmtbx.dynamics.cartesian_dynamics.run', 'cartesian_dynamics.run', ([], {'gradients_calculator': 'gradients_calculator', 'xray_structure': 'xray_structure', 'temperature': '(3000)', 'n_steps': '(500000)', 'time_step': '(0.0005)', 'initial_velocities_zero_fraction': '(0)', 'n_print': '(100)', 'stop_cm_motion': '(True)', 'log': 'None', 'stop_at_diff': 'params.stop_cartesian_dynamics_at_diff', 'verbose': '(-1)'}), '(gradients_calculator=gradients_calculator,\n xray_structure=xray_structure, temperature=3000, n_steps=500000,\n time_step=0.0005, initial_velocities_zero_fraction=0, n_print=100,\n stop_cm_motion=True, log=None, stop_at_diff=params.\n stop_cartesian_dynamics_at_diff, verbose=-1)\n', (9795, 10083), True, 'import mmtbx.dynamics.cartesian_dynamics as cartesian_dynamics\n'), ((6257, 6274), 'cctbx.array_family.flex.std_string', 'flex.std_string', ([], {}), '()\n', (6272, 6274), False, 'from cctbx.array_family import flex\n'), ((12605, 12616), 'six.moves.range', 'range', (['size'], {}), '(size)\n', (12610, 12616), False, 'from six.moves import range\n'), ((13527, 13554), 'libtbx.utils.Sorry', 'Sorry', (['"""No PDB file found."""'], {}), "('No PDB file found.')\n", (13532, 13554), False, 'from libtbx.utils import Sorry\n'), ((13609, 13647), 'libtbx.utils.Sorry', 'Sorry', (['"""More than one PDB file found."""'], {}), "('More than one PDB file found.')\n", (13614, 13647), False, 'from libtbx.utils import Sorry\n'), ((13898, 13988), 'mmtbx.utils.run_reduce_with_timeout', 'run_reduce_with_timeout', ([], {'file_name': 'pdb_file_name', 'parameters': "(' > %s' % pdb_file_name_r)"}), "(file_name=pdb_file_name, parameters=' > %s' %\n pdb_file_name_r)\n", (13921, 13988), False, 'from mmtbx.utils import run_reduce_with_timeout\n'), ((3361, 3392), 'mmtbx.monomer_library.server.server', 'monomer_library.server.server', ([], {}), '()\n', (3390, 3392), False, 'from mmtbx import monomer_library\n'), ((3444, 3467), 'random.randrange', 'random.randrange', (['(5)', '(10)'], {}), '(5, 10)\n', (3460, 3467), False, 'import random, math, sys, os\n'), ((9144, 9192), 'mmtbx.refinement.geometry_minimization.geometry_restraints.flags.flags', 'gm.geometry_restraints.flags.flags', ([], {'default': '(True)'}), '(default=True)\n', (9178, 9192), True, 'from mmtbx.refinement import geometry_minimization as gm\n'), ((12049, 12067), 'cctbx.adptbx.u_as_b', 'adptbx.u_as_b', (['(1.0)'], {}), '(1.0)\n', (12062, 12067), False, 'from cctbx import adptbx\n'), ((13775, 13806), 'os.path.basename', 'os.path.basename', (['pdb_file_name'], {}), '(pdb_file_name)\n', (13791, 13806), False, 'import random, math, sys, os\n'), ((3948, 3998), 'cctbx.adptbx.random_u_cart', 'adptbx.random_u_cart', ([], {'u_scale': 'u_scale', 'u_min': 'u_min'}), '(u_scale=u_scale, u_min=u_min)\n', (3968, 3998), False, 'from cctbx import adptbx\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os,time,cv2,scipy.io
import tensorflow as tf
# import tensorflow.contrib.slim as slim
import scipy.misc as sic
# import network as network
import subprocess
import numpy as np
from matplotlib.colors import hsv_to_rgb
# from skimage.measure import compare_ssim, compare_psnr
from glob import glob
def prepare_data(data_path='../data_new/Data_Polar_Clean/crop_npy/'):
train_items, val_items = [], []
folders1 = glob(data_path+'/*')
# print(folders1)
folders2 = []
for folder1 in folders1:
folders2 = folders2 + glob(folder1+'/Indoor/*') + glob(folder1+'/Outdoor/*')
# print(folders2)
folders2.sort()
for folder2 in folders2[1::5] + folders2[2::5]+folders2[3::5]+folders2[4::5]:
folder = folder2
imgs = glob(folder + '/*.npy')
imgs.sort()
# print(folder, len(imgs))
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
train_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
for folder2 in folders2[::5]:
folder = folder2
imgs = glob(folder + '/*.npy')
imgs.sort()
print(folder, len(imgs))
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
val_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
return train_items, val_items[::3]
def prepare_final_data(data_path='../data_new/Data_Polar_Clean/crop_npy/'):
train_items,val_items,test_items=[],[],[]
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/train/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
train_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/test/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
test_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
imgs = glob("../data_new/Data_Polar_Clean/MMR_1/val/*npy")
imgs.sort()
for idx in range(len(imgs)//2):
tmp_M = imgs[2*idx+1]
tmp_R = imgs[2*idx]
val_items.append([tmp_M,tmp_R])
# print(tmp_R, tmp_M)
return train_items, val_items, test_items
def prepare_item(item):
M_name, R_name = item
tmp_M = np.load(M_name)
tmp_R = np.load(R_name)
return tmp_M,tmp_R
def light_mask(h, w):
mid_h = h//5 + np.random.randint(h//5*3)
mid_w = w//5 + np.random.randint(w//5*3)
Light_low = 0.1+0.3*np.random.random()
    Light_high = Light_low + 1*np.random.random()  # unused, kept so the RNG draw sequence is unchanged
row2 = np.concatenate([np.linspace(Light_low,0.8,mid_w),np.linspace(0.8,Light_low, w-mid_w)],axis=0)
mat2 = np.tile(row2[np.newaxis,:],[h,1])
row1 = np.concatenate([np.linspace(Light_low,0.8,mid_h),np.linspace(0.8,Light_low, h-mid_h)],axis=0)
mat1 = np.tile(row1[:,np.newaxis],[1,w])
mat = np.power(mat1*mat2, 2)
# mat = np.power(mat, 1/2.2)
sz = (20 + np.random.randint(20))*2 + 1
mask1=cv2.GaussianBlur(mat,(sz,sz),cv2.BORDER_DEFAULT)
return mask1
def shadow_mask(img):
    # Darkens three random rectangles (base levels 0.2 / 0.3 / 0.4 plus a
    # random jitter of up to 0.4) to simulate cast shadows.
    h_orig,w_orig = img.shape[:2]
    mask = np.ones((h_orig, w_orig))
    for base in (0.2, 0.3, 0.4):
        w_crop = np.random.randint(10, w_orig//3)
        h_crop = np.random.randint(10, h_orig//3)
        try:
            w_offset = np.random.randint(0, w_orig-w_crop-1)
            h_offset = np.random.randint(0, h_orig-h_crop-1)
        except:
            print("Original W %d, desired W %d"%(w_orig,w_crop))
            print("Original H %d, desired H %d"%(h_orig,h_crop))
            print(mask.shape)
        mask[h_offset:h_offset+h_crop-1,w_offset:w_offset+w_crop-1] = base + 0.4*np.random.rand()
    return mask
def prepare_FNF(item):
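    # Synthesizes one training sample: blends a transmission image t and a
    # reflection image r into a no-flash composite nf, and degrades t with a
    # random gain, illumination gradients and shadow rectangles to form the
    # pure-flash image pf. Returns (pf, gt_t, gt_r, nf), cropped to multiples of 32.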
#---------------Get R&T----------------$
T_name, R_name = item
syn_image1=cv2.imread(T_name,-1)
w=np.random.randint(256, 480)
h=round((w/syn_image1.shape[1])*syn_image1.shape[0])
t=cv2.resize(np.float32(syn_image1),(w,h),cv2.INTER_CUBIC)/255.0
r=cv2.resize(np.float32(cv2.imread(R_name,-1)),(w,h),cv2.INTER_CUBIC)/255.0
# h, w = nf.shape[:2]
alpha = 0.25 + 0.5*np.random.random()
gt_r = r
gt_r = (1-alpha)*gt_r
gt_t = alpha * t
nf = np.power(np.power(gt_t,2.2) + np.power(gt_r,2.2), 1/2.2)
    pf = (0.5 + 0.7*np.random.random()) * t
mask1= light_mask(h,w)
mask2= light_mask(h,w)
mask = np.sqrt(mask1*mask2)
shadow = shadow_mask(pf)
if np.random.random() < 0.5:
pf = np.power(np.power(pf,2.2) + 0.5* mask[:,:,np.newaxis],1/2.2)
else:
pf = np.power(np.power(pf,2.2) * mask[:,:,np.newaxis],1/2.2)
pf = pf*shadow[:,:,np.newaxis]
h = h//32 * 32
w = w//32 * 32
return pf[np.newaxis, :h, :w, :], gt_t[np.newaxis, :h, :w, :],gt_r[np.newaxis, :h, :w, :],nf[np.newaxis, :h, :w, :]
# def get_metrics(metrics,out_mask, gt_target,gt_reflection,pred_image_t,pred_image_r):
# metrics["T_ssim"] += compare_ssim(0.5*gt_target[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_t[0,:,:,4]*out_mask[0,:,:,0])
# metrics["T_psnr"] += compare_psnr(0.5*gt_target[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_t[0,:,:,4]*out_mask[0,:,:,0], 1)
# metrics["R_ssim"] += compare_ssim(0.5*gt_reflection[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_r[0,:,:,4]*out_mask[0,:,:,0])
# metrics["R_psnr"] += compare_psnr(0.5*gt_reflection[0,:,:,4]*out_mask[0,:,:,0], 0.5*pred_image_r[0,:,:,4]*out_mask[0,:,:,0], 1)
# return metrics
def save_concat_img(gt_input, gt_target, gt_reflection, pureflash, pred_image_t, pred_image_r, save_path, in_flash=None, is_test=False):
if is_test == True:
sic.imsave(save_path.replace(".jpg", "_0_input_ambient.jpg"), np.uint8(gt_input[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_5_input_flash.jpg"), np.uint8(in_flash[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_6_input_pureflash.jpg"), np.uint8(pureflash[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_1_pred_transmission.jpg"), np.uint8(pred_image_t[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_2_pred_refletion.jpg"), np.uint8(pred_image_r[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_3_gt_transmission.jpg"), np.uint8(gt_target[0].clip(0,1) * 255.))
sic.imsave(save_path.replace(".jpg", "_4_gt_reflection.jpg"), np.uint8(gt_reflection[0].clip(0,1) * 255.))
return 0
# out_img1= np.concatenate([gt_input[0], gt_target[0], gt_reflection[0]], axis=1)
h, w = gt_input.shape[1:3]
out_img1 = [gt_input[0], pred_image_t[0], gt_target[0]]
names = ["Input", "Pred", "GT"]
for idx, img in enumerate(out_img1):
cv2.putText(img, names[idx], (w//2-len(names[idx])*10, h-20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
out_img1 = np.hstack(out_img1)
# out_img2= np.concatenate([pureflash[0], pred_image_t[0],pred_image_r[0]], axis=1)
out_img2= [pureflash[0], gt_reflection[0], pred_image_r[0]]
# names = ["I_fo", "pred_R_a", "R_a"]
# for idx, img in enumerate(out_img2):
# print(img.shape)
# cv2.putText(img, names[idx], (w//2-len(names[idx])*10, h-20), cv2.FONT_HERSHEY_PLAIN, 2.0, (255, 255, 255), 2)
out_img2 = np.hstack(out_img2)
out_img = np.vstack([out_img1, out_img2])
out_img = np.minimum(np.maximum(out_img,0.0),1.0)*255.0
# cv2.imwrite("result/%s/%04d/val_%06d.jpg"%(task, epoch, id), np.uint8(out_img[::2,::2]))
# cv2.imwrite(save_path, np.uint8(out_img[::2,::2]))
sic.imsave(save_path, np.uint8(out_img[::2,::2]))
return out_img
def save_results(all_loss_test, metrics, id, task,epoch):
result=open("result/%s/score.txt"%task,'a')
result.write("Epc: %03d Loss: %.5f | SSIM: %.3f PSNR: %.2f | SSIM: %.3f PSNR: %.2f \n"%\
(epoch, np.mean(all_loss_test[np.where(all_loss_test)]), metrics["T_ssim"]/(id+1), metrics["T_psnr"]/(id+1), metrics["R_ssim"]/(id+1), metrics["R_psnr"]/(id+1)))
result.close()
def crop_shape(tmp_all, size=32):
h,w = tmp_all.shape[1:3]
h = h // size * size
w = w // size * size
return h, w
def cnts_add_display(epoch, cnts, step,crt, crt_t, st):
cnts["cnt"]+=1
step+=1
cnts["all_r"] += crt
cnts["all_t"] += crt_t
cnt, all_r, all_t = cnts["cnt"],cnts["all_r"],cnts["all_t"]
print("iter: %03d %03d %d || r:%.3f %.3f | t:%.3f %.3f |time:%.2f"%\
(epoch,cnt,step,crt,all_r/cnt,crt_t,all_t/cnt,time.time()-st))
return cnts, step
def save_all_out(output, path_prefix, HSV=0, I14=0,AoLP=0,DoLP=0):
sic.imsave("%s_I.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,4]*255.0,255.0),0.0)))
if I14:
sic.imsave("%s_I14.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(np.concatenate([output[0,:,:,i] for i in range(4)],axis=0)*255.0,255.0),0.0)))
if HSV:
sic.imsave("%s_HSV.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,-3:]*255.0,255.0),0.0)))
if AoLP:
sic.imsave("%s_AoLP.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,6]*255.0,255.0),0.0)))
if DoLP:
sic.imsave("%s_DoLP.jpg"%path_prefix,np.uint8(np.maximum(np.minimum(output[0,:,:,5]*255.0,255.0),0.0)))
def get_input(path, id):
raw_in_name = path + '/in/%04d.png'%id
raw_outR_name=path + '/out/%04d.png'%id
raw_outT_name=path + '/out/%04d.png'%id
temp_input = get_from_raw(raw_in_name, raw=None)
temp_output= np.concatenate([temp_input, temp_input],axis=3)
# temp_output= np.concatenate([get_from_raw(raw_outR_name,raw=None), get_from_raw(raw_outT_name,raw=None)],axis=3)
# temp_output=None
return temp_input, temp_output
def load_data(train_path, test_path, train_num, test_num):
train_in = []
test_in = []
train_out= []
test_out = []
for i in range(train_num):
temp_input, temp_output = get_input(train_path, i+1)
print('Train: ', i, temp_input.shape, temp_output.shape)
train_in.append(temp_input)
train_out.append(temp_output)
for i in range(test_num):
temp_input, temp_output = get_input(test_path, i+1)
print('Test: ', i, temp_input.shape, temp_output.shape)
test_in.append(temp_input)
test_out.append(temp_output)
return train_in, train_out, test_in, test_out
def get_from_raw(raw_name, raw=True):
if raw:
raw_img = read_raw(raw_name)
else:
raw_img = sic.imread(raw_name, mode='L')/255.
# print(np.mean(raw_img))
# print(raw_name, raw_img.shape)
h=raw_img.shape[0]//32*32
w=raw_img.shape[1]//32*32
return raw_split(raw_img[:h,:w])
# return raw2imgs(raw_img[:h,:w])
def raw2imgs(raw_img):
I3=raw_img[::2,::2]
I2=raw_img[::2,1::2]
I4=raw_img[1::2,::2]
I1=raw_img[1::2,1::2]
# I3=raw_img[::2,::2][::2,::2]
# I2=raw_img[::2,1::2][::2,::2]
# I4=raw_img[1::2,::2][::2,::2]
# I1=raw_img[1::2,1::2][::2,::2]
I = 0.5*(I1 + I3 + I2 + I4)
#print('I1: ', I1[np.isnan(I1)].shape)
##print('I2: ', I1[np.isnan(I2)].shape)
#print('I3: ', I1[np.isnan(I3)].shape)
#print('I4: ', I1[np.isnan(I4)].shape)
#print('I: ', I1[np.isnan(I)].shape, np.max(I), np.mean(I))
return I1, I2, I3, I4, I
def raw_split(raw_img):
I1, I2, I3, I4, I = raw2imgs(raw_img)
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
def pols2infor(raw_img):
I1, I2, I3, I4 = [raw_img[:,:,i] for i in range(4)]
I = (I1+I2+I3+I4)*0.5
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
def calculate_ADoLP(I1, I2, I3, I4, I):
Q = I1 - I3
U = I2 - I4
Q[Q == 0] = 0.0001
I[I == 0] = 0.0001
DoLP = np.sqrt(np.square(Q)+np.square(U))/I
AoLP = 0.5*np.arctan(U/Q)
# print(np.min(DoLP), np.max(DoLP))
# AoLP = (AoLP + 0.786)/(2*0.786)
DoLP[DoLP>1] = 1
return AoLP, DoLP
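# Illustrative sanity check (not part of the original pipeline): for
# unpolarized input, I1 == I2 == I3 == I4, so DoLP should be ~0 everywhere.
#   I1 = I2 = I3 = I4 = 0.5 * np.ones((4, 4))
#   AoLP, DoLP = calculate_ADoLP(I1, I2, I3, I4, 0.5 * (I1 + I2 + I3 + I4))
#   assert np.allclose(DoLP, 0, atol=1e-3)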
'''
def ad_new(raw):
Q = raw[:,:,:,0:1] - raw[:,:,:,2:3]
U = raw[:,:,:,1:2] - raw[:,:,:,3:4]
Q[Q == 0] = 1e-7
    DoLP = np.sqrt(np.square(Q)+np.square(U))/raw[:,:,:,4:5]
AoLP = 0.5*np.arctan(U/Q)
# AoLP = (AoLP + 0.786)/(2*0.786)
return np.concatenate([raw, AoLP, DoLP],axis=3)
'''
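# NOTE: aug_ad() below calls ad_new(), which only exists inside the
# commented-out block above; enable it (or an equivalent) before using aug_ad().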
def vis_ADoLP(AoLP, DoLP):
hsv = np.concatenate([AoLP[:,:,np.newaxis], DoLP[:,:,np.newaxis], np.ones([AoLP.shape[0], AoLP.shape[1], 1])],axis=2)
rgb = hsv_to_rgb(hsv)
return rgb
def vis_ADI(raw):
AoLP, DoLP, I=raw[:,:,2],raw[:,:,1],raw[:,:,0]
hsv = np.concatenate([AoLP[:,:,np.newaxis], DoLP[:,:,np.newaxis], I[:,:,np.newaxis]],axis=2)
rgb = hsv_to_rgb(hsv)
return rgb
def read_uint12_12p(path):
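    # Unpacks little-endian 12-bit packed data (two pixels A, B per 3 bytes):
    # byte0 = A[7:0], byte1 = (B[3:0] << 4) | A[11:8], byte2 = B[11:4].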
data = np.fromfile(path, dtype=np.uint8).astype("float32")
fst_uint8, mid_uint8, lst_uint8 = np.reshape(data, (data.shape[0] // 3, 3)).astype(np.uint16).T
fst_uint12 = fst_uint8 + (np.bitwise_and((mid_uint8 << 8), 3840))
snd_uint12 = (lst_uint8 << 4) + (mid_uint8 >> 4)
return np.reshape(np.concatenate((fst_uint12[:, None], snd_uint12[:, None]), axis=1), 2 * fst_uint12.shape[0])
def read_raw(path, imageSize = (2048, 2448)):
npimg = np.fromfile(path, dtype=np.uint16).astype("float32")
unit = float(npimg.shape[0])/(2048*2448)
if unit == 1:
if np.max(npimg)>4096:
npimg /= 65535.
else:
npimg /= 4095.
elif unit== 0.5 :
npimg = np.fromfile(path, dtype=np.uint8).astype("float32")
npimg /= 255.
else:
npimg = np.float32(read_uint12_12p(path))/4095
npimg = npimg.reshape(imageSize)
# print("Normalize- Max: %.4lf Min: %.4lf"%(np.max(npimg), np.min(npimg)))
return npimg
def whole_split(net_out):
    # Key order matches the data list assembled in raw_split() / pols2infor().
    key = 'I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np'
key = key.split(', ')
data_dict = {}
for i in range(9):
data_dict[key[i]] = net_out[0,:,:,i]
return data_dict
def pols2difs(raw_img):
I1, I2, I3, I4 = [raw_img[:,:,i] for i in range(4)]
I = (I1+I2+I3+I4)*0.5
AoLP, DoLP=calculate_ADoLP(I1, I2, I3, I4, I)
I_p, I_np=I * DoLP, I*(1-DoLP)
# print('AoLP NaN: ', AoLP[np.isnan(AoLP)].shape)
# print('DoLP NaN: ', DoLP[np.isnan(DoLP)].shape)
# print('I_p NaN: ', I_p[np.isnan(I_p)].shape)
# print('I_np NaN: ', I_np[np.isnan(I_np)].shape)
data = [I1, I2, I3, I4, I, DoLP, AoLP, I_p, I_np, I1-I2, I1-I3, I1-I4, I2-I3, I2-I4, I3-I4]
data_expand = [I[np.newaxis, :,:,np.newaxis] for I in data]
return np.concatenate(data_expand,axis=3)
def mask(img):
h, w = img.shape[0], img.shape[1]
mask = np.zeros([h, w, 1])
x1 = np.random.randint(int(0.75*w))
x2 = x1 + int(0.25*w)+np.random.randint(int(0.75*w - x1))
y1 = np.random.randint(int(0.75*h))
y2 = y1 + int(0.25*h)+np.random.randint(int(0.75*h - y1))
mask[x1:x2, y1:y2, :] = 1
# print("x1, x2, y1, y2: ", x1, x2, y1, y2)
return mask
def crop_images(X,a,b,is_sq=False):
h_orig,w_orig = X.shape[1:3]
w_crop = np.random.randint(a, b)
r = w_crop/w_orig
h_crop = np.int(h_orig*r)
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
out = X[:,h_offset:h_offset+h_crop-1,w_offset:w_offset+w_crop-1,:]
h,w=out.shape[1:3]
h = h//32*32
w = w//32*32
return out[:,:h,:w,:]
def aug_ad(im_in, im_R, im_T):
#Crop
h_orig,w_orig = im_in.shape[1:3]
w_crop = np.random.randint(512, 641)
r = w_crop/w_orig
h_crop = np.int(h_orig*r)
h_crop = h_crop//32*32
w_crop = w_crop//32*32
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return ad_new(im_in), ad_new(im_R), ad_new(im_T)
def augmentation(im_in, im_R, im_T):
#Crop
h_orig,w_orig = im_in.shape[1:3]
# w_crop = 641#np.random.randint(640, 801)
# r = w_crop/w_orig
# h_crop = np.int(h_orig*r)
w_crop, h_crop = 512, 512
# h_crop = h_crop//32*32
# w_crop = w_crop//32*32
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_in, im_R, im_T
def crop_augmentation(im_R, im_T):
#Crop
h_orig,w_orig = im_R.shape[1:3]
h_crop = h_orig//224*224
w_crop = w_orig//224*224
size = min(h_crop,w_crop)
# w_crop = 641#np.random.randint(640, 801)
# r = w_crop/w_orig
# h_crop = np.int(h_orig*r)
if size > 640:
size = 640
w_crop = size
h_crop = size
try:
w_offset = np.random.randint(0, w_orig-w_crop)
h_offset = np.random.randint(0, h_orig-h_crop)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_R = im_R[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
im_T = im_T[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_R, im_T
def crop_augmentation_list(img_list):
#Crop
h_orig,w_orig = img_list[0].shape[1:3]
h_crop = h_orig * 3 // 4 // 32 * 32
w_crop = w_orig * 3 // 4 // 32 * 32
try:
w_offset = np.random.randint(0, w_orig-w_crop)
h_offset = np.random.randint(0, h_orig-h_crop)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
crop_list = [img[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:] for img in img_list]
return crop_list
def tf_calculate_ADoLP(I_all):
I1, I2, I3, I4 = I_all[:,:,:,:1], I_all[:,:,:,1:2], I_all[:,:,:,2:3], I_all[:,:,:,3:4]
I = 0.5 * (I1 + I2 + I3 + I4)+1e-4
Q = I1 - I3
U = I2 - I4
zero_mat = tf.zeros(tf.shape(I1), tf.float32)
ones_mat = 1e-4 * tf.ones(tf.shape(I1), tf.float32)
Q = tf.where(tf.equal(Q, zero_mat), ones_mat, Q)
DoLP = tf.divide(tf.sqrt(tf.square(Q)+tf.square(U)), I)
AoLP = 0.5*tf.atan(U/Q)
# AoLP = (AoLP + 0.786)/(2*0.786)
return AoLP, DoLP
def ADoLP_loss(gt, output):
AoLP1, DoLP1 = tf_calculate_ADoLP(gt)
AoLP2, DoLP2 = tf_calculate_ADoLP(output)
AoLP_loss = tf.reduce_mean(tf.abs(AoLP1 - AoLP2))
DoLP_loss = tf.reduce_mean(tf.abs(DoLP1 - DoLP2))
return AoLP_loss + DoLP_loss
def GC_augmentation(im_in):
#Flip
magic = np.random.random()
# print(im_in.shape)
if magic > 0.75:
im_in=im_in[:,::-1,:,:]
elif magic < 0.25:
im_in=im_in[:,:,::-1,:]
#Crop
h_orig,w_orig = im_in.shape[1:3]
h_crop = 224
w_crop = 224
try:
w_offset = np.random.randint(0, w_orig-w_crop-1)
h_offset = np.random.randint(0, h_orig-h_crop-1)
except:
print("Original W %d, desired W %d"%(w_orig,w_crop))
print("Original H %d, desired H %d"%(h_orig,h_crop))
im_in=im_in[:,h_offset:h_offset+h_crop,w_offset:w_offset+w_crop,:]
return im_in
|
[
"cv2.GaussianBlur",
"numpy.load",
"numpy.maximum",
"numpy.ones",
"numpy.random.randint",
"numpy.tile",
"glob.glob",
"tensorflow.abs",
"numpy.power",
"numpy.max",
"numpy.int",
"numpy.reshape",
"numpy.linspace",
"tensorflow.atan",
"tensorflow.equal",
"numpy.uint8",
"numpy.minimum",
"numpy.square",
"numpy.hstack",
"matplotlib.colors.hsv_to_rgb",
"numpy.arctan",
"numpy.concatenate",
"numpy.vstack",
"scipy.misc.imread",
"numpy.fromfile",
"numpy.float32",
"numpy.zeros",
"time.time",
"cv2.imread",
"tensorflow.shape",
"numpy.random.random",
"numpy.where",
"numpy.bitwise_and",
"tensorflow.square",
"numpy.random.rand",
"numpy.sqrt"
] |
[((554, 576), 'glob.glob', 'glob', (["(data_path + '/*')"], {}), "(data_path + '/*')\n", (558, 576), False, 'from glob import glob\n'), ((1701, 1754), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/train/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/train/*npy')\n", (1705, 1754), False, 'from glob import glob\n'), ((1961, 2013), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/test/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/test/*npy')\n", (1965, 2013), False, 'from glob import glob\n'), ((2219, 2270), 'glob.glob', 'glob', (['"""../data_new/Data_Polar_Clean/MMR_1/val/*npy"""'], {}), "('../data_new/Data_Polar_Clean/MMR_1/val/*npy')\n", (2223, 2270), False, 'from glob import glob\n'), ((2577, 2592), 'numpy.load', 'np.load', (['M_name'], {}), '(M_name)\n', (2584, 2592), True, 'import numpy as np\n'), ((2606, 2621), 'numpy.load', 'np.load', (['R_name'], {}), '(R_name)\n', (2613, 2621), True, 'import numpy as np\n'), ((2976, 3012), 'numpy.tile', 'np.tile', (['row2[np.newaxis, :]', '[h, 1]'], {}), '(row2[np.newaxis, :], [h, 1])\n', (2983, 3012), True, 'import numpy as np\n'), ((3128, 3164), 'numpy.tile', 'np.tile', (['row1[:, np.newaxis]', '[1, w]'], {}), '(row1[:, np.newaxis], [1, w])\n', (3135, 3164), True, 'import numpy as np\n'), ((3173, 3197), 'numpy.power', 'np.power', (['(mat1 * mat2)', '(2)'], {}), '(mat1 * mat2, 2)\n', (3181, 3197), True, 'import numpy as np\n'), ((3286, 3337), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['mat', '(sz, sz)', 'cv2.BORDER_DEFAULT'], {}), '(mat, (sz, sz), cv2.BORDER_DEFAULT)\n', (3302, 3337), False, 'import os, time, cv2, scipy.io\n'), ((3481, 3506), 'numpy.ones', 'np.ones', (['(h_orig, w_orig)'], {}), '((h_orig, w_orig))\n', (3488, 3506), True, 'import numpy as np\n'), ((3521, 3555), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (3538, 3555), True, 'import numpy as np\n'), ((3568, 3602), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (3585, 3602), True, 'import numpy as np\n'), ((4108, 4142), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (4125, 4142), True, 'import numpy as np\n'), ((4155, 4189), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (4172, 4189), True, 'import numpy as np\n'), ((4697, 4731), 'numpy.random.randint', 'np.random.randint', (['(10)', '(w_orig // 3)'], {}), '(10, w_orig // 3)\n', (4714, 4731), True, 'import numpy as np\n'), ((4744, 4778), 'numpy.random.randint', 'np.random.randint', (['(10)', '(h_orig // 3)'], {}), '(10, h_orig // 3)\n', (4761, 4778), True, 'import numpy as np\n'), ((5398, 5420), 'cv2.imread', 'cv2.imread', (['T_name', '(-1)'], {}), '(T_name, -1)\n', (5408, 5420), False, 'import os, time, cv2, scipy.io\n'), ((5427, 5454), 'numpy.random.randint', 'np.random.randint', (['(256)', '(480)'], {}), '(256, 480)\n', (5444, 5454), True, 'import numpy as np\n'), ((5988, 6010), 'numpy.sqrt', 'np.sqrt', (['(mask1 * mask2)'], {}), '(mask1 * mask2)\n', (5995, 6010), True, 'import numpy as np\n'), ((8451, 8470), 'numpy.hstack', 'np.hstack', (['out_img1'], {}), '(out_img1)\n', (8460, 8470), True, 'import numpy as np\n'), ((8885, 8904), 'numpy.hstack', 'np.hstack', (['out_img2'], {}), '(out_img2)\n', (8894, 8904), True, 'import numpy as np\n'), ((8925, 8956), 'numpy.vstack', 'np.vstack', (['[out_img1, out_img2]'], {}), '([out_img1, out_img2])\n', (8934, 8956), True, 'import numpy 
as np\n'), ((11133, 11181), 'numpy.concatenate', 'np.concatenate', (['[temp_input, temp_input]'], {'axis': '(3)'}), '([temp_input, temp_input], axis=3)\n', (11147, 11181), True, 'import numpy as np\n'), ((13488, 13523), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (13502, 13523), True, 'import numpy as np\n'), ((14072, 14107), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (14086, 14107), True, 'import numpy as np\n'), ((14920, 14935), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (14930, 14935), False, 'from matplotlib.colors import hsv_to_rgb\n'), ((15038, 15136), 'numpy.concatenate', 'np.concatenate', (['[AoLP[:, :, np.newaxis], DoLP[:, :, np.newaxis], I[:, :, np.newaxis]]'], {'axis': '(2)'}), '([AoLP[:, :, np.newaxis], DoLP[:, :, np.newaxis], I[:, :, np.\n newaxis]], axis=2)\n', (15052, 15136), True, 'import numpy as np\n'), ((15136, 15151), 'matplotlib.colors.hsv_to_rgb', 'hsv_to_rgb', (['hsv'], {}), '(hsv)\n', (15146, 15151), False, 'from matplotlib.colors import hsv_to_rgb\n'), ((17021, 17056), 'numpy.concatenate', 'np.concatenate', (['data_expand'], {'axis': '(3)'}), '(data_expand, axis=3)\n', (17035, 17056), True, 'import numpy as np\n'), ((17123, 17142), 'numpy.zeros', 'np.zeros', (['[h, w, 1]'], {}), '([h, w, 1])\n', (17131, 17142), True, 'import numpy as np\n'), ((17534, 17557), 'numpy.random.randint', 'np.random.randint', (['a', 'b'], {}), '(a, b)\n', (17551, 17557), True, 'import numpy as np\n'), ((17595, 17613), 'numpy.int', 'np.int', (['(h_orig * r)'], {}), '(h_orig * r)\n', (17601, 17613), True, 'import numpy as np\n'), ((18129, 18156), 'numpy.random.randint', 'np.random.randint', (['(512)', '(641)'], {}), '(512, 641)\n', (18146, 18156), True, 'import numpy as np\n'), ((18194, 18212), 'numpy.int', 'np.int', (['(h_orig * r)'], {}), '(h_orig * r)\n', (18200, 18212), True, 'import numpy as np\n'), ((21811, 21829), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (21827, 21829), True, 'import numpy as np\n'), ((904, 927), 'glob.glob', 'glob', (["(folder + '/*.npy')"], {}), "(folder + '/*.npy')\n", (908, 927), False, 'from glob import glob\n'), ((1255, 1278), 'glob.glob', 'glob', (["(folder + '/*.npy')"], {}), "(folder + '/*.npy')\n", (1259, 1278), False, 'from glob import glob\n'), ((2692, 2721), 'numpy.random.randint', 'np.random.randint', (['(h // 5 * 3)'], {}), '(h // 5 * 3)\n', (2709, 2721), True, 'import numpy as np\n'), ((2738, 2767), 'numpy.random.randint', 'np.random.randint', (['(w // 5 * 3)'], {}), '(w // 5 * 3)\n', (2755, 2767), True, 'import numpy as np\n'), ((3702, 3743), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (3719, 3743), True, 'import numpy as np\n'), ((3761, 3802), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (3778, 3802), True, 'import numpy as np\n'), ((4289, 4330), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (4306, 4330), True, 'import numpy as np\n'), ((4348, 4389), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (4365, 4389), True, 'import numpy as np\n'), ((4878, 4919), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (4895, 4919), True, 'import numpy as np\n'), 
((4937, 4978), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (4954, 4978), True, 'import numpy as np\n'), ((6049, 6067), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6065, 6067), True, 'import numpy as np\n'), ((9201, 9228), 'numpy.uint8', 'np.uint8', (['out_img[::2, ::2]'], {}), '(out_img[::2, ::2])\n', (9209, 9228), True, 'import numpy as np\n'), ((14298, 14314), 'numpy.arctan', 'np.arctan', (['(U / Q)'], {}), '(U / Q)\n', (14307, 14314), True, 'import numpy as np\n'), ((15397, 15433), 'numpy.bitwise_and', 'np.bitwise_and', (['(mid_uint8 << 8)', '(3840)'], {}), '(mid_uint8 << 8, 3840)\n', (15411, 15433), True, 'import numpy as np\n'), ((15514, 15580), 'numpy.concatenate', 'np.concatenate', (['(fst_uint12[:, None], snd_uint12[:, None])'], {'axis': '(1)'}), '((fst_uint12[:, None], snd_uint12[:, None]), axis=1)\n', (15528, 15580), True, 'import numpy as np\n'), ((17642, 17683), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (17659, 17683), True, 'import numpy as np\n'), ((17700, 17741), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (17717, 17741), True, 'import numpy as np\n'), ((18303, 18344), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (18320, 18344), True, 'import numpy as np\n'), ((18361, 18402), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (18378, 18402), True, 'import numpy as np\n'), ((19127, 19168), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (19144, 19168), True, 'import numpy as np\n'), ((19185, 19226), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (19202, 19226), True, 'import numpy as np\n'), ((20000, 20037), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop)'], {}), '(0, w_orig - w_crop)\n', (20017, 20037), True, 'import numpy as np\n'), ((20056, 20093), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop)'], {}), '(0, h_orig - h_crop)\n', (20073, 20093), True, 'import numpy as np\n'), ((20610, 20647), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop)'], {}), '(0, w_orig - w_crop)\n', (20627, 20647), True, 'import numpy as np\n'), ((20666, 20703), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop)'], {}), '(0, h_orig - h_crop)\n', (20683, 20703), True, 'import numpy as np\n'), ((21200, 21212), 'tensorflow.shape', 'tf.shape', (['I1'], {}), '(I1)\n', (21208, 21212), True, 'import tensorflow as tf\n'), ((21301, 21322), 'tensorflow.equal', 'tf.equal', (['Q', 'zero_mat'], {}), '(Q, zero_mat)\n', (21309, 21322), True, 'import tensorflow as tf\n'), ((21414, 21428), 'tensorflow.atan', 'tf.atan', (['(U / Q)'], {}), '(U / Q)\n', (21421, 21428), True, 'import tensorflow as tf\n'), ((21642, 21663), 'tensorflow.abs', 'tf.abs', (['(AoLP1 - AoLP2)'], {}), '(AoLP1 - AoLP2)\n', (21648, 21663), True, 'import tensorflow as tf\n'), ((21697, 21718), 'tensorflow.abs', 'tf.abs', (['(DoLP1 - DoLP2)'], {}), '(DoLP1 - DoLP2)\n', (21703, 21718), True, 'import tensorflow as tf\n'), ((22091, 22132), 'numpy.random.randint', 'np.random.randint', (['(0)', '(w_orig - w_crop - 1)'], {}), '(0, w_orig - w_crop - 1)\n', (22108, 
22132), True, 'import numpy as np\n'), ((22149, 22190), 'numpy.random.randint', 'np.random.randint', (['(0)', '(h_orig - h_crop - 1)'], {}), '(0, h_orig - h_crop - 1)\n', (22166, 22190), True, 'import numpy as np\n'), ((705, 733), 'glob.glob', 'glob', (["(folder1 + '/Outdoor/*')"], {}), "(folder1 + '/Outdoor/*')\n", (709, 733), False, 'from glob import glob\n'), ((2789, 2807), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2805, 2807), True, 'import numpy as np\n'), ((2839, 2857), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2855, 2857), True, 'import numpy as np\n'), ((2886, 2920), 'numpy.linspace', 'np.linspace', (['Light_low', '(0.8)', 'mid_w'], {}), '(Light_low, 0.8, mid_w)\n', (2897, 2920), True, 'import numpy as np\n'), ((2919, 2957), 'numpy.linspace', 'np.linspace', (['(0.8)', 'Light_low', '(w - mid_w)'], {}), '(0.8, Light_low, w - mid_w)\n', (2930, 2957), True, 'import numpy as np\n'), ((3038, 3072), 'numpy.linspace', 'np.linspace', (['Light_low', '(0.8)', 'mid_h'], {}), '(Light_low, 0.8, mid_h)\n', (3049, 3072), True, 'import numpy as np\n'), ((3071, 3109), 'numpy.linspace', 'np.linspace', (['(0.8)', 'Light_low', '(h - mid_h)'], {}), '(0.8, Light_low, h - mid_h)\n', (3082, 3109), True, 'import numpy as np\n'), ((4071, 4087), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4085, 4087), True, 'import numpy as np\n'), ((4658, 4674), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4672, 4674), True, 'import numpy as np\n'), ((5247, 5263), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5261, 5263), True, 'import numpy as np\n'), ((5531, 5553), 'numpy.float32', 'np.float32', (['syn_image1'], {}), '(syn_image1)\n', (5541, 5553), True, 'import numpy as np\n'), ((5719, 5737), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5735, 5737), True, 'import numpy as np\n'), ((5822, 5841), 'numpy.power', 'np.power', (['gt_t', '(2.2)'], {}), '(gt_t, 2.2)\n', (5830, 5841), True, 'import numpy as np\n'), ((5843, 5862), 'numpy.power', 'np.power', (['gt_r', '(2.2)'], {}), '(gt_r, 2.2)\n', (5851, 5862), True, 'import numpy as np\n'), ((8985, 9009), 'numpy.maximum', 'np.maximum', (['out_img', '(0.0)'], {}), '(out_img, 0.0)\n', (8995, 9009), True, 'import numpy as np\n'), ((12158, 12188), 'scipy.misc.imread', 'sic.imread', (['raw_name'], {'mode': '"""L"""'}), "(raw_name, mode='L')\n", (12168, 12188), True, 'import scipy.misc as sic\n'), ((14857, 14899), 'numpy.ones', 'np.ones', (['[AoLP.shape[0], AoLP.shape[1], 1]'], {}), '([AoLP.shape[0], AoLP.shape[1], 1])\n', (14864, 14899), True, 'import numpy as np\n'), ((15213, 15246), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint8'}), '(path, dtype=np.uint8)\n', (15224, 15246), True, 'import numpy as np\n'), ((15673, 15707), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint16'}), '(path, dtype=np.uint16)\n', (15684, 15707), True, 'import numpy as np\n'), ((15803, 15816), 'numpy.max', 'np.max', (['npimg'], {}), '(npimg)\n', (15809, 15816), True, 'import numpy as np\n'), ((21257, 21269), 'tensorflow.shape', 'tf.shape', (['I1'], {}), '(I1)\n', (21265, 21269), True, 'import tensorflow as tf\n'), ((677, 704), 'glob.glob', 'glob', (["(folder1 + '/Indoor/*')"], {}), "(folder1 + '/Indoor/*')\n", (681, 704), False, 'from glob import glob\n'), ((3246, 3267), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (3263, 3267), True, 'import numpy as np\n'), ((5612, 5634), 'cv2.imread', 'cv2.imread', (['R_name', '(-1)'], {}), '(R_name, -1)\n', (5622, 5634), 
False, 'import os, time, cv2, scipy.io\n'), ((5892, 5910), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5908, 5910), True, 'import numpy as np\n'), ((6098, 6115), 'numpy.power', 'np.power', (['pf', '(2.2)'], {}), '(pf, 2.2)\n', (6106, 6115), True, 'import numpy as np\n'), ((6184, 6201), 'numpy.power', 'np.power', (['pf', '(2.2)'], {}), '(pf, 2.2)\n', (6192, 6201), True, 'import numpy as np\n'), ((10303, 10348), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 4] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 4] * 255.0, 255.0)\n', (10313, 10348), True, 'import numpy as np\n'), ((14253, 14265), 'numpy.square', 'np.square', (['Q'], {}), '(Q)\n', (14262, 14265), True, 'import numpy as np\n'), ((14266, 14278), 'numpy.square', 'np.square', (['U'], {}), '(U)\n', (14275, 14278), True, 'import numpy as np\n'), ((15304, 15345), 'numpy.reshape', 'np.reshape', (['data', '(data.shape[0] // 3, 3)'], {}), '(data, (data.shape[0] // 3, 3))\n', (15314, 15345), True, 'import numpy as np\n'), ((21367, 21379), 'tensorflow.square', 'tf.square', (['Q'], {}), '(Q)\n', (21376, 21379), True, 'import tensorflow as tf\n'), ((21380, 21392), 'tensorflow.square', 'tf.square', (['U'], {}), '(U)\n', (21389, 21392), True, 'import tensorflow as tf\n'), ((10128, 10139), 'time.time', 'time.time', ([], {}), '()\n', (10137, 10139), False, 'import os, time, cv2, scipy.io\n'), ((10596, 10643), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, -3:] * 255.0)', '(255.0)'], {}), '(output[0, :, :, -3:] * 255.0, 255.0)\n', (10606, 10643), True, 'import numpy as np\n'), ((10725, 10770), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 6] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 6] * 255.0, 255.0)\n', (10735, 10770), True, 'import numpy as np\n'), ((10852, 10897), 'numpy.minimum', 'np.minimum', (['(output[0, :, :, 5] * 255.0)', '(255.0)'], {}), '(output[0, :, :, 5] * 255.0, 255.0)\n', (10862, 10897), True, 'import numpy as np\n'), ((15935, 15968), 'numpy.fromfile', 'np.fromfile', (['path'], {'dtype': 'np.uint8'}), '(path, dtype=np.uint8)\n', (15946, 15968), True, 'import numpy as np\n'), ((9492, 9515), 'numpy.where', 'np.where', (['all_loss_test'], {}), '(all_loss_test)\n', (9500, 9515), True, 'import numpy as np\n')]
|
import cadquery as cq
import cadquery.selectors as cqs
import logging, importlib
from types import SimpleNamespace as Measures
import utilities # local directory import
# Outline shape of an X-Mount Type M socket for smartphones and other small mobile devices.
# Selective reloading to pick up changes made between script executions.
# See: https://github.com/CadQuery/CQ-editor/issues/99#issue-525367146
importlib.reload(utilities)
log = logging.getLogger(__name__)
# Do-nothing definition of show_object() when not available, to prevent error messages when
# including this module from another module. Obviously in such cases, show_object() calls inside
# this file have no effect, but you'll want to control object visibility from the top level file anyway.
if "show_object" not in globals():
def show_object(*args, **kwargs):
pass
class XMountSocket:
def __init__(self, workplane, measures):
"""
An Armor-X X-Mount Type M socket (the newer version that includes a loop for a strap).
X-Mount is a mount system for mobile devices; see https://armor-x.com/. Type M (shown here)
is meant for smartphones but has been discontinued by Armor-X and replaced with Type K.
Type T is larger and meant for tablets, and as of 2021-05 is still in production. The
socket part is to be glued to the mobile device, while the plug part (see xmount_plug.py)
becomes part of the various mounts.
So far this design is only useful to cut holes for inserting an X-Mount socket, since the
shape is not yet finished (no center cutout, smaller back cutout, no strap loop but only a
bounding-box-like placeholder for it).
:param workplane: The CadQuery workplane to create this part on.
:param measures: The measures to use for the parameters of this design. Expects a nested
[SimpleNamespace](https://docs.python.org/3/library/types.html#types.SimpleNamespace)
object.
.. todo:: Add the X-Mount cutout for mounting an X-Mount plug.
.. todo:: Add the side holes in the X-Mount socket for the orange security clips.
.. todo:: Create a separate design for the orange security clips.
.. todo:: Support generating a positive X-Mount socket. It should be possible to select with
a parameter (in the usual Measures object) if to generate a hole cutter or a positive
part.
.. todo:: Support generating only a cutter for the X-Mount hole, which can be used to cut
        an X-Mount socket into a phone case etc.
.. todo:: Create a variant that is much wider and also 10 mm deeper and instead of
plastic for the load-bearing rails uses a piece of 1.0 - 1.5 mm stainless steel
sheet metal. That should be a stripe of 20 mm width on both sides, or ideally as wide
as the phone, and would be glued to the 3D printed part with construction glue. The
3D printed part should prepare that by providing space for the construction glue but
also the spacers to position the metal piece in the correct position.
This variant would be much more durable. 3D printing an X-Mount socket might otherwise
not lead to plastic parts that are strong enough, as the rails are only 1.0-1.5 mm thick
in the original part. To help produce this variant, create a template that can be
printed or 3D printed and glued on sheet metal to help cutting it to shape.
.. todo:: Support holes for magnets (of configurable shape) in the back of the part, where
the original X-Mount part also has its magnet.
"""
self.model = workplane
self.debug = False
self.measures = measures
m = self.measures
m.back_cutout.height = m.height_step_2 - m.back_cutout.height_step_1
# Position of the center cutout's start edge, from the back of the part.
m.center_cutout.depth_pos = m.clip_block.depth_pos + m.clip_block.depth
# TODO: Initialize missing measures with defaults.
self.build()
def build(self):
m = self.measures
baseplane = self.model.workplane()
topface_wire = (
cq.Workplane("XY")
.workplane(offset = m.height_step_2)
.rect(m.width_step_2, m.depth_step_2)
.extrude(-0.1) # Pro forma to get a solid.
.edges("|Z")
.fillet(m.corner_radius)
.faces(">Z")
.wires()
)
# show_object(topface_wire, name = "topface_wire", options = {"color": "yellow", "alpha": 0.8})
base_shape = (
cq.Workplane("XY")
.rect(m.width_step_1, m.depth_step_1)
.extrude(m.height_step_1)
.edges("|Z")
.fillet(m.corner_radius)
# Loft from the top of the extrusion to an inset wire of the same shape.
.faces(">Z").wires().toPending()
.workplane() # Necessary to prevent a segfault. TODO: Report this issue.
.add(topface_wire).toPending()
.loft()
.edges(">Z")
.fillet(m.edge_fillet)
)
loop_placeholder = (
cq.Workplane("XY")
# Depth is 150% of actual loop depth to create a cutter of the full part height even
# in the section affected by the edge filleting of the main shape. Depth also includes
# a space of m.loop.thickness for the loop to pass through when mounted to the
# X-Mount part and that part is inserted into a part cut with this cutter.
.box(m.loop.width_2, 1.5 * m.loop.depth + m.loop.thickness, m.height_step_2, centered = [True, False, False])
# Chamfer the edge bordering the space we made for the fabric strap, so that the strap
# can easily ride up on the cut-out part when mounting it. Factor 0.99 because the CAD
# kernel cannot chamfer away whole faces, and to have vertical edges to round later.
.edges("|X and <Z").chamfer(0.99 * m.height_step_2)
# Small corner rounding for the hole this will cut.
.edges("|Z").fillet(m.loop.corner_radius)
# Put into place.
.translate([
0,
-0.5 * m.depth_step_1 - m.loop.depth_offset - m.loop.thickness,
0
])
)
back_cutout = (
cq.Workplane("XY")
# The cutout is only made half as deep as that of the positive X-Mount part. Because
# the front half of the back cutout should be kept as free space for clips.
.box(m.back_cutout.width, 0.5 * m.back_cutout.depth, m.back_cutout.height, centered = [True, False, False])
            # Note, no corner radius here as we're creating only the back part of the cutout.
.translate([
0,
0.5 * m.depth_step_1 - 0.5 * m.back_cutout.depth,
m.back_cutout.height_step_1
])
)
# show_object(back_cutout, name = "back_cutout", options = {"color": "yellow", "alpha": 0.8})
left_cutout = (
cq.Workplane("XZ")
# Create a profile that we can extrude to get the cutout shape.
# The profile will be in the first quadrant of the XZ plane, suitable for the left
# cutout after extruding and moving it in -x direction.
.move(0, m.side_cutouts.height_step_1)
.vLineTo(m.height_step_2) # Go to full part height.
.hLineTo(m.side_cutouts.width_step_2) # Go to max. cutout depth.
.lineTo(m.side_cutouts.width_step_1, m.side_cutouts.height_step_2)
.close()
            # Use half the length, since "both" will extrude the given length into EACH direction.
# Also to simplify the shape, we use the inner depth measure and omit the tapering of
# the extrusion. That works because we're creating a cutter here, and it removes more
# material than the original positive shape has, so the positive shape will still fit.
.extrude(0.5 * m.side_cutouts.depth_2, both = True)
.translate([-0.5 * m.width_step_1, m.side_cutouts.depth_offset, 0])
)
# show_object(left_cutout, name = "left_cutout", options = {"color": "yellow", "alpha": 0.8})
right_cutout = left_cutout.mirror("YZ")
# show_object(right_cutout, name = "right_cutout", options = {"color": "yellow", "alpha": 0.8})
self.model = (
base_shape
.union(loop_placeholder)
.cut(back_cutout)
.cut(left_cutout)
.cut(right_cutout)
# Move the model so that the origin is at the center of the area where a holder is
# fixed to the X-Mount. Helps positioning it when adding it to another model.
.translate([
0,
-0.5 * m.depth_step_1 + m.center_cutout.depth_pos + 0.5 * m.center_cutout.depth_1,
0
])
)
# =============================================================================
# Measures and Part Creation
# =============================================================================
cq.Workplane.part = utilities.part
# Note, a lot of these measures are not in use yet as only a hole cutter is generated so far.
# TODO: Unify the parameter naming. Right now we have both the "width_1" style and "width_step_1"
# style schemes, and they are overlapping.
measures = Measures(
width_step_1 = 34.15, # At bottom of part.
width_step_2 = 26.70, # At top of part.
depth_step_1 = 64.65, # At bottom of part. Ignores belt loop attachment depth.
depth_step_2 = 55.00, # At top of part.
height_step_1 = 0.90,
# Height is 3.90 for the newer Armor-X Type-M socket design, 4.90 for the older.
height_step_2 = 3.90, # Ignores clip block height.
corner_radius = 8.20,
edge_fillet = 5.0, # Edges around the top of the part.
side_cutouts = Measures(
depth_offset = -0.9, # Offset from being centered along the left / right edge.
width_step_1 = 3.00, # At the position where the cutting angle changes.
width_step_2 = 4.00, # At the top surface of the part.
depth_1 = 49.50, # Outer depth, imagined at the top surface of the part.
depth_2 = 42.50, # Inner depth, at the top surface of the part.
height_step_1 = 0.90, # The layer left uncut at the bottom of the part.
height_step_2 = 1.70,
corner_radius = 3.50, # TODO: To confirm after printing. Difficult to measure.
),
# The front half of the back cutout should never be blocked by other parts, to keep the space
# for clips that passed the clip block.
back_cutout = Measures(
width = 18.00,
depth = 7.00,
height_step_1 = 1.32,
corner_radius = 2.07,
),
front_cutout = Measures(
depth_step_1 = 4.66, # At the bottom of the cutout.
depth_step_2 = 5.00, # At the top of the part.
width = 18.00, # At the top of the part.
height_step_1 = 1.10,
corner_radius = 2.07 # Same as the top cutout.
),
center_cutout = Measures(
width_1 = 13.90,
width_2 = 20.75,
depth_1 = 22.5, # Towards the back of the part, where width == width_1.
depth_2 = 23.2, # Towards the front of the part, where width == width_2.
cover_height = 1.00,
cut_depth = 3.20,
corner_radius = 2.80,
wedge_width = 9.70, # At the bottom of the cutout.
wedge_depth = 10.0,
wedge_chamfer = 0.8,
wedge_corner_radius = 1.0
),
clip_block = Measures(
depth_pos = 7.00, # From back of part.
width = 14.00,
depth = 3.00,
height = 4.70,
corner_radius = 0.80,
chamfer_depth = 2.10,
chamfer_height = 3.00
),
loop = Measures(
depth_offset = 4.75, # From front edge of the base shape.
height_offset = 1.15, # From bottom of the main part.
width_1 = 19.50, # At front side of loop.
width_2 = 26.20, # At back side of loop.
depth = 10.50,
height = 2.35,
height_cutoff = 0.70, # At its front section.
thickness = 3.35, # Loop ring strength in the width and depth directions.
corner_radius = 0.9
)
)
xmount_socket = cq.Workplane("XY").part(XMountSocket, measures)
show_options = {"color": "lightgray", "alpha": 0}
show_object(xmount_socket, name = "xmount_socket", options = show_options)
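# Optional export of the generated cutter (a minimal sketch assuming the
# standard CadQuery exporters module; uncomment to write the files):
# cq.exporters.export(xmount_socket, "xmount_socket_cutter.step")
# cq.exporters.export(xmount_socket, "xmount_socket_cutter.stl")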
|
[
"importlib.reload",
"cadquery.Workplane",
"types.SimpleNamespace",
"logging.getLogger"
] |
[((407, 434), 'importlib.reload', 'importlib.reload', (['utilities'], {}), '(utilities)\n', (423, 434), False, 'import logging, importlib\n'), ((442, 469), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (459, 469), False, 'import logging, importlib\n'), ((10147, 10305), 'types.SimpleNamespace', 'Measures', ([], {'depth_offset': '(-0.9)', 'width_step_1': '(3.0)', 'width_step_2': '(4.0)', 'depth_1': '(49.5)', 'depth_2': '(42.5)', 'height_step_1': '(0.9)', 'height_step_2': '(1.7)', 'corner_radius': '(3.5)'}), '(depth_offset=-0.9, width_step_1=3.0, width_step_2=4.0, depth_1=\n 49.5, depth_2=42.5, height_step_1=0.9, height_step_2=1.7, corner_radius=3.5\n )\n', (10155, 10305), True, 'from types import SimpleNamespace as Measures\n'), ((10906, 10977), 'types.SimpleNamespace', 'Measures', ([], {'width': '(18.0)', 'depth': '(7.0)', 'height_step_1': '(1.32)', 'corner_radius': '(2.07)'}), '(width=18.0, depth=7.0, height_step_1=1.32, corner_radius=2.07)\n', (10914, 10977), True, 'from types import SimpleNamespace as Measures\n'), ((11048, 11148), 'types.SimpleNamespace', 'Measures', ([], {'depth_step_1': '(4.66)', 'depth_step_2': '(5.0)', 'width': '(18.0)', 'height_step_1': '(1.1)', 'corner_radius': '(2.07)'}), '(depth_step_1=4.66, depth_step_2=5.0, width=18.0, height_step_1=1.1,\n corner_radius=2.07)\n', (11056, 11148), True, 'from types import SimpleNamespace as Measures\n'), ((11335, 11539), 'types.SimpleNamespace', 'Measures', ([], {'width_1': '(13.9)', 'width_2': '(20.75)', 'depth_1': '(22.5)', 'depth_2': '(23.2)', 'cover_height': '(1.0)', 'cut_depth': '(3.2)', 'corner_radius': '(2.8)', 'wedge_width': '(9.7)', 'wedge_depth': '(10.0)', 'wedge_chamfer': '(0.8)', 'wedge_corner_radius': '(1.0)'}), '(width_1=13.9, width_2=20.75, depth_1=22.5, depth_2=23.2,\n cover_height=1.0, cut_depth=3.2, corner_radius=2.8, wedge_width=9.7,\n wedge_depth=10.0, wedge_chamfer=0.8, wedge_corner_radius=1.0)\n', (11343, 11539), True, 'from types import SimpleNamespace as Measures\n'), ((11816, 11937), 'types.SimpleNamespace', 'Measures', ([], {'depth_pos': '(7.0)', 'width': '(14.0)', 'depth': '(3.0)', 'height': '(4.7)', 'corner_radius': '(0.8)', 'chamfer_depth': '(2.1)', 'chamfer_height': '(3.0)'}), '(depth_pos=7.0, width=14.0, depth=3.0, height=4.7, corner_radius=\n 0.8, chamfer_depth=2.1, chamfer_height=3.0)\n', (11824, 11937), True, 'from types import SimpleNamespace as Measures\n'), ((12051, 12213), 'types.SimpleNamespace', 'Measures', ([], {'depth_offset': '(4.75)', 'height_offset': '(1.15)', 'width_1': '(19.5)', 'width_2': '(26.2)', 'depth': '(10.5)', 'height': '(2.35)', 'height_cutoff': '(0.7)', 'thickness': '(3.35)', 'corner_radius': '(0.9)'}), '(depth_offset=4.75, height_offset=1.15, width_1=19.5, width_2=26.2,\n depth=10.5, height=2.35, height_cutoff=0.7, thickness=3.35,\n corner_radius=0.9)\n', (12059, 12213), True, 'from types import SimpleNamespace as Measures\n'), ((12523, 12541), 'cadquery.Workplane', 'cq.Workplane', (['"""XY"""'], {}), "('XY')\n", (12535, 12541), True, 'import cadquery as cq\n'), ((6523, 6541), 'cadquery.Workplane', 'cq.Workplane', (['"""XY"""'], {}), "('XY')\n", (6535, 6541), True, 'import cadquery as cq\n'), ((5292, 5310), 'cadquery.Workplane', 'cq.Workplane', (['"""XY"""'], {}), "('XY')\n", (5304, 5310), True, 'import cadquery as cq\n'), ((4299, 4317), 'cadquery.Workplane', 'cq.Workplane', (['"""XY"""'], {}), "('XY')\n", (4311, 4317), True, 'import cadquery as cq\n'), ((7265, 7283), 'cadquery.Workplane', 'cq.Workplane', (['"""XZ"""'], {}), "('XZ')\n", 
(7277, 7283), True, 'import cadquery as cq\n'), ((4730, 4748), 'cadquery.Workplane', 'cq.Workplane', (['"""XY"""'], {}), "('XY')\n", (4742, 4748), True, 'import cadquery as cq\n')]
|
import os, uuid, pathlib, base64, json
import sys
ROOT_DIR = os.path.abspath("../../../")
sys.path.append(ROOT_DIR)
from layoutlm.data.convert import convert_img_to_xml
from examples.classification.predict import make_prediction
from mapping import get_template_id
# assumes model exists and is in same directory as this file
MODEL_DIR = 'aetna-trained-model'
OUTPUT_DIR = 'output'
def predict(base64_img, num_matches):
    # Create the output directory if it does not exist yet.
    os.makedirs(OUTPUT_DIR, exist_ok=True)
filename = uuid.uuid4().hex
# assumes that base64_img encodes a .tiff file
img = os.path.join(OUTPUT_DIR, filename + '.tiff')
with open(img, 'wb') as file_to_save:
decoded_image_data = base64.b64decode(base64_img, '-_')
file_to_save.write(decoded_image_data)
convert_img_to_xml(img, OUTPUT_DIR, filename)
hocr = os.path.join(OUTPUT_DIR, filename + '.xml')
matches = make_prediction(MODEL_DIR, hocr, num_matches)
match_array = []
    with open("data/labels/version.txt", "r") as f:
        version = f.read().split()
for rank, label, prob in matches:
match = {
'rank': rank,
'template_id': get_template_id(label),
'confidence': prob
}
match_array.append(match)
response = {
'model version': version[1],
'matches': match_array,
}
return response
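# Example response shape (illustrative values only; the actual template_id and
# version come from mapping.py and data/labels/version.txt):
# {
#     "model version": "1.0.0",
#     "matches": [
#         {"rank": 1, "template_id": "TPL-001", "confidence": 0.97}
#     ]
# }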
if __name__ == "__main__":
predict('', 1)
|
[
"sys.path.append",
"os.mkdir",
"os.path.abspath",
"uuid.uuid4",
"layoutlm.data.convert.convert_img_to_xml",
"base64.b64decode",
"examples.classification.predict.make_prediction",
"mapping.get_template_id",
"os.path.join"
] |
[((61, 89), 'os.path.abspath', 'os.path.abspath', (['"""../../../"""'], {}), "('../../../')\n", (76, 89), False, 'import os, uuid, pathlib, base64, json\n'), ((90, 115), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (105, 115), False, 'import sys\n'), ((580, 624), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "(filename + '.tiff')"], {}), "(OUTPUT_DIR, filename + '.tiff')\n", (592, 624), False, 'import os, uuid, pathlib, base64, json\n'), ((782, 827), 'layoutlm.data.convert.convert_img_to_xml', 'convert_img_to_xml', (['img', 'OUTPUT_DIR', 'filename'], {}), '(img, OUTPUT_DIR, filename)\n', (800, 827), False, 'from layoutlm.data.convert import convert_img_to_xml\n'), ((839, 882), 'os.path.join', 'os.path.join', (['OUTPUT_DIR', "(filename + '.xml')"], {}), "(OUTPUT_DIR, filename + '.xml')\n", (851, 882), False, 'import os, uuid, pathlib, base64, json\n'), ((897, 942), 'examples.classification.predict.make_prediction', 'make_prediction', (['MODEL_DIR', 'hocr', 'num_matches'], {}), '(MODEL_DIR, hocr, num_matches)\n', (912, 942), False, 'from examples.classification.predict import make_prediction\n'), ((441, 461), 'os.mkdir', 'os.mkdir', (['OUTPUT_DIR'], {}), '(OUTPUT_DIR)\n', (449, 461), False, 'import os, uuid, pathlib, base64, json\n'), ((502, 514), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (512, 514), False, 'import os, uuid, pathlib, base64, json\n'), ((696, 730), 'base64.b64decode', 'base64.b64decode', (['base64_img', '"""-_"""'], {}), "(base64_img, '-_')\n", (712, 730), False, 'import os, uuid, pathlib, base64, json\n'), ((1162, 1184), 'mapping.get_template_id', 'get_template_id', (['label'], {}), '(label)\n', (1177, 1184), False, 'from mapping import get_template_id\n')]
|
"""Hyperboloid manifold."""
import torch
from manifolds.base import Manifold
from manifolds.poincare import PoincareBall
from utils.math_utils import arcosh, cosh, sinh
class Hyperboloid(Manifold):
"""
Hyperboloid manifold class.
We use the following convention: -x0^2 + x1^2 + ... + xd^2 = -K
c = 1 / K is the hyperbolic curvature.
"""
name = 'Hyperboloid'
eps = {torch.float32: 1e-7, torch.float64: 1e-15}
min_norm = 1e-15
max_norm = 1e6
def __init__(self):
super(Hyperboloid, self).__init__()
self.name = 'Hyperboloid'
self.eps = {torch.float32: 1e-7, torch.float64: 1e-15}
self.min_norm = 1e-15
self.max_norm = 1e6
@classmethod
def minkowski_dot(self, x, y, keepdim=True):
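        # Minkowski inner product <x, y>_L = -x0*y0 + sum_i xi*yi, written as
        # the full Euclidean dot product minus twice the time-like term.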
res = torch.sum(x * y, dim=-1) - 2 * x[..., 0] * y[..., 0]
if keepdim:
res = res.view(res.shape + (1,))
return res
@classmethod
def minkowski_norm(self, u, keepdim=True):
dot = Hyperboloid.minkowski_dot(u, u, keepdim=keepdim)
return torch.sqrt(torch.clamp(dot, min=Hyperboloid.eps[u.dtype]))
@classmethod
def sqdist(self, x, y, c):
K = 1. / c
prod = Hyperboloid.minkowski_dot(x, y)
theta = torch.clamp(-prod / K, min=1.0 + Hyperboloid.eps[x.dtype])
sqdist = K * arcosh(theta) ** 2
# clamp distance to avoid nans in Fermi-Dirac decoder
return torch.clamp(sqdist, max=50.0)
@classmethod
def proj(self, x, c = 1):
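        # Restores the time-like coordinate x0 = sqrt(K + ||x_{1:}||^2) so the
        # result satisfies -x0^2 + ||x_{1:}||^2 = -K.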
K = 1. / c
d = x.size(-1) - 1
y = x.narrow(-1, 1, d)
y_sqnorm = torch.norm(y, p=2, dim=1, keepdim=True) ** 2
mask = torch.ones_like(x)
mask[:, 0] = 0
vals = torch.zeros_like(x)
vals[:, 0:1] = torch.sqrt(torch.clamp(K + y_sqnorm, min=Hyperboloid.eps[x.dtype]))
return vals + mask * x
@classmethod
def proj_tan(self, u, x, c):
d = x.size(1) - 1
ux = torch.sum(x.narrow(-1, 1, d) * u.narrow(-1, 1, d), dim=1, keepdim=True)
mask = torch.ones_like(u)
mask[:, 0] = 0
vals = torch.zeros_like(u)
vals[:, 0:1] = ux / torch.clamp(x[:, 0:1], min=Hyperboloid.eps[x.dtype])
return vals + mask * u
@classmethod
def proj_tan0(self, u, c):
"""
Note: the expmap cannot work on raw Euclidean vector. So we have to
transform the Euclidean vector into the Tangent space first.
Note: always the input of proj_tan0 is in the Euclidean space.
"""
narrowed = u.narrow(-1, 0, 1)
vals = torch.zeros_like(u)
vals[:, 0:1] = narrowed
return u - vals
@classmethod
def expmap(self, u, x, c):
""" It folds the tangent space on the manifold (expx : TxM -> M).
"""
K = 1. / c
sqrtK = K ** 0.5
normu = Hyperboloid.minkowski_norm(u)
normu = torch.clamp(normu, max=Hyperboloid.max_norm)
theta = normu / sqrtK
theta = torch.clamp(theta, min=Hyperboloid.min_norm)
result = cosh(theta) * x + sinh(theta) * u / theta
return Hyperboloid.proj(result, c)
@classmethod
def logmap(self, x, y, c):
K = 1. / c
xy = torch.clamp(Hyperboloid.minkowski_dot(x, y) + K, max=-Hyperboloid.eps[x.dtype]) - K
u = y + xy * x * c
normu = Hyperboloid.minkowski_norm(u)
normu = torch.clamp(normu, min=Hyperboloid.min_norm)
dist = Hyperboloid.sqdist(x, y, c) ** 0.5
result = dist * u / normu
return Hyperboloid.proj_tan(result, x, c)
@classmethod
def expmap0(self, u, c):
K = 1. / c
sqrtK = K ** 0.5
d = u.size(-1) - 1
x = u.narrow(-1, 1, d).view(-1, d)
x_norm = torch.norm(x, p=2, dim=1, keepdim=True)
x_norm = torch.clamp(x_norm, min=Hyperboloid.min_norm)
theta = x_norm / sqrtK
res = torch.ones_like(u)
res[:, 0:1] = sqrtK * cosh(theta)
res[:, 1:] = sqrtK * sinh(theta) * x / x_norm
return Hyperboloid.proj(res, c)
@classmethod
def logmap0(self, x, c):
K = 1. / c
sqrtK = K ** 0.5
d = x.size(-1) - 1
y = x.narrow(-1, 1, d).view(-1, d)
y_norm = torch.norm(y, p=2, dim=1, keepdim=True)
y_norm = torch.clamp(y_norm, min=Hyperboloid.min_norm)
res = torch.zeros_like(x)
theta = torch.clamp(x[:, 0:1] / sqrtK, min=1.0 + Hyperboloid.eps[x.dtype])
res[:, 1:] = sqrtK * arcosh(theta) * y / y_norm
return res
@classmethod
def mobius_add(self, x, y, c):
u = Hyperboloid.logmap0(y, c)
v = Hyperboloid.ptransp0(x, u, c)
return Hyperboloid.expmap(v, x, c)
@classmethod
def mobius_matvec(self, m, x, c):
"""
Transform the vector to Euclidean apply matmul and transform back to
Hyperbolic space.
"""
u = Hyperboloid.logmap0(x, c)
mu = u @ m.transpose(-1, -2)
return Hyperboloid.expmap0(mu, c)
@classmethod
def ptransp(self, x, y, u, c):
logxy = Hyperboloid.logmap(x, y, c)
logyx = Hyperboloid.logmap(y, x, c)
sqdist = torch.clamp(self.sqdist(x, y, c), min=self.min_norm)
alpha = Hyperboloid.minkowski_dot(logxy, u) / sqdist
res = u - alpha * (logxy + logyx)
return Hyperboloid.proj_tan(res, y, c)
@classmethod
def ptransp0(self, x, u, c):
K = 1. / c
sqrtK = K ** 0.5
x0 = x.narrow(-1, 0, 1)
d = x.size(-1) - 1
y = x.narrow(-1, 1, d)
y_norm = torch.clamp(torch.norm(y, p=2, dim=1, keepdim=True), min=self.min_norm)
y_normalized = y / y_norm
v = torch.ones_like(x)
v[:, 0:1] = - y_norm
v[:, 1:] = (sqrtK - x0) * y_normalized
alpha = torch.sum(y_normalized * u[:, 1:], dim=1, keepdim=True) / sqrtK
res = u - alpha * v
return Hyperboloid.proj_tan(res, x, c)
@classmethod
def to_hyperboloid(self, x, c):
K = 1./ c
sqrtK = K ** 0.5
sqnorm = torch.norm(x, p=2, dim=1, keepdim=True) ** 2
return sqrtK * torch.cat([K + sqnorm, 2 * sqrtK * x], dim=1) / (K - sqnorm)
@classmethod
def from_poincare(self, x, c=1, ideal=False):
"""Convert from Poincare ball model to hyperboloid model.
        Note: converting a point from the Poincare ball to the hyperboloid is
        reversible, i.e. p == to_poincare(from_poincare(p)).
Args:
x: torch.tensor of shape (..., dim)
ideal: boolean. Should be True if the input vectors are ideal points, False otherwise
Returns:
torch.tensor of shape (..., dim+1)
To do:
Add some capping to make things numerically stable. This is only needed in the case ideal == False
"""
if ideal:
t = torch.ones(x.shape[:-1], device=x.device).unsqueeze(-1)
return torch.cat((t, x), dim=-1)
else:
            K = 1. / c
sqrtK = K ** 0.5
eucl_squared_norm = (x * x).sum(dim=-1, keepdim=True)
return sqrtK * torch.cat((K + eucl_squared_norm, 2 * sqrtK * x), dim=-1) / (K - eucl_squared_norm).clamp_min(Hyperboloid.min_norm)
@classmethod
def concat(self, v, c):
"""
Note that the output dimension is (input_dim-1) * n + 1
"""
p = PoincareBall.from_hyperboloid(v, c)
p = PoincareBall.concat(p)
return Hyperboloid.from_poincare(p, c)
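# Minimal usage sketch (added for illustration; not part of the original
# module). The curvature value and tensor shapes below are hypothetical.
if __name__ == "__main__":
    c = torch.tensor(1.0)
    u = torch.randn(4, 5)                    # column 0 is ignored by expmap0
    x = Hyperboloid.expmap0(u, c)            # tangent vectors -> manifold points
    v = Hyperboloid.logmap0(x, c)            # back to the tangent space at the origin
    # Round trip should hold up to float32 tolerance, so this should print True
    print(torch.allclose(Hyperboloid.expmap0(v, c), x, atol=1e-3))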
|
[
"torch.ones_like",
"manifolds.poincare.PoincareBall.from_hyperboloid",
"torch.ones",
"torch.zeros_like",
"utils.math_utils.cosh",
"torch.norm",
"utils.math_utils.arcosh",
"torch.cat",
"torch.clamp",
"utils.math_utils.sinh",
"torch.sum",
"manifolds.poincare.PoincareBall.concat"
] |
[((1260, 1318), 'torch.clamp', 'torch.clamp', (['(-prod / K)'], {'min': '(1.0 + Hyperboloid.eps[x.dtype])'}), '(-prod / K, min=1.0 + Hyperboloid.eps[x.dtype])\n', (1271, 1318), False, 'import torch\n'), ((1436, 1465), 'torch.clamp', 'torch.clamp', (['sqdist'], {'max': '(50.0)'}), '(sqdist, max=50.0)\n', (1447, 1465), False, 'import torch\n'), ((1671, 1689), 'torch.ones_like', 'torch.ones_like', (['x'], {}), '(x)\n', (1686, 1689), False, 'import torch\n'), ((1728, 1747), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (1744, 1747), False, 'import torch\n'), ((2047, 2065), 'torch.ones_like', 'torch.ones_like', (['u'], {}), '(u)\n', (2062, 2065), False, 'import torch\n'), ((2104, 2123), 'torch.zeros_like', 'torch.zeros_like', (['u'], {}), '(u)\n', (2120, 2123), False, 'import torch\n'), ((2588, 2607), 'torch.zeros_like', 'torch.zeros_like', (['u'], {}), '(u)\n', (2604, 2607), False, 'import torch\n'), ((2905, 2949), 'torch.clamp', 'torch.clamp', (['normu'], {'max': 'Hyperboloid.max_norm'}), '(normu, max=Hyperboloid.max_norm)\n', (2916, 2949), False, 'import torch\n'), ((2996, 3040), 'torch.clamp', 'torch.clamp', (['theta'], {'min': 'Hyperboloid.min_norm'}), '(theta, min=Hyperboloid.min_norm)\n', (3007, 3040), False, 'import torch\n'), ((3405, 3449), 'torch.clamp', 'torch.clamp', (['normu'], {'min': 'Hyperboloid.min_norm'}), '(normu, min=Hyperboloid.min_norm)\n', (3416, 3449), False, 'import torch\n'), ((3762, 3801), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(x, p=2, dim=1, keepdim=True)\n', (3772, 3801), False, 'import torch\n'), ((3819, 3864), 'torch.clamp', 'torch.clamp', (['x_norm'], {'min': 'Hyperboloid.min_norm'}), '(x_norm, min=Hyperboloid.min_norm)\n', (3830, 3864), False, 'import torch\n'), ((3910, 3928), 'torch.ones_like', 'torch.ones_like', (['u'], {}), '(u)\n', (3925, 3928), False, 'import torch\n'), ((4243, 4282), 'torch.norm', 'torch.norm', (['y'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(y, p=2, dim=1, keepdim=True)\n', (4253, 4282), False, 'import torch\n'), ((4300, 4345), 'torch.clamp', 'torch.clamp', (['y_norm'], {'min': 'Hyperboloid.min_norm'}), '(y_norm, min=Hyperboloid.min_norm)\n', (4311, 4345), False, 'import torch\n'), ((4360, 4379), 'torch.zeros_like', 'torch.zeros_like', (['x'], {}), '(x)\n', (4376, 4379), False, 'import torch\n'), ((4396, 4462), 'torch.clamp', 'torch.clamp', (['(x[:, 0:1] / sqrtK)'], {'min': '(1.0 + Hyperboloid.eps[x.dtype])'}), '(x[:, 0:1] / sqrtK, min=1.0 + Hyperboloid.eps[x.dtype])\n', (4407, 4462), False, 'import torch\n'), ((5696, 5714), 'torch.ones_like', 'torch.ones_like', (['x'], {}), '(x)\n', (5711, 5714), False, 'import torch\n'), ((7399, 7434), 'manifolds.poincare.PoincareBall.from_hyperboloid', 'PoincareBall.from_hyperboloid', (['v', 'c'], {}), '(v, c)\n', (7428, 7434), False, 'from manifolds.poincare import PoincareBall\n'), ((7447, 7469), 'manifolds.poincare.PoincareBall.concat', 'PoincareBall.concat', (['p'], {}), '(p)\n', (7466, 7469), False, 'from manifolds.poincare import PoincareBall\n'), ((790, 814), 'torch.sum', 'torch.sum', (['(x * y)'], {'dim': '(-1)'}), '(x * y, dim=-1)\n', (799, 814), False, 'import torch\n'), ((1081, 1127), 'torch.clamp', 'torch.clamp', (['dot'], {'min': 'Hyperboloid.eps[u.dtype]'}), '(dot, min=Hyperboloid.eps[u.dtype])\n', (1092, 1127), False, 'import torch\n'), ((1610, 1649), 'torch.norm', 'torch.norm', (['y'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(y, p=2, dim=1, keepdim=True)\n', (1620, 1649), False, 'import torch\n'), 
((1782, 1837), 'torch.clamp', 'torch.clamp', (['(K + y_sqnorm)'], {'min': 'Hyperboloid.eps[x.dtype]'}), '(K + y_sqnorm, min=Hyperboloid.eps[x.dtype])\n', (1793, 1837), False, 'import torch\n'), ((2152, 2204), 'torch.clamp', 'torch.clamp', (['x[:, 0:1]'], {'min': 'Hyperboloid.eps[x.dtype]'}), '(x[:, 0:1], min=Hyperboloid.eps[x.dtype])\n', (2163, 2204), False, 'import torch\n'), ((3959, 3970), 'utils.math_utils.cosh', 'cosh', (['theta'], {}), '(theta)\n', (3963, 3970), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((5590, 5629), 'torch.norm', 'torch.norm', (['y'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(y, p=2, dim=1, keepdim=True)\n', (5600, 5629), False, 'import torch\n'), ((5808, 5863), 'torch.sum', 'torch.sum', (['(y_normalized * u[:, 1:])'], {'dim': '(1)', 'keepdim': '(True)'}), '(y_normalized * u[:, 1:], dim=1, keepdim=True)\n', (5817, 5863), False, 'import torch\n'), ((6061, 6100), 'torch.norm', 'torch.norm', (['x'], {'p': '(2)', 'dim': '(1)', 'keepdim': '(True)'}), '(x, p=2, dim=1, keepdim=True)\n', (6071, 6100), False, 'import torch\n'), ((6953, 6978), 'torch.cat', 'torch.cat', (['(t, x)'], {'dim': '(-1)'}), '((t, x), dim=-1)\n', (6962, 6978), False, 'import torch\n'), ((1340, 1353), 'utils.math_utils.arcosh', 'arcosh', (['theta'], {}), '(theta)\n', (1346, 1353), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((3058, 3069), 'utils.math_utils.cosh', 'cosh', (['theta'], {}), '(theta)\n', (3062, 3069), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((6129, 6174), 'torch.cat', 'torch.cat', (['[K + sqnorm, 2 * sqrtK * x]'], {'dim': '(1)'}), '([K + sqnorm, 2 * sqrtK * x], dim=1)\n', (6138, 6174), False, 'import torch\n'), ((3076, 3087), 'utils.math_utils.sinh', 'sinh', (['theta'], {}), '(theta)\n', (3080, 3087), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((4000, 4011), 'utils.math_utils.sinh', 'sinh', (['theta'], {}), '(theta)\n', (4004, 4011), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((4492, 4505), 'utils.math_utils.arcosh', 'arcosh', (['theta'], {}), '(theta)\n', (4498, 4505), False, 'from utils.math_utils import arcosh, cosh, sinh\n'), ((6878, 6919), 'torch.ones', 'torch.ones', (['x.shape[:-1]'], {'device': 'x.device'}), '(x.shape[:-1], device=x.device)\n', (6888, 6919), False, 'import torch\n'), ((7137, 7194), 'torch.cat', 'torch.cat', (['(K + eucl_squared_norm, 2 * sqrtK * x)'], {'dim': '(-1)'}), '((K + eucl_squared_norm, 2 * sqrtK * x), dim=-1)\n', (7146, 7194), False, 'import torch\n')]
|
import PIL
from PIL import Image, ImageDraw
import math
def generate(width, height, fg, bkg, n, steps, substeps, length, angleincr, angle, exportPath):
bkg_color = (int(bkg[1:3], base=16), int(bkg[3:5], base=16), int(bkg[5:7], base=16))
image = Image.new('RGB', (width, height), color=bkg_color)
draw = ImageDraw.Draw(image)
pt = [(-200), image.size[1]-2]
points = []
collatz_up(steps, substeps, n, pt, length,
angle, angleincr, draw, fg, points)
    # Pop each pending branch before recursing so the points list is never
    # mutated while it is being iterated over.
    while points:
        steps, n, x, y, angle = points.pop(0)
        collatz_up(steps, substeps, n, [x, y], length, angle,
                   angleincr, draw, fg, points)
try:
image.save(exportPath)
    except Exception:
return 418
return 201
def is_even(n):
    return (n % 2) == 0
def collatz_up(steps, substeps, n, pt, length, angle, angleincr, draw, fg, points):
    # Walk the Collatz tree upwards from n (n -> 2n), drawing one segment per
    # step and queueing a branch at (n-1)//3 whenever n-1 is divisible by 3.
    for i in range(steps):
if is_even(n):
if (n-1) % 3 == 0:
tmp = (n-1)//3
points.append((steps-i, tmp, pt[0], pt[1], angle))
for _ in range(substeps):
angle += (angleincr//substeps)
incx, incy = increment(angle, length, substeps)
draw.line((pt[0], pt[1], pt[0]+incx, pt[1]+incy), fill=fg)
pt = [pt[0]+incx, pt[1]+incy]
else:
for _ in range(substeps):
angle -= (angleincr//substeps)
incx, incy = increment(angle, length, substeps)
draw.line((pt[0], pt[1], pt[0]+incx, pt[1]+incy), fill=fg)
pt = [pt[0]+incx, pt[1]+incy]
n = n*2
def increment(angle, length, substeps):
    # sin/cos are periodic, so the angle never needs to be wrapped into [0, 360)
    incx = round(math.sin(math.radians(angle)) * length // substeps)
    incy = round(math.cos(math.radians(angle)) * length // substeps)
return incx, incy
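# Usage sketch (illustrative only; every argument value below is hypothetical):
if __name__ == "__main__":
    status = generate(width=800, height=600, fg="#ffffff", bkg="#000000",
                      n=27, steps=12, substeps=4, length=40,
                      angleincr=16, angle=0, exportPath="collatz.png")
    print(status)  # 201 on success, 418 if the image could not be saved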
|
[
"PIL.ImageDraw.Draw",
"PIL.Image.new",
"math.radians"
] |
[((254, 304), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)'], {'color': 'bkg_color'}), "('RGB', (width, height), color=bkg_color)\n", (263, 304), False, 'from PIL import Image, ImageDraw\n'), ((316, 337), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['image'], {}), '(image)\n', (330, 337), False, 'from PIL import Image, ImageDraw\n'), ((1912, 1931), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (1924, 1931), False, 'import math\n'), ((1977, 1996), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (1989, 1996), False, 'import math\n'), ((2088, 2107), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (2100, 2107), False, 'import math\n'), ((2157, 2176), 'math.radians', 'math.radians', (['angle'], {}), '(angle)\n', (2169, 2176), False, 'import math\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
from keras import Sequential
from keras.layers import Bidirectional, Dense, Embedding, LSTM, TimeDistributed
from keras_contrib.layers.crf import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
class BiLSTMCRF:
def __init__(self, batch_size, epochs_num, embedding_dim, embedding_mat, validation_split,
tags, labels, vocab, model_filepath):
self.batch_size = batch_size
self.epochs_num = epochs_num
self.embedding_dim = embedding_dim
self.embedding_mat = embedding_mat
self.validation_split = validation_split
self.tags = tags
self.labels = labels
self.vocab = vocab
self.model_filepath = model_filepath
self.model = self.__build_model()
def train(self, train_x, train_y):
hist = self.model.fit(train_x, train_y,
batch_size=self.batch_size,
epochs=self.epochs_num,
validation_split=self.validation_split)
with open('bilstm_crf_model.pkl', 'wb') as file_pi:
pickle.dump(hist.history, file_pi)
self.model.save(self.model_filepath)
def __build_model(self):
model = Sequential()
embedding_layer = Embedding(input_dim=len(self.vocab) + 1,
output_dim=self.embedding_dim,
weights=[self.embedding_mat],
trainable=False)
model.add(embedding_layer)
bilstm_layer = Bidirectional(LSTM(units=256, return_sequences=True))
model.add(bilstm_layer)
model.add(TimeDistributed(Dense(256, activation="relu")))
crf_layer = CRF(units=len(self.tags), sparse_target=True)
model.add(crf_layer)
model.compile(optimizer="adam", loss=crf_loss, metrics=[crf_viterbi_accuracy])
model.summary()
return model
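# Instantiation sketch (illustrative; not part of the original module). The
# vocabulary, tag set, and embedding matrix are hypothetical placeholders for
# whatever the surrounding pipeline provides.
if __name__ == "__main__":
    import numpy as np
    vocab = {"the": 1, "cat": 2, "sat": 3}
    tags = ["O", "B-PER", "I-PER"]
    embedding_mat = np.random.rand(len(vocab) + 1, 100)
    bilstm_crf = BiLSTMCRF(batch_size=32, epochs_num=10, embedding_dim=100,
                          embedding_mat=embedding_mat, validation_split=0.1,
                          tags=tags, labels=[], vocab=vocab,
                          model_filepath="bilstm_crf.h5")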
|
[
"keras.Sequential",
"keras.layers.LSTM",
"pickle.dump",
"keras.layers.Dense"
] |
[((1312, 1324), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (1322, 1324), False, 'from keras import Sequential\n'), ((1186, 1220), 'pickle.dump', 'pickle.dump', (['hist.history', 'file_pi'], {}), '(hist.history, file_pi)\n', (1197, 1220), False, 'import pickle\n'), ((1652, 1690), 'keras.layers.LSTM', 'LSTM', ([], {'units': '(256)', 'return_sequences': '(True)'}), '(units=256, return_sequences=True)\n', (1656, 1690), False, 'from keras.layers import Bidirectional, Dense, Embedding, LSTM, TimeDistributed\n'), ((1759, 1788), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1764, 1788), False, 'from keras.layers import Bidirectional, Dense, Embedding, LSTM, TimeDistributed\n')]
|
from __future__ import absolute_import
from django.contrib.auth.models import Group
from django.test import TestCase
from hs_core import hydroshare
class TestGroupFromId(TestCase):
def setUp(self):
self.group, _ = Group.objects.get_or_create(name='Resource Author')
self.user = hydroshare.create_account(
'<EMAIL>',
username='jamy1',
first_name='Tian',
last_name='Gan',
superuser=False,
groups=[]
)
self.group = self.user.uaccess.create_group(title='Jamy group', description='This is Jamy group')
def test_accept_group_instance(self):
        self.assertEqual(
hydroshare.group_from_id(self.group),
self.group,
msg='group did not match'
)
def test_accept_group_name(self):
self.assertEqual(
hydroshare.group_from_id(self.group.name),
self.group,
msg='lookup group name failed'
)
def test_accept_group_pk(self):
self.assertEqual(
hydroshare.group_from_id(self.group.pk),
self.group,
msg='lookup group id failed'
)
|
[
"hs_core.hydroshare.create_account",
"django.contrib.auth.models.Group.objects.get_or_create",
"hs_core.hydroshare.group_from_id"
] |
[((231, 282), 'django.contrib.auth.models.Group.objects.get_or_create', 'Group.objects.get_or_create', ([], {'name': '"""Resource Author"""'}), "(name='Resource Author')\n", (258, 282), False, 'from django.contrib.auth.models import Group\n'), ((303, 425), 'hs_core.hydroshare.create_account', 'hydroshare.create_account', (['"""<EMAIL>"""'], {'username': '"""jamy1"""', 'first_name': '"""Tian"""', 'last_name': '"""Gan"""', 'superuser': '(False)', 'groups': '[]'}), "('<EMAIL>', username='jamy1', first_name='Tian',\n last_name='Gan', superuser=False, groups=[])\n", (328, 425), False, 'from hs_core import hydroshare\n'), ((692, 728), 'hs_core.hydroshare.group_from_id', 'hydroshare.group_from_id', (['self.group'], {}), '(self.group)\n', (716, 728), False, 'from hs_core import hydroshare\n'), ((879, 920), 'hs_core.hydroshare.group_from_id', 'hydroshare.group_from_id', (['self.group.name'], {}), '(self.group.name)\n', (903, 920), False, 'from hs_core import hydroshare\n'), ((1074, 1113), 'hs_core.hydroshare.group_from_id', 'hydroshare.group_from_id', (['self.group.pk'], {}), '(self.group.pk)\n', (1098, 1113), False, 'from hs_core import hydroshare\n')]
|
import os
import unittest
from django.test.simple import DjangoTestSuiteRunner
from django.utils.importlib import import_module
from django.conf import settings
class HumfreyTestSuiteRunner(DjangoTestSuiteRunner):
def run_suite(self, suite, **kwargs):
if os.environ.get('HUMFREY_JUNIT_TEST'):
import junitxml
report_filename = os.path.join(os.path.dirname(__file__), '..', 'xmlresults.xml')
with open(report_filename, 'w') as report:
result = junitxml.JUnitXmlResult(report)
result.startTestRun()
suite.run(result)
result.stopTestRun()
return result
else:
return super(HumfreyTestSuiteRunner, self).run_suite(suite, **kwargs)
_ignore_test_modules = [
'django.contrib.auth.tests',
'django.contrib.auth.tests.context_processors',
'django.contrib.auth.tests.decorators',
'django.contrib.auth.tests.signals',
'django.contrib.auth.tests.views',
'django_conneg.tests.basic_auth_middleware',
'object_permissions.tests.backend',
'object_permissions.tests.groups',
'object_permissions.tests.permissions',
]
_ignore_test_modules.extend(getattr(settings, 'IGNORE_TEST_MODULES', ()))
def _filter_suite(self, suite):
tests = []
for testcase in suite._tests:
if type(testcase).__module__ in self._ignore_test_modules:
continue
if isinstance(testcase, unittest.TestSuite):
self._filter_suite(testcase)
if testcase._tests:
tests.append(testcase)
else:
tests.append(testcase)
suite._tests = tests
def build_suite(self, test_labels, extra_tests=None, **kwargs):
suite = super(HumfreyTestSuiteRunner, self).build_suite(test_labels, extra_tests=None, **kwargs)
for module_name in getattr(settings, 'EXTRA_TEST_MODULES', ()):
suite.addTests(unittest.findTestCases(import_module(module_name)))
self._filter_suite(suite)
return suite
try:
from django_jenkins.runner import CITestSuiteRunner
except ImportError:
pass
else:
class HumfreyJenkinsTestSuiteRunner(HumfreyTestSuiteRunner, CITestSuiteRunner):
def run_suite(self, suite, **kwargs):
return super(HumfreyTestSuiteRunner, self).run_suite(suite, **kwargs)
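# Typically enabled through Django settings (illustrative; the dotted path
# below is hypothetical and depends on where this module is installed):
#
#     TEST_RUNNER = 'humfrey.tests.runner.HumfreyTestSuiteRunner'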
|
[
"os.environ.get",
"os.path.dirname",
"django.utils.importlib.import_module",
"junitxml.JUnitXmlResult"
] |
[((522, 558), 'os.environ.get', 'os.environ.get', (['"""HUMFREY_JUNIT_TEST"""'], {}), "('HUMFREY_JUNIT_TEST')\n", (536, 558), False, 'import os\n'), ((631, 656), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (646, 656), False, 'import os\n'), ((762, 793), 'junitxml.JUnitXmlResult', 'junitxml.JUnitXmlResult', (['report'], {}), '(report)\n', (785, 793), False, 'import junitxml\n'), ((2312, 2338), 'django.utils.importlib.import_module', 'import_module', (['module_name'], {}), '(module_name)\n', (2325, 2338), False, 'from django.utils.importlib import import_module\n')]
|
from django.conf.urls import url
from drugs import views
urlpatterns = [
url(r'^drugbrowser', views.drugbrowser, name='drugbrowser'),
url(r'^drugstatistics', views.drugstatistics, name='drugstatistics'),
url(r'^drugmapping', views.drugmapping, name='drugmapping'),
]
|
[
"django.conf.urls.url"
] |
[((79, 137), 'django.conf.urls.url', 'url', (['"""^drugbrowser"""', 'views.drugbrowser'], {'name': '"""drugbrowser"""'}), "('^drugbrowser', views.drugbrowser, name='drugbrowser')\n", (82, 137), False, 'from django.conf.urls import url\n'), ((145, 212), 'django.conf.urls.url', 'url', (['"""^drugstatistics"""', 'views.drugstatistics'], {'name': '"""drugstatistics"""'}), "('^drugstatistics', views.drugstatistics, name='drugstatistics')\n", (148, 212), False, 'from django.conf.urls import url\n'), ((220, 278), 'django.conf.urls.url', 'url', (['"""^drugmapping"""', 'views.drugmapping'], {'name': '"""drugmapping"""'}), "('^drugmapping', views.drugmapping, name='drugmapping')\n", (223, 278), False, 'from django.conf.urls import url\n')]
|
from datetime import date
import pytest
from alpaca_trade_api.rest import TimeFrame
from pytz import timezone
from liualgotrader.common.data_loader import DataLoader  # type: ignore
from liualgotrader.common.types import DataConnectorType, TimeScale
from liualgotrader.data.alpaca import AlpacaData
nyc = timezone("America/New_York")
@pytest.mark.devtest
def test_crypto_get_symbol() -> bool:
alpaca_data = AlpacaData()
start = date(2021, 5, 1)
end = date(2021, 10, 1)
_start, _end = alpaca_data._localize_start_end(start, end)
df = alpaca_data.crypto_get_symbol_data(
symbol="BTCUSD", start=_start, end=_end, timeframe=TimeFrame.Day
)
print(df)
return True
@pytest.mark.devtest
def test_btc_data_loader_day() -> bool:
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
data = dl["BTCUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
@pytest.mark.devtest
def test_btc_data_loader_min() -> bool:
dl = DataLoader(connector=DataConnectorType.alpaca)
data = dl["BTCUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
@pytest.mark.devtest
def test_eth_data_loader_day() -> bool:
dl = DataLoader(TimeScale.day, connector=DataConnectorType.alpaca)
data = dl["ETHUSD"]["2021-05-01":"2021-10-01"] # type: ignore
print(data)
return True
|
[
"liualgotrader.common.data_loader.DataLoader",
"liualgotrader.data.alpaca.AlpacaData",
"datetime.date",
"pytz.timezone"
] |
[((392, 420), 'pytz.timezone', 'timezone', (['"""America/New_York"""'], {}), "('America/New_York')\n", (400, 420), False, 'from pytz import timezone\n'), ((500, 512), 'liualgotrader.data.alpaca.AlpacaData', 'AlpacaData', ([], {}), '()\n', (510, 512), False, 'from liualgotrader.data.alpaca import AlpacaData, AlpacaStream\n'), ((526, 542), 'datetime.date', 'date', (['(2021)', '(5)', '(1)'], {}), '(2021, 5, 1)\n', (530, 542), False, 'from datetime import date, datetime\n'), ((553, 570), 'datetime.date', 'date', (['(2021)', '(10)', '(1)'], {}), '(2021, 10, 1)\n', (557, 570), False, 'from datetime import date, datetime\n'), ((860, 921), 'liualgotrader.common.data_loader.DataLoader', 'DataLoader', (['TimeScale.day'], {'connector': 'DataConnectorType.alpaca'}), '(TimeScale.day, connector=DataConnectorType.alpaca)\n', (870, 921), False, 'from liualgotrader.common.data_loader import DataLoader\n'), ((1094, 1140), 'liualgotrader.common.data_loader.DataLoader', 'DataLoader', ([], {'connector': 'DataConnectorType.alpaca'}), '(connector=DataConnectorType.alpaca)\n', (1104, 1140), False, 'from liualgotrader.common.data_loader import DataLoader\n'), ((1313, 1374), 'liualgotrader.common.data_loader.DataLoader', 'DataLoader', (['TimeScale.day'], {'connector': 'DataConnectorType.alpaca'}), '(TimeScale.day, connector=DataConnectorType.alpaca)\n', (1323, 1374), False, 'from liualgotrader.common.data_loader import DataLoader\n')]
|
from __future__ import print_function
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.generic import CreateView, TemplateView
from django.views.generic.edit import UpdateView
from django.conf import settings
from django.template import RequestContext
from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect
from django.contrib import messages
from django.utils.translation import ugettext as _
from braces.views import LoginRequiredMixin
from carton.cart import Cart
from mcat.models import Product
from mcat_order.models import Customer, Order, OrderedProduct
from mcat_order.forms import CustomerForm
# =================================== Cart views ===========================
def add_to_cart(request, slug):
if request.is_ajax():
cart = Cart(request.session)
product = get_object_or_404(Product, slug=slug)
cart.add(product, price=product.price)
return render_to_response('mcat_order/cart.html',
{'product' : product},
context_instance=RequestContext(request),
content_type="application/xhtml+xml"
)
else:
if settings.DEBUG:
print("Not ajax request")
raise Http404
def remove_from_cart(request, slug):
if request.is_ajax():
cart = Cart(request.session)
product = get_object_or_404(Product, slug=slug)
cart.remove_single(product)
return render_to_response('mcat_order/cart.html',
context_instance=RequestContext(request),
content_type="application/xhtml+xml"
)
else:
if settings.DEBUG:
print("Not ajax request")
raise Http404
def clear_cart(request):
if request.is_ajax():
cart = Cart(request.session)
cart.clear()
return render_to_response('mcat_order/cart.html',
context_instance=RequestContext(request),
content_type="application/xhtml+xml"
)
else:
if settings.DEBUG:
print("Not ajax request")
raise Http404
# =================================== Order views ===========================
def order_dispatcher(request):
if request.is_ajax():
if request.user.is_authenticated():
# is there a customer for this user?
return HttpResponse("<script>self.location.href='"+reverse('mcat-customer-form')+"'</script>")
else:
login_url = settings.LOGIN_URL+'?next='+reverse('mcat-customer-form')
signup_url = reverse('account_signup')+'?next='+reverse('mcat-customer-form')
return render_to_response('mcat_order/login_choice.html',
{'login_url' : login_url, 'signup_url' : signup_url},
context_instance=RequestContext(request),
content_type="application/xhtml+xml"
)
else:
if settings.DEBUG:
print("Not ajax request")
raise Http404
class CustomerFormView(LoginRequiredMixin, CreateView):
model = Customer
form_class = CustomerForm
template_name = 'mcat_order/customer_form.html'
def dispatch(self, request, *args, **kwargs):
if Customer.objects.filter(user=request.user).exists():
return redirect('mcat-confirm-order')
return super(CustomerFormView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(CustomerFormView, self).get_context_data(**kwargs)
context['no_cart_icon'] = True
return context
def get_login_url(self):
return settings.LOGIN_URL+'?next='+reverse('mcat-customer-form')
def get_success_url(self):
return reverse('mcat-confirm-order')
def form_valid(self, form, **kwargs):
if self.request.method == "POST":
obj = form.save(commit=False)
obj.user = self.request.user
else:
raise Http404
return super(CustomerFormView, self).form_valid(form)
class ConfirmOrderView(LoginRequiredMixin, TemplateView):
template_name = 'mcat_order/confirm_order.html'
def get_login_url(self):
return settings.LOGIN_URL+'?next='+reverse('mcat-confirm-order')
def get_context_data(self, **kwargs):
context = super(ConfirmOrderView, self).get_context_data(**kwargs)
context['customer'] = get_object_or_404(Customer, user=self.request.user)
context['no_cart_icon'] = True
return context
class CustomerUpdateFormView(LoginRequiredMixin, UpdateView):
model = Customer
form_class = CustomerForm
template_name = 'mcat_order/customer_update_form.html'
def get_success_url(self):
return reverse('mcat-confirm-order')
class PostOrderView(LoginRequiredMixin, TemplateView):
template_name = 'mcat_order/posted_order.html'
login_url = settings.LOGIN_URL
def dispatch(self, request, *args, **kwargs):
self.customer = get_object_or_404(Customer, user=self.request.user)
        # create the order
cart = Cart(request.session)
if cart.count == 0:
messages.warning(self.request, _(u'The cart is empty: order cancelled'))
return super(PostOrderView, self).dispatch(request, *args, **kwargs)
order = Order.objects.create(customer=self.customer, total=cart.total)
for item in cart.items:
            # get the product
OrderedProduct.objects.create(product=item.product, order=order, quantity=item.quantity, price_per_unit=item.product.price)
cart.clear()
return super(PostOrderView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(PostOrderView, self).get_context_data(**kwargs)
context['customer'] = self.customer
context['no_cart_icon'] = True
return context
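# Hypothetical URL wiring for the views above (illustrative only; the real
# urlconf is not part of this module). The route names must match the ones
# passed to reverse() in the views:
#
#     urlpatterns = [
#         url(r'^order/customer/$', CustomerFormView.as_view(), name='mcat-customer-form'),
#         url(r'^order/confirm/$', ConfirmOrderView.as_view(), name='mcat-confirm-order'),
#         url(r'^order/done/$', PostOrderView.as_view(), name='mcat-post-order'),
#     ]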
|
[
"django.core.urlresolvers.reverse",
"django.shortcuts.redirect",
"carton.cart.Cart",
"django.shortcuts.get_object_or_404",
"mcat_order.models.Order.objects.create",
"mcat_order.models.OrderedProduct.objects.create",
"mcat_order.models.Customer.objects.filter",
"django.template.RequestContext"
] |
[((878, 899), 'carton.cart.Cart', 'Cart', (['request.session'], {}), '(request.session)\n', (882, 899), False, 'from carton.cart import Cart\n'), ((918, 955), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Product'], {'slug': 'slug'}), '(Product, slug=slug)\n', (935, 955), False, 'from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect\n'), ((1485, 1506), 'carton.cart.Cart', 'Cart', (['request.session'], {}), '(request.session)\n', (1489, 1506), False, 'from carton.cart import Cart\n'), ((1525, 1562), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Product'], {'slug': 'slug'}), '(Product, slug=slug)\n', (1542, 1562), False, 'from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect\n'), ((2007, 2028), 'carton.cart.Cart', 'Cart', (['request.session'], {}), '(request.session)\n', (2011, 2028), False, 'from carton.cart import Cart\n'), ((4122, 4151), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-confirm-order"""'], {}), "('mcat-confirm-order')\n", (4129, 4151), False, 'from django.core.urlresolvers import reverse\n'), ((4798, 4849), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Customer'], {'user': 'self.request.user'}), '(Customer, user=self.request.user)\n', (4815, 4849), False, 'from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect\n'), ((5137, 5166), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-confirm-order"""'], {}), "('mcat-confirm-order')\n", (5144, 5166), False, 'from django.core.urlresolvers import reverse\n'), ((5392, 5443), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Customer'], {'user': 'self.request.user'}), '(Customer, user=self.request.user)\n', (5409, 5443), False, 'from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect\n'), ((5487, 5508), 'carton.cart.Cart', 'Cart', (['request.session'], {}), '(request.session)\n', (5491, 5508), False, 'from carton.cart import Cart\n'), ((5719, 5781), 'mcat_order.models.Order.objects.create', 'Order.objects.create', ([], {'customer': 'self.customer', 'total': 'cart.total'}), '(customer=self.customer, total=cart.total)\n', (5739, 5781), False, 'from mcat_order.models import Customer, Order, OrderedProduct\n'), ((3669, 3699), 'django.shortcuts.redirect', 'redirect', (['"""mcat-confirm-order"""'], {}), "('mcat-confirm-order')\n", (3677, 3699), False, 'from django.shortcuts import get_object_or_404, render_to_response, Http404, redirect\n'), ((4041, 4070), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-customer-form"""'], {}), "('mcat-customer-form')\n", (4048, 4070), False, 'from django.core.urlresolvers import reverse\n'), ((4616, 4645), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-confirm-order"""'], {}), "('mcat-confirm-order')\n", (4623, 4645), False, 'from django.core.urlresolvers import reverse\n'), ((5857, 5985), 'mcat_order.models.OrderedProduct.objects.create', 'OrderedProduct.objects.create', ([], {'product': 'item.product', 'order': 'order', 'quantity': 'item.quantity', 'price_per_unit': 'item.product.price'}), '(product=item.product, order=order, quantity=\n item.quantity, price_per_unit=item.product.price)\n', (5886, 5985), False, 'from mcat_order.models import Customer, Order, OrderedProduct\n'), ((1171, 1194), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1185, 1194), False, 'from django.template import RequestContext\n'), ((1709, 1732), 
'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1723, 1732), False, 'from django.template import RequestContext\n'), ((2160, 2183), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (2174, 2183), False, 'from django.template import RequestContext\n'), ((2793, 2822), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-customer-form"""'], {}), "('mcat-customer-form')\n", (2800, 2822), False, 'from django.core.urlresolvers import reverse\n'), ((2883, 2912), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-customer-form"""'], {}), "('mcat-customer-form')\n", (2890, 2912), False, 'from django.core.urlresolvers import reverse\n'), ((3597, 3639), 'mcat_order.models.Customer.objects.filter', 'Customer.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (3620, 3639), False, 'from mcat_order.models import Customer, Order, OrderedProduct\n'), ((2848, 2873), 'django.core.urlresolvers.reverse', 'reverse', (['"""account_signup"""'], {}), "('account_signup')\n", (2855, 2873), False, 'from django.core.urlresolvers import reverse\n'), ((3131, 3154), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (3145, 3154), False, 'from django.template import RequestContext\n'), ((2683, 2712), 'django.core.urlresolvers.reverse', 'reverse', (['"""mcat-customer-form"""'], {}), "('mcat-customer-form')\n", (2690, 2712), False, 'from django.core.urlresolvers import reverse\n')]
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import polyaxon_sdk
from marshmallow import fields, validate
from polyaxon.contexts import refs as contexts_refs
from polyaxon.lifecycle import V1Statuses
from polyaxon.schemas.base import BaseCamelSchema, BaseConfig
class V1EventKind(polyaxon_sdk.V1EventKind):
events_statuses_mapping = {
polyaxon_sdk.V1EventKind.RUN_STATUS_CREATED: V1Statuses.CREATED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RESUMING: V1Statuses.RESUMING,
polyaxon_sdk.V1EventKind.RUN_STATUS_ON_SCHEDULE: V1Statuses.ON_SCHEDULE,
polyaxon_sdk.V1EventKind.RUN_STATUS_COMPILED: V1Statuses.COMPILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_QUEUED: V1Statuses.QUEUED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SCHEDULED: V1Statuses.SCHEDULED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STARTING: V1Statuses.STARTING,
polyaxon_sdk.V1EventKind.RUN_STATUS_RUNNING: V1Statuses.RUNNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_PROCESSING: V1Statuses.PROCESSING,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPING: V1Statuses.STOPPING,
polyaxon_sdk.V1EventKind.RUN_STATUS_FAILED: V1Statuses.FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_STOPPED: V1Statuses.STOPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SUCCEEDED: V1Statuses.SUCCEEDED,
polyaxon_sdk.V1EventKind.RUN_STATUS_SKIPPED: V1Statuses.SKIPPED,
polyaxon_sdk.V1EventKind.RUN_STATUS_WARNING: V1Statuses.WARNING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNSCHEDULABLE: V1Statuses.UNSCHEDULABLE,
polyaxon_sdk.V1EventKind.RUN_STATUS_UPSTREAM_FAILED: V1Statuses.UPSTREAM_FAILED,
polyaxon_sdk.V1EventKind.RUN_STATUS_RETRYING: V1Statuses.RETRYING,
polyaxon_sdk.V1EventKind.RUN_STATUS_UNKNOWN: V1Statuses.UNKNOWN,
polyaxon_sdk.V1EventKind.RUN_STATUS_DONE: V1Statuses.DONE,
}
class EventTriggerSchema(BaseCamelSchema):
kinds = fields.List(
fields.Str(validate=validate.OneOf(V1EventKind.allowable_values)),
required=True,
)
ref = fields.Str(required=True)
@staticmethod
def schema_config():
return V1EventTrigger
class V1EventTrigger(BaseConfig, contexts_refs.RefMixin, polyaxon_sdk.V1EventTrigger):
"""Events are an advanced triggering logic that users can take advantage of in addition to:
* Manual triggers via API/CLI/UI.
* Time-based triggers with schedules and crons.
* Upstream triggers with upstream runs or upstream ops in DAGs.
    Events can be attached to an operation in the context of a DAG
    to extend the simple trigger process;
    this is generally important when the user defines a dependency between two operations
    and needs a run to start as soon as
    the upstream run generates an event, instead of waiting until it reaches a final state.
For instance, a usual use-case is to start a tensorboard as soon as training starts.
In that case the downstream operation will watch for the `running` status.
Events can be attached as well to a single operation
to wait for an internal alert or external events,
for instance if a user integrates Polyaxon with Github,
they can trigger training as soon as Polyaxon is notified that a new git commit was created.
Polyaxon provides several internal and external events that users
can leverage to fully automate their usage of the platform:
* "run_status_created"
* "run_status_resuming"
* "run_status_compiled"
* "run_status_queued"
* "run_status_scheduled"
* "run_status_starting"
* "run_status_initializing"
* "run_status_running"
* "run_status_processing"
* "run_status_stopping"
* "run_status_failed"
* "run_status_stopped"
* "run_status_succeeded"
* "run_status_skipped"
* "run_status_warning"
* "run_status_unschedulable"
* "run_status_upstream_failed"
* "run_status_retrying"
* "run_status_unknown"
* "run_status_done"
* "run_approved_actor"
* "run_invalidated_actor"
* "run_new_artifacts"
* "connection_git_commit"
* "connection_dataset_version"
* "connection_registry_image"
* "alert_info"
* "alert_warning"
* "alert_critical"
* "model_version_new_metric"
* "project_custom_event"
* "org_custom_event"
Args:
kinds: List[str]
ref: str
> **Important**: Currently only events with prefix `run_status_*` are supported.
## YAML usage
```yaml
>>> events:
>>> ref: {{ ops.upstream-operation }}
>>> kinds: [run_status_running]
```
```yaml
>>> event:
>>> ref: {{ connections.git-repo-connection-name }}
>>> kinds: [connection_git_commit]
```
## Python usage
```python
>>> from polyaxon.polyflow import V1EventKind, V1EventTrigger
>>> event1 = V1EventTrigger(
>>> ref="{{ ops.upstream-operation }}",
>>> kinds=[V1EventTrigger.RUN_STATUS_RUNNING],
>>> )
>>> event2 = V1EventTrigger(
>>> ref="{{ connections.git-repo-connection-name }}",
>>> kinds=[V1EventTrigger.CONNECTION_GIT_COMMIT],
>>> )
```
## Fields
### kinds
The trigger event kinds to watch, if any event is detected the operation defining the `events`
section will be initiated.
```yaml
>>> event:
>>> kinds: [run_status_running, run_status_done]
```
> **Note**: Similar to trigger in DAGs, after an operation is initiated,
> it will still have to validate the rest of the Polyaxonfile,
> i.e. conditions, contexts, connections, ...
### ref
A valid reference that Polyaxon can resolve the objects that will send the events to watch for.
All supported events are prefixed with the object reference that can send such events.
The `run_*` events can be referenced both by `runs.UUID` or
`ops.OPERATION_NAME` if defined in the context of a DAG.
```yaml
>>> event:
>>> ref: ops.upstream_operation_name
```
"""
IDENTIFIER = "event_trigger"
SCHEMA = EventTriggerSchema
REDUCED_ATTRIBUTES = [
"ref",
]
|
[
"marshmallow.fields.Str",
"marshmallow.validate.OneOf"
] |
[((2603, 2628), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (2613, 2628), False, 'from marshmallow import fields, validate\n'), ((2517, 2561), 'marshmallow.validate.OneOf', 'validate.OneOf', (['V1EventKind.allowable_values'], {}), '(V1EventKind.allowable_values)\n', (2531, 2561), False, 'from marshmallow import fields, validate\n')]
|
from astropy import units as u
from glue.core.message import (DataCollectionAddMessage,
DataCollectionDeleteMessage)
from glue.core import Data
from glue.core.link_helpers import LinkSame
from spectral_cube import SpectralCube
from specutils import SpectralRegion
from traitlets import List, Unicode, Int, Any, observe
from regions import RectanglePixelRegion
from jdaviz.core.events import SnackbarMessage
from jdaviz.core.registries import tray_registry
from jdaviz.core.template_mixin import TemplateMixin
from jdaviz.utils import load_template
__all__ = ['Collapse']
spaxel = u.def_unit('spaxel', 1 * u.Unit(""))
u.add_enabled_units([spaxel])
# Mapping of pixel axes before and after collapse, as a function of selected axis
AXES_MAPPING = [((1, 2), (0, 1)), ((0, 2), (0, 1)), ((0, 1), (0, 1))]
@tray_registry('g-collapse', label="Collapse")
class Collapse(TemplateMixin):
template = load_template("collapse.vue", __file__).tag(sync=True)
data_items = List([]).tag(sync=True)
selected_data_item = Unicode().tag(sync=True)
axes = List([]).tag(sync=True)
selected_axis = Int(0).tag(sync=True)
funcs = List(['Mean', 'Median', 'Min', 'Max']).tag(sync=True)
selected_func = Unicode('Mean').tag(sync=True)
spectral_min = Any().tag(sync=True)
spectral_max = Any().tag(sync=True)
spectral_unit = Unicode().tag(sync=True)
spectral_subset_items = List(["None"]).tag(sync=True)
selected_subset = Unicode("None").tag(sync=True)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.hub.subscribe(self, DataCollectionAddMessage,
handler=self._on_data_updated)
self.hub.subscribe(self, DataCollectionDeleteMessage,
handler=self._on_data_updated)
self._selected_data = None
def _on_data_updated(self, msg):
self.data_items = [x.label for x in self.data_collection]
# Default to selecting the first loaded cube
if self._selected_data is None:
for i in range(len(self.data_items)):
try:
self.selected_data_item = self.data_items[i]
except (ValueError, TypeError):
continue
@observe('selected_data_item')
def _on_data_item_selected(self, event):
self._selected_data = next((x for x in self.data_collection
if x.label == event['new']))
# Also set the spectral min and max to default to the full range
cube = self._selected_data.get_object(cls=SpectralCube)
self.spectral_min = cube.spectral_axis[0].value
self.spectral_max = cube.spectral_axis[-1].value
self.spectral_unit = str(cube.spectral_axis.unit)
self.axes = list(range(len(self._selected_data.shape)))
@observe("selected_subset")
def _on_subset_selected(self, event):
# If "None" selected, reset based on bounds of selected data
self._selected_subset = self.selected_subset
if self._selected_subset == "None":
cube = self._selected_data.get_object(cls=SpectralCube)
self.spectral_min = cube.spectral_axis[0].value
self.spectral_max = cube.spectral_axis[-1].value
else:
spec_sub = self._spectral_subsets[self._selected_subset]
unit = u.Unit(self.spectral_unit)
spec_reg = SpectralRegion.from_center(spec_sub.center.x * unit,
spec_sub.width * unit)
self.spectral_min = spec_reg.lower.value
self.spectral_max = spec_reg.upper.value
def vue_list_subsets(self, event):
"""Populate the spectral subset selection dropdown"""
temp_subsets = self.app.get_subsets_from_viewer("spectrum-viewer")
temp_list = ["None"]
temp_dict = {}
# Attempt to filter out spatial subsets
for key, region in temp_subsets.items():
if type(region) == RectanglePixelRegion:
temp_dict[key] = region
temp_list.append(key)
self._spectral_subsets = temp_dict
self.spectral_subset_items = temp_list
def vue_collapse(self, *args, **kwargs):
try:
spec = self._selected_data.get_object(cls=SpectralCube)
except AttributeError:
snackbar_message = SnackbarMessage(
f"Unable to perform collapse over selected data.",
color="error",
sender=self)
self.hub.broadcast(snackbar_message)
return
# If collapsing over the spectral axis, cut out the desired spectral
# region. Defaults to the entire spectrum.
if self.selected_axis == 0:
spec_min = float(self.spectral_min) * u.Unit(self.spectral_unit)
spec_max = float(self.spectral_max) * u.Unit(self.spectral_unit)
spec = spec.spectral_slab(spec_min, spec_max)
collapsed_spec = getattr(spec, self.selected_func.lower())(
axis=self.selected_axis)
data = Data(coords=collapsed_spec.wcs)
data['flux'] = collapsed_spec.filled_data[...]
data.get_component('flux').units = str(collapsed_spec.unit)
data.meta.update(collapsed_spec.meta)
label = f"Collapsed {self._selected_data.label}"
self.data_collection[label] = data
# Link the new dataset pixel-wise to the original dataset. In general
# direct pixel to pixel links are the most efficient and should be
# used in cases like this where we know there is a 1-to-1 mapping of
# pixel coordinates. Here which axes are linked to which depends on
# the selected axis.
(i1, i2), (i1c, i2c) = AXES_MAPPING[self.selected_axis]
self.data_collection.add_link(LinkSame(self._selected_data.pixel_component_ids[i1],
self.data_collection[label].pixel_component_ids[i1c]))
self.data_collection.add_link(LinkSame(self._selected_data.pixel_component_ids[i2],
self.data_collection[label].pixel_component_ids[i2c]))
snackbar_message = SnackbarMessage(
f"Data set '{self._selected_data.label}' collapsed successfully.",
color="success",
sender=self)
self.hub.broadcast(snackbar_message)
|
[
"traitlets.Int",
"traitlets.List",
"glue.core.link_helpers.LinkSame",
"jdaviz.utils.load_template",
"jdaviz.core.events.SnackbarMessage",
"specutils.SpectralRegion.from_center",
"traitlets.Unicode",
"astropy.units.add_enabled_units",
"glue.core.Data",
"jdaviz.core.registries.tray_registry",
"traitlets.observe",
"traitlets.Any",
"astropy.units.Unit"
] |
[((652, 681), 'astropy.units.add_enabled_units', 'u.add_enabled_units', (['[spaxel]'], {}), '([spaxel])\n', (671, 681), True, 'from astropy import units as u\n'), ((839, 884), 'jdaviz.core.registries.tray_registry', 'tray_registry', (['"""g-collapse"""'], {'label': '"""Collapse"""'}), "('g-collapse', label='Collapse')\n", (852, 884), False, 'from jdaviz.core.registries import tray_registry\n'), ((2282, 2311), 'traitlets.observe', 'observe', (['"""selected_data_item"""'], {}), "('selected_data_item')\n", (2289, 2311), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((2870, 2896), 'traitlets.observe', 'observe', (['"""selected_subset"""'], {}), "('selected_subset')\n", (2877, 2896), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((640, 650), 'astropy.units.Unit', 'u.Unit', (['""""""'], {}), "('')\n", (646, 650), True, 'from astropy import units as u\n'), ((5126, 5157), 'glue.core.Data', 'Data', ([], {'coords': 'collapsed_spec.wcs'}), '(coords=collapsed_spec.wcs)\n', (5130, 5157), False, 'from glue.core import Data\n'), ((6246, 6367), 'jdaviz.core.events.SnackbarMessage', 'SnackbarMessage', (['f"""Data set \'{self._selected_data.label}\' collapsed successfully."""'], {'color': '"""success"""', 'sender': 'self'}), '(\n f"Data set \'{self._selected_data.label}\' collapsed successfully.",\n color=\'success\', sender=self)\n', (6261, 6367), False, 'from jdaviz.core.events import SnackbarMessage\n'), ((931, 970), 'jdaviz.utils.load_template', 'load_template', (['"""collapse.vue"""', '__file__'], {}), "('collapse.vue', __file__)\n", (944, 970), False, 'from jdaviz.utils import load_template\n'), ((1003, 1011), 'traitlets.List', 'List', (['[]'], {}), '([])\n', (1007, 1011), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1052, 1061), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (1059, 1061), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1088, 1096), 'traitlets.List', 'List', (['[]'], {}), '([])\n', (1092, 1096), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1132, 1138), 'traitlets.Int', 'Int', (['(0)'], {}), '(0)\n', (1135, 1138), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1166, 1204), 'traitlets.List', 'List', (["['Mean', 'Median', 'Min', 'Max']"], {}), "(['Mean', 'Median', 'Min', 'Max'])\n", (1170, 1204), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1240, 1255), 'traitlets.Unicode', 'Unicode', (['"""Mean"""'], {}), "('Mean')\n", (1247, 1255), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1291, 1296), 'traitlets.Any', 'Any', ([], {}), '()\n', (1294, 1296), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1331, 1336), 'traitlets.Any', 'Any', ([], {}), '()\n', (1334, 1336), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1372, 1381), 'traitlets.Unicode', 'Unicode', ([], {}), '()\n', (1379, 1381), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1425, 1439), 'traitlets.List', 'List', (["['None']"], {}), "(['None'])\n", (1429, 1439), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((1477, 1492), 'traitlets.Unicode', 'Unicode', (['"""None"""'], {}), "('None')\n", (1484, 1492), False, 'from traitlets import List, Unicode, Int, Any, observe\n'), ((3396, 3422), 'astropy.units.Unit', 'u.Unit', (['self.spectral_unit'], {}), '(self.spectral_unit)\n', (3402, 3422), True, 'from astropy import units as u\n'), ((3446, 3521), 
'specutils.SpectralRegion.from_center', 'SpectralRegion.from_center', (['(spec_sub.center.x * unit)', '(spec_sub.width * unit)'], {}), '(spec_sub.center.x * unit, spec_sub.width * unit)\n', (3472, 3521), False, 'from specutils import SpectralRegion\n'), ((5868, 5980), 'glue.core.link_helpers.LinkSame', 'LinkSame', (['self._selected_data.pixel_component_ids[i1]', 'self.data_collection[label].pixel_component_ids[i1c]'], {}), '(self._selected_data.pixel_component_ids[i1], self.data_collection[\n label].pixel_component_ids[i1c])\n', (5876, 5980), False, 'from glue.core.link_helpers import LinkSame\n'), ((6062, 6174), 'glue.core.link_helpers.LinkSame', 'LinkSame', (['self._selected_data.pixel_component_ids[i2]', 'self.data_collection[label].pixel_component_ids[i2c]'], {}), '(self._selected_data.pixel_component_ids[i2], self.data_collection[\n label].pixel_component_ids[i2c])\n', (6070, 6174), False, 'from glue.core.link_helpers import LinkSame\n'), ((4414, 4513), 'jdaviz.core.events.SnackbarMessage', 'SnackbarMessage', (['f"""Unable to perform collapse over selected data."""'], {'color': '"""error"""', 'sender': 'self'}), "(f'Unable to perform collapse over selected data.', color=\n 'error', sender=self)\n", (4429, 4513), False, 'from jdaviz.core.events import SnackbarMessage\n'), ((4842, 4868), 'astropy.units.Unit', 'u.Unit', (['self.spectral_unit'], {}), '(self.spectral_unit)\n', (4848, 4868), True, 'from astropy import units as u\n'), ((4919, 4945), 'astropy.units.Unit', 'u.Unit', (['self.spectral_unit'], {}), '(self.spectral_unit)\n', (4925, 4945), True, 'from astropy import units as u\n')]
|
from abc import ABCMeta, abstractmethod, abstractstaticmethod
from typing import List
import requests
from bs4 import BeautifulSoup
from anime_cli.anime import Anime
class SearchApi(metaclass=ABCMeta):
def __init__(self, mirror: str):
self.url = f"https://example.{mirror}"
self.request_headers = {
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36 Edg/95.0.1020.44"
}
def get_soup(self, location: str) -> BeautifulSoup:
"""Gets soup of a page
The get_soup function takes in the location of
the page and the gets the html from it
and parses the html to `BeautifulSoup`
Args:
location: the location of the page
Returns:
`BeautifulSoup` object by parsing the html
"""
r = requests.get(f"{self.url}/{location}", headers=self.request_headers)
return BeautifulSoup(r.content, features="html5lib")
@abstractstaticmethod
def get_headers() -> dict[str, str]:
"""Headers to set while quering anything from the site
Returns:
The header to set while quering anything.
Some links require some additional headers
to be passed to work properly
"""
pass
@abstractmethod
def search_anime(self, keyword: str) -> List[Anime]:
"""Search anime searches for animes by looking at the keyword
Args:
keyword: The keyword to search for when searching animes
Returns:
A list of `Anime` that matched the keyword
"""
pass
@abstractmethod
def get_episodes_count(self, anime: Anime) -> int:
"""Get the total number of episodes in an anime
Args:
anime: The anime for which you want to get episodes for
Returns:
The total number of episodes in the anime
"""
pass
@abstractmethod
def get_embed_video(self, anime: Anime, episode: int) -> str:
"""Get the link to the page where the episode video is embedded
Args:
anime: The anime you want to get episodes for
episode: The episode number of the anime
Returns:
The link to the page where the episode for the anime
is embedded
"""
pass
@abstractmethod
def get_video_url(self, embed_url: str) -> str:
"""Get the direct url to the video
Args:
embed_url: The link to the page where the video is embedded
Returns:
The direct link to the video
"""
pass
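# Illustrative concrete subclass (not part of the original module): the
# minimum a site-specific backend must provide. The location string, headers,
# and parsing logic below are hypothetical placeholders.
class DummySearchApi(SearchApi):
    @staticmethod
    def get_headers() -> dict:
        return {"referer": "https://example.com"}

    def search_anime(self, keyword: str) -> List[Anime]:
        soup = self.get_soup(f"search.html?keyword={keyword}")
        return []  # parse `soup` into Anime objects here

    def get_episodes_count(self, anime: Anime) -> int:
        return 0

    def get_embed_video(self, anime: Anime, episode: int) -> str:
        return ""

    def get_video_url(self, embed_url: str) -> str:
        return ""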
|
[
"bs4.BeautifulSoup",
"requests.get"
] |
[((890, 958), 'requests.get', 'requests.get', (['f"""{self.url}/{location}"""'], {'headers': 'self.request_headers'}), "(f'{self.url}/{location}', headers=self.request_headers)\n", (902, 958), False, 'import requests\n'), ((974, 1019), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.content'], {'features': '"""html5lib"""'}), "(r.content, features='html5lib')\n", (987, 1019), False, 'from bs4 import BeautifulSoup\n')]
|
import os, sys, time
import numpy as np
from skimage import io
def cropframes(clip_dir, image_files, clip_path):
clip = clip_path.split('/')[-1]
clip_name = clip.split('.')[0]
crop_dir = clip_dir + 'cropped/'
# crop_dir = '/home/sxg755/dataset/train/all_frames/cropped/'
if not os.path.exists(crop_dir):
os.makedirs(crop_dir)
cropped_files = []
    for idx, image in enumerate(image_files):
        img = io.imread(image)
        h = img.shape[0]
        w = img.shape[1]
        # keep the top 4/5 of the frame; slice indices must be integers
        img_cropped = img[0:4*h//5, 0:w]
        out_path = crop_dir + clip_name + '_keyframe' + "{0:0>4}".format(idx+1) + '.jpg'
        io.imsave(out_path, img_cropped)
        cropped_files.append(out_path)
return cropped_files
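# Usage sketch (illustrative; the paths below are hypothetical):
if __name__ == "__main__":
    frames = ['/data/frames/clip01_keyframe0001.jpg']
    print(cropframes('/data/frames/', frames, '/data/clips/clip01.mp4'))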
|
[
"os.path.exists",
"os.makedirs",
"skimage.io.imread"
] |
[((287, 311), 'os.path.exists', 'os.path.exists', (['crop_dir'], {}), '(crop_dir)\n', (301, 311), False, 'import os, sys, time\n'), ((315, 336), 'os.makedirs', 'os.makedirs', (['crop_dir'], {}), '(crop_dir)\n', (326, 336), False, 'import os, sys, time\n'), ((410, 426), 'skimage.io.imread', 'io.imread', (['image'], {}), '(image)\n', (419, 426), False, 'from skimage import io\n')]
|
import openpyxl as op
from os.path import exists
from src.pyion.objects.PyionData import PyionData
def check_headers(sheet) -> None:
if sheet is None:
raise Exception("Sheet cannot be null")
headers = list(sheet.rows)[0]
if len(headers) != 6:
raise Exception("Number of columns is off, should only have 6 columns.")
if "vm" not in headers[0].value.lower():
raise Exception("Vm should be column 1")
if "ci" not in headers[1].value.lower():
raise Exception("Ci should be column 2")
if "vi" not in headers[2].value.lower():
raise Exception("Vi should be column 3")
if "cs" not in headers[3].value.lower():
raise Exception("Cs should be column 4")
if "vadd" not in headers[4].value.lower():
raise Exception("Vadd should be column 5")
if "temp" not in headers[5].value.lower():
raise Exception("temp should be column 6")
# Returns all of a column recursively after row 1
def get_column(sheet, col: str, cell_int: int):
if sheet is None or cell_int is None or col is None:
raise Exception("Null parameter given in get_column method")
cell = f"{col}{str(cell_int)}"
if sheet[cell].value is None:
values = [x[0].value for x in sheet[f"{col}2":f"{col}{str(cell_int-1)}"]]
return values
return get_column(sheet, col, cell_int+1)
def read_file(file_loc: str) -> PyionData:
# Making sure the file exists
if not exists(file_loc):
raise Exception(f"File {file_loc} doesn't exist or cant be found.")
# Initializing all our variables
try:
data = PyionData()
wb = op.load_workbook(file_loc, data_only=True, read_only=True)
sheet = wb["Sheet1"]
except Exception:
raise Exception("Error trying to load excel file, make sure the file is a .xlsx file "
"and the main sheet is titled 'Sheet1'.")
check_headers(sheet)
# Reading all the excel data
data.voltage.value = get_column(sheet, 'a', 2)
if len(data.voltage.value) % 3 != 0:
raise Exception("Voltage column should be in multiples of 3, current column is not.")
for value in data.voltage.value:
if type(value) is not float:
raise Exception("Non-float value found in voltage column")
data.ci.value = sheet["b2"].value
if type(data.ci.value) != int and type(data.ci.value) != float:
raise Exception("Ci value must be an int or float")
data.vi.value = sheet["c2"].value
if type(data.vi.value) != int and type(data.vi.value) != float:
raise Exception("Vi value must be an int or float")
data.cs.value = sheet["d2"].value
if type(data.cs.value) != int and type(data.cs.value) != float:
raise Exception("Cs value must be an int or float")
data.v_add.value = get_column(sheet, 'e', 2)
if len(data.voltage.value)/3 != len(data.v_add.value):
raise Exception("V_add column length should equal voltage column divided by 3, currently does not.")
for value in data.v_add.value:
if type(value) is not float and type(value) is not int:
raise Exception("Non-float / Non-int value found in voltage column")
data.temp.value = sheet["f2"].value
if type(data.temp.value) != int and type(data.temp.value) != float:
raise Exception("Temp value must be an int or float")
return data
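# Usage sketch (illustrative; the filename below is hypothetical):
if __name__ == "__main__":
    data = read_file("measurements.xlsx")
    print(data.voltage.value, data.temp.value)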
|
[
"openpyxl.load_workbook",
"os.path.exists",
"src.pyion.objects.PyionData.PyionData"
] |
[((1461, 1477), 'os.path.exists', 'exists', (['file_loc'], {}), '(file_loc)\n', (1467, 1477), False, 'from os.path import exists\n'), ((1617, 1628), 'src.pyion.objects.PyionData.PyionData', 'PyionData', ([], {}), '()\n', (1626, 1628), False, 'from src.pyion.objects.PyionData import PyionData\n'), ((1642, 1700), 'openpyxl.load_workbook', 'op.load_workbook', (['file_loc'], {'data_only': '(True)', 'read_only': '(True)'}), '(file_loc, data_only=True, read_only=True)\n', (1658, 1700), True, 'import openpyxl as op\n')]
|
import json
import os
import re
import subprocess
os.environ['PATH'] += ':' + os.environ['LAMBDA_TASK_ROOT']
BASES = [
'A',
'C',
'G',
'T',
'N',
]
all_count_pattern = re.compile('[0-9]+')
get_all_calls = all_count_pattern.findall
def perform_query(reference_bases, region, end_min, end_max, alternate_bases,
variant_type, include_details, vcf_location):
args = [
'bcftools', 'query',
'--regions', region,
'--format', '%POS\t%REF\t%ALT\t%INFO\t[%GT,]\n',
vcf_location
]
query_process = subprocess.Popen(args, stdout=subprocess.PIPE, cwd='/tmp',
encoding='ascii')
v_prefix = '<{}'.format(variant_type)
first_bp = int(region[region.find(':')+1: region.find('-')])
last_bp = int(region[region.find('-')+1:])
approx = reference_bases == 'N'
exists = False
variants = []
call_count = 0
all_alleles_count = 0
sample_indexes = []
for line in query_process.stdout:
try:
(position, reference, all_alts, info_str,
genotypes) = line.split('\t')
except ValueError as e:
print(repr(line.split('\t')))
raise e
pos = int(position)
# Ensure each variant will only be found by one process
if not first_bp <= pos <= last_bp:
continue
ref_length = len(reference)
if not end_min <= pos + ref_length - 1 <= end_max:
continue
if not approx and reference.upper() != reference_bases:
continue
alts = all_alts.split(',')
if alternate_bases is None:
if variant_type == 'DEL':
hit_indexes = [i for i, alt in enumerate(alts)
if ((alt.startswith(v_prefix)
or alt == '<CN0>')
if alt.startswith('<')
else len(alt) < ref_length)]
elif variant_type == 'INS':
hit_indexes = [i for i, alt in enumerate(alts)
if (alt.startswith(v_prefix)
if alt.startswith('<')
else len(alt) > ref_length)]
elif variant_type == 'DUP':
pattern = re.compile('({}){{2,}}'.format(reference))
hit_indexes = [i for i, alt in enumerate(alts)
if ((alt.startswith(v_prefix)
or (alt.startswith('<CN')
and alt not in ('<CN0>', '<CN1>')))
if alt.startswith('<')
else pattern.fullmatch(alt))]
elif variant_type == 'DUP:TANDEM':
tandem = reference + reference
hit_indexes = [i for i, alt in enumerate(alts)
if ((alt.startswith(v_prefix)
or alt == '<CN2>')
if alt.startswith('<')
else alt == tandem)]
elif variant_type == 'CNV':
            pattern = re.compile(r'\.|({})*'.format(reference))
hit_indexes = [i for i, alt in enumerate(alts)
if ((alt.startswith(v_prefix)
or alt.startswith('<CN')
or alt.startswith('<DEL')
or alt.startswith('<DUP'))
if alt.startswith('<')
else pattern.fullmatch(alt))]
else:
# For structural variants that aren't otherwise recognisable
hit_indexes = [i for i, alt in enumerate(alts)
if alt.startswith(v_prefix)]
else:
if alternate_bases == 'N':
hit_indexes = [i for i, alt in enumerate(alts)
if alt.upper() in BASES]
else:
hit_indexes = [i for i, alt in enumerate(alts)
if alt.upper() == alternate_bases]
if not hit_indexes:
continue
# Look through INFO for AC and AN, used for efficient calculations. Note
# we cannot request them explicitly in the query, as bcftools will crash
# if they aren't present.
all_alt_counts = None
total_count = None
for info in info_str.split(';'):
if not all_alt_counts and info.startswith('AC='):
all_alt_counts = info[3:]
if total_count is not None:
break
elif total_count is None and info.startswith('AN='):
total_count = int(info[3:])
                # stop once both AC and AN have been found (mirrors the branch above)
                if all_alt_counts is not None:
break
all_calls = None
if all_alt_counts is not None:
alt_counts = all_alt_counts.split(',')
call_counts = [int(alt_counts[i]) for i in hit_indexes]
variants += [
reference + position + alts[i]
for i in hit_indexes if alt_counts[i] != "0"
]
call_count += sum(call_counts)
else:
# Much slower, but doesn't require INFO/AC
all_calls = get_all_calls(genotypes)
hit_set = set(str(i+1) for i in hit_indexes)
variants += [
reference + position + alts[int(i)-1]
for i in set(all_calls) & hit_set
]
call_count += sum(1 for call in all_calls if call in hit_set)
if call_count:
if not exists:
exists = True
if not include_details:
break
            # GT fields use 1-based allele numbers, hence i + 1 (cf. hit_set above)
            pattern = re.compile('(^|[|/])({})([|/]|$)'.format(
                '|'.join(str(i + 1) for i in hit_indexes)))
sample_indexes += [i for i, gt in enumerate(genotypes.split(','))
if pattern.search(gt)]
# Used for calculating frequency. This will be a misleading value if the
# alleles are spread over multiple vcf records. Ideally we should
# return a dictionary for each matching record/allele, but for now the
# beacon specification doesn't support it. A quick fix might be to
# represent the frequency of any matching allele in the population of
# haplotypes, but this could lead to an illegal value > 1.
if total_count is not None:
all_alleles_count += total_count
else:
# Much slower, but doesn't require INFO/AN
if all_calls is None:
all_calls = get_all_calls(genotypes)
all_alleles_count += len(all_calls)
query_process.stdout.close()
samples = list(set(sample_indexes))
return {
'exists': exists,
'all_alleles_count': all_alleles_count,
'variants': variants,
'call_count': call_count,
'samples': samples,
}
def lambda_handler(event, context):
print('Event Received: {}'.format(json.dumps(event)))
reference_bases = event['reference_bases']
region = event['region']
end_min = event['end_min']
end_max = event['end_max']
alternate_bases = event['alternate_bases']
variant_type = event['variant_type']
include_details = event['include_details']
vcf_location = event['vcf_location']
response = perform_query(reference_bases, region, end_min, end_max,
alternate_bases, variant_type, include_details,
vcf_location)
print('Returning response: {}'.format(json.dumps(response)))
return response
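# Hedged example of the event shape this handler expects (field names are taken
# from the lookups above; the values are purely illustrative):
#   {
#       "reference_bases": "A",
#       "region": "1:100000-100100",
#       "end_min": 100000,
#       "end_max": 100200,
#       "alternate_bases": "G",
#       "variant_type": None,
#       "include_details": True,
#       "vcf_location": "example.vcf.gz"
#   }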
|
[
"subprocess.Popen",
"json.dumps",
"re.compile"
] |
[((189, 209), 're.compile', 're.compile', (['"""[0-9]+"""'], {}), "('[0-9]+')\n", (199, 209), False, 'import re\n'), ((571, 647), 'subprocess.Popen', 'subprocess.Popen', (['args'], {'stdout': 'subprocess.PIPE', 'cwd': '"""/tmp"""', 'encoding': '"""ascii"""'}), "(args, stdout=subprocess.PIPE, cwd='/tmp', encoding='ascii')\n", (587, 647), False, 'import subprocess\n'), ((7184, 7201), 'json.dumps', 'json.dumps', (['event'], {}), '(event)\n', (7194, 7201), False, 'import json\n'), ((7752, 7772), 'json.dumps', 'json.dumps', (['response'], {}), '(response)\n', (7762, 7772), False, 'import json\n')]
|
#!/usr/bin/env python
from setuptools import setup
import glob
package_name = 'apex_launchtest'
setup(
name=package_name,
version='0.1',
description='Apex integration test runner and utilities',
author='<NAME>',
author_email='<EMAIL>',
data_files=[
('share/ament_index/resource_index/packages', ['resource/apex_launchtest']),
('lib/' + package_name, glob.glob('example_processes/**')),
('share/' + package_name + '/examples', glob.glob('examples/[!_]**')),
('bin', ['scripts/apex_launchtest']),
],
packages=[
'apex_launchtest',
'apex_launchtest.asserts',
'apex_launchtest.event_handlers',
'apex_launchtest.util',
],
tests_require=["pytest"],
zip_safe=True,
)
|
[
"glob.glob"
] |
[((395, 428), 'glob.glob', 'glob.glob', (['"""example_processes/**"""'], {}), "('example_processes/**')\n", (404, 428), False, 'import glob\n'), ((479, 507), 'glob.glob', 'glob.glob', (['"""examples/[!_]**"""'], {}), "('examples/[!_]**')\n", (488, 507), False, 'import glob\n')]
|
import pyclesperanto_prototype as cle
import numpy as np
def test_maximum_y_projection():
test1 = cle.push(np.asarray([
[
[1, 0, 0, 0, 9],
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[3, 0, 1, 0, 10],
[0, 4, 0, 7, 0],
[1, 0, 0, 0, 9],
[5, 0, 6, 0, 10]
], [
[0, 2, 0, 8, 0],
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[5, 0, 6, 0, 10]
], [
[1, 0, 0, 0, 9],
[0, 4, 0, 7, 0],
[3, 0, 1, 0, 10],
[0, 2, 0, 8, 0],
[5, 0, 6, 0, 10]
]
]).T)
reference = cle.push(np.asarray([
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10],
[5, 4, 6, 8, 10]
]).T)
result = cle.create(reference)
cle.maximum_y_projection(test1, result)
a = cle.pull(result)
b = cle.pull(reference)
print(a)
assert (np.array_equal(a, b))
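# Sanity check on the fixture: every 5x5 plane pushed above has column-wise
# maxima [5, 4, 6, 8, 10], so taking the maximum along y reproduces that row
# five times, which is exactly what `reference` encodes.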
|
[
"numpy.array_equal",
"pyclesperanto_prototype.maximum_y_projection",
"numpy.asarray",
"pyclesperanto_prototype.pull",
"pyclesperanto_prototype.create"
] |
[((1129, 1150), 'pyclesperanto_prototype.create', 'cle.create', (['reference'], {}), '(reference)\n', (1139, 1150), True, 'import pyclesperanto_prototype as cle\n'), ((1155, 1194), 'pyclesperanto_prototype.maximum_y_projection', 'cle.maximum_y_projection', (['test1', 'result'], {}), '(test1, result)\n', (1179, 1194), True, 'import pyclesperanto_prototype as cle\n'), ((1204, 1220), 'pyclesperanto_prototype.pull', 'cle.pull', (['result'], {}), '(result)\n', (1212, 1220), True, 'import pyclesperanto_prototype as cle\n'), ((1229, 1248), 'pyclesperanto_prototype.pull', 'cle.pull', (['reference'], {}), '(reference)\n', (1237, 1248), True, 'import pyclesperanto_prototype as cle\n'), ((1276, 1296), 'numpy.array_equal', 'np.array_equal', (['a', 'b'], {}), '(a, b)\n', (1290, 1296), True, 'import numpy as np\n'), ((112, 595), 'numpy.asarray', 'np.asarray', (['[[[1, 0, 0, 0, 9], [0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0, 4, 0, 7, 0], [5, \n 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, 0, 0, 0, 9], [3, 0, 1, 0, 10], [0,\n 4, 0, 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0,\n 4, 0, 7, 0], [1, 0, 0, 0, 9], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, \n 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [5, 0, 6, 0, 10]], [[1,\n 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [0, 2, 0, 8, 0], [5, 0,\n 6, 0, 10]]]'], {}), '([[[1, 0, 0, 0, 9], [0, 2, 0, 8, 0], [3, 0, 1, 0, 10], [0, 4, 0, \n 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [1, 0, 0, 0, 9], [3, 0, 1, \n 0, 10], [0, 4, 0, 7, 0], [5, 0, 6, 0, 10]], [[0, 2, 0, 8, 0], [3, 0, 1,\n 0, 10], [0, 4, 0, 7, 0], [1, 0, 0, 0, 9], [5, 0, 6, 0, 10]], [[0, 2, 0,\n 8, 0], [1, 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [5, 0, 6, 0,\n 10]], [[1, 0, 0, 0, 9], [0, 4, 0, 7, 0], [3, 0, 1, 0, 10], [0, 2, 0, 8,\n 0], [5, 0, 6, 0, 10]]])\n', (122, 595), True, 'import numpy as np\n'), ((963, 1069), 'numpy.asarray', 'np.asarray', (['[[5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5,\n 4, 6, 8, 10]]'], {}), '([[5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6, 8, 10], [5, 4, 6,\n 8, 10], [5, 4, 6, 8, 10]])\n', (973, 1069), True, 'import numpy as np\n')]
|
import graphlab as gl
import pandas as pd
import numpy as np
import os
from .core import TrajRecord
def createTrajRecordFromPandasDF(df):
user_daily_loc = gl.SFrame(data = df)
return createTrajRecordFromSFrame(user_daily_loc)
def createTrajRecordFromSparkDF(df, sparkContext):
user_daily_loc = gl.SFrame.from_rdd(df, sparkContext)
return createTrajRecordFromSFrame(user_daily_loc)
def createTrajRecordFromCSV(file_path):
user_daily_loc = gl.SFrame.read_csv(file_path, verbose=False)
return createTrajRecordFromSFrame(user_daily_loc)
def createTrajRecordFromSFrame(user_daily_loc_count):
user_daily_loc_count['user_id'] = user_daily_loc_count['user_id'].astype(str)
# Prepare migration record
# Assign day index to each date
start_date_ori = str(user_daily_loc_count['date'].min())
end_date_ori = str(user_daily_loc_count['date'].max())
# MM/DD/YYYY
start_date = '/'.join([start_date_ori[4:6], start_date_ori[6:],
start_date_ori[:4]])
end_date = '/'.join([end_date_ori[4:6], end_date_ori[6:], end_date_ori[:4]])
all_date = pd.date_range(start=start_date, end=end_date)
all_date_new = [int(str(x)[:4] + str(x)[5:7] + str(x)[8:10])
for x in all_date]
date2index = dict(zip(all_date_new, range(len(all_date_new))))
index2date = dict(zip(range(len(all_date_new)), all_date_new))
end_date_long_ori = str(pd.Timestamp(end_date)+pd.Timedelta('200 day'))
all_date_long = pd.date_range(start=start_date, end=end_date_long_ori)
all_date_long_new = [int(str(x)[:4] + str(x)[5:7] + str(x)[8:10])
for x in all_date_long]
date_num_long = gl.SFrame({'date': all_date_long_new,
'date_num': range(len(all_date_long_new))})
migration_df = user_daily_loc_count
migration_df['date_num'] = migration_df.apply(
lambda x: date2index[x['date']]
)
# Aggregate user daily records
user_loc_date_agg = migration_df.groupby(
['user_id', 'location'],
{'all_date': gl.aggregate.CONCAT('date_num')}
)
user_loc_agg = user_loc_date_agg.groupby(
['user_id'],
{'all_record': gl.aggregate.CONCAT('location', 'all_date')}
)
traj = TrajRecord(user_loc_agg, migration_df, index2date, date_num_long)
return traj
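# Note: date2index maps each observed YYYYMMDD date to a 0-based day offset
# from the first date; date_num_long extends the same index 200 days past the
# last observation, presumably so migration windows can close after the final
# recorded day.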
def getResultAsPandasDF(result):
return result.select_columns(
['user_id', 'home', 'destination', 'migration_date',
'uncertainty', 'num_error_day',
'home_start', 'home_end',
'destination_start', 'destination_end',
'home_start_date', 'home_end_date',
'destination_start_date', 'destination_end_date']
).to_dataframe()
def getResultAsSparkDF(result, sparkContext, sqlContext):
return result.select_columns(
['user_id', 'home', 'destination', 'migration_date',
'uncertainty', 'num_error_day',
'home_start', 'home_end',
'destination_start', 'destination_end',
'home_start_date', 'home_end_date',
'destination_start_date', 'destination_end_date']
).to_spark_dataframe(sparkContext, sqlContext)
def getResultAsCsv(result, result_path='result', file_name='migration_event.csv'):
if not os.path.isdir(result_path):
os.makedirs(result_path)
save_file = os.path.join(result_path, file_name)
result.select_columns(
['user_id', 'home', 'destination', 'migration_date',
'uncertainty', 'num_error_day',
'home_start', 'home_end',
'destination_start', 'destination_end',
'home_start_date', 'home_end_date',
'destination_start_date', 'destination_end_date']
).export_csv(save_file)
|
[
"pandas.Timestamp",
"pandas.date_range",
"os.makedirs",
"graphlab.aggregate.CONCAT",
"os.path.isdir",
"graphlab.SFrame.from_rdd",
"graphlab.SFrame",
"pandas.Timedelta",
"os.path.join",
"graphlab.SFrame.read_csv"
] |
[((161, 179), 'graphlab.SFrame', 'gl.SFrame', ([], {'data': 'df'}), '(data=df)\n', (170, 179), True, 'import graphlab as gl\n'), ((309, 345), 'graphlab.SFrame.from_rdd', 'gl.SFrame.from_rdd', (['df', 'sparkContext'], {}), '(df, sparkContext)\n', (327, 345), True, 'import graphlab as gl\n'), ((462, 506), 'graphlab.SFrame.read_csv', 'gl.SFrame.read_csv', (['file_path'], {'verbose': '(False)'}), '(file_path, verbose=False)\n', (480, 506), True, 'import graphlab as gl\n'), ((1118, 1163), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date'}), '(start=start_date, end=end_date)\n', (1131, 1163), True, 'import pandas as pd\n'), ((1499, 1553), 'pandas.date_range', 'pd.date_range', ([], {'start': 'start_date', 'end': 'end_date_long_ori'}), '(start=start_date, end=end_date_long_ori)\n', (1512, 1553), True, 'import pandas as pd\n'), ((3360, 3396), 'os.path.join', 'os.path.join', (['result_path', 'file_name'], {}), '(result_path, file_name)\n', (3372, 3396), False, 'import os\n'), ((3283, 3309), 'os.path.isdir', 'os.path.isdir', (['result_path'], {}), '(result_path)\n', (3296, 3309), False, 'import os\n'), ((3319, 3343), 'os.makedirs', 'os.makedirs', (['result_path'], {}), '(result_path)\n', (3330, 3343), False, 'import os\n'), ((1431, 1453), 'pandas.Timestamp', 'pd.Timestamp', (['end_date'], {}), '(end_date)\n', (1443, 1453), True, 'import pandas as pd\n'), ((1454, 1477), 'pandas.Timedelta', 'pd.Timedelta', (['"""200 day"""'], {}), "('200 day')\n", (1466, 1477), True, 'import pandas as pd\n'), ((2079, 2110), 'graphlab.aggregate.CONCAT', 'gl.aggregate.CONCAT', (['"""date_num"""'], {}), "('date_num')\n", (2098, 2110), True, 'import graphlab as gl\n'), ((2208, 2251), 'graphlab.aggregate.CONCAT', 'gl.aggregate.CONCAT', (['"""location"""', '"""all_date"""'], {}), "('location', 'all_date')\n", (2227, 2251), True, 'import graphlab as gl\n')]
|
import sys
sys.path.insert(0, "../../../")
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data_utils
import numpy as np
from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist
from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip
from paper_experiments.rotated_MNIST.mnist_loader import MnistRotated
from paper_experiments.rotated_MNIST.augmentations.model_baseline import Net
def train(args, model, device, train_loader, optimizer, epoch):
model.train()
for batch_idx, (data, target, _) in enumerate(train_loader):
data, target = data.to(device), target.to(device)
_, target = target.max(dim=1)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(args, model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target, _ in test_loader:
data, target = data.to(device), target.to(device)
_, target = target.max(dim=1)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
return test_loss, 100. * correct / len(test_loader.dataset)
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=0,
                        help='random seed (default: 0)')
    parser.add_argument('--batch-size', type=int, default=128,
                        help='input batch size for training (default: 128)')
    parser.add_argument('--epochs', type=int, default=200,
                        help='number of epochs to train (default: 200)')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='learning rate (default: 0.001)')
parser.add_argument('--da', type=str, default='rotate', choices=['rotate', 'flip'],
help='type of data augmentation')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
# Set seed
torch.manual_seed(args.seed)
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
device = torch.device("cuda")
kwargs = {'num_workers': 8, 'pin_memory': True} if use_cuda else {}
# Load supervised training
if args.da == 'rotate':
mnist_30 = MnistRotatedDist('../dataset/', train=True, thetas=[30.0], d_label=0, transform=True)
mnist_60 = MnistRotatedDist('../dataset/', train=True, thetas=[60.0], d_label=1, transform=True)
mnist_90 = MnistRotatedDist('../dataset/', train=True, thetas=[90.0], d_label=2, transform=True)
model_name = 'baseline_test_0_random_rotate_seed_' + str(args.seed)
elif args.da == 'flip':
mnist_30 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[30.0], d_label=0)
mnist_60 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[60.0], d_label=1)
mnist_90 = MnistRotatedDistFlip('../dataset/', train=True, thetas=[90.0], d_label=2)
model_name = 'baseline_test_0_random_flips_seed_' + str(args.seed)
mnist = data_utils.ConcatDataset([mnist_30, mnist_60, mnist_90])
train_size = int(0.9 * len(mnist))
val_size = len(mnist) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(mnist, [train_size, val_size])
train_loader = data_utils.DataLoader(train_dataset,
batch_size=args.batch_size,
shuffle=True, **kwargs)
val_loader = data_utils.DataLoader(val_dataset,
batch_size=args.batch_size,
shuffle=False, **kwargs)
model = Net().to(device)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
best_val_acc = 0
for epoch in range(1, args.epochs + 1):
print('\n Epoch: ' + str(epoch))
train(args, model, device, train_loader, optimizer, epoch)
val_loss, val_acc = test(args, model, device, val_loader)
print(epoch, val_loss, val_acc)
# Save best
if val_acc >= best_val_acc:
best_val_acc = val_acc
torch.save(model, model_name + '.model')
torch.save(args, model_name + '.config')
# Test loader
mnist_0 = MnistRotated('../dataset/', train=False, thetas=[0.0], d_label=0)
test_loader = data_utils.DataLoader(mnist_0,
batch_size=args.batch_size,
shuffle=False, **kwargs)
model = torch.load(model_name + '.model').to(device)
_, test_acc = test(args, model, device, test_loader)
with open(model_name + '.txt', "w") as text_file:
text_file.write("Test Acc: " + str(test_acc))
if __name__ == '__main__':
main()
|
[
"paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist",
"torch.utils.data.ConcatDataset",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"paper_experiments.rotated_MNIST.mnist_loader.MnistRotated",
"paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip",
"torch.manual_seed",
"torch.load",
"sys.path.insert",
"torch.utils.data.random_split",
"torch.save",
"paper_experiments.rotated_MNIST.augmentations.model_baseline.Net",
"torch.nn.functional.nll_loss",
"torch.cuda.is_available",
"torch.device",
"torch.no_grad"
] |
[((11, 42), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../"""'], {}), "(0, '../../../')\n", (26, 42), False, 'import sys\n'), ((1679, 1739), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch MNIST Example"""'}), "(description='PyTorch MNIST Example')\n", (1702, 1739), False, 'import argparse\n'), ((2631, 2659), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (2648, 2659), False, 'import torch\n'), ((2707, 2732), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (2721, 2732), True, 'import numpy as np\n'), ((2747, 2767), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2759, 2767), False, 'import torch\n'), ((3687, 3743), 'torch.utils.data.ConcatDataset', 'data_utils.ConcatDataset', (['[mnist_30, mnist_60, mnist_90]'], {}), '([mnist_30, mnist_60, mnist_90])\n', (3711, 3743), True, 'import torch.utils.data as data_utils\n'), ((3856, 3916), 'torch.utils.data.random_split', 'torch.utils.data.random_split', (['mnist', '[train_size, val_size]'], {}), '(mnist, [train_size, val_size])\n', (3885, 3916), False, 'import torch\n'), ((3937, 4030), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)'}), '(train_dataset, batch_size=args.batch_size, shuffle=\n True, **kwargs)\n', (3958, 4030), True, 'import torch.utils.data as data_utils\n'), ((4126, 4218), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(val_dataset, batch_size=args.batch_size, shuffle=\n False, **kwargs)\n', (4147, 4218), True, 'import torch.utils.data as data_utils\n'), ((4899, 4964), 'paper_experiments.rotated_MNIST.mnist_loader.MnistRotated', 'MnistRotated', (['"""../dataset/"""'], {'train': '(False)', 'thetas': '[0.0]', 'd_label': '(0)'}), "('../dataset/', train=False, thetas=[0.0], d_label=0)\n", (4911, 4964), False, 'from paper_experiments.rotated_MNIST.mnist_loader import MnistRotated\n'), ((4983, 5070), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['mnist_0'], {'batch_size': 'args.batch_size', 'shuffle': '(False)'}), '(mnist_0, batch_size=args.batch_size, shuffle=False,\n **kwargs)\n', (5004, 5070), True, 'import torch.utils.data as data_utils\n'), ((878, 904), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (888, 904), True, 'import torch.nn.functional as F\n'), ((1060, 1075), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1073, 1075), False, 'import torch\n'), ((2585, 2610), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2608, 2610), False, 'import torch\n'), ((2919, 3008), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[30.0]', 'd_label': '(0)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[30.0], d_label=0,\n transform=True)\n", (2935, 3008), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3024, 3113), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[60.0]', 'd_label': '(1)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[60.0], d_label=1,\n transform=True)\n", (3040, 3113), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3129, 3218), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate.MnistRotatedDist', 'MnistRotatedDist', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[90.0]', 'd_label': '(2)', 'transform': '(True)'}), "('../dataset/', train=True, thetas=[90.0], d_label=2,\n transform=True)\n", (3145, 3218), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_rotate import MnistRotatedDist\n'), ((3339, 3412), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[30.0]', 'd_label': '(0)'}), "('../dataset/', train=True, thetas=[30.0], d_label=0)\n", (3359, 3412), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((3432, 3505), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[60.0]', 'd_label': '(1)'}), "('../dataset/', train=True, thetas=[60.0], d_label=1)\n", (3452, 3505), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((3525, 3598), 'paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip.MnistRotatedDistFlip', 'MnistRotatedDistFlip', (['"""../dataset/"""'], {'train': '(True)', 'thetas': '[90.0]', 'd_label': '(2)'}), "('../dataset/', train=True, thetas=[90.0], d_label=2)\n", (3545, 3598), False, 'from paper_experiments.rotated_MNIST.mnist_loader_shifted_label_distribution_flip import MnistRotatedDistFlip\n'), ((4309, 4314), 'paper_experiments.rotated_MNIST.augmentations.model_baseline.Net', 'Net', ([], {}), '()\n', (4312, 4314), False, 'from paper_experiments.rotated_MNIST.augmentations.model_baseline import Net\n'), ((4772, 4812), 'torch.save', 'torch.save', (['model', "(model_name + '.model')"], {}), "(model, model_name + '.model')\n", (4782, 4812), False, 'import torch\n'), ((4825, 4865), 'torch.save', 'torch.save', (['args', "(model_name + '.config')"], {}), "(args, model_name + '.config')\n", (4835, 4865), False, 'import torch\n'), ((5160, 5193), 'torch.load', 'torch.load', (["(model_name + '.model')"], {}), "(model_name + '.model')\n", (5170, 5193), False, 'import torch\n'), ((1284, 1327), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (1294, 1327), True, 'import torch.nn.functional as F\n')]
|
"""Scanning tests with fake mDNS responder.."""
from ipaddress import ip_address
import pytest
from pyatv.const import Protocol
from tests import fake_udns
from tests.conftest import Scanner
from tests.utils import assert_device
IP_1 = "10.0.0.1"
AIRPLAY_NAME = "AirPlay ATV"
AIRPLAY_ID = "AA:BB:CC:DD:EE:FF"
pytestmark = pytest.mark.asyncio
async def test_multicast_scan_airplay_device(udns_server, multicast_scan: Scanner):
udns_server.add_service(
fake_udns.airplay_service(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1])
)
atvs = await multicast_scan()
assert len(atvs) == 1
assert atvs[0].name == AIRPLAY_NAME
assert atvs[0].identifier == AIRPLAY_ID
assert atvs[0].address == ip_address(IP_1)
async def test_unicast_scan_airplay(udns_server, unicast_scan: Scanner):
udns_server.add_service(
fake_udns.airplay_service(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1], port=7000)
)
atvs = await unicast_scan()
assert len(atvs) == 1
assert_device(
atvs[0],
AIRPLAY_NAME,
ip_address(IP_1),
AIRPLAY_ID,
Protocol.AirPlay,
7000,
)
|
[
"ipaddress.ip_address",
"tests.fake_udns.airplay_service"
] |
[((472, 541), 'tests.fake_udns.airplay_service', 'fake_udns.airplay_service', (['AIRPLAY_NAME', 'AIRPLAY_ID'], {'addresses': '[IP_1]'}), '(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1])\n', (497, 541), False, 'from tests import fake_udns\n'), ((723, 739), 'ipaddress.ip_address', 'ip_address', (['IP_1'], {}), '(IP_1)\n', (733, 739), False, 'from ipaddress import ip_address\n'), ((852, 937), 'tests.fake_udns.airplay_service', 'fake_udns.airplay_service', (['AIRPLAY_NAME', 'AIRPLAY_ID'], {'addresses': '[IP_1]', 'port': '(7000)'}), '(AIRPLAY_NAME, AIRPLAY_ID, addresses=[IP_1], port=7000\n )\n', (877, 937), False, 'from tests import fake_udns\n'), ((1065, 1081), 'ipaddress.ip_address', 'ip_address', (['IP_1'], {}), '(IP_1)\n', (1075, 1081), False, 'from ipaddress import ip_address\n')]
|
__author__ = 'sibirrer'
import warnings
import numpy as np
from astropy.cosmology import default_cosmology
from lenstronomy.Util import class_creator
from lenstronomy.Util import constants as const
from lenstronomy.Cosmo.lens_cosmo import LensCosmo
from lenstronomy.Analysis.kinematics_api import KinematicsAPI
class TDCosmography(KinematicsAPI):
"""
class equipped to perform a cosmographic analysis from a lens model with added measurements of time delays and
kinematics.
This class does not require any cosmological knowledge and can return angular diameter distance estimates
self-consistently integrating the kinematics routines and time delay estimates in the lens modeling.
This description follows Birrer et al. 2016, 2019.
"""
def __init__(self, z_lens, z_source, kwargs_model, cosmo_fiducial=None, lens_model_kinematics_bool=None,
light_model_kinematics_bool=None, kwargs_seeing={}, kwargs_aperture={}, anisotropy_model=None,
multi_observations=False, kwargs_lens_eqn_solver={}):
"""
:param z_lens: redshift of deflector
:param z_source: redshift of source
:param kwargs_model: model configurations (according to FittingSequence)
:param cosmo_fiducial: fiducial cosmology used to compute angular diameter distances where required
:param lens_model_kinematics_bool: (optional) bool list, corresponding to lens models being included into the
kinematics modeling
:param light_model_kinematics_bool: (optional) bool list, corresponding to lens light models being included
into the kinematics modeling
:param kwargs_seeing: seeing conditions (see observation class in Galkin)
:param kwargs_aperture: aperture keyword arguments (see aperture class in Galkin)
:param anisotropy_model: string, anisotropy model type
:param multi_observations: bool, if True, interprets kwargs_aperture and kwargs_seeing as lists of multiple
observations
"""
if cosmo_fiducial is None:
cosmo_fiducial = default_cosmology.get()
self._z_lens = z_lens
self._z_source = z_source
self._cosmo_fiducial = cosmo_fiducial
self._lens_cosmo = LensCosmo(z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial)
        self.LensModel, self.SourceModel, self.LensLightModel, self.PointSource, extinction_class = \
            class_creator.create_class_instances(all_models=True, **kwargs_model,
                                                 kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)
super(TDCosmography, self).__init__(z_lens=z_lens, z_source=z_source, kwargs_model=kwargs_model,
cosmo=cosmo_fiducial, lens_model_kinematics_bool=lens_model_kinematics_bool,
light_model_kinematics_bool=light_model_kinematics_bool,
kwargs_seeing=kwargs_seeing, kwargs_aperture=kwargs_aperture,
anisotropy_model=anisotropy_model, multi_observations=multi_observations, kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)
def time_delays(self, kwargs_lens, kwargs_ps, kappa_ext=0, original_ps_position=False):
"""
predicts the time delays of the image positions given the fiducial cosmology
:param kwargs_lens: lens model parameters
:param kwargs_ps: point source parameters
:param kappa_ext: external convergence (optional)
:param original_ps_position: boolean (only applies when first point source model is of type 'LENSED_POSITION'),
uses the image positions in the model parameters and does not re-compute images (which might be differently ordered)
in case of the lens equation solver
:return: time delays at image positions for the fixed cosmology
"""
fermat_pot, ra_pos, dec_pos = self.fermat_potential(kwargs_lens, kwargs_ps)
time_delay = self._lens_cosmo.time_delay_units(fermat_pot, kappa_ext)
return time_delay, ra_pos, dec_pos
def fermat_potential(self, kwargs_lens, kwargs_ps, original_ps_position=False):
"""
:param kwargs_lens: lens model keyword argument list
:param kwargs_ps: point source keyword argument list
:return: tuple of Fermat potential of all the image positions in the first point source list entry and ra/dec of the image positions used (Ji Won's modification)
"""
ra_pos, dec_pos = self.PointSource.image_position(kwargs_ps, kwargs_lens, original_position=original_ps_position)
ra_pos = ra_pos[0]
dec_pos = dec_pos[0]
ra_source, dec_source = self.LensModel.ray_shooting(ra_pos, dec_pos, kwargs_lens)
sigma_source = np.sqrt(np.var(ra_source) + np.var(dec_source))
if sigma_source > 0.001:
            warnings.warn('Source position computed from the different image positions do not trace back to the same position! '
                          'The error is %s mas and may be larger than what is required for an accurate relative time delay estimate! '
                          'See e.g. Birrer & Treu 2019.' % (sigma_source * 1000))
ra_source = np.mean(ra_source)
dec_source = np.mean(dec_source)
fermat_pot = self.LensModel.fermat_potential(ra_pos, dec_pos, kwargs_lens, ra_source, dec_source)
return fermat_pot, ra_pos, dec_pos
def velocity_dispersion_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None,
theta_E=None, gamma=None):
"""
sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy)
(Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and
cosmological independent quantity only depending on angular units. This function returns J given the lens
and light parameters and the anisotropy choice without an external mass sheet correction.
:param kwargs_lens: lens model keyword arguments
:param kwargs_lens_light: lens light model keyword arguments
:param kwargs_anisotropy: stellar anisotropy keyword arguments
:param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional,
if set to None will be computed in this function with default settings that may not be accurate.
:return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019)
"""
sigma_v = self.velocity_dispersion(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light,
kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E,
gamma=gamma)
sigma_v *= 1000 # convert from [km/s] to [m/s]
J = sigma_v ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2
return J
def velocity_dispersion_map_dimension_less(self, kwargs_lens, kwargs_lens_light, kwargs_anisotropy, r_eff=None,
theta_E=None, gamma=None):
"""
sigma**2 = Dd/Dds * c**2 * J(kwargs_lens, kwargs_light, anisotropy)
(Equation 4.11 in Birrer et al. 2016 or Equation 6 in Birrer et al. 2019) J() is a dimensionless and
cosmological independent quantity only depending on angular units. This function returns J given the lens
and light parameters and the anisotropy choice without an external mass sheet correction.
This routine computes the IFU map of the kinematic quantities.
:param kwargs_lens: lens model keyword arguments
:param kwargs_lens_light: lens light model keyword arguments
:param kwargs_anisotropy: stellar anisotropy keyword arguments
:param r_eff: projected half-light radius of the stellar light associated with the deflector galaxy, optional,
if set to None will be computed in this function with default settings that may not be accurate.
:return: dimensionless velocity dispersion (see e.g. Birrer et al. 2016, 2019)
"""
sigma_v_map = self.velocity_dispersion_map(kwargs_lens=kwargs_lens, kwargs_lens_light=kwargs_lens_light,
kwargs_anisotropy=kwargs_anisotropy, r_eff=r_eff, theta_E=theta_E,
gamma=gamma)
sigma_v_map *= 1000 # convert from [km/s] to [m/s]
J_map = sigma_v_map ** 2 * self._lens_cosmo.dds / self._lens_cosmo.ds / const.c ** 2
return J_map
@staticmethod
def ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=0, kappa_ds=0, kappa_d=0):
"""
Time-delay distance in units of Mpc from the modeled Fermat potential and measured time delay from an image pair.
:param d_fermat_model: relative Fermat potential between two images from the same source in units arcsec^2
:param dt_measured: measured time delay between the same image pair in units of days
:return: D_dt, time-delay distance
"""
D_dt_model = dt_measured * const.day_s * const.c / const.Mpc / d_fermat_model / const.arcsec ** 2
D_dt = D_dt_model * (1-kappa_ds) / (1 - kappa_s) / (1 - kappa_d)
return D_dt
@staticmethod
def ds_dds_from_kinematics(sigma_v, J, kappa_s=0, kappa_ds=0):
"""
computes the estimate of the ratio of angular diameter distances Ds/Dds from the kinematic estimate of the lens
and the measured dispersion.
:param sigma_v: velocity dispersion [km/s]
:param J: dimensionless kinematic constraint (see Birrer et al. 2016, 2019)
:return: Ds/Dds
"""
ds_dds_model = (sigma_v * 1000) ** 2 / const.c ** 2 / J
ds_dds = ds_dds_model * (1 - kappa_ds) / (1 - kappa_s)
return ds_dds
def ddt_dd_from_time_delay_and_kinematics(self, d_fermat_model, dt_measured, sigma_v_measured, J, kappa_s=0,
kappa_ds=0, kappa_d=0):
"""
:param d_fermat_model: relative Fermat potential in units arcsec^2
:param dt_measured: measured relative time delay [days]
:param sigma_v_measured: 1-sigma Gaussian uncertainty in the measured velocity dispersion
:param J: modeled dimensionless kinematic estimate
:param kappa_s: LOS convergence from observer to source
:param kappa_ds: LOS convergence from deflector to source
:param kappa_d: LOS convergence from observer to deflector
:return: D_dt, D_d
"""
ddt = self.ddt_from_time_delay(d_fermat_model, dt_measured, kappa_s=kappa_s, kappa_ds=kappa_ds, kappa_d=kappa_d)
ds_dds = self.ds_dds_from_kinematics(sigma_v_measured, J, kappa_s=kappa_s, kappa_ds=kappa_ds)
dd = ddt / ds_dds / (1 + self._z_lens)
return ddt, dd
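        # Consistency note: the last step uses the time-delay distance
        # definition D_dt = (1 + z_d) * D_d * D_s / D_ds, so dividing the
        # de-biased D_dt by Ds/Dds and by (1 + z_lens) isolates D_d.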
|
[
"lenstronomy.Cosmo.lens_cosmo.LensCosmo",
"numpy.var",
"astropy.cosmology.default_cosmology.get",
"numpy.mean",
"lenstronomy.Util.class_creator.create_class_instances"
] |
[((2250, 2321), 'lenstronomy.Cosmo.lens_cosmo.LensCosmo', 'LensCosmo', ([], {'z_lens': 'z_lens', 'z_source': 'z_source', 'cosmo': 'self._cosmo_fiducial'}), '(z_lens=z_lens, z_source=z_source, cosmo=self._cosmo_fiducial)\n', (2259, 2321), False, 'from lenstronomy.Cosmo.lens_cosmo import LensCosmo\n'), ((2422, 2542), 'lenstronomy.Util.class_creator.create_class_instances', 'class_creator.create_class_instances', ([], {'all_models': '(True)', 'kwargs_lens_eqn_solver': 'kwargs_lens_eqn_solver'}), '(all_models=True, **kwargs_model,\n kwargs_lens_eqn_solver=kwargs_lens_eqn_solver)\n', (2458, 2542), False, 'from lenstronomy.Util import class_creator\n'), ((5185, 5203), 'numpy.mean', 'np.mean', (['ra_source'], {}), '(ra_source)\n', (5192, 5203), True, 'import numpy as np\n'), ((5225, 5244), 'numpy.mean', 'np.mean', (['dec_source'], {}), '(dec_source)\n', (5232, 5244), True, 'import numpy as np\n'), ((2089, 2112), 'astropy.cosmology.default_cosmology.get', 'default_cosmology.get', ([], {}), '()\n', (2110, 2112), False, 'from astropy.cosmology import default_cosmology\n'), ((4767, 4784), 'numpy.var', 'np.var', (['ra_source'], {}), '(ra_source)\n', (4773, 4784), True, 'import numpy as np\n'), ((4787, 4805), 'numpy.var', 'np.var', (['dec_source'], {}), '(dec_source)\n', (4793, 4805), True, 'import numpy as np\n')]
|
#!/usr/bin/python
#
# Copyright 2018 The boardgame.io Authors
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT.
#
# pylint: disable=invalid-name,multiple-imports,global-statement
# To play against this bot, start the tictactoe server from http://boardgame.io/#/multiplayer
# and start the bot with:
# $ python tictactoebot.py
# (will play player '1' by default)
"""
Boardgame.io python client example: starts a bot with player id '0'
that plays randomly against the other player '1'.
"""
import signal, random, logging
from boardgameio import Bot
class TicTacToeBot(Bot):
"""
Example of use of base class boardgameio.Bot:
- the bot connects to the multiplayer server at construction
- each time it is the bot's turn to play, method 'think' is called
- when game is over, method 'gameover' is called.
"""
log = logging.getLogger('tictactoebot')
def __init__(self):
Bot.__init__(self, server='localhost', port=8000,
options={'game_name': 'default',
'num_players': 2,
'player_id': '1'})
def think(self, G, _ctx):
""" Called when it is this bot's turn to play. """
cells = G['cells']
# choose a random empty cell
idx = -1
        while None in cells:
idx = random.randint(0, len(cells)-1)
if not cells[idx]:
break
self.log.debug('cell chosen: %d', idx)
return self.make_move('clickCell', idx)
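    # NB: if the board were ever full, idx would stay -1 and the move would be
    # invalid; the framework is expected to end the game before that happens.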
def gameover(self, _G, ctx):
""" Called when game is over. """
self.log.info('winner is %s', ctx['gameover'])
running = False
log = logging.getLogger('main')
logging.basicConfig(level=logging.INFO)
def main():
""" Start bot and listen continuously for events. """
log.info('starting bot... (Ctrl-C to stop)')
client = TicTacToeBot()
global running
running = True
while running:
client.listen()
log.info('stopped.')
def stop(_signum, _frame):
""" Stop program. """
log.info('stopping...')
global running
running = False
# start process
if __name__ == '__main__':
signal.signal(signal.SIGINT, stop)
signal.signal(signal.SIGTERM, stop)
main()
|
[
"boardgameio.Bot.__init__",
"logging.basicConfig",
"signal.signal",
"logging.getLogger"
] |
[((1770, 1795), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (1787, 1795), False, 'import signal, random, logging\n'), ((1796, 1835), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1815, 1835), False, 'import signal, random, logging\n'), ((940, 973), 'logging.getLogger', 'logging.getLogger', (['"""tictactoebot"""'], {}), "('tictactoebot')\n", (957, 973), False, 'import signal, random, logging\n'), ((2259, 2293), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'stop'], {}), '(signal.SIGINT, stop)\n', (2272, 2293), False, 'import signal, random, logging\n'), ((2298, 2333), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'stop'], {}), '(signal.SIGTERM, stop)\n', (2311, 2333), False, 'import signal, random, logging\n'), ((1007, 1130), 'boardgameio.Bot.__init__', 'Bot.__init__', (['self'], {'server': '"""localhost"""', 'port': '(8000)', 'options': "{'game_name': 'default', 'num_players': 2, 'player_id': '1'}"}), "(self, server='localhost', port=8000, options={'game_name':\n 'default', 'num_players': 2, 'player_id': '1'})\n", (1019, 1130), False, 'from boardgameio import Bot\n')]
|
import os
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def save_parameters(options, filename):
with open(filename, "w+") as f:
for key in options.keys():
f.write("{}: {}\n".format(key, options[key]))
# https://gist.github.com/KirillVladimirov/005ec7f762293d2321385580d3dbe335
def seed_everything(seed=1234):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
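    # note: uncommenting the two CUDA lines above (with cudnn.benchmark left
    # off) is the usual extra step for reproducible GPU runs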
# https://blog.csdn.net/folk_/article/details/80208557
def train_val_split(logs_meta, labels, val_ratio=0.1):
total_num = len(labels)
train_index = list(range(total_num))
train_logs = {}
val_logs = {}
for key in logs_meta.keys():
train_logs[key] = []
val_logs[key] = []
train_labels = []
val_labels = []
val_num = int(total_num * val_ratio)
    for i in range(val_num):
        random_index = int(np.random.uniform(0, len(train_index)))
        # map the position within train_index back to the original sample index;
        # indexing logs_meta with random_index directly would re-sample entries
        # whose positions shifted after earlier deletions
        sample_index = train_index[random_index]
        for key in logs_meta.keys():
            val_logs[key].append(logs_meta[key][sample_index])
        val_labels.append(labels[sample_index])
        del train_index[random_index]
for i in range(total_num - val_num):
for key in logs_meta.keys():
train_logs[key].append(logs_meta[key][train_index[i]])
train_labels.append(labels[train_index[i]])
return train_logs, train_labels, val_logs, val_labels
def plot_train_valid_loss(save_dir):
train_loss = pd.read_csv(save_dir + "train_log.csv")
valid_loss = pd.read_csv(save_dir + "valid_log.csv")
sns.lineplot(x="epoch",y="loss" , data = train_loss, label="train loss")
sns.lineplot(x="epoch",y="loss" , data = valid_loss, label="valid loss")
plt.title("epoch vs train loss vs valid loss")
plt.legend
plt.savefig(save_dir+"train_valid_loss.png")
plt.show()
print("plot done")
def plot_sequence_len(save_dir):
normal_seq_len = []
with open(save_dir+"train", "r") as f:
for line in f.readlines():
line = line.split()
normal_seq_len.append(len(line))
with open(save_dir+"test_normal", 'r') as f:
for line in f.readlines():
normal_seq_len.append(len(line.split()))
abnormal_seq_line = []
with open(save_dir+"test_abnormal", "r") as f:
for line in f.readlines():
abnormal_seq_line.append(len(line.split()))
sns.distplot(normal_seq_len, label="normal")
    sns.distplot(abnormal_seq_line, label="abnormal")
plt.title("session length distribution")
plt.xlabel("num of log keys in a session")
plt.legend()
plt.show()
plt.close()
|
[
"matplotlib.pyplot.title",
"seaborn.lineplot",
"numpy.random.seed",
"matplotlib.pyplot.show",
"pandas.read_csv",
"torch.manual_seed",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.close",
"random.seed",
"seaborn.distplot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((469, 486), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (480, 486), False, 'import random\n'), ((536, 556), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (550, 556), True, 'import numpy as np\n'), ((561, 584), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (578, 584), False, 'import torch\n'), ((1655, 1694), 'pandas.read_csv', 'pd.read_csv', (["(save_dir + 'train_log.csv')"], {}), "(save_dir + 'train_log.csv')\n", (1666, 1694), True, 'import pandas as pd\n'), ((1712, 1751), 'pandas.read_csv', 'pd.read_csv', (["(save_dir + 'valid_log.csv')"], {}), "(save_dir + 'valid_log.csv')\n", (1723, 1751), True, 'import pandas as pd\n'), ((1756, 1826), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""epoch"""', 'y': '"""loss"""', 'data': 'train_loss', 'label': '"""train loss"""'}), "(x='epoch', y='loss', data=train_loss, label='train loss')\n", (1768, 1826), True, 'import seaborn as sns\n'), ((1833, 1903), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': '"""epoch"""', 'y': '"""loss"""', 'data': 'valid_loss', 'label': '"""valid loss"""'}), "(x='epoch', y='loss', data=valid_loss, label='valid loss')\n", (1845, 1903), True, 'import seaborn as sns\n'), ((1910, 1956), 'matplotlib.pyplot.title', 'plt.title', (['"""epoch vs train loss vs valid loss"""'], {}), "('epoch vs train loss vs valid loss')\n", (1919, 1956), True, 'import matplotlib.pyplot as plt\n'), ((1976, 2022), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(save_dir + 'train_valid_loss.png')"], {}), "(save_dir + 'train_valid_loss.png')\n", (1987, 2022), True, 'import matplotlib.pyplot as plt\n'), ((2025, 2035), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2033, 2035), True, 'import matplotlib.pyplot as plt\n'), ((2583, 2627), 'seaborn.distplot', 'sns.distplot', (['normal_seq_len'], {'label': '"""normal"""'}), "(normal_seq_len, label='normal')\n", (2595, 2627), True, 'import seaborn as sns\n'), ((2632, 2681), 'seaborn.distplot', 'sns.distplot', (['abnormal_seq_line'], {'label': '"""abnormal"""'}), "(abnormal_seq_line, label='abnormal')\n", (2644, 2681), True, 'import seaborn as sns\n'), ((2688, 2728), 'matplotlib.pyplot.title', 'plt.title', (['"""session length distribution"""'], {}), "('session length distribution')\n", (2697, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2733, 2775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""num of log keys in a session"""'], {}), "('num of log keys in a session')\n", (2743, 2775), True, 'import matplotlib.pyplot as plt\n'), ((2780, 2792), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2797, 2807), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2805, 2807), True, 'import matplotlib.pyplot as plt\n'), ((2812, 2823), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2821, 2823), True, 'import matplotlib.pyplot as plt\n')]
|
# NEVER CALL THIS FROM A SERVICE OR MODEL, ONLY FROM VIEWS/API VIEWS/URLS
import datetime
import logging
import pytz
from django.http import HttpResponse
from django.views.generic import View
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from channels import service as channels_service
from shows import service as shows_service
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ShowView(View):
def get_default_channel_context(self, request, *args, **kwargs):
context = {'channel': self.channel,
'is_channel_admin': self.is_channel_admin,
'is_channel_owner': self.is_channel_owner,
'current_show': self.current_show}
# Get the channels that the user is an admin of
context['admin_channels'] = channels_service.get_channels_by_admin(
getattr(self.request.user, 'id'))
# If they aren't an admin of any channels
if not context['admin_channels']:
# Get the channels that the user is part of
context['user_channels'] = channels_service.get_channels_by_user(
getattr(self.request.user, 'id'))
if self.current_show:
# Get the vote types by a list of ids
vote_types = channels_service.fetch_vote_types_by_ids(
self.current_show.vote_types())
# Get the suggestion pools for the current show if it exists
context['suggestion_pools'] = shows_service.get_vote_types_suggestion_pools(
vote_types)
return context
def dispatch(self, request, *args, **kwargs):
# Get the channel ids from the kwargs
channel_name = kwargs.get('channel_name')
channel_id = kwargs.get('channel_id')
# If a channel name was given
if channel_name:
self.channel = channels_service.channel_or_404(channel_name)
# If a channel id was given
elif channel_id:
self.channel = channels_service.channel_or_404(channel_id,
channel_id=True)
else:
self.channel = None
# If a channel was found
if self.channel:
# If the channel has been "deleted"
if self.channel.archived:
return redirect("dumpedit_home")
# See if the user is an admin
self.is_channel_admin = channels_service.check_is_channel_admin(self.channel,
getattr(self.request.user, 'id'))
# See if the user is a channel owner
self.is_channel_owner = channels_service.check_is_channel_owner(self.channel,
getattr(self.request.user, 'id'))
# Determine if there is a current show for this channel
self.current_show = shows_service.get_current_show(self.channel.id)
else:
self.is_channel_admin = False
self.is_channel_owner = False
self.current_show = None
# If the current show is locked and the user isn't an admin
if self.current_show and getattr(self.current_show, 'locked') \
and not self.is_channel_admin:
url_kwargs = {'channel_name': self.channel.name,
'show_id': self.current_show.id}
# Get the live vote path
live_vote_path = reverse('show_live_vote', kwargs=url_kwargs)
# Get the vote receiver path
vote_receiver_path = reverse('show_vote_receiver', kwargs=url_kwargs)
# If the user isn't voting, redirect them to voting
            if request.path not in [live_vote_path, vote_receiver_path]:
return redirect(live_vote_path)
return super(ShowView, self).dispatch(request, *args, **kwargs)
def start_new_interval(show, vote_type):
"""
Start the next interval for the show of the given vote type
:return: None
"""
# Get the unused intervals for this vote type
unused_intervals = shows_service.get_unused_interval_list(show.id, vote_type)
logger.info(unused_intervals)
# Set the vote type's next interval start
next_interval = channels_service.start_next_interval(unused_intervals, vote_type)
# Set the start of the vote type's current interval to now
vote_type.current_vote_init = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
vote_type.save()
# if it's a players only vote type
if vote_type.players_only:
suggestions = []
# Otherwise, fetch a randomized (yet sorted) amount of suggestions
else:
suggestions = shows_service.fetch_randomized_suggestions(show.id,
vote_type.suggestion_pool_id,
vote_type.options,
no_randomize=vote_type.no_randomize)
# Set the voting options
# NOTE: Live Votes get deleted for repeateable
shows_service.set_voting_options(show,
vote_type,
next_interval,
suggestions=suggestions)
# If this isn't an interval vote and the vote type has player options
if not vote_type.intervals and vote_type.player_options:
# Set a random player for the vote options
shows_service.set_show_interval_random_player(show,
vote_type,
vote_type.current_interval)
# Make sure the show is locked
show.locked = True
# Set the show's current vote type
show.current_vote_type = vote_type
show.save()
def robots_txt(request):
live_txt = """User-agent: *
Allow: /"""
return HttpResponse(live_txt, content_type='text/plain')
def ssl_verify(request):
live_txt = """57420B94164572C12DD508A3AD1A6423258CD21F
comodoca.com"""
return HttpResponse(live_txt, content_type='text/plain')
def loader_io(request):
robots_txt = "loaderio-77a54235107fba986089b0dea6bdc663"
return HttpResponse(robots_txt, content_type='text/plain')
|
[
"shows.service.fetch_randomized_suggestions",
"shows.service.get_unused_interval_list",
"logging.basicConfig",
"django.http.HttpResponse",
"django.core.urlresolvers.reverse",
"django.shortcuts.redirect",
"shows.service.set_show_interval_random_player",
"shows.service.set_voting_options",
"datetime.datetime.utcnow",
"channels.service.start_next_interval",
"shows.service.get_vote_types_suggestion_pools",
"channels.service.channel_or_404",
"logging.getLogger",
"shows.service.get_current_show"
] |
[((371, 410), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (390, 410), False, 'import logging\n'), ((420, 447), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (437, 447), False, 'import logging\n'), ((4308, 4366), 'shows.service.get_unused_interval_list', 'shows_service.get_unused_interval_list', (['show.id', 'vote_type'], {}), '(show.id, vote_type)\n', (4346, 4366), True, 'from shows import service as shows_service\n'), ((4467, 4532), 'channels.service.start_next_interval', 'channels_service.start_next_interval', (['unused_intervals', 'vote_type'], {}), '(unused_intervals, vote_type)\n', (4503, 4532), True, 'from channels import service as channels_service\n'), ((5318, 5411), 'shows.service.set_voting_options', 'shows_service.set_voting_options', (['show', 'vote_type', 'next_interval'], {'suggestions': 'suggestions'}), '(show, vote_type, next_interval,\n suggestions=suggestions)\n', (5350, 5411), True, 'from shows import service as shows_service\n'), ((6146, 6195), 'django.http.HttpResponse', 'HttpResponse', (['live_txt'], {'content_type': '"""text/plain"""'}), "(live_txt, content_type='text/plain')\n", (6158, 6195), False, 'from django.http import HttpResponse\n'), ((6309, 6358), 'django.http.HttpResponse', 'HttpResponse', (['live_txt'], {'content_type': '"""text/plain"""'}), "(live_txt, content_type='text/plain')\n", (6321, 6358), False, 'from django.http import HttpResponse\n'), ((6457, 6508), 'django.http.HttpResponse', 'HttpResponse', (['robots_txt'], {'content_type': '"""text/plain"""'}), "(robots_txt, content_type='text/plain')\n", (6469, 6508), False, 'from django.http import HttpResponse\n'), ((4901, 5043), 'shows.service.fetch_randomized_suggestions', 'shows_service.fetch_randomized_suggestions', (['show.id', 'vote_type.suggestion_pool_id', 'vote_type.options'], {'no_randomize': 'vote_type.no_randomize'}), '(show.id, vote_type.\n suggestion_pool_id, vote_type.options, no_randomize=vote_type.no_randomize)\n', (4943, 5043), True, 'from shows import service as shows_service\n'), ((5713, 5808), 'shows.service.set_show_interval_random_player', 'shows_service.set_show_interval_random_player', (['show', 'vote_type', 'vote_type.current_interval'], {}), '(show, vote_type, vote_type.\n current_interval)\n', (5758, 5808), True, 'from shows import service as shows_service\n'), ((1620, 1677), 'shows.service.get_vote_types_suggestion_pools', 'shows_service.get_vote_types_suggestion_pools', (['vote_types'], {}), '(vote_types)\n', (1665, 1677), True, 'from shows import service as shows_service\n'), ((2033, 2078), 'channels.service.channel_or_404', 'channels_service.channel_or_404', (['channel_name'], {}), '(channel_name)\n', (2064, 2078), True, 'from channels import service as channels_service\n'), ((3117, 3164), 'shows.service.get_current_show', 'shows_service.get_current_show', (['self.channel.id'], {}), '(self.channel.id)\n', (3147, 3164), True, 'from shows import service as shows_service\n'), ((3669, 3713), 'django.core.urlresolvers.reverse', 'reverse', (['"""show_live_vote"""'], {'kwargs': 'url_kwargs'}), "('show_live_vote', kwargs=url_kwargs)\n", (3676, 3713), False, 'from django.core.urlresolvers import reverse\n'), ((3788, 3836), 'django.core.urlresolvers.reverse', 'reverse', (['"""show_vote_receiver"""'], {'kwargs': 'url_kwargs'}), "('show_vote_receiver', kwargs=url_kwargs)\n", (3795, 3836), False, 'from django.core.urlresolvers import reverse\n'), ((4630, 4656), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (4654, 4656), False, 'import datetime\n'), ((2167, 2227), 'channels.service.channel_or_404', 'channels_service.channel_or_404', (['channel_id'], {'channel_id': '(True)'}), '(channel_id, channel_id=True)\n', (2198, 2227), True, 'from channels import service as channels_service\n'), ((2500, 2525), 'django.shortcuts.redirect', 'redirect', (['"""dumpedit_home"""'], {}), "('dumpedit_home')\n", (2508, 2525), False, 'from django.shortcuts import redirect\n'), ((3997, 4021), 'django.shortcuts.redirect', 'redirect', (['live_vote_path'], {}), '(live_vote_path)\n', (4005, 4021), False, 'from django.shortcuts import redirect\n')]
|
from django.contrib import admin
from adminsortable.admin import SortableAdmin
from merlin.music.models import Song, Category
# Register your models here.
class SongAdmin(SortableAdmin):
list_display = ['__str__', 'artist', 'category', 'fan_votes']
admin.site.register(Category, SortableAdmin)
admin.site.register(Song, SongAdmin)
|
[
"django.contrib.admin.site.register"
] |
[((257, 301), 'django.contrib.admin.site.register', 'admin.site.register', (['Category', 'SortableAdmin'], {}), '(Category, SortableAdmin)\n', (276, 301), False, 'from django.contrib import admin\n'), ((302, 338), 'django.contrib.admin.site.register', 'admin.site.register', (['Song', 'SongAdmin'], {}), '(Song, SongAdmin)\n', (321, 338), False, 'from django.contrib import admin\n')]
|
"""
Miscellaneous utility functions used in other modules.
"""
import glob
from typing import Dict, List, Optional, Tuple, Union
import networkx as nx
import pandas as pd
def get_paths_to_files_in_directory(
directory: str, file_dsc: str = "*"
) -> List[str]:
"""Get paths of all files matching file_dsc in directory"""
template = f"{directory}{file_dsc}"
file_paths = glob.glob(template)
return file_paths
def get_paths_to_dirs_in_directory(
directory: str, folder_dsc: str = "*"
) -> List[str]:
"""Get paths of all directories matching file_dsc in directory"""
template = f"{directory}{folder_dsc}"
dir_paths = glob.glob(template)
# normalize directory paths
dir_paths = [dir_path + "/" for dir_path in dir_paths]
return dir_paths
def labeled_blockmodel(g, partition):
"""
Perform blockmodel transformation on graph *g*
and partition represented by dictionary *partition*.
Values of *partition* are used to partition the graph.
Keys of *partition* are used to label the nodes of the
new graph.
"""
new_g = nx.quotient_graph(g, list(partition.values()), relabel=True)
labels = dict(enumerate(partition.keys()))
new_g = nx.relabel_nodes(new_g, labels)
return new_g
def repartition_dataframe(df, partition):
"""
Create a new dataframe with the same index as
argument dataframe *df*, where columns are
the keys of dictionary *partition*.
The data of the returned dataframe are the
combinations of columns listed in the keys of
*partition*
"""
df2 = pd.DataFrame(index=df.index)
for k, v in list(partition.items()):
df2[k] = df[v[0]]
for i in range(len(v) - 1):
df2[k] = df2[k] + df[v[i]]
return df2
def get_common_head(str1, str2, delimiter=None):
try:
if str1 is None or str2 is None:
return "", 0
else:
            # TODO: this control flow is ugly; it should be cleaned up
if delimiter is not None:
dstr1 = str1.split(delimiter)
dstr2 = str2.split(delimiter)
for i in range(len(dstr1)):
if dstr1[:i] != dstr2[:i]:
# print list1[:i], list2[:i]
return delimiter.join(dstr1[: i - 1]), i - 1
return str1, i
else:
for i in range(len(str1)):
if str1[:i] != str2[:i]:
# print list1[:i], list2[:i]
return str1[: i - 1], i - 1
                return str1[:i], i
return "", 0
except Exception as e:
print(e)
return "", 0
def get_common_foot(str1, str2, delimiter=None):
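    # the common suffix of two strings is the reversed common prefix of their reversals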
head, ln = get_common_head(str1[::-1], str2[::-1], delimiter=delimiter)
return head[::-1], ln
# turn these into automated tests
# print get_common_head('abcdefghijklmnop','abcde12345')
# print get_common_head('abcdefghijklmnop',None)
# print get_common_head('abcde\nfghijk\nlmnop\nqrst','abcde\nfghijk\nlmnopqr\nst',delimiter='\n')
def remove_quoted(mess):
message = [
line
for line in mess.split("\n")
if len(line) != 0 and line[0] != ">" and line[-6:] != "wrote:"
]
new = "\n".join(message)
return new
## remove this when clean_message is added to generic libraries
def clean_message(mess):
if mess is None:
mess = ""
mess = remove_quoted(mess)
mess = mess.strip()
return mess
# From here:
# https://stackoverflow.com/questions/46217529/pandas-datetimeindex-frequency-is-none-and-cant-be-set
def add_freq(idx, freq=None):
"""Add a frequency attribute to idx, through inference or directly.
Returns a copy. If `freq` is None, it is inferred.
"""
idx = idx.copy()
if freq is None:
if idx.freq is None:
freq = pd.infer_freq(idx)
else:
return idx
idx.freq = pd.tseries.frequencies.to_offset(freq)
if idx.freq is None:
raise AttributeError(
"no discernible frequency found to `idx`. Specify"
" a frequency string with `freq`."
)
return idx
|
[
"pandas.DataFrame",
"pandas.tseries.frequencies.to_offset",
"pandas.infer_freq",
"networkx.relabel_nodes",
"glob.glob"
] |
[((389, 408), 'glob.glob', 'glob.glob', (['template'], {}), '(template)\n', (398, 408), False, 'import glob\n'), ((655, 674), 'glob.glob', 'glob.glob', (['template'], {}), '(template)\n', (664, 674), False, 'import glob\n'), ((1216, 1247), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['new_g', 'labels'], {}), '(new_g, labels)\n', (1232, 1247), True, 'import networkx as nx\n'), ((1586, 1614), 'pandas.DataFrame', 'pd.DataFrame', ([], {'index': 'df.index'}), '(index=df.index)\n', (1598, 1614), True, 'import pandas as pd\n'), ((3928, 3966), 'pandas.tseries.frequencies.to_offset', 'pd.tseries.frequencies.to_offset', (['freq'], {}), '(freq)\n', (3960, 3966), True, 'import pandas as pd\n'), ((3857, 3875), 'pandas.infer_freq', 'pd.infer_freq', (['idx'], {}), '(idx)\n', (3870, 3875), True, 'import pandas as pd\n')]
|
from lightspider import Spider, light, DEFAULT_PROXY
from lxml import etree
import re
# Define the page-parsing function
@light
def parser(response):
"""
必要的页面解析函数
:param response: 执行Request请求后得到的Response对象,可以自由选择xpath或BeautifulSoup等方式解析处理
:return: type:tuple:(解析得到的数据对象, 解析页面得到的新的tasks(要么为List,要么为None)
"""
html = etree.HTML(response.text)
info = html.xpath('//div[@class="col-md-8"]')[0]
words = [re.sub(r'\(\d+\)', '', item.xpath('string(.)')) for item in info.xpath('./b')[:-1]]
mean = info.xpath('./a/text()')[0]
return {
'mean': mean,
'words': words
}, None
# Script that generates the task list
# tasks = []
# base_url = 'https://www.cilin.org/jyc/b_{}.html'
# for i in range(1, 9996):
# tasks.append(i)
#
# spider = Spider(base_url=base_url, style='json', save_path=r'D:\Data\NLP\corpus\jyc')
tasks = []
for i in range(1, 30):
tasks.append(i)
base_url = 'https://www.cilin.org/jyc/b_{}.html'
save_format = 'json'
spider = Spider(base_url=base_url, save_format=save_format, save_path=r'D:\Data\NLP\corpus\test', proxy=DEFAULT_PROXY)
if __name__ == '__main__':
spider.run(tasks, parser)
|
[
"lightspider.Spider",
"lxml.etree.HTML"
] |
[((958, 1075), 'lightspider.Spider', 'Spider', ([], {'base_url': 'base_url', 'save_format': 'save_format', 'save_path': '"""D:\\\\Data\\\\NLP\\\\corpus\\\\test"""', 'proxy': 'DEFAULT_PROXY'}), "(base_url=base_url, save_format=save_format, save_path=\n 'D:\\\\Data\\\\NLP\\\\corpus\\\\test', proxy=DEFAULT_PROXY)\n", (964, 1075), False, 'from lightspider import Spider, light, DEFAULT_PROXY\n'), ((318, 343), 'lxml.etree.HTML', 'etree.HTML', (['response.text'], {}), '(response.text)\n', (328, 343), False, 'from lxml import etree\n')]
|
import numpy
from numpy import dot, sqrt
def binarize_vector(u):
return u > 0
def cosine_distance(u, v, binary=False):
"""Return the cosine distance between two vectors."""
if binary:
return cosine_distance_binary(u, v)
return 1.0 - dot(u, v) / (sqrt(dot(u, u)) * sqrt(dot(v, v)))
def cosine_distance_binary(u, v):
u = binarize_vector(u)
v = binarize_vector(v)
return (1.0 * (u * v).sum()) / numpy.sqrt((u.sum() * v.sum()))
def euclidean_distance(u, v):
"""Return the euclidean distance between two vectors."""
diff = u - v
return sqrt(dot(diff, diff))
def cityblock_distance(u, v):
"""Return the Manhattan/City Block distance between two vectors."""
return abs(u - v).sum()
def canberra_distance(u, v):
    """Return the Canberra distance between two vectors."""
    # Canberra distance: sum_i |u_i - v_i| / (|u_i| + |v_i|)
    return numpy.sum(abs(u - v) / (abs(u) + abs(v)))
def correlation(u, v):
"""Return the correlation distance between two vectors."""
u_var = u - u.mean()
v_var = v - v.mean()
return 1.0 - dot(
u_var, v_var) / (sqrt(dot(u_var, u_var)) * sqrt(dot(v_var, v_var)))
def dice(u, v):
"""Return the dice coefficient between two vectors."""
u = u > 0
v = v > 0
return (2.0 * (u * v).sum()) / (u.sum() + v.sum())
def jaccard_distance(u, v):
"""return jaccard distance"""
u = numpy.asarray(u)
v = numpy.asarray(v)
return (numpy.double(
numpy.bitwise_and((u != v), numpy.bitwise_or(u != 0, v != 0)).sum()) /
numpy.double(numpy.bitwise_or(u != 0, v != 0).sum()))
def jaccard(u, v):
"""Return the Jaccard coefficient between two vectors."""
u = u > 0
v = v > 0
return (1.0 * (u * v).sum()) / (u + v).sum()
|
[
"numpy.dot",
"numpy.asarray",
"numpy.bitwise_or"
] |
[((1346, 1362), 'numpy.asarray', 'numpy.asarray', (['u'], {}), '(u)\n', (1359, 1362), False, 'import numpy\n'), ((1371, 1387), 'numpy.asarray', 'numpy.asarray', (['v'], {}), '(v)\n', (1384, 1387), False, 'import numpy\n'), ((592, 607), 'numpy.dot', 'dot', (['diff', 'diff'], {}), '(diff, diff)\n', (595, 607), False, 'from numpy import dot, sqrt\n'), ((261, 270), 'numpy.dot', 'dot', (['u', 'v'], {}), '(u, v)\n', (264, 270), False, 'from numpy import dot, sqrt\n'), ((1033, 1050), 'numpy.dot', 'dot', (['u_var', 'v_var'], {}), '(u_var, v_var)\n', (1036, 1050), False, 'from numpy import dot, sqrt\n'), ((279, 288), 'numpy.dot', 'dot', (['u', 'u'], {}), '(u, u)\n', (282, 288), False, 'from numpy import dot, sqrt\n'), ((297, 306), 'numpy.dot', 'dot', (['v', 'v'], {}), '(v, v)\n', (300, 306), False, 'from numpy import dot, sqrt\n'), ((1068, 1085), 'numpy.dot', 'dot', (['u_var', 'u_var'], {}), '(u_var, u_var)\n', (1071, 1085), False, 'from numpy import dot, sqrt\n'), ((1094, 1111), 'numpy.dot', 'dot', (['v_var', 'v_var'], {}), '(v_var, v_var)\n', (1097, 1111), False, 'from numpy import dot, sqrt\n'), ((1518, 1550), 'numpy.bitwise_or', 'numpy.bitwise_or', (['(u != 0)', '(v != 0)'], {}), '(u != 0, v != 0)\n', (1534, 1550), False, 'import numpy\n'), ((1450, 1482), 'numpy.bitwise_or', 'numpy.bitwise_or', (['(u != 0)', '(v != 0)'], {}), '(u != 0, v != 0)\n', (1466, 1482), False, 'import numpy\n')]
|
import random
import logging
from . import scheme
__all__ = ('MTProtoSessionData',)
log = logging.getLogger(__package__)
class MTProtoSessionData:
def __init__(self, id):
if id is None:
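            # draw the 64-bit session id from the OS CSPRNG rather than the default Mersenne Twister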
id = random.SystemRandom().getrandbits(64)
log.debug('no session_id provided, generated new session_id: {}'.format(id))
self._id = scheme.int64_c(id)
self._auth_keys = dict()
|
[
"random.SystemRandom",
"logging.getLogger"
] |
[((100, 130), 'logging.getLogger', 'logging.getLogger', (['__package__'], {}), '(__package__)\n', (117, 130), False, 'import logging\n'), ((235, 256), 'random.SystemRandom', 'random.SystemRandom', ([], {}), '()\n', (254, 256), False, 'import random\n')]
|
__title__ = "sql2statsd"
__version__ = "1.2.5"
import errno
import os
import click
import psycopg2
from statsd import StatsClient
from sql2statsd.utils import YamlFile, log
APP_DIR = click.get_app_dir(__title__)
def ensure_app_dir():
log("Ensuring app dir exists...")
try:
os.makedirs(APP_DIR)
except OSError as e:
        if e.errno != errno.EEXIST:
raise
else:
log("Created {}.", APP_DIR)
@click.command(context_settings={
"auto_envvar_prefix": __title__.upper(),
"help_option_names": ["-h", "--help"]
})
@click.option(
"--db-servers",
type=YamlFile(),
default=os.path.join(APP_DIR, "db-servers.yaml"),
show_default=True
)
@click.option(
"--statsd-servers",
type=YamlFile(),
default=os.path.join(APP_DIR, "statsd-servers.yaml"),
show_default=True
)
@click.argument(
"job",
type=YamlFile()
)
def main(db_servers, statsd_servers, job):
"""
`sql2statsd` is a CLI utility that queries SQL database and posts results
to StatsD based on provided YAML config files.
"""
ensure_app_dir()
log("Started job execution.")
log("Job: {}", job)
db_server = db_servers[job["db_server"]]
conn = psycopg2.connect(
host=db_server["host"],
port=db_server["port"],
user=db_server["user"],
password=db_server["password"],
dbname=job["db_name"]
)
statsd_server = statsd_servers[job["statsd_server"]]
statsd = StatsClient(
host=statsd_server["host"],
port=statsd_server["port"]
)
log("Querying...")
with conn, conn.cursor() as cur:
cur.execute(job["query"])
assert cur.rowcount == 1, "Query must return exactly one row."
row = cur.fetchone()
assert len(row) == 1, "Query must return exactly one column."
log("Result: {}.", row[0])
log("Sending stats...")
statsd.gauge(job["stat"], row[0])
log("Closing db connection...")
conn.close()
log("Finished job execution.")
|
[
"statsd.StatsClient",
"os.makedirs",
"click.get_app_dir",
"sql2statsd.utils.log",
"sql2statsd.utils.YamlFile",
"os.path.join",
"psycopg2.connect"
] |
[((174, 202), 'click.get_app_dir', 'click.get_app_dir', (['__title__'], {}), '(__title__)\n', (191, 202), False, 'import click\n'), ((231, 264), 'sql2statsd.utils.log', 'log', (['"""Ensuring app dir exists..."""'], {}), "('Ensuring app dir exists...')\n", (234, 264), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1097, 1126), 'sql2statsd.utils.log', 'log', (['"""Started job execution."""'], {}), "('Started job execution.')\n", (1100, 1126), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1131, 1150), 'sql2statsd.utils.log', 'log', (['"""Job: {}"""', 'job'], {}), "('Job: {}', job)\n", (1134, 1150), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1208, 1356), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': "db_server['host']", 'port': "db_server['port']", 'user': "db_server['user']", 'password': "db_server['password']", 'dbname': "job['db_name']"}), "(host=db_server['host'], port=db_server['port'], user=\n db_server['user'], password=db_server['password'], dbname=job['db_name'])\n", (1224, 1356), False, 'import psycopg2\n'), ((1469, 1536), 'statsd.StatsClient', 'StatsClient', ([], {'host': "statsd_server['host']", 'port': "statsd_server['port']"}), "(host=statsd_server['host'], port=statsd_server['port'])\n", (1480, 1536), False, 'from statsd import StatsClient\n'), ((1564, 1582), 'sql2statsd.utils.log', 'log', (['"""Querying..."""'], {}), "('Querying...')\n", (1567, 1582), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1828, 1854), 'sql2statsd.utils.log', 'log', (['"""Result: {}."""', 'row[0]'], {}), "('Result: {}.', row[0])\n", (1831, 1854), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1860, 1883), 'sql2statsd.utils.log', 'log', (['"""Sending stats..."""'], {}), "('Sending stats...')\n", (1863, 1883), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1927, 1958), 'sql2statsd.utils.log', 'log', (['"""Closing db connection..."""'], {}), "('Closing db connection...')\n", (1930, 1958), False, 'from sql2statsd.utils import YamlFile, log\n'), ((1981, 2011), 'sql2statsd.utils.log', 'log', (['"""Finished job execution."""'], {}), "('Finished job execution.')\n", (1984, 2011), False, 'from sql2statsd.utils import YamlFile, log\n'), ((282, 302), 'os.makedirs', 'os.makedirs', (['APP_DIR'], {}), '(APP_DIR)\n', (293, 302), False, 'import os\n'), ((403, 430), 'sql2statsd.utils.log', 'log', (['"""Created {}."""', 'APP_DIR'], {}), "('Created {}.', APP_DIR)\n", (406, 430), False, 'from sql2statsd.utils import YamlFile, log\n'), ((601, 611), 'sql2statsd.utils.YamlFile', 'YamlFile', ([], {}), '()\n', (609, 611), False, 'from sql2statsd.utils import YamlFile, log\n'), ((625, 665), 'os.path.join', 'os.path.join', (['APP_DIR', '"""db-servers.yaml"""'], {}), "(APP_DIR, 'db-servers.yaml')\n", (637, 665), False, 'import os\n'), ((739, 749), 'sql2statsd.utils.YamlFile', 'YamlFile', ([], {}), '()\n', (747, 749), False, 'from sql2statsd.utils import YamlFile, log\n'), ((763, 807), 'os.path.join', 'os.path.join', (['APP_DIR', '"""statsd-servers.yaml"""'], {}), "(APP_DIR, 'statsd-servers.yaml')\n", (775, 807), False, 'import os\n'), ((870, 880), 'sql2statsd.utils.YamlFile', 'YamlFile', ([], {}), '()\n', (878, 880), False, 'from sql2statsd.utils import YamlFile, log\n')]
|
import sympy as sp, math as m
x = sp.symbols('x')
sp.init_printing(use_unicode=True)
#f=sp.Lambda(x, x*sp.cos(x)-x**2*sp.sin(x))
#f=sp.Lambda(x, sp.exp(sp.sin(x)**2-2))
"""
def f(x):
#return x*m.cos(x)-x**2*m.sin(x)
return m.exp(3*x)*m.sin(3*x)/(x**4+1)
"""
def trapecio_compuesta(f, a, b, n):
h = (b-a)/n
result = sp.N(f(a)+f(b))/2
for i in range(1, n):
result += sp.N(f(a+i*h))
result *= h
return result
"""
a, b = 0,2
n = 8
result = trapecio_compuesta(f, a, b, n)
print(result)
# Error
h = (b-a)/n
cota_2 = 0.73576
print("Error bound: "+str(abs((b-a)*h**2/12*cota_2)))
"""
# Exam 2
f=sp.Lambda(x, (sp.cos(sp.pi*x)+1)**(10/3))
a, b = 0, 1
solution = 3.000492123714049
n = 2
for _ in range(6):
result = trapecio_compuesta(f, a, b, n)
print(str(n)+ " subintervalos:")
print("\t"+str(result))
print("\tError:" + str(abs(result-solution)))
#print("\tError/4:" + str(abs(result-solution)/4))
n*=2
|
[
"sympy.symbols",
"sympy.cos",
"sympy.init_printing"
] |
[((35, 50), 'sympy.symbols', 'sp.symbols', (['"""x"""'], {}), "('x')\n", (45, 50), True, 'import sympy as sp, math as m\n'), ((51, 85), 'sympy.init_printing', 'sp.init_printing', ([], {'use_unicode': '(True)'}), '(use_unicode=True)\n', (67, 85), True, 'import sympy as sp, math as m\n'), ((680, 697), 'sympy.cos', 'sp.cos', (['(sp.pi * x)'], {}), '(sp.pi * x)\n', (686, 697), True, 'import sympy as sp, math as m\n')]
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
from unittest import TestCase
from thunderdome.gremlin import parse
class GroovyScannerTest(TestCase):
"""
Test Groovy language scanner
"""
def test_parsing_complicated_function(self):
groovy_file = os.path.join(os.path.dirname(__file__), 'groovy_test_model.groovy')
result = parse(groovy_file)
assert len(result[6].body.split('\n')) == 8
result_map = {x.name: x for x in result}
assert 'get_self' in result_map
assert 'return_value' in result_map
assert 'long_func' in result_map
|
[
"thunderdome.gremlin.parse",
"os.path.dirname"
] |
[((1417, 1435), 'thunderdome.gremlin.parse', 'parse', (['groovy_file'], {}), '(groovy_file)\n', (1422, 1435), False, 'from thunderdome.gremlin import parse\n'), ((1345, 1370), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1360, 1370), False, 'import os\n')]
|
from typing import Dict, Optional, List, Any, Set, TYPE_CHECKING, Tuple
from bxutils import logging
from bxcommon.feed.feed import Feed, FeedKey
from bxcommon.feed.subscriber import Subscriber
if TYPE_CHECKING:
from bxcommon.connections.abstract_node import AbstractNode
logger = logging.get_logger(__name__)
class FeedManager:
feeds: Dict[FeedKey, Feed]
_node: "AbstractNode"
def __init__(self, node: "AbstractNode") -> None:
self.feeds = {}
self._node = node
def __contains__(self, item):
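        # accept either a FeedKey or a raw feed name for membership tests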
if isinstance(item, FeedKey):
return item in self.feeds
else:
return FeedKey(item) in self.feeds
def register_feed(self, feed: Feed) -> None:
if feed.feed_key in self.feeds:
raise ValueError(
f"Cannot register two feeds with the same name: {feed.name}"
)
self.feeds[feed.feed_key] = feed
def subscribe_to_feed(
self, feed_key: FeedKey, options: Dict[str, Any]
) -> Optional[Subscriber]:
if feed_key in self.feeds:
subscriber = self.feeds[feed_key].subscribe(options)
logger.debug(
"Creating new subscriber ({}) to {}", subscriber.subscription_id, feed_key.name
)
self._node.reevaluate_transaction_streamer_connection()
return subscriber
else:
return None
def unsubscribe_from_feed(
self, feed_key: FeedKey, subscriber_id: str
) -> Optional[Subscriber]:
subscriber = self.feeds[feed_key].unsubscribe(subscriber_id)
if subscriber is not None:
logger.debug(
"Unsubscribing subscriber ({}) from {}",
subscriber.subscription_id,
feed_key.name,
)
self._node.reevaluate_transaction_streamer_connection()
return subscriber
def publish_to_feed(self, feed_key: FeedKey, message: Any) -> None:
if feed_key in self.feeds:
self.feeds[feed_key].publish(message)
def get_feed_fields(self, feed_key: FeedKey) -> List[str]:
return self.feeds[feed_key].FIELDS
def get_feed(self, feed_key: FeedKey):
return self.feeds.get(feed_key)
def get_feed_keys(self, network_num: int = 0) -> List[FeedKey]:
return [key for key in self.feeds if key.network_num == network_num]
def any_subscribers(self) -> bool:
return any(feed.subscriber_count() > 0 for feed in self.feeds.values())
def get_valid_feed_filters(self, feed_key: FeedKey) -> Set[str]:
return self.feeds[feed_key].FILTERS
def validate_feed_filters(self, feed_key: FeedKey, filters: str) -> Tuple[str, List[str]]:
return self.feeds[feed_key].validate_filters(filters)
|
[
"bxcommon.feed.feed.FeedKey",
"bxutils.logging.get_logger"
] |
[((288, 316), 'bxutils.logging.get_logger', 'logging.get_logger', (['__name__'], {}), '(__name__)\n', (306, 316), False, 'from bxutils import logging\n'), ((644, 657), 'bxcommon.feed.feed.FeedKey', 'FeedKey', (['item'], {}), '(item)\n', (651, 657), False, 'from bxcommon.feed.feed import Feed, FeedKey\n')]
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from functools import partial
from typing import Callable, Sequence
import numpy as np
import pytest
import torch
from tests.text.helpers import TextTester
from tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_single_reference
from torchmetrics.functional.text.rouge import rouge_score
from torchmetrics.text.rouge import ROUGEScore
from torchmetrics.utilities.imports import _NLTK_AVAILABLE, _ROUGE_SCORE_AVAILABLE
if _ROUGE_SCORE_AVAILABLE:
from rouge_score.rouge_scorer import RougeScorer
from rouge_score.scoring import BootstrapAggregator
else:
RougeScorer, BootstrapAggregator = object, object
ROUGE_KEYS = ("rouge1", "rouge2", "rougeL", "rougeLsum")
def _compute_rouge_score(
preds: Sequence[str],
target: Sequence[Sequence[str]],
use_stemmer: bool,
rouge_level: str,
metric: str,
accumulate: str,
):
"""Evaluates rouge scores from rouge-score package for baseline evaluation."""
if isinstance(target, list) and all(isinstance(tgt, str) for tgt in target):
target = [target] if isinstance(preds, str) else [[tgt] for tgt in target]
if isinstance(preds, str):
preds = [preds]
if isinstance(target, str):
target = [[target]]
scorer = RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = BootstrapAggregator()
for target_raw, pred_raw in zip(target, preds):
list_results = [scorer.score(tgt, pred_raw) for tgt in target_raw]
aggregator_avg = BootstrapAggregator()
if accumulate == "best":
key_curr = list(list_results[0].keys())[0]
all_fmeasure = torch.tensor([v[key_curr].fmeasure for v in list_results])
highest_idx = torch.argmax(all_fmeasure).item()
aggregator.add_scores(list_results[highest_idx])
elif accumulate == "avg":
for _score in list_results:
aggregator_avg.add_scores(_score)
_score = {rouge_key: scores.mid for rouge_key, scores in aggregator_avg.aggregate().items()}
aggregator.add_scores(_score)
else:
raise ValueError(f"Got unknown accumulate value {accumulate}. Expected to be one of ['best', 'avg']")
rs_scores = aggregator.aggregate()
rs_result = getattr(rs_scores[rouge_level].mid, metric)
return rs_result
@pytest.mark.skipif(not _NLTK_AVAILABLE, reason="test requires nltk")
@pytest.mark.parametrize(
["pl_rouge_metric_key", "use_stemmer"],
[
("rouge1_precision", True),
("rouge1_recall", True),
("rouge1_fmeasure", False),
("rouge2_precision", False),
("rouge2_recall", True),
("rouge2_fmeasure", True),
("rougeL_precision", False),
("rougeL_recall", False),
("rougeL_fmeasure", True),
("rougeLsum_precision", True),
("rougeLsum_recall", False),
("rougeLsum_fmeasure", False),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[
(_inputs_multiple_references.preds, _inputs_multiple_references.targets),
],
)
@pytest.mark.parametrize("accumulate", ["avg", "best"])
class TestROUGEScore(TextTester):
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("dist_sync_on_step", [False, True])
def test_rouge_score_class(
self, ddp, dist_sync_on_step, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate
):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=ROUGEScore,
sk_metric=rouge_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_score_functional(self, preds, targets, pl_rouge_metric_key, use_stemmer, accumulate):
metric_args = {"use_stemmer": use_stemmer, "accumulate": accumulate}
rouge_level, metric = pl_rouge_metric_key.split("_")
rouge_metric = partial(
_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=rouge_level, metric=metric, accumulate=accumulate
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=rouge_score,
sk_metric=rouge_metric,
metric_args=metric_args,
key=pl_rouge_metric_key,
)
def test_rouge_metric_raises_errors_and_warnings():
"""Test that expected warnings and errors are raised."""
if not _NLTK_AVAILABLE:
with pytest.raises(
ModuleNotFoundError,
match="ROUGE metric requires that `nltk` is installed."
" Either as `pip install torchmetrics[text]` or `pip install nltk`.",
):
ROUGEScore()
def test_rouge_metric_wrong_key_value_error():
key = ("rouge1", "rouge")
with pytest.raises(ValueError):
ROUGEScore(rouge_keys=key)
with pytest.raises(ValueError):
rouge_score(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.targets,
rouge_keys=key,
accumulate="best",
)
@pytest.mark.parametrize(
"pl_rouge_metric_key",
[
"rouge1_precision",
"rouge1_recall",
"rouge1_fmeasure",
"rouge2_precision",
"rouge2_recall",
"rouge2_fmeasure",
"rougeL_precision",
"rougeL_recall",
"rougeL_fmeasure",
"rougeLsum_precision",
"rougeLsum_recall",
"rougeLsum_fmeasure",
],
)
def test_rouge_metric_normalizer_tokenizer(pl_rouge_metric_key):
normalizer: Callable[[str], str] = lambda text: re.sub(r"[^a-z0-9]+", " ", text.lower())
tokenizer: Callable[[str], Sequence[str]] = lambda text: re.split(r"\s+", text)
rouge_level, metric = pl_rouge_metric_key.split("_")
original_score = _compute_rouge_score(
preds=_inputs_single_sentence_single_reference.preds,
target=_inputs_single_sentence_single_reference.targets,
rouge_level=rouge_level,
metric=metric,
accumulate="best",
use_stemmer=False,
)
scorer = ROUGEScore(
normalizer=normalizer, tokenizer=tokenizer, rouge_keys=rouge_level, accumulate="best", use_stemmer=False
)
scorer.update(
_inputs_single_sentence_single_reference.preds,
_inputs_single_sentence_single_reference.targets,
)
metrics_score = scorer.compute()
np.isclose(metrics_score[rouge_level + "_" + metric], original_score, atol=1e-8, equal_nan=True)
|
[
"functools.partial",
"re.split",
"rouge_score.rouge_scorer.RougeScorer",
"torch.argmax",
"rouge_score.scoring.BootstrapAggregator",
"numpy.isclose",
"pytest.mark.skipif",
"torchmetrics.text.rouge.ROUGEScore",
"pytest.raises",
"torchmetrics.functional.text.rouge.rouge_score",
"pytest.mark.parametrize",
"torch.tensor"
] |
[((2936, 3004), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not _NLTK_AVAILABLE)'], {'reason': '"""test requires nltk"""'}), "(not _NLTK_AVAILABLE, reason='test requires nltk')\n", (2954, 3004), False, 'import pytest\n'), ((3006, 3429), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['pl_rouge_metric_key', 'use_stemmer']", "[('rouge1_precision', True), ('rouge1_recall', True), ('rouge1_fmeasure', \n False), ('rouge2_precision', False), ('rouge2_recall', True), (\n 'rouge2_fmeasure', True), ('rougeL_precision', False), ('rougeL_recall',\n False), ('rougeL_fmeasure', True), ('rougeLsum_precision', True), (\n 'rougeLsum_recall', False), ('rougeLsum_fmeasure', False)]"], {}), "(['pl_rouge_metric_key', 'use_stemmer'], [(\n 'rouge1_precision', True), ('rouge1_recall', True), ('rouge1_fmeasure',\n False), ('rouge2_precision', False), ('rouge2_recall', True), (\n 'rouge2_fmeasure', True), ('rougeL_precision', False), ('rougeL_recall',\n False), ('rougeL_fmeasure', True), ('rougeLsum_precision', True), (\n 'rougeLsum_recall', False), ('rougeLsum_fmeasure', False)])\n", (3029, 3429), False, 'import pytest\n'), ((3522, 3648), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["['preds', 'targets']", '[(_inputs_multiple_references.preds, _inputs_multiple_references.targets)]'], {}), "(['preds', 'targets'], [(_inputs_multiple_references\n .preds, _inputs_multiple_references.targets)])\n", (3545, 3648), False, 'import pytest\n'), ((3671, 3725), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""accumulate"""', "['avg', 'best']"], {}), "('accumulate', ['avg', 'best'])\n", (3694, 3725), False, 'import pytest\n'), ((6077, 6374), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""pl_rouge_metric_key"""', "['rouge1_precision', 'rouge1_recall', 'rouge1_fmeasure', 'rouge2_precision',\n 'rouge2_recall', 'rouge2_fmeasure', 'rougeL_precision', 'rougeL_recall',\n 'rougeL_fmeasure', 'rougeLsum_precision', 'rougeLsum_recall',\n 'rougeLsum_fmeasure']"], {}), "('pl_rouge_metric_key', ['rouge1_precision',\n 'rouge1_recall', 'rouge1_fmeasure', 'rouge2_precision', 'rouge2_recall',\n 'rouge2_fmeasure', 'rougeL_precision', 'rougeL_recall',\n 'rougeL_fmeasure', 'rougeLsum_precision', 'rougeLsum_recall',\n 'rougeLsum_fmeasure'])\n", (6100, 6374), False, 'import pytest\n'), ((1854, 1902), 'rouge_score.rouge_scorer.RougeScorer', 'RougeScorer', (['ROUGE_KEYS'], {'use_stemmer': 'use_stemmer'}), '(ROUGE_KEYS, use_stemmer=use_stemmer)\n', (1865, 1902), False, 'from rouge_score.rouge_scorer import RougeScorer\n'), ((1920, 1941), 'rouge_score.scoring.BootstrapAggregator', 'BootstrapAggregator', ([], {}), '()\n', (1939, 1941), False, 'from rouge_score.scoring import BootstrapAggregator\n'), ((3765, 3810), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ddp"""', '[False, True]'], {}), "('ddp', [False, True])\n", (3788, 3810), False, 'import pytest\n'), ((3816, 3875), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dist_sync_on_step"""', '[False, True]'], {}), "('dist_sync_on_step', [False, True])\n", (3839, 3875), False, 'import pytest\n'), ((7073, 7194), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {'normalizer': 'normalizer', 'tokenizer': 'tokenizer', 'rouge_keys': 'rouge_level', 'accumulate': '"""best"""', 'use_stemmer': '(False)'}), "(normalizer=normalizer, tokenizer=tokenizer, rouge_keys=\n rouge_level, accumulate='best', use_stemmer=False)\n", (7083, 7194), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((7385, 7487), 'numpy.isclose', 'np.isclose', (["metrics_score[rouge_level + '_' + metric]", 'original_score'], {'atol': '(1e-08)', 'equal_nan': '(True)'}), "(metrics_score[rouge_level + '_' + metric], original_score, atol=\n 1e-08, equal_nan=True)\n", (7395, 7487), True, 'import numpy as np\n'), ((2095, 2116), 'rouge_score.scoring.BootstrapAggregator', 'BootstrapAggregator', ([], {}), '()\n', (2114, 2116), False, 'from rouge_score.scoring import BootstrapAggregator\n'), ((4175, 4297), 'functools.partial', 'partial', (['_compute_rouge_score'], {'use_stemmer': 'use_stemmer', 'rouge_level': 'rouge_level', 'metric': 'metric', 'accumulate': 'accumulate'}), '(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=\n rouge_level, metric=metric, accumulate=accumulate)\n', (4182, 4297), False, 'from functools import partial\n'), ((4900, 5022), 'functools.partial', 'partial', (['_compute_rouge_score'], {'use_stemmer': 'use_stemmer', 'rouge_level': 'rouge_level', 'metric': 'metric', 'accumulate': 'accumulate'}), '(_compute_rouge_score, use_stemmer=use_stemmer, rouge_level=\n rouge_level, metric=metric, accumulate=accumulate)\n', (4907, 5022), False, 'from functools import partial\n'), ((5763, 5788), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5776, 5788), False, 'import pytest\n'), ((5798, 5824), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {'rouge_keys': 'key'}), '(rouge_keys=key)\n', (5808, 5824), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((5835, 5860), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5848, 5860), False, 'import pytest\n'), ((5870, 6022), 'torchmetrics.functional.text.rouge.rouge_score', 'rouge_score', (['_inputs_single_sentence_single_reference.preds', '_inputs_single_sentence_single_reference.targets'], {'rouge_keys': 'key', 'accumulate': '"""best"""'}), "(_inputs_single_sentence_single_reference.preds,\n _inputs_single_sentence_single_reference.targets, rouge_keys=key,\n accumulate='best')\n", (5881, 6022), False, 'from torchmetrics.functional.text.rouge import rouge_score\n'), ((6692, 6714), 're.split', 're.split', (['"""\\\\s+"""', 'text'], {}), "('\\\\s+', text)\n", (6700, 6714), False, 'import re\n'), ((2233, 2291), 'torch.tensor', 'torch.tensor', (['[v[key_curr].fmeasure for v in list_results]'], {}), '([v[key_curr].fmeasure for v in list_results])\n', (2245, 2291), False, 'import torch\n'), ((5440, 5607), 'pytest.raises', 'pytest.raises', (['ModuleNotFoundError'], {'match': '"""ROUGE metric requires that `nltk` is installed. Either as `pip install torchmetrics[text]` or `pip install nltk`."""'}), "(ModuleNotFoundError, match=\n 'ROUGE metric requires that `nltk` is installed. Either as `pip install torchmetrics[text]` or `pip install nltk`.'\n )\n", (5453, 5607), False, 'import pytest\n'), ((5661, 5673), 'torchmetrics.text.rouge.ROUGEScore', 'ROUGEScore', ([], {}), '()\n', (5671, 5673), False, 'from torchmetrics.text.rouge import ROUGEScore\n'), ((2318, 2344), 'torch.argmax', 'torch.argmax', (['all_fmeasure'], {}), '(all_fmeasure)\n', (2330, 2344), False, 'import torch\n')]
|
from vraapiclient import catalog
from st2actions.runners.pythonrunner import Action
class vRealizeAutomationAction(Action):
def __init__(self, config):
super(vRealizeAutomationAction, self).__init__(config)
self.vra7 = self._get_client()
def _get_client(self):
hostname = self.config['hostname']
username = self.config['username']
password = self.config['password']
tenant = self.config['tenant']
client = catalog.ConsumerClient(hostname, username, password, tenant)
return client
|
[
"vraapiclient.catalog.ConsumerClient"
] |
[((474, 534), 'vraapiclient.catalog.ConsumerClient', 'catalog.ConsumerClient', (['hostname', 'username', 'password', 'tenant'], {}), '(hostname, username, password, tenant)\n', (496, 534), False, 'from vraapiclient import catalog\n')]
|
#!/usr/bin/python
# Note : This is designed for Python 3
import numpy as np
class orthogonal_optimization:
def __init__(self, db):
self.cost_function = db['compute_cost']
self.gradient_function = db['compute_gradient']
self.x_opt = None
self.cost_opt = None
self.db = {}
#self.db['run_debug_2'] = True
#self.db['run_debug_1'] = True
def calc_A(self, x):
G = self.gradient_function(x)
A = G.dot(x.T) - x.dot(G.T)
return [A,G]
# Applying Sherman-Morrison-Woodbury Theorem ( A faster way to update instead of recalculating inverse )
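	# Woodbury identity: (M + U V^T)^{-1} = M^{-1} - M^{-1} U (I + V^T M^{-1} U)^{-1} V^T M^{-1};
	# applied twice, once per rank-one piece (+alpha_D*G*x^T and -alpha_D*x*G^T) of the change in M = I + alpha*A.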
def constant_update_inv(self, x, G, M_inv, alpha_D):
if alpha_D == 0: return M_inv
d = x.shape[1]
I = np.eye(d)
# 1st update
U = alpha_D*G
V = x
E = np.linalg.inv(I + V.T.dot(M_inv).dot(U))
M_inv = M_inv - M_inv.dot(U).dot(E).dot(V.T).dot(M_inv)
# 2nd update
U = -alpha_D*x
V = G
E = np.linalg.inv(I + V.T.dot(M_inv).dot(U))
M_inv = M_inv - M_inv.dot(U).dot(E).dot(V.T).dot(M_inv)
return M_inv
def compute_gradient(self, x):
[A,G] = self.calc_A(x)
return A.dot(x)
def run(self, x_init, max_rep=400, α=2):
d = x_init.shape[0]
self.x_opt = x_init
I = np.eye(d)
converged = False
x_change = np.linalg.norm(x_init)
m = 0
in_cost = self.cost_function(self.x_opt)
while( (converged == False) and (m < max_rep)):
old_alpha = α
new_alpha = α
alpha_D = 0
cost_1 = self.cost_function(self.x_opt)
[A,g] = self.calc_A(self.x_opt)
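			# Cayley update: x <- (I + a*A)^{-1} (I - a*A) x; since A = G x^T - x G^T is skew-symmetric,
			# this transform is orthogonal, so orthonormal columns stay (numerically) orthonormal.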
M_inv = np.linalg.inv(I + new_alpha*A)
while(new_alpha > 0.000000001):
if True: M_inv = self.constant_update_inv(self.x_opt, g, M_inv, alpha_D) # using woodbury inverse update
else: M_inv = np.linalg.inv(I + new_alpha*A) # using slow inverse
#next_x_o = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
#M_inv = np.linalg.inv(I + new_alpha*A) # using slow inverse
#next_x = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
#print '\n'
#print '------------------------------------', np.linalg.norm(next_x - next_x_old)
next_x = M_inv.dot(I - new_alpha*A).dot(self.x_opt)
cost_2 = self.cost_function(next_x)
if 'run_debug_1' in self.db: print(new_alpha, cost_1, cost_2)
#if((cost_2 < cost_1) or (abs(cost_1 - cost_2)/abs(cost_1) < 0.0000001)):
if(cost_2 < cost_1):
x_change = np.linalg.norm(next_x - self.x_opt)
[self.x_opt,R] = np.linalg.qr(next_x) # QR ensures orthogonality
self.cost_opt = cost_2
break
else:
old_alpha = new_alpha
new_alpha = new_alpha*0.2
alpha_D = new_alpha - old_alpha
m += 1
if 'run_debug_2' in self.db: print('Cost Norm : %.3f'%cost_2)
if 'run_debug_3' in self.db: print('Gradient Norm : %.3f'%np.linalg.norm(self.compute_gradient(self.x_opt)))
#print(x_change)
if(x_change < 0.001*np.linalg.norm(self.x_opt)): converged = True
#out_cost = self.cost_function(self.x_opt)
#print('\t\tin cost %.3f , out cost %.3f'%(in_cost,out_cost))
#if out_cost > in_cost:
# import pdb; pdb.set_trace()
return self.x_opt
|
[
"numpy.eye",
"numpy.linalg.qr",
"numpy.linalg.inv",
"numpy.linalg.norm"
] |
[((667, 676), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (673, 676), True, 'import numpy as np\n'), ((1154, 1163), 'numpy.eye', 'np.eye', (['d'], {}), '(d)\n', (1160, 1163), True, 'import numpy as np\n'), ((1197, 1219), 'numpy.linalg.norm', 'np.linalg.norm', (['x_init'], {}), '(x_init)\n', (1211, 1219), True, 'import numpy as np\n'), ((1461, 1493), 'numpy.linalg.inv', 'np.linalg.inv', (['(I + new_alpha * A)'], {}), '(I + new_alpha * A)\n', (1474, 1493), True, 'import numpy as np\n'), ((1656, 1688), 'numpy.linalg.inv', 'np.linalg.inv', (['(I + new_alpha * A)'], {}), '(I + new_alpha * A)\n', (1669, 1688), True, 'import numpy as np\n'), ((2295, 2330), 'numpy.linalg.norm', 'np.linalg.norm', (['(next_x - self.x_opt)'], {}), '(next_x - self.x_opt)\n', (2309, 2330), True, 'import numpy as np\n'), ((2353, 2373), 'numpy.linalg.qr', 'np.linalg.qr', (['next_x'], {}), '(next_x)\n', (2365, 2373), True, 'import numpy as np\n'), ((2779, 2805), 'numpy.linalg.norm', 'np.linalg.norm', (['self.x_opt'], {}), '(self.x_opt)\n', (2793, 2805), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import sys
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from gi.repository import Gdk
w = Gtk.Window()
screen = Gdk.Screen.get_default()
print ("Montors: %d" % screen.get_n_monitors())
if len(sys.argv) > 1:
n = int(sys.argv[1])
else:
n = 0
l = Gtk.Button(label="Hello, %d monitors!" % screen.get_n_monitors())
w.add(l)
w.show_all()
w.fullscreen_on_monitor(screen, n)
l.connect("clicked", Gtk.main_quit)
w.connect("destroy", Gtk.main_quit)
Gtk.main()
|
[
"gi.require_version",
"gi.repository.Gtk.main",
"gi.repository.Gdk.Screen.get_default",
"gi.repository.Gtk.Window"
] |
[((45, 77), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (63, 77), False, 'import gi\n'), ((144, 156), 'gi.repository.Gtk.Window', 'Gtk.Window', ([], {}), '()\n', (154, 156), False, 'from gi.repository import Gtk\n'), ((167, 191), 'gi.repository.Gdk.Screen.get_default', 'Gdk.Screen.get_default', ([], {}), '()\n', (189, 191), False, 'from gi.repository import Gdk\n'), ((504, 514), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (512, 514), False, 'from gi.repository import Gtk\n')]
|
import logging
import threading
import time
from django.db import close_old_connections
from meeting.views import StopMeetingAPI
from utils import cache
thread_id = 0
refresh_interval = 20 # 20 seconds
logger = logging.getLogger(__name__)
# Create your views here.
def start_delay_task():
if thread_id != 0:
logger.warning(f'delay task is already running, TID: {thread_id}')
return
threading.Thread(target=__delay_task, name='Delay Task', daemon=True).start()
logger.info(f'start delay task')
def __delay_task():
global thread_id
thread_id = threading.get_ident()
logger.info(f'delay task started')
while True:
        try:
            meetings = list(cache.delay_queue.retrieve())
            if not meetings:
                logger.debug('empty delay queue')
            for meeting in meetings:
                close_old_connections()
                logger.info(f'to close meeting: {meeting}')
                StopMeetingAPI.stop_meeting(meeting, None)
except Exception as e:
logger.warning(f'ignore exception: {e}')
time.sleep(refresh_interval)
|
[
"threading.Thread",
"utils.cache.delay_queue.retrieve",
"django.db.close_old_connections",
"threading.get_ident",
"time.sleep",
"meeting.views.StopMeetingAPI.stop_meeting",
"logging.getLogger"
] |
[((215, 242), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (232, 242), False, 'import logging\n'), ((587, 608), 'threading.get_ident', 'threading.get_ident', ([], {}), '()\n', (606, 608), False, 'import threading\n'), ((1054, 1082), 'time.sleep', 'time.sleep', (['refresh_interval'], {}), '(refresh_interval)\n', (1064, 1082), False, 'import time\n'), ((413, 482), 'threading.Thread', 'threading.Thread', ([], {'target': '__delay_task', 'name': '"""Delay Task"""', 'daemon': '(True)'}), "(target=__delay_task, name='Delay Task', daemon=True)\n", (429, 482), False, 'import threading\n'), ((705, 733), 'utils.cache.delay_queue.retrieve', 'cache.delay_queue.retrieve', ([], {}), '()\n', (731, 733), False, 'from utils import cache\n'), ((751, 774), 'django.db.close_old_connections', 'close_old_connections', ([], {}), '()\n', (772, 774), False, 'from django.db import close_old_connections\n'), ((851, 893), 'meeting.views.StopMeetingAPI.stop_meeting', 'StopMeetingAPI.stop_meeting', (['meeting', 'None'], {}), '(meeting, None)\n', (878, 893), False, 'from meeting.views import StopMeetingAPI\n')]
|
"""
Tests for contingency table analyses.
"""
import os
import warnings
import numpy as np
import statsmodels.stats.contingency_tables as ctab
import pandas as pd
from numpy.testing import assert_allclose, assert_equal
import statsmodels.api as sm
cur_dir = os.path.dirname(os.path.abspath(__file__))
fname = "contingency_table_r_results.csv"
fpath = os.path.join(cur_dir, 'results', fname)
r_results = pd.read_csv(fpath)
tables = [None, None, None]
tables[0] = np.asarray([[23, 15], [19, 31]])
tables[1] = np.asarray([[144, 33, 84, 126],
[2, 4, 14, 29],
[0, 2, 6, 25],
[0, 0, 1, 5]])
tables[2] = np.asarray([[20, 10, 5],
[3, 30, 15],
[0, 5, 40]])
def test_homogeneity():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
hm = st.homogeneity()
assert_allclose(hm.statistic, r_results.loc[k, "homog_stat"])
assert_allclose(hm.df, r_results.loc[k, "homog_df"])
# Test Bhapkar via its relationship to Stuart_Maxwell.
hmb = st.homogeneity(method="bhapkar")
assert_allclose(hmb.statistic, hm.statistic / (1 - hm.statistic / table.sum()))
def test_SquareTable_from_data():
np.random.seed(434)
df = pd.DataFrame(index=range(100), columns=["v1", "v2"])
df["v1"] = np.random.randint(0, 5, 100)
df["v2"] = np.random.randint(0, 5, 100)
table = pd.crosstab(df["v1"], df["v2"])
rslt1 = ctab.SquareTable(table)
rslt2 = ctab.SquareTable.from_data(df)
rslt3 = ctab.SquareTable(np.asarray(table))
assert_equal(rslt1.summary().as_text(),
rslt2.summary().as_text())
assert_equal(rslt2.summary().as_text(),
rslt3.summary().as_text())
s = str(rslt1)
assert_equal(s.startswith('A 5x5 contingency table with counts:'), True)
assert_equal(rslt1.table[0, 0], 8.)
def test_SquareTable_nonsquare():
tab = [[1, 0, 3], [2, 1, 4], [3, 0, 5]]
df = pd.DataFrame(tab, index=[0, 1, 3], columns=[0, 2, 3])
df2 = ctab.SquareTable(df, shift_zeros=False)
e = np.asarray([[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]],
dtype=np.float64)
assert_equal(e, df2.table)
def test_cumulative_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
cum_odds = tbl_obj.cumulative_oddsratios
assert_allclose(cum_odds[0, 0], 28 / float(5 * 11))
assert_allclose(cum_odds[0, 1], (3 * 15) / float(3 * 24), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(cum_odds), tbl_obj.cumulative_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_local_odds():
table = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
table = np.asarray(table)
tbl_obj = ctab.Table(table)
loc_odds = tbl_obj.local_oddsratios
assert_allclose(loc_odds[0, 0], 5 / 8.)
assert_allclose(loc_odds[0, 1], 12 / float(15), atol=1e-5,
rtol=1e-5)
assert_allclose(np.log(loc_odds), tbl_obj.local_log_oddsratios,
atol=1e-5, rtol=1e-5)
def test_shifting():
t = np.zeros((3, 4), dtype=np.float64)
result = np.full((3, 4), 0.5)
assert_equal(ctab.Table(t, shift_zeros=False).table, t)
assert_equal(ctab.Table(t, shift_zeros=True).table, result)
t = np.asarray([[0, 1, 2],
[3, 0, 4],
[5, 6, 0]], dtype=np.float64)
r = np.asarray([[0.5, 1, 2],
[3, 0.5, 4],
[5, 6, 0.5]], dtype=np.float64)
assert_equal(ctab.Table(t).table, r)
assert_equal(ctab.Table(t, shift_zeros=True).table, r)
def test_stratified_table_cube():
# Test that we can pass a rank 3 ndarray or a list of rank 2
# ndarrays to StratifiedTable and get the same results.
tab1 = [[[8, 9], [6, 7]], [[4, 9], [5, 5]], [[8, 8], [9, 11]]]
tab2 = np.asarray(tab1).T
ct1 = ctab.StratifiedTable(tab1)
ct2 = ctab.StratifiedTable(tab2)
assert_allclose(ct1.oddsratio_pooled, ct2.oddsratio_pooled)
assert_allclose(ct1.logodds_pooled, ct2.logodds_pooled)
def test_resids():
# CHD x serum data
table = [[12, 8, 31, 41], [307, 246, 439, 245]]
# These results come from SAS
fit = [[22.083, 17.583, 32.536, 19.798],
[296.92, 236.42, 437.46, 266.2]]
c2 = [[4.6037, 5.223, 0.0725, 22.704],
[0.3424, 0.3885, 0.0054, 1.6886]]
# These are regression tests
pr = np.array([[-2.14562121, -2.28538719, -0.26923882, 4.7649169 ],
[ 0.58514314, 0.62325942, 0.07342547, -1.29946443]])
sr = np.array([[-2.55112945, -2.6338782 , -0.34712127, 5.5751083 ],
[ 2.55112945, 2.6338782 , 0.34712127, -5.5751083 ]])
tab = ctab.Table(table)
assert_allclose(tab.fittedvalues, fit, atol=1e-4, rtol=1e-4)
assert_allclose(tab.chi2_contribs, c2, atol=1e-4, rtol=1e-4)
assert_allclose(tab.resid_pearson, pr, atol=1e-4, rtol=1e-4)
assert_allclose(tab.standardized_resids, sr, atol=1e-4, rtol=1e-4)
def test_ordinal_association():
for k,table in enumerate(tables):
row_scores = 1 + np.arange(table.shape[0])
col_scores = 1 + np.arange(table.shape[1])
# First set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl_chi2"], rtol=1e-5, atol=1e-5)
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl_pvalue"], rtol=1e-5, atol=1e-5)
# Second set of scores
rslt = ctab.Table(table, shift_zeros=False).test_ordinal_association(row_scores, col_scores**2)
assert_allclose(rslt.statistic, r_results.loc[k, "lbl2_stat"])
assert_allclose(rslt.null_mean, r_results.loc[k, "lbl2_expval"])
assert_allclose(rslt.null_sd**2, r_results.loc[k, "lbl2_var"])
assert_allclose(rslt.zscore**2, r_results.loc[k, "lbl2_chi2"])
assert_allclose(rslt.pvalue, r_results.loc[k, "lbl2_pvalue"], rtol=1e-5, atol=1e-5)
def test_chi2_association():
np.random.seed(8743)
table = np.random.randint(10, 30, size=(4, 4))
from scipy.stats import chi2_contingency
rslt_scipy = chi2_contingency(table)
b = ctab.Table(table).test_nominal_association()
assert_allclose(b.statistic, rslt_scipy[0])
assert_allclose(b.pvalue, rslt_scipy[1])
def test_symmetry():
for k,table in enumerate(tables):
st = sm.stats.SquareTable(table, shift_zeros=False)
b = st.symmetry()
assert_allclose(b.statistic, r_results.loc[k, "bowker_stat"])
assert_equal(b.df, r_results.loc[k, "bowker_df"])
assert_allclose(b.pvalue, r_results.loc[k, "bowker_pvalue"])
def test_mcnemar():
# Use chi^2 without continuity correction
b1 = ctab.mcnemar(tables[0], exact=False, correction=False)
st = sm.stats.SquareTable(tables[0])
b2 = st.homogeneity()
assert_allclose(b1.statistic, b2.statistic)
assert_equal(b2.df, 1)
# Use chi^2 with continuity correction
b3 = ctab.mcnemar(tables[0], exact=False, correction=True)
assert_allclose(b3.pvalue, r_results.loc[0, "homog_cont_p"])
# Use binomial reference distribution
b4 = ctab.mcnemar(tables[0], exact=True)
assert_allclose(b4.pvalue, r_results.loc[0, "homog_binom_p"])
def test_from_data_stratified():
df = pd.DataFrame([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1]]).T
e = np.asarray([[[0, 1], [1, 1]], [[2, 2], [1, 0]]])
# Test pandas
tab1 = ctab.StratifiedTable.from_data(0, 1, 2, df)
assert_equal(tab1.table, e)
# Test ndarray
tab1 = ctab.StratifiedTable.from_data(0, 1, 2, np.asarray(df))
assert_equal(tab1.table, e)
def test_from_data_2x2():
df = pd.DataFrame([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]]).T
e = np.asarray([[1, 2], [4, 1]])
# Test pandas
tab1 = ctab.Table2x2.from_data(df, shift_zeros=False)
assert_equal(tab1.table, e)
# Test ndarray
tab1 = ctab.Table2x2.from_data(np.asarray(df), shift_zeros=False)
assert_equal(tab1.table, e)
def test_cochranq():
# library(CVST)
# table1 = matrix(c(1, 0, 1, 1,
# 0, 1, 1, 1,
# 1, 1, 1, 0,
# 0, 1, 0, 0,
# 0, 1, 0, 0,
# 1, 0, 1, 0,
# 0, 1, 0, 0,
# 1, 1, 1, 1,
# 0, 1, 0, 0), ncol=4, byrow=TRUE)
# rslt1 = cochranq.test(table1)
# table2 = matrix(c(0, 0, 1, 1, 0,
# 0, 1, 0, 1, 0,
# 0, 1, 1, 0, 1,
# 1, 0, 0, 0, 1,
# 1, 1, 0, 0, 0,
# 1, 0, 1, 0, 0,
# 0, 1, 0, 0, 0,
# 0, 0, 1, 1, 0,
# 0, 0, 0, 0, 0), ncol=5, byrow=TRUE)
# rslt2 = cochranq.test(table2)
table = [[1, 0, 1, 1],
[0, 1, 1, 1],
[1, 1, 1, 0],
[0, 1, 0, 0],
[0, 1, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 0],
[1, 1, 1, 1],
[0, 1, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 4.2)
assert_allclose(df, 3)
table = [[0, 0, 1, 1, 0],
[0, 1, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 0, 0, 1],
[1, 1, 0, 0, 0],
[1, 0, 1, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 1, 0],
[0, 0, 0, 0, 0]]
table = np.asarray(table)
stat, pvalue, df = ctab.cochrans_q(table, return_object=False)
assert_allclose(stat, 1.2174, rtol=1e-4)
assert_allclose(df, 4)
# Cochran's q and Mcnemar are equivalent for 2x2 tables
data = table[:, 0:2]
xtab = np.asarray(pd.crosstab(data[:, 0], data[:, 1]))
b1 = ctab.cochrans_q(data, return_object=True)
b2 = ctab.mcnemar(xtab, exact=False, correction=False)
assert_allclose(b1.statistic, b2.statistic)
assert_allclose(b1.pvalue, b2.pvalue)
# Test for printing bunch
assert_equal(str(b1).startswith("df 1\npvalue 0.65"), True)
class CheckStratifiedMixin:
@classmethod
def initialize(cls, tables, use_arr=False):
tables1 = tables if not use_arr else np.dstack(tables)
cls.rslt = ctab.StratifiedTable(tables1)
cls.rslt_0 = ctab.StratifiedTable(tables, shift_zeros=True)
tables_pandas = [pd.DataFrame(x) for x in tables]
cls.rslt_pandas = ctab.StratifiedTable(tables_pandas)
def test_oddsratio_pooled(self):
assert_allclose(self.rslt.oddsratio_pooled, self.oddsratio_pooled,
rtol=1e-4, atol=1e-4)
def test_logodds_pooled(self):
assert_allclose(self.rslt.logodds_pooled, self.logodds_pooled,
rtol=1e-4, atol=1e-4)
def test_null_odds(self):
rslt = self.rslt.test_null_odds(correction=True)
assert_allclose(rslt.statistic, self.mh_stat, rtol=1e-4, atol=1e-5)
assert_allclose(rslt.pvalue, self.mh_pvalue, rtol=1e-4, atol=1e-4)
def test_oddsratio_pooled_confint(self):
lcb, ucb = self.rslt.oddsratio_pooled_confint()
assert_allclose(lcb, self.or_lcb, rtol=1e-4, atol=1e-4)
assert_allclose(ucb, self.or_ucb, rtol=1e-4, atol=1e-4)
def test_logodds_pooled_confint(self):
lcb, ucb = self.rslt.logodds_pooled_confint()
assert_allclose(lcb, np.log(self.or_lcb), rtol=1e-4,
atol=1e-4)
assert_allclose(ucb, np.log(self.or_ucb), rtol=1e-4,
atol=1e-4)
def test_equal_odds(self):
if not hasattr(self, "or_homog"):
return
rslt = self.rslt.test_equal_odds(adjust=False)
assert_allclose(rslt.statistic, self.or_homog, rtol=1e-4, atol=1e-4)
assert_allclose(rslt.pvalue, self.or_homog_p, rtol=1e-4, atol=1e-4)
rslt = self.rslt.test_equal_odds(adjust=True)
assert_allclose(rslt.statistic, self.or_homog_adj, rtol=1e-4, atol=1e-4)
assert_allclose(rslt.pvalue, self.or_homog_adj_p, rtol=1e-4, atol=1e-4)
def test_pandas(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
assert_equal(self.rslt.summary().as_text(),
self.rslt_pandas.summary().as_text())
def test_from_data(self):
np.random.seed(241)
df = pd.DataFrame(index=range(100), columns=("v1", "v2", "strat"))
df["v1"] = np.random.randint(0, 2, 100)
df["v2"] = np.random.randint(0, 2, 100)
df["strat"] = np.kron(np.arange(10), np.ones(10))
tables = []
for k in range(10):
ii = np.arange(10*k, 10*(k+1))
tables.append(pd.crosstab(df.loc[ii, "v1"], df.loc[ii, "v2"]))
rslt1 = ctab.StratifiedTable(tables)
rslt2 = ctab.StratifiedTable.from_data("v1", "v2", "strat", df)
assert_equal(rslt1.summary().as_text(), rslt2.summary().as_text())
class TestStratified1(CheckStratifiedMixin):
"""
data = array(c(0, 0, 6, 5,
3, 0, 3, 6,
6, 2, 0, 4,
5, 6, 1, 0,
2, 5, 0, 0),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
"""
@classmethod
def setup_class(cls):
tables = [None] * 5
tables[0] = np.array([[0, 0], [6, 5]])
tables[1] = np.array([[3, 0], [3, 6]])
tables[2] = np.array([[6, 2], [0, 4]])
tables[3] = np.array([[5, 6], [1, 0]])
tables[4] = np.array([[2, 5], [0, 0]])
cls.initialize(tables)
cls.oddsratio_pooled = 7
cls.logodds_pooled = np.log(7)
cls.mh_stat = 3.9286
cls.mh_pvalue = 0.04747
cls.or_lcb = 1.026713
cls.or_ucb = 47.725133
class TestStratified2(CheckStratifiedMixin):
"""
library(DescTools)
data = array(c(20, 14, 10, 24,
15, 12, 3, 15,
3, 2, 3, 2,
12, 3, 7, 5,
1, 0, 3, 2),
dim=c(2, 2, 5))
rslt = mantelhaen.test(data)
bd1 = BreslowDayTest(data, correct=FALSE)
bd2 = BreslowDayTest(data, correct=TRUE)
"""
@classmethod
def setup_class(cls):
tables = [None] * 5
tables[0] = np.array([[20, 14], [10, 24]])
tables[1] = np.array([[15, 12], [3, 15]])
tables[2] = np.array([[3, 2], [3, 2]])
tables[3] = np.array([[12, 3], [7, 5]])
tables[4] = np.array([[1, 0], [3, 2]])
# check array of int
cls.initialize(tables, use_arr=True)
cls.oddsratio_pooled = 3.5912
cls.logodds_pooled = np.log(3.5912)
cls.mh_stat = 11.8852
cls.mh_pvalue = 0.0005658
cls.or_lcb = 1.781135
cls.or_ucb = 7.240633
# Breslow Day test without Tarone adjustment
cls.or_homog = 1.8438
cls.or_homog_p = 0.7645
# Breslow Day test with Tarone adjustment
cls.or_homog_adj = 1.8436
cls.or_homog_adj_p = 0.7645
class TestStratified3(CheckStratifiedMixin):
"""
library(DescTools)
data = array(c(313, 512, 19, 89,
207, 353, 8, 17,
205, 120, 391, 202,
278, 139, 244, 131,
138, 53, 299, 94,
351, 22, 317, 24),
dim=c(2, 2, 6))
rslt = mantelhaen.test(data)
bd1 = BreslowDayTest(data, correct=FALSE)
bd2 = BreslowDayTest(data, correct=TRUE)
"""
@classmethod
def setup_class(cls):
tables = [None] * 6
tables[0] = np.array([[313, 512], [19, 89]])
tables[1] = np.array([[207, 353], [8, 17]])
tables[2] = np.array([[205, 120], [391, 202]])
tables[3] = np.array([[278, 139], [244, 131]])
tables[4] = np.array([[138, 53], [299, 94]])
tables[5] = np.array([[351, 22], [317, 24]])
cls.initialize(tables)
cls.oddsratio_pooled = 1.101879
cls.logodds_pooled = np.log(1.101879)
cls.mh_stat = 1.3368
cls.mh_pvalue = 0.2476
cls.or_lcb = 0.9402012
cls.or_ucb = 1.2913602
# Breslow Day test without Tarone adjustment
cls.or_homog = 18.83297
cls.or_homog_p = 0.002064786
# Breslow Day test with Tarone adjustment
cls.or_homog_adj = 18.83297
cls.or_homog_adj_p = 0.002064786
class Check2x2Mixin:
@classmethod
def initialize(cls):
cls.tbl_obj = ctab.Table2x2(cls.table)
cls.tbl_data_obj = ctab.Table2x2.from_data(cls.data)
def test_oddsratio(self):
assert_allclose(self.tbl_obj.oddsratio, self.oddsratio)
def test_log_oddsratio(self):
assert_allclose(self.tbl_obj.log_oddsratio, self.log_oddsratio)
def test_log_oddsratio_se(self):
assert_allclose(self.tbl_obj.log_oddsratio_se, self.log_oddsratio_se)
def test_oddsratio_pvalue(self):
assert_allclose(self.tbl_obj.oddsratio_pvalue(), self.oddsratio_pvalue)
def test_oddsratio_confint(self):
lcb1, ucb1 = self.tbl_obj.oddsratio_confint(0.05)
lcb2, ucb2 = self.oddsratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_riskratio(self):
assert_allclose(self.tbl_obj.riskratio, self.riskratio)
def test_log_riskratio(self):
assert_allclose(self.tbl_obj.log_riskratio, self.log_riskratio)
def test_log_riskratio_se(self):
assert_allclose(self.tbl_obj.log_riskratio_se, self.log_riskratio_se)
def test_riskratio_pvalue(self):
assert_allclose(self.tbl_obj.riskratio_pvalue(), self.riskratio_pvalue)
def test_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.riskratio_confint(0.05)
lcb2, ucb2 = self.riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_log_riskratio_confint(self):
lcb1, ucb1 = self.tbl_obj.log_riskratio_confint(0.05)
lcb2, ucb2 = self.log_riskratio_confint
assert_allclose(lcb1, lcb2)
assert_allclose(ucb1, ucb2)
def test_from_data(self):
assert_equal(self.tbl_obj.summary().as_text(),
self.tbl_data_obj.summary().as_text())
def test_summary(self):
assert_equal(self.tbl_obj.summary().as_text(),
self.summary_string)
class Test2x2_1(Check2x2Mixin):
@classmethod
def setup_class(cls):
data = np.zeros((8, 2))
data[:, 0] = [0, 0, 1, 1, 0, 0, 1, 1]
data[:, 1] = [0, 1, 0, 1, 0, 1, 0, 1]
cls.data = np.asarray(data)
cls.table = np.asarray([[2, 2], [2, 2]])
cls.oddsratio = 1.
cls.log_oddsratio = 0.
cls.log_oddsratio_se = np.sqrt(2)
cls.oddsratio_confint = [0.062548836166112329, 15.987507702689751]
cls.oddsratio_pvalue = 1.
cls.riskratio = 1.
cls.log_riskratio = 0.
cls.log_riskratio_se = 1 / np.sqrt(2)
cls.riskratio_pvalue = 1.
cls.riskratio_confint = [0.25009765325990629,
3.9984381579173824]
cls.log_riskratio_confint = [-1.3859038243496782,
1.3859038243496782]
ss = [ ' Estimate SE LCB UCB p-value',
'---------------------------------------------------',
'Odds ratio 1.000 0.063 15.988 1.000',
'Log odds ratio 0.000 1.414 -2.772 2.772 1.000',
'Risk ratio 1.000 0.250 3.998 1.000',
'Log risk ratio 0.000 0.707 -1.386 1.386 1.000',
'---------------------------------------------------']
cls.summary_string = '\n'.join(ss)
cls.initialize()
|
[
"numpy.random.seed",
"pandas.read_csv",
"numpy.ones",
"numpy.random.randint",
"numpy.arange",
"os.path.join",
"pandas.DataFrame",
"numpy.full",
"os.path.abspath",
"warnings.simplefilter",
"statsmodels.stats.contingency_tables.mcnemar",
"warnings.catch_warnings",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"statsmodels.stats.contingency_tables.Table2x2",
"numpy.dstack",
"statsmodels.stats.contingency_tables.SquareTable.from_data",
"numpy.asarray",
"statsmodels.stats.contingency_tables.StratifiedTable",
"statsmodels.api.stats.SquareTable",
"statsmodels.stats.contingency_tables.cochrans_q",
"pandas.crosstab",
"numpy.log",
"statsmodels.stats.contingency_tables.SquareTable",
"statsmodels.stats.contingency_tables.Table",
"numpy.zeros",
"statsmodels.stats.contingency_tables.StratifiedTable.from_data",
"statsmodels.stats.contingency_tables.Table2x2.from_data",
"numpy.array",
"scipy.stats.chi2_contingency",
"numpy.sqrt"
] |
[((354, 393), 'os.path.join', 'os.path.join', (['cur_dir', '"""results"""', 'fname'], {}), "(cur_dir, 'results', fname)\n", (366, 393), False, 'import os\n'), ((406, 424), 'pandas.read_csv', 'pd.read_csv', (['fpath'], {}), '(fpath)\n', (417, 424), True, 'import pandas as pd\n'), ((468, 500), 'numpy.asarray', 'np.asarray', (['[[23, 15], [19, 31]]'], {}), '([[23, 15], [19, 31]])\n', (478, 500), True, 'import numpy as np\n'), ((514, 591), 'numpy.asarray', 'np.asarray', (['[[144, 33, 84, 126], [2, 4, 14, 29], [0, 2, 6, 25], [0, 0, 1, 5]]'], {}), '([[144, 33, 84, 126], [2, 4, 14, 29], [0, 2, 6, 25], [0, 0, 1, 5]])\n', (524, 591), True, 'import numpy as np\n'), ((677, 727), 'numpy.asarray', 'np.asarray', (['[[20, 10, 5], [3, 30, 15], [0, 5, 40]]'], {}), '([[20, 10, 5], [3, 30, 15], [0, 5, 40]])\n', (687, 727), True, 'import numpy as np\n'), ((277, 302), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (292, 302), False, 'import os\n'), ((1302, 1321), 'numpy.random.seed', 'np.random.seed', (['(434)'], {}), '(434)\n', (1316, 1321), True, 'import numpy as np\n'), ((1399, 1427), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1416, 1427), True, 'import numpy as np\n'), ((1443, 1471), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(100)'], {}), '(0, 5, 100)\n', (1460, 1471), True, 'import numpy as np\n'), ((1484, 1515), 'pandas.crosstab', 'pd.crosstab', (["df['v1']", "df['v2']"], {}), "(df['v1'], df['v2'])\n", (1495, 1515), True, 'import pandas as pd\n'), ((1529, 1552), 'statsmodels.stats.contingency_tables.SquareTable', 'ctab.SquareTable', (['table'], {}), '(table)\n', (1545, 1552), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((1565, 1595), 'statsmodels.stats.contingency_tables.SquareTable.from_data', 'ctab.SquareTable.from_data', (['df'], {}), '(df)\n', (1591, 1595), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((1923, 1959), 'numpy.testing.assert_equal', 'assert_equal', (['rslt1.table[0, 0]', '(8.0)'], {}), '(rslt1.table[0, 0], 8.0)\n', (1935, 1959), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2049, 2102), 'pandas.DataFrame', 'pd.DataFrame', (['tab'], {'index': '[0, 1, 3]', 'columns': '[0, 2, 3]'}), '(tab, index=[0, 1, 3], columns=[0, 2, 3])\n', (2061, 2102), True, 'import pandas as pd\n'), ((2114, 2153), 'statsmodels.stats.contingency_tables.SquareTable', 'ctab.SquareTable', (['df'], {'shift_zeros': '(False)'}), '(df, shift_zeros=False)\n', (2130, 2153), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2163, 2254), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]]'], {'dtype': 'np.float64'}), '([[1, 0, 0, 3], [2, 0, 1, 4], [0, 0, 0, 0], [3, 0, 0, 5]], dtype=\n np.float64)\n', (2173, 2254), True, 'import numpy as np\n'), ((2274, 2300), 'numpy.testing.assert_equal', 'assert_equal', (['e', 'df2.table'], {}), '(e, df2.table)\n', (2286, 2300), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((2390, 2407), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (2400, 2407), True, 'import numpy as np\n'), ((2422, 2439), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (2432, 2439), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2845, 2862), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (2855, 2862), True, 'import numpy as np\n'), ((2877, 2894), 'statsmodels.stats.contingency_tables.Table', 
'ctab.Table', (['table'], {}), '(table)\n', (2887, 2894), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((2940, 2980), 'numpy.testing.assert_allclose', 'assert_allclose', (['loc_odds[0, 0]', '(5 / 8.0)'], {}), '(loc_odds[0, 0], 5 / 8.0)\n', (2955, 2980), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((3216, 3250), 'numpy.zeros', 'np.zeros', (['(3, 4)'], {'dtype': 'np.float64'}), '((3, 4), dtype=np.float64)\n', (3224, 3250), True, 'import numpy as np\n'), ((3264, 3284), 'numpy.full', 'np.full', (['(3, 4)', '(0.5)'], {}), '((3, 4), 0.5)\n', (3271, 3284), True, 'import numpy as np\n'), ((3418, 3481), 'numpy.asarray', 'np.asarray', (['[[0, 1, 2], [3, 0, 4], [5, 6, 0]]'], {'dtype': 'np.float64'}), '([[0, 1, 2], [3, 0, 4], [5, 6, 0]], dtype=np.float64)\n', (3428, 3481), True, 'import numpy as np\n'), ((3530, 3599), 'numpy.asarray', 'np.asarray', (['[[0.5, 1, 2], [3, 0.5, 4], [5, 6, 0.5]]'], {'dtype': 'np.float64'}), '([[0.5, 1, 2], [3, 0.5, 4], [5, 6, 0.5]], dtype=np.float64)\n', (3540, 3599), True, 'import numpy as np\n'), ((4010, 4036), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tab1'], {}), '(tab1)\n', (4030, 4036), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4047, 4073), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tab2'], {}), '(tab2)\n', (4067, 4073), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4079, 4138), 'numpy.testing.assert_allclose', 'assert_allclose', (['ct1.oddsratio_pooled', 'ct2.oddsratio_pooled'], {}), '(ct1.oddsratio_pooled, ct2.oddsratio_pooled)\n', (4094, 4138), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4143, 4198), 'numpy.testing.assert_allclose', 'assert_allclose', (['ct1.logodds_pooled', 'ct2.logodds_pooled'], {}), '(ct1.logodds_pooled, ct2.logodds_pooled)\n', (4158, 4198), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4550, 4668), 'numpy.array', 'np.array', (['[[-2.14562121, -2.28538719, -0.26923882, 4.7649169], [0.58514314, \n 0.62325942, 0.07342547, -1.29946443]]'], {}), '([[-2.14562121, -2.28538719, -0.26923882, 4.7649169], [0.58514314, \n 0.62325942, 0.07342547, -1.29946443]])\n', (4558, 4668), True, 'import numpy as np\n'), ((4697, 4812), 'numpy.array', 'np.array', (['[[-2.55112945, -2.6338782, -0.34712127, 5.5751083], [2.55112945, 2.6338782,\n 0.34712127, -5.5751083]]'], {}), '([[-2.55112945, -2.6338782, -0.34712127, 5.5751083], [2.55112945, \n 2.6338782, 0.34712127, -5.5751083]])\n', (4705, 4812), True, 'import numpy as np\n'), ((4846, 4863), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (4856, 4863), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((4868, 4932), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.fittedvalues', 'fit'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.fittedvalues, fit, atol=0.0001, rtol=0.0001)\n', (4883, 4932), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4933, 4997), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.chi2_contribs', 'c2'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.chi2_contribs, c2, atol=0.0001, rtol=0.0001)\n', (4948, 4997), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((4998, 5062), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.resid_pearson', 'pr'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.resid_pearson, pr, atol=0.0001, rtol=0.0001)\n', (5013, 
5062), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5063, 5133), 'numpy.testing.assert_allclose', 'assert_allclose', (['tab.standardized_resids', 'sr'], {'atol': '(0.0001)', 'rtol': '(0.0001)'}), '(tab.standardized_resids, sr, atol=0.0001, rtol=0.0001)\n', (5078, 5133), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6383, 6403), 'numpy.random.seed', 'np.random.seed', (['(8743)'], {}), '(8743)\n', (6397, 6403), True, 'import numpy as np\n'), ((6417, 6455), 'numpy.random.randint', 'np.random.randint', (['(10)', '(30)'], {'size': '(4, 4)'}), '(10, 30, size=(4, 4))\n', (6434, 6455), True, 'import numpy as np\n'), ((6519, 6542), 'scipy.stats.chi2_contingency', 'chi2_contingency', (['table'], {}), '(table)\n', (6535, 6542), False, 'from scipy.stats import chi2_contingency\n'), ((6602, 6645), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.statistic', 'rslt_scipy[0]'], {}), '(b.statistic, rslt_scipy[0])\n', (6617, 6645), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6650, 6690), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.pvalue', 'rslt_scipy[1]'], {}), '(b.pvalue, rslt_scipy[1])\n', (6665, 6690), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7114, 7168), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(False)', 'correction': '(False)'}), '(tables[0], exact=False, correction=False)\n', (7126, 7168), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7179, 7210), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['tables[0]'], {}), '(tables[0])\n', (7199, 7210), True, 'import statsmodels.api as sm\n'), ((7241, 7284), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.statistic', 'b2.statistic'], {}), '(b1.statistic, b2.statistic)\n', (7256, 7284), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7289, 7311), 'numpy.testing.assert_equal', 'assert_equal', (['b2.df', '(1)'], {}), '(b2.df, 1)\n', (7301, 7311), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7365, 7418), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(False)', 'correction': '(True)'}), '(tables[0], exact=False, correction=True)\n', (7377, 7418), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7423, 7483), 'numpy.testing.assert_allclose', 'assert_allclose', (['b3.pvalue', "r_results.loc[0, 'homog_cont_p']"], {}), "(b3.pvalue, r_results.loc[0, 'homog_cont_p'])\n", (7438, 7483), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7536, 7571), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['tables[0]'], {'exact': '(True)'}), '(tables[0], exact=True)\n', (7548, 7571), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((7576, 7637), 'numpy.testing.assert_allclose', 'assert_allclose', (['b4.pvalue', "r_results.loc[0, 'homog_binom_p']"], {}), "(b4.pvalue, r_results.loc[0, 'homog_binom_p'])\n", (7591, 7637), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7808, 7856), 'numpy.asarray', 'np.asarray', (['[[[0, 1], [1, 1]], [[2, 2], [1, 0]]]'], {}), '([[[0, 1], [1, 1]], [[2, 2], [1, 0]]])\n', (7818, 7856), True, 'import numpy as np\n'), ((7887, 7930), 'statsmodels.stats.contingency_tables.StratifiedTable.from_data', 'ctab.StratifiedTable.from_data', (['(0)', '(1)', '(2)', 'df'], {}), '(0, 1, 2, df)\n', (7917, 7930), True, 'import statsmodels.stats.contingency_tables as 
ctab\n'), ((7935, 7962), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (7947, 7962), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8054, 8081), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8066, 8081), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8196, 8224), 'numpy.asarray', 'np.asarray', (['[[1, 2], [4, 1]]'], {}), '([[1, 2], [4, 1]])\n', (8206, 8224), True, 'import numpy as np\n'), ((8255, 8301), 'statsmodels.stats.contingency_tables.Table2x2.from_data', 'ctab.Table2x2.from_data', (['df'], {'shift_zeros': '(False)'}), '(df, shift_zeros=False)\n', (8278, 8301), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((8306, 8333), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8318, 8333), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((8428, 8455), 'numpy.testing.assert_equal', 'assert_equal', (['tab1.table', 'e'], {}), '(tab1.table, e)\n', (8440, 8455), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9544, 9561), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (9554, 9561), True, 'import numpy as np\n'), ((9586, 9629), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['table'], {'return_object': '(False)'}), '(table, return_object=False)\n', (9601, 9629), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((9634, 9660), 'numpy.testing.assert_allclose', 'assert_allclose', (['stat', '(4.2)'], {}), '(stat, 4.2)\n', (9649, 9660), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9665, 9687), 'numpy.testing.assert_allclose', 'assert_allclose', (['df', '(3)'], {}), '(df, 3)\n', (9680, 9687), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((9971, 9988), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (9981, 9988), True, 'import numpy as np\n'), ((10013, 10056), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['table'], {'return_object': '(False)'}), '(table, return_object=False)\n', (10028, 10056), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10061, 10103), 'numpy.testing.assert_allclose', 'assert_allclose', (['stat', '(1.2174)'], {'rtol': '(0.0001)'}), '(stat, 1.2174, rtol=0.0001)\n', (10076, 10103), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10106, 10128), 'numpy.testing.assert_allclose', 'assert_allclose', (['df', '(4)'], {}), '(df, 4)\n', (10121, 10128), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10283, 10324), 'statsmodels.stats.contingency_tables.cochrans_q', 'ctab.cochrans_q', (['data'], {'return_object': '(True)'}), '(data, return_object=True)\n', (10298, 10324), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10334, 10383), 'statsmodels.stats.contingency_tables.mcnemar', 'ctab.mcnemar', (['xtab'], {'exact': '(False)', 'correction': '(False)'}), '(xtab, exact=False, correction=False)\n', (10346, 10383), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10388, 10431), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.statistic', 'b2.statistic'], {}), '(b1.statistic, b2.statistic)\n', (10403, 10431), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((10436, 10473), 'numpy.testing.assert_allclose', 'assert_allclose', (['b1.pvalue', 'b2.pvalue'], {}), '(b1.pvalue, b2.pvalue)\n', 
(10451, 10473), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((854, 900), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (874, 900), True, 'import statsmodels.api as sm\n'), ((939, 1000), 'numpy.testing.assert_allclose', 'assert_allclose', (['hm.statistic', "r_results.loc[k, 'homog_stat']"], {}), "(hm.statistic, r_results.loc[k, 'homog_stat'])\n", (954, 1000), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1009, 1061), 'numpy.testing.assert_allclose', 'assert_allclose', (['hm.df', "r_results.loc[k, 'homog_df']"], {}), "(hm.df, r_results.loc[k, 'homog_df'])\n", (1024, 1061), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((1625, 1642), 'numpy.asarray', 'np.asarray', (['table'], {}), '(table)\n', (1635, 1642), True, 'import numpy as np\n'), ((2666, 2682), 'numpy.log', 'np.log', (['cum_odds'], {}), '(cum_odds)\n', (2672, 2682), True, 'import numpy as np\n'), ((3094, 3110), 'numpy.log', 'np.log', (['loc_odds'], {}), '(loc_odds)\n', (3100, 3110), True, 'import numpy as np\n'), ((3980, 3996), 'numpy.asarray', 'np.asarray', (['tab1'], {}), '(tab1)\n', (3990, 3996), True, 'import numpy as np\n'), ((5446, 5507), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', "r_results.loc[k, 'lbl_stat']"], {}), "(rslt.statistic, r_results.loc[k, 'lbl_stat'])\n", (5461, 5507), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5516, 5579), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.null_mean', "r_results.loc[k, 'lbl_expval']"], {}), "(rslt.null_mean, r_results.loc[k, 'lbl_expval'])\n", (5531, 5579), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5588, 5651), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.null_sd ** 2)', "r_results.loc[k, 'lbl_var']"], {}), "(rslt.null_sd ** 2, r_results.loc[k, 'lbl_var'])\n", (5603, 5651), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5658, 5749), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.zscore ** 2)', "r_results.loc[k, 'lbl_chi2']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.zscore ** 2, r_results.loc[k, 'lbl_chi2'], rtol=1e-05,\n atol=1e-05)\n", (5673, 5749), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5750, 5838), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', "r_results.loc[k, 'lbl_pvalue']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.pvalue, r_results.loc[k, 'lbl_pvalue'], rtol=1e-05,\n atol=1e-05)\n", (5765, 5838), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((5977, 6039), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', "r_results.loc[k, 'lbl2_stat']"], {}), "(rslt.statistic, r_results.loc[k, 'lbl2_stat'])\n", (5992, 6039), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6048, 6112), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.null_mean', "r_results.loc[k, 'lbl2_expval']"], {}), "(rslt.null_mean, r_results.loc[k, 'lbl2_expval'])\n", (6063, 6112), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6121, 6185), 'numpy.testing.assert_allclose', 'assert_allclose', (['(rslt.null_sd ** 2)', "r_results.loc[k, 'lbl2_var']"], {}), "(rslt.null_sd ** 2, r_results.loc[k, 'lbl2_var'])\n", (6136, 6185), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6192, 6256), 'numpy.testing.assert_allclose', 
'assert_allclose', (['(rslt.zscore ** 2)', "r_results.loc[k, 'lbl2_chi2']"], {}), "(rslt.zscore ** 2, r_results.loc[k, 'lbl2_chi2'])\n", (6207, 6256), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6263, 6352), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', "r_results.loc[k, 'lbl2_pvalue']"], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), "(rslt.pvalue, r_results.loc[k, 'lbl2_pvalue'], rtol=1e-05,\n atol=1e-05)\n", (6278, 6352), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6766, 6812), 'statsmodels.api.stats.SquareTable', 'sm.stats.SquareTable', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (6786, 6812), True, 'import statsmodels.api as sm\n'), ((6847, 6908), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.statistic', "r_results.loc[k, 'bowker_stat']"], {}), "(b.statistic, r_results.loc[k, 'bowker_stat'])\n", (6862, 6908), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6917, 6966), 'numpy.testing.assert_equal', 'assert_equal', (['b.df', "r_results.loc[k, 'bowker_df']"], {}), "(b.df, r_results.loc[k, 'bowker_df'])\n", (6929, 6966), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((6975, 7035), 'numpy.testing.assert_allclose', 'assert_allclose', (['b.pvalue', "r_results.loc[k, 'bowker_pvalue']"], {}), "(b.pvalue, r_results.loc[k, 'bowker_pvalue'])\n", (6990, 7035), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((7682, 7778), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0, 0, 1, 1, 1, 1]]'], {}), '([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0], [0, 0, 0,\n 0, 1, 1, 1, 1]])\n', (7694, 7778), True, 'import pandas as pd\n'), ((8034, 8048), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (8044, 8048), True, 'import numpy as np\n'), ((8119, 8185), 'pandas.DataFrame', 'pd.DataFrame', (['[[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]]'], {}), '([[1, 1, 1, 0, 1, 0, 1, 0], [0, 1, 0, 1, 0, 1, 0, 0]])\n', (8131, 8185), True, 'import pandas as pd\n'), ((8389, 8403), 'numpy.asarray', 'np.asarray', (['df'], {}), '(df)\n', (8399, 8403), True, 'import numpy as np\n'), ((10237, 10272), 'pandas.crosstab', 'pd.crosstab', (['data[:, 0]', 'data[:, 1]'], {}), '(data[:, 0], data[:, 1])\n', (10248, 10272), True, 'import pandas as pd\n'), ((10761, 10790), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables1'], {}), '(tables1)\n', (10781, 10790), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10812, 10858), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables'], {'shift_zeros': '(True)'}), '(tables, shift_zeros=True)\n', (10832, 10858), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10943, 10978), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables_pandas'], {}), '(tables_pandas)\n', (10963, 10978), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((11026, 11123), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.rslt.oddsratio_pooled', 'self.oddsratio_pooled'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(self.rslt.oddsratio_pooled, self.oddsratio_pooled, rtol=\n 0.0001, atol=0.0001)\n', (11041, 11123), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11184, 11276), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.rslt.logodds_pooled', 'self.logodds_pooled'], {'rtol': 
'(0.0001)', 'atol': '(0.0001)'}), '(self.rslt.logodds_pooled, self.logodds_pooled, rtol=0.0001,\n atol=0.0001)\n', (11199, 11276), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11390, 11460), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.mh_stat'], {'rtol': '(0.0001)', 'atol': '(1e-05)'}), '(rslt.statistic, self.mh_stat, rtol=0.0001, atol=1e-05)\n', (11405, 11460), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11466, 11536), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.mh_pvalue'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.mh_pvalue, rtol=0.0001, atol=0.0001)\n', (11481, 11536), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11644, 11703), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb', 'self.or_lcb'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(lcb, self.or_lcb, rtol=0.0001, atol=0.0001)\n', (11659, 11703), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((11708, 11767), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb', 'self.or_ucb'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(ucb, self.or_ucb, rtol=0.0001, atol=0.0001)\n', (11723, 11767), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12214, 12286), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.or_homog'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.statistic, self.or_homog, rtol=0.0001, atol=0.0001)\n', (12229, 12286), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12291, 12362), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.or_homog_p'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.or_homog_p, rtol=0.0001, atol=0.0001)\n', (12306, 12362), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12422, 12498), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.statistic', 'self.or_homog_adj'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.statistic, self.or_homog_adj, rtol=0.0001, atol=0.0001)\n', (12437, 12498), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12503, 12578), 'numpy.testing.assert_allclose', 'assert_allclose', (['rslt.pvalue', 'self.or_homog_adj_p'], {'rtol': '(0.0001)', 'atol': '(0.0001)'}), '(rslt.pvalue, self.or_homog_adj_p, rtol=0.0001, atol=0.0001)\n', (12518, 12578), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((12865, 12884), 'numpy.random.seed', 'np.random.seed', (['(241)'], {}), '(241)\n', (12879, 12884), True, 'import numpy as np\n'), ((12979, 13007), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (12996, 13007), True, 'import numpy as np\n'), ((13027, 13055), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(100)'], {}), '(0, 2, 100)\n', (13044, 13055), True, 'import numpy as np\n'), ((13298, 13326), 'statsmodels.stats.contingency_tables.StratifiedTable', 'ctab.StratifiedTable', (['tables'], {}), '(tables)\n', (13318, 13326), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13343, 13398), 'statsmodels.stats.contingency_tables.StratifiedTable.from_data', 'ctab.StratifiedTable.from_data', (['"""v1"""', '"""v2"""', '"""strat"""', 'df'], {}), "('v1', 'v2', 'strat', df)\n", (13373, 13398), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13854, 13880), 'numpy.array', 'np.array', (['[[0, 0], [6, 5]]'], {}), '([[0, 0], 
[6, 5]])\n', (13862, 13880), True, 'import numpy as np\n'), ((13901, 13927), 'numpy.array', 'np.array', (['[[3, 0], [3, 6]]'], {}), '([[3, 0], [3, 6]])\n', (13909, 13927), True, 'import numpy as np\n'), ((13948, 13974), 'numpy.array', 'np.array', (['[[6, 2], [0, 4]]'], {}), '([[6, 2], [0, 4]])\n', (13956, 13974), True, 'import numpy as np\n'), ((13995, 14021), 'numpy.array', 'np.array', (['[[5, 6], [1, 0]]'], {}), '([[5, 6], [1, 0]])\n', (14003, 14021), True, 'import numpy as np\n'), ((14042, 14068), 'numpy.array', 'np.array', (['[[2, 5], [0, 0]]'], {}), '([[2, 5], [0, 0]])\n', (14050, 14068), True, 'import numpy as np\n'), ((14164, 14173), 'numpy.log', 'np.log', (['(7)'], {}), '(7)\n', (14170, 14173), True, 'import numpy as np\n'), ((14797, 14827), 'numpy.array', 'np.array', (['[[20, 14], [10, 24]]'], {}), '([[20, 14], [10, 24]])\n', (14805, 14827), True, 'import numpy as np\n'), ((14848, 14877), 'numpy.array', 'np.array', (['[[15, 12], [3, 15]]'], {}), '([[15, 12], [3, 15]])\n', (14856, 14877), True, 'import numpy as np\n'), ((14898, 14924), 'numpy.array', 'np.array', (['[[3, 2], [3, 2]]'], {}), '([[3, 2], [3, 2]])\n', (14906, 14924), True, 'import numpy as np\n'), ((14945, 14972), 'numpy.array', 'np.array', (['[[12, 3], [7, 5]]'], {}), '([[12, 3], [7, 5]])\n', (14953, 14972), True, 'import numpy as np\n'), ((14993, 15019), 'numpy.array', 'np.array', (['[[1, 0], [3, 2]]'], {}), '([[1, 0], [3, 2]])\n', (15001, 15019), True, 'import numpy as np\n'), ((15163, 15177), 'numpy.log', 'np.log', (['(3.5912)'], {}), '(3.5912)\n', (15169, 15177), True, 'import numpy as np\n'), ((16104, 16136), 'numpy.array', 'np.array', (['[[313, 512], [19, 89]]'], {}), '([[313, 512], [19, 89]])\n', (16112, 16136), True, 'import numpy as np\n'), ((16157, 16188), 'numpy.array', 'np.array', (['[[207, 353], [8, 17]]'], {}), '([[207, 353], [8, 17]])\n', (16165, 16188), True, 'import numpy as np\n'), ((16209, 16243), 'numpy.array', 'np.array', (['[[205, 120], [391, 202]]'], {}), '([[205, 120], [391, 202]])\n', (16217, 16243), True, 'import numpy as np\n'), ((16264, 16298), 'numpy.array', 'np.array', (['[[278, 139], [244, 131]]'], {}), '([[278, 139], [244, 131]])\n', (16272, 16298), True, 'import numpy as np\n'), ((16319, 16351), 'numpy.array', 'np.array', (['[[138, 53], [299, 94]]'], {}), '([[138, 53], [299, 94]])\n', (16327, 16351), True, 'import numpy as np\n'), ((16372, 16404), 'numpy.array', 'np.array', (['[[351, 22], [317, 24]]'], {}), '([[351, 22], [317, 24]])\n', (16380, 16404), True, 'import numpy as np\n'), ((16507, 16523), 'numpy.log', 'np.log', (['(1.101879)'], {}), '(1.101879)\n', (16513, 16523), True, 'import numpy as np\n'), ((16985, 17009), 'statsmodels.stats.contingency_tables.Table2x2', 'ctab.Table2x2', (['cls.table'], {}), '(cls.table)\n', (16998, 17009), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((17037, 17070), 'statsmodels.stats.contingency_tables.Table2x2.from_data', 'ctab.Table2x2.from_data', (['cls.data'], {}), '(cls.data)\n', (17060, 17070), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((17110, 17165), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.oddsratio', 'self.oddsratio'], {}), '(self.tbl_obj.oddsratio, self.oddsratio)\n', (17125, 17165), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17210, 17273), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_oddsratio', 'self.log_oddsratio'], {}), '(self.tbl_obj.log_oddsratio, self.log_oddsratio)\n', (17225, 17273), False, 'from numpy.testing 
import assert_allclose, assert_equal\n'), ((17321, 17390), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_oddsratio_se', 'self.log_oddsratio_se'], {}), '(self.tbl_obj.log_oddsratio_se, self.log_oddsratio_se)\n', (17336, 17390), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17660, 17687), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (17675, 17687), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17696, 17723), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (17711, 17723), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17764, 17819), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.riskratio', 'self.riskratio'], {}), '(self.tbl_obj.riskratio, self.riskratio)\n', (17779, 17819), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17864, 17927), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_riskratio', 'self.log_riskratio'], {}), '(self.tbl_obj.log_riskratio, self.log_riskratio)\n', (17879, 17927), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((17975, 18044), 'numpy.testing.assert_allclose', 'assert_allclose', (['self.tbl_obj.log_riskratio_se', 'self.log_riskratio_se'], {}), '(self.tbl_obj.log_riskratio_se, self.log_riskratio_se)\n', (17990, 18044), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18314, 18341), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (18329, 18341), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18350, 18377), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (18365, 18377), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18540, 18567), 'numpy.testing.assert_allclose', 'assert_allclose', (['lcb1', 'lcb2'], {}), '(lcb1, lcb2)\n', (18555, 18567), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18576, 18603), 'numpy.testing.assert_allclose', 'assert_allclose', (['ucb1', 'ucb2'], {}), '(ucb1, ucb2)\n', (18591, 18603), False, 'from numpy.testing import assert_allclose, assert_equal\n'), ((18971, 18987), 'numpy.zeros', 'np.zeros', (['(8, 2)'], {}), '((8, 2))\n', (18979, 18987), True, 'import numpy as np\n'), ((19099, 19115), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (19109, 19115), True, 'import numpy as np\n'), ((19136, 19164), 'numpy.asarray', 'np.asarray', (['[[2, 2], [2, 2]]'], {}), '([[2, 2], [2, 2]])\n', (19146, 19164), True, 'import numpy as np\n'), ((19255, 19265), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19262, 19265), True, 'import numpy as np\n'), ((3302, 3334), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(False)'}), '(t, shift_zeros=False)\n', (3312, 3334), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3362, 3393), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(True)'}), '(t, shift_zeros=True)\n', (3372, 3393), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3657, 3670), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {}), '(t)\n', (3667, 3670), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((3698, 3729), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['t'], {'shift_zeros': '(True)'}), '(t, shift_zeros=True)\n', 
(3708, 3729), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((5229, 5254), 'numpy.arange', 'np.arange', (['table.shape[0]'], {}), '(table.shape[0])\n', (5238, 5254), True, 'import numpy as np\n'), ((5280, 5305), 'numpy.arange', 'np.arange', (['table.shape[1]'], {}), '(table.shape[1])\n', (5289, 5305), True, 'import numpy as np\n'), ((6552, 6569), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {}), '(table)\n', (6562, 6569), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((10724, 10741), 'numpy.dstack', 'np.dstack', (['tables'], {}), '(tables)\n', (10733, 10741), True, 'import numpy as np\n'), ((10884, 10899), 'pandas.DataFrame', 'pd.DataFrame', (['x'], {}), '(x)\n', (10896, 10899), True, 'import pandas as pd\n'), ((11892, 11911), 'numpy.log', 'np.log', (['self.or_lcb'], {}), '(self.or_lcb)\n', (11898, 11911), True, 'import numpy as np\n'), ((11988, 12007), 'numpy.log', 'np.log', (['self.or_ucb'], {}), '(self.or_ucb)\n', (11994, 12007), True, 'import numpy as np\n'), ((12618, 12643), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (12641, 12643), False, 'import warnings\n'), ((12657, 12704), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'RuntimeWarning'], {}), "('ignore', RuntimeWarning)\n", (12678, 12704), False, 'import warnings\n'), ((13086, 13099), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (13095, 13099), True, 'import numpy as np\n'), ((13101, 13112), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (13108, 13112), True, 'import numpy as np\n'), ((13180, 13211), 'numpy.arange', 'np.arange', (['(10 * k)', '(10 * (k + 1))'], {}), '(10 * k, 10 * (k + 1))\n', (13189, 13211), True, 'import numpy as np\n'), ((19468, 19478), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (19475, 19478), True, 'import numpy as np\n'), ((5352, 5388), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (5362, 5388), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((5880, 5916), 'statsmodels.stats.contingency_tables.Table', 'ctab.Table', (['table'], {'shift_zeros': '(False)'}), '(table, shift_zeros=False)\n', (5890, 5916), True, 'import statsmodels.stats.contingency_tables as ctab\n'), ((13232, 13279), 'pandas.crosstab', 'pd.crosstab', (["df.loc[ii, 'v1']", "df.loc[ii, 'v2']"], {}), "(df.loc[ii, 'v1'], df.loc[ii, 'v2'])\n", (13243, 13279), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""Tests for the variant_denovo_filtration workflow starting off variant_phasing.
"""
import textwrap
import pytest
import ruamel.yaml as yaml
from snakemake.io import Wildcards
from snappy_pipeline.workflows.variant_denovo_filtration import VariantDeNovoFiltrationWorkflow
from .common import get_expected_output_vcf_files_dict
from .conftest import patch_module_fs
__author__ = "<NAME> <<EMAIL>>"
@pytest.fixture(scope="module") # otherwise: performance issues
def minimal_config():
"""Return YAML parsing result for (germline) configuration"""
return yaml.round_trip_load(
textwrap.dedent(
r"""
static_data_config:
reference:
path: /path/to/ref.fa
dbsnp:
path: /path/to/dbsnp.vcf.gz
step_config:
ngs_mapping:
tools:
dna: ['bwa']
compute_coverage_bed: true
path_target_regions: /path/to/regions.bed
bwa:
path_index: /path/to/bwa/index.fa
variant_calling:
tools:
- gatk_hc
variant_denovo_filtration:
path_variant_phasing: ../variant_phasing
data_sets:
first_batch:
file: sheet.tsv
search_patterns:
- {'left': '*/*/*_R1.fastq.gz', 'right': '*/*/*_R2.fastq.gz'}
search_paths: ['/path']
type: germline_variants
naming_scheme: only_secondary_id
"""
).lstrip()
)
@pytest.fixture
def variant_de_novo_filtration_workflow(
dummy_workflow,
minimal_config,
dummy_cluster_config,
config_lookup_paths,
work_dir,
config_paths,
germline_sheet_fake_fs,
mocker,
):
"""Return VariantCallingWorkflow object pre-configured with germline sheet"""
    # Patch out file-system related things in the abstract workflow module (the step's link crawling is defined there)
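    # Minimal FASTA index (.fai) content: chromosomes 1 and 2 with their GRCh37 lengths.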
germline_sheet_fake_fs.fs.create_file(
file_path="/path/to/ref.fa.fai",
contents="1\t249250621\t52\t60\t61\n2\t243199373\t253404903\t60\t61\n",
create_missing_dirs=True,
)
patch_module_fs("snappy_pipeline.workflows.abstract", germline_sheet_fake_fs, mocker)
patch_module_fs("snappy_pipeline.workflows.variant_calling", germline_sheet_fake_fs, mocker)
patch_module_fs("snappy_pipeline.workflows.variant_annotation", germline_sheet_fake_fs, mocker)
patch_module_fs("snappy_pipeline.workflows.variant_phasing", germline_sheet_fake_fs, mocker)
patch_module_fs(
"snappy_pipeline.workflows.variant_denovo_filtration", germline_sheet_fake_fs, mocker
)
# Update the "globals" attribute of the mock workflow (snakemake.workflow.Workflow) so we
# can obtain paths from the function as if we really had a NGSMappingPipelineStep there
dummy_workflow.globals = {
"ngs_mapping": lambda x: "NGS_MAPPING/" + x,
"variant_calling": lambda x: "VARIANT_CALLING/" + x,
"variant_annotation": lambda x: "VARIANT_ANNOTATION/" + x,
"variant_phasing": lambda x: "VARIANT_PHASING/" + x,
}
# Construct the workflow object
return VariantDeNovoFiltrationWorkflow(
dummy_workflow,
minimal_config,
dummy_cluster_config,
config_lookup_paths,
config_paths,
work_dir,
)
# Tests for FilterDeNovosStepPart ------------------------------------------------------------------
def test_filter_de_novo_from_variant_phasing_step_part_get_input_files(
variant_de_novo_filtration_workflow,
):
# Define expected
ngs_mapping_out = "NGS_MAPPING/output/bwa.P001-N1-DNA1-WGS1/out/"
bam_ped_dict = {
"bai": ngs_mapping_out + "bwa.P001-N1-DNA1-WGS1.bam.bai",
"bam": ngs_mapping_out + "bwa.P001-N1-DNA1-WGS1.bam",
"ped": "work/write_pedigree.P001-N1-DNA1-WGS1/out/P001-N1-DNA1-WGS1.ped",
}
variant_phasing_name_out = (
"VARIANT_PHASING/output/"
"bwa.gatk_hc.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.P001-N1-DNA1-WGS1/out/"
"bwa.gatk_hc.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.P001-N1-DNA1-WGS1"
)
vcf_dict = get_expected_output_vcf_files_dict(base_out=variant_phasing_name_out)
expected = {**bam_ped_dict, **vcf_dict}
# Get actual
wildcards = Wildcards(
fromdict={"mapper": "bwa", "caller": "gatk_hc", "index_library": "P001-N1-DNA1-WGS1"}
)
actual = variant_de_novo_filtration_workflow.get_input_files("filter_denovo", "run")(wildcards)
assert actual == expected
def test_filter_de_novo_from_variant_phasing_step_part_get_output_files(
variant_de_novo_filtration_workflow,
):
# Define expected
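    # Note: "{index_library,[^\.]+}" uses Snakemake's inline wildcard-constraint
    # syntax ({name,regex}) to keep the wildcard from matching across dots.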
base_name_out = (
r"work/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos."
r"{index_library,[^\.]+}/out/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt."
r"gatk_rbp.de_novos.{index_library}"
)
expected = get_expected_output_vcf_files_dict(base_out=base_name_out)
# Get actual
actual = variant_de_novo_filtration_workflow.get_output_files("filter_denovo", "run")
assert actual == expected
def test_filter_de_novo_from_variant_phasing_step_part_get_log_file(
variant_de_novo_filtration_workflow,
):
# Define expected
expected = (
r"work/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos."
r"{index_library,[^\.]+}/log/{mapper}.{caller}.jannovar_annotate_vcf."
r"gatk_pbt.gatk_rbp.de_novos.{index_library}.log"
)
# Get actual
actual = variant_de_novo_filtration_workflow.get_log_file("filter_denovo", "run")
assert actual == expected
def test_filter_de_novo_from_variant_phasing_step_part_update_cluster_config(
variant_de_novo_filtration_workflow, dummy_cluster_config
):
expected = {"mem", "time", "ntasks"}
actual = set(dummy_cluster_config["variant_denovo_filtration_filter_denovo_run"].keys())
assert actual == expected
# Tests for FilterDeNovosHardStepPart --------------------------------------------------------------
def test_filter_de_novo_from_variant_annotation_hard_step_part_get_input_files(
variant_de_novo_filtration_workflow,
):
# Define expected
base_name_out = (
r"work/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos."
r"{index_library,[^\.]+}/out/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp."
r"de_novos.{index_library}"
)
expected = {
"tbi": base_name_out + ".vcf.gz.tbi",
"vcf": base_name_out + ".vcf.gz",
}
# Get actual
actual = variant_de_novo_filtration_workflow.get_input_files("filter_denovo_hard", "run")
assert actual == expected
def test_filter_de_novo_from_variant_annotation_hard_step_part_get_output_files(
variant_de_novo_filtration_workflow,
):
# Define expected
base_name_out = (
r"work/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos_hard."
r"{index_library,[^\.]+}/out/{mapper}.{caller}.jannovar_annotate_vcf."
r"gatk_pbt.gatk_rbp.de_novos_hard.{index_library}"
)
expected = {
"summary": base_name_out + ".summary.txt",
"summary_md5": base_name_out + ".summary.txt.md5",
"tbi": base_name_out + ".vcf.gz.tbi",
"tbi_md5": base_name_out + ".vcf.gz.tbi.md5",
"vcf": base_name_out + ".vcf.gz",
"vcf_md5": base_name_out + ".vcf.gz.md5",
}
# Get actual
actual = variant_de_novo_filtration_workflow.get_output_files("filter_denovo_hard", "run")
assert actual == expected
def test_filter_de_novo_from_variant_annotation_hard_step_part_get_log_file(
variant_de_novo_filtration_workflow,
):
# Define expected
expected = (
r"work/{mapper}.{caller}.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos_hard."
r"{index_library,[^\.]+}/log/{mapper}.{caller}.jannovar_annotate_vcf."
r"gatk_pbt.gatk_rbp.de_novos_hard.{index_library}.log"
)
# Get actual
actual = variant_de_novo_filtration_workflow.get_log_file("filter_denovo_hard", "run")
assert actual == expected
def test_filter_de_novo_from_variant_annotation_hard_step_part_update_cluster_config(
variant_de_novo_filtration_workflow, dummy_cluster_config
):
expected = {"mem", "time", "ntasks"}
actual = set(dummy_cluster_config["variant_denovo_filtration_filter_denovo_hard_run"].keys())
assert actual == expected
# Tests for VariantDeNovoFiltrationWorkflow --------------------------------------------------------
def test_de_novo_filtration_workflow(variant_de_novo_filtration_workflow):
"""Test simple functionality of the workflow"""
# Perform the tests
#
# Check created sub steps
expected = [
"collect_msdn",
"filter_denovo",
"filter_denovo_hard",
"link_out",
"summarize_counts",
"write_pedigree",
]
assert expected == list(sorted(variant_de_novo_filtration_workflow.sub_steps.keys()))
# Check result file construction
expected = [
"output/bwa.denovo_count_summary/out/bwa.denovo_count_summary.txt",
"output/bwa.denovo_count_summary/out/bwa.denovo_count_summary.txt.md5",
"output/bwa.multisite_de_novo/out/bwa.multisite_de_novo.txt",
"output/bwa.multisite_de_novo/out/bwa.multisite_de_novo.txt.md5",
]
base_name_out = (
"output/bwa.gatk_hc.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos_hard."
"P00{i}-N1-DNA1-WGS1/out/bwa.gatk_hc.jannovar_annotate_vcf.gatk_pbt.gatk_rbp.de_novos_hard."
"P00{i}-N1-DNA1-WGS1{ext}"
)
expected += [
base_name_out.format(i=i, ext=ext)
for i in (1, 4) # only for indices
for ext in (
".summary.txt",
".summary.txt.md5",
".vcf.gz",
".vcf.gz.md5",
".vcf.gz.tbi",
".vcf.gz.tbi.md5",
)
]
expected = list(sorted(expected))
actual = list(sorted(variant_de_novo_filtration_workflow.get_result_files()))
assert actual == expected
|
[
"snappy_pipeline.workflows.variant_denovo_filtration.VariantDeNovoFiltrationWorkflow",
"textwrap.dedent",
"pytest.fixture",
"snakemake.io.Wildcards"
] |
[((431, 461), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (445, 461), False, 'import pytest\n'), ((3162, 3296), 'snappy_pipeline.workflows.variant_denovo_filtration.VariantDeNovoFiltrationWorkflow', 'VariantDeNovoFiltrationWorkflow', (['dummy_workflow', 'minimal_config', 'dummy_cluster_config', 'config_lookup_paths', 'config_paths', 'work_dir'], {}), '(dummy_workflow, minimal_config,\n dummy_cluster_config, config_lookup_paths, config_paths, work_dir)\n', (3193, 3296), False, 'from snappy_pipeline.workflows.variant_denovo_filtration import VariantDeNovoFiltrationWorkflow\n'), ((4298, 4398), 'snakemake.io.Wildcards', 'Wildcards', ([], {'fromdict': "{'mapper': 'bwa', 'caller': 'gatk_hc', 'index_library': 'P001-N1-DNA1-WGS1'}"}), "(fromdict={'mapper': 'bwa', 'caller': 'gatk_hc', 'index_library':\n 'P001-N1-DNA1-WGS1'})\n", (4307, 4398), False, 'from snakemake.io import Wildcards\n'), ((624, 1505), 'textwrap.dedent', 'textwrap.dedent', (['"""\n static_data_config:\n reference:\n path: /path/to/ref.fa\n dbsnp:\n path: /path/to/dbsnp.vcf.gz\n\n step_config:\n ngs_mapping:\n tools:\n dna: [\'bwa\']\n compute_coverage_bed: true\n path_target_regions: /path/to/regions.bed\n bwa:\n path_index: /path/to/bwa/index.fa\n\n variant_calling:\n tools:\n - gatk_hc\n variant_denovo_filtration:\n path_variant_phasing: ../variant_phasing\n\n data_sets:\n first_batch:\n file: sheet.tsv\n search_patterns:\n - {\'left\': \'*/*/*_R1.fastq.gz\', \'right\': \'*/*/*_R2.fastq.gz\'}\n search_paths: [\'/path\']\n type: germline_variants\n naming_scheme: only_secondary_id\n """'], {}), '(\n """\n static_data_config:\n reference:\n path: /path/to/ref.fa\n dbsnp:\n path: /path/to/dbsnp.vcf.gz\n\n step_config:\n ngs_mapping:\n tools:\n dna: [\'bwa\']\n compute_coverage_bed: true\n path_target_regions: /path/to/regions.bed\n bwa:\n path_index: /path/to/bwa/index.fa\n\n variant_calling:\n tools:\n - gatk_hc\n variant_denovo_filtration:\n path_variant_phasing: ../variant_phasing\n\n data_sets:\n first_batch:\n file: sheet.tsv\n search_patterns:\n - {\'left\': \'*/*/*_R1.fastq.gz\', \'right\': \'*/*/*_R2.fastq.gz\'}\n search_paths: [\'/path\']\n type: germline_variants\n naming_scheme: only_secondary_id\n """\n )\n', (639, 1505), False, 'import textwrap\n')]
|
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
from pxr import Tf
import unittest
class TestTfPyStaticTokens(unittest.TestCase):
def test_PyStaticTokens(self):
testTokens = (
('orange', 'orange'),
('pear', "d'Anjou"),
('Fuji', 'Fuji'),
('Pippin', 'Pippin'),
('McIntosh', 'McIntosh'),
)
for scope in (Tf._testStaticTokens, Tf._TestStaticTokens):
for attrName,expectedValue in testTokens:
self.assertTrue(hasattr(scope, attrName))
value = getattr(scope, attrName)
self.assertEqual(value, expectedValue,
"Unexpected value for {0}: got '{1}', expected '{2}'".format(
attrName, value, expectedValue))
# Not wrapping arrays yet, just the array elements.
self.assertFalse(hasattr(scope, 'apple'))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main"
] |
[((1977, 1992), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1990, 1992), False, 'import unittest\n')]
|
import os
from dotenv import load_dotenv
import asyncio
from time import sleep
load_dotenv()
DICORD_BOT_NAME = os.getenv('DICORD_BOT_NAME')
async def wait_for_msg(testing_bot, channel, content):
sleep(0.6)
try:
return await testing_bot.wait_for('message', timeout=2, check=lambda x: x.guild.id == channel.guild.id and x.author.name == DICORD_BOT_NAME and content in x.content)
except asyncio.TimeoutError:
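        # Fallback: the message may have arrived before wait_for began listening,
        # so inspect the most recent message in the channel history instead.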
messages = await channel.history(limit=1).flatten()
if not (len(messages) != 0 and content in messages[0].content):
print(f'Message content {content} not found')
            raise Exception(f'Message content {content} not found')
return messages[0]
async def wait_for_channel_create(testing_bot, guild_id, name):
try:
return await testing_bot.wait_for('guild_channel_create', timeout=2, check=lambda x: x.guild.id == guild_id and x.name == name)
except asyncio.TimeoutError:
new_channel = next((ch for ch in testing_bot.get_guild(guild_id).text_channels if ch.name == name), None)
if new_channel is None:
print(f'Channel {name} not found')
            raise Exception(f'Channel {name} not found')
return new_channel
|
[
"dotenv.load_dotenv",
"os.getenv",
"time.sleep"
] |
[((80, 93), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (91, 93), False, 'from dotenv import load_dotenv\n'), ((112, 140), 'os.getenv', 'os.getenv', (['"""DICORD_BOT_NAME"""'], {}), "('DICORD_BOT_NAME')\n", (121, 140), False, 'import os\n'), ((201, 211), 'time.sleep', 'sleep', (['(0.6)'], {}), '(0.6)\n', (206, 211), False, 'from time import sleep\n')]
|
from src.python.preprocess2 import *
from src.python.geneontology import *
from src.python.baselines import *
from src.python.digo_utils import *
from tqdm import tqdm
import itertools
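# Memoize a pairwise metric: (a, b) and (b, a) share one cache entry, so each
# comparison (e.g. a BLAST call) is computed at most once per uid pair.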
def get_metric(metric):
cache = dict()
def do_metric(seq1, seq2):
key1 = (seq1.uid, seq2.uid)
key2 = (seq2.uid, seq1.uid)
if key1 in cache:
val = cache[key1]
elif key2 in cache:
val = cache[key2]
else:
val = metric(seq1, seq2)
cache[key1] = val
cache[key2] = val
return val
return do_metric
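# Build f(seq, sequences): aggregate the pairwise metric between seq and every
# member of the set (e.g. agg=np.max for the best BLAST bitscore).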
def get_f(metric, agg):
return lambda seq, sequences: agg([metric(seq, other) for other in sequences])
def is_seq_in_node(seq, node, use_prior=True): # assuming f computed for seq and f_dist_in/out computed for node
f_seq_node = seq.f[node] # precompute f(seq, node)
prior = node.prior if use_prior else 1.0
prob_f_given_node = node.f_dist_in(f_seq_node)
prior_prob_f_node = node.f_dist_out(f_seq_node)
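    # Naive-Bayes-style score: the node prior times the likelihood ratio
    # P(f | node) / P(f | background).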
return prior * (prob_f_given_node / prior_prob_f_node)
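# Post-order traversal of the GO DAG: children containing seq are submitted first,
# their f values are awaited, then f for this node is submitted on the executor E
# (presumably a worker pool exported by digo_utils).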
def compute_f_inside(f, seq, node, submitted, pbar):
if not node.is_leaf():
for child in node.children:
if seq not in child.sequences:
continue
task = compute_f_inside(f, seq, child, submitted, pbar)
if verbose: print("wait %s" % child)
assert child in submitted
val = task.result()
seq.f[child] = val # wait for children's results
if verbose: print("finished %s f_val=%.2f" % (child, val))
try:
task = submitted[node]
except KeyError:
assert seq in node.sequences
task = E.submit(f, seq, node.sequences - {seq})
assert node not in submitted
submitted[node] = task
if pbar: pbar.update(1)
return task
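# For each leaf, sample up to 30 member sequences and record the metric over all
# within-leaf pairs; these datasets are the reference distributions for ks_2samp below.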
def compute_leaf_datasets(metric, leaves):
tasks = {}
for leaf in leaves:
assert leaf.is_leaf()
s_in = min(30, leaf.size)
sample_in = np.random.choice(list(leaf.sequences), s_in, replace=False)
pairs = set(itertools.combinations(sample_in, 2))
task = E.submit(run_metric_on_pairs, metric, pairs, verbose=False)
tasks[leaf] = task
assert len(tasks) == len(leaves)
pbar = tqdm(range(len(leaves)), desc="leaves processed")
for node, task in tasks.items():
node.dataset = task.result()
assert len(node.dataset) > 0
pbar.update(1)
pbar.close()
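# Same recursive scheme as compute_f_inside, but every child is visited, so f is
# also evaluated against nodes that do not contain seq.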
def compute_f_outside(f, seq, node, submitted, pbar):
if not node.is_leaf():
for child in node.children:
task = compute_f_outside(f, seq, child, submitted, pbar)
if verbose: print("wait %s" % child)
assert child in submitted
val = task.result()
seq.f[child] = val # wait for children's results
if verbose: print("finished %s f_val=%.2f" % (child, val))
try:
task = submitted[node]
except KeyError:
task = E.submit(f, seq, node.sequences - {seq})
assert node not in submitted
submitted[node] = task
if pbar: pbar.update(1)
return task
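# Predict GO terms for one sequence: compute f over the whole graph, then score
# membership in every node with is_seq_in_node.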
def predict_seq(graph, seq, f):
predictions = {}
run_compute_f(f, [seq], graph, compute_f_outside)
for node in graph:
score = is_seq_in_node(seq, node)
predictions[node.go] = score
return predictions
def estimate_distributions(graph, precomputed_sequences, attr_name):
for node in graph:
dataset = [seq.f[node] for seq in precomputed_sequences]
        setattr(node, attr_name, get_distribution(dataset))  # attach the fitted distribution to each node, not the graph
def get_blast(blast, evalue):
def do_blast(seq1, seq2):
hits = blast.get_hits(seq1, seq2)
if len(hits) > 0:
hit = hits[np.argmin([h.evalue for h in hits])]
return hit.bitscore
else:
hit = blast.blastp(seq1, seq2, evalue=evalue)
return hit.bitscore
return do_blast
def cleanup():
files = os.listdir(tmp_dir)
for file in files:
if file.endswith(".seq") or file.endswith(".out"):
os.remove(os.path.join(tmp_dir, file))
def run_compute_f(f, seqs, g, method): # method in [compute_f_outside, compute_f_inside]
# pbar = tqdm(range(len(seqs)), desc="sequences processed")
for i, seq in enumerate(seqs):
pbar = tqdm(range(len(g)), desc="[%s] (%d/%d) nodes processed" % (seq.uid, i + 1, len(seqs)))
root_task = method(f, seq, g.root, {}, pbar=pbar)
root_task.result() # wait for all other tasks to finish
# pbar.update(1)
pbar.close()
# pbar.close()
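# Breadth-first walk over father links: returns the node together with all of its
# ancestors (the root is excluded unless include_root is set).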
def propagate(leaf, include_root=False):
Q = [leaf]
visited = {leaf}
while Q:
node = Q.pop()
for father in node.fathers:
if not include_root and father.is_root():
continue
if father not in visited:
visited.add(father)
Q.append(father)
return visited
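# Reduce a node set to its most specific members: true leaves, plus internal nodes
# none of whose children occur among the propagated ancestors of the set.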
def get_leaves(node_set):
leaf_set = set()
prop_set = set()
for node in node_set:
prop_set |= propagate(node)
for node in node_set:
if node.is_leaf():
leaf_set.add(node)
else:
children = set(node.children)
if len(prop_set & children) == 0:
leaf_set.add(node)
return leaf_set
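# Score seq against each node with a two-sample Kolmogorov-Smirnov test, comparing
# seq-vs-member metric values against the node's precomputed within-member dataset.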
def predict_ks_leaves(seq, metric, nodes, pbar):
tasks = {}
preds = {}
for node in nodes:
s_in = min(30, node.size)
sample = np.random.choice(list(node.sequences), s_in, replace=False)
pairs = [(seq, sequence) for sequence in sample]
task = E.submit(run_metric_on_pairs, metric, pairs, verbose=False)
tasks[node] = task
for node, task in tasks.items():
seq_dataset = task.result()
assert len(seq_dataset) > 0
_, alpha = ks_2samp(seq_dataset, node.dataset)
preds[node] = alpha
pbar.update(1)
return preds
def preds_by_attr(hits_per_uid, attr, nb=None):
preds = {}
pbar = tqdm(range(len(hits_per_uid)), desc="sequences processed")
for uid, hits in hits_per_uid.items():
pbar.update(1)
preds[uid] = {}
if len(hits) == 0:
continue
        for go, go_hits in hits.items():
            assert go != graph.root.go
            hs = [getattr(h, attr) for h in go_hits if h.evalue < 0.001]
if len(hs) == 0:
continue
if nb:
preds[uid][go] = nb.infer(max(hs), graph[go].prior)
else:
preds[uid][go] = max(hs)
pbar.close()
return preds
def propagate_leaf_predictions(leaf_predictions, choose_max_prob=False):
node2probs = {}
predictions = {}
for leaf, prob in leaf_predictions.items():
ancestors = propagate(leaf)
for node in ancestors:
if node in node2probs:
node2probs[node].append(prob)
else:
node2probs[node] = [prob]
for node, probs in node2probs.items():
if choose_max_prob:
predictions[node.go] = max(probs)
else:
predictions[node.go] = 1 - np.prod([1 - pr for pr in probs])
return predictions
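def _demo_propagate_leaf_predictions():
    # Hedged sketch (editorial, not in the original): the default aggregation
    # above is a noisy-OR, P = 1 - prod(1 - p_i); e.g. leaf probabilities
    # [0.3, 0.5] combine to 1 - 0.7 * 0.5 = 0.65, while choose_max_prob=True
    # would simply report 0.5.
    probs = [0.3, 0.5]
    return 1 - np.prod([1 - pr for pr in probs])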
if __name__ == "__main__":
cleanup()
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client['prot2vec']
asp = 'F' # molecular function
onto = get_ontology(asp)
t0 = datetime.datetime(2014, 1, 1, 0, 0)
t1 = datetime.datetime(2014, 9, 1, 0, 0)
# t0 = datetime.datetime(2017, 1, 1, 0, 0)
# t1 = datetime.datetime.utcnow()
print("Indexing Data...")
trn_stream, tst_stream = get_training_and_validation_streams(db, t0, t1, asp)
print("Loading Training Data...")
uid2seq_trn, uid2go_trn, go2uid_trn = trn_stream.to_dictionaries(propagate=True)
print("Loading Validation Data...")
uid2seq_tst, uid2go_tst, _ = tst_stream.to_dictionaries(propagate=True)
print("Building Graph...")
graph = Graph(onto, uid2seq_trn, go2uid_trn)
print("Graph contains %d nodes" % len(graph))
print("Pruning Graph...")
deleted_nodes = graph.prune(3)
print("Pruned %d, Graph contains %d" % (len(deleted_nodes), len(graph)))
save_object(graph, "Data/digo_%s_graph" % asp)
blast_client = BLAST(db.blast)
blast_metric = get_metric(get_blast(blast_client, evalue=10e6))
f = get_f(blast_metric, agg=np.max)
# print("Computing f \"Outside\"")
# nature_sequences = load_nature_repr_set(db)
# sample_of_nature = blast_client.sort_by_count(nature_sequences)[:1000]
# blast_client.load_precomputed(sample_of_nature)
# run_compute_f(f, sample_of_nature, graph, compute_f_outside)
# graph.estimate_distributions(sample_of_nature, "_f_dist_out")
# save_object(graph, "Data/digo-%s-graph")
# cleanup()
# print("Computing f \"Inside\"")
# pth = "Data/digo_%s_sample_of_inside" % asp
# if os.path.exists(pth):
# sample_of_inside = load_object(pth)
# else:
# sample_of_inside = graph.sample(max_add_to_sample=2)
# save_object(sample_of_inside, pth)
# blast_client.load_precomputed(sample_of_inside)
# run_compute_f(f, sample_of_inside, graph, compute_f_inside)
# graph.estimate_distributions(sample_of_inside, "_f_dist_in")
# save_object(graph, "Data/digo_%s_graph")
# cleanup()
# print("123 Predict...")
# seq_predictions = {}
# targets = [Seq(uid, seq) for uid, seq in uid2seq_tst.items()]
# # blast_client.load_precomputed(targets)
# for tgt in targets:
# seq_predictions[tgt.uid] = graph.predict_seq(tgt, f)
# save_object(seq_predictions, "Data/digo_%s_preds_with_prior" % asp)
limit = None
evalue = 0.001
print("Running BLAST evalue=%s..." % evalue)
tgt2predictions = {}
db_pth = prepare_blast(uid2seq_trn)
targets = [Seq(uid, seq) for uid, seq in uid2seq_tst.items()][:limit]
hits_per_uid = predict_blast_parallel(targets, uid2go_trn, db_pth, evalue)
predictions_pindent = preds_by_attr(hits_per_uid, "pident")
save_object(hits_per_uid, "%s/blast_%s_%s_hsp" % (out_dir, evalue, GoAspect(asp)))
print("Computing K-S datasets")
leaves = graph.leaves
compute_leaf_datasets(blast_metric, leaves)
save_object(graph, "Data/digo_%s_graph" % asp)
print("123 Predict K-S...")
for i, tgt in enumerate(targets):
hits = hits_per_uid[tgt.uid]
leaves_in = set()
leaves_out = set()
for go in hits:
try:
if graph[go].is_leaf():
leaves_in.add(graph[go])
except KeyError:
leaves_out.add(go)
msg = "[%d/%d] (%s) leaves processed" % (i, len(targets), tgt.uid)
pbar = tqdm(range(len(leaves_in)), desc=msg)
leaf_predictions = predict_ks_leaves(tgt, blast_metric, leaves_in, pbar)
predictions = propagate_leaf_predictions(leaf_predictions)
for go, pident in predictions_pindent[tgt.uid].items():
prob = pident / 100
if go in predictions:
continue
predictions[go] = prob
tgt2predictions[tgt.uid] = predictions
ths, _, _, f1s = performance({tgt.uid: predictions}, {tgt.uid: uid2go_tst[tgt.uid]})
        j = np.argmax(f1s)  # index of the best F1 (F_max), not the worst
msg = "[%d/%d] (%s) F_max=%.2f @ tau=%.2f" % (i, len(targets), tgt.uid, f1s[j], ths[j])
pbar.set_description(msg)
pbar.close()
save_object(tgt2predictions, "Data/digo_%s_preds_%s_ks" % (asp, evalue))
# print("456 Add BLAST...")
# for tgt in targets:
# predictions = tgt2predictions[tgt.uid]
# for go, pident in predictions_pindent[tgt.uid].items():
# prob = pident / 100
# if go in predictions:
# predictions[go] = 1 - (1 - prob) * (1 - predictions[go])
# else:
# predictions[go] = prob
# tgt2predictions[tgt.uid] = predictions
# save_object(tgt2predictions, "Data/digo_%s_preds_%s_ks_blast" % (asp, evalue))
|
[
"pymongo.MongoClient",
"itertools.combinations"
] |
[((7376, 7417), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://localhost:27017/"""'], {}), "('mongodb://localhost:27017/')\n", (7387, 7417), False, 'from pymongo import MongoClient\n'), ((2121, 2157), 'itertools.combinations', 'itertools.combinations', (['sample_in', '(2)'], {}), '(sample_in, 2)\n', (2143, 2157), False, 'import itertools\n')]
|
import numpy as np
#example array to operate on
arr = np.array([1, 2, 3, 4, 5])
#add 1 to each array element
np.add(arr, 1)
#subtract 2 from each array element
np.subtract(arr, 2)
#multiply each array element by 3
np.multiply(arr, 3)
#divide each array element by 4
np.divide(arr, 4)
#raise each array element to 5th power
np.power(arr, 5)
|
[
"numpy.divide",
"numpy.multiply",
"numpy.subtract",
"numpy.power",
"numpy.add"
] |
[((49, 63), 'numpy.add', 'np.add', (['arr', '(1)'], {}), '(arr, 1)\n', (55, 63), True, 'import numpy as np\n'), ((100, 119), 'numpy.subtract', 'np.subtract', (['arr', '(2)'], {}), '(arr, 2)\n', (111, 119), True, 'import numpy as np\n'), ((154, 173), 'numpy.multiply', 'np.multiply', (['arr', '(3)'], {}), '(arr, 3)\n', (165, 173), True, 'import numpy as np\n'), ((244, 261), 'numpy.divide', 'np.divide', (['arr', '(4)'], {}), '(arr, 4)\n', (253, 261), True, 'import numpy as np\n'), ((301, 317), 'numpy.power', 'np.power', (['arr', '(5)'], {}), '(arr, 5)\n', (309, 317), True, 'import numpy as np\n')]
|
from django.db import models
from ..models import SafeDeleteModel
from .testcase import SafeDeleteTestCase
class InvisibleModel(SafeDeleteModel):
# SafeDeleteModel subclasses automatically have their visibility set to invisible.
name = models.CharField(
max_length=100
)
class VisibilityTestCase(SafeDeleteTestCase):
def setUp(self):
self.instance = InvisibleModel.objects.create(
name='instance'
)
    def test_invisible_by_pk(self):
"""Test whether the soft deleted model cannot be found by filtering on pk."""
self.assertSoftDelete(self.instance, save=False)
self.assertEqual(
InvisibleModel.objects.filter(
pk=self.instance.pk
).count(),
0
)
self.assertRaises(
InvisibleModel.DoesNotExist,
InvisibleModel.objects.get,
pk=self.instance.pk
)
def test_invisible_by_name(self):
"""Test whether the soft deleted model cannot be found by filtering on name."""
self.assertSoftDelete(self.instance, save=False)
self.assertEqual(
InvisibleModel.objects.filter(
name=self.instance.name
).count(),
0
)
self.assertRaises(
InvisibleModel.DoesNotExist,
InvisibleModel.objects.get,
name=self.instance.name
)
|
[
"django.db.models.CharField"
] |
[((248, 280), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (264, 280), False, 'from django.db import models\n')]
|
# -*- coding: utf-8 -*-
"""Main module"""
import click
def start():
click.echo('Starting')
def stop():
click.echo('Stopping')
|
[
"click.echo"
] |
[((75, 97), 'click.echo', 'click.echo', (['"""Starting"""'], {}), "('Starting')\n", (85, 97), False, 'import click\n'), ((116, 138), 'click.echo', 'click.echo', (['"""Stopping"""'], {}), "('Stopping')\n", (126, 138), False, 'import click\n')]
|
# -*- coding: utf-8 -*-
import math
import random
import re
from chaoslib.exceptions import ActivityFailed
from chaoslib.types import Secrets
from kubernetes import client
from logzero import logger
from chaosk8s import create_k8s_api_client
__all__ = ["terminate_pods"]
def terminate_pods(label_selector: str = None, name_pattern: str = None,
all: bool = False, rand: bool = False,
mode: str = "fixed", qty: int = 1,
ns: str = "default", secrets: Secrets = None):
"""
    Terminate a pod gracefully. Select the appropriate pods by label and/or
    name patterns. Whenever a name pattern is provided, any retrieved pod
    whose name does not match the pattern is filtered out.
If neither `label_selector` nor `name_pattern` are provided, all pods
in the namespace will be selected for termination.
If `all` is set to `True`, all matching pods will be terminated.
Value of `qty` varies based on `mode`.
If `mode` is set to `fixed`, then `qty` refers to number of pods to be
terminated. If `mode` is set to `percentage`, then `qty` refers to
percentage of pods, from 1 to 100, to be terminated.
Default `mode` is `fixed` and default `qty` is `1`.
If `rand` is set to `True`, n random pods will be terminated
Otherwise, the first retrieved n pods will be terminated.
"""
# Fail when quantity is less than 0
if qty < 0:
raise ActivityFailed(
"Cannot terminate pods. Quantity '{q}' is negative.".format(q=qty))
# Fail when mode is not `fixed` or `percentage`
if mode not in ['fixed', 'percentage']:
raise ActivityFailed(
"Cannot terminate pods. Mode '{m}' is invalid.".format(m=mode))
api = create_k8s_api_client(secrets)
v1 = client.CoreV1Api(api)
ret = v1.list_namespaced_pod(ns, label_selector=label_selector)
logger.debug("Found {d} pods labelled '{s}'".format(
d=len(ret.items), s=label_selector))
pods = []
if name_pattern:
pattern = re.compile(name_pattern)
for p in ret.items:
if pattern.match(p.metadata.name):
pods.append(p)
logger.debug("Pod '{p}' match pattern".format(
p=p.metadata.name))
else:
pods = ret.items
if not all:
if mode == 'percentage':
qty = math.ceil((qty * len(pods)) / 100)
# If quantity is greater than number of pods present, cap the
# quantity to maximum number of pods
qty = min(qty, len(pods))
if rand:
pods = random.sample(pods, qty)
else:
pods = pods[:qty]
logger.debug("Picked pods '{p}' to be terminated".format(
p=",".join([po.metadata.name for po in pods])))
body = client.V1DeleteOptions()
for p in pods:
        v1.delete_namespaced_pod(
            p.metadata.name, ns, body=body)
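# Hedged usage sketch (editorial; the label selector and values are illustrative,
# not part of the original module): terminate half of the matching pods at random.
if __name__ == "__main__":
    terminate_pods(label_selector="app=my-app", rand=True,
                   mode="percentage", qty=50, ns="default")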
|
[
"kubernetes.client.V1DeleteOptions",
"chaosk8s.create_k8s_api_client",
"random.sample",
"kubernetes.client.CoreV1Api",
"re.compile"
] |
[((1786, 1816), 'chaosk8s.create_k8s_api_client', 'create_k8s_api_client', (['secrets'], {}), '(secrets)\n', (1807, 1816), False, 'from chaosk8s import create_k8s_api_client\n'), ((1827, 1848), 'kubernetes.client.CoreV1Api', 'client.CoreV1Api', (['api'], {}), '(api)\n', (1843, 1848), False, 'from kubernetes import client\n'), ((2832, 2856), 'kubernetes.client.V1DeleteOptions', 'client.V1DeleteOptions', ([], {}), '()\n', (2854, 2856), False, 'from kubernetes import client\n'), ((2074, 2098), 're.compile', 're.compile', (['name_pattern'], {}), '(name_pattern)\n', (2084, 2098), False, 'import re\n'), ((2632, 2656), 'random.sample', 'random.sample', (['pods', 'qty'], {}), '(pods, qty)\n', (2645, 2656), False, 'import random\n')]
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Abstract based class for cclib methods."""
import logging
import sys
class Method(object):
"""Abstract base class for all cclib method classes.
Subclasses defined by cclib:
    CDA - charge decomposition analysis
CSPA - C-squared population analysis
Density - density matrix calculation
FragmentAnalysis - fragment analysis for ADF output
LPA - Löwdin population analysis
MBO - Mayer's bond orders
MPA - Mulliken population analysis
Nuclear - properties of atomic nuclei
OPA - overlap population analysis
Population - base class for population analyses
Volume - volume/grid calculations
All the modules containing methods should be importable:
>>> import cda, cspa, density, fragments, lpa, mbo, mpa, nuclear, opa, population, volume
"""
def __init__(self, data, progress=None, loglevel=logging.INFO, logname="Log"):
"""Initialise the Logfile object.
This constructor is typically called by the constructor of a subclass.
"""
self.data = data
self.progress = progress
self.loglevel = loglevel
self.logname = logname
self.logger = logging.getLogger('%s %s' % (self.logname, self.data))
self.logger.setLevel(self.loglevel)
self.logformat = "[%(name)s %(levelname)s] %(message)s"
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(self.logformat))
self.logger.addHandler(handler)
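class _ExampleMethod(Method):
    """Hedged illustration (hypothetical subclass, not part of cclib): subclasses
    are expected to chain to Method.__init__ so they inherit the logging setup."""
    def __init__(self, data, progress=None, loglevel=logging.INFO, logname="Log"):
        super(_ExampleMethod, self).__init__(data, progress, loglevel, logname)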
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=False)
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger",
"doctest.testmod"
] |
[((1837, 1867), 'doctest.testmod', 'doctest.testmod', ([], {'verbose': '(False)'}), '(verbose=False)\n', (1852, 1867), False, 'import doctest\n'), ((1456, 1510), 'logging.getLogger', 'logging.getLogger', (["('%s %s' % (self.logname, self.data))"], {}), "('%s %s' % (self.logname, self.data))\n", (1473, 1510), False, 'import logging\n'), ((1640, 1673), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (1661, 1673), False, 'import logging\n'), ((1704, 1737), 'logging.Formatter', 'logging.Formatter', (['self.logformat'], {}), '(self.logformat)\n', (1721, 1737), False, 'import logging\n')]
|
# lint as: python3
# Copyright 2021 The Ivy Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License..
# ==============================================================================
import setuptools
from pathlib import Path
from distutils.core import setup
def _strip(line):
return line.split(' ')[0].split('#')[0].split(',')[0]
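def _demo_strip():
    # Hedged illustration (hypothetical requirement lines, not from the source):
    # _strip keeps only the leading package token of a requirements.txt line,
    # dropping inline comments and trailing commas.
    assert _strip('requests>=2.0 # http client') == 'requests>=2.0'
    assert _strip('numpy,') == 'numpy'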
def _replace_logos_html(txt):
# html-containing chunks
chunks = txt.split('.. raw:: html')
# backend logos
backends_chunk = chunks[2]
bc = backends_chunk.split('\n\n')
img_str = '.. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/logos/supported/frameworks.png?raw=true\n' \
' :width: 100%'
backends_chunk = '\n\n'.join(bc[0:1] + [img_str] + bc[2:])
# library logos
libraries_chunk = chunks[3]
lc = libraries_chunk.split('\n\n')
img_str = '.. image:: https://github.com/unifyai/unifyai.github.io/blob/master/img/externally_linked/ivy_libraries.png?raw=true\n' \
' :width: 100%'
libraries_chunk = '\n\n'.join(lc[0:1] + [img_str] + lc[2:])
# re-join
chunks[3] = libraries_chunk
return ''.join(
['.. raw:: html'.join(chunks[0:2]), backends_chunk, '.. raw:: html'.join(chunks[3:])])
def _replace_gif(gif_chunk):
png_url = 'https://{}.png'.format(gif_chunk.split(".gif?raw=true'>")[0].split('https://')[-1])
gc = gif_chunk.split('\n\n')
img_str = '.. image:: {}?raw=true\n' \
' :width: 100%'.format(png_url)
return '\n\n'.join(gc[0:1] + [img_str] + gc[2:])
def _replace_gifs_html(txt):
# html-containing chunks
chunks = txt.split('.. raw:: html')
# go through each chunk, replacing all html gifs with rst images
return_str = ''
for i, chunk in enumerate(chunks):
new_chunk = chunk
delimiter = '.. raw:: html'
if ".gif?raw=true'>" in chunk:
new_chunk = _replace_gif(chunk)
delimiter = ''
if i == 0:
return_str = chunk
else:
return_str = delimiter.join([return_str, new_chunk])
return return_str
def _is_html(line):
line_squashed = line.replace(' ', '')
if not line_squashed:
return False
if line_squashed[0] == '<' and line_squashed[-1] == '>':
return True
return False
def _is_raw_block(line):
line_squashed = line.replace(' ', '')
if len(line_squashed) < 11:
return False
if line_squashed[-11:] == '..raw::html':
return True
return False
this_directory = Path(__file__).parent
text = (this_directory / "README.rst").read_text()
text = _replace_logos_html(text).replace('. Click on the icons below to learn more!', '!')
text = _replace_gifs_html(text)
lines = text.split('\n')
lines = [line for line in lines if not (_is_html(line) or _is_raw_block(line))]
long_description = '\n'.join(lines)
setup(name='ivy-memory',
version='1.1.9',
author='<NAME>',
author_email='<EMAIL>',
description='End-to-end memory modules for machine learning developers, written in Ivy.',
long_description=long_description,
long_description_content_type='text/x-rst',
url='https://lets-unify.ai/memory',
project_urls={
'Docs': 'https://lets-unify.ai/memory/',
'Source': 'https://github.com/unifyai/memory',
},
packages=setuptools.find_packages(),
install_requires=[_strip(line) for line in open('requirements.txt', 'r')],
classifiers=['License :: OSI Approved :: Apache Software License'],
license='Apache 2.0'
)
|
[
"pathlib.Path",
"setuptools.find_packages"
] |
[((3067, 3081), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (3071, 3081), False, 'from pathlib import Path\n'), ((3893, 3919), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (3917, 3919), False, 'import setuptools\n')]
|
"""
Shanghai Metro database definitions
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy import Column, Integer, Text, Float
ShmDbModel = declarative_base()
# Section for lines
class Line(ShmDbModel):
"""Shanghai Metro lines
"""
__tablename__ = 'lines'
id = Column(Integer, primary_key=True)
name = Column(Text)
codename = Column(Text)
    def __repr__(self):
        return "<Line (name = {:s}, codename = {:s})>".format(self.name, self.codename)
# Section for trains
class TrainType(ShmDbModel):
"""Train type table: Category
"""
__tablename__ = "train_types"
id = Column(Integer, primary_key=True)
name = Column(Text)
maker = Column(Text)
display_name = Column(Text)
note = Column(Text)
def __repr__(self):
return "<Train type (name = {:s}, maker = {:s})>".format(self.name, self.maker)
class Train(ShmDbModel):
"""Train table: Object
Train means a trainset with multiple identified by a SN. Trainsets are rarely unlinked.
Unlink/carriage reorganization may happen for special reasons/events, such as expanding from 6-carriage to
8-carriage trains of line 01/02, replacing destroyed carriages of train 0117
Each train are loosely bound to a line. But 03A02/04A02 are occasionally sharing their trains.
"""
__tablename__ = "trains"
id = Column(Integer, primary_key=True)
sn = Column(Text)
# Trains may not operate on its line, such as shared trains for line 3/4
line_id = Column(Integer, ForeignKey("lines.id"))
train_type_id = Column(Integer, ForeignKey("train_types.id"))
# 0 for active; 1 for updated (such as adding carriages); 2 for retired
# New train instances are added for updated trains. For example, for 8-carriage 02074,
# the original 4-carriage is kept as 02074-01
status = Column(Integer)
line = relationship("Line", backref='trains')
train_type = relationship("TrainType", backref="trains")
def __repr__(self):
return "<Train (sn = {:s}, type = {:s})>".format(self.sn, self.train_type.name)
class Station(ShmDbModel):
"""Station table: Object
Station means one station on one line.
Stations with the same name are exchange stations.
    One exception: the two 浦电路 (Pudian Road) stations on line 06 and line 04 are not exchange stations.
"""
__tablename__ = "stations"
id = Column(Integer, primary_key=True)
sn = Column(Text)
chn_name = Column(Text)
eng_name = Column(Text)
line_id = Column(Integer, ForeignKey("lines.id"))
distance = Column(Text)
line = relationship("Line", backref="stations")
class Task(ShmDbModel):
"""Task table: Category
Task is a loose string describing the purpose of several routes
"""
__tablename__ = "tasks"
id = Column(Integer, primary_key=True)
task = Column(Text)
class Route(ShmDbModel):
"""Route table: Event
Route means the event from one station to another by one trainset
"""
__tablename__ = "routes"
id = Column(Integer, primary_key=True)
task_id = Column(Integer, ForeignKey("tasks.id"))
seq_retire = Column(Integer)
train_id = Column(Integer, ForeignKey("trains.id"))
departure_id = Column(Integer, ForeignKey("departures.id"))
arrival_id = Column(Integer, ForeignKey("arrivals.id"))
note = Column(Text)
train = relationship("Train", backref="routes")
task = relationship("Task", backref="routes")
class Departure(ShmDbModel):
"""Route departure table: Action
Departure means leaving a station at a specific time
"""
__tablename__ = "departures"
id = Column(Integer, primary_key=True)
# Route id should be deprecated
# route_id = Column(Integer, ForeignKey("routes.id"))
station_id = Column(Integer, ForeignKey("stations.id"))
time = Column(Text)
date_retire = Column(Text)
time_retire = Column(Text)
route = relationship("Route", uselist=False, backref="departure")
station = relationship("Station", backref="departures")
class Arrival(ShmDbModel):
"""Route arrival table: Action
Arrival means reaching a station at a specific time
"""
__tablename__ = "arrivals"
id = Column(Integer, primary_key=True)
# Route id should be deprecated
# route_id = Column(Integer, ForeignKey("routes.id"))
station_id = Column(Integer, ForeignKey("stations.id"))
time = Column(Text)
time_retire = Column(Text)
route = relationship("Route", uselist=False, backref="arrival")
station = relationship("Station", backref="arrivals")
|
[
"sqlalchemy.ext.declarative.declarative_base",
"sqlalchemy.ForeignKey",
"sqlalchemy.orm.relationship",
"sqlalchemy.Column"
] |
[((242, 260), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (258, 260), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((380, 413), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (386, 413), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((425, 437), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (431, 437), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((453, 465), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (459, 465), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((656, 689), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (662, 689), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((701, 713), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (707, 713), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((726, 738), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (732, 738), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((758, 770), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (764, 770), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((782, 794), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (788, 794), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((1396, 1429), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (1402, 1429), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((1439, 1451), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (1445, 1451), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((1881, 1896), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (1887, 1896), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((1908, 1946), 'sqlalchemy.orm.relationship', 'relationship', (['"""Line"""'], {'backref': '"""trains"""'}), "('Line', backref='trains')\n", (1920, 1946), False, 'from sqlalchemy.orm import relationship\n'), ((1964, 2007), 'sqlalchemy.orm.relationship', 'relationship', (['"""TrainType"""'], {'backref': '"""trains"""'}), "('TrainType', backref='trains')\n", (1976, 2007), False, 'from sqlalchemy.orm import relationship\n'), ((2407, 2440), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2413, 2440), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2450, 2462), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (2456, 2462), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2478, 2490), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (2484, 2490), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2506, 2518), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (2512, 2518), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2588, 2600), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (2594, 2600), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2612, 2652), 'sqlalchemy.orm.relationship', 'relationship', (['"""Line"""'], {'backref': '"""stations"""'}), "('Line', backref='stations')\n", (2624, 2652), False, 'from sqlalchemy.orm import relationship\n'), ((2820, 2853), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (2826, 2853), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((2865, 2877), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (2871, 2877), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3047, 3080), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3053, 3080), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3152, 3167), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (3158, 3167), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3359, 3371), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3365, 3371), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3384, 3423), 'sqlalchemy.orm.relationship', 'relationship', (['"""Train"""'], {'backref': '"""routes"""'}), "('Train', backref='routes')\n", (3396, 3423), False, 'from sqlalchemy.orm import relationship\n'), ((3435, 3473), 'sqlalchemy.orm.relationship', 'relationship', (['"""Task"""'], {'backref': '"""routes"""'}), "('Task', backref='routes')\n", (3447, 3473), False, 'from sqlalchemy.orm import relationship\n'), ((3649, 3682), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3655, 3682), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3848, 3860), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3854, 3860), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3879, 3891), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3885, 3891), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3910, 3922), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (3916, 3922), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((3935, 3992), 'sqlalchemy.orm.relationship', 'relationship', (['"""Route"""'], {'uselist': '(False)', 'backref': '"""departure"""'}), "('Route', uselist=False, backref='departure')\n", (3947, 3992), False, 'from sqlalchemy.orm import relationship\n'), ((4007, 4052), 'sqlalchemy.orm.relationship', 'relationship', (['"""Station"""'], {'backref': '"""departures"""'}), "('Station', backref='departures')\n", (4019, 4052), False, 'from sqlalchemy.orm import relationship\n'), ((4221, 4254), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (4227, 4254), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((4420, 4432), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (4426, 4432), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((4451, 4463), 'sqlalchemy.Column', 'Column', (['Text'], {}), '(Text)\n', (4457, 4463), False, 'from sqlalchemy import Column, Integer, Text, Float\n'), ((4476, 4531), 'sqlalchemy.orm.relationship', 'relationship', (['"""Route"""'], {'uselist': '(False)', 'backref': '"""arrival"""'}), "('Route', uselist=False, backref='arrival')\n", (4488, 4531), False, 'from sqlalchemy.orm import relationship\n'), ((4546, 4589), 'sqlalchemy.orm.relationship', 'relationship', (['"""Station"""'], {'backref': '"""arrivals"""'}), "('Station', backref='arrivals')\n", (4558, 4589), False, 'from sqlalchemy.orm import relationship\n'), ((1559, 1581), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""lines.id"""'], {}), "('lines.id')\n", (1569, 1581), False, 'from sqlalchemy import ForeignKey\n'), ((1619, 1647), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""train_types.id"""'], {}), "('train_types.id')\n", (1629, 1647), False, 'from sqlalchemy import ForeignKey\n'), ((2549, 2571), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""lines.id"""'], {}), "('lines.id')\n", (2559, 2571), False, 'from sqlalchemy import ForeignKey\n'), ((3111, 3133), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""tasks.id"""'], {}), "('tasks.id')\n", (3121, 3133), False, 'from sqlalchemy import ForeignKey\n'), ((3199, 3222), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""trains.id"""'], {}), "('trains.id')\n", (3209, 3222), False, 'from sqlalchemy import ForeignKey\n'), ((3259, 3286), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""departures.id"""'], {}), "('departures.id')\n", (3269, 3286), False, 'from sqlalchemy import ForeignKey\n'), ((3321, 3346), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""arrivals.id"""'], {}), "('arrivals.id')\n", (3331, 3346), False, 'from sqlalchemy import ForeignKey\n'), ((3810, 3835), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""stations.id"""'], {}), "('stations.id')\n", (3820, 3835), False, 'from sqlalchemy import ForeignKey\n'), ((4382, 4407), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""stations.id"""'], {}), "('stations.id')\n", (4392, 4407), False, 'from sqlalchemy import ForeignKey\n')]
|
import numpy as np
import pandas as pd
import itertools
from sklearn.metrics import roc_auc_score, roc_curve, auc
def augment_agg(X):
mean = np.array(X.mean(axis = 1)).reshape(X.shape[0],1)
std = np.array(X.std(axis = 1)).reshape(X.shape[0],1)
rang = np.array((X.max(axis = 1) - X.min(axis = 1))).reshape(X.shape[0],1)
X = np.append(X,mean,1)
X = np.append(X,std,1)
X = np.append(X,rang,1)
return X
def longest_one_streak(lst):
return max(sum(1 for x in l if x == 1) for n, l in itertools.groupby(lst))
def optimal_cutoff(labels, preds):
####################################
# The optimal cut off would be where tpr is high and fpr is low
# tpr - (1-fpr) is zero or near to zero is the optimal cut off point
####################################
fpr, tpr, cutoff = roc_curve(labels, preds)
roc_auc = auc(fpr, tpr)
#print("Area under the ROC curve : %f" % roc_auc)
i = np.arange(len(tpr)) # index for df
roc = pd.DataFrame({'fpr' : pd.Series(fpr, index=i),'tpr' : pd.Series(tpr, index = i), '1-fpr' : pd.Series(1-fpr, index = i), 'tf' : pd.Series(tpr - (1-fpr), index = i), 'thresholds' : pd.Series(cutoff, index = i)})
    return float(roc.iloc[(roc.tf - 0).abs().argsort()[:1]]["thresholds"].iloc[0])  # .ix was removed from pandas; argsort yields positions, so use .iloc
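def _demo_optimal_cutoff():
    # Hedged sketch (toy data, not from the source): the rule above picks the
    # threshold where tpr ~ 1 - fpr, i.e. where tpr - (1 - fpr) is closest to 0.
    labels = [0, 0, 0, 1, 1, 1]
    preds = [0.1, 0.3, 0.4, 0.35, 0.6, 0.8]
    return optimal_cutoff(labels, preds)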
def get_dynamics(VALUES, TEST_SEQ_LENGTH):
first_half = pd.Series([np.mean(i[:int(TEST_SEQ_LENGTH/2)]) for i in VALUES])
second_half = pd.Series([np.mean(i[int(TEST_SEQ_LENGTH/2):TEST_SEQ_LENGTH]) for i in VALUES])
dynamics = (second_half/first_half).fillna(0)
dynamics[np.isinf(dynamics)] = 0
return dynamics
def get_max_streak_length(arr, x):
    # initialize the running streak length
    count = 0
    # initialize the longest streak seen so far
    result = 0
    for i in arr:
        # extend the current streak when x is found
        if (i == x):
            count += 1
        # otherwise reset the streak counter
        else:
            count = 0
        # track the longest streak seen so far
        result = max(result, count)
return result
|
[
"sklearn.metrics.roc_curve",
"numpy.isinf",
"numpy.append",
"sklearn.metrics.auc",
"pandas.Series",
"itertools.groupby"
] |
[((341, 362), 'numpy.append', 'np.append', (['X', 'mean', '(1)'], {}), '(X, mean, 1)\n', (350, 362), True, 'import numpy as np\n'), ((369, 389), 'numpy.append', 'np.append', (['X', 'std', '(1)'], {}), '(X, std, 1)\n', (378, 389), True, 'import numpy as np\n'), ((396, 417), 'numpy.append', 'np.append', (['X', 'rang', '(1)'], {}), '(X, rang, 1)\n', (405, 417), True, 'import numpy as np\n'), ((825, 849), 'sklearn.metrics.roc_curve', 'roc_curve', (['labels', 'preds'], {}), '(labels, preds)\n', (834, 849), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc\n'), ((864, 877), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (867, 877), False, 'from sklearn.metrics import roc_auc_score, roc_curve, auc\n'), ((1633, 1651), 'numpy.isinf', 'np.isinf', (['dynamics'], {}), '(dynamics)\n', (1641, 1651), True, 'import numpy as np\n'), ((1007, 1030), 'pandas.Series', 'pd.Series', (['fpr'], {'index': 'i'}), '(fpr, index=i)\n', (1016, 1030), True, 'import pandas as pd\n'), ((1039, 1062), 'pandas.Series', 'pd.Series', (['tpr'], {'index': 'i'}), '(tpr, index=i)\n', (1048, 1062), True, 'import pandas as pd\n'), ((1076, 1103), 'pandas.Series', 'pd.Series', (['(1 - fpr)'], {'index': 'i'}), '(1 - fpr, index=i)\n', (1085, 1103), True, 'import pandas as pd\n'), ((1112, 1147), 'pandas.Series', 'pd.Series', (['(tpr - (1 - fpr))'], {'index': 'i'}), '(tpr - (1 - fpr), index=i)\n', (1121, 1147), True, 'import pandas as pd\n'), ((1164, 1190), 'pandas.Series', 'pd.Series', (['cutoff'], {'index': 'i'}), '(cutoff, index=i)\n', (1173, 1190), True, 'import pandas as pd\n'), ((519, 541), 'itertools.groupby', 'itertools.groupby', (['lst'], {}), '(lst)\n', (536, 541), False, 'import itertools\n')]
|
### Language models and functions
## <NAME>
## Created: March 2020
import pandas as pd
import numpy as np
import math
from scipy.special import comb
## Zipf pmf and cdf
def zipf(k,a):
power_law = np.divide(1,np.power(k,a))
H = np.sum(np.divide(1,np.power(k,a)))
pmf = np.divide(power_law,H)
cdf = [np.sum(pmf[0:i]) for i in range(1,pmf.shape[0]+1)]
return pmf, cdf
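## Hedged sanity check (editorial, illustration only): the pmf sums to 1 and the cdf ends at 1
def _demo_zipf():
    pmf, cdf = zipf(k=np.arange(1, 6), a=1.1)
    assert np.isclose(np.sum(pmf), 1.0)
    assert np.isclose(cdf[-1], 1.0)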
## Zipf expected rank/value - theory
def zipf_E(k,a):
return np.sum(np.divide(1,np.power(k,a-1)))/np.sum(np.divide(1,np.power(k,a)))
## Zipf log-log transform
def zipf_loglog(x,a,b):
return (-1*a*x)+b
## Vocabulary construction where c is the number of unigrams
def construct_vocabulary(c):
return [int(i) for i in range(1,c+1,1)]
## Wright-Fisher model - Corpus size function
def corpus_size(t,alpha,beta):
return int(np.ceil(beta*np.exp(alpha*t)))
## Wright-Fisher model - Selection value function
def selection_function(t,A,B):
return A*(np.sin(B*t) + np.cos(B*t))
# Wright-Fisher model (with selection option)
def wright_fisher(c,a,alpha,beta,A,B,T,PV,NV,tau,set_seed=None,steady=False):
# parameters - descriptions
# 1. c - is the number of vocabulary words
# 2. a - is the Zipf parameter
# 3. alpha - is the rate of change for the corpus size
# 4. beta - is the initial corpus size
# 5. A - is the selection value
# 6. B - is the variable selection value
# 7. T - is the total time elapsed (number of years)
# 8. PV - word indices under positive selection
# 9. NV - word indices under negative selection
# 10. tau - the time where selection is induced
# seeding
np.random.seed(set_seed)
# vocabulary and ranks ranks
V = construct_vocabulary(c=c) # ngrams labels
ranks = V # assign ranks to the words
# initial conditions
t = 0
    pmf, cdf = zipf(k=ranks,a=a) # initial probability distribution of words
initial_cs = corpus_size(t,alpha,beta) # initial corpus size (exponential function with parameters beta and alpha)
# initial fitness values
initial_fv = np.zeros(len(V)) + 1
if tau == 0:
initial_fv[[i-1 for i in PV]] = 1 + selection_function(tau,A,B)
initial_fv[[i-1 for i in NV]] = 1 - selection_function(tau,A,B)
fv_probs = np.multiply(pmf,initial_fv)
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# initial word counts
word_samples = V # sample words at least once
word_samples = np.append(word_samples,np.random.choice(V,initial_cs-c,replace=True,p=fv_probs_normal))
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
initial_count = np.zeros(len(V))
initial_count[[i-1 for i in wp_u]] = wp_c
initial_probs = np.zeros(len(V))
initial_probs[[i-1 for i in wp_u]] = np.divide(wp_c,np.sum(wp_c))
# time loop
fv_track = [initial_fv]
cs_track = [initial_cs]
count_track = [initial_count]
probs_track = [initial_probs]
for i in range(1,T):
# selection at t >= tau
fv = np.zeros(len(V)) + 1
if i >= tau:
fv[[i-1 for i in PV]] = 1 + selection_function(i,A,B)
fv[[i-1 for i in NV]] = 1 - selection_function(i,A,B)
fv_track.append(fv)
else:
fv_track.append(fv)
fv_probs = np.multiply(probs_track[i-1],fv_track[i-1])
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# Wright-Fisher
cs_track.append(corpus_size(i,alpha,beta)) # update corpus size
word_samples = V # sample words at least once
word_samples = np.append(word_samples,np.random.choice(V,cs_track[i]-c,replace=True,p=fv_probs_normal))
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
next_count = np.zeros(len(V))
next_count[[i-1 for i in wp_u]] = wp_c # update counts
count_track.append(next_count)
next_probs = np.zeros(len(V))
next_probs[[i-1 for i in wp_u]] = wp_c/np.sum(wp_c) # update probabilities
probs_track.append(next_probs)
# compute pscores, zscores, and convert outputs to dataframes
R = pd.DataFrame(np.matrix(count_track).T,index=V,columns=range(0,T))
P = pd.DataFrame(np.divide(R,np.sum(R,axis=0)),index=V,columns=range(0,T))
a = P.T - np.mean(P.T,axis=0)
b = np.std(P.T,axis=0)
Z = np.divide(a,b).T
S = pd.DataFrame(np.matrix(fv_track).T,index=V,columns=range(0,T))
N = pd.DataFrame({'N(t)':cs_track},index=R.columns)
del fv_track, cs_track, count_track, probs_track
return R, P, Z, S, N
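## Minimal usage sketch (editorial; parameter values are illustrative, not from the source)
def _demo_wright_fisher():
    R, P, Z, S, N = wright_fisher(c=20, a=1.0, alpha=0.01, beta=1000, A=0.1, B=0.05,
                                  T=10, PV=[1], NV=[2], tau=5, set_seed=42)
    return N  # yearly corpus sizes; R/P/Z/S hold counts, proportions, z-scores, fitness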
# Wright-Fisher model (with selection option and zero words)
def wright_fisher_0(c,a,alpha,beta,A,B,T,PV,NV,tau,set_seed=None,steady=False):
# parameters - descriptions
# 1. c - is the number of vocabulary words
# 2. a - is the Zipf parameter
# 3. alpha - is the rate of change for the corpus size
# 4. beta - is the initial corpus size
# 5. A - is the selection value
# 6. B - is the variable selection value
# 7. T - is the total time elapsed (number of years)
# 8. PV - word indices under positive selection
# 9. NV - word indices under negative selection
# 10. tau - the time where selection is induced
# seeding
np.random.seed(set_seed)
# vocabulary and ranks ranks
V = construct_vocabulary(c=c) # ngrams labels
ranks = V # assign ranks to the words
# initial conditions
t = 0
    pmf, cdf = zipf(k=ranks,a=a) # initial probability distribution of words
initial_cs = corpus_size(t,alpha,beta) # initial corpus size (exponential function with parameters beta and alpha)
# initial fitness values
initial_fv = np.zeros(len(V)) + 1
if tau == 0:
initial_fv[[i-1 for i in PV]] = 1 + selection_function(tau,A,B)
initial_fv[[i-1 for i in NV]] = 1 - selection_function(tau,A,B)
fv_probs = np.multiply(pmf,initial_fv)
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# initial word counts
word_samples = np.random.choice(V,initial_cs,replace=True,p=fv_probs_normal)
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
initial_count = np.zeros(len(V))
initial_count[[i-1 for i in wp_u]] = wp_c
initial_probs = np.zeros(len(V))
initial_probs[[i-1 for i in wp_u]] = np.divide(wp_c,np.sum(wp_c))
# time loop
fv_track = [initial_fv]
cs_track = [initial_cs]
count_track = [initial_count]
probs_track = [initial_probs]
for i in range(1,T):
# selection at t >= tau
fv = np.zeros(len(V)) + 1
if i >= tau:
fv[[i-1 for i in PV]] = 1 + selection_function(i,A,B)
fv[[i-1 for i in NV]] = 1 - selection_function(i,A,B)
fv_track.append(fv)
else:
fv_track.append(fv)
fv_probs = np.multiply(probs_track[i-1],fv_track[i-1])
fv_probs_normal = np.divide(fv_probs,np.sum(fv_probs)) # update probabilities
if steady == True:
fv_probs_normal = pmf
# Wright-Fisher
cs_track.append(corpus_size(i,alpha,beta)) # update corpus size
word_samples = np.random.choice(V,cs_track[i],replace=True,p=fv_probs_normal)
wp_u, wp_c = np.unique(word_samples,return_counts=True)
del word_samples
next_count = np.zeros(len(V))
next_count[[i-1 for i in wp_u]] = wp_c # update counts
count_track.append(next_count)
next_probs = np.zeros(len(V))
next_probs[[i-1 for i in wp_u]] = wp_c/np.sum(wp_c) # update probabilities
probs_track.append(next_probs)
# compute pscores, zscores, and convert outputs to dataframes
R = pd.DataFrame(np.matrix(count_track).T,index=V,columns=range(0,T))
P = pd.DataFrame(np.divide(R,np.sum(R,axis=0)),index=V,columns=range(0,T))
a = P.T - np.mean(P.T,axis=0)
b = np.std(P.T,axis=0)
Z = np.divide(a,b).T
S = pd.DataFrame(np.matrix(fv_track).T,index=V,columns=range(0,T))
N = pd.DataFrame({'N(t)':cs_track},index=R.columns)
del fv_track, cs_track, count_track, probs_track
return R, P, Z, S, N
# binomial (pmf or cdf)
def binomial(n,x,p,type='pmf'):
if x >= 0 and x <= n:
if type == 'pmf':
f = comb(n,x)*np.power(p,x)*np.power(1-p,n-x)
elif type == 'cdf':
f = 0
for i in range(0,x+1):
f += comb(n,i)*np.power(p,i)*np.power(1-p,n-i)
else:
f = 0
return f
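## Hedged check (editorial): comb(4,2) * 0.5**4 = 0.375, and the full cdf is 1
def _demo_binomial():
    assert abs(binomial(4, 2, 0.5) - 0.375) < 1e-12
    assert abs(binomial(4, 4, 0.5, type='cdf') - 1.0) < 1e-12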
# binomial pmf truncated at n-c (Wright-Fisher variant)
def binomial_wf(n,x,p,c):
if x <= n-c:
p = binomial(n-c,x,p)
elif x > n-c:
p = 0
return p
# binomial expected value
def E(n,p):
return n*p
# binomial variance
def Var(n,p):
return n*p*(1-p)
# binomial skewness
def Skew(n,p):
return np.divide((1-p)-p,np.sqrt(n*p*(1-p)))
# binomial covariance
def Cov(n,p_i,p_j):
return -1*n*p_i*p_j
|
[
"pandas.DataFrame",
"numpy.divide",
"numpy.matrix",
"numpy.multiply",
"numpy.random.seed",
"numpy.sum",
"numpy.std",
"numpy.power",
"scipy.special.comb",
"numpy.mean",
"numpy.sin",
"numpy.exp",
"numpy.cos",
"numpy.random.choice",
"numpy.unique",
"numpy.sqrt"
] |
[((273, 296), 'numpy.divide', 'np.divide', (['power_law', 'H'], {}), '(power_law, H)\n', (282, 296), True, 'import numpy as np\n'), ((1608, 1632), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (1622, 1632), True, 'import numpy as np\n'), ((2192, 2220), 'numpy.multiply', 'np.multiply', (['pmf', 'initial_fv'], {}), '(pmf, initial_fv)\n', (2203, 2220), True, 'import numpy as np\n'), ((2536, 2579), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (2545, 2579), True, 'import numpy as np\n'), ((4182, 4201), 'numpy.std', 'np.std', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (4188, 4201), True, 'import numpy as np\n'), ((4296, 4345), 'pandas.DataFrame', 'pd.DataFrame', (["{'N(t)': cs_track}"], {'index': 'R.columns'}), "({'N(t)': cs_track}, index=R.columns)\n", (4308, 4345), True, 'import pandas as pd\n'), ((5090, 5114), 'numpy.random.seed', 'np.random.seed', (['set_seed'], {}), '(set_seed)\n', (5104, 5114), True, 'import numpy as np\n'), ((5674, 5702), 'numpy.multiply', 'np.multiply', (['pmf', 'initial_fv'], {}), '(pmf, initial_fv)\n', (5685, 5702), True, 'import numpy as np\n'), ((5869, 5933), 'numpy.random.choice', 'np.random.choice', (['V', 'initial_cs'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, initial_cs, replace=True, p=fv_probs_normal)\n', (5885, 5933), True, 'import numpy as np\n'), ((5945, 5988), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (5954, 5988), True, 'import numpy as np\n'), ((7517, 7536), 'numpy.std', 'np.std', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (7523, 7536), True, 'import numpy as np\n'), ((7631, 7680), 'pandas.DataFrame', 'pd.DataFrame', (["{'N(t)': cs_track}"], {'index': 'R.columns'}), "({'N(t)': cs_track}, index=R.columns)\n", (7643, 7680), True, 'import pandas as pd\n'), ((211, 225), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (219, 225), True, 'import numpy as np\n'), ((304, 320), 'numpy.sum', 'np.sum', (['pmf[0:i]'], {}), '(pmf[0:i])\n', (310, 320), True, 'import numpy as np\n'), ((2258, 2274), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (2264, 2274), True, 'import numpy as np\n'), ((2457, 2525), 'numpy.random.choice', 'np.random.choice', (['V', '(initial_cs - c)'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, initial_cs - c, replace=True, p=fv_probs_normal)\n', (2473, 2525), True, 'import numpy as np\n'), ((2761, 2773), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (2767, 2773), True, 'import numpy as np\n'), ((3177, 3225), 'numpy.multiply', 'np.multiply', (['probs_track[i - 1]', 'fv_track[i - 1]'], {}), '(probs_track[i - 1], fv_track[i - 1])\n', (3188, 3225), True, 'import numpy as np\n'), ((3607, 3650), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (3616, 3650), True, 'import numpy as np\n'), ((4157, 4177), 'numpy.mean', 'np.mean', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (4164, 4177), True, 'import numpy as np\n'), ((4206, 4221), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (4215, 4221), True, 'import numpy as np\n'), ((5740, 5756), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (5746, 5756), True, 'import numpy as np\n'), ((6170, 6182), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (6176, 6182), True, 'import numpy as np\n'), ((6586, 6634), 'numpy.multiply', 'np.multiply', (['probs_track[i - 1]', 'fv_track[i - 1]'], {}), '(probs_track[i - 1], fv_track[i - 1])\n', (6597, 6638), True, 'import numpy as np\n'), ((6864, 6929), 'numpy.random.choice', 'np.random.choice', (['V', 'cs_track[i]'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, cs_track[i], replace=True, p=fv_probs_normal)\n', (6880, 6929), True, 'import numpy as np\n'), ((6942, 6985), 'numpy.unique', 'np.unique', (['word_samples'], {'return_counts': '(True)'}), '(word_samples, return_counts=True)\n', (6951, 6985), True, 'import numpy as np\n'), ((7492, 7512), 'numpy.mean', 'np.mean', (['P.T'], {'axis': '(0)'}), '(P.T, axis=0)\n', (7499, 7512), True, 'import numpy as np\n'), ((7541, 7556), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (7550, 7556), True, 'import numpy as np\n'), ((8313, 8337), 'numpy.sqrt', 'np.sqrt', (['(n * p * (1 - p))'], {}), '(n * p * (1 - p))\n', (8320, 8337), True, 'import numpy as np\n'), ((250, 264), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (258, 264), True, 'import numpy as np\n'), ((926, 939), 'numpy.sin', 'np.sin', (['(B * t)'], {}), '(B * t)\n', (932, 939), True, 'import numpy as np\n'), ((940, 953), 'numpy.cos', 'np.cos', (['(B * t)'], {}), '(B * t)\n', (946, 953), True, 'import numpy as np\n'), ((3260, 3276), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (3266, 3276), True, 'import numpy as np\n'), ((3526, 3595), 'numpy.random.choice', 'np.random.choice', (['V', '(cs_track[i] - c)'], {'replace': '(True)', 'p': 'fv_probs_normal'}), '(V, cs_track[i] - c, replace=True, p=fv_probs_normal)\n', (3542, 3595), True, 'import numpy as np\n'), ((3864, 3876), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (3870, 3876), True, 'import numpy as np\n'), ((4017, 4039), 'numpy.matrix', 'np.matrix', (['count_track'], {}), '(count_track)\n', (4026, 4039), True, 'import numpy as np\n'), ((4100, 4117), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (4106, 4117), True, 'import numpy as np\n'), ((4241, 4260), 'numpy.matrix', 'np.matrix', (['fv_track'], {}), '(fv_track)\n', (4250, 4260), True, 'import numpy as np\n'), ((6669, 6685), 'numpy.sum', 'np.sum', (['fv_probs'], {}), '(fv_probs)\n', (6675, 6685), True, 'import numpy as np\n'), ((7199, 7211), 'numpy.sum', 'np.sum', (['wp_c'], {}), '(wp_c)\n', (7205, 7211), True, 'import numpy as np\n'), ((7352, 7374), 'numpy.matrix', 'np.matrix', (['count_track'], {}), '(count_track)\n', (7361, 7374), True, 'import numpy as np\n'), ((7435, 7452), 'numpy.sum', 'np.sum', (['R'], {'axis': '(0)'}), '(R, axis=0)\n', (7441, 7452), True, 'import numpy as np\n'), ((7576, 7595), 'numpy.matrix', 'np.matrix', (['fv_track'], {}), '(fv_track)\n', (7585, 7595), True, 'import numpy as np\n'), ((454, 472), 'numpy.power', 'np.power', (['k', '(a - 1)'], {}), '(k, a - 1)\n', (462, 472), True, 'import numpy as np\n'), ((491, 505), 'numpy.power', 'np.power', (['k', 'a'], {}), '(k, a)\n', (499, 505), True, 'import numpy as np\n'), ((815, 832), 'numpy.exp', 'np.exp', (['(alpha * t)'], {}), '(alpha * t)\n', (821, 832), True, 'import numpy as np\n'), ((7877, 7899), 'numpy.power', 'np.power', (['(1 - p)', '(n - x)'], {}), '(1 - p, n - x)\n', (7885, 7899), True, 'import numpy as np\n'), ((7853, 7863), 'scipy.special.comb', 'comb', (['n', 'x'], {}), '(n, x)\n', (7857, 7863), False, 'from scipy.special import comb\n'), ((7863, 7877), 'numpy.power', 'np.power', (['p', 'x'], {}), '(p, x)\n', (7871, 7877), True, 'import numpy as np\n'), ((7985, 8007), 'numpy.power', 'np.power', (['(1 - p)', '(n - i)'], {}), '(1 - p, n - i)\n', (7993, 8007), True, 'import numpy as np\n'), ((7961, 7971), 'scipy.special.comb', 'comb', (['n', 'i'], {}), '(n, i)\n', (7965, 7971), False, 'from scipy.special import comb\n'), ((7971, 7985), 'numpy.power', 'np.power', (['p', 'i'], {}), '(p, i)\n', (7979, 7985), True, 'import numpy as np\n')]
|
# -*- coding: UTF-8 -*-
import os
import time
import simplejson as json
from django.conf import settings
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from common.config import SysConfig
from common.utils.extend_json_encoder import ExtendJSONEncoder
from sql.engines import get_engine
from sql.plugins.schemasync import SchemaSync
from .models import Instance, ParamTemplate, ParamHistory
@permission_required('sql.menu_instance', raise_exception=True)
def lists(request):
"""获取实例列表"""
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
type = request.POST.get('type')
db_type = request.POST.get('db_type')
tags = request.POST.getlist('tags[]')
limit = offset + limit
search = request.POST.get('search', '')
instances = Instance.objects.all()
    # filter by search keyword
if search:
instances = instances.filter(instance_name__icontains=search)
    # filter by instance type
if type:
instances = instances.filter(type=type)
    # filter by database type
if db_type:
instances = instances.filter(db_type=db_type)
    # filter by tags, returning instances that carry all of the given tags. TODO: the loop generates multi-table JOINs, which may be slow on large data sets
if tags:
for tag in tags:
instances = instances.filter(instancetagrelations__instance_tag=tag,
instancetag__active=True,
instancetagrelations__active=True)
count = instances.count()
instances = instances[offset:limit].values("id", "instance_name", "db_type", "type", "host", "port", "user")
    # serialize the QuerySet
rows = [row for row in instances]
result = {"total": count, "rows": rows}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.menu_instance', raise_exception=True)
def users(request):
"""获取实例用户列表"""
instance_id = request.POST.get('instance_id')
try:
instance = Instance.objects.get(id=instance_id)
except Instance.DoesNotExist:
        result = {'status': 1, 'msg': 'Instance does not exist', 'data': []}
return HttpResponse(json.dumps(result), content_type='application/json')
sql_get_user = '''select concat("\'", user, "\'", '@', "\'", host,"\'") as query from mysql.user;'''
query_engine = get_engine(instance=instance)
db_users = query_engine.query('mysql', sql_get_user).rows
    # fetch privilege information for each user
data = []
for db_user in db_users:
user_info = {}
user_priv = query_engine.query('mysql', 'show grants for {};'.format(db_user[0]), close_conn=False).rows
user_info['user'] = db_user[0]
user_info['privileges'] = user_priv
data.append(user_info)
    # close the connection
query_engine.close()
result = {'status': 0, 'msg': 'ok', 'rows': data}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.param_view', raise_exception=True)
def param_list(request):
"""
    Get the parameter list of an instance
:param request:
:return:
"""
instance_id = request.POST.get('instance_id')
editable = True if request.POST.get('editable') else False
search = request.POST.get('search', '')
try:
ins = Instance.objects.get(id=instance_id)
except Instance.DoesNotExist:
        result = {'status': 1, 'msg': 'Instance does not exist', 'data': []}
        return HttpResponse(json.dumps(result), content_type='application/json')
    # load the parameters configured in templates
cnf_params = dict()
for param in ParamTemplate.objects.filter(db_type=ins.db_type, variable_name__contains=search).values(
'id', 'variable_name', 'default_value', 'valid_values', 'description', 'editable'):
param['variable_name'] = param['variable_name'].lower()
cnf_params[param['variable_name']] = param
    # fetch the runtime variables of the instance
engine = get_engine(instance=ins)
ins_variables = engine.get_variables()
    # merge the results
rows = list()
for variable in ins_variables.rows:
variable_name = variable[0].lower()
row = {
'variable_name': variable_name,
'runtime_value': variable[1],
'editable': False,
}
if variable_name in cnf_params.keys():
row = dict(row, **cnf_params[variable_name])
rows.append(row)
    # filter by editability
if editable:
rows = [row for row in rows if row['editable']]
else:
rows = [row for row in rows if not row['editable']]
return HttpResponse(json.dumps(rows, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.param_view', raise_exception=True)
def param_history(request):
"""实例参数修改历史"""
limit = int(request.POST.get('limit'))
offset = int(request.POST.get('offset'))
limit = offset + limit
instance_id = request.POST.get('instance_id')
search = request.POST.get('search', '')
phs = ParamHistory.objects.filter(instance__id=instance_id)
    # filter by search keyword
if search:
phs = ParamHistory.objects.filter(variable_name__contains=search)
count = phs.count()
phs = phs[offset:limit].values("instance__instance_name", "variable_name", "old_var", "new_var",
"user_display", "create_time")
    # serialize the QuerySet
rows = [row for row in phs]
result = {"total": count, "rows": rows}
return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
content_type='application/json')
@permission_required('sql.param_edit', raise_exception=True)
def param_edit(request):
user = request.user
instance_id = request.POST.get('instance_id')
variable_name = request.POST.get('variable_name')
variable_value = request.POST.get('runtime_value')
try:
ins = Instance.objects.get(id=instance_id)
except Instance.DoesNotExist:
        result = {'status': 1, 'msg': 'Instance does not exist', 'data': []}
        return HttpResponse(json.dumps(result), content_type='application/json')
    # apply the parameter change
    engine = get_engine(instance=ins)
    # verify that the parameter is configured in a template
    if not ParamTemplate.objects.filter(variable_name=variable_name).exists():
        result = {'status': 1, 'msg': 'Please configure this parameter in the parameter template first!', 'data': []}
        return HttpResponse(json.dumps(result), content_type='application/json')
    # get the current runtime value
    runtime_value = engine.get_variables(variables=[variable_name]).rows[0][1]
    if variable_value == runtime_value:
        result = {'status': 1, 'msg': 'The new value equals the current runtime value; nothing was changed!', 'data': []}
        return HttpResponse(json.dumps(result), content_type='application/json')
    set_result = engine.set_variable(variable_name=variable_name, variable_value=variable_value)
    if set_result.error:
        result = {'status': 1, 'msg': f'Set failed, error: {set_result.error}', 'data': []}
        return HttpResponse(json.dumps(result), content_type='application/json')
    # on success, record the change history
else:
ParamHistory.objects.create(
instance=ins,
variable_name=variable_name,
old_var=runtime_value,
new_var=variable_value,
set_sql=set_result.full_sql,
user_name=user.username,
user_display=user.display
)
        result = {'status': 0, 'msg': 'Changed successfully; please persist it to the config file manually!', 'data': []}
return HttpResponse(json.dumps(result), content_type='application/json')
@permission_required('sql.menu_schemasync', raise_exception=True)
def schemasync(request):
"""对比实例schema信息"""
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
target_instance_name = request.POST.get('target_instance_name')
target_db_name = request.POST.get('target_db_name')
sync_auto_inc = True if request.POST.get('sync_auto_inc') == 'true' else False
sync_comments = True if request.POST.get('sync_comments') == 'true' else False
result = {'status': 0, 'msg': 'ok', 'data': {'diff_stdout': '', 'patch_stdout': '', 'revert_stdout': ''}}
    # Compare every database when 'all' is requested
if db_name == 'all' or target_db_name == 'all':
db_name = '*'
target_db_name = '*'
    # Fetch the connection details for both instances
instance_info = Instance.objects.get(instance_name=instance_name)
target_instance_info = Instance.objects.get(instance_name=target_instance_name)
    # Check that the SchemaSync binary path is configured
path = SysConfig().get('schemasync')
if path is None:
result['status'] = 1
        result['msg'] = 'Please configure the SchemaSync path!'
return HttpResponse(json.dumps(result), content_type='application/json')
    # Hand off to SchemaSync to produce the comparison
schema_sync = SchemaSync()
    # Build the arguments
tag = int(time.time())
output_directory = os.path.join(settings.BASE_DIR, 'downloads/schemasync/')
args = {
"sync-auto-inc": sync_auto_inc,
"sync-comments": sync_comments,
"tag": tag,
"output-directory": output_directory,
"source": r"mysql://{user}:'{pwd}'@{host}:{port}/{database}".format(user=instance_info.user,
pwd=instance_info.raw_password,
host=instance_info.host,
port=instance_info.port,
database=db_name),
"target": r"mysql://{user}:'{pwd}'@{host}:{port}/{database}".format(user=target_instance_info.user,
pwd=target_instance_info.raw_password,
host=target_instance_info.host,
port=target_instance_info.port,
database=target_db_name)
}
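    # For illustration only (values below are hypothetical), the source DSN
    # assembled above ends up looking like: mysql://appuser:'s3cret'@10.0.0.5:3306/mydb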
    # Validate the arguments
args_check_result = schema_sync.check_args(args)
if args_check_result['status'] == 1:
return HttpResponse(json.dumps(args_check_result), content_type='application/json')
    # Convert the arguments into a command line
cmd_args = schema_sync.generate_args2cmd(args, shell=True)
    # Run the command
try:
stdout, stderr = schema_sync.execute_cmd(cmd_args, shell=True).communicate()
diff_stdout = f'{stdout}{stderr}'
except RuntimeError as e:
diff_stdout = str(e)
    # For a single-database comparison, read the generated SQL files for display in the frontend
if db_name != '*':
date = time.strftime("%Y%m%d", time.localtime())
patch_sql_file = '%s%s_%s.%s.patch.sql' % (output_directory, target_db_name, tag, date)
revert_sql_file = '%s%s_%s.%s.revert.sql' % (output_directory, target_db_name, tag, date)
try:
with open(patch_sql_file, 'r') as f:
patch_sql = f.read()
except FileNotFoundError as e:
patch_sql = str(e)
try:
with open(revert_sql_file, 'r') as f:
revert_sql = f.read()
except FileNotFoundError as e:
revert_sql = str(e)
result['data'] = {'diff_stdout': diff_stdout, 'patch_stdout': patch_sql, 'revert_stdout': revert_sql}
else:
result['data'] = {'diff_stdout': diff_stdout, 'patch_stdout': '', 'revert_stdout': ''}
return HttpResponse(json.dumps(result), content_type='application/json')
def instance_resource(request):
"""
    Get resource information inside an instance: database, schema, table, column.
:param request:
:return:
"""
instance_name = request.POST.get('instance_name')
db_name = request.POST.get('db_name')
schema_name = request.POST.get('schema_name')
tb_name = request.POST.get('tb_name')
resource_type = request.POST.get('resource_type')
try:
instance = Instance.objects.get(instance_name=instance_name)
except Instance.DoesNotExist:
        result = {'status': 1, 'msg': 'Instance does not exist', 'data': []}
return HttpResponse(json.dumps(result), content_type='application/json')
result = {'status': 0, 'msg': 'ok', 'data': []}
try:
query_engine = get_engine(instance=instance)
if resource_type == 'database':
resource = query_engine.get_all_databases()
elif resource_type == 'schema' and db_name:
resource = query_engine.get_all_schemas(db_name=db_name)
elif resource_type == 'table' and db_name:
if schema_name:
resource = query_engine.get_all_tables(db_name=db_name, schema_name=schema_name)
else:
resource = query_engine.get_all_tables(db_name=db_name)
elif resource_type == 'column' and db_name and tb_name:
if schema_name:
resource = query_engine.get_all_columns_by_tb(db_name=db_name, schema_name=schema_name, tb_name=tb_name)
else:
resource = query_engine.get_all_columns_by_tb(db_name=db_name, tb_name=tb_name)
else:
            raise TypeError('Unsupported resource type or incomplete parameters!')
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
else:
if resource.error:
result['status'] = 1
result['msg'] = resource.error
else:
result['data'] = resource.rows
return HttpResponse(json.dumps(result), content_type='application/json')
def describe(request):
"""获取表结构"""
instance_name = request.POST.get('instance_name')
try:
instance = Instance.objects.get(instance_name=instance_name)
except Instance.DoesNotExist:
        result = {'status': 1, 'msg': 'Instance does not exist', 'data': []}
return HttpResponse(json.dumps(result), content_type='application/json')
db_name = request.POST.get('db_name')
schema_name = request.POST.get('schema_name')
tb_name = request.POST.get('tb_name')
result = {'status': 0, 'msg': 'ok', 'data': []}
try:
query_engine = get_engine(instance=instance)
if schema_name:
query_result = query_engine.describe_table(db_name, tb_name, schema_name)
else:
query_result = query_engine.describe_table(db_name, tb_name)
result['data'] = query_result.__dict__
except Exception as msg:
result['status'] = 1
result['msg'] = str(msg)
return HttpResponse(json.dumps(result), content_type='application/json')
|
[
"django.contrib.auth.decorators.permission_required",
"simplejson.dumps",
"time.time",
"sql.plugins.schemasync.SchemaSync",
"common.config.SysConfig",
"sql.engines.get_engine",
"os.path.join",
"time.localtime"
] |
[((448, 510), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.menu_instance"""'], {'raise_exception': '(True)'}), "('sql.menu_instance', raise_exception=True)\n", (467, 510), False, 'from django.contrib.auth.decorators import permission_required\n'), ((1835, 1897), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.menu_instance"""'], {'raise_exception': '(True)'}), "('sql.menu_instance', raise_exception=True)\n", (1854, 1897), False, 'from django.contrib.auth.decorators import permission_required\n'), ((2991, 3050), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.param_view"""'], {'raise_exception': '(True)'}), "('sql.param_view', raise_exception=True)\n", (3010, 3050), False, 'from django.contrib.auth.decorators import permission_required\n'), ((4670, 4729), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.param_view"""'], {'raise_exception': '(True)'}), "('sql.param_view', raise_exception=True)\n", (4689, 4729), False, 'from django.contrib.auth.decorators import permission_required\n'), ((5589, 5648), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.param_edit"""'], {'raise_exception': '(True)'}), "('sql.param_edit', raise_exception=True)\n", (5608, 5648), False, 'from django.contrib.auth.decorators import permission_required\n'), ((7437, 7501), 'django.contrib.auth.decorators.permission_required', 'permission_required', (['"""sql.menu_schemasync"""'], {'raise_exception': '(True)'}), "('sql.menu_schemasync', raise_exception=True)\n", (7456, 7501), False, 'from django.contrib.auth.decorators import permission_required\n'), ((2351, 2380), 'sql.engines.get_engine', 'get_engine', ([], {'instance': 'instance'}), '(instance=instance)\n', (2361, 2380), False, 'from sql.engines import get_engine\n'), ((3915, 3939), 'sql.engines.get_engine', 'get_engine', ([], {'instance': 'ins'}), '(instance=ins)\n', (3925, 3939), False, 'from sql.engines import get_engine\n'), ((6117, 6141), 'sql.engines.get_engine', 'get_engine', ([], {'instance': 'ins'}), '(instance=ins)\n', (6127, 6141), False, 'from sql.engines import get_engine\n'), ((8622, 8634), 'sql.plugins.schemasync.SchemaSync', 'SchemaSync', ([], {}), '()\n', (8632, 8634), False, 'from sql.plugins.schemasync import SchemaSync\n'), ((8696, 8752), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""downloads/schemasync/"""'], {}), "(settings.BASE_DIR, 'downloads/schemasync/')\n", (8708, 8752), False, 'import os\n'), ((1709, 1773), 'simplejson.dumps', 'json.dumps', (['result'], {'cls': 'ExtendJSONEncoder', 'bigint_as_string': '(True)'}), '(result, cls=ExtendJSONEncoder, bigint_as_string=True)\n', (1719, 1773), True, 'import simplejson as json\n'), ((2865, 2929), 'simplejson.dumps', 'json.dumps', (['result'], {'cls': 'ExtendJSONEncoder', 'bigint_as_string': '(True)'}), '(result, cls=ExtendJSONEncoder, bigint_as_string=True)\n', (2875, 2929), True, 'import simplejson as json\n'), ((4546, 4608), 'simplejson.dumps', 'json.dumps', (['rows'], {'cls': 'ExtendJSONEncoder', 'bigint_as_string': '(True)'}), '(rows, cls=ExtendJSONEncoder, bigint_as_string=True)\n', (4556, 4608), True, 'import simplejson as json\n'), ((5463, 5527), 'simplejson.dumps', 'json.dumps', (['result'], {'cls': 'ExtendJSONEncoder', 'bigint_as_string': '(True)'}), '(result, cls=ExtendJSONEncoder, bigint_as_string=True)\n', (5473, 5527), True, 'import simplejson as json\n'), ((7381, 7399), 
'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (7391, 7399), True, 'import simplejson as json\n'), ((8660, 8671), 'time.time', 'time.time', ([], {}), '()\n', (8669, 8671), False, 'import time\n'), ((11326, 11344), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (11336, 11344), True, 'import simplejson as json\n'), ((12086, 12115), 'sql.engines.get_engine', 'get_engine', ([], {'instance': 'instance'}), '(instance=instance)\n', (12096, 12115), False, 'from sql.engines import get_engine\n'), ((13273, 13291), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (13283, 13291), True, 'import simplejson as json\n'), ((13892, 13921), 'sql.engines.get_engine', 'get_engine', ([], {'instance': 'instance'}), '(instance=instance)\n', (13902, 13921), False, 'from sql.engines import get_engine\n'), ((14281, 14299), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (14291, 14299), True, 'import simplejson as json\n'), ((6332, 6350), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (6342, 6350), True, 'import simplejson as json\n'), ((6618, 6636), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (6628, 6636), True, 'import simplejson as json\n'), ((6904, 6922), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (6914, 6922), True, 'import simplejson as json\n'), ((8373, 8384), 'common.config.SysConfig', 'SysConfig', ([], {}), '()\n', (8382, 8384), False, 'from common.config import SysConfig\n'), ((8524, 8542), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (8534, 8542), True, 'import simplejson as json\n'), ((10097, 10126), 'simplejson.dumps', 'json.dumps', (['args_check_result'], {}), '(args_check_result)\n', (10107, 10126), True, 'import simplejson as json\n'), ((10533, 10549), 'time.localtime', 'time.localtime', ([], {}), '()\n', (10547, 10549), False, 'import time\n'), ((2173, 2191), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2183, 2191), True, 'import simplejson as json\n'), ((3476, 3494), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (3486, 3494), True, 'import simplejson as json\n'), ((6039, 6057), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (6049, 6057), True, 'import simplejson as json\n'), ((11948, 11966), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (11958, 11966), True, 'import simplejson as json\n'), ((13620, 13638), 'simplejson.dumps', 'json.dumps', (['result'], {}), '(result)\n', (13630, 13638), True, 'import simplejson as json\n')]
|
from eve.auth import TokenAuth
from werkzeug.security import check_password_hash
import random
import string
from personal_finances.run import app
class RolesAuth(TokenAuth):
def check_auth(self, token, allowed_roles, resource, method):
# use Eve's own db driver; no additional connections/resources are used
accounts = app.data.driver.db['accounts']
lookup = {'token': token}
if allowed_roles:
# only retrieve a user if his roles match ``allowed_roles``
lookup['roles'] = {'$in': allowed_roles}
account = accounts.find_one(lookup)
return account
def add_token(documents):
# Don't use this in production:
# You should at least make sure that the token is unique.
for document in documents:
document["token"] = (''.join(random.choice(string.ascii_uppercase) for x in range(10)))
|
[
"random.choice"
] |
[((819, 856), 'random.choice', 'random.choice', (['string.ascii_uppercase'], {}), '(string.ascii_uppercase)\n', (832, 856), False, 'import random\n')]
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _snowboydetect
else:
import _snowboydetect
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SnowboyDetect(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, resource_filename, model_str):
_snowboydetect.SnowboyDetect_swiginit(self, _snowboydetect.new_SnowboyDetect(resource_filename, model_str))
def Reset(self):
return _snowboydetect.SnowboyDetect_Reset(self)
def RunDetection(self, *args):
return _snowboydetect.SnowboyDetect_RunDetection(self, *args)
def SetSensitivity(self, sensitivity_str):
return _snowboydetect.SnowboyDetect_SetSensitivity(self, sensitivity_str)
def SetHighSensitivity(self, high_sensitivity_str):
return _snowboydetect.SnowboyDetect_SetHighSensitivity(self, high_sensitivity_str)
def GetSensitivity(self):
return _snowboydetect.SnowboyDetect_GetSensitivity(self)
def SetAudioGain(self, audio_gain):
return _snowboydetect.SnowboyDetect_SetAudioGain(self, audio_gain)
def UpdateModel(self):
return _snowboydetect.SnowboyDetect_UpdateModel(self)
def NumHotwords(self):
return _snowboydetect.SnowboyDetect_NumHotwords(self)
def ApplyFrontend(self, apply_frontend):
return _snowboydetect.SnowboyDetect_ApplyFrontend(self, apply_frontend)
def SampleRate(self):
return _snowboydetect.SnowboyDetect_SampleRate(self)
def NumChannels(self):
return _snowboydetect.SnowboyDetect_NumChannels(self)
def BitsPerSample(self):
return _snowboydetect.SnowboyDetect_BitsPerSample(self)
__swig_destroy__ = _snowboydetect.delete_SnowboyDetect
# Register SnowboyDetect in _snowboydetect:
_snowboydetect.SnowboyDetect_swigregister(SnowboyDetect)
class SnowboyVad(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, resource_filename):
_snowboydetect.SnowboyVad_swiginit(self, _snowboydetect.new_SnowboyVad(resource_filename))
def Reset(self):
return _snowboydetect.SnowboyVad_Reset(self)
def RunVad(self, *args):
return _snowboydetect.SnowboyVad_RunVad(self, *args)
def SetAudioGain(self, audio_gain):
return _snowboydetect.SnowboyVad_SetAudioGain(self, audio_gain)
def ApplyFrontend(self, apply_frontend):
return _snowboydetect.SnowboyVad_ApplyFrontend(self, apply_frontend)
def SampleRate(self):
return _snowboydetect.SnowboyVad_SampleRate(self)
def NumChannels(self):
return _snowboydetect.SnowboyVad_NumChannels(self)
def BitsPerSample(self):
return _snowboydetect.SnowboyVad_BitsPerSample(self)
__swig_destroy__ = _snowboydetect.delete_SnowboyVad
# Register SnowboyVad in _snowboydetect:
_snowboydetect.SnowboyVad_swigregister(SnowboyVad)
|
[
"_snowboydetect.SnowboyDetect_SetSensitivity",
"_snowboydetect.SnowboyDetect_UpdateModel",
"_snowboydetect.SnowboyDetect_NumHotwords",
"_snowboydetect.SnowboyVad_RunVad",
"_snowboydetect.SnowboyVad_ApplyFrontend",
"_snowboydetect.SnowboyDetect_NumChannels",
"_snowboydetect.SnowboyVad_SampleRate",
"_snowboydetect.SnowboyDetect_RunDetection",
"_snowboydetect.SnowboyDetect_SetAudioGain",
"_snowboydetect.SnowboyDetect_swigregister",
"_snowboydetect.SnowboyDetect_ApplyFrontend",
"_snowboydetect.SnowboyVad_NumChannels",
"_snowboydetect.SnowboyVad_BitsPerSample",
"_snowboydetect.SnowboyDetect_Reset",
"_snowboydetect.SnowboyVad_swigregister",
"_snowboydetect.SnowboyVad_Reset",
"_snowboydetect.SnowboyVad_SetAudioGain",
"_snowboydetect.SnowboyDetect_BitsPerSample",
"_snowboydetect.SnowboyDetect_GetSensitivity",
"_snowboydetect.SnowboyDetect_SetHighSensitivity",
"_snowboydetect.SnowboyDetect_SampleRate",
"_snowboydetect.new_SnowboyDetect",
"_snowboydetect.new_SnowboyVad"
] |
[((3770, 3826), '_snowboydetect.SnowboyDetect_swigregister', '_snowboydetect.SnowboyDetect_swigregister', (['SnowboyDetect'], {}), '(SnowboyDetect)\n', (3811, 3826), False, 'import _snowboydetect\n'), ((4888, 4938), '_snowboydetect.SnowboyVad_swigregister', '_snowboydetect.SnowboyVad_swigregister', (['SnowboyVad'], {}), '(SnowboyVad)\n', (4926, 4938), False, 'import _snowboydetect\n'), ((2451, 2491), '_snowboydetect.SnowboyDetect_Reset', '_snowboydetect.SnowboyDetect_Reset', (['self'], {}), '(self)\n', (2485, 2491), False, 'import _snowboydetect\n'), ((2543, 2597), '_snowboydetect.SnowboyDetect_RunDetection', '_snowboydetect.SnowboyDetect_RunDetection', (['self', '*args'], {}), '(self, *args)\n', (2584, 2597), False, 'import _snowboydetect\n'), ((2661, 2727), '_snowboydetect.SnowboyDetect_SetSensitivity', '_snowboydetect.SnowboyDetect_SetSensitivity', (['self', 'sensitivity_str'], {}), '(self, sensitivity_str)\n', (2704, 2727), False, 'import _snowboydetect\n'), ((2800, 2875), '_snowboydetect.SnowboyDetect_SetHighSensitivity', '_snowboydetect.SnowboyDetect_SetHighSensitivity', (['self', 'high_sensitivity_str'], {}), '(self, high_sensitivity_str)\n', (2847, 2875), False, 'import _snowboydetect\n'), ((2922, 2971), '_snowboydetect.SnowboyDetect_GetSensitivity', '_snowboydetect.SnowboyDetect_GetSensitivity', (['self'], {}), '(self)\n', (2965, 2971), False, 'import _snowboydetect\n'), ((3028, 3087), '_snowboydetect.SnowboyDetect_SetAudioGain', '_snowboydetect.SnowboyDetect_SetAudioGain', (['self', 'audio_gain'], {}), '(self, audio_gain)\n', (3069, 3087), False, 'import _snowboydetect\n'), ((3131, 3177), '_snowboydetect.SnowboyDetect_UpdateModel', '_snowboydetect.SnowboyDetect_UpdateModel', (['self'], {}), '(self)\n', (3171, 3177), False, 'import _snowboydetect\n'), ((3221, 3267), '_snowboydetect.SnowboyDetect_NumHotwords', '_snowboydetect.SnowboyDetect_NumHotwords', (['self'], {}), '(self)\n', (3261, 3267), False, 'import _snowboydetect\n'), ((3329, 3393), '_snowboydetect.SnowboyDetect_ApplyFrontend', '_snowboydetect.SnowboyDetect_ApplyFrontend', (['self', 'apply_frontend'], {}), '(self, apply_frontend)\n', (3371, 3393), False, 'import _snowboydetect\n'), ((3436, 3481), '_snowboydetect.SnowboyDetect_SampleRate', '_snowboydetect.SnowboyDetect_SampleRate', (['self'], {}), '(self)\n', (3475, 3481), False, 'import _snowboydetect\n'), ((3525, 3571), '_snowboydetect.SnowboyDetect_NumChannels', '_snowboydetect.SnowboyDetect_NumChannels', (['self'], {}), '(self)\n', (3565, 3571), False, 'import _snowboydetect\n'), ((3617, 3665), '_snowboydetect.SnowboyDetect_BitsPerSample', '_snowboydetect.SnowboyDetect_BitsPerSample', (['self'], {}), '(self)\n', (3659, 3665), False, 'import _snowboydetect\n'), ((4162, 4199), '_snowboydetect.SnowboyVad_Reset', '_snowboydetect.SnowboyVad_Reset', (['self'], {}), '(self)\n', (4193, 4199), False, 'import _snowboydetect\n'), ((4245, 4290), '_snowboydetect.SnowboyVad_RunVad', '_snowboydetect.SnowboyVad_RunVad', (['self', '*args'], {}), '(self, *args)\n', (4277, 4290), False, 'import _snowboydetect\n'), ((4347, 4403), '_snowboydetect.SnowboyVad_SetAudioGain', '_snowboydetect.SnowboyVad_SetAudioGain', (['self', 'audio_gain'], {}), '(self, audio_gain)\n', (4385, 4403), False, 'import _snowboydetect\n'), ((4465, 4526), '_snowboydetect.SnowboyVad_ApplyFrontend', '_snowboydetect.SnowboyVad_ApplyFrontend', (['self', 'apply_frontend'], {}), '(self, apply_frontend)\n', (4504, 4526), False, 'import _snowboydetect\n'), ((4569, 4611), '_snowboydetect.SnowboyVad_SampleRate', 
'_snowboydetect.SnowboyVad_SampleRate', (['self'], {}), '(self)\n', (4605, 4611), False, 'import _snowboydetect\n'), ((4655, 4698), '_snowboydetect.SnowboyVad_NumChannels', '_snowboydetect.SnowboyVad_NumChannels', (['self'], {}), '(self)\n', (4692, 4698), False, 'import _snowboydetect\n'), ((4744, 4789), '_snowboydetect.SnowboyVad_BitsPerSample', '_snowboydetect.SnowboyVad_BitsPerSample', (['self'], {}), '(self)\n', (4783, 4789), False, 'import _snowboydetect\n'), ((2350, 2412), '_snowboydetect.new_SnowboyDetect', '_snowboydetect.new_SnowboyDetect', (['resource_filename', 'model_str'], {}), '(resource_filename, model_str)\n', (2382, 2412), False, 'import _snowboydetect\n'), ((4075, 4123), '_snowboydetect.new_SnowboyVad', '_snowboydetect.new_SnowboyVad', (['resource_filename'], {}), '(resource_filename)\n', (4104, 4123), False, 'import _snowboydetect\n')]
|
import os
def get_hook_script_dir():
return os.path.join(os.path.dirname(__file__), 'hook_scripts')
def get_hook_names():
return os.listdir(get_hook_script_dir())
|
[
"os.path.dirname"
] |
[((63, 88), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (78, 88), False, 'import os\n')]
|
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch import optim
import matplotlib.pyplot as plt
import random
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Build dataset
class DiagnosisDataset(Dataset):
def __init__(self,data_list, seq_length, pad_value=0 ,drop_len=1):
self.seq_length = seq_length
self.pad_value = pad_value
self.drop_len = drop_len
self.data_list = [x for x in data_list if len(x) > self.drop_len]
self.input, self.target = self.input_target(self.data_list)
def __getitem__(self,idx):
inputs = np.array(self.input[idx]).astype(np.int64)
inputs = padding(inputs,self.seq_length, pad_value=self.pad_value)
targets = np.array(self.target[idx]).astype(np.int64)
return inputs, targets
def __len__(self):
return len(self.target)
def input_target(self,x):
inputs = []
targets = []
for data_ in x:
len_ = len(data_)
inputs.append(data_[:(len_-1)])
targets.append(data_[-1])
return inputs, targets
class PatientLSTM(nn.Module):
def __init__(self,n_feature,n_hidden,n_layer,drop_prob,vocab_size = 2626,padding_val = 2625):
super().__init__()
self.n_feature = n_feature
self.n_hidden = n_hidden
self.n_layer = n_layer
self.drop_prob = drop_prob
self.embedding = nn.Embedding(vocab_size,self.n_feature,padding_idx=padding_val)
self.lstm = nn.LSTM(self.n_feature,self.n_hidden,self.n_layer,batch_first=True)
self.dropout = nn.Dropout(self.drop_prob)
self.fc = nn.Linear(11*self.n_hidden,vocab_size)
def forward(self,x):
embedded = self.embedding(x)
lstm_ouput, hidden = self.lstm(embedded)
out = self.dropout(lstm_ouput)
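        # flatten all time steps; the model assumes a fixed sequence length of 11 (matching the fc layer)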
out = out.reshape(-1,11*self.n_hidden)
out = self.fc(out)
return out, hidden
def padding(x_array, length, pad_value=0):
"""
x_array: to be padded
length: max length
"""
len_ = len(x_array)
len2pad = length-len_
assert len2pad >= 0,"padding length should >= the array length"
padded_x = np.pad(x_array,(0,len2pad),mode="constant",constant_values=pad_value)
return padded_x
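# A quick sanity check of padding() with illustrative values:
#   padding(np.array([3, 1, 4]), 5, pad_value=2625)  ->  array([3, 1, 4, 2625, 2625])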
def train_model(n_feature,n_hidden,n_layer,drop_prob,batch_size,input_size,train,val,num_epoch,pad_value=2625,save_path="save_model/latest_model.pth"):
trainset = DiagnosisDataset(train,input_size,pad_value=pad_value)
trainloader = DataLoader(trainset,batch_size=batch_size,shuffle=True)
valset = DiagnosisDataset(val,input_size,pad_value=pad_value)
valloader = DataLoader(valset,batch_size=500,shuffle=True)
    # define the model
model = PatientLSTM(n_feature,n_hidden,n_layer,drop_prob)
model.to(device)
    # define the loss function
creteria = nn.CrossEntropyLoss()
# define optimizer
optimizer = optim.SGD(model.parameters(),lr=0.02, momentum=0.9)
print(f"{'epoch':15s}{'train_loss':20s}")
print("-"*60)
for epoch in range(num_epoch):
# set model into train mode
# h = model.init_hidden(batch_size)
model.train()
train_loss = []
for bt_idx, (inputs,targets) in enumerate(trainloader):
# set data to device
inputs, targets = inputs.to(device), targets.to(device)
# h = tuple([each.data for each in h])
# make predictions
output, (_) = model(inputs)
# compute loss
tr_loss = creteria(output, targets)
# set gradients to zero
model.zero_grad()
# backpropagate the gradients
tr_loss.backward()
train_loss.append(tr_loss.item())
            # update the weights
optimizer.step()
# scheduler.step(tr_loss.item())
# set model eval mode
model.eval()
test_loss = []
total = 0
correct = 0
with torch.no_grad():
for inputs,targets in valloader:
inputs, targets = inputs.to(device), targets.to(device)
y_pred,_ = model(inputs)
loss = creteria(y_pred, targets)
test_loss.append(loss.item())
# apply softmax to final layer
y_pred = F.softmax(y_pred, 1).cpu()
# get max score and index
score, indx = torch.max(y_pred, 1)
correct += torch.eq(targets.cpu(),indx).sum()
total += targets.size()[0]
print(f"{epoch+1:4d}{np.mean(train_loss):18.4f}")
if epoch%50==0:
torch.save(model.state_dict(), f"save_model/model-{epoch}-{np.mean(train_loss):18.4f}.pth")
f = open("demofile1l.txt", "w")
f.write(f"{epoch+1:4d}{np.mean(train_loss):18.4f}\n")
f.close()
print("Save model..")
torch.save(model.state_dict(), save_path)
print("Training finished...")
def load_model(n_feature,n_hidden,n_layer,drop_prob,save_path):
# device = torch.device('cpu')
model = PatientLSTM(n_feature,n_hidden,n_layer,drop_prob)
model.load_state_dict(torch.load(save_path, map_location=device))
print("Model Loaded")
return model
def infer(x_test,model):
_x=torch.from_numpy(np.array(x_test)).view(1,-1)
with torch.no_grad():
y_hat, _ = model(_x.to(device))
y_hat = F.softmax(y_hat,1).cpu()
_, indx = torch.max(y_hat,1)
return indx.item()
|
[
"numpy.pad",
"torch.nn.Dropout",
"torch.utils.data.DataLoader",
"torch.nn.Embedding",
"torch.load",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"numpy.mean",
"torch.cuda.is_available",
"torch.max",
"numpy.array",
"torch.nn.Linear",
"torch.nn.LSTM",
"torch.no_grad"
] |
[((2338, 2411), 'numpy.pad', 'np.pad', (['x_array', '(0, len2pad)'], {'mode': '"""constant"""', 'constant_values': 'pad_value'}), "(x_array, (0, len2pad), mode='constant', constant_values=pad_value)\n", (2344, 2411), True, 'import numpy as np\n'), ((2680, 2737), 'torch.utils.data.DataLoader', 'DataLoader', (['trainset'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(trainset, batch_size=batch_size, shuffle=True)\n', (2690, 2737), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2818, 2866), 'torch.utils.data.DataLoader', 'DataLoader', (['valset'], {'batch_size': '(500)', 'shuffle': '(True)'}), '(valset, batch_size=500, shuffle=True)\n', (2828, 2866), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((2998, 3019), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3017, 3019), False, 'from torch import nn\n'), ((239, 264), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (262, 264), False, 'import torch\n'), ((1572, 1637), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'self.n_feature'], {'padding_idx': 'padding_val'}), '(vocab_size, self.n_feature, padding_idx=padding_val)\n', (1584, 1637), False, 'from torch import nn\n'), ((1656, 1726), 'torch.nn.LSTM', 'nn.LSTM', (['self.n_feature', 'self.n_hidden', 'self.n_layer'], {'batch_first': '(True)'}), '(self.n_feature, self.n_hidden, self.n_layer, batch_first=True)\n', (1663, 1726), False, 'from torch import nn\n'), ((1747, 1773), 'torch.nn.Dropout', 'nn.Dropout', (['self.drop_prob'], {}), '(self.drop_prob)\n', (1757, 1773), False, 'from torch import nn\n'), ((1792, 1833), 'torch.nn.Linear', 'nn.Linear', (['(11 * self.n_hidden)', 'vocab_size'], {}), '(11 * self.n_hidden, vocab_size)\n', (1801, 1833), False, 'from torch import nn\n'), ((5286, 5328), 'torch.load', 'torch.load', (['save_path'], {'map_location': 'device'}), '(save_path, map_location=device)\n', (5296, 5328), False, 'import torch\n'), ((5471, 5486), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5484, 5486), False, 'import torch\n'), ((5587, 5606), 'torch.max', 'torch.max', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (5596, 5606), False, 'import torch\n'), ((4114, 4129), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4127, 4129), False, 'import torch\n'), ((720, 745), 'numpy.array', 'np.array', (['self.input[idx]'], {}), '(self.input[idx])\n', (728, 745), True, 'import numpy as np\n'), ((865, 891), 'numpy.array', 'np.array', (['self.target[idx]'], {}), '(self.target[idx])\n', (873, 891), True, 'import numpy as np\n'), ((4555, 4575), 'torch.max', 'torch.max', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (4564, 4575), False, 'import torch\n'), ((5433, 5449), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (5441, 5449), True, 'import numpy as np\n'), ((5544, 5563), 'torch.nn.functional.softmax', 'F.softmax', (['y_hat', '(1)'], {}), '(y_hat, 1)\n', (5553, 5563), True, 'import torch.nn.functional as F\n'), ((4710, 4729), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4717, 4729), True, 'import numpy as np\n'), ((4938, 4957), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4945, 4957), True, 'import numpy as np\n'), ((4456, 4476), 'torch.nn.functional.softmax', 'F.softmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (4465, 4476), True, 'import torch.nn.functional as F\n'), ((4834, 4853), 'numpy.mean', 'np.mean', (['train_loss'], {}), '(train_loss)\n', (4841, 4853), True, 'import numpy as np\n')]
|
import logging
_console_handler = logging.StreamHandler()
_console_handler.setLevel(logging.DEBUG)
_formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')
_console_handler.setFormatter(_formatter)
log = logging.getLogger('electrum-merchant')
log.setLevel(logging.DEBUG)
log.addHandler(_console_handler)
|
[
"logging.Formatter",
"logging.StreamHandler",
"logging.getLogger"
] |
[((35, 58), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (56, 58), False, 'import logging\n'), ((113, 175), 'logging.Formatter', 'logging.Formatter', (['"""[%(asctime)s] [%(levelname)s] %(message)s"""'], {}), "('[%(asctime)s] [%(levelname)s] %(message)s')\n", (130, 175), False, 'import logging\n'), ((225, 263), 'logging.getLogger', 'logging.getLogger', (['"""electrum-merchant"""'], {}), "('electrum-merchant')\n", (242, 263), False, 'import logging\n')]
|
#!/usr/bin/env python
"""
Set up a movement GUI.
Move Mip based on the GUI.
mip_test_continuous_drive_gui2.py -i hci0 -b D0:39:72:C4:7A:01 [-c|--carzy]
"""
#from Tkinter import *
import Tkinter
from movement_canvas import MovementCanvas
import logging
import mippy
import argparse
carzy = 0
def updateMovement():
# movementCanvas.positionX,movementCanvas.positionY, (-50 - 50)
# movementCanvas.positionAngle,movementCanvas.positionMagnitude
if movementCanvas.positionMagnitude < 10:
logging.debug('updateMovement : stopping')
mip.continuousDriveForward(0x0)
else:
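        # map the joystick range [-50, 50] onto Mip's signed speed range [-0x20, 0x20]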
forwardSpeed = int((-movementCanvas.positionY * 0x20)/50)
turnSpeed = int((movementCanvas.positionX * 0x20)/50)
logging.debug('updateMovement : forwardSpeed %d : turnSpeed %d' % (forwardSpeed,turnSpeed))
if carzy > 0:
mip.continuousCarzyDrive(forwardSpeed,turnSpeed)
else:
mip.continuousDrive(forwardSpeed,turnSpeed)
top.after(50,updateMovement)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Continuous Drive GUI.')
mippy.add_arguments(parser)
parser.add_argument(
'-c',
'--carzy',
default=0x0,action='count',
help='Carzy mode')
args = parser.parse_args()
if args.carzy > 0:
carzy = 1
else:
carzy = 0
print ("Carzy Mode = %d " % (carzy))
logging.basicConfig(level=logging.DEBUG)
gt = mippy.GattTool(args.adaptor, args.device)
mip = mippy.Mip(gt)
# start gui
top = Tkinter.Tk()
movementCanvas = MovementCanvas(top,300,300)
movementCanvas.setBindings()
#movementCanvas.addUpdateCallback(movement)
movementCanvas.pack()
top.after(50,updateMovement)
top.mainloop()
|
[
"logging.debug",
"argparse.ArgumentParser",
"logging.basicConfig",
"Tkinter.Tk",
"mippy.GattTool",
"mippy.add_arguments",
"movement_canvas.MovementCanvas",
"mippy.Mip"
] |
[((1054, 1114), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Continuous Drive GUI."""'}), "(description='Continuous Drive GUI.')\n", (1077, 1114), False, 'import argparse\n'), ((1119, 1146), 'mippy.add_arguments', 'mippy.add_arguments', (['parser'], {}), '(parser)\n', (1138, 1146), False, 'import mippy\n'), ((1415, 1455), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1434, 1455), False, 'import logging\n'), ((1465, 1506), 'mippy.GattTool', 'mippy.GattTool', (['args.adaptor', 'args.device'], {}), '(args.adaptor, args.device)\n', (1479, 1506), False, 'import mippy\n'), ((1517, 1530), 'mippy.Mip', 'mippy.Mip', (['gt'], {}), '(gt)\n', (1526, 1530), False, 'import mippy\n'), ((1557, 1569), 'Tkinter.Tk', 'Tkinter.Tk', ([], {}), '()\n', (1567, 1569), False, 'import Tkinter\n'), ((1591, 1620), 'movement_canvas.MovementCanvas', 'MovementCanvas', (['top', '(300)', '(300)'], {}), '(top, 300, 300)\n', (1605, 1620), False, 'from movement_canvas import MovementCanvas\n'), ((505, 547), 'logging.debug', 'logging.debug', (['"""updateMovement : stopping"""'], {}), "('updateMovement : stopping')\n", (518, 547), False, 'import logging\n'), ((734, 831), 'logging.debug', 'logging.debug', (["('updateMovement : forwardSpeed %d : turnSpeed %d' % (forwardSpeed, turnSpeed))"], {}), "('updateMovement : forwardSpeed %d : turnSpeed %d' % (\n forwardSpeed, turnSpeed))\n", (747, 831), False, 'import logging\n')]
|
if "bpy" in locals():
import imp
imp.reload(engine)
else:
import bpy
from brte import engine
class DebugEngine(bpy.types.RenderEngine, engine.RealTimeEngine):
bl_idname = 'RTE_DEBUG'
bl_label = 'RTE Debug'
|
[
"imp.reload"
] |
[((41, 59), 'imp.reload', 'imp.reload', (['engine'], {}), '(engine)\n', (51, 59), False, 'import imp\n')]
|
# Copyright (C) 2013-2015 eNovance SAS <<EMAIL>>
# Author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import collections
import glob
import os
import pprint
class Machine:
def __init__(self, name, value):
self.name = name
self.value = value
def compare(sets):
machines = []
for current_set in sets:
my_string = repr(sets[current_set])
machines.append(Machine(current_set, my_string))
to_be_sorted = collections.defaultdict(list)
for machine in machines:
key = machine.value
value = machine.name
to_be_sorted[key].append(value)
return dict(to_be_sorted)
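# Illustrative behaviour (hypothetical hosts, Python 3 reprs shown): machines
# whose hardware reprs match end up grouped under the same key, e.g.
#   compare({'host-a': {'cpu1'}, 'host-b': {'cpu1'}})  ->  {"{'cpu1'}": ['host-a', 'host-b']}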
def get_hosts_list_from_result(result):
systems_list = []
for element in result:
current_set = set()
for system in result[element]:
current_set.add(system)
systems_list.append(current_set)
return systems_list
def print_systems_groups(systems_groups):
total_hosts = 0
for system in systems_groups:
total_hosts += len(system)
print("The %d systems can be grouped in %d groups of "
"identical hardware" % (total_hosts, len(systems_groups)))
for system in systems_groups:
print("Group %d (%d Systems)" % (
systems_groups.index(system), len(system)))
print("-> " + ', '.join(system))
print()
def print_groups(global_params, result, title):
print("##### %s #####" % title)
groups_name = ""
for element in result:
group = result[element]
group_name = title.strip().replace(" ", "_")
if "output_dir" in global_params.keys():
group_name = "%s/%s" % (global_params["output_dir"], group_name)
for host in group:
group_name = "%s_%s" % (group_name, host.strip())
groups_name = "%s '%s.def'" % (groups_name, group_name)
print("%d identical systems :" % (len(group)))
print(group)
pprint.pprint(sorted(eval(element)))
# But always save it to a file for diffing
if "output_dir" in global_params.keys():
with open("%s.def" % group_name, "w") as fout:
pprint.pprint(sorted(eval(element)), fout)
print()
if "output_dir" in global_params.keys():
if len(result) > 1:
output_file = "%s/%s.diff" % (global_params["output_dir"],
title.strip().replace(" ", "_"))
os.system("diff -ub --from-file %s > '%s'" %
(groups_name, output_file))
else:
# If no difference exists, we can kill the def files
for filename in glob.glob("%s/%s*.def" %
(global_params["output_dir"],
title.strip().replace(" ", "_"))):
os.remove(filename)
print("#####" * 2 + "#" * len(title))
def compute_similar_hosts_list(systems_groups, new_groups):
for group in new_groups:
for systems_group in systems_groups:
intersection = set.intersection(systems_group, group)
if intersection:
if len(intersection) < len(systems_group):
# print("%d vs %d" % (len(intersection), len(systems_group)))
# We do have a partial match meaning we shall break
# the existing group in pieces
difference = set.difference(systems_group, group)
# The group we worked on doesn't exist anymore
# So let's delete it
systems_groups.remove(systems_group)
# Let's add the two sub groups generated by this split
systems_groups.append(intersection)
systems_groups.append(difference)
|
[
"collections.defaultdict",
"os.remove",
"os.system"
] |
[((1016, 1045), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1039, 1045), False, 'import collections\n'), ((3003, 3075), 'os.system', 'os.system', (['("diff -ub --from-file %s > \'%s\'" % (groups_name, output_file))'], {}), '("diff -ub --from-file %s > \'%s\'" % (groups_name, output_file))\n', (3012, 3075), False, 'import os\n'), ((3388, 3407), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (3397, 3407), False, 'import os\n')]
|
import pytest
import tasks
def test_add_TypeError(tasks_db):
with pytest.raises(TypeError):
tasks.add(task='not a Task object')
def test_list_TypeError(tasks_db):
with pytest.raises(TypeError):
tasks.list(owner=123)
def test_get_TypeError(tasks_db):
with pytest.raises(TypeError):
tasks.get(task_id='123')
class TestUpdate():
def test_bad_id(self, tasks_db):
with pytest.raises(TypeError):
tasks.update(task_id={'dict instead':1},
task=tasks.Task())
def test_bad_task(self, tasks_db):
with pytest.raises(TypeError):
tasks.update(task_id=1, task='not a task')
def test_delete_TypeError(tasks_db):
with pytest.raises(TypeError):
tasks.delete(task_id=(1,2,3))
def test_start_tasks_db_ValueError(tasks_db):
with pytest.raises(ValueError) as excinfo:
tasks.start_tasks_db('some/great/path', 'mysql')
exception_msg = excinfo.value.args[0]
assert exception_msg == "db_type must be a 'tiny' or 'mongo'"
|
[
"tasks.get",
"tasks.delete",
"tasks.start_tasks_db",
"tasks.add",
"tasks.list",
"tasks.Task",
"pytest.raises",
"tasks.update"
] |
[((71, 95), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (84, 95), False, 'import pytest\n'), ((105, 140), 'tasks.add', 'tasks.add', ([], {'task': '"""not a Task object"""'}), "(task='not a Task object')\n", (114, 140), False, 'import tasks\n'), ((186, 210), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (199, 210), False, 'import pytest\n'), ((220, 241), 'tasks.list', 'tasks.list', ([], {'owner': '(123)'}), '(owner=123)\n', (230, 241), False, 'import tasks\n'), ((286, 310), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (299, 310), False, 'import pytest\n'), ((320, 344), 'tasks.get', 'tasks.get', ([], {'task_id': '"""123"""'}), "(task_id='123')\n", (329, 344), False, 'import tasks\n'), ((727, 751), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (740, 751), False, 'import pytest\n'), ((761, 792), 'tasks.delete', 'tasks.delete', ([], {'task_id': '(1, 2, 3)'}), '(task_id=(1, 2, 3))\n', (773, 792), False, 'import tasks\n'), ((847, 872), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (860, 872), False, 'import pytest\n'), ((893, 941), 'tasks.start_tasks_db', 'tasks.start_tasks_db', (['"""some/great/path"""', '"""mysql"""'], {}), "('some/great/path', 'mysql')\n", (913, 941), False, 'import tasks\n'), ((421, 445), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (434, 445), False, 'import pytest\n'), ((599, 623), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (612, 623), False, 'import pytest\n'), ((637, 679), 'tasks.update', 'tasks.update', ([], {'task_id': '(1)', 'task': '"""not a task"""'}), "(task_id=1, task='not a task')\n", (649, 679), False, 'import tasks\n'), ((531, 543), 'tasks.Task', 'tasks.Task', ([], {}), '()\n', (541, 543), False, 'import tasks\n')]
|
from __future__ import absolute_import, unicode_literals
from appearance.classes import Icon
icon_key_setup = Icon(driver_name='fontawesome', symbol='key')
icon_keyserver_search = Icon(driver_name='fontawesome', symbol='search')
|
[
"appearance.classes.Icon"
] |
[((112, 157), 'appearance.classes.Icon', 'Icon', ([], {'driver_name': '"""fontawesome"""', 'symbol': '"""key"""'}), "(driver_name='fontawesome', symbol='key')\n", (116, 157), False, 'from appearance.classes import Icon\n'), ((182, 230), 'appearance.classes.Icon', 'Icon', ([], {'driver_name': '"""fontawesome"""', 'symbol': '"""search"""'}), "(driver_name='fontawesome', symbol='search')\n", (186, 230), False, 'from appearance.classes import Icon\n')]
|
import datetime
import numbers
import numpy as np
from past.builtins import basestring
import netCDF4
def validate_calendar(calendar):
"""Validate calendar string for CF Conventions.
Parameters
----------
calendar : str
Returns
-------
out : str
same as input if the calendar is valid
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
2. NetCDFError is raised for invalid calendars.
"""
if calendar in ['gregorian', 'standard', 'proleptic_gregorian', 'noleap',
'365_day', 'all_leap', '366_day', '360_day', 'julian']:
return calendar
elif calendar == 'none':
raise NotImplementedError("calendar is set to 'none'")
else:
# should be a better error...
raise NotImplementedError("Unknown calendar: {0}".format(calendar))
def _calendar_from_ncdataset(ncdataset):
"""Get calendar from a netCDF4._netCDF4.Dataset object.
Parameters
----------
ncdataset : netCDF4._netCDF4.Dataset
Returns
-------
out : str
calendar attribute of the time variable
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
2. NetCDFError is raised for invalid calendars or if there is not time
variable in the dataset.
"""
if 'time' in ncdataset.variables:
if hasattr(ncdataset.variables['time'], 'calendar'):
return validate_calendar(ncdataset.variables['time'].calendar)
else:
return 'gregorian'
else:
# should be a better error...
raise NotImplementedError("NetCDF file has no time variable")
def get_calendar(nc_resource):
"""Get calendar from a NetCDF resource.
Parameters
----------
nc_resource : str or netCDF4._netCDF4.Dataset or netCDF4._netCDF4.Variable
Returns
-------
out : str
calendar attribute of the time variable
Notes
-----
1. The 'none' value for the calendar attribute is not supported anywhere
in this code presently, so NotImplementedError is raised.
2. NetCDFError is raised for invalid calendars or if there is not time
variable in the dataset.
"""
if hasattr(nc_resource, 'calendar'):
return validate_calendar(nc_resource.calendar)
elif isinstance(nc_resource, netCDF4._netCDF4.Dataset):
return _calendar_from_ncdataset(nc_resource)
elif isinstance(nc_resource, basestring):
nc = netCDF4.Dataset(nc_resource, 'r')
return _calendar_from_ncdataset(nc)
else:
msg = "Unknown NetCDF resource: {0}"
raise NotImplementedError(msg.format(str(nc_resource)))
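# Usage sketch (the path is hypothetical):
#   get_calendar('/data/tas_day.nc')  ->  'gregorian'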
def multiple_files_time_indice(nc_files, t):
if t < 0:
raise NotImplementedError("Starting from the end.")
for (i, nc_file) in enumerate(nc_files):
ncdataset = netCDF4.Dataset(nc_file, 'r')
if 'time' not in ncdataset.dimensions:
raise NotImplementedError() # should be a better error...
nt = ncdataset.dimensions['time'].size
if t < nt:
return (i, t)
t -= nt
raise NotImplementedError("overflow.") # should be a better error...
def _nearest_time_from_netcdf_time_units(nc_files, t, threshold=None):
if isinstance(nc_files, basestring):
nc_files = [nc_files]
previous_end_time = None
previous_nt = None
initial_calendar = None
initial_time_units = None
for (i, nc_file) in enumerate(nc_files):
ncdataset = netCDF4.Dataset(nc_file, 'r')
if 'time' not in ncdataset.variables:
raise NotImplementedError() # should be a better error...
nctime = ncdataset.variables['time']
if initial_calendar is None:
initial_calendar = _calendar_from_ncdataset(ncdataset)
initial_time_units = nctime.units
else:
current_calendar = _calendar_from_ncdataset(ncdataset)
# Here we should use a calendar compare that takes into account
# aliases.
c1 = (current_calendar != initial_calendar)
c2 = (nctime.units != initial_time_units)
if c1 or c2:
datetimes = netCDF4.num2date(nctime[:], nctime.units,
current_calendar)
nctime = netCDF4.date2num(datetimes, initial_time_units,
initial_calendar)
start_time = nctime[0]
end_time = nctime[-1]
if (t >= start_time) and (t <= end_time):
tn = int((np.abs(nctime[:]-t)).argmin())
if threshold and (abs(nctime[tn]-t) > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i, tn)
elif t < start_time:
if previous_end_time is not None:
pdiff = np.abs(previous_end_time-t)
ndiff = np.abs(start_time-t)
if pdiff <= ndiff:
if threshold and (pdiff > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i-1, previous_nt-1)
else:
if threshold and (ndiff > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return(i, 0)
elif threshold and ((start_time-t) > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (0, 0)
previous_end_time = end_time
previous_nt = ncdataset.dimensions['time'].size
if threshold and (t-end_time > threshold):
# should be a better error...
raise NotImplementedError("No value below threshold.")
return (i, previous_nt-1)
def nearest_time(nc_files, t, threshold=None):
if isinstance(nc_files, basestring):
nc_files = [nc_files]
if isinstance(t, (list, set, tuple)):
        if len(t) >= 6:
t = netCDF4.netcdftime.netcdftime(t[0], t[1], t[2],
t[3], t[4], t[5])
else:
t = netCDF4.netcdftime.netcdftime(t[0], t[1], t[2], 12, 0, 0)
elif isinstance(t, basestring):
# Can't use time.strptime because of alternate NetCDF calendars
decode_t = t.split('T')
decode_date = decode_t[0].split('-')
yyyy = int(decode_date[0])
mm = int(decode_date[1])
dd = int(decode_date[2])
if len(decode_t) > 1:
decode_time = decode_t[1].split(':')
hh = int(decode_time[0])
mi = int(decode_time[1])
ss = int(decode_time[2])
else:
hh = 12
mi = 0
ss = 0
try:
t = datetime.datetime(yyyy, mm, dd, hh, mi, ss)
except ValueError:
t = netCDF4.netcdftime.datetime(yyyy, mm, dd, hh, mi, ss)
if isinstance(t, numbers.Number):
return _nearest_time_from_netcdf_time_units(nc_files, t, threshold)
elif isinstance(t, (datetime.datetime,
netCDF4.netcdftime._datetime.datetime)):
nc = netCDF4.Dataset(nc_files[0], 'r')
nctime = nc.variables['time']
t = netCDF4.date2num(t, nctime.units, _calendar_from_ncdataset(nc))
return _nearest_time_from_netcdf_time_units(nc_files, t, threshold)
else:
raise NotImplementedError()
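# Usage sketch (file name and date are hypothetical): returns the position of
# the closest record as a (file_index, time_index) pair, e.g.
#   nearest_time(['tas_day.nc'], '2001-06-15T12:00:00')  ->  (file_index, time_index)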
def time_start_end(nc_resource):
"""Retrieve start and end date in a NetCDF file.
Parameters
----------
nc_resource : netCDF4._netCDF4.Dataset
Returns
-------
out : (netcdftime._datetime.datetime, netcdftime._datetime.datetime)
Tuple with start date and end date.
"""
if 'time' in nc_resource.variables:
nctime = nc_resource.variables['time']
nccalendar = getattr(nctime, 'calendar', 'gregorian')
datetime_min = netCDF4.num2date(
nctime[0], nctime.units, nccalendar)
datetime_max = netCDF4.num2date(
nctime[-1], nctime.units, nccalendar)
return (datetime_min, datetime_max)
else:
return (None, None)
def nc_datetime_to_iso(nc_datetime, force_gregorian_date=False,
raise_non_gregorian_dates=False):
"""Convert a NetCDF datetime to ISO format.
Parameters
----------
nc_datetime : netcdftime._datetime.datetime
force_gregorian_date : bool
Force output to be a valid gregorian calendar date. Only use this
if you know what you are doing, information will be lost about dates
in other valid CF-Convention calendars. In those cases, a nearest
gregorian date is forged.
raise_non_gregorian_dates : bool
In combination with force_gregorian_date, will raise an error if the
date is not a valid gregorian date, instead of returning the forged
nearest date.
Returns
-------
out : str
ISO formatted datetime.
Notes
-----
Does not support time zones.
"""
if force_gregorian_date:
try:
real_datetime = datetime.datetime(*nc_datetime.timetuple()[0:6])
except ValueError:
if raise_non_gregorian_dates:
raise
# Forging a nearest gregorian date. Try day-1 and if this works
# and hour < 12, set to (year,month,day-1,23,59,59), else
# set to (year,month+1,1,0,0,0).
year = nc_datetime.year
next_month = nc_datetime.month + 1
if next_month == 13:
next_month = 1
year += 1
real_datetime = datetime.datetime(year, next_month, 1, 0, 0, 0)
if nc_datetime.hour < 12:
try:
real_datetime = datetime.datetime(
nc_datetime.year, nc_datetime.month,
nc_datetime.day - 1)
real_datetime = datetime.datetime(
nc_datetime.year, nc_datetime.month,
nc_datetime.day - 1, 23, 59, 59)
except ValueError:
pass
return real_datetime.isoformat()
else:
return nc_datetime.strftime('%Y-%m-%dT%H:%M:%S')
|
[
"netCDF4.Dataset",
"numpy.abs",
"netCDF4.date2num",
"datetime.datetime",
"netCDF4.num2date",
"netCDF4.netcdftime.datetime",
"netCDF4.netcdftime.netcdftime"
] |
[((3029, 3058), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_file', '"""r"""'], {}), "(nc_file, 'r')\n", (3044, 3058), False, 'import netCDF4\n'), ((3678, 3707), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_file', '"""r"""'], {}), "(nc_file, 'r')\n", (3693, 3707), False, 'import netCDF4\n'), ((8254, 8307), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[0]', 'nctime.units', 'nccalendar'], {}), '(nctime[0], nctime.units, nccalendar)\n', (8270, 8307), False, 'import netCDF4\n'), ((8344, 8398), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[-1]', 'nctime.units', 'nccalendar'], {}), '(nctime[-1], nctime.units, nccalendar)\n', (8360, 8398), False, 'import netCDF4\n'), ((6344, 6409), 'netCDF4.netcdftime.netcdftime', 'netCDF4.netcdftime.netcdftime', (['t[0]', 't[1]', 't[2]', 't[3]', 't[4]', 't[5]'], {}), '(t[0], t[1], t[2], t[3], t[4], t[5])\n', (6373, 6409), False, 'import netCDF4\n'), ((6486, 6543), 'netCDF4.netcdftime.netcdftime', 'netCDF4.netcdftime.netcdftime', (['t[0]', 't[1]', 't[2]', '(12)', '(0)', '(0)'], {}), '(t[0], t[1], t[2], 12, 0, 0)\n', (6515, 6543), False, 'import netCDF4\n'), ((7498, 7531), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_files[0]', '"""r"""'], {}), "(nc_files[0], 'r')\n", (7513, 7531), False, 'import netCDF4\n'), ((2646, 2679), 'netCDF4.Dataset', 'netCDF4.Dataset', (['nc_resource', '"""r"""'], {}), "(nc_resource, 'r')\n", (2661, 2679), False, 'import netCDF4\n'), ((4363, 4422), 'netCDF4.num2date', 'netCDF4.num2date', (['nctime[:]', 'nctime.units', 'current_calendar'], {}), '(nctime[:], nctime.units, current_calendar)\n', (4379, 4422), False, 'import netCDF4\n'), ((4493, 4558), 'netCDF4.date2num', 'netCDF4.date2num', (['datetimes', 'initial_time_units', 'initial_calendar'], {}), '(datetimes, initial_time_units, initial_calendar)\n', (4509, 4558), False, 'import netCDF4\n'), ((7121, 7164), 'datetime.datetime', 'datetime.datetime', (['yyyy', 'mm', 'dd', 'hh', 'mi', 'ss'], {}), '(yyyy, mm, dd, hh, mi, ss)\n', (7138, 7164), False, 'import datetime\n'), ((9984, 10031), 'datetime.datetime', 'datetime.datetime', (['year', 'next_month', '(1)', '(0)', '(0)', '(0)'], {}), '(year, next_month, 1, 0, 0, 0)\n', (10001, 10031), False, 'import datetime\n'), ((5070, 5099), 'numpy.abs', 'np.abs', (['(previous_end_time - t)'], {}), '(previous_end_time - t)\n', (5076, 5099), True, 'import numpy as np\n'), ((5122, 5144), 'numpy.abs', 'np.abs', (['(start_time - t)'], {}), '(start_time - t)\n', (5128, 5144), True, 'import numpy as np\n'), ((7208, 7261), 'netCDF4.netcdftime.datetime', 'netCDF4.netcdftime.datetime', (['yyyy', 'mm', 'dd', 'hh', 'mi', 'ss'], {}), '(yyyy, mm, dd, hh, mi, ss)\n', (7235, 7261), False, 'import netCDF4\n'), ((4734, 4755), 'numpy.abs', 'np.abs', (['(nctime[:] - t)'], {}), '(nctime[:] - t)\n', (4740, 4755), True, 'import numpy as np\n'), ((10127, 10202), 'datetime.datetime', 'datetime.datetime', (['nc_datetime.year', 'nc_datetime.month', '(nc_datetime.day - 1)'], {}), '(nc_datetime.year, nc_datetime.month, nc_datetime.day - 1)\n', (10144, 10202), False, 'import datetime\n'), ((10288, 10379), 'datetime.datetime', 'datetime.datetime', (['nc_datetime.year', 'nc_datetime.month', '(nc_datetime.day - 1)', '(23)', '(59)', '(59)'], {}), '(nc_datetime.year, nc_datetime.month, nc_datetime.day - 1,\n 23, 59, 59)\n', (10305, 10379), False, 'import datetime\n')]
|
# coding=utf8
import nuke
import random
MAX_POINTS_COUNTS = 300000
def create_cloud_point():
file_path, max_points = select_plyfile_dialog()
if not file_path:
return
node_name = file_path.split("/")[-1]
read_data = scan_ply_data(file_path, max_points)
create_node(node_name, read_data)
def select_plyfile_dialog():
p = nuke.Panel(".ply importer")
p.addFilenameSearch(".ply file", "")
p.addSingleLineInput("Maximum points to create", MAX_POINTS_COUNTS)
p.addButton("Cancel")
p.addButton("OK")
p.show()
path = p.value(".ply file") or None
if path:
max_points = int(p.value("Maximum points to create"))
return path, max_points
return None, None
def scan_ply_data(path, max_points):
started = False
total = 0
points = ""
normals = ""
colors = ""
vert_entries = []
verts_count = 0
with open(path) as r:
lines = r.readlines()
for line in lines:
if "element vertex" in line:
verts_count = int(line.split()[2])
print ("%s points detected! " % verts_count)
if started and total < verts_count:
vert_entries.append(line)
total += 1
if "end_header" in line:
started = True
if max_points <= verts_count:
print ("selecting only %s points at random! ") % max_points
vert_entries = randomSelection(max_points, vert_entries)
for entry in vert_entries:
data_split = entry.split()
points += "%s " % " ".join(data_split[0:3])
normals += "%s " % " ".join(data_split[3:6])
colors += "%s " % " ".join([str(float(v) / 255.0) for v in data_split[6:9]]) # ignore alpha value
read_data = {"total": total, "points": points, "normals": normals, "colors": colors, "verts_count": verts_count}
return read_data
def randomSelection(numToPick, items):
shuffledItems = items
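    # note: this aliases the input list rather than copying it, so the shuffle below reorders the caller's list in place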
random.shuffle(shuffledItems)
return shuffledItems[0:numToPick]
def create_node(node_name, read_data):
label = "%s points" % (read_data["verts_count"])
node = nuke.createNode("BakedPointCloud")
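    # each serialize* knob receives the point count followed by the flattened values assembled in scan_ply_data()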
node.knob("serializePoints").setValue("%s %s" % (read_data["total"], read_data["points"]))
node.knob("serializeNormals").setValue("%s %s" % (read_data["total"], read_data["normals"]))
node.knob("serializeColors").setValue("%s %s" % (read_data["total"], read_data["colors"]))
node.knob("name").setValue(node_name)
node.knob("label").setValue(label)
if __name__ == "__main__":
create_cloud_point()
|
[
"random.shuffle",
"nuke.createNode",
"nuke.Panel"
] |
[((356, 383), 'nuke.Panel', 'nuke.Panel', (['""".ply importer"""'], {}), "('.ply importer')\n", (366, 383), False, 'import nuke\n'), ((1929, 1958), 'random.shuffle', 'random.shuffle', (['shuffledItems'], {}), '(shuffledItems)\n', (1943, 1958), False, 'import random\n'), ((2102, 2136), 'nuke.createNode', 'nuke.createNode', (['"""BakedPointCloud"""'], {}), "('BakedPointCloud')\n", (2117, 2136), False, 'import nuke\n')]
|
from dataclasses import asdict
import flask
from expungeservice.util import DateWithFuture as date
from expungeservice.models.record import Record
from expungeservice.models.record_summary import RecordSummary
class ExpungeModelEncoder(flask.json.JSONEncoder):
def record_summary_to_json(self, record_summary):
return {
**self.record_to_json(record_summary.record),
**{
"summary": {
"total_charges": record_summary.total_charges,
"eligible_charges_by_date": record_summary.eligible_charges_by_date,
"county_balances": record_summary.county_balances,
"total_balance_due": record_summary.total_balance_due,
"total_cases": record_summary.total_cases,
},
"questions": record_summary.questions,
},
}
def record_to_json(self, record):
return {
"total_balance_due": record.total_balance_due,
"cases": [self.case_to_json(case) for case in record.cases],
"errors": record.errors,
}
def case_to_json(self, case):
return {
**self.case_summary_to_json(case.summary),
"charges": [self.charge_to_json(charge) for charge in case.charges],
}
def case_summary_to_json(self, case):
return {
"name": case.name,
"birth_year": case.birth_year if case.birth_year else "",
"case_number": case.case_number,
"citation_number": case.citation_number,
"location": case.location,
"date": case.date,
"violation_type": case.violation_type,
"current_status": case.current_status
+ (
"" if case.current_status.lower() in ["open", "closed"] else " (Closed)" if case.closed() else " (Open)"
),
"balance_due": case.get_balance_due(),
"case_detail_link": case.case_detail_link,
"edit_status": case.edit_status,
}
def charge_to_json(self, charge):
return {
"ambiguous_charge_id": charge.ambiguous_charge_id,
"case_number": charge.case_number,
"date": charge.date,
"disposition": charge.disposition,
"expungement_result": charge.expungement_result,
"id": charge.id,
"level": charge.level,
"name": charge.name,
"probation_revoked": charge.probation_revoked,
"statute": charge.statute,
"type_name": charge.charge_type.type_name,
"expungement_rules": charge.charge_type.expungement_rules,
"edit_status": charge.edit_status,
}
def default(self, o):
if isinstance(o, RecordSummary):
return self.record_summary_to_json(o)
elif isinstance(o, Record):
return self.record_to_json(o)
elif isinstance(o, date):
return o.strftime("%b %-d, %Y")
else:
return flask.json.JSONEncoder.default(self, o)
|
[
"flask.json.JSONEncoder.default"
] |
[((3071, 3110), 'flask.json.JSONEncoder.default', 'flask.json.JSONEncoder.default', (['self', 'o'], {}), '(self, o)\n', (3101, 3110), False, 'import flask\n')]
|
# Copyright 2019 TheGreatGeek
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Sequence, Dict
from enum import Enum, auto
import re
class AtomType(Enum):
KEYWORD = auto()
STRING = auto()
class Atom:
def __init__(self, atom_type: AtomType, value):
self.atom_type = atom_type
self.value = value
def get_atom_type(self) -> AtomType:
return self.atom_type
def get_value(self):
return self.value
def __eq__(self, other):
if not isinstance(other, Atom):
return False
return self.atom_type == other.get_atom_type() and self.value == other.get_value()
def __str__(self):
return f"Atom({self.atom_type}, {self.value})"
class Statement:
def __init__(self, parsed_command: Sequence[Atom]):
self.atoms = tuple(parsed_command)
def get_atoms(self) -> Sequence[Atom]:
return self.atoms
def matches_pattern(self, pattern) -> bool:
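        # A pattern is a sequence of (AtomType, expected) pairs: keywords are
        # compared by exact value, strings by regex fullmatch.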
atoms = self.get_atoms()
if len(atoms) != len(pattern):
return False
for i, pattern_part in enumerate(pattern):
if pattern_part[0] == AtomType.KEYWORD and atoms[i].get_atom_type() == AtomType.KEYWORD:
if atoms[i].get_value() != pattern_part[1]:
return False
elif pattern_part[0] == AtomType.STRING and atoms[i].get_atom_type() == AtomType.STRING:
if pattern_part[1].fullmatch(atoms[i].get_value()) is None:
return False
return True
def __str__(self):
return str(tuple(map(str, self.atoms)))
class Paragraph:
def __init__(self, statements: Sequence[Statement]):
self.statements = tuple(statements)
def get_statements(self) -> Sequence[Statement]:
return self.statements
def html_escape_quotes(html: str) -> str:
return html.replace('"', '"').replace("'", ''')
def str_tag_params(tag_params: Dict[str, str]) -> str:
return ' '.join(map(lambda kv: '"{}"="{}"'.format(*map(html_escape_quotes, kv)), tag_params.items()))
class FormatDict(dict):
def __getitem__(self, item):
try:
return super().__getitem__(item)
except KeyError:
return "{" + str(item) + "}"
|
[
"enum.auto"
] |
[((683, 689), 'enum.auto', 'auto', ([], {}), '()\n', (687, 689), False, 'from enum import Enum, auto\n'), ((703, 709), 'enum.auto', 'auto', ([], {}), '()\n', (707, 709), False, 'from enum import Enum, auto\n')]
|
import numpy as np
import torch
from mmdet.core.bbox.builder import BBOX_CODERS
from mmdet.core.bbox.coder.base_bbox_coder import BaseBBoxCoder
@BBOX_CODERS.register_module()
class DeltaOBBCoder(BaseBBoxCoder):
def __init__(self,
target_means=(0., 0., 0., 0., 0.),
target_stds=(0.1, 0.1, 0.2, 0.2, 0.1),
obb_encode='thetaobb'):
super(BaseBBoxCoder, self).__init__()
self.means = target_means
self.stds = target_stds
self.obb_encode = obb_encode
def encode(self, obbs, gt_obbs):
assert obbs.size(0) == gt_obbs.size(0)
if self.obb_encode == 'thetaobb':
assert obbs.size(0) == gt_obbs.size(0)
encoded_obbs = thetaobb2delta(obbs, gt_obbs, self.means, self.stds)
else:
            raise RuntimeError('does not support the encode method: {}'.format(self.obb_encode))
return encoded_obbs
def decode(self,
obbs,
pred_obbs,
max_shape=None,
wh_ratio_clip=16 / 1000):
assert pred_obbs.size(0) == obbs.size(0)
if self.obb_encode == 'thetaobb':
decoded_obbs = delta2thetaobb(obbs, pred_obbs, self.means, self.stds,
max_shape, wh_ratio_clip)
else:
            raise RuntimeError('does not support the encode method: {}'.format(self.obb_encode))
return decoded_obbs
def thetaobb2delta(proposals, gt, means=(0., 0., 0., 0., 0.), stds=(0.1, 0.1, 0.2, 0.2, 0.1)):
# proposals: (x1, y1, x2, y2)
# gt: (cx, cy, w, h, theta)
assert proposals.size(0) == gt.size(0)
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0]
ph = proposals[..., 3] - proposals[..., 1]
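    # Proposals are axis-aligned and carry no orientation, so every proposal is
    # assigned a fixed angle of -pi/2.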
pa = np.ones(proposals.shape[0], dtype=np.int32) * (-np.pi / 2.0)
pa = torch.from_numpy(np.stack(pa)).float().to(proposals.device)
gx = gt[..., 0]
gy = gt[..., 1]
gw = gt[..., 2]
gh = gt[..., 3]
ga = gt[..., 4]
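    # Standard delta encoding: center offsets normalized by proposal size,
    # log-scale size ratios, and the raw angle difference.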
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
da = ga - pa
deltas = torch.stack([dx, dy, dw, dh, da], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def delta2thetaobb(rois,
deltas,
means=[0., 0., 0., 0., 0.],
stds=[0.1, 0.1, 0.2, 0.2, 0.1],
max_shape=None,
wh_ratio_clip=16 / 1000):
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 5)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 5)
denorm_deltas = deltas * stds + means
dx = denorm_deltas[:, 0::5]
dy = denorm_deltas[:, 1::5]
dw = denorm_deltas[:, 2::5]
dh = denorm_deltas[:, 3::5]
da = denorm_deltas[:, 4::5]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
pw = (rois[:, 2] - rois[:, 0]).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1]).unsqueeze(1).expand_as(dh)
pa = np.ones(rois.shape[0], dtype=np.int32) * (-np.pi / 2.0)
pa = torch.from_numpy(np.stack(pa)).float().to(rois.device).unsqueeze(1).expand_as(da)
gw = pw * dw.exp()
gh = ph * dh.exp()
gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx
gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy
ga = da + pa
if max_shape is not None:
gx = gx.clamp(min=0, max=max_shape[1])
gy = gy.clamp(min=0, max=max_shape[0])
gw = gw.clamp(min=0, max=max_shape[1])
gh = gh.clamp(min=0, max=max_shape[0])
thetaobbs = torch.stack([gx, gy, gw, gh, ga], dim=-1).view_as(deltas)
return thetaobbs
|
[
"numpy.stack",
"torch.stack",
"numpy.log",
"torch.addcmul",
"numpy.ones",
"torch.log",
"mmdet.core.bbox.builder.BBOX_CODERS.register_module"
] |
[((148, 177), 'mmdet.core.bbox.builder.BBOX_CODERS.register_module', 'BBOX_CODERS.register_module', ([], {}), '()\n', (175, 177), False, 'from mmdet.core.bbox.builder import BBOX_CODERS\n'), ((2220, 2238), 'torch.log', 'torch.log', (['(gw / pw)'], {}), '(gw / pw)\n', (2229, 2238), False, 'import torch\n'), ((2248, 2266), 'torch.log', 'torch.log', (['(gh / ph)'], {}), '(gh / ph)\n', (2257, 2266), False, 'import torch\n'), ((2298, 2339), 'torch.stack', 'torch.stack', (['[dx, dy, dw, dh, da]'], {'dim': '(-1)'}), '([dx, dy, dw, dh, da], dim=-1)\n', (2309, 2339), False, 'import torch\n'), ((3696, 3724), 'torch.addcmul', 'torch.addcmul', (['px', '(1)', 'pw', 'dx'], {}), '(px, 1, pw, dx)\n', (3709, 3724), False, 'import torch\n'), ((3755, 3783), 'torch.addcmul', 'torch.addcmul', (['py', '(1)', 'ph', 'dy'], {}), '(py, 1, ph, dy)\n', (3768, 3783), False, 'import torch\n'), ((1931, 1974), 'numpy.ones', 'np.ones', (['proposals.shape[0]'], {'dtype': 'np.int32'}), '(proposals.shape[0], dtype=np.int32)\n', (1938, 1974), True, 'import numpy as np\n'), ((3093, 3114), 'numpy.log', 'np.log', (['wh_ratio_clip'], {}), '(wh_ratio_clip)\n', (3099, 3114), True, 'import numpy as np\n'), ((3489, 3527), 'numpy.ones', 'np.ones', (['rois.shape[0]'], {'dtype': 'np.int32'}), '(rois.shape[0], dtype=np.int32)\n', (3496, 3527), True, 'import numpy as np\n'), ((4057, 4098), 'torch.stack', 'torch.stack', (['[gx, gy, gw, gh, ga]'], {'dim': '(-1)'}), '([gx, gy, gw, gh, ga], dim=-1)\n', (4068, 4098), False, 'import torch\n'), ((2018, 2030), 'numpy.stack', 'np.stack', (['pa'], {}), '(pa)\n', (2026, 2030), True, 'import numpy as np\n'), ((3571, 3583), 'numpy.stack', 'np.stack', (['pa'], {}), '(pa)\n', (3579, 3583), True, 'import numpy as np\n')]
|
from threading import Thread
n = 0
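# The shared counter n is updated by multiple threads without a lock, so the
# print order and intermediate values are nondeterministic.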
def add1(N):
# Global n and "n=0" above are the same variable
# global is needed to change n from the global scope
global n
while n < N:
n = n + 1
print('Add1: %d' % n)
class Adder(Thread):
def __init__(self, changeN, N):
Thread.__init__(self)
self.changeN = changeN
self.N = N
def run(self):
global n
while n < self.N:
n = n + self.changeN
print('Adder Class %d: %d' % (self.changeN, n))
maxCount = 100
a = Thread(target=add1, args=(maxCount,))
a2 = Adder(1, maxCount)
s2 = Adder(-1, maxCount)
# start calls for the Threads own "run()" method
a2.start()
a.start()
s2.start()
|
[
"threading.Thread",
"threading.Thread.__init__"
] |
[((557, 594), 'threading.Thread', 'Thread', ([], {'target': 'add1', 'args': '(maxCount,)'}), '(target=add1, args=(maxCount,))\n', (563, 594), False, 'from threading import Thread\n'), ((307, 328), 'threading.Thread.__init__', 'Thread.__init__', (['self'], {}), '(self)\n', (322, 328), False, 'from threading import Thread\n')]
|
import argparse
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.plugins import DDPPlugin
from hparams import cfg
from torch.utils.data import DataLoader, random_split
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='TRAIN OCR-ENGINE MODULE')
parser.add_argument('-m', '--module', type=str,
choices=['detector', 'recognizer'],
help='module to train')
parser.add_argument('-v', '--version', type=int, default=0,
help='version number')
parser.add_argument('-d', '--data_path', type=str, required=True,
help='generated data (.pkl) path')
parser.add_argument('-bs', '--batch_size', type=int, default=4,
help='batch size')
parser.add_argument('-lr', '--learning_rate', type=float, default=5e-5,
help='learning rate for training [detector: 5e-5, recognizer: 1.0]')
parser.add_argument('-e', '--max_epoch', type=int, default=100,
help='max epoch')
parser.add_argument('-nw', '--num_workers', type=int, default=4,
help='number of workers for calling data')
parser.add_argument('-rt', '--resume_training', type=str, default=None,
help='resume from certain checkpoint file')
args = parser.parse_args()
cfg.lr = args.learning_rate
cfg.data_path = args.data_path
print('configs:', cfg)
if args.module == 'detector':
from models.craft_pl import CRAFT
from datasets.craft_dataset import DatasetSYNTH
dataset = DatasetSYNTH(cfg)
model = CRAFT(cfg)
collate = None
else:
from models.deepTextRecog_pl import DeepTextRecog
from datasets.deepTextRecog_dataset import DatasetSYNTH, AlignCollate
dataset = DatasetSYNTH(cfg)
model = DeepTextRecog(cfg, dataset.tokens)
collate = AlignCollate(cfg)
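    # random_split holds out 10% of the generated samples for validation.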
trainSize = int(len(dataset)*0.9)
trainDataset, validDataset = random_split(dataset, [trainSize, len(dataset)-trainSize])
trainDataloader = DataLoader(trainDataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
collate_fn=collate)
validDataloader = DataLoader(validDataset,
batch_size=args.batch_size,
num_workers=args.num_workers,
collate_fn=collate)
logger = TensorBoardLogger('tb_logs', name=args.module,
version=args.version, default_hp_metric=False)
# lr_callback = pl.callbacks.LearningRateMonitor(logging_interval='step')
if args.module == 'detector':
filename='checkpoints-{epoch:02d}-{fscore:.2f}'
else:
filename='checkpoints-{epoch:02d}-{acc:.2f}'
ckpt_callback = pl.callbacks.ModelCheckpoint(
monitor='fscore' if args.module == 'detector' else 'acc',
dirpath=f'checkpoints/version_{args.version}',
filename=filename,
save_top_k=3,
mode='max',
)
n_gpu = torch.cuda.device_count()
trainer = pl.Trainer(gpus=n_gpu if args.module=='detector' else 1,
max_epochs=args.max_epoch, logger=logger,
num_sanity_val_steps=1, accelerator='ddp2',
num_nodes=1 if args.module=='detector' else n_gpu,
callbacks=[ckpt_callback],
plugins=DDPPlugin(find_unused_parameters=False),
gradient_clip_val=5.0,
resume_from_checkpoint=args.resume_training)
trainer.fit(model, train_dataloader=trainDataloader, val_dataloaders=validDataloader)
|
[
"pytorch_lightning.callbacks.ModelCheckpoint",
"argparse.ArgumentParser",
"torch.utils.data.DataLoader",
"datasets.deepTextRecog_dataset.DatasetSYNTH",
"pytorch_lightning.plugins.DDPPlugin",
"models.craft_pl.CRAFT",
"torch.cuda.device_count",
"datasets.deepTextRecog_dataset.AlignCollate",
"pytorch_lightning.loggers.TensorBoardLogger",
"models.deepTextRecog_pl.DeepTextRecog"
] |
[((284, 346), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""TRAIN OCR-ENGINE MODULE"""'}), "(description='TRAIN OCR-ENGINE MODULE')\n", (307, 346), False, 'import argparse\n'), ((2194, 2301), 'torch.utils.data.DataLoader', 'DataLoader', (['trainDataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'collate_fn': 'collate'}), '(trainDataset, batch_size=args.batch_size, num_workers=args.\n num_workers, collate_fn=collate)\n', (2204, 2301), False, 'from torch.utils.data import DataLoader, random_split\n'), ((2418, 2525), 'torch.utils.data.DataLoader', 'DataLoader', (['validDataset'], {'batch_size': 'args.batch_size', 'num_workers': 'args.num_workers', 'collate_fn': 'collate'}), '(validDataset, batch_size=args.batch_size, num_workers=args.\n num_workers, collate_fn=collate)\n', (2428, 2525), False, 'from torch.utils.data import DataLoader, random_split\n'), ((2634, 2731), 'pytorch_lightning.loggers.TensorBoardLogger', 'TensorBoardLogger', (['"""tb_logs"""'], {'name': 'args.module', 'version': 'args.version', 'default_hp_metric': '(False)'}), "('tb_logs', name=args.module, version=args.version,\n default_hp_metric=False)\n", (2651, 2731), False, 'from pytorch_lightning.loggers import TensorBoardLogger\n'), ((3020, 3206), 'pytorch_lightning.callbacks.ModelCheckpoint', 'pl.callbacks.ModelCheckpoint', ([], {'monitor': "('fscore' if args.module == 'detector' else 'acc')", 'dirpath': 'f"""checkpoints/version_{args.version}"""', 'filename': 'filename', 'save_top_k': '(3)', 'mode': '"""max"""'}), "(monitor='fscore' if args.module == 'detector' else\n 'acc', dirpath=f'checkpoints/version_{args.version}', filename=filename,\n save_top_k=3, mode='max')\n", (3048, 3206), True, 'import pytorch_lightning as pl\n'), ((3259, 3284), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3282, 3284), False, 'import torch\n'), ((1704, 1721), 'datasets.deepTextRecog_dataset.DatasetSYNTH', 'DatasetSYNTH', (['cfg'], {}), '(cfg)\n', (1716, 1721), False, 'from datasets.deepTextRecog_dataset import DatasetSYNTH, AlignCollate\n'), ((1738, 1748), 'models.craft_pl.CRAFT', 'CRAFT', (['cfg'], {}), '(cfg)\n', (1743, 1748), False, 'from models.craft_pl import CRAFT\n'), ((1936, 1953), 'datasets.deepTextRecog_dataset.DatasetSYNTH', 'DatasetSYNTH', (['cfg'], {}), '(cfg)\n', (1948, 1953), False, 'from datasets.deepTextRecog_dataset import DatasetSYNTH, AlignCollate\n'), ((1970, 2004), 'models.deepTextRecog_pl.DeepTextRecog', 'DeepTextRecog', (['cfg', 'dataset.tokens'], {}), '(cfg, dataset.tokens)\n', (1983, 2004), False, 'from models.deepTextRecog_pl import DeepTextRecog\n'), ((2023, 2040), 'datasets.deepTextRecog_dataset.AlignCollate', 'AlignCollate', (['cfg'], {}), '(cfg)\n', (2035, 2040), False, 'from datasets.deepTextRecog_dataset import DatasetSYNTH, AlignCollate\n'), ((3654, 3693), 'pytorch_lightning.plugins.DDPPlugin', 'DDPPlugin', ([], {'find_unused_parameters': '(False)'}), '(find_unused_parameters=False)\n', (3663, 3693), False, 'from pytorch_lightning.plugins import DDPPlugin\n')]
|
import turtle
t = turtle.Pen()
turtle.bgcolor('black')
colors = ['red', 'yellow', 'blue', 'green']
t.speed(0)
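# Draw 100 circles of growing radius, rotating 1 degree per pass to form a spiral.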
for x in range(100):
t.pencolor(colors[x % 4])
t.circle(x)
t.left(1)
turtle.done()
|
[
"turtle.Pen",
"turtle.done",
"turtle.bgcolor"
] |
[((19, 31), 'turtle.Pen', 'turtle.Pen', ([], {}), '()\n', (29, 31), False, 'import turtle\n'), ((32, 55), 'turtle.bgcolor', 'turtle.bgcolor', (['"""black"""'], {}), "('black')\n", (46, 55), False, 'import turtle\n'), ((193, 206), 'turtle.done', 'turtle.done', ([], {}), '()\n', (204, 206), False, 'import turtle\n')]
|
# -*- coding: utf-8 -*-
import bs4
import urllib.request
import urllib.parse
import os
import sys
import json
from typing import Tuple
def formating_URL(query: str) -> Tuple[str, str]:
"""与えられたクエリから検索用のURLを作成し,返す
Args:
query (str): クエリ
Returns:
Tuple[str, str]: 検索用URL, クエリ
"""
query = query.split()
query = '+'.join(query)
url = "https://www.google.co.in/search?q="+urllib.parse.quote_plus(
query, encoding='utf-8'
)+"&source=lnms&tbm=isch"
return (url, query)
def get_soup(url: str) -> bs4.BeautifulSoup:
"""BeautifulSoupを使ってリクエストを飛ばし,データを返す.
Args:
url (str): 検索URL
Returns:
bs4.BeautifulSoup: [description]
"""
header = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"
}
request = urllib.request.urlopen(
urllib.request.Request(url, headers=header)
)
soup = bs4.BeautifulSoup(request, 'html.parser')
return soup
def check_Dir(query: str):
"""検索用に新しくディレクトリを作成する.(もしすでに存在しているなら,そのまま)
Args:
query (str):クエリ
Returns:
[type]: ディレクトリパス
"""
DIR = "Pictures"
if not os.path.exists(DIR):
os.mkdir(DIR)
DIR = os.path.join(DIR, query.split()[0])
if not os.path.exists(DIR):
os.mkdir(DIR)
return DIR
def crawring(query: str):
"""クローリングを実行する
Args:
query (str): クエリ
"""
link_list = []
url, query = formating_URL(query)
print(url)
soup = get_soup(url)
DIR = check_Dir(query)
label = str(len([i for i in os.listdir('./Pictures/')]) + 1)
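    # In Google's older image-results markup, each result's metadata is JSON
    # embedded in div.rg_meta; "ou" is the original image URL, "ity" its file type.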
for a in soup.find_all("div", {"class": "rg_meta"}):
link, Type = json.loads(a.text)["ou"], json.loads(a.text)["ity"]
link_list.append((link, Type))
print("Total Picture is", len(link_list))
for _, (img, Type) in enumerate(link_list):
try:
img_data = urllib.request.urlopen(img).read()
cntr = len([i for i in os.listdir(DIR) if label in i]) + 1
if len(Type) == 0:
f = open(os.path.join(DIR, label + "_" + str(cntr)+".jpg"), 'wb')
else:
f = open(os.path.join(DIR, label + "_" + str(cntr)+"."+Type), 'wb')
f.write(img_data)
f.close()
except Exception as e:
print("IMG loading failed : "+img)
print(e)
print("FINISH")
def print_usage():
"""
    Called when the command-line arguments are invalid.
"""
print("Usage: %s Query" % __file__)
print("Queryに検索したいキーワードを入力してください")
print("ちなみに日本語は不可.")
def main():
if len(sys.argv) != 2:
print_usage()
sys.exit()
query = sys.argv[1]
crawring(query)
if __name__ == "__main__":
main()
|
[
"os.mkdir",
"json.loads",
"os.path.exists",
"bs4.BeautifulSoup",
"os.listdir",
"sys.exit"
] |
[((985, 1026), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['request', '"""html.parser"""'], {}), "(request, 'html.parser')\n", (1002, 1026), False, 'import bs4\n'), ((1232, 1251), 'os.path.exists', 'os.path.exists', (['DIR'], {}), '(DIR)\n', (1246, 1251), False, 'import os\n'), ((1261, 1274), 'os.mkdir', 'os.mkdir', (['DIR'], {}), '(DIR)\n', (1269, 1274), False, 'import os\n'), ((1333, 1352), 'os.path.exists', 'os.path.exists', (['DIR'], {}), '(DIR)\n', (1347, 1352), False, 'import os\n'), ((1362, 1375), 'os.mkdir', 'os.mkdir', (['DIR'], {}), '(DIR)\n', (1370, 1375), False, 'import os\n'), ((2707, 2717), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2715, 2717), False, 'import sys\n'), ((1750, 1768), 'json.loads', 'json.loads', (['a.text'], {}), '(a.text)\n', (1760, 1768), False, 'import json\n'), ((1776, 1794), 'json.loads', 'json.loads', (['a.text'], {}), '(a.text)\n', (1786, 1794), False, 'import json\n'), ((1638, 1663), 'os.listdir', 'os.listdir', (['"""./Pictures/"""'], {}), "('./Pictures/')\n", (1648, 1663), False, 'import os\n'), ((2043, 2058), 'os.listdir', 'os.listdir', (['DIR'], {}), '(DIR)\n', (2053, 2058), False, 'import os\n')]
|
import json,random
# Unused; couldn't be arsed to remove it, it has too much emotional value to me.
usedNames = []
with open('first-names.json', 'r', encoding = "utf-8-sig") as outfile:
data = json.load(outfile)
class Critter(object):
"""a virtual pet"""
total = 0
@staticmethod
def status():
print("\nThe total number of critters is ", Critter.total)
def __init__(self, name, colour):
print("A critter is born, ", name)
self.name = name
self.colour = colour
Critter.total += 1
#Main
print("Accessing the class attribute Critter.total: ", Critter.total)
print("creating critters: ")
for i in range(len(data)):
# Gets a random name from a json file.
index = random.randrange(len(data))
crit = Critter(data[index], "Red")
del data[index]
Critter.status()
input("press any key to exit...")
|
[
"json.load"
] |
[((198, 216), 'json.load', 'json.load', (['outfile'], {}), '(outfile)\n', (207, 216), False, 'import json, random\n')]
|
from dataclasses import dataclass
from typing import Callable, Any
from rxbp.mixins.flowablemixin import FlowableMixin
from rxbp.multicast.observables.flatmergenobackpressureobservable import FlatMergeNoBackpressureObservable
from rxbp.scheduler import Scheduler
from rxbp.subscriber import Subscriber
from rxbp.subscription import Subscription
@dataclass
class FlatMergeNoBackpressureFlowable(FlowableMixin):
source: FlowableMixin
selector: Callable[[Any], FlowableMixin]
subscribe_scheduler: Scheduler
def unsafe_subscribe(self, subscriber: Subscriber) -> Subscription:
assert self.subscribe_scheduler == subscriber.subscribe_scheduler
subscription = self.source.unsafe_subscribe(subscriber=subscriber)
def observable_selector(val: Any):
flowable = self.selector(val)
subscription = flowable.unsafe_subscribe(subscriber=subscriber)
return subscription.observable
return subscription.copy(observable=FlatMergeNoBackpressureObservable(
source=subscription.observable,
selector=observable_selector,
scheduler=subscriber.scheduler,
subscribe_scheduler=self.subscribe_scheduler, # todo: why not taken from subscriber?
))
|
[
"rxbp.multicast.observables.flatmergenobackpressureobservable.FlatMergeNoBackpressureObservable"
] |
[((992, 1174), 'rxbp.multicast.observables.flatmergenobackpressureobservable.FlatMergeNoBackpressureObservable', 'FlatMergeNoBackpressureObservable', ([], {'source': 'subscription.observable', 'selector': 'observable_selector', 'scheduler': 'subscriber.scheduler', 'subscribe_scheduler': 'self.subscribe_scheduler'}), '(source=subscription.observable, selector=\n observable_selector, scheduler=subscriber.scheduler,\n subscribe_scheduler=self.subscribe_scheduler)\n', (1025, 1174), False, 'from rxbp.multicast.observables.flatmergenobackpressureobservable import FlatMergeNoBackpressureObservable\n')]
|
from .Player import Player
from .TileCollection import TileCollection
from .Center import Center
from .TileColor import TileColor
from .AzulAction import AzulAction
import random
import numpy as np
import math
class AzulBoard():
def __init__(self):
self.player1 = Player(1)
self.player2 = Player(-1)
self.bag = TileCollection(20, 20, 20, 20, 20, 0)
self.lid = TileCollection()
self.center = Center(self.bag, self.lid)
self.roundFinished = False
self.playerIDWhoHadWhiteLastRound = 0
def display(self):
print("---------------------------------------------------------")
print("Bag:", self.bag.toString())
self.lid.display()
self.center.display()
print()
self.player1.display()
print()
self.player2.display()
print("---------------------------------------------------------")
def toString(self):
return self.bag.toString() + self.lid.toString() + self.center.toString() + self.player1.toString() + self.player2.toString()
def fillWallsRandomly(self, prob: float):
self.player1.wall.cells = self.getValidRandomWall(prob)
self.player2.wall.cells = self.getValidRandomWall(prob)
def getValidRandomWall(self, prob: float):
valid = False
while not valid:
numpyWall = np.random.choice(a=[True, False], size=(5, 5), p = [prob, 1-prob])
valid = True
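            # A row of all True is a completed wall row, which would end the
            # game immediately, so reject such walls.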
for line in numpyWall:
if line.all():
valid = False
return numpyWall.tolist()
def fillPlayerLinesRandomly(self, bag, lineHasTilesProb: float):
self.player1.playerLines.lines = self.getValidRandomPlayerLines(bag, self.player1, lineHasTilesProb)
self.player2.playerLines.lines = self.getValidRandomPlayerLines(bag, self.player2, lineHasTilesProb)
def getValidRandomPlayerLines(self, bag, player, lineHasTilesProb: float):
lines = []
for i in range(5):
addSomeTiles = np.random.uniform(0, 1) < lineHasTilesProb
if addSomeTiles:
validColors = player.wall.getValidColorsForRow(i)
color = np.random.choice(validColors)
number = min(np.random.randint(0, i + 1), bag.getCountOfColor(color)) # Don't remove tiles we don't have in the bag.
if number == 0:
color = None
else:
bag.removeTiles(color, number)
else:
color = None
number = 0
lines.append([i + 1, color, number])
return lines
def getNextState(self, player, actionInt):
action = AzulAction.getActionFromInt(actionInt, player)
return self.executeAction(action)
def getPlayerFromAction(self, action: AzulAction) -> Player:
if action.playerID == 1:
return self.player1
else:
return self.player2
def executeAction(self, action: AzulAction):
actionPlayer = self.getPlayerFromAction(action)
# Manipulate tiles from center
tilesInHand = self.center.takeTiles(action)
# Place tiles on player board
overflow = actionPlayer.placeTilesFromAction(action, tilesInHand)
# Potentially put overflow in lid
self.lid.addTiles(action.color, overflow.getCountOfColor(action.color))
if self.shouldFinishRound():
self.finishRound()
return self
def shouldFinishRound(self) -> bool:
for factory in self.center.factories:
if factory.tiles.getCount() > 0:
return False
if self.center.center.getCount() > 0:
return False
return True
def finishRound(self):
self.roundFinished = True
self.playerIDWhoHadWhiteLastRound = 0 # Reset
# Track if player1 had white tile
if (self.player1.hasWhiteTile):
self.playerIDWhoHadWhiteLastRound = self.player1.id
# move tiles to bag and lid
tilesToLid = self.player1.finishRound()
tilesToLid.moveAllTiles(self.lid)
if (self.player2.hasWhiteTile):
self.playerIDWhoHadWhiteLastRound = self.player2.id
tilesToLid = self.player2.finishRound()
tilesToLid.moveAllTiles(self.lid)
def setupNextRound(self):
self.roundFinished = False
self.center = Center(self.bag, self.lid)
self.player1.hasWhiteTile = False
self.player2.hasWhiteTile = False
def isGameFinished(self):
return self.player1.wall.hasFinishedRow() or self.player2.wall.hasFinishedRow()
def getAllTiles(self):
# Created as a sanity check. Make sure there are 20/20/20/20/20/1 tiles in the game at all times.
sumTiles = TileCollection()
sumTiles.addTilesFromCollection(self.bag)
sumTiles.addTilesFromCollection(self.lid)
sumTiles.addTilesFromCollection(self.player1.getAllTiles())
sumTiles.addTilesFromCollection(self.player2.getAllTiles())
sumTiles.addTilesFromCollection(self.center.getAllTiles())
return sumTiles
|
[
"numpy.random.uniform",
"numpy.random.randint",
"numpy.random.choice"
] |
[((1423, 1489), 'numpy.random.choice', 'np.random.choice', ([], {'a': '[True, False]', 'size': '(5, 5)', 'p': '[prob, 1 - prob]'}), '(a=[True, False], size=(5, 5), p=[prob, 1 - prob])\n', (1439, 1489), True, 'import numpy as np\n'), ((2122, 2145), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2139, 2145), True, 'import numpy as np\n'), ((2287, 2316), 'numpy.random.choice', 'np.random.choice', (['validColors'], {}), '(validColors)\n', (2303, 2316), True, 'import numpy as np\n'), ((2347, 2374), 'numpy.random.randint', 'np.random.randint', (['(0)', '(i + 1)'], {}), '(0, i + 1)\n', (2364, 2374), True, 'import numpy as np\n')]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
from azext_devops.dev.common.exception_handler import azure_devops_exception_handler
from ._format import (transform_build_table_output,
transform_builds_table_output,
transform_pipeline_run_table_output,
transform_pipeline_or_run_table_output,
transform_pipeline_runs_table_output,
transform_pipeline_table_output,
transform_pipelines_table_output,
transform_build_tags_output,
transform_definition_table_output,
transform_definitions_table_output,
transform_releases_table_output,
transform_release_table_output,
transform_release_definitions_table_output,
transform_release_definition_table_output,
transform_runs_artifact_table_output,
transform_pipelines_pools_table_output,
transform_pipelines_pool_table_output,
transform_pipelines_agents_table_output,
transform_pipelines_agent_table_output,
transform_pipelines_queues_table_output,
transform_pipelines_queue_table_output,
transform_pipelines_variable_groups_table_output,
transform_pipelines_variable_group_table_output,
transform_pipelines_variables_table_output,
transform_pipelines_var_group_variables_table_output)
buildOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.build#{}',
exception_handler=azure_devops_exception_handler
)
buildDefOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.build_definition#{}',
exception_handler=azure_devops_exception_handler
)
buildTaskOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.task#{}',
exception_handler=azure_devops_exception_handler
)
releaseOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.release#{}',
exception_handler=azure_devops_exception_handler
)
releaseDefinitionOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.release_definition#{}',
exception_handler=azure_devops_exception_handler
)
pipelinesOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.pipeline#{}',
exception_handler=azure_devops_exception_handler
)
pipelineCreateOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.pipeline_create#{}',
exception_handler=azure_devops_exception_handler
)
pipelinesRunOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.pipeline_run#{}',
exception_handler=azure_devops_exception_handler
)
pipelineRunArtifactsOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.runs_artifacts#{}',
exception_handler=azure_devops_exception_handler
)
pipelineAgentPoolQueueOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.agent_pool_queue#{}',
exception_handler=azure_devops_exception_handler
)
pipelineVariableGroupOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.variable_group#{}',
exception_handler=azure_devops_exception_handler
)
pipelineVariablesOps = CliCommandType(
operations_tmpl='azext_devops.dev.pipelines.pipeline_variables#{}',
exception_handler=azure_devops_exception_handler
)
# pylint: disable=too-many-statements
def load_build_commands(self, _):
with self.command_group('pipelines', command_type=pipelineCreateOps) as g:
g.command('create', 'pipeline_create', table_transformer=transform_pipeline_or_run_table_output)
g.command('update', 'pipeline_update', table_transformer=transform_pipeline_table_output)
with self.command_group('pipelines', command_type=pipelinesOps) as g:
g.command('list', 'pipeline_list', table_transformer=transform_pipelines_table_output)
g.command('show', 'pipeline_show', table_transformer=transform_pipeline_table_output)
g.command('delete', 'pipeline_delete', confirmation='Are you sure you want to delete this pipeline?')
g.command('run', 'pipeline_run', table_transformer=transform_pipeline_run_table_output)
with self.command_group('pipelines build', command_type=buildOps) as g:
# basic build commands
g.command('list', 'build_list', table_transformer=transform_builds_table_output)
g.command('queue', 'build_queue', table_transformer=transform_build_table_output)
g.command('show', 'build_show', table_transformer=transform_build_table_output)
with self.command_group('pipelines build tag', command_type=buildOps) as g:
# basic build tag commands
g.command('list', 'get_build_tags', table_transformer=transform_build_tags_output)
g.command('add', 'add_build_tags', table_transformer=transform_build_tags_output)
g.command('delete', 'delete_build_tag', table_transformer=transform_build_tags_output)
with self.command_group('pipelines build definition', command_type=buildDefOps) as g:
# basic build definition commands
g.command('list', 'build_definition_list', table_transformer=transform_definitions_table_output)
g.command('show', 'build_definition_show', table_transformer=transform_definition_table_output)
with self.command_group('pipelines release', command_type=releaseOps) as g:
# basic release commands
g.command('list', 'release_list', table_transformer=transform_releases_table_output)
g.command('create', 'release_create', table_transformer=transform_release_table_output)
g.command('show', 'release_show', table_transformer=transform_release_table_output)
with self.command_group('pipelines release definition', command_type=releaseDefinitionOps) as g:
# basic release commands
g.command('list', 'release_definition_list', table_transformer=transform_release_definitions_table_output)
g.command('show', 'release_definition_show', table_transformer=transform_release_definition_table_output)
with self.command_group('pipelines runs artifact', command_type=pipelineRunArtifactsOps) as g:
g.command('download', 'run_artifact_download')
g.command('list', 'run_artifact_list', table_transformer=transform_runs_artifact_table_output)
g.command('upload', 'run_artifact_upload')
with self.command_group('pipelines runs tag', command_type=pipelinesRunOps) as g:
g.command('add', 'pipeline_run_add_tag', table_transformer=transform_build_tags_output)
g.command('list', 'pipeline_run_get_tags', table_transformer=transform_build_tags_output)
g.command('delete', 'pipeline_run_delete_tag', table_transformer=transform_build_tags_output)
with self.command_group('pipelines runs', command_type=pipelinesRunOps) as g:
g.command('list', 'pipeline_run_list', table_transformer=transform_pipeline_runs_table_output)
g.command('show', 'pipeline_run_show', table_transformer=transform_pipeline_run_table_output)
with self.command_group('pipelines pool', command_type=pipelineAgentPoolQueueOps) as g:
g.command('list', 'list_pools', table_transformer=transform_pipelines_pools_table_output)
g.command('show', 'show_pool', table_transformer=transform_pipelines_pool_table_output)
with self.command_group('pipelines agent', command_type=pipelineAgentPoolQueueOps) as g:
g.command('list', 'list_agents', table_transformer=transform_pipelines_agents_table_output)
g.command('show', 'show_agent', table_transformer=transform_pipelines_agent_table_output)
with self.command_group('pipelines queue', command_type=pipelineAgentPoolQueueOps) as g:
g.command('list', 'list_queues', table_transformer=transform_pipelines_queues_table_output)
g.command('show', 'show_queue', table_transformer=transform_pipelines_queue_table_output)
with self.command_group('pipelines variable-group', command_type=pipelineVariableGroupOps) as g:
g.command('create', 'variable_group_create', table_transformer=transform_pipelines_variable_group_table_output)
g.command('show', 'variable_group_show', table_transformer=transform_pipelines_variable_group_table_output)
g.command('list', 'variable_group_list', table_transformer=transform_pipelines_variable_groups_table_output)
g.command('update', 'variable_group_update', table_transformer=transform_pipelines_variable_group_table_output)
g.command('delete', 'variable_group_delete',
confirmation='Are you sure you want to delete this variable group?')
with self.command_group('pipelines variable-group variable', command_type=pipelineVariableGroupOps) as g:
g.command('create', 'variable_group_variable_add',
table_transformer=transform_pipelines_var_group_variables_table_output)
g.command('list', 'variable_group_variable_list',
table_transformer=transform_pipelines_var_group_variables_table_output)
g.command('update', 'variable_group_variable_update',
table_transformer=transform_pipelines_var_group_variables_table_output)
g.command('delete', 'variable_group_variable_delete',
confirmation='Are you sure you want to delete this variable?')
with self.command_group('pipelines variable', command_type=pipelineVariablesOps) as g:
g.command('create', 'pipeline_variable_add', table_transformer=transform_pipelines_variables_table_output)
g.command('update', 'pipeline_variable_update', table_transformer=transform_pipelines_variables_table_output)
g.command('list', 'pipeline_variable_list', table_transformer=transform_pipelines_variables_table_output)
g.command('delete', 'pipeline_variable_delete',
confirmation='Are you sure you want to delete this variable?')
|
[
"azure.cli.core.commands.CliCommandType"
] |
[((2015, 2138), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.build#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl='azext_devops.dev.pipelines.build#{}',\n    exception_handler=azure_devops_exception_handler)\n", (2029, 2138), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2160, 2300), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.build_definition#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.build_definition#{}', exception_handler=\n    azure_devops_exception_handler)\n", (2174, 2300), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2317, 2439), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.task#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl='azext_devops.dev.pipelines.task#{}',\n    exception_handler=azure_devops_exception_handler)\n", (2331, 2439), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2460, 2585), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.release#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl='azext_devops.dev.pipelines.release#{}',\n    exception_handler=azure_devops_exception_handler)\n", (2474, 2585), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2616, 2758), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.release_definition#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.release_definition#{}', exception_handler=\n    azure_devops_exception_handler)\n", (2630, 2758), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2775, 2901), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.pipeline#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl='azext_devops.dev.pipelines.pipeline#{}',\n    exception_handler=azure_devops_exception_handler)\n", (2789, 2901), False, 'from azure.cli.core.commands import CliCommandType\n'), ((2929, 3068), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.pipeline_create#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.pipeline_create#{}', exception_handler=\n    azure_devops_exception_handler)\n", (2943, 3068), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3088, 3218), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.pipeline_run#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl='azext_devops.dev.pipelines.pipeline_run#{}',\n    exception_handler=azure_devops_exception_handler)\n", (3102, 3218), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3252, 3390), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.runs_artifacts#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.runs_artifacts#{}', exception_handler=\n    azure_devops_exception_handler)\n", (3266, 3390), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3420, 3560), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.agent_pool_queue#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.agent_pool_queue#{}', exception_handler=\n    azure_devops_exception_handler)\n", (3434, 3560), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3589, 3727), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.variable_group#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.variable_group#{}', exception_handler=\n    azure_devops_exception_handler)\n", (3603, 3727), False, 'from azure.cli.core.commands import CliCommandType\n'), ((3752, 3894), 'azure.cli.core.commands.CliCommandType', 'CliCommandType', ([], {'operations_tmpl': '"""azext_devops.dev.pipelines.pipeline_variables#{}"""', 'exception_handler': 'azure_devops_exception_handler'}), "(operations_tmpl=\n    'azext_devops.dev.pipelines.pipeline_variables#{}', exception_handler=\n    azure_devops_exception_handler)\n", (3766, 3894), False, 'from azure.cli.core.commands import CliCommandType\n')]
|
# This function an HTTP starter function for Durable Functions.
# Before running this sample, please:
# - create a Durable orchestration function
# - create a Durable activity function (default name is "Hello")
# - add azure-functions-durable to requirements.txt
# - run pip install -r requirements.txt
import logging
import azure.functions as func
import azure.durable_functions as df
async def main(req: func.HttpRequest, starter: str) -> func.HttpResponse:
client = df.DurableOrchestrationClient(starter)
instance_id = await client.start_new(req.route_params["functionName"], None, None)
logging.info(f"Started orchestration with ID = '{instance_id}'.")
return client.create_check_status_response(req, instance_id)
|
[
"azure.durable_functions.DurableOrchestrationClient",
"logging.info"
] |
[((477, 515), 'azure.durable_functions.DurableOrchestrationClient', 'df.DurableOrchestrationClient', (['starter'], {}), '(starter)\n', (506, 515), True, 'import azure.durable_functions as df\n'), ((608, 673), 'logging.info', 'logging.info', (['f"""Started orchestration with ID = \'{instance_id}\'."""'], {}), '(f"Started orchestration with ID = \'{instance_id}\'.")\n', (620, 673), False, 'import logging\n')]
|
# Uses a learnable embedding for the countries which is fed as input to both encoders (at the respective dense layers).
# Copyright 2020 (c) Cognizant Digital Business, Evolutionary AI. All rights reserved. Issued under the Apache 2.0 License.
import os
import urllib.request
# Suppress noisy Tensorflow debug logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# noinspection PyPep8Naming
import keras.backend as K
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from keras.constraints import Constraint
from keras.layers import Concatenate
from keras.layers import Dense
from keras.layers import Embedding
from keras.layers import Input
from keras.layers import LSTM
from keras.layers import Reshape
from keras.layers import Lambda
from keras.models import Model
import ongoing.predictors.base as base
from ongoing.predictors.base import BasePredictor
NB_LOOKBACK_DAYS = 21
NB_TEST_DAYS = 14
WINDOW_SIZE = 7
NUM_EPOCHS = 1000
LSTM_SIZE = 32
EMBED_SIZE = 4
NPI_DELAY = 0
class Positive(Constraint):
def __call__(self, w):
return K.abs(w)
# Functions to be used for lambda layers in model
def _combine_r_and_d(x):
r, d = x
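    # The action encoder's reduction d (sigmoid output, so 0 < d < 1) scales
    # down the context encoder's growth estimate r.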
return r * (1. - d)
class geoLSTMPredictor(BasePredictor):
"""
A class that computes a fitness for Prescriptor candidates.
"""
def __init__(self, path_to_model_weights=None, path_to_geos=None,
embed_size=EMBED_SIZE, lstm_size=LSTM_SIZE, nb_lookback_days=NB_LOOKBACK_DAYS, nb_test_days=NB_TEST_DAYS,
window_size=WINDOW_SIZE, npi_delay=NPI_DELAY, num_epochs=NUM_EPOCHS, seed=base.SEED):
super().__init__(seed=seed)
self.embed_size = embed_size
self.lstm_size = lstm_size
self.nb_lookback_days = nb_lookback_days
self.nb_test_days = nb_test_days
self.window_size = window_size
self.npi_delay = npi_delay
self.num_epochs = num_epochs
if path_to_model_weights and path_to_geos:
# Load model weights
nb_context = 2 # New cases rate and new deaths rate are used as context
nb_action = len(base.NPI_COLUMNS)
with open(path_to_geos, 'r') as f:
self.geos = [g.rstrip() for g in f.readlines()]
self.predictor, _ = self._construct_model(nb_context=nb_context,
nb_action=nb_action,
embed_size=self.embed_size,
lstm_size=self.lstm_size,
nb_lookback_days=self.nb_lookback_days)
self.predictor.load_weights(path_to_model_weights)
self.country_samples = None # will be set when fit() or predict() are called
def predict(self, data=None):
if self.train_df is None:
raise Exception("train_df must be defined before calling predict()")
if data is None:
data = self.test_df
if self.country_samples is None:
self.country_samples = self._create_country_samples(self.train_df,
list(self.train_df.GeoID.unique()),
self.nb_lookback_days,
self.npi_delay,
self.nb_test_days)
self.geos = list(self.country_samples.keys())
start_date = pd.to_datetime(data.Date.min(), format='%Y-%m-%d')
end_date = pd.to_datetime(data.Date.max(), format='%Y-%m-%d')
nb_days = (end_date - start_date).days + 1
# Prepare the output
forecast = {"CountryName": [],
"RegionName": [],
"Date": [],
"PredictedDailyTotalCases": [],
"PredictedDailyNewCases": [],
"PredictedDailyTotalDeaths": [],
"PredictedDailyNewDeaths": []}
# For each requested geo
geos = data.GeoID.unique()
for g in geos:
if g not in self.geos:
# the model was not trained for this geo: return zeroes
print("WARNING: The model was not trained for {}".format(g))
pred_total_cases = [0] * nb_days
pred_new_cases = [0] * nb_days
pred_total_deaths = [0] * nb_days
pred_new_deaths = [0] * nb_days
geo_start_date = start_date
else:
cdf = self.train_df[self.train_df.GeoID == g]
if len(cdf) == 0:
# we don't have historical data for this geo: return zeroes
pred_total_cases = [0] * nb_days
pred_new_cases = [0] * nb_days
pred_total_deaths = [0] * nb_days
pred_new_deaths = [0] * nb_days
geo_start_date = start_date
else:
last_known_date = cdf.Date.max()
# Start predicting from start_date, unless there's a gap since last known date
geo_start_date = min(last_known_date + np.timedelta64(1, 'D'), start_date)
npis_gdf = data[(data.Date >= geo_start_date - pd.Timedelta(days=self.npi_delay)) & (data.Date <= end_date - pd.Timedelta(days=self.npi_delay))]
pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths = self._get_new_cases_preds(cdf, g, npis_gdf)
# Append forecast data to results to return
country = data[data.GeoID == g].iloc[0].CountryName
region = data[data.GeoID == g].iloc[0].RegionName
for i, (ptot_cases, pnew_cases, ptot_deaths, pnew_deaths) in enumerate(zip(pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths)):
forecast["GeoID"].append(g)
forecast["CountryName"].append(country)
forecast["RegionName"].append(region)
current_date = geo_start_date + pd.offsets.Day(i)
forecast["Date"].append(current_date)
forecast["PredictedDailyTotalCases"].append(ptot_cases)
forecast["PredictedDailyNewCases"].append(pnew_cases)
forecast["PredictedDailyTotalDeaths"].append(ptot_deaths)
forecast["PredictedDailyNewDeaths"].append(pnew_deaths)
forecast_df = pd.DataFrame.from_dict(forecast)
# Return only the requested predictions (PredictedDailyTotalCases)
return forecast_df[(forecast_df.Date >= start_date) & (forecast_df.Date <= end_date)]
def _get_new_cases_preds(self, c_df, g, npis_df):
cdf = c_df[c_df.ConfirmedCases.notnull()]
initial_context_input = self.country_samples[g]['X_test_context'][-1]
initial_action_input = self.country_samples[g]['X_test_action'][-1]
country_id = np.array([self.geos.index(g)])
# Predictions with passed npis
cnpis_df = npis_df[npis_df.GeoID == g]
npis_sequence = np.array(cnpis_df[base.NPI_COLUMNS])
# Get the predictions with the passed NPIs
preds = self._roll_out_predictions(self.predictor,
initial_context_input,
initial_action_input,
country_id,
npis_sequence)
preds_cases = preds[:,0]
preds_deaths = preds[:,1]
# Gather info to convert to total cases
prev_confirmed_cases = np.array(cdf.ConfirmedCases)
prev_new_cases = np.array(cdf.NewCases)
initial_total_cases = prev_confirmed_cases[-1]
pop_size = np.array(cdf.Population)[-1] # Population size doesn't change over time
prev_confirmed_deaths = np.array(cdf.ConfirmedDeaths)
prev_new_deaths = np.array(cdf.NewDeaths)
initial_total_deaths = prev_confirmed_deaths[-1]
# Compute predictor's cases forecast
pred_total_cases, pred_new_cases = base.convert_ratios_to_total_cases(
preds_cases,
self.window_size,
prev_new_cases,
initial_total_cases,
pop_size)
# Compute predictor's deaths forecast
pred_total_deaths, pred_new_deaths = base.convert_ratios_to_total_deaths(
preds_deaths,
self.window_size,
prev_new_deaths,
initial_total_deaths)
return pred_total_cases, pred_new_cases, pred_total_deaths, pred_new_deaths
@staticmethod
def _create_country_samples(df: pd.DataFrame, geos: list, nb_lookback_days: int, npi_delay: int, nb_test_days: int) -> dict:
"""
For each country, creates numpy arrays for Keras
:param df: a Pandas DataFrame with historical data for countries (the "Oxford" dataset)
:param geos: a list of geo names
:return: a dictionary of train and test sets, for each specified country
"""
context_columns = ['PredictionRatio', 'DeathRatio']
action_columns = base.NPI_COLUMNS
outcome_columns = ['PredictionRatio', 'DeathRatio']
country_samples = {}
for i, g in enumerate(geos):
cdf = df[df.GeoID == g]
cdf = cdf[cdf.ConfirmedCases.notnull()]
context_data = np.array(cdf[context_columns])
action_data = np.array(cdf[action_columns])
outcome_data = np.array(cdf[outcome_columns])
context_samples = []
action_samples = []
outcome_samples = []
nb_total_days = outcome_data.shape[0]
for d in range(nb_lookback_days+npi_delay, nb_total_days):
context_samples.append(context_data[d - nb_lookback_days: d])
action_samples.append(action_data[d-npi_delay - nb_lookback_days: d-npi_delay])
outcome_samples.append(outcome_data[d])
if len(outcome_samples) > 0:
X_context = np.stack(context_samples, axis=0)
X_action = np.stack(action_samples, axis=0)
X_country = i*np.ones(X_context.shape[0])
y = np.stack(outcome_samples, axis=0)
country_samples[g] = {
'X_context': X_context,
'X_action': X_action,
'X_country': X_country,
'y': y,
'X_test_context': X_context[-nb_test_days:],
'X_test_action': X_action[-nb_test_days:],
'X_test_country': X_country[-nb_test_days:],
'y_test': y[-nb_test_days:],
}
return country_samples
# Function for performing roll outs into the future
@staticmethod
def _roll_out_predictions(predictor, initial_context_input, initial_action_input, country_id, future_action_sequence):
nb_roll_out_days = future_action_sequence.shape[0]
pred_output = np.zeros((nb_roll_out_days, 2))
context_input = np.expand_dims(np.copy(initial_context_input), axis=0)
action_input = np.expand_dims(np.copy(initial_action_input), axis=0)
country_input = np.expand_dims(np.copy(country_id), axis=0)
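        # Autoregressive rollout: each day, shift the context window left and
        # append the model's own prediction as the newest context entry.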
for d in range(nb_roll_out_days):
action_input[:, :-1] = action_input[:, 1:]
# Use the passed actions
action_sequence = future_action_sequence[d]
action_input[:, -1] = action_sequence
pred = predictor.predict([context_input, action_input, country_input])
pred_output[d] = pred[-1]
context_input[:, :-1] = context_input[:, 1:]
context_input[:, -1] = pred[-1]
return pred_output
def fit(self):
if self.train_df is None:
raise Exception("train_df must be defined bfr calling predict()")
self.country_samples = self._create_country_samples(self.train_df,
list(self.train_df.GeoID.unique()),
self.nb_lookback_days,
self.npi_delay,
self.nb_test_days)
self.geos = list(self.country_samples.keys())
# Aggregate data for training
all_X_context_list = [self.country_samples[c]['X_context']
for c in self.country_samples]
all_X_action_list = [self.country_samples[c]['X_action']
for c in self.country_samples]
all_X_country_list = [self.country_samples[c]['X_country']
for c in self.country_samples]
all_y_list = [self.country_samples[c]['y']
for c in self.country_samples]
X_context = np.concatenate(all_X_context_list)
X_action = np.concatenate(all_X_action_list)
X_country = np.concatenate(all_X_country_list)
y = np.concatenate(all_y_list)
# Clip outliers
MIN_VALUE = 0.
MAX_VALUE = 2.
X_context = np.clip(X_context, MIN_VALUE, MAX_VALUE)
y = np.clip(y, MIN_VALUE, MAX_VALUE)
X_context, X_action, X_country, y = self._permute_data(X_context, X_action, X_country, y)
self.predictor, training_model = self._construct_model(nb_context=X_context.shape[-1],
nb_action=X_action.shape[-1],
embed_size=self.embed_size,
lstm_size=self.lstm_size,
nb_lookback_days=self.nb_lookback_days)
history = self._train_model(training_model, X_context, X_action, X_country, y, epochs=self.num_epochs, verbose=0)
top_epoch = np.argmin(history.history['val_loss'])
train_loss = history.history['loss'][top_epoch]
val_loss = history.history['val_loss'][top_epoch]
print('Train Loss:', train_loss)
print('Val Loss:', val_loss)
# Shuffling data prior to train/val split
def _permute_data(self, X_context, X_action, X_country, y):
p = np.random.permutation(y.shape[0])
X_context = X_context[p]
X_action = X_action[p]
X_country = X_country[p]
y = y[p]
return X_context, X_action, X_country, y
# Construct model
def _construct_model(self, nb_context, nb_action, embed_size=10, lstm_size=32, nb_lookback_days=21):
# Create country embedding
country_id = Input(shape=(1,),
name='country_id')
emb = Embedding(len(self.geos), embed_size)(country_id)
emb = Reshape((embed_size,))(emb)
# Create context encoder
context_input = Input(shape=(nb_lookback_days, nb_context),
name='context_input')
x = LSTM(lstm_size,
return_sequences=False,
name='context_lstm')(context_input)
x = Concatenate(axis=1)([x, emb]) # concatenate the output of the LSTM with the country embedding prior to the dense layer
context_output = Dense(units=2,
activation='softplus',
name='context_dense')(x)
# Create action encoder
# Every aspect is monotonic and nonnegative except final bias
action_input = Input(shape=(nb_lookback_days, nb_action),
name='action_input')
x = LSTM(units=lstm_size,
kernel_constraint=Positive(),
recurrent_constraint=Positive(),
bias_constraint=Positive(),
return_sequences=False,
name='action_lstm')(action_input)
x = Concatenate(axis=1)([x, emb]) # concatenate the output of the LSTM with the country embedding prior to the dense layer
action_output = Dense(units=2,
activation='sigmoid',
name='action_dense')(x)
# Create prediction model
model_output = Lambda(_combine_r_and_d, name='prediction')(
[context_output, action_output])
model = Model(inputs=[context_input, action_input, country_id],
outputs=[model_output])
model.compile(loss='mae', optimizer='adam')
        # Create training model; as written it is identical to the prediction
        # model (no extra loss term on the variance of action_output is added)
training_model = Model(inputs=[context_input, action_input, country_id],
outputs=[model_output])
training_model.compile(loss='mae',
optimizer='adam')
return model, training_model
# Train model
def _train_model(self, training_model, X_context, X_action, X_country, y, epochs=1, verbose=0):
early_stopping = EarlyStopping(patience=20,
restore_best_weights=True)
history = training_model.fit([X_context, X_action, X_country], [y],
epochs=epochs,
batch_size=32,
validation_split=0.1,
callbacks=[early_stopping],
verbose=verbose)
return history
def save_model(self, path_to_weights, path_to_country_list):
self.predictor.save_weights(path_to_weights)
with open(path_to_country_list, 'w') as f:
f.writelines("{}\n".format(g) for g in self.geos)
if __name__ == '__main__':
# Run all test cases
model = geoLSTMPredictor()
model.evaluate()
model.save_model('./ongoing/predictors/geolstm/models/model.h5', './ongoing/predictors/geolstm/models/countries.txt')
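    # Illustrative sketch (added; not part of the original script) of rolling
    # the fitted predictor forward under a hypothetical 30-day NPI schedule.
    # The geo key 'Canada' is only an example and assumes it appears in train_df.
    # geo = 'Canada'
    # samples = model.country_samples[geo]
    # future_npis = np.zeros((30, samples['X_action'].shape[-1]))
    # preds = model._roll_out_predictions(model.predictor,
    #                                     samples['X_test_context'][-1],
    #                                     samples['X_test_action'][-1],
    #                                     model.geos.index(geo),
    #                                     future_npis)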
|
[
"ongoing.predictors.base.convert_ratios_to_total_deaths",
"numpy.ones",
"numpy.clip",
"keras.backend.abs",
"numpy.argmin",
"keras.models.Model",
"pandas.offsets.Day",
"keras.layers.Input",
"keras.layers.Reshape",
"numpy.copy",
"pandas.Timedelta",
"numpy.stack",
"pandas.DataFrame.from_dict",
"ongoing.predictors.base.convert_ratios_to_total_cases",
"keras.layers.Concatenate",
"numpy.random.permutation",
"numpy.concatenate",
"keras.layers.LSTM",
"numpy.zeros",
"keras.callbacks.EarlyStopping",
"numpy.array",
"keras.layers.Dense",
"keras.layers.Lambda",
"numpy.timedelta64"
] |
[((1081, 1089), 'keras.backend.abs', 'K.abs', (['w'], {}), '(w)\n', (1086, 1089), True, 'import keras.backend as K\n'), ((6557, 6589), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['forecast'], {}), '(forecast)\n', (6579, 6589), True, 'import pandas as pd\n'), ((7180, 7216), 'numpy.array', 'np.array', (['cnpis_df[base.NPI_COLUMNS]'], {}), '(cnpis_df[base.NPI_COLUMNS])\n', (7188, 7216), True, 'import numpy as np\n'), ((7717, 7745), 'numpy.array', 'np.array', (['cdf.ConfirmedCases'], {}), '(cdf.ConfirmedCases)\n', (7725, 7745), True, 'import numpy as np\n'), ((7771, 7793), 'numpy.array', 'np.array', (['cdf.NewCases'], {}), '(cdf.NewCases)\n', (7779, 7793), True, 'import numpy as np\n'), ((7973, 8002), 'numpy.array', 'np.array', (['cdf.ConfirmedDeaths'], {}), '(cdf.ConfirmedDeaths)\n', (7981, 8002), True, 'import numpy as np\n'), ((8029, 8052), 'numpy.array', 'np.array', (['cdf.NewDeaths'], {}), '(cdf.NewDeaths)\n', (8037, 8052), True, 'import numpy as np\n'), ((8199, 8315), 'ongoing.predictors.base.convert_ratios_to_total_cases', 'base.convert_ratios_to_total_cases', (['preds_cases', 'self.window_size', 'prev_new_cases', 'initial_total_cases', 'pop_size'], {}), '(preds_cases, self.window_size,\n prev_new_cases, initial_total_cases, pop_size)\n', (8233, 8315), True, 'import ongoing.predictors.base as base\n'), ((8465, 8575), 'ongoing.predictors.base.convert_ratios_to_total_deaths', 'base.convert_ratios_to_total_deaths', (['preds_deaths', 'self.window_size', 'prev_new_deaths', 'initial_total_deaths'], {}), '(preds_deaths, self.window_size,\n prev_new_deaths, initial_total_deaths)\n', (8500, 8575), True, 'import ongoing.predictors.base as base\n'), ((11132, 11163), 'numpy.zeros', 'np.zeros', (['(nb_roll_out_days, 2)'], {}), '((nb_roll_out_days, 2))\n', (11140, 11163), True, 'import numpy as np\n'), ((13017, 13051), 'numpy.concatenate', 'np.concatenate', (['all_X_context_list'], {}), '(all_X_context_list)\n', (13031, 13051), True, 'import numpy as np\n'), ((13071, 13104), 'numpy.concatenate', 'np.concatenate', (['all_X_action_list'], {}), '(all_X_action_list)\n', (13085, 13104), True, 'import numpy as np\n'), ((13125, 13159), 'numpy.concatenate', 'np.concatenate', (['all_X_country_list'], {}), '(all_X_country_list)\n', (13139, 13159), True, 'import numpy as np\n'), ((13172, 13198), 'numpy.concatenate', 'np.concatenate', (['all_y_list'], {}), '(all_y_list)\n', (13186, 13198), True, 'import numpy as np\n'), ((13290, 13330), 'numpy.clip', 'np.clip', (['X_context', 'MIN_VALUE', 'MAX_VALUE'], {}), '(X_context, MIN_VALUE, MAX_VALUE)\n', (13297, 13330), True, 'import numpy as np\n'), ((13343, 13375), 'numpy.clip', 'np.clip', (['y', 'MIN_VALUE', 'MAX_VALUE'], {}), '(y, MIN_VALUE, MAX_VALUE)\n', (13350, 13375), True, 'import numpy as np\n'), ((14088, 14126), 'numpy.argmin', 'np.argmin', (["history.history['val_loss']"], {}), "(history.history['val_loss'])\n", (14097, 14126), True, 'import numpy as np\n'), ((14442, 14475), 'numpy.random.permutation', 'np.random.permutation', (['y.shape[0]'], {}), '(y.shape[0])\n', (14463, 14475), True, 'import numpy as np\n'), ((14823, 14859), 'keras.layers.Input', 'Input', ([], {'shape': '(1,)', 'name': '"""country_id"""'}), "(shape=(1,), name='country_id')\n", (14828, 14859), False, 'from keras.layers import Input\n'), ((15051, 15116), 'keras.layers.Input', 'Input', ([], {'shape': '(nb_lookback_days, nb_context)', 'name': '"""context_input"""'}), "(shape=(nb_lookback_days, nb_context), name='context_input')\n", (15056, 15116), False, 'from keras.layers import Input\n'), ((15677, 15740), 'keras.layers.Input', 'Input', ([], {'shape': '(nb_lookback_days, nb_action)', 'name': '"""action_input"""'}), "(shape=(nb_lookback_days, nb_action), name='action_input')\n", (15682, 15740), False, 'from keras.layers import Input\n'), ((16479, 16558), 'keras.models.Model', 'Model', ([], {'inputs': '[context_input, action_input, country_id]', 'outputs': '[model_output]'}), '(inputs=[context_input, action_input, country_id], outputs=[model_output])\n', (16484, 16558), False, 'from keras.models import Model\n'), ((16771, 16850), 'keras.models.Model', 'Model', ([], {'inputs': '[context_input, action_input, country_id]', 'outputs': '[model_output]'}), '(inputs=[context_input, action_input, country_id], outputs=[model_output])\n', (16776, 16850), False, 'from keras.models import Model\n'), ((17156, 17209), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(20)', 'restore_best_weights': '(True)'}), '(patience=20, restore_best_weights=True)\n', (17169, 17209), False, 'from keras.callbacks import EarlyStopping\n'), ((7868, 7892), 'numpy.array', 'np.array', (['cdf.Population'], {}), '(cdf.Population)\n', (7876, 7892), True, 'import numpy as np\n'), ((9496, 9526), 'numpy.array', 'np.array', (['cdf[context_columns]'], {}), '(cdf[context_columns])\n', (9504, 9526), True, 'import numpy as np\n'), ((9553, 9582), 'numpy.array', 'np.array', (['cdf[action_columns]'], {}), '(cdf[action_columns])\n', (9561, 9582), True, 'import numpy as np\n'), ((9610, 9640), 'numpy.array', 'np.array', (['cdf[outcome_columns]'], {}), '(cdf[outcome_columns])\n', (9618, 9640), True, 'import numpy as np\n'), ((11203, 11233), 'numpy.copy', 'np.copy', (['initial_context_input'], {}), '(initial_context_input)\n', (11210, 11233), True, 'import numpy as np\n'), ((11281, 11310), 'numpy.copy', 'np.copy', (['initial_action_input'], {}), '(initial_action_input)\n', (11288, 11310), True, 'import numpy as np\n'), ((11359, 11378), 'numpy.copy', 'np.copy', (['country_id'], {}), '(country_id)\n', (11366, 11378), True, 'import numpy as np\n'), ((14965, 14987), 'keras.layers.Reshape', 'Reshape', (['(embed_size,)'], {}), '((embed_size,))\n', (14972, 14987), False, 'from keras.layers import Reshape\n'), ((15159, 15219), 'keras.layers.LSTM', 'LSTM', (['lstm_size'], {'return_sequences': '(False)', 'name': '"""context_lstm"""'}), "(lstm_size, return_sequences=False, name='context_lstm')\n", (15163, 15219), False, 'from keras.layers import LSTM\n'), ((15281, 15300), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (15292, 15300), False, 'from keras.layers import Concatenate\n'), ((15426, 15485), 'keras.layers.Dense', 'Dense', ([], {'units': '(2)', 'activation': '"""softplus"""', 'name': '"""context_dense"""'}), "(units=2, activation='softplus', name='context_dense')\n", (15431, 15485), False, 'from keras.layers import Dense\n'), ((16050, 16069), 'keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(1)'}), '(axis=1)\n', (16061, 16069), False, 'from keras.layers import Concatenate\n'), ((16194, 16251), 'keras.layers.Dense', 'Dense', ([], {'units': '(2)', 'activation': '"""sigmoid"""', 'name': '"""action_dense"""'}), "(units=2, activation='sigmoid', name='action_dense')\n", (16199, 16251), False, 'from keras.layers import Dense\n'), ((16373, 16416), 'keras.layers.Lambda', 'Lambda', (['_combine_r_and_d'], {'name': '"""prediction"""'}), "(_combine_r_and_d, name='prediction')\n", (16379, 16416), False, 'from keras.layers import Lambda\n'), ((10159, 10192), 'numpy.stack', 'np.stack', (['context_samples'], {'axis': '(0)'}), '(context_samples, axis=0)\n', (10167, 10192), True, 'import numpy as np\n'), ((10220, 10252), 'numpy.stack', 'np.stack', (['action_samples'], {'axis': '(0)'}), '(action_samples, axis=0)\n', (10228, 10252), True, 'import numpy as np\n'), ((10331, 10364), 'numpy.stack', 'np.stack', (['outcome_samples'], {'axis': '(0)'}), '(outcome_samples, axis=0)\n', (10339, 10364), True, 'import numpy as np\n'), ((6174, 6191), 'pandas.offsets.Day', 'pd.offsets.Day', (['i'], {}), '(i)\n', (6188, 6191), True, 'import pandas as pd\n'), ((10283, 10310), 'numpy.ones', 'np.ones', (['X_context.shape[0]'], {}), '(X_context.shape[0])\n', (10290, 10310), True, 'import numpy as np\n'), ((5293, 5315), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""D"""'], {}), "(1, 'D')\n", (5307, 5315), True, 'import numpy as np\n'), ((5396, 5429), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.npi_delay'}), '(days=self.npi_delay)\n', (5408, 5429), True, 'import pandas as pd\n'), ((5458, 5491), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': 'self.npi_delay'}), '(days=self.npi_delay)\n', (5470, 5491), True, 'import pandas as pd\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 26 14:52:43 EDT 2017
@author: ben
Generate ui: pyuic5 -x ndust.ui -o ndustgui.py
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017, <NAME>"
__license__ = "MIT"
__version__ = "0.1.0"
__email__ = "<EMAIL>"
__status__ = "Development"
import sys
import os
#from PyQt5 import QtGui, QtCore
#from PyQt5.QtCore import SIGNAL
#from PyQt5.QtCore import *
#from PyQt5.QtGui import
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from ndustgui import Ui_MainWindow
import h5py
#import grid model
import mgrid as mg
#import plasma model
import mplasma as mp
#import plasma model
import mnano as mn
# import hdf5 library wrappers
class Window(QMainWindow, Ui_MainWindow):
""" Main window
"""
def __init__(self, parent = None):
"""
"""
super(Window, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# error message
self.emsg = QtWidgets.QErrorMessage(self)
self.emsg.setWindowModality(QtCore.Qt.WindowModal)
# connections
# grid tab
grid_save_btn = self.ui.buttonBox_grid_save.button(QtWidgets.QDialogButtonBox.Save)
grid_save_btn.clicked.connect(self.grid_save)
grid_close_btn = self.ui.buttonBox_grid_save.button(QtWidgets.QDialogButtonBox.Close)
grid_close_btn.clicked.connect(QtWidgets.qApp.quit)
self.radiobutton_mpc = self.ui.radioButton_MPC
self.radiobutton_mpc.toggled.connect(self.toggle_terms)
self.radiobutton_ipa = self.ui.radioButton_IPA
self.radiobutton_ipa.toggled.connect(self.toggle_terms)
self.checkbox_tunnel = self.ui.checkBox_tunnel
self.checkbox_tunnel.toggled.connect(self.toggle_eaffinity)
self.checkbox_wnu = self.ui.checkBox_wnu
self.checkbox_wnu.toggled.connect(self.toggle_nucleation)
self.checkbox_wsg = self.ui.checkBox_wsg
self.checkbox_wsg.toggled.connect(self.toggle_sgrowth)
self.checkbox_wco = self.ui.checkBox_wco
self.checkbox_wco.toggled.connect(self.toggle_coagulation)
self.checkbox_wch = self.ui.checkBox_wch
self.checkbox_wch.toggled.connect(self.toggle_charging)
self.checkbox_sih4coupled = self.ui.checkBox_sih4coupled
self.checkbox_sih4coupled.toggled.connect(self.toggle_sih4coupled)
self.checkbox_chargewidth = self.ui.checkBox_chargewidth
self.checkbox_chargewidth.toggled.connect(self.toggle_chargewidth)
self.checkbox_vdw = self.ui.checkBox_vdw
self.checkbox_vdw.toggled.connect(self.toggle_vdw)
self.checkbox_bf = self.ui.checkBox_bf
# plasma tab
plasma_save_btn = self.ui.buttonBox_plasma_save.button(QtWidgets.QDialogButtonBox.Save)
plasma_save_btn.clicked.connect(self.plasma_save)
plasma_close_btn = self.ui.buttonBox_plasma_save.button(QtWidgets.QDialogButtonBox.Close)
plasma_close_btn.clicked.connect(QtWidgets.qApp.quit)
# nano tab
nano_save_btn = self.ui.buttonBox_nano_save.button(QtWidgets.QDialogButtonBox.Save)
nano_save_btn.clicked.connect(self.nano_save)
nano_close_btn = self.ui.buttonBox_nano_save.button(QtWidgets.QDialogButtonBox.Close)
nano_close_btn.clicked.connect(QtWidgets.qApp.quit)
self.prefix = ""
self.dirname = "/mnt/data/ben/ndust/data/"
self.server = [" guillimin:~/duster/results/", " cottos:~/duster/results/"]
# show window
self.show()
# grid
# h5 filename for grid
self.gridfilename = ""
# container for GridSystem
self.gsys = []
# container for volume sections
self.vsections = []
self.update_vsections()
self.spinbox_peakpos = self.ui.spinBox_peakpos
self.spinbox_peakpos.valueChanged.connect(self.update_peakradius)
# container for charge sections
self.qsections = []
# container for description
self.gdescription = []
# container for electrostatic interaction
self.einteraction = []
#
# plasma
# h5 filename for plasma
self.plasmafilename = ""
# container for parameters
self.parameters = []
# container for electrons
self.electrons = []
# container for ions
self.ions = []
# container for metastables
self.metastables = []
# container for description
self.pdescription = []
#
# nano
# h5 filename for nano
self.nanofilename = ""
# container for nanoparticle parameters
self.nanoparticles = []
# container for rates
self.rates = []
# container for time
self.time = []
# container for density
self.density = []
self.update_peakradius()
# container for description
self.ndescription = []
#
# WARNING TODO update charge sections too
def update_vsections(self):
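        """ Recompute the volume sections from the UI settings and refresh
            the derived min/max volume and max radius line edits.
        """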
nvsections = self.ui.spinBox_vsecs.value()
rmin = le2float(self.ui.lineEdit_minrad)*1e-9
base = le2float(self.ui.lineEdit_base)
power = le2float(self.ui.lineEdit_power)
linear = False
special = False
small = False
medium = False
big = False
if self.ui.checkBox_linear.isChecked():
linear = True
if self.ui.checkBox_special.isChecked():
special = True
nvsections = 40
self.ui.spinBox_vsecs.setValue(nvsections)
if self.ui.checkBox_small.isChecked():
small = True
nvsections = 48
self.ui.spinBox_vsecs.setValue(nvsections)
if self.ui.checkBox_medium.isChecked():
medium = True
nvsections = 48
self.ui.spinBox_vsecs.setValue(nvsections)
if self.ui.checkBox_big.isChecked():
big = True
nvsections = 48
self.ui.spinBox_vsecs.setValue(nvsections)
grid_type = {'linear':linear, 'special':special, 'small':small, 'medium':medium, 'big':big}
minvoliface, mg.ifaces, mg.vols, mg.rads, mg.diams = mg.compute_sections(nvsections, rmin,
base, power, grid_type)
vmin = mg.ifaces[0]
strvmin = "{:.4e}".format(vmin)
value2le(self.ui.lineEdit_minvol, strvmin)
vmax = mg.ifaces[-1]
strvmax = "{:.4e}".format(vmax)
value2le(self.ui.lineEdit_maxvol, strvmax)
rmax = mg.rads[-1]*1e9
strrmax = "{:.4f}".format(rmax)
value2le(self.ui.lineEdit_maxrad, strrmax)
def update_peakradius(self):
""" update peakradius line edit
"""
peakpos = self.ui.spinBox_peakpos.value()
if peakpos > len(mg.rads)-1:
peakpos = len(mg.rads)-1
self.ui.spinBox_peakpos.setValue(peakpos)
radius_peakpos = mg.rads[peakpos]*1e9
strrad = "{:.4f}".format(radius_peakpos)
value2le(self.ui.lineEdit_radius, strrad)
return peakpos
def errorWritingH5file(self, errormessage):
self.emsg.showMessage(errormessage)
def open_h5file(self, filename, mode):
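        """ Open an HDF5 file, showing an error dialog and returning None
            on failure.
        """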
try:
            h5f = h5py.File(filename, mode)
except (Exception) as e:
print(e)
self.errorWritingH5file(str(e))
return None
else:
print("file {} open".format(filename))
return h5f
#def grid_close(self):
def grid_save(self):
""" Save grid files
"""
print("\n[ii] saving grid file\n")
self.gridfilename = str(self.ui.lineEdit_grid_save_name.displayText())
# WARNING must check for empty file FIXME
#if self.gridfilename <> "":
#self.prefix += "-"
h5f = self.open_h5file(self.dirname+self.gridfilename, "w")
if h5f is None:
return -1
#h5f.attrs["gtime"] = 0.0
#profile_rerr = le2float(self.ui.lineEdit_ptol)
#h5f.attrs["profile_rerr"] = profile_rerr
# Use pivot method in charges (1D/2D)
qpivot = False
if self.ui.checkBox_qpivot.isChecked():
qpivot = True
# Nanoparticle temperature
temperature = le2float(self.ui.lineEdit_temp)
# Nanoparticle mass density
nmdensity = le2float(self.ui.lineEdit_nmdens)
# instantiate the group GridSystem
self.gsys = mg.GridSystem(h5f, qpivot, temperature, nmdensity)
# write group
self.gsys.toHDF5()
# volume sections
self.update_vsections()
# volume sections
nvsections = self.ui.spinBox_vsecs.value()
rmin = le2float(self.ui.lineEdit_minrad)*1e-9
base = le2float(self.ui.lineEdit_base)
power = le2float(self.ui.lineEdit_power)
linear = False
special = False
small = False
medium = False
big = False
if self.ui.checkBox_linear.isChecked():
linear = True
if self.ui.checkBox_special.isChecked():
special = True
if self.ui.checkBox_small.isChecked():
small = True
if self.ui.checkBox_medium.isChecked():
medium = True
if self.ui.checkBox_big.isChecked():
big = True
grid_type = {'linear':linear, 'special':special, 'small':small, 'medium':medium, 'big':big}
self.vsections = mg.VSections(h5f, nvsections, rmin, base, power, grid_type)
self.vsections.toHDF5()
# charge sections
##FIXME
if self.ui.checkBox_special.isChecked():
special = True
max_positive = self.ui.spinBox_maxpos.value()
max_negative = self.ui.spinBox_maxneg.value()
if not special:
nqsections = max_positive + max_negative + 1
print('not special ', nqsections)
else:
#nqsections = 50
#print('special ', nqsections)
#max_positive = 5
#max_negative = 296
#self.ui.spinBox_maxneg.setValue(max_negative)
#self.ui.spinBox_maxpos.setValue(max_positive)
nqsections = 100
print('special ', nqsections)
max_positive = 5
max_negative = 296
self.ui.spinBox_maxneg.setValue(max_negative)
self.ui.spinBox_maxpos.setValue(max_positive)
value2le(self.ui.lineEdit_qsecs, nqsections)
self.qsections = []
print('type', type(nqsections))
if big:
print('BIG')
nqsections = 30
value2le(self.ui.lineEdit_qsecs, nqsections)
print('type', type(nqsections), nqsections)
self.qsections = mg.QSections(h5f, nqsections, max_positive, max_negative, grid_type)
elif small:
print('SMALL')
nqsections = 32
value2le(self.ui.lineEdit_qsecs, nqsections)
print('type', type(nqsections), nqsections)
self.qsections = mg.QSections(h5f, nqsections, max_positive, max_negative, grid_type)
elif medium:
print('MEDIUM')
nqsections = 33
value2le(self.ui.lineEdit_qsecs, nqsections)
print('type', type(nqsections), nqsections)
self.qsections = mg.QSections(h5f, nqsections, max_positive, max_negative, grid_type)
else:
self.qsections = mg.QSections(h5f, nqsections, max_positive, max_negative, grid_type)
self.qsections.toHDF5()
# description for grid
description_text = qte2string(self.ui.textEdit_grid_desc)
self.gdescription = mg.Description(h5f, description_text)
self.gdescription.toHDF5()
# electrostatic interaction
multiplier = le2float(self.ui.lineEdit_mult)
dconstant = le2float(self.ui.lineEdit_die)
terms = self.ui.spinBox_terms.value()
#
        method = 0  # MPC
        if self.ui.radioButton_IPA.isChecked():
            method = 1  # IPA
        elif self.ui.radioButton_coul.isChecked():
            method = 2  # Coulomb
        elif self.ui.radioButton_Hybrid.isChecked():
            method = 3  # Hybrid
        elif self.ui.radioButton_free.isChecked():
            method = 4  # Free, no interaction
#
self.einteraction = mg.EInteraction(h5f, multiplier, dconstant, method,
terms)
self.einteraction.toHDF5()
vdw = 0
cutoff = 0.0
bf = 0
if self.checkbox_vdw.isChecked():
vdw = 1
cutoff = le2float(self.ui.lineEdit_cutoff)
if self.checkbox_bf.isChecked():
bf = 1
self.gvdw = mg.VDWaals(h5f, vdw, cutoff, bf)
self.gvdw.toHDF5()
# close file
h5f.close()
#
def toggle_terms(self):
""" Toggle spin box terms if MPC method is selected
"""
if self.radiobutton_mpc.isChecked() or self.radiobutton_ipa.isChecked():
#print("MPC Checked")
self.ui.label_terms.setEnabled(1)
self.ui.spinBox_terms.setEnabled(1)
else:
#print("MPC UNChecked")
self.ui.label_terms.setEnabled(0)
self.ui.spinBox_terms.setEnabled(0)
#
def toggle_eaffinity(self):
""" Toggle eaffinity box if tunnel is checked
"""
if self.checkbox_tunnel.isChecked():
self.ui.label_tunnel.setEnabled(1)
self.ui.label_eaffinity.setEnabled(1)
self.ui.lineEdit_eaffinity.setEnabled(1)
else:
self.ui.label_tunnel.setEnabled(0)
self.ui.label_eaffinity.setEnabled(0)
self.ui.lineEdit_eaffinity.setEnabled(0)
#
def toggle_nucleation(self):
""" Toggle nucleation_rate box if nucleation is checked
"""
if self.checkbox_wnu.isChecked():
self.ui.label_nucleation.setEnabled(1)
self.ui.lineEdit_nucleation_rate.setEnabled(1)
else:
self.ui.label_nucleation.setEnabled(0)
self.ui.lineEdit_nucleation_rate.setEnabled(0)
#
def toggle_sgrowth(self):
""" Toggle sgrowth_rate box if sgrowth is checked
"""
if self.checkbox_wsg.isChecked():
self.ui.label_sgrowth.setEnabled(1)
self.ui.lineEdit_sgrowth.setEnabled(1)
else:
self.ui.label_sgrowth.setEnabled(0)
self.ui.lineEdit_sgrowth.setEnabled(0)
#
def toggle_coagulation(self):
""" Toggle coagulation label if coagulation is checked
"""
if self.checkbox_wco.isChecked():
self.ui.label_wco.setEnabled(1)
else:
self.ui.label_wco.setEnabled(0)
#
def toggle_charging(self):
""" Toggle charging label if charging is checked
"""
if self.checkbox_wch.isChecked():
self.ui.label_wch.setEnabled(1)
else:
self.ui.label_wch.setEnabled(0)
#
def toggle_sih4coupled(self):
""" Toggle sih4 coupled
"""
if self.checkbox_sih4coupled.isChecked():
self.ui.label_sih4ratio.setEnabled(1)
self.ui.lineEdit_sih4ratio.setEnabled(1)
#
self.ui.label_sih4nmol.setEnabled(1)
self.ui.spinBox_sih4nmol.setEnabled(1)
else:
self.ui.label_sih4ratio.setEnabled(0)
self.ui.lineEdit_sih4ratio.setEnabled(0)
#
self.ui.label_sih4nmol.setEnabled(0)
self.ui.spinBox_sih4nmol.setEnabled(0)
#
def toggle_chargewidth(self):
""" Toggle charge width
"""
if self.checkbox_chargewidth.isChecked():
self.ui.spinBox_chargeposwidth.setEnabled(1)
self.ui.spinBox_chargenegwidth.setEnabled(1)
else:
self.ui.spinBox_chargeposwidth.setEnabled(0)
self.ui.spinBox_chargenegwidth.setEnabled(0)
#
def toggle_vdw(self):
""" Toggle vdw
"""
if self.checkbox_vdw.isChecked():
self.ui.label_cutoff.setEnabled(1)
self.ui.lineEdit_cutoff.setEnabled(1)
self.ui.checkBox_bf.setEnabled(1)
else:
self.ui.label_cutoff.setEnabled(0)
self.ui.lineEdit_cutoff.setEnabled(0)
self.ui.checkBox_bf.setEnabled(0)
self.ui.checkBox_bf.setChecked(0)
#
def plasma_save(self):
""" Save plasma file
"""
print("\n[ii] saving plasma file\n")
self.plasmafilename = str(self.ui.lineEdit_plasma_save_name.displayText())
# WARNING must check for empty file FIXME
h5f = self.open_h5file(self.dirname+self.plasmafilename, "w")
if h5f is None:
return -1
# Parameters
# Fixed plasma check
pfixed = self.ui.checkBox_pfixed.isChecked()
# Length
length = le2float(self.ui.lineEdit_length)
# Temperature
temperature = le2float(self.ui.lineEdit_temp_gas)
# Pressure
pressure = le2float(self.ui.lineEdit_pressure)
# instantiate the group Parameters
self.parameters = mp.Parameters(h5f, pfixed, length, temperature, pressure)
# fill line edit with gas density
strng = "{:.4e}".format(self.parameters.neutral_density())
value2le(self.ui.lineEdit_ng, strng)
# write group
self.parameters.toHDF5()
# Electrons
# Electron mean energy
emean = le2float(self.ui.lineEdit_emean)
# Electron density
ne = le2float(self.ui.lineEdit_ne)
# instantiate the group Electrons
self.electrons = mp.Electrons(h5f, emean, ne)
# write group
self.electrons.toHDF5()
# Ions
# Ion temperature
itemp = le2float(self.ui.lineEdit_itemp)
# Ion density
ni = le2float(self.ui.lineEdit_ni)
imass = le2float(self.ui.lineEdit_imass)
# instantiate the group Ions
self.ions = mp.Ions(h5f, itemp, ni, imass)
# write group
self.ions.toHDF5()
# Metastables
# Metastable density
nm = le2float(self.ui.lineEdit_nm)
# instantiate the group Electrons
self.metastables = mp.Metastables(h5f, nm)
# write group
self.metastables.toHDF5()
# Description
# description for plasma
description_text = qte2string(self.ui.textEdit_plasma_desc)
self.pdescription = mp.Description(h5f, description_text)
# write description
self.pdescription.toHDF5()
# close file
h5f.close()
#
def nano_save(self):
""" Save nano file
"""
print("\n[ii] saving nano file\n")
self.nanofilename = str(self.ui.lineEdit_nano_save_name.text())
# WARNING must check for empty file FIXME
h5f = self.open_h5file(self.dirname+self.nanofilename, "w")
if h5f is None:
print("\n[ee] nano file error\n")
return -1
# Nanoparticles
# Tunnel current check
tunnel = self.ui.checkBox_tunnel.isChecked()
# Electron affinity
eaffinity = le2float(self.ui.lineEdit_eaffinity)
# Accomodation factor
accfactor = le2float(self.ui.lineEdit_acc)
# instantiate the group Nanoparticles
self.nanoparticles = mn.Nanoparticles(h5f, tunnel, eaffinity, accfactor)
# write group
self.nanoparticles.toHDF5()
# Rates
# with nucleation
wnu = self.ui.checkBox_wnu.isChecked()
# nucleation rate
nucleation_rate = le2float(self.ui.lineEdit_nucleation_rate)
# with surface growth
wsg = self.ui.checkBox_wsg.isChecked()
# surface growth rate
sgrowth_rate = le2float(self.ui.lineEdit_sgrowth)
# with coagulation
wco = self.ui.checkBox_wco.isChecked()
# with charging
wch = self.ui.checkBox_wch.isChecked()
#coupled SiH4
wsih4 = self.ui.checkBox_sih4coupled.isChecked()
# SiH4 to gas ratio (1/ gas to SiH4 ratio)
sih4ratio = 1.0/le2float(self.ui.lineEdit_sih4ratio)
#
sih4nmol = self.ui.spinBox_sih4nmol.value()
#
# instantiate the group Nanoparticles
self.rates = mn.Rates(h5f, wnu, nucleation_rate,
wsg, sgrowth_rate,
wco, wch, wsih4, sih4ratio, sih4nmol)
# write group
self.rates.toHDF5()
# Time
# nanoparticle growth delta time
ndeltat = le2float(self.ui.lineEdit_ndeltat)
# nanoparticle charging delta time
qdeltat = le2float(self.ui.lineEdit_qdeltat)
# stop time
tstop = le2float(self.ui.lineEdit_tstop)
# instantiate the group Time
self.time = mn.Time(h5f, ndeltat, qdeltat, tstop)
# write group
self.time.toHDF5()
# Density
# nanoparticle initial density
indens = le2float(self.ui.lineEdit_indens)
# nanoparticle qtol
qtol = le2float(self.ui.lineEdit_qtol)
# volume sections
#
        distribution = 0  # delta default
        if self.ui.radioButton_step.isChecked():
            distribution = 1  # step
        elif self.ui.radioButton_gaussian.isChecked():
            distribution = 2  # Gaussian
peakpos = self.update_peakradius()
width = self.ui.spinBox_width.value()
withchargewidth = False
chargewidth = {"negative": 0, "positive": 0}
if self.checkbox_chargewidth.isChecked():
chargenegwidth = self.ui.spinBox_chargenegwidth.value()
chargeposwidth = self.ui.spinBox_chargeposwidth.value()
            if chargenegwidth != 0 or chargeposwidth != 0:
withchargewidth = True
chargewidth["negative"] = chargenegwidth
chargewidth["positive"] = chargeposwidth
# instantiate the group Time
self.density = mn.Density(h5f, indens, qtol, distribution, peakpos, width,
withchargewidth, chargewidth)
# write group
self.density.toHDF5()
# Description
# description for nano
description_text = qte2string(self.ui.textEdit_nano_desc)
self.ndescription = mn.Description(h5f, description_text)
# write description
self.ndescription.toHDF5()
# close file
h5f.close()
#
def le2int(leobj):
""" Qt LineEdit to integer
"""
return int(leobj.displayText())
def le2float(leobj):
""" Qt LineEdit to float
"""
# WARNING TODO QValidator or exceptions for inputs
#try:
#number = float(leobj.displayText())
#except Exception:
#QtGui.QMessageBox.about(self, 'Error','Input can only be a number')
#pass
return float(leobj.displayText())
def value2le(leobj, value):
""" Set value to Qt LineEdit
"""
leobj.setText(str(value))
def qte2string(qteobj):
""" Qt LineEdit to integer
"""
return str(qteobj.toPlainText())
# instantiation
app = QApplication(sys.argv)
window = Window(None)
sys.exit(app.exec_())
|
[
"PyQt5.QtWidgets.QErrorMessage",
"ndustgui.Ui_MainWindow",
"mgrid.VDWaals",
"mgrid.QSections",
"mgrid.GridSystem",
"mplasma.Description",
"mplasma.Ions",
"mnano.Density",
"mnano.Rates",
"mnano.Nanoparticles",
"h5py.File",
"mgrid.compute_sections",
"mplasma.Metastables",
"mgrid.EInteraction",
"mplasma.Parameters",
"mplasma.Electrons",
"mgrid.VSections",
"mgrid.Description",
"mnano.Time",
"mnano.Description"
] |
[((935, 950), 'ndustgui.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (948, 950), False, 'from ndustgui import Ui_MainWindow\n'), ((1012, 1041), 'PyQt5.QtWidgets.QErrorMessage', 'QtWidgets.QErrorMessage', (['self'], {}), '(self)\n', (1035, 1041), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5766, 5827), 'mgrid.compute_sections', 'mg.compute_sections', (['nvsections', 'rmin', 'base', 'power', 'grid_type'], {}), '(nvsections, rmin, base, power, grid_type)\n', (5785, 5827), True, 'import mgrid as mg\n'), ((7848, 7898), 'mgrid.GridSystem', 'mg.GridSystem', (['h5f', 'qpivot', 'temperature', 'nmdensity'], {}), '(h5f, qpivot, temperature, nmdensity)\n', (7861, 7898), True, 'import mgrid as mg\n'), ((8726, 8785), 'mgrid.VSections', 'mg.VSections', (['h5f', 'nvsections', 'rmin', 'base', 'power', 'grid_type'], {}), '(h5f, nvsections, rmin, base, power, grid_type)\n', (8738, 8785), True, 'import mgrid as mg\n'), ((10689, 10726), 'mgrid.Description', 'mg.Description', (['h5f', 'description_text'], {}), '(h5f, description_text)\n', (10703, 10726), True, 'import mgrid as mg\n'), ((11284, 11342), 'mgrid.EInteraction', 'mg.EInteraction', (['h5f', 'multiplier', 'dconstant', 'method', 'terms'], {}), '(h5f, multiplier, dconstant, method, terms)\n', (11299, 11342), True, 'import mgrid as mg\n'), ((11631, 11663), 'mgrid.VDWaals', 'mg.VDWaals', (['h5f', 'vdw', 'cutoff', 'bf'], {}), '(h5f, vdw, cutoff, bf)\n', (11641, 11663), True, 'import mgrid as mg\n'), ((15540, 15597), 'mplasma.Parameters', 'mp.Parameters', (['h5f', 'pfixed', 'length', 'temperature', 'pressure'], {}), '(h5f, pfixed, length, temperature, pressure)\n', (15553, 15597), True, 'import mplasma as mp\n'), ((15999, 16027), 'mplasma.Electrons', 'mp.Electrons', (['h5f', 'emean', 'ne'], {}), '(h5f, emean, ne)\n', (16011, 16027), True, 'import mplasma as mp\n'), ((16306, 16336), 'mplasma.Ions', 'mp.Ions', (['h5f', 'itemp', 'ni', 'imass'], {}), '(h5f, itemp, ni, imass)\n', (16313, 16336), True, 'import mplasma as mp\n'), ((16522, 16545), 'mplasma.Metastables', 'mp.Metastables', (['h5f', 'nm'], {}), '(h5f, nm)\n', (16536, 16545), True, 'import mplasma as mp\n'), ((16730, 16767), 'mplasma.Description', 'mp.Description', (['h5f', 'description_text'], {}), '(h5f, description_text)\n', (16744, 16767), True, 'import mplasma as mp\n'), ((17522, 17573), 'mnano.Nanoparticles', 'mn.Nanoparticles', (['h5f', 'tunnel', 'eaffinity', 'accfactor'], {}), '(h5f, tunnel, eaffinity, accfactor)\n', (17538, 17573), True, 'import mnano as mn\n'), ((18366, 18462), 'mnano.Rates', 'mn.Rates', (['h5f', 'wnu', 'nucleation_rate', 'wsg', 'sgrowth_rate', 'wco', 'wch', 'wsih4', 'sih4ratio', 'sih4nmol'], {}), '(h5f, wnu, nucleation_rate, wsg, sgrowth_rate, wco, wch, wsih4,\n sih4ratio, sih4nmol)\n', (18374, 18462), True, 'import mnano as mn\n'), ((18854, 18891), 'mnano.Time', 'mn.Time', (['h5f', 'ndeltat', 'qdeltat', 'tstop'], {}), '(h5f, ndeltat, qdeltat, tstop)\n', (18861, 18891), True, 'import mnano as mn\n'), ((19888, 19981), 'mnano.Density', 'mn.Density', (['h5f', 'indens', 'qtol', 'distribution', 'peakpos', 'width', 'withchargewidth', 'chargewidth'], {}), '(h5f, indens, qtol, distribution, peakpos, width, withchargewidth,\n chargewidth)\n', (19898, 19981), True, 'import mnano as mn\n'), ((20184, 20221), 'mnano.Description', 'mn.Description', (['h5f', 'description_text'], {}), '(h5f, description_text)\n', (20198, 20221), True, 'import mnano as mn\n'), ((6766, 6790), 'h5py.File', 'h5py.File', (['filename', '"""w"""'], {}), "(filename, 'w')\n", (6775, 6790), False, 'import h5py\n'), ((9854, 9922), 'mgrid.QSections', 'mg.QSections', (['h5f', 'nqsections', 'max_positive', 'max_negative', 'grid_type'], {}), '(h5f, nqsections, max_positive, max_negative, grid_type)\n', (9866, 9922), True, 'import mgrid as mg\n'), ((10106, 10174), 'mgrid.QSections', 'mg.QSections', (['h5f', 'nqsections', 'max_positive', 'max_negative', 'grid_type'], {}), '(h5f, nqsections, max_positive, max_negative, grid_type)\n', (10118, 10174), True, 'import mgrid as mg\n'), ((10360, 10428), 'mgrid.QSections', 'mg.QSections', (['h5f', 'nqsections', 'max_positive', 'max_negative', 'grid_type'], {}), '(h5f, nqsections, max_positive, max_negative, grid_type)\n', (10372, 10428), True, 'import mgrid as mg\n'), ((10468, 10536), 'mgrid.QSections', 'mg.QSections', (['h5f', 'nqsections', 'max_positive', 'max_negative', 'grid_type'], {}), '(h5f, nqsections, max_positive, max_negative, grid_type)\n', (10480, 10536), True, 'import mgrid as mg\n')]
|
import os
from unittest import mock
from castero.datafile import DataFile
from castero.downloadqueue import DownloadQueue
def test_datafile_download(display):
display.change_status = mock.MagicMock(name="change_status")
mydownloadqueue = DownloadQueue()
url = "https://travis-ci.org/"
DataFile.download_to_file(
url,
"datafile_download_temp",
"datafile download name",
mydownloadqueue,
display=display
)
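    # busy-wait until the download worker drains the queue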
while mydownloadqueue.length > 0:
pass
assert display.change_status.call_count > 0
assert os.path.exists("datafile_download_temp")
os.remove("datafile_download_temp")
|
[
"os.remove",
"unittest.mock.MagicMock",
"castero.downloadqueue.DownloadQueue",
"os.path.exists",
"castero.datafile.DataFile.download_to_file"
] |
[((190, 226), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'name': '"""change_status"""'}), "(name='change_status')\n", (204, 226), False, 'from unittest import mock\n'), ((249, 264), 'castero.downloadqueue.DownloadQueue', 'DownloadQueue', ([], {}), '()\n', (262, 264), False, 'from castero.downloadqueue import DownloadQueue\n'), ((304, 424), 'castero.datafile.DataFile.download_to_file', 'DataFile.download_to_file', (['url', '"""datafile_download_temp"""', '"""datafile download name"""', 'mydownloadqueue'], {'display': 'display'}), "(url, 'datafile_download_temp',\n 'datafile download name', mydownloadqueue, display=display)\n", (329, 424), False, 'from castero.datafile import DataFile\n'), ((577, 617), 'os.path.exists', 'os.path.exists', (['"""datafile_download_temp"""'], {}), "('datafile_download_temp')\n", (591, 617), False, 'import os\n'), ((622, 657), 'os.remove', 'os.remove', (['"""datafile_download_temp"""'], {}), "('datafile_download_temp')\n", (631, 657), False, 'import os\n')]
|
"""
.. todo::
WRITEME
"""
import numpy as np
def is_iterable(obj):
"""
Robustly test whether an object is iterable.
Parameters
----------
obj : object
The object to be checked.
Returns
-------
is_iterable : bool
`True` if the object is iterable, `False` otherwise.
Notes
-----
This tests iterability by calling `iter()` and catching a `TypeError`.
Various other ways might occur to you, but they all have flaws:
* `hasattr(obj, '__len__')` will fail for objects that can be iterated
on despite not knowing their length a priori.
* `hasattr(obj, '__iter__')` will fail on objects like Theano tensors
that implement it solely to raise a `TypeError` (because Theano
tensors implement `__getitem__` semantics, Python 2.x will try
to iterate on them via this legacy method if `__iter__` is not
defined).
* `hasattr` has a tendency to swallow other exception-like objects
(`KeyboardInterrupt`, etc.) anyway, and should be avoided for this
reason in Python 2.x, but `getattr()` with a sentinel value suffers
from the exact same pitfalls above.
"""
try:
iter(obj)
except TypeError:
return False
return True
def contains_nan(arr):
"""
Test whether a numpy.ndarray contains any `np.nan` values.
    Parameters
    ----------
    arr : np.ndarray
        The array to be checked.
Returns
-------
contains_nan : bool
`True` if the array contains any `np.nan` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.nan`'s using `np.isnan(np.min(ndarray))`.
This approach is faster and more memory efficient than the obvious
alternative, calling `np.any(np.isnan(ndarray))`, which requires the
construction of a boolean array with the same shape as the input array.
"""
return np.isnan(np.min(arr))
def contains_inf(arr):
"""
Test whether a numpy.ndarray contains any `np.inf` values.
    Parameters
    ----------
    arr : np.ndarray
        The array to be checked.
Returns
-------
contains_inf : bool
`True` if the array contains any `np.inf` values, `False` otherwise.
Notes
-----
Tests for the presence of `np.inf`'s by determining whether the
values returned by `np.nanmin(arr)` and `np.nanmax(arr)` are finite.
This approach is more memory efficient than the obvious alternative,
calling `np.any(np.isinf(ndarray))`, which requires the construction of a
boolean array with the same shape as the input array.
"""
return np.isinf(np.nanmax(arr)) or np.isinf(np.nanmin(arr))
def isfinite(arr):
"""
Test whether a numpy.ndarray contains any `np.inf` or `np.nan` values.
    Parameters
    ----------
    arr : np.ndarray
        The array to be checked.
Returns
-------
isfinite : bool
`True` if the array contains no np.inf or np.nan values, `False`
otherwise.
Notes
-----
Tests for the presence of `np.inf` or `np.nan` values by determining
whether the values returned by `np.min(arr)` and `np.max(arr)` are finite.
This approach is more memory efficient than the obvious alternative,
calling `np.any(np.isfinite(ndarray))`, which requires the construction of
a boolean array with the same shape as the input array.
"""
return np.isfinite(np.max(arr)) and np.isfinite(np.min(arr))
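# Hedged demo (added for illustration; not part of the original module). It
# exercises the min/max-based checks defined above:
if __name__ == '__main__':
    sample = np.array([0.0, 1.0, np.nan])
    assert contains_nan(sample)       # np.min propagates the NaN
    assert not contains_inf(sample)   # nanmin/nanmax ignore the NaN
    assert not isfinite(sample)       # np.max is NaN, hence not finite
    assert is_iterable(sample) and not is_iterable(0.5)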
|
[
"numpy.nanmin",
"numpy.min",
"numpy.max",
"numpy.nanmax"
] |
[((1887, 1898), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (1893, 1898), True, 'import numpy as np\n'), ((2575, 2589), 'numpy.nanmax', 'np.nanmax', (['arr'], {}), '(arr)\n', (2584, 2589), True, 'import numpy as np\n'), ((2603, 2617), 'numpy.nanmin', 'np.nanmin', (['arr'], {}), '(arr)\n', (2612, 2617), True, 'import numpy as np\n'), ((3330, 3341), 'numpy.max', 'np.max', (['arr'], {}), '(arr)\n', (3336, 3341), True, 'import numpy as np\n'), ((3359, 3370), 'numpy.min', 'np.min', (['arr'], {}), '(arr)\n', (3365, 3370), True, 'import numpy as np\n')]
|
import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')
solr_base_dir = "/opt/solr/alfresco-search-services"
solr_rerank_conf_dir = f"{solr_base_dir}/data/solrhome/templates/rerank/conf"
def test_mountpoint_present(host):
mount_point = f"{solr_base_dir}/data"
assert host.mount_point(mount_point).exists
assert host.mount_point(mount_point).filesystem == "xfs"
@pytest.mark.parametrize("svc", [
("solr"),
])
def test_services_are_enabled(host, svc):
service = host.service(svc)
assert service.is_enabled
assert service.is_running
@pytest.mark.parametrize("port", [
(8983),
(22),
])
def test_service_ports_are_listening(host, port):
assert host.socket(f"tcp://0.0.0.0:{port}")
@pytest.mark.parametrize("cron_task", [
("solr_backup"),
])
def test_solr_backup_cron_is_present(host, cron_task):
    assert host.ansible("cron", f"name={cron_task} state=present", become=True)
@pytest.mark.parametrize("user_id, user_group, user_dir", [
("solr", "solr", f"{solr_base_dir}/data/solrhome/alfresco"),
("solr", "solr", f"{solr_base_dir}/data/solrhome/archive"),
])
def test_application_directories_exist(host, user_id, user_group, user_dir):
f = host.file(user_dir)
assert f.is_directory
assert f.user == user_id
assert f.group == user_group
@pytest.mark.parametrize("user_id, user_group, file_name", [
(
"solr",
"solr",
f"{solr_rerank_conf_dir}/solrcore.properties"
),
("solr", "solr", "/etc/default/solr.in.sh"),
("root", "root", "/root/solr_backup.sh"),
])
def test_application_files_exist(host, user_id, user_group, file_name):
f = host.file(file_name)
assert f.is_file
assert f.user == user_id
assert f.group == user_group
|
[
"pytest.mark.parametrize"
] |
[((513, 553), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""svc"""', "['solr']"], {}), "('svc', ['solr'])\n", (536, 553), False, 'import pytest\n'), ((700, 743), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""port"""', '[8983, 22]'], {}), "('port', [8983, 22])\n", (723, 743), False, 'import pytest\n'), ((860, 913), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cron_task"""', "['solr_backup']"], {}), "('cron_task', ['solr_backup'])\n", (883, 913), False, 'import pytest\n'), ((1060, 1247), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_id, user_group, user_dir"""', "[('solr', 'solr', f'{solr_base_dir}/data/solrhome/alfresco'), ('solr',\n 'solr', f'{solr_base_dir}/data/solrhome/archive')]"], {}), "('user_id, user_group, user_dir', [('solr', 'solr',\n f'{solr_base_dir}/data/solrhome/alfresco'), ('solr', 'solr',\n f'{solr_base_dir}/data/solrhome/archive')])\n", (1083, 1247), False, 'import pytest\n'), ((1447, 1666), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""user_id, user_group, file_name"""', "[('solr', 'solr', f'{solr_rerank_conf_dir}/solrcore.properties'), ('solr',\n 'solr', '/etc/default/solr.in.sh'), ('root', 'root',\n '/root/solr_backup.sh')]"], {}), "('user_id, user_group, file_name', [('solr', 'solr',\n f'{solr_rerank_conf_dir}/solrcore.properties'), ('solr', 'solr',\n '/etc/default/solr.in.sh'), ('root', 'root', '/root/solr_backup.sh')])\n", (1470, 1666), False, 'import pytest\n')]
|
from collections import defaultdict
import random
from protocolbuffers import GameplaySaveData_pb2
from autonomy.autonomy_preference import ObjectPreferenceTag, AutonomyPreferenceType
from distributor.rollback import ProtocolBufferRollback
from event_testing.resolver import SingleSimResolver
from event_testing.tests import TunableTestSet
from households.household_tracker import HouseholdTracker
from sims4.tuning.tunable import TunableEnumSet, TunableMapping, TunableEnumEntry, TunableList, TunableReference
from singletons import DEFAULT
import services
import sims4
logger = sims4.log.Logger('ObjectPreference', default_owner='nabaker')
class _ZoneSpecificObjectPreferenceData:
def __init__(self):
self.object_to_sim = defaultdict(dict)
self._sim_to_object = {}
def __contains__(self, key):
return key in self._sim_to_object
def __getitem__(self, key):
return self._sim_to_object[key]
def __iter__(self):
return iter(self._sim_to_object)
def __len__(self):
return len(self._sim_to_object)
def __bool__(self):
if self._sim_to_object:
return True
return False
def keys(self):
return self._sim_to_object.keys()
def values(self):
return self._sim_to_object.values()
def items(self):
return self._sim_to_object.items()
def get_restricted_object(self, sim_id):
if sim_id in self._sim_to_object:
return self._sim_to_object[sim_id]
return (None, None)
def get_restricted_sim(self, object_id, subroot_index):
if object_id in self.object_to_sim and subroot_index in self.object_to_sim[object_id]:
return self.object_to_sim[object_id][subroot_index]
def get_restricted_sims(self, object_id):
if object_id in self.object_to_sim:
return self.object_to_sim[object_id].values()
def set_restriction(self, object_id, subroot_index, sim_id):
if sim_id in self._sim_to_object:
(old_object_id, old_subroot_index) = self._sim_to_object[sim_id]
self.clear_restriction(old_object_id, old_subroot_index)
return_sim_id = self.get_restricted_sim(object_id, subroot_index)
self.object_to_sim[object_id][subroot_index] = sim_id
self._sim_to_object[sim_id] = (object_id, subroot_index)
if return_sim_id is not None and return_sim_id != sim_id:
del self._sim_to_object[return_sim_id]
return return_sim_id
def clear_restriction(self, object_id, subroot_index):
if object_id not in self.object_to_sim:
return
if subroot_index is None:
sims_to_clear = self.object_to_sim[object_id].values()
del self.object_to_sim[object_id]
elif subroot_index in self.object_to_sim[object_id]:
sims_to_clear = (self.object_to_sim[object_id][subroot_index],)
del self.object_to_sim[object_id][subroot_index]
if not self.object_to_sim[object_id]:
del self.object_to_sim[object_id]
else:
sims_to_clear = tuple()
for sim_id in sims_to_clear:
del self._sim_to_object[sim_id]
game_object = services.object_manager().get(object_id)
if game_object is not None:
game_object.update_object_tooltip()
def get_invalid_object_ids(self):
object_manager = services.object_manager()
invalid_object_ids = []
for object_id in self.object_to_sim:
if object_manager.get(object_id) is None:
invalid_object_ids.append(object_id)
return invalid_object_ids
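# Note (added for clarity): _ZoneSpecificObjectPreferenceData keeps a
# bidirectional mapping. After set_restriction(object_id=12, subroot_index=None,
# sim_id=7): object_to_sim == {12: {None: 7}} and _sim_to_object == {7: (12, None)}.
# set_restriction returns the sim that previously held the slot, if any, so
# callers can re-assign that sim elsewhere (e.g. to another bed).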
class HouseholdObjectPreferenceTracker(HouseholdTracker):
TAGS_TO_CONVERT = TunableEnumSet(description='\n The tags that should automatically be converted from "use preference"\n functionality to "use only" functionality on load.\n ', enum_type=ObjectPreferenceTag, enum_default=ObjectPreferenceTag.INVALID, invalid_enums=(ObjectPreferenceTag.INVALID,))
    IGNORE_TESTS = TunableMapping(description='\n        Mapping of tag to tests used to bypass the use only status.\n        SingleSimResolver on the sim is used.\n        ', key_type=TunableEnumEntry(description='\n        The preference tag to which the ignore tests apply.\n        ', tunable_type=ObjectPreferenceTag, default=ObjectPreferenceTag.INVALID, invalid_enums=(ObjectPreferenceTag.INVALID,)), value_type=TunableTestSet(description='\n        Set of tests that must be passed for sim to ignore "use only" status.\n        '))
def __init__(self, household):
self._owner = household
self._zone_object_preference_datas = defaultdict(_ZoneSpecificObjectPreferenceData)
@property
def owner(self):
return self._owner
def reset(self):
self._zone_object_preference_datas = defaultdict(_ZoneSpecificObjectPreferenceData)
def household_lod_cleanup(self):
self.reset()
def _is_ignore_disallowed(self, sim_info, preference_tag):
return sim_info.is_selectable and (preference_tag in self.IGNORE_TESTS and self.IGNORE_TESTS[preference_tag].run_tests(SingleSimResolver(sim_info)))
def get_restricted_object(self, sim_id, preference_tag):
zone_preference_tuple = (services.current_zone_id(), preference_tag)
if zone_preference_tuple not in self._zone_object_preference_datas:
return (None, None)
return self._zone_object_preference_datas[zone_preference_tuple].get_restricted_object(sim_id)
def get_restricted_sims(self, object_id, preference_tag):
zone_preference_tuple = (services.current_zone_id(), preference_tag)
if zone_preference_tuple not in self._zone_object_preference_datas:
return
return self._zone_object_preference_datas[zone_preference_tuple].get_restricted_sims(object_id)
def get_restricted_sim(self, object_id, subroot_index, preference_tag):
zone_preference_tuple = (services.current_zone_id(), preference_tag)
if zone_preference_tuple not in self._zone_object_preference_datas:
return
return self._zone_object_preference_datas[zone_preference_tuple].get_restricted_sim(object_id, subroot_index)
def get_preferable_subroot_index(self, game_object):
        if game_object.is_sim or not (game_object.is_part and game_object.restrict_autonomy_preference):
return
return game_object.subroot_index
def get_restriction(self, sim_info, game_object, preference_tag, full_object=False, allow_test=True):
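        # Added for clarity: USE_ONLY when this sim has claimed this exact
        # object/part; DISALLOWED when the slot belongs to another sim or this
        # sim has claimed a different object (unless an IGNORE_TESTS entry
        # passes); ALLOWED otherwise.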
sim_id = sim_info.sim_id
zone_preference_tuple = (services.current_zone_id(), preference_tag)
if zone_preference_tuple not in self._zone_object_preference_datas:
return AutonomyPreferenceType.ALLOWED
object_id = game_object.id
subroot_index = self.get_preferable_subroot_index(game_object)
zone_preference_data = self._zone_object_preference_datas[zone_preference_tuple]
if sim_id in zone_preference_data:
(owned_object_id, owned_subroot_index) = zone_preference_data[sim_id]
if owned_object_id not in services.object_manager():
zone_preference_data.clear_restriction(owned_object_id, None)
else:
if owned_object_id == object_id and (full_object or subroot_index == owned_subroot_index):
return AutonomyPreferenceType.USE_ONLY
if allow_test and self._is_ignore_disallowed(sim_info, preference_tag):
return AutonomyPreferenceType.ALLOWED
return AutonomyPreferenceType.DISALLOWED
if object_id in zone_preference_data.object_to_sim:
if full_object:
if None not in zone_preference_data.object_to_sim[object_id]:
return AutonomyPreferenceType.ALLOWED
if subroot_index not in zone_preference_data.object_to_sim[object_id]:
return AutonomyPreferenceType.ALLOWED
elif subroot_index not in zone_preference_data.object_to_sim[object_id]:
return AutonomyPreferenceType.ALLOWED
else:
return AutonomyPreferenceType.ALLOWED
if allow_test and self._is_ignore_disallowed(sim_info, preference_tag):
return AutonomyPreferenceType.ALLOWED
return AutonomyPreferenceType.DISALLOWED
def set_restriction(self, sim_info, game_objects, preference_tag, should_force):
zone_preference_data = self._zone_object_preference_datas[(services.current_zone_id(), preference_tag)]
if not should_force and sim_info.sim_id in zone_preference_data:
return
object_id = None
subroot_index = None
for game_object in game_objects:
object_id = game_object.id
if game_object.is_part and game_object.restrict_autonomy_preference:
subroot_index = game_object.subroot_index
else:
subroot_index = None
if zone_preference_data.get_restricted_sim(object_id, subroot_index) is not None:
continue
break
else:
if not should_force:
return
if object_id is not None:
old_sim_id = zone_preference_data.set_restriction(object_id, subroot_index, sim_info.sim_id)
if old_sim_id is not None:
roommate_service = services.get_roommate_service()
if roommate_service is not None:
roommate_service.assign_bed(old_sim_id)
game_object.update_object_tooltip()
def set_object_restriction(self, sim_id, obj, preference_tag):
zone_preference_data = self._zone_object_preference_datas[(services.current_zone_id(), preference_tag)]
for part in obj.parts:
if part.restrict_autonomy_preference:
zone_preference_data.set_restriction(obj.id, part.subroot_index, sim_id)
return
zone_preference_data.set_restriction(obj.id, None, sim_id)
obj.update_object_tooltip()
def clear_restriction(self, game_objects, preference_tag):
zone_preference_data = self._zone_object_preference_datas[(services.current_zone_id(), preference_tag)]
object_id = None
subroot_index = None
for game_object in game_objects:
object_id = game_object.id
subroot_index = self.get_preferable_subroot_index(game_object)
sim_id = zone_preference_data.get_restricted_sim(object_id, subroot_index)
if sim_id is not None:
zone_preference_data.clear_restriction(object_id, subroot_index)
roommate_service = services.get_roommate_service()
if roommate_service is not None:
roommate_service.assign_bed(sim_id, avoid_id=object_id)
def clear_sim_restriction(self, sim_id):
current_zone_id = services.current_zone_id()
for (zone_preference_tuple, zone_preference_data) in self._zone_object_preference_datas.items():
if zone_preference_tuple[0] == current_zone_id:
(object_id, subroot_index) = zone_preference_data.get_restricted_object(sim_id)
if object_id is not None:
zone_preference_data.clear_restriction(object_id, subroot_index)
def update_preference_if_possible(self, sim_info):
object_manager = services.object_manager()
use_preferences = sim_info.autonomy_use_preferences
sim_id = sim_info.sim_id
for preference_tag in self.TAGS_TO_CONVERT:
if preference_tag in use_preferences:
old_object_id = use_preferences[preference_tag]
old_object = object_manager.get(old_object_id)
if old_object is not None:
del use_preferences[preference_tag]
subroot_index = None
zone_preference_data = self._zone_object_preference_datas[(services.current_zone_id(), preference_tag)]
if sim_id in zone_preference_data:
continue
for old_object_part in old_object.parts:
if old_object_part.restrict_autonomy_preference:
subroot_index = old_object_part.subroot_index
existing_sim_id = zone_preference_data.get_restricted_sim(old_object_id, subroot_index)
if existing_sim_id is None:
break
                    else:
                        if subroot_index is not None:
                            # all restricted parts are already claimed; skip this sim
                            continue
                    zone_preference_data.set_restriction(old_object_id, subroot_index, sim_id)
def convert_existing_preferences(self):
for sim_info in self._owner:
self.update_preference_if_possible(sim_info)
def save_data(self, household_msg):
household_msg.object_preference_tracker = GameplaySaveData_pb2.ObjectPreferenceTracker()
for (zone_preference_tuple, zone_preference_data) in self._zone_object_preference_datas.items():
with ProtocolBufferRollback(household_msg.object_preference_tracker.zone_preference_datas) as save_zone_data:
save_zone_data.zone_id = zone_preference_tuple[0]
save_zone_data.preference_tag = zone_preference_tuple[1]
for (sim_id, object_tuple) in zone_preference_data.items():
with ProtocolBufferRollback(save_zone_data.sim_preferences) as save_sim_preference:
(object_id, subroot_index) = object_tuple
save_sim_preference.sim_id = sim_id
save_sim_preference.object_id = object_id
if subroot_index is None:
save_sim_preference.subroot_index = -1
else:
save_sim_preference.subroot_index = subroot_index
def load_data(self, object_preference_msg, is_household=True):
for zone_data in object_preference_msg.zone_preference_datas:
            if is_household:
                zone_data_proto = services.get_persistence_service().get_zone_proto_buff(zone_data.zone_id)
                if zone_data_proto is not None and zone_data_proto.household_id != self._owner.id:
                    continue
            zone_specific_data = self._zone_object_preference_datas[(zone_data.zone_id, zone_data.preference_tag)]
            for sim_preference in zone_data.sim_preferences:
                subroot_index = None if sim_preference.subroot_index == -1 else sim_preference.subroot_index
                zone_specific_data.set_restriction(sim_preference.object_id, subroot_index, sim_preference.sim_id)
def validate_objects(self, zone_id):
roommate_service = services.get_roommate_service()
for (zone_preference_tuple, zone_preference_data) in self._zone_object_preference_datas.items():
if zone_preference_tuple[0] != zone_id:
continue
for bad_object_id in zone_preference_data.get_invalid_object_ids():
sim_ids = zone_preference_data.get_restricted_sims(bad_object_id)
if not sim_ids:
continue
zone_preference_data.clear_restriction(bad_object_id, None)
if roommate_service is not None:
for sim_id in sim_ids:
roommate_service.assign_bed(sim_id, avoid_id=bad_object_id)
|
[
"services.get_roommate_service",
"distributor.rollback.ProtocolBufferRollback",
"services.current_zone_id",
"protocolbuffers.GameplaySaveData_pb2.ObjectPreferenceTracker",
"collections.defaultdict",
"event_testing.resolver.SingleSimResolver",
"sims4.tuning.tunable.TunableEnumEntry",
"services.get_persistence_service",
"event_testing.tests.TunableTestSet",
"sims4.tuning.tunable.TunableEnumSet",
"services.object_manager",
"sims4.log.Logger"
] |
[((580, 641), 'sims4.log.Logger', 'sims4.log.Logger', (['"""ObjectPreference"""'], {'default_owner': '"""nabaker"""'}), "('ObjectPreference', default_owner='nabaker')\n", (596, 641), False, 'import sims4\n'), ((3741, 4055), 'sims4.tuning.tunable.TunableEnumSet', 'TunableEnumSet', ([], {'description': '"""\n        The tags that should automatically be converted from "use preference"\n        functionality to "use only" functionality on load.\n        """', 'enum_type': 'ObjectPreferenceTag', 'enum_default': 'ObjectPreferenceTag.INVALID', 'invalid_enums': '(ObjectPreferenceTag.INVALID,)'}), '(description=\n    """\n        The tags that should automatically be converted from "use preference"\n        functionality to "use only" functionality on load.\n        """\n    , enum_type=ObjectPreferenceTag, enum_default=ObjectPreferenceTag.\n    INVALID, invalid_enums=(ObjectPreferenceTag.INVALID,))\n', (3755, 4055), False, 'from sims4.tuning.tunable import TunableEnumSet, TunableMapping, TunableEnumEntry, TunableList, TunableReference\n'), ((738, 755), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (749, 755), False, 'from collections import defaultdict\n'), ((3416, 3441), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (3439, 3441), False, 'import services\n'), ((4700, 4746), 'collections.defaultdict', 'defaultdict', (['_ZoneSpecificObjectPreferenceData'], {}), '(_ZoneSpecificObjectPreferenceData)\n', (4711, 4746), False, 'from collections import defaultdict\n'), ((4877, 4923), 'collections.defaultdict', 'defaultdict', (['_ZoneSpecificObjectPreferenceData'], {}), '(_ZoneSpecificObjectPreferenceData)\n', (4888, 4923), False, 'from collections import defaultdict\n'), ((10994, 11020), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (11018, 11020), False, 'import services\n'), ((11490, 11515), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (11513, 11515), False, 'import services\n'), ((13179, 13225), 'protocolbuffers.GameplaySaveData_pb2.ObjectPreferenceTracker', 'GameplaySaveData_pb2.ObjectPreferenceTracker', ([], {}), '()\n', (13223, 13225), False, 'from protocolbuffers import GameplaySaveData_pb2\n'), ((15551, 15582), 'services.get_roommate_service', 'services.get_roommate_service', ([], {}), '()\n', (15580, 15582), False, 'import services\n'), ((4225, 4461), 'sims4.tuning.tunable.TunableEnumEntry', 'TunableEnumEntry', ([], {'description': '"""\n        The visual style of the balloon background.\n        """', 'tunable_type': 'ObjectPreferenceTag', 'default': 'ObjectPreferenceTag.INVALID', 'invalid_enums': '(ObjectPreferenceTag.INVALID,)'}), '(description=\n    """\n        The visual style of the balloon background.\n        """\n    , tunable_type=ObjectPreferenceTag, default=ObjectPreferenceTag.INVALID,\n    invalid_enums=(ObjectPreferenceTag.INVALID,))\n', (4241, 4461), False, 'from sims4.tuning.tunable import TunableEnumSet, TunableMapping, TunableEnumEntry, TunableList, TunableReference\n'), ((4458, 4597), 'event_testing.tests.TunableTestSet', 'TunableTestSet', ([], {'description': '"""\n        Set of tests that must be passed for sim to ignore "use only" status.\n        """'}), '(description=\n    """\n        Set of tests that must be passed for sim to ignore "use only" status.\n        """\n    )\n', (4472, 4597), False, 'from event_testing.tests import TunableTestSet\n'), ((5299, 5325), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (5323, 5325), False, 'import services\n'), ((5650, 5676), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (5674, 5676), False, 'import services\n'), ((6003, 6029), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (6027, 6029), False, 'import services\n'), ((6660, 6686), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (6684, 6686), False, 'import services\n'), ((3227, 3252), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (3250, 3252), False, 'import services\n'), ((7188, 7213), 'services.object_manager', 'services.object_manager', ([], {}), '()\n', (7211, 7213), False, 'import services\n'), ((8592, 8618), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (8616, 8618), False, 'import services\n'), ((9477, 9508), 'services.get_roommate_service', 'services.get_roommate_service', ([], {}), '()\n', (9506, 9508), False, 'import services\n'), ((9801, 9827), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (9825, 9827), False, 'import services\n'), ((10273, 10299), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (10297, 10299), False, 'import services\n'), ((10765, 10796), 'services.get_roommate_service', 'services.get_roommate_service', ([], {}), '()\n', (10794, 10796), False, 'import services\n'), ((13348, 13438), 'distributor.rollback.ProtocolBufferRollback', 'ProtocolBufferRollback', (['household_msg.object_preference_tracker.\n zone_preference_datas'], {}), '(household_msg.object_preference_tracker.\n zone_preference_datas)\n', (13370, 13438), False, 'from distributor.rollback import ProtocolBufferRollback\n'), ((5174, 5201), 'event_testing.resolver.SingleSimResolver', 'SingleSimResolver', (['sim_info'], {}), '(sim_info)\n', (5191, 5201), False, 'from event_testing.resolver import SingleSimResolver\n'), ((13693, 13747), 'distributor.rollback.ProtocolBufferRollback', 'ProtocolBufferRollback', (['save_zone_data.sim_preferences'], {}), '(save_zone_data.sim_preferences)\n', (13715, 13747), False, 'from distributor.rollback import ProtocolBufferRollback\n'), ((14390, 14424), 'services.get_persistence_service', 'services.get_persistence_service', ([], {}), '()\n', (14422, 14424), False, 'import services\n'), ((12057, 12083), 'services.current_zone_id', 'services.current_zone_id', ([], {}), '()\n', (12081, 12083), False, 'import services\n')]
|