code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
from django.urls import reverse
from rest_framework import status
from conf_site.api.tests import ConferenceSiteAPITestCase
class ConferenceSiteAPIConferenceTestCase(ConferenceSiteAPITestCase):
def test_conference_api_anonymous_user(self):
response = self.client.get(reverse("conference-detail"))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
"title": self.conference.title,
"start_date": self.conference.start_date.strftime("%Y-%m-%d"),
"end_date": self.conference.end_date.strftime("%Y-%m-%d"),
})
| [
"django.urls.reverse"
] | [((283, 311), 'django.urls.reverse', 'reverse', (['"""conference-detail"""'], {}), "('conference-detail')\n", (290, 311), False, 'from django.urls import reverse\n')] |
import asyncio
from django.core.management.base import BaseCommand
from Harvest.utils import get_logger
from task_queue.scheduler import QueueScheduler
logger = get_logger(__name__)
class Command(BaseCommand):
help = "Run the queue consumer"
def handle(self, *args, **options):
QueueScheduler().run()
| [
"Harvest.utils.get_logger",
"task_queue.scheduler.QueueScheduler"
] | [((164, 184), 'Harvest.utils.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (174, 184), False, 'from Harvest.utils import get_logger\n'), ((300, 316), 'task_queue.scheduler.QueueScheduler', 'QueueScheduler', ([], {}), '()\n', (314, 316), False, 'from task_queue.scheduler import QueueScheduler\n')] |
# Generated by Django 3.0.10 on 2020-09-10 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0003_user_mobile_phone'),
]
operations = [
migrations.AlterField(
model_name='user',
name='mobile_phone',
field=models.CharField(blank=True, default='', max_length=255, verbose_name='Mobile phone number'),
preserve_default=False,
),
]
| [
"django.db.models.CharField"
] | [((338, 435), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '""""""', 'max_length': '(255)', 'verbose_name': '"""Mobile phone number"""'}), "(blank=True, default='', max_length=255, verbose_name=\n 'Mobile phone number')\n", (354, 435), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
# @Time : 5/31/2018 9:20 PM
# @Author : sunyonghai
# @File : test.py
# @Software: ZJ_AI
from multiprocessing import Pool, Lock, Value
import os
tests_count = 80
lock = Lock()
counter = Value('i', 0) # int type,相当于java里面的原子变量
def run(fn):
global tests_count, lock, counter
with lock:
counter.value += 1
print( 'NO. (%d/%d) test start. PID: %d ' % (counter.value, tests_count, os.getpid()))
# do something below ...
if __name__ == "__main__":
pool = Pool(4)
# 80个任务,会运行run()80次,每次传入xrange数组一个元素
pool.map(run, range(80))
pool.close()
pool.join() | [
"multiprocessing.Lock",
"os.getpid",
"multiprocessing.Value",
"multiprocessing.Pool"
] | [((202, 208), 'multiprocessing.Lock', 'Lock', ([], {}), '()\n', (206, 208), False, 'from multiprocessing import Pool, Lock, Value\n'), ((220, 233), 'multiprocessing.Value', 'Value', (['"""i"""', '(0)'], {}), "('i', 0)\n", (225, 233), False, 'from multiprocessing import Pool, Lock, Value\n'), ((517, 524), 'multiprocessing.Pool', 'Pool', (['(4)'], {}), '(4)\n', (521, 524), False, 'from multiprocessing import Pool, Lock, Value\n'), ((434, 445), 'os.getpid', 'os.getpid', ([], {}), '()\n', (443, 445), False, 'import os\n')] |
import turtle
tortuguinha = turtle.Turtle()
tortuguinha.shape('turtle')
tortuguinha.color('red')
tortugo = turtle.Turtle()
tortugo.shape('turtle')
tortugo.color('blue')
def faz_quadradin(the_turtle):
for i in range(0,4):
the_turtle.forward(100)
the_turtle.right(90)
def faz_espiral(the_turtle):
for i in range(0,36):
faz_quadradin(the_turtle)
the_turtle.right(10)
faz_espiral(tortuguinha)
tortugo.right(5)
faz_espiral(tortugo)
| [
"turtle.Turtle"
] | [((30, 45), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (43, 45), False, 'import turtle\n'), ((109, 124), 'turtle.Turtle', 'turtle.Turtle', ([], {}), '()\n', (122, 124), False, 'import turtle\n')] |
# Copyright (c) 2015 Ericsson AB.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from kingbird.objects import base as obj_base
from kingbird.tests import base
from oslo_versionedobjects import fields as obj_fields
class TestBaseObject(base.KingbirdTestCase):
def test_base_class(self):
obj = obj_base.KingbirdObject()
self.assertEqual(obj_base.KingbirdObject.OBJ_PROJECT_NAMESPACE,
obj.OBJ_PROJECT_NAMESPACE)
self.assertEqual(obj_base.KingbirdObject.VERSION,
obj.VERSION)
@mock.patch.object(obj_base.KingbirdObject, "obj_reset_changes")
def test_from_db_object(self, mock_obj_reset_ch):
class TestKingbirdObject(obj_base.KingbirdObject,
obj_base.VersionedObjectDictCompat):
fields = {
"key1": obj_fields.StringField(),
"key2": obj_fields.StringField(),
}
obj = TestKingbirdObject()
context = mock.Mock()
db_obj = {
"key1": "value1",
"key2": "value2",
}
res = obj_base.KingbirdObject._from_db_object(context, obj, db_obj)
self.assertIsNotNone(res)
self.assertEqual("value1", obj["key1"])
self.assertEqual("value2", obj["key2"])
self.assertEqual(obj._context, context)
mock_obj_reset_ch.assert_called_once_with()
def test_from_db_object_none(self):
obj = obj_base.KingbirdObject()
db_obj = None
context = mock.Mock()
res = obj_base.KingbirdObject._from_db_object(context, obj, db_obj)
self.assertIsNone(res)
| [
"mock.Mock",
"kingbird.objects.base.KingbirdObject",
"kingbird.objects.base.KingbirdObject._from_db_object",
"mock.patch.object",
"oslo_versionedobjects.fields.StringField"
] | [((1099, 1162), 'mock.patch.object', 'mock.patch.object', (['obj_base.KingbirdObject', '"""obj_reset_changes"""'], {}), "(obj_base.KingbirdObject, 'obj_reset_changes')\n", (1116, 1162), False, 'import mock\n'), ((847, 872), 'kingbird.objects.base.KingbirdObject', 'obj_base.KingbirdObject', ([], {}), '()\n', (870, 872), True, 'from kingbird.objects import base as obj_base\n'), ((1536, 1547), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (1545, 1547), False, 'import mock\n'), ((1651, 1712), 'kingbird.objects.base.KingbirdObject._from_db_object', 'obj_base.KingbirdObject._from_db_object', (['context', 'obj', 'db_obj'], {}), '(context, obj, db_obj)\n', (1690, 1712), True, 'from kingbird.objects import base as obj_base\n'), ((1998, 2023), 'kingbird.objects.base.KingbirdObject', 'obj_base.KingbirdObject', ([], {}), '()\n', (2021, 2023), True, 'from kingbird.objects import base as obj_base\n'), ((2064, 2075), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (2073, 2075), False, 'import mock\n'), ((2091, 2152), 'kingbird.objects.base.KingbirdObject._from_db_object', 'obj_base.KingbirdObject._from_db_object', (['context', 'obj', 'db_obj'], {}), '(context, obj, db_obj)\n', (2130, 2152), True, 'from kingbird.objects import base as obj_base\n'), ((1392, 1416), 'oslo_versionedobjects.fields.StringField', 'obj_fields.StringField', ([], {}), '()\n', (1414, 1416), True, 'from oslo_versionedobjects import fields as obj_fields\n'), ((1442, 1466), 'oslo_versionedobjects.fields.StringField', 'obj_fields.StringField', ([], {}), '()\n', (1464, 1466), True, 'from oslo_versionedobjects import fields as obj_fields\n')] |
import tempfile
from abc import ABC, abstractmethod
from time import sleep, time
from hardware.camera import Photo, Resolution
class CameraDriver(ABC):
@abstractmethod
def capture(self) -> Photo:
pass
class PiCameraDriver(CameraDriver):
def __init__(self, resolution=Resolution(1024, 768), iso=300):
from picamera import PiCamera
self.resolution = resolution
self.iso = iso
self._camera = PiCamera(resolution=resolution)
self._camera.iso = iso
sleep(2)
print('Camera ready!')
def capture(self) -> Photo:
origin = tempfile.NamedTemporaryFile(mode="w+t", suffix='.jpg')
self._camera.capture(origin.name)
photo = Photo(origin.name, resolution=self.resolution, iso=self.iso)
return photo.resize(ratio=1)
| [
"hardware.camera.Resolution",
"hardware.camera.Photo",
"picamera.PiCamera",
"time.sleep",
"tempfile.NamedTemporaryFile"
] | [((294, 315), 'hardware.camera.Resolution', 'Resolution', (['(1024)', '(768)'], {}), '(1024, 768)\n', (304, 315), False, 'from hardware.camera import Photo, Resolution\n'), ((449, 480), 'picamera.PiCamera', 'PiCamera', ([], {'resolution': 'resolution'}), '(resolution=resolution)\n', (457, 480), False, 'from picamera import PiCamera\n'), ((520, 528), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (525, 528), False, 'from time import sleep, time\n'), ((611, 665), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""w+t"""', 'suffix': '""".jpg"""'}), "(mode='w+t', suffix='.jpg')\n", (638, 665), False, 'import tempfile\n'), ((726, 786), 'hardware.camera.Photo', 'Photo', (['origin.name'], {'resolution': 'self.resolution', 'iso': 'self.iso'}), '(origin.name, resolution=self.resolution, iso=self.iso)\n', (731, 786), False, 'from hardware.camera import Photo, Resolution\n')] |
from tests.common.devices.base import AnsibleHostBase
class VMHost(AnsibleHostBase):
"""
@summary: Class for VM server
For running ansible module on VM server
"""
def __init__(self, ansible_adhoc, hostname):
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
@property
def external_port(self):
if not hasattr(self, "_external_port"):
vm = self.host.options["variable_manager"]
im = self.host.options["inventory_manager"]
hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
setattr(self, "_external_port", hostvars["external_port"])
return getattr(self, "_external_port")
| [
"tests.common.devices.base.AnsibleHostBase.__init__"
] | [((240, 295), 'tests.common.devices.base.AnsibleHostBase.__init__', 'AnsibleHostBase.__init__', (['self', 'ansible_adhoc', 'hostname'], {}), '(self, ansible_adhoc, hostname)\n', (264, 295), False, 'from tests.common.devices.base import AnsibleHostBase\n')] |
"""Control the sc2monitor."""
import asyncio
import logging
import math
import time
from datetime import datetime, timedelta
from operator import itemgetter
import aiohttp
import sc2monitor.model as model
from sc2monitor.handlers import SQLAlchemyHandler
from sc2monitor.sc2api import SC2API
logger = logging.getLogger(__name__)
sql_logger = logging.getLogger()
class Controller:
"""Control the sc2monitor."""
def __init__(self, **kwargs):
"""Init the sc2monitor."""
self.kwargs = kwargs
self.sc2api = None
self.db_session = None
self.current_season = {}
async def __aenter__(self):
"""Create a aiohttp and db session that will later be closed."""
headers = {'Accept-Encoding': 'gzip, deflate'}
self.http_session = aiohttp.ClientSession(headers=headers)
self.create_db_session()
return self
def create_db_session(self):
"""Create sqlalchemy database session."""
self.db_session = model.create_db_session(
db=self.kwargs.pop('db', ''),
encoding=self.kwargs.pop('encoding', ''))
self.handler = SQLAlchemyHandler(self.db_session)
self.handler.setLevel(logging.INFO)
sql_logger.setLevel(logging.INFO)
sql_logger.addHandler(self.handler)
if len(self.kwargs) > 0:
self.setup(**self.kwargs)
self.sc2api = SC2API(self)
self.cache_matches = self.get_config(
'cache_matches',
default_value=1000)
self.cache_logs = self.get_config(
'cache_logs',
default_value=500)
self.cache_runs = self.get_config(
'cache_runs',
default_value=500)
self.analyze_matches = self.get_config(
'analyze_matches',
default_value=100)
async def __aexit__(self, exc_type, exc, tb):
"""Close all aiohtto and database session."""
await self.http_session.close()
self.db_session.commit()
self.db_session.close()
self.db_session = None
def get_config(self, key, default_value=None,
raise_key_error=True,
return_object=False):
"""Read a config value from database."""
if default_value is not None:
raise_key_error = False
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
if raise_key_error:
raise ValueError(f'Unknown config key "{key}"')
else:
if return_object:
return None
else:
return '' if default_value is None else default_value
else:
if return_object:
return entry
else:
return entry.value
def set_config(self, key, value, commit=True):
"""Save a config value to the database."""
entry = self.db_session.query(
model.Config).filter(model.Config.key == key).scalar()
if not entry:
self.db_session.add(model.Config(key=key, value=value))
else:
entry.value = value
if commit:
self.db_session.commit()
def setup(self, **kwargs):
"""Set up the sc2monitor with api-key and api-secret."""
valid_keys = ['api_key', 'api_secret',
'cache_matches', 'analyze_matches']
for key, value in kwargs.items():
if key not in valid_keys:
raise ValueError(
f"Invalid configuration key '{key}'"
f" (valid keys: {', '.join(valid_keys)})")
self.set_config(key, value, commit=False)
self.db_session.commit()
if self.sc2api:
self.sc2api.read_config()
def add_player(self, url, race=model.Race['Random']):
"""Add a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
count = self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).count()
if count == 0:
new_player = model.Player(
realm=realm,
player_id=player_id,
server=server,
race=race)
self.db_session.add(new_player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
def remove_player(self, url):
"""Remove a player by url to the sc2monitor."""
close_db = False
if self.db_session is None:
self.create_db_session()
close_db = True
server, realm, player_id = self.sc2api.parse_profile_url(url)
for player in self.db_session.query(model.Player).filter(
model.Player.realm == realm,
model.Player.player_id == player_id,
model.Player.server == server).all():
self.db_session.delete(player)
self.db_session.commit()
if close_db:
self.db_session.close()
self.db_session = None
async def update_season(self, server: model.Server):
"""Update info about the current season in the database."""
current_season = await self.sc2api.get_season(server)
season = self.db_session.query(model.Season).\
filter(model.Season.server == server).\
order_by(model.Season.season_id.desc()).\
limit(1).scalar()
if not season or current_season.season_id != season.season_id:
self.db_session.add(current_season)
self.db_session.commit()
self.db_session.refresh(current_season)
logger.info(f'Found a new ladder season: {current_season}')
return current_season
else:
season.start = current_season.start
season.end = current_season.end
season.year = current_season.year
season.number = current_season.number
self.db_session.commit()
return season
async def update_seasons(self):
"""Update seasons info for all servers."""
servers = [server[0] for server in self.db_session.query(
model.Player.server).distinct()]
tasks = []
for server in servers:
tasks.append(asyncio.create_task(self.update_season(server)))
for season in await asyncio.gather(*tasks, return_exceptions=True):
try:
if isinstance(season, model.Season):
self.current_season[season.server.id()] = season
else:
raise season
except Exception:
logger.exception(
('The following exception was'
' raised while updating seasons:'))
async def query_player(self, player: model.Player):
"""Collect api data of a player."""
complete_data = []
for ladder in await self.sc2api.get_ladders(player):
async for data in self.sc2api.get_ladder_data(player, ladder):
current_player = await self.get_player_with_race(player, data)
missing_games, new = self.count_missing_games(
current_player, data)
if missing_games['Total'] > 0:
complete_data.append({'player': current_player,
'new_data': data,
'missing': missing_games,
'Win': 0,
'Loss': 0})
if len(complete_data) > 0:
await self.process_player(complete_data, new)
elif (not player.name
or not isinstance(player.refreshed, datetime)
or player.refreshed <= datetime.now() - timedelta(days=1)):
await self.update_player_name(player)
async def update_player_name(self, player: model.Player, name=''):
"""Update the name of a player from api data."""
if not name:
metadata = await self.sc2api.get_metadata(player)
name = metadata['name']
for tmp_player in self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.name != name).all():
logger.info(f"{tmp_player.id}: Updating name to '{name}'")
tmp_player.name = name
self.db_session.commit()
async def check_match_history(self, complete_data):
"""Check matches in match history and assign them to races."""
match_history = await self.sc2api.get_match_history(
complete_data[0]['player'])
for match in match_history:
positive = []
for data_key, data in enumerate(complete_data):
needed = data['missing'].get(match['result'].describe(), 0) > 0
try:
datetime_check = (match['datetime']
- data['player'].last_played
> timedelta(seconds=0))
except TypeError:
datetime_check = True
if (needed and datetime_check):
positive.append(data_key)
if len(positive) == 0:
continue
elif len(positive) >= 1:
# Choose the race with most missing results.
max_missing = 0
for key in positive:
tmp_missing = complete_data[key][
'missing'][match['result'].describe()]
if tmp_missing > max_missing:
data_key = key
max_missing = tmp_missing
complete_data[data_key][
'missing'][match['result'].describe()] -= 1
complete_data[data_key][match['result'].describe()] += 1
try:
complete_data[data_key]['games'].insert(0, match)
except KeyError:
complete_data[data_key]['games'] = [match]
try:
last_played = match['datetime']
except Exception:
last_played = datetime.now()
return last_played, len(match_history)
async def process_player(self, complete_data, new=False):
"""Process the api data of a player."""
last_played, len_history \
= await self.check_match_history(complete_data)
for race_player in complete_data:
race_player['missing']['Total'] = race_player['missing']['Win'] + \
race_player['missing']['Loss']
if race_player['missing']['Total'] > 0:
if new:
logger.info(
f"{race_player['player'].id}: Ignoring "
f"{race_player['missing']['Total']} games missing in"
f" match history ({len_history}) "
"of new player.")
else:
self.guess_games(race_player, last_played)
self.guess_mmr_changes(race_player)
await self.update_player(race_player)
self.calc_statistics(race_player['player'])
async def update_player(self, complete_data):
"""Update database with new data of a player."""
player = complete_data['player']
new_data = complete_data['new_data']
player.mmr = new_data['mmr']
player.ladder_id = new_data['ladder_id']
player.league = new_data['league']
player.ladder_joined = new_data['joined']
player.wins = new_data['wins']
player.losses = new_data['losses']
player.last_active_season = self.get_season_id(player.server)
if player.name != new_data['name']:
await self.update_player_name(
player,
new_data['name'])
if (not player.last_played
or player.ladder_joined
> player.last_played):
player.last_played = player.ladder_joined
self.db_session.commit()
def calc_statistics(self, player: model.Player):
"""Recalculate player statistics."""
self.db_session.refresh(player)
if not player.statistics:
stats = model.Statistics(player=player)
self.db_session.add(stats)
self.db_session.commit()
self.db_session.refresh(stats)
else:
stats = player.statistics
matches = self.db_session.query(model.Match).filter(
model.Match.player_id == player.id).order_by(
model.Match.datetime.desc()).limit(self.analyze_matches).all()
stats.games_available = len(matches)
wma_mmr_denominator = stats.games_available * \
(stats.games_available + 1.0) / 2.0
stats.max_mmr = player.mmr
stats.min_mmr = player.mmr
stats.current_mmr = player.mmr
wma_mmr = 0.0
expected_mmr_value = 0.0
expected_mmr_value2 = 0.0
current_wining_streak = 0
current_losing_streak = 0
for idx, match in enumerate(matches):
if match.result == model.Result.Win:
stats.wins += 1
current_wining_streak += 1
current_losing_streak = 0
if current_wining_streak > stats.longest_wining_streak:
stats.longest_wining_streak = current_wining_streak
elif match.result == model.Result.Loss:
stats.losses += 1
current_losing_streak += 1
current_wining_streak = 0
if current_losing_streak > stats.longest_losing_streak:
stats.longest_losing_streak = current_losing_streak
if match.max_length <= 120:
stats.instant_left_games += 1
if match.guess:
stats.guessed_games += 1
mmr = match.mmr
wma_mmr += mmr * \
(stats.games_available - idx) / wma_mmr_denominator
if stats.max_mmr < mmr:
stats.max_mmr = mmr
if stats.min_mmr > mmr:
stats.min_mmr = mmr
expected_mmr_value += mmr / stats.games_available
expected_mmr_value2 += mmr * (mmr / stats.games_available)
if stats.games_available <= 1:
stats.lr_mmr_slope = 0.0
stats.lr_mmr_intercept = expected_mmr_value
else:
ybar = expected_mmr_value
xbar = -0.5 * (stats.games_available - 1)
numerator = 0
denominator = 0
for x, match in enumerate(matches):
x = -x
y = match.mmr
numerator += (x - xbar) * (y - ybar)
denominator += (x - xbar) * (x - xbar)
stats.lr_mmr_slope = numerator / denominator
stats.lr_mmr_intercept = ybar - stats.lr_mmr_slope * xbar
stats.sd_mmr = round(
math.sqrt(expected_mmr_value2
- expected_mmr_value
* expected_mmr_value))
# critical_idx = min(self.controller.config['no_critical_games'],
# stats.games_available) - 1
# stats.critical_game_played = matches[critical_idx]["played"]
stats.avg_mmr = expected_mmr_value
stats.wma_mmr = wma_mmr
self.db_session.commit()
@classmethod
def guess_games(cls, complete_data, last_played):
"""Guess games of a player if missing in match history."""
# If a player isn't new in the database and has played more
# than 25 games since the last refresh or the match
# history is not available for this player, there are
# missing games in the match history. These are guessed to be very
# close to the last game of the match history and in alternating
# order.
player = complete_data['player']
if 'games' not in complete_data:
complete_data['games'] = []
logger.info((
"{}: {} missing games in match "
+ "history - more guessing!").format(
player.id, complete_data['missing']['Total']))
try:
delta = (last_played - player.last_played) / \
complete_data['missing']['Total']
except Exception:
delta = timedelta(minutes=3)
if delta > timedelta(minutes=3):
delta = timedelta(minutes=3)
if delta.total_seconds() <= 0:
last_played = datetime.now()
delta = timedelta(minutes=3)
while (complete_data['missing']['Win'] > 0
or complete_data['missing']['Loss'] > 0):
if complete_data['missing']['Win'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if (complete_data['missing']['Win'] > 0
and complete_data['missing']['Win']
> complete_data['missing']['Loss']):
# If there are more wins than losses add
# a second win before the next loss.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Win})
complete_data['missing']['Win'] -= 1
complete_data['Win'] += 1
if complete_data['missing']['Loss'] > 0:
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
if (complete_data['missing']['Loss'] > 0
and complete_data['missing']['Win']
< complete_data['missing']['Loss']):
# If there are more losses than wins add second loss before
# the next win.
last_played = last_played - delta
complete_data['games'].append(
{'datetime': last_played, 'result': model.Result.Loss})
complete_data['missing']['Loss'] -= 1
complete_data['Loss'] += 1
def guess_mmr_changes(self, complete_data):
"""Guess MMR change of matches."""
MMR = complete_data['player'].mmr
if MMR is None:
MMR = 0
totalMMRchange = complete_data['new_data']['mmr'] - MMR
wins = complete_data['Win']
losses = complete_data['Loss']
complete_data['games'] = sorted(
complete_data.get('games', []), key=itemgetter('datetime'))
logger.info('{}: Adding {} wins and {} losses!'.format(
complete_data['player'].id, wins, losses))
if wins + losses <= 0:
# No games to guess
return
# Estimate MMR change to be +/-21 for a win and losse, each adjusted
# by the average deviation to achive the most recent MMR value.
# Is 21 accurate? Yes, as the empirical avrage MMR change is 20.9016
# according to data gathered by this tool.
if wins + losses == 1 and MMR != 0:
MMRchange = abs(totalMMRchange)
else:
MMRchange = 21
if MMR == 0:
totalMMRchange = MMRchange * (wins - losses)
MMR = complete_data['new_data']['mmr'] - totalMMRchange
while True:
avgMMRadjustment = (totalMMRchange - MMRchange
* (wins - losses)) / (wins + losses)
# Make sure that sign of MMR change is correct
if abs(avgMMRadjustment) >= MMRchange and MMRchange <= 50:
MMRchange += 1
logger.info(f"{complete_data['player'].id}:"
f" Adjusting avg. MMR change to {MMRchange}")
else:
break
last_played = complete_data['player'].last_played
previous_match = self.db_session.query(model.Match).\
filter(model.Match.player_id
== complete_data['player'].id).\
order_by(model.Match.datetime.desc()).limit(1).scalar()
# Warning breaks Travis CI
# if not previous_match:
# logger.warning('{}: No previous match found.'.format(
# complete_data['player'].id))
for idx, match in enumerate(complete_data['games']):
estMMRchange = round(
MMRchange * match['result'].change() + avgMMRadjustment)
MMR = MMR + estMMRchange
try:
delta = match['datetime'] - last_played
except Exception:
delta = timedelta(minutes=3)
last_played = match['datetime']
max_length = delta.total_seconds()
# Don't mark the most recent game as guess, as time and mmr value
# should be accurate (but not mmr change).
guess = not (idx + 1 == len(complete_data['games']))
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = MMR - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = MMR
emvar_mmr = 0.0
new_match = model.Match(
player=complete_data['player'],
result=match['result'],
datetime=match['datetime'],
mmr=MMR,
mmr_change=estMMRchange,
guess=guess,
ema_mmr=ema_mmr,
emvar_mmr=emvar_mmr,
max_length=max_length)
complete_data['player'].last_played = match['datetime']
self.db_session.add(new_match)
previous_match = new_match
self.db_session.commit()
# Delete old matches:
deletions = 0
for match in self.db_session.query(model.Match).\
filter(model.Match.player_id == complete_data['player'].id).\
order_by(model.Match.datetime.desc()).\
offset(self.cache_matches).all():
self.db_session.delete(match)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{complete_data['player'].id}: "
f"{deletions} matches deleted!")
def update_ema_mmr(self, player: model.Player):
"""Update the exponential moving avarage MMR of a player."""
matches = self.db_session.query(model.Match).\
filter(model.Match.player == player).\
order_by(model.Match.datetime.asc()).all()
previous_match = None
for match in matches:
alpha = 2.0 / (100.0 + 1.0)
if previous_match and previous_match.ema_mmr > 0.0:
delta = match.mmr - previous_match.ema_mmr
ema_mmr = previous_match.ema_mmr + alpha * delta
emvar_mmr = (1.0 - alpha) * \
(previous_match.emvar_mmr + alpha * delta * delta)
else:
ema_mmr = match.mmr
emvar_mmr = 0.0
match.ema_mmr = ema_mmr
match.emvar_mmr = emvar_mmr
previous_match = match
self.db_session.commit()
def get_season_id(self, server: model.Server):
"""Get the current season id on a server."""
return self.current_season[server.id()].season_id
def count_missing_games(self, player: model.Player, data):
"""Count games of the api data that are not yet in the database."""
missing = {}
missing['Win'] = data['wins']
missing['Loss'] = data['losses']
if player.last_active_season == 0 or player.mmr == 0:
new = True
elif (player.last_active_season < self.get_season_id(player.server)):
# New Season!
# TODO: Check if last season endpoint can be requested!
# Only the legacy endpoints give the option to query the
# previous season's data (given that the ladder ID is
# known), e.g.:
# https://eu.api.blizzard.com/sc2/legacy/ladder/2/209966
new = False
elif (player.ladder_id != data['ladder_id']
or not player.ladder_joined
or player.ladder_joined < data['joined']
or data['wins'] < player.wins
or data['losses'] < player.losses):
# Old season, but new ladder or same ladder, but rejoined
if (data['wins'] < player.wins
or data['losses'] < player.losses):
# Forced ladder reset!
logger.info('{}: Manual ladder reset to {}!'.format(
player.id, data['ladder_id']))
new = True
else:
# Promotion?!
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
if missing['Win'] + missing['Loss'] == 0:
# Player was promoted/demoted to/from GM!
promotion = data['league'] == model.League.Grandmaster
demotion = player.league == model.League.Grandmaster
if promotion == demotion:
logger.warning(
'Logical error in GM promotion/'
'demotion detection.')
player.ladder_joined = data['joined']
player.ladder_id = data['ladder_id']
player.league = data['league']
self.db_session.commit()
logger.info(f"{player.id}: GM promotion/demotion.")
else:
if data['league'] < player.league:
logger.warning('Logical error in promtion detection.')
else:
logger.info(f"{player.id}: Promotion "
f"to ladder {data['ladder_id']}!")
else:
missing['Win'] -= player.wins
missing['Loss'] -= player.losses
new = player.mmr == 0
missing['Total'] = missing['Win'] + missing['Loss']
if (missing['Total']) > 0:
logger.info(
'{player}: {Total} new matches found!'.format(
player=player.id, **missing))
return missing, new
async def get_player_with_race(self, player, ladder_data):
"""Get the player with the race present in the ladder data."""
if player.ladder_id == 0:
player.race = ladder_data['race']
correct_player = player
elif player.race != ladder_data['race']:
correct_player = self.db_session.query(model.Player).filter(
model.Player.player_id == player.player_id,
model.Player.realm == player.realm,
model.Player.server == player.server,
model.Player.race == ladder_data['race']).scalar()
if not correct_player:
correct_player = model.Player(
player_id=player.player_id,
realm=player.realm,
server=player.server,
race=ladder_data['race'],
ladder_id=0)
self.db_session.add(correct_player)
self.db_session.commit()
self.db_session.refresh(correct_player)
else:
correct_player = player
return correct_player
def delete_old_logs_and_runs(self):
""" Delete old logs and runs from database."""
deletions = 0
for log_entry in self.db_session.query(model.Log).\
order_by(model.Log.datetime.desc()).\
offset(self.cache_logs).all():
self.db_session.delete(log_entry)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old log entries were deleted!")
deletions = 0
for run in self.db_session.query(model.Run).\
order_by(model.Run.datetime.desc()).\
offset(self.cache_runs).all():
self.db_session.delete(run)
deletions += 1
if deletions > 0:
self.db_session.commit()
logger.info(f"{deletions} old run logs were deleted!")
    async def run(self):
        """Run the sc2monitor.

        One monitoring pass: refresh season data, query every distinct
        player concurrently, prune old logs/runs and record run statistics.
        """
        start_time = time.time()
        logger.debug("Starting job...")
        await self.update_seasons()
        # A player may have several rows (one per race); query each account
        # (player_id, realm, server) only once.
        unique_group = (model.Player.player_id,
                        model.Player.realm, model.Player.server)
        tasks = []
        players = self.db_session.query(model.Player).distinct(
            *unique_group).group_by(*unique_group).all()
        for player in players:
            tasks.append(asyncio.create_task(self.query_player(player)))
        # return_exceptions=True: one failing player must not abort the rest.
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for key, result in enumerate(results):
            try:
                if result is not None:
                    # Re-raise so logger.exception captures the traceback.
                    raise result
            except Exception:
                logger.exception(
                    'The following exception was'
                    f' raised while quering player {players[key].id}:')
        self.delete_old_logs_and_runs()
        duration = time.time() - start_time
        # Persist run statistics for later inspection.
        self.db_session.add(
            model.Run(duration=duration,
                      api_requests=self.sc2api.request_count,
                      api_retries=self.sc2api.retry_count,
                      warnings=self.handler.warnings,
                      errors=self.handler.errors))
        self.db_session.commit()
        logger.debug(f"Finished job performing {self.sc2api.request_count}"
                     f" api requests ({self.sc2api.retry_count} retries)"
                     f" in {duration:.2f} seconds.")
| [
"logging.getLogger",
"sc2monitor.sc2api.SC2API",
"sc2monitor.model.Statistics",
"math.sqrt",
"operator.itemgetter",
"datetime.timedelta",
"sc2monitor.model.Match.datetime.asc",
"sc2monitor.model.Run",
"sc2monitor.model.Match",
"asyncio.gather",
"sc2monitor.model.Log.datetime.desc",
"sc2monitor... | [((304, 331), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (321, 331), False, 'import logging\n'), ((345, 364), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (362, 364), False, 'import logging\n'), ((798, 836), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {'headers': 'headers'}), '(headers=headers)\n', (819, 836), False, 'import aiohttp\n'), ((1144, 1178), 'sc2monitor.handlers.SQLAlchemyHandler', 'SQLAlchemyHandler', (['self.db_session'], {}), '(self.db_session)\n', (1161, 1178), False, 'from sc2monitor.handlers import SQLAlchemyHandler\n'), ((1403, 1415), 'sc2monitor.sc2api.SC2API', 'SC2API', (['self'], {}), '(self)\n', (1409, 1415), False, 'from sc2monitor.sc2api import SC2API\n'), ((29401, 29412), 'time.time', 'time.time', ([], {}), '()\n', (29410, 29412), False, 'import time\n'), ((4403, 4475), 'sc2monitor.model.Player', 'model.Player', ([], {'realm': 'realm', 'player_id': 'player_id', 'server': 'server', 'race': 'race'}), '(realm=realm, player_id=player_id, server=server, race=race)\n', (4415, 4475), True, 'import sc2monitor.model as model\n'), ((6704, 6750), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (6718, 6750), False, 'import asyncio\n'), ((12762, 12793), 'sc2monitor.model.Statistics', 'model.Statistics', ([], {'player': 'player'}), '(player=player)\n', (12778, 12793), True, 'import sc2monitor.model as model\n'), ((15490, 15562), 'math.sqrt', 'math.sqrt', (['(expected_mmr_value2 - expected_mmr_value * expected_mmr_value)'], {}), '(expected_mmr_value2 - expected_mmr_value * expected_mmr_value)\n', (15499, 15562), False, 'import math\n'), ((16920, 16940), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (16929, 16940), False, 'from datetime import datetime, timedelta\n'), ((16962, 16982), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (16971, 16982), 
False, 'from datetime import datetime, timedelta\n'), ((17049, 17063), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (17061, 17063), False, 'from datetime import datetime, timedelta\n'), ((17084, 17104), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (17093, 17104), False, 'from datetime import datetime, timedelta\n'), ((22172, 22384), 'sc2monitor.model.Match', 'model.Match', ([], {'player': "complete_data['player']", 'result': "match['result']", 'datetime': "match['datetime']", 'mmr': 'MMR', 'mmr_change': 'estMMRchange', 'guess': 'guess', 'ema_mmr': 'ema_mmr', 'emvar_mmr': 'emvar_mmr', 'max_length': 'max_length'}), "(player=complete_data['player'], result=match['result'],\n datetime=match['datetime'], mmr=MMR, mmr_change=estMMRchange, guess=\n guess, ema_mmr=ema_mmr, emvar_mmr=emvar_mmr, max_length=max_length)\n", (22183, 22384), True, 'import sc2monitor.model as model\n'), ((29874, 29920), 'asyncio.gather', 'asyncio.gather', (['*tasks'], {'return_exceptions': '(True)'}), '(*tasks, return_exceptions=True)\n', (29888, 29920), False, 'import asyncio\n'), ((30304, 30315), 'time.time', 'time.time', ([], {}), '()\n', (30313, 30315), False, 'import time\n'), ((30370, 30543), 'sc2monitor.model.Run', 'model.Run', ([], {'duration': 'duration', 'api_requests': 'self.sc2api.request_count', 'api_retries': 'self.sc2api.retry_count', 'warnings': 'self.handler.warnings', 'errors': 'self.handler.errors'}), '(duration=duration, api_requests=self.sc2api.request_count,\n api_retries=self.sc2api.retry_count, warnings=self.handler.warnings,\n errors=self.handler.errors)\n', (30379, 30543), True, 'import sc2monitor.model as model\n'), ((3123, 3157), 'sc2monitor.model.Config', 'model.Config', ([], {'key': 'key', 'value': 'value'}), '(key=key, value=value)\n', (3135, 3157), True, 'import sc2monitor.model as model\n'), ((10667, 10681), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (10679, 10681), False, 'from datetime import 
datetime, timedelta\n'), ((16879, 16899), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (16888, 16899), False, 'from datetime import datetime, timedelta\n'), ((19352, 19374), 'operator.itemgetter', 'itemgetter', (['"""datetime"""'], {}), "('datetime')\n", (19362, 19374), False, 'from operator import itemgetter\n'), ((21418, 21438), 'datetime.timedelta', 'timedelta', ([], {'minutes': '(3)'}), '(minutes=3)\n', (21427, 21438), False, 'from datetime import datetime, timedelta\n'), ((23496, 23522), 'sc2monitor.model.Match.datetime.asc', 'model.Match.datetime.asc', ([], {}), '()\n', (23520, 23522), True, 'import sc2monitor.model as model\n'), ((28008, 28134), 'sc2monitor.model.Player', 'model.Player', ([], {'player_id': 'player.player_id', 'realm': 'player.realm', 'server': 'player.server', 'race': "ladder_data['race']", 'ladder_id': '(0)'}), "(player_id=player.player_id, realm=player.realm, server=player.\n server, race=ladder_data['race'], ladder_id=0)\n", (28020, 28134), True, 'import sc2monitor.model as model\n'), ((8140, 8154), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8152, 8154), False, 'from datetime import datetime, timedelta\n'), ((8157, 8174), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (8166, 8174), False, 'from datetime import datetime, timedelta\n'), ((9516, 9536), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (9525, 9536), False, 'from datetime import datetime, timedelta\n'), ((5707, 5736), 'sc2monitor.model.Season.season_id.desc', 'model.Season.season_id.desc', ([], {}), '()\n', (5734, 5736), True, 'import sc2monitor.model as model\n'), ((13097, 13124), 'sc2monitor.model.Match.datetime.desc', 'model.Match.datetime.desc', ([], {}), '()\n', (13122, 13124), True, 'import sc2monitor.model as model\n'), ((20854, 20881), 'sc2monitor.model.Match.datetime.desc', 'model.Match.datetime.desc', ([], {}), '()\n', (20879, 20881), True, 'import sc2monitor.model 
as model\n'), ((22919, 22946), 'sc2monitor.model.Match.datetime.desc', 'model.Match.datetime.desc', ([], {}), '()\n', (22944, 22946), True, 'import sc2monitor.model as model\n'), ((28664, 28689), 'sc2monitor.model.Log.datetime.desc', 'model.Log.datetime.desc', ([], {}), '()\n', (28687, 28689), True, 'import sc2monitor.model as model\n'), ((29047, 29072), 'sc2monitor.model.Run.datetime.desc', 'model.Run.datetime.desc', ([], {}), '()\n', (29070, 29072), True, 'import sc2monitor.model as model\n')] |
import pandas as pd
from tabulate import tabulate
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
# Policies per Mechanism
def p1m1(_g, step, sH, s):
    """Mechanism m1, policy 1: emit a constant signal."""
    signal = dict(policy1=1)
    return signal
def p2m1(_g, step, sH, s):
    """Mechanism m1, policy 2: emit a constant signal."""
    signal = dict(policy2=2)
    return signal
def p1m2(_g, step, sH, s):
    """Mechanism m2, policy 1: emit constant signals for both policies."""
    return dict(policy1=2, policy2=2)
def p2m2(_g, step, sH, s):
    """Mechanism m2, policy 2: emit constant signals for both policies."""
    return dict(policy1=2, policy2=2)
def p1m3(_g, step, sH, s):
    """Mechanism m3, policy 1: emit constant signals for three policies."""
    return dict(policy1=1, policy2=2, policy3=3)
def p2m3(_g, step, sH, s):
    """Mechanism m3, policy 2: emit constant signals for three policies."""
    return dict(policy1=1, policy2=2, policy3=3)
# Internal States per Mechanism
def add(y, x):
    """Build a state-update function that adds ``x`` to state variable ``y``.

    Returns a callable with the cadCAD state-update signature that yields
    the tuple ``(y, current_value + x)``.
    """
    def _update(_g, step, sH, s, _input):
        return (y, s[y] + x)
    return _update
def policies(_g, step, sH, s, _input):
    """State update that stores the aggregated policy signal unchanged."""
    return ('policies', _input)
# Genesis States
# Initial state: empty policy signal and counter s1 starting at zero.
genesis_states = {
    'policies': {},
    's1': 0
}
# State-update functions shared by all partial state update blocks:
# s1 is incremented by 1 each substep, policies stores the raw signal.
variables = {
    's1': add('s1', 1),
    "policies": policies
}
# Three partial state update blocks (m1..m3), each with two policies
# whose outputs are aggregated via policy_ops below.
psubs = {
    "m1": {
        "policies": {
            "p1": p1m1,
            "p2": p2m1
        },
        "variables": variables
    },
    "m2": {
        "policies": {
            "p1": p1m2,
            "p2": p2m2
        },
        "variables": variables
    },
    "m3": {
        "policies": {
            "p1": p1m3,
            "p2": p2m3
        },
        "variables": variables
    }
}
# One run of three timesteps.
sim_config = config_sim(
    {
        "N": 1,
        "T": range(3),
    }
)
# Register the experiment; policy signals are summed and then doubled.
append_configs(
    sim_configs=sim_config,
    initial_state=genesis_states,
    partial_state_update_blocks=psubs,
    policy_ops=[lambda a, b: a + b, lambda y: y * 2]  # Default: lambda a, b: a + b
)
# Execute locally and collect raw results plus the tensor field.
exec_mode = ExecutionMode()
local_proc_ctx = ExecutionContext(context=exec_mode.local_mode)
run = Executor(exec_context=local_proc_ctx, configs=configs)
raw_result, tensor_field, sessions = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
| [
"tabulate.tabulate",
"cadCAD.configuration.append_configs",
"cadCAD.engine.ExecutionMode",
"cadCAD.engine.Executor",
"pandas.DataFrame",
"cadCAD.engine.ExecutionContext"
] | [((1493, 1654), 'cadCAD.configuration.append_configs', 'append_configs', ([], {'sim_configs': 'sim_config', 'initial_state': 'genesis_states', 'partial_state_update_blocks': 'psubs', 'policy_ops': '[lambda a, b: a + b, lambda y: y * 2]'}), '(sim_configs=sim_config, initial_state=genesis_states,\n partial_state_update_blocks=psubs, policy_ops=[lambda a, b: a + b, lambda\n y: y * 2])\n', (1507, 1654), False, 'from cadCAD.configuration import append_configs\n'), ((1708, 1723), 'cadCAD.engine.ExecutionMode', 'ExecutionMode', ([], {}), '()\n', (1721, 1723), False, 'from cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n'), ((1741, 1787), 'cadCAD.engine.ExecutionContext', 'ExecutionContext', ([], {'context': 'exec_mode.local_mode'}), '(context=exec_mode.local_mode)\n', (1757, 1787), False, 'from cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n'), ((1794, 1848), 'cadCAD.engine.Executor', 'Executor', ([], {'exec_context': 'local_proc_ctx', 'configs': 'configs'}), '(exec_context=local_proc_ctx, configs=configs)\n', (1802, 1848), False, 'from cadCAD.engine import ExecutionMode, ExecutionContext, Executor\n'), ((1910, 1934), 'pandas.DataFrame', 'pd.DataFrame', (['raw_result'], {}), '(raw_result)\n', (1922, 1934), True, 'import pandas as pd\n'), ((1973, 2028), 'tabulate.tabulate', 'tabulate', (['tensor_field'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(tensor_field, headers='keys', tablefmt='psql')\n", (1981, 2028), False, 'from tabulate import tabulate\n'), ((2053, 2102), 'tabulate.tabulate', 'tabulate', (['result'], {'headers': '"""keys"""', 'tablefmt': '"""psql"""'}), "(result, headers='keys', tablefmt='psql')\n", (2061, 2102), False, 'from tabulate import tabulate\n')] |
from __future__ import absolute_import, division, print_function
import pytest
import telnyx
TEST_RESOURCE_ID = "f1486bae-f067-460c-ad43-73a92848f902"
class TestPortingOrder(object):
    """Exercises CRUD operations and actions of ``telnyx.PortingOrder``."""

    def test_is_listable(self, request_mock):
        """Listing issues GET /v2/porting_orders and returns PortingOrders."""
        listing = telnyx.PortingOrder.list()
        request_mock.assert_requested("get", "/v2/porting_orders")
        assert isinstance(listing.data, list)
        assert isinstance(listing.data[0], telnyx.PortingOrder)

    def test_is_retrievable(self, request_mock):
        """Retrieval issues GET on the resource path."""
        record = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested("get", "/v2/porting_orders/%s" % TEST_RESOURCE_ID)
        assert isinstance(record, telnyx.PortingOrder)

    def test_is_creatable(self, request_mock):
        """Creation issues POST and returns the created orders."""
        created = telnyx.PortingOrder.create(
            phone_numbers=["13035550000", "13035550001", "13035550002"],
        )
        request_mock.assert_requested("post", "/v2/porting_orders")
        assert isinstance(created.data[0], telnyx.PortingOrder)

    def test_is_saveable(self, request_mock):
        """Mutating attributes and saving issues PATCH on the resource."""
        order = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
        order.webhook_event = "https://update.com"
        order.customer_reference = "updated name"
        saved = order.save()
        request_mock.assert_requested(
            "patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(saved, telnyx.PortingOrder)
        # save() returns the same in-memory object.
        assert saved is order

    def test_is_modifiable(self, request_mock):
        """Class-level modify issues PATCH on the resource."""
        modified = telnyx.PortingOrder.modify(
            TEST_RESOURCE_ID,
            webhook_event="https://update.com",
            customer_reference="updated name",
        )
        request_mock.assert_requested(
            "patch", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(modified, telnyx.PortingOrder)

    def test_is_deletable(self, request_mock):
        """delete() issues DELETE on the resource."""
        record = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
        record.delete()
        request_mock.assert_requested(
            "delete", "/v2/porting_orders/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(record, telnyx.PortingOrder)

    def test_can_confirm_porting_order_action(self, request_mock):
        """confirm() issues POST on the confirm action endpoint."""
        record = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
        record.confirm()
        request_mock.assert_requested(
            "post", "/v2/porting_orders/%s/actions/confirm" % TEST_RESOURCE_ID
        )
        assert isinstance(record, telnyx.PortingOrder)

    @pytest.mark.skip(reason="PDF endpoint not supported by mock currently")
    def test_can_get_loa_template(self, request_mock):
        """loaTemplate() issues GET on the LOA template endpoint."""
        record = telnyx.PortingOrder.retrieve(TEST_RESOURCE_ID)
        record.loaTemplate()
        request_mock.assert_requested(
            "get", "/v2/porting_orders/%s/loa_template" % TEST_RESOURCE_ID
        )
        assert isinstance(record, telnyx.PortingOrder)

    def test_can_list_porting_phone_numbers(self, request_mock):
        """Phone-number listing issues GET /v2/porting_phone_numbers."""
        listing = telnyx.PortingPhoneNumber.list()
        request_mock.assert_requested("get", "/v2/porting_phone_numbers")
        assert isinstance(listing.data, list)
        assert isinstance(listing.data[0], telnyx.PortingPhoneNumber)
| [
"telnyx.PortingOrder.modify",
"pytest.mark.skip",
"telnyx.PortingOrder.create",
"telnyx.PortingPhoneNumber.list",
"telnyx.PortingOrder.list",
"telnyx.PortingOrder.retrieve"
] | [((2585, 2656), 'pytest.mark.skip', 'pytest.mark.skip', ([], {'reason': '"""PDF endpoint not supported by mock currently"""'}), "(reason='PDF endpoint not supported by mock currently')\n", (2601, 2656), False, 'import pytest\n'), ((254, 280), 'telnyx.PortingOrder.list', 'telnyx.PortingOrder.list', ([], {}), '()\n', (278, 280), False, 'import telnyx\n'), ((531, 577), 'telnyx.PortingOrder.retrieve', 'telnyx.PortingOrder.retrieve', (['TEST_RESOURCE_ID'], {}), '(TEST_RESOURCE_ID)\n', (559, 577), False, 'import telnyx\n'), ((791, 882), 'telnyx.PortingOrder.create', 'telnyx.PortingOrder.create', ([], {'phone_numbers': "['13035550000', '13035550001', '13035550002']"}), "(phone_numbers=['13035550000', '13035550001',\n '13035550002'])\n", (817, 882), False, 'import telnyx\n'), ((1106, 1152), 'telnyx.PortingOrder.retrieve', 'telnyx.PortingOrder.retrieve', (['TEST_RESOURCE_ID'], {}), '(TEST_RESOURCE_ID)\n', (1134, 1152), False, 'import telnyx\n'), ((1589, 1709), 'telnyx.PortingOrder.modify', 'telnyx.PortingOrder.modify', (['TEST_RESOURCE_ID'], {'webhook_event': '"""https://update.com"""', 'customer_reference': '"""updated name"""'}), "(TEST_RESOURCE_ID, webhook_event=\n 'https://update.com', customer_reference='updated name')\n", (1615, 1709), False, 'import telnyx\n'), ((1989, 2035), 'telnyx.PortingOrder.retrieve', 'telnyx.PortingOrder.retrieve', (['TEST_RESOURCE_ID'], {}), '(TEST_RESOURCE_ID)\n', (2017, 2035), False, 'import telnyx\n'), ((2320, 2366), 'telnyx.PortingOrder.retrieve', 'telnyx.PortingOrder.retrieve', (['TEST_RESOURCE_ID'], {}), '(TEST_RESOURCE_ID)\n', (2348, 2366), False, 'import telnyx\n'), ((2731, 2777), 'telnyx.PortingOrder.retrieve', 'telnyx.PortingOrder.retrieve', (['TEST_RESOURCE_ID'], {}), '(TEST_RESOURCE_ID)\n', (2759, 2777), False, 'import telnyx\n'), ((3075, 3107), 'telnyx.PortingPhoneNumber.list', 'telnyx.PortingPhoneNumber.list', ([], {}), '()\n', (3105, 3107), False, 'import telnyx\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import numpy as np
import unittest
import paddle
import paddle.nn as nn
class SimpleReturnLayer(nn.Layer):
    """Identity layer: forwards its input unchanged."""

    def forward(self, x):
        """Return ``x`` unchanged."""
        return x
class AddAttrLayer(nn.Layer):
    """Layer that adds an externally injected attribute to its input."""

    def __init__(self):
        super(AddAttrLayer, self).__init__()
        # Must be set by the caller before forward() is invoked.
        self.attr = None

    def forward(self, x):
        """Return ``x + self.attr``."""
        return x + self.attr
class IsInstanceLayer(nn.Layer):
    """Wraps a layer and exercises ``isinstance`` inside a @to_static forward."""

    def __init__(self, layer):
        super(IsInstanceLayer, self).__init__()
        self.layer = layer

    @paddle.jit.to_static
    def forward(self, x):
        # The isinstance check must survive dynamic-to-static conversion.
        if isinstance(self.layer, (AddAttrLayer, )):
            self.layer.attr = x
        res = self.layer(x)
        return res
class SequentialLayer(nn.Layer):
    """Applies a list of layers in order, injecting ``attr`` where needed."""

    def __init__(self, layers):
        super(SequentialLayer, self).__init__()
        self.layers = nn.LayerList(layers)

    @paddle.jit.to_static
    def forward(self, x):
        res = x
        for layer in self.layers:
            # AddAttrLayer instances need their operand set before the call.
            if isinstance(layer, AddAttrLayer):
                layer.attr = x
            res = layer(res)
        return res
def train(model, to_static):
    """Run one forward pass and return the output as a numpy array.

    Args:
        model: layer to execute.
        to_static: True for static-graph (transpiled) execution, False for
            dynamic-graph execution.
    """
    translator = paddle.jit.ProgramTranslator.get_instance()
    translator.enable(to_static)
    data = paddle.ones(shape=[2, 3], dtype='int32')
    return model(data).numpy()
class TestIsinstance(unittest.TestCase):
    """Verifies isinstance-based control flow under @to_static conversion."""

    def test_isinstance_simple_return_layer(self):
        self._test_model(IsInstanceLayer(SimpleReturnLayer()))

    def test_isinstance_add_attr_layer(self):
        self._test_model(IsInstanceLayer(AddAttrLayer()))

    def test_sequential_layer(self):
        stacked = []
        for _ in range(5):
            stacked.append(SimpleReturnLayer())
            stacked.append(AddAttrLayer())
        self._test_model(SequentialLayer(stacked))

    def _test_model(self, model):
        # Static and dynamic execution must produce the same values.
        st_out = train(model, to_static=True)
        dy_out = train(model, to_static=False)
        self.assertTrue(
            np.allclose(dy_out, st_out),
            msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
if __name__ == "__main__":
    # Run the test suite when executed as a script.
    unittest.main()
| [
"numpy.allclose",
"paddle.nn.LayerList",
"paddle.jit.ProgramTranslator.get_instance",
"paddle.ones",
"unittest.main"
] | [((2204, 2247), 'paddle.jit.ProgramTranslator.get_instance', 'paddle.jit.ProgramTranslator.get_instance', ([], {}), '()\n', (2245, 2247), False, 'import paddle\n'), ((2290, 2330), 'paddle.ones', 'paddle.ones', ([], {'shape': '[2, 3]', 'dtype': '"""int32"""'}), "(shape=[2, 3], dtype='int32')\n", (2301, 2330), False, 'import paddle\n'), ((3222, 3237), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3235, 3237), False, 'import unittest\n'), ((1905, 1925), 'paddle.nn.LayerList', 'nn.LayerList', (['layers'], {}), '(layers)\n', (1917, 1925), True, 'import paddle.nn as nn\n'), ((3091, 3118), 'numpy.allclose', 'np.allclose', (['dy_out', 'st_out'], {}), '(dy_out, st_out)\n', (3102, 3118), True, 'import numpy as np\n')] |
""" The task of the registry is to register complex objects
by an keyword/alias that you easily can build and instanciate
these objects with a single keyword. This allows it in a easy
manner to parse a yaml configuration file and use these values
to instanciate the available objects.
"""
import tensorflow as tf
from importlib import import_module
from dlf.core.preprocessing import PreprocessingMethod
from dlf.core.callback import Callback
from dlf.core.evaluator import Evaluator
# Global registries mapping string aliases to the registered classes.
# Populated by the register_* decorators below.
FRAMEWORK_CALLBACKS = {}
FRAMEWORK_DATA_GENERATORS = {}
FRAMEWORK_LOSSES = {}
FRAMEWORK_METRICS = {}
FRAMEWORK_MODELS = {}
FRAMEWORK_PREPROCESSING_METHODS = {}
FRAMEWORK_EVALUATORS = {}
# Currently active experiment; managed via set/get_active_experiment.
FRAMEWORK_ACTIVE_EXPERIMENT = None
def import_framework_modules(module_folder, package):
    """Auto import of all python files in a module folder.

    # Note
        This is necessary for the register_* decorator to work properly.

    # Args
        module_folder: str. path to folder where files to import are located
        package: str. module path e.g. dlf.metrics
    """
    for entry in module_folder.iterdir():
        # Only plain python modules; the package initializer is skipped.
        if entry.suffix != '.py' or entry.name == '__init__.py':
            continue
        import_module(f'{package}.{entry.stem}')
def register_preprocessing_method(*names):
    """Decorator to register a preprocessing object to the framework

    # Args
        *names: Tuple(str). List of aliases for this preprocessing object

    # Raises
        ValueError: If the parent of this method is not of type [PreprocessingMethod](/dlf/core/preprocessing)
    """
    def decorator(cls):
        if not issubclass(cls, PreprocessingMethod):
            raise ValueError("invalid base class for class {}".format(cls))
        # Register the class under every requested alias.
        FRAMEWORK_PREPROCESSING_METHODS.update((alias, cls) for alias in names)
        return cls
    return decorator
def register_metric(*names):
    """Decorator to register a custom metric to the framework

    # Args
        *names: Tuple(str). List of aliases for this metric

    # Raises
        ValueError: If the parent of this method is not of type `tf.keras.metrics.Metric`
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        if not issubclass(cls, tf.keras.metrics.Metric):
            raise ValueError("invalid base class for class {}".format(cls))
        # The class name itself always acts as an alias.
        FRAMEWORK_METRICS[cls.__name__] = cls
        for alias in names:
            if isinstance(alias, str):
                FRAMEWORK_METRICS[alias] = cls
            else:
                raise ValueError(
                    "Invalid type of name '{}' for register_metric decorator".format(alias))
        return cls
    return decorator
def register_loss(*names):
    """Decorator to register a custom loss to the framework

    # Args
        *names: Tuple(str) List of aliases for this loss

    # Raises
        ValueError: If object is not subclass of `tf.keras.losses.Loss`
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        if not issubclass(cls, tf.keras.losses.Loss):
            # Raise ValueError (not a bare Exception) for consistency with
            # the other register_* decorators in this module; ValueError is
            # an Exception subclass, so existing handlers still catch it.
            raise ValueError("invalid base class for class {}".format(cls))
        FRAMEWORK_LOSSES[cls.__name__] = cls  # class name is always an alias
        for name in names:
            if not isinstance(name, str):
                raise ValueError(
                    "Invalid type of name '{}' for register_loss decorator".format(name))
            FRAMEWORK_LOSSES[name] = cls
        return cls
    return decorator
def register_data_generator(*names):
    """Decorator to register a data reader to the framework

    # Args
        *names: Tuple(str). List of aliases for this data reader

    # Raises
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        for alias in names:
            if isinstance(alias, str):
                FRAMEWORK_DATA_GENERATORS[alias] = cls
            else:
                raise ValueError(
                    "Invalid type of name '{}' for register_data_generator decorator".format(alias))
        return cls
    return decorator
def register_model(*names):
    """Decorator to register a custom model to the framework

    # Args
        *names: Tuple(str). List of aliases for this model

    # Raises
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        for alias in names:
            if isinstance(alias, str):
                FRAMEWORK_MODELS[alias] = cls
            else:
                raise ValueError(
                    "Invalid type of name '{}' for register_model decorator".format(alias))
        return cls
    return decorator
def register_callback(*names):
    """Decorator to register a callback to the framework

    # Args
        *names: Tuple(str). List of aliases for this callback

    # Raises
        ValueError: If the decorated class is not a subclass of Callback
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        # Validate the base class once, outside the alias loop. Previously
        # the check sat inside the loop, raised a misleading "Invalid type
        # of name" message and was skipped entirely when no alias was given.
        if not issubclass(cls, Callback):
            raise ValueError("invalid base class for class {}".format(cls))
        for name in names:
            if not isinstance(name, str):
                raise ValueError(
                    "Invalid type of name '{}' for register_callback decorator".format(name))
            FRAMEWORK_CALLBACKS[name] = cls
        return cls
    return decorator
def register_evaluator(*names):
    """Decorator to register an evaluator to the framework

    # Args
        *names: Tuple(str). List of aliases for this evaluator

    # Raises
        ValueError: If the decorated class is not a subclass of Evaluator
        ValueError: If a given alias is not valid
    """
    def decorator(cls):
        # Validate the base class once, outside the alias loop. Previously
        # the check sat inside the loop, raised a misleading "Invalid type
        # of name" message and was skipped entirely when no alias was given.
        if not issubclass(cls, Evaluator):
            raise ValueError("invalid base class for class {}".format(cls))
        for name in names:
            if not isinstance(name, str):
                raise ValueError(
                    "Invalid type of name '{}' for register_evaluator decorator".format(name))
            FRAMEWORK_EVALUATORS[name] = cls
        return cls
    return decorator
def set_active_experiment(exp):
    """Sets active experiment to global state and
    allows all modules to access it

    # Arguments
        exp: dlf.core.Experiment. Active experiment
    """
    global FRAMEWORK_ACTIVE_EXPERIMENT
    # Rebinding the module-level name makes it visible to all importers.
    FRAMEWORK_ACTIVE_EXPERIMENT = exp
def get_active_experiment():
    """Gets the current, active, experiment

    # Returns
        dlf.core.Experiment. Active experiment, or None if none was set
    """
    # No `global` statement needed: this function only reads the name,
    # and module-level lookup happens automatically.
    return FRAMEWORK_ACTIVE_EXPERIMENT
| [
"importlib.import_module"
] | [((1307, 1328), 'importlib.import_module', 'import_module', (['module'], {}), '(module)\n', (1320, 1328), False, 'from importlib import import_module\n')] |
import os
import json
import rmse
# Output file for tuning results (one JSON record per line).
TUNING_FILE = "/Users/markduan/duan/USC_course/USC_APDS/INF553/project/predict/tuning.json"
# Hyper-parameter grids explored by the search below.
CORATED_LIMIT = [3, 5, 7, 10]
LONELY_THRESHOLD = [2, 3, 5, 7]
N_NEIGHBORS_ITEMBASED = [5, 7, 10, 12]
WEIGHT = [0.2, 0.4, 0.6, 0.8]
def writeRes(c, l, n, w, res, path=None):
    """Append one tuning result as a JSON line.

    Args:
        c: corated limit used for this run.
        l: lonely threshold used for this run.
        n: number of item-based neighbors.
        w: hybrid weight.
        res: RMSE obtained with these parameters.
        path: output file; defaults to TUNING_FILE (backward compatible).
    """
    if path is None:
        path = TUNING_FILE
    record = {
        'c': c,
        'l': l,
        'n': n,
        'w': w,
        'rmse': res
    }
    with open(path, 'a', encoding='utf-8') as fp:
        fp.write(json.dumps(record))
        fp.write('\n')
# Start from a clean results file.
if os.path.exists(TUNING_FILE):
    os.remove(TUNING_FILE)
# Grid search: retrain once per (corated limit, lonely threshold) pair,
# then evaluate every (neighbors, weight) combination against that model.
for c in CORATED_LIMIT:
    for l in LONELY_THRESHOLD:
        # The lonely threshold is passed twice to train.py.
        train_comm = "spark-submit train.py %d %d %d" % (c, l, l)
        os.system(train_comm)
        for n in N_NEIGHBORS_ITEMBASED:
            for w in WEIGHT:
                test_comm = "spark-submit predict.py %d %f" % (n, w)
                os.system(test_comm)
                res = rmse.getRmse()
                writeRes(c, l, n, w, res)
| [
"os.path.exists",
"json.dumps",
"os.system",
"rmse.getRmse",
"os.remove"
] | [((553, 580), 'os.path.exists', 'os.path.exists', (['TUNING_FILE'], {}), '(TUNING_FILE)\n', (567, 580), False, 'import os\n'), ((586, 608), 'os.remove', 'os.remove', (['TUNING_FILE'], {}), '(TUNING_FILE)\n', (595, 608), False, 'import os\n'), ((490, 503), 'json.dumps', 'json.dumps', (['x'], {}), '(x)\n', (500, 503), False, 'import json\n'), ((739, 760), 'os.system', 'os.system', (['train_comm'], {}), '(train_comm)\n', (748, 760), False, 'import os\n'), ((915, 935), 'os.system', 'os.system', (['test_comm'], {}), '(test_comm)\n', (924, 935), False, 'import os\n'), ((958, 972), 'rmse.getRmse', 'rmse.getRmse', ([], {}), '()\n', (970, 972), False, 'import rmse\n')] |
from __future__ import unicode_literals
from datetime import datetime, date
from django.db import models
from django.contrib.auth.models import User
from django.db.models.aggregates import Sum
AGE_LIMIT = 7 # 7 days age limit
class PriceTemplate(models.Model):
    """Named price list that can be assigned to customers."""
    name = models.CharField(max_length=200)
    def __unicode__(self):
        return self.name
class Product(models.Model):
    """Sellable product with a default rate and per-template prices."""
    name = models.CharField(max_length=200)
    # Fallback price when the customer's template defines no override.
    rate = models.IntegerField('Default price of product')
    template = models.ManyToManyField(
        PriceTemplate,
        through='ProductPriceTemplate',
        through_fields=('product', 'template'))
    def __unicode__(self):
        return self.name
    @property
    def quantity(self):
        # Total units sold across all Sale rows for this product.
        return self.product_sales.aggregate(
            Sum('quantity'))['quantity__sum'] or 0
    @property
    def amount(self):
        # Total sales amount across all Sale rows for this product.
        return self.product_sales.aggregate(
            Sum('amount'))['amount__sum'] or 0
class ProductPriceTemplate(models.Model):
    """Through model: per-template price override for a product."""
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    template = models.ForeignKey(PriceTemplate, on_delete=models.CASCADE)
    price = models.IntegerField()
    def __unicode__(self):
        return "{0} - {1}".format(self.product, self.template)
class BatchSize(models.Model):
    """Named pack size (units per batch) a product can be sold in."""
    name = models.CharField(max_length=100)
    quantity = models.PositiveIntegerField()
    def __unicode__(self):
        return self.name
class Rep(models.Model):
    """Sales representative tied to a Django auth user."""
    name = models.CharField(max_length=200)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    supervisor = models.ForeignKey('Rep', null=True, blank=True)
    last_activity = models.DateTimeField(blank=True, null=True)
    def __unicode__(self):
        return self.name
    @property
    def is_old(self):
        """True when the rep has been inactive for more than AGE_LIMIT days."""
        if not self.last_activity:
            return True
        # Compare whole days: ``date - date`` yields a timedelta, and the
        # original ``timedelta <= AGE_LIMIT`` compared a timedelta against
        # an int, which can never be true, so is_old was always True.
        if (date.today() - self.last_activity.date()).days <= AGE_LIMIT:
            return False
        return True
class Customer(models.Model):
    """Buying party (hospital, pharmacy, ...) with a running balance."""
    HOSPITAL = 0
    INSTITUTION = 1
    PHARMACY = 2
    WHOLESELLER = 3
    HEALTH_PERSONNEL = 4
    CUSTOMER_TYPE = (
        (0, 'Hospital'),
        (1, 'Institution'),
        (2, 'Pharmacy'),
        (3, 'Wholeseller'),
        (4, 'Health Personnel'))
    name = models.CharField(max_length=200)
    address = models.TextField(blank=True)
    contact_person = models.CharField(max_length=200, blank=True)
    phone1 = models.CharField(max_length=20, blank=True)
    email = models.EmailField(blank=True, null=True)
    customer_type = models.PositiveIntegerField(choices=CUSTOMER_TYPE)
    # Optional price list overriding product default rates.
    price_template = models.ForeignKey(PriceTemplate, null=True, blank=True)
    def __unicode__(self):
        return self.name
    @property
    def balance(self):
        """Outstanding amount: total invoiced sales minus total payments."""
        # Sales are linked to the customer through their invoice.
        sales = Sale.objects.filter(invoice__customer=self).aggregate(
            Sum('amount'))['amount__sum'] or 0
        paymt = self.customer_payments.aggregate(
            Sum('amount'))['amount__sum'] or 0
        return sales - paymt
class Invoice(models.Model):
    """Invoice grouping the Sale rows recorded by a rep for a customer."""
    ACTUAL_SALES = 0
    SOR = 1
    SAMPLES = 2
    INVOICE_TYPES = ((0, 'Actual Sales'), (1, 'SOR'), (2, 'Samples'))
    rep = models.ForeignKey(Rep, related_name='rep_invoices')
    customer = models.ForeignKey(Customer, related_name='customer_invoices')
    invoice_no = models.CharField(max_length=200, blank=True)
    invoice_date = models.DateField(blank=True, null=True)
    sales_type = models.PositiveIntegerField(choices=INVOICE_TYPES)
    recorded_date = models.DateTimeField(default=datetime.now)
    def __unicode__(self):
        return unicode(self.invoice_no)
    @property
    def amount(self):
        """Sum of the amounts of all sales on this invoice."""
        # Generator expression avoids materializing an intermediate list.
        return sum(sale.amount for sale in self.invoice_sales.all())
class Sale(models.Model):
    """Single line item on an invoice."""
    invoice = models.ForeignKey(
        Invoice, related_name='invoice_sales', null=True)
    product = models.ForeignKey(Product, related_name='product_sales')
    batch_size = models.ForeignKey(BatchSize, null=True)
    quantity = models.PositiveIntegerField()
    amount = models.IntegerField()
    recorded_date = models.DateTimeField(default=datetime.now)
    def __unicode__(self):
        return unicode(self.invoice)
    @property
    def rate(self):
        """Unit price: the customer's template override, else product default."""
        templ = self.invoice.customer.price_template
        if not templ:
            price = self.product.rate
        else:
            try:
                prod_price_templ = ProductPriceTemplate.objects.get(
                    product=self.product, template=templ)
            except ProductPriceTemplate.DoesNotExist:
                # Template exists but has no entry for this product:
                # fall back to the product's default rate.
                price = self.product.rate
            else:
                price = prod_price_templ.price
        return price
class Payment(models.Model):
    """Payment received from a customer, recorded by a rep."""
    EPAYMENT = 0
    CHEQUE = 1
    TELLER = 2
    MODE_OF_PAYMENT = ((0, 'E-Payment'), (1, 'Cheque'), (2, 'Teller'))
    rep = models.ForeignKey(Rep, related_name='rep_payments')
    customer = models.ForeignKey(Customer, related_name='customer_payments')
    amount = models.PositiveIntegerField()
    receipt_no = models.CharField(max_length=50, blank=True)
    payment_date = models.DateField()
    receipt_date = models.DateField()
    recorded_date = models.DateTimeField(default=datetime.now)
    # Customer balance as recorded at payment time (may go negative).
    balance = models.IntegerField()
    bank_of_payment = models.CharField(max_length=200, blank=True)
    mode_of_payment = models.PositiveIntegerField(choices=MODE_OF_PAYMENT)
    # Only relevant for the Teller / Cheque payment modes respectively.
    teller_number = models.CharField(max_length=50, blank=True)
    teller_date = models.DateField(blank=True, null=True)
    cheque_date = models.DateField(blank=True, null=True)
    remarks = models.TextField(blank=True)
    def __unicode__(self):
        return unicode(self.customer)
| [
"django.db.models.aggregates.Sum",
"django.db.models.OneToOneField",
"django.db.models.EmailField",
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.IntegerField",
"django.db.models.ManyToManyField",
"django.db.models.PositiveIntegerField",... | [((277, 309), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (293, 309), False, 'from django.db import models\n'), ((405, 437), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (421, 437), False, 'from django.db import models\n'), ((449, 496), 'django.db.models.IntegerField', 'models.IntegerField', (['"""Default price of product"""'], {}), "('Default price of product')\n", (468, 496), False, 'from django.db import models\n'), ((512, 625), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['PriceTemplate'], {'through': '"""ProductPriceTemplate"""', 'through_fields': "('product', 'template')"}), "(PriceTemplate, through='ProductPriceTemplate',\n through_fields=('product', 'template'))\n", (534, 625), False, 'from django.db import models\n'), ((1022, 1074), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {'on_delete': 'models.CASCADE'}), '(Product, on_delete=models.CASCADE)\n', (1039, 1074), False, 'from django.db import models\n'), ((1090, 1148), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PriceTemplate'], {'on_delete': 'models.CASCADE'}), '(PriceTemplate, on_delete=models.CASCADE)\n', (1107, 1148), False, 'from django.db import models\n'), ((1161, 1182), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (1180, 1182), False, 'from django.db import models\n'), ((1318, 1350), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)'}), '(max_length=100)\n', (1334, 1350), False, 'from django.db import models\n'), ((1366, 1395), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (1393, 1395), False, 'from django.db import models\n'), ((1487, 1519), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1503, 1519), False, 'from django.db import 
models\n'), ((1531, 1583), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (1551, 1583), False, 'from django.db import models\n'), ((1601, 1648), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Rep"""'], {'null': '(True)', 'blank': '(True)'}), "('Rep', null=True, blank=True)\n", (1618, 1648), False, 'from django.db import models\n'), ((1669, 1712), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1689, 1712), False, 'from django.db import models\n'), ((2278, 2310), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (2294, 2310), False, 'from django.db import models\n'), ((2325, 2353), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (2341, 2353), False, 'from django.db import models\n'), ((2375, 2419), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (2391, 2419), False, 'from django.db import models\n'), ((2433, 2476), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)'}), '(max_length=20, blank=True)\n', (2449, 2476), False, 'from django.db import models\n'), ((2489, 2529), 'django.db.models.EmailField', 'models.EmailField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (2506, 2529), False, 'from django.db import models\n'), ((2550, 2600), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'choices': 'CUSTOMER_TYPE'}), '(choices=CUSTOMER_TYPE)\n', (2577, 2600), False, 'from django.db import models\n'), ((2622, 2677), 'django.db.models.ForeignKey', 'models.ForeignKey', (['PriceTemplate'], {'null': '(True)', 'blank': '(True)'}), '(PriceTemplate, null=True, blank=True)\n', (2639, 2677), False, 'from 
django.db import models\n'), ((3269, 3320), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Rep'], {'related_name': '"""rep_invoices"""'}), "(Rep, related_name='rep_invoices')\n", (3286, 3320), False, 'from django.db import models\n'), ((3336, 3397), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'related_name': '"""customer_invoices"""'}), "(Customer, related_name='customer_invoices')\n", (3353, 3397), False, 'from django.db import models\n'), ((3415, 3459), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)'}), '(max_length=200, blank=True)\n', (3431, 3459), False, 'from django.db import models\n'), ((3479, 3518), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (3495, 3518), False, 'from django.db import models\n'), ((3536, 3586), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'choices': 'INVOICE_TYPES'}), '(choices=INVOICE_TYPES)\n', (3563, 3586), False, 'from django.db import models\n'), ((3607, 3649), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (3627, 3649), False, 'from django.db import models\n'), ((3868, 3935), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Invoice'], {'related_name': '"""invoice_sales"""', 'null': '(True)'}), "(Invoice, related_name='invoice_sales', null=True)\n", (3885, 3935), False, 'from django.db import models\n'), ((3959, 4015), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Product'], {'related_name': '"""product_sales"""'}), "(Product, related_name='product_sales')\n", (3976, 4015), False, 'from django.db import models\n'), ((4033, 4072), 'django.db.models.ForeignKey', 'models.ForeignKey', (['BatchSize'], {'null': '(True)'}), '(BatchSize, null=True)\n', (4050, 4072), False, 'from django.db import models\n'), ((4088, 4117), 'django.db.models.PositiveIntegerField', 
'models.PositiveIntegerField', ([], {}), '()\n', (4115, 4117), False, 'from django.db import models\n'), ((4131, 4152), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (4150, 4152), False, 'from django.db import models\n'), ((4173, 4215), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (4193, 4215), False, 'from django.db import models\n'), ((4929, 4980), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Rep'], {'related_name': '"""rep_payments"""'}), "(Rep, related_name='rep_payments')\n", (4946, 4980), False, 'from django.db import models\n'), ((4996, 5057), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Customer'], {'related_name': '"""customer_payments"""'}), "(Customer, related_name='customer_payments')\n", (5013, 5057), False, 'from django.db import models\n'), ((5071, 5100), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (5098, 5100), False, 'from django.db import models\n'), ((5118, 5161), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (5134, 5161), False, 'from django.db import models\n'), ((5181, 5199), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (5197, 5199), False, 'from django.db import models\n'), ((5219, 5237), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (5235, 5237), False, 'from django.db import models\n'), ((5258, 5300), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.now'}), '(default=datetime.now)\n', (5278, 5300), False, 'from django.db import models\n'), ((5315, 5336), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (5334, 5336), False, 'from django.db import models\n'), ((5359, 5403), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'blank': '(True)'}), 
'(max_length=200, blank=True)\n', (5375, 5403), False, 'from django.db import models\n'), ((5426, 5478), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {'choices': 'MODE_OF_PAYMENT'}), '(choices=MODE_OF_PAYMENT)\n', (5453, 5478), False, 'from django.db import models\n'), ((5499, 5542), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'blank': '(True)'}), '(max_length=50, blank=True)\n', (5515, 5542), False, 'from django.db import models\n'), ((5561, 5600), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5577, 5600), False, 'from django.db import models\n'), ((5619, 5658), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (5635, 5658), False, 'from django.db import models\n'), ((5673, 5701), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (5689, 5701), False, 'from django.db import models\n'), ((1874, 1886), 'datetime.date.today', 'date.today', ([], {}), '()\n', (1884, 1886), False, 'from datetime import datetime, date\n'), ((796, 811), 'django.db.models.aggregates.Sum', 'Sum', (['"""quantity"""'], {}), "('quantity')\n", (799, 811), False, 'from django.db.models.aggregates import Sum\n'), ((929, 942), 'django.db.models.aggregates.Sum', 'Sum', (['"""amount"""'], {}), "('amount')\n", (932, 942), False, 'from django.db.models.aggregates import Sum\n'), ((2852, 2865), 'django.db.models.aggregates.Sum', 'Sum', (['"""amount"""'], {}), "('amount')\n", (2855, 2865), False, 'from django.db.models.aggregates import Sum\n'), ((3045, 3058), 'django.db.models.aggregates.Sum', 'Sum', (['"""amount"""'], {}), "('amount')\n", (3048, 3058), False, 'from django.db.models.aggregates import Sum\n')] |
import logging
import os
from datetime import datetime
class Logger :
logger = None
def myLogger(self):
if None == self.logger:
self.logger=logging.getLogger('nrdf')
self.logger.setLevel(logging.DEBUG)
log_folder = r"logs/"
os.makedirs(os.path.dirname(log_folder), exist_ok=True)
output_file = os.path.join(log_folder, datetime.now().strftime("%Y_%m_%d-%H_%M_%S"))
file_handler=logging.FileHandler(output_file + '.log', mode="w", encoding=None, delay=False)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
self.logger.addHandler(file_handler)
stream_handler = logging.StreamHandler()
file_handler.setFormatter(formatter)
self.logger.addHandler(stream_handler)
self.logger.propagate = False
return self.logger | [
"logging.getLogger",
"logging.StreamHandler",
"logging.Formatter",
"os.path.dirname",
"datetime.datetime.now",
"logging.FileHandler"
] | [((170, 195), 'logging.getLogger', 'logging.getLogger', (['"""nrdf"""'], {}), "('nrdf')\n", (187, 195), False, 'import logging\n'), ((468, 547), 'logging.FileHandler', 'logging.FileHandler', (["(output_file + '.log')"], {'mode': '"""w"""', 'encoding': 'None', 'delay': '(False)'}), "(output_file + '.log', mode='w', encoding=None, delay=False)\n", (487, 547), False, 'import logging\n'), ((572, 630), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)s %(message)s"""'], {}), "('%(asctime)s %(levelname)s %(message)s')\n", (589, 630), False, 'import logging\n'), ((771, 794), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (792, 794), False, 'import logging\n'), ((302, 329), 'os.path.dirname', 'os.path.dirname', (['log_folder'], {}), '(log_folder)\n', (317, 329), False, 'import os\n'), ((397, 411), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (409, 411), False, 'from datetime import datetime\n')] |
import os
import argparse
import cv2
import torch
import pandas as pd
from tqdm import tqdm
from pathlib import Path
import segmentation_models_pytorch as smp
from tools.datasets import InferenceDataset
from tools.models import CovidScoringNet, SegmentationModel
from tools.utils import extract_model_opts, get_list_of_files
def inference(
model: CovidScoringNet,
dataset: InferenceDataset,
output_dir: str,
csv_name: str,
) -> None:
model.eval()
output_lungs_dir = os.path.join(output_dir, 'lungs')
output_covid_dir = os.path.join(output_dir, 'covid')
os.makedirs(output_lungs_dir) if not os.path.exists(output_lungs_dir) else False
os.makedirs(output_covid_dir) if not os.path.exists(output_covid_dir) else False
data = {
'dataset': [],
'filename': [],
'lungs_mask': [],
'covid_mask': [],
'score': [],
}
keys = ['lung_segment_{:d}'.format(idx + 1) for idx in range(6)]
lung_segment_probs = {key: [] for key in keys}
data.update(lung_segment_probs)
for source_img, img_path in tqdm(dataset, desc='Prediction', unit=' images'):
image_path = os.path.normpath(img_path)
filename = os.path.split(image_path)[-1]
dataset_name = image_path.split(os.sep)[-3]
predicted_score, mask_lungs, mask_covid, raw_pred = model.predict(source_img)
cv2.imwrite(os.path.join(output_lungs_dir, filename), mask_lungs * 255)
cv2.imwrite(os.path.join(output_covid_dir, filename), mask_covid * 255)
data['dataset'].append(dataset_name)
data['filename'].append(filename)
data['lungs_mask'].append(os.path.join(output_lungs_dir, filename))
data['covid_mask'].append(os.path.join(output_covid_dir, filename))
data['score'].append(predicted_score)
for idx in range(len(raw_pred)):
raw_pred_col = 'lung_segment_{:d}'.format(idx + 1)
data[raw_pred_col].append(raw_pred[idx])
csv_save_path = os.path.join(output_dir, csv_name)
df = pd.DataFrame(data)
df.to_csv(csv_save_path, index=False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Inference pipeline')
# Dataset settings
parser.add_argument('--data_dir', type=str)
parser.add_argument('--output_dir', default='dataset/inference_output', type=str)
parser.add_argument('--csv_name', default='model_outputs.csv', type=str)
# COVID model settings
parser.add_argument('--covid_model_path', type=str)
parser.add_argument('--covid_model_name', default='Unet', type=str)
parser.add_argument('--covid_encoder_name', default='se_resnet101', type=str)
parser.add_argument('--covid_encoder_weights', default='imagenet', type=str)
parser.add_argument('--covid_in_channels', default=3, type=int)
parser.add_argument('--covid_num_classes', default=1, type=int)
parser.add_argument('--covid_activation', default='sigmoid', type=str)
parser.add_argument('--covid_dropout', default=0.2, type=float)
parser.add_argument('--covid_aux_params', default=True, type=bool)
parser.add_argument('--covid_input_size', nargs='+', default=(480, 480), type=int)
# Lungs model settings
parser.add_argument('--lungs_model_path', type=str)
parser.add_argument('--lungs_model_name', default='Unet', type=str)
parser.add_argument('--lungs_encoder_name', default='se_resnext101_32x4d', type=str)
parser.add_argument('--lungs_encoder_weights', default='imagenet', type=str)
parser.add_argument('--lungs_in_channels', default=3, type=int)
parser.add_argument('--lungs_num_classes', default=1, type=int)
parser.add_argument('--lungs_activation', default='sigmoid', type=str)
parser.add_argument('--lungs_dropout', default=0.2, type=float)
parser.add_argument('--lungs_aux_params', default=False, type=bool)
parser.add_argument('--lungs_input_size', nargs='+', default=(384, 384), type=int)
# Additional settings
parser.add_argument('--automatic_parser', action='store_true')
parser.add_argument('--threshold', default=0.5, type=float)
args = parser.parse_args()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
args.covid_input_size = tuple(args.covid_input_size)
args.lungs_input_size = tuple(args.lungs_input_size)
if args.automatic_parser:
covid_model_opts = extract_model_opts(args.covid_model_path)
lungs_model_opts = extract_model_opts(args.lungs_model_path)
args.covid_model_name = covid_model_opts['model_name']
args.covid_encoder_name = covid_model_opts['encoder_name']
args.covid_encoder_weights = covid_model_opts['encoder_weights']
args.lungs_model_name = lungs_model_opts['model_name']
args.lungs_encoder_name = lungs_model_opts['encoder_name']
args.lungs_encoder_weights = lungs_model_opts['encoder_weights']
args.output_dir = os.path.join(args.output_dir, args.covid_model_name)
args.csv_name = '{:s}_{:s}{:s}'.format(
Path(args.csv_name).stem,
args.covid_model_name,
Path(args.csv_name).suffix
)
covid_aux_params = None
if args.covid_aux_params:
covid_aux_params = dict(
pooling='avg',
dropout=args.covid_dropout,
activation=args.covid_activation,
classes=args.covid_num_classes,
)
lungs_aux_params = None
if args.lungs_aux_params:
lungs_aux_params = dict(
pooling='avg',
dropout=args.lungs_dropout,
activation=args.covid_activation,
classes=args.covid_num_classes,
)
covid_model = SegmentationModel(
model_name=args.covid_model_name,
encoder_name=args.covid_encoder_name,
aux_params=covid_aux_params,
encoder_weights=args.covid_encoder_weights,
in_channels=args.covid_in_channels,
num_classes=args.covid_num_classes,
activation=args.covid_activation,
wandb_api_key=None,
)
lungs_model = SegmentationModel(
model_name=args.lungs_model_name,
encoder_name=args.lungs_encoder_name,
aux_params=lungs_aux_params,
encoder_weights=args.lungs_encoder_weights,
in_channels=args.lungs_in_channels,
num_classes=args.lungs_num_classes,
activation=args.lungs_activation,
wandb_api_key=None,
)
covid_model = covid_model.build_model()
lungs_model = lungs_model.build_model()
covid_model.load_state_dict(torch.load(args.covid_model_path, map_location=device))
lungs_model.load_state_dict(torch.load(args.lungs_model_path, map_location=device))
covid_preprocessing_params = smp.encoders.get_preprocessing_params(
encoder_name=args.covid_encoder_name, pretrained=args.covid_encoder_weights
)
lung_preprocessing_params = smp.encoders.get_preprocessing_params(
encoder_name=args.lungs_encoder_name, pretrained=args.lungs_encoder_weights
)
img_paths = get_list_of_files(args.data_dir, ['mask'])
dataset = InferenceDataset(img_paths, input_size=args.lungs_input_size)
model = CovidScoringNet(
lungs_model,
covid_model,
device,
args.threshold,
args.lungs_input_size,
args.covid_input_size,
covid_preprocessing_params,
lung_preprocessing_params,
crop_type='single_crop',
)
inference(model, dataset, args.output_dir, args.csv_name)
| [
"tools.datasets.InferenceDataset",
"os.path.exists",
"tools.utils.get_list_of_files",
"argparse.ArgumentParser",
"os.makedirs",
"pathlib.Path",
"torch.load",
"tqdm.tqdm",
"os.path.join",
"tools.models.CovidScoringNet",
"os.path.split",
"os.path.normpath",
"torch.cuda.is_available",
"tools.... | [((494, 527), 'os.path.join', 'os.path.join', (['output_dir', '"""lungs"""'], {}), "(output_dir, 'lungs')\n", (506, 527), False, 'import os\n'), ((551, 584), 'os.path.join', 'os.path.join', (['output_dir', '"""covid"""'], {}), "(output_dir, 'covid')\n", (563, 584), False, 'import os\n'), ((1083, 1131), 'tqdm.tqdm', 'tqdm', (['dataset'], {'desc': '"""Prediction"""', 'unit': '""" images"""'}), "(dataset, desc='Prediction', unit=' images')\n", (1087, 1131), False, 'from tqdm import tqdm\n'), ((1994, 2028), 'os.path.join', 'os.path.join', (['output_dir', 'csv_name'], {}), '(output_dir, csv_name)\n', (2006, 2028), False, 'import os\n'), ((2038, 2056), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (2050, 2056), True, 'import pandas as pd\n'), ((2141, 2198), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Inference pipeline"""'}), "(description='Inference pipeline')\n", (2164, 2198), False, 'import argparse\n'), ((5682, 5990), 'tools.models.SegmentationModel', 'SegmentationModel', ([], {'model_name': 'args.covid_model_name', 'encoder_name': 'args.covid_encoder_name', 'aux_params': 'covid_aux_params', 'encoder_weights': 'args.covid_encoder_weights', 'in_channels': 'args.covid_in_channels', 'num_classes': 'args.covid_num_classes', 'activation': 'args.covid_activation', 'wandb_api_key': 'None'}), '(model_name=args.covid_model_name, encoder_name=args.\n covid_encoder_name, aux_params=covid_aux_params, encoder_weights=args.\n covid_encoder_weights, in_channels=args.covid_in_channels, num_classes=\n args.covid_num_classes, activation=args.covid_activation, wandb_api_key\n =None)\n', (5699, 5990), False, 'from tools.models import CovidScoringNet, SegmentationModel\n'), ((6061, 6369), 'tools.models.SegmentationModel', 'SegmentationModel', ([], {'model_name': 'args.lungs_model_name', 'encoder_name': 'args.lungs_encoder_name', 'aux_params': 'lungs_aux_params', 'encoder_weights': 'args.lungs_encoder_weights', 
'in_channels': 'args.lungs_in_channels', 'num_classes': 'args.lungs_num_classes', 'activation': 'args.lungs_activation', 'wandb_api_key': 'None'}), '(model_name=args.lungs_model_name, encoder_name=args.\n lungs_encoder_name, aux_params=lungs_aux_params, encoder_weights=args.\n lungs_encoder_weights, in_channels=args.lungs_in_channels, num_classes=\n args.lungs_num_classes, activation=args.lungs_activation, wandb_api_key\n =None)\n', (6078, 6369), False, 'from tools.models import CovidScoringNet, SegmentationModel\n'), ((6721, 6839), 'segmentation_models_pytorch.encoders.get_preprocessing_params', 'smp.encoders.get_preprocessing_params', ([], {'encoder_name': 'args.covid_encoder_name', 'pretrained': 'args.covid_encoder_weights'}), '(encoder_name=args.covid_encoder_name,\n pretrained=args.covid_encoder_weights)\n', (6758, 6839), True, 'import segmentation_models_pytorch as smp\n'), ((6882, 7000), 'segmentation_models_pytorch.encoders.get_preprocessing_params', 'smp.encoders.get_preprocessing_params', ([], {'encoder_name': 'args.lungs_encoder_name', 'pretrained': 'args.lungs_encoder_weights'}), '(encoder_name=args.lungs_encoder_name,\n pretrained=args.lungs_encoder_weights)\n', (6919, 7000), True, 'import segmentation_models_pytorch as smp\n'), ((7028, 7070), 'tools.utils.get_list_of_files', 'get_list_of_files', (['args.data_dir', "['mask']"], {}), "(args.data_dir, ['mask'])\n", (7045, 7070), False, 'from tools.utils import extract_model_opts, get_list_of_files\n'), ((7085, 7146), 'tools.datasets.InferenceDataset', 'InferenceDataset', (['img_paths'], {'input_size': 'args.lungs_input_size'}), '(img_paths, input_size=args.lungs_input_size)\n', (7101, 7146), False, 'from tools.datasets import InferenceDataset\n'), ((7160, 7360), 'tools.models.CovidScoringNet', 'CovidScoringNet', (['lungs_model', 'covid_model', 'device', 'args.threshold', 'args.lungs_input_size', 'args.covid_input_size', 'covid_preprocessing_params', 'lung_preprocessing_params'], {'crop_type': 
'"""single_crop"""'}), "(lungs_model, covid_model, device, args.threshold, args.\n lungs_input_size, args.covid_input_size, covid_preprocessing_params,\n lung_preprocessing_params, crop_type='single_crop')\n", (7175, 7360), False, 'from tools.models import CovidScoringNet, SegmentationModel\n'), ((589, 618), 'os.makedirs', 'os.makedirs', (['output_lungs_dir'], {}), '(output_lungs_dir)\n', (600, 618), False, 'import os\n'), ((674, 703), 'os.makedirs', 'os.makedirs', (['output_covid_dir'], {}), '(output_covid_dir)\n', (685, 703), False, 'import os\n'), ((1154, 1180), 'os.path.normpath', 'os.path.normpath', (['img_path'], {}), '(img_path)\n', (1170, 1180), False, 'import os\n'), ((4166, 4191), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4189, 4191), False, 'import torch\n'), ((4375, 4416), 'tools.utils.extract_model_opts', 'extract_model_opts', (['args.covid_model_path'], {}), '(args.covid_model_path)\n', (4393, 4416), False, 'from tools.utils import extract_model_opts, get_list_of_files\n'), ((4444, 4485), 'tools.utils.extract_model_opts', 'extract_model_opts', (['args.lungs_model_path'], {}), '(args.lungs_model_path)\n', (4462, 4485), False, 'from tools.utils import extract_model_opts, get_list_of_files\n'), ((4921, 4973), 'os.path.join', 'os.path.join', (['args.output_dir', 'args.covid_model_name'], {}), '(args.output_dir, args.covid_model_name)\n', (4933, 4973), False, 'import os\n'), ((6543, 6597), 'torch.load', 'torch.load', (['args.covid_model_path'], {'map_location': 'device'}), '(args.covid_model_path, map_location=device)\n', (6553, 6597), False, 'import torch\n'), ((6631, 6685), 'torch.load', 'torch.load', (['args.lungs_model_path'], {'map_location': 'device'}), '(args.lungs_model_path, map_location=device)\n', (6641, 6685), False, 'import torch\n'), ((626, 658), 'os.path.exists', 'os.path.exists', (['output_lungs_dir'], {}), '(output_lungs_dir)\n', (640, 658), False, 'import os\n'), ((711, 743), 'os.path.exists', 
'os.path.exists', (['output_covid_dir'], {}), '(output_covid_dir)\n', (725, 743), False, 'import os\n'), ((1201, 1226), 'os.path.split', 'os.path.split', (['image_path'], {}), '(image_path)\n', (1214, 1226), False, 'import os\n'), ((1390, 1430), 'os.path.join', 'os.path.join', (['output_lungs_dir', 'filename'], {}), '(output_lungs_dir, filename)\n', (1402, 1430), False, 'import os\n'), ((1470, 1510), 'os.path.join', 'os.path.join', (['output_covid_dir', 'filename'], {}), '(output_covid_dir, filename)\n', (1482, 1510), False, 'import os\n'), ((1652, 1692), 'os.path.join', 'os.path.join', (['output_lungs_dir', 'filename'], {}), '(output_lungs_dir, filename)\n', (1664, 1692), False, 'import os\n'), ((1728, 1768), 'os.path.join', 'os.path.join', (['output_covid_dir', 'filename'], {}), '(output_covid_dir, filename)\n', (1740, 1768), False, 'import os\n'), ((5035, 5054), 'pathlib.Path', 'Path', (['args.csv_name'], {}), '(args.csv_name)\n', (5039, 5054), False, 'from pathlib import Path\n'), ((5108, 5127), 'pathlib.Path', 'Path', (['args.csv_name'], {}), '(args.csv_name)\n', (5112, 5127), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
# Copyright (c) 2017, DIANA-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""ROOT constants used in deserialization."""
import numpy
# used in unmarshaling
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)
kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)
kMapOffset = 2
# not used?
kNullTag = 0
kNotDeleted = 0x02000000
kZombie = 0x04000000
kBitMask = 0x00ffffff
kDisplacementMask = 0xFF000000
################################################################ core/zip/inc/Compression.h
kZLIB = 1
kLZMA = 2
kOldCompressionAlgo = 3
kLZ4 = 4
kUndefinedCompressionAlgorithm = 5
################################################################ constants for streamers
kBase = 0
kChar = 1
kShort = 2
kInt = 3
kLong = 4
kFloat = 5
kCounter = 6
kCharStar = 7
kDouble = 8
kDouble32 = 9
kLegacyChar = 10
kUChar = 11
kUShort = 12
kUInt = 13
kULong = 14
kBits = 15
kLong64 = 16
kULong64 = 17
kBool = 18
kFloat16 = 19
kOffsetL = 20
kOffsetP = 40
kObject = 61
kAny = 62
kObjectp = 63
kObjectP = 64
kTString = 65
kTObject = 66
kTNamed = 67
kAnyp = 68
kAnyP = 69
kAnyPnoVT = 70
kSTLp = 71
kSkip = 100
kSkipL = 120
kSkipP = 140
kConv = 200
kConvL = 220
kConvP = 240
kSTL = 300
kSTLstring = 365
kStreamer = 500
kStreamLoop = 501
################################################################ constants from core/foundation/inc/ESTLType.h
kNotSTL = 0
kSTLvector = 1
kSTLlist = 2
kSTLdeque = 3
kSTLmap = 4
kSTLmultimap = 5
kSTLset = 6
kSTLmultiset = 7
kSTLbitset = 8
kSTLforwardlist = 9
kSTLunorderedset = 10
kSTLunorderedmultiset = 11
kSTLunorderedmap = 12
kSTLunorderedmultimap = 13
kSTLend = 14
kSTLany = 300
################################################################ IOFeatures
kGenerateOffsetMap = 1
| [
"numpy.int64",
"numpy.uint32"
] | [((1673, 1696), 'numpy.int64', 'numpy.int64', (['(1073741824)'], {}), '(1073741824)\n', (1684, 1696), False, 'import numpy\n'), ((1721, 1739), 'numpy.int64', 'numpy.int64', (['(16384)'], {}), '(16384)\n', (1732, 1739), False, 'import numpy\n'), ((1765, 1788), 'numpy.int64', 'numpy.int64', (['(2147483648)'], {}), '(2147483648)\n', (1776, 1788), False, 'import numpy\n'), ((1813, 1836), 'numpy.int64', 'numpy.int64', (['(4294967295)'], {}), '(4294967295)\n', (1824, 1836), False, 'import numpy\n'), ((1862, 1884), 'numpy.uint32', 'numpy.uint32', (['(16777216)'], {}), '(16777216)\n', (1874, 1884), False, 'import numpy\n'), ((1911, 1931), 'numpy.uint32', 'numpy.uint32', (['(1 << 4)'], {}), '(1 << 4)\n', (1923, 1931), False, 'import numpy\n')] |
from django.contrib import admin
from .models import Comment
# Register your models here.
class CommentsAdmin(admin.ModelAdmin):
list_display = ['id', "user", "content", "timestamp"]
class Meta:
model = Comment
admin.site.register(Comment, CommentsAdmin)
| [
"django.contrib.admin.site.register"
] | [((233, 276), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment', 'CommentsAdmin'], {}), '(Comment, CommentsAdmin)\n', (252, 276), False, 'from django.contrib import admin\n')] |
import itertools
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
def _get_steps():
hdf_subdir = "augmentation/"
steps = {"step_name": ["prototypical", "single_sources", "mixtures"]}
steps_df = pd.DataFrame(steps)
steps_df["hdf_path"] = hdf_subdir + steps_df["step_name"]
# Impose order on the augmentation steps:
steps_df["step_name"] = pd.Categorical(
steps_df["step_name"], ["prototypical", "single_sources", "mixtures"]
)
steps_df.sort_values("step_name", inplace=True, ignore_index=True)
return steps_df
def prototypical_spectrum(dataset, source_df):
"""Weighted average of calibration spectra with randomly assigned weights
between 0 and 1.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
source_df (pandas.DataFrame): Calibration information for a single source.
Returns:
pandas.DataFrame: A prototypical Excitation Emission Matrix for a single source.
"""
aug_steps_df = _get_steps()
source_name = source_df.index.get_level_values("source").unique().item()
source_units = source_df.index.get_level_values("source_units").unique().item()
intensity_units = (
source_df.index.get_level_values("intensity_units").unique().item()
)
proto_eems = []
for index, row in source_df[source_df["prototypical_sample"]].iterrows():
eem_path = row["hdf_path"]
eem = pd.read_hdf(dataset.hdf, key=eem_path)
proto_eems.append(eem)
# TODO - IMPORTANT: This can't just be the mean of the prototypical samples...
# Need to use the same weighted average as the intensity values!
proto_concentration = source_df[source_df["prototypical_sample"]][
"concentration"
].mean()
"""
weights = []
for i in range(len(proto_eems)):
weights.append(random.uniform(0, 1))
proto_eem = np.average([eem.values for eem in proto_eems], axis=0, weights=weights)
"""
proto_eem = np.average([eem.values for eem in proto_eems], axis=0)
proto_eem = pd.DataFrame(
data=proto_eem, index=proto_eems[0].index, columns=proto_eems[0].columns
)
proto_eem.index.name = "emission_wavelength"
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
hdf_path = os.path.join(hdf_path, source_name)
new_indices = np.array(
["source", "proto_conc", "source_units", "intensity_units", "hdf_path"]
)
proto_eem = proto_eem.assign(
**{
"source": source_name,
"proto_conc": proto_concentration,
"source_units": source_units,
"intensity_units": intensity_units,
"hdf_path": hdf_path,
}
)
proto_eem.set_index(new_indices.tolist(), append=True, inplace=True)
new_indices = np.append(new_indices, ("emission_wavelength"))
proto_eem = proto_eem.reorder_levels(new_indices)
proto_eem.to_hdf(dataset.hdf, key=hdf_path)
return proto_eem
def create_prototypical_spectra(dataset, cal_df):
"""Creates a protoypical spectrum for each calibration source in the PyEEM
dataset.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
Returns:
pandas.DataFrame: A table describing the prototypical spectra and their
paths within the HDF5 store.
"""
results_rows = []
for source_name, group in cal_df.groupby(level="source", as_index=False):
proto_eem_df = prototypical_spectrum(dataset, group)
new_indices = proto_eem_df.index.droplevel("emission_wavelength").unique()
result = dict(zip(list(new_indices.names), list(new_indices.item())))
results_rows.append(result)
results_df = pd.DataFrame(results_rows)
results_index = "source"
results_df.set_index(results_index, inplace=True)
return results_df
def single_source(dataset, source_df, conc_range, num_spectra):
"""Creates augmented single source spectra for a single calibration source.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
source_df (pandas.DataFrame): Calibration information for a single source.
conc_range (tuple of (int, float)): The concentration range which the
augmented single source spectra will occupy.
num_spectra (int): The number of augmented single source spectra to create.
Returns:
pandas.DataFrame: A table describing the source's augmented spectra and their
paths within the HDF5 store.
"""
aug_steps_df = _get_steps()
# Get the source's name
source_name = source_df.index.get_level_values("source").unique().item()
# Get the HDF5 path to the source's prototypical EEM
proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
proto_hdf_path = os.path.join(proto_hdf_path, source_name)
# Read in the prototypical EEM
proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path)
# Get the source's prototypical concentration
proto_concentration = proto_eem.index.get_level_values("proto_conc").unique().item()
# Remove the concentration index from the dataframe
proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
# Get the slope and intercept of the source's calibration function
slope = source_df.index.get_level_values("slope").unique().item()
y_intercept = source_df.index.get_level_values("intercept").unique().item()
"""
slope = (
cal_df.xs(source_name, level="source")
.index.get_level_values("slope")
.unique()
.item()
)
y_intercept = (
cal_df.xs(source_name, level="source")
.index.get_level_values("intercept")
.unique()
.item()
)
"""
# Generate the 1D polynomial
cal_func = np.poly1d([slope, y_intercept])
# Generate the concentration range based on the argument's
concentration_range = np.linspace(conc_range[0], conc_range[1], num=num_spectra)
# Create a new HDF5 path for the single source spectra
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "single_sources"][
"hdf_path"
].item()
hdf_path = os.path.join(hdf_path, source_name)
# aug_ss_dfs: A list which we will iteratively append single source spectra to. For each
# concentration in the concentration range. Then we will turn the list of DFs
# into a single DF by using concat()
aug_ss_dfs = []
sources = list(dataset.calibration_sources)
for new_concentration in concentration_range:
scalar = cal_func(new_concentration) / cal_func(proto_concentration)
ss_eem = proto_eem * scalar
# Make sure there are no negative values
ss_eem.clip(lower=0, inplace=True)
label = np.zeros(len(sources))
source_index = sources.index(source_name)
label[source_index] = new_concentration
ss_eem.index.name = "emission_wavelength"
ss_eem = ss_eem.assign(**dict(zip(sources, label)))
new_indices = sources
ss_eem.set_index(new_indices, append=True, inplace=True)
new_indices = [
"source",
"source_units",
"intensity_units",
"hdf_path",
] + new_indices
new_indices.append("emission_wavelength")
ss_eem = ss_eem.reorder_levels(new_indices)
ss_eem.rename(index={proto_hdf_path: hdf_path}, inplace=True)
aug_ss_dfs.append(ss_eem)
aug_ss_df = pd.concat(aug_ss_dfs)
aug_ss_df.to_hdf(dataset.hdf, key=hdf_path)
return aug_ss_df
def create_single_source_spectra(dataset, cal_df, conc_range, num_spectra):
"""Creates augmented single source spectra for each calibration source in the
PyEEM dataset.
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
conc_range (tuple of (int, float)): The concentration range which the
augmented single source spectra will occupy.
num_spectra (int): The number of augmented single source spectra for each
calibration source.
Returns:
pandas.DataFrame: A table describing the augmented single source spectra
and their paths within the HDF5 store.
"""
aug_ss_dfs = []
for source_name, group in tqdm(cal_df.groupby(level="source", as_index=False)):
ss_df = single_source(
dataset, group, conc_range=conc_range, num_spectra=num_spectra
)
ss_df = (
ss_df.index.droplevel(["emission_wavelength"])
.unique()
.to_frame()
.reset_index(drop=True)
)
ss_df.set_index(
["source", "source_units", "intensity_units", "hdf_path"], inplace=True
)
aug_ss_dfs.append(ss_df)
aug_ss_df = pd.concat(aug_ss_dfs)
return aug_ss_df
"""
def mixture():
return
"""
def create_mixture_spectra(dataset, cal_df, conc_range, num_steps, scale="logarithmic"):
"""Creates augmented mixture spectra by summing together augmented single source spectra.
The number of augmented mixtures created is equal to the Cartesian product composed of...
Args:
dataset (pyeem.datasets.Dataset): Your PyEEM dataset.
cal_df (pandas.DataFrame): Calibration information for your dataset
returned from :meth:`pyeem.preprocessing.calibration()`
conc_range (tuple of (int, float)): The concentration range which the
augmented spectra mixtures will occupy.
num_steps (int): The number of intervals within the concentration range.
scale (str, optional): Determines how the concentrations will be spaced along
the given concentration range. Options are "linear" and "logarithmic". Defaults to "logarithmic".
Raises:
Exception: Raised if calibration sources are reported in different units.
ValueError: Raised if the scale argument is a value other than linear" or "logarithmic".
Returns:
pandas.DataFrame: A table describing the augmented mixture spectra
and their paths within the HDF5 store.
"""
if cal_df.index.get_level_values("source_units").nunique() != 1:
raise Exception(
"Sources must be reported in the same units in order create augmented mixtures."
)
sources = cal_df.index.get_level_values(level="source").unique().to_list()
source_units = cal_df.index.get_level_values("source_units").unique().item()
intensity_units = (
cal_df.index.get_level_values(level="intensity_units").unique().item()
)
aug_steps_df = _get_steps()
hdf_path = aug_steps_df[aug_steps_df["step_name"] == "mixtures"]["hdf_path"].item()
proto_spectra = []
for source_name, group in cal_df.groupby(level="source", as_index=False):
# Get the HDF5 path to the source's prototypical EEM
proto_hdf_path = aug_steps_df[aug_steps_df["step_name"] == "prototypical"][
"hdf_path"
].item()
proto_hdf_path = os.path.join(proto_hdf_path, source_name)
# Read in the prototypical EEM
proto_eem = pd.read_hdf(dataset.hdf, key=proto_hdf_path)
proto_spectra.append(proto_eem)
proto_eem_df = pd.concat(proto_spectra)
if scale == "logarithmic":
number_range = np.geomspace(conc_range[0], conc_range[1], num=num_steps)
elif scale == "linear":
number_range = np.linspace(conc_range[0], conc_range[1], num=num_steps)
else:
raise ValueError("scale must be 'logarithmic' or 'linear'")
cartesian_product = [
p for p in itertools.product(number_range.tolist(), repeat=len(sources))
]
aug = []
for conc_set in tqdm(cartesian_product, desc="Creating Augmented Mixtures"):
mix = []
# TODO - it'd be a good idea to break this out into another function.
# Call it mixture() -- returns a single mixture EEM
for index, label in enumerate(zip(sources, conc_set)):
source_name = label[0]
new_concentration = label[1]
slope = (
cal_df.xs(source_name, level="source")
.index.get_level_values("slope")
.unique()
.item()
)
y_intercept = (
cal_df.xs(source_name, level="source")
.index.get_level_values("intercept")
.unique()
.item()
)
cal_func = np.poly1d([slope, y_intercept])
proto_eem = proto_eem_df.xs(source_name, level="source", drop_level=False)
proto_concentration = (
proto_eem.index.get_level_values("proto_conc").unique().item()
)
proto_eem.reset_index(level=["proto_conc"], drop=True, inplace=True)
scalar = cal_func(new_concentration) / cal_func(proto_concentration)
new_eem = proto_eem * scalar
# Make sure there are no negative values
new_eem.clip(lower=0, inplace=True)
mix.append(new_eem)
mix_eem = pd.concat(mix).sum(level="emission_wavelength")
mix_eem = mix_eem.assign(**dict(zip(sources, conc_set)))
mix_eem["hdf_path"] = hdf_path
mix_eem["source"] = "mixture"
mix_eem["source_units"] = source_units
mix_eem["intensity_units"] = intensity_units
new_indices = [
"source",
"source_units",
"intensity_units",
"hdf_path",
] + sources
mix_eem.set_index(new_indices, append=True, inplace=True)
new_indices = np.append(new_indices, ("emission_wavelength"))
mix_eem = mix_eem.reorder_levels(new_indices)
aug.append(mix_eem)
aug_mix_df = pd.concat(aug)
aug_mix_df.to_hdf(dataset.hdf, key=hdf_path)
aug_mix_df = (
aug_mix_df.index.droplevel(["emission_wavelength"])
.unique()
.to_frame()
.reset_index(drop=True)
)
aug_mix_df.set_index(
["source", "source_units", "intensity_units", "hdf_path"], inplace=True
)
return aug_mix_df
| [
"numpy.average",
"tqdm.tqdm",
"os.path.join",
"pandas.Categorical",
"numpy.append",
"numpy.array",
"numpy.linspace",
"numpy.geomspace",
"pandas.DataFrame",
"pandas.concat",
"numpy.poly1d",
"pandas.read_hdf"
] | [((245, 264), 'pandas.DataFrame', 'pd.DataFrame', (['steps'], {}), '(steps)\n', (257, 264), True, 'import pandas as pd\n'), ((401, 490), 'pandas.Categorical', 'pd.Categorical', (["steps_df['step_name']", "['prototypical', 'single_sources', 'mixtures']"], {}), "(steps_df['step_name'], ['prototypical', 'single_sources',\n 'mixtures'])\n", (415, 490), True, 'import pandas as pd\n'), ((2006, 2060), 'numpy.average', 'np.average', (['[eem.values for eem in proto_eems]'], {'axis': '(0)'}), '([eem.values for eem in proto_eems], axis=0)\n', (2016, 2060), True, 'import numpy as np\n'), ((2078, 2169), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'proto_eem', 'index': 'proto_eems[0].index', 'columns': 'proto_eems[0].columns'}), '(data=proto_eem, index=proto_eems[0].index, columns=proto_eems[\n 0].columns)\n', (2090, 2169), True, 'import pandas as pd\n'), ((2350, 2385), 'os.path.join', 'os.path.join', (['hdf_path', 'source_name'], {}), '(hdf_path, source_name)\n', (2362, 2385), False, 'import os\n'), ((2405, 2490), 'numpy.array', 'np.array', (["['source', 'proto_conc', 'source_units', 'intensity_units', 'hdf_path']"], {}), "(['source', 'proto_conc', 'source_units', 'intensity_units',\n 'hdf_path'])\n", (2413, 2490), True, 'import numpy as np\n'), ((2860, 2905), 'numpy.append', 'np.append', (['new_indices', '"""emission_wavelength"""'], {}), "(new_indices, 'emission_wavelength')\n", (2869, 2905), True, 'import numpy as np\n'), ((3908, 3934), 'pandas.DataFrame', 'pd.DataFrame', (['results_rows'], {}), '(results_rows)\n', (3920, 3934), True, 'import pandas as pd\n'), ((5034, 5075), 'os.path.join', 'os.path.join', (['proto_hdf_path', 'source_name'], {}), '(proto_hdf_path, source_name)\n', (5046, 5075), False, 'import os\n'), ((5128, 5172), 'pandas.read_hdf', 'pd.read_hdf', (['dataset.hdf'], {'key': 'proto_hdf_path'}), '(dataset.hdf, key=proto_hdf_path)\n', (5139, 5172), True, 'import pandas as pd\n'), ((6023, 6054), 'numpy.poly1d', 'np.poly1d', (['[slope, y_intercept]'], 
{}), '([slope, y_intercept])\n', (6032, 6054), True, 'import numpy as np\n'), ((6145, 6203), 'numpy.linspace', 'np.linspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_spectra'}), '(conc_range[0], conc_range[1], num=num_spectra)\n', (6156, 6203), True, 'import numpy as np\n'), ((6387, 6422), 'os.path.join', 'os.path.join', (['hdf_path', 'source_name'], {}), '(hdf_path, source_name)\n', (6399, 6422), False, 'import os\n'), ((7683, 7704), 'pandas.concat', 'pd.concat', (['aug_ss_dfs'], {}), '(aug_ss_dfs)\n', (7692, 7704), True, 'import pandas as pd\n'), ((9128, 9149), 'pandas.concat', 'pd.concat', (['aug_ss_dfs'], {}), '(aug_ss_dfs)\n', (9137, 9149), True, 'import pandas as pd\n'), ((11553, 11577), 'pandas.concat', 'pd.concat', (['proto_spectra'], {}), '(proto_spectra)\n', (11562, 11577), True, 'import pandas as pd\n'), ((12024, 12083), 'tqdm.tqdm', 'tqdm', (['cartesian_product'], {'desc': '"""Creating Augmented Mixtures"""'}), "(cartesian_product, desc='Creating Augmented Mixtures')\n", (12028, 12083), False, 'from tqdm import tqdm\n'), ((14073, 14087), 'pandas.concat', 'pd.concat', (['aug'], {}), '(aug)\n', (14082, 14087), True, 'import pandas as pd\n'), ((1455, 1493), 'pandas.read_hdf', 'pd.read_hdf', (['dataset.hdf'], {'key': 'eem_path'}), '(dataset.hdf, key=eem_path)\n', (1466, 1493), True, 'import pandas as pd\n'), ((11347, 11388), 'os.path.join', 'os.path.join', (['proto_hdf_path', 'source_name'], {}), '(proto_hdf_path, source_name)\n', (11359, 11388), False, 'import os\n'), ((11448, 11492), 'pandas.read_hdf', 'pd.read_hdf', (['dataset.hdf'], {'key': 'proto_hdf_path'}), '(dataset.hdf, key=proto_hdf_path)\n', (11459, 11492), True, 'import pandas as pd\n'), ((11633, 11690), 'numpy.geomspace', 'np.geomspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_steps'}), '(conc_range[0], conc_range[1], num=num_steps)\n', (11645, 11690), True, 'import numpy as np\n'), ((13925, 13970), 'numpy.append', 'np.append', (['new_indices', '"""emission_wavelength"""'], 
{}), "(new_indices, 'emission_wavelength')\n", (13934, 13970), True, 'import numpy as np\n'), ((11742, 11798), 'numpy.linspace', 'np.linspace', (['conc_range[0]', 'conc_range[1]'], {'num': 'num_steps'}), '(conc_range[0], conc_range[1], num=num_steps)\n', (11753, 11798), True, 'import numpy as np\n'), ((12793, 12824), 'numpy.poly1d', 'np.poly1d', (['[slope, y_intercept]'], {}), '([slope, y_intercept])\n', (12802, 12824), True, 'import numpy as np\n'), ((13398, 13412), 'pandas.concat', 'pd.concat', (['mix'], {}), '(mix)\n', (13407, 13412), True, 'import pandas as pd\n')] |
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from analysis.comparisons import gt, gte, lt, lte
def test_comparisons():
# a | b | gt | gte | lt | lte
# ---+---+----+-----+----+-----
# 1 | 1 | F | T | F | T
# 1 | 2 | F | F | T | T
# 2 | 1 | T | T | F | F
# 1 | - | T | T | F | F
# - | 1 | F | F | T | T
# - | - | F | F | F | F
# This makes things line up nicely
T = True
F = False
df = pd.DataFrame.from_records(
[
(1, 1, F, T, F, T),
(1, 2, F, F, T, T),
(2, 1, T, T, F, F),
(1, 0, T, T, F, F),
(0, 1, F, F, T, T),
(0, 0, F, F, F, F),
],
columns=["a", "b", "gt", "gte", "lt", "lte"],
).replace(0, np.nan)
assert_series_equal(gt(df["a"], df["b"]), df["gt"], check_names=False)
assert_series_equal(gte(df["a"], df["b"]), df["gte"], check_names=False)
assert_series_equal(lt(df["a"], df["b"]), df["lt"], check_names=False)
assert_series_equal(lte(df["a"], df["b"]), df["lte"], check_names=False)
| [
"pandas.DataFrame.from_records",
"analysis.comparisons.lt",
"analysis.comparisons.gt",
"analysis.comparisons.lte",
"analysis.comparisons.gte"
] | [((859, 879), 'analysis.comparisons.gt', 'gt', (["df['a']", "df['b']"], {}), "(df['a'], df['b'])\n", (861, 879), False, 'from analysis.comparisons import gt, gte, lt, lte\n'), ((934, 955), 'analysis.comparisons.gte', 'gte', (["df['a']", "df['b']"], {}), "(df['a'], df['b'])\n", (937, 955), False, 'from analysis.comparisons import gt, gte, lt, lte\n'), ((1011, 1031), 'analysis.comparisons.lt', 'lt', (["df['a']", "df['b']"], {}), "(df['a'], df['b'])\n", (1013, 1031), False, 'from analysis.comparisons import gt, gte, lt, lte\n'), ((1086, 1107), 'analysis.comparisons.lte', 'lte', (["df['a']", "df['b']"], {}), "(df['a'], df['b'])\n", (1089, 1107), False, 'from analysis.comparisons import gt, gte, lt, lte\n'), ((515, 716), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['[(1, 1, F, T, F, T), (1, 2, F, F, T, T), (2, 1, T, T, F, F), (1, 0, T, T, F,\n F), (0, 1, F, F, T, T), (0, 0, F, F, F, F)]'], {'columns': "['a', 'b', 'gt', 'gte', 'lt', 'lte']"}), "([(1, 1, F, T, F, T), (1, 2, F, F, T, T), (2, 1, T,\n T, F, F), (1, 0, T, T, F, F), (0, 1, F, F, T, T), (0, 0, F, F, F, F)],\n columns=['a', 'b', 'gt', 'gte', 'lt', 'lte'])\n", (540, 716), True, 'import pandas as pd\n')] |
import pytest
import numpy as np
import os
import pyarrow as pa
import pyarrow.feather as feather
import pandas as pd
from app.services.preprocessor import PreProcessor
from typing import List
@pytest.fixture
def preprocessor() -> PreProcessor:
return PreProcessor("datasets/csvs/train.csv", "datasets/csvs/building1.csv")
@pytest.fixture
def generic_csv() -> str:
arr = np.random.rand(20, 20)
path = "datasets/csvs/dummy.csv"
np.savetxt(path, arr)
yield path
os.remove(path)
@pytest.fixture
def generic_feathers() -> List[str]:
base_path = "datasets/gen"
files = []
n_files = 30
col_rows = 20
rows = [f"row{x}" for x in range(0, col_rows)]
columns = [f"column{x}" for x in range(0, col_rows)]
for number in range(0, n_files):
arr = np.random.rand(col_rows , col_rows)
df = pd.DataFrame(arr, index = rows, columns = columns)
file_path = f"{base_path}/gen_{number}.feather"
files.append(file_path)
feather.write_feather(df, file_path)
yield (files, n_files, col_rows)
for file in files:
os.remove(file)
| [
"numpy.random.rand",
"app.services.preprocessor.PreProcessor",
"numpy.savetxt",
"pandas.DataFrame",
"pyarrow.feather.write_feather",
"os.remove"
] | [((260, 330), 'app.services.preprocessor.PreProcessor', 'PreProcessor', (['"""datasets/csvs/train.csv"""', '"""datasets/csvs/building1.csv"""'], {}), "('datasets/csvs/train.csv', 'datasets/csvs/building1.csv')\n", (272, 330), False, 'from app.services.preprocessor import PreProcessor\n'), ((384, 406), 'numpy.random.rand', 'np.random.rand', (['(20)', '(20)'], {}), '(20, 20)\n', (398, 406), True, 'import numpy as np\n'), ((448, 469), 'numpy.savetxt', 'np.savetxt', (['path', 'arr'], {}), '(path, arr)\n', (458, 469), True, 'import numpy as np\n'), ((491, 506), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (500, 506), False, 'import os\n'), ((804, 838), 'numpy.random.rand', 'np.random.rand', (['col_rows', 'col_rows'], {}), '(col_rows, col_rows)\n', (818, 838), True, 'import numpy as np\n'), ((853, 899), 'pandas.DataFrame', 'pd.DataFrame', (['arr'], {'index': 'rows', 'columns': 'columns'}), '(arr, index=rows, columns=columns)\n', (865, 899), True, 'import pandas as pd\n'), ((1000, 1036), 'pyarrow.feather.write_feather', 'feather.write_feather', (['df', 'file_path'], {}), '(df, file_path)\n', (1021, 1036), True, 'import pyarrow.feather as feather\n'), ((1108, 1123), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (1117, 1123), False, 'import os\n')] |
import itertools
import unittest
from usfm_utils.elements.document import Document
from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote
from usfm_utils.elements.footnote_utils import AutomaticFootnoteLabel, CustomFootnoteLabel
from usfm_utils.html.html_visitor import HtmlVisitor, non_span_formatting
from tests import test_utils
class HtmlRenderingTest(unittest.TestCase):
@staticmethod
def render_elements(*elements):
return HtmlRenderingTest.render(Document(elements))
@staticmethod
def render(document):
test_file = HtmlRenderingTest.TestFile()
visitor = HtmlVisitor(test_file)
visitor.write(document)
return test_file.content()
def test_footnotes(self):
for kind in list(Footnote.Kind):
word = test_utils.word()
footnote = Footnote(kind, [Text(word)], AutomaticFootnoteLabel())
paragraph = Paragraph([footnote])
rendered = self.render_elements(paragraph)
self.assertIn(kind.name, rendered)
self.assertIn(word, rendered)
for kind in list(Footnote.Kind):
word = test_utils.word()
label = test_utils.word(allow_empty=False)
footnote = Footnote(kind, [Text(word)], CustomFootnoteLabel(label))
paragraph = Paragraph([footnote])
rendered = self.render_elements(paragraph)
self.assertIn(kind.name, rendered)
self.assertIn(word, rendered)
self.assertIn(label, rendered)
def test_formatted_text(self):
for kind in list(FormattedText.Kind):
text = " ".join(test_utils.word(allow_empty=False)
for _ in range(10))
formatted_text = FormattedText(kind, [Text(text)])
rendered = self.render_elements(formatted_text)
self.assertIn(text, rendered)
if kind in non_span_formatting:
open_tag, close_tag = non_span_formatting[kind]
self.assertIn(open_tag, rendered)
self.assertIn(close_tag, rendered)
else:
self.assertIn(kind.name, rendered) # kind.name should appear as a class
def test_heading(self):
word = test_utils.word()
heading = test_utils.word()
elements = [Paragraph([Text(word)])]
document = Document(elements, heading=heading)
rendered = self.render(document)
self.assertIn(word, rendered)
self.assertIn(heading, rendered)
def test_paragraph(self):
bools = (False, True)
for embedded, poetic, introductory, continuation \
in itertools.product(bools, bools, bools, bools):
word = test_utils.word()
text = Text(word)
paragraph = Paragraph([text],
embedded=embedded,
poetic=poetic,
introductory=introductory,
continuation=continuation)
rendered = self.render_elements(paragraph)
self.assertIn(word, rendered)
if embedded:
self.assertIn("embedded", rendered) # should appear as a class
else:
self.assertNotIn("embedded", rendered)
if poetic:
self.assertIn("poetic", rendered)
else:
self.assertNotIn("poetic", rendered)
if introductory:
self.assertIn("introductory", rendered)
else:
self.assertNotIn("introductory", rendered)
if continuation:
self.assertIn("continuation", rendered)
else:
self.assertNotIn("continuation", rendered)
class TestFile(object):
"""
A file-like string object used for mocking text files
"""
def __init__(self):
self._content = ""
def content(self):
return self._content
def write(self, p_str):
self._content += p_str
if __name__ == "__main__":
unittest.main()
| [
"tests.test_utils.word",
"usfm_utils.elements.footnote_utils.AutomaticFootnoteLabel",
"itertools.product",
"usfm_utils.elements.document.Document",
"usfm_utils.elements.footnote_utils.CustomFootnoteLabel",
"usfm_utils.elements.element_impls.Text",
"unittest.main",
"usfm_utils.elements.element_impls.Pa... | [((4142, 4157), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4155, 4157), False, 'import unittest\n'), ((639, 661), 'usfm_utils.html.html_visitor.HtmlVisitor', 'HtmlVisitor', (['test_file'], {}), '(test_file)\n', (650, 661), False, 'from usfm_utils.html.html_visitor import HtmlVisitor, non_span_formatting\n'), ((2270, 2287), 'tests.test_utils.word', 'test_utils.word', ([], {}), '()\n', (2285, 2287), False, 'from tests import test_utils\n'), ((2306, 2323), 'tests.test_utils.word', 'test_utils.word', ([], {}), '()\n', (2321, 2323), False, 'from tests import test_utils\n'), ((2388, 2423), 'usfm_utils.elements.document.Document', 'Document', (['elements'], {'heading': 'heading'}), '(elements, heading=heading)\n', (2396, 2423), False, 'from usfm_utils.elements.document import Document\n'), ((2683, 2728), 'itertools.product', 'itertools.product', (['bools', 'bools', 'bools', 'bools'], {}), '(bools, bools, bools, bools)\n', (2700, 2728), False, 'import itertools\n'), ((507, 525), 'usfm_utils.elements.document.Document', 'Document', (['elements'], {}), '(elements)\n', (515, 525), False, 'from usfm_utils.elements.document import Document\n'), ((820, 837), 'tests.test_utils.word', 'test_utils.word', ([], {}), '()\n', (835, 837), False, 'from tests import test_utils\n'), ((940, 961), 'usfm_utils.elements.element_impls.Paragraph', 'Paragraph', (['[footnote]'], {}), '([footnote])\n', (949, 961), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((1166, 1183), 'tests.test_utils.word', 'test_utils.word', ([], {}), '()\n', (1181, 1183), False, 'from tests import test_utils\n'), ((1204, 1238), 'tests.test_utils.word', 'test_utils.word', ([], {'allow_empty': '(False)'}), '(allow_empty=False)\n', (1219, 1238), False, 'from tests import test_utils\n'), ((1343, 1364), 'usfm_utils.elements.element_impls.Paragraph', 'Paragraph', (['[footnote]'], {}), '([footnote])\n', (1352, 1364), False, 
'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((2749, 2766), 'tests.test_utils.word', 'test_utils.word', ([], {}), '()\n', (2764, 2766), False, 'from tests import test_utils\n'), ((2786, 2796), 'usfm_utils.elements.element_impls.Text', 'Text', (['word'], {}), '(word)\n', (2790, 2796), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((2821, 2931), 'usfm_utils.elements.element_impls.Paragraph', 'Paragraph', (['[text]'], {'embedded': 'embedded', 'poetic': 'poetic', 'introductory': 'introductory', 'continuation': 'continuation'}), '([text], embedded=embedded, poetic=poetic, introductory=\n introductory, continuation=continuation)\n', (2830, 2931), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((890, 914), 'usfm_utils.elements.footnote_utils.AutomaticFootnoteLabel', 'AutomaticFootnoteLabel', ([], {}), '()\n', (912, 914), False, 'from usfm_utils.elements.footnote_utils import AutomaticFootnoteLabel, CustomFootnoteLabel\n'), ((1291, 1317), 'usfm_utils.elements.footnote_utils.CustomFootnoteLabel', 'CustomFootnoteLabel', (['label'], {}), '(label)\n', (1310, 1317), False, 'from usfm_utils.elements.footnote_utils import AutomaticFootnoteLabel, CustomFootnoteLabel\n'), ((877, 887), 'usfm_utils.elements.element_impls.Text', 'Text', (['word'], {}), '(word)\n', (881, 887), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((1278, 1288), 'usfm_utils.elements.element_impls.Text', 'Text', (['word'], {}), '(word)\n', (1282, 1288), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((1662, 1696), 'tests.test_utils.word', 'test_utils.word', ([], {'allow_empty': '(False)'}), '(allow_empty=False)\n', (1677, 1696), False, 'from tests import test_utils\n'), ((1795, 1805), 'usfm_utils.elements.element_impls.Text', 'Text', (['text'], {}), 
'(text)\n', (1799, 1805), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n'), ((2355, 2365), 'usfm_utils.elements.element_impls.Text', 'Text', (['word'], {}), '(word)\n', (2359, 2365), False, 'from usfm_utils.elements.element_impls import FormattedText, Text, Paragraph, Footnote\n')] |
#!/usr/bin/env python
#
# // SPDX-License-Identifier: BSD-3-CLAUSE
#
# (C) Copyright 2018, Xilinx, Inc.
#
import tensorflow as tf
import numpy as np
from xfdnn_compiler_tensorflow import TFFrontend
#from xfdnn.tools.compile.frontends.frontend_caffe import CaffeFrontend
from tensorflow.python.platform import gfile
import xdnn_opt
class xdnnRT:
def __init__(self, compiler, rtargs):
#print ("compiler args", cargs)
self._inputs = self.list_inputs_of_graph()
pydotGraph, schedule, self._out, _ = compiler.compile()
# print ("compiled pydot graph", pydotGraph)
# print ("compiled schedule", schedule)
opt = None
if rtargs.device == "CPU":
opt = xdnn_opt.CPUTransform( self._inputs, pydotGraph, schedule)
elif rtargs.device == "FPGA":
if rtargs.xclbin:
opt = xdnn_opt.FPGATransform( self._inputs, pydotGraph, schedule, rtargs.xclbin)
else:
raise AttributeError("Must specify path to xclbin when device = FPGA")
else:
raise AttributeError("Unsupported device type", rtargs.device)
#variables hold the inputs/consts of graph
self._variables = opt.variables
self._layers = opt.getLayers()
for l in self._layers:
l.setup()
def list_inputs_of_graph(self):
pass
def preprocess(self,inputs):
pass
def batch_classify(self, img_list, batch, preprocess) :
bctr = 0
ictr = 0
pred = None
prepdata = {}
prep = self._inputs[0]
print(len(img_list))
ctr = 0
pred = []
while ctr < len(img_list) :
ctrmax = min(ctr+batch, len(img_list))
pred.append(self.feed_forward(img_list[ctr:ctrmax], preprocess = preprocess))
ctr = ctrmax
if len(pred) == 0 : return []
elif len(pred) == 1 :
return pred[0]
return np.concatenate(pred)
def feed_forward(self, inputs, out=None, preprocess = None):
inp_dict = {}
if not preprocess:
preprocess = self.preprocess
inp_dict[self._inputs[0]] = preprocess(inputs)
for k, v in inp_dict.items():
self._variables[k] = v
for layer in self._layers:
layer_inputs = []
layer_inputs = [self._variables[inp] for inp in layer.inputs]
self._variables[layer.output] = layer.forward_exec( layer_inputs )
if out is None:
return self._variables[self._out]
return self._variables[out]
class TFxdnnRT(xdnnRT):
    """TensorFlow-specific xDNN runtime.

    Loads a serialized (frozen) GraphDef from ``cargs.networkfile`` and
    compiles it through the TensorFlow front end before handing control
    to the generic ``xdnnRT`` initialisation.
    """
    def __init__ ( self, cargs):
        # Parse the frozen GraphDef protobuf from disk.
        self._tfGraph = tf.GraphDef()
        with gfile.FastGFile(cargs.networkfile, 'rb') as f:
            self._tfGraph.ParseFromString(f.read())
        compiler = TFFrontend(cargs)
        # Base-class init runs the compiler and builds the layer schedule.
        xdnnRT.__init__(self, compiler, cargs)
    def list_inputs_of_graph(self) :
        """Return the names of all Placeholder nodes in the graph."""
        res = []
        for node in self._tfGraph.node :
            if node.op == 'Placeholder' :
                res.append(node.name)
        return res
    def preprocess(self, inputs):
        """Load an image path and transpose it from NHWC to NCHW.

        Inputs that are already numpy arrays are passed through untouched.
        """
        if type(inputs) is not np.ndarray:
            inputs = np.transpose(self.read_tensor_from_image_file(inputs), [0,3,1,2]) # assuming that there is only one input
        return inputs
    def read_tensor_from_image_file(self, file_name,
                                  input_height=299,
                                  input_width=299,
                                  input_mean=0,
                                  input_std=255):
        """Decode an image file, resize it and normalize pixel values.

        The image is resized bilinearly to (input_height, input_width)
        and each pixel becomes ``(value - input_mean) / input_std``.
        Returns a numpy array, typically of shape
        (1, input_height, input_width, 3) in NHWC order.
        """
        input_name = "file_reader"
        file_reader = tf.read_file(file_name, input_name)
        # Pick the decoder matching the file extension; default to JPEG.
        if file_name.endswith(".png"):
            image_reader = tf.image.decode_png(file_reader, channels=3, name="png_reader")
        elif file_name.endswith(".gif"):
            image_reader = tf.squeeze(
                tf.image.decode_gif(file_reader, name="gif_reader"))
        elif file_name.endswith(".bmp"):
            image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
        else:
            image_reader = tf.image.decode_jpeg(
                file_reader, channels=3, name="jpeg_reader")
        float_caster = tf.cast(image_reader, tf.float32)
        dims_expander = tf.expand_dims(float_caster, 0)
        resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
        normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
        # Run the small decode graph to materialise the numpy result.
        with tf.Session() as sess :
            result = sess.run(normalized)
        return result
| [
"tensorflow.image.decode_png",
"xfdnn_compiler_tensorflow.TFFrontend",
"tensorflow.image.decode_bmp",
"tensorflow.Session",
"tensorflow.image.resize_bilinear",
"xdnn_opt.CPUTransform",
"xdnn_opt.FPGATransform",
"tensorflow.GraphDef",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.image.... | [((1993, 2013), 'numpy.concatenate', 'np.concatenate', (['pred'], {}), '(pred)\n', (2007, 2013), True, 'import numpy as np\n'), ((2743, 2756), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (2754, 2756), True, 'import tensorflow as tf\n'), ((2889, 2906), 'xfdnn_compiler_tensorflow.TFFrontend', 'TFFrontend', (['cargs'], {}), '(cargs)\n', (2899, 2906), False, 'from xfdnn_compiler_tensorflow import TFFrontend\n'), ((3718, 3753), 'tensorflow.read_file', 'tf.read_file', (['file_name', 'input_name'], {}), '(file_name, input_name)\n', (3730, 3753), True, 'import tensorflow as tf\n'), ((4296, 4329), 'tensorflow.cast', 'tf.cast', (['image_reader', 'tf.float32'], {}), '(image_reader, tf.float32)\n', (4303, 4329), True, 'import tensorflow as tf\n'), ((4354, 4385), 'tensorflow.expand_dims', 'tf.expand_dims', (['float_caster', '(0)'], {}), '(float_caster, 0)\n', (4368, 4385), True, 'import tensorflow as tf\n'), ((4404, 4472), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['dims_expander', '[input_height, input_width]'], {}), '(dims_expander, [input_height, input_width])\n', (4428, 4472), True, 'import tensorflow as tf\n'), ((731, 788), 'xdnn_opt.CPUTransform', 'xdnn_opt.CPUTransform', (['self._inputs', 'pydotGraph', 'schedule'], {}), '(self._inputs, pydotGraph, schedule)\n', (752, 788), False, 'import xdnn_opt\n'), ((2770, 2810), 'tensorflow.python.platform.gfile.FastGFile', 'gfile.FastGFile', (['cargs.networkfile', '"""rb"""'], {}), "(cargs.networkfile, 'rb')\n", (2785, 2810), False, 'from tensorflow.python.platform import gfile\n'), ((3820, 3883), 'tensorflow.image.decode_png', 'tf.image.decode_png', (['file_reader'], {'channels': '(3)', 'name': '"""png_reader"""'}), "(file_reader, channels=3, name='png_reader')\n", (3839, 3883), True, 'import tensorflow as tf\n'), ((4504, 4538), 'tensorflow.subtract', 'tf.subtract', (['resized', '[input_mean]'], {}), '(resized, [input_mean])\n', (4515, 4538), True, 'import tensorflow as 
tf\n'), ((4566, 4578), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4576, 4578), True, 'import tensorflow as tf\n'), ((880, 953), 'xdnn_opt.FPGATransform', 'xdnn_opt.FPGATransform', (['self._inputs', 'pydotGraph', 'schedule', 'rtargs.xclbin'], {}), '(self._inputs, pydotGraph, schedule, rtargs.xclbin)\n', (902, 953), False, 'import xdnn_opt\n'), ((3976, 4027), 'tensorflow.image.decode_gif', 'tf.image.decode_gif', (['file_reader'], {'name': '"""gif_reader"""'}), "(file_reader, name='gif_reader')\n", (3995, 4027), True, 'import tensorflow as tf\n'), ((4097, 4148), 'tensorflow.image.decode_bmp', 'tf.image.decode_bmp', (['file_reader'], {'name': '"""bmp_reader"""'}), "(file_reader, name='bmp_reader')\n", (4116, 4148), True, 'import tensorflow as tf\n'), ((4190, 4255), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['file_reader'], {'channels': '(3)', 'name': '"""jpeg_reader"""'}), "(file_reader, channels=3, name='jpeg_reader')\n", (4210, 4255), True, 'import tensorflow as tf\n')] |
import unittest
from red_and_blue import red_and_blue
class Test(unittest.TestCase):
    """Unit tests pinning red_and_blue results for a few fixed inputs."""

    def test_1(self):
        result = red_and_blue([6, -5, 7, -3], [2, 3, -4])
        self.assertEqual(result, 13)

    def test_2(self):
        result = red_and_blue([1, 1], [10, -3, 2, 2])
        self.assertEqual(result, 13)

    def test_3(self):
        result = red_and_blue([-1, -2, -3, -4, -5], [-1, -2, -3, -4, -5])
        self.assertEqual(result, 0)

    def test_4(self):
        result = red_and_blue([0], [0])
        self.assertEqual(result, 0)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"red_and_blue.red_and_blue"
] | [((487, 502), 'unittest.main', 'unittest.main', ([], {}), '()\n', (500, 502), False, 'import unittest\n'), ((134, 174), 'red_and_blue.red_and_blue', 'red_and_blue', (['[6, -5, 7, -3]', '[2, 3, -4]'], {}), '([6, -5, 7, -3], [2, 3, -4])\n', (146, 174), False, 'from red_and_blue import red_and_blue\n'), ((228, 264), 'red_and_blue.red_and_blue', 'red_and_blue', (['[1, 1]', '[10, -3, 2, 2]'], {}), '([1, 1], [10, -3, 2, 2])\n', (240, 264), False, 'from red_and_blue import red_and_blue\n'), ((318, 374), 'red_and_blue.red_and_blue', 'red_and_blue', (['[-1, -2, -3, -4, -5]', '[-1, -2, -3, -4, -5]'], {}), '([-1, -2, -3, -4, -5], [-1, -2, -3, -4, -5])\n', (330, 374), False, 'from red_and_blue import red_and_blue\n'), ((427, 449), 'red_and_blue.red_and_blue', 'red_and_blue', (['[0]', '[0]'], {}), '([0], [0])\n', (439, 449), False, 'from red_and_blue import red_and_blue\n')] |
import random
from string import ascii_letters, digits
from urllib.parse import urlparse
def list_to_string(list_items, separator='\n'):
    """
    Joins the items of a list into a single string, placing *separator*
    between consecutive items.
    """
    joined = separator.join(list_items)
    return joined
def string_to_list(string, separator='\n'):
    """
    Converts string with separator to a list.

    The original wrapped ``str.split``'s result in a list comprehension,
    which was a pointless element-by-element copy; the split result is
    returned directly.
    """
    return string.split(separator)
def generate_random_string(length: int = 8) -> str:
    """
    Returns a random string of *length* characters drawn from ASCII
    letters and digits.
    """
    alphabet = ascii_letters + digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def build_host_url(url: str, scheme: str = 'https') -> str:
    """
    Normalises *url* into a full URL, applying *scheme* when the input
    carries none (e.g. ``example.com`` -> ``https://example.com``).
    """
    parsed = urlparse(url, scheme)
    # Scheme-less inputs land entirely in ``path``; move them to ``netloc``.
    has_netloc = bool(parsed.netloc)
    netloc = parsed.netloc if has_netloc else parsed.path
    path = parsed.path if has_netloc else ''
    rebuilt = parsed._replace(netloc=netloc, path=path)
    return rebuilt.geturl()
def remove_key_values(dictionary, keys=('self', '__class__')):
    """
    Returns a shallow copy of *dictionary* without the entries in *keys*.

    Fixes two defects in the original: the default *keys* was a mutable
    list, and ``new_dict = dictionary`` aliased the argument, so the
    caller's dict was mutated in place despite the "returns a new dict"
    contract implied by the docstring.

    Raises KeyError if any key in *keys* is missing from *dictionary*
    (behaviour preserved from the original).
    """
    new_dict = dict(dictionary)
    for key in keys:
        del new_dict[key]
    return new_dict
def filter_resource_entities_by_key_value(resource_entities, key, value):
    """
    Returns every entity dict whose *key* entry equals *value*.

    Raises KeyError when no entity matches.
    """
    matches = [entity for entity in resource_entities if entity[key] == value]
    if not matches:
        raise KeyError(f'resource entities with {key} "{value}" not found')
    return matches
def set_resource_default_fields(args_to_set, query_params, resource_instances):
    """
    Copies the default from *resource_instances* into *query_params* for
    every key whose value in *args_to_set* is None.  Mutates
    *query_params* in place; returns None.
    """
    unset_keys = (k for k, v in args_to_set.items() if v is None)
    for key in unset_keys:
        query_params[key] = resource_instances[key]
| [
"random.choice",
"urllib.parse.urlparse"
] | [((799, 820), 'urllib.parse.urlparse', 'urlparse', (['url', 'scheme'], {}), '(url, scheme)\n', (807, 820), False, 'from urllib.parse import urlparse\n'), ((592, 614), 'random.choice', 'random.choice', (['symbols'], {}), '(symbols)\n', (605, 614), False, 'import random\n')] |
#!/usr/bin/env python
# Lint as: python3
"""E2E tests for the timeline flow."""
import csv
import io
from typing import Sequence
from typing import Text
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.util import temp
from grr_response_proto.api import timeline_pb2
from grr_response_test.end_to_end_tests import test_base
class TestTimelineLinux(test_base.EndToEndTest):
  """Linux-specific end-to-end tests for the timeline flow."""

  platforms = [test_base.EndToEndTest.Platform.LINUX]

  def testUsrBin(self):
    args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
    args.root = "/bin/".encode("utf-8")
    flow = self.RunFlowAndWait("TimelineFlow", args=args)
    with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
      timeline_format = timeline_pb2.ApiGetCollectedTimelineArgs.Format.BODY
      body = flow.GetCollectedTimeline(timeline_format)
      body.WriteToFile(temp_filepath)
      with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
        entries = list(csv.reader(temp_filedesc, delimiter="|"))
    paths = [entry[1] for entry in entries]
    expected_paths = [
        "/bin/bash",
        "/bin/cat",
        "/bin/chmod",
        "/bin/cp",
        "/bin/rm",
        "/bin/sleep",
    ]
    for expected_path in expected_paths:
      self.assertIn(expected_path, paths)
    for entry in entries:
      assertBodyEntrySanity(self, entry)
class TestTimelineWindows(test_base.EndToEndTest):
  """Windows-specific end-to-end tests for the timeline flow."""

  platforms = [test_base.EndToEndTest.Platform.WINDOWS]

  def testWindows(self):
    args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
    args.root = "C:\\Windows".encode("utf-8")
    flow = self.RunFlowAndWait("TimelineFlow", args=args)
    with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
      timeline_format = timeline_pb2.ApiGetCollectedTimelineArgs.Format.BODY
      body = flow.GetCollectedTimeline(timeline_format)
      body.WriteToFile(temp_filepath)
      with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
        entries = list(csv.reader(temp_filedesc, delimiter="|"))
    paths = [entry[1].lower() for entry in entries]
    expected_paths = [
        "C:\\Windows\\explorer.exe",
        "C:\\Windows\\notepad.exe",
        "C:\\Windows\\regedit.exe",
        "C:\\Windows\\System32\\dwm.exe",
    ]
    for expected_path in expected_paths:
      self.assertIn(expected_path.lower(), paths)
    for entry in entries:
      assertBodyEntrySanity(self, entry)

  def testWindowsBackslashEscape(self):
    args = self.grr_api.types.CreateFlowArgs("TimelineFlow")
    args.root = "C:\\Windows".encode("utf-8")
    flow = self.RunFlowAndWait("TimelineFlow", args=args)
    with temp.AutoTempFilePath(suffix=".body") as temp_filepath:
      body = flow.GetCollectedTimelineBody(backslash_escape=True)
      body.WriteToFile(temp_filepath)
      with io.open(temp_filepath, mode="r", encoding="utf-8") as temp_filedesc:
        content = temp_filedesc.read().lower()
    # Backslash-escaped output doubles every path separator.
    expected_snippets = [
        "|C:\\\\Windows\\\\explorer.exe|",
        "|C:\\\\Windows\\\\notepad.exe|",
        "|C:\\\\Windows\\\\regedit.exe|",
        "|C:\\\\Windows\\\\System32\\\\dwm.exe|",
    ]
    for snippet in expected_snippets:
      self.assertIn(snippet.lower(), content)
def assertBodyEntrySanity(  # pylint: disable=invalid-name
    test: absltest.TestCase,
    entry: Sequence[Text],
) -> None:
  """Asserts that a single row of a body file is sane."""
  # Size column: non-negative (empty files are allowed).
  test.assertGreaterEqual(int(entry[6]), 0)
  # The three timestamp columns must all be strictly positive...
  for index in (7, 8, 9):
    test.assertGreater(int(entry[index]), 0)
  # ...and must not lie in the future.
  now = rdfvalue.RDFDatetime.Now()
  for index in (7, 8, 9):
    test.assertLessEqual(int(entry[index]), now.AsSecondsSinceEpoch())
| [
"csv.reader",
"grr_response_core.lib.util.temp.AutoTempFilePath",
"io.open",
"grr_response_core.lib.rdfvalue.RDFDatetime.Now"
] | [((3856, 3882), 'grr_response_core.lib.rdfvalue.RDFDatetime.Now', 'rdfvalue.RDFDatetime.Now', ([], {}), '()\n', (3880, 3882), False, 'from grr_response_core.lib import rdfvalue\n'), ((734, 771), 'grr_response_core.lib.util.temp.AutoTempFilePath', 'temp.AutoTempFilePath', ([], {'suffix': '""".body"""'}), "(suffix='.body')\n", (755, 771), False, 'from grr_response_core.lib.util import temp\n'), ((1812, 1849), 'grr_response_core.lib.util.temp.AutoTempFilePath', 'temp.AutoTempFilePath', ([], {'suffix': '""".body"""'}), "(suffix='.body')\n", (1833, 1849), False, 'from grr_response_core.lib.util import temp\n'), ((2775, 2812), 'grr_response_core.lib.util.temp.AutoTempFilePath', 'temp.AutoTempFilePath', ([], {'suffix': '""".body"""'}), "(suffix='.body')\n", (2796, 2812), False, 'from grr_response_core.lib.util import temp\n'), ((974, 1024), 'io.open', 'io.open', (['temp_filepath'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(temp_filepath, mode='r', encoding='utf-8')\n", (981, 1024), False, 'import io\n'), ((2052, 2102), 'io.open', 'io.open', (['temp_filepath'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(temp_filepath, mode='r', encoding='utf-8')\n", (2059, 2102), False, 'import io\n'), ((2947, 2997), 'io.open', 'io.open', (['temp_filepath'], {'mode': '"""r"""', 'encoding': '"""utf-8"""'}), "(temp_filepath, mode='r', encoding='utf-8')\n", (2954, 2997), False, 'import io\n'), ((1066, 1106), 'csv.reader', 'csv.reader', (['temp_filedesc'], {'delimiter': '"""|"""'}), "(temp_filedesc, delimiter='|')\n", (1076, 1106), False, 'import csv\n'), ((2144, 2184), 'csv.reader', 'csv.reader', (['temp_filedesc'], {'delimiter': '"""|"""'}), "(temp_filedesc, delimiter='|')\n", (2154, 2184), False, 'import csv\n')] |
import sys
sys.path.append("flask-comics-api")
from app import app
def test_all_comics():
print(app)
assert False | [
"sys.path.append"
] | [((11, 46), 'sys.path.append', 'sys.path.append', (['"""flask-comics-api"""'], {}), "('flask-comics-api')\n", (26, 46), False, 'import sys\n')] |
from copy import deepcopy
import numpy as np
def complete_mol(self, labels):
"""
Take a cell and complete certain molecules
The objective is to end up with a unit cell where the molecules of interest
are complete. The rest of the atoms of the cell must remain intact. Note that
the input atoms are transformed and are the same as are present in the
output.
Parameters
----------
labels : int or list of ints
The number of the atoms from which the molecules are generated
Returns
-------
new_mol : Mol object
The now complete molecule
new_cell : Mol object
The cell with the completed molecule
"""
new_mol, scattered_mol = self.per_select(labels, old_pos=True)
new_cell_atoms = deepcopy(
[a for a in self.atoms if a not in scattered_mol])
new_cell = self.copy()
new_cell.atoms = new_cell_atoms
for atom in new_mol:
new_cell.append(atom.copy())
return new_mol, new_cell
def complete_cell(self):
"""
Return a cell where atoms have been translated to complete all molecules of
the cell
Returns
-------
out_cell : Mol object
The new untruncated cell
full_mol_l : list of Mol objects
Each molecule in the untruncated cell
"""
full_mol_l = []
remaining = self.copy()
while len(remaining) != 0:
full_mol, cell = remaining.complete_mol(0)
full_mol_l.append(full_mol)
remaining = cell
for atom in full_mol:
if atom in remaining:
remaining.remove(atom)
# Convinently, remaining is now an empty Mol
out_cell = remaining
for mol in full_mol_l:
out_cell.extend(mol)
return out_cell, full_mol_l
def supercell(self, trans):
"""
Return a supercell of I x J x K
Parameters
----------
trans : array-like of length 3
Multiplications of the primitive cell
Returns
-------
supercell : Mol object
New supercell with adjusted lattice vectors
"""
import fromage.utils.mol as mol_init
# make the input into a np array
trans = np.array(trans)
new_cell = self.empty_mol()
for a_mult in range(trans[0]):
for b_mult in range(trans[1]):
for c_mult in range(trans[2]):
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def centered_supercell(self, trans, from_origin=False):
"""
Make a bigger supercell out of an input cell.
The cell is multiplied positively and negatively through each lattice
vector so that the supercluster ends up being
(1+2*trans[0])*(1+2*trans[1])*(1+2*trans[2]) times larger. For example if the
input is 1,1,1 for a cubic unit cell, the output will be the original unit
cell surrounded by 26 other unit cells forming a total 3x3x3 cube.
Alternatively, the multiplication can be centered around the origin, a corner of the
unit cell, instead of the centre. In that case the supercluster ends up being
only (2*trans[0])*(2*trans[1])*(2*trans[2])
Parameters
----------
trans : numpy array of length 3
Multiplications of the primitive cell
from_origin : bool
Determines the kind of multiplication. True is corner of the cell as
the center, False is middle of the cell.
Returns
-------
mega_cell : Mol object
The resulting supercell
"""
import fromage.utils.mol as mol_init
trans_series = [0, 0, 0]
for i, tra in enumerate(trans):
if from_origin:
trans_series[i] = list(range(-tra, tra))
else:
trans_series[i] = list(range(-tra, tra + 1))
trans_series = np.array(trans_series)
new_cell = self.empty_mol()
for a_mult in trans_series[0]:
for b_mult in trans_series[1]:
for c_mult in trans_series[2]:
vector = a_mult * \
self.vectors[0] + b_mult * \
self.vectors[1] + c_mult * self.vectors[2]
new_atoms = mol_init.Mol([i.v_translated(vector)
for i in self.atoms])
new_cell += new_atoms
out_vec = (self.vectors.T * trans.transpose()).T
new_cell.vectors = out_vec
return new_cell
def trans_from_rad(self, clust_rad):
"""
Generate the translations necessary to encapsulate a sphere of given rad
Parameters
----------
clust_rad : float
Radius defining a sphere
Returns
-------
trans_count : 3 x 1 numpy array
The translations required for the unit cell to contain the sphere
"""
# determine how many unit cells we need
vectors = deepcopy(self.vectors)
# vectors normal to faces
a_perp = np.cross(vectors[1], vectors[2])
b_perp = np.cross(vectors[2], vectors[0])
c_perp = np.cross(vectors[0], vectors[1])
# the three normalised unit vectors
perp = np.array([a_perp / np.linalg.norm(a_perp), b_perp /
np.linalg.norm(b_perp), c_perp / np.linalg.norm(c_perp)])
trans_count = np.array([1, 1, 1])
# distances from faces
distances = np.array([0.0, 0.0, 0.0])
new_vectors = deepcopy(vectors)
for comp in range(3):
while True:
trans_count[comp] += 1
distances[comp] = np.dot(new_vectors[comp], perp[comp])
new_vectors[comp] = trans_count[comp] * vectors[comp]
if distances[comp] > clust_rad:
break
trans_count -= np.array([1, 1, 1])
return trans_count
def make_cluster(self, clust_rad, mode='exc', central_mol=None):
"""
Generate a cluster of molecules from a primitive cell
This first makes a supercell of the correct size which will contain with
one additional buffer shell. Then the sphere is generated from this new
supercell by connectivity.
A central molecule can also be supplied which will turn the spheres
defining the clusters into the union of spheres stemming from each atom
of the central molecule.
Parameters
----------
clust_rad : float
Radius defining a sphere. All molecules with atoms in the sphere are
to be grabbed
mode : str
Switches between inclusive and exclusive selecting. Inclusive,
'inc', selects all molecules which have atoms within the radius.
Exclusive, 'exc', selects all molecules fully in the radius.
Default: false
central_mol : Mol
If this is supplied, the central molecule will act as a kernel for
the cluster which will end up being of the appropriate shape.
Returns
-------
cluster : Mol object
Spherical cluster of molecules from their crystal positions
"""
import fromage.utils.mol as mol_init
# if there is a central mol, account for nearest neighbour molecules
# bleeding out of the original radius
if central_mol:
central_rad = 0
for atom in central_mol:
dis = atom.v_dist([0, 0, 0])
if dis < central_rad:
central_rad = dis
trans = self.trans_from_rad(clust_rad + central_rad)
# get the translations necessary to enclose the required mols
else:
trans = self.trans_from_rad(clust_rad)
# if the cluster is inclusive, then extra mols might be required from
# an additional layer of the supercell
if mode == 'inc':
trans += np.array([1, 1, 1]) # one buffer cell layer
supercell = self.centered_supercell(trans, from_origin=True)
seed_atoms = mol_init.Mol([])
# get seedatoms in the shape of the central mol if pertinent
if central_mol:
for atom_i in supercell:
for atom_j in central_mol:
if atom_i.dist(atom_j) < clust_rad:
seed_atoms.append(atom_i)
break
# get spherical seedatoms
else:
for atom in supercell:
if atom.v_dist([0, 0, 0]) < clust_rad:
seed_atoms.append(atom)
max_mol_len = 0
if mode == 'exc':
while len(seed_atoms) > 0:
mol = seed_atoms.select(0)
if len(mol) > max_mol_len:
max_mol_len = len(mol)
clust_atoms = mol_init.Mol([])
if len(mol) == max_mol_len:
clust_atoms += mol
for atom in mol:
seed_atoms.remove(atom)
if mode == 'inc':
clust_atoms = mol_init.Mol([])
max_mol_len = len(supercell.select(supercell.index(seed_atoms[0])))
while len(seed_atoms) > 0:
# The part of the mol detected in seed_atoms
mol_tmp = seed_atoms.select(0)
if len(mol_tmp) < max_mol_len:
# The whole mol, which could potentially include even more
# seed_atoms
mol = supercell.select(supercell.index(seed_atoms[0]))
else:
mol = mol_tmp
clust_atoms += mol
for atom in mol_tmp:
seed_atoms.remove(atom)
for atom in mol:
supercell.remove(atom)
# remove all atoms of the mol which are part of seed_atoms
try:
seed_atoms.remove(atom)
except ValueError:
pass
return clust_atoms
def centered_mols(self, labels, return_trans=False):
"""
Return the molecules translated at the origin with a corresponding cell
Parameters
----------
labels : int or list of ints
The labels of the atoms to select
print_centro : bool
Print the translation vector which was detected as -centroid
Returns
-------
mol : Mol object
The selected molecules with their centroid at the origin
mod_cell : Mol object
The new confined cell corresponding to the now translated molecules
"""
mol, mod_cell = self.complete_mol(labels)
centro = mol.centroid()
mol.translate(-centro)
mod_cell.translate(-centro)
mod_cell = mod_cell.confined()
if return_trans:
return mol, mod_cell, -centro
else:
return mol, mod_cell
def confined(self):
"""Move all atoms to fit inside the primitive cell"""
frac_mol = self.dir_to_frac_pos()
out_mol = frac_mol.frac_to_dir_pos()
return out_mol
| [
"copy.deepcopy",
"numpy.cross",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"fromage.utils.mol.Mol"
] | [((769, 828), 'copy.deepcopy', 'deepcopy', (['[a for a in self.atoms if a not in scattered_mol]'], {}), '([a for a in self.atoms if a not in scattered_mol])\n', (777, 828), False, 'from copy import deepcopy\n'), ((2143, 2158), 'numpy.array', 'np.array', (['trans'], {}), '(trans)\n', (2151, 2158), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.array', 'np.array', (['trans_series'], {}), '(trans_series)\n', (4058, 4072), True, 'import numpy as np\n'), ((5055, 5077), 'copy.deepcopy', 'deepcopy', (['self.vectors'], {}), '(self.vectors)\n', (5063, 5077), False, 'from copy import deepcopy\n'), ((5122, 5154), 'numpy.cross', 'np.cross', (['vectors[1]', 'vectors[2]'], {}), '(vectors[1], vectors[2])\n', (5130, 5154), True, 'import numpy as np\n'), ((5168, 5200), 'numpy.cross', 'np.cross', (['vectors[2]', 'vectors[0]'], {}), '(vectors[2], vectors[0])\n', (5176, 5200), True, 'import numpy as np\n'), ((5214, 5246), 'numpy.cross', 'np.cross', (['vectors[0]', 'vectors[1]'], {}), '(vectors[0], vectors[1])\n', (5222, 5246), True, 'import numpy as np\n'), ((5449, 5468), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5457, 5468), True, 'import numpy as np\n'), ((5513, 5538), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (5521, 5538), True, 'import numpy as np\n'), ((5558, 5575), 'copy.deepcopy', 'deepcopy', (['vectors'], {}), '(vectors)\n', (5566, 5575), False, 'from copy import deepcopy\n'), ((5877, 5896), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5885, 5896), True, 'import numpy as np\n'), ((7923, 7939), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (7935, 7939), True, 'import fromage.utils.mol as mol_init\n'), ((7795, 7814), 'numpy.array', 'np.array', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (7803, 7814), True, 'import numpy as np\n'), ((8814, 8830), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (8826, 8830), True, 'import fromage.utils.mol as mol_init\n'), 
((5688, 5725), 'numpy.dot', 'np.dot', (['new_vectors[comp]', 'perp[comp]'], {}), '(new_vectors[comp], perp[comp])\n', (5694, 5725), True, 'import numpy as np\n'), ((5318, 5340), 'numpy.linalg.norm', 'np.linalg.norm', (['a_perp'], {}), '(a_perp)\n', (5332, 5340), True, 'import numpy as np\n'), ((5372, 5394), 'numpy.linalg.norm', 'np.linalg.norm', (['b_perp'], {}), '(b_perp)\n', (5386, 5394), True, 'import numpy as np\n'), ((5405, 5427), 'numpy.linalg.norm', 'np.linalg.norm', (['c_perp'], {}), '(c_perp)\n', (5419, 5427), True, 'import numpy as np\n'), ((8609, 8625), 'fromage.utils.mol.Mol', 'mol_init.Mol', (['[]'], {}), '([])\n', (8621, 8625), True, 'import fromage.utils.mol as mol_init\n')] |
"""Tests for aiopvpc."""
import logging
from asyncio import TimeoutError
from datetime import datetime, timedelta
from unittest.mock import patch
import pytest
from aiohttp import ClientError
from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ
from .conftest import MockAsyncSession, TZ_TEST
@pytest.mark.parametrize(
    "day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour",
    (
        ("2019-10-26 00:00:00+08:00", TZ_TEST, 0, 1, 0, False, None),
        ("2019-10-26 00:00:00", TZ_TEST, 24, 1, 24, True, 23),
        ("2019-10-27 00:00:00", TZ_TEST, 25, 1, 25, True, 23),
        ("2019-03-31 20:00:00", TZ_TEST, 23, 2, 23, False, 23),
        ("2019-03-31 20:00:00+04:00", TZ_TEST, 23, 1, 23, False, 23),
        ("2019-10-26 21:00:00", TZ_TEST, 49, 2, 26, True, 23),
        ("2019-10-26 21:00:00+01:00", TZ_TEST, 49, 2, 26, True, 23),
        ("2019-10-26 00:00:00", REFERENCE_TZ, 24, 1, 24, True, 23),
        ("2019-10-27 00:00:00", REFERENCE_TZ, 25, 1, 25, True, 23),
        ("2019-03-31 20:00:00", REFERENCE_TZ, 23, 2, 23, False, 23),
        ("2019-10-26 21:00:00", REFERENCE_TZ, 49, 2, 25, True, 23),
        ("2021-06-01 06:00:00", REFERENCE_TZ, 24, 1, 24, True, 23),
    ),
)
@pytest.mark.asyncio
async def test_price_extract(
    day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour
):
    """Test data parsing of official API files.

    Parametrized over DST-transition days (23/25-hour days) and evening
    hours where next-day prices may already be published, checking both
    the number of parsed prices and the number of HTTP calls made.
    """
    day = datetime.fromisoformat(day_str)
    mock_session = MockAsyncSession()
    pvpc_data = PVPCData(
        local_timezone=timezone,
        tariff="discrimination",
        websession=mock_session,
    )
    pvpc_data.source_available = True
    # No prices have been downloaded yet, so no state and no HTTP calls.
    assert not pvpc_data.process_state_and_attributes(day)
    assert mock_session.call_count == 0
    await pvpc_data.async_update_prices(day)
    has_prices = pvpc_data.process_state_and_attributes(day)
    assert len(pvpc_data._current_prices) == num_prices
    assert mock_session.call_count == num_calls
    # Re-evaluate 10 hours later: the retained price window may shrink or
    # grow depending on which day's prices were available.
    has_prices = pvpc_data.process_state_and_attributes(day + timedelta(hours=10))
    assert len(pvpc_data._current_prices) == num_prices_8h
    assert has_prices == available_8h
    if has_prices:
        # The last retained price should land on the expected local hour.
        last_dt, last_p = list(pvpc_data._current_prices.items())[-1]
        assert last_dt.astimezone(timezone).hour == last_hour
@pytest.mark.parametrize(
    "available, day_str, num_log_msgs, status, exception",
    (
        (False, "2032-10-26 00:00:00+00:00", 0, 200, None),
        (False, "2032-10-26 00:00:00+00:00", 0, 500, None),
        (True, "2032-10-26 00:00:00+00:00", 1, 200, TimeoutError),
        (False, "2032-10-26 00:00:00+00:00", 0, 200, TimeoutError),
        (True, "2032-10-26 00:00:00+00:00", 1, 200, ClientError),
        (False, "2032-10-26 00:00:00+00:00", 0, 200, ClientError),
    ),
)
@pytest.mark.asyncio
async def test_bad_downloads(
    available,
    day_str,
    num_log_msgs,
    status,
    exception,
    caplog,
):
    """Test graceful handling of failed downloads.

    Covers HTTP error statuses and client-side exceptions (timeout,
    connection errors); failures should yield no prices and log a
    message only when the source was previously marked available.
    """
    day = datetime.fromisoformat(day_str)
    mock_session = MockAsyncSession(status=status, exc=exception)
    with caplog.at_level(logging.INFO):
        pvpc_data = PVPCData(
            local_timezone=REFERENCE_TZ,
            tariff="normal",
            websession=mock_session,
        )
        pvpc_data.source_available = available
        # No state before any download attempt.
        assert not pvpc_data.process_state_and_attributes(day)
        prices = await pvpc_data.async_update_prices(day)
        # The failed download must produce no prices and no state change.
        assert not prices
        assert not pvpc_data.process_state_and_attributes(day)
        assert len(caplog.messages) == num_log_msgs
    # Exactly one HTTP attempt regardless of the outcome.
    assert mock_session.call_count == 1
    assert len(prices) == 0
@pytest.mark.parametrize(
    "timezone, start, end",
    (
        (
            TZ_TEST,
            datetime(2019, 10, 26, 15, tzinfo=TZ_TEST),
            datetime(2019, 10, 27, 13, tzinfo=TZ_TEST),
        ),
        (
            REFERENCE_TZ,
            datetime(2019, 10, 26, 15, tzinfo=REFERENCE_TZ),
            datetime(2019, 10, 27, 13, tzinfo=REFERENCE_TZ),
        ),
    ),
)
def test_full_data_download_range(timezone, start, end):
    """Check synchronous retrieval of full PVPC data over a day range."""
    with patch("aiohttp.ClientSession", MockAsyncSession):
        handler = PVPCData(local_timezone=timezone)
        prices = handler.download_prices_for_range(start, end)
    # One day of hourly price points.
    assert len(prices) == 24
    earliest = min(prices)
    latest = max(prices)
    first_hour_data = prices[earliest]
    # Full PVPC payload is retrieved: 30 fields, covering all ESIOS tariffs.
    assert len(first_hour_data) == 30
    assert all(tag in first_hour_data for tag in ESIOS_TARIFFS)
    # Units unchanged in full retrieval (they are in €/MWh), so values > 1.
    assert all(first_hour_data[tag] > 1 for tag in ESIOS_TARIFFS)
    # tz-alignment check (price at 15h is tz-independent).
    assert first_hour_data["NOC"] == 119.16
    assert earliest.astimezone(timezone).hour == 15
    assert latest.astimezone(timezone).hour == 13
@pytest.mark.asyncio
async def test_download_range(caplog):
    """Test async retrieval of tariff prices over day ranges.

    Exercises a successful multi-day download, then two requests for a
    period with no available data (aware and naive datetimes), checking
    HTTP call counts, warning log counts and that only plain tariff
    floats (in €/kWh, i.e. < 1) are returned.
    """
    start = datetime(2019, 10, 26, 15)
    end = datetime(2019, 10, 28, 13)
    mock_session = MockAsyncSession()
    with caplog.at_level(logging.WARNING):
        pvpc_data = PVPCData(
            tariff="electric_car", local_timezone=TZ_TEST, websession=mock_session
        )
        prices = await pvpc_data.async_download_prices_for_range(start, end)
    # Three calendar days touched -> three HTTP calls.
    assert mock_session.call_count == 3
    assert len(prices) == 34
    assert len(caplog.messages) == 2
    # A range with no available data yields no prices but logs warnings.
    no_prices = await pvpc_data.async_download_prices_for_range(
        datetime(2010, 8, 27, tzinfo=TZ_TEST),
        datetime(2010, 8, 27, 22, tzinfo=TZ_TEST),
    )
    assert len(no_prices) == 0
    assert len(caplog.messages) == 4
    # Same empty range with naive datetimes behaves the same way.
    assert not await pvpc_data.async_download_prices_for_range(
        datetime(2010, 8, 27), datetime(2010, 8, 27, 23)
    )
    assert len(caplog.messages) == 7
    first_price = min(prices)
    assert first_price.hour == 14 and first_price.tzname() == "UTC"
    # Check only tariff values are retrieved
    assert isinstance(prices[first_price], float)
    assert prices[first_price] < 1
| [
"datetime.datetime",
"pytest.mark.parametrize",
"datetime.datetime.fromisoformat",
"unittest.mock.patch",
"datetime.timedelta",
"aiopvpc.PVPCData"
] | [((303, 1184), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour"""', "(('2019-10-26 00:00:00+08:00', TZ_TEST, 0, 1, 0, False, None), (\n '2019-10-26 00:00:00', TZ_TEST, 24, 1, 24, True, 23), (\n '2019-10-27 00:00:00', TZ_TEST, 25, 1, 25, True, 23), (\n '2019-03-31 20:00:00', TZ_TEST, 23, 2, 23, False, 23), (\n '2019-03-31 20:00:00+04:00', TZ_TEST, 23, 1, 23, False, 23), (\n '2019-10-26 21:00:00', TZ_TEST, 49, 2, 26, True, 23), (\n '2019-10-26 21:00:00+01:00', TZ_TEST, 49, 2, 26, True, 23), (\n '2019-10-26 00:00:00', REFERENCE_TZ, 24, 1, 24, True, 23), (\n '2019-10-27 00:00:00', REFERENCE_TZ, 25, 1, 25, True, 23), (\n '2019-03-31 20:00:00', REFERENCE_TZ, 23, 2, 23, False, 23), (\n '2019-10-26 21:00:00', REFERENCE_TZ, 49, 2, 25, True, 23), (\n '2021-06-01 06:00:00', REFERENCE_TZ, 24, 1, 24, True, 23))"], {}), "(\n 'day_str, timezone, num_prices, num_calls, num_prices_8h, available_8h, last_hour'\n , (('2019-10-26 00:00:00+08:00', TZ_TEST, 0, 1, 0, False, None), (\n '2019-10-26 00:00:00', TZ_TEST, 24, 1, 24, True, 23), (\n '2019-10-27 00:00:00', TZ_TEST, 25, 1, 25, True, 23), (\n '2019-03-31 20:00:00', TZ_TEST, 23, 2, 23, False, 23), (\n '2019-03-31 20:00:00+04:00', TZ_TEST, 23, 1, 23, False, 23), (\n '2019-10-26 21:00:00', TZ_TEST, 49, 2, 26, True, 23), (\n '2019-10-26 21:00:00+01:00', TZ_TEST, 49, 2, 26, True, 23), (\n '2019-10-26 00:00:00', REFERENCE_TZ, 24, 1, 24, True, 23), (\n '2019-10-27 00:00:00', REFERENCE_TZ, 25, 1, 25, True, 23), (\n '2019-03-31 20:00:00', REFERENCE_TZ, 23, 2, 23, False, 23), (\n '2019-10-26 21:00:00', REFERENCE_TZ, 49, 2, 25, True, 23), (\n '2021-06-01 06:00:00', REFERENCE_TZ, 24, 1, 24, True, 23)))\n", (326, 1184), False, 'import pytest\n'), ((2320, 2764), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""available, day_str, num_log_msgs, status, exception"""', "((False, '2032-10-26 00:00:00+00:00', 0, 200, None), (False,\n 
'2032-10-26 00:00:00+00:00', 0, 500, None), (True,\n '2032-10-26 00:00:00+00:00', 1, 200, TimeoutError), (False,\n '2032-10-26 00:00:00+00:00', 0, 200, TimeoutError), (True,\n '2032-10-26 00:00:00+00:00', 1, 200, ClientError), (False,\n '2032-10-26 00:00:00+00:00', 0, 200, ClientError))"], {}), "('available, day_str, num_log_msgs, status, exception',\n ((False, '2032-10-26 00:00:00+00:00', 0, 200, None), (False,\n '2032-10-26 00:00:00+00:00', 0, 500, None), (True,\n '2032-10-26 00:00:00+00:00', 1, 200, TimeoutError), (False,\n '2032-10-26 00:00:00+00:00', 0, 200, TimeoutError), (True,\n '2032-10-26 00:00:00+00:00', 1, 200, ClientError), (False,\n '2032-10-26 00:00:00+00:00', 0, 200, ClientError)))\n", (2343, 2764), False, 'import pytest\n'), ((1434, 1465), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['day_str'], {}), '(day_str)\n', (1456, 1465), False, 'from datetime import datetime, timedelta\n'), ((1521, 1609), 'aiopvpc.PVPCData', 'PVPCData', ([], {'local_timezone': 'timezone', 'tariff': '"""discrimination"""', 'websession': 'mock_session'}), "(local_timezone=timezone, tariff='discrimination', websession=\n mock_session)\n", (1529, 1609), False, 'from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ\n'), ((3007, 3038), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['day_str'], {}), '(day_str)\n', (3029, 3038), False, 'from datetime import datetime, timedelta\n'), ((5125, 5151), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(26)', '(15)'], {}), '(2019, 10, 26, 15)\n', (5133, 5151), False, 'from datetime import datetime, timedelta\n'), ((5162, 5188), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(28)', '(13)'], {}), '(2019, 10, 28, 13)\n', (5170, 5188), False, 'from datetime import datetime, timedelta\n'), ((3165, 3244), 'aiopvpc.PVPCData', 'PVPCData', ([], {'local_timezone': 'REFERENCE_TZ', 'tariff': '"""normal"""', 'websession': 'mock_session'}), "(local_timezone=REFERENCE_TZ, tariff='normal', 
websession=mock_session)\n", (3173, 3244), False, 'from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ\n'), ((4188, 4236), 'unittest.mock.patch', 'patch', (['"""aiohttp.ClientSession"""', 'MockAsyncSession'], {}), "('aiohttp.ClientSession', MockAsyncSession)\n", (4193, 4236), False, 'from unittest.mock import patch\n'), ((4258, 4291), 'aiopvpc.PVPCData', 'PVPCData', ([], {'local_timezone': 'timezone'}), '(local_timezone=timezone)\n', (4266, 4291), False, 'from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ\n'), ((5291, 5376), 'aiopvpc.PVPCData', 'PVPCData', ([], {'tariff': '"""electric_car"""', 'local_timezone': 'TZ_TEST', 'websession': 'mock_session'}), "(tariff='electric_car', local_timezone=TZ_TEST, websession=mock_session\n )\n", (5299, 5376), False, 'from aiopvpc import ESIOS_TARIFFS, PVPCData, REFERENCE_TZ\n'), ((2048, 2067), 'datetime.timedelta', 'timedelta', ([], {'hours': '(10)'}), '(hours=10)\n', (2057, 2067), False, 'from datetime import datetime, timedelta\n'), ((3774, 3816), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(26)', '(15)'], {'tzinfo': 'TZ_TEST'}), '(2019, 10, 26, 15, tzinfo=TZ_TEST)\n', (3782, 3816), False, 'from datetime import datetime, timedelta\n'), ((3830, 3872), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(27)', '(13)'], {'tzinfo': 'TZ_TEST'}), '(2019, 10, 27, 13, tzinfo=TZ_TEST)\n', (3838, 3872), False, 'from datetime import datetime, timedelta\n'), ((3933, 3980), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(26)', '(15)'], {'tzinfo': 'REFERENCE_TZ'}), '(2019, 10, 26, 15, tzinfo=REFERENCE_TZ)\n', (3941, 3980), False, 'from datetime import datetime, timedelta\n'), ((3994, 4041), 'datetime.datetime', 'datetime', (['(2019)', '(10)', '(27)', '(13)'], {'tzinfo': 'REFERENCE_TZ'}), '(2019, 10, 27, 13, tzinfo=REFERENCE_TZ)\n', (4002, 4041), False, 'from datetime import datetime, timedelta\n'), ((5671, 5708), 'datetime.datetime', 'datetime', (['(2010)', '(8)', '(27)'], {'tzinfo': 'TZ_TEST'}), '(2010, 8, 
27, tzinfo=TZ_TEST)\n', (5679, 5708), False, 'from datetime import datetime, timedelta\n'), ((5722, 5763), 'datetime.datetime', 'datetime', (['(2010)', '(8)', '(27)', '(22)'], {'tzinfo': 'TZ_TEST'}), '(2010, 8, 27, 22, tzinfo=TZ_TEST)\n', (5730, 5763), False, 'from datetime import datetime, timedelta\n'), ((5931, 5952), 'datetime.datetime', 'datetime', (['(2010)', '(8)', '(27)'], {}), '(2010, 8, 27)\n', (5939, 5952), False, 'from datetime import datetime, timedelta\n'), ((5954, 5979), 'datetime.datetime', 'datetime', (['(2010)', '(8)', '(27)', '(23)'], {}), '(2010, 8, 27, 23)\n', (5962, 5979), False, 'from datetime import datetime, timedelta\n')] |
from sklearn import metrics
import torch
from models import *
import torch.backends.cudnn as cudnn
import seaborn as sns
import matplotlib.pyplot as plt
from dataset import load
#define the net
device = 'cuda' if torch.cuda.is_available() else 'cpu'
net = LSTM(3, 10, 2, 3)
net = net.to(device)
if device == 'cuda':
net = torch.nn.DataParallel(net)
cudnn.benchmark = True
net.load_state_dict(torch.load('./checkpoint/ckpt.pth'))
net = net.module
#loading data
_, _, valloader, classes = load()
def validation():
print(net.classifier)
#print(net)
net.eval()
correct = 0
total = 0
for batch_idx, (inputs, targets) in enumerate(valloader):
inputs, targets = inputs.to(device).float(), targets.to(device)
inputs = inputs.view(-1,300,3)
outputs = net(inputs)
# Confusion Matrix
print("Confusion Matrix...")
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
Accuracy = 100.*correct/total
predicted = predicted.cpu().numpy()
targets = targets.data.cpu().numpy()
cm = metrics.confusion_matrix(targets, predicted)
print(cm)
print('Accuracy=',Accuracy,"%")
figure = plt.figure(figsize=(8, 8))
sns.heatmap(cm, annot=True, cmap='Blues')
plt.ylim(0, 10)
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.show()
if __name__=='__main__':
validation()
| [
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"torch.load",
"matplotlib.pyplot.xlabel",
"torch.nn.DataParallel",
"seaborn.heatmap",
"matplotlib.pyplot.figure",
"torch.cuda.is_available",
"dataset.load",
"matplotlib.pyplot.ylim",
"sklearn.metrics.confusion_matrix"
] | [((500, 506), 'dataset.load', 'load', ([], {}), '()\n', (504, 506), False, 'from dataset import load\n'), ((214, 239), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (237, 239), False, 'import torch\n'), ((329, 355), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['net'], {}), '(net)\n', (350, 355), False, 'import torch\n'), ((403, 438), 'torch.load', 'torch.load', (['"""./checkpoint/ckpt.pth"""'], {}), "('./checkpoint/ckpt.pth')\n", (413, 438), False, 'import torch\n'), ((1111, 1155), 'sklearn.metrics.confusion_matrix', 'metrics.confusion_matrix', (['targets', 'predicted'], {}), '(targets, predicted)\n', (1135, 1155), False, 'from sklearn import metrics\n'), ((1219, 1245), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (1229, 1245), True, 'import matplotlib.pyplot as plt\n'), ((1250, 1291), 'seaborn.heatmap', 'sns.heatmap', (['cm'], {'annot': '(True)', 'cmap': '"""Blues"""'}), "(cm, annot=True, cmap='Blues')\n", (1261, 1291), True, 'import seaborn as sns\n'), ((1297, 1312), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(10)'], {}), '(0, 10)\n', (1305, 1312), True, 'import matplotlib.pyplot as plt\n'), ((1317, 1347), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted labels"""'], {}), "('Predicted labels')\n", (1327, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1352, 1377), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True labels"""'], {}), "('True labels')\n", (1362, 1377), True, 'import matplotlib.pyplot as plt\n'), ((1382, 1392), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1390, 1392), True, 'import matplotlib.pyplot as plt\n')] |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
项目同步
1. 从CC拉取业务列表及人员信息
2. 更新项目信息
3. 更新用户组信息
"""
from celery.schedules import crontab # noqa
from celery.task import periodic_task # noqa
from django.conf import settings # noqa
from apps.utils.log import logger # noqa
from apps.log_search.handlers.biz import BizHandler # noqa
from apps.log_search.models import ProjectInfo # noqa
from apps.utils.db import array_chunk # noqa
from apps.utils.lock import share_lock # noqa
@periodic_task(run_every=crontab(minute="*/1"), queue="sync")
@share_lock()
def sync():
if settings.USING_SYNC_BUSINESS:
# 同步CMDB业务信息
sync_projects()
return True
return False
def sync_projects():
"""
同步CMDB业务信息
"""
businesses = BizHandler.list()
if not businesses:
logger.error("[log_search][tasks]get business error")
return False
objs = []
# 项目信息
projects = ProjectInfo.get_cmdb_projects()
# 用户组
for business in businesses:
bk_biz_id = int(business["bk_biz_id"])
if not projects.get(bk_biz_id):
objs.append(
ProjectInfo(
project_name=business["bk_biz_name"],
bk_biz_id=business["bk_biz_id"],
bk_app_code=settings.APP_CODE,
time_zone=business.get("time_zone", settings.TIME_ZONE),
)
)
else:
has_deleted = ProjectInfo.objects.filter(bk_biz_id=bk_biz_id, is_deleted=True)
if has_deleted:
has_deleted.update(is_deleted=False)
# 增加修改project_name
ProjectInfo.objects.filter(bk_biz_id=bk_biz_id).exclude(project_name=business["bk_biz_name"]).update(
project_name=business["bk_biz_name"]
)
del projects[int(business["bk_biz_id"])]
if objs:
chunks = array_chunk(objs)
for chunk in chunks:
ProjectInfo.objects.bulk_create(chunk)
logger.info("[log_search][tasks]sync business nums: {}".format(len(objs)))
if projects:
ProjectInfo.objects.filter(project_id__in=projects.values()).delete()
logger.info(
"[sync_projects] businesses=>{}, sync=>{}, delete=>{}".format(len(businesses), len(objs), len(projects))
)
return True
| [
"apps.log_search.models.ProjectInfo.objects.bulk_create",
"apps.log_search.models.ProjectInfo.objects.filter",
"apps.utils.log.logger.error",
"apps.log_search.handlers.biz.BizHandler.list",
"apps.utils.db.array_chunk",
"apps.log_search.models.ProjectInfo.get_cmdb_projects",
"celery.schedules.crontab",
... | [((1867, 1879), 'apps.utils.lock.share_lock', 'share_lock', ([], {}), '()\n', (1877, 1879), False, 'from apps.utils.lock import share_lock\n'), ((2082, 2099), 'apps.log_search.handlers.biz.BizHandler.list', 'BizHandler.list', ([], {}), '()\n', (2097, 2099), False, 'from apps.log_search.handlers.biz import BizHandler\n'), ((2247, 2278), 'apps.log_search.models.ProjectInfo.get_cmdb_projects', 'ProjectInfo.get_cmdb_projects', ([], {}), '()\n', (2276, 2278), False, 'from apps.log_search.models import ProjectInfo\n'), ((1829, 1850), 'celery.schedules.crontab', 'crontab', ([], {'minute': '"""*/1"""'}), "(minute='*/1')\n", (1836, 1850), False, 'from celery.schedules import crontab\n'), ((2131, 2184), 'apps.utils.log.logger.error', 'logger.error', (['"""[log_search][tasks]get business error"""'], {}), "('[log_search][tasks]get business error')\n", (2143, 2184), False, 'from apps.utils.log import logger\n'), ((3217, 3234), 'apps.utils.db.array_chunk', 'array_chunk', (['objs'], {}), '(objs)\n', (3228, 3234), False, 'from apps.utils.db import array_chunk\n'), ((2774, 2838), 'apps.log_search.models.ProjectInfo.objects.filter', 'ProjectInfo.objects.filter', ([], {'bk_biz_id': 'bk_biz_id', 'is_deleted': '(True)'}), '(bk_biz_id=bk_biz_id, is_deleted=True)\n', (2800, 2838), False, 'from apps.log_search.models import ProjectInfo\n'), ((3276, 3314), 'apps.log_search.models.ProjectInfo.objects.bulk_create', 'ProjectInfo.objects.bulk_create', (['chunk'], {}), '(chunk)\n', (3307, 3314), False, 'from apps.log_search.models import ProjectInfo\n'), ((2964, 3011), 'apps.log_search.models.ProjectInfo.objects.filter', 'ProjectInfo.objects.filter', ([], {'bk_biz_id': 'bk_biz_id'}), '(bk_biz_id=bk_biz_id)\n', (2990, 3011), False, 'from apps.log_search.models import ProjectInfo\n')] |
import unittest
import logging
import asyncio
import datetime
from pytz import timezone
import dscraper
from dscraper.utils import FrequencyController
logger = logging.getLogger(__name__)
from .utils import Test
EPS = 1e-6
class TestController(Test):
INVERTAL = 0.2
CONFIG_NONE = (0, 0, 0, EPS, None)
CONFIG_ALL_DAY_NONE = (0, 0, 0, 0, None)
CONFIG_INVALID = (0, INVERTAL, -0.1, 22, None)
CONFIG_INVALID2 = (0, INVERTAL, 0, 24.1, None)
def setUp(self):
self.all_time = FrequencyController((0, self.INVERTAL, 0, 0, None))
def wait_once(self, controller):
return self.loop_until_complete(controller.wait())
def test_wait(self):
none_time = FrequencyController(self.CONFIG_NONE)
all_none_time = FrequencyController(self.CONFIG_ALL_DAY_NONE)
for cont in (self.all_time, none_time, all_none_time):
self.assertFalse(self.wait_once(cont), 'First wait blocked')
self.assertTrue(self.wait_once(self.all_time), 'False negative')
self.assertFalse(self.wait_once(none_time), 'False positive')
self.assertFalse(self.wait_once(all_none_time), 'False positive')
def test_now_wait(self):
now = datetime.datetime.now()
start = end = now.hour + now.minute / 60 + now.second / 3600
current = FrequencyController((0, self.INVERTAL, start - EPS, end + EPS, None))
pos_offset = FrequencyController((0, self.INVERTAL, start - EPS, end - EPS, None))
neg_offset = FrequencyController((0, self.INVERTAL, start + EPS, end + EPS, None))
for cont in (current, pos_offset, neg_offset):
self.assertFalse(self.wait_once(cont), 'First wait blocked')
self.assertTrue(self.wait_once(current), 'False negative')
self.assertFalse(self.wait_once(pos_offset), 'False positive')
self.assertFalse(self.wait_once(neg_offset), 'False positive')
def test_sequential(self):
self.wait_once(self.all_time)
self.all_time.release()
self.assertCountEqual(self.gather(self.all_time.wait(), self.all_time.wait()), [
True, False], 'not released and acquired')
def test_sequential(self):
self.wait_once(self.all_time)
self.assertTrue(self.wait_once(self.all_time), 'unblock before freed')
self.all_time.free()
self.assertFalse(self.wait_once(self.all_time), 'not freed')
def test_invalid(self):
def create_invalid(config):
try:
FrequencyController(config)
except ValueError:
pass
else:
self.fail('Incorrect value check')
create_invalid(self.CONFIG_INVALID)
create_invalid(self.CONFIG_INVALID2)
| [
"logging.getLogger",
"datetime.datetime.now",
"dscraper.utils.FrequencyController"
] | [((161, 188), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (178, 188), False, 'import logging\n'), ((507, 558), 'dscraper.utils.FrequencyController', 'FrequencyController', (['(0, self.INVERTAL, 0, 0, None)'], {}), '((0, self.INVERTAL, 0, 0, None))\n', (526, 558), False, 'from dscraper.utils import FrequencyController\n'), ((702, 739), 'dscraper.utils.FrequencyController', 'FrequencyController', (['self.CONFIG_NONE'], {}), '(self.CONFIG_NONE)\n', (721, 739), False, 'from dscraper.utils import FrequencyController\n'), ((764, 809), 'dscraper.utils.FrequencyController', 'FrequencyController', (['self.CONFIG_ALL_DAY_NONE'], {}), '(self.CONFIG_ALL_DAY_NONE)\n', (783, 809), False, 'from dscraper.utils import FrequencyController\n'), ((1208, 1231), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1229, 1231), False, 'import datetime\n'), ((1319, 1388), 'dscraper.utils.FrequencyController', 'FrequencyController', (['(0, self.INVERTAL, start - EPS, end + EPS, None)'], {}), '((0, self.INVERTAL, start - EPS, end + EPS, None))\n', (1338, 1388), False, 'from dscraper.utils import FrequencyController\n'), ((1410, 1479), 'dscraper.utils.FrequencyController', 'FrequencyController', (['(0, self.INVERTAL, start - EPS, end - EPS, None)'], {}), '((0, self.INVERTAL, start - EPS, end - EPS, None))\n', (1429, 1479), False, 'from dscraper.utils import FrequencyController\n'), ((1501, 1570), 'dscraper.utils.FrequencyController', 'FrequencyController', (['(0, self.INVERTAL, start + EPS, end + EPS, None)'], {}), '((0, self.INVERTAL, start + EPS, end + EPS, None))\n', (1520, 1570), False, 'from dscraper.utils import FrequencyController\n'), ((2520, 2547), 'dscraper.utils.FrequencyController', 'FrequencyController', (['config'], {}), '(config)\n', (2539, 2547), False, 'from dscraper.utils import FrequencyController\n')] |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import os
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import param_domain
from core.tests import test_utils
import feconf
import utils
# Dictionary-like data structures within sample YAML must be formatted
# alphabetically to match string equivalence with the YAML generation
# methods tested below.
#
# If evaluating differences in YAML, conversion to dict form via
# utils.dict_from_yaml can isolate differences quickly.
SAMPLE_YAML_CONTENT = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom: []
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_UNTITLED_YAML_CONTENT = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents: {}
states:
%s:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: null
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args: {}
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: New state
feedback: []
param_changes: []
trigger:
customization_args:
num_submits:
value: 42
trigger_type: NthResubmission
id: null
param_changes: []
states_schema_version: %d
tags: []
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.LAST_UNTITLED_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
SAMPLE_YAML_CONTENT_WITH_GADGETS = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: %s
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: %d
skin_customizations:
panels_contents:
bottom:
- customization_args:
adviceObjects:
value:
- adviceTitle: b
adviceHtml: <p>c</p>
gadget_type: TestGadget
gadget_name: ATestGadget
visible_in_states:
- New state
- Second state
states:
%s:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: %s
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: New state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
Second state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: Second state
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: %d
tags: []
title: Title
""") % (
feconf.DEFAULT_INIT_STATE_NAME,
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.DEFAULT_INIT_STATE_NAME,
feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION)
TEST_GADGETS = {
'TestGadget': {
'dir': os.path.join(feconf.GADGETS_DIR, 'TestGadget')
}
}
TEST_GADGET_CUSTOMIZATION_ARGS = {
'adviceObjects': {
'value': [{
'adviceTitle': 'b',
'adviceHtml': '<p>c</p>'
}]
}
}
TEST_GADGET_DICT = {
'gadget_type': 'TestGadget',
'gadget_name': 'ATestGadget',
'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
'visible_in_states': ['First state']
}
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
# editor frontend tenplate is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = exp_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = exp_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
# Test whether a default outcome to a non-existing state is invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
exploration.validate()
# Ensure an answer group with two classifier rules is invalid
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}, {
'inputs': {
'training_data': ['Test']
},
'rule_type': 'FuzzyMatches'
}],
'correct': False,
})
)
self._assert_validation_error(
exploration, 'AnswerGroups can only have one classifier rule.')
# Restore a valid exploration.
init_state.interaction.answer_groups.pop()
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
init_state.interaction.answer_groups.append(
exp_domain.AnswerGroup.from_dict({
'outcome': {
'dest': exploration.init_state_name,
'feedback': ['Feedback'],
'param_changes': [],
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'correct': False,
})
)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = 'Feedback'
self._assert_validation_error(
exploration, 'Expected outcome feedback to be a list')
outcome.feedback = [15]
self._assert_validation_error(
exploration, 'Expected outcome feedback item to be a string')
outcome.feedback = ['Feedback']
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = []
exploration.validate()
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
interaction.default_outcome = None
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
interaction.answer_groups = []
exploration.validate()
interaction.fallbacks = {}
self._assert_validation_error(
exploration, 'Expected fallbacks to be a list')
# Restore a valid exploration.
interaction.id = 'TextInput'
interaction.answer_groups = answer_groups
interaction.default_outcome = default_outcome
interaction.fallbacks = []
exploration.validate()
# Validate AnswerGroup.
answer_group.rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule for each answer group.')
exploration.states = {
exploration.init_state_name: exp_domain.State.create_default_state(
exploration.init_state_name)
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
    def test_fallbacks_validation(self):
        """Test validation of state fallbacks."""
        exploration = exp_domain.Exploration.create_default_exploration('eid')
        exploration.objective = 'Objective'
        init_state = exploration.states[exploration.init_state_name]
        init_state.update_interaction_id('TextInput')
        exploration.validate()
        # Outcome shared by every fallback dict constructed below; it routes
        # back to the initial state with no feedback or param changes.
        base_outcome = {
            'dest': exploration.init_state_name,
            'feedback': [],
            'param_changes': [],
        }
        # A fallback whose trigger type is not registered must be rejected
        # during exploration validation.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'FakeTriggerName',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        self._assert_validation_error(exploration, 'Unknown trigger type')
        # An empty outcome dict fails immediately with a KeyError while the
        # fallback is being constructed (missing required outcome keys),
        # before exploration-level validation is even reached.
        with self.assertRaises(KeyError):
            init_state.update_interaction_fallbacks([{
                'trigger': {
                    'trigger_type': 'NthResubmission',
                    'customization_args': {
                        'num_submits': {
                            'value': 42,
                        },
                    },
                },
                'outcome': {},
            }])
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {},
            },
            'outcome': base_outcome,
        }])
        # Default values for the customization args will be added silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 3,
                }
            })
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                    'bad_key_that_will_get_stripped_silently': {
                        'value': 'unused_value',
                    }
                },
            },
            'outcome': base_outcome,
        }])
        # Unused customization arg keys will be stripped silently.
        exploration.validate()
        self.assertEqual(len(init_state.interaction.fallbacks), 1)
        self.assertEqual(
            init_state.interaction.fallbacks[0].trigger.customization_args,
            {
                'num_submits': {
                    'value': 42,
                }
            })
        # A well-formed fallback with explicit customization args validates.
        init_state.update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 2,
                    },
                },
            },
            'outcome': base_outcome,
        }])
        exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.interaction.default_outcome = None
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
    def test_exploration_skin_and_gadget_validation(self):
        """Test that Explorations including gadgets validate properly."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        # A gadget type absent from the registry fails its own validation.
        invalid_gadget_instance = exp_domain.GadgetInstance(
            'bad_type', 'aUniqueGadgetName', [], {})
        with self.assertRaisesRegexp(
            utils.ValidationError,
            'Unknown gadget with type bad_type is not in the registry.'
        ):
            invalid_gadget_instance.validate()
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'bottom'][0]
            # Force a GadgetInstance to require certain state names.
            gadget_instance.visible_in_states.extend(['DEF', 'GHI'])
            self._assert_validation_error(
                exploration, 'Exploration missing required states: DEF, GHI')
            # Adding one of the two required states narrows the error to the
            # remaining missing state.
            def_state = exp_domain.State.create_default_state('DEF')
            def_state.update_interaction_id('TextInput')
            exploration.states['DEF'] = def_state
            self._assert_validation_error(
                exploration, 'Exploration missing required state: GHI')
            ghi_state = exp_domain.State.create_default_state('GHI')
            ghi_state.update_interaction_id('TextInput')
            exploration.states['GHI'] = ghi_state
            exploration.validate()
            # Force a gadget name collision.
            gadget_instance.visible_in_states = ['DEF']
            exploration.add_gadget(TEST_GADGET_DICT, 'bottom')
            exploration.skin_instance.panel_contents_dict[
                'bottom'][1].visible_in_states = ['GHI']
            self._assert_validation_error(
                exploration,
                'ATestGadget gadget instance name must be unique.')
            # Drop the colliding gadget again.
            exploration.skin_instance.panel_contents_dict['bottom'].pop()
            # A gadget may not declare visibility for the same state twice.
            gadget_instance.visible_in_states.extend(['DEF'])
            self._assert_validation_error(
                exploration,
                'TestGadget specifies visibility repeatedly for state: DEF')
            # Remove duplicate state.
            gadget_instance.visible_in_states.pop()
            # Adding a panel that doesn't exist in the skin.
            exploration.skin_instance.panel_contents_dict[
                'non_existent_panel'] = []
            self._assert_validation_error(
                exploration,
                'The panel name \'non_existent_panel\' is invalid.')
    def test_gadget_name_validation(self):
        """Test that gadget naming conditions validate properly."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = exploration.skin_instance.panel_contents_dict[
                'bottom'][0]
            # The name loaded from the sample YAML is valid as-is.
            gadget_instance.validate()
            # Names must be non-empty strings.
            gadget_instance.name = ''
            self._assert_validation_error(
                gadget_instance, 'Gadget name must not be an empty string.')
            gadget_instance.name = 0
            self._assert_validation_error(
                gadget_instance,
                'Gadget name must be a string. Received type: int')
            # Names are capped at _MAX_GADGET_NAME_LENGTH characters.
            gadget_instance.name = 'ASuperLongGadgetNameThatExceedsTheLimit'
            max_length = exp_domain.GadgetInstance._MAX_GADGET_NAME_LENGTH  # pylint: disable=protected-access
            self._assert_validation_error(
                gadget_instance,
                'ASuperLongGadgetNameThatExceedsTheLimit gadget name'
                ' exceeds maximum length of %d' % max_length)
            # Only alphanumeric characters and spaces are accepted;
            # punctuation, tabs and newlines are all rejected.
            gadget_instance.name = 'VERYGADGET!'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: VERYGADGET!')
            gadget_instance.name = 'Name with \t tab'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \t tab')
            gadget_instance.name = 'Name with \n newline'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with \n newline')
            gadget_instance.name = 'Name with 3 space'
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: Name with 3 space')
            gadget_instance.name = ' untrim whitespace '
            self._assert_validation_error(
                gadget_instance,
                'Gadget names must be alphanumeric. Spaces are allowed. '
                'Received: untrim whitespace ')
            # Names with spaces and number should pass.
            gadget_instance.name = 'Space and 1'
            gadget_instance.validate()
def test_exploration_get_gadget_types(self):
"""Test that Exploration.get_gadget_types returns apt results."""
exploration_without_gadgets = exp_domain.Exploration.from_yaml(
'An Exploration ID', SAMPLE_YAML_CONTENT)
self.assertEqual(exploration_without_gadgets.get_gadget_types(), [])
exploration_with_gadgets = exp_domain.Exploration.from_yaml(
'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
self.assertEqual(
exploration_with_gadgets.get_gadget_types(), ['TestGadget'])
another_gadget = exp_domain.GadgetInstance(
'AnotherGadget', 'GadgetUniqueName1', [], {}
)
exploration_with_gadgets.skin_instance.panel_contents_dict[
'bottom'].append(another_gadget)
self.assertEqual(
exploration_with_gadgets.get_gadget_types(),
['AnotherGadget', 'TestGadget']
)
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', '<EMAIL>', title='', category='',
objective='', end_state_name='End')
exploration = exp_services.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
class StateExportUnitTests(test_utils.GenericTestBase):
    """Test export of states."""

    def test_export_state_to_dict(self):
        """Test exporting a state to a dict."""
        exploration = exp_domain.Exploration.create_default_exploration(
            'exp_id')
        exploration.add_states(['New state'])
        state_dict = exploration.states['New state'].to_dict()
        # A freshly added state serializes with empty content, a bare
        # interaction (id None, no answer groups or fallbacks), and a
        # default outcome that routes back to the state itself.
        expected_dict = {
            'classifier_model_id': None,
            'content': [{
                'type': 'text',
                'value': u''
            }],
            'interaction': {
                'answer_groups': [],
                'confirmed_unclassified_answers': [],
                'customization_args': {},
                'default_outcome': {
                    'dest': 'New state',
                    'feedback': [],
                    'param_changes': [],
                },
                'fallbacks': [],
                'id': None,
            },
            'param_changes': [],
        }
        self.assertEqual(expected_dict, state_dict)
class YamlCreationUnitTests(test_utils.GenericTestBase):
    """Test creation of explorations from YAML files."""

    # Id used for the explorations created in these tests.
    EXP_ID = 'An exploration_id'

    def test_yaml_import_and_export(self):
        """Test the from_yaml() and to_yaml() methods."""
        exploration = exp_domain.Exploration.create_default_exploration(
            self.EXP_ID, title='Title', category='Category')
        exploration.add_states(['New state'])
        self.assertEqual(len(exploration.states), 2)
        exploration.states['New state'].update_interaction_fallbacks([{
            'trigger': {
                'trigger_type': 'NthResubmission',
                'customization_args': {
                    'num_submits': {
                        'value': 42,
                    },
                },
            },
            'outcome': {
                'dest': 'New state',
                'feedback': [],
                'param_changes': [],
            },
        }])
        exploration.validate()
        # Exporting must reproduce the known-good sample YAML, and
        # re-importing that YAML must round-trip losslessly.
        yaml_content = exploration.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)
        exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
        self.assertEqual(len(exploration2.states), 2)
        yaml_content_2 = exploration2.to_yaml()
        self.assertEqual(yaml_content_2, yaml_content)
        # Malformed or structurally invalid YAML input is rejected.
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'Invalid\ninit_state_name:\nMore stuff')
        with self.assertRaises(Exception):
            exp_domain.Exploration.from_yaml(
                'exp4', 'State1:\n(\nInvalid yaml')
        # from_yaml() only accepts modern (schema >= 10) files, while
        # from_untitled_yaml() only accepts legacy (schema <= 9) files.
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version >= 10, received: 9'
        ):
            exp_domain.Exploration.from_yaml(
                'exp4', SAMPLE_UNTITLED_YAML_CONTENT)
        with self.assertRaisesRegexp(
            Exception, 'Expected a YAML version <= 9'
        ):
            exp_domain.Exploration.from_untitled_yaml(
                'exp4', 'Title', 'Category', SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_without_gadgets(self):
        """Test from_yaml() and to_yaml() methods without gadgets."""
        exploration_without_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT)
        yaml_content = exploration_without_gadgets.to_yaml()
        self.assertEqual(yaml_content, SAMPLE_YAML_CONTENT)

    def test_yaml_import_and_export_with_gadgets(self):
        """Test from_yaml() and to_yaml() methods including gadgets."""
        exploration_with_gadgets = exp_domain.Exploration.from_yaml(
            self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            generated_yaml = exploration_with_gadgets.to_yaml()
        # Compare parsed dicts rather than raw strings so that incidental
        # key-ordering differences in the emitted YAML do not matter.
        generated_yaml_as_dict = utils.dict_from_yaml(generated_yaml)
        sample_yaml_as_dict = utils.dict_from_yaml(
            SAMPLE_YAML_CONTENT_WITH_GADGETS)
        self.assertEqual(generated_yaml_as_dict, sample_yaml_as_dict)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
    """Tests the presence of appropriate schema migration methods in the
    Exploration domain object class.
    """

    def test_correct_states_schema_conversion_methods_exist(self):
        """Test that the right states schema conversion methods exist."""
        latest_version = feconf.CURRENT_EXPLORATION_STATES_SCHEMA_VERSION
        # Every states schema version below the latest needs a converter
        # that upgrades it to the next version.
        for version in range(latest_version):
            method_name = '_convert_states_v%s_dict_to_v%s_dict' % (
                version, version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))
        # No converter may lead beyond the latest version.
        overshoot_name = '_convert_states_v%s_dict_to_v%s_dict' % (
            latest_version, latest_version + 1)
        self.assertFalse(hasattr(exp_domain.Exploration, overshoot_name))

    def test_correct_exploration_schema_conversion_methods_exist(self):
        """Test that the right exploration schema conversion methods exist."""
        latest_version = exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION
        # Exploration schema versions start at 1, so converters must exist
        # for versions 1 .. latest - 1.
        for version in range(1, latest_version):
            method_name = '_convert_v%s_dict_to_v%s_dict' % (
                version, version + 1)
            self.assertTrue(hasattr(exp_domain.Exploration, method_name))
        overshoot_name = '_convert_v%s_dict_to_v%s_dict' % (
            latest_version, latest_version + 1)
        self.assertFalse(hasattr(exp_domain.Exploration, overshoot_name))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V12
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v10(self):
"""Test direct loading from a v10 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V10)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
"""Test direct loading from a v11 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V11)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
"""Test direct loading from a v12 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V12)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
class ConversionUnitTests(test_utils.GenericTestBase):
    """Test conversion methods."""

    def test_convert_exploration_to_player_dict(self):
        """Checks the full structure returned by to_player_dict() for an
        exploration with two default states.
        """
        exp_title = 'Title'
        second_state_name = 'first state'
        exploration = exp_domain.Exploration.create_default_exploration(
            'eid', title=exp_title, category='Category')
        exploration.add_states([second_state_name])

        def _get_default_state_dict(content_str, dest_name):
            # Expected serialized form of a default state whose content is
            # `content_str` and whose default outcome routes to `dest_name`.
            return {
                'classifier_model_id': None,
                'content': [{
                    'type': 'text',
                    'value': content_str,
                }],
                'interaction': {
                    'answer_groups': [],
                    'confirmed_unclassified_answers': [],
                    'customization_args': {},
                    'default_outcome': {
                        'dest': dest_name,
                        'feedback': [],
                        'param_changes': [],
                    },
                    'fallbacks': [],
                    'id': None,
                },
                'param_changes': [],
            }

        self.assertEqual(exploration.to_player_dict(), {
            'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
            'title': exp_title,
            'states': {
                feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
                    feconf.DEFAULT_INIT_STATE_CONTENT_STR,
                    feconf.DEFAULT_INIT_STATE_NAME),
                second_state_name: _get_default_state_dict(
                    '', second_state_name),
            },
            'param_changes': [],
            'param_specs': {},
            'skin_customizations': (
                exp_domain.SkinInstance._get_default_skin_customizations()  # pylint: disable=protected-access
            ),
            'language_code': 'en',
        })
class StateOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on states."""

    def test_delete_state(self):
        """Test deletion of states."""
        exp = exp_domain.Exploration.create_default_exploration('eid')
        exp.add_states(['first state'])

        # The initial state may never be removed.
        with self.assertRaisesRegexp(
            ValueError, 'Cannot delete initial state'):
            exp.delete_state(exp.init_state_name)

        # Any other state can be added and then removed freely.
        exp.add_states(['second state'])
        exp.delete_state('second state')

        # Deleting a nonexistent state is an error.
        with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
            exp.delete_state('fake state')

    def test_state_operations(self):
        """Test adding, updating and checking existence of states."""
        exp = exp_domain.Exploration.create_default_exploration('eid')
        self.assertNotIn('invalid_state_name', exp.states)
        self.assertEqual(len(exp.states), 1)

        default_state_name = exp.init_state_name
        exp.rename_state(default_state_name, 'Renamed state')
        self.assertEqual(len(exp.states), 1)
        self.assertEqual(exp.init_state_name, 'Renamed state')

        # Add a new state.
        exp.add_states(['State 2'])
        self.assertEqual(len(exp.states), 2)

        # Renaming a state to its current name is a no-op.
        exp.rename_state('State 2', 'State 2')

        # Adding or renaming to an already-used name must fail.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.add_states(['State 2'])
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.rename_state('State 2', 'Renamed state')

        # 'END' (the old terminal pseudostate) is now an ordinary name. A lot
        # of old behavior used to be specific to states named 'END'; the
        # renames below validate that is no longer the situation.
        exp.rename_state('State 2', 'END')
        exp.rename_state('END', 'State 2')

        # The exploration now has exactly two states.
        self.assertNotIn(default_state_name, exp.states)
        self.assertIn('Renamed state', exp.states)
        self.assertIn('State 2', exp.states)

        # An 'END' state can be added like any other...
        exp.add_states(['END'])

        # ...and collides with duplicates like any other.
        with self.assertRaisesRegexp(ValueError, 'Duplicate state name'):
            exp.rename_state('State 2', 'END')

        # Chain the states together so that everything leads to END.
        exp.states[
            'Renamed state'].interaction.default_outcome.dest = 'State 2'
        exp.states['State 2'].interaction.default_outcome.dest = 'END'

        # Give the intermediate states interactions.
        exp.states['Renamed state'].update_interaction_id('TextInput')
        exp.states['State 2'].update_interaction_id('TextInput')

        # Miscellaneous metadata required for validation.
        exp.title = 'Title'
        exp.category = 'Category'
        exp.objective = 'Objective'

        # Merely having a state named 'END' that everything points at does
        # NOT make the exploration terminable.
        with self.assertRaises(Exception):
            exp.validate(strict=True)

        # Renaming the node to something other than 'END' and giving it an
        # EndExploration interaction (with no default outcome) is enough to
        # make the exploration valid.
        exp.rename_state('END', 'AnotherEnd')
        another_end_state = exp.states['AnotherEnd']
        another_end_state.update_interaction_id('EndExploration')
        another_end_state.interaction.default_outcome = None
        exp.validate(strict=True)

        # Name it back for the final deletion checks.
        exp.rename_state('AnotherEnd', 'END')
        exp.delete_state('END')
        self.assertNotIn('END', exp.states)
class GadgetOperationsUnitTests(test_utils.GenericTestBase):
    """Test methods operating on gadgets."""

    def test_gadget_operations(self):
        """Test deletion of gadgets."""
        exp = exp_domain.Exploration.create_default_exploration('eid')

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exp.add_gadget(TEST_GADGET_DICT, 'bottom')

        # The freshly added gadget lands in the 'bottom' panel.
        self.assertEqual(
            exp.skin_instance.panel_contents_dict['bottom'][0].type,
            TEST_GADGET_DICT['gadget_type'])
        self.assertEqual(
            exp.skin_instance.panel_contents_dict['bottom'][0].name,
            TEST_GADGET_DICT['gadget_name'])

        # Renaming an unknown gadget fails.
        with self.assertRaisesRegexp(
            ValueError, 'Gadget NotARealGadget does not exist.'):
            exp.rename_gadget('NotARealGadget', 'ANewName')

        exp.rename_gadget(TEST_GADGET_DICT['gadget_name'], 'ANewName')
        self.assertEqual(
            exp.skin_instance.panel_contents_dict['bottom'][0].name,
            'ANewName')

        # Add another gadget.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            exp.add_gadget(TEST_GADGET_DICT, 'bottom')
        self.assertEqual(
            exp.get_all_gadget_names(), ['ANewName', 'ATestGadget'])

        # Renaming onto an existing gadget name fails.
        with self.assertRaisesRegexp(
            ValueError, 'Duplicate gadget name: ANewName'):
            exp.rename_gadget('ATestGadget', 'ANewName')

        # Lookup by name returns the exact instance stored in the panel.
        gadget_instance = exp.get_gadget_instance_by_name('ANewName')
        self.assertIs(
            exp.skin_instance.panel_contents_dict['bottom'][0],
            gadget_instance)

        panel = exp._get_panel_for_gadget('ANewName')  # pylint: disable=protected-access
        self.assertEqual(panel, 'bottom')

        # Gadgets can be deleted exactly once.
        exp.delete_gadget('ANewName')
        exp.delete_gadget('ATestGadget')
        self.assertEqual(exp.skin_instance.panel_contents_dict['bottom'], [])
        with self.assertRaisesRegexp(
            ValueError, 'Gadget ANewName does not exist.'):
            exp.delete_gadget('ANewName')
class SkinInstanceUnitTests(test_utils.GenericTestBase):
    """Test methods for SkinInstance."""

    _SAMPLE_SKIN_INSTANCE_DICT = {
        'skin_id': 'conversation_v1',
        'skin_customizations': {
            'panels_contents': {
                'bottom': [{
                    'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS,
                    'gadget_type': 'TestGadget',
                    'gadget_name': 'ATestGadget',
                    'visible_in_states': ['New state', 'Second state'],
                }],
            },
        },
    }

    def test_get_state_names_required_by_gadgets(self):
        """Test accurate computation of state_names_required_by_gadgets."""
        instance = exp_domain.SkinInstance(
            'conversation_v1',
            self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations'])
        self.assertEqual(
            instance.get_state_names_required_by_gadgets(),
            ['New state', 'Second state'])

    def test_generation_of_get_default_skin_customizations(self):
        """Tests that default skin customizations are created properly."""
        instance = exp_domain.SkinInstance(feconf.DEFAULT_SKIN_ID, None)
        self.assertEqual(instance.panel_contents_dict, {'bottom': []})

    def test_conversion_of_skin_to_and_from_dict(self):
        """Tests conversion of SkinInstance to and from dict representations."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        as_dict = exploration.skin_instance.to_dict()
        self.assertEqual(as_dict, self._SAMPLE_SKIN_INSTANCE_DICT)

        # Round-trip the dict back into a SkinInstance.
        rebuilt = exp_domain.SkinInstance.from_dict(as_dict)
        self.assertEqual(rebuilt.skin_id, 'conversation_v1')
        self.assertEqual(
            sorted(rebuilt.panel_contents_dict.keys()), ['bottom'])
class GadgetInstanceUnitTests(test_utils.GenericTestBase):
    """Tests methods instantiating and validating GadgetInstances."""

    def test_gadget_instantiation(self):
        """Test instantiation of GadgetInstances."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        # The sample YAML defines exactly one gadget in the bottom panel.
        self.assertEqual(
            len(exploration.skin_instance.panel_contents_dict['bottom']), 1)

    def test_gadget_instance_properties(self):
        """Test accurate representation of gadget properties."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panels = exploration.skin_instance.panel_contents_dict

        # The test gadget must be allowed while its properties are read.
        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = panels['bottom'][0]
            self.assertEqual(gadget_instance.height, 50)
            self.assertEqual(gadget_instance.width, 60)
            self.assertIn('New state', gadget_instance.visible_in_states)

    def test_gadget_instance_validation(self):
        """Test validation of GadgetInstance."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        panels = exploration.skin_instance.panel_contents_dict

        with self.swap(feconf, 'ALLOWED_GADGETS', TEST_GADGETS):
            gadget_instance = panels['bottom'][0]

            # The unmodified sample YAML validates cleanly.
            exploration.validate()

            # A gadget wider than its panel triggers a size error.
            with self.swap(
                gadget_instance.gadget,
                'width_px',
                4600):
                self._assert_validation_error(
                    exploration,
                    'Width 4600 of panel \'bottom\' exceeds limit of 350')

            # Exceeding the gadget's own CustomizationArgSpec limits
            # (here: at most 3 tips) also fails validation.
            gadget_instance.customization_args[
                'adviceObjects']['value'].extend(
                    [
                        {'adviceTitle': 'test_title',
                         'adviceHtml': 'test html'},
                        {'adviceTitle': 'another_title',
                         'adviceHtml': 'more html'},
                        {'adviceTitle': 'third_title',
                         'adviceHtml': 'third html'}
                    ]
                )
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'TestGadget is limited to 3 tips, found 4.'):
                gadget_instance.validate()
            gadget_instance.customization_args[
                'adviceObjects']['value'].pop()

            # A panel holding more gadgets than it allows is invalid.
            panels['bottom'].append(gadget_instance)
            with self.assertRaisesRegexp(
                utils.ValidationError,
                '\'bottom\' panel expected at most 1 gadget, but 2 gadgets are '
                'visible in state \'New state\'.'):
                exploration.validate()

            # A gadget must be visible in at least one state.
            gadget_instance.visible_in_states = []
            with self.assertRaisesRegexp(
                utils.ValidationError,
                'TestGadget gadget not visible in any states.'):
                gadget_instance.validate()

    def test_conversion_of_gadget_instance_to_and_from_dict(self):
        """Test conversion of GadgetInstance to and from dict. """
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        gadget_instance = (
            exploration.skin_instance.panel_contents_dict['bottom'][0])

        gadget_as_dict = gadget_instance.to_dict()
        self.assertEqual(
            gadget_as_dict,
            {
                'gadget_type': 'TestGadget',
                'gadget_name': 'ATestGadget',
                'visible_in_states': ['New state', 'Second state'],
                'customization_args': TEST_GADGET_CUSTOMIZATION_ARGS
            })

        # Round-trip the dict back into a GadgetInstance.
        rebuilt_instance = exp_domain.GadgetInstance.from_dict(gadget_as_dict)
        self.assertEqual(rebuilt_instance.width, 60)
        self.assertEqual(rebuilt_instance.height, 50)
class GadgetVisibilityInStatesUnitTests(test_utils.GenericTestBase):
    """Tests methods affecting gadget visibility in states."""

    def test_retrieving_affected_gadgets(self):
        """Checks that exactly the gadgets visible in a state are returned."""
        exploration = exp_domain.Exploration.from_yaml(
            'exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)
        visible_gadgets = (
            exploration._get_gadget_instances_visible_in_state('Second state'))  # pylint: disable=protected-access
        self.assertEqual(len(visible_gadgets), 1)
        self.assertEqual(visible_gadgets[0].name, 'ATestGadget')
| [
"utils.dict_from_yaml",
"core.domain.param_domain.ParamSpec.from_dict",
"core.domain.exp_services.get_exploration_by_id",
"core.domain.exp_domain.SkinInstance",
"os.path.join",
"core.domain.param_domain.ParamSpec",
"core.domain.exp_domain.SkinInstance.from_dict",
"core.domain.exp_domain.GadgetInstance... | [((6011, 6057), 'os.path.join', 'os.path.join', (['feconf.GADGETS_DIR', '"""TestGadget"""'], {}), "(feconf.GADGETS_DIR, 'TestGadget')\n", (6023, 6057), False, 'import os\n'), ((6791, 6847), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (6840, 6847), False, 'from core.domain import exp_domain\n'), ((7331, 7373), 'core.domain.exp_domain.State.create_default_state', 'exp_domain.State.create_default_state', (['"""/"""'], {}), "('/')\n", (7368, 7373), False, 'from core.domain import exp_domain\n'), ((7544, 7588), 'core.domain.exp_domain.State.create_default_state', 'exp_domain.State.create_default_state', (['"""ABC"""'], {}), "('ABC')\n", (7581, 7588), False, 'from core.domain import exp_domain\n'), ((12844, 12883), 'core.domain.param_domain.ParamSpec', 'param_domain.ParamSpec', (['"""UnicodeString"""'], {}), "('UnicodeString')\n", (12866, 12883), False, 'from core.domain import param_domain\n'), ((17813, 17869), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (17862, 17869), False, 'from core.domain import exp_domain\n'), ((21034, 21090), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (21083, 21090), False, 'from core.domain import exp_domain\n'), ((22974, 23048), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (23006, 23048), False, 'from core.domain import exp_domain\n'), ((23097, 23163), 'core.domain.exp_domain.GadgetInstance', 'exp_domain.GadgetInstance', (['"""bad_type"""', '"""aUniqueGadgetName"""', '[]', '{}'], {}), "('bad_type', 'aUniqueGadgetName', [], {})\n", (23122, 
23163), False, 'from core.domain import exp_domain\n'), ((25560, 25634), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (25592, 25634), False, 'from core.domain import exp_domain\n'), ((28169, 28243), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""An Exploration ID"""', 'SAMPLE_YAML_CONTENT'], {}), "('An Exploration ID', SAMPLE_YAML_CONTENT)\n", (28201, 28243), False, 'from core.domain import exp_domain\n'), ((28370, 28444), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (28402, 28444), False, 'from core.domain import exp_domain\n'), ((28583, 28654), 'core.domain.exp_domain.GadgetInstance', 'exp_domain.GadgetInstance', (['"""AnotherGadget"""', '"""GadgetUniqueName1"""', '[]', '{}'], {}), "('AnotherGadget', 'GadgetUniqueName1', [], {})\n", (28608, 28654), False, 'from core.domain import exp_domain\n'), ((29267, 29311), 'core.domain.exp_services.get_exploration_by_id', 'exp_services.get_exploration_by_id', (['"""exp_id"""'], {}), "('exp_id')\n", (29301, 29311), False, 'from core.domain import exp_services\n'), ((30100, 30154), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""0"""'], {}), "('0')\n", (30149, 30154), False, 'from core.domain import exp_domain\n'), ((30220, 30274), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""a"""'], {}), "('a')\n", (30269, 30274), False, 'from core.domain import exp_domain\n'), ((30345, 30402), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""abcd"""'], {}), "('abcd')\n", (30394, 30402), 
False, 'from core.domain import exp_domain\n'), ((30619, 30673), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""0"""'], {}), "('0')\n", (30668, 30673), False, 'from core.domain import exp_domain\n'), ((30733, 30776), 'core.domain.exp_domain.Exploration.from_dict', 'exp_domain.Exploration.from_dict', (['demo_dict'], {}), '(demo_dict)\n', (30765, 30776), False, 'from core.domain import exp_domain\n'), ((31100, 31154), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""0"""'], {}), "('0')\n", (31149, 31154), False, 'from core.domain import exp_domain\n'), ((31484, 31543), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""exp_id"""'], {}), "('exp_id')\n", (31533, 31543), False, 'from core.domain import exp_domain\n'), ((32618, 32721), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['self.EXP_ID'], {'title': '"""Title"""', 'category': '"""Category"""'}), "(self.EXP_ID, title=\n 'Title', category='Category')\n", (32667, 32721), False, 'from core.domain import exp_domain\n'), ((33469, 33523), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp2"""', 'yaml_content'], {}), "('exp2', yaml_content)\n", (33501, 33523), False, 'from core.domain import exp_domain\n'), ((34719, 34785), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['self.EXP_ID', 'SAMPLE_YAML_CONTENT'], {}), '(self.EXP_ID, SAMPLE_YAML_CONTENT)\n', (34751, 34785), False, 'from core.domain import exp_domain\n'), ((35084, 35163), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['self.EXP_ID', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), '(self.EXP_ID, SAMPLE_YAML_CONTENT_WITH_GADGETS)\n', (35116, 35163), False, 
'from core.domain import exp_domain\n'), ((35340, 35376), 'utils.dict_from_yaml', 'utils.dict_from_yaml', (['generated_yaml'], {}), '(generated_yaml)\n', (35360, 35376), False, 'import utils\n'), ((35407, 35461), 'utils.dict_from_yaml', 'utils.dict_from_yaml', (['SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), '(SAMPLE_YAML_CONTENT_WITH_GADGETS)\n', (35427, 35461), False, 'import utils\n'), ((54978, 55074), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V1'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V1)\n", (55019, 55074), False, 'from core.domain import exp_domain\n'), ((55269, 55365), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V2'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V2)\n", (55310, 55365), False, 'from core.domain import exp_domain\n'), ((55560, 55656), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V3'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V3)\n", (55601, 55656), False, 'from core.domain import exp_domain\n'), ((55851, 55947), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V4'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V4)\n", (55892, 55947), False, 'from core.domain import exp_domain\n'), ((56142, 56238), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V5'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V5)\n", (56183, 56238), False, 'from core.domain import exp_domain\n'), ((56433, 
56529), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V6'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V6)\n", (56474, 56529), False, 'from core.domain import exp_domain\n'), ((56724, 56820), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V7'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V7)\n", (56765, 56820), False, 'from core.domain import exp_domain\n'), ((57015, 57111), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V8'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V8)\n", (57056, 57111), False, 'from core.domain import exp_domain\n'), ((57306, 57402), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""eid"""', '"""Title"""', '"""Category"""', 'self.YAML_CONTENT_V9'], {}), "('eid', 'Title', 'Category', self.\n YAML_CONTENT_V9)\n", (57347, 57402), False, 'from core.domain import exp_domain\n'), ((57599, 57661), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""eid"""', 'self.YAML_CONTENT_V10'], {}), "('eid', self.YAML_CONTENT_V10)\n", (57631, 57661), False, 'from core.domain import exp_domain\n'), ((57863, 57925), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""eid"""', 'self.YAML_CONTENT_V11'], {}), "('eid', self.YAML_CONTENT_V11)\n", (57895, 57925), False, 'from core.domain import exp_domain\n'), ((58127, 58189), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""eid"""', 'self.YAML_CONTENT_V12'], {}), "('eid', self.YAML_CONTENT_V12)\n", (58159, 58189), False, 'from core.domain import 
exp_domain\n'), ((58518, 58616), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {'title': 'exp_title', 'category': '"""Category"""'}), "('eid', title=exp_title,\n category='Category')\n", (58567, 58616), False, 'from core.domain import exp_domain\n'), ((60398, 60454), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (60447, 60454), False, 'from core.domain import exp_domain\n'), ((61037, 61093), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (61086, 61093), False, 'from core.domain import exp_domain\n'), ((64768, 64824), 'core.domain.exp_domain.Exploration.create_default_exploration', 'exp_domain.Exploration.create_default_exploration', (['"""eid"""'], {}), "('eid')\n", (64817, 64824), False, 'from core.domain import exp_domain\n'), ((67737, 67840), 'core.domain.exp_domain.SkinInstance', 'exp_domain.SkinInstance', (['"""conversation_v1"""', "self._SAMPLE_SKIN_INSTANCE_DICT['skin_customizations']"], {}), "('conversation_v1', self._SAMPLE_SKIN_INSTANCE_DICT[\n 'skin_customizations'])\n", (67760, 67840), False, 'from core.domain import exp_domain\n'), ((68161, 68214), 'core.domain.exp_domain.SkinInstance', 'exp_domain.SkinInstance', (['feconf.DEFAULT_SKIN_ID', 'None'], {}), '(feconf.DEFAULT_SKIN_ID, None)\n', (68184, 68214), False, 'from core.domain import exp_domain\n'), ((68485, 68559), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (68517, 68559), False, 'from core.domain import exp_domain\n'), ((68824, 68880), 'core.domain.exp_domain.SkinInstance.from_dict', 'exp_domain.SkinInstance.from_dict', (['skin_instance_as_dict'], {}), 
'(skin_instance_as_dict)\n', (68857, 68880), False, 'from core.domain import exp_domain\n'), ((69346, 69420), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (69378, 69420), False, 'from core.domain import exp_domain\n'), ((69673, 69747), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (69705, 69747), False, 'from core.domain import exp_domain\n'), ((70281, 70355), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (70313, 70355), False, 'from core.domain import exp_domain\n'), ((72642, 72716), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (72674, 72716), False, 'from core.domain import exp_domain\n'), ((73293, 73349), 'core.domain.exp_domain.GadgetInstance.from_dict', 'exp_domain.GadgetInstance.from_dict', (['test_gadget_as_dict'], {}), '(test_gadget_as_dict)\n', (73328, 73349), False, 'from core.domain import exp_domain\n'), ((73750, 73824), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp1"""', 'SAMPLE_YAML_CONTENT_WITH_GADGETS'], {}), "('exp1', SAMPLE_YAML_CONTENT_WITH_GADGETS)\n", (73782, 73824), False, 'from core.domain import exp_domain\n'), ((9185, 9507), 'core.domain.exp_domain.AnswerGroup.from_dict', 'exp_domain.AnswerGroup.from_dict', (["{'outcome': {'dest': exploration.init_state_name, 'feedback': ['Feedback'],\n 'param_changes': []}, 'rule_specs': [{'inputs': {'training_data': [\n 'Test']}, 'rule_type': 'FuzzyMatches'}, {'inputs': {'training_data': [\n 
'Test']}, 'rule_type': 'FuzzyMatches'}], 'correct': False}"], {}), "({'outcome': {'dest': exploration.\n init_state_name, 'feedback': ['Feedback'], 'param_changes': []},\n 'rule_specs': [{'inputs': {'training_data': ['Test']}, 'rule_type':\n 'FuzzyMatches'}, {'inputs': {'training_data': ['Test']}, 'rule_type':\n 'FuzzyMatches'}], 'correct': False})\n", (9217, 9507), False, 'from core.domain import exp_domain\n'), ((10714, 10944), 'core.domain.exp_domain.AnswerGroup.from_dict', 'exp_domain.AnswerGroup.from_dict', (["{'outcome': {'dest': exploration.init_state_name, 'feedback': ['Feedback'],\n 'param_changes': []}, 'rule_specs': [{'inputs': {'x': 'Test'},\n 'rule_type': 'Contains'}], 'correct': False}"], {}), "({'outcome': {'dest': exploration.\n init_state_name, 'feedback': ['Feedback'], 'param_changes': []},\n 'rule_specs': [{'inputs': {'x': 'Test'}, 'rule_type': 'Contains'}],\n 'correct': False})\n", (10746, 10944), False, 'from core.domain import exp_domain\n'), ((16576, 16642), 'core.domain.exp_domain.State.create_default_state', 'exp_domain.State.create_default_state', (['exploration.init_state_name'], {}), '(exploration.init_state_name)\n', (16613, 16642), False, 'from core.domain import exp_domain\n'), ((17303, 17366), 'core.domain.param_domain.ParamSpec.from_dict', 'param_domain.ParamSpec.from_dict', (["{'obj_type': 'UnicodeString'}"], {}), "({'obj_type': 'UnicodeString'})\n", (17335, 17366), False, 'from core.domain import param_domain\n'), ((17577, 17640), 'core.domain.param_domain.ParamSpec.from_dict', 'param_domain.ParamSpec.from_dict', (["{'obj_type': 'UnicodeString'}"], {}), "({'obj_type': 'UnicodeString'})\n", (17609, 17640), False, 'from core.domain import param_domain\n'), ((23842, 23886), 'core.domain.exp_domain.State.create_default_state', 'exp_domain.State.create_default_state', (['"""DEF"""'], {}), "('DEF')\n", (23879, 23886), False, 'from core.domain import exp_domain\n'), ((24134, 24178), 'core.domain.exp_domain.State.create_default_state', 
'exp_domain.State.create_default_state', (['"""GHI"""'], {}), "('GHI')\n", (24171, 24178), False, 'from core.domain import exp_domain\n'), ((33737, 33802), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp3"""', '"""No_initial_state_name"""'], {}), "('exp3', 'No_initial_state_name')\n", (33769, 33802), False, 'from core.domain import exp_domain\n'), ((33859, 33946), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp4"""', '"""Invalid\ninit_state_name:\nMore stuff"""'], {}), '(\'exp4\',\n """Invalid\ninit_state_name:\nMore stuff""")\n', (33891, 33946), False, 'from core.domain import exp_domain\n'), ((34014, 34084), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp4"""', '"""State1:\n(\nInvalid yaml"""'], {}), '(\'exp4\', """State1:\n(\nInvalid yaml""")\n', (34046, 34084), False, 'from core.domain import exp_domain\n'), ((34234, 34304), 'core.domain.exp_domain.Exploration.from_yaml', 'exp_domain.Exploration.from_yaml', (['"""exp4"""', 'SAMPLE_UNTITLED_YAML_CONTENT'], {}), "('exp4', SAMPLE_UNTITLED_YAML_CONTENT)\n", (34266, 34304), False, 'from core.domain import exp_domain\n'), ((34442, 34537), 'core.domain.exp_domain.Exploration.from_untitled_yaml', 'exp_domain.Exploration.from_untitled_yaml', (['"""exp4"""', '"""Title"""', '"""Category"""', 'SAMPLE_YAML_CONTENT'], {}), "('exp4', 'Title', 'Category',\n SAMPLE_YAML_CONTENT)\n", (34483, 34537), False, 'from core.domain import exp_domain\n'), ((60041, 60099), 'core.domain.exp_domain.SkinInstance._get_default_skin_customizations', 'exp_domain.SkinInstance._get_default_skin_customizations', ([], {}), '()\n', (60097, 60099), False, 'from core.domain import exp_domain\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/2/15 16:10
# @Author : <NAME>
# @Email : <EMAIL>
# @File : utils.py
# @Software: PyCharm
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.core.framework import summary_pb2
class AverageMeter(object):
    """Tracks the latest value and the running (weighted) average of a series.

    Callers read the attributes directly: ``val`` is the most recent value,
    ``sum`` the weighted total, ``count`` the number of samples seen, and
    ``average`` the running mean ``sum / count``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics back to zero."""
        self.val, self.average, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.average = self.sum / float(self.count)
def make_summary(name, val):
    """Wrap a scalar value in a TensorBoard ``Summary`` protobuf under ``name``."""
    scalar = summary_pb2.Summary.Value(tag=name, simple_value=val)
    return summary_pb2.Summary(value=[scalar])
def plot_accuracy(x, y, path):
    """Plot training and validation accuracy curves and save the figure.

    :param x: array of epoch indices for the horizontal axis
    :param y: pair ``(train_accuracy, val_accuracy)`` of per-epoch values
    :param path: file path where the resulting figure is saved
    """
    train_acc, val_acc = y
    # Plot order must match the legend label order below.
    plt.plot(x, train_acc, 'r-')
    plt.plot(x, val_acc, 'b--')
    plt.grid(True)
    plt.xlim(0, x[-1] + 2)
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend(["train_acc", "val_acc"], loc="best")
    plt.savefig(path)
    plt.close()
def plot_loss(x, y, path, mode="train"):
    """Plot total / image-classifier / domain-classifier losses and save them.

    :param x: array of epoch indices for the horizontal axis
    :param y: triple ``(loss, image_cls_loss, domain_cls_loss)`` of
        per-epoch values
    :param path: file path where the resulting figure is saved
    :param mode: "train" selects the training-loss legend labels; any other
        value selects the validation labels
    """
    if mode == "train":
        legend_labels = [
            "train_loss", "train_image_cls_loss", "train_domain_cls_loss"]
    else:
        legend_labels = [
            "val_loss", "val_image_cls_loss", "val_domain_cls_loss"]
    total_loss, image_loss, domain_loss = y

    # Lower y-limit sits 0.1 below the smallest plotted value, clipped at 0.
    smallest = np.min([
        np.min(total_loss), np.min(image_loss), np.min(domain_loss)])
    y_min = np.max([smallest - 0.1, 0])

    # Plot order must match the legend label order above.
    plt.plot(x, total_loss, 'r-')
    plt.plot(x, image_loss, 'b--')
    plt.plot(x, domain_loss, 'g-.')
    plt.grid(True)
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.xlim(0, x[-1] + 2)
    plt.ylim(ymin=y_min)
    plt.legend(legend_labels, loc="best")
    plt.savefig(path)
    plt.close()
def learning_rate_schedule(process, init_learning_rate=0.01, alpha=10.0, beta=0.75):
    """Polynomially decayed learning-rate schedule.

    Computes ``init_learning_rate / (1 + alpha * process) ** beta``.

    :param process: training progress ratio, a value in [0, 1]
    :param init_learning_rate: learning rate at ``process == 0``, default 0.01
    :param alpha: decay speed parameter, default 10
    :param beta: decay shape exponent, default 0.75
    :return: the learning rate for the given progress
    """
    decay = (1.0 + alpha * process) ** beta
    return init_learning_rate / decay
def grl_lambda_schedule(process,gamma=10.0):
    """Schedule for the GRL (gradient reversal layer) coefficient lambda.

    Ramps smoothly from 0 at ``process == 0`` towards 1 as ``process``
    approaches 1 (a scaled and shifted sigmoid of ``gamma * process``).

    :param process: training progress ratio, a value in [0, 1]
    :param gamma: steepness parameter of the ramp, default 10
    """
    return 2.0 / (1.0+np.exp(-gamma*process)) - 1.0 | [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"tensorflow.core.framework.summary_pb2.Summary.Value",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.min",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.xlim",... | [((1003, 1036), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'train_accuracy', '"""r-"""'], {}), "(x, train_accuracy, 'r-')\n", (1011, 1036), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1074), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'val_accuracy', '"""b--"""'], {}), "(x, val_accuracy, 'b--')\n", (1050, 1074), True, 'import matplotlib.pyplot as plt\n'), ((1080, 1094), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1088, 1094), True, 'import matplotlib.pyplot as plt\n'), ((1100, 1122), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(x[-1] + 2)'], {}), '(0, x[-1] + 2)\n', (1108, 1122), True, 'import matplotlib.pyplot as plt\n'), ((1146, 1165), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (1156, 1165), True, 'import matplotlib.pyplot as plt\n'), ((1171, 1193), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (1181, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1199, 1236), 'matplotlib.pyplot.legend', 'plt.legend', (['lengend_array'], {'loc': '"""best"""'}), "(lengend_array, loc='best')\n", (1209, 1236), True, 'import matplotlib.pyplot as plt\n'), ((1241, 1258), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (1252, 1258), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1275), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1273, 1275), True, 'import matplotlib.pyplot as plt\n'), ((1981, 2068), 'numpy.min', 'np.min', (['[loss_results_min, image_cls_loss_results_min, domain_cls_loss_results_min]'], {}), '([loss_results_min, image_cls_loss_results_min,\n domain_cls_loss_results_min])\n', (1987, 2068), True, 'import numpy as np\n'), ((2068, 2099), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'loss_results', '"""r-"""'], {}), "(x, loss_results, 'r-')\n", (2076, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2147), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 
'image_cls_loss_results', '"""b--"""'], {}), "(x, image_cls_loss_results, 'b--')\n", (2113, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2153, 2196), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'domain_cls_loss_results', '"""g-."""'], {}), "(x, domain_cls_loss_results, 'g-.')\n", (2161, 2196), True, 'import matplotlib.pyplot as plt\n'), ((2202, 2216), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2210, 2216), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2241), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (2232, 2241), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2265), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2257, 2265), True, 'import matplotlib.pyplot as plt\n'), ((2271, 2293), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(x[-1] + 2)'], {}), '(0, x[-1] + 2)\n', (2279, 2293), True, 'import matplotlib.pyplot as plt\n'), ((2296, 2316), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {'ymin': 'y_min'}), '(ymin=y_min)\n', (2304, 2316), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2379), 'matplotlib.pyplot.legend', 'plt.legend', (['lengend_array'], {'loc': '"""best"""'}), "(lengend_array, loc='best')\n", (2352, 2379), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2401), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (2395, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2418), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2416, 2418), True, 'import matplotlib.pyplot as plt\n'), ((681, 734), 'tensorflow.core.framework.summary_pb2.Summary.Value', 'summary_pb2.Summary.Value', ([], {'tag': 'name', 'simple_value': 'val'}), '(tag=name, simple_value=val)\n', (706, 734), False, 'from tensorflow.core.framework import summary_pb2\n'), ((1770, 1790), 'numpy.min', 'np.min', (['loss_results'], {}), '(loss_results)\n', (1776, 1790), True, 'import numpy as np\n'), ((1843, 1873), 'numpy.min', 'np.min', 
(['image_cls_loss_results'], {}), '(image_cls_loss_results)\n', (1849, 1873), True, 'import numpy as np\n'), ((1926, 1957), 'numpy.min', 'np.min', (['domain_cls_loss_results'], {}), '(domain_cls_loss_results)\n', (1932, 1957), True, 'import numpy as np\n'), ((2939, 2963), 'numpy.exp', 'np.exp', (['(-gamma * process)'], {}), '(-gamma * process)\n', (2945, 2963), True, 'import numpy as np\n')] |
# --- Discord ticket-bot setup ---
# discord.py commands framework; aiofiles gives async file access for the
# configuration file; asyncio supplies TimeoutError for wait_for timeouts.
import discord
import asyncio
import aiofiles
from discord.ext import commands
# Request every gateway intent (reaction/member events below need the
# privileged intents enabled in the developer portal as well).
intents = discord.Intents.all()
# The bot responds to the "!" prefix or to being @mentioned.
client = commands.Bot(command_prefix=commands.when_mentioned_or('!'),intents=intents)
# Per-guild ticket configuration, populated by !config:
#   guild_id -> [ticket_message_id, ticket_channel_id, ticket_category_id]
client.ticket_configs = {}
@client.command()
async def ping(ctx):
    """Reply with the bot's current websocket latency in milliseconds."""
    latency_ms = round(client.latency * 1000)
    response = discord.Embed(
        title="Bot Ping",
        description=f"My ping is {latency_ms}ms ",
        color=discord.Colour.gold(),
    )
    await ctx.reply(embed=response)
@client.event
async def on_ready():
    """Log a startup message once the gateway connection is established."""
    print("Bot is online")
@client.event
async def on_raw_reaction_add(payload):
    """Create a ticket channel when a user reacts with the ticket emoji.

    Fires for every reaction in the guild; only acts when the reactor is not
    the bot itself, the emoji is the ticket emoji (U+1F3AB), and the reaction
    is on the configured ticket message of a configured guild.
    """
    if payload.member.id == client.user.id or str(payload.emoji) != u"\U0001F3AB":
        return
    # Bug fix: the original indexed ticket_configs directly and raised
    # KeyError for any reaction in a guild that was never configured.
    config = client.ticket_configs.get(payload.guild_id)
    if config is None:
        return
    msg_id, channel_id, category_id = config
    if payload.message_id != msg_id:
        return
    guild = client.get_guild(payload.guild_id)
    # Bug fix: the original for-loop fell through with the *last* category
    # (or NameError on an empty list) when the configured id was missing.
    for category in guild.categories:
        if category.id == category_id:
            break
    else:
        return
    channel = guild.get_channel(channel_id)
    ticket_channel = await category.create_text_channel(
        f"ticket-{payload.member.display_name}",
        topic=f"Ticket for {payload.member.display_name}.",
        permission_synced=True,
    )
    # Create the (empty) transcript file for this ticket; on_message appends
    # to it and !close posts it to the transcript channel.
    open(f"tickets/{ticket_channel.id}.txt", "w").close()
    await ticket_channel.set_permissions(payload.member, read_messages=True, send_messages=True)
    mention_member = f"{payload.member.mention}"
    # Remove the user's reaction so the ticket message stays reusable.
    message = await channel.fetch_message(msg_id)
    await message.remove_reaction(payload.emoji, payload.member)
    creation_embed = discord.Embed(
        title="Ticket Created",
        description="Thank you for creating a ticket and make sure that the ticket follows our ticket guidelines and explain the ticket creation reason in detail so our staff can help you.",
        color=discord.Colour.blurple(),
    )
    await ticket_channel.send(mention_member, embed=creation_embed)
@client.command()
async def close(ctx):
    """Close the current ticket channel after the author confirms.

    Only works inside channels whose name starts with "ticket". On
    confirmation the channel is deleted and its transcript file is posted to
    the transcript channel, mentioning whoever closed the ticket.
    """
    channel = ctx.channel
    if not channel.name.startswith("ticket"):
        return
    await ctx.reply("Are you sure you want to close the ticket? Reply with ``confirm`` to close the ticket.")

    def _is_confirmation(m):
        # Same channel, same author, exact confirmation keyword.
        return m.channel == ctx.channel and m.author == ctx.author and m.content == "confirm"

    try:
        await client.wait_for("message", check=_is_confirmation, timeout=10)
    except asyncio.TimeoutError:
        # Bug fix: the original let TimeoutError propagate as an unhandled
        # command error whenever the user never replied "confirm".
        return
    await channel.delete()
    closer = ctx.author.mention
    transcript_chan = client.get_channel(803399751487717396)  # transcript channel id
    # Post the transcript and mention the closer there.
    await transcript_chan.send(closer, file=discord.File(f"tickets/{channel.id}.txt"))
@client.command()
@commands.has_permissions(administrator=True)
async def config(ctx, msg: discord.Message=None, category: discord.CategoryChannel=None):
    """Configure the ticket system for this guild (administrators only).

    Usage: !config <message_id> <category_id> — the message is the one users
    react to in order to open a ticket; the category is where ticket
    channels are created.
    """
    if msg is None or category is None:
        error_embed = discord.Embed(
            title="Ticket Configuration Failed",
            description="Failed to configure. Either an argument is missing or an invalid argument was passed.",
            color=discord.Colour.red(),
        )
        await ctx.channel.send(embed=error_embed)
        return
    client.ticket_configs[ctx.guild.id] = [msg.id, msg.channel.id, category.id]
    # Bug fix: the original crashed with FileNotFoundError on the very first
    # configuration, before ticket_configs.txt existed.
    try:
        async with aiofiles.open("ticket_configs.txt", mode="r") as file:
            data = await file.readlines()
    except FileNotFoundError:
        data = []
    async with aiofiles.open("ticket_configs.txt", mode="w") as file:
        await file.write(f"{ctx.guild.id} {msg.id} {msg.channel.id} {category.id}\n")
        # Keep every other guild's line; drop this guild's stale entry.
        for line in data:
            if int(line.split(" ")[0]) != ctx.guild.id:
                await file.write(line)
    # Seed the ticket emoji on the message; reacting to it opens a ticket.
    await msg.add_reaction(u"\U0001F3AB")
    await ctx.channel.send("Successfully configured the ticket system.")
@client.event
async def on_message(message):
    """Process commands and append ticket-channel messages to the transcript."""
    await client.process_commands(message)
    # Bug fix: DM channels have no ``name`` attribute, so the original raised
    # AttributeError on every direct message the bot received.
    channel_name = getattr(message.channel, "name", "")
    if channel_name.startswith("ticket"):
        # Append to this ticket's transcript (created when the ticket opened);
        # the context manager guarantees the handle is closed.
        with open(f"tickets/{message.channel.id}.txt", "a") as transcript:
            transcript.write(f"{message.author} : {message.content}\n")
# NOTE: replace the placeholder with a real bot token (keep it out of VCS).
client.run("your_bot_token_here")
| [
"discord.ext.commands.has_permissions",
"discord.ext.commands.when_mentioned_or",
"aiofiles.open",
"discord.Colour.gold",
"discord.Intents.all",
"discord.Colour.red",
"discord.Colour.blurple",
"discord.File"
] | [((92, 113), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (111, 113), False, 'import discord\n'), ((3497, 3541), 'discord.ext.commands.has_permissions', 'commands.has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (3521, 3541), False, 'from discord.ext import commands\n'), ((151, 182), 'discord.ext.commands.when_mentioned_or', 'commands.when_mentioned_or', (['"""!"""'], {}), "('!')\n", (177, 182), False, 'from discord.ext import commands\n'), ((4321, 4366), 'aiofiles.open', 'aiofiles.open', (['"""ticket_configs.txt"""'], {'mode': '"""r"""'}), "('ticket_configs.txt', mode='r')\n", (4334, 4366), False, 'import aiofiles\n'), ((4430, 4475), 'aiofiles.open', 'aiofiles.open', (['"""ticket_configs.txt"""'], {'mode': '"""w"""'}), "('ticket_configs.txt', mode='w')\n", (4443, 4475), False, 'import aiofiles\n'), ((374, 395), 'discord.Colour.gold', 'discord.Colour.gold', ([], {}), '()\n', (393, 395), False, 'import discord\n'), ((4111, 4131), 'discord.Colour.red', 'discord.Colour.red', ([], {}), '()\n', (4129, 4131), False, 'import discord\n'), ((2253, 2277), 'discord.Colour.blurple', 'discord.Colour.blurple', ([], {}), '()\n', (2275, 2277), False, 'import discord\n'), ((3329, 3370), 'discord.File', 'discord.File', (['f"""tickets/{channel.id}.txt"""'], {}), "(f'tickets/{channel.id}.txt')\n", (3341, 3370), False, 'import discord\n')] |
"""
Calm Runbook Sample for set variable task
"""
from calm.dsl.runbooks import read_local_file
from calm.dsl.runbooks import runbook, runbook_json
from calm.dsl.runbooks import RunbookTask as Task
from calm.dsl.runbooks import CalmEndpoint as Endpoint, basic_cred
CRED_USERNAME = read_local_file(".tests/runbook_tests/username")
CRED_PASSWORD = read_local_file(".tests/runbook_tests/password")
VM_IP = read_local_file(".tests/runbook_tests/vm_ip")
Cred = basic_cred(CRED_USERNAME, CRED_PASSWORD, name="endpoint_cred")
endpoint = Endpoint.Linux.ip([VM_IP], cred=Cred)
@runbook
def DslSetVariableTask(endpoints=[endpoint], default=False):
    "Runbook example with Set Variable Tasks"
    # Capture var1 from an escript task's stdout ("var1=test").
    Task.SetVariable.escript(script="print 'var1=test'", variables=["var1"])
    # Capture var2 from a shell script executed over SSH on the endpoint.
    Task.SetVariable.ssh(
        filename="scripts/sample_script.sh", variables=["var2"], target=endpoints[0]
    )
    # Echo both captured variables via Calm's @@{...}@@ macro substitution.
    Task.Exec.escript(script="print '@@{var1}@@ @@{var2}@@'")
def main():
    # Print the compiled JSON representation of the runbook.
    print(runbook_json(DslSetVariableTask))
if __name__ == "__main__":
    main()
| [
"calm.dsl.runbooks.basic_cred",
"calm.dsl.runbooks.RunbookTask.SetVariable.ssh",
"calm.dsl.runbooks.CalmEndpoint.Linux.ip",
"calm.dsl.runbooks.runbook_json",
"calm.dsl.runbooks.RunbookTask.Exec.escript",
"calm.dsl.runbooks.RunbookTask.SetVariable.escript",
"calm.dsl.runbooks.read_local_file"
] | [((283, 331), 'calm.dsl.runbooks.read_local_file', 'read_local_file', (['""".tests/runbook_tests/username"""'], {}), "('.tests/runbook_tests/username')\n", (298, 331), False, 'from calm.dsl.runbooks import read_local_file\n'), ((348, 396), 'calm.dsl.runbooks.read_local_file', 'read_local_file', (['""".tests/runbook_tests/password"""'], {}), "('.tests/runbook_tests/password')\n", (363, 396), False, 'from calm.dsl.runbooks import read_local_file\n'), ((405, 450), 'calm.dsl.runbooks.read_local_file', 'read_local_file', (['""".tests/runbook_tests/vm_ip"""'], {}), "('.tests/runbook_tests/vm_ip')\n", (420, 450), False, 'from calm.dsl.runbooks import read_local_file\n'), ((459, 521), 'calm.dsl.runbooks.basic_cred', 'basic_cred', (['CRED_USERNAME', 'CRED_PASSWORD'], {'name': '"""endpoint_cred"""'}), "(CRED_USERNAME, CRED_PASSWORD, name='endpoint_cred')\n", (469, 521), False, 'from calm.dsl.runbooks import CalmEndpoint as Endpoint, basic_cred\n'), ((533, 570), 'calm.dsl.runbooks.CalmEndpoint.Linux.ip', 'Endpoint.Linux.ip', (['[VM_IP]'], {'cred': 'Cred'}), '([VM_IP], cred=Cred)\n', (550, 570), True, 'from calm.dsl.runbooks import CalmEndpoint as Endpoint, basic_cred\n'), ((694, 766), 'calm.dsl.runbooks.RunbookTask.SetVariable.escript', 'Task.SetVariable.escript', ([], {'script': '"""print \'var1=test\'"""', 'variables': "['var1']"}), '(script="print \'var1=test\'", variables=[\'var1\'])\n', (718, 766), True, 'from calm.dsl.runbooks import RunbookTask as Task\n'), ((771, 874), 'calm.dsl.runbooks.RunbookTask.SetVariable.ssh', 'Task.SetVariable.ssh', ([], {'filename': '"""scripts/sample_script.sh"""', 'variables': "['var2']", 'target': 'endpoints[0]'}), "(filename='scripts/sample_script.sh', variables=['var2'\n ], target=endpoints[0])\n", (791, 874), True, 'from calm.dsl.runbooks import RunbookTask as Task\n'), ((888, 945), 'calm.dsl.runbooks.RunbookTask.Exec.escript', 'Task.Exec.escript', ([], {'script': '"""print \'@@{var1}@@ @@{var2}@@\'"""'}), '(script="print 
\'@@{var1}@@ @@{var2}@@\'")\n', (905, 945), True, 'from calm.dsl.runbooks import RunbookTask as Task\n'), ((970, 1002), 'calm.dsl.runbooks.runbook_json', 'runbook_json', (['DslSetVariableTask'], {}), '(DslSetVariableTask)\n', (982, 1002), False, 'from calm.dsl.runbooks import runbook, runbook_json\n')] |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import collections
import math
import numpy as np
def count_words(filename):
    """Count whitespace-separated tokens in *filename*.

    Returns a ``(words, counts)`` pair of tuples ordered by descending
    frequency, ties broken alphabetically.
    """
    freq = collections.Counter()
    with open(filename, "r") as handle:
        for line in handle:
            freq.update(line.strip().split())
    ordered = sorted(freq.items(), key=lambda item: (-item[1], item[0]))
    tokens, totals = zip(*ordered)
    return tokens, totals
def control_symbols(string):
    """Split a comma-separated list of control symbols into a list.

    Falsy input (``None`` or the empty string) yields an empty list.
    """
    return string.strip().split(",") if string else []
def save_vocab(name, vocab):
    """Write *vocab* (token -> raw count) to *name* as relative frequencies.

    A ``.txt`` extension is appended when missing. Each line is
    ``"<token> <count/total>"`` with the ratio printed to 16 decimal places,
    ordered by descending count (ties keep insertion order).
    """
    if name.split(".")[-1] != "txt":
        name += ".txt"
    ranked = sorted(vocab.items(), key=lambda pair: pair[1], reverse=True)
    total = sum(count for _, count in ranked)
    with open(name, "w") as out:
        for token, count in ranked:
            out.write("%s %.16f\n" % (token, count / total))
def cal_cdf_model(corpus, vocab):
    """Build and save the CDF of per-sentence negative log-likelihoods.

    For each line of *corpus*, sums -log(p(w)) over its words using the
    unigram frequencies in *vocab*, histograms those scores into unit-width
    bins [0, 999], and saves the normalized cumulative histogram plus the bin
    edges to ``<args.output>-cdf_base.npz``.

    NOTE(review): relies on the module-level ``args`` for the output path,
    and raises KeyError for any corpus word missing from *vocab* (e.g. when
    the vocabulary was built with --limit) — confirm callers guarantee full
    coverage.
    """
    # Rebuild word -> raw count, sorted by descending count.
    pairs = sorted(vocab.items(), key=lambda x: x[1], reverse=True)
    words, ids = list(zip(*pairs))
    freq_dict = {}
    for word, id in zip(words, ids):
        freq_dict[word] = id
    T_freq = sum(ids)
    data = []
    debug = 0
    with open(corpus, "r") as f:
        for line in f.readlines():
            line = line.split()
            SUM = 0
            for w in line:
                # Relative frequency of the word; log only when non-zero.
                p = freq_dict[w] / T_freq
                if p != 0:
                    SUM += math.log(p)
            # Negate so larger means less likely under the unigram model.
            SUM = -SUM
            data.append(SUM)
            # if SUM < 5.718:
            # debug += 1
            # print (SUM)
    # data contains all sum log
    # bins='auto'
    # Histogram the scores into unit bins [0, 999].
    v, base = np.histogram(data, bins=np.arange(1000))
    print ("data:", data[:50])
    print ("value", v[:50])
    base = base.astype(np.float32)
    print ("base:", base[:50])
    print ("highest value:", base[-1])
    print ("len of base:", len(base))
    # print ("debug:", debug)
    # Normalized cumulative histogram = empirical CDF over the bins.
    cdf = np.cumsum(v)
    cdf = cdf / len(data)
    cdf = cdf.astype(np.float32)
    print ("cdf:", cdf, cdf.dtype)
    print ("outputing cdf and bases.")
    # res = {"cdf": cdf, "base": base}
    np.savez(args.output + "-cdf_base.npz", cdf=cdf, base=base)
def parse_args():
    """Parse command-line arguments for the vocabulary builder."""
    parser = argparse.ArgumentParser(description="Create vocabulary")
    # Positional: the corpus to read and the vocabulary file to write.
    parser.add_argument("corpus", help="input corpus")
    parser.add_argument(
        "output", default="vocab.txt", help="Output vocabulary name"
    )
    # Optional knobs.
    parser.add_argument("--limit", default=0, type=int, help="Vocabulary size")
    parser.add_argument(
        "--control",
        type=str,
        default="",
        help="Add control symbols to vocabulary. Control symbols are separated by comma.",
    )
    return parser.parse_args()
# Parsed CLI arguments live at module level so main() and cal_cdf_model()
# can reach them.
args=parse_args()
def main():
    """Build the vocabulary, write it to disk, and report corpus statistics."""
    words, counts = count_words(args.corpus)
    vocab = {}
    # Register control symbols first, mapped to their running index.
    for sym in control_symbols(args.control):
        vocab[sym] = len(vocab)
    limit = args.limit
    covered = 0
    for word, freq in zip(words, counts):
        if limit and len(vocab) >= limit:
            break
        if word in vocab:
            print("Warning: found duplicate token %s, ignored" % word)
            continue
        vocab[word] = freq
        covered += freq
    save_vocab(args.output, vocab)
    cal_cdf_model(args.corpus, vocab)
    total = sum(counts)
    print("Total words: %d" % total)
    print("Unique words: %d" % len(words))
    print("Vocabulary coverage: %4.2f%%" % (100.0 * covered / total))
if __name__ == "__main__":
main()
| [
"numpy.savez",
"argparse.ArgumentParser",
"math.log",
"collections.Counter",
"numpy.cumsum",
"numpy.arange"
] | [((291, 312), 'collections.Counter', 'collections.Counter', ([], {}), '()\n', (310, 312), False, 'import collections\n'), ((2254, 2266), 'numpy.cumsum', 'np.cumsum', (['v'], {}), '(v)\n', (2263, 2266), True, 'import numpy as np\n'), ((2443, 2502), 'numpy.savez', 'np.savez', (["(args.output + '-cdf_base.npz')"], {'cdf': 'cdf', 'base': 'base'}), "(args.output + '-cdf_base.npz', cdf=cdf, base=base)\n", (2451, 2502), True, 'import numpy as np\n'), ((2537, 2593), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create vocabulary"""'}), "(description='Create vocabulary')\n", (2560, 2593), False, 'import argparse\n'), ((1995, 2010), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (2004, 2010), True, 'import numpy as np\n'), ((1757, 1768), 'math.log', 'math.log', (['p'], {}), '(p)\n', (1765, 1768), False, 'import math\n')] |
# ---
# jupyter:
# jupytext:
# formats: jupyter_scripts//ipynb,scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # series_tools:
#
# set of tools that work with streamflow records.
# - Identify events.
# - Identify baseflow and runoff.
#
import pandas as pd
import numpy as np
# ## Digital filters
#
# Collection of functions to separate runoff from baseflow.
# +
def DigitalFilters(Q, tipo='Eckhart', a=0.98, BFI=0.8):
    '''Digital filters to separate baseflow from runoff in a continuous series.
    Parameters:
        - Q: pandas Series with the streamflow records.
        - tipo: filter to use: 'Eckhart' (or 1), 'Nathan' (or 2),
          'Chapman' (or 3).
        - a: filter parameter (Eckhart: 0.98; Nathan/Chapman: 0.8).
        - BFI: maximum baseflow index (0.8); used only by the Eckhart filter.
    Returns:
        - pandas DataFrame with columns ['Runoff', 'Baseflow'].
    Raises:
        - ValueError: if tipo is not one of the supported filters (the
          original fell through to a NameError on undefined R, B).'''
    def _nathan_1990(Q, a=0.8):
        '''One-parameter digital filter of Nathan and McMahon (1990).'''
        R = np.zeros(Q.size)
        c = 1
        for q1, q2 in zip(Q[:-1], Q[1:]):
            R[c] = a * R[c - 1] + ((1 + a) / 2.) * (q2 - q1)
            # Runoff is bounded between 0 and the streamflow itself.
            if R[c] < 0:
                R[c] = 0
            elif R[c] > q2:
                R[c] = q2
            c += 1
        B = Q - R
        return R, B
    def _eckhart_2005(Q, BFI=0.8, a=0.98):
        '''Two-parameter Eckhart (2005) digital filter.
        Parameters:
            - Q: np.ndarray with the streamflow records.
            - BFI: maximum amount of baseflow (fraction).
            - a: recession parameter alpha (0.98).
        Returns:
            - R: total runoff.
            - B: total baseflow.'''
        B = np.zeros(Q.size)
        B[0] = Q[0]
        c = 1
        for q in Q[1:]:
            # Separation equation; baseflow can never exceed streamflow.
            B[c] = ((1.0 - BFI) * a * B[c - 1] + (1.0 - a) * BFI * q) / (1.0 - a * BFI)
            if B[c] > q:
                B[c] = q
            c += 1
        R = Q - B
        return R, B
    def _chapman_maxwell_1996(Q, a=0.98):
        '''Digital filter proposed by Chapman and Maxwell (1996).'''
        B = np.zeros(Q.size)
        c = 1
        for q in Q[1:]:
            B[c] = (a / (2. - a)) * B[c - 1] + ((1. - a) / (2. - a)) * q
            c += 1
        R = Q - B
        return R, B
    # Dispatch on the requested filter.
    if tipo == 'Eckhart' or tipo == 1:
        # Bug fix: the original passed (Q.values, a, BFI) positionally into a
        # signature of (Q, BFI, a), silently swapping the two parameters.
        R, B = _eckhart_2005(Q.values, BFI=BFI, a=a)
    elif tipo == 'Nathan' or tipo == 2:
        R, B = _nathan_1990(Q.values, a)
    elif tipo == 'Chapman' or tipo == 3:
        R, B = _chapman_maxwell_1996(Q.values, a)
    else:
        raise ValueError("Unknown filter tipo: %r" % (tipo,))
    # Return both components aligned with the input index.
    return pd.DataFrame(np.vstack([R, B]).T, index=Q.index, columns=['Runoff', 'Baseflow'])
# -
# ## Events selection functions
#
# Collection of functions to identify peaks in a series and the end of each peak recession.
# +
def Events_Get_Peaks(Q, Qmin = None, tw = pd.Timedelta('12h')):
    '''Find the peak values of the hydrographs of a series.
    Parameters:
        - Q: pandas Series with the records.
        - Qmin: minimum value of Q to be considered a peak; when None the
          99th percentile of the finite records is used.
        - tw: time window used to suppress surrounding maxima around each
          detected peak.
    Returns:
        - pandas Series of the peak values, sorted by date.'''
    if Qmin is None:
        finite_values = Q.values[np.isfinite(Q.values)]
        Qmin = np.percentile(finite_values, 99)
    # Candidate peaks: every record above the threshold.
    above = Q[Q > Qmin]
    original_peaks = above.copy()
    peak_dates = []
    # Repeatedly take the global maximum and blank out its neighbourhood.
    while True:
        top = above.idxmax()
        peak_dates.append(top)
        above[top - tw:top + tw] = -9
        if above.max() < Qmin:
            break
    return original_peaks[peak_dates].sort_index()
def Events_Get_End(Q, Qmax, minDif = 0.04, minDistance = None,maxSearch = 10, Window = '1h'):
    '''Find the end of each selected event, i.e. the length of each recession.
    Parameters:
        - Q: pandas Series with the records.
        - Qmax: pandas Series with the peak streamflows.
        - minDif: minimum hourly increase to consider a recession over.
    Optional:
        - minDistance: minimum temporal distance between peak and end.
        - maxSearch: maximum number of candidates inspected for the end.
        - Window: NOTE(review) declared but unused — the resample interval
          is hard-coded to '1h' below; confirm before relying on it.
    Returns:
        - pandas Series with the end points, and the subset of Qmax whose
          end was found.'''
    # Hourly first differences of the record; a positive jump > minDif
    # after a peak marks the end of its recession.
    X = Q.resample('1h').mean()
    dX = X.values[1:] - X.values[:-1]
    dX = pd.Series(dX, index=X.index[:-1])
    # Collect an end date per peak; Correct flags peaks whose end failed.
    DatesEnds = []
    Correct = []
    for peakIndex in Qmax.index:
        try:
            a = dX[dX.index > peakIndex]
            if minDistance is None:
                # First rise above minDif after the peak.
                DatesEnds.append(a[a>minDif].index[0])
            else:
                # First rise that is also at least minDistance after the peak,
                # scanning at most maxSearch candidates.
                # NOTE(review): if no candidate satisfies minDistance within
                # maxSearch, nothing is appended to DatesEnds while Correct
                # still gets a 0, desynchronizing the two lists — confirm.
                Dates = a[a>minDif].index
                flag = True
                c = 0
                while flag:
                    distancia = Dates[c] - peakIndex
                    if distancia > minDistance:
                        DatesEnds.append(Dates[c])
                        flag= False
                    c += 1
                    if c>maxSearch: flag = False
            Correct.append(0)
        except:
            # Any failure (e.g. no rise found): fall back to the peak itself
            # and mark the event as not correctly terminated.
            DatesEnds.append(peakIndex)
            Correct.append(1)
    # Return the end points and the peaks whose end was found.
    Correct = np.array(Correct)
    return pd.Series(Q[DatesEnds], index=DatesEnds), Qmax[Correct == 0]
# -
# ## Runoff analysis
# +
def Runoff_SeparateBaseflow(Qobs, Qsim):
    '''Resample observed/simulated flows to hourly scale and separate the
    observed series into runoff and baseflow.
    Parameters:
        - Qobs: observed record, dt < 1h.
        - Qsim: simulated record, dt < 1h.
    Returns:
        - Qh: hourly observed record (gaps and negatives filled with mean).
        - Qsh: hourly simulated record (gaps filled with zero).
        - Qsep: DataFrame with the separated Runoff/Baseflow of Qh.'''
    # Observed series to hourly scale; patch NaNs first, then negatives.
    hourly_obs = Qobs.resample('1h').mean()
    hourly_obs[np.isnan(hourly_obs)] = hourly_obs.mean()
    hourly_obs[hourly_obs < 0] = hourly_obs.mean()
    separated = DigitalFilters(hourly_obs, tipo='Nathan', a=0.998)
    # Simulated series to hourly scale; gaps become zero flow.
    hourly_sim = Qsim.resample('1h').mean()
    hourly_sim[np.isnan(hourly_sim)] = 0.0
    return hourly_obs, hourly_sim, separated
def Runoff_FindEvents(Qobs, Qsim, minTime = 1, minConcav = None, minPeak = None):
    '''Separates runoff from baseflow and finds the events.
    Parameters:
        - Qobs: Hourly observed streamflow.
        - Qsim: Hourly simulated streamflow.
        - minTime: minimum duration (days) of the event.
        - minConcav: minimum concavity of the event.
        - minPeak: minimum value of the peakflows.
    Returns:
        - pos1: pandas index lists with the initial positions.
        - pos2: pandas index lists with the end positions.'''
    # Candidate events: where the no-infiltration simulation exceeds the
    # 20th percentile of the observations.
    pos1, pos2 = __Runoff_Get_Events__(Qsim, np.percentile(Qobs, 20))
    # Bug fix: the original hard-coded minTime=1 here, silently ignoring the
    # caller's minTime argument.
    pos1, pos2 = __Runoff_Del_Events__(Qobs, pos1, pos2, minTime=minTime,
                                       minConcav=minConcav, minPeak=minPeak)
    return pos1, pos2
def Runoff_CompleteAnalysis(Area, Qobs, Rain, Qsep, pos1, pos2, N=None, Nant = None):
    '''Obtains the DataFrame with the resume of the RC analysis.
    Parameters:
        - Area: the area of the basin in km2.
        - Qobs: Hourly observed streamflow.
        - Rain: Hourly rainfall.
        - Qsep: Hourly DataFrame with the separated flows.
        - pos1: pandas index lists with the initial positions.
        - pos2: pandas index lists with the end positions.
        - N: Number of days to eval the rainfall between p1-N : p2.
          NOTE(review): when given, N is used directly as Ndays in date
          arithmetic, so it should be a pd.Timedelta — confirm with callers.
        - Nant: Number of antecedent days to eval the rainfall between
          p1-Nant : p1-N.
    Results:
        - DataFrame with the columns: RC, RainEvent, RainBefore, RainInt, Qmax'''
    # Choose the rainfall window N (days).
    if N is None:
        # Odd window size derived from the basin area, clipped to [3, 11].
        N = Area**0.2
        N = np.floor(N) // 2 * 2 + 1
        if N<3: N = 3
        if N>11: N = 11
        Ndays = pd.Timedelta(str(N)+'d')
        if Nant is None:
            Nant = pd.Timedelta(str(N+3)+'d')
    else:
        Ndays = N
        if Nant is None:
            Nant = N + pd.Timedelta('3d')
    # Accumulators, one entry per event.
    RC = []
    RainTot = []
    Date = []
    Qmax = []
    RainInt = []
    RainAnt = []
    # Per-event values.
    for pi,pf in zip(pos1, pos2):
        # Runoff volume [m3] (hourly flow summed * 3600 s) and rainfall
        # volume [m3] (mm -> m over the basin area in m2).
        Runoff = Qsep['Runoff'][pi:pf+Ndays].sum()*3600.
        Rainfall = (Rain[pi-Ndays:pf].sum()/1000.)*(Area*1e6)
        # Peak flow and runoff coefficient of the event.
        Qmax.append(Qobs[pi:pf].max())
        RC.append(Runoff / Rainfall)
        # Event rainfall totals, intensity and antecedent rainfall.
        RainTot.append(Rain[pi-Ndays:pf].sum())
        RainInt.append(Rain[pi-Ndays:pf].max())
        RainAnt.append(Rain[pi-Ndays-Nant:pi-Ndays].sum())
        # Event start date.
        Date.append(pi)
    # Convert the accumulators to arrays.
    RC = np.array(RC)
    RainTot = np.array(RainTot)
    RainInt = np.array(RainInt)
    RainAnt = np.array(RainAnt)
    Date = np.array(Date)
    Qmax = np.array(Qmax)
    # Keep only finite RC values in the physically meaningful (0, 1] range.
    p1 = np.where(np.isfinite(RC))[0]
    p2 = np.where((RC[p1]<=1.0) & (RC[p1]>0.0))[0]
    # The valid events.
    RC = RC[p1[p2]]
    RainTot = RainTot[p1[p2]]
    RainInt = RainInt[p1[p2]]
    RainAnt = RainAnt[p1[p2]]
    Date = Date[p1[p2]]
    Qmax = Qmax[p1[p2]]
    # The implausible ones: non-trivial RC with almost no rainfall.
    pos = np.where((RC>0.04) & (RainTot<10))[0]
    # Filter them out as well.
    RC = np.delete(RC, pos)
    RainTot = np.delete(RainTot, pos)
    RainInt = np.delete(RainInt, pos)
    RainAnt = np.delete(RainAnt, pos)
    Date = np.delete(Date, pos)
    Qmax = np.delete(Qmax, pos)
    # Assemble the final per-event table indexed by event start date.
    Data = pd.DataFrame(
        np.vstack([RC, RainTot, RainAnt, RainInt, Qmax]).T,
        index= Date,
        columns=['RC', 'RainEvent', 'RainBefore','RainInt','Qmax'])
    return Data
def Runoff_groupByRain(D, groupby='RainEvent', bins=None,
                       Vmin=None, Vmax=None, Nb=10, logx=True):
    '''Group the runoff coefficients by intervals of another variable.
    Parameters:
        - D: pandas DataFrame with the results from the RC analysis.
        - groupby: name of the column used to form the groups.
        - bins: explicit interval edges; when None, Nb evenly spaced edges
          between Vmin and Vmax are used.
        - Vmin / Vmax: limits of the grouping variable (default: data range).
        - Nb: number of edges when bins is None.
        - logx: group on the natural log of the variable.
    Returns:
        - dict with per-group RC values, their P25/P50/P75, the interval
          midpoints (X) and the grouping-variable values per group.'''
    # Optionally work on the log of the grouping variable.
    values = np.log(D[groupby]) if logx else D[groupby]
    low = values.min() if Vmin is None else Vmin
    high = values.max() if Vmax is None else Vmax
    edges = np.linspace(low, high, Nb) if bins is None else bins
    stats = {'RC': [], 'P25': [], 'P75': [], 'P50': [], 'X': [], groupby: []}
    # One group per consecutive pair of edges; empty groups are skipped.
    for lo, hi in zip(edges[:-1], edges[1:]):
        members = np.where((values >= lo) & (values <= hi))[0]
        if members.size == 0:
            continue
        rc_group = D['RC'][members]
        stats['RC'].append(rc_group)
        stats['P25'].append(np.percentile(rc_group, 25))
        stats['P50'].append(np.percentile(rc_group, 50))
        stats['P75'].append(np.percentile(rc_group, 75))
        stats['X'].append((lo + hi) / 2.)
        stats[groupby].append(values[members])
    return stats
#-------------------------------------------------------------------------------------------
## Background functions.
def __Runoff_Get_Events__(Q, Umbral):
    '''Obtain the start and end dates of the runoff events of a series
    (simulated with Asynch 190, no infiltration).
    Parameters:
        - Q: pandas Series with the streamflow.
        - Umbral: threshold above which runoff is assumed to occur.
    Returns:
        - pos1: start date of each event.
        - pos2: end date of each event.'''
    # Positions where the flow exceeds the threshold.
    exceed = np.where(Q.values > Umbral)[0]
    # A gap (> 1 step) between consecutive exceedances separates two events.
    gaps = exceed[1:] - exceed[:-1]
    after_gap = pd.Series(gaps, Q.index[exceed[1:]])
    starts = after_gap[after_gap > 1].index
    starts = starts.insert(0, Q.index[exceed][0])
    # Drop the last start so starts and ends have matching lengths (the
    # final event has no detected end).
    starts = starts[:-1]
    before_gap = pd.Series(gaps, Q.index[exceed[:-1]])
    ends = before_gap[before_gap > 1].index
    return starts, ends
def __Runoff_Get_eventsPeaks__(Q, pos1, pos2):
    '''Return the peak value inside each event window.
    Parameters:
        - Q: pandas Series with the observed data.
        - pos1: list with the start of each event.
        - pos2: list with the end of each event.
    Returns:
        - list with the peak value of every event.'''
    # NaN-safe maximum over each [start, end] label slice.
    return [np.nanmax(Q[start:end].values) for start, end in zip(pos1, pos2)]
def __Runoff_Del_Events__(Q, pos1, pos2, minTime = 2.5, minPeak = None, minConcav = None):
    '''Filter the selected events by duration, peak flow and concavity.
    Parameters:
        - Q: pandas Series with the observed streamflow.
        - pos1: pandas indexes with the start of the events.
        - pos2: pandas indexes with the end of the events.
        - minTime: minimum duration (days) of the hydrographs.
        - minPeak: minimum value of the peak of the hydrographs.
        - minConcav: minimum concavity for the hydrograph (suggested: 10).
    Returns:
        - pos1: pandas indexes with the corrected starts.
        - pos2: pandas indexes with the corrected ends.'''
    # Drop events shorter than minTime days.
    if minTime is not None:
        Td = pos2 - pos1
        Td = Td.total_seconds()/(3600*24)
        Td = Td.values
        p = np.where(Td<minTime)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    # Drop events whose peak flow is below minPeak.
    if minPeak is not None:
        # Bug fix: the original called the undefined name
        # Series_Get_eventsPeaks (NameError); the helper defined in this
        # module is __Runoff_Get_eventsPeaks__.
        Peaks = __Runoff_Get_eventsPeaks__(Q, pos1, pos2)
        Peaks = np.array(Peaks)
        p = np.where(Peaks<minPeak)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    # Drop events that are not concave enough (second difference of the
    # 5-hourly mean series).
    if minConcav is not None:
        Concav = Q.resample('5h').mean().diff(2)
        # Bug fix: same undefined-name problem as above.
        Concav = __Runoff_Get_eventsPeaks__(Concav, pos1, pos2)
        p = np.where(np.array(Concav)<minConcav)[0]
        pos1 = pos1.delete(p)
        pos2 = pos2.delete(p)
    return pos1, pos2
# -
# ## Recession analysis
# +
#Function to fit the recession coefficient A for a given exponent B
def Recession_NDF_method(l):
    '''Fit the recession parameter A for a fixed exponent B and score it.

    Parameters (packed in a list so the function can be used with Pool.map):
        - l[0]: pandas Series of the streamflow data with a datetime index
          (the docstring originally said np.ndarray, but the code uses
          .index and .values, i.e. a Series).
        - l[1]: exponent B, between 0 and 5.
          NOTE(review): B == 1 makes (1-B) zero and divides by zero in
          Estimate_Q — confirm the caller's grid avoids exactly 1.
    Returns:
        - (A, E, Qsim): fitted coefficient, relative volume error (1000 when
          the fitted curve contains NaNs), and the fitted series.'''
    # Closed-form A for the given B from the finite differences of Q.
    def Estimate_A(Q,B,dt):
        e1 = np.nansum((Q.values[:-1] - Q.values[1:]))
        e2 = dt * np.nansum(((Q.values[:-1] - Q.values[1:])/2.)**B)
        return e1/e2
    # Analytical recession curve for the pair (A, B).
    def Estimate_Q(Q, B, A):
        '''Obtains the estimated Q for a given A and B, starting from the
        first value of Q and the elapsed time of its index.'''
        # Convert the datetime index to elapsed hours since the first record.
        t = Q.index.astype('int64') / 1e9
        t = (t.values - t.values[0])/3600.
        Qo = Q.values[0]
        # Nonlinear reservoir recession solution.
        return Qo * (1 - ( (1.-B)*A*t / Qo**(1.-B) )) ** (1./(1.-B))
    def Estimate_error(Qobs, Qsim):
        '''Relative volume error of the fitted curve.'''
        Vsim = Qsim.sum()
        Vobs = Qobs.sum()
        return (Vsim - Vobs) / Vsim
    # Time step in seconds from the first two index entries.
    dt = l[0].index[1] - l[0].index[0]
    dt = dt.value / 1e9
    # Fit A, then evaluate the curve.
    A = Estimate_A(l[0],l[1],dt)
    Qsim = Estimate_Q(l[0],l[1], A)
    CountNaN = Qsim[np.isnan(Qsim)].size
    # NaNs in the curve mean the (A, B) pair is invalid: penalize hard.
    if CountNaN == 0:
        E = Estimate_error(l[0],Qsim)
    else:
        E = 1000
    return A, E, Qsim
# search B for recession
def Recession_Search_NDF(Q, Initial = 0, Long=1 ,process = 8, Window = 1, step = 0.01):
    '''Search the optimum A and B recession parameters for a hydrograph.
    Parameters:
        - Q: pandas Series with the streamflow records.
        - Initial: offset (hours) after the peak where the recession starts.
        - Long: recession length in days.
        - process: number of worker processes used for the grid search.
        - Window: NOTE(review) declared but unused; kept for compatibility.
        - step: step of the grid of B values explored in [0, 5).
    Returns:
        - (B, A, error, Qsim): best exponent, its coefficient, the absolute
          volume error, and the fitted series aligned with the clipped data.'''
    # Bug fix: Pool was used without ever being imported in the module.
    from multiprocessing import Pool
    # Clip the recession limb: from the peak (+Initial hours) onwards.
    dis_i = pd.Timedelta(hours = Initial)
    dis_f = pd.Timedelta(hours = 24*Long)
    X = Q[Q.idxmax()+dis_i:Q.idxmax()+dis_f+dis_i]
    # Grid of candidate B values; each worker fits A for one of them.
    B = np.arange(0, 5., step)
    L = [[X, b] for b in B]
    # Bug fix: the original mapped the undefined name NDF_method; the worker
    # defined in this module is Recession_NDF_method.
    p = Pool(processes=process)
    Res = p.map(Recession_NDF_method, L)
    p.close()
    p.join()
    # Pick the candidate with the smallest absolute volume error.
    Error = np.abs([i[1] for i in Res])
    PosEr = np.argmin(Error)
    return B[PosEr], Res[PosEr][0], Error[PosEr], pd.Series(Res[PosEr][2], X.index)
# -
| [
"pandas.Series",
"numpy.abs",
"numpy.where",
"numpy.delete",
"pandas.Timedelta",
"numpy.log",
"numpy.floor",
"numpy.array",
"numpy.zeros",
"numpy.linspace",
"numpy.isnan",
"numpy.vstack",
"numpy.isfinite",
"numpy.nanmax",
"numpy.argmin",
"numpy.percentile",
"numpy.nansum",
"numpy.a... | [((3125, 3144), 'pandas.Timedelta', 'pd.Timedelta', (['"""12h"""'], {}), "('12h')\n", (3137, 3144), True, 'import pandas as pd\n'), ((4823, 4856), 'pandas.Series', 'pd.Series', (['dX'], {'index': 'X.index[:-1]'}), '(dX, index=X.index[:-1])\n', (4832, 4856), True, 'import pandas as pd\n'), ((5690, 5707), 'numpy.array', 'np.array', (['Correct'], {}), '(Correct)\n', (5698, 5707), True, 'import numpy as np\n'), ((9215, 9227), 'numpy.array', 'np.array', (['RC'], {}), '(RC)\n', (9223, 9227), True, 'import numpy as np\n'), ((9242, 9259), 'numpy.array', 'np.array', (['RainTot'], {}), '(RainTot)\n', (9250, 9259), True, 'import numpy as np\n'), ((9274, 9291), 'numpy.array', 'np.array', (['RainInt'], {}), '(RainInt)\n', (9282, 9291), True, 'import numpy as np\n'), ((9306, 9323), 'numpy.array', 'np.array', (['RainAnt'], {}), '(RainAnt)\n', (9314, 9323), True, 'import numpy as np\n'), ((9335, 9349), 'numpy.array', 'np.array', (['Date'], {}), '(Date)\n', (9343, 9349), True, 'import numpy as np\n'), ((9361, 9375), 'numpy.array', 'np.array', (['Qmax'], {}), '(Qmax)\n', (9369, 9375), True, 'import numpy as np\n'), ((9765, 9783), 'numpy.delete', 'np.delete', (['RC', 'pos'], {}), '(RC, pos)\n', (9774, 9783), True, 'import numpy as np\n'), ((9798, 9821), 'numpy.delete', 'np.delete', (['RainTot', 'pos'], {}), '(RainTot, pos)\n', (9807, 9821), True, 'import numpy as np\n'), ((9836, 9859), 'numpy.delete', 'np.delete', (['RainInt', 'pos'], {}), '(RainInt, pos)\n', (9845, 9859), True, 'import numpy as np\n'), ((9874, 9897), 'numpy.delete', 'np.delete', (['RainAnt', 'pos'], {}), '(RainAnt, pos)\n', (9883, 9897), True, 'import numpy as np\n'), ((9909, 9929), 'numpy.delete', 'np.delete', (['Date', 'pos'], {}), '(Date, pos)\n', (9918, 9929), True, 'import numpy as np\n'), ((9941, 9961), 'numpy.delete', 'np.delete', (['Qmax', 'pos'], {}), '(Qmax, pos)\n', (9950, 9961), True, 'import numpy as np\n'), ((12409, 12442), 'pandas.Series', 'pd.Series', (['Dpos', 'Q.index[pos[1:]]'], {}), 
'(Dpos, Q.index[pos[1:]])\n', (12418, 12442), True, 'import pandas as pd\n'), ((12551, 12585), 'pandas.Series', 'pd.Series', (['Dpos', 'Q.index[pos[:-1]]'], {}), '(Dpos, Q.index[pos[:-1]])\n', (12560, 12585), True, 'import pandas as pd\n'), ((16982, 17009), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': 'Initial'}), '(hours=Initial)\n', (16994, 17009), True, 'import pandas as pd\n'), ((17024, 17053), 'pandas.Timedelta', 'pd.Timedelta', ([], {'hours': '(24 * Long)'}), '(hours=24 * Long)\n', (17036, 17053), True, 'import pandas as pd\n'), ((17226, 17249), 'numpy.arange', 'np.arange', (['(0)', '(5.0)', 'step'], {}), '(0, 5.0, step)\n', (17235, 17249), True, 'import numpy as np\n'), ((17413, 17440), 'numpy.abs', 'np.abs', (['[i[1] for i in Res]'], {}), '([i[1] for i in Res])\n', (17419, 17440), True, 'import numpy as np\n'), ((17453, 17469), 'numpy.argmin', 'np.argmin', (['Error'], {}), '(Error)\n', (17462, 17469), True, 'import numpy as np\n'), ((1303, 1319), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (1311, 1319), True, 'import numpy as np\n'), ((1963, 1979), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (1971, 1979), True, 'import numpy as np\n'), ((2388, 2404), 'numpy.zeros', 'np.zeros', (['Q.size'], {}), '(Q.size)\n', (2396, 2404), True, 'import numpy as np\n'), ((5719, 5759), 'pandas.Series', 'pd.Series', (['Q[DatesEnds]'], {'index': 'DatesEnds'}), '(Q[DatesEnds], index=DatesEnds)\n', (5728, 5759), True, 'import pandas as pd\n'), ((6292, 6304), 'numpy.isnan', 'np.isnan', (['Qh'], {}), '(Qh)\n', (6300, 6304), True, 'import numpy as np\n'), ((6500, 6513), 'numpy.isnan', 'np.isnan', (['Qsh'], {}), '(Qsh)\n', (6508, 6513), True, 'import numpy as np\n'), ((7195, 7218), 'numpy.percentile', 'np.percentile', (['Qobs', '(20)'], {}), '(Qobs, 20)\n', (7208, 7218), True, 'import numpy as np\n'), ((9454, 9496), 'numpy.where', 'np.where', (['((RC[p1] <= 1.0) & (RC[p1] > 0.0))'], {}), '((RC[p1] <= 1.0) & (RC[p1] > 0.0))\n', (9462, 9496), True, 
'import numpy as np\n'), ((9696, 9734), 'numpy.where', 'np.where', (['((RC > 0.04) & (RainTot < 10))'], {}), '((RC > 0.04) & (RainTot < 10))\n', (9704, 9734), True, 'import numpy as np\n'), ((10889, 10907), 'numpy.log', 'np.log', (['D[groupby]'], {}), '(D[groupby])\n', (10895, 10907), True, 'import numpy as np\n'), ((11089, 11116), 'numpy.linspace', 'np.linspace', (['Vmin', 'Vmax', 'Nb'], {}), '(Vmin, Vmax, Nb)\n', (11100, 11116), True, 'import numpy as np\n'), ((12306, 12333), 'numpy.where', 'np.where', (['(Q.values > Umbral)'], {}), '(Q.values > Umbral)\n', (12314, 12333), True, 'import numpy as np\n'), ((14453, 14468), 'numpy.array', 'np.array', (['Peaks'], {}), '(Peaks)\n', (14461, 14468), True, 'import numpy as np\n'), ((15274, 15313), 'numpy.nansum', 'np.nansum', (['(Q.values[:-1] - Q.values[1:])'], {}), '(Q.values[:-1] - Q.values[1:])\n', (15283, 15313), True, 'import numpy as np\n'), ((17550, 17583), 'pandas.Series', 'pd.Series', (['Res[PosEr][2]', 'X.index'], {}), '(Res[PosEr][2], X.index)\n', (17559, 17583), True, 'import pandas as pd\n'), ((2874, 2891), 'numpy.vstack', 'np.vstack', (['[R, B]'], {}), '([R, B])\n', (2883, 2891), True, 'import numpy as np\n'), ((9425, 9440), 'numpy.isfinite', 'np.isfinite', (['RC'], {}), '(RC)\n', (9436, 9440), True, 'import numpy as np\n'), ((10030, 10078), 'numpy.vstack', 'np.vstack', (['[RC, RainTot, RainAnt, RainInt, Qmax]'], {}), '([RC, RainTot, RainAnt, RainInt, Qmax])\n', (10039, 10078), True, 'import numpy as np\n'), ((11290, 11319), 'numpy.where', 'np.where', (['((x >= i) & (x <= j))'], {}), '((x >= i) & (x <= j))\n', (11298, 11319), True, 'import numpy as np\n'), ((13149, 13175), 'numpy.nanmax', 'np.nanmax', (['Q[p1:p2].values'], {}), '(Q[p1:p2].values)\n', (13158, 13175), True, 'import numpy as np\n'), ((14202, 14224), 'numpy.where', 'np.where', (['(Td < minTime)'], {}), '(Td < minTime)\n', (14210, 14224), True, 'import numpy as np\n'), ((14501, 14526), 'numpy.where', 'np.where', (['(Peaks < minPeak)'], {}), 
'(Peaks < minPeak)\n', (14509, 14526), True, 'import numpy as np\n'), ((15334, 15388), 'numpy.nansum', 'np.nansum', (['(((Q.values[:-1] - Q.values[1:]) / 2.0) ** B)'], {}), '(((Q.values[:-1] - Q.values[1:]) / 2.0) ** B)\n', (15343, 15388), True, 'import numpy as np\n'), ((16376, 16390), 'numpy.isnan', 'np.isnan', (['Qsim'], {}), '(Qsim)\n', (16384, 16390), True, 'import numpy as np\n'), ((3541, 3562), 'numpy.isfinite', 'np.isfinite', (['Q.values'], {}), '(Q.values)\n', (3552, 3562), True, 'import numpy as np\n'), ((8476, 8494), 'pandas.Timedelta', 'pd.Timedelta', (['"""3d"""'], {}), "('3d')\n", (8488, 8494), True, 'import pandas as pd\n'), ((11423, 11452), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(25)'], {}), "(D['RC'][p], 25)\n", (11436, 11452), True, 'import numpy as np\n'), ((11489, 11518), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(50)'], {}), "(D['RC'][p], 50)\n", (11502, 11518), True, 'import numpy as np\n'), ((11555, 11584), 'numpy.percentile', 'np.percentile', (["D['RC'][p]", '(75)'], {}), "(D['RC'][p], 75)\n", (11568, 11584), True, 'import numpy as np\n'), ((8209, 8220), 'numpy.floor', 'np.floor', (['N'], {}), '(N)\n', (8217, 8220), True, 'import numpy as np\n'), ((14865, 14881), 'numpy.array', 'np.array', (['Concav'], {}), '(Concav)\n', (14873, 14881), True, 'import numpy as np\n')] |
# +
import argparse
import os
import pickle
import sys
sys.path.append("..")
import numpy as np
import torchvision
import torchvision.transforms as T
import torch.utils.data as torch_data
from tqdm import tqdm
from models.classifiers import EvalCompoundResNet
# -
def parse_args(argv=None):
    """Parse command-line options for the indexing tool.

    Args:
        argv: optional list of argument strings; defaults to
            ``sys.argv[1:]``. Passing an explicit list makes the parser
            unit-testable without touching the process arguments.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-F', '--function', type=str, required=True,
                        choices=['max_index', 'count_data'])
    parser.add_argument('-O', '--output_path', type=str, required=True)
    # num_attr is consumed as range(args.num_attr), so it must be an int.
    # It was previously declared type=str, which made any explicit
    # "--num_attr N" on the command line crash later in range().
    parser.add_argument('--num_attr', type=int, default=8)
    # Integer default to match the declared type=int (argparse does not
    # run `type` on default values, so 1e5 would stay a float).
    parser.add_argument('--sample_per_category', type=int, default=100000)
    parser.add_argument('--weight_path', type=str,
                        default='/home/u5397696/interpolation/celebA-hq-classifier/')
    parser.add_argument('--data_root', type=str,
                        default='/home/u5397696/interpolation/interfacegan/data/tmp')
    return parser.parse_args(argv)
def max_index(args):
    """Report the largest image index stored in a pickled data index.

    Loads ``args.output_path`` (a list with one ``[positive_ids,
    negative_ids]`` pair per attribute), prints the per-attribute maxima
    and finally the overall maximum.

    Raises:
        ValueError: if ``args.output_path`` does not exist.
    """
    if not os.path.exists(args.output_path):
        raise ValueError(f"{args.output_path} doesn't exist.")
    with open(args.output_path, 'rb') as f:
        data_index = pickle.load(f)
    print(f'#attributes: {len(data_index)}')
    overall = -1e9
    for attr_id, entry in enumerate(data_index):
        # entry[0] / entry[1] hold the positive / negative sample ids.
        largest_pos = np.max(entry[0])
        largest_neg = np.max(entry[1])
        overall = np.max([overall, largest_pos, largest_neg])
        print(attr_id, largest_pos, largest_neg)
    print(f'Max index is {overall}')
def count_data(args):
    """Build a per-attribute index of positive/negative images.

    Runs the pretrained attribute classifier over every image in
    ``args.data_root`` and records, for each of ``args.num_attr``
    attributes, which image indices were classified positive and which
    negative (capped at ``args.sample_per_category`` per side). The
    resulting index — a list of ``[positive_ids, negative_ids]`` pairs —
    is pickled to ``args.output_path``.

    Note: requires a CUDA device and the classifier weights at
    ``args.weight_path``.
    """
    # Overwrite check intentionally disabled; re-enable if clobbering
    # an existing index must be an error:
    # if os.path.exists(args.output_path):
    #     raise ValueError(f"{args.output_path} has existed.")
    t = T.Compose([T.Resize(224), T.ToTensor()])
    dset = torchvision.datasets.ImageFolder(args.data_root, transform=t)
    loader = torch_data.DataLoader(dset, batch_size=32, shuffle=False,
                                   num_workers=4, pin_memory=True)
    print(f'Start processing {os.path.basename(args.data_root)}.')
    m = EvalCompoundResNet(args.weight_path).cuda()
    # data_index[attr] == [positive image ids, negative image ids]
    data_index = [[[], []] for _ in range(args.num_attr)]
    image_cnt = 0
    for bid, (imgs, _) in enumerate(loader):
        imgs = imgs.cuda()
        preds = m.predict_quantize(imgs)
        for pred in preds:  # was enumerate(preds) with an unused index
            is_save = False
            for ind in range(args.num_attr):
                # Record the image only while the per-category quota has
                # not been reached yet.
                if pred[ind] == True and len(data_index[ind][0]) < args.sample_per_category:
                    is_save = True
                    data_index[ind][0].append(image_cnt)
                elif pred[ind] == False and len(data_index[ind][1]) < args.sample_per_category:
                    is_save = True
                    data_index[ind][1].append(image_cnt)
            # Only images recorded for at least one attribute consume an
            # index slot.
            if is_save:
                image_cnt += 1
        if bid % 10 == 0:
            for i in range(args.num_attr):
                print(i, len(data_index[i][0]), len(data_index[i][1]))
            print(f'Processes {bid}/{len(loader)}.')
    with open(args.output_path, 'wb') as f:
        pickle.dump(data_index, f)
def main():
    """Entry point: dispatch to the sub-command chosen via -F."""
    args = parse_args()
    # argparse `choices` guarantees args.function is one of these keys.
    handler = {'max_index': max_index, 'count_data': count_data}.get(args.function)
    if handler is not None:
        handler(args)


if __name__ == '__main__':
    main()
| [
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"pickle.load",
"numpy.max",
"torchvision.datasets.ImageFolder",
"models.classifiers.EvalCompoundResNet",
"os.path.basename",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"sys.path.... | [((55, 76), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (70, 76), False, 'import sys\n'), ((300, 325), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (323, 325), False, 'import argparse\n'), ((1629, 1690), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', (['args.data_root'], {'transform': 't'}), '(args.data_root, transform=t)\n', (1661, 1690), False, 'import torchvision\n'), ((1704, 1797), 'torch.utils.data.DataLoader', 'torch_data.DataLoader', (['dset'], {'batch_size': '(32)', 'shuffle': '(False)', 'num_workers': '(4)', 'pin_memory': '(True)'}), '(dset, batch_size=32, shuffle=False, num_workers=4,\n pin_memory=True)\n', (1725, 1797), True, 'import torch.utils.data as torch_data\n'), ((930, 962), 'os.path.exists', 'os.path.exists', (['args.output_path'], {}), '(args.output_path)\n', (944, 962), False, 'import os\n'), ((1101, 1115), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1112, 1115), False, 'import pickle\n'), ((1238, 1262), 'numpy.max', 'np.max', (['data_index[i][0]'], {}), '(data_index[i][0])\n', (1244, 1262), True, 'import numpy as np\n'), ((1279, 1303), 'numpy.max', 'np.max', (['data_index[i][1]'], {}), '(data_index[i][1])\n', (1285, 1303), True, 'import numpy as np\n'), ((1322, 1353), 'numpy.max', 'np.max', (['[max_val, max_p, max_n]'], {}), '([max_val, max_p, max_n])\n', (1328, 1353), True, 'import numpy as np\n'), ((1588, 1601), 'torchvision.transforms.Resize', 'T.Resize', (['(224)'], {}), '(224)\n', (1596, 1601), True, 'import torchvision.transforms as T\n'), ((1603, 1615), 'torchvision.transforms.ToTensor', 'T.ToTensor', ([], {}), '()\n', (1613, 1615), True, 'import torchvision.transforms as T\n'), ((1871, 1907), 'models.classifiers.EvalCompoundResNet', 'EvalCompoundResNet', (['args.weight_path'], {}), '(args.weight_path)\n', (1889, 1907), False, 'from models.classifiers import EvalCompoundResNet\n'), ((1826, 1858), 'os.path.basename', 'os.path.basename', 
(['args.data_root'], {}), '(args.data_root)\n', (1842, 1858), False, 'import os\n'), ((2918, 2944), 'pickle.dump', 'pickle.dump', (['data_index', 'f'], {}), '(data_index, f)\n', (2929, 2944), False, 'import pickle\n')] |
from bitcom.client.system_client import SystemClient
from bitcom.utils import *
from bitcom.constant import *
# Demo script: exercise the public "system" endpoints and log each response.
client = SystemClient(
    url=USER1_HOST,
    access_key=USER1_ACCESS_KEY,
    secret_key=USER1_SECRET_KEY,
)

# Server clock — handy for request-signing sanity checks.
LogInfo.output("Get server timestamp: ", client.get_system_timestamp())

# Deployed API version.
LogInfo.output("Get API version: ", client.get_system_version())

# Whether the exchange is still in cancel-only mode after maintenance.
LogInfo.output("Get cancel-only status after system maintenance: ",
               client.get_system_cod_status())
| [
"bitcom.client.system_client.SystemClient"
] | [((127, 218), 'bitcom.client.system_client.SystemClient', 'SystemClient', ([], {'url': 'USER1_HOST', 'access_key': 'USER1_ACCESS_KEY', 'secret_key': 'USER1_SECRET_KEY'}), '(url=USER1_HOST, access_key=USER1_ACCESS_KEY, secret_key=\n USER1_SECRET_KEY)\n', (139, 218), False, 'from bitcom.client.system_client import SystemClient\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/GG_start.ui'
#
# Created by: PyQt5 UI code generator 5.10.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from utilGui import Names
class Ui_main_window(object):
    """UI definition for the 'Alchemist' main window (pyuic-generated).

    Builds the search-space min/max table, the optimisation-method
    controls (method combo, TG spin box, amount spin box), the result
    table and the Run/Save/Discard/Clean-All buttons. Widget creation
    order matters for Z-order/tab order — do not reorder statements.
    """

    def setupUi(self, main_window):
        """Create and position every widget on *main_window*.

        :param main_window: the QMainWindow being populated.
        """
        main_window.setObjectName("main_window")
        main_window.resize(684, 828)
        main_window.setWindowTitle('Alchemist');
        self.centralwidget = QtWidgets.QWidget(main_window)
        self.centralwidget.setObjectName("centralwidget")
        # run_btn: green button that starts the optimisation run
        self.run_btn = QtWidgets.QPushButton(self.centralwidget)
        self.run_btn.setText("Run")
        self.run_btn.setGeometry(QtCore.QRect(510, 20, 121, 30))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.run_btn.setFont(font)
        self.run_btn.setStyleSheet("border-color: rgb(114, 159, 207);\n"
            "background-color: rgb(78, 154, 6);")
        self.run_btn.setObjectName("run_btn")
        # min_max table: one row per compound (names imported from
        # utilGui.Names), two columns for the min/max search bounds
        self.min_max_table = QtWidgets.QTableWidget(self.centralwidget)
        self.min_max_table.setGeometry(QtCore.QRect(20, 100, 421, 192))
        self.min_max_table.setObjectName("min_max_table")
        self.min_max_table.setColumnCount(2)
        self.min_max_table.setRowCount(len(Names.Chemical_Compounds))
        for i in range(len(Names.Chemical_Compounds)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Compounds[i])
            self.min_max_table.setVerticalHeaderItem(i, item)
        # "min" column header: italic on a light-blue background
        item = QtWidgets.QTableWidgetItem("min")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(0, item)
        # "max" column header, styled identically
        item = QtWidgets.QTableWidgetItem("max")
        font = QtGui.QFont()
        font.setBold(False)
        font.setItalic(True)
        font.setWeight(50)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        self.min_max_table.setHorizontalHeaderItem(1, item)
        # Default search bounds: every compound starts at [0.0, 1.0]
        for i in range(len(Names.Chemical_Compounds)):
            self.min_max_table.setItem(
                i, 0, QtWidgets.QTableWidgetItem("0.0"))
            self.min_max_table.setItem(
                i, 1, QtWidgets.QTableWidgetItem("1.0"))
        self.min_max_label = QtWidgets.QLabel(self.centralwidget)
        self.min_max_label.setGeometry(QtCore.QRect(20, 70, 421, 22))
        self.min_max_label.setObjectName("min_max_label")
        self.min_max_label.setText("Search space limitation:")
        # opt_label: caption above the method combo box
        self.opt_label = QtWidgets.QLabel(self.centralwidget)
        self.opt_label.setGeometry(QtCore.QRect(460, 100, 201, 22))
        self.opt_label.setObjectName("opt_label")
        self.opt_label.setText("Methods:")
        # amount: how many candidate solutions to produce (1..10000)
        self.amount_sp = QtWidgets.QSpinBox(self.centralwidget)
        self.amount_sp.setGeometry(QtCore.QRect(460, 270, 201, 31))
        self.amount_sp.setMinimum(1)
        self.amount_sp.setMaximum(10000)
        self.amount_sp.setValue(1)
        self.amount_sp.setObjectName("amount_sp")
        self.amount_label = QtWidgets.QLabel(self.centralwidget)
        self.amount_label.setGeometry(QtCore.QRect(460, 240, 201, 22))
        self.amount_label.setObjectName("amount_label")
        self.amount_label.setText("Amount:")
        # tg: target TG value, integer display (0 decimals), step 5,
        # range 0..50000, default 700
        self.tg_dsb = QtWidgets.QDoubleSpinBox(self.centralwidget)
        self.tg_dsb.setGeometry(QtCore.QRect(460, 200, 201, 31))
        self.tg_dsb.setMaximum(50000)
        self.tg_dsb.setMinimum(0)
        self.tg_dsb.setDecimals(0)
        self.tg_dsb.setSingleStep(5)
        self.tg_dsb.setObjectName("tg_dsb")
        self.tg_dsb.setValue(700)
        self.tg_label = QtWidgets.QLabel(self.centralwidget)
        self.tg_label.setGeometry(QtCore.QRect(460, 170, 201, 22))
        self.tg_label.setObjectName("tg_label")
        self.tg_label.setText("TG:")
        # opt_cb: optimisation method selector (SA / PSO / RS)
        self.opt_cb = QtWidgets.QComboBox(self.centralwidget)
        self.opt_cb.setGeometry(QtCore.QRect(460, 130, 201, 30))
        self.opt_cb.setObjectName("opt_cb")
        self.opt_cb.addItem("SA")
        self.opt_cb.addItem("PSO")
        self.opt_cb.addItem("RS")
        # result_tb: one column per chemical element plus a trailing
        # "TG" column.
        # NOTE(review): setColumnCount(46) assumes
        # len(Names.Chemical_Elemnts) + 1 <= 46 — confirm against utilGui.
        self.result_label = QtWidgets.QLabel(self.centralwidget)
        self.result_label.setGeometry(QtCore.QRect(20, 350, 641, 22))
        self.result_label.setObjectName("result_label")
        self.result_label.setText("Results:")
        self.result_tb = QtWidgets.QTableWidget(self.centralwidget)
        self.result_tb.setGeometry(QtCore.QRect(20, 380, 641, 341))
        self.result_tb.setObjectName("result_table")
        self.result_tb.setColumnCount(46)
        # self.result_tb.setRowCount(1)
        for i in range(len(Names.Chemical_Elemnts)):
            item = QtWidgets.QTableWidgetItem(Names.Chemical_Elemnts[i])
            self.result_tb.setHorizontalHeaderItem(i, item)
            font = QtGui.QFont()
            font.setItalic(True)
            item.setFont(font)
            item.setBackground(QtGui.QColor(114, 159, 207))
        # Final "TG" header column; relies on `i` keeping its value from
        # the loop above (i+1 == column right after the last element).
        item = QtWidgets.QTableWidgetItem("TG")
        self.result_tb.setHorizontalHeaderItem(i+1, item)
        font = QtGui.QFont()
        font.setItalic(True)
        item.setFont(font)
        item.setBackground(QtGui.QColor(114, 159, 207))
        # discard_btn: throw away the current results
        self.discard_btn = QtWidgets.QPushButton(self.centralwidget)
        self.discard_btn.setText("Discard")
        self.discard_btn.setGeometry(QtCore.QRect(540, 730, 122, 30))
        self.discard_btn.setObjectName("discard_btn")
        # save_btn: persist the current results
        self.save_btn = QtWidgets.QPushButton(self.centralwidget)
        self.save_btn.setText("Save")
        self.save_btn.setGeometry(QtCore.QRect(400, 730, 122, 30))
        self.save_btn.setToolTip("")
        self.save_btn.setObjectName("save_btn")
        # clean_all_btn: reset the whole form.
        # NOTE(review): "Clan All" is a user-visible typo for "Clean All";
        # left unchanged here because it is a runtime string.
        self.clean_all_btn = QtWidgets.QPushButton(self.centralwidget)
        self.clean_all_btn.setGeometry(QtCore.QRect(20, 730, 122, 30))
        self.clean_all_btn.setObjectName("clean_all_btn")
        self.clean_all_btn.setText("Clan All")
        main_window.setCentralWidget(self.centralwidget)
        # Standard menu bar and status bar for the main window
        self.menubar = QtWidgets.QMenuBar(main_window)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 684, 27))
        self.menubar.setObjectName("menubar")
        main_window.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(main_window)
        self.statusbar.setObjectName("statusbar")
        main_window.setStatusBar(self.statusbar)
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QTableWidget",
"PyQt5.QtWidgets.QSpinBox",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QDoubleSpinBox",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtGui.QColor",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QTableWi... | [((500, 530), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['main_window'], {}), '(main_window)\n', (517, 530), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((631, 672), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (652, 672), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((789, 802), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (800, 802), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1103, 1145), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1125, 1145), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1600, 1633), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""min"""'], {}), "('min')\n", (1626, 1633), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1649, 1662), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1660, 1662), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1905, 1938), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""max"""'], {}), "('max')\n", (1931, 1938), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1954, 1967), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1965, 1967), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2474, 2510), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2490, 2510), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2748, 2784), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2764, 2784), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2989, 3027), 'PyQt5.QtWidgets.QSpinBox', 'QtWidgets.QSpinBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3007, 3027), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3287, 3323), 'PyQt5.QtWidgets.QLabel', 
'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3303, 3323), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3532, 3576), 'PyQt5.QtWidgets.QDoubleSpinBox', 'QtWidgets.QDoubleSpinBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3556, 3576), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3888, 3924), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3904, 3924), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4117, 4156), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4136, 4156), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4418, 4454), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4434, 4454), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4652, 4694), 'PyQt5.QtWidgets.QTableWidget', 'QtWidgets.QTableWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (4674, 4694), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5258, 5290), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""TG"""'], {}), "('TG')\n", (5284, 5290), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5364, 5377), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5375, 5377), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5540, 5581), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5561, 5581), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5794, 5835), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (5815, 5835), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6080, 6121), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (6101, 6121), False, 'from PyQt5 import 
QtCore, QtGui, QtWidgets\n'), ((6379, 6410), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['main_window'], {}), '(main_window)\n', (6397, 6410), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6589, 6622), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['main_window'], {}), '(main_window)\n', (6609, 6622), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((742, 772), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(510)', '(20)', '(121)', '(30)'], {}), '(510, 20, 121, 30)\n', (754, 772), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1185, 1216), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(100)', '(421)', '(192)'], {}), '(20, 100, 421, 192)\n', (1197, 1216), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1466, 1521), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['Names.Chemical_Compounds[i]'], {}), '(Names.Chemical_Compounds[i])\n', (1492, 1521), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1801, 1828), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(114)', '(159)', '(207)'], {}), '(114, 159, 207)\n', (1813, 1828), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2106, 2133), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(114)', '(159)', '(207)'], {}), '(114, 159, 207)\n', (2118, 2133), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2550, 2579), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(70)', '(421)', '(22)'], {}), '(20, 70, 421, 22)\n', (2562, 2579), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2820, 2851), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(100)', '(201)', '(22)'], {}), '(460, 100, 201, 22)\n', (2832, 2851), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3063, 3094), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(270)', '(201)', '(31)'], {}), '(460, 270, 201, 31)\n', (3075, 3094), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3362, 3393), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(240)', '(201)', 
'(22)'], {}), '(460, 240, 201, 22)\n', (3374, 3393), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3609, 3640), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(200)', '(201)', '(31)'], {}), '(460, 200, 201, 31)\n', (3621, 3640), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3959, 3990), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(170)', '(201)', '(22)'], {}), '(460, 170, 201, 22)\n', (3971, 3990), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4189, 4220), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(460)', '(130)', '(201)', '(30)'], {}), '(460, 130, 201, 30)\n', (4201, 4220), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4493, 4523), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(350)', '(641)', '(22)'], {}), '(20, 350, 641, 22)\n', (4505, 4523), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4730, 4761), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(380)', '(641)', '(341)'], {}), '(20, 380, 641, 341)\n', (4742, 4761), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4971, 5024), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['Names.Chemical_Elemnts[i]'], {}), '(Names.Chemical_Elemnts[i])\n', (4997, 5024), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5104, 5117), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (5115, 5117), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5461, 5488), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(114)', '(159)', '(207)'], {}), '(114, 159, 207)\n', (5473, 5488), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5663, 5694), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(540)', '(730)', '(122)', '(30)'], {}), '(540, 730, 122, 30)\n', (5675, 5694), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5908, 5939), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(400)', '(730)', '(122)', '(30)'], {}), '(400, 730, 122, 30)\n', (5920, 5939), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6161, 6191), 
'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(20)', '(730)', '(122)', '(30)'], {}), '(20, 730, 122, 30)\n', (6173, 6191), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6444, 6471), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(684)', '(27)'], {}), '(0, 0, 684, 27)\n', (6456, 6471), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2313, 2346), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""0.0"""'], {}), "('0.0')\n", (2339, 2346), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2410, 2443), 'PyQt5.QtWidgets.QTableWidgetItem', 'QtWidgets.QTableWidgetItem', (['"""1.0"""'], {}), "('1.0')\n", (2436, 2443), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5213, 5240), 'PyQt5.QtGui.QColor', 'QtGui.QColor', (['(114)', '(159)', '(207)'], {}), '(114, 159, 207)\n', (5225, 5240), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from oslo_config import cfg
from gbpservice.contrib.nfp.configurator.agents import firewall as fw
from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const
from gbpservice.contrib.tests.unit.nfp.configurator.test_data import (
fw_test_data as fo)
class FWaasRpcManagerTestCase(base.BaseTestCase):
    """Unit tests for the firewall agent's RPC manager entry points."""

    def __init__(self, *args, **kwargs):
        super(FWaasRpcManagerTestCase, self).__init__(*args, **kwargs)
        self.fo = fo.FakeObjects()

    @mock.patch(__name__ + '.fo.FakeObjects.sc')
    @mock.patch(__name__ + '.fo.FakeObjects.conf')
    def _get_FWaasRpcManager_object(self, conf, sc):
        """Build an FWaasRpcManager wired to mocked collaborators.

        :param conf: mocked oslo configuration (injected by mock.patch)
        :param sc: mocked service controller of the process model
            framework (injected by mock.patch)

        :returns: tuple of (RPC manager, mocked service controller)
        """
        return fw.FWaasRpcManager(sc, conf), sc

    def _test_event_creation(self, method):
        """Invoke one RPC entry point and verify the event it enqueues.

        :param method: event id; its lower-cased form is also the name
            of the RPC method under test.
        """
        agent, sc = self._get_FWaasRpcManager_object()
        context = {}
        with mock.patch.object(sc, 'new_event',
                               return_value='foo') as new_event_mock, \
                mock.patch.object(sc, 'post_event') as post_event_mock:
            rpc_method = getattr(agent, method.lower())
            rpc_method(context, self.fo.firewall, self.fo.host)
            # The manager is expected to dump the firewall to a file and
            # put only the file path into the event payload.
            expected = {'context': context,
                        'firewall': {
                            'file_path': "/tmp/%s" % (self.fo.firewall['id'])},
                        'host': self.fo.host}
            new_event_mock.assert_called_with(id=method,
                                              data=expected, key=None)
            post_event_mock.assert_called_with('foo')

    def test_create_firewall_fwaasrpcmanager(self):
        """create_firewall must enqueue a FIREWALL_CREATE event."""
        self._test_event_creation(fw_const.FIREWALL_CREATE_EVENT)

    def test_update_firewall_fwaasrpcmanager(self):
        """update_firewall must enqueue a FIREWALL_UPDATE event."""
        self._test_event_creation(fw_const.FIREWALL_UPDATE_EVENT)

    def test_delete_firewall_fwaasrpcmanager(self):
        """delete_firewall must enqueue a FIREWALL_DELETE event."""
        self._test_event_creation(fw_const.FIREWALL_DELETE_EVENT)
class FwaasHandlerTestCase(base.BaseTestCase):
""" Implements test cases for event handler methods
of firewall agent.
"""
def __init__(self, *args, **kwargs):
super(FwaasHandlerTestCase, self).__init__(*args, **kwargs)
self.fo = fo.FakeObjects()
self.ev = fo.FakeEventFirewall()
self.firewall_rule = {
'id': 'rule-id', 'action': 'allow',
'destination_ip_address': '',
'destination_port': '80',
'enabled': 'enabled', 'ip_version': 'v4',
'protocol': 'tcp', 'source_ip_address': '',
'source_port': '', 'shared': False,
'position': 1
}
self.ev.data['context']['agent_info']['resource'] = 'firewall'
@mock.patch(__name__ + '.fo.FakeObjects.rpcmgr')
@mock.patch(__name__ + '.fo.FakeObjects.drivers')
@mock.patch(__name__ + '.fo.FakeObjects.sc')
def _get_FwHandler_objects(self, sc, drivers, rpcmgr):
""" Retrieves event handler object of firewall agent.
:param sc: mocked service controller object of process model framework
:param drivers: list of driver objects for firewall agent
:param rpcmgr: object of configurator's RPC manager
Returns: object of firewall agents's event handler
"""
with mock.patch.object(cfg, 'CONF') as mock_cfg:
mock_cfg.configure_mock(host='foo')
agent = fw.FWaasEventHandler(sc, drivers, rpcmgr, mock_cfg)
return agent
def _test_handle_event(self, rule_list_info=True):
""" Test handle event method of firewall agent for various
device configuration operations.
:param rule_list_info: an atrribute of firewall resource object
sent from plugin which contains the firewall rules.
Returns: None
"""
agent = self._get_FwHandler_objects()
driver = mock.Mock()
with mock.patch.object(
agent.plugin_rpc, 'set_firewall_status') as (
mock_set_fw_status), (
mock.patch.object(
agent.plugin_rpc, 'firewall_deleted')) as (mock_fw_deleted), (
mock.patch.object(
driver, fw_const.FIREWALL_CREATE_EVENT.lower())) as (
mock_create_fw), (
mock.patch.object(
driver, fw_const.FIREWALL_UPDATE_EVENT.lower())) as (
mock_update_fw), (
mock.patch.object(
driver, fw_const.FIREWALL_DELETE_EVENT.lower())) as (
mock_delete_fw), (
mock.patch.object(
agent, '_get_driver', return_value=driver)):
firewall = self.fo._fake_firewall_obj()
if not rule_list_info:
firewall_rule_list = []
else:
firewall_rule_list = [self.firewall_rule]
firewall.update({'firewall_rule_list': firewall_rule_list})
self.ev.data.get('firewall').update(
{'firewall_rule_list': firewall_rule_list})
agent_info = self.ev.data['context']['agent_info']
agent.handle_event(self.ev)
context = self.fo.neutron_context
if 'service_info' in self.fo.context:
self.fo.context.pop('service_info')
if not rule_list_info:
if self.ev.id == fw_const.FIREWALL_CREATE_EVENT:
mock_set_fw_status.assert_called_with(
agent_info,
firewall['id'], const.STATUS_ACTIVE, firewall)
elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT:
mock_set_fw_status.assert_called_with(
agent_info,
const.STATUS_ACTIVE, firewall)
elif self.ev.id == fw_const.FIREWALL_DELETE_EVENT:
mock_fw_deleted.assert_called_with(
agent_info, firewall['id'], firewall)
else:
if self.ev.id == fw_const.FIREWALL_CREATE_EVENT:
mock_create_fw.assert_called_with(
context,
firewall, self.fo.host)
elif self.ev.id == fw_const.FIREWALL_UPDATE_EVENT:
mock_update_fw.assert_called_with(
context,
firewall, self.fo.host)
elif self.ev.id == fw_const.FIREWALL_DELETE_EVENT:
mock_delete_fw.assert_called_with(
context,
firewall, self.fo.host)
    def test_create_firewall_with_rule_list_info_true(self):
        """Verify FIREWALL_CREATE_EVENT handling when rules are present.

        Sets the event id and delegates to _test_handle_event(), whose
        default exercises the handler with a non-empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_CREATE_EVENT
        self._test_handle_event()
    def test_update_firewall_with_rule_list_info_true(self):
        """Verify FIREWALL_UPDATE_EVENT handling when rules are present.

        Sets the event id and delegates to _test_handle_event(), whose
        default exercises the handler with a non-empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_UPDATE_EVENT
        self._test_handle_event()
    def test_delete_firewall_with_rule_list_info_true(self):
        """Verify FIREWALL_DELETE_EVENT handling when rules are present.

        Sets the event id and delegates to _test_handle_event(), whose
        default exercises the handler with a non-empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_DELETE_EVENT
        self._test_handle_event()
    def test_create_firewall_with_rule_list_info_false(self):
        """Verify FIREWALL_CREATE_EVENT handling with an empty rule list.

        Passes rule_list_info=False so _test_handle_event() exercises the
        handler with an empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_CREATE_EVENT
        self._test_handle_event(False)
    def test_update_firewall_with_rule_list_info_false(self):
        """Verify FIREWALL_UPDATE_EVENT handling with an empty rule list.

        Passes rule_list_info=False so _test_handle_event() exercises the
        handler with an empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_UPDATE_EVENT
        self._test_handle_event(False)
    def test_delete_firewall_with_rule_list_info_false(self):
        """Verify FIREWALL_DELETE_EVENT handling with an empty rule list.

        Passes rule_list_info=False so _test_handle_event() exercises the
        handler with an empty firewall_rule_list.

        Returns: none
        """
        self.ev.id = fw_const.FIREWALL_DELETE_EVENT
        self._test_handle_event(False)
| [
"mock.patch",
"gbpservice.contrib.nfp.configurator.agents.firewall.FWaasEventHandler",
"mock.Mock",
"gbpservice.contrib.tests.unit.nfp.configurator.test_data.fw_test_data.FakeEventFirewall",
"gbpservice.contrib.nfp.configurator.lib.fw_constants.FIREWALL_CREATE_EVENT.lower",
"gbpservice.contrib.nfp.configu... | [((1247, 1290), 'mock.patch', 'mock.patch', (["(__name__ + '.fo.FakeObjects.sc')"], {}), "(__name__ + '.fo.FakeObjects.sc')\n", (1257, 1290), False, 'import mock\n'), ((1296, 1341), 'mock.patch', 'mock.patch', (["(__name__ + '.fo.FakeObjects.conf')"], {}), "(__name__ + '.fo.FakeObjects.conf')\n", (1306, 1341), False, 'import mock\n'), ((4316, 4363), 'mock.patch', 'mock.patch', (["(__name__ + '.fo.FakeObjects.rpcmgr')"], {}), "(__name__ + '.fo.FakeObjects.rpcmgr')\n", (4326, 4363), False, 'import mock\n'), ((4369, 4417), 'mock.patch', 'mock.patch', (["(__name__ + '.fo.FakeObjects.drivers')"], {}), "(__name__ + '.fo.FakeObjects.drivers')\n", (4379, 4417), False, 'import mock\n'), ((4423, 4466), 'mock.patch', 'mock.patch', (["(__name__ + '.fo.FakeObjects.sc')"], {}), "(__name__ + '.fo.FakeObjects.sc')\n", (4433, 4466), False, 'import mock\n'), ((1224, 1240), 'gbpservice.contrib.tests.unit.nfp.configurator.test_data.fw_test_data.FakeObjects', 'fo.FakeObjects', ([], {}), '()\n', (1238, 1240), True, 'from gbpservice.contrib.tests.unit.nfp.configurator.test_data import fw_test_data as fo\n'), ((1691, 1719), 'gbpservice.contrib.nfp.configurator.agents.firewall.FWaasRpcManager', 'fw.FWaasRpcManager', (['sc', 'conf'], {}), '(sc, conf)\n', (1709, 1719), True, 'from gbpservice.contrib.nfp.configurator.agents import firewall as fw\n'), ((3827, 3843), 'gbpservice.contrib.tests.unit.nfp.configurator.test_data.fw_test_data.FakeObjects', 'fo.FakeObjects', ([], {}), '()\n', (3841, 3843), True, 'from gbpservice.contrib.tests.unit.nfp.configurator.test_data import fw_test_data as fo\n'), ((3862, 3884), 'gbpservice.contrib.tests.unit.nfp.configurator.test_data.fw_test_data.FakeEventFirewall', 'fo.FakeEventFirewall', ([], {}), '()\n', (3882, 3884), True, 'from gbpservice.contrib.tests.unit.nfp.configurator.test_data import fw_test_data as fo\n'), ((5463, 5474), 'mock.Mock', 'mock.Mock', ([], {}), '()\n', (5472, 5474), False, 'import mock\n'), ((2172, 
2226), 'mock.patch.object', 'mock.patch.object', (['sc', '"""new_event"""'], {'return_value': '"""foo"""'}), "(sc, 'new_event', return_value='foo')\n", (2189, 2226), False, 'import mock\n'), ((2282, 2317), 'mock.patch.object', 'mock.patch.object', (['sc', '"""post_event"""'], {}), "(sc, 'post_event')\n", (2299, 2317), False, 'import mock\n'), ((4881, 4911), 'mock.patch.object', 'mock.patch.object', (['cfg', '"""CONF"""'], {}), "(cfg, 'CONF')\n", (4898, 4911), False, 'import mock\n'), ((4993, 5044), 'gbpservice.contrib.nfp.configurator.agents.firewall.FWaasEventHandler', 'fw.FWaasEventHandler', (['sc', 'drivers', 'rpcmgr', 'mock_cfg'], {}), '(sc, drivers, rpcmgr, mock_cfg)\n', (5013, 5044), True, 'from gbpservice.contrib.nfp.configurator.agents import firewall as fw\n'), ((5489, 5547), 'mock.patch.object', 'mock.patch.object', (['agent.plugin_rpc', '"""set_firewall_status"""'], {}), "(agent.plugin_rpc, 'set_firewall_status')\n", (5506, 5547), False, 'import mock\n'), ((5613, 5668), 'mock.patch.object', 'mock.patch.object', (['agent.plugin_rpc', '"""firewall_deleted"""'], {}), "(agent.plugin_rpc, 'firewall_deleted')\n", (5630, 5668), False, 'import mock\n'), ((6119, 6179), 'mock.patch.object', 'mock.patch.object', (['agent', '"""_get_driver"""'], {'return_value': 'driver'}), "(agent, '_get_driver', return_value=driver)\n", (6136, 6179), False, 'import mock\n'), ((5766, 5804), 'gbpservice.contrib.nfp.configurator.lib.fw_constants.FIREWALL_CREATE_EVENT.lower', 'fw_const.FIREWALL_CREATE_EVENT.lower', ([], {}), '()\n', (5802, 5804), True, 'from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const\n'), ((5898, 5936), 'gbpservice.contrib.nfp.configurator.lib.fw_constants.FIREWALL_UPDATE_EVENT.lower', 'fw_const.FIREWALL_UPDATE_EVENT.lower', ([], {}), '()\n', (5934, 5936), True, 'from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const\n'), ((6030, 6068), 
'gbpservice.contrib.nfp.configurator.lib.fw_constants.FIREWALL_DELETE_EVENT.lower', 'fw_const.FIREWALL_DELETE_EVENT.lower', ([], {}), '()\n', (6066, 6068), True, 'from gbpservice.contrib.nfp.configurator.lib import fw_constants as fw_const\n')] |
#!/usr/bin/env python
# -- encoding: utf-8 --
#
# Copyright 2015-2016 Telefónica Investigación y Desarrollo, S.A.U
#
# This file is part of FI-WARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with <EMAIL>
#
import csv
import glob
import shelve
import copy
import os
import argparse
import tempfile
import sys
from glancesync_image import GlanceSyncImage
"""This module contains all the code that interacts directly with the glance
implementation. It isolates the main code from the glance interaction.
Therefore, this module may be changed if the API is upgraded or it is
invoked in a different way, without affecting the main module.
This is a mock implementation, used for testing.
"""
import logging
class ServersFacade(object):
    """In-memory mock of the glance server interaction, used for testing.

    Images are stored in the class-level ``images`` dict, keyed by region
    full name (``<region>`` for the master target, ``<target>:<region>``
    otherwise).  When ``use_persistence`` is True each per-region dict is a
    ``shelve`` store kept under ``dir_persist``.
    """
    images_dir = '/var/lib/glance/images'
    images = dict()
    # Put this property to False to use this file as a mock in a unittest.
    # When use_persistence is true, image information is preserved on disk.
    use_persistence = False
    dir_persist = './.glancesync_persist'

    def __init__(self, target):
        self.target = target

    def get_regions(self):
        """It returns the list of regions on the specified target.

        :return: a list of region names.
        """
        all_regions = ServersFacade.images.keys()
        target_name = self.target['target_name']
        regions_list = list()
        for region in all_regions:
            parts = region.split(':')
            if target_name == 'master':
                # master regions are stored without a target prefix
                if len(parts) != 1:
                    continue
                regions_list.append(region)
            else:
                # other targets are stored as '<target>:<region>'
                if len(parts) != 2:
                    continue
                if parts[0] != target_name:
                    continue
                regions_list.append(parts[1])
        return regions_list

    def get_imagelist(self, regionobj):
        """return a image list from the glance of the specified region

        :param regionobj: The GlanceSyncRegion object of the region to list
        :return: a list of GlanceSyncImage objects
        """
        # Clone each object: otherwise modifying the returned objects would
        # modify the objects stored in the mock.
        # BUG FIX: the original applied copy.deepcopy() to a dict view;
        # dict_values is not deep-copyable in Python 3 (TypeError), so copy
        # item by item and return a real list.
        return [copy.deepcopy(image)
                for image in ServersFacade.images[regionobj.fullname].values()]

    def update_metadata(self, regionobj, image):
        """ update the metadata of the image in the specified region
        See GlanceSync.update_metadata_image for more details.

        :param regionobj: region where it is the image to update
        :param image: the image with the metadata to update
        :return: this function doesn't return anything.
        """
        images = ServersFacade.images[regionobj.fullname]
        updatedimage = images[image.id]
        updatedimage.is_public = image.is_public
        updatedimage.name = image.name
        # updatedimage.owner = image.owner
        updatedimage.user_properties = dict(image.user_properties)
        if ServersFacade.use_persistence:
            # a shelve only notices item assignment, so re-store and flush
            images[image.id] = updatedimage
            images.sync()

    def upload_image(self, regionobj, image):
        """Upload the image to the glance server on the specified region.

        :param regionobj: GlanceSyncRegion object; the region where the image
           will be upload.
        :param image: GlanceSyncImage object; the image to be uploaded.
        :return: The UUID of the new image.
        """
        count = 1
        if regionobj.fullname not in ServersFacade.images:
            ServersFacade.images[regionobj.fullname] = dict()
        # Synthesise a unique id of the form '<n>$<name>'
        imageid = '1$' + image.name
        while imageid in ServersFacade.images[regionobj.fullname]:
            count += 1
            imageid = str(count) + '$' + image.name
        owner = regionobj.target['tenant'] + 'id'
        new_image = GlanceSyncImage(
            image.name, imageid, regionobj.fullname, owner, image.is_public,
            image.checksum, image.size, image.status,
            dict(image.user_properties))
        ServersFacade.images[regionobj.fullname][imageid] = new_image
        if ServersFacade.use_persistence:
            ServersFacade.images[regionobj.fullname].sync()
        return imageid

    def delete_image(self, regionobj, id, confirm=True):
        """delete a image on the specified region.

        Be careful, this action cannot be reverted and for this reason by
        default requires confirmation!

        :param regionobj: the GlanceSyncRegion object
        :param id: the UUID of the image to delete
        :param confirm: ask for confirmation (unused by this mock)
        :return: true if image was deleted, false if it was canceled by user
        """
        if regionobj.fullname not in ServersFacade.images:
            return False
        images = ServersFacade.images[regionobj.fullname]
        if id not in images:
            return False
        del images[id]
        if ServersFacade.use_persistence:
            ServersFacade.images[regionobj.fullname].sync()
        return True

    def get_tenant_id(self):
        """It returns the tenant id corresponding to the target. It is
        necessary to use the tenant_id instead of the tenant_name because the
        first is used as the owner of the images.

        :return: the tenant id
        """
        if 'tenant_id' in self.target:
            return self.target['tenant_id']
        else:
            # the mock derives a fake id from the tenant name
            return self.target['tenant'] + 'id'

    @staticmethod
    def init_persistence(dir=None, clean=False):
        """Function to start using persistence: load the data from the last
        session if it exists.

        :param dir: path of the directory where the persistence files go.
           Default dir is ./.glancesync_persist
        :param clean: if path exists, discard all existing content
        :return: None
        """
        # The parameter keeps its historical name 'dir' (shadowing the
        # builtin) so existing keyword callers do not break.
        if dir:
            ServersFacade.dir_persist = dir
        ServersFacade.use_persistence = True
        ServersFacade.images = dict()
        # BUG FIX: the original evaluated os.path.exists(dir) and
        # glob.glob(dir + ...), which raises TypeError when the default
        # dir=None is used; always work with the effective directory.
        path = ServersFacade.dir_persist
        if os.path.exists(path):
            for name in glob.glob(path + '/_persist_*'):
                if clean:
                    os.unlink(name)
                else:
                    # the '_persist_' prefix is 9 characters long
                    region = os.path.basename(name)[9:]
                    ServersFacade.images[region] = shelve.open(name)
        else:
            os.mkdir(path)

    @staticmethod
    def add_image_to_mock(image):
        """Add the image to the mock

        :param image: The image to add. It can be a GlanceSyncImage or a list
        :return: This method does not return nothing.
        """
        if type(image) == list:
            image = GlanceSyncImage.from_field_list(image)
        else:
            # store a copy so later mutations by the caller are not seen
            image = copy.deepcopy(image)
        if image.region not in ServersFacade.images:
            if ServersFacade.use_persistence:
                ServersFacade.images[image.region] =\
                    shelve.open(ServersFacade.dir_persist + '/_persist_' +
                                image.region)
            else:
                ServersFacade.images[image.region] = dict()
        ServersFacade.images[image.region][image.id] = image
        if ServersFacade.use_persistence:
            ServersFacade.images[image.region].sync()

    @staticmethod
    def add_emptyregion_to_mock(region):
        """Add empty region to mock

        :param region: The image region (e.g. other:Madrid)
        :return: This method does not return nothing.
        """
        if ServersFacade.use_persistence:
            ServersFacade.images[region] = shelve.open(
                ServersFacade.dir_persist + '/_persist_' + region)
        else:
            ServersFacade.images[region] = dict()

    @staticmethod
    def clear_mock():
        """clear all the non-persistent content of the mock"""
        ServersFacade.images = dict()
        # if using persistence, deleting _persist_ files is the
        # responsibility of the caller.

    @staticmethod
    def add_images_from_csv_to_mock(path):
        """Add images to the mock, reading the csv files saved by the backup
        tool.

        :param path: The directory where the csv files are.
        :return: This method does not return nothing.

        Each file in path has this pattern: backup_<regionname>.csv.
        """
        for file in glob.glob(path + '/*.csv'):
            # strip the 'backup_' prefix and the '.csv' suffix
            region_name = os.path.basename(file)[7:-4]
            if region_name not in ServersFacade.images:
                if ServersFacade.use_persistence:
                    ServersFacade.images[region_name] =\
                        shelve.open(ServersFacade.dir_persist + '/_persist_' +
                                    region_name)
                else:
                    ServersFacade.images[region_name] = dict()
            with open(file) as f:
                for row in csv.reader(f):
                    # ignore blank lines
                    if len(row) == 0:
                        continue
                    image = GlanceSyncImage.from_field_list(row)
                    ServersFacade.images[region_name][image.id] = image
            if ServersFacade.use_persistence:
                ServersFacade.images[region_name].sync()
if __name__ == '__main__':
    # Command-line utility: initialise a clean persistent mock session and
    # pre-load it with the images described by backup_<region>.csv files.
    parser = argparse.ArgumentParser(
        description='Start a clean persistent session'
    )
    # --path and --random are two alternative ways to choose the directory.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--path', default='~/.glancesync_persist/',
                       help='path where the persistent objects are created')
    group.add_argument('--random', action='store_true',
                       help='create a random path')
    parser.add_argument(
        'initial_load',
        help='directory with initial load, with files (backup_<region>.csv)')
    parser.add_argument(
        '--confirm', action='store_true',
        help='If path exists and it is not empty, this option is required')
    meta = parser.parse_args()
    # Normalise the initial-load directory and require that it exists.
    meta.initial_load = os.path.normpath(os.path.expanduser(meta.initial_load))
    if not os.path.exists(meta.initial_load):
        logging.error('The directory "%s" with the initial load must exist' %
                      meta.initial_load)
        sys.exit(-1)
    if meta.random:
        meta.path = tempfile.mkdtemp(prefix='glancesync_tmp')
    else:
        meta.path = os.path.normpath(os.path.expanduser(meta.path))
    # Refuse to reuse a non-empty session directory unless --confirm is set.
    m = 'The directory "%s" is not empty. If you are sure, pass --confirm'
    if os.path.exists(meta.path) and not meta.confirm \
            and len(glob.glob(meta.path + '/_persist_*')) != 0:
        logging.error(m % meta.path)
        sys.exit(-1)
    facade = ServersFacade(dict())
    facade.init_persistence(meta.path, True)
    facade.add_images_from_csv_to_mock(meta.initial_load)
    # The caller can eval this line to point glancesync at the mock data.
    print('export GLANCESYNC_MOCKPERSISTENT_PATH=' + meta.path)
| [
"os.path.exists",
"copy.deepcopy",
"argparse.ArgumentParser",
"glancesync_image.GlanceSyncImage.from_field_list",
"glob.glob",
"tempfile.mkdtemp",
"os.mkdir",
"sys.exit",
"shelve.open",
"os.path.basename",
"csv.reader",
"os.unlink",
"logging.error",
"os.path.expanduser"
] | [((9754, 9825), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Start a clean persistent session"""'}), "(description='Start a clean persistent session')\n", (9777, 9825), False, 'import argparse\n'), ((6528, 6547), 'os.path.exists', 'os.path.exists', (['dir'], {}), '(dir)\n', (6542, 6547), False, 'import os\n'), ((8826, 8852), 'glob.glob', 'glob.glob', (["(path + '/*.csv')"], {}), "(path + '/*.csv')\n", (8835, 8852), False, 'import glob\n'), ((10500, 10537), 'os.path.expanduser', 'os.path.expanduser', (['meta.initial_load'], {}), '(meta.initial_load)\n', (10518, 10537), False, 'import os\n'), ((10550, 10583), 'os.path.exists', 'os.path.exists', (['meta.initial_load'], {}), '(meta.initial_load)\n', (10564, 10583), False, 'import os\n'), ((10593, 10686), 'logging.error', 'logging.error', (['(\'The directory "%s" with the initial load must exist\' % meta.initial_load)'], {}), '(\'The directory "%s" with the initial load must exist\' % meta.\n initial_load)\n', (10606, 10686), False, 'import logging\n'), ((10712, 10724), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (10720, 10724), False, 'import sys\n'), ((10766, 10807), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""glancesync_tmp"""'}), "(prefix='glancesync_tmp')\n", (10782, 10807), False, 'import tempfile\n'), ((6573, 6603), 'glob.glob', 'glob.glob', (["(dir + '/_persist_*')"], {}), "(dir + '/_persist_*')\n", (6582, 6603), False, 'import glob\n'), ((6840, 6875), 'os.mkdir', 'os.mkdir', (['ServersFacade.dir_persist'], {}), '(ServersFacade.dir_persist)\n', (6848, 6875), False, 'import os\n'), ((7162, 7200), 'glancesync_image.GlanceSyncImage.from_field_list', 'GlanceSyncImage.from_field_list', (['image'], {}), '(image)\n', (7193, 7200), False, 'from glancesync_image import GlanceSyncImage\n'), ((7235, 7255), 'copy.deepcopy', 'copy.deepcopy', (['image'], {}), '(image)\n', (7248, 7255), False, 'import copy\n'), ((8072, 8134), 'shelve.open', 'shelve.open', 
(["(ServersFacade.dir_persist + '/_persist_' + region)"], {}), "(ServersFacade.dir_persist + '/_persist_' + region)\n", (8083, 8134), False, 'import shelve\n'), ((10855, 10884), 'os.path.expanduser', 'os.path.expanduser', (['meta.path'], {}), '(meta.path)\n', (10873, 10884), False, 'import os\n'), ((10976, 11001), 'os.path.exists', 'os.path.exists', (['meta.path'], {}), '(meta.path)\n', (10990, 11001), False, 'import os\n'), ((11110, 11138), 'logging.error', 'logging.error', (['(m % meta.path)'], {}), '(m % meta.path)\n', (11123, 11138), False, 'import logging\n'), ((11155, 11167), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (11163, 11167), False, 'import sys\n'), ((7430, 7498), 'shelve.open', 'shelve.open', (["(ServersFacade.dir_persist + '/_persist_' + image.region)"], {}), "(ServersFacade.dir_persist + '/_persist_' + image.region)\n", (7441, 7498), False, 'import shelve\n'), ((8880, 8902), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (8896, 8902), False, 'import os\n'), ((9346, 9359), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (9356, 9359), False, 'import csv\n'), ((6651, 6666), 'os.unlink', 'os.unlink', (['name'], {}), '(name)\n', (6660, 6666), False, 'import os\n'), ((6796, 6813), 'shelve.open', 'shelve.open', (['name'], {}), '(name)\n', (6807, 6813), False, 'import shelve\n'), ((9096, 9163), 'shelve.open', 'shelve.open', (["(ServersFacade.dir_persist + '/_persist_' + region_name)"], {}), "(ServersFacade.dir_persist + '/_persist_' + region_name)\n", (9107, 9163), False, 'import shelve\n'), ((9501, 9537), 'glancesync_image.GlanceSyncImage.from_field_list', 'GlanceSyncImage.from_field_list', (['row'], {}), '(row)\n', (9532, 9537), False, 'from glancesync_image import GlanceSyncImage\n'), ((11049, 11085), 'glob.glob', 'glob.glob', (["(meta.path + '/_persist_*')"], {}), "(meta.path + '/_persist_*')\n", (11058, 11085), False, 'import glob\n'), ((6718, 6740), 'os.path.basename', 'os.path.basename', (['name'], {}), '(name)\n', 
(6734, 6740), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import json
import os
import time
import psutil
import pyautogui
# Steam protocol URL that launches PUBG (app id 578080).
pubg_url = 'steam://rungameid/578080'
# Process name of the running game, used to detect/kill it.
PROCNAME = "TslGame.exe"
# Process name of PUBG's crash reporter dialog.
CRASH_PROCNAME = "BroCrashReporter.exe"
# Directory where printScreen() stores debug screenshots.
debug_directory = "debug_screenshots"
# State-machine labels and the per-state timeout (seconds) after which
# checkTimer() restarts the game.
start_state = "HELLO"
play_state = "PLAYING"
play_timer_max = 60 * 3
matching_state = "MATCHING"
matching_timer_max = 60 * 3
loading_state = "LOADING"
loading_timer_max = 60 * 3
gameloading_state = "GAME IS LOADING"
gameloading_timer_max = 60 * 3
# Current state of the bot.
state = start_state
# Whether debug screenshots may be taken (asked from the user at startup).
takeScrenshot = True
# Seconds spent in the current state so far (reset by changeState()).
timer = 0.0
def getConfig():
    """Load and return the bot configuration from ./config.json."""
    with open('config.json', encoding='UTF-8') as handle:
        return json.load(handle)
def getpixel(x, y):
    """Take a full-screen screenshot and return the pixel value at (x, y)."""
    screenshot = pyautogui.screenshot()
    return screenshot.getpixel((x, y))
def pixelMatchesColor(x, y, expectedRGBColor, tolerance=0):
    """Return True if the on-screen pixel at (x, y) matches expectedRGBColor.

    Channels are compared with an absolute per-channel tolerance.  If either
    the pixel or the expected color is RGB (length 3), only the three RGB
    channels are compared; if both are RGBA (length 4), alpha is compared
    too.

    :param x: screen x coordinate
    :param y: screen y coordinate
    :param expectedRGBColor: an RGB or RGBA tuple
    :param tolerance: maximum allowed absolute difference per channel
    :raises ValueError: if the color modes cannot be reconciled.  The
        original used a bare ``assert`` here, which is stripped under
        ``python -O`` and then silently returned None.
    """
    pix = getpixel(x, y)
    if len(pix) == 3 or len(expectedRGBColor) == 3:  # RGB mode
        actual, expected = pix[:3], expectedRGBColor[:3]
    elif len(pix) == 4 and len(expectedRGBColor) == 4:  # RGBA mode
        actual, expected = pix, expectedRGBColor
    else:
        raise ValueError(
            'Color mode was expected to be length 3 (RGB) or 4 (RGBA), '
            'but pixel is length %s and expectedRGBColor is length %s' % (
                len(pix), len(expectedRGBColor)))
    return all(abs(a - e) <= tolerance for a, e in zip(actual, expected))
def printScreen(message):
    """Save a timestamped debug screenshot, if screenshots are enabled."""
    if not takeScrenshot:
        return
    if not os.path.exists(debug_directory):
        os.makedirs(debug_directory)
    stamp = time.strftime("%m.%d %H.%M.%S", time.gmtime())
    pyautogui.screenshot('{}/{}{}.png'.format(debug_directory, stamp, message))
def changeState(value):
    """Switch the bot to a new state and restart the state timer."""
    global state, timer
    timer = 0
    state = value
def killGame():
    """Terminate every running PUBG game process (matched by PROCNAME)."""
    for process in psutil.process_iter():
        if process.name() == PROCNAME:
            process.kill()
def matchesButton(position):
    """Return True if the pixel at *position* matches any known button color.

    The candidate colors are tried in the same order as before (white, gray,
    super white, golden) with the configured tolerance.
    """
    x, y = position[0], position[1]
    candidates = (white_button, gray_button, super_white_button, golden_button)
    return any(
        pixelMatchesColor(x, y, color, tolerance=color_tolerance)
        for color in candidates)
def isGameRunning():
    """Return True if a PUBG game process (PROCNAME) is currently running.

    BUG FIX: the original attached ``else: return False`` to the ``if``
    inside the loop, so only the FIRST process in the table was ever
    inspected and the function returned False whenever that first process
    was not the game.  Now the whole process table is scanned before
    concluding the game is not running.
    """
    for proc in psutil.process_iter():
        if proc.name() == PROCNAME:
            return True
    return False
def checkTimer():
    """Restart the game if the current state exceeded its timeout.

    Each state has its own maximum duration; when the state timer passes
    it, a debug screenshot is taken and the bot goes back to the start
    state (which relaunches the game).
    """
    limits = {
        loading_state: loading_timer_max,
        matching_state: matching_timer_max,
        play_state: play_timer_max,
        gameloading_state: gameloading_timer_max,
    }
    limit = limits.get(state)
    if limit is not None and timer > limit:
        printScreen('Timeout')
        print('Timeout. Restarting the game')
        changeState(start_state)
# Load config.json and interactively ask the user for server/mode/consent.
config = getConfig()
# Menu
print('By using this software you agree with license! You can find it in code.')
print('Choose a server:')
number = 1
for server in config['servers']:
    print('{}. {}'.format(number, server['title']))
    number += 1
inp = int(input('Type number: '))
inp -= 1
# (x, y, title) of the chosen server entry in the lobby UI.
server_position = (config['servers'][inp]['x'], config['servers'][inp]['y'], config['servers'][inp]['title'])
print('Choose a mod:')
number = 1
for server in config['modes']:
    print('{}. {}'.format(number, server['title']))
    number += 1
inp = int(input('Type number: '))
inp -= 1
print('Can I take screenshots if something wrong happens? (y/N)')
if input().lower() == 'y':
    print('Thanks')
else:
    print("Well, if something will go wrong, then I can't help you")
    takeScrenshot = False
# Position init
# Screen coordinates of the UI elements, all read from config.json.
mode_position = (config['modes'][inp]['x'], config['modes'][inp]['y'], config['modes'][inp]['title'])
mode_tick_position = (config['modes'][inp]['tick']['x'], config['modes'][inp]['tick']['y'])
play_button_position = (config['play_button']['x'], config['play_button']['y'])
play_state_position = (config['play_state']['x'], config['play_state']['y'])
text_position = (config['text']['x'], config['text']['y'])
exit_position = (config['exit_to_lobby']['x'], config['exit_to_lobby']['y'])
error_position_check = (config['error_position']['x'], config['error_position']['y'])
error_ok_position = (config['error_ok_position']['x'], config['error_ok_position']['y'])
game_message_position = (config['game_message_position']['x'], config['game_message_position']['y'])
exit_button_position = (config['exit_button_position']['x'], config['exit_button_position']['y'])
reconnect_button_position = (config['reconnect_button_position']['x'], config['reconnect_button_position']['y'])
# Reading timings
# Delays (seconds) used between UI interactions, from config.json.
refresh_rate = config["timers"]["refresh_rate"]
wait_after_killing_a_game = config["timers"]["wait_after_killing_a_game"]
start_delay = config["timers"]["start_delay"]
animation_delay = config["timers"]["animation_delay"]
wait_for_players = config["timers"]["wait_for_players"]
wait_for_plain = config["timers"]["wait_for_plain"]
exit_animation_delay = config["timers"]["exit_animation_delay"]
loading_delay = config["timers"]["loading_delay"]
# Colors
def getColor(config, name):
    """Return the (r, g, b) tuple for the named color entry in *config*."""
    channels = config["colors"][name]
    return (channels["r"], channels["g"], channels["b"])
# Pixel-matching tolerance and reference colors, all read from config.json.
color_tolerance = config["color_tolerance"]
dark_play_color = getColor(config, "dark_play_color")
play_color = getColor(config, "play_color")
matching_color = getColor(config, "matching_color")
matching_tick_color = getColor(config, "matching_tick_color")
text_start_color = getColor(config, "text_start_color")
white_button = getColor(config, "white_button")
gray_button = getColor(config, "gray_button")
golden_button = getColor(config, "golden_button")
super_white_button = getColor(config, "super_white_button")
windows_background = getColor(config, "windows_background")
exit_button_color = getColor(config, "exit_button_color")
reconnect_button_color = getColor(config, "reconnect_button_color")
# Game info
print('Server: {}. Mode: {}'.format(server_position[2], mode_position[2]))
# ---------------------------------------------------------------------------
# Main automation loop.  Every refresh_rate seconds the bot inspects the
# screen, advances the state machine (HELLO -> LOADING -> MATCHING ->
# GAME IS LOADING -> PLAYING) and restarts the game on crashes/timeouts.
# ---------------------------------------------------------------------------
while True:
    # If PUBG's crash reporter appeared, kill it together with the game and
    # start over from scratch.
    try:
        for proc in psutil.process_iter():
            if proc.name() == CRASH_PROCNAME:
                print('Fucking bugs in PUBG. Trying to avoid them!')
                proc.kill()
                killGame()
                time.sleep(wait_after_killing_a_game)
                changeState(start_state)
    except Exception as ex:
        print('Something went wrong while killing bug reporter... Error message: {}'.format(ex))

    if state == start_state:
        # Dismiss a possible Windows error dialog, then (re)launch the game.
        if pixelMatchesColor(error_position_check[0], error_position_check[1],
                             windows_background, tolerance=color_tolerance):
            pyautogui.press('enter')
            pyautogui.click(error_ok_position[0], error_ok_position[1])
        killGame()
        time.sleep(wait_after_killing_a_game)
        try:
            os.startfile(pubg_url)
            changeState(loading_state)
            time.sleep(start_delay)
            print('Loading PUBG')
        except Exception as ex:
            print('Something went wrong while starating PUBG... Error message: {}'.format(ex))

    elif state == loading_state:
        lobby_visible = (
            pixelMatchesColor(play_state_position[0], play_state_position[1],
                              play_color, tolerance=color_tolerance) or
            pixelMatchesColor(play_state_position[0], play_state_position[1],
                              dark_play_color, tolerance=color_tolerance))
        if lobby_visible:
            # Play button is on screen: pick server and mode, then queue up.
            pyautogui.moveTo(play_button_position[0], play_button_position[1])
            time.sleep(animation_delay)
            pyautogui.click(server_position[0], server_position[1])
            time.sleep(animation_delay)
            pyautogui.click(mode_position[0], mode_position[1])
            time.sleep(animation_delay)
            if pixelMatchesColor(mode_tick_position[0], mode_tick_position[1],
                                 matching_tick_color,
                                 tolerance=color_tolerance):
                pyautogui.click(mode_tick_position[0], mode_tick_position[1])
            pyautogui.click(play_button_position[0], play_button_position[1])
            changeState(matching_state)
            time.sleep(loading_delay)
            print('Starting matchmaking...')
        elif pixelMatchesColor(text_position[0], text_position[1],
                               text_start_color, tolerance=color_tolerance):
            print('I see text, so the game is probably ready...')
            changeState(play_state)
        elif pixelMatchesColor(reconnect_button_position[0],
                               reconnect_button_position[1],
                               reconnect_button_color,
                               tolerance=color_tolerance):
            print("Nice orange button? I'll press it!")
            pyautogui.click(reconnect_button_position[0],
                            reconnect_button_position[1])
            time.sleep(animation_delay)
        elif matchesButton(game_message_position):
            print("Game's message was denied")
            pyautogui.click(game_message_position[0], game_message_position[1])
        elif (not pixelMatchesColor(exit_button_position[0],
                                    exit_button_position[1],
                                    exit_button_color,
                                    tolerance=color_tolerance) and
              not pixelMatchesColor(exit_button_position[0],
                                    exit_button_position[1],
                                    matching_tick_color,
                                    tolerance=color_tolerance) and
              timer > 30 and isGameRunning()):
            print("I can't see exit button, so the game is probably ready...")
            time.sleep(wait_for_players)
            changeState(play_state)

    elif state == matching_state:
        lobby_visible = (
            pixelMatchesColor(play_state_position[0], play_state_position[1],
                              play_color, tolerance=color_tolerance) or
            pixelMatchesColor(play_state_position[0], play_state_position[1],
                              dark_play_color, tolerance=color_tolerance))
        if lobby_visible:
            # Thrown back to the lobby: go queue again.
            changeState(loading_state)
            time.sleep(loading_delay)
        if not pixelMatchesColor(play_state_position[0],
                                 play_state_position[1], matching_color,
                                 tolerance=color_tolerance):
            if pixelMatchesColor(play_state_position[0],
                                 play_state_position[1], matching_tick_color,
                                 tolerance=color_tolerance):
                changeState(gameloading_state)
                time.sleep(loading_delay)
                print('Session is loading')

    elif state == gameloading_state:
        if not pixelMatchesColor(play_state_position[0],
                                 play_state_position[1], matching_tick_color,
                                 tolerance=color_tolerance):
            print('Loading is complete')
            time.sleep(wait_for_players)
            changeState(play_state)

    elif state == play_state:
        if not pixelMatchesColor(text_position[0], text_position[1],
                                 text_start_color, tolerance=color_tolerance):
            # Text marker disappeared -- presumably the match ended; leave
            # to the lobby through the ESC menu.
            time.sleep(wait_for_plain)
            pyautogui.press('esc')
            time.sleep(animation_delay)
            pyautogui.click(exit_position[0], exit_position[1])
            time.sleep(exit_animation_delay)
            pyautogui.click(exit_position[0], exit_position[1])
            changeState(loading_state)
            print('Going in menu. Loading again')
            time.sleep(10)

    time.sleep(refresh_rate)
    timer += refresh_rate
    checkTimer()
| [
"os.path.exists",
"pyautogui.press",
"os.startfile",
"os.makedirs",
"pyautogui.moveTo",
"pyautogui.screenshot",
"psutil.process_iter",
"time.sleep",
"pyautogui.click",
"json.load",
"time.gmtime"
] | [((1935, 1956), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (1954, 1956), False, 'import psutil\n'), ((2857, 2878), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (2876, 2878), False, 'import psutil\n'), ((12583, 12607), 'time.sleep', 'time.sleep', (['refresh_rate'], {}), '(refresh_rate)\n', (12593, 12607), False, 'import time\n'), ((630, 650), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (639, 650), False, 'import json\n'), ((6976, 6997), 'psutil.process_iter', 'psutil.process_iter', ([], {}), '()\n', (6995, 6997), False, 'import psutil\n'), ((7763, 7800), 'time.sleep', 'time.sleep', (['wait_after_killing_a_game'], {}), '(wait_after_killing_a_game)\n', (7773, 7800), False, 'import time\n'), ((699, 721), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (719, 721), False, 'import pyautogui\n'), ((1620, 1651), 'os.path.exists', 'os.path.exists', (['debug_directory'], {}), '(debug_directory)\n', (1634, 1651), False, 'import os\n'), ((1665, 1693), 'os.makedirs', 'os.makedirs', (['debug_directory'], {}), '(debug_directory)\n', (1676, 1693), False, 'import os\n'), ((7639, 7663), 'pyautogui.press', 'pyautogui.press', (['"""enter"""'], {}), "('enter')\n", (7654, 7663), False, 'import pyautogui\n'), ((7676, 7735), 'pyautogui.click', 'pyautogui.click', (['error_ok_position[0]', 'error_ok_position[1]'], {}), '(error_ok_position[0], error_ok_position[1])\n', (7691, 7735), False, 'import pyautogui\n'), ((7826, 7848), 'os.startfile', 'os.startfile', (['pubg_url'], {}), '(pubg_url)\n', (7838, 7848), False, 'import os\n'), ((7900, 7923), 'time.sleep', 'time.sleep', (['start_delay'], {}), '(start_delay)\n', (7910, 7923), False, 'import time\n'), ((7238, 7275), 'time.sleep', 'time.sleep', (['wait_after_killing_a_game'], {}), '(wait_after_killing_a_game)\n', (7248, 7275), False, 'import time\n'), ((8621, 8687), 'pyautogui.moveTo', 'pyautogui.moveTo', (['play_button_position[0]', 'play_button_position[1]'], 
{}), '(play_button_position[0], play_button_position[1])\n', (8637, 8687), False, 'import pyautogui\n'), ((8700, 8727), 'time.sleep', 'time.sleep', (['animation_delay'], {}), '(animation_delay)\n', (8710, 8727), False, 'import time\n'), ((8768, 8823), 'pyautogui.click', 'pyautogui.click', (['server_position[0]', 'server_position[1]'], {}), '(server_position[0], server_position[1])\n', (8783, 8823), False, 'import pyautogui\n'), ((8836, 8863), 'time.sleep', 'time.sleep', (['animation_delay'], {}), '(animation_delay)\n', (8846, 8863), False, 'import time\n'), ((8876, 8927), 'pyautogui.click', 'pyautogui.click', (['mode_position[0]', 'mode_position[1]'], {}), '(mode_position[0], mode_position[1])\n', (8891, 8927), False, 'import pyautogui\n'), ((8940, 8967), 'time.sleep', 'time.sleep', (['animation_delay'], {}), '(animation_delay)\n', (8950, 8967), False, 'import time\n'), ((9219, 9284), 'pyautogui.click', 'pyautogui.click', (['play_button_position[0]', 'play_button_position[1]'], {}), '(play_button_position[0], play_button_position[1])\n', (9234, 9284), False, 'import pyautogui\n'), ((9337, 9362), 'time.sleep', 'time.sleep', (['loading_delay'], {}), '(loading_delay)\n', (9347, 9362), False, 'import time\n'), ((1793, 1806), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (1804, 1806), False, 'import time\n'), ((9145, 9206), 'pyautogui.click', 'pyautogui.click', (['mode_tick_position[0]', 'mode_tick_position[1]'], {}), '(mode_tick_position[0], mode_tick_position[1])\n', (9160, 9206), False, 'import pyautogui\n'), ((11180, 11205), 'time.sleep', 'time.sleep', (['loading_delay'], {}), '(loading_delay)\n', (11190, 11205), False, 'import time\n'), ((9835, 9910), 'pyautogui.click', 'pyautogui.click', (['reconnect_button_position[0]', 'reconnect_button_position[1]'], {}), '(reconnect_button_position[0], reconnect_button_position[1])\n', (9850, 9910), False, 'import pyautogui\n'), ((9923, 9950), 'time.sleep', 'time.sleep', (['animation_delay'], {}), '(animation_delay)\n', 
(9933, 9950), False, 'import time\n'), ((11590, 11615), 'time.sleep', 'time.sleep', (['loading_delay'], {}), '(loading_delay)\n', (11600, 11615), False, 'import time\n'), ((11913, 11941), 'time.sleep', 'time.sleep', (['wait_for_players'], {}), '(wait_for_players)\n', (11923, 11941), False, 'import time\n'), ((10061, 10128), 'pyautogui.click', 'pyautogui.click', (['game_message_position[0]', 'game_message_position[1]'], {}), '(game_message_position[0], game_message_position[1])\n', (10076, 10128), False, 'import pyautogui\n'), ((12187, 12213), 'time.sleep', 'time.sleep', (['wait_for_plain'], {}), '(wait_for_plain)\n', (12197, 12213), False, 'import time\n'), ((12226, 12248), 'pyautogui.press', 'pyautogui.press', (['"""esc"""'], {}), "('esc')\n", (12241, 12248), False, 'import pyautogui\n'), ((12261, 12288), 'time.sleep', 'time.sleep', (['animation_delay'], {}), '(animation_delay)\n', (12271, 12288), False, 'import time\n'), ((12301, 12352), 'pyautogui.click', 'pyautogui.click', (['exit_position[0]', 'exit_position[1]'], {}), '(exit_position[0], exit_position[1])\n', (12316, 12352), False, 'import pyautogui\n'), ((12365, 12397), 'time.sleep', 'time.sleep', (['exit_animation_delay'], {}), '(exit_animation_delay)\n', (12375, 12397), False, 'import time\n'), ((12410, 12461), 'pyautogui.click', 'pyautogui.click', (['exit_position[0]', 'exit_position[1]'], {}), '(exit_position[0], exit_position[1])\n', (12425, 12461), False, 'import pyautogui\n'), ((12563, 12577), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (12573, 12577), False, 'import time\n'), ((10539, 10567), 'time.sleep', 'time.sleep', (['wait_for_players'], {}), '(wait_for_players)\n', (10549, 10567), False, 'import time\n')] |
#! C:\bin\Python35\python.exe
# -*- coding: utf-8 -*-
'''
Modified for python3 on 2012/04/29
original python2 version is Created on 2011/10/30
@author: tyama
'''
import poplib
import email.header
import string
import re
import urllib.request
import urllib.error
import urllib.parse
import http.cookiejar
import socket
import threading
import time
import random
import json
import mailcheker_data as config
from subprocess import check_call
'''
#sample
def decode_mime_header1(s0):
return ''.join( str(s, c or 'ascii') if isinstance(s, (bytes,)) \
else s for s,c in email.header.decode_header(s0) )
'''
def decode_mime_header(st):
    """Decode a MIME-encoded mail header into a unicode string.

    Each (value, charset) chunk from ``email.header.decode_header()`` is
    decoded with its declared charset; ``'unknown-8bit'`` chunks fall back to
    Shift_JIS (this tool targets Japanese shop mails) and chunks without a
    charset fall back to UTF-8.  Undecodable bytes are dropped ('ignore').
    """
    decoded_st = ""
    for s, enc in email.header.decode_header(st):
        try:
            if isinstance(s, str):
                decoded_st += s
            elif enc == 'unknown-8bit':  # bytes chunk with no usable charset
                decoded_st += s.decode('Shift_JIS', 'ignore')
            elif enc:
                decoded_st += s.decode(enc, 'ignore')
            else:
                decoded_st += s.decode('utf-8', 'ignore')
        except LookupError as e:
            # Unknown codec name declared in the header.
            print('encode error:', e)
        except Exception as err:
            # BUG FIX: the original printed sys.exc_info(), but `sys` is never
            # imported in this module, so reaching this handler raised a
            # NameError.  Report the caught exception instead.
            print('Unexpected error in decode, sleeping 8 sec')
            print(err)
            time.sleep(8)
    return decoded_st
def extract_url(msg, fromkey, payloadkey, multiurl):
    """Extract URL(s) matching regex `payloadkey` from the text parts of `msg`.

    The message is only inspected when `fromkey` appears in the decoded
    From: header.  With multiurl=True every match found by ``findall`` is
    collected and de-duplicated; otherwise only ``group(1)`` of the first
    match per text part is appended.

    Returns a list of URLs (possibly empty), or None when the From: header
    does not contain `fromkey`.
    """
    # BUG FIX: the original used msg.get('From', str), passing the *type*
    # `str` as the default value, which made decode_mime_header() crash for
    # mails without a From: header.  An empty string is the intended default.
    f_header = msg.get('From', '')
    # Rakuten mails are not decoded correctly unless the missing space
    # between the encoded word and '<' is restored first.
    if isinstance(f_header, str):
        f_header_mod = f_header.replace('==?=<', '==?= <')
    else:
        f_header_mod = f_header
    decoded_from = decode_mime_header(f_header_mod)
    url = []
    if fromkey in decoded_from:
        pattern = re.compile(payloadkey)
        for part in msg.walk():
            if part.get_content_maintype() == 'text':
                body = part.get_payload()
                enc = part.get_content_charset()
                if isinstance(body, str):
                    u_body = body
                elif enc == 'unknown-8bit':  # bytes with no usable charset
                    u_body = body.decode('Shift_JIS', 'ignore')
                elif enc:
                    u_body = body.decode(enc, 'ignore')
                else:
                    u_body = body.decode('euc-jp', 'ignore')
                if multiurl:
                    result = pattern.findall(u_body)
                    if result:
                        for each in result:
                            url.append(each)
                        # De-duplicate; note this does not preserve order.
                        url = list(set(url))
                else:
                    result = pattern.search(u_body)
                    if result:
                        url.append(result.group(1))
        return url
    else:
        return None
def isEmailTocheck(msg, fromkey):
    """Return True when `fromkey` appears in the decoded From: header of `msg`."""
    # BUG FIX: as in extract_url(), the default must be '' rather than the
    # type `str`, which crashed decode_mime_header() for From-less mails.
    f_header = msg.get('From', '')
    # Restore the space Rakuten drops between the encoded word and '<'.
    if isinstance(f_header, str):
        f_header_mod = f_header.replace('==?=<', '==?= <')
    else:
        f_header_mod = f_header
    decoded_from = decode_mime_header(f_header_mod)
    return fromkey in decoded_from
class http_get(threading.Thread):
    """Fetch a single URL on a worker thread through a shared opener.

    The result is only printed; nothing is stored on the instance, so a
    caller cannot read success/failure back (unchanged historical behaviour).
    """

    def __init__(self, url, opener, index):
        threading.Thread.__init__(self)
        self.url = url        # URL to fetch
        self.opener = opener  # shared urllib opener (carries cookies / UA)
        self.index = index    # message index, used only for logging

    def run(self):
        try:
            response = self.opener.open(self.url)
            print(" ", self.index, self.url)
            return True
        except urllib.error.HTTPError as error:
            print('HTTP Error')
            print(error)
        except socket.timeout as error:
            print('Socket time out')
            print(error)
        except Exception as err:
            # BUG FIX: the original printed sys.exc_info() without importing
            # `sys`, raising a NameError inside this handler.
            print('Unexpected error in decode, sleeping 8 sec')
            print(err)
            time.sleep(8)
        return None
# Sample record used by main() only to demonstrate a json dumps/loads
# round-trip; it has no effect on the mail-checking logic.
original_data = {
    'name': 'ACME',
    'shares': 100,
    'price': 542.23
}
def main():
    """Poll each configured POP3 account, scan new mails for known senders,
    fire HTTP GETs on every URL extracted from matching mails, and persist
    the last-seen message UIDL per account to 'lastmsgid.dat'.
    """
    # json round-trip demo only; unrelated to the mail logic below.
    print("Base", original_data)
    json_str = json.dumps(original_data)
    print(json_str)
    json_data = json.loads(json_str)
    print(json_data)
    # Account credentials and per-account (from_key, url_regex, multiurl)
    # rules come from the local mailcheker_data config module.
    server_list = config.server_list
    user_list = config.user_list
    pass_list = config.pass_list
    print(server_list)
    dl_list1 = config.dl_list1
    dl_list2 = config.dl_list2
    dl_list3 = config.dl_list3
    dl_list = (dl_list1, dl_list2, dl_list3)
    # lines=open('setting.dat','r').readlines()
    # for line in lines:
    #    print line[:-1]
    # One "<count> <uidl>" line per account from the previous run.
    lastuidl_lists = []
    f = open('lastmsgid.dat', 'r')
    for line in f:
        lastuidl_lists.append(line.split())
    f.close()
    out_string = []
    print(lastuidl_lists)
    print(dl_list)
    # time out
    socket.setdefaulttimeout(15.0)
    # connect to server
    cj = http.cookiejar.CookieJar()
    cjhdr = urllib.request.HTTPCookieProcessor(cj)
    opener = urllib.request.build_opener(cjhdr)
    opener.addheaders = [
        ('User-Agent', 'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko)\
         Chrome/15.0.874.120 Safari/535.2')]
    for j in range(len(server_list)):
        print('Start ')
        server = poplib.POP3_SSL(server_list[j])
        # login
        server.user(user_list[j])
        server.pass_(pass_list[j])
        # list items on server
        list_resp, list_items, list_octets = server.list()
        print(list_resp)
        # print (list_items)
        print(list_octets)
        uidl = server.uidl()
        lastuidl = lastuidl_lists[j]
        # print server.uidl()
        '''if j==1:
            lastuidl[1]='TEST'
        '''
        last_msg_id = 1
        x = int(lastuidl[0])
        if x > len(list_items):
            x = len(list_items)
        index = x
        print(x)
        if x == 0:
            # Empty mailbox: record placeholder state for this account.
            out_string.append('1')
            out_string.append('abc')
            continue
        # Walk backwards until the previously processed UIDL is found; `index`
        # ends up at the first unprocessed message.
        # NOTE(review): 'ingore' below is a typo for 'ignore'; it only matters
        # if a decode error actually occurs (error handlers are looked up
        # lazily) — TODO fix.
        while x > 0:
            # print (lastuidl[1], ":>", uidl[1][x-1].split()[1].decode('utf-8','ingore'))
            if lastuidl[1] == uidl[1][x - 1].split()[1].decode('utf-8', 'ingore'):
                print('equal')
                break
            print(x)
            index = x
            x -= 1
        print(index)
        # if uidl[1][i].split()[1] == 'ANft2MsAABBhTsOb4QzFegr+jPA':
        #    print 'equal'
        #    continue
        delete_counter = 0
        last_index = index
        # NOTE(review): `sys` is never imported in this module, so the
        # sys.exc_info() calls in the handlers below raise NameError if they
        # are ever reached — TODO fix (use the caught `err` instead).
        for i in range(index, len(list_items) + 1):
            try:
                # Fetch headers only (TOP) first to cheaply test the sender.
                # resp, text, octets = server.retr(i)
                t_resp, t_text, t_octets = server.top(i, 1)
            except Exception as err:
                print('Unexpected error in server.top of Main function\n')
                print('i=', i, ', index=', index)
                print(sys.exc_info())
            # print (text)'
            t_string_text = b'\n'.join(t_text)
            t_msg = email.message_from_bytes(t_string_text)
            url_list = None
            checkBody = False
            for from_key, text_key, multiurl in dl_list[j]:
                if isEmailTocheck(t_msg, from_key):
                    checkBody = True
                    break
            if checkBody:
                # Sender matched: retrieve the full message body.
                try:
                    resp, text, octets = server.retr(i)
                except Exception as err:
                    print('Unexpected error in server.retr of Main function\n')
                    print('i=', i, ', index=', index)
                    print(sys.exc_info())
                string_text = b'\n'.join(text)
                msg = email.message_from_bytes(string_text)
                for from_key, text_key, multiurl in dl_list[j]:
                    url_list = extract_url(msg, from_key, text_key, multiurl)
                    if url_list:
                        break
                # print url_list
                if url_list:
                    m_date = msg.get('Date')
                    print(m_date)
                    # Fire one GET per URL on its own thread (fire-and-forget).
                    for each in url_list:
                        # print each
                        get = http_get(each, opener, i)
                        try:
                            get.start()
                            # server.dele(i)
                            delete_counter += 1
                            if 'r34' in each:
                                print('Call Chrome')
                                check_call(
                                    ["C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
                                     " --disable-images", each])
                        except Exception as err:
                            print('Unexpected error in Main function', each, i)
                            print(sys.exc_info())
                            time.sleep(8)
                    m_subject = msg.get('Subject')
                    d_subject, enc = email.header.decode_header(m_subject)[0]
                    if enc is None:
                        enc = 'euc-jp'
                    try:
                        u_subject = str(d_subject, enc)
                    except Exception as err:
                        print('Unexpected error in u_subject', d_subject, enc)
                        print(sys.exc_info())
                        time.sleep(8)
                    print(i, " ", u_subject)
            else:
                print(i)
            last_index = i
            if i == 6:
                pass # quit()
        last_msg_id = len(list_items) # - delete_counter
        out_string.append(str(last_msg_id))
        out_string.append(uidl[1][last_index - 1].split()[1].decode('utf-8', 'ignore'))
        try:
            server.quit()
        except Exception as err:
            print('Unexpected error in server.quit()')
            print(sys.exc_info())
    print('End')
    print(out_string[len(out_string) - 1])
    # print out_string
    time.sleep(2)
    for i in range(len(out_string)):
        if i % 2:
            continue
        print(out_string[i])
        print(out_string[i + 1])
    # Persist "<count> <uidl>" per account for the next run.
    f = open('lastmsgid.dat', 'w')
    for i in range(len(out_string)):
        if i % 2:
            continue
        f.write(out_string[i] + ' ')
        f.write(out_string[i + 1] + '\n')
    f.close()
if __name__ == '__main__':
    main()
    print('END')
# NOTE(review): this sleep is *outside* the __main__ guard, so it also runs
# at import time — presumably meant to keep the console window open after a
# run; confirm before moving it inside the guard.
time.sleep(8)
| [
"threading.Thread.__init__",
"json.loads",
"poplib.POP3_SSL",
"re.compile",
"subprocess.check_call",
"json.dumps",
"time.sleep",
"socket.setdefaulttimeout"
] | [((4499, 4524), 'json.dumps', 'json.dumps', (['original_data'], {}), '(original_data)\n', (4509, 4524), False, 'import json\n'), ((4561, 4581), 'json.loads', 'json.loads', (['json_str'], {}), '(json_str)\n', (4571, 4581), False, 'import json\n'), ((5194, 5224), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['(15.0)'], {}), '(15.0)\n', (5218, 5224), False, 'import socket\n'), ((10211, 10224), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (10221, 10224), False, 'import time\n'), ((10631, 10644), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (10641, 10644), False, 'import time\n'), ((1787, 1809), 're.compile', 're.compile', (['payloadkey'], {}), '(payloadkey)\n', (1797, 1809), False, 'import re\n'), ((3457, 3488), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (3482, 3488), False, 'import threading\n'), ((5630, 5661), 'poplib.POP3_SSL', 'poplib.POP3_SSL', (['server_list[j]'], {}), '(server_list[j])\n', (5645, 5661), False, 'import poplib\n'), ((1285, 1298), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (1295, 1298), False, 'import time\n'), ((4321, 4334), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (4331, 4334), False, 'import time\n'), ((9570, 9583), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (9580, 9583), False, 'import time\n'), ((8755, 8871), 'subprocess.check_call', 'check_call', (["['C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe',\n ' --disable-images', each]"], {}), "([\n 'C:\\\\Program Files (x86)\\\\Google\\\\Chrome\\\\Application\\\\chrome.exe',\n ' --disable-images', each])\n", (8765, 8871), False, 'from subprocess import check_call\n'), ((9115, 9128), 'time.sleep', 'time.sleep', (['(8)'], {}), '(8)\n', (9125, 9128), False, 'import time\n')] |
import math
from math import pi
import numpy as np
import open3d as o3d
import matplotlib.pyplot as plt
import cv2
import toml
from .cameraparam import CameraParam
from .fitted_line import FittedLine
from .ransac_fit import ransac_line_fit, ransac_ground_fit
from .util import check_all_false
# TODO: output random seed used in ransac and open3d
# PCL pre-processing (the unit of these numerics is [m])
DOWNSAMPLE_VOXEL_SIZE = 0.003
DOWNSAMPLE_VOXEL_SIZE_GROUND = 0.005
# Ground fit
# X/Y_MIN..MAX bound the ground grid in the camera-set frame; GRID_SIZE is
# the cell pitch of that grid.
X_MIN = 0.
X_MAX = +1.2
Y_MIN = -0.8
Y_MAX = +0.8
GRID_SIZE = 0.080
GROUND_SEED_Z_MAX = 0.
GROUND_SEED_MARGIN = 0.080
GROUND_MARGIN = 0.030
SMOOTHING_KERNEL = GRID_SIZE * 0.5
# Clustering
# DBSCAN_EPS : Density parameter that is used to find neighbouring points
# DBSCAN_MINPOINTS : Minimum number of points to form a cluster
DBSCAN_EPS = 0.016
DBSCAN_MINPOINTS = 10
CLUSTER_MINPOINTS = 50
# Colormap used to give each cluster / fitted line a distinct color.
CMAP_CLUSTER = plt.get_cmap("tab20")
def set_pcl_fitter(toml_path):
    """Build a PCLFitter whose camera mounting pose is read from a TOML file.

    The [General] set_roll / set_pitch / set_yaw angles describe how the
    camera is mounted; the translation is fixed at the origin.  The full TOML
    dict is forwarded to PCLFitter as its target attribute.
    """
    # BUG FIX: the original called toml.load(open(toml_path)) and leaked the
    # file handle; the context manager guarantees it is closed.
    with open(toml_path) as f:
        dict_toml = toml.load(f)
    set_roll = float(dict_toml['General']['set_roll'])
    set_pitch = float(dict_toml['General']['set_pitch'])
    set_yaw = float(dict_toml['General']['set_yaw'])
    camera_set_param = CameraParam()
    camera_set_param.set_tf_rot_and_trans([set_roll, set_pitch, set_yaw], [0., 0., 0.])
    return PCLFitter(camera_set_param, dict_toml)
class PCLFitter(object):
    """Fit straight lines (e.g. plant stems) to a depth-camera point cloud.

    Pipeline (see fit_pcd): depth image -> point cloud in the global frame ->
    voxel downsample -> ground-grid fit -> DBSCAN clustering above ground ->
    RANSAC line fit per cluster -> merging / grounding / occlusion checks ->
    final target selection and display geometry.
    """

    def __init__(self, camera_set_param=None, target_attribute=None):
        self.depth_img = None
        self.camera_param = None
        self.grid_xyzw = None  # (nx, ny, 4) ground grid: x, y, z, fit weight
        if camera_set_param is None:
            self.camera_set_param = CameraParam()
        else:
            self.camera_set_param = camera_set_param
        if target_attribute is None:
            self.set_parameters()
        else:
            self.set_target_attribute(target_attribute)

    def set_target_attribute(self, dict_toml):
        """Load target-selection thresholds from a parsed TOML dict."""
        self.pcl_cutoff_dist = float(dict_toml['Selection']['pcl_cutoff_dist'])
        self.target_max_dist = float(dict_toml['Selection']['target_max_dist'])
        self.target_min_dist = float(dict_toml['Selection']['target_min_dist'])
        self.target_max_len = float(dict_toml['Selection']['target_max_len'])
        self.target_min_len = float(dict_toml['Selection']['target_min_len'])
        self.target_max_tilt = float(dict_toml['Selection']['target_max_tilt'])

    def set_parameters(self):
        """Fallback target-selection thresholds (units: m, deg)."""
        self.pcl_cutoff_dist = 1.1
        self.target_max_dist = 0.85
        self.target_min_dist = 0.3
        self.target_min_len = 0.25
        self.target_max_len = 0.40
        self.target_max_tilt = 30.

    def get_pcd_from_depth_img(self, depth_img, camera_param):
        """Convert a depth image to a downsampled open3d cloud in the global frame."""
        self.depth_img = depth_img
        self.camera_param = camera_param
        pcl_raw = self.tfm_pcl_cam2global(self.cvt_depth2pcl(self.depth_img, self.camera_param), camera_param)
        pcd = self.downsample(pcl_raw, voxel_size=DOWNSAMPLE_VOXEL_SIZE)
        return pcd

    def fit_pcd(self, pcd, cluster_eps=DBSCAN_EPS, cluster_min_points=DBSCAN_MINPOINTS, verbose=True):
        """Run the full fitting pipeline on an open3d point cloud.

        Returns (line_list, pcd_list, fitgeom_list, pcd_array,
        ground_points_ary); line_list is empty when no point is above ground.
        """
        pcd_list = []
        fitgeom_list = []
        pcd_array = np.array(pcd.points, dtype=np.float32)
        bflg_above_ground, xy_binidx, grid_xyzw, pcd_grounds_list = self.ground_fit(pcd_array)
        pcd_grounds_ary_pre_downsample = np.asarray(pcd_grounds_list[2].points) # pcd_grounds = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
        pcd_grounds = self.downsample(pcd_grounds_ary_pre_downsample, voxel_size=DOWNSAMPLE_VOXEL_SIZE_GROUND)
        ground_points_ary = np.asarray(pcd_grounds.points)
        pcd_list += [ground_points_ary]
        fitgeom_list.append(self.get_mesh_ground())
        # TODO debug.error() send to cloud if above ground is all false
        if check_all_false(bflg_above_ground):
            return [], pcd_list, fitgeom_list, pcd_array, ground_points_ary
        labels, cluster_pcd = self.clustering(pcd_array[bflg_above_ground],
                                              eps=cluster_eps, min_points=cluster_min_points)
        pcd_list.append(cluster_pcd)
        line_list = self.line_fit(pcd_array[bflg_above_ground], labels)
        self.merge_lines(line_list)
        self.mark_multiline_clusters(line_list)
        self.extend_lines_to_ground(line_list, grid_xyzw)
        self.check_line_truncation(line_list)
        self.final_selection(line_list)
        if verbose:
            self.print_line_info(line_list)
        self.bkg_postprocess(line_list)
        self.remove_noise_lines(line_list, grid_xyzw)
        mesh_cylinders = self.get_line_fit_geometry(line_list)
        fitgeom_list += mesh_cylinders
        return line_list, pcd_list, fitgeom_list, pcd_array, ground_points_ary

    def cvt_depth2pcl(self, depth_img, camera_param):
        """Back-project a depth image into a (N, 3) camera-frame point array."""
        cx, cy = camera_param.center_xy
        fx, fy = camera_param.focal_xy
        DEPTH_MIN = 1e-3
        arr_y = np.arange(depth_img.shape[0], dtype=np.float32)
        arr_x = np.arange(depth_img.shape[1], dtype=np.float32)
        val_x, val_y = np.meshgrid(arr_x, arr_y)
        # TODO: rewrite axis convertion explicitly (i.e. zense clockwise rotation)
        tmp_x = +depth_img
        tmp_y = +depth_img * (val_y - cy) * (1. / fy)
        tmp_z = -depth_img * (val_x - cx) * (1. / fx)
        # Keep only valid pixels within the cutoff distance (+0.2 m margin).
        filled = (depth_img > DEPTH_MIN) * (depth_img < self.pcl_cutoff_dist + 0.2)
        filled_x = tmp_x[filled]
        filled_y = tmp_y[filled]
        filled_z = tmp_z[filled]
        pcl = np.stack([filled_x, filled_y, filled_z], axis=-1)
        return pcl

    def tfm_pcl_cam2global(self, pcl_camframe, camera_param):
        """Transform camera-frame points into the global (set) frame."""
        pcl_tmp = np.dot(pcl_camframe, camera_param.rot_mtx.transpose()) + camera_param.translation
        pcl_global = np.dot(pcl_tmp, self.camera_set_param.rot_mtx.transpose())
        return pcl_global

    def cvt_to_2d_image_xyd(self, input_points, camera_param):
        """Project global-frame points back to image (x, y, depth) triples."""
        points = input_points.reshape(-1, 3)
        points_tmp = np.dot(points, self.camera_set_param.inv_rot_mtx.transpose())
        points_camframe = np.dot(points_tmp - camera_param.translation, camera_param.inv_rot_mtx.transpose())
        cx, cy = camera_param.center_xy
        fx, fy = camera_param.focal_xy
        depth = +points_camframe[:, 0]
        val_y = +points_camframe[:, 1] / depth * fy + cy
        val_x = -points_camframe[:, 2] / depth * fx + cx
        xyd = np.stack([val_x, val_y, depth], axis=-1)
        return xyd.reshape(input_points.shape)

    def downsample(self, pcl_raw, voxel_size):
        """Voxel-downsample a numpy point array into an open3d cloud."""
        pcd_raw = self.cvt_numpy2open3d(pcl_raw, color=[0., 0., 1.])
        pcd = pcd_raw.voxel_down_sample(voxel_size=voxel_size)
        return pcd

    def cvt_numpy2open3d(self, pcl, color=None):
        """Wrap an (N, 3) numpy array as an open3d PointCloud, optionally colored."""
        pcd = o3d.geometry.PointCloud()
        pcd.points = o3d.utility.Vector3dVector(pcl.astype(np.float64))
        if not color is None:
            pcd.paint_uniform_color(color)
        return pcd

    def ground_fit(self, pcl):
        """Estimate the ground surface on a regular XY grid and split the cloud.

        Returns (bflg_above_ground, xy_binidx, grid_xyzw, pcd_all) where
        pcd_all = [out-of-bin cloud, ground-seed cloud, ground cloud].
        """
        x_nbin = int( (X_MAX - X_MIN) / float(GRID_SIZE) + 1e-3 )
        y_nbin = int( (Y_MAX - Y_MIN) / float(GRID_SIZE) + 1e-3 )
        x_edge = np.linspace(X_MIN, X_MIN + GRID_SIZE * x_nbin, x_nbin + 1).reshape(1, -1)
        y_edge = np.linspace(Y_MIN, Y_MIN + GRID_SIZE * y_nbin, y_nbin + 1).reshape(1, -1)
        x_ctr = (x_edge[0, 1:] + x_edge[0, :-1]) * 0.5
        y_ctr = (y_edge[0, 1:] + y_edge[0, :-1]) * 0.5
        pcl_tmp = pcl.reshape(-1, 1, 3)
        x_binflg = (pcl_tmp[:, :, 0] >= x_edge[:, :-1]) * (pcl_tmp[:, :, 0] < x_edge[:, 1:])
        y_binflg = (pcl_tmp[:, :, 1] >= y_edge[:, :-1]) * (pcl_tmp[:, :, 1] < y_edge[:, 1:])
        x_binidx = np.argmax(x_binflg, axis=-1)
        y_binidx = np.argmax(y_binflg, axis=-1)
        x_binidx[(x_binflg.sum(axis=-1) == 0)] = -1
        y_binidx[(y_binflg.sum(axis=-1) == 0)] = -1
        xy_binidx = np.concatenate([x_binidx.reshape(-1,1), y_binidx.reshape(-1,1)], axis=-1)
        # BUG FIX: np.bool was removed in NumPy 1.24; the builtin bool is the
        # correct (and historically equivalent) dtype here.
        bflg_out_of_bin = (xy_binidx == -1).sum(-1).astype(bool)
        bflg_in_bin = (bflg_out_of_bin == False)
        grid_xyzw = np.zeros([x_nbin, y_nbin, 4], dtype=np.float64)
        # Seed each cell with the lowest point below GROUND_SEED_Z_MAX;
        # empty cells get z=0 with a low weight (filled in afterwards).
        for i_x in range(x_nbin):
            for i_y in range(y_nbin):
                in_bin = (x_binidx == i_x) * (y_binidx == i_y)
                pcl_in_bin = pcl[in_bin]
                valid = (pcl_in_bin[:, 2] < GROUND_SEED_Z_MAX)
                pcl_valid = pcl_in_bin[valid]
                if pcl_valid.shape[0] == 0:
                    z_val = 0.
                    wgt = 0.1
                else:
                    z_val = pcl_valid[:, 2].min()
                    wgt = 1.
                grid_xyzw[i_x, i_y] = [x_ctr[i_x], y_ctr[i_y], z_val, wgt]
        grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=0.1)
        pcd_groundseed = self.cvt_numpy2open3d(grid_xyzw.reshape(-1, 4)[:, :3], color=[1., 0., 1.])
        pcl_ground_seed_z = grid_xyzw[x_binidx, y_binidx, 2]
        bflg_ground_seed = (pcl[:, 2] < (pcl_ground_seed_z + GROUND_SEED_MARGIN)) * bflg_in_bin
        grid_xyzw = ransac_ground_fit(pcl[bflg_ground_seed], xy_binidx[bflg_ground_seed], grid_xyzw)
        grid_xyzw = self.fill_empy_gridz(grid_xyzw, w_thres=1.)
        grid_xyzw = self.smooth_ground(grid_xyzw, kernel_size=SMOOTHING_KERNEL)
        self.grid_xyzw = grid_xyzw
        bflg_in_range = (np.linalg.norm(pcl[:,:2], axis=-1) < self.pcl_cutoff_dist)
        bflg_valid_points = bflg_in_range * bflg_in_bin
        pcl_ground_z = grid_xyzw[x_binidx, y_binidx, 2]
        bflg_ground = (pcl[:, 2] < (pcl_ground_z + GROUND_MARGIN)) * bflg_valid_points
        bflg_above_ground = (bflg_ground == False) * bflg_valid_points
        pcd_out_of_bin = self.cvt_numpy2open3d(pcl[bflg_valid_points == False], color=[0.3, 0., 0.5])
        pcd_ground = self.cvt_numpy2open3d(pcl[bflg_ground], color=[0., 0., 0.5])
        pcd_all = [pcd_out_of_bin, pcd_groundseed, pcd_ground]
        return bflg_above_ground, xy_binidx, grid_xyzw, pcd_all

    def fill_empy_gridz(self, grid_xyzw, w_thres=0.1):
        """Copy z from the nearest well-weighted cell into low-weight cells.

        (Name kept as-is — 'empy' is a historical typo in the public API.)
        """
        filled = (grid_xyzw[:,:,3] > w_thres)
        empty = (filled == False)
        filled_xyzw = grid_xyzw[filled].reshape(-1, 1, 4)
        empty_xyzw = grid_xyzw[empty].reshape(1, -1, 4)
        dist_array = np.linalg.norm(filled_xyzw[:,:,:2] - empty_xyzw[:,:,:2], axis=-1)
        if dist_array.shape[0] != 0:
            nearest_filled = np.argmin(dist_array, axis=0)
            grid_xyzw[empty, 2] = filled_xyzw[nearest_filled, 0, 2]
        return grid_xyzw

    def smooth_ground(self, grid_xyzw, kernel_size):
        """Gaussian-smooth the grid z values in place (weighted by column 3)."""
        vect = grid_xyzw[:,:,:2].reshape(1, -1, 2) - grid_xyzw[:,:,:2].reshape(-1, 1, 2)
        dsq = (vect ** 2).sum(axis=-1)
        z_orig = grid_xyzw[:,:,2].reshape(-1)
        wgt = grid_xyzw[:,:,3].reshape(-1)
        coeff = 0.5 / kernel_size ** 2
        fill_wgt = wgt * np.exp(-dsq * coeff)
        z_smooth = (z_orig * fill_wgt).sum(axis=-1) / fill_wgt.sum(axis=-1)
        grid_xyzw[:,:,2].reshape(-1)[:] = z_smooth
        return grid_xyzw

    def get_mesh_ground(self):
        """Return the fitted ground as a triangle mesh, or None before any fit."""
        return self.cvt_gridvtx2mesh(self.grid_xyzw) if self.grid_xyzw is not None else None

    def cvt_gridvtx2mesh(self, grid_vtx, double_sided=True):
        """Triangulate an (nx, ny, >=3) grid of vertices into an open3d mesh."""
        ngrid_y = grid_vtx.shape[1]
        vertices = np.array(grid_vtx[:,:,:3].reshape(-1,3))
        triangles = []
        for i_x in range(grid_vtx.shape[0] - 1):
            for i_y in range(grid_vtx.shape[1] - 1):
                ivert_base = i_x * ngrid_y + i_y
                triangles.append([ivert_base, ivert_base+ngrid_y, ivert_base+1])
                triangles.append([ivert_base+ngrid_y+1, ivert_base+1, ivert_base+ngrid_y])
        triangles = np.array(triangles)
        if double_sided:
            # Duplicate every face with flipped winding so both sides render.
            triangles = np.concatenate([triangles, triangles[:,::-1]], axis=0)
        mesh = o3d.geometry.TriangleMesh()
        mesh.vertices = o3d.utility.Vector3dVector(vertices)
        mesh.triangles = o3d.utility.Vector3iVector(triangles)
        mesh.paint_uniform_color([0.4, 0.4, 0.4])
        mesh.compute_vertex_normals()
        return mesh

    def clustering(self, pcl, eps=DBSCAN_EPS, min_points=DBSCAN_MINPOINTS):
        """DBSCAN-cluster points; labels are re-indexed by descending cluster size."""
        n_points = pcl.shape[0]
        print('Clustering {} points ...'.format(n_points),)
        pcd = self.cvt_numpy2open3d(pcl)
        labels_orig = np.array(
            pcd.cluster_dbscan(eps=eps, min_points=min_points, print_progress=False))
        n_cluster = labels_orig.max() + 1
        print('Found {} clusters.'.format(n_cluster))
        cls_flg = (np.arange(n_cluster).reshape(-1,1) == labels_orig.reshape(1,-1))
        n_points_in_cls = cls_flg.sum(axis=-1)
        sortidx_cls = np.argsort(n_points_in_cls)[::-1]
        labels = np.ones(n_points, dtype=np.int32) * -1
        for i_cls in range(n_cluster):
            labels[cls_flg[sortidx_cls[i_cls]]] = i_cls
        colors = CMAP_CLUSTER(labels)
        colors[labels < 0] = 0.8
        pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
        return labels, pcd

    def line_fit(self, pcl, labels):
        """RANSAC-fit up to MAX_ITER_LINEFIT lines per cluster.

        Clusters are visited largest-first (see clustering()); processing
        stops entirely at the first cluster that is too small at iteration 0.
        """
        MAX_ITER_LINEFIT = 3
        RANSAC_N_ITER = 500
        CUT_PERCENTILE = 0.8
        DTHRES_INLIER = 0.020
        MAX_ROOT_Z = 0.20
        line_list = []
        n_cluster = labels.max() + 1
        print("Line fit on %d clusters ..." % n_cluster)
        do_break = False
        for i_cluster in range(n_cluster):
            pcl_cluster = pcl[(labels == i_cluster)]
            print("Cluster #{} : {} points".format(i_cluster, pcl_cluster.shape[0]))
            pcl_to_fit = pcl_cluster
            for i_iter in range(MAX_ITER_LINEFIT):
                n_to_fit = pcl_to_fit.shape[0]
                print(" - Iteration {} : {} points".format(i_iter, n_to_fit)),
                if n_to_fit < CLUSTER_MINPOINTS:
                    print(" - Too small!")
                    if i_iter == 0:
                        do_break = True
                    break
                # max_root_z is only constrained on the first fit per cluster.
                length, tfm_mtx, is_outlier = ransac_line_fit(pcl_to_fit, n_iter=RANSAC_N_ITER, dthres_inlier=DTHRES_INLIER, cut_percentile=CUT_PERCENTILE, max_root_z=(MAX_ROOT_Z if i_iter==0 else -1.))
                if tfm_mtx is None:
                    print(" - Bad fit!")
                    break
                print(" - Good fit!")
                line_list.append(FittedLine(length, tfm_mtx, i_cluster))
                pcl_to_fit = pcl_to_fit[is_outlier]
            if do_break:
                break
        print("Found {} lines.".format(len(line_list)))
        return line_list

    def merge_lines(self, line_list):
        """Mark lines that continue a lower line as children (line2.parent)."""
        MERGE_THRES_COS = math.cos(15. * pi / 180.)
        MERGE_THRES_DIST = 0.10
        z_array = np.array([line.position[2] for line in line_list])
        sorted_idx = np.argsort(z_array)
        n_line = len(line_list)
        for i_line in range(n_line):
            line = line_list[sorted_idx[i_line]]
            for i_line2 in range(i_line + 1, n_line):
                line2 = line_list[sorted_idx[i_line2]]
                if not line2.parent is None:
                    continue
                to_line2 = line2.position - line.position_center
                dist_to_line2 = np.linalg.norm(to_line2)
                dir_to_line2 = to_line2 / dist_to_line2
                cos_to_line2 = np.dot(dir_to_line2, line.direction)
                if cos_to_line2 < MERGE_THRES_COS:
                    continue
                if dist_to_line2 > MERGE_THRES_DIST + line.length * 0.5:
                    continue
                line2.parent = line

    def count_lines_in_cluster(self, line_list):
        """Return {cluster_id: number of fitted lines} for the given lines."""
        counts = {}
        for line in line_list:
            if not line.cluster_id in counts:
                counts[line.cluster_id] = 0
            counts[line.cluster_id] += 1
        return counts

    def mark_multiline_clusters(self, line_list):
        """Flag every line whose cluster produced more than one fitted line."""
        counts = self.count_lines_in_cluster(line_list)
        for line in line_list:
            if counts[line.cluster_id] > 1:
                line.is_multiline_cluster = True

    def extend_lines_to_ground(self, line_list, grid_xyzw):
        """Extend near-vertical root lines down to the fitted ground surface."""
        N_AVERAGE = 4
        MAX_R = GRID_SIZE
        MIN_SOLITARY_LEN = 0.100
        MAX_EXTEND_LEN = 0.200
        MAX_GROUNDED_EXTEND_LEN = 0.060
        COSZ_THRESHOLD = math.cos(45. * pi / 180.)
        flatten_grid_xyz = grid_xyzw[:,:,:3].reshape(-1, 3)
        for line in line_list:
            if not line.parent is None:
                continue
            if line.is_solitary and line.length < MIN_SOLITARY_LEN:
                continue
            if line.direction[2] < COSZ_THRESHOLD:
                continue
            flatten_grid_local_frame = line.tfm_to_local_frame(flatten_grid_xyz)
            flatten_grid_r = np.linalg.norm(flatten_grid_local_frame[:,:2], axis=-1)
            # Weighted average of the ground z over the N_AVERAGE nearest
            # grid cells, weights decaying linearly with radial distance.
            idx_sort = np.argsort(flatten_grid_r)[0:N_AVERAGE]
            weight = np.clip((MAX_R - flatten_grid_r[idx_sort]) / MAX_R, 0., 1.)
            weight_sum = weight.sum()
            if not weight_sum > 0.:
                continue
            ground_z_local_frame = np.dot(flatten_grid_local_frame[idx_sort,2], weight) / weight_sum
            extend_len = -ground_z_local_frame
            if extend_len > MAX_EXTEND_LEN:
                continue
            line.extend_root(extend_len)
            line.is_grounded = (extend_len <= MAX_GROUNDED_EXTEND_LEN)

    def is_in_image(self, xyd, image_shape):
        """Return True when image point xyd=(x, y, depth) lies inside the margins."""
        TOP_MARGIN = 20
        SIDE_MARGIN = 20
        BOTTOM_MARGIN = 0
        x_val = xyd[0]
        y_val = xyd[1]
        if (y_val > SIDE_MARGIN
                and y_val < image_shape[0] - SIDE_MARGIN
                and x_val > TOP_MARGIN
                and x_val < image_shape[1] - BOTTOM_MARGIN):
            return True
        else:
            return False

    def check_line_truncation(self, line_list):
        """Set per-line containment / occlusion flags from the depth image.

        For each solitary grounded line a narrow sector beyond the tip is
        rasterized with cv2.ellipse; closer depth pixels inside it mean the
        line is occluded (and thus possibly truncated).
        """
        SEEK_MARGIN = [10, 50]
        OPENING_ANGLE = 4.
        SECTOR_COLOR = 1
        DEPTH_MARGIN = 0.015
        MAX_OCCLUDING_PIXELS = 5
        sector_mask = np.zeros(self.depth_img.shape, dtype=np.uint8)
        for line in line_list:
            line.sector_mask = {}
            line.occlusion_mask = {}
            root_is_contained = 0
            tip_is_contained = 0
            is_occluded = False
            xyd_ends = self.cvt_to_2d_image_xyd(line.position_ends, self.camera_param)
            line.xyd_ends = xyd_ends
            root_is_contained += self.is_in_image(xyd_ends[0], sector_mask.shape)
            tip_is_contained += self.is_in_image(xyd_ends[1], sector_mask.shape)
            if line.is_solitary and line.is_grounded:
                root_to_tip_xy = (xyd_ends[1] - xyd_ends[0])[:2]
                sector_angle = math.atan2(root_to_tip_xy[1], root_to_tip_xy[0]) / math.pi * 180.
                sector_radius = int(np.linalg.norm(root_to_tip_xy) * 0.5 + (SEEK_MARGIN[1] + SEEK_MARGIN[0]) * 0.5)
                center = (xyd_ends.sum(axis=0) * 0.5).astype(np.int32)
                sector_mask[:] = 0
                cv2.ellipse(sector_mask, (center[0], center[1]), (sector_radius, sector_radius), sector_angle, -OPENING_ANGLE * 0.5, +OPENING_ANGLE * 0.5, SECTOR_COLOR, SEEK_MARGIN[1] - SEEK_MARGIN[0])
                # TODO: what if tip is right on ?
                # TODO: handle cases where sector_mask goes out of image
                depth_in_sector = self.depth_img * sector_mask
                occlusion_mask = (depth_in_sector < xyd_ends[1, 2] + DEPTH_MARGIN) * (depth_in_sector > 0.)
                # TODO: Handle cases where the sector is out of frame in one camera
                if occlusion_mask.sum() > MAX_OCCLUDING_PIXELS:
                    is_occluded = True
                # BUG FIX: np.bool was removed in NumPy 1.24; use builtin bool.
                line.sector_mask = sector_mask.astype(bool)
                line.occlusion_mask = occlusion_mask
            line.tip_is_contained = (tip_is_contained != 0)
            line.is_contained = ((root_is_contained * tip_is_contained) != 0)
            line.is_occluded = is_occluded

    def final_selection(self, line_list):
        """Mark lines within the configured length/distance/tilt window as final."""
        target_cosz_min = math.cos(self.target_max_tilt * pi / 180.)
        for line in line_list:
            if not (line.length > self.target_min_len and line.length < self.target_max_len):
                continue
            line_dist = line.xy_distance
            if not (line_dist > self.target_min_dist and line_dist < self.target_max_dist):
                continue
            if line.direction[2] < target_cosz_min:
                continue
            line.is_final = True

    def bkg_postprocess(self, line_list):
        """Extend the tip of long / truncated non-target lines for display."""
        EXTEND_LEN = 1.
        MIN_LEN = 0.2
        target_cosz_min = math.cos(self.target_max_tilt * pi / 180.)
        for line in line_list:
            if line.is_good:
                continue
            if line.direction[2] < target_cosz_min:
                continue
            if line.length < MIN_LEN:
                continue
            if not (line.length < self.target_max_len) or not line.tip_is_contained:
                line.extend_tip(EXTEND_LEN)

    def remove_noise_lines(self, line_list, grid_xyzw):
        """Flag far/high or very short non-target lines as ignored."""
        MIN_LEN = 0.050
        n_orig = len(line_list)
        max_ground_z = np.max(grid_xyzw[:,:,2])
        z_threshold = max_ground_z + 0.40
        r_threshold = self.target_max_dist
        n_remove = 0
        for line in line_list:
            if line.is_good:
                continue
            if ((line.xy_distance > r_threshold and line.position[2] > z_threshold)
                    or line.length < MIN_LEN):
                line.is_ignored = True
                n_remove += 1
        print('Noise line removal : {} -> {}'.format(n_orig, n_orig - n_remove))

    def print_line_info(self, line_list):
        """Print a diagnostic table of all candidate lines longer than 0.2 m."""
        print('### Candidate line info #############################')
        print('  Good  flg=[sol, nmlc, ground, tip, ends, unoccl, final]')
        print('-----------------------------------------------------')
        for line in line_list:
            # if not (line.is_solitary and not line.is_multiline_cluster and line.is_grounded):
            if line.length < 0.200:
                continue
            flags = [
                line.is_solitary,
                not line.is_multiline_cluster,
                line.is_grounded,
                line.tip_is_contained,
                line.is_contained,
                not line.is_occluded,
                line.is_final]
            print('  {} flg={} len={:.3f} dist={:.3f} tilt={:.1f}deg'.format(line.is_good, flags, line.length, line.xy_distance, math.acos(line.direction[2]) / pi * 180.))
        print('#####################################################')

    def get_line_fit_geometry(self, line_list):
        """Build one display cylinder per fitted line (attached via add_mesh)."""
        mesh_cylinders = []
        for line in line_list:
            # if line.is_ignored:
            #     continue
            line_color = CMAP_CLUSTER(line.cluster_id)[:3]
            if line.length <= 0.0:
                print('`line.length` has non-positive value: {}'.format(line.length))
                continue
            mesh_cylinder = o3d.geometry.TriangleMesh.create_cylinder(radius=0.005, height=line.length)
            mesh_cylinder.compute_vertex_normals()
            mesh_cylinder.paint_uniform_color(line_color)
            mesh_cylinder.translate([0., 0., line.length * 0.5])
            mesh_cylinder.transform(line.tfm_mtx)
            mesh_cylinders.append(mesh_cylinder)
            line.add_mesh(mesh_cylinder)
            if False:
                mesh_sphere = o3d.geometry.TriangleMesh.create_sphere(radius=0.010)
                mesh_sphere.compute_vertex_normals()
                mesh_sphere.paint_uniform_color(line_color)
                mesh_sphere.transform(line.tfm_mtx)
                mesh_cylinders.append(mesh_sphere)
                line.add_mesh(mesh_sphere)
        return mesh_cylinders
| [
"numpy.clip",
"math.acos",
"math.cos",
"numpy.array",
"numpy.argsort",
"cv2.ellipse",
"numpy.linalg.norm",
"numpy.arange",
"open3d.geometry.TriangleMesh.create_cylinder",
"numpy.asarray",
"numpy.max",
"numpy.exp",
"numpy.stack",
"numpy.linspace",
"numpy.dot",
"open3d.geometry.TriangleM... | [((897, 918), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (909, 918), True, 'import matplotlib.pyplot as plt\n'), ((3106, 3144), 'numpy.array', 'np.array', (['pcd.points'], {'dtype': 'np.float32'}), '(pcd.points, dtype=np.float32)\n', (3114, 3144), True, 'import numpy as np\n'), ((3281, 3319), 'numpy.asarray', 'np.asarray', (['pcd_grounds_list[2].points'], {}), '(pcd_grounds_list[2].points)\n', (3291, 3319), True, 'import numpy as np\n'), ((3520, 3550), 'numpy.asarray', 'np.asarray', (['pcd_grounds.points'], {}), '(pcd_grounds.points)\n', (3530, 3550), True, 'import numpy as np\n'), ((4830, 4877), 'numpy.arange', 'np.arange', (['depth_img.shape[0]'], {'dtype': 'np.float32'}), '(depth_img.shape[0], dtype=np.float32)\n', (4839, 4877), True, 'import numpy as np\n'), ((4894, 4941), 'numpy.arange', 'np.arange', (['depth_img.shape[1]'], {'dtype': 'np.float32'}), '(depth_img.shape[1], dtype=np.float32)\n', (4903, 4941), True, 'import numpy as np\n'), ((4965, 4990), 'numpy.meshgrid', 'np.meshgrid', (['arr_x', 'arr_y'], {}), '(arr_x, arr_y)\n', (4976, 4990), True, 'import numpy as np\n'), ((5410, 5459), 'numpy.stack', 'np.stack', (['[filled_x, filled_y, filled_z]'], {'axis': '(-1)'}), '([filled_x, filled_y, filled_z], axis=-1)\n', (5418, 5459), True, 'import numpy as np\n'), ((6299, 6339), 'numpy.stack', 'np.stack', (['[val_x, val_y, depth]'], {'axis': '(-1)'}), '([val_x, val_y, depth], axis=-1)\n', (6307, 6339), True, 'import numpy as np\n'), ((6652, 6677), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (6675, 6677), True, 'import open3d as o3d\n'), ((7546, 7574), 'numpy.argmax', 'np.argmax', (['x_binflg'], {'axis': '(-1)'}), '(x_binflg, axis=-1)\n', (7555, 7574), True, 'import numpy as np\n'), ((7594, 7622), 'numpy.argmax', 'np.argmax', (['y_binflg'], {'axis': '(-1)'}), '(y_binflg, axis=-1)\n', (7603, 7622), True, 'import numpy as np\n'), ((7959, 8006), 'numpy.zeros', 
'np.zeros', (['[x_nbin, y_nbin, 4]'], {'dtype': 'np.float64'}), '([x_nbin, y_nbin, 4], dtype=np.float64)\n', (7967, 8006), True, 'import numpy as np\n'), ((10319, 10388), 'numpy.linalg.norm', 'np.linalg.norm', (['(filled_xyzw[:, :, :2] - empty_xyzw[:, :, :2])'], {'axis': '(-1)'}), '(filled_xyzw[:, :, :2] - empty_xyzw[:, :, :2], axis=-1)\n', (10333, 10388), True, 'import numpy as np\n'), ((11814, 11833), 'numpy.array', 'np.array', (['triangles'], {}), '(triangles)\n', (11822, 11833), True, 'import numpy as np\n'), ((11953, 11980), 'open3d.geometry.TriangleMesh', 'o3d.geometry.TriangleMesh', ([], {}), '()\n', (11978, 11980), True, 'import open3d as o3d\n'), ((12005, 12041), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['vertices'], {}), '(vertices)\n', (12031, 12041), True, 'import open3d as o3d\n'), ((12067, 12104), 'open3d.utility.Vector3iVector', 'o3d.utility.Vector3iVector', (['triangles'], {}), '(triangles)\n', (12093, 12104), True, 'import open3d as o3d\n'), ((13067, 13108), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['colors[:, :3]'], {}), '(colors[:, :3])\n', (13093, 13108), True, 'import open3d as o3d\n'), ((14715, 14742), 'math.cos', 'math.cos', (['(15.0 * pi / 180.0)'], {}), '(15.0 * pi / 180.0)\n', (14723, 14742), False, 'import math\n'), ((14792, 14842), 'numpy.array', 'np.array', (['[line.position[2] for line in line_list]'], {}), '([line.position[2] for line in line_list])\n', (14800, 14842), True, 'import numpy as np\n'), ((14864, 14883), 'numpy.argsort', 'np.argsort', (['z_array'], {}), '(z_array)\n', (14874, 14883), True, 'import numpy as np\n'), ((16374, 16401), 'math.cos', 'math.cos', (['(45.0 * pi / 180.0)'], {}), '(45.0 * pi / 180.0)\n', (16382, 16401), False, 'import math\n'), ((18265, 18311), 'numpy.zeros', 'np.zeros', (['self.depth_img.shape'], {'dtype': 'np.uint8'}), '(self.depth_img.shape, dtype=np.uint8)\n', (18273, 18311), True, 'import numpy as np\n'), ((20348, 20391), 'math.cos', 'math.cos', 
(['(self.target_max_tilt * pi / 180.0)'], {}), '(self.target_max_tilt * pi / 180.0)\n', (20356, 20391), False, 'import math\n'), ((20925, 20968), 'math.cos', 'math.cos', (['(self.target_max_tilt * pi / 180.0)'], {}), '(self.target_max_tilt * pi / 180.0)\n', (20933, 20968), False, 'import math\n'), ((21459, 21485), 'numpy.max', 'np.max', (['grid_xyzw[:, :, 2]'], {}), '(grid_xyzw[:, :, 2])\n', (21465, 21485), True, 'import numpy as np\n'), ((9205, 9240), 'numpy.linalg.norm', 'np.linalg.norm', (['pcl[:, :2]'], {'axis': '(-1)'}), '(pcl[:, :2], axis=-1)\n', (9219, 9240), True, 'import numpy as np\n'), ((10498, 10527), 'numpy.argmin', 'np.argmin', (['dist_array'], {'axis': '(0)'}), '(dist_array, axis=0)\n', (10507, 10527), True, 'import numpy as np\n'), ((10956, 10976), 'numpy.exp', 'np.exp', (['(-dsq * coeff)'], {}), '(-dsq * coeff)\n', (10962, 10976), True, 'import numpy as np\n'), ((11883, 11938), 'numpy.concatenate', 'np.concatenate', (['[triangles, triangles[:, ::-1]]'], {'axis': '(0)'}), '([triangles, triangles[:, ::-1]], axis=0)\n', (11897, 11938), True, 'import numpy as np\n'), ((12790, 12817), 'numpy.argsort', 'np.argsort', (['n_points_in_cls'], {}), '(n_points_in_cls)\n', (12800, 12817), True, 'import numpy as np\n'), ((12841, 12874), 'numpy.ones', 'np.ones', (['n_points'], {'dtype': 'np.int32'}), '(n_points, dtype=np.int32)\n', (12848, 12874), True, 'import numpy as np\n'), ((16835, 16891), 'numpy.linalg.norm', 'np.linalg.norm', (['flatten_grid_local_frame[:, :2]'], {'axis': '(-1)'}), '(flatten_grid_local_frame[:, :2], axis=-1)\n', (16849, 16891), True, 'import numpy as np\n'), ((16975, 17036), 'numpy.clip', 'np.clip', (['((MAX_R - flatten_grid_r[idx_sort]) / MAX_R)', '(0.0)', '(1.0)'], {}), '((MAX_R - flatten_grid_r[idx_sort]) / MAX_R, 0.0, 1.0)\n', (16982, 17036), True, 'import numpy as np\n'), ((23325, 23400), 'open3d.geometry.TriangleMesh.create_cylinder', 'o3d.geometry.TriangleMesh.create_cylinder', ([], {'radius': '(0.005)', 'height': 'line.length'}), 
'(radius=0.005, height=line.length)\n', (23366, 23400), True, 'import open3d as o3d\n'), ((7024, 7082), 'numpy.linspace', 'np.linspace', (['X_MIN', '(X_MIN + GRID_SIZE * x_nbin)', '(x_nbin + 1)'], {}), '(X_MIN, X_MIN + GRID_SIZE * x_nbin, x_nbin + 1)\n', (7035, 7082), True, 'import numpy as np\n'), ((7115, 7173), 'numpy.linspace', 'np.linspace', (['Y_MIN', '(Y_MIN + GRID_SIZE * y_nbin)', '(y_nbin + 1)'], {}), '(Y_MIN, Y_MIN + GRID_SIZE * y_nbin, y_nbin + 1)\n', (7126, 7173), True, 'import numpy as np\n'), ((15283, 15307), 'numpy.linalg.norm', 'np.linalg.norm', (['to_line2'], {}), '(to_line2)\n', (15297, 15307), True, 'import numpy as np\n'), ((15395, 15431), 'numpy.dot', 'np.dot', (['dir_to_line2', 'line.direction'], {}), '(dir_to_line2, line.direction)\n', (15401, 15431), True, 'import numpy as np\n'), ((16914, 16940), 'numpy.argsort', 'np.argsort', (['flatten_grid_r'], {}), '(flatten_grid_r)\n', (16924, 16940), True, 'import numpy as np\n'), ((17169, 17222), 'numpy.dot', 'np.dot', (['flatten_grid_local_frame[idx_sort, 2]', 'weight'], {}), '(flatten_grid_local_frame[idx_sort, 2], weight)\n', (17175, 17222), True, 'import numpy as np\n'), ((19295, 19489), 'cv2.ellipse', 'cv2.ellipse', (['sector_mask', '(center[0], center[1])', '(sector_radius, sector_radius)', 'sector_angle', '(-OPENING_ANGLE * 0.5)', '(+OPENING_ANGLE * 0.5)', 'SECTOR_COLOR', '(SEEK_MARGIN[1] - SEEK_MARGIN[0])'], {}), '(sector_mask, (center[0], center[1]), (sector_radius,\n sector_radius), sector_angle, -OPENING_ANGLE * 0.5, +OPENING_ANGLE * \n 0.5, SECTOR_COLOR, SEEK_MARGIN[1] - SEEK_MARGIN[0])\n', (19306, 19489), False, 'import cv2\n'), ((23768, 23820), 'open3d.geometry.TriangleMesh.create_sphere', 'o3d.geometry.TriangleMesh.create_sphere', ([], {'radius': '(0.01)'}), '(radius=0.01)\n', (23807, 23820), True, 'import open3d as o3d\n'), ((12656, 12676), 'numpy.arange', 'np.arange', (['n_cluster'], {}), '(n_cluster)\n', (12665, 12676), True, 'import numpy as np\n'), ((18991, 19039), 'math.atan2', 
'math.atan2', (['root_to_tip_xy[1]', 'root_to_tip_xy[0]'], {}), '(root_to_tip_xy[1], root_to_tip_xy[0])\n', (19001, 19039), False, 'import math\n'), ((19093, 19123), 'numpy.linalg.norm', 'np.linalg.norm', (['root_to_tip_xy'], {}), '(root_to_tip_xy)\n', (19107, 19123), True, 'import numpy as np\n'), ((22809, 22837), 'math.acos', 'math.acos', (['line.direction[2]'], {}), '(line.direction[2])\n', (22818, 22837), False, 'import math\n')] |
import turtle as t
import time
import os #connected with practice10.py
path = 'F:\\Github\\Python_season2'
os.chdir(path)
from practice10 import Snake
from practice13 import Food
from practice14 import Score
screen = t.Screen()
screen.setup(width=600,height=600)
screen.bgcolor("black")
screen.title("My snake")
screen.tracer(0)
snake = Snake()
food = Food()
score = Score()
screen.listen()
screen.onkey(snake.up,"w")
screen.onkey(snake.down,"s")
screen.onkey(snake.right,"d")
screen.onkey(snake.left,"a")
game_on_off = True
while game_on_off:
snake.movesnake()
screen.update()
time.sleep(0.1)
if snake.distancefromfood(food) < 15:
food.refresh()
snake.extendsnake()
score.increase()
if snake.distancefromwall():
game_on_off = False
score.gameover()
if snake.hitwithtail():
game_on_off = False
score.gameover()
screen.exitonclick()
| [
"practice14.Score",
"practice10.Snake",
"time.sleep",
"os.chdir",
"practice13.Food",
"turtle.Screen"
] | [((117, 131), 'os.chdir', 'os.chdir', (['path'], {}), '(path)\n', (125, 131), False, 'import os\n'), ((227, 237), 'turtle.Screen', 't.Screen', ([], {}), '()\n', (235, 237), True, 'import turtle as t\n'), ((348, 355), 'practice10.Snake', 'Snake', ([], {}), '()\n', (353, 355), False, 'from practice10 import Snake\n'), ((363, 369), 'practice13.Food', 'Food', ([], {}), '()\n', (367, 369), False, 'from practice13 import Food\n'), ((378, 385), 'practice14.Score', 'Score', ([], {}), '()\n', (383, 385), False, 'from practice14 import Score\n'), ((602, 617), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (612, 617), False, 'import time\n')] |
from py2neo import Graph
grapher = Graph("bolt://localhost:7687", auth=("neo4j", "changeme"))
x = grapher.run("MATCH (a :Person) RETURN a.name, a.city, a.age").to_data_frame()
print(f"To_data_frame() :\n{x}")
| [
"py2neo.Graph"
] | [((36, 94), 'py2neo.Graph', 'Graph', (['"""bolt://localhost:7687"""'], {'auth': "('neo4j', 'changeme')"}), "('bolt://localhost:7687', auth=('neo4j', 'changeme'))\n", (41, 94), False, 'from py2neo import Graph\n')] |
import random
import os
import subprocess
import shutil
from google.cloud import storage, logging as glogging
from core.framework import levels
from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys
LEVEL_PATH = 'thunder/a2finance'
RESOURCE_PREFIX = 'a2'
LOG_NAME = 'transactions'
def create():
print("Level initialization started for: " + LEVEL_PATH)
# Create randomized nonce name to avoid namespace conflicts
nonce = str(random.randint(100000000000, 999999999999))
bucket_name = f'{RESOURCE_PREFIX}-bucket-{nonce}'
# Create ssh key
ssh_private_key, ssh_public_key = ssh_keys.generate_ssh_keypair()
ssh_username = "clouduser"
try:
# Construct git repo
repo_path = os.path.dirname(os.getcwd()) + "/temp-repository-" + nonce
create_repo_files(repo_path, ssh_private_key)
print("Level initialization finished for: " + LEVEL_PATH)
# Insert deployment
config_template_args = {'nonce': nonce,
'ssh_public_key': ssh_public_key,
'ssh_username': ssh_username}
template_files = [
'core/framework/templates/bucket_acl.jinja',
'core/framework/templates/ubuntu_vm.jinja',
'core/framework/templates/service_account.jinja',
'core/framework/templates/iam_policy.jinja']
deployments.insert(LEVEL_PATH, template_files=template_files,
config_template_args=config_template_args)
print("Level setup started for: " + LEVEL_PATH)
# Upload repository to bucket
gcstorage.upload_directory_recursive(repo_path, bucket_name)
# Create logs
secret_name = create_logs()
# Create service account key file
sa_key = iam.generate_service_account_key(f'{RESOURCE_PREFIX}-access')
print(f'Level creation complete for: {LEVEL_PATH}')
start_message = (
f'Use the compromised service account credentials stored in {RESOURCE_PREFIX}-access.json to find the credit card number of {secret_name}, '
'which is hidden somewhere in the GCP project')
levels.write_start_info(
LEVEL_PATH, start_message, file_name=f'{RESOURCE_PREFIX}-access.json', file_content=sa_key)
print(
f'Instruction for the level can be accessed at thunder-ctf.cloud/levels/{LEVEL_PATH}.html')
finally:
# If there is an error, make sure to delete the temporary repository before exiting
if os.path.exists(repo_path):
shutil.rmtree(repo_path)
def create_repo_files(repo_path, ssh_private_key):
cwd = os.getcwd()
os.makedirs(repo_path + '/function')
os.chdir(repo_path)
# Make dummy cloud function files
with open(repo_path+'/function/requirements.txt', 'w+') as f:
f.write('')
with open(repo_path+'/function/main.py', 'w+') as f:
f.write('')
# Add ssh key file
with open(repo_path+'/ssh_key', 'w+') as f:
f.write(ssh_private_key)
os.chmod('ssh_key', 0o700)
# Add files in first commit, then delete key in second
subprocess.call(['git', 'init', '--q'])
p = subprocess.Popen(['git', 'add', '*'])
p.communicate()
subprocess.call(['git', 'commit', '-q', '-m', 'added initial files', ])
os.remove('ssh_key')
p = subprocess.Popen(['git', 'add', '*'])
p.communicate()
subprocess.call(
['git', 'commit', '-q', '-m', 'Oops. Deleted accidental key upload'])
# Reset working directory
os.chdir(cwd)
def create_logs():
# Load list of framework names
with open(f'core/levels/{LEVEL_PATH}/first-names.txt') as f:
first_names = f.read().split('\n')
with open(f'core/levels/{LEVEL_PATH}/last-names.txt') as f:
last_names = f.read().split('\n')
# Randomly determine a name associated with the secret
secret_name = (first_names[random.randint(0, 199)] + '_' +
last_names[random.randint(0, 299)])
# Randomly determine an index of logging of the secret transaction
secret_position = random.randint(0, 99)
logger = glogging.Client().logger(LOG_NAME)
for i in range(0, 100):
# On secret index, log the transaction with the secret as the credit card number of the struct
if i == secret_position:
logger.log_struct(
{'name': secret_name,
'transaction-total': f'${random.randint(1,300)}.{random.randint(0,9)}{random.randint(0,9)}',
'credit-card-number': levels.make_secret(LEVEL_PATH, 16)})
else:
# For the other entities, determine a random name
name = (first_names[random.randint(0, 199)] + '_' +
last_names[random.randint(0, 299)])
# If the name is not equal to the secret name, log the transaction with a random credit card number
if not name == secret_name:
logger.log_struct(
{'name': name,
'transaction-total': f'${random.randint(1,150)}.{random.randint(1,99)}',
'credit-card-number': str(random.randint(1000000000000000, 9999999999999999))})
return secret_name.replace('_', ' ')
def destroy():
print('Level tear-down started for: ' + LEVEL_PATH)
# Delete logs
client = glogging.Client()
if len([entry for entry in client.list_entries(filter_=f'logName:{LOG_NAME}')]) > 0:
logger = client.logger(LOG_NAME)
logger.delete()
# Delete starting files
levels.delete_start_files()
print('Level tear-down finished for: ' + LEVEL_PATH)
# Delete deployment
deployments.delete()
| [
"core.framework.cloudhelpers.ssh_keys.generate_ssh_keypair",
"core.framework.cloudhelpers.iam.generate_service_account_key",
"google.cloud.logging.Client",
"core.framework.levels.write_start_info",
"core.framework.levels.make_secret",
"core.framework.cloudhelpers.deployments.insert",
"os.remove",
"os.... | [((622, 653), 'core.framework.cloudhelpers.ssh_keys.generate_ssh_keypair', 'ssh_keys.generate_ssh_keypair', ([], {}), '()\n', (651, 653), False, 'from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys\n'), ((2664, 2675), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2673, 2675), False, 'import os\n'), ((2680, 2716), 'os.makedirs', 'os.makedirs', (["(repo_path + '/function')"], {}), "(repo_path + '/function')\n", (2691, 2716), False, 'import os\n'), ((2721, 2740), 'os.chdir', 'os.chdir', (['repo_path'], {}), '(repo_path)\n', (2729, 2740), False, 'import os\n'), ((3050, 3074), 'os.chmod', 'os.chmod', (['"""ssh_key"""', '(448)'], {}), "('ssh_key', 448)\n", (3058, 3074), False, 'import os\n'), ((3140, 3179), 'subprocess.call', 'subprocess.call', (["['git', 'init', '--q']"], {}), "(['git', 'init', '--q'])\n", (3155, 3179), False, 'import subprocess\n'), ((3188, 3225), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'add', '*']"], {}), "(['git', 'add', '*'])\n", (3204, 3225), False, 'import subprocess\n'), ((3250, 3319), 'subprocess.call', 'subprocess.call', (["['git', 'commit', '-q', '-m', 'added initial files']"], {}), "(['git', 'commit', '-q', '-m', 'added initial files'])\n", (3265, 3319), False, 'import subprocess\n'), ((3326, 3346), 'os.remove', 'os.remove', (['"""ssh_key"""'], {}), "('ssh_key')\n", (3335, 3346), False, 'import os\n'), ((3355, 3392), 'subprocess.Popen', 'subprocess.Popen', (["['git', 'add', '*']"], {}), "(['git', 'add', '*'])\n", (3371, 3392), False, 'import subprocess\n'), ((3417, 3506), 'subprocess.call', 'subprocess.call', (["['git', 'commit', '-q', '-m', 'Oops. Deleted accidental key upload']"], {}), "(['git', 'commit', '-q', '-m',\n 'Oops. 
Deleted accidental key upload'])\n", (3432, 3506), False, 'import subprocess\n'), ((3546, 3559), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (3554, 3559), False, 'import os\n'), ((4100, 4121), 'random.randint', 'random.randint', (['(0)', '(99)'], {}), '(0, 99)\n', (4114, 4121), False, 'import random\n'), ((5348, 5365), 'google.cloud.logging.Client', 'glogging.Client', ([], {}), '()\n', (5363, 5365), True, 'from google.cloud import storage, logging as glogging\n'), ((5552, 5579), 'core.framework.levels.delete_start_files', 'levels.delete_start_files', ([], {}), '()\n', (5577, 5579), False, 'from core.framework import levels\n'), ((5666, 5686), 'core.framework.cloudhelpers.deployments.delete', 'deployments.delete', ([], {}), '()\n', (5684, 5686), False, 'from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys\n'), ((464, 506), 'random.randint', 'random.randint', (['(100000000000)', '(999999999999)'], {}), '(100000000000, 999999999999)\n', (478, 506), False, 'import random\n'), ((1389, 1497), 'core.framework.cloudhelpers.deployments.insert', 'deployments.insert', (['LEVEL_PATH'], {'template_files': 'template_files', 'config_template_args': 'config_template_args'}), '(LEVEL_PATH, template_files=template_files,\n config_template_args=config_template_args)\n', (1407, 1497), False, 'from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys\n'), ((1624, 1684), 'core.framework.cloudhelpers.gcstorage.upload_directory_recursive', 'gcstorage.upload_directory_recursive', (['repo_path', 'bucket_name'], {}), '(repo_path, bucket_name)\n', (1660, 1684), False, 'from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys\n'), ((1804, 1865), 'core.framework.cloudhelpers.iam.generate_service_account_key', 'iam.generate_service_account_key', (['f"""{RESOURCE_PREFIX}-access"""'], {}), "(f'{RESOURCE_PREFIX}-access')\n", (1836, 1865), False, 'from core.framework.cloudhelpers import deployments, iam, gcstorage, ssh_keys\n'), 
((2173, 2293), 'core.framework.levels.write_start_info', 'levels.write_start_info', (['LEVEL_PATH', 'start_message'], {'file_name': 'f"""{RESOURCE_PREFIX}-access.json"""', 'file_content': 'sa_key'}), "(LEVEL_PATH, start_message, file_name=\n f'{RESOURCE_PREFIX}-access.json', file_content=sa_key)\n", (2196, 2293), False, 'from core.framework import levels\n'), ((2537, 2562), 'os.path.exists', 'os.path.exists', (['repo_path'], {}), '(repo_path)\n', (2551, 2562), False, 'import os\n'), ((2576, 2600), 'shutil.rmtree', 'shutil.rmtree', (['repo_path'], {}), '(repo_path)\n', (2589, 2600), False, 'import shutil\n'), ((3982, 4004), 'random.randint', 'random.randint', (['(0)', '(299)'], {}), '(0, 299)\n', (3996, 4004), False, 'import random\n'), ((4136, 4153), 'google.cloud.logging.Client', 'glogging.Client', ([], {}), '()\n', (4151, 4153), True, 'from google.cloud import storage, logging as glogging\n'), ((3920, 3942), 'random.randint', 'random.randint', (['(0)', '(199)'], {}), '(0, 199)\n', (3934, 3942), False, 'import random\n'), ((760, 771), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (769, 771), False, 'import os\n'), ((4553, 4587), 'core.framework.levels.make_secret', 'levels.make_secret', (['LEVEL_PATH', '(16)'], {}), '(LEVEL_PATH, 16)\n', (4571, 4587), False, 'from core.framework import levels\n'), ((4761, 4783), 'random.randint', 'random.randint', (['(0)', '(299)'], {}), '(0, 299)\n', (4775, 4783), False, 'import random\n'), ((4698, 4720), 'random.randint', 'random.randint', (['(0)', '(199)'], {}), '(0, 199)\n', (4712, 4720), False, 'import random\n'), ((4446, 4468), 'random.randint', 'random.randint', (['(1)', '(300)'], {}), '(1, 300)\n', (4460, 4468), False, 'import random\n'), ((4470, 4490), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (4484, 4490), False, 'import random\n'), ((4491, 4511), 'random.randint', 'random.randint', (['(0)', '(9)'], {}), '(0, 9)\n', (4505, 4511), False, 'import random\n'), ((5149, 5199), 'random.randint', 
'random.randint', (['(1000000000000000)', '(9999999999999999)'], {}), '(1000000000000000, 9999999999999999)\n', (5163, 5199), False, 'import random\n'), ((5054, 5076), 'random.randint', 'random.randint', (['(1)', '(150)'], {}), '(1, 150)\n', (5068, 5076), False, 'import random\n'), ((5078, 5099), 'random.randint', 'random.randint', (['(1)', '(99)'], {}), '(1, 99)\n', (5092, 5099), False, 'import random\n')] |
import cv2
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
import numpy as np
import time
import math
DEBUG = True
FACTOR = 2
RESO_X = int(576 / FACTOR)
RESO_Y = int(640 / FACTOR)
CONF_VAL = 0
THRESHOLD = 0
UPPER_BOUND = 230
LOWER_BOUND = 150
def get_file_index(filename):
index = int(filename.split('.')[0])
return index
def create_windows():
cv2.namedWindow("RGB", cv2.WINDOW_NORMAL)
cv2.namedWindow("Depth", cv2.WINDOW_NORMAL)
cv2.resizeWindow("RGB", RESO_X, RESO_Y)
cv2.resizeWindow("Depth", RESO_X, RESO_Y)
def load_yolo(model_folder):
# load the COCO class labels our YOLO model was trained on
labelsPath = model_folder + "coco.names"
LABELS = open(labelsPath).read().strip().split("\n")
weightsPath = model_folder + "yolov3-spp.weights"
configPath = model_folder + "yolov3-spp.cfg"
print("[INFO] loading YOLO from disk...")
if DEBUG:
print("label: {}\nweights: {}\nconfig: {}".format(
labelsPath, weightsPath, configPath))
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
return net, ln, LABELS
def process_frame(frame, net, ln, LABELS):
# get frame height and width
(H, W) = frame.shape[:2]
# construct a blob from the input frame and then perform a forward
# pass of the YOLO object detector, giving us our bounding boxes
# and associated probabilities
blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416),
swapRB=True, crop=False)
net.setInput(blob)
start_time = time.time()
layerOutputs = net.forward(ln)
duration = time.time() - start_time
if DEBUG:
print("[INFO] processed within {}s".format(round(duration, 2)))
# initialize our lists of detected bounding boxes, confidences,
# and class IDs, respectively
boxes = []
confidences = []
# loop over each of the layer outputs
for output in layerOutputs:
# loop over each of the detections
for detection in output:
# extract the class ID and confidence (i.e., probability)
# of the current object detection
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
# filter out weak predictions by ensuring the detected
# probability is greater than the minimum probability
if confidence > CONF_VAL and LABELS[classID] == "cell phone":
# scale the bounding box coordinates back relative to
# the size of the image, keeping in mind that YOLO
# actually returns the center (x, y)-coordinates of
# the bounding box followed by the boxes' width and
# height
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
# use the center (x, y)-coordinates to derive the top
# and and left corner of the bounding box
x = int(centerX - (width / 2))
y = int(centerY - (height / 2))
# update our list of bounding box coordinates and confidences
boxes.append([x, y, int(width), int(height)])
confidences.append(float(confidence))
return boxes, confidences
def detect_object(experiment_name, save_images=False):
rgb_folder = "Experiment_Frames/" + experiment_name + "/rgb_frames/"
depth_folder = "Experiment_Frames/" + experiment_name + "/depth_frames/"
model_folder = "yolo-coco/"
output_folder = "Experiment_Output/" + experiment_name + "/"
# make the folders if not exist
if not exists(rgb_folder):
makedirs(rgb_folder)
if not exists(depth_folder):
makedirs(depth_folder)
if not exists(output_folder):
makedirs(output_folder)
if not exists(output_folder + 'depth/'):
makedirs(output_folder + 'depth/')
if not exists(output_folder + 'rgb/'):
makedirs(output_folder + 'rgb/')
# load rgb images
print("[INFO] loading rgb images from disk...")
img_files = [f for f in listdir(rgb_folder) if isfile(join(rgb_folder, f))]
img_files = sorted(img_files, key=get_file_index)
# load image net
net, ln, LABELS = load_yolo(model_folder)
out_file = open(output_folder + "/" + "positions.txt", "w")
# process each frame
for img_file in img_files:
if DEBUG:
print("[INFO] processing image {}".format(img_file))
# read rgb frame
frame = cv2.imread(rgb_folder + "/" + img_file, cv2.IMREAD_COLOR)
# read depth frame
depth = cv2.imread(depth_folder + "/" + img_file)
# rotate 90 degree for phone images
# frame = cv.rotate(frame, rotateCode=cv.ROTATE_90_CLOCKWISE)
# process using YOLO
boxes, confidences = process_frame(frame, net, ln, LABELS)
# suppress boxes
idxs = cv2.dnn.NMSBoxes(boxes, confidences, CONF_VAL, THRESHOLD)
# ensure at least one detection exists
if len(idxs) > 0:
# get first box
i = idxs.flatten()[0]
# extract the bounding box coordinates
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
# draw a bounding box rectangle and label on the frame
cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
cv2.rectangle(depth, (x, y), (x + w, y + h), (255, 0, 0), 2)
if save_images:
# display and save image
cv2.imshow("RGB", frame)
cv2.imwrite(output_folder +
"rgb/" + img_file, frame)
cv2.imshow("Depth", depth)
cv2.imwrite(output_folder +
"depth/" + img_file, depth)
# get centroid of the bouding box
centroid_x = x + int(w / 2)
centroid_y = y + int(h / 2)
# get average depth within the bounding box
depth_pixels = depth[x: x+w, y: y+h, 0]
depth_pixels = depth_pixels.flatten()
mask = (depth_pixels > LOWER_BOUND) & (depth_pixels < UPPER_BOUND)
depth_pixels = depth_pixels[mask]
pixel_mean = np.mean(depth_pixels)
# save timestamp and position
if not math.isnan(pixel_mean):
timestamp = img_file.split('.')[0]
out_file.write("{},{},{},{}\n".format(
timestamp, centroid_x, centroid_y, round(pixel_mean, 4)
))
if DEBUG:
print("point is ({}, {}, {})".format(
centroid_x, centroid_y, round(pixel_mean, 4)))
key = cv2.waitKey(50)
if key != -1:
cv2.destroyAllWindows()
break
out_file.close()
| [
"cv2.rectangle",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.dnn.NMSBoxes",
"os.path.exists",
"numpy.mean",
"cv2.resizeWindow",
"os.listdir",
"cv2.waitKey",
"cv2.dnn.blobFromImage",
"numpy.argmax",
"time.time",
"cv2.namedWindow",
"cv2.imread",
"cv2.imwrite",
"os.makedi... | [((400, 441), 'cv2.namedWindow', 'cv2.namedWindow', (['"""RGB"""', 'cv2.WINDOW_NORMAL'], {}), "('RGB', cv2.WINDOW_NORMAL)\n", (415, 441), False, 'import cv2\n'), ((446, 489), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Depth"""', 'cv2.WINDOW_NORMAL'], {}), "('Depth', cv2.WINDOW_NORMAL)\n", (461, 489), False, 'import cv2\n'), ((494, 533), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""RGB"""', 'RESO_X', 'RESO_Y'], {}), "('RGB', RESO_X, RESO_Y)\n", (510, 533), False, 'import cv2\n'), ((538, 579), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""Depth"""', 'RESO_X', 'RESO_Y'], {}), "('Depth', RESO_X, RESO_Y)\n", (554, 579), False, 'import cv2\n'), ((1058, 1109), 'cv2.dnn.readNetFromDarknet', 'cv2.dnn.readNetFromDarknet', (['configPath', 'weightsPath'], {}), '(configPath, weightsPath)\n', (1084, 1109), False, 'import cv2\n'), ((1523, 1599), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 / 255.0)', '(416, 416)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n', (1544, 1599), False, 'import cv2\n'), ((1673, 1684), 'time.time', 'time.time', ([], {}), '()\n', (1682, 1684), False, 'import time\n'), ((1735, 1746), 'time.time', 'time.time', ([], {}), '()\n', (1744, 1746), False, 'import time\n'), ((3807, 3825), 'os.path.exists', 'exists', (['rgb_folder'], {}), '(rgb_folder)\n', (3813, 3825), False, 'from os.path import isfile, join, exists\n'), ((3835, 3855), 'os.makedirs', 'makedirs', (['rgb_folder'], {}), '(rgb_folder)\n', (3843, 3855), False, 'from os import listdir, makedirs\n'), ((3867, 3887), 'os.path.exists', 'exists', (['depth_folder'], {}), '(depth_folder)\n', (3873, 3887), False, 'from os.path import isfile, join, exists\n'), ((3897, 3919), 'os.makedirs', 'makedirs', (['depth_folder'], {}), '(depth_folder)\n', (3905, 3919), False, 'from os import listdir, makedirs\n'), ((3931, 3952), 'os.path.exists', 'exists', (['output_folder'], {}), '(output_folder)\n', (3937, 3952), False, 'from 
os.path import isfile, join, exists\n'), ((3962, 3985), 'os.makedirs', 'makedirs', (['output_folder'], {}), '(output_folder)\n', (3970, 3985), False, 'from os import listdir, makedirs\n'), ((3997, 4029), 'os.path.exists', 'exists', (["(output_folder + 'depth/')"], {}), "(output_folder + 'depth/')\n", (4003, 4029), False, 'from os.path import isfile, join, exists\n'), ((4039, 4073), 'os.makedirs', 'makedirs', (["(output_folder + 'depth/')"], {}), "(output_folder + 'depth/')\n", (4047, 4073), False, 'from os import listdir, makedirs\n'), ((4085, 4115), 'os.path.exists', 'exists', (["(output_folder + 'rgb/')"], {}), "(output_folder + 'rgb/')\n", (4091, 4115), False, 'from os.path import isfile, join, exists\n'), ((4125, 4157), 'os.makedirs', 'makedirs', (["(output_folder + 'rgb/')"], {}), "(output_folder + 'rgb/')\n", (4133, 4157), False, 'from os import listdir, makedirs\n'), ((4682, 4739), 'cv2.imread', 'cv2.imread', (["(rgb_folder + '/' + img_file)", 'cv2.IMREAD_COLOR'], {}), "(rgb_folder + '/' + img_file, cv2.IMREAD_COLOR)\n", (4692, 4739), False, 'import cv2\n'), ((4784, 4825), 'cv2.imread', 'cv2.imread', (["(depth_folder + '/' + img_file)"], {}), "(depth_folder + '/' + img_file)\n", (4794, 4825), False, 'import cv2\n'), ((5078, 5135), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'CONF_VAL', 'THRESHOLD'], {}), '(boxes, confidences, CONF_VAL, THRESHOLD)\n', (5094, 5135), False, 'import cv2\n'), ((6888, 6903), 'cv2.waitKey', 'cv2.waitKey', (['(50)'], {}), '(50)\n', (6899, 6903), False, 'import cv2\n'), ((2309, 2326), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (2318, 2326), True, 'import numpy as np\n'), ((4261, 4280), 'os.listdir', 'listdir', (['rgb_folder'], {}), '(rgb_folder)\n', (4268, 4280), False, 'from os import listdir, makedirs\n'), ((5499, 5559), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (5512, 5559), False, 
'import cv2\n'), ((5572, 5632), 'cv2.rectangle', 'cv2.rectangle', (['depth', '(x, y)', '(x + w, y + h)', '(255, 0, 0)', '(2)'], {}), '(depth, (x, y), (x + w, y + h), (255, 0, 0), 2)\n', (5585, 5632), False, 'import cv2\n'), ((6420, 6441), 'numpy.mean', 'np.mean', (['depth_pixels'], {}), '(depth_pixels)\n', (6427, 6441), True, 'import numpy as np\n'), ((6938, 6961), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (6959, 6961), False, 'import cv2\n'), ((4291, 4310), 'os.path.join', 'join', (['rgb_folder', 'f'], {}), '(rgb_folder, f)\n', (4295, 4310), False, 'from os.path import isfile, join, exists\n'), ((5719, 5743), 'cv2.imshow', 'cv2.imshow', (['"""RGB"""', 'frame'], {}), "('RGB', frame)\n", (5729, 5743), False, 'import cv2\n'), ((5760, 5813), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + 'rgb/' + img_file)", 'frame'], {}), "(output_folder + 'rgb/' + img_file, frame)\n", (5771, 5813), False, 'import cv2\n'), ((5858, 5884), 'cv2.imshow', 'cv2.imshow', (['"""Depth"""', 'depth'], {}), "('Depth', depth)\n", (5868, 5884), False, 'import cv2\n'), ((5901, 5956), 'cv2.imwrite', 'cv2.imwrite', (["(output_folder + 'depth/' + img_file)", 'depth'], {}), "(output_folder + 'depth/' + img_file, depth)\n", (5912, 5956), False, 'import cv2\n'), ((6504, 6526), 'math.isnan', 'math.isnan', (['pixel_mean'], {}), '(pixel_mean)\n', (6514, 6526), False, 'import math\n'), ((2913, 2935), 'numpy.array', 'np.array', (['[W, H, W, H]'], {}), '([W, H, W, H])\n', (2921, 2935), True, 'import numpy as np\n')] |
import configparser
import unittest
from pathlib import Path
from TM1py.Objects import Dimension, Hierarchy, Element
from TM1py.Objects import ElementAttribute
from TM1py.Services import TM1Service
class TestDimensionService(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Establishes a connection to TM1 and creates objects to use across all tests
"""
# Connection to TM1
cls.config = configparser.ConfigParser()
cls.config.read(Path(__file__).parent.joinpath('config.ini'))
cls.tm1 = TM1Service(**cls.config['tm1srv01'])
cls.prefix = "TM1py_Tests_Dimension_"
cls.dimension_name = cls.prefix + "Some_Dimension"
cls.hierarchy_name = cls.dimension_name
cls.dimension_name_with_multi_hierarchy = cls.prefix + "Dimension_With_Multiple_Hierarchies"
@classmethod
def setUp(cls):
cls.create_dimension()
@classmethod
def tearDown(cls):
cls.delete_dimensions()
@classmethod
def create_dimension(cls):
root_element = Element(name='Root', element_type='Consolidated')
elements = [root_element]
edges = {}
for i in range(1, 1001):
element_name = "Element {}".format(i)
elements.append(Element(name=element_name, element_type='Numeric'))
edges[('Root', element_name)] = i
element_attributes = [
ElementAttribute(name='Name Long', attribute_type='Alias'),
ElementAttribute(name='Name Short', attribute_type='Alias')]
h = Hierarchy(
name=cls.dimension_name,
dimension_name=cls.dimension_name,
elements=elements,
edges=edges,
element_attributes=element_attributes)
d = Dimension(name=cls.dimension_name, hierarchies=[h])
cls.tm1.dimensions.create(d)
@classmethod
def create_dimension_with_multiple_hierarchies(cls):
dimension = Dimension(cls.dimension_name_with_multi_hierarchy)
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy1",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy2",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
dimension.add_hierarchy(
Hierarchy(
name="Hierarchy3",
dimension_name=dimension.name,
elements=[Element("Elem1", "Numeric"), Element("Elem2", "Numeric"), Element("Elem3", "Numeric")]))
cls.tm1.dimensions.create(dimension)
@classmethod
def delete_dimensions(cls):
cls.tm1.dimensions.delete(cls.dimension_name)
if cls.tm1.dimensions.exists(cls.dimension_name_with_multi_hierarchy):
cls.tm1.dimensions.delete(cls.dimension_name_with_multi_hierarchy)
def test_get_dimension(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertIsInstance(d, Dimension)
self.assertEqual(d.name, self.dimension_name)
h = d.hierarchies[0]
self.assertIsInstance(h, Hierarchy)
self.assertEqual(h.name, self.dimension_name)
self.assertEqual(len(h.elements), 1001)
self.assertEqual(len(h.edges), 1000)
def test_dimension__get__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
h = d[self.dimension_name]
self.assertIsInstance(h, Hierarchy)
self.assertEqual(h.name, self.dimension_name)
self.assertEqual(len(h.elements), 1001)
self.assertEqual(len(h.edges), 1000)
def test_dimension__contains__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertIn(self.dimension_name, d)
def test_dimension__iter__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
first_hierarchy = next(h for h in d)
self.assertIsInstance(first_hierarchy, Hierarchy)
self.assertEqual(first_hierarchy.name, self.dimension_name)
self.assertEqual(len(first_hierarchy.elements), 1001)
self.assertEqual(len(first_hierarchy.edges), 1000)
def test_dimension__len__(self):
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
self.assertEqual(len(d), 1)
def test_update_dimension(self):
# get dimension from tm1
d = self.tm1.dimensions.get(dimension_name=self.dimension_name)
# create element objects
elements = [Element(name='e1', element_type='Consolidated'),
Element(name='e2', element_type='Numeric'),
Element(name='e3', element_type='Numeric'),
Element(name='e4', element_type='Numeric')]
# create edge objects
edges = {
('e1', 'e2'): 1,
('e1', 'e3'): 1,
('e1', 'e4'): 1}
# create the element_attributes objects
element_attributes = [ElementAttribute(name='Name Long', attribute_type='Alias'),
ElementAttribute(name='Name Short', attribute_type='Alias'),
ElementAttribute(name='Currency', attribute_type='String')]
# create hierarchy object
hierarchy = Hierarchy(name=self.dimension_name, dimension_name=self.dimension_name, elements=elements,
element_attributes=element_attributes, edges=edges)
# replace existing hierarchy with new hierarchy
d.remove_hierarchy(self.dimension_name)
d.add_hierarchy(hierarchy)
# update dimension in TM1
self.tm1.dimensions.update(d)
# Test
dimension = self.tm1.dimensions.get(self.dimension_name)
self.assertEqual(len(dimension.hierarchies[0].elements), len(elements))
def test_update_dimension_remove_hierarchy(self):
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(self.dimension_name_with_multi_hierarchy)
self.assertEqual(dimension.hierarchy_names, ['Hierarchy1', 'Hierarchy2', 'Hierarchy3', 'Leaves'])
dimension.remove_hierarchy('Hierarchy2')
dimension.remove_hierarchy('Hierarchy3')
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(self.dimension_name_with_multi_hierarchy)
self.assertEqual(dimension.hierarchy_names, ['Hierarchy1', 'Leaves'])
def test_get_all_names(self):
self.assertIn(self.dimension_name, self.tm1.dimensions.get_all_names())
def test_get_number_of_dimensions(self):
number_of_dimensions = self.tm1.dimensions.get_number_of_dimensions()
self.assertIsInstance(number_of_dimensions, int)
def test_execute_mdx(self):
mdx = "{TM1SubsetAll(" + self.dimension_name + ")}"
elements = self.tm1.dimensions.execute_mdx(self.dimension_name, mdx)
self.assertEqual(len(elements), 1001)
mdx = "{ Tm1FilterByLevel ( {TM1SubsetAll(" + self.dimension_name + ")}, 0) }"
elements = self.tm1.dimensions.execute_mdx(self.dimension_name, mdx)
self.assertEqual(len(elements), 1000)
for element in elements:
self.assertTrue(element.startswith("Element"))
def test_hierarchy_names(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(
set(dimension.hierarchy_names),
{"Leaves", "Hierarchy1", "Hierarchy2", "Hierarchy3"})
dimension.remove_hierarchy("Hierarchy1")
self.assertEqual(
set(dimension.hierarchy_names),
{"Leaves", "Hierarchy2", "Hierarchy3"})
def test_remove_leaves_hierarchy(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
try:
dimension.remove_hierarchy("LEAVES")
raise Exception("Did not throw expected Exception")
except ValueError:
pass
def test_remove_hierarchy(self):
# create dimension with two Hierarchies
self.create_dimension_with_multiple_hierarchies()
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 4)
self.assertIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
dimension.remove_hierarchy("Hierarchy1")
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 3)
self.assertNotIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
dimension.remove_hierarchy("H i e r a r c h y 3".upper())
self.tm1.dimensions.update(dimension)
dimension = self.tm1.dimensions.get(dimension_name=self.dimension_name_with_multi_hierarchy)
self.assertEqual(len(dimension.hierarchies), 2)
self.assertNotIn("Hierarchy1", dimension)
self.assertIn("Hierarchy2", dimension)
self.assertNotIn("Hierarchy3", dimension)
self.assertIn("Leaves", dimension)
def test_rename_dimension(self):
original_dimension_name = self.prefix + "Original_Dimension"
renamed_dimension_name = self.prefix + "Renamed_Dimension"
# if dimensions exist in TM1.. delete them
for dim_name in (original_dimension_name, renamed_dimension_name):
if self.tm1.dimensions.exists(dim_name):
self.tm1.dimensions.delete(dimension_name=dim_name)
# create dimension
original_dimension = Dimension(original_dimension_name)
hierarchy = Hierarchy(name=original_dimension_name, dimension_name=original_dimension_name)
hierarchy.add_element(element_name="Total", element_type="Consolidated")
hierarchy.add_element(element_name="Elem1", element_type="Numeric")
hierarchy.add_element(element_name="Elem2", element_type="Numeric")
hierarchy.add_element(element_name="Elem3", element_type="Numeric")
hierarchy.add_edge(parent="Total", component="Elem1", weight=1)
hierarchy.add_edge(parent="Total", component="Elem2", weight=1)
hierarchy.add_edge(parent="Total", component="Elem3", weight=1)
original_dimension.add_hierarchy(hierarchy)
self.tm1.dimensions.create(original_dimension)
# rename
renamed_dimension = self.tm1.dimensions.get(original_dimension.name)
renamed_dimension.name = renamed_dimension_name
self.tm1.dimensions.create(renamed_dimension)
# challenge equality of dimensions
summary1 = self.tm1.dimensions.hierarchies.get_hierarchy_summary(
dimension_name=original_dimension_name,
hierarchy_name=original_dimension_name)
summary2 = self.tm1.dimensions.hierarchies.get_hierarchy_summary(
dimension_name=renamed_dimension_name,
hierarchy_name=renamed_dimension_name)
self.assertEqual(summary1, summary2)
# delete
for dim_name in (original_dimension_name, renamed_dimension_name):
self.tm1.dimensions.delete(dimension_name=dim_name)
@classmethod
def tearDownClass(cls):
cls.tm1.logout()
if __name__ == '__main__':
unittest.main()
| [
"configparser.ConfigParser",
"pathlib.Path",
"TM1py.Objects.Dimension",
"TM1py.Services.TM1Service",
"TM1py.Objects.ElementAttribute",
"unittest.main",
"TM1py.Objects.Hierarchy",
"TM1py.Objects.Element"
] | [((11996, 12011), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12009, 12011), False, 'import unittest\n'), ((449, 476), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (474, 476), False, 'import configparser\n'), ((565, 601), 'TM1py.Services.TM1Service', 'TM1Service', ([], {}), "(**cls.config['tm1srv01'])\n", (575, 601), False, 'from TM1py.Services import TM1Service\n'), ((1071, 1120), 'TM1py.Objects.Element', 'Element', ([], {'name': '"""Root"""', 'element_type': '"""Consolidated"""'}), "(name='Root', element_type='Consolidated')\n", (1078, 1120), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((1571, 1715), 'TM1py.Objects.Hierarchy', 'Hierarchy', ([], {'name': 'cls.dimension_name', 'dimension_name': 'cls.dimension_name', 'elements': 'elements', 'edges': 'edges', 'element_attributes': 'element_attributes'}), '(name=cls.dimension_name, dimension_name=cls.dimension_name,\n elements=elements, edges=edges, element_attributes=element_attributes)\n', (1580, 1715), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((1785, 1836), 'TM1py.Objects.Dimension', 'Dimension', ([], {'name': 'cls.dimension_name', 'hierarchies': '[h]'}), '(name=cls.dimension_name, hierarchies=[h])\n', (1794, 1836), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((1969, 2019), 'TM1py.Objects.Dimension', 'Dimension', (['cls.dimension_name_with_multi_hierarchy'], {}), '(cls.dimension_name_with_multi_hierarchy)\n', (1978, 2019), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((5502, 5648), 'TM1py.Objects.Hierarchy', 'Hierarchy', ([], {'name': 'self.dimension_name', 'dimension_name': 'self.dimension_name', 'elements': 'elements', 'element_attributes': 'element_attributes', 'edges': 'edges'}), '(name=self.dimension_name, dimension_name=self.dimension_name,\n elements=elements, element_attributes=element_attributes, edges=edges)\n', (5511, 5648), False, 'from TM1py.Objects import 
Dimension, Hierarchy, Element\n'), ((10320, 10354), 'TM1py.Objects.Dimension', 'Dimension', (['original_dimension_name'], {}), '(original_dimension_name)\n', (10329, 10354), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((10375, 10454), 'TM1py.Objects.Hierarchy', 'Hierarchy', ([], {'name': 'original_dimension_name', 'dimension_name': 'original_dimension_name'}), '(name=original_dimension_name, dimension_name=original_dimension_name)\n', (10384, 10454), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((1426, 1484), 'TM1py.Objects.ElementAttribute', 'ElementAttribute', ([], {'name': '"""Name Long"""', 'attribute_type': '"""Alias"""'}), "(name='Name Long', attribute_type='Alias')\n", (1442, 1484), False, 'from TM1py.Objects import ElementAttribute\n'), ((1498, 1557), 'TM1py.Objects.ElementAttribute', 'ElementAttribute', ([], {'name': '"""Name Short"""', 'attribute_type': '"""Alias"""'}), "(name='Name Short', attribute_type='Alias')\n", (1514, 1557), False, 'from TM1py.Objects import ElementAttribute\n'), ((4753, 4800), 'TM1py.Objects.Element', 'Element', ([], {'name': '"""e1"""', 'element_type': '"""Consolidated"""'}), "(name='e1', element_type='Consolidated')\n", (4760, 4800), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((4822, 4864), 'TM1py.Objects.Element', 'Element', ([], {'name': '"""e2"""', 'element_type': '"""Numeric"""'}), "(name='e2', element_type='Numeric')\n", (4829, 4864), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((4886, 4928), 'TM1py.Objects.Element', 'Element', ([], {'name': '"""e3"""', 'element_type': '"""Numeric"""'}), "(name='e3', element_type='Numeric')\n", (4893, 4928), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((4950, 4992), 'TM1py.Objects.Element', 'Element', ([], {'name': '"""e4"""', 'element_type': '"""Numeric"""'}), "(name='e4', element_type='Numeric')\n", (4957, 4992), False, 'from TM1py.Objects import Dimension, 
Hierarchy, Element\n'), ((5207, 5265), 'TM1py.Objects.ElementAttribute', 'ElementAttribute', ([], {'name': '"""Name Long"""', 'attribute_type': '"""Alias"""'}), "(name='Name Long', attribute_type='Alias')\n", (5223, 5265), False, 'from TM1py.Objects import ElementAttribute\n'), ((5297, 5356), 'TM1py.Objects.ElementAttribute', 'ElementAttribute', ([], {'name': '"""Name Short"""', 'attribute_type': '"""Alias"""'}), "(name='Name Short', attribute_type='Alias')\n", (5313, 5356), False, 'from TM1py.Objects import ElementAttribute\n'), ((5388, 5446), 'TM1py.Objects.ElementAttribute', 'ElementAttribute', ([], {'name': '"""Currency"""', 'attribute_type': '"""String"""'}), "(name='Currency', attribute_type='String')\n", (5404, 5446), False, 'from TM1py.Objects import ElementAttribute\n'), ((1285, 1335), 'TM1py.Objects.Element', 'Element', ([], {'name': 'element_name', 'element_type': '"""Numeric"""'}), "(name=element_name, element_type='Numeric')\n", (1292, 1335), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((501, 515), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (505, 515), False, 'from pathlib import Path\n'), ((2184, 2211), 'TM1py.Objects.Element', 'Element', (['"""Elem1"""', '"""Numeric"""'], {}), "('Elem1', 'Numeric')\n", (2191, 2211), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2213, 2240), 'TM1py.Objects.Element', 'Element', (['"""Elem2"""', '"""Numeric"""'], {}), "('Elem2', 'Numeric')\n", (2220, 2240), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2242, 2269), 'TM1py.Objects.Element', 'Element', (['"""Elem3"""', '"""Numeric"""'], {}), "('Elem3', 'Numeric')\n", (2249, 2269), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2437, 2464), 'TM1py.Objects.Element', 'Element', (['"""Elem1"""', '"""Numeric"""'], {}), "('Elem1', 'Numeric')\n", (2444, 2464), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2466, 2493), 
'TM1py.Objects.Element', 'Element', (['"""Elem2"""', '"""Numeric"""'], {}), "('Elem2', 'Numeric')\n", (2473, 2493), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2495, 2522), 'TM1py.Objects.Element', 'Element', (['"""Elem3"""', '"""Numeric"""'], {}), "('Elem3', 'Numeric')\n", (2502, 2522), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2690, 2717), 'TM1py.Objects.Element', 'Element', (['"""Elem1"""', '"""Numeric"""'], {}), "('Elem1', 'Numeric')\n", (2697, 2717), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2719, 2746), 'TM1py.Objects.Element', 'Element', (['"""Elem2"""', '"""Numeric"""'], {}), "('Elem2', 'Numeric')\n", (2726, 2746), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n'), ((2748, 2775), 'TM1py.Objects.Element', 'Element', (['"""Elem3"""', '"""Numeric"""'], {}), "('Elem3', 'Numeric')\n", (2755, 2775), False, 'from TM1py.Objects import Dimension, Hierarchy, Element\n')] |
from distutils.version import StrictVersion as SV
import unittest
import minecraft
class VersionTest(unittest.TestCase):
def test_module_version_is_a_valid_pep_386_strict_version(self):
SV(minecraft.__version__)
def test_protocol_version_is_an_int(self):
for version in minecraft.SUPPORTED_PROTOCOL_VERSIONS:
self.assertTrue(type(version) is int)
| [
"distutils.version.StrictVersion"
] | [((201, 226), 'distutils.version.StrictVersion', 'SV', (['minecraft.__version__'], {}), '(minecraft.__version__)\n', (203, 226), True, 'from distutils.version import StrictVersion as SV\n')] |
import torch
from torch.nn import functional as F
from torch import optim
from torch import nn
class PolicyNetwork(torch.nn.Module):
def __init__(self, in_dim, out_dim, alpha=0.01):
super(PolicyNetwork, self).__init__()
self.in_dim = in_dim
self.out_dim = out_dim
self.l1 = nn.Linear(in_dim, 128)
self.l2 = nn.Linear(128, 128)
self.l3 = nn.Linear(128, 64)
self.l4 = nn.Linear(64, out_dim)
self.relu = nn.LeakyReLU()
self.optimizer = torch.optim.Adam(lr=alpha, params=self.parameters())
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu:0')
self.to(self.device)
def forward(self, x):
out = torch.Tensor(x).reshape(-1, self.in_dim)
out = self.l1(out)
out = self.relu(out)
out = self.l2(out)
out = self.relu(out)
out = self.l3(out)
out = self.relu(out)
out = self.l4(out)
out = torch.tanh(out)
return out
def loss(self, q):
return -torch.sum(q)/q.shape[0]
def optimize(self, q):
torch.cuda.empty_cache()
self.optimizer.zero_grad()
loss = self.loss(q)
loss.backward(retain_graph=True)
self.optimizer.step()
return -loss.detach().numpy()
def main():
pn = PolicyNetwork(in_dim=3, out_dim=1)
x = torch.ones(10, 3)
print(pn.forward(x))
if __name__ == "__main__":
main()
| [
"torch.tanh",
"torch.nn.LeakyReLU",
"torch.Tensor",
"torch.cuda.is_available",
"torch.sum",
"torch.nn.Linear",
"torch.cuda.empty_cache",
"torch.ones"
] | [((1200, 1217), 'torch.ones', 'torch.ones', (['(10)', '(3)'], {}), '(10, 3)\n', (1210, 1217), False, 'import torch\n'), ((288, 310), 'torch.nn.Linear', 'nn.Linear', (['in_dim', '(128)'], {}), '(in_dim, 128)\n', (297, 310), False, 'from torch import nn\n'), ((323, 342), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(128)'], {}), '(128, 128)\n', (332, 342), False, 'from torch import nn\n'), ((355, 373), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (364, 373), False, 'from torch import nn\n'), ((386, 408), 'torch.nn.Linear', 'nn.Linear', (['(64)', 'out_dim'], {}), '(64, out_dim)\n', (395, 408), False, 'from torch import nn\n'), ((423, 437), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', ([], {}), '()\n', (435, 437), False, 'from torch import nn\n'), ((853, 868), 'torch.tanh', 'torch.tanh', (['out'], {}), '(out)\n', (863, 868), False, 'import torch\n'), ((968, 992), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (990, 992), False, 'import torch\n'), ((553, 578), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (576, 578), False, 'import torch\n'), ((651, 666), 'torch.Tensor', 'torch.Tensor', (['x'], {}), '(x)\n', (663, 666), False, 'import torch\n'), ((915, 927), 'torch.sum', 'torch.sum', (['q'], {}), '(q)\n', (924, 927), False, 'import torch\n')] |
# coding: utf-8
# # Using Dropout
# Let's see how we can use dropout for early stopping
from concept_dependency_graph import ConceptDependencyGraph
import data_generator as dg
from student import *
import simple_mdp as sm
import dynamics_model_class as dmc
import numpy as np
import dataset_utils
import tensorflow as tf
import tflearn
import copy
import time
def main():
n_concepts = 4
use_student2 = True
student2_str = '2' if use_student2 else ''
learn_prob = 0.5
lp_str = '-lp{}'.format(int(learn_prob*100)) if not use_student2 else ''
n_students = 100000
seqlen = 7
filter_mastery = True
filter_str = '' if not filter_mastery else '-filtered'
policy = 'random'
filename = 'test{}-n{}-l{}{}-{}{}.pickle'.format(student2_str, n_students, seqlen,
lp_str, policy, filter_str)
#concept_tree = sm.create_custom_dependency()
concept_tree = ConceptDependencyGraph()
concept_tree.init_default_tree(n_concepts)
if not use_student2:
test_student = Student(n=n_concepts,p_trans_satisfied=learn_prob, p_trans_not_satisfied=0.0, p_get_ex_correct_if_concepts_learned=1.0)
else:
test_student = Student2(n_concepts)
print(filename)
# load toy data
data = dataset_utils.load_data(filename='{}{}'.format(dg.SYN_DATA_DIR, filename))
print('Average posttest: {}'.format(sm.expected_reward(data)))
print('Percent of full posttest score: {}'.format(sm.percent_complete(data)))
print('Percent of all seen: {}'.format(sm.percent_all_seen(data)))
input_data_, output_mask_, target_data_ = dataset_utils.preprocess_data_for_rnn(data)
train_data = (input_data_[:,:,:], output_mask_[:,:,:], target_data_[:,:,:])
print(input_data_.shape)
print(output_mask_.shape)
print(target_data_.shape)
# test_model hidden=16
# test_model_mid hidden=10
# test_model_small hidden=5
# test_model_tiny hidden=3
model_id = "test2_model_small"
dropouts = np.array([1.0])
n_dropouts = dropouts.shape[0]
total_epochs = 14
reps = 20
class ExtractCallback(tflearn.callbacks.Callback):
def __init__(self):
self.tstates = []
def on_epoch_end(self, training_state):
self.tstates.append(copy.copy(training_state))
def test_dropout_losses():
losses = np.zeros((n_dropouts,reps,total_epochs))
val_losses = np.zeros((n_dropouts, reps,total_epochs))
for d in range(n_dropouts):
dropout = dropouts[d]
for r in range(reps):
print('----------------------------------------')
print('---------- Dropout {:3.1f} Rep {:2d} ----------'.format(dropout, r+1))
print('----------------------------------------')
ecall = ExtractCallback()
dmodel = dmc.DynamicsModel(model_id=model_id, timesteps=seqlen, dropout=dropout, load_checkpoint=False)
dmodel.train(train_data, n_epoch=total_epochs, callbacks=ecall, shuffle=False, load_checkpoint=False)
losses[d,r,:] = np.array([s.global_loss for s in ecall.tstates])
val_losses[d,r,:] = np.array([s.val_loss for s in ecall.tstates])
return losses, val_losses
losses, val_losses = test_dropout_losses()
np.savez("dropoutput",dropouts=dropouts, losses=losses, vals=val_losses)
if __name__ == '__main__':
starttime = time.time()
np.random.seed()
main()
endtime = time.time()
print('Time elapsed {}s'.format(endtime-starttime))
| [
"numpy.savez",
"dynamics_model_class.DynamicsModel",
"dataset_utils.preprocess_data_for_rnn",
"concept_dependency_graph.ConceptDependencyGraph",
"numpy.array",
"numpy.zeros",
"simple_mdp.percent_all_seen",
"numpy.random.seed",
"simple_mdp.percent_complete",
"copy.copy",
"time.time",
"simple_md... | [((951, 975), 'concept_dependency_graph.ConceptDependencyGraph', 'ConceptDependencyGraph', ([], {}), '()\n', (973, 975), False, 'from concept_dependency_graph import ConceptDependencyGraph\n'), ((1640, 1683), 'dataset_utils.preprocess_data_for_rnn', 'dataset_utils.preprocess_data_for_rnn', (['data'], {}), '(data)\n', (1677, 1683), False, 'import dataset_utils\n'), ((2026, 2041), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (2034, 2041), True, 'import numpy as np\n'), ((3349, 3422), 'numpy.savez', 'np.savez', (['"""dropoutput"""'], {'dropouts': 'dropouts', 'losses': 'losses', 'vals': 'val_losses'}), "('dropoutput', dropouts=dropouts, losses=losses, vals=val_losses)\n", (3357, 3422), True, 'import numpy as np\n'), ((3466, 3477), 'time.time', 'time.time', ([], {}), '()\n', (3475, 3477), False, 'import time\n'), ((3483, 3499), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (3497, 3499), True, 'import numpy as np\n'), ((3531, 3542), 'time.time', 'time.time', ([], {}), '()\n', (3540, 3542), False, 'import time\n'), ((2383, 2425), 'numpy.zeros', 'np.zeros', (['(n_dropouts, reps, total_epochs)'], {}), '((n_dropouts, reps, total_epochs))\n', (2391, 2425), True, 'import numpy as np\n'), ((2445, 2487), 'numpy.zeros', 'np.zeros', (['(n_dropouts, reps, total_epochs)'], {}), '((n_dropouts, reps, total_epochs))\n', (2453, 2487), True, 'import numpy as np\n'), ((1414, 1438), 'simple_mdp.expected_reward', 'sm.expected_reward', (['data'], {}), '(data)\n', (1432, 1438), True, 'import simple_mdp as sm\n'), ((1495, 1520), 'simple_mdp.percent_complete', 'sm.percent_complete', (['data'], {}), '(data)\n', (1514, 1520), True, 'import simple_mdp as sm\n'), ((1566, 1591), 'simple_mdp.percent_all_seen', 'sm.percent_all_seen', (['data'], {}), '(data)\n', (1585, 1591), True, 'import simple_mdp as sm\n'), ((2307, 2332), 'copy.copy', 'copy.copy', (['training_state'], {}), '(training_state)\n', (2316, 2332), False, 'import copy\n'), ((2885, 2983), 
'dynamics_model_class.DynamicsModel', 'dmc.DynamicsModel', ([], {'model_id': 'model_id', 'timesteps': 'seqlen', 'dropout': 'dropout', 'load_checkpoint': '(False)'}), '(model_id=model_id, timesteps=seqlen, dropout=dropout,\n load_checkpoint=False)\n', (2902, 2983), True, 'import dynamics_model_class as dmc\n'), ((3130, 3178), 'numpy.array', 'np.array', (['[s.global_loss for s in ecall.tstates]'], {}), '([s.global_loss for s in ecall.tstates])\n', (3138, 3178), True, 'import numpy as np\n'), ((3215, 3260), 'numpy.array', 'np.array', (['[s.val_loss for s in ecall.tstates]'], {}), '([s.val_loss for s in ecall.tstates])\n', (3223, 3260), True, 'import numpy as np\n')] |
##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import models, fields, api
class AccountInvoiceTaxWizard(models.TransientModel):
_name = 'account.invoice.tax.wizard'
_description = 'Account Invoice Tax Wizard'
@api.model
def _get_invoice(self):
return self._context.get('active_id', False)
tax_id = fields.Many2one(
'account.tax',
'Tax',
required=True,
)
name = fields.Char(
'Tax Description',
required=True,
)
amount = fields.Float(
digits='Account',
required=True,
)
move_id = fields.Many2one(
'account.move',
'Invoice',
default=_get_invoice,
)
base = fields.Float(
digits='Account',
help='Not stored, only used to suggest amount',
)
account_analytic_id = fields.Many2one(
'account.analytic.account',
'Analytic Account',
)
invoice_type = fields.Selection(
related='move_id.type',
string='Invoice Type',
)
invoice_company_id = fields.Many2one(
'res.company',
'Company',
related='move_id.company_id',
)
@api.onchange('move_id')
def onchange_invoice(self):
self.base = self.move_id.amount_untaxed
@api.onchange('tax_id')
def onchange_tax(self):
res = self.tax_id.compute_all(self.base)
self.name = res.get('taxes', False) and res['taxes'][0].get(
'name', False) or False
@api.onchange('base', 'tax_id')
def onchange_base(self):
res = self.tax_id.compute_all(self.base)
self.amount = res.get('taxes', False) and res['taxes'][0].get(
'amount', False) or False
def confirm(self):
self.ensure_one()
if not self.move_id or not self.tax_id:
return False
invoice = self.move_id
res = self.tax_id.compute_all(self.base)
tax = res['taxes'][0]
val = {
'move_id': invoice.id,
'name': self.name,
'tax_id': self.tax_id.id,
'amount': self.amount,
'manual': True,
'sequence': 99,
'account_analytic_id': self.account_analytic_id.id,
'account_id': invoice.type in ('out_invoice', 'in_invoice') and (
tax['account_id'] or False) or (
tax['refund_account_id'] or False),
}
self.env['account.invoice.tax'].create(val)
| [
"odoo.fields.Float",
"odoo.fields.Many2one",
"odoo.api.onchange",
"odoo.fields.Selection",
"odoo.fields.Char"
] | [((540, 592), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.tax"""', '"""Tax"""'], {'required': '(True)'}), "('account.tax', 'Tax', required=True)\n", (555, 592), False, 'from odoo import models, fields, api\n'), ((635, 680), 'odoo.fields.Char', 'fields.Char', (['"""Tax Description"""'], {'required': '(True)'}), "('Tax Description', required=True)\n", (646, 680), False, 'from odoo import models, fields, api\n'), ((717, 762), 'odoo.fields.Float', 'fields.Float', ([], {'digits': '"""Account"""', 'required': '(True)'}), "(digits='Account', required=True)\n", (729, 762), False, 'from odoo import models, fields, api\n'), ((800, 864), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.move"""', '"""Invoice"""'], {'default': '_get_invoice'}), "('account.move', 'Invoice', default=_get_invoice)\n", (815, 864), False, 'from odoo import models, fields, api\n'), ((907, 985), 'odoo.fields.Float', 'fields.Float', ([], {'digits': '"""Account"""', 'help': '"""Not stored, only used to suggest amount"""'}), "(digits='Account', help='Not stored, only used to suggest amount')\n", (919, 985), False, 'from odoo import models, fields, api\n'), ((1035, 1098), 'odoo.fields.Many2one', 'fields.Many2one', (['"""account.analytic.account"""', '"""Analytic Account"""'], {}), "('account.analytic.account', 'Analytic Account')\n", (1050, 1098), False, 'from odoo import models, fields, api\n'), ((1141, 1204), 'odoo.fields.Selection', 'fields.Selection', ([], {'related': '"""move_id.type"""', 'string': '"""Invoice Type"""'}), "(related='move_id.type', string='Invoice Type')\n", (1157, 1204), False, 'from odoo import models, fields, api\n'), ((1253, 1324), 'odoo.fields.Many2one', 'fields.Many2one', (['"""res.company"""', '"""Company"""'], {'related': '"""move_id.company_id"""'}), "('res.company', 'Company', related='move_id.company_id')\n", (1268, 1324), False, 'from odoo import models, fields, api\n'), ((1362, 1385), 'odoo.api.onchange', 'api.onchange', (['"""move_id"""'], {}), 
"('move_id')\n", (1374, 1385), False, 'from odoo import models, fields, api\n'), ((1472, 1494), 'odoo.api.onchange', 'api.onchange', (['"""tax_id"""'], {}), "('tax_id')\n", (1484, 1494), False, 'from odoo import models, fields, api\n'), ((1683, 1713), 'odoo.api.onchange', 'api.onchange', (['"""base"""', '"""tax_id"""'], {}), "('base', 'tax_id')\n", (1695, 1713), False, 'from odoo import models, fields, api\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Parse the FRC drive station logs which are packed binary data
# Notes on comparison to DSLog-Parse:
# D-P has packet_loss as a *signed* integer, which makes no sense. Unsigned looks sensible.
import datetime
import math
import re
import struct
import bitstring
MAX_INT64 = 2**63 - 1
DSLOG_TIMESTEP = 0.020
def read_timestamp(strm):
# Time stamp: int64, uint64
b1 = strm.read(8)
b2 = strm.read(8)
if not b1 or not b2:
return None
sec = struct.unpack('>q', b1)[0]
millisec = struct.unpack('>Q', b2)[0]
# for now, ignore
dt = datetime.datetime(1904, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)
dt += datetime.timedelta(seconds=(sec + float(millisec) / MAX_INT64))
return dt
class DSLogParser():
def __init__(self, input_file):
self.strm = open(input_file, 'rb')
self.record_time_offset = datetime.timedelta(seconds=DSLOG_TIMESTEP)
self.curr_time = None
self.read_header()
return
def close(self):
self.strm.close()
return
def read_records(self):
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
while True:
r = self.read_record_v3()
if r is None:
break
yield r
return
def read_header(self):
self.version = struct.unpack('>i', self.strm.read(4))[0]
if self.version != 3:
raise Exception("Unknown file version number {}".format(self.version))
self.curr_time = read_timestamp(self.strm)
return
def read_record_v3(self):
data_bytes = self.strm.read(10)
if not data_bytes or len(data_bytes) < 10:
return None
pdp_bytes = self.strm.read(25)
if not pdp_bytes or len(pdp_bytes) < 25:
# should not happen!!
raise EOFError("No data for PDP. Unexpected end of file.")
res = {'time': self.curr_time}
res.update(self.parse_data_v3(data_bytes))
res.update(self.parse_pdp_v3(pdp_bytes))
self.curr_time += self.record_time_offset
return res
@staticmethod
def shifted_float(raw_value, shift_right):
return raw_value / (2.0**shift_right)
@staticmethod
def unpack_bits(raw_value):
'''Unpack and invert the bits in a byte'''
status_bits = bitstring.Bits(bytes=raw_value)
# invert them all
return [not b for b in status_bits]
@staticmethod
def uint_from_bytes(bytes, offset, size_in_bits):
'''Pull out an unsigned int from an array of bytes, with arbitrary bit start and length'''
first_byte = math.floor(offset / 8)
num_bytes = math.ceil(size_in_bits / 8)
if num_bytes == 1:
uint = struct.unpack_from('>B', bytes, first_byte)[0]
elif num_bytes == 2:
uint = struct.unpack_from('>H', bytes, first_byte)[0]
else:
# not needed here, and general case is harder
raise Exception('not supported')
# Need to mask off the incorrect high bits and then shift right to get rid of the incorrect low bits
left_bitshift = offset - first_byte * 8
right_bitshift = num_bytes * 8 - size_in_bits - left_bitshift
return (uint & (0xFFFF >> left_bitshift)) >> right_bitshift
def parse_data_v3(self, data_bytes):
raw_values = struct.unpack('>BBHBcBBH', data_bytes)
status_bits = self.unpack_bits(raw_values[4])
res = {
'round_trip_time': self.shifted_float(raw_values[0], 1),
'packet_loss': 0.04 * raw_values[1], # not shifted
'voltage': self.shifted_float(raw_values[2], 8),
'rio_cpu': 0.01 * self.shifted_float(raw_values[3], 1),
'can_usage': 0.01 * self.shifted_float(raw_values[5], 1),
'wifi_db': self.shifted_float(raw_values[6], 1),
'bandwidth': self.shifted_float(raw_values[7], 8),
'robot_disabled': status_bits[7],
'robot_auto': status_bits[6],
'robot_tele': status_bits[5],
'ds_disabled': status_bits[4],
'ds_auto': status_bits[3],
'ds_tele': status_bits[2],
'watchdog': status_bits[1],
'brownout': status_bits[0],
}
return res
def parse_pdp_v3(self, pdp_bytes):
# from CD post https://www.chiefdelphi.com/forums/showpost.php?p=1556451&postcount=11
# pdp_offsets = (8, 18, 28, 38, 52, 62, 72, 82, 92, 102, 116, 126, 136, 146, 156, 166)
# from DSLog-Reader
# these make more sense in terms of defining a packing scheme, so stick with them
# looks like this is a 64-bit int holding 6 10-bit numbers and they ignore the extra 4 bits
pdp_offsets = (8, 18, 28, 38, 48, 58,
72, 82, 92, 102, 112, 122,
136, 146, 156, 166)
vals = []
for offset in pdp_offsets:
vals.append(self.shifted_float(self.uint_from_bytes(pdp_bytes, offset, 10), 3))
total_i = 0.0
for i in vals:
total_i += i
# the scaling on R, V and T are almost certainly not correct
# need to find a reference for those values
res = {
'pdp_id': self.uint_from_bytes(pdp_bytes, 0, 8),
'pdp_currents': vals,
'pdp_resistance': self.uint_from_bytes(pdp_bytes, 176, 8),
'pdp_voltage': self.uint_from_bytes(pdp_bytes, 184, 8),
'pdp_temp': self.uint_from_bytes(pdp_bytes, 192, 8),
'pdp_total_current': total_i,
}
return res
class DSEventParser():
    '''Parse an FRC Driver Station event log (format version 3).
    The file is opened on construction and the header consumed
    immediately; iterate read_records() for {'time', 'message'} dicts.
    '''
    def __init__(self, input_file):
        # Open in binary mode; read_header() sets self.version and
        # self.start_time right away.
        self.strm = open(input_file, 'rb')
        self.version = None
        self.start_time = None
        self.read_header()
        return
    def close(self):
        # Close the underlying file stream.
        self.strm.close()
        return
    def read_records(self):
        '''Generator yielding each event record until end of file.'''
        if self.version != 3:
            raise Exception("Unknown file version number {}".format(self.version))
        while True:
            r = self.read_record_v3()
            if r is None:
                # read_record_v3 returns None at EOF
                break
            yield r
        return
    def read_header(self):
        '''Read the header: a big-endian int32 version then a file timestamp.'''
        self.version = struct.unpack('>i', self.strm.read(4))[0]
        if self.version != 3:
            raise Exception("Unknown file version number {}".format(self.version))
        # read_timestamp is a module-level helper defined elsewhere in this file.
        self.start_time = read_timestamp(self.strm)  # file starttime
        return
    def read_record_v3(self):
        '''Read one record; returns {'time', 'message'} or None at EOF.'''
        t = read_timestamp(self.strm)
        if t is None:
            return None
        # Each record is: timestamp, int32 message length, then the message bytes.
        msg_len = struct.unpack('>i', self.strm.read(4))[0]
        msg = struct.unpack('%ds' % msg_len, self.strm.read(msg_len))[0]
        # backslashreplace keeps parsing alive on any non-ASCII bytes.
        msg = msg.decode('ascii', "backslashreplace")
        return {'time': t, 'message': msg}
    @staticmethod
    def find_match_info(filename):
        '''Scan a .dsevents file for the FMS connection message.
        Returns {'match_name', 'field_time'} from the first match, or
        None if no FMS message is present.
        '''
        rdr = DSEventParser(filename)
        try:
            for rec in rdr.read_records():
                m = re.match(r'FMS Connected:\s+(?P<match>.*),\s+Field Time:\s+(?P<time>[0-9/ :]*)', rec['message'])
                if m:
                    return {'match_name': m.group('match'),
                            'field_time': datetime.datetime.strptime(m.group('time'), '%y/%m/%d %H:%M:%S')}
        finally:
            # Always release the file handle, even on early return.
            rdr.close()
        return None
| [
"datetime.datetime",
"math.ceil",
"math.floor",
"re.match",
"struct.unpack",
"datetime.timedelta",
"bitstring.Bits",
"struct.unpack_from"
] | [((621, 689), 'datetime.datetime', 'datetime.datetime', (['(1904)', '(1)', '(1)', '(0)', '(0)', '(0)'], {'tzinfo': 'datetime.timezone.utc'}), '(1904, 1, 1, 0, 0, 0, tzinfo=datetime.timezone.utc)\n', (638, 689), False, 'import datetime\n'), ((520, 543), 'struct.unpack', 'struct.unpack', (['""">q"""', 'b1'], {}), "('>q', b1)\n", (533, 543), False, 'import struct\n'), ((562, 585), 'struct.unpack', 'struct.unpack', (['""">Q"""', 'b2'], {}), "('>Q', b2)\n", (575, 585), False, 'import struct\n'), ((915, 957), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'DSLOG_TIMESTEP'}), '(seconds=DSLOG_TIMESTEP)\n', (933, 957), False, 'import datetime\n'), ((2436, 2467), 'bitstring.Bits', 'bitstring.Bits', ([], {'bytes': 'raw_value'}), '(bytes=raw_value)\n', (2450, 2467), False, 'import bitstring\n'), ((2732, 2754), 'math.floor', 'math.floor', (['(offset / 8)'], {}), '(offset / 8)\n', (2742, 2754), False, 'import math\n'), ((2775, 2802), 'math.ceil', 'math.ceil', (['(size_in_bits / 8)'], {}), '(size_in_bits / 8)\n', (2784, 2802), False, 'import math\n'), ((3469, 3507), 'struct.unpack', 'struct.unpack', (['""">BBHBcBBH"""', 'data_bytes'], {}), "('>BBHBcBBH', data_bytes)\n", (3482, 3507), False, 'import struct\n'), ((2850, 2893), 'struct.unpack_from', 'struct.unpack_from', (['""">B"""', 'bytes', 'first_byte'], {}), "('>B', bytes, first_byte)\n", (2868, 2893), False, 'import struct\n'), ((7080, 7187), 're.match', 're.match', (['"""FMS Connected:\\\\s+(?P<match>.*),\\\\s+Field Time:\\\\s+(?P<time>[0-9/ :]*)"""', "rec['message']"], {}), "(\n 'FMS Connected:\\\\s+(?P<match>.*),\\\\s+Field Time:\\\\s+(?P<time>[0-9/ :]*)',\n rec['message'])\n", (7088, 7187), False, 'import re\n'), ((2945, 2988), 'struct.unpack_from', 'struct.unpack_from', (['""">H"""', 'bytes', 'first_byte'], {}), "('>H', bytes, first_byte)\n", (2963, 2988), False, 'import struct\n')] |
# -*- coding: UTF-8 -*-
from __future__ import print_function
from behave import given, when, then, step
from behave_manners.pagelems import DOMScope
from behave_manners.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
@when(u'I click to have the dropdown visible')
def click_dropdown1(context):
    """Open the dropdown by clicking the component, unless it is already open."""
    elem = context.cur_element
    if not elem['input'].owns:
        elem.click()
        elem._scope.wait_all('short', welem=elem._remote)
    print("Owns: %s" % elem['input'].owns)
    # Remember the overlay container so later steps can inspect it.
    context.cur_overlays = elem['overlays']
@when(u'I click again to hide the dropdown')
def click_hide_dropdown(context):
    """Dismiss an open dropdown with ESC and verify it is gone."""
    field = context.cur_element['input']
    if field.owns:
        field.send_keys(Keys.ESCAPE)
    assert not field.owns
@when(u'I click again to show the dropdown')
def click_dropdown2(context):
    """Re-open the dropdown by typing into the input, then wait for the overlay."""
    elem = context.cur_element
    elem['input'].send_keys('o')
    elem._scope.wait_all('short', welem=elem._remote)
    assert elem['input'].owns, "Did not present overlay"
@then(u'the previous dropdown component resolves')
def check_resolve_dropdown1(context):
    # Dump the current dropdown/overlay state for inspection.
    print("Cur dropdown %s" % context.cur_element['input'].owns)
    print("Cur overlays %s" % context.cur_overlays.is_displayed())
    # Deliberately fail so behave reports the captured output — looks like a
    # debugging aid; NOTE(review): confirm before expecting this step to pass.
    raise AssertionError
| [
"behave.when",
"behave.then"
] | [((255, 300), 'behave.when', 'when', (['u"""I click to have the dropdown visible"""'], {}), "(u'I click to have the dropdown visible')\n", (259, 300), False, 'from behave import given, when, then, step\n'), ((623, 666), 'behave.when', 'when', (['u"""I click again to hide the dropdown"""'], {}), "(u'I click again to hide the dropdown')\n", (627, 666), False, 'from behave import given, when, then, step\n'), ((847, 890), 'behave.when', 'when', (['u"""I click again to show the dropdown"""'], {}), "(u'I click again to show the dropdown')\n", (851, 890), False, 'from behave import given, when, then, step\n'), ((1128, 1177), 'behave.then', 'then', (['u"""the previous dropdown component resolves"""'], {}), "(u'the previous dropdown component resolves')\n", (1132, 1177), False, 'from behave import given, when, then, step\n')] |
import datetime
import os
import sys
from optparse import make_option
from django.core.management.base import BaseCommand
from django.conf import settings
try:
from django.contrib.gis.utils import LayerMapping
except ImportError:
print("gdal is required")
sys.exit(1)
from tigerline.models import County
def county_import(county_shp, year):
    """Load a TIGER/Line county shapefile into the County model.

    county_shp -- path to the .shp file to import.
    year -- vintage string ("2010".."2016"); the 2010 release names every
            attribute with a '10' suffix (STATEFP10, ...), later vintages
            use the bare names. The two previously duplicated mapping
            dicts differed only by that suffix, so derive one mapping.
    """
    suffix = '10' if year == '2010' else ''
    county_mapping = {
        'state_fips_code': 'STATEFP' + suffix,
        'fips_code': 'COUNTYFP' + suffix,
        'county_identifier': 'GEOID' + suffix,
        'name': 'NAME' + suffix,
        'name_and_description': 'NAMELSAD' + suffix,
        'legal_statistical_description': 'LSAD' + suffix,
        'fips_55_class_code': 'CLASSFP' + suffix,
        'feature_class_code': 'MTFCC' + suffix,
        'functional_status': 'FUNCSTAT' + suffix,
        # geometry field name is the same in every vintage
        'mpoly': 'POLYGON',
    }
    lm = LayerMapping(County, county_shp, county_mapping, encoding='LATIN1')
    lm.save(verbose=True)
class Command(BaseCommand):
    """Management command: find and import the newest available county shapefile."""
    help = 'Installs the 2010-2016 tigerline files for counties'
    def add_arguments(self, parser):
        parser.add_argument(
            '--path', default='', dest='path',
            help='The directory where the county data is stored.')
    def handle(self, *args, **kwargs):
        path = kwargs['path']
        # With DEBUG on this will DIE.
        settings.DEBUG = False
        tiger_file = ""
        # Prefer the newest vintage that is actually present on disk.
        for year in ("2016", "2015", "2014", "2013", "2012", "2011", "2010"):
            directory = 'tl_%s_us_county' % year
            if year == "2010":
                # 2010 directories carry an extra '10' suffix.
                directory = directory + "10"
            if os.path.exists(os.path.join(path, directory)):
                print('Found %s files.' % year)
                tiger_file = os.path.join(path, directory + "/" + directory + ".shp")
                break
        if not tiger_file:
            print('Could not find files.')
            exit()
        print("Start Counties: %s" % datetime.datetime.now())
        county_import(tiger_file, year)
        print("End Counties: %s" % datetime.datetime.now())
| [
"os.path.join",
"datetime.datetime.now",
"sys.exit",
"django.contrib.gis.utils.LayerMapping"
] | [((1321, 1388), 'django.contrib.gis.utils.LayerMapping', 'LayerMapping', (['County', 'county_shp', 'county_mapping'], {'encoding': '"""LATIN1"""'}), "(County, county_shp, county_mapping, encoding='LATIN1')\n", (1333, 1388), False, 'from django.contrib.gis.utils import LayerMapping\n'), ((270, 281), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (278, 281), False, 'import sys\n'), ((2203, 2232), 'os.path.join', 'os.path.join', (['path', 'directory'], {}), '(path, directory)\n', (2215, 2232), False, 'import os\n'), ((2312, 2368), 'os.path.join', 'os.path.join', (['path', "(directory + '/' + directory + '.shp')"], {}), "(path, directory + '/' + directory + '.shp')\n", (2324, 2368), False, 'import os\n'), ((2519, 2542), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2540, 2542), False, 'import datetime\n'), ((2619, 2642), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2640, 2642), False, 'import datetime\n')] |
from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
    """Character-level RNN language model built as a TF1 static graph.

    build() wires placeholders -> one-hot embedding -> (GRU/RNN stack with
    dropout or zoneout) -> linear softmax layer -> cross-entropy loss in
    bits-per-character, plus an Adam training op and summaries.
    """
    def add_placeholders(self):
        """Generates placeholder variables to represent the input tensors
        """
        self.inputs_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="x")
        self.labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="y")
        # Scalar keep/zoneout probability fed at run time.
        self.dropout_placeholder = tf.placeholder(tf.float32)
    def create_feed_dict(self, inputs_batch, labels_batch=None, initial_state=None, keep_prob=1.0):
        """Creates the feed_dict for the model.
        NOTE: You do not have to do anything here.
        """
        feed_dict = {
            self.inputs_placeholder: inputs_batch,
            self.dropout_placeholder: keep_prob,
        }
        if labels_batch is not None:
            feed_dict[self.labels_placeholder] = labels_batch
        if initial_state is not None:
            # Carry RNN state across batches (e.g. truncated BPTT).
            feed_dict[self.in_state] = initial_state
        return feed_dict
    def add_embedding(self):
        """ Creates one-hot encoding for the input. No embedding is used as of now
        """
        embedding = tf.one_hot(self.inputs_placeholder, self.config.num_classes)
        return embedding
    def add_prediction_op(self):
        """ Get the input from the embedding layer
        """
        x = self.add_embedding()
        """ Create a RNN first & define a placeholder for the initial state
        """
        if self.config.model_type == "gru":
            cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
        elif self.config.model_type == "rnn":
            cell = tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)
        else:
            raise Exception("Unsuppoprted model type...")
        # Wrap the cell with the configured regularizer before stacking.
        if self.config.regularization == "dropout":
            cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout_placeholder)
        elif self.config.regularization == "zoneout":
            cell = ZoneoutWrapper(cell, zoneout_prob=self.dropout_placeholder)
        # state_is_tuple=False keeps the state as one flat tensor so it can be
        # fed back in through a single placeholder below.
        cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.config.num_layers, state_is_tuple=False)
        batch_size = tf.shape(x)[0]
        dynamic_max_length = tf.shape(x)[1]
        zero_state = cell.zero_state(batch_size, tf.float32)
        # Defaults to zeros unless a carried-over state is fed explicitly.
        self.in_state = tf.placeholder_with_default(zero_state, [None, cell.state_size])
        """ First find the sequence length and then use it to run the model
        """
        #length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
        output, self.out_state = tf.nn.dynamic_rnn(cell, x, initial_state=self.in_state)
        # Flatten (batch, time, hidden) -> (batch*time, hidden) for the dense layer.
        output = tf.reshape(output, shape=[-1, self.config.hidden_size])
        """ Pass it through a linear + Softmax layer to get the predictions
        """
        xavier_init = tf.contrib.layers.xavier_initializer()
        W = tf.get_variable("W", shape=[self.config.hidden_size, self.config.num_classes], initializer=xavier_init )
        b1 = tf.get_variable("b1", shape=[self.config.num_classes], initializer=xavier_init )
        preds = tf.add(tf.matmul(output,W),b1)
        # Restore the (batch, time, classes) shape expected by the loss.
        preds = tf.reshape(preds, shape=[batch_size,dynamic_max_length, self.config.num_classes])
        return preds
    def add_loss_op(self, preds):
        """Mean cross-entropy loss, rescaled from nats to bits (divide by ln 2)."""
        loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=preds) )
        scaled_loss = loss/np.log(2)
        tf.summary.scalar('loss', scaled_loss);
        return scaled_loss
    def add_training_op(self, loss):
        """Sets up the training Ops.
        """
        global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
        optimizer = tf.train.AdamOptimizer(self.config.lr)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return global_step, train_op
    def loss_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None):
        """Evaluate the loss on one batch without training (keep_prob=1.0)."""
        feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=1.0)
        loss, out_state = sess.run([self.loss,self.out_state], feed_dict=feed)
        return loss, out_state
    def train_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None, dropout=1.0):
        """Run one optimizer step; returns (loss, final state, step, summaries)."""
        feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=dropout)
        _, loss,out_state,_step, summary = sess.run([self.train_op, self.loss, self.out_state, self.global_step, self.merged_summaries], feed_dict=feed)
        return loss, out_state, _step, summary
    def build(self):
        """Assemble the full graph: placeholders, predictions, loss, train op."""
        self.add_placeholders()
        self.pred = self.add_prediction_op()
        self.loss = self.add_loss_op(self.pred)
        self.global_step, self.train_op = self.add_training_op(self.loss)
        self.merged_summaries = tf.summary.merge_all()
    def __init__(self, config):
        # config supplies max_length, num_classes, hidden_size, num_layers,
        # model_type, regularization and lr (see uses above).
        self.config = config
        self.build()
| [
"tensorflow.shape",
"tensorflow.get_variable",
"numpy.log",
"tensorflow.nn.rnn_cell.DropoutWrapper",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.placeholder",
"tensorflow.nn.rnn_cell.GRUCell",
"tensorflow.nn.dynamic_rnn",
"tensorflow.matmul",
"tensorflow.train.AdamOptimiz... | [((338, 410), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.config.max_length)', 'name': '"""x"""'}), "(tf.int32, shape=(None, self.config.max_length), name='x')\n", (352, 410), True, 'import tensorflow as tf\n'), ((445, 517), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '(None, self.config.max_length)', 'name': '"""y"""'}), "(tf.int32, shape=(None, self.config.max_length), name='y')\n", (459, 517), True, 'import tensorflow as tf\n'), ((553, 579), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (567, 579), True, 'import tensorflow as tf\n'), ((1291, 1351), 'tensorflow.one_hot', 'tf.one_hot', (['self.inputs_placeholder', 'self.config.num_classes'], {}), '(self.inputs_placeholder, self.config.num_classes)\n', (1301, 1351), True, 'import tensorflow as tf\n'), ((2198, 2285), 'tensorflow.nn.rnn_cell.MultiRNNCell', 'tf.nn.rnn_cell.MultiRNNCell', (['([cell] * self.config.num_layers)'], {'state_is_tuple': '(False)'}), '([cell] * self.config.num_layers, state_is_tuple\n =False)\n', (2225, 2285), True, 'import tensorflow as tf\n'), ((2448, 2512), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['zero_state', '[None, cell.state_size]'], {}), '(zero_state, [None, cell.state_size])\n', (2475, 2512), True, 'import tensorflow as tf\n'), ((2700, 2755), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['cell', 'x'], {'initial_state': 'self.in_state'}), '(cell, x, initial_state=self.in_state)\n', (2717, 2755), True, 'import tensorflow as tf\n'), ((2773, 2828), 'tensorflow.reshape', 'tf.reshape', (['output'], {'shape': '[-1, self.config.hidden_size]'}), '(output, shape=[-1, self.config.hidden_size])\n', (2783, 2828), True, 'import tensorflow as tf\n'), ((2940, 2978), 'tensorflow.contrib.layers.xavier_initializer', 'tf.contrib.layers.xavier_initializer', ([], {}), '()\n', (2976, 2978), True, 'import tensorflow as tf\n'), ((2991, 
3099), 'tensorflow.get_variable', 'tf.get_variable', (['"""W"""'], {'shape': '[self.config.hidden_size, self.config.num_classes]', 'initializer': 'xavier_init'}), "('W', shape=[self.config.hidden_size, self.config.\n num_classes], initializer=xavier_init)\n", (3006, 3099), True, 'import tensorflow as tf\n'), ((3109, 3188), 'tensorflow.get_variable', 'tf.get_variable', (['"""b1"""'], {'shape': '[self.config.num_classes]', 'initializer': 'xavier_init'}), "('b1', shape=[self.config.num_classes], initializer=xavier_init)\n", (3124, 3188), True, 'import tensorflow as tf\n'), ((3253, 3340), 'tensorflow.reshape', 'tf.reshape', (['preds'], {'shape': '[batch_size, dynamic_max_length, self.config.num_classes]'}), '(preds, shape=[batch_size, dynamic_max_length, self.config.\n num_classes])\n', (3263, 3340), True, 'import tensorflow as tf\n'), ((3562, 3600), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'scaled_loss'], {}), "('loss', scaled_loss)\n", (3579, 3600), True, 'import tensorflow as tf\n'), ((3738, 3805), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'dtype': 'tf.int32', 'trainable': '(False)', 'name': '"""global_step"""'}), "(0, dtype=tf.int32, trainable=False, name='global_step')\n", (3749, 3805), True, 'import tensorflow as tf\n'), ((3827, 3865), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['self.config.lr'], {}), '(self.config.lr)\n', (3849, 3865), True, 'import tensorflow as tf\n'), ((4992, 5014), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5012, 5014), True, 'import tensorflow as tf\n'), ((1660, 1707), 'tensorflow.nn.rnn_cell.GRUCell', 'tf.nn.rnn_cell.GRUCell', (['self.config.hidden_size'], {}), '(self.config.hidden_size)\n', (1682, 1707), True, 'import tensorflow as tf\n'), ((1970, 2048), 'tensorflow.nn.rnn_cell.DropoutWrapper', 'tf.nn.rnn_cell.DropoutWrapper', (['cell'], {'output_keep_prob': 'self.dropout_placeholder'}), '(cell, output_keep_prob=self.dropout_placeholder)\n', (1999, 2048), 
True, 'import tensorflow as tf\n'), ((2303, 2314), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2311, 2314), True, 'import tensorflow as tf\n'), ((2347, 2358), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2355, 2358), True, 'import tensorflow as tf\n'), ((3213, 3233), 'tensorflow.matmul', 'tf.matmul', (['output', 'W'], {}), '(output, W)\n', (3222, 3233), True, 'import tensorflow as tf\n'), ((3422, 3519), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'self.labels_placeholder', 'logits': 'preds'}), '(labels=self.\n labels_placeholder, logits=preds)\n', (3468, 3519), True, 'import tensorflow as tf\n'), ((3544, 3553), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (3550, 3553), True, 'import numpy as np\n'), ((1773, 1825), 'tensorflow.nn.rnn_cell.BasicRNNCell', 'tf.nn.rnn_cell.BasicRNNCell', (['self.config.hidden_size'], {}), '(self.config.hidden_size)\n', (1800, 1825), True, 'import tensorflow as tf\n'), ((2122, 2181), 'zoneout_wrapper.ZoneoutWrapper', 'ZoneoutWrapper', (['cell'], {'zoneout_prob': 'self.dropout_placeholder'}), '(cell, zoneout_prob=self.dropout_placeholder)\n', (2136, 2181), False, 'from zoneout_wrapper import ZoneoutWrapper\n')] |
# Importando as dependencias
from os import system, chdir, mkdir, rmdir
from time import sleep
from socket import gethostname, gethostbyname
# Informações
__version__ = "1.0"
__tag_version__ = "Osys2 Beta"
# Ethernet and Socket informations
hostname = gethostname()
host = gethostbyname(hostname)
# Modulos e funções do OSys2
def clearDisplay():
    # Clear the console via the shell 'cls' command (Windows-specific;
    # a no-op error on other platforms).
    system("cls")
# Aplicativos
# Kernel
class kernel:
    """Boot the OSystem shell: load (or create) the user profile, then run the prompt.

    Fixes over the previous version: the first-run account-creation dialogue
    was duplicated verbatim in both branches of the mkdir try/except, and the
    config file was read via bare open().read(), leaking the file handle.
    """
    def __init__(self):
        clearDisplay()
        try:
            self.db = self._read_username()
        except FileNotFoundError:
            # First run: make sure the home directory exists, then walk the
            # user through account creation (exactly once).
            try:
                mkdir("home")
            except FileExistsError:
                pass
            self._create_account()
            self.db = self._read_username()
        clearDisplay()
        self.inital_mensage()
        self.command_prompt()
    @staticmethod
    def _read_username():
        # Context manager closes the handle (the old code leaked it).
        with open("home\config.log") as f:
            return f.read()
    @staticmethod
    def _create_account():
        # Single copy of the first-run dialogue.
        with open("home\config.log", "wt+") as a:
            print("Welcome to Osystem"), sleep(2)
            print("")
            print("here you can simule Linux in your Windows NT")
            print("Create your accont to use this sub-desktop")
            nsm = str(input("Username: ")).strip()
            a.write(nsm)
    def command_prompt(self):
        # Minimal REPL: reads commands forever; dispatch not implemented yet.
        while True:
            cmd: str = input("{}@{}:$ ".format(hostname, self.db))
    def inital_mensage(self):
        print("Welcome to Osystem Beta: {} - (Gnu\Linux {})".format(__version__, __tag_version__))
        print("")
# Executando
kernel()
| [
"socket.gethostbyname",
"time.sleep",
"os.mkdir",
"os.system",
"socket.gethostname"
] | [((254, 267), 'socket.gethostname', 'gethostname', ([], {}), '()\n', (265, 267), False, 'from socket import gethostname, gethostbyname\n'), ((275, 298), 'socket.gethostbyname', 'gethostbyname', (['hostname'], {}), '(hostname)\n', (288, 298), False, 'from socket import gethostname, gethostbyname\n'), ((353, 366), 'os.system', 'system', (['"""cls"""'], {}), "('cls')\n", (359, 366), False, 'from os import system, chdir, mkdir, rmdir\n'), ((591, 604), 'os.mkdir', 'mkdir', (['"""home"""'], {}), "('home')\n", (596, 604), False, 'from os import system, chdir, mkdir, rmdir\n'), ((1158, 1166), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1163, 1166), False, 'from time import sleep\n'), ((737, 745), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (742, 745), False, 'from time import sleep\n')] |
import numpy as np
import toyplot as tp
from banditutil import create_running_ema
def selection_emas(simulation_output, alpha=0.99):
    """Running EMA of each arm's selection-indicator over the simulation."""
    arm_count = simulation_output["k"]
    rema = create_running_ema(alpha, initial=1/arm_count)
    traces = []
    for arm in range(arm_count):
        indicator = (choice == arm for choice in simulation_output["selection"])
        traces.append(rema(indicator, return_list=True))
    return traces
def plot_scores(out, axes):
    """Plot each agent's score trace and label its final value beside the line."""
    for idx, trace in enumerate(zip(*out["score"])):
        steps = len(trace)
        final = trace[-1]
        axes.plot(trace)
        # Alternate the label offset up/down so neighbouring labels don't overlap.
        axes.text(steps - 80, final + (-1) ** idx * 0.1,
                  "{:.3f}".format(final), style={"font-size": "14px"})
def plot_selection(out, axes, alpha=0.99):
    """Plot the running selection-frequency EMA of every arm.

    alpha -- EMA smoothing factor, forwarded to selection_emas.
    (Removed the unused enumerate index and the unused length variable.)
    """
    for selection in selection_emas(out, alpha):
        axes.plot(selection)
| [
"banditutil.create_running_ema"
] | [((177, 217), 'banditutil.create_running_ema', 'create_running_ema', (['alpha'], {'initial': '(1 / k)'}), '(alpha, initial=1 / k)\n', (195, 217), False, 'from banditutil import create_running_ema\n')] |
from pyg_base import reducer, reducing, dictable
import pytest
from operator import add, mul
from functools import reduce
def test_reducer():
    """reducer folds like functools.reduce but yields None on empty input."""
    cases = [(add, [1, 2, 3, 4], 10),
             (mul, [1, 2, 3, 4], 24),
             (add, [1], 1)]
    for op, values, expected in cases:
        assert reducer(op, values) == expected
    assert reducer(add, []) is None
    # functools.reduce, by contrast, raises on an empty sequence.
    with pytest.raises(TypeError):
        reduce(add, [])
def test_reducing():
    """reducing wraps a binary op (or a method name) into an n-ary callable."""
    # NOTE: mul is already imported at module level; the former function-local
    # 'from operator import mul' was redundant and has been removed.
    assert reducing(mul)([1,2,3,4]) == 24
    assert reducing(mul)(6,4) == 24
    assert reducing('__add__')([1,2,3,4]) == 10
    assert reducing('__add__')(6,4) == 10
    d = dictable(a = [1,2,3,5,4])
    assert reducing('inc')(d, dict(a=1))
    # Extra keyword arguments are forwarded to each pairwise application.
    f = lambda a, b, c: a+b+c
    assert reducing(f)([1,2,3,4,5], c = 0) == 15
    assert reducing(f)([1,2,3,4,5], c = 1) == 19
| [
"functools.reduce",
"pyg_base.dictable",
"pyg_base.reducer",
"pytest.raises",
"pyg_base.reducing"
] | [((606, 633), 'pyg_base.dictable', 'dictable', ([], {'a': '[1, 2, 3, 5, 4]'}), '(a=[1, 2, 3, 5, 4])\n', (614, 633), False, 'from pyg_base import reducer, reducing, dictable\n'), ((158, 184), 'pyg_base.reducer', 'reducer', (['add', '[1, 2, 3, 4]'], {}), '(add, [1, 2, 3, 4])\n', (165, 184), False, 'from pyg_base import reducer, reducing, dictable\n'), ((199, 225), 'pyg_base.reducer', 'reducer', (['mul', '[1, 2, 3, 4]'], {}), '(mul, [1, 2, 3, 4])\n', (206, 225), False, 'from pyg_base import reducer, reducing, dictable\n'), ((240, 257), 'pyg_base.reducer', 'reducer', (['add', '[1]'], {}), '(add, [1])\n', (247, 257), False, 'from pyg_base import reducer, reducing, dictable\n'), ((279, 295), 'pyg_base.reducer', 'reducer', (['add', '[]'], {}), '(add, [])\n', (286, 295), False, 'from pyg_base import reducer, reducing, dictable\n'), ((313, 337), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (326, 337), False, 'import pytest\n'), ((347, 362), 'functools.reduce', 'reduce', (['add', '[]'], {}), '(add, [])\n', (353, 362), False, 'from functools import reduce\n'), ((643, 658), 'pyg_base.reducing', 'reducing', (['"""inc"""'], {}), "('inc')\n", (651, 658), False, 'from pyg_base import reducer, reducing, dictable\n'), ((427, 440), 'pyg_base.reducing', 'reducing', (['mul'], {}), '(mul)\n', (435, 440), False, 'from pyg_base import reducer, reducing, dictable\n'), ((473, 486), 'pyg_base.reducing', 'reducing', (['mul'], {}), '(mul)\n', (481, 486), False, 'from pyg_base import reducer, reducing, dictable\n'), ((514, 533), 'pyg_base.reducing', 'reducing', (['"""__add__"""'], {}), "('__add__')\n", (522, 533), False, 'from pyg_base import reducer, reducing, dictable\n'), ((562, 581), 'pyg_base.reducing', 'reducing', (['"""__add__"""'], {}), "('__add__')\n", (570, 581), False, 'from pyg_base import reducer, reducing, dictable\n'), ((719, 730), 'pyg_base.reducing', 'reducing', (['f'], {}), '(f)\n', (727, 730), False, 'from pyg_base import reducer, reducing, 
dictable\n'), ((768, 779), 'pyg_base.reducing', 'reducing', (['f'], {}), '(f)\n', (776, 779), False, 'from pyg_base import reducer, reducing, dictable\n')] |
from datetime import datetime
from typing import Any
from yarl import URL
from newsapp.config import Config
from newsapp.models.article import Article
from newsapp.scraper import NewsItem, ScrapeError, scrape
def get_category_url(category: str) -> URL:
    """Build the absolute scrape URL for the named category."""
    return Config.SCRAPER_BASE_URL / Config.CATEGORIES[category]["url"]
def parse_category_page(category: str) -> set[NewsItem]:
    """Scrape a category listing; return NewsItems not yet stored as Articles."""
    page = scrape(get_category_url(category))
    items: set[URL] = set()
    for component in page.findAll("div", {"class": "news-item"}):
        link = URL(component.a["href"])
        slug = link.path.strip("/").split("/")[-1]
        # The trailing dash-separated token of the slug is the serial number.
        serial_no = int(slug.split("-")[-1])
        # URL paths containing "?_szc_galeri" are not valid articles
        if "?_szc_galeri" in link.path:
            continue
        # Skip items whose serial number already exists in the database.
        if Article.query.filter_by(serial_no=int(serial_no)).first():
            continue
        items.add(NewsItem(url=link, slug=slug, serial_no=serial_no, category=category))
    return items
def parse_news_item(news_item: NewsItem) -> dict[str, Any]:
    """Fetch a news item's page and extract its article fields as a dict."""
    page = scrape(news_item.url)
    article = page.find("article")
    if article is None:
        raise ScrapeError(f"{news_item} doesn't contain article element.")
    parsed = {
        # Article content
        "title": article.h1.text,
        "subtitle": article.h2.text,
        "content": " ".join(p.text for p in article.findAll("p")),
        # Link info
        "url": str(news_item.url),
        "category": news_item.category,
        "serial_no": news_item.serial_no,
    }
    # The last content-meta-date span carries the publication timestamp.
    meta_date = article.findAll("span", {"class": "content-meta-date"})[-1]
    parsed["date"] = datetime.strptime(meta_date.time["datetime"], "%Y-%m-%dT%H:%M:%S%z")
    return parsed
| [
"newsapp.scraper.ScrapeError",
"newsapp.scraper.NewsItem",
"datetime.datetime.strptime",
"newsapp.scraper.scrape",
"yarl.URL"
] | [((505, 525), 'newsapp.scraper.scrape', 'scrape', (['category_url'], {}), '(category_url)\n', (511, 525), False, 'from newsapp.scraper import NewsItem, ScrapeError, scrape\n'), ((1228, 1249), 'newsapp.scraper.scrape', 'scrape', (['news_item.url'], {}), '(news_item.url)\n', (1234, 1249), False, 'from newsapp.scraper import NewsItem, ScrapeError, scrape\n'), ((1907, 1957), 'datetime.datetime.strptime', 'datetime.strptime', (['date_str', '"""%Y-%m-%dT%H:%M:%S%z"""'], {}), "(date_str, '%Y-%m-%dT%H:%M:%S%z')\n", (1924, 1957), False, 'from datetime import datetime\n'), ((642, 661), 'yarl.URL', 'URL', (["comp.a['href']"], {}), "(comp.a['href'])\n", (645, 661), False, 'from yarl import URL\n'), ((1328, 1388), 'newsapp.scraper.ScrapeError', 'ScrapeError', (['f"""{news_item} doesn\'t contain article element."""'], {}), '(f"{news_item} doesn\'t contain article element.")\n', (1339, 1388), False, 'from newsapp.scraper import NewsItem, ScrapeError, scrape\n'), ((1064, 1132), 'newsapp.scraper.NewsItem', 'NewsItem', ([], {'url': 'url', 'slug': 'slug', 'serial_no': 'serial_no', 'category': 'category'}), '(url=url, slug=slug, serial_no=serial_no, category=category)\n', (1072, 1132), False, 'from newsapp.scraper import NewsItem, ScrapeError, scrape\n')] |
# Copyright (c) 2015 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = '<NAME>'
__docformat__ = "restructuredtext en"
from Bio import AlignIO, SeqIO
from Bio import Phylo
from StringIO import StringIO
from Alignment import Alignment
from AnalyseCDR import AnalyseCDR
import os
import subprocess
import copy
import re
import sys
class Dnaml:
"""A class to manage preparation of files for dnaml, invocation, and parsing of results. At the moment this is
restricted to the use of dnaml for ancestral reconstruction, based on an existing tree."""
    def __init__(self):
        # No per-instance state is kept; all inputs are passed directly
        # to run_dnaml.
        pass
def run_dnaml(self, seq_align, ptree, seqpattern, cdrfile, wdir, rep, tag=""):
"""Run dnaml. Arguments are:
seq_align: the input nt sequences (MultipleSequenceAlignment)
ptree: phylogenetic tree (Bio.Phylo)
seqpattern: A list of sequence number directives, in the format accepted by Alignment.set_position_numbers
wdir: the name of a directory that run_paml should use. This must exist already.
rep: a function that takes a string as an argument. This will be called should an error or warning be
necessary (may be called multiple times in one invocation).
tag: an optional tag to prefix filenames with
Sequences in seq_align must be the same length, must start on a codon boundary, and be an integral number
of codons in length. The first sequence must be the ancestral sequence or outgroup. Exactly he same sequence
names must occur in the alignment and the tree. Sequence name format is pretty flexible (sequences are
mapped to names acceptable to PAML and remapped after PAML has run).
"""
root_id = seq_align[0].id
# Translate clade names to something safe
namedict = {}
serial = 1
for seq in seq_align:
namedict[seq.id] = "N%09d" % serial
seq.id = namedict[seq.id]
serial += 1
qtree = copy.deepcopy(ptree)
for clade in qtree.get_terminals():
if clade.name and clade.name in namedict:
clade.name = namedict[clade.name]
# Root the tree on the first record
first = "N%09d" % 1
try:
qtree.root_with_outgroup(qtree.find_clades(name=re.escape(first)).next())
except:
raise ValueError("Error: root sequence not found in tree.")
try:
inv_dict = {v: k for k, v in namedict.items()}
ptree.root_with_outgroup(ptree.find_clades(name=re.escape(inv_dict[first])))
Phylo.write(ptree, wdir + "/" + "input_treefile.new", "newick", plain=False)
except:
raise ValueError("Error rooting trees: check for corrupt tree file or duplicated sequences.")
# Write the sequences, in PHYLIP format (real PHYLIP format, as used by PHYLIP!)
with open(wdir + "/" + "infile", "w") as f:
f.write(" %d %d\n" % (len(seq_align), len(seq_align[0])))
for seq in seq_align:
f.write("%10s%s\n" % (seq.id, seq.seq.upper()))
# Write the tree file
Phylo.write(qtree, wdir + "/" + "intree", "newick")
if os.path.exists(wdir + "/" + "outfile"):
os.remove(wdir + "/" + "outfile")
if os.path.exists(wdir + "/" + "outtree"):
os.remove(wdir + "/" + "outtree")
# The path to the module may reference either a .py or a .pyc file...
ctlfile = os.path.abspath(__file__).replace(".pyc", ".ctl") if ".pyc" in os.path.abspath(__file__) \
else os.path.abspath(__file__).replace(".py", ".ctl")
# Check for dnaml in the current directory
dnamlfile = os.path.abspath(__file__).replace("Dnaml.pyc", "dnaml") if ".pyc" in os.path.abspath(__file__) \
else os.path.abspath(__file__).replace("Dnaml.py", "dnaml")
if not os.path.exists(dnamlfile):
dnamlfile = "dnaml" # must be on the path somewhere
with open(wdir + "/" + "dnaml.txt", "w") as o, open(ctlfile, "r") as i:
subprocess.call(dnamlfile, cwd=wdir, stdin = i, stdout=o)
if not os.path.isfile(wdir + "/" + "outfile"):
rep("No output returned by dnaml: please check the logs for the issue.")
return None
if os.path.isfile(wdir + "/" + "outfile.txt"):
os.remove(wdir + "/" + "outfile.txt")
os.rename(wdir + "/" + "outfile", wdir + "/" + "outfile.txt")
intseqs = self.__parse_outfile(wdir + "/" + "outfile.txt")
if not intseqs:
rep("Unexpected output returned by dnaml: please check the logs for the issue.")
return None
# Custom sort function to put the root record first, then others supplied by the user, then intermediate nodes
def key_ids(rec):
if rec.id == "N%09d" % 1:
return 'a__' + rec.id
elif 'node_' in rec.id:
return 'z__' + "%04d" % (int)(rec.id.split("_")[1])
else:
return 'l__' + rec.id
labelled_tree = Phylo.read(wdir + "/" + "outtree", "newick")
intseqs.seek(0)
int_seqs = Alignment(file_name=intseqs, format="fasta")
int_seqs.sort(key=key_ids)
intseqs.seek(0)
int_aas = Alignment()
int_aas.read_nt(intseqs, "fasta")
int_aas.sort(key=key_ids)
int_aas.set_position_numbers(position_numbers = seqpattern)
# Put back the original names in all our collections
for seq in int_seqs:
if seq.id in inv_dict:
seq.id = inv_dict[seq.id]
seq.name = ""
seq.description = ""
for seq in int_aas:
if seq.id in inv_dict:
seq.id = inv_dict[seq.id]
seq.name = ""
seq.description = ""
nodeid = 1
for clade in labelled_tree.find_clades(order="preorder"):
if clade.name is None:
clade.name = "node_%d" % nodeid # This relies on our traversal using the same order as dnaml
nodeid += 1
else:
if clade.name in inv_dict:
clade.name = inv_dict[clade.name]
# Now we need to map the labelling of the nodes in the labelled tree to the nodes in the original tree
self.__map_names(ptree, labelled_tree)
Phylo.write(ptree, wdir + "/" + tag + "intermediates_treefile.new", "newick", plain=False)
cladenames = []
new_int_aas = Alignment()
for clade in ptree.find_clades():
if clade.name is not None:
cladenames.append(clade.name)
for rec in int_aas:
if rec.id in cladenames:
new_int_aas.append(rec)
int_aas = new_int_aas
int_aas.set_position_numbers(position_numbers = seqpattern)
copy_tree = copy.deepcopy(ptree)
# Calculate AA diffs between each node and its parent, and write to the tree
labels = {}
def diffkey(diff):
return int_aas.index_of(diff[1:-1])
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = list(int_aas.seqdiff(clade.name, parent.name))
diffs.sort(key = diffkey)
diffs = "+".join(diffs)
if "node_" in clade.name:
labels[clade.name] = diffs
else:
labels[clade.name] = str(clade.name) + " " + diffs
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile.new", "newick", plain=False)
# Now write a tree with summary CDR/FR total changes
if cdrfile is not None:
ptree = copy.deepcopy(copy_tree)
acdr = AnalyseCDR(int_aas, file_name=cdrfile)
labels = {}
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = acdr.category_diff(clade.name, parent.name)
if "node_" in clade.name:
labels[clade.name] = diffs
else:
labels[clade.name] = str(clade.name) + " " + diffs
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile_sum.new", "newick", plain=False)
# And write a tree with counts of total AA changes
ptree = copy.deepcopy(copy_tree)
labels = {}
for clade in ptree.find_clades():
if clade.name is not None:
parent = self.__get_parent(ptree, clade)
if parent is None:
path = ptree.get_path(clade)
if len(path) == 1 and clade.name != first:
fname = inv_dict[first]
parent = ptree.find_clades(name = re.escape(fname)).next()
if parent is not None and parent.name is not None:
diffs = list(int_aas.seqdiff(clade.name, parent.name))
if "node_" in clade.name:
labels[clade.name] = str(len(diffs)) if len(diffs) > 0 else ""
else:
labels[clade.name] = str(clade.name) + (" " + str(len(diffs)) if len(diffs) > 0 else "")
for clade in ptree.find_clades():
if clade.name is not None and clade.name in labels:
clade.name = labels[clade.name]
Phylo.write(ptree, wdir + "/" + tag + "annotated_treefile_tot.new", "newick", plain=False)
f = open(wdir + "/" + tag + "aa_alignment.txt", "w")
f.write(int_aas.report(100))
f.close()
f = open(wdir + "/" + tag + "nt_alignment.txt", "w")
f.write(int_seqs.report(100))
f.close()
for rec in int_aas:
rec.description = ""
AlignIO.write(int_aas, wdir + "/" + tag + "aa_alignment.fa", "fasta")
AlignIO.write(int_seqs, wdir + "/" + tag + "nt_alignment.fa", "fasta")
return int_aas
def __parse_outfile(self, filename):
"""Internal method to parse the dnaml output file created after ancestral reconstruction."""
#Fish out the tree with node labels, and the ancestral sequences
seqs = {}
with open(filename, "r") as f:
for line in f:
if "Reconstructed sequence" in line or not line:
break
if not line:
return
for line in f:
if len(line) > 10:
id = line[:11].replace(" ", "")
if 'N' not in id:
id = "node_" + id
seq = line[11:].strip().replace(" ", "")
seqs[id] = seqs.get(id, "") + seq
intseqs = StringIO()
for id,seq in seqs.iteritems():
intseqs.write(">%s\n" % id)
intseqs.write("%s\n" % seq)
return intseqs
def __get_parent(self, tree, child_clade):
"""Internal method to find the parent of a clade"""
node_path = tree.get_path(child_clade)
if len(node_path) > 1:
return node_path[-2]
else:
return None
def __map_names(self, ptree, reftree):
"""Map the names of intermediate nodes across from reftree to ptree"""
for clade in ptree.find_clades(order = 'postorder'):
if clade.name is None:
childname = clade.clades[0].name
if childname is not None:
refchild = reftree.find_clades(name=re.escape(childname))
refp = self.__get_parent(reftree, refchild.next())
if refp is not None:
clade.name = refp.name
elif clade != ptree.clade:
clade.name = reftree.root.name
| [
"StringIO.StringIO",
"os.path.exists",
"re.escape",
"os.rename",
"os.path.isfile",
"os.remove",
"Bio.Phylo.write",
"AnalyseCDR.AnalyseCDR",
"subprocess.call",
"copy.deepcopy",
"os.path.abspath",
"Bio.Phylo.read",
"Alignment.Alignment",
"Bio.AlignIO.write"
] | [((3033, 3053), 'copy.deepcopy', 'copy.deepcopy', (['ptree'], {}), '(ptree)\n', (3046, 3053), False, 'import copy\n'), ((4240, 4291), 'Bio.Phylo.write', 'Phylo.write', (['qtree', "(wdir + '/' + 'intree')", '"""newick"""'], {}), "(qtree, wdir + '/' + 'intree', 'newick')\n", (4251, 4291), False, 'from Bio import Phylo\n'), ((4304, 4342), 'os.path.exists', 'os.path.exists', (["(wdir + '/' + 'outfile')"], {}), "(wdir + '/' + 'outfile')\n", (4318, 4342), False, 'import os\n'), ((4401, 4439), 'os.path.exists', 'os.path.exists', (["(wdir + '/' + 'outtree')"], {}), "(wdir + '/' + 'outtree')\n", (4415, 4439), False, 'import os\n'), ((5459, 5501), 'os.path.isfile', 'os.path.isfile', (["(wdir + '/' + 'outfile.txt')"], {}), "(wdir + '/' + 'outfile.txt')\n", (5473, 5501), False, 'import os\n'), ((5561, 5622), 'os.rename', 'os.rename', (["(wdir + '/' + 'outfile')", "(wdir + '/' + 'outfile.txt')"], {}), "(wdir + '/' + 'outfile', wdir + '/' + 'outfile.txt')\n", (5570, 5622), False, 'import os\n'), ((6240, 6284), 'Bio.Phylo.read', 'Phylo.read', (["(wdir + '/' + 'outtree')", '"""newick"""'], {}), "(wdir + '/' + 'outtree', 'newick')\n", (6250, 6284), False, 'from Bio import Phylo\n'), ((6328, 6372), 'Alignment.Alignment', 'Alignment', ([], {'file_name': 'intseqs', 'format': '"""fasta"""'}), "(file_name=intseqs, format='fasta')\n", (6337, 6372), False, 'from Alignment import Alignment\n'), ((6450, 6461), 'Alignment.Alignment', 'Alignment', ([], {}), '()\n', (6459, 6461), False, 'from Alignment import Alignment\n'), ((7551, 7645), 'Bio.Phylo.write', 'Phylo.write', (['ptree', "(wdir + '/' + tag + 'intermediates_treefile.new')", '"""newick"""'], {'plain': '(False)'}), "(ptree, wdir + '/' + tag + 'intermediates_treefile.new',\n 'newick', plain=False)\n", (7562, 7645), False, 'from Bio import Phylo\n'), ((7689, 7700), 'Alignment.Alignment', 'Alignment', ([], {}), '()\n', (7698, 7700), False, 'from Alignment import Alignment\n'), ((8055, 8075), 'copy.deepcopy', 'copy.deepcopy', 
(['ptree'], {}), '(ptree)\n', (8068, 8075), False, 'import copy\n'), ((9271, 9361), 'Bio.Phylo.write', 'Phylo.write', (['ptree', "(wdir + '/' + tag + 'annotated_treefile.new')", '"""newick"""'], {'plain': '(False)'}), "(ptree, wdir + '/' + tag + 'annotated_treefile.new', 'newick',\n plain=False)\n", (9282, 9361), False, 'from Bio import Phylo\n'), ((10760, 10784), 'copy.deepcopy', 'copy.deepcopy', (['copy_tree'], {}), '(copy_tree)\n', (10773, 10784), False, 'import copy\n'), ((11802, 11896), 'Bio.Phylo.write', 'Phylo.write', (['ptree', "(wdir + '/' + tag + 'annotated_treefile_tot.new')", '"""newick"""'], {'plain': '(False)'}), "(ptree, wdir + '/' + tag + 'annotated_treefile_tot.new',\n 'newick', plain=False)\n", (11813, 11896), False, 'from Bio import Phylo\n'), ((12207, 12276), 'Bio.AlignIO.write', 'AlignIO.write', (['int_aas', "(wdir + '/' + tag + 'aa_alignment.fa')", '"""fasta"""'], {}), "(int_aas, wdir + '/' + tag + 'aa_alignment.fa', 'fasta')\n", (12220, 12276), False, 'from Bio import AlignIO, SeqIO\n'), ((12285, 12355), 'Bio.AlignIO.write', 'AlignIO.write', (['int_seqs', "(wdir + '/' + tag + 'nt_alignment.fa')", '"""fasta"""'], {}), "(int_seqs, wdir + '/' + tag + 'nt_alignment.fa', 'fasta')\n", (12298, 12355), False, 'from Bio import AlignIO, SeqIO\n'), ((13150, 13160), 'StringIO.StringIO', 'StringIO', ([], {}), '()\n', (13158, 13160), False, 'from StringIO import StringIO\n'), ((3684, 3760), 'Bio.Phylo.write', 'Phylo.write', (['ptree', "(wdir + '/' + 'input_treefile.new')", '"""newick"""'], {'plain': '(False)'}), "(ptree, wdir + '/' + 'input_treefile.new', 'newick', plain=False)\n", (3695, 3760), False, 'from Bio import Phylo\n'), ((4356, 4389), 'os.remove', 'os.remove', (["(wdir + '/' + 'outfile')"], {}), "(wdir + '/' + 'outfile')\n", (4365, 4389), False, 'import os\n'), ((4453, 4486), 'os.remove', 'os.remove', (["(wdir + '/' + 'outtree')"], {}), "(wdir + '/' + 'outtree')\n", (4462, 4486), False, 'import os\n'), ((5032, 5057), 'os.path.exists', 
'os.path.exists', (['dnamlfile'], {}), '(dnamlfile)\n', (5046, 5057), False, 'import os\n'), ((5224, 5279), 'subprocess.call', 'subprocess.call', (['dnamlfile'], {'cwd': 'wdir', 'stdin': 'i', 'stdout': 'o'}), '(dnamlfile, cwd=wdir, stdin=i, stdout=o)\n', (5239, 5279), False, 'import subprocess\n'), ((5298, 5336), 'os.path.isfile', 'os.path.isfile', (["(wdir + '/' + 'outfile')"], {}), "(wdir + '/' + 'outfile')\n", (5312, 5336), False, 'import os\n'), ((5515, 5552), 'os.remove', 'os.remove', (["(wdir + '/' + 'outfile.txt')"], {}), "(wdir + '/' + 'outfile.txt')\n", (5524, 5552), False, 'import os\n'), ((9473, 9497), 'copy.deepcopy', 'copy.deepcopy', (['copy_tree'], {}), '(copy_tree)\n', (9486, 9497), False, 'import copy\n'), ((9517, 9555), 'AnalyseCDR.AnalyseCDR', 'AnalyseCDR', (['int_aas'], {'file_name': 'cdrfile'}), '(int_aas, file_name=cdrfile)\n', (9527, 9555), False, 'from AnalyseCDR import AnalyseCDR\n'), ((10592, 10686), 'Bio.Phylo.write', 'Phylo.write', (['ptree', "(wdir + '/' + tag + 'annotated_treefile_sum.new')", '"""newick"""'], {'plain': '(False)'}), "(ptree, wdir + '/' + tag + 'annotated_treefile_sum.new',\n 'newick', plain=False)\n", (10603, 10686), False, 'from Bio import Phylo\n'), ((4656, 4681), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4671, 4681), False, 'import os\n'), ((4908, 4933), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4923, 4933), False, 'import os\n'), ((4593, 4618), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4608, 4618), False, 'import os\n'), ((4701, 4726), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4716, 4726), False, 'import os\n'), ((4839, 4864), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4854, 4864), False, 'import os\n'), ((4953, 4978), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4968, 4978), False, 'import os\n'), ((3643, 3669), 're.escape', 
're.escape', (['inv_dict[first]'], {}), '(inv_dict[first])\n', (3652, 3669), False, 'import re\n'), ((13929, 13949), 're.escape', 're.escape', (['childname'], {}), '(childname)\n', (13938, 13949), False, 'import re\n'), ((3359, 3375), 're.escape', 're.escape', (['first'], {}), '(first)\n', (3368, 3375), False, 'import re\n'), ((8651, 8667), 're.escape', 're.escape', (['fname'], {}), '(fname)\n', (8660, 8667), False, 'import re\n'), ((11198, 11214), 're.escape', 're.escape', (['fname'], {}), '(fname)\n', (11207, 11214), False, 'import re\n'), ((10013, 10029), 're.escape', 're.escape', (['fname'], {}), '(fname)\n', (10022, 10029), False, 'import re\n')] |
from traits.api import (HasPrivateTraits, Instance, List, Dict)
from .player import Player
from .board import GameBoard
from .tech_board import TechBoard
from gaia_project.faction_board.player_panel import PlayerPanel
from .layout import Layout
from .constants import BASIC_4P_SETUP
import pygame
import sys
# Abstract marker base for communication layers; concrete implementations
# (e.g. LocalCommunicationLayer) supply the actual behaviour.
class CommunicationLayer(HasPrivateTraits):
    pass
class LocalCommunicationLayer(CommunicationLayer):
    """Communication layer for a local (single-machine, pygame-rendered) game.

    Owns the game board, tech board and one PlayerPanel per player, and turns
    pygame events into move/choice objects for the game engine.
    """
    players = List(Instance(Player))
    board = Instance(GameBoard)
    tech_board = Instance(TechBoard)
    player_panels = Dict(Instance(Player), Instance(PlayerPanel))
    layout = Instance(Layout)
    def __init__(self, players=None, cfg=BASIC_4P_SETUP, *args, **kwargs):
        """Build the layout, boards and panels, then initialise pygame.

        players: list of Player; a default three-player set is used when None.
        cfg: board setup configuration (defaults to BASIC_4P_SETUP).
        """
        super().__init__(*args, **kwargs)
        if players is not None:
            self.players = players
        else:
            self.players = [Player('<NAME>', 'Freddy'),
                            Player('Xenos', 'Jebediah'),
                            Player('Taklons', 'Vivian')]
        self.layout = Layout(self.players, cfg)
        self.board = self.layout.board
        self.tech_board = self.layout.tech_board
        pp_w, pp_h = self.layout.player_panel_coords()
        # The first player's panel is the one embedded in the layout; every
        # other player gets a fresh panel of the same size.
        self.player_panels = {
            player : (
                PlayerPanel(pp_w, pp_h, player)
                if player is not self.players[0] else
                self.layout.player_panel) for player in self.players}
        pygame.init()
        # Block everything, then whitelist only the events we handle.
        pygame.event.set_allowed(None)
        pygame.event.set_allowed((pygame.QUIT, pygame.MOUSEBUTTONUP,
                                  pygame.VIDEORESIZE))
    def make_move(self, player, game_state):
        """Obtain the next move for ``player`` and return it.

        Human players choose interactively via the event loop; automa players
        delegate to their automa object.
        """
        # set the layout to have the current player panel showing
        if player.intelligence == 'human':
            self.layout.player_panel = self.player_panels[player]
            self.layout.player_panel.hide_choice()
            self.update_gfx()
            move = self.process_events()
            return move
        elif player.intelligence == 'automa':
            # BUGFIX: the computed move was previously discarded (the function
            # fell through and returned None).
            move = player.automa.make_move(player, game_state)
            return move
        elif player.intelligence == 'ai':
            raise NotImplementedError
        else:
            # BUGFIX: was ``raise NotImplemented`` — NotImplemented is not an
            # exception, so raising it produced a TypeError.
            raise NotImplementedError
    def make_choice(self, player, choice, move):
        """Present ``choice`` (arising from ``move``) to the player and return the selection."""
        self.layout.player_panel = self.player_panels[player]
        self.layout.player_panel.show_choice(choice, move.description)
        self.update_gfx()
        choice = self.process_events()
        print('gottud chois')
        return choice
    def inform_illegal_choice(self, player, explanation):
        """Display ``explanation`` for an illegal choice and wait for acknowledgement."""
        self.layout.player_panel = self.player_panels[player]
        self.layout.player_panel.display_error(explanation)
        self.update_gfx()
        self.process_events()
    def process_events(self):
        """Pump pygame events until the layout produces a result, and return it.

        QUIT terminates the program; VIDEORESIZE resizes the layout; a mouse
        click is routed to the layout, which may yield a move/choice object.
        """
        while True:
            #we are now accepting mouse events
            pygame.event.set_allowed(pygame.MOUSEBUTTONUP)
            for event in pygame.event.get():
                #this event does not need to be executed in order
                if event.type == pygame.QUIT:
                    pygame.quit()
                    sys.exit()
                #this event does not need to be executed in order
                elif event.type == pygame.VIDEORESIZE:
                    self.layout.resize(event.w, event.h)
                elif event.type == pygame.MOUSEBUTTONUP:
                    #disallow mouse events until this is handled
                    pygame.event.set_blocked(pygame.MOUSEBUTTONUP)
                    origin_surf = self.layout.determine_origin(event.pos)
                    if origin_surf is None:
                        continue
                    event = self.layout.pass_event(origin_surf, event.pos)
                    if event is not None:
                        return event
    def add_building(self, player, coords, building_type, lantid_share=False):
        """Place ``player``'s building of ``building_type`` at ``coords`` on the board."""
        x, y = coords
        self.board.add_building(x, y, player.color, building_type,
                                lantid_share=lantid_share)
    def add_orbital(self, player, coords, orbital_type):
        """Place ``player``'s orbital of ``orbital_type`` at ``coords`` on the board."""
        x, y = coords
        self.board.add_orbital(x, y, player.color, orbital_type)
    def techup(self, player, tech_track):
        """Advance ``player`` one step on ``tech_track`` on the tech board."""
        self.tech_board.techup(player.color, tech_track)
    def update_available_buildings(self, player):
        # Not yet implemented for the local UI.
        pass
    def update_bonus_tiles(self, tiles):
        """Push the current set of bonus tiles to every player panel."""
        for player in self.player_panels:
            panel = self.player_panels[player]
            panel.update_bonus_tiles(tiles)
    def update_turn_order(self, next_order):
        # Not yet implemented for the local UI.
        pass
    def update_advanced_tech_tiles(self, tiles):
        # Not yet implemented for the local UI.
        pass
    def update_terraforming_fed(self, fed):
        # Not yet implemented for the local UI.
        pass
    def update_available_feds(self, feds):
        # Not yet implemented for the local UI.
        pass
    def update_available_power_actions(self, power_actions):
        """Refresh which power actions are still available on the tech board."""
        self.tech_board.update_power_actions(power_actions)
    def update_available_special_actions(self, player, spec_actions):
        """Refresh ``player``'s available special actions on their panel."""
        panel = self.player_panels[player]
        panel.update_special_actions(spec_actions[player])
    def update_misc_info(self, score):
        # Not yet implemented for the local UI.
        pass
    def update_gfx(self):
        """Repaint the whole layout."""
        self.layout.paint()
| [
"traits.api.Instance",
"pygame.init",
"pygame.event.set_allowed",
"pygame.event.get",
"pygame.quit",
"pygame.event.set_blocked",
"sys.exit",
"gaia_project.faction_board.player_panel.PlayerPanel"
] | [((464, 483), 'traits.api.Instance', 'Instance', (['GameBoard'], {}), '(GameBoard)\n', (472, 483), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((499, 518), 'traits.api.Instance', 'Instance', (['TechBoard'], {}), '(TechBoard)\n', (507, 518), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((595, 611), 'traits.api.Instance', 'Instance', (['Layout'], {}), '(Layout)\n', (603, 611), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((435, 451), 'traits.api.Instance', 'Instance', (['Player'], {}), '(Player)\n', (443, 451), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((542, 558), 'traits.api.Instance', 'Instance', (['Player'], {}), '(Player)\n', (550, 558), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((560, 581), 'traits.api.Instance', 'Instance', (['PlayerPanel'], {}), '(PlayerPanel)\n', (568, 581), False, 'from traits.api import HasPrivateTraits, Instance, List, Dict\n'), ((1340, 1353), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1351, 1353), False, 'import pygame\n'), ((1359, 1389), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['None'], {}), '(None)\n', (1383, 1389), False, 'import pygame\n'), ((1394, 1480), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['(pygame.QUIT, pygame.MOUSEBUTTONUP, pygame.VIDEORESIZE)'], {}), '((pygame.QUIT, pygame.MOUSEBUTTONUP, pygame.\n VIDEORESIZE))\n', (1418, 1480), False, 'import pygame\n'), ((2646, 2692), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['pygame.MOUSEBUTTONUP'], {}), '(pygame.MOUSEBUTTONUP)\n', (2670, 2692), False, 'import pygame\n'), ((2713, 2731), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2729, 2731), False, 'import pygame\n'), ((1181, 1212), 'gaia_project.faction_board.player_panel.PlayerPanel', 'PlayerPanel', (['pp_w', 'pp_h', 'player'], {}), '(pp_w, pp_h, player)\n', (1192, 1212), False, 'from 
gaia_project.faction_board.player_panel import PlayerPanel\n'), ((2842, 2855), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2853, 2855), False, 'import pygame\n'), ((2866, 2876), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2874, 2876), False, 'import sys\n'), ((3153, 3199), 'pygame.event.set_blocked', 'pygame.event.set_blocked', (['pygame.MOUSEBUTTONUP'], {}), '(pygame.MOUSEBUTTONUP)\n', (3177, 3199), False, 'import pygame\n')] |
import base64
import json
import activity
import os
import requests
import boto.sqs
from boto.sqs.message import Message
from provider import eif
"""
ApprovePublication.py activity
"""
# Make the package's parent directory importable when this module is run
# directly (``os.sys`` is simply ``sys`` re-exported through the os module).
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)
class activity_ApprovePublication(activity.activity):
    """SWF activity that approves (publishes) a previously submitted article via the
    Drupal approve endpoint, then requeues the publication message on SQS.
    NOTE(review): Python 2-era code (``base64.decodestring``, ``e.message``, boto 2)."""
    def __init__(self, settings, logger, conn=None, token=None, activity_task=None):
        activity.activity.__init__(self, settings, logger, conn, token, activity_task)
        self.name = "ApprovePublication"
        self.version = "1"
        self.default_task_heartbeat_timeout = 30
        self.default_task_schedule_to_close_timeout = 60 * 5
        self.default_task_schedule_to_start_timeout = 30
        self.default_task_start_to_close_timeout = 60 * 5
        self.description = "Approve a previously submitted article"
        self.rules = []
        self.info = None
        self.logger = logger
    # TODO : better exception handling
    def do_activity(self, data=None):
        """
        Do the work
        Expects ``data`` with keys: article_id, version, run, publication_data
        (base64-encoded workflow-starter message).
        Returns True on success, False on error, or ACTIVITY_TEMPORARY_FAILURE
        when the endpoint answers 500 (so the workflow retries).
        """
        if self.logger:
            self.logger.info('data: %s' % json.dumps(data, sort_keys=True, indent=4))
        article_id = data['article_id']
        version = data['version']
        run = data['run']
        try:
            self.emit_monitor_event(self.settings, article_id, version, run,
                                    "Approve Publication", "start",
                                    "Starting approval of article " + article_id)
            publication_data = data['publication_data']
            article_version_id = str(article_id) + '.' + str(version)
            destination = self.settings.drupal_approve_endpoint
            destination = destination + article_version_id + '.json'
            headers = {'content-type': 'application/json'}
            # Basic auth is only attached when a Drupal update user is configured
            auth = None
            if self.settings.drupal_update_user and self.settings.drupal_update_user != '':
                auth = requests.auth.HTTPBasicAuth(self.settings.drupal_update_user,
                                                   self.settings.drupal_update_pass)
            r = requests.put(destination, data="{ \"publish\": \"1\" }", headers=headers, auth=auth)
            # NOTE(review): "retrying" in this message is misleading — it is logged
            # for every response, not only the retry (500) path.
            self.logger.info("PUT response was %s, retrying" % r.status_code)
            if r.status_code == 500:
                # Temporary failure: let SWF retry the activity
                return activity.activity.ACTIVITY_TEMPORARY_FAILURE
            if r.status_code == 200:
                self.set_monitor_property(self.settings, article_id, 'publication-status',
                                          'published', "text", version=version)
                # publication_data is the base64-encoded workflow-starter message;
                # decodestring is the Python 2 spelling (removed in Python 3.9)
                message = base64.decodestring(publication_data)
                message = self.modify_update_date(message, r)
                sqs_conn = boto.sqs.connect_to_region(
                    self.settings.sqs_region,
                    aws_access_key_id=self.settings.aws_access_key_id,
                    aws_secret_access_key=self.settings.aws_secret_access_key)
                out_queue = sqs_conn.get_queue(self.settings.workflow_starter_queue)
                m = Message()
                m.set_body(message)
                out_queue.write(m)
            else:
                self.emit_monitor_event(self.settings, article_id, version, run,
                                        "Approve Publication", "error",
                                        "Website ingest returned an error code: " +
                                        str(r.status_code))
                self.logger.error("Body:" + r.text)
                return False
        except Exception as e:
            # NOTE(review): ``e.message`` only exists in Python 2; str(e) would be portable
            self.logger.exception("Exception when submitting article EIF")
            self.emit_monitor_event(self.settings, article_id, version, run,
                                    "Approve Publication", "error",
                                    "Error approving article publication for " + article_id +
                                    " message:" + str(e.message))
            return False
        # ``r`` is guaranteed to be bound here: every path that fails before the
        # PUT returns inside the try/except above.
        self.emit_monitor_event(self.settings, article_id, version, run,
                                "Approve Publication", "end",
                                "Finished approving article" + article_id +
                                " status was " + str(r.status_code))
        return True
    def modify_update_date(self, message, response):
        """Return ``message`` with workflow_data.update_date replaced by the date
        extracted from the endpoint's JSON response (unchanged when none is found)."""
        update_date = self.extract_update_date(
            self.workflow_data(message),
            response.json())
        if update_date:
            message_json = json.loads(message)
            if ("workflow_data" in message_json and
                    "update_date" in message_json["workflow_data"]):
                message_json["workflow_data"]["update_date"] = update_date
            message = json.dumps(message_json)
        return message
    def workflow_data(self, message):
        """Return the ``workflow_data`` dict from a JSON ``message`` string, or {}."""
        message_json = json.loads(message)
        if "workflow_data" in message_json:
            return message_json["workflow_data"]
        return {}
    def extract_update_date(self, passthrough_json, response_json):
        """Delegate update-date extraction to the eif provider module."""
        return eif.extract_update_date(passthrough_json, response_json)
| [
"json.loads",
"activity.activity.__init__",
"requests.auth.HTTPBasicAuth",
"provider.eif.extract_update_date",
"json.dumps",
"os.sys.path.insert",
"boto.sqs.message.Message",
"requests.put",
"os.path.abspath",
"base64.decodestring"
] | [((251, 283), 'os.sys.path.insert', 'os.sys.path.insert', (['(0)', 'parentdir'], {}), '(0, parentdir)\n', (269, 283), False, 'import os\n'), ((223, 248), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (238, 248), False, 'import os\n'), ((433, 511), 'activity.activity.__init__', 'activity.activity.__init__', (['self', 'settings', 'logger', 'conn', 'token', 'activity_task'], {}), '(self, settings, logger, conn, token, activity_task)\n', (459, 511), False, 'import activity\n'), ((4907, 4926), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (4917, 4926), False, 'import json\n'), ((5122, 5178), 'provider.eif.extract_update_date', 'eif.extract_update_date', (['passthrough_json', 'response_json'], {}), '(passthrough_json, response_json)\n', (5145, 5178), False, 'from provider import eif\n'), ((2155, 2240), 'requests.put', 'requests.put', (['destination'], {'data': '"""{ "publish": "1" }"""', 'headers': 'headers', 'auth': 'auth'}), '(destination, data=\'{ "publish": "1" }\', headers=headers, auth=auth\n )\n', (2167, 2240), False, 'import requests\n'), ((4559, 4578), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (4569, 4578), False, 'import json\n'), ((1992, 2092), 'requests.auth.HTTPBasicAuth', 'requests.auth.HTTPBasicAuth', (['self.settings.drupal_update_user', 'self.settings.drupal_update_pass'], {}), '(self.settings.drupal_update_user, self.settings\n .drupal_update_pass)\n', (2019, 2092), False, 'import requests\n'), ((2659, 2696), 'base64.decodestring', 'base64.decodestring', (['publication_data'], {}), '(publication_data)\n', (2678, 2696), False, 'import base64\n'), ((3118, 3127), 'boto.sqs.message.Message', 'Message', ([], {}), '()\n', (3125, 3127), False, 'from boto.sqs.message import Message\n'), ((4797, 4821), 'json.dumps', 'json.dumps', (['message_json'], {}), '(message_json)\n', (4807, 4821), False, 'import json\n'), ((1144, 1186), 'json.dumps', 'json.dumps', (['data'], {'sort_keys': '(True)', 
'indent': '(4)'}), '(data, sort_keys=True, indent=4)\n', (1154, 1186), False, 'import json\n')] |
"""
Spam comments to be classified by the relational model.
"""
import os
import numpy as np
class Comments:
    """Writes per-comment predicate files consumed by the relational reasoning
    engines (PSL or Tuffy)."""
    def __init__(self, config_obj, util_obj):
        """config_obj: experiment configuration (rel_dir, domain).
        util_obj: utility helpers (div0)."""
        self.config_obj = config_obj
        self.util_obj = util_obj
    # public
    def build(self, df, dset, data_f=None, tuffy=False, iden='0'):
        """Writes predicate info to the designated data folder.
        df: comments dataframe.
        dset: dataset (e.g. val or test).
        data_f: data folder to save predicate files.
        tuffy: boolean indicating if tuffy is the engine being used.
        iden: identifier suffix for the PSL file names."""
        if data_f is None:
            data_f = self.define_file_folders()
        unique_df = self.drop_duplicate_comments(df)
        if tuffy:
            self.write_tuffy_predicates(unique_df, dset, data_f)
        else:
            self.write_psl_predicates(unique_df, dset, data_f, iden=iden)
    # private
    def define_file_folders(self):
        """Return (and create if missing) the engine data folder for this domain."""
        rel_dir = self.config_obj.rel_dir
        domain = self.config_obj.domain
        data_f = rel_dir + 'data/' + domain + '/'
        if not os.path.exists(data_f):
            os.makedirs(data_f)
        return data_f
    def drop_duplicate_comments(self, df):
        """Return df restricted to (com_id, ind_pred, label) with duplicate rows removed."""
        temp_df = df.filter(['com_id', 'ind_pred', 'label'], axis=1)
        unique_df = temp_df.drop_duplicates()
        return unique_df
    def write_psl_predicates(self, df, dset, data_f, iden='0'):
        """Write the three tab-separated PSL predicate files (ids, labels, predictions)."""
        df.to_csv(data_f + dset + '_no_label_' + iden + '.tsv',
                  columns=['com_id'], sep='\t', header=None, index=None)
        df.to_csv(data_f + dset + '_' + iden + '.tsv',
                  columns=['com_id', 'label'], sep='\t', header=None,
                  index=None)
        df.to_csv(data_f + dset + '_pred_' + iden + '.tsv',
                  columns=['com_id', 'ind_pred'], sep='\t', header=None,
                  index=None)
    def write_tuffy_predicates(self, df, dset, data_f):
        """Write Tuffy evidence (weighted Indpred) and query (Spam) files.

        The evidence weight is the log-odds of the independent prediction.
        """
        # FIX: use context managers so the files are closed even if an
        # exception is raised while iterating the dataframe.
        with open(data_f + dset + '_evidence.txt', 'w') as ev, \
                open(data_f + dset + '_query.txt', 'w') as q:
            for index, row in df.iterrows():
                pred = row.ind_pred
                com_id = str(int(row.com_id))
                # log-odds; div0 guards against pred == 1
                wgt = str(np.log(self.util_obj.div0(pred, (1 - pred))))
                ev.write('Indpred(' + com_id + ', ' + wgt + ')\n')
                q.write('Spam(' + com_id + ')\n')
| [
"os.path.exists",
"os.makedirs"
] | [((1070, 1092), 'os.path.exists', 'os.path.exists', (['data_f'], {}), '(data_f)\n', (1084, 1092), False, 'import os\n'), ((1106, 1125), 'os.makedirs', 'os.makedirs', (['data_f'], {}), '(data_f)\n', (1117, 1125), False, 'import os\n')] |
import os
import logging
import json
from nnattack.variables import auto_var, get_file_name
from params import (
compare_attacks,
compare_defense,
#compare_nns,
nn_k1_robustness,
nn_k3_robustness,
nn_k1_approx_robustness_figs,
dt_robustness_figs,
rf_robustness_figs,
nn_k1_robustness_figs,
nn_k3_robustness_figs,
dt_robustness,
rf_robustness,
mlp_ap_robustness,
mlp_at_robustness,
lr_ap_robustness,
lr_at_robustness,
nn1_def,
nn3_def,
dt_def,
rf_def,
lr_def,
mlp_def,
)
from main import eps_accuracy
logging.basicConfig(level=logging.DEBUG)
# Debug mode is toggled via the environment (any non-empty value enables it),
# e.g. ``DEBUG=1 python run.py``.
DEBUG = True if os.environ.get('DEBUG', False) else False
def main():
    """Collect grid parameters from the enabled experiment definitions and run
    them all through ``auto_var.run_grid_params``.

    Commented-out entries are experiments that are currently disabled.
    """
    experiments = [
        compare_attacks(),
        compare_defense(),
        #nn_k1_robustness_figs(),
        #nn_k3_robustness_figs(),
        #rf_robustness_figs(),
        #dt_robustness_figs(),
        dt_robustness(),
        rf_robustness(),
        nn_k3_robustness(),
        nn_k1_robustness(),
        #mlp_ap_robustness(),
        #mlp_at_robustness(),
        #lr_ap_robustness(),
        #lr_at_robustness(),
        #nn1_def(),
        #nn3_def(),
        #dt_def(),
        #rf_def(),
        #lr_def(),
        #mlp_def(),
    ]
    grid_params = []
    for exp in experiments:
        # Each experiment yields (experiment fn, name, grid params, run params)
        exp_fn, _, grid_param, run_param = exp()
        if isinstance(grid_param, list):
            grid_params.extend(grid_param)
        else:
            grid_params.append(grid_param)
        # DEBUG runs serially and fails fast; normal runs use 4 workers
        if DEBUG:
            run_param['n_jobs'] = 1
            run_param['allow_failure'] = False
        else:
            run_param['n_jobs'] = 4
            run_param['allow_failure'] = True
    # NOTE(review): only the *last* experiment's exp_fn/run_param are used for the
    # combined grid — presumably all experiments share the same experiment
    # function (eps_accuracy) and run settings; verify before adding new ones.
    auto_var.run_grid_params(exp_fn, grid_params, **run_param)
    #auto_var.run_grid_params(delete_file, grid_params, n_jobs=1,
    #        with_hook=False, allow_failure=False)
    #auto_var.run_grid_params(celery_run, grid_params, n_jobs=1,
    #        allow_failure=False)
    #auto_var.run_grid_params(temp_fix, grid_params, n_jobs=6,
    #        allow_failure=False, with_hook=False)
def delete_file(auto_var):
os.unlink(get_file_name(auto_var) + '.json')
def celery_run(auto_var):
run_exp.delay(auto_var.var_value)
from main import set_random_seed
import numpy as np
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
def temp_fix(auto_var):
file_name = get_file_name(auto_var)
print(file_name)
if os.path.exists("%s.json" % file_name):
with open("%s.json" % file_name, "r") as f:
ret = json.load(f)
if "tst_score" in ret:
return
else:
return
random_state = set_random_seed(auto_var)
ord = auto_var.get_var("ord")
X, y, eps_list = auto_var.get_var("dataset")
idxs = np.arange(len(X))
random_state.shuffle(idxs)
trnX, tstX, trny, tsty = X[idxs[:-200]], X[idxs[-200:]], y[idxs[:-200]], y[idxs[-200:]]
scaler = MinMaxScaler()
trnX = scaler.fit_transform(trnX)
tstX = scaler.transform(tstX)
lbl_enc = OneHotEncoder(categories=[np.sort(np.unique(y))], sparse=False)
#lbl_enc = OneHotEncoder(sparse=False)
lbl_enc.fit(trny.reshape(-1, 1))
auto_var.set_intermidiate_variable("lbl_enc", lbl_enc)
results = []
auto_var.set_intermidiate_variable("trnX", trnX)
auto_var.set_intermidiate_variable("trny", trny)
model_name = auto_var.get_variable_value("model")
attack_name = auto_var.get_variable_value("attack")
if 'adv_rf' in model_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
if 'blackbox' in attack_name:
auto_var.set_intermidiate_variable("model", pre_model)
elif 'adv_nn' in model_name and 'blackbox' in attack_name:
pre_model = auto_var.get_var_with_argument('model', model_name[4:])
pre_model.fit(trnX, trny)
auto_var.set_intermidiate_variable("model", pre_model)
model = auto_var.get_var("model")
auto_var.set_intermidiate_variable("model", model)
model.fit(trnX, trny)
pred = model.predict(tstX)
ori_tstX, ori_tsty = tstX, tsty # len = 200
idxs = np.where(pred == tsty)[0]
random_state.shuffle(idxs)
augX = None
if ('adv' in model_name) or ('advPruning' in model_name) or ('robustv2' in model_name):
assert hasattr(model, 'augX')
auto_var.set_intermidiate_variable("trnX", model.augX)
auto_var.set_intermidiate_variable("trny", model.augy)
augX, augy = model.augX, model.augy
ret['tst_score'] = (model.predict(ori_tstX) == ori_tsty).mean()
with open("%s.json" % file_name, "w") as f:
json.dump(ret, f)
if __name__ == "__main__":
main()
| [
"nnattack.variables.auto_var.set_intermidiate_variable",
"nnattack.variables.auto_var.get_var",
"nnattack.variables.auto_var.run_grid_params",
"os.path.exists",
"numpy.where",
"params.dt_robustness",
"sklearn.preprocessing.MinMaxScaler",
"nnattack.variables.get_file_name",
"nnattack.variables.auto_v... | [((595, 635), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (614, 635), False, 'import logging\n'), ((653, 683), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '(False)'], {}), "('DEBUG', False)\n", (667, 683), False, 'import os\n'), ((1680, 1738), 'nnattack.variables.auto_var.run_grid_params', 'auto_var.run_grid_params', (['exp_fn', 'grid_params'], {}), '(exp_fn, grid_params, **run_param)\n', (1704, 1738), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2419, 2442), 'nnattack.variables.get_file_name', 'get_file_name', (['auto_var'], {}), '(auto_var)\n', (2432, 2442), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2471, 2508), 'os.path.exists', 'os.path.exists', (["('%s.json' % file_name)"], {}), "('%s.json' % file_name)\n", (2485, 2508), False, 'import os\n'), ((2688, 2713), 'main.set_random_seed', 'set_random_seed', (['auto_var'], {}), '(auto_var)\n', (2703, 2713), False, 'from main import set_random_seed\n'), ((2724, 2747), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""ord"""'], {}), "('ord')\n", (2740, 2747), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2770, 2797), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""dataset"""'], {}), "('dataset')\n", (2786, 2797), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2964, 2978), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (2976, 2978), False, 'from sklearn.preprocessing import OneHotEncoder, MinMaxScaler\n'), ((3215, 3269), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""lbl_enc"""', 'lbl_enc'], {}), "('lbl_enc', lbl_enc)\n", (3249, 3269), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3293, 3341), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', 
(['"""trnX"""', 'trnX'], {}), "('trnX', trnX)\n", (3327, 3341), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3346, 3394), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trny"""', 'trny'], {}), "('trny', trny)\n", (3380, 3394), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3413, 3449), 'nnattack.variables.auto_var.get_variable_value', 'auto_var.get_variable_value', (['"""model"""'], {}), "('model')\n", (3440, 3449), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3468, 3505), 'nnattack.variables.auto_var.get_variable_value', 'auto_var.get_variable_value', (['"""attack"""'], {}), "('attack')\n", (3495, 3505), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4001, 4026), 'nnattack.variables.auto_var.get_var', 'auto_var.get_var', (['"""model"""'], {}), "('model')\n", (4017, 4026), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4031, 4081), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'model'], {}), "('model', model)\n", (4065, 4081), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((736, 753), 'params.compare_attacks', 'compare_attacks', ([], {}), '()\n', (751, 753), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((763, 780), 'params.compare_defense', 'compare_defense', ([], {}), '()\n', (778, 780), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, 
nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((922, 937), 'params.dt_robustness', 'dt_robustness', ([], {}), '()\n', (935, 937), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((947, 962), 'params.rf_robustness', 'rf_robustness', ([], {}), '()\n', (960, 962), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((972, 990), 'params.nn_k3_robustness', 'nn_k3_robustness', ([], {}), '()\n', (988, 990), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, rf_def, lr_def, mlp_def\n'), ((1000, 1018), 'params.nn_k1_robustness', 'nn_k1_robustness', ([], {}), '()\n', (1016, 1018), False, 'from params import compare_attacks, compare_defense, nn_k1_robustness, nn_k3_robustness, nn_k1_approx_robustness_figs, dt_robustness_figs, rf_robustness_figs, nn_k1_robustness_figs, nn_k3_robustness_figs, dt_robustness, rf_robustness, mlp_ap_robustness, mlp_at_robustness, lr_ap_robustness, lr_at_robustness, nn1_def, nn3_def, dt_def, 
rf_def, lr_def, mlp_def\n'), ((3557, 3612), 'nnattack.variables.auto_var.get_var_with_argument', 'auto_var.get_var_with_argument', (['"""model"""', 'model_name[4:]'], {}), "('model', model_name[4:])\n", (3587, 3612), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4199, 4221), 'numpy.where', 'np.where', (['(pred == tsty)'], {}), '(pred == tsty)\n', (4207, 4221), True, 'import numpy as np\n'), ((4411, 4465), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trnX"""', 'model.augX'], {}), "('trnX', model.augX)\n", (4445, 4465), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4474, 4528), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""trny"""', 'model.augy'], {}), "('trny', model.augy)\n", (4508, 4528), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((4698, 4715), 'json.dump', 'json.dump', (['ret', 'f'], {}), '(ret, f)\n', (4707, 4715), False, 'import json\n'), ((2163, 2186), 'nnattack.variables.get_file_name', 'get_file_name', (['auto_var'], {}), '(auto_var)\n', (2176, 2186), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((2580, 2592), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2589, 2592), False, 'import json\n'), ((3697, 3751), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'pre_model'], {}), "('model', pre_model)\n", (3731, 3751), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3835, 3890), 'nnattack.variables.auto_var.get_var_with_argument', 'auto_var.get_var_with_argument', (['"""model"""', 'model_name[4:]'], {}), "('model', model_name[4:])\n", (3865, 3890), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3933, 3987), 'nnattack.variables.auto_var.set_intermidiate_variable', 'auto_var.set_intermidiate_variable', (['"""model"""', 'pre_model'], {}), "('model', 
pre_model)\n", (3967, 3987), False, 'from nnattack.variables import auto_var, get_file_name\n'), ((3100, 3112), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3109, 3112), True, 'import numpy as np\n')] |
import torch
import os
from tqdm import tqdm
import numpy as np
from multiprocessing.pool import Pool
from itertools import islice, cycle
from utils.logging import logger
from utils.misc import ensure_dir
class Vocab(object):
def __init__(self):
self.tok2idx = {}
self.idx2tok = []
self.add('<pad>') # PAD index is 0
self.add('<unk>') # UNK index is 1
self.add('<bos>') # BOS index is 2
self.add('<eos>') # EOS index is 3
def __len__(self):
return len(self.idx2tok)
def add(self, token):
if token not in self.tok2idx:
self.tok2idx[token] = len(self.idx2tok)
self.idx2tok.append(token)
def encode(self, token):
return self.tok2idx.get(token, self.tok2idx['<unk>'])
def decode(self, token_id):
assert token_id < len(self.idx2tok), \
'token id must be less than %d, got %d' % (len(self.idx2tok), token_id)
return self.idx2tok[token_id]
def split_corpus(path, shard_size):
with open(path, "r") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
def build_vocab(src_file, max_vocab_size=0):
with open(src_file, 'r') as f:
tokens = f.read().split()
freq_dict = {}
for t in tokens:
freq_dict[t] = freq_dict.get(t, 0) + 1
tokens = sorted(
list(freq_dict.items()),
key=lambda x: x[1],
reverse=True
)
vsize = max_vocab_size if max_vocab_size > 0 else len(tokens)
vocab = [t[0] for t in tokens[:vsize]]
ret = Vocab()
for t in vocab:
ret.add(t)
return ret
def _worker(args):
src, tgt, feat_ext, vocab = args
if tgt == '':
return None
try:
return feat_ext(src), tgt, [vocab.encode(x) for x in ('<bos> '+tgt+' <eos>').split()]
except Exception as e:
return None
def build_shards(src_dir, save_dir, src_file, tgt_file, vocab,
shard_size, feat_ext, mode='train', feats=None
):
src_shards = split_corpus(src_file, shard_size)
tgt_shards = split_corpus(tgt_file, shard_size)
ensure_dir(save_dir)
shard_index = 0
for src_shard, tgt_shard in zip(src_shards, tgt_shards):
logger.info('Building %s shard %d' % (mode, shard_index))
audio_paths = [os.path.join(src_dir, p.strip()) for p in src_shard]
assert all([os.path.exists(p) for p in audio_paths]), \
"following audio files not found: %s" % \
' '.join([p.strip() for p in audio_paths if not os.path.exists(p)])
targets = [t.strip() for t in tgt_shard]
src_tgt_pairs = list(zip(audio_paths, targets, cycle([feat_ext]), cycle([vocab])))
with Pool(50) as p:
result = list(tqdm(p.imap(_worker, src_tgt_pairs), total=len(src_tgt_pairs)))
result = [r for r in result if r is not None]
audio_feats, transcriptions, indices = zip(*result)
shard = {
'src': np.asarray(audio_feats),
'tgt': np.asarray(transcriptions),
'indices': np.asarray([np.asarray(x).reshape(-1,1) for x in indices]),
'feats': feats
}
shard_path = os.path.join(save_dir, '%s.%05d.pt' % (mode, shard_index))
logger.info('Saving shard %d to %s' % (shard_index, shard_path))
torch.save(shard, shard_path)
shard_index += 1 | [
"os.path.exists",
"itertools.cycle",
"itertools.islice",
"utils.misc.ensure_dir",
"os.path.join",
"numpy.asarray",
"torch.save",
"multiprocessing.pool.Pool",
"utils.logging.logger.info"
] | [((2338, 2358), 'utils.misc.ensure_dir', 'ensure_dir', (['save_dir'], {}), '(save_dir)\n', (2348, 2358), False, 'from utils.misc import ensure_dir\n'), ((2449, 2506), 'utils.logging.logger.info', 'logger.info', (["('Building %s shard %d' % (mode, shard_index))"], {}), "('Building %s shard %d' % (mode, shard_index))\n", (2460, 2506), False, 'from utils.logging import logger\n'), ((3417, 3475), 'os.path.join', 'os.path.join', (['save_dir', "('%s.%05d.pt' % (mode, shard_index))"], {}), "(save_dir, '%s.%05d.pt' % (mode, shard_index))\n", (3429, 3475), False, 'import os\n'), ((3484, 3548), 'utils.logging.logger.info', 'logger.info', (["('Saving shard %d to %s' % (shard_index, shard_path))"], {}), "('Saving shard %d to %s' % (shard_index, shard_path))\n", (3495, 3548), False, 'from utils.logging import logger\n'), ((3557, 3586), 'torch.save', 'torch.save', (['shard', 'shard_path'], {}), '(shard, shard_path)\n', (3567, 3586), False, 'import torch\n'), ((2936, 2944), 'multiprocessing.pool.Pool', 'Pool', (['(50)'], {}), '(50)\n', (2940, 2944), False, 'from multiprocessing.pool import Pool\n'), ((3201, 3224), 'numpy.asarray', 'np.asarray', (['audio_feats'], {}), '(audio_feats)\n', (3211, 3224), True, 'import numpy as np\n'), ((3246, 3272), 'numpy.asarray', 'np.asarray', (['transcriptions'], {}), '(transcriptions)\n', (3256, 3272), True, 'import numpy as np\n'), ((2603, 2620), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (2617, 2620), False, 'import os\n'), ((2886, 2903), 'itertools.cycle', 'cycle', (['[feat_ext]'], {}), '([feat_ext])\n', (2891, 2903), False, 'from itertools import islice, cycle\n'), ((2905, 2919), 'itertools.cycle', 'cycle', (['[vocab]'], {}), '([vocab])\n', (2910, 2919), False, 'from itertools import islice, cycle\n'), ((1179, 1200), 'itertools.islice', 'islice', (['f', 'shard_size'], {}), '(f, shard_size)\n', (1185, 1200), False, 'from itertools import islice, cycle\n'), ((2761, 2778), 'os.path.exists', 'os.path.exists', (['p'], {}), 
'(p)\n', (2775, 2778), False, 'import os\n'), ((3310, 3323), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (3320, 3323), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
from operator import mul
from compas.geometry import Point
from compas.geometry import Scale
from compas.geometry import Translation
from compas.geometry import Rotation
from compas.geometry import transform_points
from .object import Object
class NetworkObject(Object):
"""Class for representing COMPAS networkes in Rhino.
Attributes
----------
anchor : int
The node of the network that is anchored to the location of the object.
location : :class:`compas.geometry.Point`
The location of the object.
Default is the origin of the world coordinate system.
scale : float
A uniform scaling factor for the object in the scene.
The scale is applied relative to the location of the object in the scene.
rotation : list[float]
The rotation angles around the 3 axis of the coordinate system
with the origin placed at the location of the object in the scene.
node_xyz : dict[int, list[float]]
The view coordinates of the network object.
"""
SETTINGS = {
'color.nodes': (255, 255, 255),
'color.edges': (0, 0, 0),
'show.nodes': True,
'show.edges': True,
'show.nodelabels': False,
'show.edgelabels': False,
}
def __init__(self, *args, **kwargs):
super(NetworkObject, self).__init__(*args, **kwargs)
self._anchor = None
self._location = None
self._scale = None
self._rotation = None
@property
def network(self):
return self.item
@network.setter
def network(self, network):
self.item = network
@property
def anchor(self):
return self._anchor
@anchor.setter
def anchor(self, node):
if self.network.has_node(node):
self._anchor = node
@property
def location(self):
if not self._location:
self._location = Point(0, 0, 0)
return self._location
@location.setter
def location(self, location):
self._location = Point(*location)
@property
def scale(self):
if not self._scale:
self._scale = 1.0
return self._scale
@scale.setter
def scale(self, scale):
self._scale = scale
@property
def rotation(self):
if not self._rotation:
self._rotation = [0, 0, 0]
return self._rotation
@rotation.setter
def rotation(self, rotation):
self._rotation = rotation
@property
def node_xyz(self):
origin = Point(0, 0, 0)
nodes = list(self.network.nodes())
xyz = self.network.nodes_attributes(['x', 'y', 'z'], keys=nodes)
stack = []
if self.scale != 1.0:
S = Scale.from_factors([self.scale] * 3)
stack.append(S)
if self.rotation != [0, 0, 0]:
R = Rotation.from_euler_angles(self.rotation)
stack.append(R)
if self.location != origin:
if self.anchor is not None:
xyz = self.network.node_attributes(self.anchor, 'xyz')
point = Point(* xyz)
T1 = Translation.from_vector(origin - point)
stack.insert(0, T1)
T2 = Translation.from_vector(self.location)
stack.append(T2)
if stack:
X = reduce(mul, stack[::-1])
xyz = transform_points(xyz, X)
return dict(zip(nodes, xyz))
def select_nodes(self):
raise NotImplementedError
def select_edges(self):
raise NotImplementedError
def modify_nodes(self, nodes, names=None):
raise NotImplementedError
def modify_edges(self, edges, names=None):
raise NotImplementedError
def move_node(self, node):
raise NotImplementedError
def move_edge(self, edge):
raise NotImplementedError
| [
"compas.geometry.Point",
"compas.geometry.Translation.from_vector",
"functools.reduce",
"compas.geometry.Rotation.from_euler_angles",
"compas.geometry.Scale.from_factors",
"compas.geometry.transform_points"
] | [((2171, 2187), 'compas.geometry.Point', 'Point', (['*location'], {}), '(*location)\n', (2176, 2187), False, 'from compas.geometry import Point\n'), ((2669, 2683), 'compas.geometry.Point', 'Point', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2674, 2683), False, 'from compas.geometry import Point\n'), ((2045, 2059), 'compas.geometry.Point', 'Point', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (2050, 2059), False, 'from compas.geometry import Point\n'), ((2866, 2902), 'compas.geometry.Scale.from_factors', 'Scale.from_factors', (['([self.scale] * 3)'], {}), '([self.scale] * 3)\n', (2884, 2902), False, 'from compas.geometry import Scale\n'), ((2986, 3027), 'compas.geometry.Rotation.from_euler_angles', 'Rotation.from_euler_angles', (['self.rotation'], {}), '(self.rotation)\n', (3012, 3027), False, 'from compas.geometry import Rotation\n'), ((3354, 3392), 'compas.geometry.Translation.from_vector', 'Translation.from_vector', (['self.location'], {}), '(self.location)\n', (3377, 3392), False, 'from compas.geometry import Translation\n'), ((3456, 3480), 'functools.reduce', 'reduce', (['mul', 'stack[::-1]'], {}), '(mul, stack[::-1])\n', (3462, 3480), False, 'from functools import reduce\n'), ((3499, 3523), 'compas.geometry.transform_points', 'transform_points', (['xyz', 'X'], {}), '(xyz, X)\n', (3515, 3523), False, 'from compas.geometry import transform_points\n'), ((3227, 3238), 'compas.geometry.Point', 'Point', (['*xyz'], {}), '(*xyz)\n', (3232, 3238), False, 'from compas.geometry import Point\n'), ((3261, 3300), 'compas.geometry.Translation.from_vector', 'Translation.from_vector', (['(origin - point)'], {}), '(origin - point)\n', (3284, 3300), False, 'from compas.geometry import Translation\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Protocol Data Units
===================
"""
import struct
from bacpypes.debugging import bacpypes_debugging, DebugContents, ModuleLogger
from bacpypes.comm import PDUData, PCI
from bacpypes.errors import DecodingError
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# a dictionary of functions and classes
request_types = {}
response_types = {}
def register_request_type(klass):
request_types[klass.functionCode] = klass
def register_response_type(klass):
response_types[klass.functionCode] = klass
#
# Packing and Unpacking Functions
#
def _packBitsToString(bits):
barry = []
i = packed = 0
for bit in bits:
if bit:
packed += 128
i += 1
if i == 8:
barry.append(packed)
i = packed = 0
else:
packed >>= 1
if i > 0 and i < 8:
packed >>= 7 - i
barry.append(packed)
return struct.pack("B" * len(barry), *barry)
def _unpackBitsFromString(string):
barry = struct.unpack("B" * len(string), string)
bits = []
for byte in barry:
for bit in range(8):
bits.append((byte & 1) == 1)
byte >>= 1
return bits
#
# _Struct
#
class _Struct:
"""
This is an abstract class for functions that pack and unpack the
variably encoded portion of a PDU. Each of the derived classes
produces or consumes a number of 16-registers.
"""
registerLength = None
def pack(self, value):
raise NotImplementedError("pack is not implemented in %s" % (self.__class__.__name__,))
def unpack(self, registers):
raise NotImplementedError("unpack is not implemented in %s" % (self.__class__.__name__,))
@bacpypes_debugging
class Byte(_Struct):
"""
This class packs and unpacks a register as an unsigned octet.
"""
registerLength = 1
def pack(self, value):
if _debug: Byte._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Byte._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFF]
def unpack(self, registers):
if _debug: Byte._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class Int(_Struct):
"""
This class packs and unpacks a register as a 16-bit signed integer.
"""
registerLength = 1
def pack(self, value):
if _debug: Int._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
Int._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: Int._debug("unpack %r", registers)
value = registers[0]
if (value & 0x8000):
value = (-1 << 16) | value
return value
@bacpypes_debugging
class UnsignedInt(_Struct):
"""
This class packs and unpacks a register as a 16-bit unsigned integer.
"""
registerLength = 1
def pack(self, value):
if _debug: UnsignedInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedInt._debug("unpack %r", registers)
return registers[0]
@bacpypes_debugging
class DoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: DoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
DoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: DoubleInt._debug("unpack %r", registers)
value = (registers[0] << 16) | registers[1]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
@bacpypes_debugging
class UnsignedDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: UnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
UnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [(value >> 16) & 0xFFFF, value & 0xFFFF]
def unpack(self, registers):
if _debug: UnsignedDoubleInt._debug("unpack %r", registers)
return (registers[0] << 16) | registers[1]
@bacpypes_debugging
class Real(_Struct):
registerLength = 2
def pack(self, value):
if _debug: Real._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
BigEndianReal._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[1], registers[0]]
def unpack(self, registers):
if _debug: Real._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[1], registers[0]))
return value
@bacpypes_debugging
class ROCReal(_Struct):
registerLength = 1
def pack(self, value):
if _debug: ROCReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
ROCReal._error("coercion error: %r not a float", value)
value = 0.0
raise NotImplementedError("packing ROCReal is not supported")
def unpack(self, registers):
if _debug: ROCReal._debug("unpack %r", registers)
# byte-swap the registers
r0, r1 = registers
r0 = ((r0 & 0xFF00) >> 8) | ((r0 & 0x00FF) << 8)
r1 = ((r1 & 0xFF00) >> 8) | ((r1 & 0x00FF) << 8)
value, = struct.unpack(">f", struct.pack(">HH", r1, r0))
return value
@bacpypes_debugging
class BigEndianDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a bit endian 32-bit signed integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianDoubleInt._debug("unpack %r", registers)
value = (registers[1] << 16) | registers[0]
if (value & 0x80000000):
value = (-1 << 32) | value
return value
@bacpypes_debugging
class BigEndianUnsignedDoubleInt(_Struct):
"""
This class packs and unpacks a pair of registers as a bit endian 32-bit unsigned integer.
"""
registerLength = 2
def pack(self, value):
if _debug: BigEndianUnsignedDoubleInt._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, int):
try:
value = int(value)
except TypeError:
BigEndianUnsignedDoubleInt._error("coercion error: %r not an int", value)
value = 0
return [value & 0xFFFF, (value >> 16) & 0xFFFF]
def unpack(self, registers):
if _debug: BigEndianUnsignedDoubleInt._debug("unpack %r", registers)
return (registers[1] << 16) | registers[0]
@bacpypes_debugging
class BigEndianReal(_Struct):
registerLength = 2
def pack(self, value):
if _debug: BigEndianReal._debug("pack %r", value)
# convert the value if necessary
if not isinstance(value, float):
try:
value = float(value)
except TypeError:
BigEndianReal._error("coercion error: %r not a float", value)
value = 0.0
registers = struct.unpack(">HH", struct.pack(">f", value))
return [registers[0], registers[1]]
def unpack(self, registers):
if _debug: BigEndianReal._debug("unpack %r", registers)
value, = struct.unpack(">f", struct.pack(">HH", registers[0], registers[1]))
return value
@bacpypes_debugging
class String(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg >> 8)
octets.append(reg & 0xFF)
value = ''.join(chr(c) for c in octets)
value = value[:value.find('\x00')]
return value
@bacpypes_debugging
class BigEndianString(_Struct):
"""
This class packs and unpacks a list of registers as a null terminated string.
"""
def __init__(self, registerLength=6):
if _debug: String._debug("__init__ %r", registerLength)
# save the length
self.registerLength = registerLength
def pack(self, value):
if _debug: String._debug("pack %r", value)
raise NotImplementedError("packing strings is not implemeted")
def unpack(self, registers):
if _debug: String._debug("unpack %r", registers)
octets = []
for reg in registers:
octets.append(reg & 0xFF)
octets.append(reg >> 8)
value = ''.join(chr(c) for c in octets)
value = value[:value.find('\x00')]
return value
#
# ModbusStruct
#
ModbusStruct = {
'byte': Byte(),
'int': Int(),
'uint': UnsignedInt(),
'dint': DoubleInt(),
'udint': UnsignedDoubleInt(),
'real': Real(),
'roc-real': ROCReal(),
'be-dint': BigEndianDoubleInt(),
'be-udint': BigEndianUnsignedDoubleInt(),
'be-real': BigEndianReal(),
'str': String(),
'be-str': BigEndianString(),
}
#
# MPCI
#
@bacpypes_debugging
class MPCI(PCI, DebugContents):
    """
    This class contains the MODBUS protocol control information which
    is the 8 octet header at the front of all MODBUS PDUs.
    """
    _debug_contents = (
        'mpduTransactionID',
        'mpduProtocolID',
        'mpduLength',
        'mpduUnitID',
        'mpduFunctionCode',
        )
    # standard MODBUS public function codes
    readCoils = 1
    readDiscreteInputs = 2
    readMultipleRegisters = 3
    readInputRegisters = 4
    writeSingleCoil = 5
    writeSingleRegister = 6
    writeMultipleCoils = 15
    writeMultipleRegisters = 16
    readWriteMultipleRegisters = 23
    # application specific codes in the user-defined function code range
    announceMaster = 100
    registerSlave = 105
    def __init__(self, *args, **kwargs):
        if _debug: MPCI._debug("__init__ %r %r", args, kwargs)
        PCI.__init__(self, *args, **kwargs)
        # identifiers and unit default to zero; the length and function
        # code are filled in by encode()/decode()
        self.mpduTransactionID = 0
        self.mpduProtocolID = 0
        self.mpduLength = None
        self.mpduUnitID = 0
        self.mpduFunctionCode = None
    def update(self, mpci):
        """Copy the PCI fields and all MODBUS header fields from *mpci*."""
        if _debug: MPCI._debug("update %r", mpci)
        PCI.update(self, mpci)
        self.mpduTransactionID = mpci.mpduTransactionID
        self.mpduProtocolID = mpci.mpduProtocolID
        self.mpduLength = mpci.mpduLength
        self.mpduUnitID = mpci.mpduUnitID
        self.mpduFunctionCode = mpci.mpduFunctionCode
    def encode(self, pdu):
        """Encode the contents into the PDU."""
        if _debug: MPCI._debug("encode %r", pdu)
        PCI.update(pdu, self)
        # header layout: transaction id, protocol id, length, unit id,
        # then the function code
        pdu.put_short(self.mpduTransactionID)
        pdu.put_short(self.mpduProtocolID)
        pdu.put_short(self.mpduLength)
        pdu.put(self.mpduUnitID)
        pdu.put(self.mpduFunctionCode)
    def decode(self, pdu):
        """Decode the contents of the PDU."""
        if _debug: MPCI._debug("decode %r", pdu)
        PCI.update(self, pdu)
        self.mpduTransactionID = pdu.get_short()
        self.mpduProtocolID = pdu.get_short()
        self.mpduLength = pdu.get_short()
        self.mpduUnitID = pdu.get()
        self.mpduFunctionCode = pdu.get()
        # check the length: the length field counts the unit id, the
        # function code, and the data octets that follow
        if self.mpduLength != len(pdu.pduData) + 2:
            raise DecodingError("invalid length")
#
# MPDU
#
@bacpypes_debugging
class MPDU(MPCI, PDUData):
    """
    A generic MODBUS protocol data unit: the :class:`MPCI` header
    combined with an uninterpreted data payload.
    """
    def __init__(self, *args, **kwargs):
        if _debug:
            MPDU._debug("__init__ %r %r", args, kwargs)
        # keyword arguments configure the header, positional ones the payload
        MPCI.__init__(self, **kwargs)
        PDUData.__init__(self, *args)
    def encode(self, pdu):
        """Write the header followed by the raw payload into *pdu*."""
        if _debug:
            MPDU._debug("encode %r", pdu)
        MPCI.encode(self, pdu)
        pdu.put_data(self.pduData)
    def decode(self, pdu):
        """Read the header from *pdu*, then take all remaining data as payload."""
        if _debug:
            MPDU._debug("decode %r", pdu)
        MPCI.decode(self, pdu)
        remaining = len(pdu.pduData)
        self.pduData = pdu.get_data(remaining)
#------------------------------
@bacpypes_debugging
class ReadBitsRequestBase(MPCI, DebugContents):
    """
    Common implementation for the bit oriented read requests,
    :class:`ReadCoilsRequest` and :class:`ReadDiscreteInputsRequest`.
    """
    _debug_contents = ('address', 'count')
    def __init__(self, address, count, **kwargs):
        if _debug:
            ReadBitsRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
        MPCI.__init__(self, **kwargs)
        self.address = address
        self.count = count
    def encode(self, pdu):
        """Append the starting address and bit count, then fix up the length."""
        if _debug:
            ReadBitsRequestBase._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        for field in (self.address, self.count):
            pdu.put_short(field)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Extract the starting address and bit count from *pdu*."""
        if _debug:
            ReadBitsRequestBase._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
@bacpypes_debugging
class ReadBitsResponseBase(MPCI, DebugContents):
    """
    Common implementation for the bit oriented read responses,
    :class:`ReadCoilsResponse` and :class:`ReadDiscreteInputsResponse`.
    """
    _debug_contents = ('bits',)
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadBitsResponseBase._debug("__init__ %r %r", values, kwargs)
        MPCI.__init__(self, **kwargs)
        # default to an empty bit list when no values are supplied
        self.bits = values if values is not None else []
    def encode(self, pdu):
        """Append an octet count followed by the packed bit string."""
        if _debug:
            ReadBitsResponseBase._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        packed = _packBitsToString(self.bits)
        if _debug:
            ReadBitsResponseBase._debug("    - stringbits: %r", packed)
        pdu.put(len(packed))
        pdu.put_data(packed)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Read the octet count, then unpack that many octets into bits."""
        if _debug:
            ReadBitsResponseBase._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        datalen = pdu.get()
        self.bits = _unpackBitsFromString(pdu.get_data(datalen))
@bacpypes_debugging
class ReadRegistersRequestBase(MPCI, DebugContents):
    """
    Common implementation for the register oriented read requests,
    :class:`ReadMultipleRegistersRequest` and
    :class:`ReadInputRegistersRequest`.
    """
    _debug_contents = ('address', 'count')
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            ReadRegistersRequestBase._debug("__init__ %r %r %r", address, count, kwargs)
        MPCI.__init__(self, **kwargs)
        self.address = address
        self.count = count
    def encode(self, pdu):
        """Append the starting address and register count, then fix up the length."""
        if _debug:
            ReadRegistersRequestBase._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        for field in (self.address, self.count):
            pdu.put_short(field)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Extract the starting address and register count from *pdu*."""
        if _debug:
            ReadRegistersRequestBase._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
@bacpypes_debugging
class ReadRegistersResponseBase(MPCI, DebugContents):
    """
    Common implementation for the register oriented read responses,
    :class:`ReadMultipleRegistersResponse` and
    :class:`ReadInputRegistersResponse`.
    """
    _debug_contents = ('registers',)
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadRegistersResponseBase._debug("__init__ %r %r", values, kwargs)
        MPCI.__init__(self, **kwargs)
        # default to an empty register list when no values are supplied
        self.registers = values if values is not None else []
    def encode(self, pdu):
        """Append an octet count followed by the register values."""
        if _debug:
            ReadRegistersResponseBase._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put(len(self.registers) * 2)
        for reg in self.registers:
            pdu.put_short(reg)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Read the octet count, then two octets per register value."""
        if _debug:
            ReadRegistersResponseBase._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        datalen = pdu.get()
        self.registers = [pdu.get_short() for _ in range(datalen // 2)]
@bacpypes_debugging
class ReadWriteValueBase(MPCI, DebugContents):
    """
    Common implementation for the single value messages:
    :class:`WriteSingleCoilRequest`, :class:`WriteSingleCoilResponse`,
    :class:`WriteSingleRegisterRequest` and :class:`WriteSingleRegisterResponse`.
    """
    _debug_contents = ('address', 'value')
    def __init__(self, address=None, value=None, **kwargs):
        if _debug:
            ReadWriteValueBase._debug("__init__ %r %r %r", address, value, kwargs)
        MPCI.__init__(self, **kwargs)
        self.address = address
        self.value = value
    def encode(self, pdu):
        """Append the address and value, then fix up the length."""
        if _debug:
            ReadWriteValueBase._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        for field in (self.address, self.value):
            pdu.put_short(field)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Extract the address and value from *pdu*."""
        if _debug:
            ReadWriteValueBase._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.value = pdu.get_short()
#------------------------------
#
# ReadCoils
#
@bacpypes_debugging
class ReadCoilsRequest(ReadBitsRequestBase):
    """
    Request PDU for function code 1, Read Coils.
    """
    functionCode = MPCI.readCoils
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            ReadCoilsRequest._debug("__init__ %r %r %r", address, count, kwargs)
        ReadBitsRequestBase.__init__(self, address, count, **kwargs)
        self.mpduFunctionCode = ReadCoilsRequest.functionCode
register_request_type(ReadCoilsRequest)
@bacpypes_debugging
class ReadCoilsResponse(ReadBitsResponseBase):
    """
    Response PDU for function code 1, Read Coils.
    """
    functionCode = MPCI.readCoils
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadCoilsResponse._debug("__init__ %r %r", values, kwargs)
        ReadBitsResponseBase.__init__(self, values, **kwargs)
        self.mpduFunctionCode = ReadCoilsResponse.functionCode
register_response_type(ReadCoilsResponse)
#
# ReadDiscreteInputs
#
@bacpypes_debugging
class ReadDiscreteInputsRequest(ReadBitsRequestBase):
    """
    Request PDU for function code 2, Read Discrete Inputs.
    """
    functionCode = MPCI.readDiscreteInputs
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            ReadDiscreteInputsRequest._debug("__init__ %r %r %r", address, count, kwargs)
        ReadBitsRequestBase.__init__(self, address, count, **kwargs)
        self.mpduFunctionCode = ReadDiscreteInputsRequest.functionCode
register_request_type(ReadDiscreteInputsRequest)
@bacpypes_debugging
class ReadDiscreteInputsResponse(ReadBitsResponseBase):
    """
    Response PDU for function code 2, Read Discrete Inputs.
    """
    functionCode = MPCI.readDiscreteInputs
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadDiscreteInputsResponse._debug("__init__ %r %r", values, kwargs)
        ReadBitsResponseBase.__init__(self, values, **kwargs)
        self.mpduFunctionCode = ReadDiscreteInputsResponse.functionCode
register_response_type(ReadDiscreteInputsResponse)
#
# ReadMultipleRegisters
#
@bacpypes_debugging
class ReadMultipleRegistersRequest(ReadRegistersRequestBase):
    """
    Request PDU for function code 3, Read Holding Registers.
    """
    functionCode = MPCI.readMultipleRegisters
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            ReadMultipleRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
        ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
        self.mpduFunctionCode = ReadMultipleRegistersRequest.functionCode
register_request_type(ReadMultipleRegistersRequest)
@bacpypes_debugging
class ReadMultipleRegistersResponse(ReadRegistersResponseBase):
    """
    Response PDU for function code 3, Read Holding Registers.
    """
    functionCode = MPCI.readMultipleRegisters
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadMultipleRegistersResponse._debug("__init__ %r %r", values, kwargs)
        ReadRegistersResponseBase.__init__(self, values, **kwargs)
        self.mpduFunctionCode = ReadMultipleRegistersResponse.functionCode
register_response_type(ReadMultipleRegistersResponse)
#
# ReadInputRegisters
#
@bacpypes_debugging
class ReadInputRegistersRequest(ReadRegistersRequestBase):
    """
    Request PDU for function code 4, Read Input Registers.
    """
    functionCode = MPCI.readInputRegisters
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            ReadInputRegistersRequest._debug("__init__ %r %r %r", address, count, kwargs)
        ReadRegistersRequestBase.__init__(self, address, count, **kwargs)
        self.mpduFunctionCode = ReadInputRegistersRequest.functionCode
register_request_type(ReadInputRegistersRequest)
@bacpypes_debugging
class ReadInputRegistersResponse(ReadRegistersResponseBase):
    """
    Response PDU for function code 4, Read Input Registers.
    """
    functionCode = MPCI.readInputRegisters
    def __init__(self, values=None, **kwargs):
        if _debug:
            ReadInputRegistersResponse._debug("__init__ %r %r", values, kwargs)
        ReadRegistersResponseBase.__init__(self, values, **kwargs)
        self.mpduFunctionCode = ReadInputRegistersResponse.functionCode
register_response_type(ReadInputRegistersResponse)
#
# WriteSingleCoil
#
@bacpypes_debugging
class WriteSingleCoilRequest(ReadWriteValueBase):
    """
    Request PDU for function code 5, Write Single Coil.
    """
    functionCode = MPCI.writeSingleCoil
    def __init__(self, address=None, value=None, **kwargs):
        if _debug:
            WriteSingleCoilRequest._debug("__init__ %r %r %r", address, value, kwargs)
        ReadWriteValueBase.__init__(self, address, value, **kwargs)
        self.mpduFunctionCode = WriteSingleCoilRequest.functionCode
register_request_type(WriteSingleCoilRequest)
@bacpypes_debugging
class WriteSingleCoilResponse(ReadWriteValueBase):
    """
    Response PDU for function code 5, Write Single Coil.
    """
    functionCode = MPCI.writeSingleCoil
    def __init__(self, address=None, value=None, **kwargs):
        if _debug:
            WriteSingleCoilResponse._debug("__init__ %r %r %r", address, value, kwargs)
        ReadWriteValueBase.__init__(self, address, value, **kwargs)
        self.mpduFunctionCode = WriteSingleCoilResponse.functionCode
register_response_type(WriteSingleCoilResponse)
#
# WriteSingleRegister
#
@bacpypes_debugging
class WriteSingleRegisterRequest(ReadWriteValueBase):
    """
    Request PDU for function code 6, Write Single Register.
    """
    functionCode = MPCI.writeSingleRegister
    def __init__(self, address=None, value=None, **kwargs):
        if _debug:
            WriteSingleRegisterRequest._debug("__init__ %r %r %r", address, value, kwargs)
        ReadWriteValueBase.__init__(self, address, value, **kwargs)
        self.mpduFunctionCode = WriteSingleRegisterRequest.functionCode
register_request_type(WriteSingleRegisterRequest)
@bacpypes_debugging
class WriteSingleRegisterResponse(ReadWriteValueBase):
    """
    Response PDU for function code 6, Write Single Register.
    """
    functionCode = MPCI.writeSingleRegister
    def __init__(self, address=None, value=None, **kwargs):
        if _debug:
            WriteSingleRegisterResponse._debug("__init__ %r %r %r", address, value, kwargs)
        ReadWriteValueBase.__init__(self, address, value, **kwargs)
        self.mpduFunctionCode = WriteSingleRegisterResponse.functionCode
register_response_type(WriteSingleRegisterResponse)
#
# WriteMultipleCoils
#
@bacpypes_debugging
class WriteMultipleCoilsRequest(MPCI, DebugContents):
    """
    Write Multiple Coils Request (function code 15).
    """
    _debug_contents = ('address', 'count', 'coils')
    functionCode = MPCI.writeMultipleCoils
    def __init__(self, address=None, count=None, coils=None, **kwargs):
        if _debug: WriteMultipleCoilsRequest._debug("__init__ %r %r %r %r", address, count, coils, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = WriteMultipleCoilsRequest.functionCode
        self.address = address
        self.count = count
        # default to `count` cleared coils when none are given; note this
        # raises TypeError when both coils and count are left as None
        if coils is not None:
            self.coils = coils
        else:
            self.coils = [False] * count
    def encode(self, pdu):
        if _debug: WriteMultipleCoilsRequest._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put_short(self.address)
        pdu.put_short(self.count)
        # pack the coil values and append them preceded by the octet count
        stringbits = _packBitsToString(self.coils)
        pdu.put(len(stringbits))
        pdu.put_data(stringbits)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        if _debug: WriteMultipleCoilsRequest._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
        datalen = pdu.get()
        # unpacking may yield padding bits; trim to the declared coil count
        coils = _unpackBitsFromString(pdu.get_data(datalen))
        self.coils = coils[:self.count]
register_request_type(WriteMultipleCoilsRequest)
@bacpypes_debugging
class WriteMultipleCoilsResponse(MPCI, DebugContents):
    """
    Response PDU for function code 15, Write Multiple Coils: echoes the
    starting address and the number of coils written.
    """
    _debug_contents = ('address', 'count')
    functionCode = MPCI.writeMultipleCoils
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            WriteMultipleCoilsResponse._debug("__init__ %r %r %r", address, count, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = WriteMultipleCoilsResponse.functionCode
        self.address = address
        self.count = count
    def encode(self, pdu):
        """Append the address and coil count, then fix up the length."""
        if _debug:
            WriteMultipleCoilsResponse._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        for field in (self.address, self.count):
            pdu.put_short(field)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Extract the address and coil count from *pdu*."""
        if _debug:
            WriteMultipleCoilsResponse._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
register_response_type(WriteMultipleCoilsResponse)
#
# WriteMultipleRegisters
#
@bacpypes_debugging
class WriteMultipleRegistersRequest(MPCI, DebugContents):
    """
    Write Multiple Registers Request (function code 16).
    """
    _debug_contents = ('address', 'count', 'registers')
    functionCode = MPCI.writeMultipleRegisters
    def __init__(self, address=None, count=None, registers=None, **kwargs):
        if _debug: WriteMultipleRegistersRequest._debug("__init__ %r %r %r %r", address, count, registers, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = WriteMultipleRegistersRequest.functionCode
        self.address = address
        self.count = count
        # registers default to `count` zeroes, or None when neither is given
        if registers is not None:
            self.registers = registers
        elif count is not None:
            self.registers = [0] * self.count
        else:
            self.registers = None
    def encode(self, pdu):
        if _debug: WriteMultipleRegistersRequest._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put_short(self.address)
        pdu.put_short(self.count)
        # octet count followed by the register values, two octets each
        pdu.put(len(self.registers) * 2)
        for reg in self.registers:
            pdu.put_short(reg)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        if _debug: WriteMultipleRegistersRequest._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
        datalen = pdu.get()
        self.registers = []
        for i in range(datalen // 2):
            self.registers.append(pdu.get_short())
register_request_type(WriteMultipleRegistersRequest)
@bacpypes_debugging
class WriteMultipleRegistersResponse(MPCI, DebugContents):
    """
    Response PDU for function code 16, Write Multiple Registers: echoes
    the starting address and the number of registers written.
    """
    _debug_contents = ('address', 'count')
    functionCode = MPCI.writeMultipleRegisters
    def __init__(self, address=None, count=None, **kwargs):
        if _debug:
            WriteMultipleRegistersResponse._debug("__init__ %r %r %r", address, count, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = WriteMultipleRegistersResponse.functionCode
        self.address = address
        self.count = count
    def encode(self, pdu):
        """Append the address and register count, then fix up the length."""
        if _debug:
            WriteMultipleRegistersResponse._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        for field in (self.address, self.count):
            pdu.put_short(field)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Extract the address and register count from *pdu*."""
        if _debug:
            WriteMultipleRegistersResponse._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.address = pdu.get_short()
        self.count = pdu.get_short()
register_response_type(WriteMultipleRegistersResponse)
#
# ReadWriteMultipleRegistersRequest
#
@bacpypes_debugging
class ReadWriteMultipleRegistersRequest(MPCI, DebugContents):
    """
    Read Write Multiple Registers Request (function code 23): reads
    `rcount` registers starting at `raddress` and writes `registers`
    starting at `waddress` in a single transaction.
    """
    _debug_contents = ('raddress', 'rcount', 'waddress', 'wcount', 'registers')
    functionCode = MPCI.readWriteMultipleRegisters
    def __init__(self, raddress=None, rcount=None, waddress=None, wcount=None, registers=None, **kwargs):
        if _debug: ReadWriteMultipleRegistersRequest._debug("__init__ %r %r %r %r %r %r", raddress, rcount, waddress, wcount, registers, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = ReadWriteMultipleRegistersRequest.functionCode
        self.raddress = raddress
        self.rcount = rcount
        self.waddress = waddress
        self.wcount = wcount
        # registers default to `wcount` zeroes; guard against wcount being
        # None (previously `[0] * wcount` raised TypeError with the default
        # arguments) to match WriteMultipleRegistersRequest
        if registers is not None:
            self.registers = registers
        elif wcount is not None:
            self.registers = [0] * wcount
        else:
            self.registers = None
    def encode(self, pdu):
        """Encode the read range, write range and register data into *pdu*."""
        if _debug: ReadWriteMultipleRegistersRequest._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put_short(self.raddress)
        pdu.put_short(self.rcount)
        pdu.put_short(self.waddress)
        pdu.put_short(self.wcount)
        # octet count followed by the registers to write
        pdu.put(len(self.registers) * 2)
        for reg in self.registers:
            pdu.put_short(reg)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Decode the read range, write range and register data from *pdu*."""
        if _debug: ReadWriteMultipleRegistersRequest._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.raddress = pdu.get_short()
        self.rcount = pdu.get_short()
        self.waddress = pdu.get_short()
        self.wcount = pdu.get_short()
        datalen = pdu.get()
        self.registers = []
        for i in range(datalen // 2):
            self.registers.append(pdu.get_short())
register_request_type(ReadWriteMultipleRegistersRequest)
@bacpypes_debugging
class ReadWriteMultipleRegistersResponse(MPCI, DebugContents):
    """
    Response PDU for function code 23, Read Write Multiple Registers:
    carries the registers that were read.
    """
    _debug_contents = ('registers',)
    functionCode = MPCI.readWriteMultipleRegisters
    def __init__(self, registers=None, **kwargs):
        if _debug:
            ReadWriteMultipleRegistersResponse._debug("__init__ %r %r", registers, kwargs)
        MPCI.__init__(self, **kwargs)
        self.mpduFunctionCode = ReadWriteMultipleRegistersResponse.functionCode
        # default to an empty register list when none are supplied
        self.registers = registers if registers is not None else []
    def encode(self, pdu):
        """Append an octet count followed by the register values."""
        if _debug:
            ReadWriteMultipleRegistersResponse._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put(len(self.registers) * 2)
        for reg in self.registers:
            pdu.put_short(reg)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        """Read the octet count, then two octets per register value."""
        if _debug:
            ReadWriteMultipleRegistersResponse._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        datalen = pdu.get()
        self.registers = [pdu.get_short() for _ in range(datalen // 2)]
register_response_type(ReadWriteMultipleRegistersResponse)
#
# ExceptionResponse
#
@bacpypes_debugging
class ExceptionResponse(MPCI, DebugContents):
    """
    Exception Response: returned by a server when a request cannot be
    processed.  The function code is the request's code plus 128 (high
    bit set) and the single data octet carries the exception code.
    """
    _debug_contents = ('exceptionCode',)
    # standard MODBUS exception codes
    ILLEGAL_FUNCTION = 0x01
    ILLEGAL_DATA_ADDRESS = 0x02
    ILLEGAL_DATA_VALUE = 0x03
    ILLEGAL_RESPONSE_LENGTH = 0x04
    ACKNOWLEDGE = 0x05
    SLAVE_DEVICE_BUSY = 0x06
    NEGATIVE_ACKNOWLEDGE = 0x07
    MEMORY_PARITY_ERROR = 0x08
    GATEWAY_PATH_UNAVAILABLE = 0x0A
    GATEWAY_TARGET_DEVICE_FAILED_TO_RESPOND = 0x0B
    def __init__(self, function=None, exceptionCode=None, **kwargs):
        if _debug: ExceptionResponse._debug("__init__ %r %r %r", function, exceptionCode, kwargs)
        MPCI.__init__(self, **kwargs)
        # the exception form of a function code has the high bit set
        if function is not None:
            self.mpduFunctionCode = function + 128
        else:
            self.mpduFunctionCode = None
        self.exceptionCode = exceptionCode
    def encode(self, pdu):
        if _debug: ExceptionResponse._debug("encode %r", pdu)
        MPCI.update(pdu, self)
        pdu.put(self.exceptionCode)
        pdu.mpduLength = len(pdu.pduData) + 2
    def decode(self, pdu):
        if _debug: ExceptionResponse._debug("decode %r", pdu)
        MPCI.update(self, pdu)
        self.exceptionCode = pdu.get()
| [
"bacpypes.comm.PCI.__init__",
"bacpypes.comm.PDUData.__init__",
"bacpypes.comm.PCI.update",
"struct.pack",
"bacpypes.errors.DecodingError"
] | [((12248, 12283), 'bacpypes.comm.PCI.__init__', 'PCI.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (12260, 12283), False, 'from bacpypes.comm import PDUData, PCI\n'), ((12535, 12557), 'bacpypes.comm.PCI.update', 'PCI.update', (['self', 'mpci'], {}), '(self, mpci)\n', (12545, 12557), False, 'from bacpypes.comm import PDUData, PCI\n'), ((12936, 12957), 'bacpypes.comm.PCI.update', 'PCI.update', (['pdu', 'self'], {}), '(pdu, self)\n', (12946, 12957), False, 'from bacpypes.comm import PDUData, PCI\n'), ((13291, 13312), 'bacpypes.comm.PCI.update', 'PCI.update', (['self', 'pdu'], {}), '(self, pdu)\n', (13301, 13312), False, 'from bacpypes.comm import PDUData, PCI\n'), ((14012, 14041), 'bacpypes.comm.PDUData.__init__', 'PDUData.__init__', (['self', '*args'], {}), '(self, *args)\n', (14028, 14041), False, 'from bacpypes.comm import PDUData, PCI\n'), ((5808, 5832), 'struct.pack', 'struct.pack', (['""">f"""', 'value'], {}), "('>f', value)\n", (5819, 5832), False, 'import struct\n'), ((6005, 6051), 'struct.pack', 'struct.pack', (['""">HH"""', 'registers[1]', 'registers[0]'], {}), "('>HH', registers[1], registers[0])\n", (6016, 6051), False, 'import struct\n'), ((6867, 6893), 'struct.pack', 'struct.pack', (['""">HH"""', 'r1', 'r0'], {}), "('>HH', r1, r0)\n", (6878, 6893), False, 'import struct\n'), ((9047, 9071), 'struct.pack', 'struct.pack', (['""">f"""', 'value'], {}), "('>f', value)\n", (9058, 9071), False, 'import struct\n'), ((9253, 9299), 'struct.pack', 'struct.pack', (['""">HH"""', 'registers[0]', 'registers[1]'], {}), "('>HH', registers[0], registers[1])\n", (9264, 9299), False, 'import struct\n'), ((13627, 13658), 'bacpypes.errors.DecodingError', 'DecodingError', (['"""invalid length"""'], {}), "('invalid length')\n", (13640, 13658), False, 'from bacpypes.errors import DecodingError\n')] |
import re
import logging
from pydispatch import dispatcher
__author__ = 'edzard'
logger = logging.getLogger(__name__)
_filters = {}
def _handler(sender, **kwargs):
    """Log every dispatched event unless a configured filter rejects it."""
    global _filters
    # suppress the event when any filtered parameter fails its regex
    for name, data in kwargs.items():
        pattern = _filters.get(name)
        if pattern is not None and pattern.match(data) is None:
            return
    logger.info("<{}> event from {} -> {}".format(kwargs['signal'], sender, kwargs))
# listen to every signal from every sender; filtering happens in _handler
dispatcher.connect(_handler, signal=dispatcher.Any, sender=dispatcher.Any)
def set_filter(**kwargs):
    """Compile and install a regex filter for each named event parameter."""
    global _filters
    for name, regex in kwargs.items():
        _filters[name] = re.compile(regex)
"logging.getLogger",
"pydispatch.dispatcher.connect",
"re.compile"
] | [((92, 119), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (109, 119), False, 'import logging\n'), ((474, 548), 'pydispatch.dispatcher.connect', 'dispatcher.connect', (['_handler'], {'signal': 'dispatcher.Any', 'sender': 'dispatcher.Any'}), '(_handler, signal=dispatcher.Any, sender=dispatcher.Any)\n', (492, 548), False, 'from pydispatch import dispatcher\n'), ((688, 705), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (698, 705), False, 'import re\n')] |
# pylint: disable=no-self-use
import re
import pytest
from . import AbstractViewsTests, factory_build_layers, get_test_default_layers
@pytest.fixture(scope="function")
# NOTE(review): pytest ignores marks applied to fixtures, so this
# usefixtures line has no effect (dbsession/transact are already request
# parameters below) — confirm and consider removing.
@pytest.mark.usefixtures("dbsession", "transact")
def layer_vectortiles_test_data(dbsession, transact):
    """Create OGC servers and vector tiles layers in the database and
    yield the resulting fixture objects as a dict.
    """
    del transact
    from c2cgeoportal_commons.models.main import LayerVectorTiles, OGCServer
    servers = [OGCServer(name=f"server_{i}") for i in range(0, 4)]
    for i, server in enumerate(servers):
        server.url = f"http://wms.geo.admin.ch_{i}"
        server.image_type = "image/jpeg" if i % 2 == 0 else "image/png"
    def layer_builder(i):
        # builder invoked by factory_build_layers for each layer index;
        # odd indices are public
        name = f"layer_vectortiles_{i}"
        layer = LayerVectorTiles(name=name)
        layer.layer = name
        layer.public = 1 == i % 2
        layer.style = "https://vectortiles-staging.geoportail.lu/styles/roadmap/style.json"
        layer.xyz = "https://vectortiles-staging.geoportail.lu/styles/roadmap/{z}/{x}/{y}.png"
        return layer
    data = factory_build_layers(layer_builder, dbsession)
    # `server` leaks out of the loop above: the default layers are built
    # against the last server created
    data["default"] = get_test_default_layers(dbsession, server)
    dbsession.flush()
    yield data
@pytest.mark.usefixtures("layer_vectortiles_test_data", "test_app")
class TestLayerVectortiles(AbstractViewsTests):
    """Admin UI CRUD tests for vector tiles layers (index grid, new,
    edit, duplicate and delete views)."""
    _prefix = "/admin/layers_vectortiles"
    def test_index_rendering(self, test_app):
        """The index page renders the expected grid columns and menu entry."""
        resp = self.get(test_app)
        self.check_left_menu(resp, "Vector Tiles Layers")
        expected = [
            ("actions", "", "false"),
            ("id", "id", "true"),
            ("name", "Name"),
            ("description", "Description"),
            ("public", "Public"),
            ("geo_table", "Geo table"),
            ("exclude_properties", "Exclude properties"),
            ("style", "Style"),
            ("xyz", "Raster URL"),
            ("dimensions", "Dimensions", "false"),
            ("interfaces", "Interfaces"),
            ("restrictionareas", "Restriction areas", "false"),
            ("parents_relation", "Parents", "false"),
            ("metadatas", "Metadatas", "false"),
        ]
        self.check_grid_headers(resp, expected)
    def test_grid_complex_column_val(self, test_app, layer_vectortiles_test_data):
        """Grid rows expose the layer id and name."""
        json = self.check_search(test_app, sort="name")
        row = json["rows"][0]
        layer = layer_vectortiles_test_data["layers"][0]
        assert layer.id == int(row["_id_"])
        assert layer.name == row["name"]
    def test_new(self, test_app, layer_vectortiles_test_data, dbsession):
        """The "new" form starts empty even when default layers exist."""
        default_vectortiles = layer_vectortiles_test_data["default"]["vectortiles"]
        default_vectortiles.name = "so can I not be found"
        dbsession.flush()
        form = self.get_item(test_app, "new").form
        assert "" == self.get_first_field_named(form, "id").value
        assert "" == self.get_first_field_named(form, "name").value
        assert "" == self.get_first_field_named(form, "style").value
        assert "" == self.get_first_field_named(form, "xyz").value
    def test_grid_search(self, test_app):
        """Searching by layer name returns exactly one match."""
        self.check_search(test_app, "layer_vectortiles_10", total=1)
    def test_base_edit(self, test_app, layer_vectortiles_test_data):
        """The edit form is pre-filled with the layer's base fields."""
        layer = layer_vectortiles_test_data["layers"][10]
        form = self.get_item(test_app, layer.id).form
        assert "layer_vectortiles_10" == self.get_first_field_named(form, "name").value
        assert "" == self.get_first_field_named(form, "description").value
    def test_public_checkbox_edit(self, test_app, layer_vectortiles_test_data):
        """The public checkbox reflects each layer's public flag."""
        layer = layer_vectortiles_test_data["layers"][10]
        form = self.get_item(test_app, layer.id).form
        assert not form["public"].checked
        layer = layer_vectortiles_test_data["layers"][11]
        form = self.get_item(test_app, layer.id).form
        assert form["public"].checked
    def test_edit(self, test_app, layer_vectortiles_test_data, dbsession):
        """Editing a layer persists new field values and relations."""
        layer = layer_vectortiles_test_data["layers"][0]
        form = self.get_item(test_app, layer.id).form
        # the form must mirror the current database state
        assert str(layer.id) == self.get_first_field_named(form, "id").value
        assert "hidden" == self.get_first_field_named(form, "id").attrs["type"]
        assert layer.name == self.get_first_field_named(form, "name").value
        assert str(layer.description or "") == self.get_first_field_named(form, "description").value
        assert layer.public is False
        assert layer.public == form["public"].checked
        assert str(layer.geo_table or "") == form["geo_table"].value
        assert str(layer.exclude_properties or "") == form["exclude_properties"].value
        assert str(layer.style or "") == form["style"].value
        assert str(layer.xyz or "") == form["xyz"].value
        interfaces = layer_vectortiles_test_data["interfaces"]
        assert {interfaces[0].id, interfaces[2].id} == {i.id for i in layer.interfaces}
        self._check_interfaces(form, interfaces, layer)
        ras = layer_vectortiles_test_data["restrictionareas"]
        assert {ras[0].id, ras[2].id} == {i.id for i in layer.restrictionareas}
        self._check_restrictionsareas(form, ras, layer)
        new_values = {
            "name": "new_name",
            "description": "new description",
            "public": True,
            "geo_table": "new_geo_table",
            "exclude_properties": "property1,property2",
            "style": "https://new_style.json",
            "xyz": "https://new_style/{x}/{y}/{z}.png",
        }
        for key, value in new_values.items():
            self.set_first_field_named(form, key, value)
        form["interfaces"] = [interfaces[1].id, interfaces[3].id]
        form["restrictionareas"] = [ras[1].id, ras[3].id]
        resp = form.submit("submit")
        # successful submit redirects back to the item page
        assert str(layer.id) == re.match(
            fr"http://localhost{self._prefix}/(.*)\?msg_col=submit_ok", resp.location
        ).group(1)
        dbsession.expire(layer)
        for key, value in new_values.items():
            if isinstance(value, bool):
                assert value == getattr(layer, key)
            else:
                assert str(value or "") == str(getattr(layer, key) or "")
        assert {interfaces[1].id, interfaces[3].id} == {interface.id for interface in layer.interfaces}
        assert {ras[1].id, ras[3].id} == {ra.id for ra in layer.restrictionareas}
    def test_submit_new(self, dbsession, test_app, layer_vectortiles_test_data):
        """Posting the new form creates the layer and redirects to it."""
        from c2cgeoportal_commons.models.main import LayerVectorTiles
        resp = test_app.post(
            "/admin/layers_vectortiles/new",
            {
                "name": "new_name",
                "description": "new description",
                "public": True,
                "style": "https://new_style/styles/layer/style.json",
                "xyz": "https://new_style/styles/layer/{z}/{x}/{y}.png",
            },
            status=302,
        )
        layer = dbsession.query(LayerVectorTiles).filter(LayerVectorTiles.name == "new_name").one()
        assert str(layer.id) == re.match(
            r"http://localhost/admin/layers_vectortiles/(.*)\?msg_col=submit_ok", resp.location
        ).group(1)
    def test_duplicate(self, layer_vectortiles_test_data, test_app, dbsession):
        """Duplicating a layer pre-fills the form and clones metadatas."""
        from c2cgeoportal_commons.models.main import LayerVectorTiles
        layer = layer_vectortiles_test_data["layers"][3]
        resp = test_app.get(f"/admin/layers_vectortiles/{layer.id}/duplicate", status=200)
        form = resp.form
        # duplicated form carries the source values but no id
        assert "" == self.get_first_field_named(form, "id").value
        assert layer.name == self.get_first_field_named(form, "name").value
        assert str(layer.description or "") == self.get_first_field_named(form, "description").value
        assert layer.public is True
        assert layer.public == form["public"].checked
        assert str(layer.geo_table or "") == form["geo_table"].value
        assert str(layer.exclude_properties or "") == form["exclude_properties"].value
        assert str(layer.style or "") == form["style"].value
        assert str(layer.xyz or "") == form["xyz"].value
        interfaces = layer_vectortiles_test_data["interfaces"]
        assert {interfaces[3].id, interfaces[1].id} == {i.id for i in layer.interfaces}
        self._check_interfaces(form, interfaces, layer)
        self.set_first_field_named(form, "name", "clone")
        resp = form.submit("submit")
        layer = dbsession.query(LayerVectorTiles).filter(LayerVectorTiles.name == "clone").one()
        assert str(layer.id) == re.match(
            r"http://localhost/admin/layers_vectortiles/(.*)\?msg_col=submit_ok", resp.location
        ).group(1)
        assert layer.id == layer.metadatas[0].item_id
        assert layer_vectortiles_test_data["layers"][3].metadatas[0].name == layer.metadatas[0].name
        assert layer_vectortiles_test_data["layers"][3].metadatas[1].name == layer.metadatas[1].name
    def test_delete(self, test_app, dbsession):
        """Deleting a layer removes it from all inheritance tables."""
        from c2cgeoportal_commons.models.main import Layer, LayerVectorTiles, TreeItem
        layer_id = dbsession.query(LayerVectorTiles.id).first().id
        test_app.delete(f"/admin/layers_vectortiles/{layer_id}", status=200)
        assert dbsession.query(LayerVectorTiles).get(layer_id) is None
        assert dbsession.query(Layer).get(layer_id) is None
        assert dbsession.query(TreeItem).get(layer_id) is None
| [
"re.match",
"pytest.mark.usefixtures",
"pytest.fixture",
"c2cgeoportal_commons.models.main.LayerVectorTiles",
"c2cgeoportal_commons.models.main.OGCServer"
] | [((140, 172), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""function"""'}), "(scope='function')\n", (154, 172), False, 'import pytest\n'), ((174, 222), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""dbsession"""', '"""transact"""'], {}), "('dbsession', 'transact')\n", (197, 222), False, 'import pytest\n'), ((1151, 1217), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""layer_vectortiles_test_data"""', '"""test_app"""'], {}), "('layer_vectortiles_test_data', 'test_app')\n", (1174, 1217), False, 'import pytest\n'), ((388, 417), 'c2cgeoportal_commons.models.main.OGCServer', 'OGCServer', ([], {'name': 'f"""server_{i}"""'}), "(name=f'server_{i}')\n", (397, 417), False, 'from c2cgeoportal_commons.models.main import LayerVectorTiles, OGCServer\n'), ((688, 715), 'c2cgeoportal_commons.models.main.LayerVectorTiles', 'LayerVectorTiles', ([], {'name': 'name'}), '(name=name)\n', (704, 715), False, 'from c2cgeoportal_commons.models.main import Layer, LayerVectorTiles, TreeItem\n'), ((5795, 5883), 're.match', 're.match', (['f"""http://localhost{self._prefix}/(.*)\\\\?msg_col=submit_ok"""', 'resp.location'], {}), "(f'http://localhost{self._prefix}/(.*)\\\\?msg_col=submit_ok', resp.\n location)\n", (5803, 5883), False, 'import re\n'), ((7044, 7141), 're.match', 're.match', (['"""http://localhost/admin/layers_vectortiles/(.*)\\\\?msg_col=submit_ok"""', 'resp.location'], {}), "('http://localhost/admin/layers_vectortiles/(.*)\\\\?msg_col=submit_ok',\n resp.location)\n", (7052, 7141), False, 'import re\n'), ((8536, 8633), 're.match', 're.match', (['"""http://localhost/admin/layers_vectortiles/(.*)\\\\?msg_col=submit_ok"""', 'resp.location'], {}), "('http://localhost/admin/layers_vectortiles/(.*)\\\\?msg_col=submit_ok',\n resp.location)\n", (8544, 8633), False, 'import re\n')] |
#encoding: utf-8
from __future__ import print_function
from builtins import str
import ipaddress
import datetime
import os
import sys
from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve
from twisted.internet import reactor
from twisted.python.runtime import platform
# TTL used for every A record we hand out; 0 discourages client-side caching
# so the rebinding flip (whitelisted IP -> internal IP) takes effect quickly.
TTL = 0
# Per-name query counter driving the rebinding decision.
# NOTE(review): this shadows the builtin `dict` for the rest of the module.
dict = {}
# Source networks whose requests are neither printed nor logged.
dont_print_ip_ranges = ['172.16.58.3/16','172.16.31.10/16']
# Name prefixes that must always resolve to SERVERIP (completed with DOMAIN in __main__).
dont_rebind_nameservers = ["ns1.", "ns2."]
# Log file name stamped with the server start time.
FILENAME = "dns-log-" + str(datetime.datetime.now().strftime("%H-%M-%S.%f-%d-%m-%Y"))+'.log'
# Filled from argv in the __main__ block below.
WHITELISTEDIP = ''
INTERNALIP = ''
SERVERIP = ''
PORT = 53
DOMAIN = ''
def OpenLogFile():
    """(Re)open the global log file handle ``f`` in append mode.

    Text mode is used on Python 3, binary append mode on older
    interpreters, matching the original behaviour of both branches.
    """
    global f
    mode = 'a' if sys.version_info[0] == 3 else 'ab'
    f = open(FILENAME, mode)
def CloseLogFile():
    """Close the global log file handle opened by OpenLogFile()."""
    f.close()
def search_file_for_all(hosts_file, name):
    """Pick the IP to answer an A query for ``name`` (DNS rebinding logic).

    The first query for a name is answered with WHITELISTEDIP, every later
    query with INTERNALIP (the rebind target). Names listed in
    dont_rebind_nameservers always resolve to SERVERIP. Each decision is
    printed and appended to the log file.

    :param hosts_file: unused here; kept for the hosts-Resolver call signature
    :param name: queried name, bytes
    :return: single-element list with the chosen IP as a native string
    """
    results = []
    if name.decode().lower() not in dont_rebind_nameservers:
        # First lookup of this name -> whitelisted IP; afterwards -> internal IP.
        if name not in dict or dict[name] < 1:
            ip = WHITELISTEDIP
        else:
            ip = INTERNALIP
        # Count how often this name has been resolved.
        if name not in dict:
            dict[name] = 0
        dict[name] += 1
    else:
        # Own nameserver names are never rebound.
        ip = SERVERIP
    print('================================================================================================')
    print("ServerTime - A record: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
    print('Response with A record: ', name.decode('utf-8'), ' -> ', ip, sep='')
    print('================================================================================================')
    # Mirror the console output into the log file.
    OpenLogFile()
    print('================================================================================================', file=f)
    print("ServerTime - A record: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='', file=f)
    print('Response with A record: ', name.decode('utf-8'), ' -> ', ip, sep='', file=f)
    print('================================================================================================', file=f)
    CloseLogFile()
    results.append(hosts_module.nativeString(ip))
    return results
class Resolver(hosts_module.Resolver):
    """hosts-file Resolver whose A records come from search_file_for_all(),
    i.e. every query is routed through the rebinding logic above."""
    def _aRecords(self, name):
        # One A record (with the module-wide TTL) per address returned by the
        # rebinding logic, keeping only syntactically valid IP addresses.
        # NOTE(review): hosts_module.FilePath(self.file) is passed as the
        # unused hosts_file argument -- confirm that is intentional.
        return tuple([
            dns.RRHeader(name, dns.A, dns.IN, TTL, dns.Record_A(addr, TTL))
            for addr in search_file_for_all(hosts_module.FilePath(self.file), name)
            if hosts_module.isIPAddress(addr)
        ])
class PrintClientAddressDNSServerFactory(server.DNSServerFactory):
    """DNSServerFactory (TCP) that prints and logs every client connection,
    unless the client falls into one of the dont_print_ip_ranges networks."""
    def check_network(self, network):
        """Return True when ``network`` (an address string) lies inside any
        of the configured do-not-log ranges."""
        for dont_print_ip_range in dont_print_ip_ranges:
            if ipaddress.ip_address(u"%s" % network) in ipaddress.ip_network(u"%s" % dont_print_ip_range):
                return True
        return False
    def buildProtocol(self, addr):
        # Print/log the connection unless the source is suppressed, then
        # delegate to the stock twisted implementation.
        if not self.check_network(addr.host):
            print('------------------------------------------------------------------------------------------------')
            print("ServerTime - DNSServerFactory: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
            print("Request: Connection to DNSServerFactory from: ", addr.host," on port: ",addr.port," using ",addr.type,sep='')
            print('------------------------------------------------------------------------------------------------')
            OpenLogFile()
            print('------------------------------------------------------------------------------------------------', file=f)
            print("ServerTime: - DNSServerFactory: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), file=f, sep='')
            print("Request: Connection to DNSServerFactory from: ", addr.host," on port: ",addr.port," using ",addr.type, file=f, sep='')
            print('------------------------------------------------------------------------------------------------', file=f)
            CloseLogFile()
        return server.DNSServerFactory.buildProtocol(self, addr)
class PrintClientAddressDNSDatagramProtocol(dns.DNSDatagramProtocol):
    """DNSDatagramProtocol (UDP) that prints and logs every datagram source,
    unless the sender falls into one of the dont_print_ip_ranges networks."""
    def check_network(self, network):
        """Return True when ``network`` (an address string) lies inside any
        of the configured do-not-log ranges."""
        for dont_print_ip_range in dont_print_ip_ranges:
            if ipaddress.ip_address(u"%s" % network) in ipaddress.ip_network(u"%s" % dont_print_ip_range):
                return True
        return False
    def datagramReceived(self, datagram, addr):
        # Print/log the datagram source unless suppressed, then delegate to
        # the stock twisted implementation for actual DNS handling.
        if not self.check_network(addr[0]):
            print('------------------------------------------------------------------------------------------------')
            print("ServerTime - DNSDatagramProtocol: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), sep='')
            print("Request: Datagram to DNSDatagramProtocol from: ", addr[0], " on port: ", addr[1], sep='')
            print('------------------------------------------------------------------------------------------------')
            OpenLogFile()
            print('------------------------------------------------------------------------------------------------', file=f)
            print("ServerTime - DNSDatagramProtocol: ",datetime.datetime.now().strftime("%H:%M:%S.%f %d-%m-%Y"), file=f, sep='')
            print("Request: Datagram to DNSDatagramProtocol from: ", addr[0], " on port: ", addr[1], file=f, sep='')
            print('------------------------------------------------------------------------------------------------', file=f)
            CloseLogFile()
        return dns.DNSDatagramProtocol.datagramReceived(self, datagram, addr)
def create_resolver(servers=None, resolvconf=None, hosts=None):
    """Build the resolver chain used to answer queries.

    The rebinding hosts ``Resolver`` is placed first so it answers before
    the cache or the regular recursive resolver are consulted.

    :param servers: upstream (address, port) pairs for the fallback resolver
    :param resolvconf: resolv.conf path (POSIX only, defaults to /etc/resolv.conf)
    :param hosts: hosts file path handed to the rebinding Resolver
    :return: twisted ResolverChain
    """
    if platform.getType() == 'posix':
        if resolvconf is None:
            resolvconf = b'/etc/resolv.conf'
        if hosts is None:
            hosts = b'/etc/hosts'
        the_resolver = client.Resolver(resolvconf, servers)
        host_resolver = Resolver(hosts)
    else:
        # Windows: bootstrap a root resolver instead of reading resolv.conf.
        if hosts is None:
            hosts = r'c:\windows\hosts'
        from twisted.internet import reactor
        bootstrap = client._ThreadedResolverImpl(reactor)
        host_resolver = Resolver(hosts)
        the_resolver = root.bootstrap(bootstrap, resolverFactory=client.Resolver)
    return resolve.ResolverChain([host_resolver, cache.CacheResolver(), the_resolver])
def main(port):
    """Start the rebinding DNS server on UDP and TCP ``port`` and run the
    twisted reactor (this call blocks until the reactor stops).

    :param port: port number to listen on
    """
    factory = PrintClientAddressDNSServerFactory(
        clients=[create_resolver(servers=[('8.8.8.8', 53)], hosts='hosts')],
    )
    protocol = PrintClientAddressDNSDatagramProtocol(controller=factory)
    # BUG FIX: honour the ``port`` parameter instead of silently reading the
    # global PORT (the sole caller passes PORT, so behaviour is unchanged).
    reactor.listenUDP(port, protocol)
    reactor.listenTCP(port, factory)
    print('-------------------------------------------------------------------------------------------------------------')
    print("DNS Server started...\nListening on 0.0.0.0:" + str(port))
    print("Log file name: " + FILENAME)
    print("Not showing/logging requests from IP range: " + ', '.join(dont_print_ip_ranges))
    print("Not rebinding requests for A records: " + ', '.join(dont_rebind_nameservers) + " -> " + SERVERIP)
    print('-------------------------------------------------------------------------------------------------------------\n\n')
    reactor.run()
if __name__ == '__main__':
    # Expect exactly 5 arguments: WhitelistedIP InternalIP ServerIP Port Domain.
    if len(sys.argv) != 6:
        print("Usage: python "+sys.argv[0]+" WhitelistedIP InternalIP ServerIP Port Domain")
        print ("Example: python "+sys.argv[0]+" 192.168.3.11 169.254.169.254 192.168.3.11 53 localdomains.pw")
        exit(1)
    else:
        # Populate the module-level configuration from the command line.
        WHITELISTEDIP = sys.argv[1]
        INTERNALIP = sys.argv[2]
        SERVERIP = sys.argv[3]
        PORT = int(sys.argv[4])
        DOMAIN = sys.argv[5]
        # Complete the "ns1."/"ns2." prefixes with the served domain.
        dont_rebind_nameservers = [dont_rebind_nameservers[0] + DOMAIN, dont_rebind_nameservers[1] + DOMAIN]
        main(PORT)
| [
"twisted.names.hosts.nativeString",
"twisted.names.cache.CacheResolver",
"twisted.names.dns.Record_A",
"twisted.names.server.DNSServerFactory.buildProtocol",
"twisted.names.client.Resolver",
"twisted.names.root.bootstrap",
"twisted.names.client._ThreadedResolverImpl",
"twisted.internet.reactor.listenU... | [((6386, 6419), 'twisted.internet.reactor.listenUDP', 'reactor.listenUDP', (['PORT', 'protocol'], {}), '(PORT, protocol)\n', (6403, 6419), False, 'from twisted.internet import reactor\n'), ((6424, 6456), 'twisted.internet.reactor.listenTCP', 'reactor.listenTCP', (['PORT', 'factory'], {}), '(PORT, factory)\n', (6441, 6456), False, 'from twisted.internet import reactor\n'), ((7022, 7035), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (7033, 7035), False, 'from twisted.internet import reactor\n'), ((2035, 2064), 'twisted.names.hosts.nativeString', 'hosts_module.nativeString', (['ip'], {}), '(ip)\n', (2060, 2064), True, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((3868, 3917), 'twisted.names.server.DNSServerFactory.buildProtocol', 'server.DNSServerFactory.buildProtocol', (['self', 'addr'], {}), '(self, addr)\n', (3905, 3917), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((5367, 5429), 'twisted.names.dns.DNSDatagramProtocol.datagramReceived', 'dns.DNSDatagramProtocol.datagramReceived', (['self', 'datagram', 'addr'], {}), '(self, datagram, addr)\n', (5407, 5429), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((5503, 5521), 'twisted.python.runtime.platform.getType', 'platform.getType', ([], {}), '()\n', (5519, 5521), False, 'from twisted.python.runtime import platform\n'), ((5693, 5729), 'twisted.names.client.Resolver', 'client.Resolver', (['resolvconf', 'servers'], {}), '(resolvconf, servers)\n', (5708, 5729), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((5911, 5948), 'twisted.names.client._ThreadedResolverImpl', 'client._ThreadedResolverImpl', (['reactor'], {}), '(reactor)\n', (5939, 5948), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, 
resolve\n'), ((6012, 6070), 'twisted.names.root.bootstrap', 'root.bootstrap', (['bootstrap'], {'resolverFactory': 'client.Resolver'}), '(bootstrap, resolverFactory=client.Resolver)\n', (6026, 6070), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((6120, 6141), 'twisted.names.cache.CacheResolver', 'cache.CacheResolver', ([], {}), '()\n', (6139, 6141), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((6639, 6648), 'builtins.str', 'str', (['PORT'], {}), '(PORT)\n', (6642, 6648), False, 'from builtins import str\n'), ((1289, 1312), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1310, 1312), False, 'import datetime\n'), ((1717, 1740), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1738, 1740), False, 'import datetime\n'), ((2576, 2613), 'ipaddress.ip_address', 'ipaddress.ip_address', (["(u'%s' % network)"], {}), "(u'%s' % network)\n", (2596, 2613), False, 'import ipaddress\n'), ((2617, 2666), 'ipaddress.ip_network', 'ipaddress.ip_network', (["(u'%s' % dont_print_ip_range)"], {}), "(u'%s' % dont_print_ip_range)\n", (2637, 2666), False, 'import ipaddress\n'), ((4100, 4137), 'ipaddress.ip_address', 'ipaddress.ip_address', (["(u'%s' % network)"], {}), "(u'%s' % network)\n", (4120, 4137), False, 'import ipaddress\n'), ((4141, 4190), 'ipaddress.ip_network', 'ipaddress.ip_network', (["(u'%s' % dont_print_ip_range)"], {}), "(u'%s' % dont_print_ip_range)\n", (4161, 4190), False, 'import ipaddress\n'), ((456, 479), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (477, 479), False, 'import datetime\n'), ((2231, 2254), 'twisted.names.dns.Record_A', 'dns.Record_A', (['addr', 'TTL'], {}), '(addr, TTL)\n', (2243, 2254), False, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((2355, 2385), 'twisted.names.hosts.isIPAddress', 'hosts_module.isIPAddress', (['addr'], 
{}), '(addr)\n', (2379, 2385), True, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((2300, 2332), 'twisted.names.hosts.FilePath', 'hosts_module.FilePath', (['self.file'], {}), '(self.file)\n', (2321, 2332), True, 'from twisted.names import client, dns, server, hosts as hosts_module, root, cache, resolve\n'), ((2970, 2993), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2991, 2993), False, 'import datetime\n'), ((3488, 3511), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3509, 3511), False, 'import datetime\n'), ((4508, 4531), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4529, 4531), False, 'import datetime\n'), ((5008, 5031), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5029, 5031), False, 'import datetime\n')] |
#!/usr/bin/env python
#from difflib import SequenceMatcher
import discord
import numpy as np
from joblib import dump, load
import sys
import os
import math
helpText = "Tell me the sizes of two of your gems and i will calculate the optimal size of the third gem based on the current ritual bonus. If running a ritual with a bigger third gem will still yield a maximum sized gem (2,999,997). You can also change the bonus to use or get the current bonus used for calculations.\n"
helpText = helpText + "Type \"!max SIZE1,SIZE2\" where SIZE1 and SIZE2 are numeric values, the sizes of your first and second gem respectively. Type !bonus [set=VAL] to get or set the current ritual bonus.\n"
def calculate(text):
    """Given "SIZE1,SIZE2", compute the optimal third gem size for the
    currently stored ritual bonus and return the reply message."""
    parts = text.split(",")
    if len(parts) != 2:
        return helpText
    # Parse both gem sizes; report the first value that is not an integer.
    sizes = []
    for raw in parts:
        try:
            sizes.append(int(raw.replace(" ", "")))
        except ValueError:
            return "Could not parse value \"" + raw + "\". Must be an integer value, check your input and try again!"
    first, second = sizes
    ritual_bonus = load('bonus.joblib')
    target = math.ceil(2999997 / ritual_bonus) - first - second
    if 0 < target <= 999999:
        absolute = 2999997 - first - second - target
        return "Optimal 3rd gem size is " + str(target) + ". Absolute bonus will be " + str(absolute) + "."
    if not 0 < first <= 999999 or not 0 < second <= 999999:
        return "Whoops, something went wrong with your input values... valid gems have a positive value below 1,000,000. Your input values where \"" + parts[0] + "\" and \"" + parts[1] + "\". Check your input and try again!"
    return "Optimal 3rd gem size would be " + str(target) + ". Try bigger 1st and 2nd gem for a maximum gem ritual."
def bonus(text):
    """Get or set the ritual bonus used by calculate().

    ``text`` is everything after "!bonus": "set=VAL" stores VAL (a float)
    via joblib, anything else reports the currently stored bonus.

    :param text: raw command argument, str
    :return: reply message to send back to the channel, str
    """
    parts = text.split("=")
    if len(parts) != 2 or parts[0].lower().replace(" ", "") != "set":
        return "Current ritual bonus is " + str(load('bonus.joblib'))
    try:
        val = float(parts[1].replace(" ", ""))
    except ValueError:
        # BUG FIX: the original referenced an undefined name ``i`` here,
        # raising NameError instead of returning the error message.
        return "Could not parse value \"" + parts[1] + "\". Must be a floating point value, check your input and try again!"
    dump(val, 'bonus.joblib')
    return "Bonus set to " + str(val) + "."
# Bot token: first command-line argument if given, otherwise the
# ACCESS_TOKEN environment variable (KeyError if neither is provided).
if len(sys.argv) > 1:
    TOKEN = sys.argv[1]
else:
    TOKEN = os.environ["ACCESS_TOKEN"]
client = discord.Client()
@client.event
async def on_message(message):
    """Dispatch the !max and !bonus chat commands."""
    # Ignore the bot's own messages to avoid reply loops.
    if message.author == client.user:
        return
    if message.content.startswith('!max'):
        msg = calculate(message.content[4:])
        msg = msg.format(message)
        await message.channel.send(msg)
    elif message.content.startswith('!bonus'):
        msg = bonus(message.content[6:])
        msg = msg.format(message)
        await message.channel.send(msg)
@client.event
async def on_ready():
    """Print the bot identity once the gateway connection is established."""
    for part in ('Logged in as', client.user.name, client.user.id, '------'):
        print(part)
client.run(TOKEN)
| [
"discord.Client",
"math.ceil",
"joblib.dump",
"joblib.load"
] | [((2147, 2163), 'discord.Client', 'discord.Client', ([], {}), '()\n', (2161, 2163), False, 'import discord\n'), ((1004, 1024), 'joblib.load', 'load', (['"""bonus.joblib"""'], {}), "('bonus.joblib')\n", (1008, 1024), False, 'from joblib import dump, load\n'), ((1986, 2011), 'joblib.dump', 'dump', (['val', '"""bonus.joblib"""'], {}), "(val, 'bonus.joblib')\n", (1990, 2011), False, 'from joblib import dump, load\n'), ((1035, 1061), 'math.ceil', 'math.ceil', (['(2999997 / bonus)'], {}), '(2999997 / bonus)\n', (1044, 1061), False, 'import math\n'), ((1779, 1799), 'joblib.load', 'load', (['"""bonus.joblib"""'], {}), "('bonus.joblib')\n", (1783, 1799), False, 'from joblib import dump, load\n')] |
from flask import current_app
from flask_script import Manager
from flask_migrate import MigrateCommand
from info import create_app
# Create the Flask application (development configuration).
app = create_app("dev")
# Create the Flask-Script command manager.
mgr = Manager(app)
# Register the database migration commands under the name "mc".
mgr.add_command("mc", MigrateCommand)
# Command: create a super administrator account from the command line.
@mgr.option("-u", dest="username")
@mgr.option("-p", dest="password")
def create_superuser(username, password):
    """Create an admin user: python manage.py create_superuser -u NAME -p PASS.

    :param username: login name, also used as mobile and nickname
    :param password: plain-text password (hashed by the User model setter)
    """
    if not all([username, password]):
        print("账号/密码不完整")
        return
    from info.models import User
    from info import db
    user = User()
    user.mobile = username
    user.password = password
    user.nick_name = username
    user.is_admin = True
    try:
        db.session.add(user)
        db.session.commit()
    except BaseException as e:
        current_app.logger.error(e)
        db.session.rollback()
        print("生成失败")
        # BUG FIX: stop here -- the original fell through and also printed
        # the success message after a failed commit.
        return
    print("生成管理员成功")
if __name__ == '__main__':
    # Dispatch Flask-Script commands (e.g. mc, create_superuser).
    mgr.run()
"info.models.User",
"info.db.session.add",
"flask_script.Manager",
"info.create_app",
"info.db.session.rollback",
"info.db.session.commit",
"flask.current_app.logger.error"
] | [((146, 163), 'info.create_app', 'create_app', (['"""dev"""'], {}), "('dev')\n", (156, 163), False, 'from info import create_app\n'), ((178, 190), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (185, 190), False, 'from flask_script import Manager\n'), ((511, 517), 'info.models.User', 'User', ([], {}), '()\n', (515, 517), False, 'from info.models import User\n'), ((647, 667), 'info.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (661, 667), False, 'from info import db\n'), ((676, 695), 'info.db.session.commit', 'db.session.commit', ([], {}), '()\n', (693, 695), False, 'from info import db\n'), ((735, 762), 'flask.current_app.logger.error', 'current_app.logger.error', (['e'], {}), '(e)\n', (759, 762), False, 'from flask import current_app\n'), ((771, 792), 'info.db.session.rollback', 'db.session.rollback', ([], {}), '()\n', (790, 792), False, 'from info import db\n')] |
from typing import List
from hwtHls.ssa.context import SsaContext
from hwtHls.ssa.instr import SsaInstrBranch, SsaInstr
from hwtHls.ssa.phi import SsaPhi
class SsaBasicBlock():
    """A single basic block of code in Static Single Assignment (SSA) form.

    :ivar label: human readable name, for debugging only
    :ivar predecessors: blocks which may transfer control into this block
    :ivar phis: phi functions selecting a value based on the predecessor block
    :ivar body: the ordinary (non-phi) instructions of this block
    :ivar successors: branch object tracking the jumps out of this block
    :ivar origins: objects which contributed to the existence of this block
    """

    def __init__(self, ctx: SsaContext, label:str):
        self.ctx = ctx
        self.label = label
        self.predecessors: List[SsaBasicBlock] = []
        self.phis: List[SsaPhi] = []
        self.body: List[SsaInstr] = []
        self.successors = SsaInstrBranch(self)
        self.origins = []

    def _takeOwnership(self, node):
        # A node may belong to at most one block; claim it for this one.
        assert node.block is None, (node, node.block, self)
        node.block = self

    def appendPhi(self, phi: SsaPhi):
        """Add ``phi`` at the end of the phi list of this block."""
        self._takeOwnership(phi)
        self.phis.append(phi)

    def insertPhi(self, index: int, phi: SsaPhi):
        """Insert ``phi`` at position ``index`` of the phi list."""
        self._takeOwnership(phi)
        self.phis.insert(index, phi)

    def appendInstruction(self, instr: SsaInstr):
        """Add ``instr`` at the end of the body of this block."""
        self._takeOwnership(instr)
        self.body.append(instr)

    def insertInstruction(self, index: int, instr: SsaInstr):
        """Insert ``instr`` at position ``index`` of the body."""
        self._takeOwnership(instr)
        self.body.insert(index, instr)

    def __repr__(self):
        return "<{:s} {:s}>".format(self.__class__.__name__, self.label)
| [
"hwtHls.ssa.instr.SsaInstrBranch"
] | [((956, 976), 'hwtHls.ssa.instr.SsaInstrBranch', 'SsaInstrBranch', (['self'], {}), '(self)\n', (970, 976), False, 'from hwtHls.ssa.instr import SsaInstrBranch, SsaInstr\n')] |
#!/usr/bin/env python
"""cli module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import click
import traceback
import importlib
from . import version
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
@click.command(context_settings=CONTEXT_SETTINGS)
@click.argument('modulename')
@click.option('--debug', default=False, help='')
@click.version_option(version.__version__, '--version')
def cli(**kwargs):
    """Print MODULENAME and its installed __version__."""
    modulename = kwargs['modulename']
    module = importlib.import_module(modulename)
    # getattr instead of a bare ``except:``: only a missing __version__
    # attribute should fall back to '' -- other errors must propagate.
    # (Also avoids shadowing the module-level ``version`` import.)
    module_version = getattr(module, '__version__', '')
    click.secho("{} == ".format(modulename), nl=False)
    click.secho("{}".format(module_version), fg='green', bold=True)
if __name__ == '__main__':
    # Allow running this module directly as a script.
    cli()
| [
"click.argument",
"importlib.import_module",
"click.option",
"click.version_option",
"click.command"
] | [((284, 332), 'click.command', 'click.command', ([], {'context_settings': 'CONTEXT_SETTINGS'}), '(context_settings=CONTEXT_SETTINGS)\n', (297, 332), False, 'import click\n'), ((334, 362), 'click.argument', 'click.argument', (['"""modulename"""'], {}), "('modulename')\n", (348, 362), False, 'import click\n'), ((364, 411), 'click.option', 'click.option', (['"""--debug"""'], {'default': '(False)', 'help': '""""""'}), "('--debug', default=False, help='')\n", (376, 411), False, 'import click\n'), ((413, 467), 'click.version_option', 'click.version_option', (['version.__version__', '"""--version"""'], {}), "(version.__version__, '--version')\n", (433, 467), False, 'import click\n'), ((538, 573), 'importlib.import_module', 'importlib.import_module', (['modulename'], {}), '(modulename)\n', (561, 573), False, 'import importlib\n')] |
import os
import time
import argparse
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from config import get_config, export_config
from model.textcnn import TextCNN
from model.textrnn import TextRNN
from sklearn.model_selection import train_test_split
from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator
# CLI arguments: optional checkpoint to resume from and a flag enabling
# down-sampling of the over-represented rate-10 reviews.
parser = argparse.ArgumentParser(description='train/test movie review classification model')
parser.add_argument('--checkpoint', type=str, help='pre-trained model', default=None)
# NOTE(review): argparse's type=bool treats any non-empty string as True
# ("--refine_data False" still enables it) -- confirm this is intended.
parser.add_argument('--refine_data', type=bool, help='solving data imbalance problem', default=False)
args = parser.parse_args()
# parsed args
checkpoint = args.checkpoint
refine_data = args.refine_data
# Configuration
config, _ = get_config()
# Seed numpy and TensorFlow for reproducible runs.
np.random.seed(config.seed)
tf.set_random_seed(config.seed)
def data_distribution(y_, size=10, img='dist.png'):
    """Plot and save the movie-rate distribution as a bar chart.

    Rates always fall into 10 buckets (1..10); ``size`` only describes the
    label format of ``y_``: scalar rates when ``size == 1``, otherwise
    one-hot vectors decoded with argmax.

    :param y_: rate data, numpy array
    :param size: number of classes per label, int
    :param img: path the chart is saved to, str
    :return: bucket frequencies, numpy array of shape (10,)
    """
    from matplotlib import pyplot as plt

    # Count how many reviews fall into each of the 10 rate buckets.
    y_dist = np.zeros((10,), dtype=np.int32)
    for y in tqdm(y_):
        if size == 1:
            y_dist[y - 1] += 1  # scalar rate in 1..10
        else:
            y_dist[np.argmax(y, axis=-1)] += 1  # one-hot encoded rate

    plt.figure(figsize=(10, 8))
    plt.xlabel('rate')
    plt.ylabel('frequency')
    plt.grid(True)
    # BUG FIX: the x positions must match the 10 buckets of y_dist;
    # range(size) crashed whenever size != 10 (e.g. for size == 1).
    # Also replaced the copy-pasted 'rainfall' legend label.
    plt.bar(range(10), y_dist, width=.35, align='center', alpha=.5, label='rate')
    plt.xticks(range(10), list(range(1, 11)))
    plt.savefig(img)
    plt.show()

    return y_dist
def data_confusion_matrix(y_pred, y_true, labels, normalize=True):
    """Plot a 3-class (bad/normal/good) confusion matrix of predicted vs
    true rates and save it to ./confusion_matrix.png.

    :param y_pred: predicted rates, iterable of scalars
    :param y_true: true rates, iterable of 1-element sequences
    :param labels: tick labels for the 3 buckets, list of str
    :param normalize: convert counts to per-row ratios when True, bool
    """
    import itertools
    import matplotlib.pyplot as plt
    from sklearn.metrics import confusion_matrix
    """
    0-3: bad
    4-7: normal
    7-10: good
    """
    def labeling(y):
        # Bucket a scalar rate; actual boundaries are [0,3), [3,7), [7,..).
        if 0 <= y < 3:
            return 0
        elif 3 <= y < 7:
            return 1
        else:
            return 2
    y_pred = np.array([labeling(y) for y in y_pred])
    # NOTE(review): [:-20] drops the last 20 true labels, presumably so both
    # arrays have the same length (predictions come in whole batches) --
    # confirm this matches the caller's batch-size arithmetic.
    y_true = np.array([labeling(y[0]) for y in y_true])[:-20]
    assert y_pred.shape[0] == y_true.shape[0]
    cnf_mat = confusion_matrix(y_pred, y_true)
    np.set_printoptions(precision=2)
    if normalize:
        # Convert absolute counts to per-row (per-true-class) ratios.
        cnf_mat = cnf_mat.astype('float') / cnf_mat.sum(axis=1)[:, np.newaxis]
    plt.figure()
    plt.imshow(cnf_mat, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title("Confusion Matrix")
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    # Write each cell value into the plot, white text on dark cells.
    thresh = cnf_mat.max() / 2.
    for i, j in itertools.product(range(cnf_mat.shape[0]), range(cnf_mat.shape[1])):
        plt.text(j, i, format(cnf_mat[i, j], '.2f'),
                 horizontalalignment="center",
                 color="white" if cnf_mat[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.savefig("./confusion_matrix.png")
    plt.show()
def load_trained_embeds(embed_mode='char'):
    """Load the embedding backend selected by ``embed_mode``.

    :param embed_mode: 'd2v' for Doc2Vec, 'w2v' for Word2Vec,
        anything else falls back to character embeddings
    :return: the loaded embedding object
    """
    if embed_mode == 'd2v':
        vec = Doc2VecEmbeddings(config.d2v_model, config.embed_size)
        if config.verbose:
            print("[+] Doc2Vec loaded! Total %d pre-trained sentences, %d dims" % (len(vec), config.embed_size))
        return vec
    if embed_mode == 'w2v':
        vec = Word2VecEmbeddings(config.w2v_model, config.embed_size)
        if config.verbose:
            print("[+] Word2Vec loaded! Total %d pre-trained words, %d dims" % (len(vec), config.embed_size))
        return vec
    vec = Char2VecEmbeddings()
    if config.verbose:
        print("[+] Using Char2Vec, %d dims" % config.embed_size)
    return vec
if __name__ == '__main__':
    embed_type = config.use_pre_trained_embeds
    # Stage 1 : loading trained embeddings
    vectors = load_trained_embeds(embed_type)
    # Stage 2 : loading tokenize data
    if config.use_pre_trained_embeds == 'c2v': # Char2Vec
        # Reuse a previously analyzed dataset when available, otherwise
        # analyze from the DB and persist the result for next time.
        if os.path.isfile(config.processed_dataset):
            ds = DataLoader(file=config.processed_dataset,
                            fn_to_save=None,
                            load_from='db',
                            n_classes=config.n_classes,
                            analyzer='char',
                            is_analyzed=True,
                            use_save=False,
                            config=config) # DataSet Loader
        else:
            ds = DataLoader(file=None,
                            fn_to_save=config.processed_dataset,
                            load_from='db',
                            n_classes=config.n_classes,
                            analyzer='char',
                            is_analyzed=False,
                            use_save=True,
                            config=config) # DataSet Loader
        ds_len = len(ds)
        # One row per review: character one-hot indices, zero-padded/truncated
        # to config.sequence_length.
        x_data = np.zeros((ds_len, config.sequence_length), dtype=np.uint8)
        sen_len = list()
        min_length, max_length, avg_length = config.sequence_length, 0, 0
        for i in tqdm(range(ds_len)):
            sentence = ' '.join(ds.sentences[i]).strip('\n')
            sentence_length = len(sentence)
            # Track corpus length statistics for the verbose report below.
            if sentence_length < min_length:
                min_length = sentence_length
            if sentence_length > max_length:
                max_length = sentence_length
            sen_len.append(sentence_length)
            sent = vectors.decompose_str_as_one_hot(sentence,
                                                    warning=False)[:config.sequence_length]
            x_data[i] = np.pad(sent, (0, config.sequence_length - len(sent)), 'constant', constant_values=0)
        if config.verbose:
            print("[*] Total %d samples (training)" % x_data.shape[0])
            print("  [*] min length of reviews : %d" % min_length)
            print("  [*] max length of reviews : %d" % max_length)
            avg_length = sum(sen_len) / x_data.shape[0]
            print("  [*] avg length of reviews : %d" % avg_length)
    else: # Word2Vec / Doc2Vec
        ds = DataLoader(file=config.processed_dataset,
                        n_classes=config.n_classes,
                        analyzer=None,
                        is_analyzed=True,
                        use_save=False,
                        config=config) # DataSet Loader
        ds_len = len(ds)
        # One row per review: word indices, padded with the out-of-vocabulary
        # index (config.vocab_size).
        x_data = np.zeros((ds_len, config.sequence_length), dtype=np.int32)
        for i in tqdm(range(ds_len)):
            sent = ds.sentences[i][:config.sequence_length]
            x_data[i] = np.pad(vectors.words_to_index(sent),
                               (0, config.sequence_length - len(sent)), 'constant', constant_values=config.vocab_size)
    y_data = np.array(ds.labels).reshape(-1, config.n_classes)
    # Free the loader; only the numpy arrays are needed from here on.
    ds = None
    if config.verbose:
        print("[*] sentence to %s index conversion finish!" % config.use_pre_trained_embeds)
    if refine_data:
        # resizing the amount of rate-10 data
        # 2.5M to 500K # downsize to 20%
        if not config.n_classes == 1:
            rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if np.argmax(y, axis=-1) == 9]
        else:
            rate_10_idx = [idx for idx, y in tqdm(enumerate(y_data)) if y == 10]
        # Randomly pick 80% of the rate-10 rows and drop them.
        rand_idx = np.random.choice(rate_10_idx, 4 * len(rate_10_idx) // 5)
        x_data = np.delete(x_data, rand_idx, axis=0).reshape(-1, config.sequence_length)
        y_data = np.delete(y_data, rand_idx, axis=0).reshape(-1, config.n_classes)
        if config.verbose:
            print("[*] refined comment : ", x_data.shape)
            print("[*] refined rate   : ", y_data.shape)
    # shuffle/split data
    x_train, x_valid, y_train, y_valid = train_test_split(x_data, y_data, random_state=config.seed,
                                                          test_size=config.test_size, shuffle=True)
    if config.verbose:
        print("[*] train/test %d/%d(%.1f/%.1f) split!" % (len(y_train), len(y_valid),
                                                          1. - config.test_size, config.test_size))
    del x_data, y_data
    data_size = x_train.shape[0]
    # DataSet Iterator
    di = DataIterator(x=x_train, y=y_train, batch_size=config.batch_size)
    # Let TensorFlow grow GPU memory on demand instead of grabbing it all.
    if config.device == 'gpu':
        dev_config = tf.ConfigProto()
        dev_config.gpu_options.allow_growth = True
    else:
        dev_config = None
    with tf.Session(config=dev_config) as s:
        # Stage 3 : build the selected model inside this session.
        if config.model == 'charcnn':
            # Model Loaded
            model = TextCNN(s=s,
                            mode=config.mode,
                            w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
                            n_classes=config.n_classes,
                            optimizer=config.optimizer,
                            kernel_sizes=config.kernel_size,
                            n_filters=config.filter_size,
                            n_dims=config.embed_size,
                            vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
                            sequence_length=config.sequence_length,
                            lr=config.lr,
                            lr_decay=config.lr_decay,
                            lr_lower_boundary=config.lr_lower_boundary,
                            fc_unit=config.fc_unit,
                            th=config.act_threshold,
                            grad_clip=config.grad_clip,
                            summary=config.pretrained,
                            score_function=config.score_function,
                            use_se_module=config.use_se_module,
                            se_radio=config.se_ratio,
                            se_type=config.se_type,
                            use_multi_channel=config.use_multi_channel)
        elif config.model == 'charrnn':
            model = TextRNN(s=s,
                            mode=config.mode,
                            w2v_embeds=vectors.embeds if not embed_type == 'c2v' else None,
                            n_classes=config.n_classes,
                            optimizer=config.optimizer,
                            n_gru_cells=config.n_gru_cells,
                            n_gru_layers=config.n_gru_layers,
                            n_attention_size=config.n_attention_size,
                            n_dims=config.embed_size,
                            vocab_size=config.character_size if embed_type == 'c2v' else config.vocab_size + 1,
                            sequence_length=config.sequence_length,
                            lr=config.lr,
                            lr_decay=config.lr_decay,
                            lr_lower_boundary=config.lr_lower_boundary,
                            fc_unit=config.fc_unit,
                            grad_clip=config.grad_clip,
                            summary=config.pretrained)
        else:
            raise NotImplementedError("[-] Not Implemented Yet")
        if config.verbose:
            print("[+] %s model loaded" % config.model)
        # Initializing
        s.run(tf.global_variables_initializer())
        # exporting config
        export_config()
        # loading checkpoint
        global_step = 0
        if checkpoint:
            print("[*] Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(config.pretrained)
            if ckpt and ckpt.model_checkpoint_path:
                # Restores from checkpoint
                model.saver.restore(s, ckpt.model_checkpoint_path)
                # The step counter is encoded in the checkpoint filename suffix.
                global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])
                print("[+] global step : %d" % global_step, " successfully loaded")
            else:
                print('[-] No checkpoint file found')
        start_time = time.time()
        if config.is_train:
            # Stage 4 : training loop.
            best_loss = 1e1 # initial value
            batch_size = config.batch_size
            # NOTE(review): Variable.assign() only builds an op and it is
            # never passed to s.run() here, so the graph variable is not
            # actually updated -- the Python counter below does the tracking.
            model.global_step.assign(tf.constant(global_step))
            restored_epochs = global_step // (data_size // batch_size)
            for epoch in range(restored_epochs, config.epochs):
                for x_tr, y_tr in di.iterate():
                    # training
                    _, loss, acc = s.run([model.train_op, model.loss, model.accuracy],
                                         feed_dict={
                                             model.x: x_tr,
                                             model.y: y_tr,
                                             model.do_rate: config.drop_out,
                                         })
                    if global_step and global_step % config.logging_step == 0:
                        # validation
                        rand_idx = np.random.choice(np.arange(len(y_valid)), len(y_valid) // 20) # 5% of valid data
                        x_va, y_va = x_valid[rand_idx], y_valid[rand_idx]
                        valid_loss, valid_acc = 0., 0.
                        valid_iter = len(y_va) // batch_size
                        # Average loss/accuracy over the sampled validation batches
                        # (dropout disabled during evaluation).
                        for i in tqdm(range(0, valid_iter)):
                            v_loss, v_acc = s.run([model.loss, model.accuracy],
                                                  feed_dict={
                                                      model.x: x_va[batch_size * i:batch_size * (i + 1)],
                                                      model.y: y_va[batch_size * i:batch_size * (i + 1)],
                                                      model.do_rate: .0,
                                                  })
                            valid_acc += v_acc
                            valid_loss += v_loss
                        valid_loss /= valid_iter
                        valid_acc /= valid_iter
                        print("[*] epoch %03d global step %07d" % (epoch, global_step),
                              " train_loss : {:.8f} train_acc : {:.4f}".format(loss, acc),
                              " valid_loss : {:.8f} valid_acc : {:.4f}".format(valid_loss, valid_acc))
                        # summary
                        summary = s.run(model.merged,
                                        feed_dict={
                                            model.x: x_tr,
                                            model.y: y_tr,
                                            model.do_rate: .0,
                                        })
                        # Summary saver
                        model.writer.add_summary(summary, global_step)
                        # Model save
                        model.saver.save(s, config.pretrained + '%s.ckpt' % config.model,
                                         global_step=global_step)
                        # Keep a separate best-loss checkpoint.
                        if valid_loss < best_loss:
                            print("[+] model improved {:.7f} to {:.7f}".format(best_loss, valid_loss))
                            best_loss = valid_loss
                            model.best_saver.save(s, config.pretrained + '%s-best_loss.ckpt' % config.model,
                                                  global_step=global_step)
                        print()
                    # NOTE(review): this builds a new assign_add op every
                    # iteration and never runs it; only the Python counter
                    # below advances the step.
                    model.global_step.assign_add(tf.constant(1))
                    global_step += 1
            end_time = time.time()
            print("[+] Training Done! Elapsed {:.8f}s".format(end_time - start_time))
        else: # test
            # Stage 4 (alternative) : evaluation over the full validation set.
            x_train, y_train = None, None
            x_va, y_va = x_valid, y_valid
            valid_loss, valid_acc = 0., 0.
            batch_size = config.batch_size
            valid_iter = len(y_va) // config.batch_size
            # Collect per-sample predicted rates for the confusion matrix.
            v_rates = []
            for i in tqdm(range(0, valid_iter)):
                v_loss, v_acc, v_rate = s.run([model.loss, model.accuracy, model.rates],
                                              feed_dict={
                                                  model.x: x_va[batch_size * i:batch_size * (i + 1)],
                                                  model.y: y_va[batch_size * i:batch_size * (i + 1)],
                                                  model.do_rate: .0,
                                              })
                valid_acc += v_acc
                valid_loss += v_loss
                for j in v_rate:
                    v_rates.append(j)
            valid_loss /= valid_iter
            valid_acc /= valid_iter
            print("[+] Validation Result (%s model %d global steps), total %d samples" %
                  (config.model, global_step, x_valid.shape[0]))
            print("  => valid_loss (MSE) : {:.8f} valid_acc (th=1.0) : {:.4f}".format(valid_loss, valid_acc))
            """
            with open('pred.txt', 'w') as f:
                f.writelines([str("{:.4f}\n".format(rate[0])) for rate in v_rates])
            """
            # confusion matrix
            data_confusion_matrix(v_rates, y_va, ["bad", "normal", "good"])
| [
"matplotlib.pyplot.grid",
"matplotlib.pyplot.ylabel",
"model.textcnn.TextCNN",
"numpy.array",
"tensorflow.set_random_seed",
"dataloader.DataLoader",
"model.textrnn.TextRNN",
"matplotlib.pyplot.imshow",
"config.export_config",
"dataloader.Doc2VecEmbeddings",
"argparse.ArgumentParser",
"numpy.de... | [((388, 476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""train/test movie review classification model"""'}), "(description=\n 'train/test movie review classification model')\n", (411, 476), False, 'import argparse\n'), ((791, 803), 'config.get_config', 'get_config', ([], {}), '()\n', (801, 803), False, 'from config import get_config, export_config\n'), ((805, 832), 'numpy.random.seed', 'np.random.seed', (['config.seed'], {}), '(config.seed)\n', (819, 832), True, 'import numpy as np\n'), ((833, 864), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['config.seed'], {}), '(config.seed)\n', (851, 864), True, 'import tensorflow as tf\n'), ((1192, 1223), 'numpy.zeros', 'np.zeros', (['(10,)'], {'dtype': 'np.int32'}), '((10,), dtype=np.int32)\n', (1200, 1223), True, 'import numpy as np\n'), ((1237, 1245), 'tqdm.tqdm', 'tqdm', (['y_'], {}), '(y_)\n', (1241, 1245), False, 'from tqdm import tqdm\n'), ((1366, 1393), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1376, 1393), True, 'import matplotlib.pyplot as plt\n'), ((1399, 1417), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""rate"""'], {}), "('rate')\n", (1409, 1417), True, 'import matplotlib.pyplot as plt\n'), ((1422, 1445), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""frequency"""'], {}), "('frequency')\n", (1432, 1445), True, 'import matplotlib.pyplot as plt\n'), ((1450, 1464), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1458, 1464), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1621), 'matplotlib.pyplot.savefig', 'plt.savefig', (['img'], {}), '(img)\n', (1616, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((2217, 2249), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_pred', 'y_true'], {}), '(y_pred, y_true)\n', (2233, 2249), False, 'from 
sklearn.metrics import confusion_matrix\n'), ((2254, 2286), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(2)'}), '(precision=2)\n', (2273, 2286), True, 'import numpy as np\n'), ((2390, 2402), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2400, 2402), True, 'import matplotlib.pyplot as plt\n'), ((2408, 2471), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cnf_mat'], {'interpolation': '"""nearest"""', 'cmap': 'plt.cm.Blues'}), "(cnf_mat, interpolation='nearest', cmap=plt.cm.Blues)\n", (2418, 2471), True, 'import matplotlib.pyplot as plt\n'), ((2476, 2505), 'matplotlib.pyplot.title', 'plt.title', (['"""Confusion Matrix"""'], {}), "('Confusion Matrix')\n", (2485, 2505), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2524), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2522, 2524), True, 'import matplotlib.pyplot as plt\n'), ((2570, 2613), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'labels'], {'rotation': '(45)'}), '(tick_marks, labels, rotation=45)\n', (2580, 2613), True, 'import matplotlib.pyplot as plt\n'), ((2618, 2648), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'labels'], {}), '(tick_marks, labels)\n', (2628, 2648), True, 'import matplotlib.pyplot as plt\n'), ((2943, 2967), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (2953, 2967), True, 'import matplotlib.pyplot as plt\n'), ((2972, 3001), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (2982, 3001), True, 'import matplotlib.pyplot as plt\n'), ((3006, 3024), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3022, 3024), True, 'import matplotlib.pyplot as plt\n'), ((3030, 3067), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./confusion_matrix.png"""'], {}), "('./confusion_matrix.png')\n", (3041, 3067), True, 'import matplotlib.pyplot as plt\n'), ((3073, 3083), 'matplotlib.pyplot.show', 'plt.show', ([], 
{}), '()\n', (3081, 3083), True, 'import matplotlib.pyplot as plt\n'), ((7918, 8023), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x_data', 'y_data'], {'random_state': 'config.seed', 'test_size': 'config.test_size', 'shuffle': '(True)'}), '(x_data, y_data, random_state=config.seed, test_size=config\n .test_size, shuffle=True)\n', (7934, 8023), False, 'from sklearn.model_selection import train_test_split\n'), ((8377, 8441), 'dataloader.DataIterator', 'DataIterator', ([], {'x': 'x_train', 'y': 'y_train', 'batch_size': 'config.batch_size'}), '(x=x_train, y=y_train, batch_size=config.batch_size)\n', (8389, 8441), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((3274, 3328), 'dataloader.Doc2VecEmbeddings', 'Doc2VecEmbeddings', (['config.d2v_model', 'config.embed_size'], {}), '(config.d2v_model, config.embed_size)\n', (3291, 3328), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4174, 4214), 'os.path.isfile', 'os.path.isfile', (['config.processed_dataset'], {}), '(config.processed_dataset)\n', (4188, 4214), False, 'import os\n'), ((5074, 5132), 'numpy.zeros', 'np.zeros', (['(ds_len, config.sequence_length)'], {'dtype': 'np.uint8'}), '((ds_len, config.sequence_length), dtype=np.uint8)\n', (5082, 5132), True, 'import numpy as np\n'), ((6267, 6404), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'config.processed_dataset', 'n_classes': 'config.n_classes', 'analyzer': 'None', 'is_analyzed': '(True)', 'use_save': '(False)', 'config': 'config'}), '(file=config.processed_dataset, n_classes=config.n_classes,\n analyzer=None, is_analyzed=True, use_save=False, config=config)\n', (6277, 6404), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((6583, 6641), 'numpy.zeros', 'np.zeros', (['(ds_len, config.sequence_length)'], {'dtype': 
'np.int32'}), '((ds_len, config.sequence_length), dtype=np.int32)\n', (6591, 6641), True, 'import numpy as np\n'), ((8495, 8511), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (8509, 8511), True, 'import tensorflow as tf\n'), ((8609, 8638), 'tensorflow.Session', 'tf.Session', ([], {'config': 'dev_config'}), '(config=dev_config)\n', (8619, 8638), True, 'import tensorflow as tf\n'), ((11390, 11405), 'config.export_config', 'export_config', ([], {}), '()\n', (11403, 11405), False, 'from config import get_config, export_config\n'), ((12033, 12044), 'time.time', 'time.time', ([], {}), '()\n', (12042, 12044), False, 'import time\n'), ((3531, 3586), 'dataloader.Word2VecEmbeddings', 'Word2VecEmbeddings', (['config.w2v_model', 'config.embed_size'], {}), '(config.w2v_model, config.embed_size)\n', (3549, 3586), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((3767, 3787), 'dataloader.Char2VecEmbeddings', 'Char2VecEmbeddings', ([], {}), '()\n', (3785, 3787), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4233, 4410), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'config.processed_dataset', 'fn_to_save': 'None', 'load_from': '"""db"""', 'n_classes': 'config.n_classes', 'analyzer': '"""char"""', 'is_analyzed': '(True)', 'use_save': '(False)', 'config': 'config'}), "(file=config.processed_dataset, fn_to_save=None, load_from='db',\n n_classes=config.n_classes, analyzer='char', is_analyzed=True, use_save\n =False, config=config)\n", (4243, 4410), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((4647, 4823), 'dataloader.DataLoader', 'DataLoader', ([], {'file': 'None', 'fn_to_save': 'config.processed_dataset', 'load_from': '"""db"""', 'n_classes': 'config.n_classes', 'analyzer': '"""char"""', 'is_analyzed': '(False)', 'use_save': '(True)', 
'config': 'config'}), "(file=None, fn_to_save=config.processed_dataset, load_from='db',\n n_classes=config.n_classes, analyzer='char', is_analyzed=False,\n use_save=True, config=config)\n", (4657, 4823), False, 'from dataloader import Word2VecEmbeddings, Doc2VecEmbeddings, Char2VecEmbeddings, DataLoader, DataIterator\n'), ((6934, 6953), 'numpy.array', 'np.array', (['ds.labels'], {}), '(ds.labels)\n', (6942, 6953), True, 'import numpy as np\n'), ((8730, 9494), 'model.textcnn.TextCNN', 'TextCNN', ([], {'s': 's', 'mode': 'config.mode', 'w2v_embeds': "(vectors.embeds if not embed_type == 'c2v' else None)", 'n_classes': 'config.n_classes', 'optimizer': 'config.optimizer', 'kernel_sizes': 'config.kernel_size', 'n_filters': 'config.filter_size', 'n_dims': 'config.embed_size', 'vocab_size': "(config.character_size if embed_type == 'c2v' else config.vocab_size + 1)", 'sequence_length': 'config.sequence_length', 'lr': 'config.lr', 'lr_decay': 'config.lr_decay', 'lr_lower_boundary': 'config.lr_lower_boundary', 'fc_unit': 'config.fc_unit', 'th': 'config.act_threshold', 'grad_clip': 'config.grad_clip', 'summary': 'config.pretrained', 'score_function': 'config.score_function', 'use_se_module': 'config.use_se_module', 'se_radio': 'config.se_ratio', 'se_type': 'config.se_type', 'use_multi_channel': 'config.use_multi_channel'}), "(s=s, mode=config.mode, w2v_embeds=vectors.embeds if not embed_type ==\n 'c2v' else None, n_classes=config.n_classes, optimizer=config.optimizer,\n kernel_sizes=config.kernel_size, n_filters=config.filter_size, n_dims=\n config.embed_size, vocab_size=config.character_size if embed_type ==\n 'c2v' else config.vocab_size + 1, sequence_length=config.\n sequence_length, lr=config.lr, lr_decay=config.lr_decay,\n lr_lower_boundary=config.lr_lower_boundary, fc_unit=config.fc_unit, th=\n config.act_threshold, grad_clip=config.grad_clip, summary=config.\n pretrained, score_function=config.score_function, use_se_module=config.\n use_se_module, 
se_radio=config.se_ratio, se_type=config.se_type,\n use_multi_channel=config.use_multi_channel)\n", (8737, 9494), False, 'from model.textcnn import TextCNN\n'), ((11319, 11352), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (11350, 11352), True, 'import tensorflow as tf\n'), ((11551, 11599), 'tensorflow.train.get_checkpoint_state', 'tf.train.get_checkpoint_state', (['config.pretrained'], {}), '(config.pretrained)\n', (11580, 11599), True, 'import tensorflow as tf\n'), ((15486, 15497), 'time.time', 'time.time', ([], {}), '()\n', (15495, 15497), False, 'import time\n'), ((1333, 1354), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (1342, 1354), True, 'import numpy as np\n'), ((7552, 7587), 'numpy.delete', 'np.delete', (['x_data', 'rand_idx'], {'axis': '(0)'}), '(x_data, rand_idx, axis=0)\n', (7561, 7587), True, 'import numpy as np\n'), ((7641, 7676), 'numpy.delete', 'np.delete', (['y_data', 'rand_idx'], {'axis': '(0)'}), '(y_data, rand_idx, axis=0)\n', (7650, 7676), True, 'import numpy as np\n'), ((10098, 10703), 'model.textrnn.TextRNN', 'TextRNN', ([], {'s': 's', 'mode': 'config.mode', 'w2v_embeds': "(vectors.embeds if not embed_type == 'c2v' else None)", 'n_classes': 'config.n_classes', 'optimizer': 'config.optimizer', 'n_gru_cells': 'config.n_gru_cells', 'n_gru_layers': 'config.n_gru_layers', 'n_attention_size': 'config.n_attention_size', 'n_dims': 'config.embed_size', 'vocab_size': "(config.character_size if embed_type == 'c2v' else config.vocab_size + 1)", 'sequence_length': 'config.sequence_length', 'lr': 'config.lr', 'lr_decay': 'config.lr_decay', 'lr_lower_boundary': 'config.lr_lower_boundary', 'fc_unit': 'config.fc_unit', 'grad_clip': 'config.grad_clip', 'summary': 'config.pretrained'}), "(s=s, mode=config.mode, w2v_embeds=vectors.embeds if not embed_type ==\n 'c2v' else None, n_classes=config.n_classes, optimizer=config.optimizer,\n n_gru_cells=config.n_gru_cells, 
n_gru_layers=config.n_gru_layers,\n n_attention_size=config.n_attention_size, n_dims=config.embed_size,\n vocab_size=config.character_size if embed_type == 'c2v' else config.\n vocab_size + 1, sequence_length=config.sequence_length, lr=config.lr,\n lr_decay=config.lr_decay, lr_lower_boundary=config.lr_lower_boundary,\n fc_unit=config.fc_unit, grad_clip=config.grad_clip, summary=config.\n pretrained)\n", (10105, 10703), False, 'from model.textrnn import TextRNN\n'), ((12199, 12223), 'tensorflow.constant', 'tf.constant', (['global_step'], {}), '(global_step)\n', (12210, 12223), True, 'import tensorflow as tf\n'), ((7334, 7355), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(-1)'}), '(y, axis=-1)\n', (7343, 7355), True, 'import numpy as np\n'), ((15409, 15423), 'tensorflow.constant', 'tf.constant', (['(1)'], {}), '(1)\n', (15420, 15423), True, 'import tensorflow as tf\n')] |
import pytest
from flake8.exceptions import ExecutionError
from flake8_adjustable_complexity.config import DEFAULT_CONFIG
@pytest.mark.parametrize(
('args', 'max_mccabe_complexity'),
[
(['--max-mccabe-complexity=5'], 5),
(['--max-adjustable-complexity=10'], 10),
([], DEFAULT_CONFIG.max_mccabe_complexity),
],
)
def test_parse_max_mccabe_complexity(parse_options, args, max_mccabe_complexity):
config = parse_options(args)
assert config.max_mccabe_complexity == max_mccabe_complexity
@pytest.mark.parametrize(
('args', 'max_complexity_per_path'),
[
(
[
'--per-path-max-adjustable-complexity',
'foo.py:10,bar.py:20',
],
{
'foo.py': 10,
'bar.py': 20,
},
),
([], DEFAULT_CONFIG.max_complexity_per_path),
],
)
def test_parse_max_complexity_per_path(parse_options, args, max_complexity_per_path):
config = parse_options(args)
assert config.max_complexity_per_path == max_complexity_per_path
def test_parse_max_complexity_per_path_error(parse_options):
args = [
'--per-path-max-adjustable-complexity',
'foo.py:invalid-complexity',
]
with pytest.raises(ExecutionError) as excinfo:
parse_options(args)
assert "Couldn\'t parse --per-path-adjustable-max-complexity" in str(excinfo.value)
@pytest.mark.parametrize(
('args', 'var_names_blacklist'),
[
(
['--var-names-extra-blacklist=my_obj,my_var'],
DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'},
),
(
['--var-names-whitelist=var,result'],
DEFAULT_CONFIG.var_names_blacklist - {'var', 'result'},
),
(
[
'--var-names-extra-blacklist=my_obj,my_var',
'--var-names-whitelist=var,result',
],
(DEFAULT_CONFIG.var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'},
),
([], DEFAULT_CONFIG.var_names_blacklist),
],
)
def test_parse_var_names_blacklist(parse_options, args, var_names_blacklist):
config = parse_options(args)
assert config.var_names_blacklist == var_names_blacklist
| [
"pytest.mark.parametrize",
"pytest.raises"
] | [((126, 318), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('args', 'max_mccabe_complexity')", "[(['--max-mccabe-complexity=5'], 5), (['--max-adjustable-complexity=10'], \n 10), ([], DEFAULT_CONFIG.max_mccabe_complexity)]"], {}), "(('args', 'max_mccabe_complexity'), [([\n '--max-mccabe-complexity=5'], 5), (['--max-adjustable-complexity=10'], \n 10), ([], DEFAULT_CONFIG.max_mccabe_complexity)])\n", (149, 318), False, 'import pytest\n'), ((535, 755), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('args', 'max_complexity_per_path')", "[(['--per-path-max-adjustable-complexity', 'foo.py:10,bar.py:20'], {\n 'foo.py': 10, 'bar.py': 20}), ([], DEFAULT_CONFIG.max_complexity_per_path)]"], {}), "(('args', 'max_complexity_per_path'), [([\n '--per-path-max-adjustable-complexity', 'foo.py:10,bar.py:20'], {\n 'foo.py': 10, 'bar.py': 20}), ([], DEFAULT_CONFIG.max_complexity_per_path)]\n )\n", (558, 755), False, 'import pytest\n'), ((1432, 1934), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('args', 'var_names_blacklist')", "[(['--var-names-extra-blacklist=my_obj,my_var'], DEFAULT_CONFIG.\n var_names_blacklist | {'my_obj', 'my_var'}), ([\n '--var-names-whitelist=var,result'], DEFAULT_CONFIG.var_names_blacklist -\n {'var', 'result'}), (['--var-names-extra-blacklist=my_obj,my_var',\n '--var-names-whitelist=var,result'], (DEFAULT_CONFIG.\n var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'}), ([],\n DEFAULT_CONFIG.var_names_blacklist)]"], {}), "(('args', 'var_names_blacklist'), [([\n '--var-names-extra-blacklist=my_obj,my_var'], DEFAULT_CONFIG.\n var_names_blacklist | {'my_obj', 'my_var'}), ([\n '--var-names-whitelist=var,result'], DEFAULT_CONFIG.var_names_blacklist -\n {'var', 'result'}), (['--var-names-extra-blacklist=my_obj,my_var',\n '--var-names-whitelist=var,result'], (DEFAULT_CONFIG.\n var_names_blacklist | {'my_obj', 'my_var'}) - {'var', 'result'}), ([],\n DEFAULT_CONFIG.var_names_blacklist)])\n", (1455, 1934), False, 'import 
pytest\n'), ((1270, 1299), 'pytest.raises', 'pytest.raises', (['ExecutionError'], {}), '(ExecutionError)\n', (1283, 1299), False, 'import pytest\n')] |
"""
Profile the time needed for retrieval.
We consider retrieval in a corpus of 1M videos, 1K videos are added, 10K queries are retrieved.
Calculate the time needed for adding 1K videos, and performing retrieval for 10K queries.
1, Data Loading time is ignored, consider it is hidden by computation time.
2, Sort time is ignored, since it is the similar among the methods.
"""
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import pprint
from tqdm import tqdm, trange
from baselines.crossmodal_moment_localization.model_xml import XML, xml_base_config
from baselines.mixture_embedding_experts.model import MEE, mee_base_cfg
from baselines.clip_alignment_with_language.model import CALWithSub, cal_base_cfg
from baselines.excl.model import EXCL, excl_base_cfg
from utils.basic_utils import save_json
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
def mask_logits(target, mask):
    """Keep `target` where `mask` is 1; drive masked-out positions to -1e10.

    Equivalent to adding a large negative constant at positions where
    `mask` is 0, so they vanish after a subsequent softmax.
    """
    return target * mask - (1 - mask) * 1e10
class ProfileBase(object):
    """Shared scaffolding for per-model retrieval profiling.

    Holds the corpus-scale constants used to size fake inputs, builds the
    model through the subclass hooks `get_model_config` / `get_model`, and
    fabricates random tensors of the right shapes for the timing runs.
    """
    # Scenario sizes: 1M-video corpus, 1K new videos, 10K new queries.
    N_NewQuery = 1e4
    N_NewVideo = 1e3
    N_Videos = 1e6
    AvgVideoLength = 100
    ClipLength = 5
    AvgClipPerVideo = AvgVideoLength // ClipLength  # max_ctx_l
    AvgWordInQuery = 15
    # estimated by
    # scales=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], => max_proposal = 14
    AvgProposalPerVideo = 170
    MaxClipPerProposal = 14  # proposals are padded to this many clips
    AvgClipPerProposal = 7  # empirically 6.88
    VideoFeatureDim = 3074  # 1024 + 2048 + 2 (TEF)
    SubFeatureDim = 770
    QueryFeatureDim = 768
    HiddenSize = 256
    N_Runs = 5  # number of timing repetitions averaged per measurement

    def __init__(self, device=torch.device("cuda:0"), ctx_batch_size=400, query_batch_size=100):
        self.device = device
        self.ctx_batch_size = ctx_batch_size
        self.query_batch_size = query_batch_size
        # Subclass hooks run here; config must exist before the model is built.
        self.model_config = self.get_model_config()
        print(self.model_config)
        self.model = self.get_model()

    def get_model(self):
        # Subclass hook: return the model under test (on device, eval mode).
        return None

    def get_model_config(self):
        # Subclass hook: return the config used to construct the model.
        return None

    def set_ctx_batch_size(self, batch_size):
        self.ctx_batch_size = batch_size

    def set_query_batch_size(self, batch_size):
        self.query_batch_size = batch_size

    def cast_dict_inputs_to_device(self, dict_inputs, device):
        """Move every tensor value of `dict_inputs` onto `device`."""
        moved = {}
        for name, tensor in dict_inputs.items():
            moved[name] = tensor.to(device)
        return moved

    def get_fake_ctx_raw_input_st_ed(self, no_tef=False):
        """Uninitialized context features shaped like one context batch.

        When `no_tef` is set, the two TEF dims are dropped from both streams.
        """
        n_ctx = self.ctx_batch_size
        max_len = self.model_config.max_ctx_l
        tef_dims = 2 if no_tef else 0
        return {
            "video_feat": torch.FloatTensor(n_ctx, max_len, self.VideoFeatureDim - tef_dims),
            "sub_feat": torch.FloatTensor(n_ctx, max_len, self.SubFeatureDim - tef_dims),
            "ctx_mask": torch.FloatTensor(n_ctx, max_len),
        }

    def get_fake_raw_query(self):
        """Uninitialized query features plus an all-ones mask."""
        n_query = self.query_batch_size
        return {
            "query_feat": torch.FloatTensor(n_query, self.AvgWordInQuery, self.QueryFeatureDim),
            "query_mask": torch.ones(n_query, self.AvgWordInQuery),
        }
"""
from baselines.profiling.profile_main import ProfileXML
profile_xml = ProfileXML(ctx_batch_size=400, query_batch_size=100)
profile_xml.get_ctx_encoding_time()
"""
class ProfileXML(ProfileBase):
    """Profiler for the XML model.

    Times four stages on fake inputs: context encoding, query encoding,
    video-level retrieval scoring, and st/ed span prediction.
    """
    def get_model_config(self):
        # Mutates the shared `xml_base_config` in place to the profiling
        # dimensions, then returns it.
        xml_base_config["ctx_mode"] = "video_sub_tef"
        xml_base_config["merge_two_stream"] = True
        xml_base_config["cross_att"] = True
        xml_base_config["max_ctx_l"] = self.AvgClipPerVideo
        xml_base_config["visual_input_size"] = self.VideoFeatureDim
        xml_base_config["query_input_size"] = self.QueryFeatureDim
        xml_base_config["sub_input_size"] = self.SubFeatureDim
        xml_base_config["hidden_size"] = self.HiddenSize
        return xml_base_config
    def get_model(self):
        # Build the XML model on the profiling device, in eval mode.
        model = XML(self.model_config)
        model.to(self.device)
        model.eval()
        return model
    def get_fake_encoded_ctx(self):
        """Uninitialized tensors shaped like already-encoded context."""
        return dict(
            ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l, self.HiddenSize),
            ctx_mask=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l),
        )
    def get_fake_encoded_query(self):
        # NOTE(review): batch dim uses ctx_batch_size rather than
        # query_batch_size — presumably deliberate so it pairs with the fake
        # context batch; confirm.
        return dict(query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize))
    def _get_ctx_encoding_time(self, video_feat, sub_feat, ctx_mask):
        """Time one cross-encoding pass covering both modalities (video + sub)."""
        torch.cuda.synchronize()  # drain pending GPU work before starting the clock
        st_time = time.time()
        self.model.cross_encode_context(video_feat, ctx_mask, sub_feat, ctx_mask)
        torch.cuda.synchronize()  # wait for kernels to finish before reading the clock
        return time.time() - st_time
    def get_ctx_encoding_time(self):
        """Run context encoding N_Runs times; return dict(avg=..., std=...) in seconds."""
        with torch.no_grad():
            fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_ctx_raw_input_st_ed(), self.device)
            raw_video = fake_ctx_inputs["video_feat"]
            raw_sub = fake_ctx_inputs["sub_feat"]
            ctx_mask = fake_ctx_inputs["ctx_mask"]
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_ctx_encoding_time(raw_video, raw_sub, ctx_mask)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_query_encoding_time(self, raw_query, query_mask):
        """Time one full query encoding pass (covers both modality queries)."""
        torch.cuda.synchronize()
        st_time = time.time()
        encoded_query = self.model.encode_input(raw_query, query_mask,
                                                self.model.query_input_proj,
                                                self.model.query_encoder,
                                                self.model.query_pos_embed)  # (N, Lq, D)
        # split into per-modality (video / sub) query vectors
        video_query, sub_query = \
            self.model.get_modularized_queries(encoded_query, query_mask, return_modular_att=False)
        # project each modality query for st-ed prediction
        video_query = self.model.video_query_linear(video_query)
        sub_query = self.model.sub_query_linear(sub_query)
        torch.cuda.synchronize()
        return time.time() - st_time
    def get_query_encoding_time(self):
        """Run query encoding N_Runs times; return dict(avg=..., std=...) in seconds."""
        with torch.no_grad():
            query_inputs = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
            raw_query = query_inputs["query_feat"]
            query_mask = query_inputs["query_mask"]
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_query_encoding_time(raw_query, query_mask)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_retrieval_time(self, encoded_video_query, encoded_video, ctx_mask):
        """Time video-level scoring for a single modality, then multiply by 2
        to account for both modalities. Queries are assumed pre-encoded."""
        torch.cuda.synchronize()
        st_time = time.time()
        self.model.get_video_level_scores(encoded_video_query, encoded_video, ctx_mask)
        torch.cuda.synchronize()
        return (time.time() - st_time) * 2
    def get_retrieval_time(self):
        """Run retrieval scoring N_Runs times; return dict(avg=..., std=...) in seconds."""
        with torch.no_grad():
            encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
            fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
            encoded_ctx = fake_ctx_inputs["ctx_feat"]
            ctx_mask = fake_ctx_inputs["ctx_mask"]
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_retrieval_time(encoded_query, encoded_ctx, ctx_mask)]
            times = torch.FloatTensor(times)  # since we have two modalities
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_span_prediction_time(self, query_feat, ctx_feat, ctx_mask):
        """Time st/ed span prediction; the self-average below simulates
        combining scores from two modalities."""
        torch.cuda.synchronize()
        st_time = time.time()
        similarity = torch.einsum("md,nld->mnl", query_feat, ctx_feat)
        # averaging with itself is a stand-in for merging a second modality's
        # similarity at the same cost
        similarity = (similarity + similarity) / 2  # (Nq, Nv, L)  from query to all videos.
        n_q, n_c, l = similarity.shape
        similarity = similarity.view(n_q * n_c, 1, l)
        st_prob = self.model.merged_st_predictor(similarity).view(n_q, n_c, l)  # (Nq, Nv, L)
        ed_prob = self.model.merged_ed_predictor(similarity).view(n_q, n_c, l)  # (Nq, Nv, L)
        st_prob = mask_logits(st_prob, ctx_mask)  # mask broadcasts over the query dim
        ed_prob = mask_logits(ed_prob, ctx_mask)
        torch.cuda.synchronize()
        return time.time() - st_time
    def get_span_prediction_time(self):
        """Run span prediction N_Runs times; return dict(avg=..., std=...) in seconds."""
        with torch.no_grad():
            encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
            fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
            encoded_ctx = fake_ctx_inputs["ctx_feat"]
            ctx_mask = fake_ctx_inputs["ctx_mask"]
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_span_prediction_time(encoded_query, encoded_ctx, ctx_mask)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
"""
from baselines.profiling.profile_main import ProfileMEE
profile_mee = ProfileMEE(ctx_batch_size=400, query_batch_size=100)
profile_mee.get_ctx_encoding_time()
"""
class ProfileMEE(ProfileBase):
    """Profiler for the MEE (Mixture of Embedding Experts) model.

    Times context encoding, query encoding, and video-level retrieval
    scoring on fake inputs.
    """
    def get_model_config(self):
        # Mutates the shared `mee_base_cfg` in place, then returns it.
        mee_base_cfg["ctx_mode"] = "video_sub"
        mee_base_cfg["text_input_size"] = self.QueryFeatureDim
        mee_base_cfg["vid_input_size"] = self.VideoFeatureDim
        mee_base_cfg["output_size"] = self.HiddenSize
        return mee_base_cfg
    def get_model(self):
        # Build the MEE model on the profiling device, in eval mode.
        model = MEE(self.model_config)
        model.to(self.device)
        model.eval()
        return model
    def get_fake_raw_ctx(self):
        """Uninitialized video/subtitle features for one context batch.

        MEE uses a single pooled vector per video, hence rank-2 tensors.
        """
        return dict(
            vid_feat=torch.FloatTensor(self.ctx_batch_size, self.VideoFeatureDim),
            sub_feat=torch.FloatTensor(self.ctx_batch_size, self.QueryFeatureDim)
        )
    def get_fake_encoded_ctx_query(self):
        """Uninitialized tensors shaped like already-encoded context and query."""
        return dict(
            ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize),
            query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize)
        )
    def _get_ctx_encoding_time(self, vid_feat, sub_feat):
        """Time one encoding pass over both context streams (video + sub)."""
        torch.cuda.synchronize()  # drain pending GPU work before starting the clock
        st_time = time.time()
        self.model.video_gu(vid_feat)
        self.model.sub_gu(sub_feat)
        torch.cuda.synchronize()  # wait for kernels to finish before reading the clock
        return time.time() - st_time
    def get_ctx_encoding_time(self):
        """Run context encoding N_Runs times; return dict(avg=..., std=...) in seconds."""
        feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_ctx(), self.device)
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_ctx_encoding_time(**feat_dict)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_query_encoding_time(self, query_feat):
        """Time one query encoding pass (covers both modality queries + MoE weights)."""
        torch.cuda.synchronize()
        st_time = time.time()
        pooled_query = self.model.query_pooling(query_feat)  # (N, Dt)
        video_query = self.model.video_query_gu(pooled_query)
        sub_query = self.model.sub_query_gu(pooled_query)
        stream_weights = self.model.moe_fc(pooled_query)  # (N, 2)
        torch.cuda.synchronize()
        return time.time() - st_time
    def get_query_encoding_time(self):
        """Run query encoding N_Runs times; return dict(avg=..., std=...) in seconds."""
        raw_query = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)["query_feat"]
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_query_encoding_time(raw_query)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_retrieval_time(self, encoded_query, encoded_ctx):
        """Time query-vs-context scoring for one modality, then multiply by 2
        to account for both modalities."""
        torch.cuda.synchronize()
        st_time = time.time()
        torch.einsum("md,nd->mn", encoded_query, encoded_ctx)  # (N, N)
        torch.cuda.synchronize()
        return (time.time() - st_time) * 2
    def get_retrieval_time(self):
        """Run retrieval scoring N_Runs times; return dict(avg=..., std=...) in seconds."""
        model_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx_query(), self.device)
        # NOTE(review): keys look swapped (query <- "ctx_feat", ctx <-
        # "query_feat"); both tensors share the same shape so timing is
        # unaffected, but confirm the intent.
        encoded_query = model_inputs["ctx_feat"]
        encoded_ctx = model_inputs["query_feat"]
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_retrieval_time(encoded_query, encoded_ctx)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
class ProfileCAL(ProfileBase):
    """Profiler shared by the CAL and MCN baselines (selected via `model_name`).

    Times moment (context) encoding and query encoding on fake inputs.
    """
    def get_model_config(self):
        # Mutates the shared `cal_base_cfg` in place, then returns it.
        # Input sizes are doubled: global + local features are concatenated.
        cal_base_cfg["ctx_mode"] = "video_sub"
        cal_base_cfg["embedding_size"] = self.QueryFeatureDim
        cal_base_cfg["visual_input_size"] = self.VideoFeatureDim * 2
        cal_base_cfg["textual_input_size"] = self.SubFeatureDim * 2
        cal_base_cfg["output_size"] = self.HiddenSize
        return cal_base_cfg
    def get_model(self):
        # Build the CAL model on the profiling device, in eval mode.
        model = CALWithSub(self.model_config)
        model.to(self.device)
        model.eval()
        return model
    def get_fake_raw_ctx(self, model_name="cal"):
        """The features are `*2` since they use both global and local features.

        NOTE(review): `model_name` is accepted but unused here; the mcn/cal
        distinction is applied later in `_get_ctx_encoding_time` — confirm.
        """
        return dict(
            sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
                                        self.AvgClipPerProposal, self.SubFeatureDim * 2),
            vid_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
                                        self.AvgClipPerProposal, self.VideoFeatureDim * 2))
    def _get_ctx_encoding_time(self, sub_feat, vid_feat, model_name="cal"):
        """Time one moment-encoding pass over both streams (video + sub)."""
        if model_name == "mcn":
            # MCN pools over clips before encoding; the sum over dim 2 is
            # outside the timed region.
            sub_feat = sub_feat.sum(2)
            vid_feat = vid_feat.sum(2)
        torch.cuda.synchronize()  # drain pending GPU work before starting the clock
        st_time = time.time()
        self.model.moment_encoder(vid_feat, module_name="video")
        self.model.moment_encoder(sub_feat, module_name="sub")
        torch.cuda.synchronize()  # wait for kernels to finish before reading the clock
        return time.time() - st_time
    def get_ctx_encoding_time(self, model_name="cal"):
        """model_name: str, `cal` or `mcn`"""
        feat_dict = self.cast_dict_inputs_to_device(
            self.get_fake_raw_ctx(model_name=model_name), self.device)
        feat_dict["model_name"] = model_name  # forwarded via ** to _get_ctx_encoding_time
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_ctx_encoding_time(**feat_dict)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
    def _get_query_encoding_time(self, query_feat, query_mask):
        """Time one query-encoding pass."""
        torch.cuda.synchronize()
        st_time = time.time()
        self.model.query_encoder(query_feat, query_mask)
        torch.cuda.synchronize()
        return time.time() - st_time
    def get_query_encoding_time(self):
        """Run query encoding N_Runs times; return dict(avg=..., std=...) in seconds."""
        feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_query_encoding_time(**feat_dict)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
class ProfileExCL(ProfileBase):
    """Profiler for the ExCL model.

    ExCL runs query and context jointly, so only a single end-to-end
    forward (prediction) pass is timed.
    """
    def get_model_config(self):
        # Mutates the shared `excl_base_cfg` in place, then returns it.
        excl_base_cfg["ctx_mode"] = "video_sub"
        excl_base_cfg["query_input_size"] = self.QueryFeatureDim
        excl_base_cfg["visual_input_size"] = self.VideoFeatureDim
        excl_base_cfg["sub_input_size"] = self.SubFeatureDim
        excl_base_cfg["output_size"] = self.HiddenSize
        return excl_base_cfg
    def get_model(self):
        # Build the ExCL model on the profiling device, in eval mode.
        model = EXCL(self.model_config)
        model.to(self.device)
        model.eval()
        return model
    def get_fake_raw_input(self):
        """Uninitialized query/sub/video/TEF features plus all-ones masks,
        keyed to match the model's forward signature.

        Note: all batches (including the query) use ctx_batch_size, since
        ExCL consumes query and context in one joint forward pass.
        """
        return dict(
            query_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgWordInQuery, self.QueryFeatureDim),
            query_mask=torch.ones((self.ctx_batch_size, self.AvgWordInQuery)),
            sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.SubFeatureDim),
            sub_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
            video_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.VideoFeatureDim),
            video_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
            tef_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, 2),
            tef_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
            st_ed_indices=torch.ones(2, 2),  # not used.
        )
    def _get_prediction_time(self, input_dict):
        """Time one full forward pass of the model."""
        torch.cuda.synchronize()  # drain pending GPU work before starting the clock
        st_time = time.time()
        self.model(**input_dict)
        torch.cuda.synchronize()  # wait for kernels to finish before reading the clock
        return time.time() - st_time
    def get_prediction_time(self):
        """Run the joint forward pass N_Runs times; return dict(avg=..., std=...) in seconds."""
        feat_dict = self.cast_dict_inputs_to_device(
            self.get_fake_raw_input(), self.device)
        feat_dict["is_training"] = False
        with torch.no_grad():
            times = []
            for _ in trange(self.N_Runs):
                times += [self._get_prediction_time(feat_dict)]
            times = torch.FloatTensor(times)
            return dict(avg=float(times.mean()), std=float(times.std()))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, help="")
parser.add_argument("--ctx_batch_size", type=int, default=400)
parser.add_argument("--query_batch_size", type=int, default=100)
parser.add_argument("--save_dir", type=str, default="baselines/profiling/cache")
args = parser.parse_args()
model = args.model
query_batch_size = args.query_batch_size
ctx_batch_size = args.ctx_batch_size
if model == "mee":
profile_mee = ProfileMEE(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_mee.get_ctx_encoding_time()
ctx_enc_time = profile_mee.get_ctx_encoding_time()
query_enc_time = profile_mee.get_query_encoding_time()
elif model == "cal":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="cal")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "mcn":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="mcn")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "xml":
profile_xml = ProfileXML(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_xml.get_ctx_encoding_time()
ctx_enc_time = profile_xml.get_ctx_encoding_time()
query_enc_time = profile_xml.get_query_encoding_time()
elif model == "excl":
profile_excl = ProfileExCL(ctx_batch_size=ctx_batch_size, query_batch_size=ctx_batch_size)
# use the 2nd one to report time
profile_excl.get_prediction_time()
ctx_enc_time = profile_excl.get_prediction_time()
query_enc_time = 0
# Calculate the total time as ctx_enc_time * (100 * 1M / ctx_batch_size)
else:
raise NotImplementedError
# ctx_enc_time = ctx_enc_time
save_path = os.path.join(args.save_dir, "{}_profile_main.json".format(model))
n_videos = ProfileBase.N_Videos
res = dict(
ctx_enc_time=ctx_enc_time,
ctx_enc_avg_time_all_videos=ctx_enc_time["avg"] * n_videos / ctx_batch_size,
query_enc_time=query_enc_time,
n_videos=n_videos,
ctx_batch_size=ctx_batch_size,
query_batch_size=query_batch_size,
model=model
)
save_json(res, save_path, save_pretty=True)
pprint.pprint(res)
| [
"logging.getLogger",
"logging.basicConfig",
"torch.ones",
"argparse.ArgumentParser",
"tqdm.trange",
"baselines.crossmodal_moment_localization.model_xml.XML",
"torch.FloatTensor",
"torch.cuda.synchronize",
"baselines.clip_alignment_with_language.model.CALWithSub",
"baselines.excl.model.EXCL",
"ut... | [((869, 896), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (886, 896), False, 'import logging\n'), ((897, 1046), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s', datefmt\n ='%Y-%m-%d %H:%M:%S', level=logging.INFO)\n", (916, 1046), False, 'import logging\n'), ((17800, 17825), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (17823, 17825), False, 'import argparse\n'), ((20482, 20525), 'utils.basic_utils.save_json', 'save_json', (['res', 'save_path'], {'save_pretty': '(True)'}), '(res, save_path, save_pretty=True)\n', (20491, 20525), False, 'from utils.basic_utils import save_json\n'), ((20530, 20548), 'pprint.pprint', 'pprint.pprint', (['res'], {}), '(res)\n', (20543, 20548), False, 'import pprint\n'), ((1794, 1816), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1806, 1816), False, 'import torch\n'), ((4005, 4027), 'baselines.crossmodal_moment_localization.model_xml.XML', 'XML', (['self.model_config'], {}), '(self.model_config)\n', (4008, 4027), False, 'from baselines.crossmodal_moment_localization.model_xml import XML, xml_base_config\n'), ((4611, 4635), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4633, 4635), False, 'import torch\n'), ((4654, 4665), 'time.time', 'time.time', ([], {}), '()\n', (4663, 4665), False, 'import time\n'), ((4756, 4780), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (4778, 4780), False, 'import torch\n'), ((5529, 5553), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (5551, 5553), False, 'import torch\n'), ((5572, 5583), 'time.time', 'time.time', ([], {}), '()\n', (5581, 5583), False, 'import time\n'), ((6201, 6225), 'torch.cuda.synchronize', 
'torch.cuda.synchronize', ([], {}), '()\n', (6223, 6225), False, 'import torch\n'), ((6983, 7007), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7005, 7007), False, 'import torch\n'), ((7026, 7037), 'time.time', 'time.time', ([], {}), '()\n', (7035, 7037), False, 'import time\n'), ((7134, 7158), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (7156, 7158), False, 'import torch\n'), ((8017, 8041), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (8039, 8041), False, 'import torch\n'), ((8060, 8071), 'time.time', 'time.time', ([], {}), '()\n', (8069, 8071), False, 'import time\n'), ((8093, 8142), 'torch.einsum', 'torch.einsum', (['"""md,nld->mnl"""', 'query_feat', 'ctx_feat'], {}), "('md,nld->mnl', query_feat, ctx_feat)\n", (8105, 8142), False, 'import torch\n'), ((8633, 8657), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (8655, 8657), False, 'import torch\n'), ((9898, 9920), 'baselines.mixture_embedding_experts.model.MEE', 'MEE', (['self.model_config'], {}), '(self.model_config)\n', (9901, 9920), False, 'from baselines.mixture_embedding_experts.model import MEE, mee_base_cfg\n'), ((10520, 10544), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10542, 10544), False, 'import torch\n'), ((10563, 10574), 'time.time', 'time.time', ([], {}), '()\n', (10572, 10574), False, 'import time\n'), ((10657, 10681), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10679, 10681), False, 'import torch\n'), ((11223, 11247), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (11245, 11247), False, 'import torch\n'), ((11266, 11277), 'time.time', 'time.time', ([], {}), '()\n', (11275, 11277), False, 'import time\n'), ((11544, 11568), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (11566, 11568), False, 'import torch\n'), ((12139, 12163), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', 
(12161, 12163), False, 'import torch\n'), ((12182, 12193), 'time.time', 'time.time', ([], {}), '()\n', (12191, 12193), False, 'import time\n'), ((12202, 12255), 'torch.einsum', 'torch.einsum', (['"""md,nd->mn"""', 'encoded_query', 'encoded_ctx'], {}), "('md,nd->mn', encoded_query, encoded_ctx)\n", (12214, 12255), False, 'import torch\n'), ((12274, 12298), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (12296, 12298), False, 'import torch\n'), ((13302, 13331), 'baselines.clip_alignment_with_language.model.CALWithSub', 'CALWithSub', (['self.model_config'], {}), '(self.model_config)\n', (13312, 13331), False, 'from baselines.clip_alignment_with_language.model import CALWithSub, cal_base_cfg\n'), ((14105, 14129), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (14127, 14129), False, 'import torch\n'), ((14148, 14159), 'time.time', 'time.time', ([], {}), '()\n', (14157, 14159), False, 'import time\n'), ((14296, 14320), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (14318, 14320), False, 'import torch\n'), ((14979, 15003), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (15001, 15003), False, 'import torch\n'), ((15022, 15033), 'time.time', 'time.time', ([], {}), '()\n', (15031, 15033), False, 'import time\n'), ((15099, 15123), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (15121, 15123), False, 'import torch\n'), ((16004, 16027), 'baselines.excl.model.EXCL', 'EXCL', (['self.model_config'], {}), '(self.model_config)\n', (16008, 16027), False, 'from baselines.excl.model import EXCL, excl_base_cfg\n'), ((17079, 17103), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (17101, 17103), False, 'import torch\n'), ((17122, 17133), 'time.time', 'time.time', ([], {}), '()\n', (17131, 17133), False, 'import time\n'), ((17175, 17199), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (17197, 17199), False, 'import torch\n'), 
((4796, 4807), 'time.time', 'time.time', ([], {}), '()\n', (4805, 4807), False, 'import time\n'), ((4869, 4884), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4882, 4884), False, 'import torch\n'), ((5197, 5216), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (5203, 5216), False, 'from tqdm import tqdm, trange\n'), ((5323, 5347), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (5340, 5347), False, 'import torch\n'), ((6241, 6252), 'time.time', 'time.time', ([], {}), '()\n', (6250, 6252), False, 'import time\n'), ((6316, 6331), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6329, 6331), False, 'import torch\n'), ((6579, 6598), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (6585, 6598), False, 'from tqdm import tqdm, trange\n'), ((6700, 6724), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (6717, 6724), False, 'import torch\n'), ((7250, 7265), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7263, 7265), False, 'import torch\n'), ((7638, 7657), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (7644, 7657), False, 'from tqdm import tqdm, trange\n'), ((7769, 7793), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (7786, 7793), False, 'import torch\n'), ((8673, 8684), 'time.time', 'time.time', ([], {}), '()\n', (8682, 8684), False, 'import time\n'), ((8749, 8764), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8762, 8764), False, 'import torch\n'), ((9137, 9156), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (9143, 9156), False, 'from tqdm import tqdm, trange\n'), ((9274, 9298), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (9291, 9298), False, 'import torch\n'), ((10697, 10708), 'time.time', 'time.time', ([], {}), '()\n', (10706, 10708), False, 'import time\n'), ((10860, 10875), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10873, 10875), False, 'import torch\n'), 
((10921, 10940), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (10927, 10940), False, 'from tqdm import tqdm, trange\n'), ((11030, 11054), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (11047, 11054), False, 'import torch\n'), ((11584, 11595), 'time.time', 'time.time', ([], {}), '()\n', (11593, 11595), False, 'import time\n'), ((11765, 11780), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (11778, 11780), False, 'import torch\n'), ((11826, 11845), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (11832, 11845), False, 'from tqdm import tqdm, trange\n'), ((11935, 11959), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (11952, 11959), False, 'import torch\n'), ((12591, 12606), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12604, 12606), False, 'import torch\n'), ((12652, 12671), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (12658, 12671), False, 'from tqdm import tqdm, trange\n'), ((12773, 12797), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (12790, 12797), False, 'import torch\n'), ((14336, 14347), 'time.time', 'time.time', ([], {}), '()\n', (14345, 14347), False, 'import time\n'), ((14642, 14657), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14655, 14657), False, 'import torch\n'), ((14703, 14722), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (14709, 14722), False, 'from tqdm import tqdm, trange\n'), ((14812, 14836), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (14829, 14836), False, 'import torch\n'), ((15139, 15150), 'time.time', 'time.time', ([], {}), '()\n', (15148, 15150), False, 'import time\n'), ((15306, 15321), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15319, 15321), False, 'import torch\n'), ((15367, 15386), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (15373, 15386), False, 'from tqdm import tqdm, trange\n'), ((15478, 
15502), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (15495, 15502), False, 'import torch\n'), ((17215, 17226), 'time.time', 'time.time', ([], {}), '()\n', (17224, 17226), False, 'import time\n'), ((17478, 17493), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17491, 17493), False, 'import torch\n'), ((17539, 17558), 'tqdm.trange', 'trange', (['self.N_Runs'], {}), '(self.N_Runs)\n', (17545, 17558), False, 'from tqdm import tqdm, trange\n'), ((17644, 17668), 'torch.FloatTensor', 'torch.FloatTensor', (['times'], {}), '(times)\n', (17661, 17668), False, 'import torch\n'), ((2618, 2725), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.model_config.max_ctx_l', '(self.VideoFeatureDim - 2 * no_tef)'], {}), '(self.ctx_batch_size, self.model_config.max_ctx_l, self.\n VideoFeatureDim - 2 * no_tef)\n', (2635, 2725), False, 'import torch\n'), ((2782, 2887), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.model_config.max_ctx_l', '(self.SubFeatureDim - 2 * no_tef)'], {}), '(self.ctx_batch_size, self.model_config.max_ctx_l, self.\n SubFeatureDim - 2 * no_tef)\n', (2799, 2887), False, 'import torch\n'), ((2903, 2970), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.model_config.max_ctx_l'], {}), '(self.ctx_batch_size, self.model_config.max_ctx_l)\n', (2920, 2970), False, 'import torch\n'), ((3061, 3149), 'torch.FloatTensor', 'torch.FloatTensor', (['self.query_batch_size', 'self.AvgWordInQuery', 'self.QueryFeatureDim'], {}), '(self.query_batch_size, self.AvgWordInQuery, self.\n QueryFeatureDim)\n', (3078, 3149), False, 'import torch\n'), ((3169, 3223), 'torch.ones', 'torch.ones', (['self.query_batch_size', 'self.AvgWordInQuery'], {}), '(self.query_batch_size, self.AvgWordInQuery)\n', (3179, 3223), False, 'import torch\n'), ((4179, 4268), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.model_config.max_ctx_l', 'self.HiddenSize'], {}), 
'(self.ctx_batch_size, self.model_config.max_ctx_l, self.\n HiddenSize)\n', (4196, 4268), False, 'import torch\n'), ((4286, 4353), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.model_config.max_ctx_l'], {}), '(self.ctx_batch_size, self.model_config.max_ctx_l)\n', (4303, 4353), False, 'import torch\n'), ((4435, 4490), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.HiddenSize'], {}), '(self.ctx_batch_size, self.HiddenSize)\n', (4452, 4490), False, 'import torch\n'), ((7175, 7186), 'time.time', 'time.time', ([], {}), '()\n', (7184, 7186), False, 'import time\n'), ((10068, 10128), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.VideoFeatureDim'], {}), '(self.ctx_batch_size, self.VideoFeatureDim)\n', (10085, 10128), False, 'import torch\n'), ((10151, 10211), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.QueryFeatureDim'], {}), '(self.ctx_batch_size, self.QueryFeatureDim)\n', (10168, 10211), False, 'import torch\n'), ((10307, 10362), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.HiddenSize'], {}), '(self.ctx_batch_size, self.HiddenSize)\n', (10324, 10362), False, 'import torch\n'), ((10387, 10442), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.HiddenSize'], {}), '(self.ctx_batch_size, self.HiddenSize)\n', (10404, 10442), False, 'import torch\n'), ((12315, 12326), 'time.time', 'time.time', ([], {}), '()\n', (12324, 12326), False, 'import time\n'), ((13579, 13697), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgProposalPerVideo', 'self.AvgClipPerProposal', '(self.SubFeatureDim * 2)'], {}), '(self.ctx_batch_size, self.AvgProposalPerVideo, self.\n AvgClipPerProposal, self.SubFeatureDim * 2)\n', (13596, 13697), False, 'import torch\n'), ((13754, 13874), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgProposalPerVideo', 'self.AvgClipPerProposal', 
'(self.VideoFeatureDim * 2)'], {}), '(self.ctx_batch_size, self.AvgProposalPerVideo, self.\n AvgClipPerProposal, self.VideoFeatureDim * 2)\n', (13771, 13874), False, 'import torch\n'), ((16261, 16347), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgWordInQuery', 'self.QueryFeatureDim'], {}), '(self.ctx_batch_size, self.AvgWordInQuery, self.\n QueryFeatureDim)\n', (16278, 16347), False, 'import torch\n'), ((16367, 16421), 'torch.ones', 'torch.ones', (['(self.ctx_batch_size, self.AvgWordInQuery)'], {}), '((self.ctx_batch_size, self.AvgWordInQuery))\n', (16377, 16421), False, 'import torch\n'), ((16444, 16529), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgClipPerVideo', 'self.SubFeatureDim'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo, self.SubFeatureDim\n )\n', (16461, 16529), False, 'import torch\n'), ((16548, 16601), 'torch.ones', 'torch.ones', (['self.ctx_batch_size', 'self.AvgClipPerVideo'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo)\n', (16558, 16601), False, 'import torch\n'), ((16627, 16714), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgClipPerVideo', 'self.VideoFeatureDim'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo, self.\n VideoFeatureDim)\n', (16644, 16714), False, 'import torch\n'), ((16735, 16788), 'torch.ones', 'torch.ones', (['self.ctx_batch_size', 'self.AvgClipPerVideo'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo)\n', (16745, 16788), False, 'import torch\n'), ((16812, 16875), 'torch.FloatTensor', 'torch.FloatTensor', (['self.ctx_batch_size', 'self.AvgClipPerVideo', '(2)'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo, 2)\n', (16829, 16875), False, 'import torch\n'), ((16899, 16952), 'torch.ones', 'torch.ones', (['self.ctx_batch_size', 'self.AvgClipPerVideo'], {}), '(self.ctx_batch_size, self.AvgClipPerVideo)\n', (16909, 16952), False, 'import torch\n'), ((16981, 16997), 'torch.ones', 'torch.ones', (['(2)', '(2)'], {}), '(2, 2)\n', 
(16991, 16997), False, 'import torch\n')] |
"""Project: Eskapade - A python-based package for data analysis.
Macro: esk501_fix_pandas_dataframe
Created: 2017/04/26
Description:
Macro illustrates how to call FixPandasDataFrame link that gives columns
consistent names and datatypes.
Default settings perform the following clean-up steps on an
input dataframe:
- Fix all column names. Eg. remove punctuation and strange characters,
and convert spaces to underscores.
- Check for various possible nans in the dataset, then make all nans
consistent by turning them into numpy.nan (= float)
- Per column, assess dynamically the most consistent datatype (ignoring
all nans in that column). Eg. bool, int, float, datetime64, string.
- Per column, make the data types of all rows consistent, by using the
identified (or imposed) data type (by default ignoring all nans)
Authors:
KPMG Advanced Analytics & Big Data team, Amstelveen, The Netherlands
Redistribution and use in source and binary forms, with or without
modification, are permitted according to the terms listed in the file
LICENSE.
"""
import tempfile
from eskapade import ConfigObject, Chain
from eskapade import core_ops, analysis, data_quality
from eskapade import process_manager
from eskapade.logger import Logger
logger = Logger()
logger.debug('Now parsing configuration file esk501_fix_pandas_dataframe')
#########################################################################################
# --- minimal analysis information
settings = process_manager.service(ConfigObject)
settings['analysisName'] = 'esk501_fix_pandas_dataframe'
settings['version'] = 0
#########################################################################################
# --- Analysis values, settings, helper functions, configuration flags.
# dummy dataframe filled with inconsistent data types per column
tmp = b"""A,B,C,D,E,F,G,H
True,foo,1.0,1,1,1,a,a
False,bar,2.0,2,2,2.5,b,b
nan,3,bal,3,bla,bar,c,1
,nan,NaN,NaN,nan,nan,d,2
,,,,,,,3
1,2,,,,,,,6
"""
f = tempfile.NamedTemporaryFile(delete=False)
f.write(tmp)
f.close()
# file is not immediately deleted because we used delete=False
# used below with f.name
#########################################################################################
# --- now set up the chains and links based on configuration flags
ch = Chain('DataPrep')
# --- 0. pandas read_csv has multiple settings to help reading in of buggy csv's.
# o The option error_bad_lines=False skips lines with too few or too many values
# o The option encoding='latin1' interprets most non-standard characters
read_data = analysis.ReadToDf(key='vrh',
reader='csv',
path=f.name,
error_bad_lines=False,
encoding='latin1')
ch.add(read_data)
# --- 1. standard setting:
# o convert all nans to np.nan (= float)
# o convert all rows in a column to most occuring datatype in that column
fixer = data_quality.FixPandasDataFrame(name='fixer1')
fixer.read_key = 'vrh'
fixer.store_key = '<KEY>'
ch.add(fixer)
# --- 2. force certain columns to specified datatype
fixer = data_quality.FixPandasDataFrame(name='fixer2')
fixer.read_key = 'vrh'
fixer.store_key = '<KEY>'
fixer.var_dtype = {'B': int, 'C': str}
ch.add(fixer)
# --- 3. convert all nans to data type consistent with rest of column
fixer = data_quality.FixPandasDataFrame(name='fixer3')
fixer.read_key = 'vrh'
fixer.store_key = '<KEY>'
fixer.convert_inconsistent_nans = True
# set a specific nan (GREPME) for a given column (G)
fixer.var_nan = {'G': 'GREPME'}
ch.add(fixer)
# --- 4. compare results
pds = core_ops.PrintDs(name='pds2')
pds.keys = ['vrh', '<KEY>', '<KEY>', '<KEY>']
ch.add(pds)
# --- 5. write out fixed dataframe - turned off in this example
# The dataframe will be saved with the numpy writer which will
# restore the dtypes when reloading the dataframe
writedata = analysis.WriteFromDf(name='writer',
key='<KEY>',
path='tmp.npz',
)
# ch.add(writedata)
#########################################################################################
logger.debug('Done parsing configuration file esk501_fix_pandas_dataframe')
| [
"eskapade.Chain",
"eskapade.logger.Logger",
"eskapade.process_manager.service",
"eskapade.analysis.ReadToDf",
"tempfile.NamedTemporaryFile",
"eskapade.data_quality.FixPandasDataFrame",
"eskapade.analysis.WriteFromDf",
"eskapade.core_ops.PrintDs"
] | [((1299, 1307), 'eskapade.logger.Logger', 'Logger', ([], {}), '()\n', (1305, 1307), False, 'from eskapade.logger import Logger\n'), ((1522, 1559), 'eskapade.process_manager.service', 'process_manager.service', (['ConfigObject'], {}), '(ConfigObject)\n', (1545, 1559), False, 'from eskapade import process_manager\n'), ((2025, 2066), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2052, 2066), False, 'import tempfile\n'), ((2342, 2359), 'eskapade.Chain', 'Chain', (['"""DataPrep"""'], {}), "('DataPrep')\n", (2347, 2359), False, 'from eskapade import ConfigObject, Chain\n'), ((2617, 2719), 'eskapade.analysis.ReadToDf', 'analysis.ReadToDf', ([], {'key': '"""vrh"""', 'reader': '"""csv"""', 'path': 'f.name', 'error_bad_lines': '(False)', 'encoding': '"""latin1"""'}), "(key='vrh', reader='csv', path=f.name, error_bad_lines=\n False, encoding='latin1')\n", (2634, 2719), False, 'from eskapade import core_ops, analysis, data_quality\n'), ((3012, 3058), 'eskapade.data_quality.FixPandasDataFrame', 'data_quality.FixPandasDataFrame', ([], {'name': '"""fixer1"""'}), "(name='fixer1')\n", (3043, 3058), False, 'from eskapade import core_ops, analysis, data_quality\n'), ((3184, 3230), 'eskapade.data_quality.FixPandasDataFrame', 'data_quality.FixPandasDataFrame', ([], {'name': '"""fixer2"""'}), "(name='fixer2')\n", (3215, 3230), False, 'from eskapade import core_ops, analysis, data_quality\n'), ((3412, 3458), 'eskapade.data_quality.FixPandasDataFrame', 'data_quality.FixPandasDataFrame', ([], {'name': '"""fixer3"""'}), "(name='fixer3')\n", (3443, 3458), False, 'from eskapade import core_ops, analysis, data_quality\n'), ((3678, 3707), 'eskapade.core_ops.PrintDs', 'core_ops.PrintDs', ([], {'name': '"""pds2"""'}), "(name='pds2')\n", (3694, 3707), False, 'from eskapade import core_ops, analysis, data_quality\n'), ((3956, 4020), 'eskapade.analysis.WriteFromDf', 'analysis.WriteFromDf', ([], {'name': '"""writer"""', 'key': 
'"""<KEY>"""', 'path': '"""tmp.npz"""'}), "(name='writer', key='<KEY>', path='tmp.npz')\n", (3976, 4020), False, 'from eskapade import core_ops, analysis, data_quality\n')] |
import numpy as np
import microdf as mdf
def gini(df, col, w=None, negatives=None):
"""Calculates Gini index.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:param negatives: An optional string indicating how to treat negative
values of x:
'zero' replaces negative values with zeroes.
'shift' subtracts the minimum value from all values of x,
when this minimum is negative. That is, it adds the absolute
minimum value.
Defaults to None, which leaves negative values as they are.
:returns: A float, the Gini index.
"""
# Requires float numpy arrays (not pandas Series or lists) to work.
x = np.array(df[col]).astype("float")
if negatives == "zero":
x[x < 0] = 0
if negatives == "shift" and np.amin(x) < 0:
x -= np.amin(x)
if w is not None:
w = np.array(df[w]).astype("float")
sorted_indices = np.argsort(x)
sorted_x = x[sorted_indices]
sorted_w = w[sorted_indices]
cumw = np.cumsum(sorted_w)
cumxw = np.cumsum(sorted_x * sorted_w)
return np.sum(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:]) / (
cumxw[-1] * cumw[-1]
)
else:
sorted_x = np.sort(x)
n = len(x)
cumxw = np.cumsum(sorted_x)
# The above formula, with all weights equal to 1 simplifies to:
return (n + 1 - 2 * np.sum(cumxw) / cumxw[-1]) / n
def top_x_pct_share(df, col, top_x_pct, w=None):
"""Calculates top x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param top_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top x%.
"""
threshold = mdf.weighted_quantile(df, col, w, 1 - top_x_pct)
top_x_pct_sum = mdf.weighted_sum(df[df[col] >= threshold], col, w)
total_sum = mdf.weighted_sum(df, col, w)
return top_x_pct_sum / total_sum
def bottom_x_pct_share(df, col, bottom_x_pct, w=None):
"""Calculates bottom x% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param bottom_x_pct: Decimal between 0 and 1 of the top %, e.g. 0.1, 0.001.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom x%.
"""
return 1 - top_x_pct_share(df, col, 1 - bottom_x_pct, w, top=False)
def bottom_50_pct_share(df, col, w=None):
"""Calculates bottom 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the bottom 50%.
"""
return bottom_x_pct_share(df, col, 0.5, w)
def top_50_pct_share(df, col, w=None):
"""Calculates top 50% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 50%.
"""
return top_x_pct_share(df, col, 0.5, w)
def top_10_pct_share(df, col, w=None):
"""Calculates top 10% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10%.
"""
return top_x_pct_share(df, col, 0.1, w)
def top_1_pct_share(df, col, w=None):
"""Calculates top 1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 1%.
"""
return top_x_pct_share(df, col, 0.01, w)
def top_0_1_pct_share(df, col, w=None):
"""Calculates top 0.1% share.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 0.1%.
"""
return top_x_pct_share(df, col, 0.001, w)
def t10_b50(df, col, w=None):
"""Calculates ratio between the top 10% and bottom 50% shares.
:param df: DataFrame.
:param col: Name of column in df representing value.
:param w: Column representing weight in df.
:returns: The share of w-weighted val held by the top 10% divided by
the share of w-weighted val held by the bottom 50%.
"""
return top_10_pct_share(df, col, w) / bottom_50_pct_share(df, col, w)
| [
"numpy.amin",
"numpy.sort",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"microdf.weighted_sum",
"numpy.cumsum",
"microdf.weighted_quantile"
] | [((1888, 1936), 'microdf.weighted_quantile', 'mdf.weighted_quantile', (['df', 'col', 'w', '(1 - top_x_pct)'], {}), '(df, col, w, 1 - top_x_pct)\n', (1909, 1936), True, 'import microdf as mdf\n'), ((1957, 2007), 'microdf.weighted_sum', 'mdf.weighted_sum', (['df[df[col] >= threshold]', 'col', 'w'], {}), '(df[df[col] >= threshold], col, w)\n', (1973, 2007), True, 'import microdf as mdf\n'), ((2024, 2052), 'microdf.weighted_sum', 'mdf.weighted_sum', (['df', 'col', 'w'], {}), '(df, col, w)\n', (2040, 2052), True, 'import microdf as mdf\n'), ((895, 905), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (902, 905), True, 'import numpy as np\n'), ((997, 1010), 'numpy.argsort', 'np.argsort', (['x'], {}), '(x)\n', (1007, 1010), True, 'import numpy as np\n'), ((1100, 1119), 'numpy.cumsum', 'np.cumsum', (['sorted_w'], {}), '(sorted_w)\n', (1109, 1119), True, 'import numpy as np\n'), ((1136, 1166), 'numpy.cumsum', 'np.cumsum', (['(sorted_x * sorted_w)'], {}), '(sorted_x * sorted_w)\n', (1145, 1166), True, 'import numpy as np\n'), ((1312, 1322), 'numpy.sort', 'np.sort', (['x'], {}), '(x)\n', (1319, 1322), True, 'import numpy as np\n'), ((1358, 1377), 'numpy.cumsum', 'np.cumsum', (['sorted_x'], {}), '(sorted_x)\n', (1367, 1377), True, 'import numpy as np\n'), ((751, 768), 'numpy.array', 'np.array', (['df[col]'], {}), '(df[col])\n', (759, 768), True, 'import numpy as np\n'), ((866, 876), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (873, 876), True, 'import numpy as np\n'), ((1182, 1235), 'numpy.sum', 'np.sum', (['(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:])'], {}), '(cumxw[1:] * cumw[:-1] - cumxw[:-1] * cumw[1:])\n', (1188, 1235), True, 'import numpy as np\n'), ((940, 955), 'numpy.array', 'np.array', (['df[w]'], {}), '(df[w])\n', (948, 955), True, 'import numpy as np\n'), ((1478, 1491), 'numpy.sum', 'np.sum', (['cumxw'], {}), '(cumxw)\n', (1484, 1491), True, 'import numpy as np\n')] |
from django.db import models
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from embed_video.fields import EmbedYoutubeField, EmbedSoundcloudField
from .custom_model_fields import RestrictedImageField
from .default_settings import ADOPTEE_STORIES_CONFIG as config
class NamesToStringMixin():
    """Mixin building a combined ``name`` from the host object's English,
    pinyin and Chinese name attributes (missing/empty parts are skipped)."""

    # Attribute names probed on the instance, in display order.
    NAME_ATTRIBUTES = ['english_name', 'pinyin_name', 'chinese_name']

    @property
    def name(self):
        """Return the non-empty name parts joined by single spaces."""
        parts = (getattr(self, attr, None) for attr in self.NAME_ATTRIBUTES)
        return ' '.join(part for part in parts if part)
class Adoptee(models.Model, NamesToStringMixin):
    """An adoptee profile: the person's names, a cover photo, and a pointer
    to the approved story shown as their "front story"."""

    # Form-level validation rule (not enforced here): english_name must have
    # a value OR both pinyin_name and chinese_name must have a value.
    english_name = models.CharField(max_length=150, null=True, blank=True,
                                    # Translators: Name of a field in the admin page
                                    db_index=True, verbose_name=_('English Name'))
    pinyin_name = models.CharField(max_length=150, null=True, blank=True,
                                   # Translators: Name of a field in the admin page
                                   db_index=True, verbose_name=_('Pinyin Name'))
    chinese_name = models.CharField(max_length=50, null=True, blank=True,
                                    # Translators: Name of a field in the admin page
                                    db_index=True, verbose_name=_('Chinese Name'))
    # Image constraints come from the ADOPTEE_STORIES_CONFIG defaults.
    photo_front_story = RestrictedImageField(maximum_size=config['PHOTO_FRONT_STORY_MAX_SIZE'],
                                             required_width=config['PHOTO_FRONT_STORY_WIDTH'],
                                             required_height=config['PHOTO_FRONT_STORY_HEIGHT'],
                                             required_formats=config['FORMATS'],
                                             null=True, blank=True,
                                             # Translators: Name of a field in the admin page
                                             verbose_name=_('Photo Front Story'))
    # Only approved StoryTeller rows can be selected as the front story.
    # NOTE(review): ForeignKey has no on_delete argument, which is only valid
    # on Django < 2.0 — confirm the target Django version.
    # Translators: Name of a field in the admin page
    front_story = models.ForeignKey('StoryTeller', null=True, verbose_name=_('Front Story'), blank=True,
                                    limit_choices_to={'approved': True})
    # Translators: Name of a field in the admin page
    created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created At'))
    # Translators: Name of a field in the admin page
    updated = models.DateTimeField(auto_now=True, verbose_name=_('Updated At'))

    class Meta:
        ordering = ['-created']  # newest adoptees first
        # Translators: Name of a field in the admin page
        verbose_name = _('Adoptee')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Adoptees')

    def __str__(self):
        # e.g. "Adoptee <combined name>" — verbose_name plus the mixin-built name.
        string = ' '.join([force_text(self._meta.verbose_name), self.name])
        return string
class MultimediaItem(models.Model):
    """Abstract base for media (audio/video/photo) attached to a story
    teller; concrete subclasses add the actual media field."""

    # Form-level validation rule (not enforced here): english_caption or
    # chinese_caption must have a value.
    english_caption = models.CharField(max_length=200, null=True, blank=True,
                                      # Translators: Name of a field in the admin page
                                      verbose_name=_('English Caption'))
    chinese_caption = models.CharField(max_length=200, null=True, blank=True,
                                      # Translators: Name of a field in the admin page
                                      verbose_name=_('Chinese Caption'))
    # Items start unapproved and must be moderated before display.
    # Translators: Name of a field in the admin page
    approved = models.BooleanField(default=False, verbose_name=_('Approved'))
    # NOTE(review): ForeignKey has no on_delete argument (valid only on Django < 2.0).
    # Translators: Name of a field in the admin page
    story_teller = models.ForeignKey('StoryTeller', null=True, verbose_name=_('Story Teller'))
    # Translators: Name of a field in the admin pages
    created = models.DateTimeField(auto_now_add=True, verbose_name=_('Created At'))
    # Translators: Name of a field in the admin page
    updated = models.DateTimeField(auto_now=True, verbose_name=_('Updated At'))

    class Meta:
        verbose_name = _('Multimedia Item')
        abstract = True  # no table for this model; subclasses inherit the fields
        ordering = ['-created']

    def __str__(self):
        # e.g. "Multimedia Item <teller name> <creation timestamp>".
        return ' '.join([force_text(self._meta.verbose_name), self.story_teller.name, force_text(self.created)])
class Audio(MultimediaItem):
    """A SoundCloud-embedded audio item attached to a story teller."""

    # Translators: name of field in the admin page
    audio = EmbedSoundcloudField(verbose_name=_('Audio Soundcloud Embed'))

    class Meta(MultimediaItem.Meta):
        abstract = False  # concrete table; inherits ordering from the base Meta
        # Translators: Name of a field in the admin page
        verbose_name = _('Audio item')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Audio items')
class Video(MultimediaItem):
    """A YouTube-embedded video item attached to a story teller."""

    # Translators: name of field in the admin page
    video = EmbedYoutubeField(verbose_name=_('Video Youtube Embed'))

    class Meta(MultimediaItem.Meta):
        abstract = False  # concrete table; inherits ordering from the base Meta
        # Translators: Name of a field in the admin page
        verbose_name = _('Video item')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Video items')
class Photo(MultimediaItem):
    """An uploaded photo attached to a story teller."""

    # File size and type checking is performed at the form level.
    # Translators: Name of a field in the admin page
    photo_file = models.ImageField(verbose_name=_('Photo File'))

    class Meta(MultimediaItem.Meta):
        abstract = False  # concrete table; inherits ordering from the base Meta
        # Translators: Name of a field in the admin page
        verbose_name = _('Photo')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Photos')
class RelationshipCategory(models.Model, NamesToStringMixin):
    """A category describing a story teller's relationship to a story,
    with English and Chinese labels and an admin-controlled position."""

    # At first, english_name OR chinese_name must have a value; to publish,
    # both should be translated — otherwise stories with an untranslated
    # category must only show up on the English/Chinese side respectively.
    # Translators: Name of a field in the admin page
    english_name = models.CharField(max_length=30, null=True, verbose_name=_('English Name'),
                                    blank=True)
    # Translators: Name of a field in the admin page
    chinese_name = models.CharField(max_length=30, null=True, verbose_name=_('Chinese Name'),
                                    blank=True)
    # Categories must be moderated before public use.
    # Translators: Name of a field in the admin page
    approved = models.BooleanField(default=False, verbose_name=_('Approved'))
    # Translators: Name of a field in the admin page
    created = models.DateTimeField(auto_now_add=True,
                                   verbose_name=_('Created At'))
    # Translators: Name of a field in the admin page
    updated = models.DateTimeField(auto_now=True,
                                   verbose_name=_('Updated At'))
    # Translators: Label for the number determining the order of the relationship category for admins
    order = models.IntegerField(null=True, blank=True, verbose_name=_('Position of relationship category'))

    class Meta:
        ordering = ['order']  # admin-controlled display order
        # Translators: Name of a field in the admin page
        verbose_name = _('Relationship Category')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Relationship Categories')

    def __str__(self):
        # verbose_name plus the mixin-built name (pinyin_name is absent here
        # and silently skipped by the mixin's getattr default).
        string = ' '.join([force_text(self._meta.verbose_name), self.name])
        return string
class StoryTeller(models.Model, NamesToStringMixin):
    """A person telling a story about an Adoptee: the story text, the
    teller's names, contact email and relationship category."""

    # NOTE(review): the ForeignKeys below have no on_delete argument, which
    # is only valid on Django < 2.0 — confirm the target Django version.
    relationship_to_story = models.ForeignKey('RelationshipCategory',
                                                # Translators: Name of a field in the admin page
                                                verbose_name=_('Relationship to Story'))
    # One version of story text because I don't want adoptee's stories to be
    # different between who is viewing it (shared by both language sides).
    # Translators: Name of a field in the admin page
    story_text = models.TextField(verbose_name=_('Story Text'))
    # Translators: Name of a field in the admin page
    email = models.EmailField(verbose_name=_('Email'))
    # Stories must be moderated before publication.
    # Translators: Name of a field in the admin page
    approved = models.BooleanField(default=False, verbose_name=_('Approved'))
    related_adoptee = models.ForeignKey('Adoptee', related_name='stories',
                                        # Translators: Name of a field in the admin page
                                        verbose_name=_('Related Adoptee'))
    # Form-level validation rule (not enforced here): english_name must have
    # a value OR both pinyin_name and chinese_name must have a value.
    english_name = models.CharField(max_length=150, null=True,
                                    # Translators: Name of a field in the admin page
                                    verbose_name=_('English Name'),
                                    blank=True)
    chinese_name = models.CharField(max_length=50, null=True,
                                    # Translators: Name of a field in the admin page
                                    verbose_name=_('Chinese Name'),
                                    blank=True)
    pinyin_name = models.CharField(max_length=150, null=True,
                                   # Translators: Name of a field in the admin page
                                   verbose_name=_('Pinyin Name'),
                                   blank=True)
    created = models.DateTimeField(auto_now_add=True,
                                   # Translators: Name of a field in the admin page
                                   verbose_name=_('Created At'))
    updated = models.DateTimeField(auto_now=True,
                                   # Translators: Name of a field in the admin page
                                   verbose_name=_('Updated At'))

    class Meta:
        ordering = ['-updated', '-created']  # most recently touched first
        # Translators: Name of a field in the admin page
        verbose_name = _('Story Teller')
        # Translators: Name of a field in the admin page
        verbose_name_plural = _('Story Tellers')

    def __str__(self):
        # verbose_name plus the mixin-built combined name.
        string = ' '.join([force_text(self._meta.verbose_name), self.name])
        return string
class AboutPerson(models.Model, NamesToStringMixin):
    """A team member shown on the about page: photo, bilingual captions and
    about texts, publication flag and a manual display order."""

    # Photo constraints come from the ADOPTEE_STORIES_CONFIG defaults.
    photo = RestrictedImageField(maximum_size=config['PHOTO_FRONT_STORY_MAX_SIZE'],
                                 required_height=config['ABOUT_PHOTO_HEIGHT'],
                                 required_width=config['ABOUT_PHOTO_WIDTH'],
                                 required_formats=config['FORMATS'],
                                 verbose_name=_('Picture of person on about page'))
    english_caption = models.CharField(max_length=200, null=True, blank=True,
                                      # Translators: Name of a field in the admin page
                                      verbose_name=_('English Caption'))
    chinese_caption = models.CharField(max_length=200, null=True, blank=True,
                                      # Translators: Name of a field in the admin page
                                      verbose_name=_('Chinese Caption'))
    # The about texts are stored as raw HTML paragraphs (see help_text).
    about_text_english = models.TextField(verbose_name=_('About text for that person in English.'),
                                          help_text=_('Should include paragraph markup:'
                                                      'e.g. <p>This is a paragraph</p>'
                                                      '<p>This is a different paragraph</p>'),
                                          null=True, blank=True)
    about_text_chinese = models.TextField(verbose_name=_('About text for that person in Chinese.'),
                                          help_text=_('Should include paragraph markup:'
                                                      'e.g. <p>This is a paragraph</p>'
                                                      '<p>This is a different paragraph</p>'),
                                          null=True, blank=True)
    # NOTE(review): BooleanField without a default — on some Django versions
    # saving with this unset will fail; confirm whether a default is intended.
    published = models.BooleanField(verbose_name=_('Published status'))
    english_name = models.CharField(max_length=150, null=True,
                                    # Translators: Name of a field in the admin page
                                    verbose_name=_('English Name'),
                                    blank=True)
    chinese_name = models.CharField(max_length=50, null=True,
                                    # Translators: Name of a field in the admin page
                                    verbose_name=_('Chinese Name'),
                                    blank=True)
    pinyin_name = models.CharField(max_length=150, null=True,
                                   # Translators: Name of a field in the admin page
                                   verbose_name=_('Pinyin Name'),
                                   blank=True)
    order = models.IntegerField(verbose_name=_('Position of person in about page'))

    class Meta:
        ordering = ['order']  # manual ordering on the about page
        verbose_name = _('About Person')
        verbose_name_plural = _('About People')

    def __str__(self):
        # verbose_name plus the mixin-built combined name.
        string = ' '.join([force_text(self._meta.verbose_name), self.name])
        return string
| [
"django.utils.encoding.force_text",
"django.utils.translation.ugettext_lazy"
] | [((2790, 2802), 'django.utils.translation.ugettext_lazy', '_', (['"""Adoptee"""'], {}), "('Adoptee')\n", (2791, 2802), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2890, 2903), 'django.utils.translation.ugettext_lazy', '_', (['"""Adoptees"""'], {}), "('Adoptees')\n", (2891, 2903), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4224, 4244), 'django.utils.translation.ugettext_lazy', '_', (['"""Multimedia Item"""'], {}), "('Multimedia Item')\n", (4225, 4244), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4738, 4753), 'django.utils.translation.ugettext_lazy', '_', (['"""Audio item"""'], {}), "('Audio item')\n", (4739, 4753), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4841, 4857), 'django.utils.translation.ugettext_lazy', '_', (['"""Audio items"""'], {}), "('Audio items')\n", (4842, 4857), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5152, 5167), 'django.utils.translation.ugettext_lazy', '_', (['"""Video item"""'], {}), "('Video item')\n", (5153, 5167), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5255, 5271), 'django.utils.translation.ugettext_lazy', '_', (['"""Video items"""'], {}), "('Video items')\n", (5256, 5271), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5619, 5629), 'django.utils.translation.ugettext_lazy', '_', (['"""Photo"""'], {}), "('Photo')\n", (5620, 5629), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5717, 5728), 'django.utils.translation.ugettext_lazy', '_', (['"""Photos"""'], {}), "('Photos')\n", (5718, 5728), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7210, 7236), 'django.utils.translation.ugettext_lazy', '_', (['"""Relationship Category"""'], {}), "('Relationship Category')\n", (7211, 7236), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7324, 7352), 'django.utils.translation.ugettext_lazy', '_', 
(['"""Relationship Categories"""'], {}), "('Relationship Categories')\n", (7325, 7352), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9937, 9954), 'django.utils.translation.ugettext_lazy', '_', (['"""Story Teller"""'], {}), "('Story Teller')\n", (9938, 9954), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10042, 10060), 'django.utils.translation.ugettext_lazy', '_', (['"""Story Tellers"""'], {}), "('Story Tellers')\n", (10043, 10060), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12996, 13013), 'django.utils.translation.ugettext_lazy', '_', (['"""About Person"""'], {}), "('About Person')\n", (12997, 13013), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((13044, 13061), 'django.utils.translation.ugettext_lazy', '_', (['"""About People"""'], {}), "('About People')\n", (13045, 13061), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1045, 1062), 'django.utils.translation.ugettext_lazy', '_', (['"""English Name"""'], {}), "('English Name')\n", (1046, 1062), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1285, 1301), 'django.utils.translation.ugettext_lazy', '_', (['"""Pinyin Name"""'], {}), "('Pinyin Name')\n", (1286, 1301), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1526, 1543), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Name"""'], {}), "('Chinese Name')\n", (1527, 1543), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2135, 2157), 'django.utils.translation.ugettext_lazy', '_', (['"""Photo Front Story"""'], {}), "('Photo Front Story')\n", (2136, 2157), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2288, 2304), 'django.utils.translation.ugettext_lazy', '_', (['"""Front Story"""'], {}), "('Front Story')\n", (2289, 2304), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2511, 2526), 'django.utils.translation.ugettext_lazy', 
'_', (['"""Created At"""'], {}), "('Created At')\n", (2512, 2526), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2644, 2659), 'django.utils.translation.ugettext_lazy', '_', (['"""Updated At"""'], {}), "('Updated At')\n", (2645, 2659), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3370, 3390), 'django.utils.translation.ugettext_lazy', '_', (['"""English Caption"""'], {}), "('English Caption')\n", (3371, 3390), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3610, 3630), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Caption"""'], {}), "('Chinese Caption')\n", (3611, 3630), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3749, 3762), 'django.utils.translation.ugettext_lazy', '_', (['"""Approved"""'], {}), "('Approved')\n", (3750, 3762), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((3893, 3910), 'django.utils.translation.ugettext_lazy', '_', (['"""Story Teller"""'], {}), "('Story Teller')\n", (3894, 3910), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4034, 4049), 'django.utils.translation.ugettext_lazy', '_', (['"""Created At"""'], {}), "('Created At')\n", (4035, 4049), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4167, 4182), 'django.utils.translation.ugettext_lazy', '_', (['"""Updated At"""'], {}), "('Updated At')\n", (4168, 4182), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4566, 4593), 'django.utils.translation.ugettext_lazy', '_', (['"""Audio Soundcloud Embed"""'], {}), "('Audio Soundcloud Embed')\n", (4567, 4593), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4983, 5007), 'django.utils.translation.ugettext_lazy', '_', (['"""Video Youtube Embed"""'], {}), "('Video Youtube Embed')\n", (4984, 5007), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((5459, 5474), 'django.utils.translation.ugettext_lazy', '_', (['"""Photo 
File"""'], {}), "('Photo File')\n", (5460, 5474), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6140, 6157), 'django.utils.translation.ugettext_lazy', '_', (['"""English Name"""'], {}), "('English Name')\n", (6141, 6157), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6335, 6352), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Name"""'], {}), "('Chinese Name')\n", (6336, 6352), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6519, 6532), 'django.utils.translation.ugettext_lazy', '_', (['"""Approved"""'], {}), "('Approved')\n", (6520, 6532), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6689, 6704), 'django.utils.translation.ugettext_lazy', '_', (['"""Created At"""'], {}), "('Created At')\n", (6690, 6704), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((6857, 6872), 'django.utils.translation.ugettext_lazy', '_', (['"""Updated At"""'], {}), "('Updated At')\n", (6858, 6872), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7044, 7082), 'django.utils.translation.ugettext_lazy', '_', (['"""Position of relationship category"""'], {}), "('Position of relationship category')\n", (7045, 7082), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7754, 7780), 'django.utils.translation.ugettext_lazy', '_', (['"""Relationship to Story"""'], {}), "('Relationship to Story')\n", (7755, 7780), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((7995, 8010), 'django.utils.translation.ugettext_lazy', '_', (['"""Story Text"""'], {}), "('Story Text')\n", (7996, 8010), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8108, 8118), 'django.utils.translation.ugettext_lazy', '_', (['"""Email"""'], {}), "('Email')\n", (8109, 8118), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8236, 8249), 'django.utils.translation.ugettext_lazy', '_', (['"""Approved"""'], {}), 
"('Approved')\n", (8237, 8249), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8468, 8488), 'django.utils.translation.ugettext_lazy', '_', (['"""Related Adoptee"""'], {}), "('Related Adoptee')\n", (8469, 8488), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((8805, 8822), 'django.utils.translation.ugettext_lazy', '_', (['"""English Name"""'], {}), "('English Name')\n", (8806, 8822), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9068, 9085), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Name"""'], {}), "('Chinese Name')\n", (9069, 9085), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9329, 9345), 'django.utils.translation.ugettext_lazy', '_', (['"""Pinyin Name"""'], {}), "('Pinyin Name')\n", (9330, 9345), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9580, 9595), 'django.utils.translation.ugettext_lazy', '_', (['"""Created At"""'], {}), "('Created At')\n", (9581, 9595), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((9779, 9794), 'django.utils.translation.ugettext_lazy', '_', (['"""Updated At"""'], {}), "('Updated At')\n", (9780, 9794), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10593, 10629), 'django.utils.translation.ugettext_lazy', '_', (['"""Picture of person on about page"""'], {}), "('Picture of person on about page')\n", (10594, 10629), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((10849, 10869), 'django.utils.translation.ugettext_lazy', '_', (['"""English Caption"""'], {}), "('English Caption')\n", (10850, 10869), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11089, 11109), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Caption"""'], {}), "('Chinese Caption')\n", (11090, 11109), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11166, 11209), 'django.utils.translation.ugettext_lazy', '_', (['"""About 
text for that person in English."""'], {}), "('About text for that person in English.')\n", (11167, 11209), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11263, 11372), 'django.utils.translation.ugettext_lazy', '_', (['"""Should include paragraph markup:e.g. <p>This is a paragraph</p><p>This is a different paragraph</p>"""'], {}), "('Should include paragraph markup:e.g. <p>This is a paragraph</p><p>This is a different paragraph</p>'\n )\n", (11264, 11372), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11603, 11646), 'django.utils.translation.ugettext_lazy', '_', (['"""About text for that person in Chinese."""'], {}), "('About text for that person in Chinese.')\n", (11604, 11646), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((11700, 11809), 'django.utils.translation.ugettext_lazy', '_', (['"""Should include paragraph markup:e.g. <p>This is a paragraph</p><p>This is a different paragraph</p>"""'], {}), "('Should include paragraph markup:e.g. 
<p>This is a paragraph</p><p>This is a different paragraph</p>'\n )\n", (11701, 11809), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12034, 12055), 'django.utils.translation.ugettext_lazy', '_', (['"""Published status"""'], {}), "('Published status')\n", (12035, 12055), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12254, 12271), 'django.utils.translation.ugettext_lazy', '_', (['"""English Name"""'], {}), "('English Name')\n", (12255, 12271), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12517, 12534), 'django.utils.translation.ugettext_lazy', '_', (['"""Chinese Name"""'], {}), "('Chinese Name')\n", (12518, 12534), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12778, 12794), 'django.utils.translation.ugettext_lazy', '_', (['"""Pinyin Name"""'], {}), "('Pinyin Name')\n", (12779, 12794), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((12888, 12925), 'django.utils.translation.ugettext_lazy', '_', (['"""Position of person in about page"""'], {}), "('Position of person in about page')\n", (12889, 12925), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((2955, 2990), 'django.utils.encoding.force_text', 'force_text', (['self._meta.verbose_name'], {}), '(self._meta.verbose_name)\n', (2965, 2990), False, 'from django.utils.encoding import force_text\n'), ((4350, 4385), 'django.utils.encoding.force_text', 'force_text', (['self._meta.verbose_name'], {}), '(self._meta.verbose_name)\n', (4360, 4385), False, 'from django.utils.encoding import force_text\n'), ((4411, 4435), 'django.utils.encoding.force_text', 'force_text', (['self.created'], {}), '(self.created)\n', (4421, 4435), False, 'from django.utils.encoding import force_text\n'), ((7404, 7439), 'django.utils.encoding.force_text', 'force_text', (['self._meta.verbose_name'], {}), '(self._meta.verbose_name)\n', (7414, 7439), False, 'from django.utils.encoding import force_text\n'), 
((10112, 10147), 'django.utils.encoding.force_text', 'force_text', (['self._meta.verbose_name'], {}), '(self._meta.verbose_name)\n', (10122, 10147), False, 'from django.utils.encoding import force_text\n'), ((13113, 13148), 'django.utils.encoding.force_text', 'force_text', (['self._meta.verbose_name'], {}), '(self._meta.verbose_name)\n', (13123, 13148), False, 'from django.utils.encoding import force_text\n')] |
import tensorflow as tf
def check_gpu():
    """Print how many GPUs TensorFlow can see on this machine."""
    gpu_devices = tf.config.experimental.list_physical_devices('GPU')
    print("Num GPUs Available: ", len(gpu_devices))
check_gpu()
| [
"tensorflow.config.experimental.list_physical_devices"
] | [((60, 111), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (104, 111), True, 'import tensorflow as tf\n')] |
from django.utils.translation import get_language
def django_settings(request):
    """Template context processor exposing the active language code as
    ``LANGUAGE`` in every template context."""
    context = {}
    context["LANGUAGE"] = get_language()
    return context
| [
"django.utils.translation.get_language"
] | [((116, 130), 'django.utils.translation.get_language', 'get_language', ([], {}), '()\n', (128, 130), False, 'from django.utils.translation import get_language\n')] |
# -*- coding: utf-8 -*-
import wx
from wx.py.shell import Shell
import scipy.ndimage as ndimg
import numpy as np
# from imagepy import IPy
from imagepy.core.engine import Free
from sciapp import Source
cmds = {'app':'app', 'np':np, 'ndimg':ndimg, 'update':None, 'get_img':None}
class Macros(dict):
    """Exposes every registered plugin as a ``run_<Name>`` callable on this
    object so plugins can be invoked from the command line shell."""

    def __init__(self):
        for i in Source.manager('plugin').names():
            # Skip non-string registry entries and the Command Line plugin
            # itself (it is the shell hosting these macros).
            if not isinstance(i, str) or i == 'Command Line':
                #print(PluginsManager.plgs[i])
                continue
            # Build a valid attribute name from the plugin title by keeping
            # only alphanumeric characters.
            name = ''.join(list(filter(str.isalnum, i)))
            # The plugin class is bound as a default argument (plg=...),
            # evaluated when exec runs, so each lambda keeps its own plugin
            # rather than sharing the final loop value of i.
            exec("self.run_%s = lambda para=None, plg=Source.manager('plugin').get(i):plg().start(cmds['app'], para)"%name)
class Plugin(wx.Panel):
    """Command line panel: an embedded Python shell whose namespace is the
    module-level ``cmds`` dict (app, np, ndimg, image helpers, macros)."""

    title = 'Command Line'
    # NOTE(review): ``single`` appears to be a framework flag — confirm its
    # meaning against the plugin engine.
    single = None

    def __init__(self, parent):
        wx.Panel.__init__ ( self, parent, id = wx.ID_ANY,
            pos = wx.DefaultPosition, size = wx.Size( 500,300 ),
            style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL )
        # Wire the shared shell namespace up to the running application.
        # NOTE(review): the lambdas reference self.app, which is not assigned
        # anywhere in this block — verify this attribute exists at call time.
        cmds['app'] = parent
        cmds['get_img'] = lambda name=None, app=self: self.app.get_img()
        cmds['update'] = lambda app=self: self.app.get_img().update()
        # The shell evaluates user input against the cmds namespace.
        shell = Shell(self, locals=cmds)
        bSizer = wx.BoxSizer( wx.VERTICAL )
        bSizer.Add( shell, 1, wx.EXPAND|wx.ALL, 5 )
        self.SetSizer(bSizer)
        # Register the plugin macros last so they see the populated cmds.
        cmds['plgs'] = Macros()
        shell.run('# plgs.run_name() to call a ImagePy plugin.\n')
shell.run('# app is avalible here, and get_img() to get the current ImagePlus, update() to redraw.\n') | [
"sciapp.Source.manager",
"wx.BoxSizer",
"wx.py.shell.Shell",
"wx.Size"
] | [((1208, 1232), 'wx.py.shell.Shell', 'Shell', (['self'], {'locals': 'cmds'}), '(self, locals=cmds)\n', (1213, 1232), False, 'from wx.py.shell import Shell\n'), ((1250, 1274), 'wx.BoxSizer', 'wx.BoxSizer', (['wx.VERTICAL'], {}), '(wx.VERTICAL)\n', (1261, 1274), False, 'import wx\n'), ((342, 366), 'sciapp.Source.manager', 'Source.manager', (['"""plugin"""'], {}), "('plugin')\n", (356, 366), False, 'from sciapp import Source\n'), ((917, 934), 'wx.Size', 'wx.Size', (['(500)', '(300)'], {}), '(500, 300)\n', (924, 934), False, 'import wx\n')] |
from django.db import models
# Create your models here.
class Car_basic(models.Model):
    """Basic registration and technical data for a vehicle.

    ``car_number`` is the identifier shared (by value, no FK declared) with
    the other ``car_*`` tables in this module.
    """

    car_number = models.CharField(max_length=10, null=True, blank=True)
    car_color = models.CharField(max_length=10, null=True, blank=True)
    register_date = models.DateTimeField(null=True, blank=True)
    user_name = models.CharField(max_length=20, null=True, blank=True)
    traffic_number = models.CharField(max_length=30, null=True, blank=True)
    # traffic_area = models.CharField(max_length=50, null=True, blank=True)
    release_time = models.DateTimeField(null=True, blank=True)
    car_type = models.CharField(max_length=10, null=True, blank=True)
    manufacture_name = models.CharField(max_length=20, null=True, blank=True)
    manufacture_type = models.CharField(max_length=50, null=True, blank=True)
    manufacture_date = models.DateTimeField(null=True, blank=True)
    # goods_type = models.CharField(max_length=10, null=True, blank=True)  # domestic / imported
    vin_number = models.CharField(max_length=50, null=True, blank=True)
    total_number = models.CharField(max_length=50, null=True, blank=True)
    # car_size = models.CharField(max_length=50, null=True, blank=True)
    # packing_size = models.CharField(max_length=50, null=True, blank=True)
    # total_number = models.CharField(max_length=50, null=True, blank=True)
    curb_weight = models.CharField(max_length=50, null=True, blank=True)
    traction_weight = models.CharField(max_length=50, null=True, blank=True)
    sure_weight = models.CharField(max_length=50, null=True, blank=True)
    sure_people_number = models.CharField(max_length=50, null=True, blank=True)
    engine_type = models.CharField(max_length=50, null=True, blank=True)
    engine_number = models.CharField(max_length=50, null=True, blank=True)
    engine_displacement = models.CharField(max_length=50, null=True, blank=True)
    engine_power = models.CharField(max_length=50, null=True, blank=True)
    displacement_standard = models.CharField(max_length=50, null=True, blank=True)
    battery_type = models.CharField(max_length=50, null=True, blank=True)
    motor_model = models.CharField(max_length=50, null=True, blank=True)
    motor_power = models.CharField(max_length=50, null=True, blank=True)
    power_type = models.CharField(max_length=20, null=True, blank=True)
    axle_number = models.CharField(max_length=10, null=True, blank=True)
    detection_year_date = models.DateTimeField(null=True, blank=True)
    year_dated = models.DateTimeField(null=True, blank=True)
    # wheelbase = models.CharField(max_length=10, null=True, blank=True)
    # tires_number = models.CharField(max_length=10, null=True, blank=True)
    # braking_system = models.CharField(max_length=10, null=True, blank=True)
    # brak_form = models.CharField(max_length=20, null=True, blank=True)
    # abs_boolean = models.CharField(max_length=10, null=True, blank=True)
    # transmission_form = models.CharField(max_length=10, null=True, blank=True)
    # slow_machine = models.CharField(max_length=10, null=True, blank=True)
    # air_system = models.CharField(max_length=10, null=True, blank=True)
    # gps_boolean = models.CharField(max_length=10, null=True, blank=True)

    class Meta:
        db_table = 'car_basic'
        verbose_name = 'car_basic'
class Car_detection(models.Model):
    """A detection/inspection record for a vehicle.

    ``car_number`` presumably matches ``Car_basic.car_number`` — no FK is
    declared, so the link is by value only.
    """

    car_number = models.CharField(max_length=10, null=True, blank=True)
    detection_name = models.CharField(max_length=30, null=True, blank=True)
    detection_date = models.DateTimeField(null=True, blank=True)
    report_id = models.CharField(max_length=30, null=True, blank=True)
    detection_validity = models.DateTimeField(null=True, blank=True)
    register_name = models.CharField(max_length=10, null=True, blank=True)
    other = models.CharField(max_length=100, null=True, blank=True)

    class Meta:
        db_table = 'car_detection'
        verbose_name = 'car_detection'
class Car_repair(models.Model):
    """A repair record for a vehicle, keyed by ``car_number`` (by value,
    no FK declared)."""

    car_number = models.CharField(max_length=10, null=True, blank=True)
    # Odometer reading at repair time, up to 9 digits with 3 decimal places.
    total_km = models.DecimalField(max_digits=12, decimal_places=3, null=True, blank=True)
    repair_date = models.DateTimeField(null=True, blank=True)
    repair_type = models.CharField(max_length=10, null=True, blank=True)
    repair_content = models.CharField(max_length=50, null=True, blank=True)
    repair_name = models.CharField(max_length=30, null=True, blank=True)
    other = models.CharField(max_length=100, null=True, blank=True)
    register_name = models.CharField(max_length=10, null=True, blank=True)
    card_number = models.CharField(max_length=20, null=True, blank=True)

    class Meta:
        db_table = 'car_repair'
        verbose_name = 'car_repair'
class Car_replace(models.Model):
    """A parts-replacement record for a vehicle, keyed by ``car_number``
    (by value, no FK declared)."""

    car_number = models.CharField(max_length=10, null=True, blank=True)
    replace_name = models.CharField(max_length=20, null=True, blank=True)
    replace_date = models.DateTimeField(null=True, blank=True)
    produce_name = models.CharField(max_length=20, null=True, blank=True)
    replace_type = models.CharField(max_length=20, null=True, blank=True)
    produce_id = models.CharField(max_length=20, null=True, blank=True)
    repair_company = models.CharField(max_length=20, null=True, blank=True)
    register_name = models.CharField(max_length=20, null=True, blank=True)
    pass_id = models.CharField(max_length=100, null=True, blank=True)

    class Meta:
        db_table = 'car_replace'
        verbose_name = 'car_replace'
class Car_change(models.Model):
    """A modification/change record for a vehicle, keyed by ``car_number``
    (by value, no FK declared)."""

    car_number = models.CharField(max_length=10, null=True, blank=True)
    change_content = models.CharField(max_length=100, null=True, blank=True)
    change_date = models.DateTimeField(null=True, blank=True)
    change_reason = models.CharField(max_length=20, null=True, blank=True)
    register_name = models.CharField(max_length=20, null=True, blank=True)
    other = models.CharField(max_length=100, null=True, blank=True)

    class Meta:
        db_table = 'car_change'
        verbose_name = 'car_change'
class Car_km(models.Model):
    """A single trip's distance record (start/end time, place and km) for a
    vehicle, keyed by ``car_number`` (by value, no FK declared)."""

    car_number = models.CharField(max_length=10, null=True, blank=True)
    start_date = models.DateTimeField(null=True, blank=True)
    end_date = models.DateTimeField(null=True, blank=True)
    start_address = models.CharField(max_length=100, null=True, blank=True)
    end_address = models.CharField(max_length=100, null=True, blank=True)
    # Distance travelled, up to 9 digits with 3 decimal places.
    km = models.DecimalField(max_digits=12, decimal_places=3, null=True, blank=True)
    register_name = models.CharField(max_length=10, null=True, blank=True)
    other = models.CharField(max_length=100, null=True, blank=True)

    class Meta:
        db_table = 'car_km'
        verbose_name = 'car_km'
class Car_traffic(models.Model):
car_number = models.CharField(max_length=10, null=True, blank=True)
traffic_location = models.CharField(max_length=30, null=True, blank=True)
traffic_date = models.DateTimeField(null=True, blank=True)
traffic_type = models.CharField(max_length=10, null=True, blank=True)
traffic_responsibility = models.CharField(max_length=30, null=True, blank=True)
register_name = models.CharField(max_length=30, null=True, blank=True)
car_situation = models.CharField(max_length=100, null=True, blank=True)
other = models.CharField(max_length=100, null=True, blank=True)
class Meta:
db_table = 'car_traffic'
verbose_name = 'car_traffic'
| [
"django.db.models.DateTimeField",
"django.db.models.DecimalField",
"django.db.models.CharField"
] | [((106, 160), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (122, 160), False, 'from django.db import models\n'), ((177, 231), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (193, 231), False, 'from django.db import models\n'), ((252, 295), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (272, 295), False, 'from django.db import models\n'), ((312, 366), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (328, 366), False, 'from django.db import models\n'), ((388, 442), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (404, 442), False, 'from django.db import models\n'), ((538, 581), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (558, 581), False, 'from django.db import models\n'), ((597, 651), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (613, 651), False, 'from django.db import models\n'), ((675, 729), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (691, 729), False, 'from django.db import models\n'), ((753, 807), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (769, 807), False, 'from django.db import models\n'), 
((831, 874), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (851, 874), False, 'from django.db import models\n'), ((975, 1029), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (991, 1029), False, 'from django.db import models\n'), ((1049, 1103), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1065, 1103), False, 'from django.db import models\n'), ((1346, 1400), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1362, 1400), False, 'from django.db import models\n'), ((1423, 1477), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1439, 1477), False, 'from django.db import models\n'), ((1496, 1550), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1512, 1550), False, 'from django.db import models\n'), ((1576, 1630), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1592, 1630), False, 'from django.db import models\n'), ((1649, 1703), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1665, 1703), False, 'from django.db import models\n'), ((1724, 1778), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', 
(1740, 1778), False, 'from django.db import models\n'), ((1805, 1859), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1821, 1859), False, 'from django.db import models\n'), ((1879, 1933), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1895, 1933), False, 'from django.db import models\n'), ((1962, 2016), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (1978, 2016), False, 'from django.db import models\n'), ((2036, 2090), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (2052, 2090), False, 'from django.db import models\n'), ((2109, 2163), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (2125, 2163), False, 'from django.db import models\n'), ((2182, 2236), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (2198, 2236), False, 'from django.db import models\n'), ((2254, 2308), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (2270, 2308), False, 'from django.db import models\n'), ((2327, 2381), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (2343, 2381), False, 'from django.db import models\n'), ((2408, 2451), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': 
'(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2428, 2451), False, 'from django.db import models\n'), ((2469, 2512), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (2489, 2512), False, 'from django.db import models\n'), ((3332, 3386), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (3348, 3386), False, 'from django.db import models\n'), ((3408, 3462), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (3424, 3462), False, 'from django.db import models\n'), ((3484, 3527), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3504, 3527), False, 'from django.db import models\n'), ((3544, 3598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (3560, 3598), False, 'from django.db import models\n'), ((3624, 3667), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3644, 3667), False, 'from django.db import models\n'), ((3688, 3742), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (3704, 3742), False, 'from django.db import models\n'), ((3755, 3810), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (3771, 3810), False, 'from django.db import models\n'), ((3953, 4007), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 
'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (3969, 4007), False, 'from django.db import models\n'), ((4023, 4098), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(12)', 'decimal_places': '(3)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=12, decimal_places=3, null=True, blank=True)\n', (4042, 4098), False, 'from django.db import models\n'), ((4117, 4160), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4137, 4160), False, 'from django.db import models\n'), ((4179, 4233), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (4195, 4233), False, 'from django.db import models\n'), ((4255, 4309), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'null': '(True)', 'blank': '(True)'}), '(max_length=50, null=True, blank=True)\n', (4271, 4309), False, 'from django.db import models\n'), ((4328, 4382), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (4344, 4382), False, 'from django.db import models\n'), ((4395, 4450), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (4411, 4450), False, 'from django.db import models\n'), ((4471, 4525), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (4487, 4525), False, 'from django.db import models\n'), ((4544, 4598), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (4560, 4598), False, 'from django.db import models\n'), 
((4736, 4790), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (4752, 4790), False, 'from django.db import models\n'), ((4810, 4864), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (4826, 4864), False, 'from django.db import models\n'), ((4884, 4927), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (4904, 4927), False, 'from django.db import models\n'), ((4947, 5001), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (4963, 5001), False, 'from django.db import models\n'), ((5021, 5075), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5037, 5075), False, 'from django.db import models\n'), ((5093, 5147), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5109, 5147), False, 'from django.db import models\n'), ((5169, 5223), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5185, 5223), False, 'from django.db import models\n'), ((5244, 5298), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5260, 5298), False, 'from django.db import models\n'), ((5313, 5368), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, 
blank=True)\n', (5329, 5368), False, 'from django.db import models\n'), ((5507, 5561), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (5523, 5561), False, 'from django.db import models\n'), ((5583, 5638), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (5599, 5638), False, 'from django.db import models\n'), ((5657, 5700), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (5677, 5700), False, 'from django.db import models\n'), ((5721, 5775), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5737, 5775), False, 'from django.db import models\n'), ((5796, 5850), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)', 'blank': '(True)'}), '(max_length=20, null=True, blank=True)\n', (5812, 5850), False, 'from django.db import models\n'), ((5863, 5918), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (5879, 5918), False, 'from django.db import models\n'), ((6051, 6105), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (6067, 6105), False, 'from django.db import models\n'), ((6123, 6166), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (6143, 6166), False, 'from django.db import models\n'), ((6182, 6225), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), 
'(null=True, blank=True)\n', (6202, 6225), False, 'from django.db import models\n'), ((6246, 6301), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (6262, 6301), False, 'from django.db import models\n'), ((6320, 6375), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (6336, 6375), False, 'from django.db import models\n'), ((6385, 6460), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'max_digits': '(12)', 'decimal_places': '(3)', 'null': '(True)', 'blank': '(True)'}), '(max_digits=12, decimal_places=3, null=True, blank=True)\n', (6404, 6460), False, 'from django.db import models\n'), ((6481, 6535), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (6497, 6535), False, 'from django.db import models\n'), ((6548, 6603), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (6564, 6603), False, 'from django.db import models\n'), ((6733, 6787), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (6749, 6787), False, 'from django.db import models\n'), ((6811, 6865), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (6827, 6865), False, 'from django.db import models\n'), ((6885, 6928), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (6905, 6928), False, 'from django.db import models\n'), ((6948, 7002), 
'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'null': '(True)', 'blank': '(True)'}), '(max_length=10, null=True, blank=True)\n', (6964, 7002), False, 'from django.db import models\n'), ((7032, 7086), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (7048, 7086), False, 'from django.db import models\n'), ((7107, 7161), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'null': '(True)', 'blank': '(True)'}), '(max_length=30, null=True, blank=True)\n', (7123, 7161), False, 'from django.db import models\n'), ((7182, 7237), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (7198, 7237), False, 'from django.db import models\n'), ((7250, 7305), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)', 'blank': '(True)'}), '(max_length=100, null=True, blank=True)\n', (7266, 7305), False, 'from django.db import models\n')] |
# build.py
import os
import platform
import sys
from distutils.core import setup
from torch.utils.ffi import create_extension
extra_compile_args = ['-std=c++11', '-fPIC']
warp_ctc_path = "../build"
if platform.system() == 'Darwin':
lib_ext = ".dylib"
else:
lib_ext = ".so"
if "WARP_CTC_PATH" in os.environ:
warp_ctc_path = os.environ["WARP_CTC_PATH"]
if not os.path.exists(os.path.join(warp_ctc_path, "libwarpctc" + lib_ext)):
print(("Could not find libwarpctc.so in {}.\n"
"Build warp-ctc and set WARP_CTC_PATH to the location of"
" libwarpctc.so (default is '../build')").format(warp_ctc_path))
sys.exit(1)
include_dirs = [os.path.realpath('../include')]
ffi = create_extension(
name='warp_ctc',
language='c++',
headers=['src/binding.h'],
sources=['src/binding.cpp'],
with_cuda=True,
include_dirs=include_dirs,
library_dirs=[os.path.realpath(warp_ctc_path)],
runtime_library_dirs=[os.path.realpath(warp_ctc_path)],
libraries=['warpctc'],
extra_compile_args=extra_compile_args)
ffi = ffi.distutils_extension()
ffi.name = 'warpctc_pytorch._warp_ctc'
setup(
name="warpctc_pytorch",
version="0.1",
packages=["warpctc_pytorch"],
ext_modules=[ffi],
)
| [
"distutils.core.setup",
"os.path.join",
"os.path.realpath",
"platform.system",
"sys.exit"
] | [((1137, 1234), 'distutils.core.setup', 'setup', ([], {'name': '"""warpctc_pytorch"""', 'version': '"""0.1"""', 'packages': "['warpctc_pytorch']", 'ext_modules': '[ffi]'}), "(name='warpctc_pytorch', version='0.1', packages=['warpctc_pytorch'],\n ext_modules=[ffi])\n", (1142, 1234), False, 'from distutils.core import setup\n'), ((204, 221), 'platform.system', 'platform.system', ([], {}), '()\n', (219, 221), False, 'import platform\n'), ((643, 654), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (651, 654), False, 'import sys\n'), ((671, 701), 'os.path.realpath', 'os.path.realpath', (['"""../include"""'], {}), "('../include')\n", (687, 701), False, 'import os\n'), ((389, 440), 'os.path.join', 'os.path.join', (['warp_ctc_path', "('libwarpctc' + lib_ext)"], {}), "(warp_ctc_path, 'libwarpctc' + lib_ext)\n", (401, 440), False, 'import os\n'), ((902, 933), 'os.path.realpath', 'os.path.realpath', (['warp_ctc_path'], {}), '(warp_ctc_path)\n', (918, 933), False, 'import os\n'), ((962, 993), 'os.path.realpath', 'os.path.realpath', (['warp_ctc_path'], {}), '(warp_ctc_path)\n', (978, 993), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 26 16:48:20 2017
@author: AC
"""
import logging
from lxml import html
import requests
import zoopla_rq
logger = logging.getLogger()
class HomeListing():
def __init__(self, config, parent_id, history_id, survey_id):
self.config = config
self.parent_id = parent_id
self.history_id = history_id
self.survey_id = survey_id
self.time = None
self.price = None
#add other attributes
def get_property_history_info(self):
property_history_url = (self.config.URL_PROPERTY_ROOT
+ str(self.room_id))
response = zoopla_rq.rq_request_with_repeats(self.config,
property_history_url)
if response is not None:
page = response.text
tree = html.fromstring(page)
self.__get_history_info_from_tree(tree)
return True
else:
return False
def __get_time(self, tree):
temp = tree.xpath('//p[@id="historic-listing-title"]/strong/text()')
time = temp[0].strip()
time = time.replace('\n', '')
self.time = time
def __get_price(self, tree):
temp = tree.xpath('//strong[@class="buyers"]/text()')
price = temp[0].strip()
self.price = price
def __get_history_info_from_tree(self, tree):
self.__get_time(tree)
self.__get_price(tree)
| [
"logging.getLogger",
"zoopla_rq.rq_request_with_repeats",
"lxml.html.fromstring"
] | [((162, 181), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (179, 181), False, 'import logging\n'), ((678, 746), 'zoopla_rq.rq_request_with_repeats', 'zoopla_rq.rq_request_with_repeats', (['self.config', 'property_history_url'], {}), '(self.config, property_history_url)\n', (711, 746), False, 'import zoopla_rq\n'), ((886, 907), 'lxml.html.fromstring', 'html.fromstring', (['page'], {}), '(page)\n', (901, 907), False, 'from lxml import html\n')] |
import math
import numpy as np
from numba import cuda, float32
from numba.cuda.testing import unittest
import numba.cuda.random
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba.cuda.random import \
xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, \
xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64
from numba.core import config
# Distributions
UNIFORM = 1
NORMAL = 2
@cuda.jit
def rng_kernel_float32(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float32(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float32(states, thread_id)
@cuda.jit
def rng_kernel_float64(states, out, count, distribution):
thread_id = cuda.grid(1)
for i in range(count):
if distribution == UNIFORM:
out[thread_id * count + i] = xoroshiro128p_uniform_float64(states, thread_id)
elif distribution == NORMAL:
out[thread_id * count + i] = xoroshiro128p_normal_float64(states, thread_id)
class TestCudaRandomXoroshiro128p(CUDATestCase):
def test_create(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def test_create_subsequence_start(self):
states = cuda.random.create_xoroshiro128p_states(10, seed=1)
s1 = states.copy_to_host()
states = cuda.random.create_xoroshiro128p_states(10, seed=1,
subsequence_start=3)
s2 = states.copy_to_host()
# Starting seeds should match up with offset of 3
np.testing.assert_array_equal(s1[3:], s2[:-3])
def test_create_stream(self):
stream = cuda.stream()
states = cuda.random.create_xoroshiro128p_states(10, seed=1, stream=stream)
s = states.copy_to_host()
self.assertEqual(len(np.unique(s)), 10)
def check_uniform(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=np.float32)
kernel_func[2, 32](states, out, 32, UNIFORM)
self.assertAlmostEqual(out.min(), 0.0, delta=1e-3)
self.assertAlmostEqual(out.max(), 1.0, delta=1e-3)
self.assertAlmostEqual(out.mean(), 0.5, delta=1.5e-2)
self.assertAlmostEqual(out.std(), 1.0/(2*math.sqrt(3)), delta=6e-3)
def test_uniform_float32(self):
self.check_uniform(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_uniform_float64(self):
self.check_uniform(rng_kernel_float64, np.float64)
def check_normal(self, kernel_func, dtype):
states = cuda.random.create_xoroshiro128p_states(32 * 2, seed=1)
out = np.zeros(2 * 32 * 32, dtype=dtype)
kernel_func[2, 32](states, out, 32, NORMAL)
self.assertAlmostEqual(out.mean(), 0.0, delta=4e-3)
self.assertAlmostEqual(out.std(), 1.0, delta=2e-3)
def test_normal_float32(self):
self.check_normal(rng_kernel_float32, np.float32)
@skip_on_cudasim('skip test for speed under cudasim')
def test_normal_float64(self):
self.check_normal(rng_kernel_float64, np.float64)
if __name__ == '__main__':
unittest.main()
| [
"numba.cuda.random.create_xoroshiro128p_states",
"numba.cuda.random.xoroshiro128p_normal_float64",
"numba.cuda.random.xoroshiro128p_uniform_float32",
"numpy.unique",
"numba.cuda.grid",
"numba.cuda.random.xoroshiro128p_normal_float32",
"math.sqrt",
"numba.cuda.random.xoroshiro128p_uniform_float64",
"... | [((512, 524), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (521, 524), False, 'from numba import cuda, float32\n'), ((891, 903), 'numba.cuda.grid', 'cuda.grid', (['(1)'], {}), '(1)\n', (900, 903), False, 'from numba import cuda, float32\n'), ((2636, 2688), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""skip test for speed under cudasim"""'], {}), "('skip test for speed under cudasim')\n", (2651, 2688), False, 'from numba.cuda.testing import skip_on_cudasim, CUDATestCase\n'), ((3228, 3280), 'numba.cuda.testing.skip_on_cudasim', 'skip_on_cudasim', (['"""skip test for speed under cudasim"""'], {}), "('skip test for speed under cudasim')\n", (3243, 3280), False, 'from numba.cuda.testing import skip_on_cudasim, CUDATestCase\n'), ((3406, 3421), 'numba.cuda.testing.unittest.main', 'unittest.main', ([], {}), '()\n', (3419, 3421), False, 'from numba.cuda.testing import unittest\n'), ((1279, 1330), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)'}), '(10, seed=1)\n', (1318, 1330), False, 'from numba import cuda, float32\n'), ((1476, 1527), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)'}), '(10, seed=1)\n', (1515, 1527), False, 'from numba import cuda, float32\n'), ((1581, 1653), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)', 'subsequence_start': '(3)'}), '(10, seed=1, subsequence_start=3)\n', (1620, 1653), False, 'from numba import cuda, float32\n'), ((1768, 1814), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['s1[3:]', 's2[:-3]'], {}), '(s1[3:], s2[:-3])\n', (1797, 1814), True, 'import numpy as np\n'), ((1867, 1880), 'numba.cuda.stream', 'cuda.stream', ([], {}), '()\n', (1878, 1880), False, 'from numba import cuda, float32\n'), ((1898, 1964), 'numba.cuda.random.create_xoroshiro128p_states', 
'cuda.random.create_xoroshiro128p_states', (['(10)'], {'seed': '(1)', 'stream': 'stream'}), '(10, seed=1, stream=stream)\n', (1937, 1964), False, 'from numba import cuda, float32\n'), ((2114, 2169), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(32 * 2)'], {'seed': '(1)'}), '(32 * 2, seed=1)\n', (2153, 2169), False, 'from numba import cuda, float32\n'), ((2184, 2223), 'numpy.zeros', 'np.zeros', (['(2 * 32 * 32)'], {'dtype': 'np.float32'}), '(2 * 32 * 32, dtype=np.float32)\n', (2192, 2223), True, 'import numpy as np\n'), ((2850, 2905), 'numba.cuda.random.create_xoroshiro128p_states', 'cuda.random.create_xoroshiro128p_states', (['(32 * 2)'], {'seed': '(1)'}), '(32 * 2, seed=1)\n', (2889, 2905), False, 'from numba import cuda, float32\n'), ((2920, 2954), 'numpy.zeros', 'np.zeros', (['(2 * 32 * 32)'], {'dtype': 'dtype'}), '(2 * 32 * 32, dtype=dtype)\n', (2928, 2954), True, 'import numpy as np\n'), ((630, 678), 'numba.cuda.random.xoroshiro128p_uniform_float32', 'xoroshiro128p_uniform_float32', (['states', 'thread_id'], {}), '(states, thread_id)\n', (659, 678), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1009, 1057), 'numba.cuda.random.xoroshiro128p_uniform_float64', 'xoroshiro128p_uniform_float64', (['states', 'thread_id'], {}), '(states, thread_id)\n', (1038, 1057), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((757, 804), 'numba.cuda.random.xoroshiro128p_normal_float32', 'xoroshiro128p_normal_float32', (['states', 'thread_id'], {}), '(states, thread_id)\n', (785, 804), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1136, 1183), 
'numba.cuda.random.xoroshiro128p_normal_float64', 'xoroshiro128p_normal_float64', (['states', 'thread_id'], {}), '(states, thread_id)\n', (1164, 1183), False, 'from numba.cuda.random import xoroshiro128p_uniform_float32, xoroshiro128p_normal_float32, xoroshiro128p_uniform_float64, xoroshiro128p_normal_float64\n'), ((1394, 1406), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (1403, 1406), True, 'import numpy as np\n'), ((2028, 2040), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (2037, 2040), True, 'import numpy as np\n'), ((2507, 2519), 'math.sqrt', 'math.sqrt', (['(3)'], {}), '(3)\n', (2516, 2519), False, 'import math\n')] |
# -*- coding: utf-8 -*-
# Functions and Script to extract data
import blocksci
import pandas as pd
import numpy as np
import networkx as nx
import multiprocessing as mp
import itertools
import random
import time
import string
import pickle
import csv
import gc
import os, sys
from functools import partial
#***********CLASSES AND FUNTIONS***********
# Class that creates a blockchain a blockchain partition (dictionary) given data range and partition type (blocks,days,weeks)
class BchainPartition():
def __init__(self,chain,start_timestamp,end_timestamp,ptype='blocks',sample_size=10):
blocks=chain.range(start=start_timestamp,end=end_timestamp)
self.block_h=blocks.height
print('Start_block: {}'.format(self.block_h[0]))
print('End_block: {}'.format(self.block_h[-1]))
if sample_size>0: #Samples blocks from the
sample_list=list(np.random.choice(self.block_h,sample_size))
sample_blocks=[chain[ix_b] for ix_b in sample_list]
txs=[b.txes for b in sample_blocks]
self.partition={h:[t for t in t_l] for h,t_l in zip(sample_list,txs)}
self.no_parts=len(sample_blocks)
else:
if ptype=='blocks':
self.partition={b.height:[tx for tx in b.txes] for b in blocks}
self.no_parts=np.int32(len(blocks))
print('Number of Blocks: {} '.format(len(blocks)))
print('Highest block height: {}'.format(blocks[-1].height))
print('Number of Transactions: {} '.format(len(txs)))
# ***TODO: Create partition for other types of partitions (use tx.block_time)
# Function that takes a blockchain partition and outputs a pandas data frame with features
# for the graph defined by each split in the partition
def partition_data(chainpartiton, directory, filename):
    """Compute graph features for every part of a blockchain partition.

    For each (index, transactions) part the transaction graph is built with
    ``graph_features``; its feature row is appended to the CSV ``filename``
    (header written on the first pass) and the graph itself is pickled into
    ``directory`` under its ``graph_id``.

    Args:
        chainpartiton: object exposing a ``partition`` dict (e.g. BchainPartition).
        directory: output directory for the pickled graphs (must exist).
        filename: CSV file the feature rows are appended to.

    Returns:
        (df, graphs): pandas DataFrame with one feature row per part, and the
        list of the corresponding networkx graphs.
    """
    partition = chainpartiton.partition
    partindex = partition.keys()
    parts = partition.values()
    data_tuples = []
    graphs = []
    print('Number of parts: {}'.format(len(partindex)))
    tuples = [(index, part) for index, part in zip(partindex, parts)]
    no_parts = len(tuples)
    processed = 0
    # Open the CSV once for the whole loop; the original re-opened the file in
    # append mode on every single iteration.
    with open(filename, 'a') as f:
        writer = csv.writer(f, delimiter=',')
        for t in tuples:
            data_i, columns_i, graph_i = graph_features(t, slice_type='blocks')
            if len(data_tuples) == 0:  # Write column names on first pass
                writer.writerow(columns_i)
            writer.writerow(data_i)
            # Save graph
            nx.write_gpickle(graph_i, directory + str(graph_i.graph['graph_id']) + '.gpickle')
            data_tuples.append((data_i, columns_i))
            graphs.append(graph_i)
            processed += 1
            progress = (processed / no_parts) * 100
            sys.stdout.write("Download progress: {:07.4f} \r".format(progress))
            sys.stdout.flush()
    # Every row shares the same column names; take them from the first part.
    columns = data_tuples[0][1]
    data = np.array([i for i, j in data_tuples])
    df = pd.DataFrame(data=data, columns=columns)
    return (df, graphs)
# Function that receives a chain part (list of transactions), generates the transaction graph and calculates statistics
def graph_features(chain_part_tuple, slice_type='blocks'):
    """Build the transaction graph for one partition part and extract features.

    Args:
        chain_part_tuple: ``(index, [tx, ...])`` pair as produced by
            ``partition_data``.
        slice_type: partition label stored on the graph ('blocks', ...).

    Returns:
        (data, columns, graph): feature values, the matching column names, and
        the networkx MultiDiGraph the features were computed on. ``data`` and
        ``columns`` stay index-aligned, so rows can be written straight to CSV.
    """
    index = chain_part_tuple[0]
    chain_part = chain_part_tuple[1]
    # Height of the last transaction's block; used as the "now" reference when
    # deciding whether an address is "old" (created in an earlier block).
    block_height = chain_part[-1].block_height
    graph = block_graph(chain_part, index, slice_type)
    # NOTE: the original called nx.info(graph) here and discarded the result;
    # that dead call has been removed.
    nodes = graph.nodes(data=True)
    edges = graph.edges(data=True)
    data = [index]
    columns = ['block_height']
    # Number of Nodes
    no_nodes = nx.number_of_nodes(graph)
    data.append(no_nodes)
    columns.append('no_nodes')
    # Number of Edges (address to address transactions)
    no_edges = nx.number_of_edges(graph)
    data.append(no_edges)
    columns.append('no_edges')
    # Total value transacted
    total_value = np.sum(np.array([a['value'] for n1, n2, a in edges]))
    data.append(total_value)
    columns.append('value_transacted')
    # Total Density
    density = nx.density(graph)
    data.append(density)
    columns.append('total_density')
    # Number of nodes with self loops
    nodes_self = nx.number_of_selfloops(graph)
    data.append(nodes_self)
    columns.append('nodes_self')
    # Total value carried by self-loop edges
    values = np.array([a['value'] for n1, n2, a in nx.selfloop_edges(graph, data=True)])
    selfloop_value = np.sum(values)
    data.append(selfloop_value)
    columns.append('selfloop_value')
    # Number of transactions to old addresses (created before this part's block)
    old_nodes = [n for n, a in nodes if a['block_created'] < block_height]
    edges_to_old = graph.in_edges(old_nodes, data=True)
    data.append(len(edges_to_old))
    columns.append('old_nodes_in')
    # Ratio of transactions to old addresses to total transactions
    # (the +1 avoids division by zero for edgeless graphs)
    ratio_oldin_totalin = len(edges_to_old) / (no_edges + 1)
    data.append(ratio_oldin_totalin)
    columns.append('ratio_oldin_totalin')
    # Value of transactions to old addresses
    value_to_old = [a['value'] for n1, n2, a in edges_to_old]
    data.append(np.sum(np.array(value_to_old)))
    columns.append('value_to_old')
    # Old address density
    old_graph = nx.induced_subgraph(graph, old_nodes)
    old_density = nx.density(old_graph)
    data.append(old_density)
    columns.append('old_density')
    # ***TODO*** (Aggregated graph analysis)
    # Accumulated reuse; dominance (agg graph or new vs. old dominance):
    # https://networkx.github.io/documentation/stable/reference/algorithms/dominance.html
    # Common ancestors (as with dominance, the address ancestor path should be
    # proportional to the blockchain length if address reuse is minimal).
    return (data, columns, graph)
# Function that creates the transaction graph for a given list of transactions
def block_graph(txs, index, slice_type):
    """Build a MultiDiGraph of address-to-address value flows for ``txs``.

    Args:
        txs: list of transactions belonging to one partition part.
        index: identifier stored as the graph's ``graph_id`` attribute.
        slice_type: partition label stored as the graph's ``slice_type``.

    Returns:
        networkx.MultiDiGraph with one node per address seen in the
        transactions and one edge per (input address, output address) pair.
    """
    graph = nx.MultiDiGraph(graph_id=index, slice_type=slice_type)
    nodes = []
    edges = []
    # Collect per-transaction node/edge lists...
    # (the original also built txs_dic/txs_ix/start_ix/end_ix and a commented
    # mp.Pool variant here; all of that was dead code and has been removed)
    for tx in txs:
        edges_i, nodes_i = extract_nodes_edges(tx)
        nodes.append(nodes_i)
        edges.append(edges_i)
    # ...then flatten them into single lists before inserting into the graph.
    nodes = list(itertools.chain.from_iterable(nodes))
    edges = list(itertools.chain.from_iterable(edges))
    graph.add_nodes_from(nodes)
    graph.add_edges_from(edges)
    return graph
# Function that receives a transaction and generates nodes and edges from the addresses in the transaction
def extract_nodes_edges(transaction):
    """Turn one transaction into graph nodes and edges.

    Each input/output address becomes a node annotated with its raw script
    type and the height of the block where the address first appeared. Each
    (input address, output address) pair becomes an edge carrying the input's
    value split evenly across all outputs.

    Args:
        transaction: a transaction exposing ``index``, ``inputs`` and
            ``outputs`` (BlockSci-style interface).

    Returns:
        (edges, nodes): lists ready for ``MultiDiGraph.add_edges_from`` /
        ``add_nodes_from``.
    """
    edges = []
    tx_id = transaction.index
    # Get inputs, types and values
    inputs = transaction.inputs.address
    input_val = transaction.inputs.value
    input_nodes = [(inp.address_num, {'raw_type': inp.raw_type, 'block_created': inp.first_tx.block.height}) for inp in inputs]
    # Get outputs and types
    outputs = transaction.outputs.address
    output_nodes = [(out.address_num, {'raw_type': out.raw_type, 'block_created': out.first_tx.block.height}) for out in outputs]
    # ****TODO: Add address balance as attribute to node****
    nodes = input_nodes + output_nodes
    # Create edges (NetworkX will automatically create nodes when given edges)
    n_out = len(outputs)
    if n_out > 0:  # guard: a transaction with no address outputs would divide by zero
        for i in range(len(inputs)):
            # Split each input's value evenly across all outputs.
            prop_value = input_val[i] / n_out
            for o in range(n_out):
                # BUG FIX: the original stored the block height under the
                # 'tx_id' key; store the transaction index the key names.
                edge = (inputs[i].address_num, outputs[o].address_num, {'value': prop_value, 'tx_id': tx_id})
                edges.append(edge)
    return edges, nodes
#***********SCRIPT***********
# Driver: samples blocks from a parsed chain, builds one transaction graph per
# sampled block, writes per-graph features to a CSV and pickles the graphs.
# Point to parsed blockchain data
ncpu=mp.cpu_count()
chain = blocksci.Blockchain("/home/ubuntu/bitcoin")  # assumes a BlockSci parse at this path — confirm on the target machine
types=blocksci.address_type.types
total_blocks=chain.blocks
print('Total Blocks up to {}: {} '.format(total_blocks[-1].time,len(total_blocks)))
#---SCRIPT: generates data for graphs in each part of the partition
# Create directories and files to store graphs and dataframe
# Generate an extraction ID (Each id has random id)
# NOTE(review): random.choice is fine for a run label; use the `secrets` module
# instead if the id ever needs to be unpredictable.
extraction_id = ''.join([random.choice(string.ascii_letters + string.digits) for n in range(6)])
print('Extraction id: {}'.format(extraction_id))
#---Save Dataframes
# Create directory and save
start='2010-02-01 00:00:00'
end='2018-02-01 11:59:59'
# NOTE(review): this `blocks` is never used below — BchainPartition re-queries
# the same range itself; candidate for removal.
blocks=chain.range(start=start,end=end)
sample_size=35000
# Sanitise the timestamps so they can be embedded in file/directory names.
start_c=start
start_c=start_c.replace('-','_').replace(' ','_').replace(':','_')
end_c=end
end_c=end_c.replace('-','_').replace(' ','_').replace(':','_')
directory='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/graphs'+'/'
if not os.path.exists(directory):
    os.makedirs(directory)
# Create Filename and save
filename='extractions/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'/'+extraction_id+'-'+str(sample_size)+'-blocks-'+start_c+'-'+end_c+'.csv'
start_time=time.time()
partition=BchainPartition(chain,start,end,sample_size=sample_size)
df,graphs=partition_data(partition,directory,filename)
df.head()
end_time=time.time()
print('Time taken={}'.format(end_time-start_time))
print('\n***EXTRACTION COMPLETED SUCCESSFULLY***')
| [
"networkx.number_of_selfloops",
"networkx.induced_subgraph",
"multiprocessing.cpu_count",
"numpy.array",
"blocksci.Blockchain",
"os.path.exists",
"networkx.info",
"itertools.chain.from_iterable",
"networkx.number_of_nodes",
"pandas.DataFrame",
"sys.stdout.flush",
"networkx.MultiDiGraph",
"ra... | [((8556, 8570), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (8568, 8570), True, 'import multiprocessing as mp\n'), ((8579, 8622), 'blocksci.Blockchain', 'blocksci.Blockchain', (['"""/home/ubuntu/bitcoin"""'], {}), "('/home/ubuntu/bitcoin')\n", (8598, 8622), False, 'import blocksci\n'), ((9788, 9799), 'time.time', 'time.time', ([], {}), '()\n', (9797, 9799), False, 'import time\n'), ((9942, 9953), 'time.time', 'time.time', ([], {}), '()\n', (9951, 9953), False, 'import time\n'), ((3245, 3259), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (3253, 3259), True, 'import numpy as np\n'), ((3267, 3313), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data[:, :]', 'columns': 'columns'}), '(data=data[:, :], columns=columns)\n', (3279, 3313), True, 'import pandas as pd\n'), ((3679, 3693), 'networkx.info', 'nx.info', (['graph'], {}), '(graph)\n', (3686, 3693), True, 'import networkx as nx\n'), ((3842, 3867), 'networkx.number_of_nodes', 'nx.number_of_nodes', (['graph'], {}), '(graph)\n', (3860, 3867), True, 'import networkx as nx\n'), ((3995, 4020), 'networkx.number_of_edges', 'nx.number_of_edges', (['graph'], {}), '(graph)\n', (4013, 4020), True, 'import networkx as nx\n'), ((4277, 4294), 'networkx.density', 'nx.density', (['graph'], {}), '(graph)\n', (4287, 4294), True, 'import networkx as nx\n'), ((4457, 4486), 'networkx.number_of_selfloops', 'nx.number_of_selfloops', (['graph'], {}), '(graph)\n', (4479, 4486), True, 'import networkx as nx\n'), ((4702, 4716), 'numpy.sum', 'np.sum', (['values'], {}), '(values)\n', (4708, 4716), True, 'import numpy as np\n'), ((5460, 5497), 'networkx.induced_subgraph', 'nx.induced_subgraph', (['graph', 'old_nodes'], {}), '(graph, old_nodes)\n', (5479, 5497), True, 'import networkx as nx\n'), ((5513, 5534), 'networkx.density', 'nx.density', (['old_graph'], {}), '(old_graph)\n', (5523, 5534), True, 'import networkx as nx\n'), ((6215, 6269), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', ([], {'graph_id': 
'index', 'slice_type': 'slice_type'}), '(graph_id=index, slice_type=slice_type)\n', (6230, 6269), True, 'import networkx as nx\n'), ((9529, 9554), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (9543, 9554), False, 'import os, sys\n'), ((9560, 9582), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (9571, 9582), False, 'import os, sys\n'), ((2905, 2923), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2921, 2923), False, 'import os, sys\n'), ((4131, 4176), 'numpy.array', 'np.array', (["[a['value'] for n1, n2, a in edges]"], {}), "([a['value'] for n1, n2, a in edges])\n", (4139, 4176), True, 'import numpy as np\n'), ((6971, 7007), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['nodes'], {}), '(nodes)\n', (7000, 7007), False, 'import itertools\n'), ((7024, 7060), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['edges'], {}), '(edges)\n', (7053, 7060), False, 'import itertools\n'), ((8977, 9028), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (8990, 9028), False, 'import random\n'), ((2318, 2346), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (2328, 2346), False, 'import csv\n'), ((5358, 5380), 'numpy.array', 'np.array', (['value_to_old'], {}), '(value_to_old)\n', (5366, 5380), True, 'import numpy as np\n'), ((898, 941), 'numpy.random.choice', 'np.random.choice', (['self.block_h', 'sample_size'], {}), '(self.block_h, sample_size)\n', (914, 941), True, 'import numpy as np\n'), ((4646, 4681), 'networkx.selfloop_edges', 'nx.selfloop_edges', (['graph'], {'data': '(True)'}), '(graph, data=True)\n', (4663, 4681), True, 'import networkx as nx\n')] |
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
from absl import logging
def generate_stand_vocab(old_vocab, new_vocab):
vocab_file = open(new_vocab, 'w')
vocab_file.write('<pad>' + '\t' + '0' + '\n')
vocab_file.write('<s>' + '\t' + '1' + '\n')
vocab_file.write('</s>' + '\t' + '2' + '\n')
vocab_file.write('<unk>' + '\t' + '3' + '\n')
vocab_file.write('<sos>' + '\t' + '4' + '\n')
vocab_file.write('<eos>' + '\t' + '5' + '\n')
idx = 6
with open(old_vocab, 'r') as f:
for i, line in enumerate(f.readlines()):
if i > 2:
vocab_file.write(line.strip() + '\t' +
str(idx) + '\n')
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
if len(sys.argv) != 3:
logging.error("Usage python {} old_vocab new_vocab".format(sys.argv[0]))
sys.exit(-1)
old_vocab = sys.argv[1]
new_vocab = sys.argv[2]
generate_stand_vocab(old_vocab, new_vocab)
| [
"absl.logging.set_verbosity",
"sys.exit"
] | [((1360, 1395), 'absl.logging.set_verbosity', 'logging.set_verbosity', (['logging.INFO'], {}), '(logging.INFO)\n', (1381, 1395), False, 'from absl import logging\n'), ((1503, 1515), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1511, 1515), False, 'import sys\n')] |
import functools
from gevent import spawn
from ..task import Task
class GeventTask(Task):
"""
Task that spawns a greenlet
"""
def start(self, *args, **kwargs):
return spawn(functools.partial(Task.start, self, *args, **kwargs))
| [
"functools.partial"
] | [((198, 250), 'functools.partial', 'functools.partial', (['Task.start', 'self', '*args'], {}), '(Task.start, self, *args, **kwargs)\n', (215, 250), False, 'import functools\n')] |
from elasticapm.instrumentation.packages.base import AbstractInstrumentedModule
from elasticapm.traces import capture_span
from elasticapm.utils import default_ports
from elasticapm.utils.compat import urlparse
def get_host_from_url(url):
parsed_url = urlparse.urlparse(url)
host = parsed_url.hostname or " "
if parsed_url.port and default_ports.get(parsed_url.scheme) != parsed_url.port:
host += ":" + str(parsed_url.port)
return host
class HttpClientTornadoInstrumentation(AbstractInstrumentedModule):
name = "tornado"
instrument_list = [("tornado.httpclient", "HTTPResponse")]
def call(self, module, method, wrapped, instance, args, kwargs):
http_request_proxy = args[0]
url = http_request_proxy.url
duration = kwargs.get('request_time', 0)
start_time = kwargs.get('start_time', 0)
print("Inicio da requisicao")
signature = "{} {}".format(http_request_proxy.method.upper(), get_host_from_url(http_request_proxy.url))
print("start time tornado")
with capture_span(signature, "ext.http.tornado", {"url": url}, leaf=True, start_time=start_time,
duration=duration):
print("Tornado test")
teste = wrapped(*args, **kwargs)
return teste
# return wrapped(*args, **kwargs)
# http_request = kwargs.get("url", None)
# kwargs__http = vars(http_request)
# del kwargs__http['_body']
# del kwargs__http['_headers']
# del kwargs__http['_body_producer']
# del kwargs__http['_streaming_callback']
# del kwargs__http['_header_callback']
# del kwargs__http['_prepare_curl_callback']
# del kwargs__http['start_time']
# url = http_request.url
# signature = "{} {}".format(http_request.method.upper(), get_host_from_url(http_request.url))
#
# with capture_span(signature, "ext.http.tornado", {"url": url}, leaf=True):
# return wrapped(*args, **kwargs__http)
| [
"elasticapm.utils.compat.urlparse.urlparse",
"elasticapm.utils.default_ports.get",
"elasticapm.traces.capture_span"
] | [((258, 280), 'elasticapm.utils.compat.urlparse.urlparse', 'urlparse.urlparse', (['url'], {}), '(url)\n', (275, 280), False, 'from elasticapm.utils.compat import urlparse\n'), ((347, 383), 'elasticapm.utils.default_ports.get', 'default_ports.get', (['parsed_url.scheme'], {}), '(parsed_url.scheme)\n', (364, 383), False, 'from elasticapm.utils import default_ports\n'), ((1062, 1176), 'elasticapm.traces.capture_span', 'capture_span', (['signature', '"""ext.http.tornado"""', "{'url': url}"], {'leaf': '(True)', 'start_time': 'start_time', 'duration': 'duration'}), "(signature, 'ext.http.tornado', {'url': url}, leaf=True,\n start_time=start_time, duration=duration)\n", (1074, 1176), False, 'from elasticapm.traces import capture_span\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-27 14:01
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('python_management', '0003_added_unique_constraint'),
]
operations = [
migrations.RemoveField(
model_name='pythonmanagementdeleterequest',
name='virtual_env_name',
),
migrations.RemoveField(
model_name='pythonmanagementfindvirtualenvsrequest',
name='virtual_env_name',
),
]
| [
"django.db.migrations.RemoveField"
] | [((307, 403), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""pythonmanagementdeleterequest"""', 'name': '"""virtual_env_name"""'}), "(model_name='pythonmanagementdeleterequest', name=\n 'virtual_env_name')\n", (329, 403), False, 'from django.db import migrations\n'), ((443, 547), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""pythonmanagementfindvirtualenvsrequest"""', 'name': '"""virtual_env_name"""'}), "(model_name='pythonmanagementfindvirtualenvsrequest',\n name='virtual_env_name')\n", (465, 547), False, 'from django.db import migrations\n')] |
# author__Farhad_Mirkarimi-*- coding: utf-8 -*-
import os
import h5py
import glob, os
import numpy as np
import matplotlib.pyplot as plt
from numpy import mean
from numpy import std
import torch
import torch.nn as nn
from tqdm.auto import tqdm, trange
from numpy.random import default_rng
import torch.nn.functional as F
import argparse
import gc
gc.collect()
print(np.version.version)
from all_params import all_params
from joint_training import joint_training
################ parsing input args###################
parser = argparse.ArgumentParser(description='provide arguments for neural capacity estimation')
#parser.add_argument('--SNR', type=int, default=[10], help='Signal to noise(unit)')
parser.add_argument('--SNR',nargs='+',type=int)
parser.add_argument('--init_epoch', type=int, default=100, help='First round epoch')
parser.add_argument('--max_epoch', type=int, default=3000, help='joint training epoch')
parser.add_argument('--seed_size', type=int, default=2, help='seed size for discrete inputs')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
parser.add_argument('--hidden_dim_critic', type=int, default=256, help='hidden dim for mi_est net')
parser.add_argument('--hidden_dim_nit', type=int, default=256, help='hidden_dim for nit net')
parser.add_argument('--dim', type=int, default=1, help='dimension for mi_est net')
parser.add_argument('--dim_nit', type=int, default=1, help='dimension for NIT net')
parser.add_argument('--layer_mi', type=int, default=4, help='layer number for mi_est net')
parser.add_argument('--layer_nit', type=int, default=4, help='layer number for nit_net')
parser.add_argument('--lr_rate_nit', type=float, default=.0001, help='training lr')
parser.add_argument('--lr_rate_mi_est', type=float, default=.00001, help='training lr')
parser.add_argument('--type_channel', type=str, default='conts_awgn', help='channel name')
parser.add_argument('--estimator', type=str, default='mine', help='estimator type')
parser.add_argument('--activation', type=str, default='relu', help='activation function')
parser.add_argument('--peak', type=float, default=None, help='peak_amplitude constraint')
parser.add_argument('--positive', type=float, default=None, help='positivity of input')
#parser.add_argument('--verbose', dest='verbose', action='store_true')
#parser.set_defaults(verbose=False)
args = parser.parse_args()
######################################################3
nit_params,critic_params=all_params(dim=args.dim,layers_critic=args.layer_mi,embed_dim=32,hidden_dim_critic=256,activation_F1='relu',lr_critic=.0001,dim_NIT=args.dim_nit,layers_NIT=args.layer_nit,hidden_dim_NIT=256,t_x_power=1,lr_NIT=.0001,channel_type=args.type_channel,peak_amp=args.peak,positive=args.positive)
batch_x0,cap= joint_training(typeinp=args.type_channel,nit_params=nit_params,critic_params=critic_params,SNR=args.SNR,estimator=args.estimator,init_epoch=args.init_epoch,max_epoch=args.max_epoch,itr_every_nit=2,itr_every_mi=5,batch_size=args.batch_size,seed_size=args.seed_size)
| [
"all_params.all_params",
"joint_training.joint_training",
"argparse.ArgumentParser",
"gc.collect"
] | [((347, 359), 'gc.collect', 'gc.collect', ([], {}), '()\n', (357, 359), False, 'import gc\n'), ((527, 619), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""provide arguments for neural capacity estimation"""'}), "(description=\n 'provide arguments for neural capacity estimation')\n", (550, 619), False, 'import argparse\n'), ((2801, 3122), 'all_params.all_params', 'all_params', ([], {'dim': 'args.dim', 'layers_critic': 'args.layer_mi', 'embed_dim': '(32)', 'hidden_dim_critic': '(256)', 'activation_F1': '"""relu"""', 'lr_critic': '(0.0001)', 'dim_NIT': 'args.dim_nit', 'layers_NIT': 'args.layer_nit', 'hidden_dim_NIT': '(256)', 't_x_power': '(1)', 'lr_NIT': '(0.0001)', 'channel_type': 'args.type_channel', 'peak_amp': 'args.peak', 'positive': 'args.positive'}), "(dim=args.dim, layers_critic=args.layer_mi, embed_dim=32,\n hidden_dim_critic=256, activation_F1='relu', lr_critic=0.0001, dim_NIT=\n args.dim_nit, layers_NIT=args.layer_nit, hidden_dim_NIT=256, t_x_power=\n 1, lr_NIT=0.0001, channel_type=args.type_channel, peak_amp=args.peak,\n positive=args.positive)\n", (2811, 3122), False, 'from all_params import all_params\n'), ((3104, 3390), 'joint_training.joint_training', 'joint_training', ([], {'typeinp': 'args.type_channel', 'nit_params': 'nit_params', 'critic_params': 'critic_params', 'SNR': 'args.SNR', 'estimator': 'args.estimator', 'init_epoch': 'args.init_epoch', 'max_epoch': 'args.max_epoch', 'itr_every_nit': '(2)', 'itr_every_mi': '(5)', 'batch_size': 'args.batch_size', 'seed_size': 'args.seed_size'}), '(typeinp=args.type_channel, nit_params=nit_params,\n critic_params=critic_params, SNR=args.SNR, estimator=args.estimator,\n init_epoch=args.init_epoch, max_epoch=args.max_epoch, itr_every_nit=2,\n itr_every_mi=5, batch_size=args.batch_size, seed_size=args.seed_size)\n', (3118, 3390), False, 'from joint_training import joint_training\n')] |
import datetime
import numpy as np
import libpySat as pySat
from astropy import _erfa as erfa
from scipy.misc import derivative
from scipy import interpolate
class TransformPolarMotion:
def __init__(self,fxp,fyp):
self.fxp=fxp
self.fyp=fyp
self.epochSave = datetime.datetime.now()
self.rotSave = np.matrix(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)
self.sprime=0.0
def __getPolarMotion(self, epoch: datetime.datetime):
"""
:param epoch:
:return: polar motion: x,y [mas]
"""
mjd=pySat.UTC2MJD(epoch)
return self.fxp(mjd),self.fyp(mjd)
def __getPolarMotionDot(self, epoch: datetime.datetime):
"""
:param epoch:
:return: polar motion: x,y [mas/s]
"""
mjd=pySat.UTC2MJD(epoch)
xpdot=derivative(self.fxp,mjd,dx=1e-3,n=1)
ypdot = derivative(self.fyp, mjd, dx=1e-3,n=1)
return xpdot,ypdot
def getMatrix_PolarMotion(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
if (epoch !=self.epochSave):
xp,yp = self.__getPolarMotion(epoch)
# TODO: Implementation of tidal and libration terms for polar motion...
xp*=np.pi/180.0/3600.0
yp*=np.pi/180.0/3600.0
sp= self.__getTIO(epoch)
#print(xp,yp,sp)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
self.rotSave=np.matmul(rs,rxy)
self.epochSave = epoch
return self.rotSave
else:
return self.rotSave
def __getTIO(self, epoch:datetime.datetime ):
"""
Gets the Terrestrial Intermediate Origin (TIO) locator s'
Terrestrial Intermediate Ref Sys (TIRS) defined by TIO and CIP.
TIRS related to to CIRS by Earth Rotation Angle
:param epoch:
:return:
"""
mjd = pySat.pySatTime.UTC2MJD(epoch)
self.sprime=erfa.sp00(2400000.5,mjd)
return self.sprime
def getMatrix_PolarMotionDot(self,epoch:datetime.datetime):
"""
Get the polar motion matrix. Relates ITRF to TIRS.
:param epoch:
:return:
"""
# TODO: Implementation of tidal and libration terms for polar motion...
xp, yp = self.__getPolarMotion(epoch)
xpDot,ypDot = self.__getPolarMotionDot(epoch)
xp *= np.pi / 180.0 / 3600.0
yp *= np.pi / 180.0 / 3600.0
xpDot*=np.pi/180.0/3600.0
ypDot*=np.pi/180.0/3600.0
spDot = -47.0 / 1.0e6 / 3600.0 / 180.0 * np.pi / 86400.0 / 36525.0
sp = self.__getTIO(epoch)
print('Pmotion dot:',xpDot,ypDot,spDot)
rxy= np.matmul(pySat.RotationMatrix3DY(xp),pySat.RotationMatrix3DX(yp))
rxyDot = np.matmul(xpDot* pySat.RotationMatrix3DY(xp), pySat.RotationMatrix3DX(yp)) \
+np.matmul( pySat.RotationMatrix3DY(xp),ypDot* pySat.RotationMatrix3DX(yp))
rs=pySat.RotationMatrix3DZ(-sp)
rsDot=-spDot*pySat.RotationMatrix3DZ(-sp)
return np.matmul(rsDot,rxy) + np.matmul(rs,rxyDot)
| [
"astropy._erfa.sp00",
"libpySat.UTC2MJD",
"libpySat.pySatTime.UTC2MJD",
"scipy.misc.derivative",
"datetime.datetime.now",
"libpySat.RotationMatrix3DZ",
"libpySat.RotationMatrix3DY",
"numpy.matmul",
"libpySat.RotationMatrix3DX",
"numpy.matrix"
] | [((288, 311), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (309, 311), False, 'import datetime\n'), ((335, 392), 'numpy.matrix', 'np.matrix', (['([0, 0, 0], [0, 0, 0], [0, 0, 0])'], {'dtype': 'float'}), '(([0, 0, 0], [0, 0, 0], [0, 0, 0]), dtype=float)\n', (344, 392), True, 'import numpy as np\n'), ((577, 597), 'libpySat.UTC2MJD', 'pySat.UTC2MJD', (['epoch'], {}), '(epoch)\n', (590, 597), True, 'import libpySat as pySat\n'), ((806, 826), 'libpySat.UTC2MJD', 'pySat.UTC2MJD', (['epoch'], {}), '(epoch)\n', (819, 826), True, 'import libpySat as pySat\n'), ((841, 881), 'scipy.misc.derivative', 'derivative', (['self.fxp', 'mjd'], {'dx': '(0.001)', 'n': '(1)'}), '(self.fxp, mjd, dx=0.001, n=1)\n', (851, 881), False, 'from scipy.misc import derivative\n'), ((894, 934), 'scipy.misc.derivative', 'derivative', (['self.fyp', 'mjd'], {'dx': '(0.001)', 'n': '(1)'}), '(self.fyp, mjd, dx=0.001, n=1)\n', (904, 934), False, 'from scipy.misc import derivative\n'), ((2056, 2086), 'libpySat.pySatTime.UTC2MJD', 'pySat.pySatTime.UTC2MJD', (['epoch'], {}), '(epoch)\n', (2079, 2086), True, 'import libpySat as pySat\n'), ((2107, 2132), 'astropy._erfa.sp00', 'erfa.sp00', (['(2400000.5)', 'mjd'], {}), '(2400000.5, mjd)\n', (2116, 2132), True, 'from astropy import _erfa as erfa\n'), ((3106, 3134), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (3129, 3134), True, 'import libpySat as pySat\n'), ((1549, 1577), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (1572, 1577), True, 'import libpySat as pySat\n'), ((1603, 1621), 'numpy.matmul', 'np.matmul', (['rs', 'rxy'], {}), '(rs, rxy)\n', (1612, 1621), True, 'import numpy as np\n'), ((2851, 2878), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (2874, 2878), True, 'import libpySat as pySat\n'), ((2879, 2906), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (2902, 2906), True, 'import 
libpySat as pySat\n'), ((3156, 3184), 'libpySat.RotationMatrix3DZ', 'pySat.RotationMatrix3DZ', (['(-sp)'], {}), '(-sp)\n', (3179, 3184), True, 'import libpySat as pySat\n'), ((3201, 3222), 'numpy.matmul', 'np.matmul', (['rsDot', 'rxy'], {}), '(rsDot, rxy)\n', (3210, 3222), True, 'import numpy as np\n'), ((3224, 3245), 'numpy.matmul', 'np.matmul', (['rs', 'rxyDot'], {}), '(rs, rxyDot)\n', (3233, 3245), True, 'import numpy as np\n'), ((1477, 1504), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (1500, 1504), True, 'import libpySat as pySat\n'), ((1505, 1532), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (1528, 1532), True, 'import libpySat as pySat\n'), ((2971, 2998), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (2994, 2998), True, 'import libpySat as pySat\n'), ((3031, 3058), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (3054, 3058), True, 'import libpySat as pySat\n'), ((2942, 2969), 'libpySat.RotationMatrix3DY', 'pySat.RotationMatrix3DY', (['xp'], {}), '(xp)\n', (2965, 2969), True, 'import libpySat as pySat\n'), ((3066, 3093), 'libpySat.RotationMatrix3DX', 'pySat.RotationMatrix3DX', (['yp'], {}), '(yp)\n', (3089, 3093), True, 'import libpySat as pySat\n')] |
import numpy as np
import pandas as pd
class WetChickenBaselinePolicy:
def __init__(self, env, gamma, method='heuristic', epsilon=0.1, convergence=0.1, learning_rate=0.1, max_nb_it=999,
order_epsilon=3, order_learning_rate=3):
self.env = env
self.gamma = gamma
self.nb_states = env.width * env.length
self.nb_actions = 5
self.pi = np.ones((self.nb_states, self.nb_actions)) / self.nb_actions
self.epsilon = epsilon
self.convergence = convergence
self.learning_rate = learning_rate
self.method = method
self.max_nb_it = max_nb_it
self.order_epsilon = order_epsilon
self.order_learning_rate = order_learning_rate
self.compute_baseline()
def compute_baseline(self):
if self.method == 'fixed_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += self.learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = self.epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - self.epsilon
nb_it += 1
elif self.method == 'variable_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
learning_rate = self.learning_rate * 1 / nb_it ** (1 / self.order_learning_rate)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'variable_learning':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
learning_rate = self.learning_rate * 1 / nb_it ** (1 / self.order_learning_rate)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'state_count_dependent_variable':
old_q = np.zeros((self.nb_states, self.nb_actions))
q = np.ones((self.nb_states, self.nb_actions)) * 1 / (1 - self.gamma) * 4 # Optimistic initialisation
nb_it = 0
state = self.env.get_state_int()
# while (np.linalg.norm(old_q - q) > self.convergence) and nb_it < 999:
count_state_action = np.zeros((self.nb_states, self.nb_actions))
while nb_it < self.max_nb_it:
nb_it += 1
epsilon = self.epsilon * 1 / nb_it ** (1 / self.order_epsilon)
action = np.random.choice(self.pi.shape[1], p=self.pi[state])
count_state_action[state, action] += 1
learning_rate = self.learning_rate * 1 / count_state_action[state, action] ** (
1 / self.order_learning_rate)
state, reward, next_state = self.env.step(action)
old_q = q.copy()
q[state, action] += learning_rate * (
reward + self.gamma * np.max(q[next_state, :]) - q[state, action])
self.pi = epsilon * np.ones((self.nb_states, self.nb_actions)) / 5
for s in range(self.nb_states):
self.pi[s, np.argmax(q[s, :])] += 1 - epsilon
elif self.method == 'heuristic':
# Try to get to in the middle of the river and then paddle as strong as possible against the stream
# I.e. try to get to state (2,2), as a number 12, and then choose action 2
pi = np.zeros((self.nb_states, self.nb_actions))
for state in range(self.nb_states):
for action in range(self.nb_actions):
x, y = int(state / self.nb_actions), state % self.nb_actions
if x > 2:
pi[state, 2] = 1 # We are too close to the waterfall ==> paddle as strong as possible
elif y < 2:
pi[state, 4] = 1 # We are not in immediate danger, but too close to the left ==> go right
elif y > 2:
pi[state, 3] = 1 # We are not in immediate danger, but too close to the right ==> go left
elif x == 2:
pi[state, 2] = 1 # We are perfect now, try to keep the position by paddling as strong as poss
elif x == 1:
pi[state, 1] = 1 # Close to perfect, just paddle a bit
else:
pi[state, 0] = 1 # Right lane, but too high up, just drift with the river
self.pi = (1 - self.epsilon) * pi + self.epsilon * self.pi
else:
print(
f'Method {self.method} is not available. Only acceptable methods are: \'heuristic\' and \'state_count_dependent_learning\' ')
class ContinuousWetChickenHeuristic:
    """Epsilon-greedy hand-crafted policy for the continuous wet-chicken task.

    The heuristic steers the canoe toward the sweet spot just above the
    waterfall and in the middle of the river, then mixes the resulting
    greedy one-hot action distribution with a uniform distribution over
    the 5 actions, weighted by ``epsilon``.
    """

    def __init__(self, epsilon):
        # Fraction of probability mass spread uniformly over the 5 actions.
        self.epsilon = epsilon

    def pi(self, state):
        """Return a length-5 array of action probabilities for *state* = (x, y).

        x is the downstream position (waterfall at large x), y the lateral
        position. Assumes 5 actions indexed 0..4 — TODO confirm ordering
        (0: drift, 1: paddle softly, 2: paddle hardest, 3: left, 4: right)
        against the environment definition.
        """
        x, y = state[0], state[1]
        # Choose the single greedy action for this position.
        if x > 2.5:
            greedy = 2  # too close to the waterfall -> paddle as hard as possible
        elif y < 2:
            greedy = 4  # no immediate danger but too far left -> go right
        elif y > 3:
            greedy = 3  # no immediate danger but too far right -> go left
        elif x > 2:
            greedy = 2  # ideal row -> hold position by paddling hard
        elif x > 1:
            greedy = 1  # close to ideal -> paddle a little
        else:
            greedy = 0  # right lane but too far upstream -> drift with the river
        one_hot = np.zeros(5)
        one_hot[greedy] = 1
        # Blend greedy choice with uniform exploration.
        return (1 - self.epsilon) * one_hot + self.epsilon * 1 / 5
| [
"numpy.ones",
"numpy.random.choice",
"numpy.argmax",
"numpy.max",
"numpy.zeros"
] | [((7130, 7141), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (7138, 7141), True, 'import numpy as np\n'), ((394, 436), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (401, 436), True, 'import numpy as np\n'), ((859, 902), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (867, 902), True, 'import numpy as np\n'), ((1236, 1288), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (1252, 1288), True, 'import numpy as np\n'), ((1841, 1884), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1849, 1884), True, 'import numpy as np\n'), ((2421, 2473), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (2437, 2473), True, 'import numpy as np\n'), ((2984, 3027), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (2992, 3027), True, 'import numpy as np\n'), ((919, 961), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (926, 961), True, 'import numpy as np\n'), ((1579, 1621), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1586, 1621), True, 'import numpy as np\n'), ((3564, 3616), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (3580, 3616), True, 'import numpy as np\n'), ((4140, 4183), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (4148, 4183), True, 'import numpy as np\n'), ((4483, 4526), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, 
self.nb_actions))\n', (4491, 4526), True, 'import numpy as np\n'), ((1705, 1723), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (1714, 1723), True, 'import numpy as np\n'), ((1901, 1943), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (1908, 1943), True, 'import numpy as np\n'), ((2754, 2796), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (2761, 2796), True, 'import numpy as np\n'), ((4700, 4752), 'numpy.random.choice', 'np.random.choice', (['self.pi.shape[1]'], {'p': 'self.pi[state]'}), '(self.pi.shape[1], p=self.pi[state])\n', (4716, 4752), True, 'import numpy as np\n'), ((5656, 5699), 'numpy.zeros', 'np.zeros', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (5664, 5699), True, 'import numpy as np\n'), ((1493, 1517), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (1499, 1517), True, 'import numpy as np\n'), ((2880, 2898), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (2889, 2898), True, 'import numpy as np\n'), ((3044, 3086), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (3051, 3086), True, 'import numpy as np\n'), ((3897, 3939), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (3904, 3939), True, 'import numpy as np\n'), ((2673, 2697), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (2679, 2697), True, 'import numpy as np\n'), ((4023, 4041), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (4032, 4041), True, 'import numpy as np\n'), ((4200, 4242), 'numpy.ones', 'np.ones', (['(self.nb_states, self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (4207, 4242), True, 'import numpy as np\n'), ((5238, 5280), 'numpy.ones', 'np.ones', (['(self.nb_states, 
self.nb_actions)'], {}), '((self.nb_states, self.nb_actions))\n', (5245, 5280), True, 'import numpy as np\n'), ((3816, 3840), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (3822, 3840), True, 'import numpy as np\n'), ((5364, 5382), 'numpy.argmax', 'np.argmax', (['q[s, :]'], {}), '(q[s, :])\n', (5373, 5382), True, 'import numpy as np\n'), ((5157, 5181), 'numpy.max', 'np.max', (['q[next_state, :]'], {}), '(q[next_state, :])\n', (5163, 5181), True, 'import numpy as np\n')] |