code: string, lengths 22 to 1.05M
apis: list, lengths 1 to 3.31k
extract_api: string, lengths 75 to 3.25M
import os
import sys
import argparse

import numpy as np
import theano.tensor as T

homepath = os.path.join('..', '..')

if homepath not in sys.path:
    sys.path.insert(0, homepath)

from dlearn.models.layer import FullConnLayer, ConvPoolLayer
from dlearn.models.nnet import NeuralNet
from dlearn.utils import actfuncs, costfuncs
from dlearn.utils.serialize import load_data, save_data
from dlearn.optimization import sgd

# Program arguments parser
desctxt = """
Train latent network. Use learned attribute and segmentation network.
"""

dataset_txt = """
The input dataset data_name.pkl.
"""

attr_txt = """
The attribute network model_name.pkl.
"""

seg_txt = """
The segmentation network model_name.pkl.
"""

output_txt = """
If not specified, the output model will be saved as model_latent.pkl.
Otherwise it will be saved as model_latent_name.pkl.
"""

parser = argparse.ArgumentParser(description=desctxt)

parser.add_argument('-d', '--dataset', nargs=1, required=True,
                    metavar='name', help=dataset_txt)
parser.add_argument('-a', '--attribute', nargs=1, required=True,
                    metavar='name', help=attr_txt)
parser.add_argument('-s', '--segmentation', nargs=1, required=True,
                    metavar='name', help=seg_txt)
parser.add_argument('-o', '--output', nargs='?', default=None,
                    metavar='name', help=output_txt)

args = parser.parse_args()


def train_model(dataset, attr_model, seg_model):

    def shape_constrained_pooling(fmaps):
        s = fmaps.sum(axis=[2, 3])
        Z = abs(actfuncs.tanh(fmaps)).sum(axis=[2, 3])
        return s / Z

    X = T.tensor4()
    A = T.matrix()

    feature_layers = []
    feature_layers.append(ConvPoolLayer(
        input=X,
        input_shape=(3, 160, 80),
        filter_shape=(32, 3, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
        W=attr_model.blocks[0]._W,
        b=0.0
    ))

    feature_layers.append(ConvPoolLayer(
        input=feature_layers[-1].output,
        input_shape=feature_layers[-1].output_shape,
        filter_shape=(64, 32, 5, 5),
        pool_shape=(2, 2),
        active_func=actfuncs.tanh,
        flatten=False,
        W=attr_model.blocks[1]._W,
        b=0.0
    ))

    seg_layers = []
    seg_layers.append(FullConnLayer(
        input=feature_layers[-1].output.flatten(2),
        input_shape=np.prod(feature_layers[-1].output_shape),
        output_shape=1024,
        dropout_ratio=0.1,
        active_func=actfuncs.tanh,
        W=seg_model.blocks[2]._W,
        b=seg_model.blocks[2]._b
    ))

    seg_layers.append(FullConnLayer(
        input=seg_layers[-1].output,
        input_shape=seg_layers[-1].output_shape,
        output_shape=37 * 17,
        dropout_input=seg_layers[-1].dropout_output,
        active_func=actfuncs.sigmoid,
        W=seg_model.blocks[3]._W,
        b=seg_model.blocks[3]._b
    ))

    S = seg_layers[-1].output
    S = S * (S >= 0.1)
    S = S.reshape((S.shape[0], 37, 17))
    S = S.dimshuffle(0, 'x', 1, 2)

    S_dropout = seg_layers[-1].dropout_output
    S_dropout = S_dropout * (S_dropout >= 0.1)
    S_dropout = S_dropout.reshape((S_dropout.shape[0], 37, 17))
    S_dropout = S_dropout.dimshuffle(0, 'x', 1, 2)

    attr_layers = []
    '''
    attr_layers.append(ConvPoolLayer(
        input=feature_layers[-1].output * S,
        input_shape=feature_layers[-1].output_shape,
        filter_shape=(128, 64, 3, 3),
        pool_shape=(2, 2),
        dropout_input=feature_layers[-1].output * S_dropout,
        active_func=actfuncs.tanh,
        flatten=False,
        W=attr_model.blocks[2]._W,
        b=0.0
    ))
    '''
    attr_layers.append(FullConnLayer(
        input=shape_constrained_pooling(feature_layers[-1].output * S),
        input_shape=feature_layers[-1].output_shape,
        output_shape=64,
        dropout_input=shape_constrained_pooling(
            feature_layers[-1].dropout_output * S_dropout),
        dropout_ratio=0.1,
        active_func=actfuncs.tanh,
        W=attr_model.blocks[2]._W,
        b=attr_model.blocks[2]._b
    ))

    attr_layers.append(FullConnLayer(
        input=attr_layers[-1].output,
        input_shape=attr_layers[-1].output_shape,
        output_shape=11,
        dropout_input=attr_layers[-1].dropout_output,
        active_func=actfuncs.sigmoid,
        W=attr_model.blocks[3]._W,
        b=attr_model.blocks[3]._b
    ))

    model = NeuralNet(feature_layers + seg_layers + attr_layers,
                      X, attr_layers[-1].output)
    model.target = A
    model.cost = costfuncs.binxent(attr_layers[-1].dropout_output, A) + \
        1e-3 * model.get_norm(2)
    model.error = costfuncs.binerr(attr_layers[-1].output, A)

    sgd.train(model, dataset, lr=1e-3, momentum=0.9,
              batch_size=100, n_epochs=300, epoch_waiting=10)

    return model


if __name__ == '__main__':
    dataset_file = 'data_{0}.pkl'.format(args.dataset[0])
    attr_file = 'model_{0}.pkl'.format(args.attribute[0])
    seg_file = 'model_{0}.pkl'.format(args.segmentation[0])

    out_file = 'model_latent.pkl' if args.output is None else \
        'model_latent_{0}.pkl'.format(args.output)

    dataset = load_data(dataset_file)
    attr_model = load_data(attr_file)
    seg_model = load_data(seg_file)
    model = train_model(dataset, attr_model, seg_model)
    save_data(model, out_file)
[ "argparse.ArgumentParser", "theano.tensor.tensor4", "dlearn.utils.serialize.load_data", "dlearn.optimization.sgd.train", "dlearn.utils.costfuncs.binxent", "sys.path.insert", "dlearn.utils.costfuncs.binerr", "dlearn.models.layer.ConvPoolLayer", "numpy.prod", "dlearn.utils.actfuncs.tanh", "dlearn.utils.serialize.save_data", "dlearn.models.nnet.NeuralNet", "os.path.join", "dlearn.models.layer.FullConnLayer", "theano.tensor.matrix" ]
[((94, 118), 'os.path.join', 'os.path.join', (['""".."""', '""".."""'], {}), "('..', '..')\n", (106, 118), False, 'import os\n'), ((868, 912), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desctxt'}), '(description=desctxt)\n', (891, 912), False, 'import argparse\n'), ((153, 181), 'sys.path.insert', 'sys.path.insert', (['(0)', 'homepath'], {}), '(0, homepath)\n', (168, 181), False, 'import sys\n'), ((1622, 1633), 'theano.tensor.tensor4', 'T.tensor4', ([], {}), '()\n', (1631, 1633), True, 'import theano.tensor as T\n'), ((1642, 1652), 'theano.tensor.matrix', 'T.matrix', ([], {}), '()\n', (1650, 1652), True, 'import theano.tensor as T\n'), ((4436, 4515), 'dlearn.models.nnet.NeuralNet', 'NeuralNet', (['(feature_layers + seg_layers + attr_layers)', 'X', 'attr_layers[-1].output'], {}), '(feature_layers + seg_layers + attr_layers, X, attr_layers[-1].output)\n', (4445, 4515), False, 'from dlearn.models.nnet import NeuralNet\n'), ((4685, 4728), 'dlearn.utils.costfuncs.binerr', 'costfuncs.binerr', (['attr_layers[-1].output', 'A'], {}), '(attr_layers[-1].output, A)\n', (4701, 4728), False, 'from dlearn.utils import actfuncs, costfuncs\n'), ((4734, 4836), 'dlearn.optimization.sgd.train', 'sgd.train', (['model', 'dataset'], {'lr': '(0.001)', 'momentum': '(0.9)', 'batch_size': '(100)', 'n_epochs': '(300)', 'epoch_waiting': '(10)'}), '(model, dataset, lr=0.001, momentum=0.9, batch_size=100, n_epochs=\n 300, epoch_waiting=10)\n', (4743, 4836), False, 'from dlearn.optimization import sgd\n'), ((5219, 5242), 'dlearn.utils.serialize.load_data', 'load_data', (['dataset_file'], {}), '(dataset_file)\n', (5228, 5242), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5260, 5280), 'dlearn.utils.serialize.load_data', 'load_data', (['attr_file'], {}), '(attr_file)\n', (5269, 5280), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5297, 5316), 'dlearn.utils.serialize.load_data', 'load_data', (['seg_file'], {}), '(seg_file)\n', (5306, 5316), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((5379, 5405), 'dlearn.utils.serialize.save_data', 'save_data', (['model', 'out_file'], {}), '(model, out_file)\n', (5388, 5405), False, 'from dlearn.utils.serialize import load_data, save_data\n'), ((1704, 1884), 'dlearn.models.layer.ConvPoolLayer', 'ConvPoolLayer', ([], {'input': 'X', 'input_shape': '(3, 160, 80)', 'filter_shape': '(32, 3, 5, 5)', 'pool_shape': '(2, 2)', 'active_func': 'actfuncs.tanh', 'flatten': '(False)', 'W': 'attr_model.blocks[0]._W', 'b': '(0.0)'}), '(input=X, input_shape=(3, 160, 80), filter_shape=(32, 3, 5, 5),\n pool_shape=(2, 2), active_func=actfuncs.tanh, flatten=False, W=\n attr_model.blocks[0]._W, b=0.0)\n', (1717, 1884), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((1974, 2198), 'dlearn.models.layer.ConvPoolLayer', 'ConvPoolLayer', ([], {'input': 'feature_layers[-1].output', 'input_shape': 'feature_layers[-1].output_shape', 'filter_shape': '(64, 32, 5, 5)', 'pool_shape': '(2, 2)', 'active_func': 'actfuncs.tanh', 'flatten': '(False)', 'W': 'attr_model.blocks[1]._W', 'b': '(0.0)'}), '(input=feature_layers[-1].output, input_shape=feature_layers[-\n 1].output_shape, filter_shape=(64, 32, 5, 5), pool_shape=(2, 2),\n active_func=actfuncs.tanh, flatten=False, W=attr_model.blocks[1]._W, b=0.0)\n', (1987, 2198), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((2619, 2865), 'dlearn.models.layer.FullConnLayer', 'FullConnLayer', ([], {'input': 
'seg_layers[-1].output', 'input_shape': 'seg_layers[-1].output_shape', 'output_shape': '(37 * 17)', 'dropout_input': 'seg_layers[-1].dropout_output', 'active_func': 'actfuncs.sigmoid', 'W': 'seg_model.blocks[3]._W', 'b': 'seg_model.blocks[3]._b'}), '(input=seg_layers[-1].output, input_shape=seg_layers[-1].\n output_shape, output_shape=37 * 17, dropout_input=seg_layers[-1].\n dropout_output, active_func=actfuncs.sigmoid, W=seg_model.blocks[3]._W,\n b=seg_model.blocks[3]._b)\n', (2632, 2865), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((4127, 4373), 'dlearn.models.layer.FullConnLayer', 'FullConnLayer', ([], {'input': 'attr_layers[-1].output', 'input_shape': 'attr_layers[-1].output_shape', 'output_shape': '(11)', 'dropout_input': 'attr_layers[-1].dropout_output', 'active_func': 'actfuncs.sigmoid', 'W': 'attr_model.blocks[3]._W', 'b': 'attr_model.blocks[3]._b'}), '(input=attr_layers[-1].output, input_shape=attr_layers[-1].\n output_shape, output_shape=11, dropout_input=attr_layers[-1].\n dropout_output, active_func=actfuncs.sigmoid, W=attr_model.blocks[3]._W,\n b=attr_model.blocks[3]._b)\n', (4140, 4373), False, 'from dlearn.models.layer import FullConnLayer, ConvPoolLayer\n'), ((4577, 4629), 'dlearn.utils.costfuncs.binxent', 'costfuncs.binxent', (['attr_layers[-1].dropout_output', 'A'], {}), '(attr_layers[-1].dropout_output, A)\n', (4594, 4629), False, 'from dlearn.utils import actfuncs, costfuncs\n'), ((2391, 2431), 'numpy.prod', 'np.prod', (['feature_layers[-1].output_shape'], {}), '(feature_layers[-1].output_shape)\n', (2398, 2431), True, 'import numpy as np\n'), ((1553, 1573), 'dlearn.utils.actfuncs.tanh', 'actfuncs.tanh', (['fmaps'], {}), '(fmaps)\n', (1566, 1573), False, 'from dlearn.utils import actfuncs, costfuncs\n')]
import pytest

from zentropi import Agent
from zentropi import Frame
from zentropi import WebsocketTransport
from zentropi.transport import websocket


class MockWebsockets(object):
    def __init__(self, login_ok=True, send_ok=True, recv_ok=True):
        self._login_ok = login_ok
        self._send_ok = send_ok
        self._recv_ok = recv_ok
        self.frame = None

    async def connect(self, endpoint):
        return self

    async def close(self):
        pass

    async def send(self, data):
        if not self._send_ok:
            raise ConnectionAbortedError()
        frame = Frame.from_json(data)
        if frame.name == 'login':
            if self._login_ok:
                self.frame = frame.reply('login-ok').to_json()
            else:
                self.frame = frame.reply('login-failed').to_json()
            return
        self.frame = data

    async def recv(self):
        if not self._recv_ok:
            raise ConnectionAbortedError()
        return self.frame


@pytest.mark.asyncio
async def test_websocket_transport(monkeypatch):
    monkeypatch.setattr(websocket, 'websockets', MockWebsockets())
    wt = WebsocketTransport()
    frame = Frame('test-frame')
    await wt.connect('ws://localhost:6789/', 'test-token')
    assert wt.connected is True
    await wt.send(frame)
    assert wt.connection.frame
    frame_recv = await wt.recv()
    assert frame_recv.name == 'test-frame'
    await wt.close()
    assert wt.connected is False


@pytest.mark.asyncio
@pytest.mark.xfail(raises=PermissionError)
async def test_websocket_transport_login_fail(monkeypatch):
    monkeypatch.setattr(websocket, 'websockets', MockWebsockets(login_ok=False))
    wt = WebsocketTransport()
    frame = Frame('test-frame')
    await wt.connect('ws://localhost:6789/', 'test-token')


@pytest.mark.asyncio
@pytest.mark.xfail(raises=ConnectionError)
async def test_websocket_transport_send_fail(monkeypatch):
    monkeypatch.setattr(websocket, 'websockets', MockWebsockets(send_ok=False))
    wt = WebsocketTransport()
    frame = Frame('test-frame')
    await wt.connect('ws://localhost:6789/', 'test-token')


@pytest.mark.asyncio
@pytest.mark.xfail(raises=ConnectionError)
async def test_websocket_transport_recv_fail(monkeypatch):
    monkeypatch.setattr(websocket, 'websockets', MockWebsockets(recv_ok=False))
    wt = WebsocketTransport()
    frame = Frame('test-frame')
    await wt.connect('ws://localhost:6789/', 'test-token')


# @pytest.mark.asyncio
# async def test_agent_with_websocket_endpoint():
#     a = Agent('test-agent')
#     test_event_handler_was_run = False

#     @a.on_event('startup')
#     async def startup(frame):  # pragma: no cover
#         await a.connect('ws://localhost:6789/', 'test-token')
#         await a.event('test')

#     @a.on_event('test')
#     async def test(frame):  # pragma: no cover
#         nonlocal test_event_handler_was_run
#         test_event_handler_was_run = True
#         await a.close()
#         a.stop()

#     await a.start()
#     assert test_event_handler_was_run is True


# @pytest.mark.asyncio
# @pytest.mark.xfail(raises=ConnectionError)
# async def test_agent_with_websocket_login_fail():
#     a = Agent('test-agent')

#     @a.on_event('startup')
#     async def startup(frame):  # pragma: no cover
#         await a.connect('ws://localhost:6789/', 'fail-token')

#     await a.start()
[ "zentropi.WebsocketTransport", "zentropi.Frame.from_json", "zentropi.Frame", "pytest.mark.xfail" ]
[((1511, 1552), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'PermissionError'}), '(raises=PermissionError)\n', (1528, 1552), False, 'import pytest\n'), ((1839, 1880), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ConnectionError'}), '(raises=ConnectionError)\n', (1856, 1880), False, 'import pytest\n'), ((2164, 2205), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'raises': 'ConnectionError'}), '(raises=ConnectionError)\n', (2181, 2205), False, 'import pytest\n'), ((1156, 1176), 'zentropi.WebsocketTransport', 'WebsocketTransport', ([], {}), '()\n', (1174, 1176), False, 'from zentropi import WebsocketTransport\n'), ((1189, 1208), 'zentropi.Frame', 'Frame', (['"""test-frame"""'], {}), "('test-frame')\n", (1194, 1208), False, 'from zentropi import Frame\n'), ((1703, 1723), 'zentropi.WebsocketTransport', 'WebsocketTransport', ([], {}), '()\n', (1721, 1723), False, 'from zentropi import WebsocketTransport\n'), ((1736, 1755), 'zentropi.Frame', 'Frame', (['"""test-frame"""'], {}), "('test-frame')\n", (1741, 1755), False, 'from zentropi import Frame\n'), ((2029, 2049), 'zentropi.WebsocketTransport', 'WebsocketTransport', ([], {}), '()\n', (2047, 2049), False, 'from zentropi import WebsocketTransport\n'), ((2062, 2081), 'zentropi.Frame', 'Frame', (['"""test-frame"""'], {}), "('test-frame')\n", (2067, 2081), False, 'from zentropi import Frame\n'), ((2354, 2374), 'zentropi.WebsocketTransport', 'WebsocketTransport', ([], {}), '()\n', (2372, 2374), False, 'from zentropi import WebsocketTransport\n'), ((2387, 2406), 'zentropi.Frame', 'Frame', (['"""test-frame"""'], {}), "('test-frame')\n", (2392, 2406), False, 'from zentropi import Frame\n'), ((596, 617), 'zentropi.Frame.from_json', 'Frame.from_json', (['data'], {}), '(data)\n', (611, 617), False, 'from zentropi import Frame\n')]
from typing import Generator, Mapping, Union

from flask_babel import lazy_gettext

from app.questionnaire.location import Location

from .context import Context
from .section_summary_context import SectionSummaryContext


class SubmitQuestionnaireContext(Context):
    def __call__(
        self, answers_are_editable: bool = True
    ) -> dict[str, Union[str, dict]]:
        summary_options = self._schema.get_summary_options()
        collapsible = summary_options.get("collapsible", False)
        submission_schema: Mapping = self._schema.get_submission() or {}

        title = submission_schema.get("title") or lazy_gettext(
            "Check your answers and submit"
        )
        submit_button = submission_schema.get("button") or lazy_gettext(
            "Submit answers"
        )
        guidance = submission_schema.get("guidance") or lazy_gettext(
            "Please submit this survey to complete it"
        )
        warning = submission_schema.get("warning") or None

        context = {
            "title": title,
            "guidance": guidance,
            "warning": warning,
            "submit_button": submit_button,
        }

        if summary_options:
            context["summary"] = self._get_summary_context(
                collapsible, answers_are_editable
            )

        return context

    def _get_summary_context(
        self, collapsible: bool, answers_are_editable: bool
    ) -> dict[str, Union[list, bool, str]]:
        groups = list(self._build_all_groups())
        return {
            "groups": groups,
            "answers_are_editable": answers_are_editable,
            "collapsible": collapsible,
            "summary_type": "Summary",
        }

    def _build_all_groups(self) -> Generator[dict, None, None]:
        """
        NB: Does not support repeating sections
        """
        for section_id in self._router.enabled_section_ids:
            location = Location(section_id=section_id)
            section_summary_context = SectionSummaryContext(
                language=self._language,
                schema=self._schema,
                answer_store=self._answer_store,
                list_store=self._list_store,
                progress_store=self._progress_store,
                metadata=self._metadata,
                current_location=location,
                return_to="final-summary",
                routing_path=self._router.routing_path(section_id),
            )
            section: Mapping = self._schema.get_section(section_id) or {}
            if section.get("summary", {}).get("items"):
                break
            for group in section_summary_context()["summary"]["groups"]:
                yield group
[ "app.questionnaire.location.Location", "flask_babel.lazy_gettext" ]
[((619, 664), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Check your answers and submit"""'], {}), "('Check your answers and submit')\n", (631, 664), False, 'from flask_babel import lazy_gettext\n'), ((746, 776), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Submit answers"""'], {}), "('Submit answers')\n", (758, 776), False, 'from flask_babel import lazy_gettext\n'), ((855, 911), 'flask_babel.lazy_gettext', 'lazy_gettext', (['"""Please submit this survey to complete it"""'], {}), "('Please submit this survey to complete it')\n", (867, 911), False, 'from flask_babel import lazy_gettext\n'), ((1921, 1952), 'app.questionnaire.location.Location', 'Location', ([], {'section_id': 'section_id'}), '(section_id=section_id)\n', (1929, 1952), False, 'from app.questionnaire.location import Location\n')]
import os
import sys

# Optional branch-name argument (currently unused below).
if len(sys.argv) > 1:
    branch = sys.argv[1]
else:
    branch = ' '

# Switch git log to local dates, then record the last commit timestamp.
os.chdir('../')
os.system("git config --global log.date local")
os.chdir('../../')
os.system("git log -1 --format=\"%ci\" > gitTime.txt")
[ "os.system", "os.chdir" ]
[((98, 113), 'os.chdir', 'os.chdir', (['"""../"""'], {}), "('../')\n", (106, 113), False, 'import os\n'), ((115, 162), 'os.system', 'os.system', (['"""git config --global log.date local"""'], {}), "('git config --global log.date local')\n", (124, 162), False, 'import os\n'), ((166, 184), 'os.chdir', 'os.chdir', (['"""../../"""'], {}), "('../../')\n", (174, 184), False, 'import os\n'), ((186, 238), 'os.system', 'os.system', (['"""git log -1 --format="%ci" > gitTime.txt"""'], {}), '(\'git log -1 --format="%ci" > gitTime.txt\')\n', (195, 238), False, 'import os\n')]
from mock import patch

from ghtools.command.status import status, parser


class TestRepo(object):
    def setup(self):
        self.patcher = patch('ghtools.command.status.Repo')
        self.mock_repo = self.patcher.start()
        self.mock_repo.return_value.set_build_status.return_value.json.return_value = {}

    def teardown(self):
        self.patcher.stop()

    def test_status(self):
        args = parser.parse_args([
            'alphagov/foobar',
            'mybranch',
            'pending',
            '--description', 'Running on Jenkins',
            '--url', 'http://ci.alphagov.co.uk/foo',
            '--context', 'CI'
        ])
        status(args)

        self.mock_repo.assert_called_with('alphagov/foobar')
        self.mock_repo.return_value.set_build_status.assert_called_with(
            'mybranch',
            {
                'state': 'pending',
                'target_url': 'http://ci.alphagov.co.uk/foo',
                'description': 'Running on Jenkins',
                'context': 'CI'
            }
        )
[ "ghtools.command.status.parser.parse_args", "mock.patch", "ghtools.command.status.status" ]
[((144, 180), 'mock.patch', 'patch', (['"""ghtools.command.status.Repo"""'], {}), "('ghtools.command.status.Repo')\n", (149, 180), False, 'from mock import patch\n'), ((412, 580), 'ghtools.command.status.parser.parse_args', 'parser.parse_args', (["['alphagov/foobar', 'mybranch', 'pending', '--description',\n 'Running on Jenkins', '--url', 'http://ci.alphagov.co.uk/foo',\n '--context', 'CI']"], {}), "(['alphagov/foobar', 'mybranch', 'pending',\n '--description', 'Running on Jenkins', '--url',\n 'http://ci.alphagov.co.uk/foo', '--context', 'CI'])\n", (429, 580), False, 'from ghtools.command.status import status, parser\n'), ((663, 675), 'ghtools.command.status.status', 'status', (['args'], {}), '(args)\n', (669, 675), False, 'from ghtools.command.status import status, parser\n')]
import json
import time
from datetime import datetime


class MemberList:
    def __init__(self):
        self.Name = ""
        self.Update = ""
        self.Members = ""

    def MessageFormVerify(self, content):
        if '#' in content and '\n' in content:
            return True
        return False

    def MessageFormPharse(self, message):
        self.Members = message.split('\n')

    def GetClanName(self, staffList, message):
        author = message.author.name + "#" + message.author.discriminator
        self.Name = staffList[author]

    def GetUpdate(self):
        self.Update = time.time()

    def Save(self):
        with open('datas/ClanList.json', 'r+', encoding='utf-8-sig') as clanListFile:
            clanLists = json.load(clanListFile)
            clanLists[self.Name]['Update'] = self.Update
            clanLists[self.Name]['Members'] = self.Members
            # Overwrite the file in place with the updated clan record.
            clanListFile.truncate(0)
            clanListFile.seek(0)
            clanListFile.write(json.dumps(clanLists, ensure_ascii=False, indent=4))

    def GetStrUpdateTime(self):
        return datetime.fromtimestamp(self.Update).strftime('%Y-%m-%d %H:%M:%S')

    def ToText(self):
        return '\n'.join(self.Members)
[ "datetime.datetime.fromtimestamp", "json.load", "json.dumps", "time.time" ]
[((620, 631), 'time.time', 'time.time', ([], {}), '()\n', (629, 631), False, 'import json, time\n'), ((764, 787), 'json.load', 'json.load', (['clanListFile'], {}), '(clanListFile)\n', (773, 787), False, 'import json, time\n'), ((1007, 1058), 'json.dumps', 'json.dumps', (['clanLists'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(clanLists, ensure_ascii=False, indent=4)\n', (1017, 1058), False, 'import json, time\n'), ((1107, 1142), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', (['self.Update'], {}), '(self.Update)\n', (1129, 1142), False, 'from datetime import datetime\n')]
import numpy as np
from common import numerical_gradient, softmax, cross_entropy_error
from collections import OrderedDict


class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout
        return dx


class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.dW = None
        self.db = None

    def forward(self, x):
        self.x = x
        return np.dot(x, self.W) + self.b

    def backward(self, dout):
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        return dx


class SoftmaxWithLoss:
    def __init__(self):
        self.loss = None
        self.y = None
        self.x = None

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        return self.loss

    def backward(self, dout=1):
        batch_size = self.t.shape[0]
        dx = (self.y - self.t) / batch_size
        return dx


class TwoLayerNet:
    def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01):
        self.params = {}
        self.params['W1'] = weight_init_std * \
            np.random.randn(input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * \
            np.random.randn(hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Affine1'] = \
            Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = \
            Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()

    def predict(self, x):
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        y = self.predict(x)
        return self.lastLayer.forward(y, t)

    def accuracy(self, x, t):
        y = self.predict(x)
        y = np.argmax(y, axis=1)
        if t.ndim != 1:
            t = np.argmax(t, axis=1)
        accuracy = np.sum(y == t) / float(x.shape[0])
        return accuracy

    def numerical_gradient(self, x, t):
        loss_W = lambda W: self.loss(x, t)

        grads = {}
        grads['W1'] = numerical_gradient(loss_W, self.params['W1'])
        grads['b1'] = numerical_gradient(loss_W, self.params['b1'])
        grads['W2'] = numerical_gradient(loss_W, self.params['W2'])
        grads['b2'] = numerical_gradient(loss_W, self.params['b2'])
        return grads

    def gradient(self, x, t):
        # Forward pass
        self.loss(x, t)

        # Backward pass
        dout = 1
        dout = self.lastLayer.backward(dout)

        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)

        grads = {}
        grads['W1'] = self.layers['Affine1'].dW
        grads['b1'] = self.layers['Affine1'].db
        grads['W2'] = self.layers['Affine2'].dW
        grads['b2'] = self.layers['Affine2'].db
        return grads


# ========== ========== #
# Verify the gradients computed by backpropagation against numerical gradients
from dataset.mnist import load_mnist

(x_train, t_train), (x_test, t_test) = \
    load_mnist(normalize=True, one_hot_label=True)

network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)

x_batch = x_train[:3]
t_batch = t_train[:3]

grad_numerical = network.numerical_gradient(x_batch, t_batch)
grad_backprop = network.gradient(x_batch, t_batch)

for k in grad_numerical.keys():
    diff = np.average(np.abs(grad_backprop[k] - grad_numerical[k]))
    print(k + ' : ' + str(diff))

# ========== ========== #
# Training loop
iters_num = 10000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1

train_loss_list = []
train_acc_list = []
test_acc_list = []

iter_per_epoch = max(train_size / batch_size, 1)

print('#========== ==========#')
print('iters_num: %s' % str(iters_num))
print('train_size: %s' % str(train_size))
print('batch_size: %s' % str(batch_size))
print('learning_rate: %s' % str(learning_rate))
print('iter_per_epoch: %s' % str(iter_per_epoch))
print('#========== ==========#')

for i in range(iters_num):
    batch_mask = np.random.choice(train_size, batch_size)
    # print(batch_mask)
    x_batch = x_train[batch_mask]
    t_batch = t_train[batch_mask]

    # Compute gradients via backpropagation
    grad = network.gradient(x_batch, t_batch)

    # Update parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]

    loss = network.loss(x_batch, t_batch)
    train_loss_list.append(loss)

    if i % iter_per_epoch == 0:
        train_acc = network.accuracy(x_train, t_train)
        test_acc = network.accuracy(x_test, t_test)
        train_acc_list.append(train_acc)
        test_acc_list.append(test_acc)
        print(train_acc, test_acc)
[ "numpy.sum", "numpy.abs", "numpy.argmax", "common.numerical_gradient", "numpy.random.randn", "common.softmax", "numpy.zeros", "dataset.mnist.load_mnist", "numpy.random.choice", "collections.OrderedDict", "numpy.dot", "common.cross_entropy_error" ]
[((3510, 3556), 'dataset.mnist.load_mnist', 'load_mnist', ([], {'normalize': '(True)', 'one_hot_label': '(True)'}), '(normalize=True, one_hot_label=True)\n', (3520, 3556), False, 'from dataset.mnist import load_mnist\n'), ((4487, 4527), 'numpy.random.choice', 'np.random.choice', (['train_size', 'batch_size'], {}), '(train_size, batch_size)\n', (4503, 4527), True, 'import numpy as np\n'), ((697, 719), 'numpy.dot', 'np.dot', (['dout', 'self.W.T'], {}), '(dout, self.W.T)\n', (703, 719), True, 'import numpy as np\n'), ((738, 760), 'numpy.dot', 'np.dot', (['self.x.T', 'dout'], {}), '(self.x.T, dout)\n', (744, 760), True, 'import numpy as np\n'), ((779, 799), 'numpy.sum', 'np.sum', (['dout'], {'axis': '(0)'}), '(dout, axis=0)\n', (785, 799), True, 'import numpy as np\n'), ((1003, 1013), 'common.softmax', 'softmax', (['x'], {}), '(x)\n', (1010, 1013), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((1034, 1069), 'common.cross_entropy_error', 'cross_entropy_error', (['self.y', 'self.t'], {}), '(self.y, self.t)\n', (1053, 1069), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((1519, 1540), 'numpy.zeros', 'np.zeros', (['hidden_size'], {}), '(hidden_size)\n', (1527, 1540), True, 'import numpy as np\n'), ((1687, 1708), 'numpy.zeros', 'np.zeros', (['output_size'], {}), '(output_size)\n', (1695, 1708), True, 'import numpy as np\n'), ((1732, 1745), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1743, 1745), False, 'from collections import OrderedDict\n'), ((2302, 2322), 'numpy.argmax', 'np.argmax', (['y'], {'axis': '(1)'}), '(y, axis=1)\n', (2311, 2322), True, 'import numpy as np\n'), ((2589, 2634), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['W1']"], {}), "(loss_W, self.params['W1'])\n", (2607, 2634), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2657, 2702), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['b1']"], {}), "(loss_W, self.params['b1'])\n", (2675, 2702), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2725, 2770), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['W2']"], {}), "(loss_W, self.params['W2'])\n", (2743, 2770), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((2793, 2838), 'common.numerical_gradient', 'numerical_gradient', (['loss_W', "self.params['b2']"], {}), "(loss_W, self.params['b2'])\n", (2811, 2838), False, 'from common import numerical_gradient, softmax, cross_entropy_error\n'), ((3841, 3885), 'numpy.abs', 'np.abs', (['(grad_backprop[k] - grad_numerical[k])'], {}), '(grad_backprop[k] - grad_numerical[k])\n', (3847, 3885), True, 'import numpy as np\n'), ((626, 643), 'numpy.dot', 'np.dot', (['x', 'self.W'], {}), '(x, self.W)\n', (632, 643), True, 'import numpy as np\n'), ((1450, 1490), 'numpy.random.randn', 'np.random.randn', (['input_size', 'hidden_size'], {}), '(input_size, hidden_size)\n', (1465, 1490), True, 'import numpy as np\n'), ((1617, 1658), 'numpy.random.randn', 'np.random.randn', (['hidden_size', 'output_size'], {}), '(hidden_size, output_size)\n', (1632, 1658), True, 'import numpy as np\n'), ((2363, 2383), 'numpy.argmax', 'np.argmax', (['t'], {'axis': '(1)'}), '(t, axis=1)\n', (2372, 2383), True, 'import numpy as np\n'), ((2404, 2418), 'numpy.sum', 'np.sum', (['(y == t)'], {}), '(y == t)\n', (2410, 2418), True, 'import numpy as np\n')]
#!/usr/bin/env python
import numpy as np
from scipy.stats import norm


def bachelier(So, K, sigma, T, option_type):
    '''
    Calculate European option price using the Bachelier model:
        dSt = sigma * S0 * dWt
        St = S0*(1 + sigma*Wt)

    Parameters
    ----------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    sigma: float
        volatility of the Brownian motion term
    T: float
        length of time
    option_type: str
        type of European option. Including: van call/put (vanilla),
        con call/put (cash-or-nothing), aon call/put (asset-or-nothing)

    Returns
    -------
    val: value of the option at time 0
    '''
    xs = (K-So) / (So * sigma * np.sqrt(T))
    val = None

    if So == K:
        return sigma*So*np.sqrt(T/(2*np.pi))

    if option_type == 'van call':
        val = (So - K) * norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
    elif option_type == 'van put':
        val = (K - So) * norm.cdf(xs) + So*sigma*np.sqrt(T)*norm.pdf(xs)
    elif option_type == 'con call':
        val = norm.cdf(-xs)
    elif option_type == 'con put':
        val = norm.cdf(xs)
    elif option_type == 'aon call':
        val = So*norm.cdf(-xs) + So*sigma*np.sqrt(T)*norm.pdf(-xs)
    elif option_type == 'aon put':
        val = So*norm.cdf(xs) - So*sigma*np.sqrt(T)*norm.pdf(xs)
    else:
        raise ValueError("Option type is invalid. Should be either 'van call', "
                         "'van put', 'con call', 'con put', 'aon call', or 'aon put'")

    return val


def black_scholes(So, K, r, sigma, T, option_type):
    '''
    Calculate European option price using the Black-Scholes (1973) model:
        dSt = r*St*dt + sigma*St*dWt
        St = S0*exp{(r-sigma^2/2)t + sigma*Wt}

    Parameters
    ----------
    So: float
        price of underlying asset at time 0
    K: float
        strike price of option
    r: float
        drift of St
    sigma: float
        volatility of the Brownian motion term
    T: float
        length of time
    option_type: str
        type of European option. Including: van call/put (vanilla),
        con call/put (cash-or-nothing), aon call/put (asset-or-nothing)

    Returns
    -------
    val: value of the option at time 0
    '''
    d1 = (np.log(So/K) + (r+sigma**2/2)*T) / (sigma*np.sqrt(T))
    d2 = (np.log(So/K) + (r-sigma**2/2)*T) / (sigma*np.sqrt(T))
    val = None

    if So == K:
        return sigma*So*np.sqrt(T/(2*np.pi))

    if option_type == 'van call':
        val = So*norm.cdf(d1) - K*np.e**(-r*T)*norm.cdf(d2)
    elif option_type == 'van put':
        val = -So*norm.cdf(-d1) + K*np.e**(-r*T)*norm.cdf(-d2)
    elif option_type == 'con call':
        val = np.e**(-r*T) * norm.cdf(d2)
    elif option_type == 'con put':
        val = np.e**(-r*T) * norm.cdf(-d2)
    elif option_type == 'aon call':
        val = So*norm.cdf(d1)
    elif option_type == 'aon put':
        val = So*norm.cdf(-d1)
    else:
        raise ValueError("Option type is invalid. Should be either 'van call', "
                         "'van put', 'con call', 'con put', 'aon call', or 'aon put'")

    return val
[ "scipy.stats.norm.cdf", "scipy.stats.norm.pdf", "numpy.log", "numpy.sqrt" ]
[((748, 758), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (755, 758), True, 'import numpy as np\n'), ((820, 844), 'numpy.sqrt', 'np.sqrt', (['(T / (2 * np.pi))'], {}), '(T / (2 * np.pi))\n', (827, 844), True, 'import numpy as np\n'), ((2341, 2355), 'numpy.log', 'np.log', (['(So / K)'], {}), '(So / K)\n', (2347, 2355), True, 'import numpy as np\n'), ((2383, 2393), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (2390, 2393), True, 'import numpy as np\n'), ((2405, 2419), 'numpy.log', 'np.log', (['(So / K)'], {}), '(So / K)\n', (2411, 2419), True, 'import numpy as np\n'), ((2447, 2457), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (2454, 2457), True, 'import numpy as np\n'), ((2519, 2543), 'numpy.sqrt', 'np.sqrt', (['(T / (2 * np.pi))'], {}), '(T / (2 * np.pi))\n', (2526, 2543), True, 'import numpy as np\n'), ((905, 918), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (913, 918), False, 'from scipy.stats import norm\n'), ((941, 954), 'scipy.stats.norm.pdf', 'norm.pdf', (['(-xs)'], {}), '(-xs)\n', (949, 954), False, 'from scipy.stats import norm\n'), ((1113, 1126), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (1121, 1126), False, 'from scipy.stats import norm\n'), ((2596, 2608), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2604, 2608), False, 'from scipy.stats import norm\n'), ((2626, 2638), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2634, 2638), False, 'from scipy.stats import norm\n'), ((930, 940), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (937, 940), True, 'import numpy as np\n'), ((1015, 1027), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1023, 1027), False, 'from scipy.stats import norm\n'), ((1050, 1062), 'scipy.stats.norm.pdf', 'norm.pdf', (['xs'], {}), '(xs)\n', (1058, 1062), False, 'from scipy.stats import norm\n'), ((1176, 1188), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1184, 1188), False, 'from scipy.stats import norm\n'), ((2692, 2705), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (2700, 2705), False, 'from scipy.stats import norm\n'), ((2723, 2736), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2731, 2736), False, 'from scipy.stats import norm\n'), ((2802, 2814), 'scipy.stats.norm.cdf', 'norm.cdf', (['d2'], {}), '(d2)\n', (2810, 2814), False, 'from scipy.stats import norm\n'), ((1039, 1049), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1046, 1049), True, 'import numpy as np\n'), ((2879, 2892), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d2)'], {}), '(-d2)\n', (2887, 2892), False, 'from scipy.stats import norm\n'), ((2946, 2958), 'scipy.stats.norm.cdf', 'norm.cdf', (['d1'], {}), '(d1)\n', (2954, 2958), False, 'from scipy.stats import norm\n'), ((1242, 1255), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-xs)'], {}), '(-xs)\n', (1250, 1255), False, 'from scipy.stats import norm\n'), ((1278, 1291), 'scipy.stats.norm.pdf', 'norm.pdf', (['(-xs)'], {}), '(-xs)\n', (1286, 1291), False, 'from scipy.stats import norm\n'), ((3011, 3024), 'scipy.stats.norm.cdf', 'norm.cdf', (['(-d1)'], {}), '(-d1)\n', (3019, 3024), False, 'from scipy.stats import norm\n'), ((1267, 1277), 'numpy.sqrt', 'np.sqrt', (['T'], {}), '(T)\n', (1274, 1277), True, 'import numpy as np\n'), ((1344, 1356), 'scipy.stats.norm.cdf', 'norm.cdf', (['xs'], {}), '(xs)\n', (1352, 1356), False, 'from scipy.stats import norm\n'), ((1379, 1391), 'scipy.stats.norm.pdf', 'norm.pdf', (['xs'], {}), '(xs)\n', (1387, 1391), False, 'from scipy.stats import norm\n'), ((1368, 1378), 'numpy.sqrt', 
'np.sqrt', (['T'], {}), '(T)\n', (1375, 1378), True, 'import numpy as np\n')]
# -*- coding: utf-8 -*-

from unittest import TestCase

from flask import url_for
from flask_login import current_user
from flask_login import login_user
from werkzeug.exceptions import Forbidden
from werkzeug.wrappers import Response

from app import create_app
from app import db
from app.configuration import TestConfiguration
from app.userprofile import logout_required
from app.userprofile import Permission
from app.userprofile import permission_required
from app.userprofile import permission_required_all
from app.userprofile import permission_required_one_of
from app.userprofile import Role
from app.userprofile import User


class DecoratorsTest(TestCase):

    def setUp(self):
        """
        Initialize the test cases.
        """
        self.app = create_app(TestConfiguration)
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.request_context = self.app.test_request_context()
        self.request_context.push()
        db.create_all()

    def tearDown(self):
        """
        Reset the test cases.
        """
        db.session.remove()
        db.drop_all()
        self.request_context.pop()
        self.app_context.pop()

    @staticmethod
    def view_function() -> str:
        """
        A simple test "view" function.

        :return: 'Decorated View'.
        """
        return 'Decorated View'

    def test_logout_required_logged_out(self):
        """
        Test the `logout_required` decorator with an anonymous user.

        Expected result: The decorated view function is returned.
        """
        view_function = logout_required(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_logout_required_logged_in(self):
        """
        Test the `logout_required` decorator with a logged-in user.

        Expected result: The redirect response to the home page is returned.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        user = User(email, name)
        db.session.add(user)
        db.session.commit()

        login_user(user)

        redirect_function = logout_required(self.view_function)
        response = redirect_function()
        self.assertIsInstance(response, Response)
        self.assertEqual(302, response.status_code)
        self.assertEqual(url_for('main.index'), response.location)

    def test_permission_required_no_role(self):
        """
        Test the `permission_required` decorator if the user does not have a role.

        Expected result: The request is aborted with an error 403.
        """
        # Ensure the user has no role.
        self.assertFalse(hasattr(current_user, 'role'))

        with self.assertRaises(Forbidden):
            permission_required(Permission.EditRole)(self.view_function)()

    def test_permission_required_no_permission(self):
        """
        Test the `permission_required` decorator if the user does not have the requested permission.

        Expected result: The request is aborted with an error 403.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        permission = Permission.EditRole
        self.assertFalse(user.role.has_permission(permission))

        with self.assertRaises(Forbidden):
            permission_required(permission)(self.view_function)()

    def test_permission_required_has_permission(self):
        """
        Test the `permission_required` decorator if the user has the requested permission.

        Expected result: The decorated view function is returned.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        permission = Permission.EditRole
        user.role.permissions = permission
        self.assertTrue(user.role.has_permission(permission))

        view_function = permission_required(permission)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_permission_required_all_not_all_permissions(self):
        """
        Test the `permission_required_all` decorator if the user does not have all the requested permissions.

        Expected result: The request is aborted with an error 403.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        user.role.permissions = Permission.EditRole
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        self.assertTrue(user.role.has_permission(Permission.EditRole))
        self.assertFalse(user.role.has_permission(Permission.EditUser))

        with self.assertRaises(Forbidden):
            permission_required_all(Permission.EditRole, Permission.EditUser)(self.view_function)()

    def test_permission_required_all_has_permissions(self):
        """
        Test the `permission_required` decorator if the user has all the requested permission.

        Expected result: The decorated view function is returned.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        user.role.permissions = Permission.EditRole | Permission.EditUser
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        self.assertTrue(user.role.has_permissions_all(Permission.EditRole, Permission.EditUser))

        view_function = permission_required_all(Permission.EditRole, Permission.EditUser)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)

    def test_permission_required_one_of_no_permission(self):
        """
        Test the `permission_required_one_of` decorator if the user does not have any of the requested permissions.

        Expected result: The request is aborted with an error 403.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        self.assertEqual(Permission(0), user.role.permissions)

        with self.assertRaises(Forbidden):
            permission_required_one_of(Permission.EditRole, Permission.EditUser)(self.view_function)()

    def test_permission_required_one_of_has_permission(self):
        """
        Test the `permission_required` decorator if the user has one of the requested permission, but not all.

        Expected result: The decorated view function is returned.
        """
        email = '<EMAIL>'
        name = '<NAME>'
        password = '<PASSWORD>'
        user = User(email, name)
        user.set_password(password)
        user.role = Role('Administrator')
        user.role.permissions = Permission.EditRole
        db.session.add(user)
        db.session.commit()

        user.login(email, password)

        self.assertTrue(user.role.has_permission(Permission.EditRole))
        self.assertFalse(user.role.has_permission(Permission.EditUser))

        view_function = permission_required_one_of(Permission.EditRole, Permission.EditUser)(self.view_function)
        response = view_function()
        self.assertEqual(self.view_function(), response)
[ "app.userprofile.permission_required_one_of", "app.db.drop_all", "app.userprofile.logout_required", "flask_login.login_user", "app.userprofile.permission_required_all", "app.userprofile.Permission", "app.create_app", "app.userprofile.User", "flask.url_for", "app.db.session.commit", "app.userprofile.Role", "app.userprofile.permission_required", "app.db.create_all", "app.db.session.add", "app.db.session.remove" ]
[((773, 802), 'app.create_app', 'create_app', (['TestConfiguration'], {}), '(TestConfiguration)\n', (783, 802), False, 'from app import create_app\n'), ((992, 1007), 'app.db.create_all', 'db.create_all', ([], {}), '()\n', (1005, 1007), False, 'from app import db\n'), ((1100, 1119), 'app.db.session.remove', 'db.session.remove', ([], {}), '()\n', (1117, 1119), False, 'from app import db\n'), ((1128, 1141), 'app.db.drop_all', 'db.drop_all', ([], {}), '()\n', (1139, 1141), False, 'from app import db\n'), ((1640, 1675), 'app.userprofile.logout_required', 'logout_required', (['self.view_function'], {}), '(self.view_function)\n', (1655, 1675), False, 'from app.userprofile import logout_required\n'), ((2059, 2076), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (2063, 2076), False, 'from app.userprofile import User\n'), ((2086, 2106), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (2100, 2106), False, 'from app import db\n'), ((2115, 2134), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2132, 2134), False, 'from app import db\n'), ((2143, 2159), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (2153, 2159), False, 'from flask_login import login_user\n'), ((2189, 2224), 'app.userprofile.logout_required', 'logout_required', (['self.view_function'], {}), '(self.view_function)\n', (2204, 2224), False, 'from app.userprofile import logout_required\n'), ((3234, 3251), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (3238, 3251), False, 'from app.userprofile import User\n'), ((3308, 3329), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (3312, 3329), False, 'from app.userprofile import Role\n'), ((3339, 3359), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (3353, 3359), False, 'from app import db\n'), ((3368, 3387), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (3385, 3387), False, 'from app import db\n'), ((3984, 4001), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (3988, 4001), False, 'from app.userprofile import User\n'), ((4058, 4079), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (4062, 4079), False, 'from app.userprofile import Role\n'), ((4089, 4109), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (4103, 4109), False, 'from app import db\n'), ((4118, 4137), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (4135, 4137), False, 'from app import db\n'), ((4865, 4882), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (4869, 4882), False, 'from app.userprofile import User\n'), ((4939, 4960), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (4943, 4960), False, 'from app.userprofile import Role\n'), ((5022, 5042), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (5036, 5042), False, 'from app import db\n'), ((5051, 5070), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5068, 5070), False, 'from app import db\n'), ((5749, 5766), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (5753, 5766), False, 'from app.userprofile import User\n'), ((5823, 5844), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (5827, 5844), False, 'from app.userprofile import Role\n'), ((5928, 5948), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (5942, 5948), 
False, 'from app import db\n'), ((5957, 5976), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (5974, 5976), False, 'from app import db\n'), ((6691, 6708), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (6695, 6708), False, 'from app.userprofile import User\n'), ((6765, 6786), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (6769, 6786), False, 'from app.userprofile import Role\n'), ((6796, 6816), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (6810, 6816), False, 'from app import db\n'), ((6825, 6844), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (6842, 6844), False, 'from app import db\n'), ((7464, 7481), 'app.userprofile.User', 'User', (['email', 'name'], {}), '(email, name)\n', (7468, 7481), False, 'from app.userprofile import User\n'), ((7538, 7559), 'app.userprofile.Role', 'Role', (['"""Administrator"""'], {}), "('Administrator')\n", (7542, 7559), False, 'from app.userprofile import Role\n'), ((7621, 7641), 'app.db.session.add', 'db.session.add', (['user'], {}), '(user)\n', (7635, 7641), False, 'from app import db\n'), ((7650, 7669), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (7667, 7669), False, 'from app import db\n'), ((2391, 2412), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (2398, 2412), False, 'from flask import url_for\n'), ((4348, 4379), 'app.userprofile.permission_required', 'permission_required', (['permission'], {}), '(permission)\n', (4367, 4379), False, 'from app.userprofile import permission_required\n'), ((6137, 6202), 'app.userprofile.permission_required_all', 'permission_required_all', (['Permission.EditRole', 'Permission.EditUser'], {}), '(Permission.EditRole, Permission.EditUser)\n', (6160, 6202), False, 'from app.userprofile import permission_required_all\n'), ((6908, 6921), 'app.userprofile.Permission', 'Permission', (['(0)'], {}), '(0)\n', (6918, 6921), False, 'from app.userprofile import Permission\n'), ((7876, 7944), 'app.userprofile.permission_required_one_of', 'permission_required_one_of', (['Permission.EditRole', 'Permission.EditUser'], {}), '(Permission.EditRole, Permission.EditUser)\n', (7902, 7944), False, 'from app.userprofile import permission_required_one_of\n'), ((2817, 2857), 'app.userprofile.permission_required', 'permission_required', (['Permission.EditRole'], {}), '(Permission.EditRole)\n', (2836, 2857), False, 'from app.userprofile import permission_required\n'), ((3586, 3617), 'app.userprofile.permission_required', 'permission_required', (['permission'], {}), '(permission)\n', (3605, 3617), False, 'from app.userprofile import permission_required\n'), ((5308, 5373), 'app.userprofile.permission_required_all', 'permission_required_all', (['Permission.EditRole', 'Permission.EditUser'], {}), '(Permission.EditRole, Permission.EditUser)\n', (5331, 5373), False, 'from app.userprofile import permission_required_all\n'), ((7002, 7070), 'app.userprofile.permission_required_one_of', 'permission_required_one_of', (['Permission.EditRole', 'Permission.EditUser'], {}), '(Permission.EditRole, Permission.EditUser)\n', (7028, 7070), False, 'from app.userprofile import permission_required_one_of\n')]
from pydlm import dlm, trend, seasonality
from scipy.stats import norm
import numpy as np
import matplotlib.pyplot as plt

# A linear trend
linear_trend = trend(degree=1, discount=1, name='linear_trend', w=10)
# A seasonality

time_series = []
for i in range(10):
    if i == 0:
        x_sim = np.random.normal(0, 1, 1)
    else:
        x_sim = np.random.normal(x_sim, 10, 1)
    time_series.append(np.random.normal(x_sim, 10, 1))
time_series = np.array(time_series)

simple_dlm = dlm(time_series) + linear_trend
simple_dlm.fit()

filteredMean = simple_dlm.getMean(filterType='forwardFilter')
filteredVar = simple_dlm.getVar(filterType='forwardFilter')

ll = 0
one_step_ahead_samples = []
for i in range(len(time_series)):
    tmp_samples = []
    for j in range(1000):
        tmp = np.random.normal(filteredMean[i], filteredVar[i], 1)
        tmp_samples.append(np.random.normal(tmp, 1, 1))
    one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)

upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
    upper_pi.append(np.percentile(p, 95))
    lower_pi.append(np.percentile(p, 5))

time_series_shifted = time_series
# plt.plot(range(len(time_series_shifted)), time_series_shifted, color='orange')
# plt.fill_between(range(len(time_series_shifted)), upper_pi, lower_pi, alpha=.3)
# plt.show()

from pykalman import KalmanFilter

random_state = np.random.RandomState(0)
transition_matrix = 1
transition_offset = .1
observation_matrix = 1
observation_offset = 1
transition_covariance = 10
observation_covariance = 1
initial_state_mean = 0
initial_state_covariance = 1

# sample from model
kf = KalmanFilter(
    transition_matrix, observation_matrix, transition_covariance,
    observation_covariance, transition_offset, observation_offset,
    initial_state_mean, initial_state_covariance,
    random_state=random_state
)

filtered_state_means, filtered_state_variances = kf.filter(time_series)

filteredMean = filtered_state_means.reshape((-1))
filteredVar = filtered_state_variances.reshape((-1))

one_step_ahead_samples = []
for i in range(len(time_series)):
    tmp_samples = []
    for j in range(10000):
        tmp = np.random.normal(filteredMean[i], filteredVar[i], 1)
        tmp2 = np.random.normal(tmp, 10, 1)
        tmp_samples.append(np.random.normal(tmp2, 10, 1))
    one_step_ahead_samples.append(tmp_samples)
one_step_ahead_samples = np.array(one_step_ahead_samples)

upper_pi = []
lower_pi = []
for p in one_step_ahead_samples:
    upper_pi.append(np.percentile(p, 95))
    lower_pi.append(np.percentile(p, 5))

time_series = time_series.reshape((-1))
time_series_shifted = time_series.tolist()[1:] + [10]
plt.plot(range(len(time_series_shifted)), time_series_shifted, color='orange')
plt.fill_between(range(len(time_series_shifted)), upper_pi, lower_pi, alpha=.3)
plt.show()
[ "matplotlib.pyplot.show", "pydlm.trend", "numpy.random.RandomState", "pykalman.KalmanFilter", "numpy.percentile", "numpy.array", "numpy.random.normal", "pydlm.dlm" ]
[((155, 209), 'pydlm.trend', 'trend', ([], {'degree': '(1)', 'discount': '(1)', 'name': '"""linear_trend"""', 'w': '(10)'}), "(degree=1, discount=1, name='linear_trend', w=10)\n", (160, 209), False, 'from pydlm import dlm, trend, seasonality\n'), ((441, 462), 'numpy.array', 'np.array', (['time_series'], {}), '(time_series)\n', (449, 462), True, 'import numpy as np\n'), ((957, 989), 'numpy.array', 'np.array', (['one_step_ahead_samples'], {}), '(one_step_ahead_samples)\n', (965, 989), True, 'import numpy as np\n'), ((1388, 1412), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (1409, 1412), True, 'import numpy as np\n'), ((1637, 1855), 'pykalman.KalmanFilter', 'KalmanFilter', (['transition_matrix', 'observation_matrix', 'transition_covariance', 'observation_covariance', 'transition_offset', 'observation_offset', 'initial_state_mean', 'initial_state_covariance'], {'random_state': 'random_state'}), '(transition_matrix, observation_matrix, transition_covariance,\n observation_covariance, transition_offset, observation_offset,\n initial_state_mean, initial_state_covariance, random_state=random_state)\n', (1649, 1855), False, 'from pykalman import KalmanFilter\n'), ((2477, 2509), 'numpy.array', 'np.array', (['one_step_ahead_samples'], {}), '(one_step_ahead_samples)\n', (2485, 2509), True, 'import numpy as np\n'), ((2903, 2913), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2911, 2913), True, 'import matplotlib.pyplot as plt\n'), ((476, 492), 'pydlm.dlm', 'dlm', (['time_series'], {}), '(time_series)\n', (479, 492), False, 'from pydlm import dlm, trend, seasonality\n'), ((295, 320), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(1)'], {}), '(0, 1, 1)\n', (311, 320), True, 'import numpy as np\n'), ((345, 375), 'numpy.random.normal', 'np.random.normal', (['x_sim', '(10)', '(1)'], {}), '(x_sim, 10, 1)\n', (361, 375), True, 'import numpy as np\n'), ((397, 427), 'numpy.random.normal', 'np.random.normal', (['x_sim', '(10)', '(1)'], {}), '(x_sim, 10, 1)\n', (413, 427), True, 'import numpy as np\n'), ((779, 831), 'numpy.random.normal', 'np.random.normal', (['filteredMean[i]', 'filteredVar[i]', '(1)'], {}), '(filteredMean[i], filteredVar[i], 1)\n', (795, 831), True, 'import numpy as np\n'), ((1072, 1092), 'numpy.percentile', 'np.percentile', (['p', '(95)'], {}), '(p, 95)\n', (1085, 1092), True, 'import numpy as np\n'), ((1113, 1132), 'numpy.percentile', 'np.percentile', (['p', '(5)'], {}), '(p, 5)\n', (1126, 1132), True, 'import numpy as np\n'), ((2246, 2298), 'numpy.random.normal', 'np.random.normal', (['filteredMean[i]', 'filteredVar[i]', '(1)'], {}), '(filteredMean[i], filteredVar[i], 1)\n', (2262, 2298), True, 'import numpy as np\n'), ((2313, 2341), 'numpy.random.normal', 'np.random.normal', (['tmp', '(10)', '(1)'], {}), '(tmp, 10, 1)\n', (2329, 2341), True, 'import numpy as np\n'), ((2592, 2612), 'numpy.percentile', 'np.percentile', (['p', '(95)'], {}), '(p, 95)\n', (2605, 2612), True, 'import numpy as np\n'), ((2633, 2652), 'numpy.percentile', 'np.percentile', (['p', '(5)'], {}), '(p, 5)\n', (2646, 2652), True, 'import numpy as np\n'), ((858, 885), 'numpy.random.normal', 'np.random.normal', (['tmp', '(1)', '(1)'], {}), '(tmp, 1, 1)\n', (874, 885), True, 'import numpy as np\n'), ((2376, 2405), 'numpy.random.normal', 'np.random.normal', (['tmp2', '(10)', '(1)'], {}), '(tmp2, 10, 1)\n', (2392, 2405), True, 'import numpy as np\n')]
""" Implementation of :py:class:`Dataset` object. A folder containing a set of subjects with CT and RS in dicom format is converted into nii format. A new folder is created keeping the same organization. """ import os import numpy as np from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs class Dataset: """ From dicom to dataset class. Convert CT and RTSTRUCT into nii, readable by deep learning frameworks. All subfolders representing subject must contain the CT and the RS associated. Example: >>> from segmentation_rt.rs2mask import Dataset >>> structures = ['Heart', 'Breast L', 'Breast R'] >>> dataset = Dataset('data/dicom_dataset', 'data/nii_dataset', structures) >>> dataset.make() :param string path: Root directory. :param string export_path: Export path. :param list[string] structures: List of desired structure(s). :param bool force: Force export even if one structure is missing. """ def __init__(self, path, export_path, structures, force=True): self.path = path self.export_path = export_path self.structures = structures self.dataset_name = os.path.basename(export_path) self.force = force self.root_path = os.path.dirname(self.path) self.patients = [folder for folder in os.listdir(self.path) if os.path.isdir(os.path.join(self.path, folder))] self.patient_paths = [os.path.join(self.path, patient) for patient in self.patients] self.rs_paths = self.get_rs() def __str__(self): return self.dataset_name def get_rs(self): """ List RTSTRUCT for each patient. :rtype: list[str] """ rs_paths = [] for path in self.patient_paths: files = [filename for filename in os.listdir(path) if filename.startswith("RS")] assert len(files) > 0, 'at least one RS is required' rs = files[0] rs_paths.append(os.path.join(path, rs)) return rs_paths def find_structures(self, index): """ List missing and not missing structures in a RTSTRUCT. :param index: index of the patient. :type index: int :return: List missing and not missing structures. :rtype: (list[str],list[str]) """ structures = list_rt_structs(self.rs_paths[index]) ref_structures = np.array(self.structures) maks = np.in1d(ref_structures, structures) not_missing = ref_structures[maks] missing = ref_structures[~maks] if len(missing): print(f"WARNING ! Some structures are missing: {missing}\n") return missing, not_missing def make(self): """Create structures and convert the CT in nii format for each subject.""" print(f"Structure(s) to export: {self.structures}") print(f"Patient(s) identification : {self.patients}") for index, path_patient in enumerate(self.patient_paths): patient_id = self.patients[index] print(f"Exporting {index + 1} ({patient_id}) on {len(self.patients)}") nii_output = os.path.join(self.export_path, patient_id) missing, not_missing = self.find_structures(index) if len(missing) == 0 or self.force: dcmrtstruct2nii(self.rs_paths[index], path_patient, nii_output, not_missing, False, mask_foreground_value=1) nii_maks = [nii_mask for nii_mask in os.listdir(nii_output) if nii_mask.startswith('mask')] for nii in nii_maks: name = os.path.splitext(nii)[0].split("_")[1].replace("-", " ") os.rename(os.path.join(nii_output, nii), os.path.join(nii_output, name + '.nii')) os.rename(os.path.join(nii_output, "image.nii"), os.path.join(nii_output, "ct.nii")) else: print(f"Skip {patient_id} because of missing structure(s)") print("Export done")
[ "dcmrtstruct2nii.list_rt_structs", "dcmrtstruct2nii.dcmrtstruct2nii", "os.path.basename", "os.path.dirname", "numpy.array", "os.path.splitext", "os.path.join", "os.listdir", "numpy.in1d" ]
[((1215, 1244), 'os.path.basename', 'os.path.basename', (['export_path'], {}), '(export_path)\n', (1231, 1244), False, 'import os\n'), ((1298, 1324), 'os.path.dirname', 'os.path.dirname', (['self.path'], {}), '(self.path)\n', (1313, 1324), False, 'import os\n'), ((2406, 2443), 'dcmrtstruct2nii.list_rt_structs', 'list_rt_structs', (['self.rs_paths[index]'], {}), '(self.rs_paths[index])\n', (2421, 2443), False, 'from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs\n'), ((2469, 2494), 'numpy.array', 'np.array', (['self.structures'], {}), '(self.structures)\n', (2477, 2494), True, 'import numpy as np\n'), ((2510, 2545), 'numpy.in1d', 'np.in1d', (['ref_structures', 'structures'], {}), '(ref_structures, structures)\n', (2517, 2545), True, 'import numpy as np\n'), ((1499, 1531), 'os.path.join', 'os.path.join', (['self.path', 'patient'], {}), '(self.path, patient)\n', (1511, 1531), False, 'import os\n'), ((3214, 3256), 'os.path.join', 'os.path.join', (['self.export_path', 'patient_id'], {}), '(self.export_path, patient_id)\n', (3226, 3256), False, 'import os\n'), ((1371, 1392), 'os.listdir', 'os.listdir', (['self.path'], {}), '(self.path)\n', (1381, 1392), False, 'import os\n'), ((2045, 2067), 'os.path.join', 'os.path.join', (['path', 'rs'], {}), '(path, rs)\n', (2057, 2067), False, 'import os\n'), ((3385, 3497), 'dcmrtstruct2nii.dcmrtstruct2nii', 'dcmrtstruct2nii', (['self.rs_paths[index]', 'path_patient', 'nii_output', 'not_missing', '(False)'], {'mask_foreground_value': '(1)'}), '(self.rs_paths[index], path_patient, nii_output, not_missing,\n False, mask_foreground_value=1)\n', (3400, 3497), False, 'from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs\n'), ((1435, 1466), 'os.path.join', 'os.path.join', (['self.path', 'folder'], {}), '(self.path, folder)\n', (1447, 1466), False, 'import os\n'), ((1879, 1895), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1889, 1895), False, 'import os\n'), ((3885, 3922), 'os.path.join', 'os.path.join', (['nii_output', '"""image.nii"""'], {}), "(nii_output, 'image.nii')\n", (3897, 3922), False, 'import os\n'), ((3924, 3958), 'os.path.join', 'os.path.join', (['nii_output', '"""ct.nii"""'], {}), "(nii_output, 'ct.nii')\n", (3936, 3958), False, 'import os\n'), ((3580, 3602), 'os.listdir', 'os.listdir', (['nii_output'], {}), '(nii_output)\n', (3590, 3602), False, 'import os\n'), ((3786, 3815), 'os.path.join', 'os.path.join', (['nii_output', 'nii'], {}), '(nii_output, nii)\n', (3798, 3815), False, 'import os\n'), ((3817, 3856), 'os.path.join', 'os.path.join', (['nii_output', "(name + '.nii')"], {}), "(nii_output, name + '.nii')\n", (3829, 3856), False, 'import os\n'), ((3699, 3720), 'os.path.splitext', 'os.path.splitext', (['nii'], {}), '(nii)\n', (3715, 3720), False, 'import os\n')]
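The class docstring already sketches basic usage; for completeness, a hedged end-to-end example follows. The paths and structure names are invented, and find_structures is only called here to preview the first patient's RTSTRUCT before the export.

# Hypothetical usage sketch; paths and structure names are invented.
from segmentation_rt.rs2mask import Dataset

structures = ['Heart', 'Breast L', 'Breast R']
dataset = Dataset('data/dicom_dataset', 'data/nii_dataset', structures, force=True)

missing, found = dataset.find_structures(0)  # preview the first patient's RTSTRUCT
dataset.make()  # writes one <structure>.nii mask plus ct.nii per patient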
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Print prettier and more detailed exceptions.""" import math import os import sys import traceback from telemetry.core import util def PrintFormattedException(exception_class, exception, tb): """Prints an Exception in a more useful format than the default. TODO(tonyg): Consider further enhancements. For instance: - Report stacks to maintainers like depot_tools does. - Add a debug flag to automatically start pdb upon exception. """ def _GetFinalFrame(frame): final_frame = None while frame is not None: final_frame = frame frame = frame.tb_next return final_frame def _AbbreviateMiddle(target, middle, length): assert length >= 0, 'Must provide positive length' assert len(middle) <= length, 'middle must not be greater than length' if len(target) <= length: return target half_length = (length - len(middle)) / 2. return '%s%s%s' % (target[:int(math.floor(half_length))], middle, target[-int(math.ceil(half_length)):]) base_dir = os.path.abspath(util.GetChromiumSrcDir()) formatted_exception = traceback.format_exception( exception_class, exception, tb) extracted_tb = traceback.extract_tb(tb) traceback_header = formatted_exception[0].strip() exception = ''.join([l[2:] if l[:2] == ' ' else l for l in traceback.format_exception_only(exception_class, exception)]) local_variables = [(variable, value) for variable, value in _GetFinalFrame(tb).tb_frame.f_locals.iteritems() if variable != 'self'] # Format the traceback. print >> sys.stderr print >> sys.stderr, traceback_header for filename, line, function, text in extracted_tb: filename = os.path.abspath(filename) if filename.startswith(base_dir): filename = filename[len(base_dir)+1:] print >> sys.stderr, ' %s at %s:%d' % (function, filename, line) print >> sys.stderr, ' %s' % text # Format the locals. if local_variables: print >> sys.stderr print >> sys.stderr, 'Locals:' longest_variable = max([len(v) for v, _ in local_variables]) for variable, value in sorted(local_variables): value = repr(value) possibly_truncated_value = _AbbreviateMiddle(value, ' ... ', 1024) truncation_indication = '' if len(possibly_truncated_value) != len(value): truncation_indication = ' (truncated)' print >> sys.stderr, ' %s: %s%s' % (variable.ljust(longest_variable + 1), possibly_truncated_value, truncation_indication) # Format the exception. print >> sys.stderr print >> sys.stderr, exception
[ "os.path.abspath", "traceback.format_exception_only", "traceback.format_exception", "math.ceil", "math.floor", "telemetry.core.util.GetChromiumSrcDir", "traceback.extract_tb" ]
[((1288, 1346), 'traceback.format_exception', 'traceback.format_exception', (['exception_class', 'exception', 'tb'], {}), '(exception_class, exception, tb)\n', (1314, 1346), False, 'import traceback\n'), ((1371, 1395), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {}), '(tb)\n', (1391, 1395), False, 'import traceback\n'), ((1238, 1262), 'telemetry.core.util.GetChromiumSrcDir', 'util.GetChromiumSrcDir', ([], {}), '()\n', (1260, 1262), False, 'from telemetry.core import util\n'), ((1984, 2009), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (1999, 2009), False, 'import os\n'), ((1533, 1592), 'traceback.format_exception_only', 'traceback.format_exception_only', (['exception_class', 'exception'], {}), '(exception_class, exception)\n', (1564, 1592), False, 'import traceback\n'), ((1088, 1111), 'math.floor', 'math.floor', (['half_length'], {}), '(half_length)\n', (1098, 1111), False, 'import math\n'), ((1181, 1203), 'math.ceil', 'math.ceil', (['half_length'], {}), '(half_length)\n', (1190, 1203), False, 'import math\n')]
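PrintFormattedException takes the same (type, value, traceback) triple that sys.excepthook receives, so it can be installed as a global handler. A minimal sketch, assuming the module lives at telemetry.core.exception_formatter; the real path may differ.

# Hypothetical wiring; the exception_formatter module path is an assumption.
import sys

from telemetry.core import exception_formatter  # assumed location


def _Hook(exc_type, exc_value, tb):
  exception_formatter.PrintFormattedException(exc_type, exc_value, tb)

sys.excepthook = _Hook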
# Copyright (c) 2011-2015 Berkeley Model United Nations. All rights reserved. # Use of this source code is governed by a BSD License (see LICENSE). import json from django.urls import reverse from django.test import TestCase from django.test.client import Client from rest_framework import exceptions from huxley.accounts.models import User from huxley.core.models import Conference from huxley.api import tests from huxley.api.tests import auto from huxley.utils.test import models class UserDetailGetTestCase(tests.RetrieveAPITestCase): url_name = 'api:user_detail' def test_anonymous_user(self): '''It should reject request from an anonymous user.''' user = models.new_user() response = self.get_response(user.id) self.assertNotAuthenticated(response) def test_other_user(self): '''It should reject request from another user.''' user1 = models.new_user(username='user1') user2 = models.new_user(username='user2', password='<PASSWORD>') self.client.login(username='user2', password='<PASSWORD>') response = self.get_response(user1.id) self.assertPermissionDenied(response) def test_superuser(self): '''It should return the correct fields for a superuser.''' user1 = models.new_user(username='user1') user2 = models.new_superuser(username='user2', password='<PASSWORD>') self.client.login(username='user2', password='<PASSWORD>') response = self.get_response(user1.id) self.assertEqual(response.data, { 'id': user1.id, 'username': user1.username, 'first_name': user1.first_name, 'last_name': user1.last_name, 'user_type': user1.user_type, 'school': user1.school_id, 'committee': user1.committee_id, 'delegate': user1.delegate_id }) def test_self(self): '''It should return the correct fields for a single user.''' school = models.new_school() user = school.advisor self.client.login(username=user.username, password='<PASSWORD>') response = self.get_response(user.id) self.assertEqual(response.data, { 'id': user.id, 'username': user.username, 'first_name': user.first_name, 'last_name': user.last_name, 'user_type': user.user_type, 'school': { 'id': school.id, 'name': school.name, 'address': school.address, 'city': school.city, 'state': school.state, 'zip_code': school.zip_code, 'country': school.country, 'primary_name': school.primary_name, 'primary_gender': school.primary_gender, 'primary_email': school.primary_email, 'primary_phone': school.primary_phone, 'primary_type': school.primary_type, 'secondary_name': school.secondary_name, 'secondary_gender': school.secondary_gender, 'secondary_email': school.secondary_email, 'secondary_phone': school.secondary_phone, 'secondary_type': school.secondary_type, 'program_type': school.program_type, 'times_attended': school.times_attended, 'international': school.international, }, 'committee': user.committee_id, 'delegate': user.delegate_id }) def test_chair(self): '''It should have the correct fields for chairs.''' committee = models.new_committee() user = models.new_user( username='testuser', password='<PASSWORD>', user_type=User.TYPE_CHAIR, committee_id=committee.id) self.client.login(username='testuser', password='<PASSWORD>') response = self.get_response(user.id) self.assertEqual(response.data, { 'id': user.id, 'username': user.username, 'first_name': user.first_name, 'last_name': user.last_name, 'user_type': user.user_type, 'school': user.school_id, 'committee': user.committee_id, 'delegate': user.delegate_id }) class UserDetailDeleteTestCase(auto.DestroyAPIAutoTestCase): url_name = 'api:user_detail' @classmethod def get_test_object(cls): return models.new_user() def 
test_anonymous_user(self): '''It should reject the request from an anonymous user.''' self.do_test(expected_error=auto.EXP_NOT_AUTHENTICATED) def test_other_user(self): '''It should reject the request from another user.''' models.new_school(user=self.default_user) self.as_default_user().do_test( expected_error=auto.EXP_PERMISSION_DENIED) def test_self(self): '''It should allow a user to delete themself.''' self.as_user(self.object).do_test() def test_superuser(self): '''It should allow a superuser to delete a user.''' self.as_superuser().do_test() class UserDetailPatchTestCase(tests.PartialUpdateAPITestCase): url_name = 'api:user_detail' params = {'first_name': 'first', 'last_name': 'last'} def setUp(self): self.user = models.new_user(username='user1', password='<PASSWORD>') def test_anonymous_user(self): '''An anonymous user should not be able to change information.''' response = self.get_response(self.user.id, params=self.params) self.assertNotAuthenticated(response) user = User.objects.get(id=self.user.id) self.assertEqual(user.first_name, 'Test') self.assertEqual(user.last_name, 'User') def test_other_user(self): '''Another user should not be able to change information about any other user.''' models.new_user(username='user2', password='<PASSWORD>') self.client.login(username='user2', password='<PASSWORD>') response = self.get_response(self.user.id, params=self.params) self.assertPermissionDenied(response) user = User.objects.get(id=self.user.id) self.assertEqual(user.first_name, 'Test') self.assertEqual(user.last_name, 'User') def test_self(self): '''A User should be allowed to change information about himself.''' self.client.login(username='user1', password='<PASSWORD>') response = self.get_response(self.user.id, params=self.params) user = User.objects.get(id=self.user.id) self.assertEqual(response.data['first_name'], user.first_name) self.assertEqual(response.data['last_name'], user.last_name) def test_superuser(self): '''A superuser should be allowed to change information about a user.''' models.new_superuser(username='user2', password='<PASSWORD>') self.client.login(username='user2', password='<PASSWORD>') response = self.get_response(self.user.id, params=self.params) user = User.objects.get(id=self.user.id) self.assertEqual(response.data['first_name'], user.first_name) self.assertEqual(response.data['last_name'], user.last_name) class UserListGetTestCase(tests.ListAPITestCase): url_name = 'api:user_list' def test_anonymous_user(self): '''It should reject the request from an anonymous user.''' models.new_user(username='user1') models.new_user(username='user2') response = self.get_response() self.assertNotAuthenticated(response) def test_user(self): '''It should reject the request from a regular user.''' models.new_user(username='user1', password='<PASSWORD>') models.new_user(username='user2') self.client.login(username='user1', password='<PASSWORD>') response = self.get_response() self.assertPermissionDenied(response) def test_superuser(self): '''It should allow a superuser to list all users.''' user1 = models.new_superuser(username='user1', password='<PASSWORD>') user2 = models.new_user(username='user2') self.client.login(username='user1', password='<PASSWORD>') response = self.get_response() self.assertEqual(response.data, [ {'id': user1.id, 'username': user1.username, 'first_name': user1.first_name, 'last_name': user1.last_name, 'user_type': user1.user_type, 'school': user1.school_id, 'committee': user1.committee_id, 'delegate': user1.delegate_id}, {'id': user2.id, 'username': 
user2.username, 'first_name': user2.first_name, 'last_name': user2.last_name, 'user_type': user2.user_type, 'school': user2.school_id, 'committee': user2.committee_id, 'delegate': user2.delegate_id}, ]) class UserListPostTestCase(tests.CreateAPITestCase): url_name = 'api:user_list' params = {'username': 'Kunal', 'password': 'password', 'first_name': 'Kunal', 'last_name': 'Mehta'} def test_valid(self): params = self.get_params() response = self.get_response(params) user_query = User.objects.filter(id=response.data['id']) self.assertTrue(user_query.exists()) user = User.objects.get(id=response.data['id']) self.assertEqual(response.data, { 'id': user.id, 'username': user.username, 'first_name': user.first_name, 'last_name': user.last_name, 'user_type': User.TYPE_ADVISOR, 'school': user.school_id, 'email': user.email }) def test_empty_username(self): response = self.get_response(params=self.get_params(username='')) self.assertEqual(response.data, { 'username': [u'This field may not be blank.'] }) def test_taken_username(self): models.new_user(username='_Kunal', password='<PASSWORD>') response = self.get_response(params=self.get_params(username='_Kunal')) self.assertEqual(response.data, { 'username': [u'A user with that username already exists.'] }) def test_invalid_username(self): response = self.get_response(params=self.get_params(username='>Kunal')) self.assertEqual(response.data['username'], [ exceptions.ErrorDetail(u'Enter a valid username. This value may contain only ' u'letters, numbers, and @/./+/-/_ characters.', code='invalid') ] ) def test_empty_password(self): response = self.get_response(params=self.get_params(password='')) self.assertEqual(response.data, { 'password': [u'This field may not be blank.'] }) def test_invalid_password(self): response = self.get_response(params=self.get_params(password='><PASSWORD>')) self.assertEqual(response.data, { 'password': ['<PASSWORD>.'] }) def test_empty_first_name(self): response = self.get_response(params=self.get_params(first_name='')) self.assertEqual(response.data, { 'first_name': ['This field is required.'] }) def test_empty_last_name(self): response = self.get_response(params=self.get_params(last_name='')) self.assertEqual(response.data, { 'last_name': ['This field is required.'] }) def test_username_length(self): response = self.get_response(params=self.get_params(username='user')) self.assertEqual(response.data, { 'username': ['Username must be at least 5 characters.'] }) def test_password_length(self): response = self.get_response(params=self.get_params(password='<PASSWORD>')) self.assertEqual(response.data, { 'password': ['Password must be at least 6 characters.'] }) def test_invalid(self): conf = Conference.get_current() conf.open_reg = False conf.save() params = self.get_params() response = self.get_response(params) self.assertEqual(response.data, { 'detail': 'Conference registration is closed.' 
}) conf.open_reg = True conf.save() class CurrentUserTestCase(TestCase): fixtures = ['conference'] def setUp(self): self.client = Client() self.url = reverse('api:current_user') self.maxDiff = None def get_data(self, url): return json.loads(self.client.get(url).content) def test_login(self): user = models.new_user(username='lol', password='<PASSWORD>') user2 = models.new_user(username='bunny', password='<PASSWORD>') credentials = {'username': 'lol', 'password': '<PASSWORD>'} response = self.client.post( self.url, data=json.dumps(credentials), content_type='application/json') self.assertEqual(response.status_code, 201) self.assertEqual(int(self.client.session['_auth_user_id']), user.id) credentials = {'username': 'bunny', 'password': '<PASSWORD>'} response = self.client.post( self.url, data=json.dumps(credentials), content_type='application/json') self.assertEqual(int(self.client.session['_auth_user_id']), user.id) data = json.loads(response.content) self.assertEqual(data['detail'], 'Another user is currently logged in.') def test_logout(self): user = models.new_user(username='lol', password='<PASSWORD>') self.client.login(username='lol', password='<PASSWORD>') self.assertEqual(int(self.client.session['_auth_user_id']), user.id) response = self.client.delete(self.url) self.assertEqual(response.status_code, 200) self.assertTrue('_auth_user_id' not in self.client.session) def test_get(self): data = self.get_data(self.url) self.assertEqual(len(data.keys()), 1) self.assertEqual(data['detail'], u'Not found.') school = models.new_school() user = school.advisor self.client.login(username=user.username, password='<PASSWORD>') data = self.get_data(self.url) self.assertEqual(len(data.keys()), 8) self.assertEqual(data['id'], user.id) self.assertEqual(data['username'], user.username) self.assertEqual(data['first_name'], user.first_name) self.assertEqual(data['last_name'], user.last_name) self.assertEqual(data['user_type'], User.TYPE_ADVISOR) self.assertEqual(data['school'], { u'id': school.id, u'name': str(school.name), u'address': str(school.address), u'city': str(school.city), u'state': str(school.state), u'zip_code': str(school.zip_code), u'country': str(school.country), u'primary_name': str(school.primary_name), u'primary_gender': school.primary_gender, u'primary_email': str(school.primary_email), u'primary_phone': str(school.primary_phone), u'primary_type': school.primary_type, u'secondary_name': str(school.secondary_name), u'secondary_gender': school.secondary_gender, u'secondary_email': str(school.secondary_email), u'secondary_phone': str(school.secondary_phone), u'secondary_type': school.secondary_type, u'program_type': school.program_type, u'times_attended': school.times_attended, u'international': school.international, }) class DelegateUserCreateTestCase(tests.PartialUpdateAPITestCase): url_name = 'api:delegate_detail' def setUp(self): self.advisor = models.new_user(username='advisor', password='<PASSWORD>') self.school = models.new_school(user=self.advisor) self.registration = models.new_registration(school=self.school) self.assignment = models.new_assignment(registration=self.registration) self.delegate = models.new_delegate( school=self.school, assignment=self.assignment) self.delegate.assignment = None self.delegate.save() self.superuser = models.new_user(is_superuser=True) self.params = {'email': '<EMAIL>'} self.assign_params = {'assignment': self.assignment.id} self.unassign_params = {'assignment': None} def test_delegate_no_user(self): self.client.login(username='advisor', 
password='<PASSWORD>') response = self.get_response(self.delegate.id, params=self.params) self.assertFalse( User.objects.filter(delegate__id=self.delegate.id).exists()) def test_delegate_user_create(self): self.client.login(username='advisor', password='<PASSWORD>') response = self.get_response( self.delegate.id, params=self.assign_params) self.assertTrue( User.objects.filter(delegate__id=self.delegate.id).exists()) def test_delegate_user_unassign(self): self.client.login(username='advisor', password='<PASSWORD>') response1 = self.get_response( self.delegate.id, params=self.assign_params) response2 = self.get_response( self.delegate.id, params=self.unassign_params) self.assertTrue( User.objects.filter(delegate__id=self.delegate.id).exists()) class DelegateUserDestroyTestCase(tests.DestroyAPITestCase): url_name = 'api:delegate_detail' def setUp(self): self.advisor = models.new_user(username='advisor', password='<PASSWORD>') self.school = models.new_school(user=self.advisor) self.registration = models.new_registration(school=self.school) self.assignment = models.new_assignment(registration=self.registration) self.delegate = models.new_delegate( school=self.school, assignment=self.assignment) self.delegate.assignment = None self.delegate.save() self.superuser = models.new_user(is_superuser=True) self.delegate_user = models.new_user( username='delegate', delegate=self.delegate, user_type=User.TYPE_DELEGATE) def test_delegate_user_destroy(self): self.client.login(username='advisor', password='<PASSWORD>') response = self.get_response(self.delegate.id) self.assertFalse( User.objects.filter(delegate__id=self.delegate.id).exists())
[ "huxley.accounts.models.User.objects.filter", "huxley.utils.test.models.new_committee", "huxley.utils.test.models.new_user", "huxley.core.models.Conference.get_current", "json.loads", "huxley.accounts.models.User.objects.get", "huxley.utils.test.models.new_assignment", "huxley.utils.test.models.new_superuser", "rest_framework.exceptions.ErrorDetail", "json.dumps", "django.test.client.Client", "huxley.utils.test.models.new_school", "django.urls.reverse", "huxley.utils.test.models.new_registration", "huxley.utils.test.models.new_delegate" ]
[((692, 709), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {}), '()\n', (707, 709), False, 'from huxley.utils.test import models\n'), ((909, 942), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user1"""'}), "(username='user1')\n", (924, 942), False, 'from huxley.utils.test import models\n'), ((959, 1015), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user2"""', 'password': '"""<PASSWORD>"""'}), "(username='user2', password='<PASSWORD>')\n", (974, 1015), False, 'from huxley.utils.test import models\n'), ((1292, 1325), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user1"""'}), "(username='user1')\n", (1307, 1325), False, 'from huxley.utils.test import models\n'), ((1342, 1403), 'huxley.utils.test.models.new_superuser', 'models.new_superuser', ([], {'username': '"""user2"""', 'password': '"""<PASSWORD>"""'}), "(username='user2', password='<PASSWORD>')\n", (1362, 1403), False, 'from huxley.utils.test import models\n'), ((2007, 2026), 'huxley.utils.test.models.new_school', 'models.new_school', ([], {}), '()\n', (2024, 2026), False, 'from huxley.utils.test import models\n'), ((3660, 3682), 'huxley.utils.test.models.new_committee', 'models.new_committee', ([], {}), '()\n', (3680, 3682), False, 'from huxley.utils.test import models\n'), ((3698, 3816), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""testuser"""', 'password': '"""<PASSWORD>"""', 'user_type': 'User.TYPE_CHAIR', 'committee_id': 'committee.id'}), "(username='testuser', password='<PASSWORD>', user_type=User.\n TYPE_CHAIR, committee_id=committee.id)\n", (3713, 3816), False, 'from huxley.utils.test import models\n'), ((4504, 4521), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {}), '()\n', (4519, 4521), False, 'from huxley.utils.test import models\n'), ((4791, 4832), 'huxley.utils.test.models.new_school', 'models.new_school', ([], {'user': 'self.default_user'}), '(user=self.default_user)\n', (4808, 4832), False, 'from huxley.utils.test import models\n'), ((5382, 5438), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user1"""', 'password': '"""<PASSWORD>"""'}), "(username='user1', password='<PASSWORD>')\n", (5397, 5438), False, 'from huxley.utils.test import models\n'), ((5682, 5715), 'huxley.accounts.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.user.id'}), '(id=self.user.id)\n', (5698, 5715), False, 'from huxley.accounts.models import User\n'), ((5945, 6001), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user2"""', 'password': '"""<PASSWORD>"""'}), "(username='user2', password='<PASSWORD>')\n", (5960, 6001), False, 'from huxley.utils.test import models\n'), ((6203, 6236), 'huxley.accounts.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.user.id'}), '(id=self.user.id)\n', (6219, 6236), False, 'from huxley.accounts.models import User\n'), ((6592, 6625), 'huxley.accounts.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.user.id'}), '(id=self.user.id)\n', (6608, 6625), False, 'from huxley.accounts.models import User\n'), ((6885, 6946), 'huxley.utils.test.models.new_superuser', 'models.new_superuser', ([], {'username': '"""user2"""', 'password': '"""<PASSWORD>"""'}), "(username='user2', password='<PASSWORD>')\n", (6905, 6946), False, 'from huxley.utils.test import models\n'), ((7101, 7134), 'huxley.accounts.models.User.objects.get', 'User.objects.get', ([], {'id': 'self.user.id'}), 
'(id=self.user.id)\n', (7117, 7134), False, 'from huxley.accounts.models import User\n'), ((7469, 7502), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user1"""'}), "(username='user1')\n", (7484, 7502), False, 'from huxley.utils.test import models\n'), ((7511, 7544), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user2"""'}), "(username='user2')\n", (7526, 7544), False, 'from huxley.utils.test import models\n'), ((7729, 7785), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user1"""', 'password': '"""<PASSWORD>"""'}), "(username='user1', password='<PASSWORD>')\n", (7744, 7785), False, 'from huxley.utils.test import models\n'), ((7794, 7827), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user2"""'}), "(username='user2')\n", (7809, 7827), False, 'from huxley.utils.test import models\n'), ((8089, 8150), 'huxley.utils.test.models.new_superuser', 'models.new_superuser', ([], {'username': '"""user1"""', 'password': '"""<PASSWORD>"""'}), "(username='user1', password='<PASSWORD>')\n", (8109, 8150), False, 'from huxley.utils.test import models\n'), ((8167, 8200), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""user2"""'}), "(username='user2')\n", (8182, 8200), False, 'from huxley.utils.test import models\n'), ((9386, 9429), 'huxley.accounts.models.User.objects.filter', 'User.objects.filter', ([], {'id': "response.data['id']"}), "(id=response.data['id'])\n", (9405, 9429), False, 'from huxley.accounts.models import User\n'), ((9491, 9531), 'huxley.accounts.models.User.objects.get', 'User.objects.get', ([], {'id': "response.data['id']"}), "(id=response.data['id'])\n", (9507, 9531), False, 'from huxley.accounts.models import User\n'), ((10114, 10171), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""_Kunal"""', 'password': '"""<PASSWORD>"""'}), "(username='_Kunal', password='<PASSWORD>')\n", (10129, 10171), False, 'from huxley.utils.test import models\n'), ((12145, 12169), 'huxley.core.models.Conference.get_current', 'Conference.get_current', ([], {}), '()\n', (12167, 12169), False, 'from huxley.core.models import Conference\n'), ((12576, 12584), 'django.test.client.Client', 'Client', ([], {}), '()\n', (12582, 12584), False, 'from django.test.client import Client\n'), ((12604, 12631), 'django.urls.reverse', 'reverse', (['"""api:current_user"""'], {}), "('api:current_user')\n", (12611, 12631), False, 'from django.urls import reverse\n'), ((12788, 12842), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""lol"""', 'password': '"""<PASSWORD>"""'}), "(username='lol', password='<PASSWORD>')\n", (12803, 12842), False, 'from huxley.utils.test import models\n'), ((12859, 12915), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""bunny"""', 'password': '"""<PASSWORD>"""'}), "(username='bunny', password='<PASSWORD>')\n", (12874, 12915), False, 'from huxley.utils.test import models\n'), ((13570, 13598), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (13580, 13598), False, 'import json\n'), ((13748, 13802), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""lol"""', 'password': '"""<PASSWORD>"""'}), "(username='lol', password='<PASSWORD>')\n", (13763, 13802), False, 'from huxley.utils.test import models\n'), ((14300, 14319), 'huxley.utils.test.models.new_school', 'models.new_school', ([], {}), '()\n', (14317, 14319), 
False, 'from huxley.utils.test import models\n'), ((16010, 16068), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""advisor"""', 'password': '"""<PASSWORD>"""'}), "(username='advisor', password='<PASSWORD>')\n", (16025, 16068), False, 'from huxley.utils.test import models\n'), ((16091, 16127), 'huxley.utils.test.models.new_school', 'models.new_school', ([], {'user': 'self.advisor'}), '(user=self.advisor)\n', (16108, 16127), False, 'from huxley.utils.test import models\n'), ((16156, 16199), 'huxley.utils.test.models.new_registration', 'models.new_registration', ([], {'school': 'self.school'}), '(school=self.school)\n', (16179, 16199), False, 'from huxley.utils.test import models\n'), ((16226, 16279), 'huxley.utils.test.models.new_assignment', 'models.new_assignment', ([], {'registration': 'self.registration'}), '(registration=self.registration)\n', (16247, 16279), False, 'from huxley.utils.test import models\n'), ((16304, 16371), 'huxley.utils.test.models.new_delegate', 'models.new_delegate', ([], {'school': 'self.school', 'assignment': 'self.assignment'}), '(school=self.school, assignment=self.assignment)\n', (16323, 16371), False, 'from huxley.utils.test import models\n'), ((16479, 16513), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (16494, 16513), False, 'from huxley.utils.test import models\n'), ((17808, 17866), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""advisor"""', 'password': '"""<PASSWORD>"""'}), "(username='advisor', password='<PASSWORD>')\n", (17823, 17866), False, 'from huxley.utils.test import models\n'), ((17889, 17925), 'huxley.utils.test.models.new_school', 'models.new_school', ([], {'user': 'self.advisor'}), '(user=self.advisor)\n', (17906, 17925), False, 'from huxley.utils.test import models\n'), ((17954, 17997), 'huxley.utils.test.models.new_registration', 'models.new_registration', ([], {'school': 'self.school'}), '(school=self.school)\n', (17977, 17997), False, 'from huxley.utils.test import models\n'), ((18024, 18077), 'huxley.utils.test.models.new_assignment', 'models.new_assignment', ([], {'registration': 'self.registration'}), '(registration=self.registration)\n', (18045, 18077), False, 'from huxley.utils.test import models\n'), ((18102, 18169), 'huxley.utils.test.models.new_delegate', 'models.new_delegate', ([], {'school': 'self.school', 'assignment': 'self.assignment'}), '(school=self.school, assignment=self.assignment)\n', (18121, 18169), False, 'from huxley.utils.test import models\n'), ((18277, 18311), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'is_superuser': '(True)'}), '(is_superuser=True)\n', (18292, 18311), False, 'from huxley.utils.test import models\n'), ((18341, 18436), 'huxley.utils.test.models.new_user', 'models.new_user', ([], {'username': '"""delegate"""', 'delegate': 'self.delegate', 'user_type': 'User.TYPE_DELEGATE'}), "(username='delegate', delegate=self.delegate, user_type=User\n .TYPE_DELEGATE)\n", (18356, 18436), False, 'from huxley.utils.test import models\n'), ((10564, 10712), 'rest_framework.exceptions.ErrorDetail', 'exceptions.ErrorDetail', (['u"""Enter a valid username. This value may contain only letters, numbers, and @/./+/-/_ characters."""'], {'code': '"""invalid"""'}), "(\n u'Enter a valid username. 
This value may contain only letters, numbers, and @/./+/-/_ characters.'\n , code='invalid')\n", (10586, 10712), False, 'from rest_framework import exceptions\n'), ((13061, 13084), 'json.dumps', 'json.dumps', (['credentials'], {}), '(credentials)\n', (13071, 13084), False, 'import json\n'), ((13407, 13430), 'json.dumps', 'json.dumps', (['credentials'], {}), '(credentials)\n', (13417, 13430), False, 'import json\n'), ((16893, 16943), 'huxley.accounts.models.User.objects.filter', 'User.objects.filter', ([], {'delegate__id': 'self.delegate.id'}), '(delegate__id=self.delegate.id)\n', (16912, 16943), False, 'from huxley.accounts.models import User\n'), ((17197, 17247), 'huxley.accounts.models.User.objects.filter', 'User.objects.filter', ([], {'delegate__id': 'self.delegate.id'}), '(delegate__id=self.delegate.id)\n', (17216, 17247), False, 'from huxley.accounts.models import User\n'), ((17602, 17652), 'huxley.accounts.models.User.objects.filter', 'User.objects.filter', ([], {'delegate__id': 'self.delegate.id'}), '(delegate__id=self.delegate.id)\n', (17621, 17652), False, 'from huxley.accounts.models import User\n'), ((18674, 18724), 'huxley.accounts.models.User.objects.filter', 'User.objects.filter', ([], {'delegate__id': 'self.delegate.id'}), '(delegate__id=self.delegate.id)\n', (18693, 18724), False, 'from huxley.accounts.models import User\n')]
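The helper base classes (tests.CreateAPITestCase and friends) are not part of this file. Judging from the calls above, get_params merges the class-level params dict with per-call keyword overrides; a rough reconstruction, not the actual huxley helper:

# A guess at the helper base class the tests above rely on; not huxley's code.
from django.test import TestCase


class CreateAPITestCase(TestCase):
    params = {}

    def get_params(self, **overrides):
        merged = dict(self.params)  # start from the class-level defaults
        merged.update(overrides)    # per-test keyword overrides win
        return merged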
# import the random numbers module
import random
# import the plotting module
import plotly.graph_objs as go

# how much money each strategy starts with
startmoney = 1000000

# bet size coefficient
c1 = 0.001

# number of wins and losses
win = 0
loose = 0
# number of games played with the first strategy
games = 0

# statistics for the first strategy
balance1 = []
games1 = []

# statistics for the second strategy
balance2 = []
games2 = []

# statistics for the third strategy
balance3 = []
games3 = []

# start playing with the full amount
# first strategy: negative expected value, like a casino
money = startmoney

# while we still have money
while money > 0:
    # the bet is a fixed fraction of the starting amount
    bet = startmoney * c1
    # if the bet is larger than what we have left, bet everything that remains so we don't go negative
    if bet > money:
        bet = money
    # the bet reduces our money
    money -= bet
    # record this game in the statistics: money and game number
    balance1.append(money)
    games1.append(len(games1) + 1)
    # spin a roulette wheel with 18 black numbers, 18 red and one zero. We bet on black
    ball = random.randint(1, 37)
    # let the first 18 numbers be black, to keep the algorithm simple
    # if our bet won, we landed in the right range
    if ball in range(1, 19):
        # we get our stake back doubled
        money += bet * 2
        # increase the win count
        win += 1
    else:
        # otherwise increase the loss count
        loose += 1

games = win + loose
# print the result for the first strategy
print(
    "Bets won: "
    + str(win)
    + " ("
    + str(win / games * 100)
    + "%). "
    + " Bets lost: "
    + str(loose)
    + " ("
    + str(loose / games * 100)
    + "%). "
)

# the second strategy starts, again with the full amount
# second strategy: zero expected value
money = startmoney

# reset the statistics
win = 0
loose = 0

# start playing with the full amount
money = startmoney

# play while we still have money and haven't yet played as many games as the first time
while (money > 0) and (win + loose < games):
    # the bet is a fixed fraction of the starting amount
    bet = startmoney * c1
    # if the bet is larger than what we have left, bet everything that remains so we don't go negative
    if bet > money:
        bet = money
    # the bet reduces our money
    money -= bet
    # record this game in the statistics: money and game number
    balance2.append(money)
    games2.append(len(games2) + 1)
    # spin a roulette wheel with 18 black numbers and 18 red. Since the counts are equal,
    # the expected value is zero. As before, we bet on black
    ball = random.randint(1, 36)
    # let the first 18 numbers be black, to keep the algorithm simple
    # if our bet won, we landed in the right range
    if ball in range(1, 19):
        # we get our stake back doubled
        money += bet * 2
        # increase the win count
        win += 1
    else:
        # otherwise increase the loss count
        loose += 1

# print the result for the second strategy
print(
    "Bets won: "
    + str(win)
    + " ("
    + str(win / games * 100)
    + "%). "
    + " Bets lost: "
    + str(loose)
    + " ("
    + str(loose / games * 100)
    + "%). "
)

# the third strategy starts, again with the full amount
# third strategy: positive expected value
money = startmoney

# reset the statistics
win = 0
loose = 0

# start playing with the full amount
money = startmoney

# play while we still have money and haven't yet played as many games as the first time
while (money > 0) and (win + loose < games):
    # the bet is a fixed fraction of the starting amount
    bet = startmoney * c1
    # if the bet is larger than what we have left, bet everything that remains so we don't go negative
    if bet > money:
        bet = money
    # the bet reduces our money
    money -= bet
    # record this game in the statistics: money and game number
    balance3.append(money)
    games3.append(len(games3) + 1)
    # spin a roulette wheel with 18 black numbers and 17 red. Since there are more black
    # numbers and we bet on black, the expected value is positive.
    # As before, we bet on black
    ball = random.randint(1, 35)
    # let the first 18 numbers be black, to keep the algorithm simple
    # if our bet won, we landed in the right range
    if ball in range(1, 19):
        # we get our stake back doubled
        money += bet * 2
        # increase the win count
        win += 1
    else:
        # otherwise increase the loss count
        loose += 1

# print the result for the third strategy
print(
    "Bets won: "
    + str(win)
    + " ("
    + str(win / games * 100)
    + "%). "
    + " Bets lost: "
    + str(loose)
    + " ("
    + str(loose / games * 100)
    + "%). "
)

# build the charts
fig = go.Figure()
# for the first strategy
fig.add_trace(
    go.Scatter(x=games1, y=balance1, name="Negative expected value")
)
# for the second
fig.add_trace(go.Scatter(x=games2, y=balance2, name="Zero expected value"))
# and for the third
fig.add_trace(
    go.Scatter(x=games3, y=balance3, name="Positive expected value")
)
# show the charts in the browser
fig.show()
[ "plotly.graph_objs.Scatter", "plotly.graph_objs.Figure", "random.randint" ]
[((5110, 5121), 'plotly.graph_objs.Figure', 'go.Figure', ([], {}), '()\n', (5119, 5121), True, 'import plotly.graph_objs as go\n'), ((1214, 1235), 'random.randint', 'random.randint', (['(1)', '(37)'], {}), '(1, 37)\n', (1228, 1235), False, 'import random\n'), ((2822, 2843), 'random.randint', 'random.randint', (['(1)', '(36)'], {}), '(1, 36)\n', (2836, 2843), False, 'import random\n'), ((4447, 4468), 'random.randint', 'random.randint', (['(1)', '(35)'], {}), '(1, 35)\n', (4461, 4468), False, 'import random\n'), ((5164, 5230), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'games1', 'y': 'balance1', 'name': '"""Отрицательное матожидание"""'}), "(x=games1, y=balance1, name='Отрицательное матожидание')\n", (5174, 5230), True, 'import plotly.graph_objs as go\n'), ((5260, 5320), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'games2', 'y': 'balance2', 'name': '"""Нулевое матожидание"""'}), "(x=games2, y=balance2, name='Нулевое матожидание')\n", (5270, 5320), True, 'import plotly.graph_objs as go\n'), ((5357, 5423), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'games3', 'y': 'balance3', 'name': '"""Положительное матожидание"""'}), "(x=games3, y=balance3, name='Положительное матожидание')\n", (5367, 5423), True, 'import plotly.graph_objs as go\n')]
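The three wheels differ only in pocket count (37, 36 and 35 numbers, always with 18 black), which is what produces the negative, zero and positive expectations the comments describe. Since a winning bet returns twice the stake, the expected value per unit staked is 2*p - 1; a quick sanity check:

# Sanity check of the expected value per unit staked: EV = 2*p - 1.
for pockets, label in ((37, "negative"), (36, "zero"), (35, "positive")):
    p = 18 / pockets  # probability that black wins
    print(f"{label:>8} wheel: EV = {2 * p - 1:+.4f} per unit bet")
# Prints roughly -0.0270, +0.0000 and +0.0286 respectively.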
"""Unit tests for posts app urls """ import pytest from django.contrib.auth import get_user_model from django.urls import resolve, reverse from model_bakery import baker from mutadi.private_messages.models import PrivateMessage pytestmark = pytest.mark.django_db User = get_user_model() class TestPrivateMessageUrls: """Group multiple tests for Private Message urls""" @pytest.fixture def proto_user(self): """Fixture for baked User model.""" return baker.make( User, username=baker.seq("User-"), _quantity=3, ) @pytest.fixture def proto_private_message(self, proto_user): """Fixture for baked PrivateMessage model.""" return baker.make( PrivateMessage, sender=proto_user[0], recipient=proto_user[1], content=( "Proident nisi cillum sit tempor " "reprehenderit proident in non fugiat ex id." ), ) def test_inbox_reverse(self): """inbox should reverse to /messages/inbox/.""" assert reverse("inbox") == "/messages/inbox/" def test_inbox_resolve(self): """/messages/inbox/ should resolve to inbox.""" assert resolve("/messages/inbox/").view_name == "inbox" def test_outbox_reverse(self): """outbox should reverse to /messages/outbox/.""" assert reverse("outbox") == "/messages/outbox/" def test_outbox_resolve(self): """/messages/outbox/ should resolve to outbox.""" assert resolve("/messages/outbox/").view_name == "outbox" def test_delete_message_reverse(self, proto_private_message): """ delete_message should reverse to /messages/message_detail/{proto_private_message.pk}/delete. """ assert ( reverse( "delete_message", args=[ f"{proto_private_message.pk}", ], ) == f"/messages/message_detail/{proto_private_message.pk}/delete" ) def test_delete_message_resolve(self, proto_private_message): """ /messages/message_detail/{proto_private_message.pk}/delete should resolve to delete_message. """ assert ( resolve( f"/messages/message_detail/{proto_private_message.pk}/delete" ).view_name == "delete_message" ) def test_message_detail_reverse(self, proto_private_message): """ message_detail should reverse to /messages/message_detail/{proto_private_message.pk}. """ assert ( reverse( "message_detail", args=[ f"{proto_private_message.pk}", ], ) == f"/messages/message_detail/{proto_private_message.pk}" ) def test_message_detail_resolve(self, proto_private_message): """ /messages/message_detail/{proto_private_message.pk} should resolve to message_detail. """ assert ( resolve( f"/messages/message_detail/{proto_private_message.pk}" ).view_name == "message_detail" ) def test_compose_message_reverse(self): """ compose_message should reverse to /messages/compose_message/. """ assert reverse("compose_message") == "/messages/compose_message/" def test_compose_message_resolve(self): """ /messages/compose_message/ should resolve to compose_message. """ assert ( resolve("/messages/compose_message/").view_name == "compose_message" )
[ "django.contrib.auth.get_user_model", "model_bakery.baker.make", "django.urls.reverse", "django.urls.resolve", "model_bakery.baker.seq" ]
[((272, 288), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (286, 288), False, 'from django.contrib.auth import get_user_model\n'), ((728, 902), 'model_bakery.baker.make', 'baker.make', (['PrivateMessage'], {'sender': 'proto_user[0]', 'recipient': 'proto_user[1]', 'content': '"""Proident nisi cillum sit tempor reprehenderit proident in non fugiat ex id."""'}), "(PrivateMessage, sender=proto_user[0], recipient=proto_user[1],\n content=\n 'Proident nisi cillum sit tempor reprehenderit proident in non fugiat ex id.'\n )\n", (738, 902), False, 'from model_bakery import baker\n'), ((1105, 1121), 'django.urls.reverse', 'reverse', (['"""inbox"""'], {}), "('inbox')\n", (1112, 1121), False, 'from django.urls import resolve, reverse\n'), ((1408, 1425), 'django.urls.reverse', 'reverse', (['"""outbox"""'], {}), "('outbox')\n", (1415, 1425), False, 'from django.urls import resolve, reverse\n'), ((1838, 1901), 'django.urls.reverse', 'reverse', (['"""delete_message"""'], {'args': "[f'{proto_private_message.pk}']"}), "('delete_message', args=[f'{proto_private_message.pk}'])\n", (1845, 1901), False, 'from django.urls import resolve, reverse\n'), ((2679, 2742), 'django.urls.reverse', 'reverse', (['"""message_detail"""'], {'args': "[f'{proto_private_message.pk}']"}), "('message_detail', args=[f'{proto_private_message.pk}'])\n", (2686, 2742), False, 'from django.urls import resolve, reverse\n'), ((3431, 3457), 'django.urls.reverse', 'reverse', (['"""compose_message"""'], {}), "('compose_message')\n", (3438, 3457), False, 'from django.urls import resolve, reverse\n'), ((534, 552), 'model_bakery.baker.seq', 'baker.seq', (['"""User-"""'], {}), "('User-')\n", (543, 552), False, 'from model_bakery import baker\n'), ((1250, 1277), 'django.urls.resolve', 'resolve', (['"""/messages/inbox/"""'], {}), "('/messages/inbox/')\n", (1257, 1277), False, 'from django.urls import resolve, reverse\n'), ((1558, 1586), 'django.urls.resolve', 'resolve', (['"""/messages/outbox/"""'], {}), "('/messages/outbox/')\n", (1565, 1586), False, 'from django.urls import resolve, reverse\n'), ((2304, 2374), 'django.urls.resolve', 'resolve', (['f"""/messages/message_detail/{proto_private_message.pk}/delete"""'], {}), "(f'/messages/message_detail/{proto_private_message.pk}/delete')\n", (2311, 2374), False, 'from django.urls import resolve, reverse\n'), ((3131, 3194), 'django.urls.resolve', 'resolve', (['f"""/messages/message_detail/{proto_private_message.pk}"""'], {}), "(f'/messages/message_detail/{proto_private_message.pk}')\n", (3138, 3194), False, 'from django.urls import resolve, reverse\n'), ((3658, 3695), 'django.urls.resolve', 'resolve', (['"""/messages/compose_message/"""'], {}), "('/messages/compose_message/')\n", (3665, 3695), False, 'from django.urls import resolve, reverse\n')]
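The URLconf under test is not included here, but the reverse/resolve pairs pin it down almost completely. A reconstruction of what mutadi's private_messages urlpatterns presumably look like, assuming the app is included under the 'messages/' prefix; the view callables are assumptions:

# Reconstructed from the tests above; the view callables are hypothetical.
from django.urls import path

from mutadi.private_messages import views  # assumed views module

urlpatterns = [
    path("inbox/", views.inbox, name="inbox"),
    path("outbox/", views.outbox, name="outbox"),
    path("compose_message/", views.compose_message, name="compose_message"),
    path("message_detail/<int:pk>", views.message_detail, name="message_detail"),
    path("message_detail/<int:pk>/delete", views.delete_message, name="delete_message"),
]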
""" Time to make the super-fertilizer from our ingredients. """ from random import random from evennia.utils import interactive from ..state import BaseState from .. import objects GREETING = """ This is the situation, {name}: The |rJester|n wants to win your village's yearly |wpie-eating contest|n. As it turns out, you are one of her most dangerous opponents. Today, the day of the contest, she invited you to her small cabin for a 'strategy chat'. But she tricked you and now you are |wlocked in|n! If you don't get out before the contest starts she'll get to eat all those pies on her own and surely win! When you get into the cabin, the *chest was just opened, revealing its secrets to the world ... """ INTRO1 = """ Over by the door, Vale claps its hands. """ # ------------------------------------------------------------ # chest (kept open) # ------------------------------------------------------------ CHEST_DESC = """ The chest stands open and prop it up so it won't close again accidentally. It's pretty small, most of its size was clearly taken up by the intricate locking mechanism. Inside it you can see a brass *monocular and a partly burnt *letter. """ class ChestOpen(objects.EvscaperoomObject): pass # ------------------------------------------------------------ # looking glass # ------------------------------------------------------------ LOOKINGGLASS_DESC = """ This is a bronze monocular - a 'looking glass' of the type used by sea captains. It seems to have pretty strong lenses. Better not look at the sun! """ LOOKINGGLASS_APPLY = """ You see only a blur. Things in here is way too close. """ LOOKINGGLASS_APPLY2 = """ You could maybe direct light through the monocular to put this on fire, but you doubt it'd burn very well. """ LOOKINGGLASS_APPLY_WINDOW = """ The leaves of a distant tree fills your vision. A bird jumps around on a branch, unaware of your spying. Interesting, but not very useful right now. However, as you hold up the tube to the window in the right angle you notice that it causes a very focused spot of intense light around the location of the fireplace. """ LOOKINGGLASS_BOOK = """ You consider burning the Jester's book with the Looking glass. But what if you need something in there for a later puzzle? There must be something else of similar dryness that you could burn! """ LOOKINGGLASS_BED = """ While the mattress might burn if it was packed with dry straw. But instead it's packed with fresh grass from the meadow, no way that'll burn any time soon. The whole thing is even a bit damp ... Guess now you know why! Clearly the Jester didn't want you to torch her bed. """ LOOKINGGLASS_APPLY_TO_ROOM = """ ~You ~focus the *monocular at {target}. """ LOOKINGGLASS_THINK = """ If you use the looking glass on something that is easily flammable you might be able to direct enough sunlight onto it to put it on fire! 
""" class LookingGlass(objects.Usable): target_flag = "looking_glass_sun" def at_apply(self, caller, action, obj): self.msg_room(caller, LOOKINGGLASS_APPLY_TO_ROOM.format(target=obj.key)) if obj.check_flag("burnable"): # actually burnable item self.room.score(2, "burn with monocular") obj.handle_burn(caller, self) elif obj.key == "book": self.room.score(1, "try to burn book") self.msg_char(caller, LOOKINGGLASS_BOOK.strip()) elif obj.key == "bed": self.room.achievement(caller, "Fool planning", "Figured out why the bed is damp") self.msg_char(caller, LOOKINGGLASS_BED.strip()) else: self.room.score(1, "used monocular with window") self.msg_char(caller, LOOKINGGLASS_APPLY_WINDOW.strip()) def get_callsigns(self): txt = "Actions that make sense: *use on <thing> and *think" return [], txt def at_focus_think(self, caller, **kwargs): self.msg_char(caller, LOOKINGGLASS_THINK.strip()) def at_cannot_apply(self, caller, action, obj): self.msg_room(caller, LOOKINGGLASS_APPLY_TO_ROOM.format(target=obj.key)) if random() < 0.5: self.msg_char(caller, LOOKINGGLASS_APPLY) else: self.room.score(1, "Getting random insight from monocular") self.msg_char(caller, LOOKINGGLASS_APPLY2) # ------------------------------------------------------------ # letter # ------------------------------------------------------------ LETTER_DESC = """ This letter sits in a crumpled and partly burned envelope; this looks like something that was never delivered. On the front of the envelope it simply says: To Agda """ LETTER_READ = """ The letter is written in a compact and tight handwriting. My beloved Agda, I have written so many letters that I've then torn up. Know that I've always loved you in secret and that I never aimed to hurt you. But that doesn't change the fact that I am the reason you are the way you are today. You will know how happy I was when you confided in me. But after that kid got hurt, I just couldn't live with the secret. I laced the champion's pies with my potion. I am a coward, I could not think of any other way to stop him. How was I to know he'd lose his appetite that day of all days? He has been so cold to you since, but at least you are apart now. I did not know about your other circumstance until much later. But I understand why your parents had to do what they did in secret. This guilt too is on my shoulders, for I was the one that made you incapable of such a responsibility in the first place. You used to hit me out of sport, now you can't even raise your hand to defend yourself. It's maddening! I experiment with my hintberries daily to find a way to reverse your condition. Even if you won't love me, but continue to ridicule and mock me, I have no bigger wish than to see the old you return. Because then one day, maybe I'll forgive myself. Yours forever, Vale """ class Letter(objects.Readable): def at_read(self, caller): self.room.score(2, "read letter") self.msg_char(caller, LETTER_READ.strip()) if not self.check_flag("read_already"): # reading the letter makes the plant mixable so # we can create the fertilizer plant = self.room.state.create_object( PlantMixable, key="plant") plant.db.desc = PLANT_DESC.strip() # ------------------------------------------------------------ # potted plant (on table) # ------------------------------------------------------------ PLANT_DESC = """ On the table, on the side nearest to the window, stands a potted plant - it's a rose cutling, no more than a little green stem and a few leaves. """ PLANT_DIG_ROOM = """ ~You digs around in the dirt of the *plant, to no avail. 
""" PLANT_DIG = """ Carefully you probe around in the small pot with your fingers, but even after circling the cutling and fully probed the bottom of the pot you don't find anything hidden in the dirt. You shuffle the displaced dirt back into place around the fledging little plant. """ PLANT_FEEL_ROOM = """ ~You ~prick a finger on *plant, letting a drop of blood fall into the dirt. """ PLANT_FEEL = """ Ouch! It may be small, but this thing already has thorns! You draw a drop of blood and let it fall into the dirt. """ PLANT_MIX_RESET = """ The mix does not seem to work. ~you ~wipe off the top layer of soil from the *plant and ~start again. """ PLANT_MIX_SUCCESS = """ As ~you ~drop {ingredient} into the soil of the *plant, the rose stickling suddently starts to shift and writhe. ~You quickly ~back away. """ class PlantMixable(objects.Feelable, objects.Mixable): mixer_flag = "fertilizer_mixer" ingredient_recipe = [ "childmaker", "ashes", "ashes", "ashes", "hintberries", "blood", ] def at_object_creation(self): super().at_object_creation() self.set_flag("blood") self.set_flag("fertilizer_mixer") def at_focus_dig(self, caller, **kwargs): self.msg_room(caller, PLANT_DIG_ROOM.strip(), True) self.msg_char(caller, PLANT_DIG.strip()) # reset any mixture now self.db.ingredients = [] def at_focus_feel(self, caller, **kwargs): # add ourselves! self.msg_room(caller, PLANT_FEEL_ROOM.strip(), True) self.handle_mix(caller, self, txt=PLANT_FEEL.strip()) def at_mix(self, caller, ingredient, txt=None): self.msg_char(caller, txt) def at_mix_failure(self, caller, ingredient, **kwargs): self.msg_room(caller, PLANT_MIX_RESET.strip()) @interactive def at_mix_success(self, caller, ingredient, **kwargs): self.room.score(2, "Made fertilizer") self.msg_room(caller, PLANT_MIX_SUCCESS.format(ingredient=ingredient.key).lstrip()) yield(2) self.next_state() # ------------------------------------------------------------ # state # ------------------------------------------------------------ STATE_HINT_LVL1 = """ It's time to grow to the occation. The *locket that fell down into the *ashes looks important. """ STATE_HINT_LVL2 = """ The *letter may be useful for figuring out how to open the *locket. Inside you'll find the end of a recipe. See if you can find the beginning of it somewhere. """ STATE_HINT_LVL3 = """ Say 'Agda' to the locket to open it. Read about the FERTILIZER in *book to get the first part of the fertilizer recipe. To make the fertilizer, put the ingredients in the potted *plant. You need a drop of blood as the last ingredient. Maybe if you pricked your finger? """ STATE_HINT_LVL4 = """ Use the following ingredients with *plant: - *childmaker potion - *ashes - *ashes - *ashes - *pie (the hintberry pie) Finally, examine and feel the potted *plant to prick yourself on its thorns and get a drop of blood. 
""" class State(BaseState): next_state = "state_010_burn_firewood" hints = [STATE_HINT_LVL1, STATE_HINT_LVL2, STATE_HINT_LVL3, STATE_HINT_LVL4] def character_enters(self, character): self.cinematic(GREETING.format(name=character.key), target=character) @interactive def init(self): # we don't need the lever anymore lever = self.get_object("lever") if lever: lever.delete() # chest needs no further interaction chest = self.create_object( ChestOpen, key="chest") chest.db.desc = CHEST_DESC.strip() lookingglass = self.create_object( LookingGlass, key="looking glass", aliases=["monocular", "lookingglass", "glass"]) lookingglass.db.desc = LOOKINGGLASS_DESC.strip() letter = self.create_object( Letter, key="letter") letter.db.desc = LETTER_DESC.strip() yield(3) self.msg(INTRO1.rstrip()) def clean(self): super().clean() self.room.progress(84)
[ "random.random" ]
[((4165, 4173), 'random.random', 'random', ([], {}), '()\n', (4171, 4173), False, 'from random import random\n')]
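The objects.Mixable base class is not shown in this file. From the hooks PlantMixable overrides (at_mix, at_mix_failure, at_mix_success) and the ordered ingredient_recipe list, the matching logic plausibly works like the sketch below; this is a guess, not the actual evscaperoom implementation.

# Guessed recipe logic implied by PlantMixable above; not evscaperoom's code.
class Mixable:
    ingredient_recipe = []  # ordered ingredient keys/flags the subclass expects

    def handle_mix(self, caller, ingredient, txt=None):
        mixed = self.db.ingredients or []
        if len(mixed) >= len(self.ingredient_recipe):
            return  # recipe already complete
        expected = self.ingredient_recipe[len(mixed)]
        if ingredient.check_flag(expected) or ingredient.key == expected:
            mixed.append(expected)
            self.db.ingredients = mixed
            self.at_mix(caller, ingredient, txt=txt)
            if len(mixed) == len(self.ingredient_recipe):
                self.at_mix_success(caller, ingredient)
        else:
            self.db.ingredients = []  # a wrong ingredient resets the whole mix
            self.at_mix_failure(caller, ingredient)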
#!/usr/bin/python
# coding: utf8

from __future__ import absolute_import

from geocoder.location import Location
from geocoder.here import HereResult, HereQuery


class HereReverseResult(HereResult):

    @property
    def ok(self):
        return bool(self.address)


class HereReverse(HereQuery):
    """
    HERE Geocoding REST API
    =======================
    Send a request to the reversegeocode endpoint to retrieve the address
    closest to a given latitude/longitude pair.

    API Reference
    -------------
    https://developer.here.com/rest-apis/documentation/geocoder
    """
    provider = 'here'
    method = 'reverse'

    _RESULT_CLASS = HereReverseResult
    _URL = 'http://reverse.geocoder.cit.api.here.com/6.2/reversegeocode.json'

    def _build_params(self, location, provider_key, **kwargs):
        params = super(HereReverse, self)._build_params(location, provider_key, **kwargs)
        del params['searchtext']
        location = str(Location(location))
        params.update({
            'prox': location,
            'mode': 'retrieveAddresses',
            'gen': 8,
        })
        return params


if __name__ == '__main__':
    g = HereReverse([45.4049053, -75.7077965])
    g.debug()
[ "geocoder.location.Location" ]
[((1016, 1034), 'geocoder.location.Location', 'Location', (['location'], {}), '(location)\n', (1024, 1034), False, 'from geocoder.location import Location\n')]
import json
import logging
import os
import shutil
import subprocess
from urllib.parse import parse_qsl, urlsplit

import requests


def es_search(index, clip_id):
    """Queries the elasticsearch for video metadata"""
    search_url = 'http://search.granicus.com/api/%s/_search' % index
    query = {'query': {'match': {'video_id': {'query': clip_id}}}, 'size': 1}
    query = json.dumps(query)
    result = requests.post(search_url, data=query)
    result = json.loads(result.content)
    # FIXME: This parsing is extremely fragile.
    try:
        video = result['hits']['hits'][0]['_source']['http']
    except KeyError:
        video = None
    return video


def get(url, filename=None):
    """Save contents of a URL to a file."""
    # TODO: Make this not require curl/wget/aria2c
    # Sadly resumable downloads with status updates is non-trivial with requests.
    # aria2c multi-connection is 3-4x faster because granicus servers are slow
    if not filename:
        filename = urlsplit(url).path.split('/')[-1]
    if shutil.which('aria2c'):
        conns = '4'
        cmd = ['aria2c', '--summary-interval', '0', '--auto-file-renaming=false',
               '-x', conns, '-s', conns, '-o', filename, url]
    elif shutil.which('wget'):
        cmd = ['wget', '-c', '-O', filename, url]
    elif shutil.which('curl'):
        cmd = ['curl', '-L', '-o', filename, '-O', '-C', '-', url]
    else:
        raise EnvironmentError("No curl or wget...what is this place?")
    logging.info('Running %s', " ".join(cmd))
    proc = subprocess.Popen(cmd)
    proc.communicate()
    return filename


def remux(filename, output_filename, srt_filename=None, chapter_filename=None):
    """Demux an MP4 file into aac audio and h264 video"""
    if not shutil.which('ffmpeg'):
        raise EnvironmentError("Couldn't find ffmpeg.")
    if not shutil.which('MP4Box'):
        raise EnvironmentError("Couldn't find MP4Box.")
    cmd = ['ffmpeg', '-y', '-i', filename,
           '-vcodec', 'copy', '-an', '-bsf:v', 'h264_mp4toannexb', "%s.h264" % filename,
           '-vn', '-acodec', 'copy', "%s.aac" % filename]
    proc = subprocess.Popen(cmd)
    proc.communicate()
    logging.info('Demuxing: %s', " ".join(cmd))
    cmd = ["MP4Box",
           "-add", "%s.h264#video" % filename,
           "-add", "%s.aac#audio" % filename]
    if srt_filename and os.path.exists(srt_filename):
        cmd.extend(['-add', "%s#lang=eng" % srt_filename])
    # note: guard on chapter_filename itself (previously checked srt_filename)
    if chapter_filename and os.path.exists(chapter_filename):
        cmd.extend(['-chap', chapter_filename])
    cmd.extend(['-new', output_filename])
    logging.info('Remuxing: %s', " ".join(cmd))
    proc = subprocess.Popen(cmd)
    proc.communicate()
[ "subprocess.Popen", "json.loads", "os.path.exists", "shutil.which", "json.dumps", "urllib.parse.urlsplit", "requests.post" ]
[((378, 395), 'json.dumps', 'json.dumps', (['query'], {}), '(query)\n', (388, 395), False, 'import json\n'), ((409, 446), 'requests.post', 'requests.post', (['search_url'], {'data': 'query'}), '(search_url, data=query)\n', (422, 446), False, 'import requests\n'), ((460, 486), 'json.loads', 'json.loads', (['result.content'], {}), '(result.content)\n', (470, 486), False, 'import json\n'), ((1022, 1044), 'shutil.which', 'shutil.which', (['"""aria2c"""'], {}), "('aria2c')\n", (1034, 1044), False, 'import shutil\n'), ((1528, 1549), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd)\n', (1544, 1549), False, 'import subprocess\n'), ((2117, 2138), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd)\n', (2133, 2138), False, 'import subprocess\n'), ((2633, 2654), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {}), '(cmd)\n', (2649, 2654), False, 'import subprocess\n'), ((1219, 1239), 'shutil.which', 'shutil.which', (['"""wget"""'], {}), "('wget')\n", (1231, 1239), False, 'import shutil\n'), ((1744, 1766), 'shutil.which', 'shutil.which', (['"""ffmpeg"""'], {}), "('ffmpeg')\n", (1756, 1766), False, 'import shutil\n'), ((1835, 1857), 'shutil.which', 'shutil.which', (['"""MP4Box"""'], {}), "('MP4Box')\n", (1847, 1857), False, 'import shutil\n'), ((2337, 2365), 'os.path.exists', 'os.path.exists', (['srt_filename'], {}), '(srt_filename)\n', (2351, 2365), False, 'import os\n'), ((2450, 2482), 'os.path.exists', 'os.path.exists', (['chapter_filename'], {}), '(chapter_filename)\n', (2464, 2482), False, 'import os\n'), ((1300, 1320), 'shutil.which', 'shutil.which', (['"""curl"""'], {}), "('curl')\n", (1312, 1320), False, 'import shutil\n'), ((981, 994), 'urllib.parse.urlsplit', 'urlsplit', (['url'], {}), '(url)\n', (989, 994), False, 'from urllib.parse import parse_qsl, urlsplit\n')]
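A minimal usage sketch for the record above (not part of the original corpus); the index name and clip id are made-up placeholders, and es_search performs a real network request:

if __name__ == '__main__':
    # Hypothetical Granicus index and clip id -- substitute real values.
    video_url = es_search('example_city', 4321)
    if video_url:
        mp4 = get(video_url)
        remux(mp4, 'meeting.mp4')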
#!/usr/bin/env python # Python Network Programming Cookbook -- Chapter - 4 # This program requires Python 3.5.2 or any later version # It may run on any other version with/without modifications. # # Follow the comments inline to make it run on Python 2.7.x. import argparse import urllib.request # Comment out the above line and uncomment the below for Python 2.7.x. #import urllib2 REMOTE_SERVER_HOST = 'http://www.cnn.com' class HTTPClient: def __init__(self, host): self.host = host def fetch(self): response = urllib.request.urlopen(self.host) # Comment out the above line and uncomment the below for Python 2.7.x. #response = urllib2.urlopen(self.host) data = response.read() text = data.decode('utf-8') return text if __name__ == "__main__": parser = argparse.ArgumentParser(description='HTTP Client Example') parser.add_argument('--host', action="store", dest="host", default=REMOTE_SERVER_HOST) given_args = parser.parse_args() host = given_args.host client = HTTPClient(host) print (client.fetch())
[ "argparse.ArgumentParser" ]
[((833, 891), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""HTTP Client Example"""'}), "(description='HTTP Client Example')\n", (856, 891), False, 'import argparse\n')]
import json
import requests
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_leaflet as dl
import pandas as pd
from pyproj import Transformer
import plotly.graph_objects as go
from dash.dependencies import Output, Input, State

import index

MAP_ID = "map-id"
BASE_LAYER_ID = "base-layer-id"
BASE_LAYER_DROPDOWN_ID = "base-layer-drop-down-id"
COORDINATE_CLICK_ID = "coordinate-click-id"


def irishgrid2xy(grid_ref):
    """
    Converts an Irish grid reference string, e.g. "N 15904 34671", to xy
    (easting, northing) strings with an origin at the bottom left of grid "V".
    """
    # 5x5 grid letters, missing I
    grid = [("V", "W", "X", "Y", "Z"),
            ("Q", "R", "S", "T", "U"),
            ("L", "M", "N", "O", "P"),
            ("F", "G", "H", "J", "K"),
            ("A", "B", "C", "D", "E")]

    grid_ref = grid_ref.split(" ")
    letter = grid_ref[0].upper()
    easting = grid_ref[1]
    northing = grid_ref[2]

    # `and`, not bitwise `&`: the old chained comparison passed for some
    # mismatched lengths
    if len(easting) == 5 and len(northing) == 5:
        for i in range(0, 5):
            if letter in grid[i]:
                northing_corr = i
                easting_corr = grid[i].index(letter)
        easting = '%s%s' % (easting_corr, easting)
        northing = '%s%s' % (northing_corr, northing)
    return easting, northing


def xy2irishgrid(x, y):
    """
    Convert x and y coordinate integers into an Irish grid reference string
    """
    x = str(x)
    y = str(y)

    grid = [("V", "W", "X", "Y", "Z"),
            ("Q", "R", "S", "T", "U"),
            ("L", "M", "N", "O", "P"),
            ("F", "G", "H", "J", "K"),
            ("A", "B", "C", "D", "E")]

    if (len(x) > 6) or (len(y) > 6):
        return "Not in IRE"

    if len(x) < 6:
        easting_corr = '0'
        easting = x
    else:
        easting_corr = x[0]
        easting = x[1:]

    if len(y) < 6:
        northing_corr = '0'
        northing = y
    else:
        northing_corr = y[0]
        northing = y[1:]

    try:
        letter = grid[int(northing_corr)][int(easting_corr)]
    except (IndexError, ValueError):
        return "Not in IRE"

    grid_ref = '%s %s %s' % (letter, easting, northing)

    return grid_ref


def xy2latlon(x, y):
    transformer = Transformer.from_crs("epsg:29903", "epsg:4326")
    lat, lon = transformer.transform(x, y)
    lat, lon = round(lat, 5), round(lon, 5)
    return lat, lon


def latlon2xy(lat, lon):
    transformer = Transformer.from_crs("epsg:4326", "epsg:29903")
    x, y = transformer.transform(lat, lon)
    x, y = int(x), int(y)
    return x, y


app = dash.Dash(__name__,
          url_base_pathname="/grid2latlon/",
          meta_tags=[{"name": "viewport", "content": "width=device-width, initial-scale=1"}],
          external_stylesheets=[dbc.themes.BOOTSTRAP, ],
          prevent_initial_callbacks=True,
          suppress_callback_exceptions=True)
server = app.server
app.title = 'Irish Grid to Lat Lon'


@app.callback(Output("location-text", "children"),
              [Input(MAP_ID, "location_lat_lon_acc")])
def update_location(location):
    return "You are within {} meters of (lat,lon) = ({},{})".format(location[2], location[0], location[1])


@app.callback(Output("grid_ref_input_table", "style_table"),
              Output("xy_input_table", "style_table"),
              Output("latlon_input_table", "style_table"),
              Input("inputSelector", "value"))
def change_input_table(table):
    if table == "xy":
        return {'display': 'none'}, {'display': 'block'}, {'display': 'none'}
    elif table == "latlon":
        return {'display': 'none'}, {'display': 'none'}, {'display': 'block'}
    return {'display': 'block'}, {'display': 'none'}, {'display': 'none'}


@app.callback(Output('latlon_input_table', 'data'),
              Input('latlon_input_table', 'data'),
              Input(MAP_ID, 'click_lat_lng'),
              Input('xy_input_table', 'data'),
              )
def update_on_click(latlonRows, click_lat_lon, xyRows):
    ctx = dash.callback_context
    triggeredID = ctx.triggered[0]['prop_id'].split('.')[0]
    if triggeredID == MAP_ID:
        for row in latlonRows:
            if row == {}:
                row['lat'] = round(click_lat_lon[0], 5)
                row['lon'] = round(click_lat_lon[1], 5)
        return latlonRows
    elif triggeredID == 'xy_input_table':
        latlonRows = []
        for row in xyRows:
            try:
                lat, lon = xy2latlon(row['x'], row['y'])
                latlonRows.append({'lat': lat, 'lon': lon})
            except:
                latlonRows.append({'lat': "", 'lon': ""})
        return latlonRows
    return latlonRows


@app.callback(Output('xy_input_table', 'data'),
              Input('grid_ref_input_table', 'data'),
              )
def update_xy_from_grid_ref(gridRefRows):
    # renamed from a second `update_on_click` definition that shadowed the
    # callback above
    xyRows = []
    for row in gridRefRows:
        try:
            x, y = irishgrid2xy(row['grid_ref'])
            xyRows.append({'x': x, 'y': y})
        except:
            xyRows.append({'x': "", 'y': ""})
    return xyRows


@app.callback(
    Output('output_table', 'data'),
    Input('latlon_input_table', 'data'))
def update_latlon(rows):
    for row in rows:
        try:
            lat, lon = row['lat'], row['lon']
            x, y = latlon2xy(lat, lon)
            row['x'], row['y'] = x, y
            row['grid_ref'] = xy2irishgrid(x, y)
        except:
            pass  # leave the row unchanged if conversion fails
    return rows


@app.callback(
    Output('markers', 'children'),
    Input('output_table', 'data'))
def update_output(rows):
    markers_list = []
    i = 0
    for row in rows:
        try:
            marker = dl.Marker(position=[row['lat'], row['lon']],
                               children=dl.Tooltip(
                                   [html.B("Marker {}".format(i)), html.Br(),
                                    "Grid Ref: {}".format(row['grid_ref']), html.Br(),
                                    "Lat: {:.2f} \u00b0".format(row['lat']), html.Br(),
                                    "Lon: {:.2f} \u00b0".format(row['lon']), html.Br(),
                                    "X: {}".format(row['x']), html.Br(),
                                    "Y: {}".format(row['y']), html.Br(),
                                    ]))
            markers_list.append(marker)
            i += 1
        except:
            pass  # skip rows without complete coordinates
    return markers_list


# Create layout.
app.layout = html.Div(children=[
    html.Div(id='page-content', children=index.create_layout(app)),
])

if __name__ == '__main__':
    app.run_server(debug=True)
[ "dash.Dash", "dash_html_components.Br", "index.create_layout", "dash.dependencies.Input", "pyproj.Transformer.from_crs", "dash.dependencies.Output" ]
[((2594, 2857), 'dash.Dash', 'dash.Dash', (['__name__'], {'url_base_pathname': '"""/grid2latlon/"""', 'meta_tags': "[{'name': 'viewport', 'content': 'width=device-width, initial-scale=1'}]", 'external_stylesheets': '[dbc.themes.BOOTSTRAP]', 'prevent_initial_callbacks': '(True)', 'suppress_callback_exceptions': '(True)'}), "(__name__, url_base_pathname='/grid2latlon/', meta_tags=[{'name':\n 'viewport', 'content': 'width=device-width, initial-scale=1'}],\n external_stylesheets=[dbc.themes.BOOTSTRAP], prevent_initial_callbacks=\n True, suppress_callback_exceptions=True)\n", (2603, 2857), False, 'import dash\n'), ((2256, 2303), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['"""epsg:29903"""', '"""epsg:4326"""'], {}), "('epsg:29903', 'epsg:4326')\n", (2276, 2303), False, 'from pyproj import Transformer\n'), ((2454, 2501), 'pyproj.Transformer.from_crs', 'Transformer.from_crs', (['"""epsg:4326"""', '"""epsg:29903"""'], {}), "('epsg:4326', 'epsg:29903')\n", (2474, 2501), False, 'from pyproj import Transformer\n'), ((3031, 3066), 'dash.dependencies.Output', 'Output', (['"""location-text"""', '"""children"""'], {}), "('location-text', 'children')\n", (3037, 3066), False, 'from dash.dependencies import Output, Input, State\n'), ((3262, 3307), 'dash.dependencies.Output', 'Output', (['"""grid_ref_input_table"""', '"""style_table"""'], {}), "('grid_ref_input_table', 'style_table')\n", (3268, 3307), False, 'from dash.dependencies import Output, Input, State\n'), ((3323, 3362), 'dash.dependencies.Output', 'Output', (['"""xy_input_table"""', '"""style_table"""'], {}), "('xy_input_table', 'style_table')\n", (3329, 3362), False, 'from dash.dependencies import Output, Input, State\n'), ((3379, 3422), 'dash.dependencies.Output', 'Output', (['"""latlon_input_table"""', '"""style_table"""'], {}), "('latlon_input_table', 'style_table')\n", (3385, 3422), False, 'from dash.dependencies import Output, Input, State\n'), ((3439, 3470), 'dash.dependencies.Input', 'Input', (['"""inputSelector"""', '"""value"""'], {}), "('inputSelector', 'value')\n", (3444, 3470), False, 'from dash.dependencies import Output, Input, State\n'), ((3798, 3834), 'dash.dependencies.Output', 'Output', (['"""latlon_input_table"""', '"""data"""'], {}), "('latlon_input_table', 'data')\n", (3804, 3834), False, 'from dash.dependencies import Output, Input, State\n'), ((3850, 3885), 'dash.dependencies.Input', 'Input', (['"""latlon_input_table"""', '"""data"""'], {}), "('latlon_input_table', 'data')\n", (3855, 3885), False, 'from dash.dependencies import Output, Input, State\n'), ((3901, 3931), 'dash.dependencies.Input', 'Input', (['MAP_ID', '"""click_lat_lng"""'], {}), "(MAP_ID, 'click_lat_lng')\n", (3906, 3931), False, 'from dash.dependencies import Output, Input, State\n'), ((3947, 3978), 'dash.dependencies.Input', 'Input', (['"""xy_input_table"""', '"""data"""'], {}), "('xy_input_table', 'data')\n", (3952, 3978), False, 'from dash.dependencies import Output, Input, State\n'), ((4728, 4760), 'dash.dependencies.Output', 'Output', (['"""xy_input_table"""', '"""data"""'], {}), "('xy_input_table', 'data')\n", (4734, 4760), False, 'from dash.dependencies import Output, Input, State\n'), ((4776, 4813), 'dash.dependencies.Input', 'Input', (['"""grid_ref_input_table"""', '"""data"""'], {}), "('grid_ref_input_table', 'data')\n", (4781, 4813), False, 'from dash.dependencies import Output, Input, State\n'), ((5114, 5144), 'dash.dependencies.Output', 'Output', (['"""output_table"""', '"""data"""'], {}), "('output_table', 'data')\n", (5120, 5144), 
False, 'from dash.dependencies import Output, Input, State\n'), ((5150, 5185), 'dash.dependencies.Input', 'Input', (['"""latlon_input_table"""', '"""data"""'], {}), "('latlon_input_table', 'data')\n", (5155, 5185), False, 'from dash.dependencies import Output, Input, State\n'), ((5489, 5518), 'dash.dependencies.Output', 'Output', (['"""markers"""', '"""children"""'], {}), "('markers', 'children')\n", (5495, 5518), False, 'from dash.dependencies import Output, Input, State\n'), ((5524, 5553), 'dash.dependencies.Input', 'Input', (['"""output_table"""', '"""data"""'], {}), "('output_table', 'data')\n", (5529, 5553), False, 'from dash.dependencies import Output, Input, State\n'), ((3069, 3106), 'dash.dependencies.Input', 'Input', (['MAP_ID', '"""location_lat_lon_acc"""'], {}), "(MAP_ID, 'location_lat_lon_acc')\n", (3074, 3106), False, 'from dash.dependencies import Output, Input, State\n'), ((6749, 6773), 'index.create_layout', 'index.create_layout', (['app'], {}), '(app)\n', (6768, 6773), False, 'import index\n'), ((5874, 5883), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (5881, 5883), True, 'import dash_html_components as html\n'), ((5997, 6006), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6004, 6006), True, 'import dash_html_components as html\n'), ((6121, 6130), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6128, 6130), True, 'import dash_html_components as html\n'), ((6246, 6255), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6253, 6255), True, 'import dash_html_components as html\n'), ((6355, 6364), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6362, 6364), True, 'import dash_html_components as html\n'), ((6465, 6474), 'dash_html_components.Br', 'html.Br', ([], {}), '()\n', (6472, 6474), True, 'import dash_html_components as html\n')]
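A quick round-trip check of the pure grid helpers in the record above (illustrative only, not part of the original corpus); note that irishgrid2xy returns strings:

# "N" sits at row 2, column 2 of the letter grid, so both corrections are 2.
x, y = irishgrid2xy("N 15904 34671")        # -> ('215904', '234671')
assert xy2irishgrid(int(x), int(y)) == "N 15904 34671"
# xy2latlon / latlon2xy additionally require pyproj to be installed.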
import numpy as numpy

a = numpy.array([5, 2, 6, 2, 7, 5, 6, 8, 2, 9])

print('First array:')
print(a)
print('\n')

print('Unique values of first array:')
u = numpy.unique(a)
print(u)
print('\n')

print('Unique array and indices array:')
u, indices = numpy.unique(a, return_index=True)
print(indices)
print('\n')

print('Each index points at the first occurrence of that value in the original array:')
print(a)
print('\n')

print('Inverse indices of unique array:')
u, indices = numpy.unique(a, return_inverse=True)
print(u)
print('\n')

print('Indices are:')
print(indices)
print('\n')

print('Reconstruct the original array using indices')
print(u[indices])
print('\n')

print('Return the counts of repetitions of each unique element:')
u, counts = numpy.unique(a, return_counts=True)
print(u)
print(counts)
[ "numpy.array", "numpy.unique" ]
[((26, 69), 'numpy.array', 'numpy.array', (['[5, 2, 6, 2, 7, 5, 6, 8, 2, 9]'], {}), '([5, 2, 6, 2, 7, 5, 6, 8, 2, 9])\n', (37, 69), True, 'import numpy as numpy\n'), ((150, 165), 'numpy.unique', 'numpy.unique', (['a'], {}), '(a)\n', (162, 165), True, 'import numpy as numpy\n'), ((242, 276), 'numpy.unique', 'numpy.unique', (['a'], {'return_index': '(True)'}), '(a, return_index=True)\n', (254, 276), True, 'import numpy as numpy\n'), ((449, 485), 'numpy.unique', 'numpy.unique', (['a'], {'return_inverse': '(True)'}), '(a, return_inverse=True)\n', (461, 485), True, 'import numpy as numpy\n'), ((720, 755), 'numpy.unique', 'numpy.unique', (['a'], {'return_counts': '(True)'}), '(a, return_counts=True)\n', (732, 755), True, 'import numpy as numpy\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import traceback import logging class MyErr(Exception): pass def division(s): n = int(s) if n == 0: raise MyErr('Divisor cannot be %s' % s) return 10 / n def func(x): try: division(x) except MyErr as me: print(me) traceback.print_exc() print('traceback') finally: print('End func()') func('0') print('-' * 40) def func2(x): try: division(x) except MyErr as me: print(me) logging.exception(me) print('logging...') finally: print('End func2()') func2('0') print('END')
[ "logging.exception", "traceback.print_exc" ]
[((323, 344), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (342, 344), False, 'import traceback\n'), ((537, 558), 'logging.exception', 'logging.exception', (['me'], {}), '(me)\n', (554, 558), False, 'import logging\n')]
import keras
import kfp  # needed for kfp.Client below; was missing from the original imports
from kfp import components

chicago_taxi_dataset_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml')
pandas_transform_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml')
keras_train_classifier_from_csv_op = components.load_component_from_url('https://raw.githubusercontent.com/kubeflow/pipelines/f6aabf7f10b1f545f1fd5079aa8071845224f8e7/components/keras/Train_classifier/from_CSV/component.yaml')

number_of_classes = 2

# Creating the network
dense_network_with_sigmoid = keras.Sequential(layers=[
    keras.layers.Dense(10, activation=keras.activations.sigmoid),
    keras.layers.Dense(number_of_classes, activation=keras.activations.sigmoid),
])


def keras_classifier_pipeline():
    training_data_in_csv = chicago_taxi_dataset_op(
        where='trip_start_timestamp >= "2019-01-01" AND trip_start_timestamp < "2019-02-01"',
        select='tips,trip_seconds,trip_miles,pickup_community_area,dropoff_community_area,fare,tolls,extras,trip_total',
        limit=1000,
    ).output

    training_data_for_classification_in_csv = pandas_transform_csv_op(
        table=training_data_in_csv,
        transform_code='''df.insert(0, "was_tipped", df["tips"] > 0); del df["tips"]; df = df.fillna(0)''',
    ).output

    features_in_csv = pandas_transform_csv_op(
        table=training_data_for_classification_in_csv,
        transform_code='''df = df.drop(columns=["was_tipped"])''',
    ).output

    labels_in_csv = pandas_transform_csv_op(
        table=training_data_for_classification_in_csv,
        transform_code='''df = df["was_tipped"] * 1''',
    ).output

    keras_train_classifier_from_csv_op(
        training_features=features_in_csv,
        training_labels=labels_in_csv,
        network_json=dense_network_with_sigmoid.to_json(),
        learning_rate=0.1,
        num_epochs=100,
    )


if __name__ == '__main__':
    kfp_endpoint = None
    kfp.Client(host=kfp_endpoint).create_run_from_pipeline_func(keras_classifier_pipeline, arguments={})
[ "keras.layers.Dense", "kfp.components.load_component_from_url" ]
[((68, 259), 'kfp.components.load_component_from_url', 'components.load_component_from_url', (['"""https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml"""'], {}), "(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/e3337b8bdcd63636934954e592d4b32c95b49129/components/datasets/Chicago%20Taxi/component.yaml'\n )\n", (102, 259), False, 'from kfp import components\n'), ((276, 484), 'kfp.components.load_component_from_url', 'components.load_component_from_url', (['"""https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml"""'], {}), "(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/6162d55998b176b50267d351241100bb0ee715bc/components/pandas/Transform_DataFrame/in_CSV_format/component.yaml'\n )\n", (310, 484), False, 'from kfp import components\n'), ((512, 711), 'kfp.components.load_component_from_url', 'components.load_component_from_url', (['"""https://raw.githubusercontent.com/kubeflow/pipelines/f6aabf7f10b1f545f1fd5079aa8071845224f8e7/components/keras/Train_classifier/from_CSV/component.yaml"""'], {}), "(\n 'https://raw.githubusercontent.com/kubeflow/pipelines/f6aabf7f10b1f545f1fd5079aa8071845224f8e7/components/keras/Train_classifier/from_CSV/component.yaml'\n )\n", (546, 711), False, 'from kfp import components\n'), ((808, 868), 'keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': 'keras.activations.sigmoid'}), '(10, activation=keras.activations.sigmoid)\n', (826, 868), False, 'import keras\n'), ((874, 949), 'keras.layers.Dense', 'keras.layers.Dense', (['number_of_classes'], {'activation': 'keras.activations.sigmoid'}), '(number_of_classes, activation=keras.activations.sigmoid)\n', (892, 949), False, 'import keras\n')]
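Instead of submitting a run directly, the pipeline above can also be compiled to a package for later upload. A hedged sketch using the kfp v1 SDK (the output filename is arbitrary, and depending on SDK version the function may need a @dsl.pipeline decorator):

# Compile the pipeline function to a reusable package (kfp SDK v1 API).
kfp.compiler.Compiler().compile(keras_classifier_pipeline, 'keras_classifier_pipeline.yaml')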
# -*- coding: utf-8 -*-
"""Add PED file content to VCF file header.

Usage::

    $ snappy-ped_to_vcf_header --ped-file PED --output TXT
"""

from __future__ import print_function

import argparse
from collections import OrderedDict, defaultdict, namedtuple
import os
import re
import sys

__author__ = "<NAME> <<EMAIL>>"

#: Translation scheme for PED attributes (sex, disease) to text
PED_TRANSLATE = OrderedDict(
    [
        ("Sex", OrderedDict([("0", "Unknown"), ("1", "Male"), ("2", "Female")])),
        ("Disease", OrderedDict([("0", "Unknown"), ("1", "Unaffected"), ("2", "Affected")])),
    ]
)

#: Template VCF header string for PED attributes (sex, disease)
TPL_META = "##META=<ID={id},Type=String,Number=1,Values={values}>"

#: Template VCF header string for samples in PED
TPL_SAMPLE = "##SAMPLE=<ID={id},Sex={sex},Disease={disease}>"

#: Template VCF header string for pedigree structure
TPL_PEDIGREE = "##PEDIGREE=<ID={id},Family={family},Father={father},Mother={mother}>"

#: Donor representation
Donor = namedtuple("Donor", ["family", "id", "father", "mother", "sex", "disease"])


def parse_ped(ped_file):
    """Parse a given PED file and yield each line as a Donor."""
    for line in ped_file.readlines():
        line = re.split(r"\s+", line.rstrip())[:6]
        if line[0].startswith("#"):
            continue
        if len(line) != 6:
            raise Exception("PED file not complete.")
        yield Donor(*line)


def ped_vcf_header(donors):
    """Return VCF header string given donors."""
    snippet = []
    families = defaultdict(list)
    for key, value in PED_TRANSLATE.items():
        snippet.append(TPL_META.format(id=key, values="[{}]".format(", ".join(value.values()))))
    for donor in donors:
        families[donor.family].append(donor)
        snippet.append(
            TPL_SAMPLE.format(
                id=donor.id,
                sex=PED_TRANSLATE["Sex"][donor.sex],
                disease=PED_TRANSLATE["Disease"][donor.disease],
            )
        )
    for _, members in families.items():
        for member in members:
            if len(members) == 1 or not member.father == "0" or not member.mother == "0":
                snippet.append(
                    TPL_PEDIGREE.format(
                        id=member.id,
                        family=member.family,
                        father=member.father,
                        mother=member.mother,
                    )
                )
    return "\n".join(snippet)


def write_header_snippet(header, output):
    """Open a text file (create folders if necessary) and write content."""
    if os.path.dirname(output) and not os.path.exists(os.path.dirname(output)):
        os.makedirs(os.path.dirname(output))
    with open(output, "wt") as fh:
        fh.write("{}\n".format(header))


def run(args):
    """Program entry point after parsing the command line."""
    donors = parse_ped(args.ped_file)
    header = ped_vcf_header(donors)
    write_header_snippet(header, args.output)


def main(argv=None):
    """Program entry point for parsing the command line."""
    parser = argparse.ArgumentParser(
        description=("Parse PED file and transform pedigree information into "
                     "VCF header format")
    )
    parser.add_argument(
        "--ped-file",
        type=argparse.FileType("rt"),
        required=True,
        help="PED file that contains the pedigree information",
    )
    parser.add_argument(
        "--output", type=str, required=True, help="File with PED information as VCF header snippet"
    )
    args = parser.parse_args(argv)
    run(args)


if __name__ == "__main__":
    sys.exit(main())
[ "argparse.ArgumentParser", "os.path.dirname", "collections.defaultdict", "collections.namedtuple", "collections.OrderedDict", "argparse.FileType" ]
[((1022, 1097), 'collections.namedtuple', 'namedtuple', (['"""Donor"""', "['family', 'id', 'father', 'mother', 'sex', 'disease']"], {}), "('Donor', ['family', 'id', 'father', 'mother', 'sex', 'disease'])\n", (1032, 1097), False, 'from collections import OrderedDict, defaultdict, namedtuple\n'), ((1561, 1578), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1572, 1578), False, 'from collections import OrderedDict, defaultdict, namedtuple\n'), ((3117, 3232), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Parse PED file and transform pedigree information intoVCF header format"""'}), "(description=\n 'Parse PED file and transform pedigree information intoVCF header format')\n", (3140, 3232), False, 'import argparse\n'), ((2628, 2651), 'os.path.dirname', 'os.path.dirname', (['output'], {}), '(output)\n', (2643, 2651), False, 'import os\n'), ((439, 502), 'collections.OrderedDict', 'OrderedDict', (["[('0', 'Unknown'), ('1', 'Male'), ('2', 'Female')]"], {}), "([('0', 'Unknown'), ('1', 'Male'), ('2', 'Female')])\n", (450, 502), False, 'from collections import OrderedDict, defaultdict, namedtuple\n'), ((525, 596), 'collections.OrderedDict', 'OrderedDict', (["[('0', 'Unknown'), ('1', 'Unaffected'), ('2', 'Affected')]"], {}), "([('0', 'Unknown'), ('1', 'Unaffected'), ('2', 'Affected')])\n", (536, 596), False, 'from collections import OrderedDict, defaultdict, namedtuple\n'), ((2721, 2744), 'os.path.dirname', 'os.path.dirname', (['output'], {}), '(output)\n', (2736, 2744), False, 'import os\n'), ((3307, 3330), 'argparse.FileType', 'argparse.FileType', (['"""rt"""'], {}), "('rt')\n", (3324, 3330), False, 'import argparse\n'), ((2675, 2698), 'os.path.dirname', 'os.path.dirname', (['output'], {}), '(output)\n', (2690, 2698), False, 'import os\n')]
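An illustrative exercise of the pure functions in the record above on an in-memory PED file (the family and sample names are made up):

import io

ped = io.StringIO(
    "FAM1 child father mother 1 2\n"
    "FAM1 father 0 0 1 1\n"
    "FAM1 mother 0 0 2 1\n"
)
# Emits ##META lines, one ##SAMPLE line per donor, and a ##PEDIGREE line
# for the child (the only member with named parents).
print(ped_vcf_header(parse_ped(ped)))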
from pkgutil import iter_modules EXTENSIONS = frozenset( extension.name for extension in iter_modules(("xythrion/extensions",), "xythrion.extensions.") )
[ "pkgutil.iter_modules" ]
[((94, 156), 'pkgutil.iter_modules', 'iter_modules', (["('xythrion/extensions',)", '"""xythrion.extensions."""'], {}), "(('xythrion/extensions',), 'xythrion.extensions.')\n", (106, 156), False, 'from pkgutil import iter_modules\n')]
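For context on the iter_modules call above: it takes an iterable of filesystem paths plus a name prefix that is prepended to every yielded module name. A small illustrative sketch doing the same against the stdlib 'email' package:

import email
from pkgutil import iter_modules

# Each ModuleInfo.name comes back with the given prefix, e.g. "email.parser".
print(sorted(m.name for m in iter_modules(email.__path__, "email.")))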
#!/usr/bin/env python3 ''' Query the VDB ''' import argparse import logging import sys import pprint import pandas as pd import orjson as json from biograph.internal import vdb def parse_args(clargs): ''' biograph vdb query args ''' parser = argparse.ArgumentParser( description='Query the Spiral Variant DataBase (VDB)' ) if not clargs: clargs.append('--help') parser.add_argument('sample', type=str, nargs='?', help='Match this sample or annotation name') parser.add_argument('-a', '--aid', type=str, help='Match this analysis id') parser.add_argument('-g', '--group', help='Use this VDB group', default=None) parser.add_argument('--all', action='store_true', help='List every available analysis') parser.add_argument('-v', '--verbose', action='store_true', help='Show full details for each analysis') parser.add_argument('--annotation', action='store_true', help='Show a list of all available annotations') parser.add_argument('--lookup', type=str, help='Look up annotations with this VARID') parser.add_argument('--debug', action='store_true', help=argparse.SUPPRESS) return parser.parse_args(clargs) def list_entries(entries, verbose, annotation): ''' Pretty print query results ''' if verbose: for entry in entries: if annotation: aid, sample, refname, build, imported_on, description, version, refhash, header = entry else: aid, sample, refname, build, imported_on, description, refhash, header = entry print(f""" name: {sample} aid: {aid} build: {build} refname: {refname} refhash: {refhash} imported_on: {imported_on}""" ) if not pd.isna(description): print(f'description: {description}') if annotation: print(f'version: {version}') print() for line in header.split('\n'): if line.lower().startswith( ( '##filter', '##filedate', '##reference', '##info', '##format', '##alt', '##contig', '##refhash', '##fileformat', '#chrom', '##sequence-region', '##gff-version', ) ): continue if line.lower().startswith('##source='): subs = line.split(',') print(subs[0][2:]) for sub in subs[1:]: print(' ', sub) else: print(line[2:]) print('-=' * 10) return # not verbose if annotation: print(f"{'name':<16} {'version':<12} {'build':<7} {'analysis_id':<36} {'imported_on':<24} description") else: print(f"{'name':<16} {'refname':<10} {'build':<7} {'analysis_id':<36} {'imported_on':<24} description") for entry in entries: if annotation: aid, sample, _, build, imported_on, description, version = entry print(f"{sample:<16} {version:<12} {build:<7} {aid:<36} {imported_on.ctime():<24} {'' if pd.isna(description) else description}") else: aid, sample, refname, build, imported_on, description = entry print(f"{sample:<16} {refname:10} {build:<7} {aid:<36} {imported_on.ctime():<24} {'' if pd.isna(description) else description}") def lookup(db, varid_lookup): ''' look up an annotation ''' urls = { 'ClinVar': 'https://www.ncbi.nlm.nih.gov/clinvar/variation/{varid}/', 'ensGene': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}', 'Ensembl': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}', 'knownGene': 'https://uswest.ensembl.org/Homo_sapiens/Gene/Summary?db=core;g={varid}', 'GCF_000001405.39': 'https://www.ncbi.nlm.nih.gov/search/all/?term={varid}', 'ncbiRefSeq': 'https://www.ncbi.nlm.nih.gov/search/all/?term={varid}', 'GeneCards': 'https://www.genecards.org/cgi-bin/carddisp.pl?{varid}', 'UniProt': 'https://www.uniprot.org/uniprot/{varid}', 'OMIM': 'https://omim.org/entry/{varid}', } pp = pprint.PrettyPrinter(indent=4) query = f''' SELECT name, chrom, pos, varid, build, t_info FROM 
{db.path.annotation.data_table} WHERE varid = '{varid_lookup}' ''' for (name, chrom, pos, varid, build, t_info) in db.query(query): if name in urls: url = urls[name].format(varid=varid) else: url = '' print(f"{name}:{varid} at {chrom}:{pos} on {build} {url}") pp.pprint(json.loads(t_info)) print('') def main(clargs): ''' the main event ''' args = parse_args(clargs) logLevel = logging.DEBUG if args.debug else logging.WARNING logging.basicConfig(stream=sys.stderr, level=logLevel, format='%(message)s') db = vdb.connect(group=args.group) if args.lookup: lookup(db, args.lookup) exit(0) if args.annotation: table = db.path.annotation.meta_table sample_field = 'name' ready = db.get_crawler_state(db.path.annotation.crawler) == 'READY' else: table = db.path.vcf.meta_table sample_field = 'sample' ready = db.get_crawler_state(db.path.vcf.crawler) == 'READY' if not ready: logging.warning(f"NOTE: The crawler is currently running. Some data may not yet be indexed.") if not db.query(f"SHOW TABLES LIKE '{table}'"): raise SystemExit(f"{db.group} is empty. Run 'biograph vdb import --group {db.group}' to import VCF data,\nor 'biograph vdb group --crawl {db.group}' to update the index.") if not args.annotation: logging.warning(f"vdb group '{db.group}' {'(frozen)' if db.is_frozen() else ''}") query_filters = [] if not args.all: if args.aid: query_filters.append(f''' AND aid = '{args.aid}' ''') if args.sample: if args.annotation: query_filters.append(f"AND name = '{args.sample}'") else: query_filters.append(f"AND sample = '{args.sample}'") query_filter = '\n'.join(query_filters) fields = [ 'aid', sample_field, 'refname', 'build', 'imported_on', 'description' ] if args.annotation: fields.append('version') if args.verbose: fields.extend(['refhash', 'header']) query = f''' SELECT {','.join(fields)} FROM {table} WHERE 1=1 {query_filter} ORDER BY {sample_field} ASC, imported_on DESC ; ''' logging.debug(query) list_entries(db.query(query, cache=False), args.verbose, args.annotation) if __name__ == '__main__': main(sys.argv[1:])
[ "logging.debug", "argparse.ArgumentParser", "logging.basicConfig", "pandas.isna", "logging.warning", "orjson.loads", "pprint.PrettyPrinter", "biograph.internal.vdb.connect" ]
[((252, 330), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Query the Spiral Variant DataBase (VDB)"""'}), "(description='Query the Spiral Variant DataBase (VDB)')\n", (275, 330), False, 'import argparse\n'), ((4506, 4536), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (4526, 4536), False, 'import pprint\n'), ((5146, 5222), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'logLevel', 'format': '"""%(message)s"""'}), "(stream=sys.stderr, level=logLevel, format='%(message)s')\n", (5165, 5222), False, 'import logging\n'), ((5233, 5262), 'biograph.internal.vdb.connect', 'vdb.connect', ([], {'group': 'args.group'}), '(group=args.group)\n', (5244, 5262), False, 'from biograph.internal import vdb\n'), ((7005, 7025), 'logging.debug', 'logging.debug', (['query'], {}), '(query)\n', (7018, 7025), False, 'import logging\n'), ((5686, 5789), 'logging.warning', 'logging.warning', (['f"""NOTE: The crawler is currently running. Some data may not yet be indexed."""'], {}), "(\n f'NOTE: The crawler is currently running. Some data may not yet be indexed.'\n )\n", (5701, 5789), False, 'import logging\n'), ((4963, 4981), 'orjson.loads', 'json.loads', (['t_info'], {}), '(t_info)\n', (4973, 4981), True, 'import orjson as json\n'), ((1729, 1749), 'pandas.isna', 'pd.isna', (['description'], {}), '(description)\n', (1736, 1749), True, 'import pandas as pd\n'), ((3425, 3445), 'pandas.isna', 'pd.isna', (['description'], {}), '(description)\n', (3432, 3445), True, 'import pandas as pd\n'), ((3659, 3679), 'pandas.isna', 'pd.isna', (['description'], {}), '(description)\n', (3666, 3679), True, 'import pandas as pd\n')]
import logging

import environ

from .base import *  # noqa

_env = environ.Env()

logging.disable(logging.DEBUG)

#
# invariants
#

DEBUG = False
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True

#
# pulled from environment
#

SECRET_KEY = _env('SECRET_KEY')

# NB: a one-element list, not a bare parenthesized string -- otherwise the
# += below would fail
ALLOWED_HOSTS = ['.herokuapp.com']

_additional_hosts = _env('ADDITIONAL_ALLOWED_HOSTS', default='')
if _additional_hosts:
    ALLOWED_HOSTS += _additional_hosts.split(',')
[ "logging.disable", "environ.Env" ]
[((69, 82), 'environ.Env', 'environ.Env', ([], {}), '()\n', (80, 82), False, 'import environ\n'), ((84, 114), 'logging.disable', 'logging.disable', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (99, 114), False, 'import logging\n')]
import logging from plumbum import CommandNotFound, local from changes import shell log = logging.getLogger(__name__) def get_test_runner(): test_runners = ['tox', 'nosetests', 'py.test'] test_runner = None for runner in test_runners: try: test_runner = local[runner] except CommandNotFound: continue return test_runner def run_tests(): """Executes your tests.""" test_runner = get_test_runner() if test_runner: result = test_runner() log.info('Test execution returned:\n%s' % result) return result else: log.info('No test runner found') return None def run_test_command(context): if context.test_command: result = shell.dry_run(context.test_command, context.dry_run) log.info('Test command "%s", returned %s', context.test_command, result) return True
[ "changes.shell.dry_run", "logging.getLogger" ]
[((93, 120), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (110, 120), False, 'import logging\n'), ((745, 797), 'changes.shell.dry_run', 'shell.dry_run', (['context.test_command', 'context.dry_run'], {}), '(context.test_command, context.dry_run)\n', (758, 797), False, 'from changes import shell\n')]
import sqlite3
from sqlite3 import Error


def create_connection():
    """ create a database connection to an in-memory SQLite database
    :return: Connection object or None
    """
    try:
        conn = sqlite3.connect(":memory:")
        return conn
    except Error as e:
        print(e)
    return None


def create_reads_table(conn, key="sequence"):
    """ create the reads table on the SQLite database
    :param conn: connection to the database
    :return: None
    """
    c = conn.cursor()
    c.execute("CREATE TABLE reads"
              " (name text, sequence text,"
              " chrom text, start int,"
              " PRIMARY KEY(%s, chrom, start))" % key)
    conn.commit()


def insert_row_in_reads_table(cur, fields):
    """ insert one row into the reads table
    :param cur: cursor of the database connection
    :param fields: list with the column values (name, sequence, chrom, start)
    :return: None
    """
    cur.execute("INSERT INTO reads VALUES"
                " (\"%s\", \"%s\", \"%s\", %s)" % (fields[0], fields[1],
                                                   fields[2], fields[3]))


def select_all_reads(conn):
    """ Query all rows in the reads table
    :param conn: the Connection object
    :return: list of row tuples
    """
    cur = conn.cursor()
    cur.execute("SELECT * FROM reads")

    rows = cur.fetchall()
    return rows
[ "sqlite3.connect" ]
[((232, 259), 'sqlite3.connect', 'sqlite3.connect', (['""":memory:"""'], {}), "(':memory:')\n", (247, 259), False, 'import sqlite3\n')]
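An illustrative end-to-end use of the helpers in the record above (the read name and sequence are made up):

conn = create_connection()
create_reads_table(conn)
cur = conn.cursor()
insert_row_in_reads_table(cur, ["read1", "ACGT", "chr1", 100])
conn.commit()
print(select_all_reads(conn))  # [('read1', 'ACGT', 'chr1', 100)]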
#!/usr/bin/env python #coding:utf-8 """ Author: --<v1ll4n> Purpose: Provide some useful thread utils Created: 2016/10/29 """ import uuid import time import unittest try: from queue import Queue, Empty except: from Queue import Queue, Empty import threading from threading import Thread import inspect import traceback #---------------------------------------------------------------------- def start_thread(func, *args, **kwargs): """""" ret = Thread(target=func, args=args, kwargs=kwargs) ret.daemon = True ret.start() ######################################################################## class Contractor(object): """Create Multi-Thread to support the concurrence of many tasks""" #---------------------------------------------------------------------- def __init__(self, thread_max=50): """Constructor""" self.task_list = [] self.result_queue = Queue() self.lock = threading.Lock() self.thread_max = thread_max self._current_thread_count = 0 self._executed_task_count = 0 self._task_count = 0 def _uuid1_str(self): '''Returns: random UUID tag ''' return str(uuid.uuid1()) #---------------------------------------------------------------------- def feed(self, target_func, *vargs, **kwargs): """""" self.add_task(target_func, *vargs, **kwargs) def add_task(self, target_func, *args, **argv): '''Add task to Pool and wait to exec Params: target_func : A callable obj, the entity of the current task args : the args of [target_func] argv : the argv of [target_func] ''' assert callable(target_func), '[!] Function can \'t be called' ret = {} ret['func'] = target_func ret['args'] = args ret['argv'] = argv #ret['uuid'] = self.signal_name self._task_count = self._task_count + 1 self.task_list.append(ret) def start(self): """""" ret = Thread(target=self._run) ret.daemon = True ret.start() return self.result_queue #---------------------------------------------------------------------- def _run(self): """""" for i in self.task_list: #print self.current_thread_count while self.thread_max <= self._current_thread_count: time.sleep(0.3) self._start_task(i) def _start_task(self, task): """""" self._current_thread_count = self._current_thread_count + 1 try: ret = Thread(target=self._worker, args=(task,)) ret.daemon = True ret.start() except TypeError: self._current_thread_count = self._current_thread_count - 1 def _worker(self, dictobj): """""" func = dictobj['func'] args = dictobj['args'] argv = dictobj['argv'] try: result = func(*args, **argv) except Exception as e: #print 'ecp occured' result = tuple([e, traceback.extract_stack()]) self.lock.acquire() self._executed_task_count = self._executed_task_count + 1 self._add_result_to_queue(result=result) self.lock.release() def _add_result_to_queue(self, **kw): """""" assert 'result' in kw, '[!] Result Error!' 
        self.result_queue.put(kw['result'])
        self._current_thread_count = self._current_thread_count - 1

    #----------------------------------------------------------------------
    def get_result_queue(self):
        """"""
        return self.result_queue

    #----------------------------------------------------------------------
    def get_task_list(self):
        """"""
        return self.task_list

    #----------------------------------------------------------------------
    def get_result_generator(self):
        """"""
        while True:
            try:
                ret = self.result_queue.get(timeout=1)
                yield ret
            except Empty:
                if self._task_count == self._executed_task_count:
                    break
                else:
                    pass

    #----------------------------------------------------------------------
    @property
    def task_count(self):
        """"""
        return self._task_count

    #----------------------------------------------------------------------
    @property
    def executed_task_count(self):
        """"""
        return self._executed_task_count

    #----------------------------------------------------------------------
    @property
    def percent(self):
        """Fraction of added tasks that have finished executing."""
        return float(self._executed_task_count) / float(self._task_count)

    #----------------------------------------------------------------------
    @property
    def current_thread_count(self):
        """"""
        return self._current_thread_count


class UtilsTest(unittest.case.TestCase):

    def runTest(self):
        ms = inspect.getmembers(self)
        ms = [x[0] for x in ms]
        for i in ms:
            if callable(getattr(self, i)):
                if i.startswith('test_'):
                    getattr(self, i)()

    def test_pool(self):
        def demo_task(*args):
            '''simulate the plugin.run'''
            print('[!] Computing!')
            time.sleep(args[0])
            print('[!] Finished!')
            print()
            returns = 'Runtime Length : %s' % str(args)
            return returns

        pool = Contractor()
        pool.add_task(demo_task, 7)
        pool.add_task(demo_task, 3)

        q = pool.start()
        print(pool._current_thread_count)
        self.assertIsInstance(q, Queue)
        r = q.get()
        print(r)
        self.assertIsInstance(r, str)
        r = q.get()
        print(r)
        self.assertIsInstance(r, str)
        print(pool._current_thread_count)


if __name__ == '__main__':
    unittest.main()
[ "unittest.main", "threading.Thread", "Queue.Queue", "time.sleep", "threading.Lock", "traceback.extract_stack", "uuid.uuid1", "inspect.getmembers" ]
[((493, 538), 'threading.Thread', 'Thread', ([], {'target': 'func', 'args': 'args', 'kwargs': 'kwargs'}), '(target=func, args=args, kwargs=kwargs)\n', (499, 538), False, 'from threading import Thread\n'), ((6315, 6330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6328, 6330), False, 'import unittest\n'), ((966, 973), 'Queue.Queue', 'Queue', ([], {}), '()\n', (971, 973), False, 'from Queue import Queue, Empty\n'), ((997, 1013), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (1011, 1013), False, 'import threading\n'), ((2149, 2173), 'threading.Thread', 'Thread', ([], {'target': 'self._run'}), '(target=self._run)\n', (2155, 2173), False, 'from threading import Thread\n'), ((5338, 5362), 'inspect.getmembers', 'inspect.getmembers', (['self'], {}), '(self)\n', (5356, 5362), False, 'import inspect\n'), ((1263, 1275), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1273, 1275), False, 'import uuid\n'), ((2750, 2791), 'threading.Thread', 'Thread', ([], {'target': 'self._worker', 'args': '(task,)'}), '(target=self._worker, args=(task,))\n', (2756, 2791), False, 'from threading import Thread\n'), ((5700, 5719), 'time.sleep', 'time.sleep', (['args[0]'], {}), '(args[0])\n', (5710, 5719), False, 'import time\n'), ((2537, 2552), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2547, 2552), False, 'import time\n'), ((3251, 3276), 'traceback.extract_stack', 'traceback.extract_stack', ([], {}), '()\n', (3274, 3276), False, 'import traceback\n')]
# Generated by Django 2.2.2 on 2019-07-25 08:57 from django.db import migrations, models import gnosis.eth.django.models class Migration(migrations.Migration): dependencies = [ ('history', '0001_initial'), ] operations = [ migrations.AddField( model_name='multisigconfirmation', name='signature', field=gnosis.eth.django.models.HexField(max_length=500, null=True), ), migrations.AlterField( model_name='multisigconfirmation', name='block_date_time', field=models.DateTimeField(null=True), ), migrations.AlterField( model_name='multisigconfirmation', name='block_number', field=gnosis.eth.django.models.Uint256Field(null=True), ), migrations.AlterField( model_name='multisigconfirmation', name='transaction_hash', field=gnosis.eth.django.models.Sha3HashField(null=True), ), ]
[ "django.db.models.DateTimeField" ]
[((578, 609), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)'}), '(null=True)\n', (598, 609), False, 'from django.db import migrations, models\n')]
import logging

__author__ = 'e.kolpakov'


class Curriculum:
    def __init__(self):
        self._competency_index = {}
        self._fact_index = {}
        self._lesson_index = {}

    def register_competency(self, competency):
        """
        Registers competency with curriculum.
        :param competency: Competency
        """
        self._register(competency, self._competency_index)

    def register_fact(self, fact):
        """
        Registers fact with curriculum
        :param fact: Fact
        :return: None
        """
        self._register(fact, self._fact_index)

    def register_lesson(self, lesson):
        """
        Registers lesson with curriculum
        :param Lesson lesson: Lesson to register
        :return: None
        """
        self._register(lesson, self._lesson_index)

    def find_competency(self, competency_code):
        """
        Finds competency by code
        :param competency_code: str
        :rtype: knowledge_representation.Competency
        """
        return self._find(competency_code, self._competency_index)

    def find_fact(self, fact_code):
        """
        Finds fact by code
        :param fact_code: str
        :rtype: knowledge_representation.Fact
        """
        return self._find(fact_code, self._fact_index)

    def find_lesson(self, lesson_code):
        """
        Finds lesson by code
        :param lesson_code: str
        :rtype: BaseLesson
        """
        return self._find(lesson_code, self._lesson_index)

    def all_competencies(self):
        return self._competency_index.values()

    def all_facts(self):
        return self._fact_index.values()

    def all_lessons(self):
        return self._lesson_index.values()

    def find_lessons(self, lesson_type=None):
        filters = []
        if lesson_type is not None:
            filters.append(lambda lesson: isinstance(lesson, lesson_type))
        composite_filter = lambda lesson: all(subfilter(lesson) for subfilter in filters)
        return filter(composite_filter, self._lesson_index.values())

    @staticmethod
    def _register(entity, index, message="{0} already registered", code_selector=None):
        code_selector = code_selector if code_selector else lambda x: x.code
        code = code_selector(entity)
        if code in index:
            message = message.format(entity)
            logging.getLogger(__name__).warning(message)
            raise ValueError(message)
        index[code] = entity

    @staticmethod
    def _find(code, index, default=None):
        """
        :param str code: code to look up
        :param dict index: index to search
        :param object|None default: default value if object is not found
        :rtype: object
        """
        return index.get(code, default)
[ "logging.getLogger" ]
[((2374, 2401), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2391, 2401), False, 'import logging\n')]
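Any object exposing a .code attribute can be registered, since _register defaults to `lambda x: x.code`. A small illustrative sketch with a stub fact class (not part of the original corpus):

class _Fact:
    def __init__(self, code):
        self.code = code

curriculum = Curriculum()
curriculum.register_fact(_Fact("fact1"))
assert curriculum.find_fact("fact1").code == "fact1"
assert curriculum.find_fact("missing") is None  # _find falls back to None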
""" <NAME> 7-19-2019 Version 1.0 https://github.com/tasanuma714/Raspberry-Pi-Security-Camera-using-Google-Coral-USB-Accelerator ***Big Credit to Adrian at PyImageSearch for the base code of this file. https://www.pyimagesearch.com/2019/04/22/getting-started-with-google-corals-tpu-usb-accelerator/ https://www.pyimagesearch.com/2019/05/13/object-detection-and-image-classification-with-google-coral-usb-accelerator/ File Description: This is the main file which executes the object detection. Boxes will be formed around recognized the objects in the coco_labels.txt with a confidence level of over 0.3. Evey second, detected objects and the timestamp will be written onto surveillance.txt. If a pre-set object is detected, a text message via Pushetta will be sent and record.py is called to record and upload the vidoe to Google Drive. Every hour, log.py is called to upload the contents of surveillance.txt to Google Drive. """ # imports from edgetpu.detection.engine import DetectionEngine from imutils.video import VideoStream from PIL import Image import argparse import imutils import cv2 import time import datetime from datetime import datetime from time import strftime from pushetta import Pushetta from subprocess import call import sys # starting time start_time = time.time() # construct the argument parser and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-m", "--model", required=True, help="path to TensorFlow Lite object detection model") ap.add_argument("-l", "--labels", required=True, help="path to labels file") ap.add_argument("-c", "--confidence", type=float, default=0.3, help="minimum probability to filter weak detections") args = vars(ap.parse_args()) # initialize the labels dictionary print("[INFO] parsing class labels...") labels = {} # loop over the class labels file for row in open(args["labels"]): # unpack the row and update the labels dictionary (classID, label) = row.strip().split(maxsplit=1) labels[int(classID)] = label.strip() # load the Google Coral object detection model print("[INFO] loading Coral model...") model = DetectionEngine(args["model"]) # initialize the video stream and allow the camera sensor to warmup print("[INFO] starting video stream...") # vs = VideoStream(src=0).start() vs = VideoStream(usePiCamera=True).start() time.sleep(2.0) # initializing base variables sec = -1 prevLabel = [] API_KEY="<KEY>" CHANNEL_NAME="RaspiSecurityCamera" p=Pushetta(API_KEY) # loop over the frames from the video stream while True: # grab the frame from the threaded video stream and resize it # to have a maximum width of 500 pixels frame = vs.read() frame = imutils.resize(frame, width=1000) orig = frame.copy() # prepare the frame for object detection by converting (1) it # from BGR to RGB channel ordering and then (2) from a NumPy # array to PIL image format frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) frame = Image.fromarray(frame) # make predictions on the input frame start = time.time() results = model.DetectWithImage(frame, threshold=args["confidence"], keep_aspect_ratio=True, relative_coord=False) end = time.time() # loop over the results for r in results: # extract the bounding box and box and predicted class label box = r.bounding_box.flatten().astype("int") (startX, startY, endX, endY) = box label = labels[r.label_id] # draw the bounding box and label on the image cv2.rectangle(orig, (startX, startY), (endX, endY), (0, 255, 0), 2) y = startY - 15 if startY - 15 > 15 else startY + 15 text = "{}: {:.2f}%".format(label, r.score * 100) cv2.putText(orig, text, 
                    (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)

    # appending to surveillance.txt
    file1 = open("surveillance.txt", "a")
    # checking if one second has passed
    if sec != time.strftime("%S"):
        target = False
        # print timestamp
        print(time.ctime())
        file1.write(time.ctime() + "\n")
        # prints every element in results list
        for r in results:
            print(labels[r.label_id], end=", ")
            file1.write(labels[r.label_id] + ", ")
            # sets pre-set target element
            if labels[r.label_id] == "cup":
                target = True
        print("/")
        file1.write("/" + "\n")
        # resets the second to compare
        sec = time.strftime("%S")
        # checks for target element
        if target:
            p.pushMessage(CHANNEL_NAME, "The cup has been detected")
            print("recording")
            file1.write("recording")
            vs.stop()
            call(["python3", "record.py"])
            sys.exit()
    file1.close()

    # checks elapsed time to upload the contents of surveillance.txt
    elapsed_time = time.time() - start_time
    if elapsed_time > 3600:
        start_time = time.time()
        print("logging")
        call(["python3", "log.py"])

    # show the output frame and wait for a key press
    cv2.imshow("Frame", orig)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
[ "imutils.video.VideoStream", "cv2.putText", "argparse.ArgumentParser", "cv2.cvtColor", "edgetpu.detection.engine.DetectionEngine", "pushetta.Pushetta", "cv2.waitKey", "cv2.imshow", "time.strftime", "time.ctime", "time.sleep", "time.time", "PIL.Image.fromarray", "subprocess.call", "cv2.rectangle", "imutils.resize", "cv2.destroyAllWindows", "sys.exit" ]
[((1340, 1351), 'time.time', 'time.time', ([], {}), '()\n', (1349, 1351), False, 'import time\n'), ((1417, 1442), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1440, 1442), False, 'import argparse\n'), ((2183, 2213), 'edgetpu.detection.engine.DetectionEngine', 'DetectionEngine', (["args['model']"], {}), "(args['model'])\n", (2198, 2213), False, 'from edgetpu.detection.engine import DetectionEngine\n'), ((2407, 2422), 'time.sleep', 'time.sleep', (['(2.0)'], {}), '(2.0)\n', (2417, 2422), False, 'import time\n'), ((2538, 2555), 'pushetta.Pushetta', 'Pushetta', (['API_KEY'], {}), '(API_KEY)\n', (2546, 2555), False, 'from pushetta import Pushetta\n'), ((5182, 5205), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (5203, 5205), False, 'import cv2\n'), ((2753, 2786), 'imutils.resize', 'imutils.resize', (['frame'], {'width': '(1000)'}), '(frame, width=1000)\n', (2767, 2786), False, 'import imutils\n'), ((2978, 3016), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2990, 3016), False, 'import cv2\n'), ((3027, 3049), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (3042, 3049), False, 'from PIL import Image\n'), ((3102, 3113), 'time.time', 'time.time', ([], {}), '()\n', (3111, 3113), False, 'import time\n'), ((3242, 3253), 'time.time', 'time.time', ([], {}), '()\n', (3251, 3253), False, 'import time\n'), ((5015, 5040), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'orig'], {}), "('Frame', orig)\n", (5025, 5040), False, 'import cv2\n'), ((2368, 2397), 'imutils.video.VideoStream', 'VideoStream', ([], {'usePiCamera': '(True)'}), '(usePiCamera=True)\n', (2379, 2397), False, 'from imutils.video import VideoStream\n'), ((3537, 3604), 'cv2.rectangle', 'cv2.rectangle', (['orig', '(startX, startY)', '(endX, endY)', '(0, 255, 0)', '(2)'], {}), '(orig, (startX, startY), (endX, endY), (0, 255, 0), 2)\n', (3550, 3604), False, 'import cv2\n'), ((3721, 3808), 'cv2.putText', 'cv2.putText', (['orig', 'text', '(startX, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 255, 0)', '(2)'], {}), '(orig, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255,\n 0), 2)\n', (3732, 3808), False, 'import cv2\n'), ((4828, 4839), 'time.time', 'time.time', ([], {}), '()\n', (4837, 4839), False, 'import time\n'), ((4897, 4908), 'time.time', 'time.time', ([], {}), '()\n', (4906, 4908), False, 'import time\n'), ((4932, 4959), 'subprocess.call', 'call', (["['python3', 'log.py']"], {}), "(['python3', 'log.py'])\n", (4936, 4959), False, 'from subprocess import call\n'), ((5049, 5063), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (5060, 5063), False, 'import cv2\n'), ((3940, 3959), 'time.strftime', 'time.strftime', (['"""%S"""'], {}), "('%S')\n", (3953, 3959), False, 'import time\n'), ((4443, 4462), 'time.strftime', 'time.strftime', (['"""%S"""'], {}), "('%S')\n", (4456, 4462), False, 'import time\n'), ((4022, 4034), 'time.ctime', 'time.ctime', ([], {}), '()\n', (4032, 4034), False, 'import time\n'), ((4670, 4700), 'subprocess.call', 'call', (["['python3', 'record.py']"], {}), "(['python3', 'record.py'])\n", (4674, 4700), False, 'from subprocess import call\n'), ((4706, 4716), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4714, 4716), False, 'import sys\n'), ((4064, 4076), 'time.ctime', 'time.ctime', ([], {}), '()\n', (4074, 4076), False, 'import time\n')]
import os import numpy import pandas from skimage import io def read_ids_from_csv(csv_file): """ Reads a column named 'ID' from csv_file. This function was created to make sure basic I/O works in unit testing. """ csv = pandas.read_csv(csv_file) return csv.ID def read_hpa_image(image_id, root_dir): """ Reads a four channel HPA cell image given by 'image_id' from 'root_dir' and returns it as a (H x W x 4) numpy array. """ root = os.path.join(root_dir, image_id) stems = ("_red.png", "_blue.png", "_yellow.png", "_green.png") paths = [root+stem for stem in stems] image = [io.imread(path) for path in paths] return numpy.dstack(image)
[ "pandas.read_csv", "os.path.join", "skimage.io.imread", "numpy.dstack" ]
[((242, 267), 'pandas.read_csv', 'pandas.read_csv', (['csv_file'], {}), '(csv_file)\n', (257, 267), False, 'import pandas\n'), ((480, 512), 'os.path.join', 'os.path.join', (['root_dir', 'image_id'], {}), '(root_dir, image_id)\n', (492, 512), False, 'import os\n'), ((681, 700), 'numpy.dstack', 'numpy.dstack', (['image'], {}), '(image)\n', (693, 700), False, 'import numpy\n'), ((635, 650), 'skimage.io.imread', 'io.imread', (['path'], {}), '(path)\n', (644, 650), False, 'from skimage import io\n')]
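A hedged usage sketch for read_hpa_image above; the image id and root directory are hypothetical, and the four per-channel PNGs must already exist on disk, so the calls are left commented:

# image = read_hpa_image('sample-001', '/data/hpa')
# image.shape  # -> (H, W, 4), channels stacked in red, blue, yellow, green order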
import pandas as pd cities_df = pd.read_csv("Resources/cities.csv") cities_df.to_html('Resources/cities.html', index=False)
[ "pandas.read_csv" ]
[((32, 67), 'pandas.read_csv', 'pd.read_csv', (['"""Resources/cities.csv"""'], {}), "('Resources/cities.csv')\n", (43, 67), True, 'import pandas as pd\n')]
# python3 # pylint: disable=g-bad-file-header # Copyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """Prior losses are losses that regulate towards the prior. These might take the form of weight regularization, or sampling "fake data". These prior_losses are used in e.g. supervised/prior_experiment.py. """ from absl import logging import dataclasses from enn import base from enn import utils import haiku as hk import jax import jax.numpy as jnp import typing_extensions class FakeInputGenerator(typing_extensions.Protocol): def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array: """Generates a fake batch of input=x for use in prior regularization.""" @dataclasses.dataclass class MatchingGaussianData(FakeInputGenerator): """Generates a fake batch of input=x for use in prior regularization.""" scale: float = 1. def __call__(self, batch: base.Batch, key: base.RngKey) -> base.Array: """Generates a fake batch of input=x for use in prior regularization.""" return jax.random.normal(key, batch.x.shape) * self.scale def make_gaussian_dataset(batch_size: int, input_dim: int, seed: int = 0) -> base.BatchIterator: """Returns a batch iterator over random Gaussian data.""" sample_fn = jax.jit(lambda x: jax.random.normal(x, [batch_size, input_dim])) def batch_iterator(): rng = hk.PRNGSequence(seed) while True: x = sample_fn(next(rng)) yield base.Batch(x, y=jnp.ones([x.shape[0], 1])) return batch_iterator() def variance_kl(var: base.Array, pred_log_var: base.Array) -> base.Array: """Compute the KL divergence between Gaussian variance with matched means.""" log_var = jnp.log(var) pred_var = jnp.exp(pred_log_var) return 0.5 * (pred_log_var - log_var + var / pred_var - 1) def generate_batched_forward_at_data( num_index_sample: int, x: base.Array, enn: base.EpistemicNetwork, params: hk.Params, key: base.RngKey) -> base.Output: """Generate enn output for batch of data with indices based on random key.""" batched_indexer = utils.make_batch_indexer(enn.indexer, num_index_sample) batched_forward = jax.vmap(enn.apply, in_axes=[None, None, 0]) batched_out = batched_forward(params, x, batched_indexer(key)) return batched_out def l2_training_penalty(batched_out: base.Output): """Penalize the L2 magnitude of the training network.""" if isinstance(batched_out, base.OutputWithPrior): return 0.5 * jnp.mean(jnp.square(batched_out.train)) else: logging.warning('L2 weight penalty only works for OutputWithPrior.') return 0. 
def distill_mean_regression(batched_out: base.Output, distill_out: base.Output) -> base.Array: """Train the mean of the regression to the distill network.""" observed_mean = jnp.mean(utils.parse_net_output(batched_out), axis=0) distill_mean = jnp.squeeze(utils.parse_net_output(distill_out)) return jnp.mean(jnp.square(distill_mean - observed_mean)) def distill_mean_classification(batched_out: base.Output, distill_out: base.Output) -> base.Array: """Train the mean of the classification to the distill network.""" batched_logits = utils.parse_net_output(batched_out) batched_probs = jax.nn.softmax(batched_logits, axis=-1) mean_probs = jnp.mean(batched_probs, axis=0) distill_probs = jax.nn.softmax(utils.parse_net_output(distill_out), axis=-1) return jnp.mean(jnp.sum( mean_probs * jnp.log(mean_probs / distill_probs), axis=1)) def distill_var_regression(batched_out: base.Output, distill_out: base.Output) -> base.Array: """Train the variance of the regression to the distill network.""" assert isinstance(distill_out, base.OutputWithPrior) observed_var = jnp.var(utils.parse_net_output(batched_out), axis=0) return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var'])) def distill_var_classification(batched_out: base.Output, distill_out: base.Output) -> base.Array: """Train the variance of the classification to the distill network.""" assert isinstance(distill_out, base.OutputWithPrior) batched_logits = utils.parse_net_output(batched_out) observed_var = jnp.var(jax.nn.softmax(batched_logits, axis=-1)) return jnp.mean(variance_kl(observed_var, distill_out.extra['log_var'])) @dataclasses.dataclass class RegressionPriorLoss(base.LossFn): """Regress fake data back to prior, and distill mean/var to mean_index.""" num_index_sample: int input_generator: FakeInputGenerator = MatchingGaussianData() scale: float = 1. distill_index: bool = False def __call__(self, enn: base.EpistemicNetwork, params: hk.Params, batch: base.Batch, key: base.RngKey) -> base.Array: index_key, data_key = jax.random.split(key) fake_x = self.input_generator(batch, data_key) # TODO(author2): Complete prior loss refactor --> MultilossExperiment batched_out = generate_batched_forward_at_data( self.num_index_sample, fake_x, enn, params, index_key) # Regularize towards prior output loss = self.scale * l2_training_penalty(batched_out) # Distill aggregate stats to the "mean_index" if hasattr(enn.indexer, 'mean_index') and self.distill_index: distill_out = enn.apply(params, fake_x, enn.indexer.mean_index) loss += distill_mean_regression(batched_out, distill_out) loss += distill_var_regression(batched_out, distill_out) return loss, {} @dataclasses.dataclass class ClassificationPriorLoss(base.LossFn): """Penalize fake data back to prior, and distill mean/var to mean_index.""" num_index_sample: int input_generator: FakeInputGenerator = MatchingGaussianData() scale: float = 1. 
distill_index: bool = False def __call__(self, enn: base.EpistemicNetwork, params: hk.Params, batch: base.Batch, key: base.RngKey) -> base.Array: index_key, data_key = jax.random.split(key) fake_x = self.input_generator(batch, data_key) # TODO(author2): Complete prior loss refactor --> MultilossExperiment batched_out = generate_batched_forward_at_data( self.num_index_sample, fake_x, enn, params, index_key) # Regularize towards prior output loss = self.scale * l2_training_penalty(batched_out) # Distill aggregate stats to the "mean_index" if hasattr(enn.indexer, 'mean_index') and self.distill_index: distill_out = enn.apply(params, fake_x, enn.indexer.mean_index) loss += distill_mean_classification(batched_out, distill_out) loss += distill_var_classification(batched_out, distill_out) return loss, {}
[ "jax.vmap", "jax.numpy.log", "jax.numpy.exp", "jax.numpy.mean", "jax.numpy.square", "jax.random.split", "jax.random.normal", "absl.logging.warning", "haiku.PRNGSequence", "jax.numpy.ones", "jax.nn.softmax", "enn.utils.make_batch_indexer", "enn.utils.parse_net_output" ]
[((2349, 2361), 'jax.numpy.log', 'jnp.log', (['var'], {}), '(var)\n', (2356, 2361), True, 'import jax.numpy as jnp\n'), ((2375, 2396), 'jax.numpy.exp', 'jnp.exp', (['pred_log_var'], {}), '(pred_log_var)\n', (2382, 2396), True, 'import jax.numpy as jnp\n'), ((2737, 2792), 'enn.utils.make_batch_indexer', 'utils.make_batch_indexer', (['enn.indexer', 'num_index_sample'], {}), '(enn.indexer, num_index_sample)\n', (2761, 2792), False, 'from enn import utils\n'), ((2813, 2857), 'jax.vmap', 'jax.vmap', (['enn.apply'], {'in_axes': '[None, None, 0]'}), '(enn.apply, in_axes=[None, None, 0])\n', (2821, 2857), False, 'import jax\n'), ((3869, 3904), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['batched_out'], {}), '(batched_out)\n', (3891, 3904), False, 'from enn import utils\n'), ((3923, 3962), 'jax.nn.softmax', 'jax.nn.softmax', (['batched_logits'], {'axis': '(-1)'}), '(batched_logits, axis=-1)\n', (3937, 3962), False, 'import jax\n'), ((3978, 4009), 'jax.numpy.mean', 'jnp.mean', (['batched_probs'], {'axis': '(0)'}), '(batched_probs, axis=0)\n', (3986, 4009), True, 'import jax.numpy as jnp\n'), ((4851, 4886), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['batched_out'], {}), '(batched_out)\n', (4873, 4886), False, 'from enn import utils\n'), ((2031, 2052), 'haiku.PRNGSequence', 'hk.PRNGSequence', (['seed'], {}), '(seed)\n', (2046, 2052), True, 'import haiku as hk\n'), ((3177, 3245), 'absl.logging.warning', 'logging.warning', (['"""L2 weight penalty only works for OutputWithPrior."""'], {}), "('L2 weight penalty only works for OutputWithPrior.')\n", (3192, 3245), False, 'from absl import logging\n'), ((3477, 3512), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['batched_out'], {}), '(batched_out)\n', (3499, 3512), False, 'from enn import utils\n'), ((3551, 3586), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['distill_out'], {}), '(distill_out)\n', (3573, 3586), False, 'from enn import utils\n'), ((3606, 3646), 'jax.numpy.square', 'jnp.square', (['(distill_mean - observed_mean)'], {}), '(distill_mean - observed_mean)\n', (3616, 3646), True, 'import jax.numpy as jnp\n'), ((4043, 4078), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['distill_out'], {}), '(distill_out)\n', (4065, 4078), False, 'from enn import utils\n'), ((4453, 4488), 'enn.utils.parse_net_output', 'utils.parse_net_output', (['batched_out'], {}), '(batched_out)\n', (4475, 4488), False, 'from enn import utils\n'), ((4912, 4951), 'jax.nn.softmax', 'jax.nn.softmax', (['batched_logits'], {'axis': '(-1)'}), '(batched_logits, axis=-1)\n', (4926, 4951), False, 'import jax\n'), ((5469, 5490), 'jax.random.split', 'jax.random.split', (['key'], {}), '(key)\n', (5485, 5490), False, 'import jax\n'), ((6608, 6629), 'jax.random.split', 'jax.random.split', (['key'], {}), '(key)\n', (6624, 6629), False, 'import jax\n'), ((1656, 1693), 'jax.random.normal', 'jax.random.normal', (['key', 'batch.x.shape'], {}), '(key, batch.x.shape)\n', (1673, 1693), False, 'import jax\n'), ((1950, 1995), 'jax.random.normal', 'jax.random.normal', (['x', '[batch_size, input_dim]'], {}), '(x, [batch_size, input_dim])\n', (1967, 1995), False, 'import jax\n'), ((3134, 3163), 'jax.numpy.square', 'jnp.square', (['batched_out.train'], {}), '(batched_out.train)\n', (3144, 3163), True, 'import jax.numpy as jnp\n'), ((4135, 4170), 'jax.numpy.log', 'jnp.log', (['(mean_probs / distill_probs)'], {}), '(mean_probs / distill_probs)\n', (4142, 4170), True, 'import jax.numpy as jnp\n'), ((2128, 2153), 'jax.numpy.ones', 'jnp.ones', 
(['[x.shape[0], 1]'], {}), '([x.shape[0], 1])\n', (2136, 2153), True, 'import jax.numpy as jnp\n')]
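A quick sanity check of variance_kl above (a sketch, assuming jax is installed and the function is importable): the divergence vanishes when the predicted log-variance matches the true variance.

import jax.numpy as jnp

var = jnp.asarray(2.0)
assert abs(float(variance_kl(var, jnp.log(var)))) < 1e-6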
import os from base64 import b64decode from flask import render_template, request from io import BytesIO from json import dumps as to_json_string from traceback import format_exc from flask_wtf.csrf import CSRFError from sqlalchemy.exc import SQLAlchemyError from werkzeug.exceptions import HTTPException from application_database import * from application_utils import * from application_login import * # Home page host_redirect('/pages/browse.html', '/') host_redirect('/pages/browse.html', '/index.html') # Short name redirects host_redirect('/pages/browse.html', '/browse.html') host_redirect('/pages/editor.html', '/editor.html') host_redirect('/pages/telemetry.html', '/telemetry.html') host_redirect('/pages/triangles.html', '/triangles.html') host_redirect('/pages/validate.html', '/validate.html') host_redirect('/pages/webcrow.html', '/webcrow.html') # Large blocks of data host_statically('data') host_statically('engine') host_statically('sourcemaps') # Actual page sources host_statically('pages/browse.js') host_statically('pages/editor.html') host_statically('pages/editor.js') host_statically('pages/telemetry.js') host_statically('pages/triangles.html') host_statically('pages/triangles.js') host_statically('pages/validate.html') host_statically('pages/webcrow.html') host_statically('pages/webcrow.js') if application.debug: host_redirect('/pages/test.html', '/test.html') host_redirect('/pages/verify_puzzles.html', '/verify_puzzles.html') host_redirect('/pages/editor_test.html', '/editor_test.html') host_redirect('/pages/challenge.html', '/challenge.html') host_statically('pages/test.html') host_statically('pages/test.js') host_statically('pages/editor_test.html') host_statically('pages/editor_test.js') host_statically('pages/verify_puzzles.html') host_statically('pages/verify_puzzles.js') host_statically('pages/challenge.html') host_statically('pages/challenge.js') host_statically('pages/_UTM.html') host_statically('pages/_UTM.js') host_statically('pages/_UTM2.js') host_statically('pages/left_door.html') host_statically('pages/left_door.js') host_statically('images') def page_not_found(error=None): return render_template('404_generic.html'), 404 application.register_error_handler(404, page_not_found) application.register_error_handler(CSRFError, page_not_found) # In case of a database error, cancel any active transactions to prevent the database getting stuck. def handle_database_error(exc): if db.session.is_active: # db imported from application_database.py db.session.rollback() return '', 500 application.register_error_handler(SQLAlchemyError, handle_database_error) # We do not actually want to handle HTTP exceptions (e.g. 405), we want to just return them to the caller. 
# https://flask.palletsprojects.com/en/2.0.x/errorhandling/#generic-exception-handlers application.register_error_handler(HTTPException, lambda exc: exc) def handle_exception(exc): message = f'Caught a {type(exc).__name__}: {format_exc()}' add_feedback(message) return '', 500 application.register_error_handler(Exception, handle_exception) # Publishing puzzles @csrf.exempt def publish(): solution_json = request.form['solution'] data = validate_and_capture_image(solution_json) if 'error' in data: return data['error'], 400 title = data.get('title', 'Unnamed Puzzle') # [22:] to remove the "data:image/png;base64," prefix image = BytesIO(b64decode(data['screenshot'][22:])) puzzle_json = data['puzzle_json'] display_hash = create_puzzle(title, puzzle_json, solution_json, image) return display_hash, 200 application.add_url_rule('/publish', 'publish', publish, methods=['POST']) application.add_url_rule('/publish', 'publish_get', page_not_found, methods=['GET']) # Playing published puzzles def play(display_hash): puzzle = get_puzzle(display_hash) if not puzzle or not puzzle.puzzle_json: return render_template('404_puzzle.html', display_hash=display_hash) return render_template('play_template.html', puzzle=puzzle.puzzle_json, display_hash=display_hash, title=puzzle.title, image=puzzle.url ) application.add_url_rule('/play/<display_hash>', 'play', play) # Getting puzzles from the DB to show on the browse page def browse(): sort_type = request.args.get('sort_type', 'date') # date order = request.args.get('order', '') # asc, desc offset = request.args.get('offset', 0) limit = request.args.get('limit', 100) puzzles = get_puzzles(sort_type, order, offset, limit) output = [] for puzzle in puzzles: output.append({ 'display_hash': puzzle.display_hash, 'url': puzzle.url, 'title': puzzle.title, }) if len(output) == 0: return '', 204 else: return to_json_string(output), 200 application.add_url_rule('/browse', 'browse', browse) @csrf.exempt def telemetry(): kwargs = { 'session_id': request.form['session_id'], 'event_type': request.form['event_type'], 'server_version': '%version%', 'client_version': request.form['version'], 'page': request.environ.get('HTTP_REFERER', ''), } if kwargs['page']: page_parts = kwargs['page'].split('/') if page_parts[-2] == 'play': kwargs['puzzle'] = page_parts[-1] if kwargs['event_type'] in ['feedback', 'error']: # Users providing feedback and javascript errors add_telemetry(**kwargs, data=request.form['data']) elif kwargs['event_type'] == 'puzzle_start': # Page load on play_template add_puzzle_start(**kwargs) elif kwargs['event_type'] == 'puzzle_solve': # Successful solve on play_template add_puzzle_solve(**kwargs) else: print('Unknown event type: ' + kwargs['event_type']) return '', 200 application.add_url_rule('/telemetry', 'telemetry', telemetry, methods=['POST']) application.add_url_rule('/telemetry', 'telemetry_get', page_not_found, methods=['GET']) # Verifying that puzzles are valid if application.debug: def verify_puzzles(): import csv with open('puzzles.tsv', newline='') as csvfile: puzzles = [row for row in csv.reader(csvfile, delimiter='\t')] return render_template('verify_puzzles.html', puzzles=puzzles) application.add_url_rule('/pages/verify_puzzles.html', 'verify_puzzles', verify_puzzles) if __name__ == '__main__': extra_files = [] for root, dirs, files in os.walk('.'): if 'images' in root: continue if '.git' in root: continue for file in files: extra_files.append(root + os.sep + file) # To make this server visible from the local network, add host='0.0.0.0' 
application.run(extra_files=extra_files)
[ "csv.reader", "flask.request.args.get", "os.walk", "base64.b64decode", "json.dumps", "flask.request.environ.get", "traceback.format_exc", "flask.render_template" ]
[((3995, 4128), 'flask.render_template', 'render_template', (['"""play_template.html"""'], {'puzzle': 'puzzle.puzzle_json', 'display_hash': 'display_hash', 'title': 'puzzle.title', 'image': 'puzzle.url'}), "('play_template.html', puzzle=puzzle.puzzle_json,\n display_hash=display_hash, title=puzzle.title, image=puzzle.url)\n", (4010, 4128), False, 'from flask import render_template, request\n'), ((4293, 4330), 'flask.request.args.get', 'request.args.get', (['"""sort_type"""', '"""date"""'], {}), "('sort_type', 'date')\n", (4309, 4330), False, 'from flask import render_template, request\n'), ((4348, 4377), 'flask.request.args.get', 'request.args.get', (['"""order"""', '""""""'], {}), "('order', '')\n", (4364, 4377), False, 'from flask import render_template, request\n'), ((4401, 4430), 'flask.request.args.get', 'request.args.get', (['"""offset"""', '(0)'], {}), "('offset', 0)\n", (4417, 4430), False, 'from flask import render_template, request\n'), ((4441, 4471), 'flask.request.args.get', 'request.args.get', (['"""limit"""', '(100)'], {}), "('limit', 100)\n", (4457, 4471), False, 'from flask import render_template, request\n'), ((6331, 6343), 'os.walk', 'os.walk', (['"""."""'], {}), "('.')\n", (6338, 6343), False, 'import os\n'), ((2193, 2228), 'flask.render_template', 'render_template', (['"""404_generic.html"""'], {}), "('404_generic.html')\n", (2208, 2228), False, 'from flask import render_template, request\n'), ((3448, 3482), 'base64.b64decode', 'b64decode', (["data['screenshot'][22:]"], {}), "(data['screenshot'][22:])\n", (3457, 3482), False, 'from base64 import b64decode\n'), ((3923, 3984), 'flask.render_template', 'render_template', (['"""404_puzzle.html"""'], {'display_hash': 'display_hash'}), "('404_puzzle.html', display_hash=display_hash)\n", (3938, 3984), False, 'from flask import render_template, request\n'), ((5066, 5105), 'flask.request.environ.get', 'request.environ.get', (['"""HTTP_REFERER"""', '""""""'], {}), "('HTTP_REFERER', '')\n", (5085, 5105), False, 'from flask import render_template, request\n'), ((3010, 3022), 'traceback.format_exc', 'format_exc', ([], {}), '()\n', (3020, 3022), False, 'from traceback import format_exc\n'), ((4754, 4776), 'json.dumps', 'to_json_string', (['output'], {}), '(output)\n', (4768, 4776), True, 'from json import dumps as to_json_string\n'), ((6110, 6165), 'flask.render_template', 'render_template', (['"""verify_puzzles.html"""'], {'puzzles': 'puzzles'}), "('verify_puzzles.html', puzzles=puzzles)\n", (6125, 6165), False, 'from flask import render_template, request\n'), ((6060, 6095), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '"""\t"""'}), "(csvfile, delimiter='\\t')\n", (6070, 6095), False, 'import csv\n')]
# test sfr renumbering schemes and other random utilities import gsflow import os from gsflow.utils import SfrRenumber ws = os.path.abspath(os.path.dirname(__file__)) def test_sfr_renumber(): # simple test to ensure no crashes in the renumbering schemes # expand this later to test LAK, AG, and GAGE local_ws = os.path.join(ws, "..", "examples", "data", "sagehen", "gsflow") control_file = "saghen_new_cont.control" gsf = gsflow.GsflowModel.load_from_file(os.path.join(local_ws, control_file)) ml = gsf.mf # renumber by topology sfrenum = SfrRenumber(model=ml) sfrenum.renumber_sfr() sfrenum.renumber_all() # renumber by dis sfrenum = SfrRenumber(model=ml, scheme="dis") sfrenum.renumber_sfr() sfrenum.renumber_all() # renumber by strtop sfrenum = SfrRenumber(model=ml, scheme="sfr") sfrenum.renumber_sfr() sfrenum.renumber_all() if __name__ == "__main__": test_sfr_renumber()
[ "os.path.dirname", "os.path.join", "gsflow.utils.SfrRenumber" ]
[((148, 173), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (163, 173), False, 'import os\n'), ((339, 402), 'os.path.join', 'os.path.join', (['ws', '""".."""', '"""examples"""', '"""data"""', '"""sagehen"""', '"""gsflow"""'], {}), "(ws, '..', 'examples', 'data', 'sagehen', 'gsflow')\n", (351, 402), False, 'import os\n'), ((596, 617), 'gsflow.utils.SfrRenumber', 'SfrRenumber', ([], {'model': 'ml'}), '(model=ml)\n', (607, 617), False, 'from gsflow.utils import SfrRenumber\n'), ((714, 749), 'gsflow.utils.SfrRenumber', 'SfrRenumber', ([], {'model': 'ml', 'scheme': '"""dis"""'}), "(model=ml, scheme='dis')\n", (725, 749), False, 'from gsflow.utils import SfrRenumber\n'), ((849, 884), 'gsflow.utils.SfrRenumber', 'SfrRenumber', ([], {'model': 'ml', 'scheme': '"""sfr"""'}), "(model=ml, scheme='sfr')\n", (860, 884), False, 'from gsflow.utils import SfrRenumber\n'), ((496, 532), 'os.path.join', 'os.path.join', (['local_ws', 'control_file'], {}), '(local_ws, control_file)\n', (508, 532), False, 'import os\n')]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('salts', '0029_shooting_ticket_id'), ] operations = [ migrations.RenameField('TestResult', 'test_id', 'session_id'), migrations.RenameField('Shooting', 'test_id', 'session_id') ]
[ "django.db.migrations.RenameField" ]
[((249, 310), 'django.db.migrations.RenameField', 'migrations.RenameField', (['"""TestResult"""', '"""test_id"""', '"""session_id"""'], {}), "('TestResult', 'test_id', 'session_id')\n", (271, 310), False, 'from django.db import migrations, models\n'), ((320, 379), 'django.db.migrations.RenameField', 'migrations.RenameField', (['"""Shooting"""', '"""test_id"""', '"""session_id"""'], {}), "('Shooting', 'test_id', 'session_id')\n", (342, 379), False, 'from django.db import migrations, models\n')]
import pika connection = pika.BlockingConnection( pika.ConnectionParameters(host='localhost')) ch = connection.channel() ch.exchange_declare(exchange='logs', exchange_type='fanout') ch.basic_publish(exchange='logs', routing_key='', body='this is testing fanout') print('message sent') connection.close()
[ "pika.ConnectionParameters" ]
[((55, 98), 'pika.ConnectionParameters', 'pika.ConnectionParameters', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (80, 98), False, 'import pika\n')]
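A fanout publisher like the one above is normally paired with a consumer that binds its own queue to the same exchange. A minimal consumer sketch, assuming a RabbitMQ broker on localhost (the callback name is illustrative):

import pika

connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='localhost'))
ch = connection.channel()
ch.exchange_declare(exchange='logs', exchange_type='fanout')

# Server-named exclusive queue, deleted when this consumer disconnects.
result = ch.queue_declare(queue='', exclusive=True)
ch.queue_bind(exchange='logs', queue=result.method.queue)

def on_message(channel, method, properties, body):
    print('received:', body)

ch.basic_consume(queue=result.method.queue,
                 on_message_callback=on_message, auto_ack=True)
ch.start_consuming()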
from django.contrib.auth.models import AnonymousUser from django.test import TestCase, RequestFactory, Client from django.contrib.auth import get_user_model from django.urls.base import reverse from django.conf import settings from .models import Token from .views import user_auth UserModel = get_user_model() class GetAutoAuthTestCase(TestCase): def setUp(self): self.client = Client() self.factory = RequestFactory() self.user = UserModel.objects.create(username="jacob", email="<EMAIL>", password="<PASSWORD>") self.url_token = Token.objects.create(user=self.user) def test_auth(self): auth_url = "{0}?token={1}".format(reverse('get_autologin:auth'), self.url_token.token) request = self.factory.get(auth_url) request.user = AnonymousUser() request.session = self.client.session response = user_auth(request) response.client = self.client self.assertRedirects(response, settings.LOGIN_REDIRECT_URL, status_code=302, target_status_code=200)
[ "django.contrib.auth.models.AnonymousUser", "django.test.RequestFactory", "django.test.Client", "django.contrib.auth.get_user_model", "django.urls.base.reverse" ]
[((296, 312), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (310, 312), False, 'from django.contrib.auth import get_user_model\n'), ((395, 403), 'django.test.Client', 'Client', ([], {}), '()\n', (401, 403), False, 'from django.test import TestCase, RequestFactory, Client\n'), ((427, 443), 'django.test.RequestFactory', 'RequestFactory', ([], {}), '()\n', (441, 443), False, 'from django.test import TestCase, RequestFactory, Client\n'), ((798, 813), 'django.contrib.auth.models.AnonymousUser', 'AnonymousUser', ([], {}), '()\n', (811, 813), False, 'from django.contrib.auth.models import AnonymousUser\n'), ((677, 706), 'django.urls.base.reverse', 'reverse', (['"""get_autologin:auth"""'], {}), "('get_autologin:auth')\n", (684, 706), False, 'from django.urls.base import reverse\n')]
from matplotlib import pyplot as plt import pickle import numpy as np import os,sys ''' results = [] for i in range(10): with open(f'/home/yiran/pc_mapping/arena-v2/examples/bc_saved_models/refactor_success_max_mine/run{i}/test_result.npy', 'rb') as f: result_i = pickle.load(f) result_number = [v for (k,v) in result_i.items()] results.append(result_number) results = np.array(results) result_mean = results.mean(axis=0) result_std = results.std(axis=0) print(result_mean, result_std) exit() ''' x = [1,3,5,7,9,11,13,15,17,19] y_ub = np.arange(1,21,2) y_heuristic = [1.0, 3.0, 4.99, 6.83, 8.53, 9.46, 10.89, 12.3, 13.69, 13.64] y_DDQN = [0.99, 2.88, 4.78, 3.82, 2.37, 2.14, 1.35, 1.01, 0.91,1.09] y_refactor_max = [0.98, 2.86, 4.64, 5.67, 5.81, 5.82, 5.35, 5.07, 3.34, 3.11] y_refactor_success_max = [0.99, 3.0, 4.94, 6.55, 7.74, 8.47, 8.48, 7.72, 7.29, 5.85] y_refactor_purify_10of10_max = [1.0, 2.97, 4.85, 6.76, 8.05, 8.42, 8.66, 8.03, 7.58, 5.65] y_refactor_purify_9of10_max = [1.0, 3.0, 4.94, 6.69, 8.27, 9.27, 9.3, 8.88, 8.87, 7.94] y_refactor_purify_8of10_max = [1.0, 3.0, 4.95, 6.76, 7.68, 8.14, 8.11, 8.18, 6.99, 5.09] y_refactor_purify_7of10_max = [1.0, 3.0, 4.93, 6.91, 8.32, 9.46, 10.64, 11.7, 11.81, 10.86] y_refactor_purify_6of10_max = [1.0, 2.97, 4.93, 6.78, 8.35, 9.87, 10.78, 11.29, 12.0, 11.09] y_refactor_purify_5of10_max = [1.0, 2.94, 5.0, 6.59, 8.28, 8.96, 10.22, 10.34, 10.93, 10.56] y_refactor_purify_4of10_max = [1.0, 2.97, 5.0, 6.79, 8.16, 9.27, 8.16, 7.82, 7.47, 6.02] y_refactor_purify_3of10_max = [1.0, 2.95, 4.96, 6.56, 7.96, 9.14, 8.64, 7.64, 7.36, 4.54] y_refactor_purify_2of10_max = [1.0, 3.0, 4.95, 6.75, 8.32, 9.49, 9.55, 9.73, 9.75, 8.04] y_refactor_purify_1of10_max = [1.0, 2.97, 4.96, 6.75, 7.92, 8.09, 7.92, 6.62, 5.85, 4.7] plt.xlabel('number of coins') plt.ylabel('collected coins (mean of 100 runs)') plt.xlim(0, 19) plt.xticks(np.arange(1,21,2)) plt.ylim(0, 19) plt.yticks(np.arange(1,21,2)) plt.plot(x, y_ub, label='max score') plt.plot(x, y_heuristic, label='IL heuristic') plt.plot(x, y_refactor_purify_6of10_max, label='IL purify') plt.plot(x, y_refactor_success_max, label='IL successful traj') plt.plot(x, y_refactor_max, label='IL all traj') plt.plot(x, y_DDQN, label='DoubleDQN') plt.legend(loc='upper left') plt.show()
[ "matplotlib.pyplot.xlim", "matplotlib.pyplot.show", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel" ]
[((556, 575), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (565, 575), True, 'import numpy as np\n'), ((1806, 1835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of coins"""'], {}), "('number of coins')\n", (1816, 1835), True, 'from matplotlib import pyplot as plt\n'), ((1836, 1884), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""collected coins (mean of 100 runs)"""'], {}), "('collected coins (mean of 100 runs)')\n", (1846, 1884), True, 'from matplotlib import pyplot as plt\n'), ((1885, 1900), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(19)'], {}), '(0, 19)\n', (1893, 1900), True, 'from matplotlib import pyplot as plt\n'), ((1931, 1946), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(19)'], {}), '(0, 19)\n', (1939, 1946), True, 'from matplotlib import pyplot as plt\n'), ((1977, 2013), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_ub'], {'label': '"""max score"""'}), "(x, y_ub, label='max score')\n", (1985, 2013), True, 'from matplotlib import pyplot as plt\n'), ((2014, 2060), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_heuristic'], {'label': '"""IL heuristic"""'}), "(x, y_heuristic, label='IL heuristic')\n", (2022, 2060), True, 'from matplotlib import pyplot as plt\n'), ((2061, 2120), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_purify_6of10_max'], {'label': '"""IL purify"""'}), "(x, y_refactor_purify_6of10_max, label='IL purify')\n", (2069, 2120), True, 'from matplotlib import pyplot as plt\n'), ((2121, 2184), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_success_max'], {'label': '"""IL successful traj"""'}), "(x, y_refactor_success_max, label='IL successful traj')\n", (2129, 2184), True, 'from matplotlib import pyplot as plt\n'), ((2185, 2233), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_refactor_max'], {'label': '"""IL all traj"""'}), "(x, y_refactor_max, label='IL all traj')\n", (2193, 2233), True, 'from matplotlib import pyplot as plt\n'), ((2234, 2272), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y_DDQN'], {'label': '"""DoubleDQN"""'}), "(x, y_DDQN, label='DoubleDQN')\n", (2242, 2272), True, 'from matplotlib import pyplot as plt\n'), ((2273, 2301), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (2283, 2301), True, 'from matplotlib import pyplot as plt\n'), ((2302, 2312), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2310, 2312), True, 'from matplotlib import pyplot as plt\n'), ((1912, 1931), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (1921, 1931), True, 'import numpy as np\n'), ((1958, 1977), 'numpy.arange', 'np.arange', (['(1)', '(21)', '(2)'], {}), '(1, 21, 2)\n', (1967, 1977), True, 'import numpy as np\n')]
#!/usr/bin/python """ This is the main python file to run. """ import sys from application import Application # -- Functions --------------------------------------------------------------- def main(): """ The main function. """ app = Application(sys.argv) app.run() # -- Main entry point -------------------------------------------------------- if __name__ == "__main__": main()
[ "application.Application" ]
[((267, 288), 'application.Application', 'Application', (['sys.argv'], {}), '(sys.argv)\n', (278, 288), False, 'from application import Application\n')]
import subprocess
import ftplib
import os
import time
from hide_data import *
from datetime import datetime


# Create folders named by day, month, year, hour, minutes, seconds
def create_path_folder(init_path):
    day = time.strftime("%d", time.localtime())
    month = time.strftime("%m", time.localtime())
    year = time.strftime("%y", time.localtime())
    ora = time.strftime("%H", time.localtime())
    minuti = time.strftime("%M", time.localtime())
    secondi = time.strftime("%S", time.localtime())
    # Build the folder name depending on the time of day
    if ora == "21" and minuti == "30" and secondi == "00":
        path_for_G = init_path + day + "-" + month + "-" + year + "-Ora" + ora + "-minuti" + minuti + "-secondi" + secondi + " Backup completo"
    else:
        path_for_G = init_path + day + "-" + month + "-" + year + "-Ora" + ora + "-minuti" + minuti + "-secondi" + secondi
    return path_for_G


def check_if_connection_exist(net_path, password):
    try:
        os.chdir("G:\\ICT")
    except OSError:
        # The first NET USE call creates a connection to the server
        call_to_server2003_with_CMD = " NET USE " + net_path + " " + password
        subprocess.Popen(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell=True)
        # The second NET USE call maps a virtual drive where data can be written or processed
        call_to_server2003_with_CMD = " NET USE G: " + net_path + " " + password
        subprocess.Popen(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell=True)
        # --> END OF THE SERVER/DATA COMMUNICATION SETUP


# Date cache and file cache, so files are downloaded only when they have been modified
date = [
    # FILE CREATION DATES
]
files = [
    # FILE NAMES
]
# --> End of cache


def download_files(ip, user, password, path_files, path_for_G):
    ftp = ftplib.FTP(ip)
    ftp.login(user, password)
    ftp.cwd(path_files)
    filenames = ftp.nlst()
    for filename in filenames:
        file_time = ftp.sendcmd("MDTM " + filename)
        file_convert_time = datetime.strptime(file_time[4:], "%Y%m%d%H%M%S").strftime("%Y-%m-%d %H.%M")
        for e in range(len(date)):
            if files[e] == filename:
                print(filename, "\n")
                print(date[e], "\n")
                if file_convert_time > date[e]:
                    local_filename = os.path.join(path_for_G, filename)
                    file = open(local_filename, 'wb')
                    ftp.retrbinary('RETR ' + filename, file.write)
                    file.close()
                    date[e] = file_convert_time
                    print("updated", "\n", date[e], "\n")
                else:
                    print("the file is already the most recent version")
    try:
        time.sleep(3)
        os.rmdir(path_for_G)
    except OSError:
        print("file in folder")
    ftp.quit()
[ "subprocess.Popen", "time.sleep", "datetime.datetime.strptime", "ftplib.FTP", "os.rmdir", "os.path.join", "os.chdir", "time.localtime" ]
[((1913, 1927), 'ftplib.FTP', 'ftplib.FTP', (['ip'], {}), '(ip)\n', (1923, 1927), False, 'import ftplib\n'), ((264, 280), 'time.localtime', 'time.localtime', ([], {}), '()\n', (278, 280), False, 'import time\n'), ((315, 331), 'time.localtime', 'time.localtime', ([], {}), '()\n', (329, 331), False, 'import time\n'), ((365, 381), 'time.localtime', 'time.localtime', ([], {}), '()\n', (379, 381), False, 'import time\n'), ((414, 430), 'time.localtime', 'time.localtime', ([], {}), '()\n', (428, 430), False, 'import time\n'), ((466, 482), 'time.localtime', 'time.localtime', ([], {}), '()\n', (480, 482), False, 'import time\n'), ((519, 535), 'time.localtime', 'time.localtime', ([], {}), '()\n', (533, 535), False, 'import time\n'), ((1023, 1042), 'os.chdir', 'os.chdir', (['"""G:\\\\ICT"""'], {}), "('G:\\\\ICT')\n", (1031, 1042), False, 'import os\n'), ((2853, 2866), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2863, 2866), False, 'import time\n'), ((2876, 2896), 'os.rmdir', 'os.rmdir', (['path_for_G'], {}), '(path_for_G)\n', (2884, 2896), False, 'import os\n'), ((1217, 1303), 'subprocess.Popen', 'subprocess.Popen', (['call_to_server2003_with_CMD'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell\n =True)\n', (1233, 1303), False, 'import subprocess\n'), ((1494, 1580), 'subprocess.Popen', 'subprocess.Popen', (['call_to_server2003_with_CMD'], {'stdout': 'subprocess.PIPE', 'shell': '(True)'}), '(call_to_server2003_with_CMD, stdout=subprocess.PIPE, shell\n =True)\n', (1510, 1580), False, 'import subprocess\n'), ((2130, 2178), 'datetime.datetime.strptime', 'datetime.strptime', (['file_time[4:]', '"""%Y%m%d%H%M%S"""'], {}), "(file_time[4:], '%Y%m%d%H%M%S')\n", (2147, 2178), False, 'from datetime import datetime\n'), ((2444, 2478), 'os.path.join', 'os.path.join', (['path_for_G', 'filename'], {}), '(path_for_G, filename)\n', (2456, 2478), False, 'import os\n')]
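For reference, a hedged sketch of how the three helpers above fit together; the share, credentials, and paths are placeholders, not real values (the real ones presumably come from hide_data), so the calls are left commented:

# check_if_connection_exist(r'\\server2003\share', 'password')  # maps drive G:
# target = create_path_folder('G:\\ICT\\backup\\')
# os.makedirs(target, exist_ok=True)
# download_files('192.0.2.10', 'user', 'password', '/files', target)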
import numpy as np
import pandas as pd


def complexity_hjorth(signal):
    """Hjorth's Complexity and Parameters

    Hjorth Parameters are indicators of statistical properties used in signal
    processing in the time domain introduced by Hjorth (1970). The parameters
    are activity, mobility, and complexity. NeuroKit returns complexity
    directly in the output tuple, but the other parameters can be found in the
    dictionary.

    - The **complexity** parameter gives an estimate of the bandwidth of the
    signal, which indicates the similarity of the shape of the signal to a
    pure sine wave (where the value converges to 1). Complexity is defined as
    the ratio of the mobility of the first derivative of the signal to the
    mobility of the signal.

    - The **mobility** parameter represents the mean frequency or the
    proportion of standard deviation of the power spectrum. It is defined as
    the square root of the variance of the first derivative of the signal
    divided by the variance of the signal.

    - The **activity** parameter is simply the variance of the signal.

    See Also
    --------
    fractal_petrosian

    Parameters
    ----------
    signal : Union[list, np.array, pd.Series]
        The signal (i.e., a time series) in the form of a vector of values.

    Returns
    -------
    hjorth : float
        Hjorth's Complexity.
    info : dict
        A dictionary containing additional information regarding the
        parameters used to compute Hjorth's Complexity.

    Examples
    ----------
    >>> import neurokit2 as nk
    >>>
    >>> signal = nk.signal_simulate(duration=2, frequency=5)
    >>>
    >>> complexity, info = nk.complexity_hjorth(signal)
    >>> complexity #doctest: +SKIP

    References
    ----------
    - https://github.com/raphaelvallat/antropy/blob/master/antropy
    """
    # Sanity checks
    if isinstance(signal, (np.ndarray, pd.DataFrame)) and signal.ndim > 1:
        raise ValueError(
            "Multidimensional inputs (e.g., matrices or multichannel data) are not supported yet."
        )

    # Calculate derivatives
    dx = np.diff(signal)
    ddx = np.diff(dx)

    # Calculate variance and its derivatives
    x_var = np.var(signal)  # = activity
    dx_var = np.var(dx)
    ddx_var = np.var(ddx)

    # Mobility and complexity
    mobility = np.sqrt(dx_var / x_var)
    complexity = np.sqrt(ddx_var / dx_var) / mobility
    return complexity, {"Mobility": mobility, "Activity": x_var}
[ "numpy.diff", "numpy.var", "numpy.sqrt" ]
[((2112, 2127), 'numpy.diff', 'np.diff', (['signal'], {}), '(signal)\n', (2119, 2127), True, 'import numpy as np\n'), ((2138, 2149), 'numpy.diff', 'np.diff', (['dx'], {}), '(dx)\n', (2145, 2149), True, 'import numpy as np\n'), ((2208, 2222), 'numpy.var', 'np.var', (['signal'], {}), '(signal)\n', (2214, 2222), True, 'import numpy as np\n'), ((2250, 2260), 'numpy.var', 'np.var', (['dx'], {}), '(dx)\n', (2256, 2260), True, 'import numpy as np\n'), ((2275, 2286), 'numpy.var', 'np.var', (['ddx'], {}), '(ddx)\n', (2281, 2286), True, 'import numpy as np\n'), ((2333, 2356), 'numpy.sqrt', 'np.sqrt', (['(dx_var / x_var)'], {}), '(dx_var / x_var)\n', (2340, 2356), True, 'import numpy as np\n'), ((2374, 2399), 'numpy.sqrt', 'np.sqrt', (['(ddx_var / dx_var)'], {}), '(ddx_var / dx_var)\n', (2381, 2399), True, 'import numpy as np\n')]
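As a quick check of the docstring's claim that complexity converges to 1 for a pure sine wave, a minimal sketch (assuming numpy is available and complexity_hjorth above is importable):

import numpy as np

t = np.linspace(0, 2, 2000)
complexity, info = complexity_hjorth(np.sin(2 * np.pi * 5 * t))
# complexity is close to 1; info['Mobility'] grows with the signal frequency.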
# ERFNet full model definition for Pytorch # Sept 2017 # <NAME> ####################### import torch import torch.nn as nn import torch.nn.init as init import torch.nn.functional as F class DownsamplerBlock(nn.Module): def __init__(self, ninput, noutput): super().__init__() self.conv = nn.Conv2d(ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True) self.conv2 = nn.Conv2d(16, 64, (1, 1), stride=1, padding=0, bias=True) self.pool = nn.MaxPool2d(2, stride=2, return_indices=True) self.bn = nn.BatchNorm2d(noutput, eps=1e-3) def forward(self, input): c = input a = self.conv(input) b, max_indices = self.pool(input) # print(a.shape,b.shape,c.shape,max_indices.shape,"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx") output1 = torch.cat([a, b], 1) if b.shape[1] == 16: b_c = self.conv2(b) else: b_c = b output = self.bn(output1) return F.relu(output), max_indices, b, b_c, output1 class non_bottleneck_1d(nn.Module): def __init__(self, chann, dropprob, dilated): super().__init__() self.conv3x1_1 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1, 0), bias=True) self.conv1x3_1 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 1), bias=True) self.bn1 = nn.BatchNorm2d(chann, eps=1e-03) self.conv3x1_2 = nn.Conv2d(chann, chann, (3, 1), stride=1, padding=(1 * dilated, 0), bias=True, dilation=(dilated, 1)) self.conv1x3_2 = nn.Conv2d(chann, chann, (1, 3), stride=1, padding=(0, 1 * dilated), bias=True, dilation=(1, dilated)) self.bn2 = nn.BatchNorm2d(chann, eps=1e-03) self.dropout = nn.Dropout2d(dropprob) def forward(self, input): output = self.conv3x1_1(input) output = F.relu(output) output = self.conv1x3_1(output) output = self.bn1(output) output = F.relu(output) output = self.conv3x1_2(output) output = F.relu(output) output = self.conv1x3_2(output) output = self.bn2(output) if (self.dropout.p != 0): output = self.dropout(output) return F.relu(output + input), output # +input = identity (residual connection) class RPNet(nn.Module): def __init__(self, num_classes): super().__init__() self.initial_block = DownsamplerBlock(3, 16) self.l0d1 = non_bottleneck_1d(16, 0.03, 1) self.down0_25 = DownsamplerBlock(16, 64) self.l1d1 = non_bottleneck_1d(64, 0.03, 1) self.l1d2 = non_bottleneck_1d(64, 0.03, 1) self.l1d3 = non_bottleneck_1d(64, 0.03, 1) self.l1d4 = non_bottleneck_1d(64, 0.03, 1) self.l1d5 = non_bottleneck_1d(64, 0.03, 1) self.down0_125 = DownsamplerBlock(64, 128) self.l2d1 = non_bottleneck_1d(128, 0.3, 2) self.l2d2 = non_bottleneck_1d(128, 0.3, 4) self.l2d3 = non_bottleneck_1d(128, 0.3, 8) self.l2d4 = non_bottleneck_1d(128, 0.3, 16) self.l3d1 = non_bottleneck_1d(128, 0.3, 2) self.l3d2 = non_bottleneck_1d(128, 0.3, 4) self.l3d3 = non_bottleneck_1d(128, 0.3, 8) self.l3d4 = non_bottleneck_1d(128, 0.3, 16) # Only in encoder mode: self.conv2d1 = nn.Conv2d( 128, num_classes, kernel_size=1, stride=1, padding=0, bias=True) self.conv2d2 = nn.Conv2d( 192, num_classes, kernel_size=1, stride=1, padding=0, bias=True) self.conv2d3 = nn.Conv2d( 36, num_classes, kernel_size=1, stride=1, padding=0, bias=True) self.conv2d4 = nn.Conv2d( 16, num_classes, kernel_size=1, stride=1, padding=0, bias=False) self.conv2d5 = nn.Conv2d( 64, num_classes, kernel_size=1, stride=1, padding=0, bias=False) self.main_unpool1 = nn.MaxUnpool2d(kernel_size=2) self.main_unpool2 = nn.MaxUnpool2d(kernel_size=2) def forward(self, input, predict=False): output, max_indices0_0, d, d_d, dd = self.initial_block(input) output, y = self.l0d1(output) output, max_indices1_0, d1, d1_d1, ddd = self.down0_25(output) # 
        # print(d1.shape)
        d2 = self.main_unpool1(d1, max_indices1_0)
        d_1 = d2 - dd
        output, y = self.l1d1(output)
        output, y = self.l1d2(output)
        output, y = self.l1d3(output)
        output, y = self.l1d4(output)
        cc_2 = self.conv2d4(d_1)
        output, max_indices2_0, d3, d3_d3, dddd = self.down0_125(output)
        d4 = self.main_unpool2(d3, max_indices2_0)
        d_2 = d4 - d1_d1
        cc_4 = self.conv2d5(d_2)
        output, y = self.l2d1(output)
        output, y = self.l2d2(output)
        output, y = self.l2d3(output)
        output, y = self.l2d4(output)
        output, y = self.l3d1(output)
        output, y = self.l3d2(output)
        output, y = self.l3d3(output)
        output, y = self.l3d4(output)
        x1_81 = output
        x1_8 = self.conv2d1(output)
        x1_8_2 = torch.nn.functional.interpolate(x1_81, scale_factor=2, mode='bilinear')
        out4 = torch.cat((x1_8_2, d_2), 1)
        x1_41 = self.conv2d2(out4)
        x1_4 = x1_41 + cc_4
        x1_4_2 = torch.nn.functional.interpolate(x1_4, scale_factor=2, mode='bilinear')
        out2 = torch.cat((x1_4_2, d_1), 1)
        x1_21 = self.conv2d3(out2)
        x1_2 = x1_21 + cc_2
        x1_1 = torch.nn.functional.interpolate(x1_2, scale_factor=2, mode='bilinear')
        return x1_1, x1_2, x1_4, x1_8
[ "torch.nn.Dropout2d", "torch.nn.Conv2d", "torch.cat", "torch.nn.BatchNorm2d", "torch.nn.functional.relu", "torch.nn.MaxPool2d", "torch.nn.functional.interpolate", "torch.nn.MaxUnpool2d" ]
[((326, 401), 'torch.nn.Conv2d', 'nn.Conv2d', (['ninput', '(noutput - ninput)', '(3, 3)'], {'stride': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(ninput, noutput - ninput, (3, 3), stride=2, padding=1, bias=True)\n', (335, 401), True, 'import torch.nn as nn\n'), ((424, 481), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', '(64)', '(1, 1)'], {'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(16, 64, (1, 1), stride=1, padding=0, bias=True)\n', (433, 481), True, 'import torch.nn as nn\n'), ((503, 549), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(2)', 'return_indices': '(True)'}), '(2, stride=2, return_indices=True)\n', (515, 549), True, 'import torch.nn as nn\n'), ((569, 603), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['noutput'], {'eps': '(0.001)'}), '(noutput, eps=0.001)\n', (583, 603), True, 'import torch.nn as nn\n'), ((842, 862), 'torch.cat', 'torch.cat', (['[a, b]', '(1)'], {}), '([a, b], 1)\n', (851, 862), False, 'import torch\n'), ((1206, 1274), 'torch.nn.Conv2d', 'nn.Conv2d', (['chann', 'chann', '(3, 1)'], {'stride': '(1)', 'padding': '(1, 0)', 'bias': '(True)'}), '(chann, chann, (3, 1), stride=1, padding=(1, 0), bias=True)\n', (1215, 1274), True, 'import torch.nn as nn\n'), ((1303, 1371), 'torch.nn.Conv2d', 'nn.Conv2d', (['chann', 'chann', '(1, 3)'], {'stride': '(1)', 'padding': '(0, 1)', 'bias': '(True)'}), '(chann, chann, (1, 3), stride=1, padding=(0, 1), bias=True)\n', (1312, 1371), True, 'import torch.nn as nn\n'), ((1394, 1426), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['chann'], {'eps': '(0.001)'}), '(chann, eps=0.001)\n', (1408, 1426), True, 'import torch.nn as nn\n'), ((1455, 1561), 'torch.nn.Conv2d', 'nn.Conv2d', (['chann', 'chann', '(3, 1)'], {'stride': '(1)', 'padding': '(1 * dilated, 0)', 'bias': '(True)', 'dilation': '(dilated, 1)'}), '(chann, chann, (3, 1), stride=1, padding=(1 * dilated, 0), bias=\n True, dilation=(dilated, 1))\n', (1464, 1561), True, 'import torch.nn as nn\n'), ((1621, 1727), 'torch.nn.Conv2d', 'nn.Conv2d', (['chann', 'chann', '(1, 3)'], {'stride': '(1)', 'padding': '(0, 1 * dilated)', 'bias': '(True)', 'dilation': '(1, dilated)'}), '(chann, chann, (1, 3), stride=1, padding=(0, 1 * dilated), bias=\n True, dilation=(1, dilated))\n', (1630, 1727), True, 'import torch.nn as nn\n'), ((1781, 1813), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['chann'], {'eps': '(0.001)'}), '(chann, eps=0.001)\n', (1795, 1813), True, 'import torch.nn as nn\n'), ((1840, 1862), 'torch.nn.Dropout2d', 'nn.Dropout2d', (['dropprob'], {}), '(dropprob)\n', (1852, 1862), True, 'import torch.nn as nn\n'), ((1954, 1968), 'torch.nn.functional.relu', 'F.relu', (['output'], {}), '(output)\n', (1960, 1968), True, 'import torch.nn.functional as F\n'), ((2063, 2077), 'torch.nn.functional.relu', 'F.relu', (['output'], {}), '(output)\n', (2069, 2077), True, 'import torch.nn.functional as F\n'), ((2139, 2153), 'torch.nn.functional.relu', 'F.relu', (['output'], {}), '(output)\n', (2145, 2153), True, 'import torch.nn.functional as F\n'), ((3450, 3524), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(128, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (3459, 3524), True, 'import torch.nn as nn\n'), ((3628, 3702), 'torch.nn.Conv2d', 'nn.Conv2d', (['(192)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(192, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (3637, 3702), True, 'import torch.nn as nn\n'), 
((3806, 3879), 'torch.nn.Conv2d', 'nn.Conv2d', (['(36)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(36, num_classes, kernel_size=1, stride=1, padding=0, bias=True)\n', (3815, 3879), True, 'import torch.nn as nn\n'), ((3983, 4057), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(16, num_classes, kernel_size=1, stride=1, padding=0, bias=False)\n', (3992, 4057), True, 'import torch.nn as nn\n'), ((4161, 4235), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', 'num_classes'], {'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(64, num_classes, kernel_size=1, stride=1, padding=0, bias=False)\n', (4170, 4235), True, 'import torch.nn as nn\n'), ((4346, 4375), 'torch.nn.MaxUnpool2d', 'nn.MaxUnpool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (4360, 4375), True, 'import torch.nn as nn\n'), ((4405, 4434), 'torch.nn.MaxUnpool2d', 'nn.MaxUnpool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (4419, 4434), True, 'import torch.nn as nn\n'), ((5594, 5665), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['x1_81'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(x1_81, scale_factor=2, mode='bilinear')\n", (5625, 5665), False, 'import torch\n'), ((5684, 5711), 'torch.cat', 'torch.cat', (['(x1_8_2, d_2)', '(1)'], {}), '((x1_8_2, d_2), 1)\n', (5693, 5711), False, 'import torch\n'), ((5797, 5867), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['x1_4'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(x1_4, scale_factor=2, mode='bilinear')\n", (5828, 5867), False, 'import torch\n'), ((5884, 5911), 'torch.cat', 'torch.cat', (['(x1_4_2, d_1)', '(1)'], {}), '((x1_4_2, d_1), 1)\n', (5893, 5911), False, 'import torch\n'), ((5995, 6065), 'torch.nn.functional.interpolate', 'torch.nn.functional.interpolate', (['x1_2'], {'scale_factor': '(2)', 'mode': '"""bilinear"""'}), "(x1_2, scale_factor=2, mode='bilinear')\n", (6026, 6065), False, 'import torch\n'), ((1013, 1027), 'torch.nn.functional.relu', 'F.relu', (['output'], {}), '(output)\n', (1019, 1027), True, 'import torch.nn.functional as F\n'), ((2328, 2350), 'torch.nn.functional.relu', 'F.relu', (['(output + input)'], {}), '(output + input)\n', (2334, 2350), True, 'import torch.nn.functional as F\n')]
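A hedged shape-check sketch for RPNet above (assuming torch is installed). Note that conv2d3 hard-codes a 36-channel input, which is num_classes plus the 16 channels of the skip branch, so the forward pass as written only lines up when num_classes == 20; input height and width must also be divisible by 8.

import torch

model = RPNet(num_classes=20)
x1_1, x1_2, x1_4, x1_8 = model(torch.randn(1, 3, 512, 256))
# x1_1: (1, 20, 512, 256); x1_2, x1_4 and x1_8 at 1/2, 1/4 and 1/8 resolution.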
import numpy as np import warnings def remove_base(seq, base, tolerance=1e-4): """ Functionality: Remove x from (x \sqcup z) Since there might be some float errors, I allow for a mismatch of the time_stamps between two seqs no larger than a threshold. The threshold value: tolerance * max_time_stamp :param list seq: x \sqcup z :param list base: x :param float tolerance: A rate. :rtype: list :return: z """ if len(seq) == 0: return seq tolerance = tolerance * seq[-1]['time_since_start'] n_seq = len(seq) n_base = len(base) seq_types = np.empty(shape=[n_seq], dtype=np.int64) seq_time_stamps = np.empty(shape=[n_seq], dtype=np.float32) base_types = np.empty(shape=[n_base], dtype=np.int64) base_time_stamps = np.empty(shape=[n_base], dtype=np.float32) for token_idx, token in enumerate(seq): seq_types[token_idx] = token['type_event'] seq_time_stamps[token_idx] = token['time_since_start'] for token_idx, token in enumerate(base): base_types[token_idx] = token['type_event'] base_time_stamps[token_idx] = token['time_since_start'] type_equal = base_types.repeat(n_seq).reshape(n_base, n_seq) type_equal = type_equal == seq_types time_equal = base_time_stamps.repeat(n_seq).reshape(n_base, n_seq) time_equal = np.abs(time_equal - seq_time_stamps) < tolerance to_remove = (type_equal & time_equal).any(axis=0) rst = list() for token_idx in np.where(~to_remove)[0]: rst.append(seq[token_idx]) if len(rst) + len(base) != len(seq): warnings.warn('Some base tokens are missing from the seq!') return rst def remove_bases_for_test(all_particles, golds, bases): """ Helper function for testing. Functionality: Remove observed tokens from proposed particles and gold seqs. :param list all_particles: x \sqcup z_m :param list golds: x \sqcup z :param list bases: x :rtype: list, list :return: particles (only z_m) and gold seqs (only z) """ assert len(all_particles) == len(golds) == len(bases) rst_particles = list() rst_golds = list() for particles, gold, base in zip(all_particles, golds, bases): new_particles = list() for particle in particles: new_particles.append(remove_base(particle, base)) rst_particles.append(new_particles) rst_golds.append(remove_base(gold, base)) return rst_particles, rst_golds # Following codes are just for testing if __name__ == '__main__': import pickle dataset = pickle.load(open('data/pilottaxi/train.pkl', 'rb')) seq = dataset['seqs'][0] # base = dataset['seqs_obs'][0] base = list() from pprint import pprint pprint('seq:') pprint(seq) pprint('base:') pprint(base) pprint('after removal:') pprint(remove_base(seq, base)) assert len(seq) == len(remove_base(seq, base))
[ "numpy.abs", "numpy.empty", "numpy.where", "pprint.pprint", "warnings.warn" ]
[((611, 650), 'numpy.empty', 'np.empty', ([], {'shape': '[n_seq]', 'dtype': 'np.int64'}), '(shape=[n_seq], dtype=np.int64)\n', (619, 650), True, 'import numpy as np\n'), ((673, 714), 'numpy.empty', 'np.empty', ([], {'shape': '[n_seq]', 'dtype': 'np.float32'}), '(shape=[n_seq], dtype=np.float32)\n', (681, 714), True, 'import numpy as np\n'), ((732, 772), 'numpy.empty', 'np.empty', ([], {'shape': '[n_base]', 'dtype': 'np.int64'}), '(shape=[n_base], dtype=np.int64)\n', (740, 772), True, 'import numpy as np\n'), ((796, 838), 'numpy.empty', 'np.empty', ([], {'shape': '[n_base]', 'dtype': 'np.float32'}), '(shape=[n_base], dtype=np.float32)\n', (804, 838), True, 'import numpy as np\n'), ((2761, 2775), 'pprint.pprint', 'pprint', (['"""seq:"""'], {}), "('seq:')\n", (2767, 2775), False, 'from pprint import pprint\n'), ((2780, 2791), 'pprint.pprint', 'pprint', (['seq'], {}), '(seq)\n', (2786, 2791), False, 'from pprint import pprint\n'), ((2796, 2811), 'pprint.pprint', 'pprint', (['"""base:"""'], {}), "('base:')\n", (2802, 2811), False, 'from pprint import pprint\n'), ((2816, 2828), 'pprint.pprint', 'pprint', (['base'], {}), '(base)\n', (2822, 2828), False, 'from pprint import pprint\n'), ((2833, 2857), 'pprint.pprint', 'pprint', (['"""after removal:"""'], {}), "('after removal:')\n", (2839, 2857), False, 'from pprint import pprint\n'), ((1354, 1390), 'numpy.abs', 'np.abs', (['(time_equal - seq_time_stamps)'], {}), '(time_equal - seq_time_stamps)\n', (1360, 1390), True, 'import numpy as np\n'), ((1497, 1517), 'numpy.where', 'np.where', (['(~to_remove)'], {}), '(~to_remove)\n', (1505, 1517), True, 'import numpy as np\n'), ((1607, 1666), 'warnings.warn', 'warnings.warn', (['"""Some base tokens are missing from the seq!"""'], {}), "('Some base tokens are missing from the seq!')\n", (1620, 1666), False, 'import warnings\n')]
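A toy check of remove_base above, assuming the module is importable; tokens are plain dicts with 'type_event' and 'time_since_start', as the function expects.

seq = [{'type_event': 0, 'time_since_start': 1.0},
       {'type_event': 1, 'time_since_start': 2.0},
       {'type_event': 0, 'time_since_start': 3.0}]
base = [{'type_event': 1, 'time_since_start': 2.0}]
assert [t['time_since_start'] for t in remove_base(seq, base)] == [1.0, 3.0]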
""" ASGI config for stockze project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/dev/howto/deployment/asgi/ """ import os import sys from pathlib import Path from django.core.asgi import get_asgi_application # This allows easy placement of apps within the interior # stockze directory. ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent sys.path.append(str(ROOT_DIR / "stockze")) # If DJANGO_SETTINGS_MODULE is unset, default to the local settings os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local") # This application object is used by any ASGI server configured to use this file. django_application = get_asgi_application() # Apply ASGI middleware here. # from helloworld.asgi import HelloWorldApplication # application = HelloWorldApplication(application) # Import websocket application here, so apps from django_application are loaded first from config.websocket import websocket_application # noqa isort:skip ''' import pypeln as pl import asyncio django_application = pl.task.map(django_application(scope, receive, send), workers=max) django_application = list(django_application) ''' async def application(scope, receive, send): if scope["type"] == "http": await django_application(scope, receive, send) elif scope["type"] == "websocket": await websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope['type']}") import socketio import engineio sio = socketio.AsyncServer(async_mode='asgi', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0'), logger=False, engineio_logger=True, ping_timeout=60000000, ping_interval= 6000000) application = engineio.ASGIApp(sio, application) ''' import pypeln as pl application = pl.task.map(application, workers=max) application = list(application) ''' #import sys #stage = pl.process.map(application, stage, workers=3, on_start=on_start, on_end=on_end) #application = pl.sync.map(application, workers=1) #application = list(application) ''' async def application(scope, receive, send): if scope["type"] == "http": django_application(scope, receive, send) await pl.task.each.django_application(scope, receive, send) elif scope["type"] == "websocket": websocket_application(scope, receive, send) await pl.task.each.websocket_application(scope, receive, send) else: raise NotImplementedError(f"Unknown scope type {scope['type']}") def application(scope, receive, send): if scope["type"] == "http": async def django_application(scope, receive, send) stage = pl.task.map(django_application, workers=max) await stage elif scope["type"] == "websocket": async def websocket_application(scope, receive, send) stage = pl.task.map(websocket_application, workers=max) await stage else: raise NotImplementedError(f"Unknown scope type {scope['type']}") ''' ''' import socketio import engineio #import gevent #import eventlet sio = socketio.AsyncServer(async_mode='asgi', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #sio = socketio.AsyncServer(async_mode='gevent/eventlet', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #fails #sio = socketio.AsyncServer(async_mode='aiohttp', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #works #sio = socketio.AsyncServer(async_mode='tornado', client_manager=socketio.AsyncRedisManager('redis://redis:6379/0')) #works aiohttp application = engineio.ASGIApp(sio, application) #application = socketio.ASGIApp(sio, application) 
[ "os.environ.setdefault", "django.core.asgi.get_asgi_application", "socketio.AsyncRedisManager", "engineio.ASGIApp", "config.websocket.websocket_application", "pathlib.Path" ]
[((574, 646), 'os.environ.setdefault', 'os.environ.setdefault', (['"""DJANGO_SETTINGS_MODULE"""', '"""config.settings.local"""'], {}), "('DJANGO_SETTINGS_MODULE', 'config.settings.local')\n", (595, 646), False, 'import os\n'), ((751, 773), 'django.core.asgi.get_asgi_application', 'get_asgi_application', ([], {}), '()\n', (771, 773), False, 'from django.core.asgi import get_asgi_application\n'), ((1797, 1831), 'engineio.ASGIApp', 'engineio.ASGIApp', (['sio', 'application'], {}), '(sio, application)\n', (1813, 1831), False, 'import engineio\n'), ((1648, 1698), 'socketio.AsyncRedisManager', 'socketio.AsyncRedisManager', (['"""redis://redis:6379/0"""'], {}), "('redis://redis:6379/0')\n", (1674, 1698), False, 'import socketio\n'), ((412, 426), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (416, 426), False, 'from pathlib import Path\n'), ((1427, 1470), 'config.websocket.websocket_application', 'websocket_application', (['scope', 'receive', 'send'], {}), '(scope, receive, send)\n', (1448, 1470), False, 'from config.websocket import websocket_application\n')]
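The scope-dispatch pattern in the ASGI module above is easy to unit-test without a running server. A minimal sketch under stated assumptions: `stub_http_app`, `stub_ws_app`, and `noop` are illustrative stand-ins, not part of the project.

import asyncio

async def stub_http_app(scope, receive, send):
    # Stand-in for django_application in a test.
    print("http handled")

async def stub_ws_app(scope, receive, send):
    # Stand-in for websocket_application in a test.
    print("websocket handled")

async def dispatcher(scope, receive, send):
    # Same dispatch shape as the project's `application` coroutine.
    if scope["type"] == "http":
        await stub_http_app(scope, receive, send)
    elif scope["type"] == "websocket":
        await stub_ws_app(scope, receive, send)
    else:
        raise NotImplementedError(f"Unknown scope type {scope['type']}")

async def noop(*args):
    # Fake receive/send callables for the test.
    pass

asyncio.run(dispatcher({"type": "http"}, noop, noop))
asyncio.run(dispatcher({"type": "websocket"}, noop, noop))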
import pyarrow as pa import rstr import random # Each tuple specifies a type of string to generate. The first entry specifies # how many unique strings to generate (rstr is pretty slow). The second # specifies how often to insert a string from that pool of unique strings into # the actual dataset compared to inserting strings from the other pools. The # third is the regex that the strings for this pool should match. generator_config = [ (100, 1, r'.*[tT][eE][rR][aA][tT][iI][dD][eE][ \t\n]+[dD][iI][vV][iI][nN][gG][ \t\n]+([sS][uU][bB])+[sS][uU][rR][fF][aA][cC][eE].*'), (100, 3, r'.*[Tt][Aa][Xx][Ii].*'), (300, 20, r'.*.*.*'), # long random strings (500, 20, r'.*'), # short random strings ] # Target size for the dataset. Generation stops when either limit is reached. target_num_rows = 10_000_000 target_num_bytes = 1_000_000_000 # Construct pools of random strings abiding by the generator configuration. random_strings = [] frequency_norm = 0 for _, frequency, _ in generator_config: frequency_norm += frequency cumulative_frequency = 0.0 for num_unique, frequency, regex in generator_config: print('Creating random strings for /' + regex + '/...') cumulative_frequency += frequency / frequency_norm string_pool = [rstr.xeger(regex) for _ in range(num_unique)] random_strings.append((cumulative_frequency, string_pool)) # Construct the test data. print('Constructing test data...') data = [] total_len = 0 print() while total_len < target_num_bytes and len(data) < target_num_rows: r = random.random() for cumulative_frequency, string_pool in random_strings: if r <= cumulative_frequency: s = random.choice(string_pool) total_len += len(s) data.append(s) break if len(data) % 1000 == 0: print('\033[A\033[K{:.1f}%...'.format( min(max(total_len / target_num_bytes, len(data) / target_num_rows) * 100, 100))) # Write the generated data to a record batch. print('Converting to record batch...') field = pa.field('text', pa.utf8(), nullable=False) schema = pa.schema([field]) arrays = [pa.array(data, pa.utf8())] with pa.RecordBatchFileWriter('input.rb', schema) as writer: print('Writing file...') writer.write(pa.RecordBatch.from_arrays(arrays, schema=schema)) print('Done!')
[ "rstr.xeger", "pyarrow.schema", "pyarrow.RecordBatch.from_arrays", "random.choice", "pyarrow.utf8", "random.random", "pyarrow.RecordBatchFileWriter" ]
[((2097, 2115), 'pyarrow.schema', 'pa.schema', (['[field]'], {}), '([field])\n', (2106, 2115), True, 'import pyarrow as pa\n'), ((1545, 1560), 'random.random', 'random.random', ([], {}), '()\n', (1558, 1560), False, 'import random\n'), ((2061, 2070), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2068, 2070), True, 'import pyarrow as pa\n'), ((2158, 2202), 'pyarrow.RecordBatchFileWriter', 'pa.RecordBatchFileWriter', (['"""input.rb"""', 'schema'], {}), "('input.rb', schema)\n", (2182, 2202), True, 'import pyarrow as pa\n'), ((1265, 1282), 'rstr.xeger', 'rstr.xeger', (['regex'], {}), '(regex)\n', (1275, 1282), False, 'import rstr\n'), ((2141, 2150), 'pyarrow.utf8', 'pa.utf8', ([], {}), '()\n', (2148, 2150), True, 'import pyarrow as pa\n'), ((2260, 2309), 'pyarrow.RecordBatch.from_arrays', 'pa.RecordBatch.from_arrays', (['arrays'], {'schema': 'schema'}), '(arrays, schema=schema)\n', (2286, 2309), True, 'import pyarrow as pa\n'), ((1676, 1702), 'random.choice', 'random.choice', (['string_pool'], {}), '(string_pool)\n', (1689, 1702), False, 'import random\n')]
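Reading the generated file back is symmetric; a short sketch using pyarrow's IPC reader, assuming `input.rb` was produced by the script above:

import pyarrow as pa

reader = pa.ipc.open_file('input.rb')
print('batches:', reader.num_record_batches)
table = reader.read_all()  # concatenates all batches into a single Table
print('rows:', table.num_rows)
print('sample:', table.column('text').to_pylist()[:3])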
from setuptools import setup, find_packages setup( name = "disney", version = "1.0", description = "A history of Shanghai Disney waiting time", long_description = "A history of Shanghai Disney waiting time", license = "Apache License", url = "http://s.gaott.info", author = "gtt116", author_email = "<EMAIL>", packages = find_packages(), include_package_data = True, platforms = "any", install_requires = [], scripts = [], entry_points = { 'console_scripts': [ 'disney-fetch = disney.fetch:main', 'disney-publish = disney.publish:main', ] } )
[ "setuptools.find_packages" ]
[((359, 374), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (372, 374), False, 'from setuptools import setup, find_packages\n')]
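The two console_scripts entry points expect `main()` callables in `disney/fetch.py` and `disney/publish.py`. A hypothetical skeleton of such a module (the body is illustrative; the real fetch logic lives in the project):

# disney/fetch.py (hypothetical skeleton)
import sys

def main():
    """Entry point wired to the `disney-fetch` console script."""
    print("fetching Shanghai Disney waiting times...")
    return 0

if __name__ == "__main__":
    sys.exit(main())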
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 31 16:55:35 2019
From CS230 Code Examples
@author: qwang
"""

import os
import shutil
import torch
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

#%%
def save_dict_to_json(d, json_path):
    """
    Save dict of floats to json file
    d: dict of float-castable values (np.float, int, float, etc.)
    """
    with open(json_path, 'w') as fout:
        d = {key: float(value) for key, value in d.items()}
        json.dump(d, fout, indent=4)

#%% Checkpoint
def save_checkpoint(state, is_best, checkdir):
    """
    Save model and training parameters at checkpoint + 'last.pth.tar'.
    If is_best==True, also saves checkpoint + 'best.pth.tar'

    Params:
        state: (dict) contains model's state_dict, may contain other keys such as epoch, optimizer state_dict
        is_best: (bool) True if it is the best model seen till now
        checkdir: (string) folder where parameters are to be saved
    """
    filepath = os.path.join(checkdir, 'last.pth.tar')
    if not os.path.exists(checkdir):
        os.mkdir(checkdir)
    torch.save(state, filepath)

    if is_best:
        shutil.copyfile(filepath, os.path.join(checkdir, 'best.pth.tar'))


def load_checkpoint(checkfile, model, optimizer=None):
    """
    Load model parameters (state_dict) from checkfile.
    If optimizer is provided, loads state_dict of optimizer assuming it is present in checkpoint.

    Params:
        checkfile: (string) filename which needs to be loaded
        model: (torch.nn.Module) model for which the parameters are loaded
        optimizer: (torch.optim) optional: resume optimizer from checkpoint
    """
    if not os.path.exists(checkfile):
        raise FileNotFoundError("File doesn't exist {}".format(checkfile))
    checkfile = torch.load(checkfile)
    model.load_state_dict(checkfile['state_dict'])

    if optimizer:
        optimizer.load_state_dict(checkfile['optim_dict'])

    return checkfile

#%% Metrics
def metrics(preds, y, th=0.5):
    """
    Params:
        preds: torch tensor, [batch_size, output_dim]
        y: torch tensor, [batch_size]
    Returns:
        A dictionary of accuracy, f1 score, recall, precision and specificity
    """
    # y_preds = preds.argmax(dim=1, keepdim=False)  # [batch_size, output_dim] --> [batch_size]
    if torch.cuda.device_count() == 1:
        y_preds = (preds[:,1] > th).int().type(torch.LongTensor).cuda()
    else:
        y_preds = (preds[:,1] > th).int().type(torch.LongTensor)

    ones = torch.ones_like(y_preds)
    zeros = torch.zeros_like(y_preds)

    pos = torch.eq(y_preds, y).sum().item()
    tp = (torch.eq(y_preds, ones) & torch.eq(y, ones)).sum().item()
    tn = (torch.eq(y_preds, zeros) & torch.eq(y, zeros)).sum().item()
    fp = (torch.eq(y_preds, ones) & torch.eq(y, zeros)).sum().item()
    fn = (torch.eq(y_preds, zeros) & torch.eq(y, ones)).sum().item()
    assert pos == tp + tn

    acc = pos / y.shape[0]
    f1 = 2*tp / (2*tp + fp + fn) if (2*tp + fp + fn != 0) else 0
    rec = tp / (tp + fn) if (tp + fn != 0) else 0
    ppv = tp / (tp + fp) if (tp + fp != 0) else 0
    spc = tn / (tn + fp) if (tn + fp != 0) else 0

    return {'accuracy': acc, 'f1': f1, 'recall': rec, 'precision': ppv, 'specificity': spc}

#%% Plot performance
def plot_prfs(prfs_json_path):
    with open(prfs_json_path) as f:
        dat = json.load(f)

    # Create scores dataframe
    epochs = int(len(dat['prfs'])/2)
    cols = ['Loss', 'Accuracy', 'F1', 'Recall', 'Precision', 'Specificity']
    train_df = pd.DataFrame(columns=cols)
    valid_df = pd.DataFrame(columns=cols)
    for i in range(epochs):
        train_df.loc[i] = list(dat['prfs']['train_'+str(i+1)].values())
        valid_df.loc[i] = list(dat['prfs']['valid_'+str(i+1)].values())

    # Plot
    plt.figure(figsize=(15,5))
    x = np.arange(len(train_df)) + 1

    # Loss / F1
    plt.subplot(1, 2, 1)
    plt.title("Loss and F1")
    plt.plot(x, train_df['Loss'], label="train_loss", color='C5')
    plt.plot(x, valid_df['Loss'], label="val_loss", color='C5', linestyle='--')
    plt.plot(x, train_df['F1'], label="train_f1", color='C9')
    plt.plot(x, valid_df['F1'], label="val_f1", color='C9', linestyle='--')
    plt.xticks(np.arange(2, len(x)+2, step=2))
    plt.legend(loc='upper right')

    # Accuracy / Recall
    plt.subplot(1, 2, 2)
    plt.title("Accuracy and Recall")
    plt.plot(x, train_df['Accuracy'], label="train_acc", color='C0', alpha=0.8)
    plt.plot(x, valid_df['Accuracy'], label="val_acc", color='C0', linestyle='--', alpha=0.8)
    plt.plot(x, train_df['Recall'], label="train_rec", color='C1', alpha=0.8)
    plt.plot(x, valid_df['Recall'], label="val_rec", color='C1', linestyle='--', alpha=0.8)
    plt.xticks(np.arange(2, len(x)+2, step=2))
    plt.legend(loc='lower right')

    # Save png
    output_dir = os.path.dirname(prfs_json_path)
    plt.savefig(os.path.join(output_dir, 'prfs.png'))
[ "torch.ones_like", "pandas.DataFrame", "matplotlib.pyplot.subplot", "matplotlib.pyplot.title", "json.dump", "os.mkdir", "torch.zeros_like", "matplotlib.pyplot.plot", "json.load", "torch.eq", "torch.load", "matplotlib.pyplot.legend", "os.path.dirname", "os.path.exists", "torch.cuda.device_count", "torch.save", "matplotlib.pyplot.figure", "os.path.join" ]
[((1118, 1156), 'os.path.join', 'os.path.join', (['checkdir', '"""last.pth.tar"""'], {}), "(checkdir, 'last.pth.tar')\n", (1130, 1156), False, 'import os\n'), ((1230, 1257), 'torch.save', 'torch.save', (['state', 'filepath'], {}), '(state, filepath)\n', (1240, 1257), False, 'import torch\n'), ((1958, 1979), 'torch.load', 'torch.load', (['checkfile'], {}), '(checkfile)\n', (1968, 1979), False, 'import torch\n'), ((2861, 2885), 'torch.ones_like', 'torch.ones_like', (['y_preds'], {}), '(y_preds)\n', (2876, 2885), False, 'import torch\n'), ((2898, 2923), 'torch.zeros_like', 'torch.zeros_like', (['y_preds'], {}), '(y_preds)\n', (2914, 2923), False, 'import torch\n'), ((3873, 3963), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Loss', 'Accuracy', 'F1', 'Recall', 'Precision', 'Specificity']"}), "(columns=['Loss', 'Accuracy', 'F1', 'Recall', 'Precision',\n 'Specificity'])\n", (3885, 3963), True, 'import pandas as pd\n'), ((3975, 4065), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Loss', 'Accuracy', 'F1', 'Recall', 'Precision', 'Specificity']"}), "(columns=['Loss', 'Accuracy', 'F1', 'Recall', 'Precision',\n 'Specificity'])\n", (3987, 4065), True, 'import pandas as pd\n'), ((4255, 4282), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 5)'}), '(figsize=(15, 5))\n', (4265, 4282), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4362), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4353, 4362), True, 'import matplotlib.pyplot as plt\n'), ((4367, 4391), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss and F1"""'], {}), "('Loss and F1')\n", (4376, 4391), True, 'import matplotlib.pyplot as plt\n'), ((4396, 4457), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "train_df['Loss']"], {'label': '"""train_loss"""', 'color': '"""C5"""'}), "(x, train_df['Loss'], label='train_loss', color='C5')\n", (4404, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4462, 4537), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "valid_df['Loss']"], {'label': '"""val_loss"""', 'color': '"""C5"""', 'linestyle': '"""--"""'}), "(x, valid_df['Loss'], label='val_loss', color='C5', linestyle='--')\n", (4470, 4537), True, 'import matplotlib.pyplot as plt\n'), ((4542, 4599), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "train_df['F1']"], {'label': '"""train_f1"""', 'color': '"""C9"""'}), "(x, train_df['F1'], label='train_f1', color='C9')\n", (4550, 4599), True, 'import matplotlib.pyplot as plt\n'), ((4604, 4675), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "valid_df['F1']"], {'label': '"""val_f1"""', 'color': '"""C9"""', 'linestyle': '"""--"""'}), "(x, valid_df['F1'], label='val_f1', color='C9', linestyle='--')\n", (4612, 4675), True, 'import matplotlib.pyplot as plt\n'), ((4727, 4756), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4737, 4756), True, 'import matplotlib.pyplot as plt\n'), ((4785, 4805), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4796, 4805), True, 'import matplotlib.pyplot as plt\n'), ((4810, 4842), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy and Recall"""'], {}), "('Accuracy and Recall')\n", (4819, 4842), True, 'import matplotlib.pyplot as plt\n'), ((4847, 4922), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "train_df['Accuracy']"], {'label': '"""train_acc"""', 'color': '"""C0"""', 'alpha': '(0.8)'}), "(x, train_df['Accuracy'], label='train_acc', color='C0', alpha=0.8)\n", (4855, 4922), True, 'import matplotlib.pyplot as 
plt\n'), ((4927, 5021), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "valid_df['Accuracy']"], {'label': '"""val_acc"""', 'color': '"""C0"""', 'linestyle': '"""--"""', 'alpha': '(0.8)'}), "(x, valid_df['Accuracy'], label='val_acc', color='C0', linestyle=\n '--', alpha=0.8)\n", (4935, 5021), True, 'import matplotlib.pyplot as plt\n'), ((5161, 5234), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "train_df['Recall']"], {'label': '"""train_rec"""', 'color': '"""C1"""', 'alpha': '(0.8)'}), "(x, train_df['Recall'], label='train_rec', color='C1', alpha=0.8)\n", (5169, 5234), True, 'import matplotlib.pyplot as plt\n'), ((5239, 5330), 'matplotlib.pyplot.plot', 'plt.plot', (['x', "valid_df['Recall']"], {'label': '"""val_rec"""', 'color': '"""C1"""', 'linestyle': '"""--"""', 'alpha': '(0.8)'}), "(x, valid_df['Recall'], label='val_rec', color='C1', linestyle='--',\n alpha=0.8)\n", (5247, 5330), True, 'import matplotlib.pyplot as plt\n'), ((5378, 5407), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (5388, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5449, 5480), 'os.path.dirname', 'os.path.dirname', (['prfs_json_path'], {}), '(prfs_json_path)\n', (5464, 5480), False, 'import os\n'), ((573, 601), 'json.dump', 'json.dump', (['d', 'fout'], {'indent': '(4)'}), '(d, fout, indent=4)\n', (582, 601), False, 'import json\n'), ((1164, 1188), 'os.path.exists', 'os.path.exists', (['checkdir'], {}), '(checkdir)\n', (1178, 1188), False, 'import os\n'), ((1207, 1225), 'os.mkdir', 'os.mkdir', (['checkdir'], {}), '(checkdir)\n', (1215, 1225), False, 'import os\n'), ((1849, 1874), 'os.path.exists', 'os.path.exists', (['checkfile'], {}), '(checkfile)\n', (1863, 1874), False, 'import os\n'), ((2535, 2560), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (2558, 2560), False, 'import torch\n'), ((3769, 3781), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3778, 3781), False, 'import json\n'), ((5497, 5533), 'os.path.join', 'os.path.join', (['output_dir', '"""prfs.png"""'], {}), "(output_dir, 'prfs.png')\n", (5509, 5533), False, 'import os\n'), ((1317, 1355), 'os.path.join', 'os.path.join', (['checkdir', '"""best.pth.tar"""'], {}), "(checkdir, 'best.pth.tar')\n", (1329, 1355), False, 'import os\n'), ((2939, 2959), 'torch.eq', 'torch.eq', (['y_preds', 'y'], {}), '(y_preds, y)\n', (2947, 2959), False, 'import torch\n'), ((2983, 3006), 'torch.eq', 'torch.eq', (['y_preds', 'ones'], {}), '(y_preds, ones)\n', (2991, 3006), False, 'import torch\n'), ((3009, 3026), 'torch.eq', 'torch.eq', (['y', 'ones'], {}), '(y, ones)\n', (3017, 3026), False, 'import torch\n'), ((3051, 3075), 'torch.eq', 'torch.eq', (['y_preds', 'zeros'], {}), '(y_preds, zeros)\n', (3059, 3075), False, 'import torch\n'), ((3078, 3096), 'torch.eq', 'torch.eq', (['y', 'zeros'], {}), '(y, zeros)\n', (3086, 3096), False, 'import torch\n'), ((3121, 3144), 'torch.eq', 'torch.eq', (['y_preds', 'ones'], {}), '(y_preds, ones)\n', (3129, 3144), False, 'import torch\n'), ((3147, 3165), 'torch.eq', 'torch.eq', (['y', 'zeros'], {}), '(y, zeros)\n', (3155, 3165), False, 'import torch\n'), ((3190, 3214), 'torch.eq', 'torch.eq', (['y_preds', 'zeros'], {}), '(y_preds, zeros)\n', (3198, 3214), False, 'import torch\n'), ((3217, 3234), 'torch.eq', 'torch.eq', (['y', 'ones'], {}), '(y, ones)\n', (3225, 3234), False, 'import torch\n')]
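A quick round trip through the checkpoint helpers, assuming the file above is importable as `utils` (the module name is an assumption):

import torch
import torch.nn as nn
import utils  # hypothetical module name for the file above

model = nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

state = {'epoch': 1,
         'state_dict': model.state_dict(),
         'optim_dict': optimizer.state_dict()}
utils.save_checkpoint(state, is_best=True, checkdir='ckpt')

# Restore the weights (and optimizer state) later:
utils.load_checkpoint('ckpt/best.pth.tar', model, optimizer)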
#!/usr/bin/python3
# -*- coding:utf-8 -*-
import os

import pandas as pd

# For each indicator tag, count how often price keeps moving in the signal's
# direction 1/2/3/4/5/10/20/30 days after a 'rush' (buy) or 'run' (sell) signal.
l_rush = {
    'test':    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'price__': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'rsi6_12': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma4___9': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma9__18': [0, 0, 0, 0, 0, 0, 0, 0, 0]}
l_run = {
    'test':    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'price__': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'rsi6_12': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma4___9': [0, 0, 0, 0, 0, 0, 0, 0, 0],
    'ma9__18': [0, 0, 0, 0, 0, 0, 0, 0, 0]}


def print_stats(title, stats):
    """Print, per tag, the share of signals still moving in the signal's direction after N days."""
    print(title, stats)
    days = [1, 2, 3, 4, 5, 10, 20, 30]
    for key, value in stats.items():
        if value[0] != 0:
            ratios = ['%dday:%.2f%%' % (d, v / value[0] * 100)
                      for d, v in zip(days, value[1:])]
            print('%s:%s' % (key, ','.join(ratios)))


if os.path.exists('./statistics.csv'):
    csv_data = pd.read_csv('./statistics.csv', header=None)  # read the data
    data = csv_data.values.tolist()
    name = 'init'
    price = 100
    hold = False
    buy = 1
    sell = 1
    number = -1
    total = -100  # offsets the bogus first print for the 'init' placeholder

    for i in data:
        code = i[0]
        tag = i[2]
        if i[3] == 'run':
            l_run[tag][0] += 1
            for j in range(1, 9):
                if i[5] > i[5+j]:
                    l_run[tag][j] += 1
        elif i[3] == 'rush':
            l_rush[tag][0] += 1
            for j in range(1, 9):
                if i[5] < i[5+j]:
                    l_rush[tag][j] += 1

        # Simulate buying on 'rush' and selling on 'run' for the ma9__18 tag,
        # starting each stock at a notional price of 100.
        if i[1] != name:
            print('name:', name, round(price, 2))
            number += 1
            name = i[1]
            total += price
            price = 100
            hold = False
        if i[2] == 'ma9__18':
            if i[3] == 'rush' and not hold:
                hold = True
                buy = i[6]
            if i[3] == 'run' and hold:
                hold = False
                sell = i[6]
                price = price*sell/buy

    print('name:', name, round(price, 2))
    total += price
    number += 1
    print('total:', round(total, 2), number, round(total/number, 2))

    print_stats('rush', l_rush)
    print_stats('\nrun', l_run)
[ "pandas.read_csv", "os.path.exists" ]
[((1086, 1120), 'os.path.exists', 'os.path.exists', (['"""./statistics.csv"""'], {}), "('./statistics.csv')\n", (1100, 1120), False, 'import os\n'), ((1137, 1181), 'pandas.read_csv', 'pd.read_csv', (['"""./statistics.csv"""'], {'header': 'None'}), "('./statistics.csv', header=None)\n", (1148, 1181), True, 'import pandas as pd\n')]
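Judging by the column indices used above, `statistics.csv` needs no header and at least 14 columns: code, name, tag, signal ('rush'/'run'), one field the script ignores (possibly a date), then prices at offsets 0/1/2/3/4/5/10/20/30 days. A hedged sketch of one synthetic row for smoke-testing; the concrete values are made up:

import pandas as pd

# code, name, tag, signal, unused field, price_day0 .. price_day30
row = ['600000', 'demo', 'ma9__18', 'rush', '2019-01-01',
       10.0, 10.2, 10.4, 10.5, 10.6, 10.7, 11.0, 11.5, 12.0]
pd.DataFrame([row]).to_csv('./statistics.csv', header=False, index=False)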
"""Utility functions.""" import os def exec_os_cmd(command): return os.popen(command).read()
[ "os.popen" ]
[((74, 91), 'os.popen', 'os.popen', (['command'], {}), '(command)\n', (82, 91), False, 'import os\n')]
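`os.popen` works, but it swallows the exit status and stderr. A `subprocess`-based variant, shown here as an alternative rather than the module's actual API, is usually preferable:

import subprocess

def exec_os_cmd(command):
    # shell=True mirrors os.popen semantics; capture stdout as text.
    return subprocess.run(command, shell=True,
                          capture_output=True, text=True).stdout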
# ====================================================================== # Science for Hungry People # Advent of Code 2015 Day 15 -- <NAME> -- https://adventofcode.com # # Python implementation by Dr. <NAME> III # ====================================================================== # ====================================================================== # i n g r e d i e n t . p y # ====================================================================== "Ingredient for the Advent of Code 2015 Day 15 puzzle" # ---------------------------------------------------------------------- # import # ---------------------------------------------------------------------- import re # ---------------------------------------------------------------------- # constants # ---------------------------------------------------------------------- # Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8 RE_INGREDIENT = re.compile("([A-Za-z]+): capacity (-?[0-9]+), durability (-?[0-9]+)," + " flavor (-?[0-9]+), texture (-?[0-9]+), calories (-?[0-9]+)") # ====================================================================== # Ingredient # ====================================================================== class Ingredient(object): # pylint: disable=R0902, R0205 "Object for Science for Hungry People" def __init__(self, text=None, part2=False): # 1. Set the initial values self.part2 = part2 self.text = text self.name = "" self.qualities = [0, 0, 0, 0] self.cals = 0 # 2. Process text (if any) if text is not None and len(text) > 0: match = RE_INGREDIENT.match(text) if not match: print("Unable to parse", text) else: name, capacity, durability, flavor, texture, calories = match.groups() self.name = name self.qualities = [int(capacity), int(durability), int(flavor), int(texture)] self.cals = int(calories) def properties(self, teaspoons=1): "Return the score for the ingredient" return [teaspoons * _ for _ in self.qualities] def calories(self, teaspoons=1): "Return the calaries" return teaspoons * self.cals # ---------------------------------------------------------------------- # module initialization # ---------------------------------------------------------------------- if __name__ == '__main__': pass # ====================================================================== # end i n g r e d i e n t . p y end # ======================================================================
[ "re.compile" ]
[((1083, 1221), 're.compile', 're.compile', (["('([A-Za-z]+): capacity (-?[0-9]+), durability (-?[0-9]+),' +\n ' flavor (-?[0-9]+), texture (-?[0-9]+), calories (-?[0-9]+)')"], {}), "('([A-Za-z]+): capacity (-?[0-9]+), durability (-?[0-9]+),' +\n ' flavor (-?[0-9]+), texture (-?[0-9]+), calories (-?[0-9]+)')\n", (1093, 1221), False, 'import re\n')]
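Usage sketch, assuming the file is importable as `ingredient` (the module name is an assumption): parse the sample line from the comment above and score four teaspoons.

from ingredient import Ingredient  # hypothetical module name

ing = Ingredient("Butterscotch: capacity -1, durability -2,"
                 " flavor 6, texture 3, calories 8")
print(ing.name)           # Butterscotch
print(ing.properties(4))  # [-4, -8, 24, 12]
print(ing.calories(4))    # 32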
import torch
from torch import randperm
from torch._utils import _accumulate
from torch.utils.data import Dataset

from typing import TypeVar, List, Sequence

T_co = TypeVar('T_co', covariant=True)
T = TypeVar('T')


class Subset(Dataset[T_co]):
    r"""
    Subset of a dataset at specified indices.

    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset
    """
    dataset: Dataset[T_co]
    indices: Sequence[int]

    def __init__(self, dataset: Dataset[T_co], indices: Sequence[int]) -> None:
        self.dataset = dataset
        self.indices = indices

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)

    def inverse_transform(self, x):
        if hasattr(self.dataset, "inverse_transform"):
            return self.dataset.inverse_transform(x)
        else:
            return x


def order_split(dataset: Dataset[T], lengths: list) -> List[Subset[T]]:
    r"""
    Split a dataset into non-overlapping new datasets of given lengths,
    in the original order (no shuffling). A single ``-1`` entry in
    ``lengths`` is replaced in place by the remaining length, e.g.:

    >>> order_split(range(10), [3, 7])
    >>> order_split(range(10), [3, -1])

    Arguments:
        dataset (Dataset): Dataset to be split
        lengths (list): lengths of splits to be produced
    """
    try:
        idx = lengths.index(-1)
        # The -1 placeholder itself contributes -1 to sum(lengths),
        # hence the extra -1 so that the lengths sum to len(dataset).
        lengths[idx] = len(dataset) - sum(lengths) - 1
    except ValueError:
        # No -1 placeholder: the lengths must account for the whole dataset.
        if sum(lengths) != len(dataset):  # type: ignore
            raise ValueError("Sum of input lengths does not equal the length of the input dataset!")

    # indices = randperm(sum(lengths), generator=generator).tolist()
    indices = torch.arange(sum(lengths), dtype=torch.long).tolist()
    return [Subset(dataset, indices[offset - length : offset])
            for offset, length in zip(_accumulate(lengths), lengths)]
[ "torch.utils.data.Subset", "typing.TypeVar", "torch._utils._accumulate" ]
[((184, 215), 'typing.TypeVar', 'TypeVar', (['"""T_co"""'], {'covariant': '(True)'}), "('T_co', covariant=True)\n", (191, 215), False, 'from typing import TypeVar, List, Optional, Tuple, Sequence\n'), ((220, 232), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (227, 232), False, 'from typing import TypeVar, List, Optional, Tuple, Sequence\n'), ((2045, 2093), 'torch.utils.data.Subset', 'Subset', (['dataset', 'indices[offset - length:offset]'], {}), '(dataset, indices[offset - length:offset])\n', (2051, 2093), False, 'from torch.utils.data import Dataset, Subset\n'), ((2122, 2142), 'torch._utils._accumulate', '_accumulate', (['lengths'], {}), '(lengths)\n', (2133, 2142), False, 'from torch._utils import _accumulate\n')]
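Example use, assuming the file is importable as `split` (the module name is an assumption). Note that the ``-1`` entry is replaced in place with the remainder:

from split import order_split  # hypothetical module name

train, valid = order_split(list(range(10)), [7, -1])
print(list(train))  # [0, 1, 2, 3, 4, 5, 6]
print(list(valid))  # [7, 8, 9]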
from graphviz import Digraph from octopus.api.edge import (EDGE_UNCONDITIONAL, EDGE_CONDITIONAL_TRUE, EDGE_CONDITIONAL_FALSE, EDGE_FALLTHROUGH, EDGE_CALL) import logging log = logging.getLogger(__name__) log.setLevel(level=logging.DEBUG) def insert_edges_to_graph(graph, edges, call): # remove duplicate edges edges = list(set(edges)) # create link between block for edge in edges: if edge.type == EDGE_UNCONDITIONAL: graph.edge(edge.node_from, edge.node_to, color='blue') elif edge.type == EDGE_CONDITIONAL_TRUE: graph.edge(edge.node_from, edge.node_to, color='green') elif edge.type == EDGE_CONDITIONAL_FALSE: graph.edge(edge.node_from, edge.node_to, color='red') elif edge.type == EDGE_FALLTHROUGH: graph.edge(edge.node_from, edge.node_to, color='cyan') elif edge.type == EDGE_CALL and call: graph.edge(edge.node_from, edge.node_to, color='yellow') else: raise Exception('Edge type unknown') class Graph(object): def __init__(self, basicblocks, edges, functions=None, filename='graph.gv', design=None): self.basicblocks = basicblocks self.edges = edges self.filename = filename self.design = design or {'shape': 'box', 'fontname': 'Courier', 'fontsize': '30.0', 'rank': 'same'} def view_ssa(self, call=False, view=True): self.view(view=view, call=call, ssa=True) def view_simplify(self, call=False, view=True): self.view(view=view, call=call, simplify=True) def view(self, view=True, simplify=False, call=False, ssa=False): g = Digraph(self.filename, filename=self.filename) with g.subgraph(name='global', node_attr=self.design) as c: c.label = 'global' # create all the basicblocks (blocks) for basicblock in self.basicblocks: if simplify: # create node c.node(basicblock.name, label=basicblock.name) else: if ssa: label = basicblock.instructions_ssa() else: label = basicblock.instructions_details() # the escape sequences "\n", "\l" and "\r" # divide the label into lines, centered, # left-justified, and right-justified, respectively. label = label.replace('\n', '\l') # create node c.node(basicblock.name, label=label) # insert edges on the graph insert_edges_to_graph(g, self.edges, call) g.render(self.filename, view=view) # g.view() class CFGGraph(Graph): def __init__(self, cfg, filename='graph.cfg.gv', design=None): Graph.__init__(self, cfg.basicblocks, cfg.edges, filename=filename, design=design) self.cfg = cfg def view_functions_ssa(self, call=False, view=True): self.view_functions(view=view, call=call, ssa=True) def view_functions_simplify(self, call=False, view=True): self.view_functions(view=view, call=call, simplify=True) def view_functions(self, view=True, simplify=False, call=False, ssa=False, color='grey'): g = Digraph('G', filename=self.filename) g.attr(rankdir='TB') g.attr(overlap='scale') g.attr(splines='spline') g.attr(ratio='fill') count = 0 for func in self.cfg.functions: with g.subgraph(name='cluster_%d' % count, node_attr=self.design) as c: if func.name == func.prefered_name: name = func.name else: name = func.prefered_name + ' - ' + func.name c.attr(label=name) c.attr(color=color) c.attr(fontsize='50.0') c.attr(overlap='false') c.attr(splines='spline') c.attr(ratio='fill') # create all the basicblocks (blocks) for basicblock in func.basicblocks: if simplify: # create node c.node(basicblock.name, label=basicblock.name, splines='true') else: if ssa: label = basicblock.instructions_ssa() else: label = basicblock.instructions_details() # the escape sequences "\n", "\l" and "\r" # divide the label into lines, centered, # left-justified, and right-justified, respectively. 
label = label.replace('\n', '\l') # create node c.node(basicblock.name, label=label) count += 1 # insert edges on the graph insert_edges_to_graph(g, self.cfg.edges, call) g.render(self.filename, view=view) # g.view()
[ "graphviz.Digraph", "logging.getLogger" ]
[((238, 265), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (255, 265), False, 'import logging\n'), ((1761, 1807), 'graphviz.Digraph', 'Digraph', (['self.filename'], {'filename': 'self.filename'}), '(self.filename, filename=self.filename)\n', (1768, 1807), False, 'from graphviz import Digraph\n'), ((3417, 3453), 'graphviz.Digraph', 'Digraph', (['"""G"""'], {'filename': 'self.filename'}), "('G', filename=self.filename)\n", (3424, 3453), False, 'from graphviz import Digraph\n')]
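The edge-coloring chain in `insert_edges_to_graph` can be expressed as a lookup table. A sketch of that refactor under the same edge constants; note one deliberate difference: the original raises on call edges when `call` is False, which looks unintended, so this version skips them instead.

EDGE_COLORS = {
    EDGE_UNCONDITIONAL: 'blue',
    EDGE_CONDITIONAL_TRUE: 'green',
    EDGE_CONDITIONAL_FALSE: 'red',
    EDGE_FALLTHROUGH: 'cyan',
    EDGE_CALL: 'yellow',
}

def insert_edges_to_graph(graph, edges, call):
    for edge in set(edges):  # drop duplicate edges
        if edge.type == EDGE_CALL and not call:
            continue  # call edges are only drawn when requested
        try:
            graph.edge(edge.node_from, edge.node_to, color=EDGE_COLORS[edge.type])
        except KeyError:
            raise Exception('Edge type unknown')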
""" Module for Brokers Brokers hold data, and provide it or subsets of it on request when requesting price for buying and selling, prices will likely differ """ import copy import pandas as pd from pandas.tseries.offsets import DateOffset class PaperBroker: def __init__(self, data_df, slippage_time=DateOffset(seconds=0), transaction_cost=0.0, spread_pct=0.0): """ create a homemade paper trading brokerage account Args: - data_df: pandas dataframe, indexed by time, columns as ticker names Keyword Args: - slippage_time: pandas DateOffset object, specifying delay broker has between request and real-time info (default 0 seconds) - transcation_cost: cost to perform a transaction (default 0) - spread_pct: the spread percentage between buy/sell price, defaults to 0%. """ if isinstance(data_df, pd.DataFrame): self._historical_data = data_df else: raise TypeError("data_df supplied to PaperBroker should be a \ pandas DataFrame ") if isinstance(slippage_time, pd.DateOffset): self._slippage_time = slippage_time else: raise TypeError("splippage time should be a pandas DateOffset") if isinstance(transaction_cost,(float,int)): if transaction_cost>=0: self.transaction_cost = transaction_cost else: raise ValueError("transcation fee cannot be < 0") else: raise TypeError("transcation_cost should be a number") if isinstance(spread_pct,(float,int)): if (spread_pct <= 100.0) and spread_pct >=0.0: self.spread_pct = spread_pct else: raise ValueError("spread_pct should be a percentage: on [0,1]") else: raise TypeError("spread_pct should be a number") def clone(self): return copy.deepcopy(self) def next_extant_time(self,time): if time<=self._historical_data.index.max(): t_ind = self._historical_data.index.get_loc(time, 'backfill') time = self._historical_data.index[t_ind] return time else: raise ValueError("requesting a time later than available in data") # ---------- information requests ------------------------------------------ def get_timeindex_subset(self,t0,t1): if not isinstance(t0,pd.Timestamp): raise TypeError("t0 should be a pandas timestamp") if not isinstance(t1,pd.Timestamp): raise TypeError("t1 should be a pandas timestamp") if t0<self._historical_data.index.min(): raise ValueError("requesting data prior to earliest time") if t1>self._historical_data.index.max(): raise ValueError("requesting data after latest time") return copy.deepcopy(self._historical_data.loc[t0:t1].index) def get_firstlast_times(self): t0 = self._historical_data.index.min() t1 = self._historical_data.index.max() return t0,t1 def get_tick_list(self): return self._historical_data.columns.to_list() def get_price_list(self,ticker_list,time0,time1): if isinstance(ticker_list,str): ticker_list=[ticker_list] if set(ticker_list).issubset(self._historical_data.columns): return self._historical_data.loc[time0:time1][ticker_list] else: raise ValueError("ticker_list contained tickers that do not exist in historical data") def get_data_subset(self,ticker,time): max_time = self._historical_data.index.max() return self.get_price_list(ticker,time,max_time) def get_unslipped_price(self,ticker,time): #time = time+self.broker._slippage_time time = self.next_extant_time(time) if ticker in self._historical_data: return self._historical_data.loc[time][ticker] else: raise ValueError("ticker:",ticker," not available in historical_data") def get_price(self,ticker,time): time = time+self._slippage_time time = self.next_extant_time(time) if ticker in self._historical_data: return self._historical_data.loc[time][ticker], self.transaction_cost,time else: raise 
ValueError("ticker:",ticker," not available in historical_data") def get_buy_price(self,ticker,time): p,f,t = self.get_price(ticker,time) return p*(1.0+self.spread_pct/200.0),f,t def get_sell_price(self,ticker,time): p,f,t = self.get_price(ticker,time) return p*(1.0-self.spread_pct/200.0),f,t
[ "copy.deepcopy", "pandas.tseries.offsets.DateOffset" ]
[((343, 364), 'pandas.tseries.offsets.DateOffset', 'DateOffset', ([], {'seconds': '(0)'}), '(seconds=0)\n', (353, 364), False, 'from pandas.tseries.offsets import DateOffset\n'), ((2111, 2130), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2124, 2130), False, 'import copy\n'), ((3056, 3109), 'copy.deepcopy', 'copy.deepcopy', (['self._historical_data.loc[t0:t1].index'], {}), '(self._historical_data.loc[t0:t1].index)\n', (3069, 3109), False, 'import copy\n')]
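Minimal usage sketch with synthetic minute data; the ticker and prices are made up. Note that `Index.get_loc(time, 'backfill')` relies on the positional `method` argument, which was removed in pandas 2.0, so this code assumes an older pandas.

import pandas as pd

idx = pd.date_range('2020-01-01 09:30', periods=5, freq='min')
data = pd.DataFrame({'AAA': [10.0, 10.1, 10.2, 10.1, 10.3]}, index=idx)

broker = PaperBroker(data, transaction_cost=1.0, spread_pct=0.5)
price, fee, t = broker.get_buy_price('AAA', idx[0])
print(price, fee, t)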
import cv2
import numpy as np

# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture('../datasets/opencv/fish.mp4')

while True:
    _ret, frame = cap.read()
    frame = cv2.resize(frame, (500, 400))
    cv2.imshow('opencv camera', frame)

    k = cv2.waitKey(1)  # wait 1 msec
    if k == 27 or k == 13:
        break

cap.release()
cv2.destroyAllWindows()

# Reopen the capture: the first loop released it, so it must be created
# again before the HSV-masking pass below.
cap = cv2.VideoCapture('../datasets/opencv/fish.mp4')

while True:
    _ret, frame = cap.read()
    frame = cv2.resize(frame, (500, 400))

    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    h = hsv[:, :, 0]  # hue
    s = hsv[:, :, 1]  # saturation
    v = hsv[:, :, 2]  # value (brightness)

    # note: for 8-bit images OpenCV hue spans 0..179, so h > 200 never matches
    img = np.zeros(h.shape, dtype=np.uint8)
    img[((h < 50) | (h > 200)) & (s > 100)] = 255

    cv2.imshow('opencv camera', img)

    k = cv2.waitKey(1)  # wait 1 msec
    if k == 27 or k == 13:
        break

cap.release()
cv2.destroyAllWindows()
[ "cv2.waitKey", "cv2.cvtColor", "cv2.imshow", "numpy.zeros", "cv2.VideoCapture", "cv2.destroyAllWindows", "cv2.resize" ]
[((45, 92), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../datasets/opencv/fish.mp4"""'], {}), "('../datasets/opencv/fish.mp4')\n", (61, 92), False, 'import cv2\n'), ((292, 315), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (313, 315), False, 'import cv2\n'), ((775, 798), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (796, 798), False, 'import cv2\n'), ((147, 176), 'cv2.resize', 'cv2.resize', (['frame', '(500, 400)'], {}), '(frame, (500, 400))\n', (157, 176), False, 'import cv2\n'), ((180, 214), 'cv2.imshow', 'cv2.imshow', (['"""opencv camera"""', 'frame'], {}), "('opencv camera', frame)\n", (190, 214), False, 'import cv2\n'), ((223, 237), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (234, 237), False, 'import cv2\n'), ((389, 418), 'cv2.resize', 'cv2.resize', (['frame', '(500, 400)'], {}), '(frame, (500, 400))\n', (399, 418), False, 'import cv2\n'), ((428, 466), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (440, 466), False, 'import cv2\n'), ((577, 610), 'numpy.zeros', 'np.zeros', (['h.shape'], {'dtype': 'np.uint8'}), '(h.shape, dtype=np.uint8)\n', (585, 610), True, 'import numpy as np\n'), ((665, 697), 'cv2.imshow', 'cv2.imshow', (['"""opencv camera"""', 'img'], {}), "('opencv camera', img)\n", (675, 697), False, 'import cv2\n'), ((706, 720), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (717, 720), False, 'import cv2\n')]
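The manual hue/saturation mask in the second loop can also be written with `cv2.inRange`, which does the thresholding in C. A sketch with the same bounds as above (and since 8-bit hue tops out at 179, the high band is empty there and kept only for parity with the original condition):

import cv2
import numpy as np

def color_mask(frame):
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # hue < 50 or hue > 200, saturation > 100 (inclusive inRange bounds)
    low = cv2.inRange(hsv, np.array([0, 101, 0]), np.array([49, 255, 255]))
    high = cv2.inRange(hsv, np.array([201, 101, 0]), np.array([255, 255, 255]))
    return cv2.bitwise_or(low, high)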
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import os import logging import sys def setup_logging(): logger = get_main_logger() add_console_handler(logger, level=logging.DEBUG) mute_matplotlib_handler() return logger def get_main_logger(level=logging.DEBUG): # get a top-level "mypackage" logger, # set its log level to DEBUG, # BUT PREVENT IT from propagating messages to the root logger logger = logging.getLogger('main') logger.setLevel(level) logger.propagate = 0 return logger def add_console_handler(logger, level): # Based on: https://stackoverflow.com/questions/25187083/python-logging-to-multiple-handlers-at-different-log-levels simple_fmt = '[%(name)5s] [%(levelname)9s] %(message)s' simple_formatter = logging.Formatter(simple_fmt) # create a console handler # and set its log level to the command-line option console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(level) console_handler.setFormatter(simple_formatter) logger.addHandler(console_handler) def add_file_handler(logger, level, log_fn): # https://stackoverflow.com/questions/10973362/python-logging-function-name-file-name-line-number-using-a-single-file#10974508 detailed_fmt = '[%(asctime)s - %(levelname)8s] [%(filename)20s: %(lineno)3s] %(message)s' detailed_formatter = logging.Formatter(detailed_fmt, datefmt='%Y-%m-%d,%H:%M:%S') # create a file handler # and set its log level to DEBUG log_fn = os.path.abspath(log_fn) file_handler = logging.FileHandler(log_fn) file_handler.setLevel(level) file_handler.setFormatter(detailed_formatter) logger.addHandler(file_handler) return logger def mute_matplotlib_handler(): # set WARNING for Matplotlib matplotlib_logger = logging.getLogger('matplotlib') matplotlib_logger.setLevel(logging.WARNING) logger = setup_logging()
[ "os.path.abspath", "logging.FileHandler", "logging.StreamHandler", "logging.Formatter", "logging.getLogger" ]
[((436, 461), 'logging.getLogger', 'logging.getLogger', (['"""main"""'], {}), "('main')\n", (453, 461), False, 'import logging\n'), ((778, 807), 'logging.Formatter', 'logging.Formatter', (['simple_fmt'], {}), '(simple_fmt)\n', (795, 807), False, 'import logging\n'), ((917, 950), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stdout'], {}), '(sys.stdout)\n', (938, 950), False, 'import logging\n'), ((1373, 1433), 'logging.Formatter', 'logging.Formatter', (['detailed_fmt'], {'datefmt': '"""%Y-%m-%d,%H:%M:%S"""'}), "(detailed_fmt, datefmt='%Y-%m-%d,%H:%M:%S')\n", (1390, 1433), False, 'import logging\n'), ((1513, 1536), 'os.path.abspath', 'os.path.abspath', (['log_fn'], {}), '(log_fn)\n', (1528, 1536), False, 'import os\n'), ((1557, 1584), 'logging.FileHandler', 'logging.FileHandler', (['log_fn'], {}), '(log_fn)\n', (1576, 1584), False, 'import logging\n'), ((1812, 1843), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (1829, 1843), False, 'import logging\n')]
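Typical use, assuming the file is importable as `log_setup` (the module name is an assumption): the module-level `logger` is ready at import time, and a file handler can be attached on demand.

import logging
from log_setup import logger, add_file_handler  # hypothetical module name

add_file_handler(logger, logging.INFO, 'run.log')
logger.debug('console only, below the file handler level')
logger.info('goes to both the console and run.log')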
# -*- coding: utf-8 -*-
import requests

from config import config


class LineNotify(object):
    """Notify client for the LINE Notify API."""

    NOTIFY_API_URL = "https://notify-api.line.me/api/notify"

    def __init__(self, api_url=NOTIFY_API_URL, authority=config.AUTHORITY_TOKEN):
        self.apiUrl = api_url
        self.headers = {"Authorization": "Bearer " + authority}

    def notify(self, message):
        payload = {"message": message}
        # payload = {"message": message, 'stickerPackageId': 2, 'stickerId': 144}
        return self._post(payload)

    def _post(self, payload):
        return requests.post(self.apiUrl, headers=self.headers, data=payload)
[ "requests.post" ]
[((643, 715), 'requests.post', 'requests.post', (['self.apiUrl'], {'headers': 'self.headers', 'data': 'payload', 'files': '""""""'}), "(self.apiUrl, headers=self.headers, data=payload, files='')\n", (656, 715), False, 'import requests\n')]
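Usage sketch; the token here is a placeholder, while in the project it comes from `config.AUTHORITY_TOKEN`:

notifier = LineNotify(authority='YOUR_LINE_NOTIFY_TOKEN')
resp = notifier.notify('build finished')
print(resp.status_code)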
import requests, json

"""Login test: POST /v1_0/authorizations"""
url = 'http://127.0.0.1:5000/v1_0/authorizations'

REDIS_SENTINELS = [('127.0.0.1', '26380'), ('127.0.0.1', '26381'), ('127.0.0.1', '26382')]
REDIS_SENTINEL_SERVICE_NAME = 'mymaster'

from redis.sentinel import Sentinel
_sentinel = Sentinel(REDIS_SENTINELS)
redis_master = _sentinel.master_for(REDIS_SENTINEL_SERVICE_NAME)

redis_master.set('app:code:13161933309', '123456')

# build a raw application/json request body
data = json.dumps({'mobile': '13161933309', 'code': '123456'})

# send a POST request with a raw application/json body via requests
resp = requests.post(url, data=data, headers={'Content-Type': 'application/json'})
print(resp.json())

token = resp.json()['data']['token']
print(token)

"""Test: query the cache to fetch user info  /v1_0/user"""
url = 'http://127.0.0.1:5000/v1_0/user'
headers = {'Authorization': 'Bearer {}'.format(token)}
resp = requests.get(url, headers=headers)
print(resp.json())
[ "requests.get", "requests.post", "redis.sentinel.Sentinel", "json.dumps" ]
[((329, 354), 'redis.sentinel.Sentinel', 'Sentinel', (['REDIS_SENTINELS'], {}), '(REDIS_SENTINELS)\n', (337, 354), False, 'from redis.sentinel import Sentinel\n'), ((510, 565), 'json.dumps', 'json.dumps', (["{'mobile': '13161933309', 'code': '123456'}"], {}), "({'mobile': '13161933309', 'code': '123456'})\n", (520, 565), False, 'import requests, json\n'), ((615, 690), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': "{'Content-Type': 'application/json'}"}), "(url, data=data, headers={'Content-Type': 'application/json'})\n", (628, 690), False, 'import requests, json\n'), ((894, 928), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (906, 928), False, 'import requests, json\n')]
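The script prints responses but never asserts on them. A small hedged hardening helper; the 'data' envelope matches the responses accessed above:

def check_json(resp, *keys):
    """Fail fast if the endpoint did not return 200 with the expected keys."""
    assert resp.status_code == 200, resp.text
    body = resp.json()
    for key in keys:
        assert key in body.get('data', {}), body
    return body

# e.g. check_json(requests.post(url, data=data, headers=headers), 'token')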
""" Cisco_IOS_XE_poe_oper This module contains a collection of YANG definitions for monitoring power over ethernet feature in a Network Element. Copyright (c) 2016\-2018 by Cisco Systems, Inc. All rights reserved. """ from collections import OrderedDict from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64 from ydk.filters import YFilter from ydk.errors import YError, YModelError from ydk.errors.error_handler import handle_type_error as _handle_type_error class IlpowerPdClass(Enum): """ IlpowerPdClass (Enum Class) Name of the power class .. data:: poe_null = 0 List of POE interfaces, keyed by interface name .. data:: poe_unknown = 1 Power class unknown .. data:: poe_cisco = 2 Power class cisco .. data:: poe_ieee0 = 3 IEEE power class 0 .. data:: poe_ieee1 = 4 IEEE power class 1 .. data:: poe_ieee2 = 5 IEEE power class 2 .. data:: poe_ieee3 = 6 IEEE power class 3 .. data:: poe_ieee4 = 7 IEEE power class 4 .. data:: poe_ieee5 = 8 IEEE power class 5 .. data:: poe_ieee_unknown_class = 9 IEEE power class unknown """ poe_null = Enum.YLeaf(0, "poe-null") poe_unknown = Enum.YLeaf(1, "poe-unknown") poe_cisco = Enum.YLeaf(2, "poe-cisco") poe_ieee0 = Enum.YLeaf(3, "poe-ieee0") poe_ieee1 = Enum.YLeaf(4, "poe-ieee1") poe_ieee2 = Enum.YLeaf(5, "poe-ieee2") poe_ieee3 = Enum.YLeaf(6, "poe-ieee3") poe_ieee4 = Enum.YLeaf(7, "poe-ieee4") poe_ieee5 = Enum.YLeaf(8, "poe-ieee5") poe_ieee_unknown_class = Enum.YLeaf(9, "poe-ieee-unknown-class") class PoeOperData(Entity): """ Informaton about POEs .. attribute:: poe_port List of POE interfaces, keyed by interface name **type**\: list of :py:class:`PoePort <ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper.PoeOperData.PoePort>` """ _prefix = 'poe-ios-xe-oper' _revision = '2018-02-04' def __init__(self): super(PoeOperData, self).__init__() self._top_entity = None self.yang_name = "poe-oper-data" self.yang_parent_name = "Cisco-IOS-XE-poe-oper" self.is_top_level_class = True self.has_list_ancestor = False self.ylist_key_names = [] self._child_classes = OrderedDict([("poe-port", ("poe_port", PoeOperData.PoePort))]) self._leafs = OrderedDict() self.poe_port = YList(self) self._segment_path = lambda: "Cisco-IOS-XE-poe-oper:poe-oper-data" self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(PoeOperData, [], name, value) class PoePort(Entity): """ List of POE interfaces, keyed by interface name .. attribute:: intf_name (key) Name of the POE interface **type**\: str .. attribute:: poe_intf_enabled POE interface admin state **type**\: bool .. attribute:: power_used Power used by PD device **type**\: :py:class:`Decimal64<ydk.types.Decimal64>` **range:** \-92233720368547758.08..92233720368547758.07 .. 
attribute:: pd_class Class of the PD device **type**\: :py:class:`IlpowerPdClass <ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper.IlpowerPdClass>` """ _prefix = 'poe-ios-xe-oper' _revision = '2018-02-04' def __init__(self): super(PoeOperData.PoePort, self).__init__() self.yang_name = "poe-port" self.yang_parent_name = "poe-oper-data" self.is_top_level_class = False self.has_list_ancestor = False self.ylist_key_names = ['intf_name'] self._child_classes = OrderedDict([]) self._leafs = OrderedDict([ ('intf_name', (YLeaf(YType.str, 'intf-name'), ['str'])), ('poe_intf_enabled', (YLeaf(YType.boolean, 'poe-intf-enabled'), ['bool'])), ('power_used', (YLeaf(YType.str, 'power-used'), ['Decimal64'])), ('pd_class', (YLeaf(YType.enumeration, 'pd-class'), [('ydk.models.cisco_ios_xe.Cisco_IOS_XE_poe_oper', 'IlpowerPdClass', '')])), ]) self.intf_name = None self.poe_intf_enabled = None self.power_used = None self.pd_class = None self._segment_path = lambda: "poe-port" + "[intf-name='" + str(self.intf_name) + "']" self._absolute_path = lambda: "Cisco-IOS-XE-poe-oper:poe-oper-data/%s" % self._segment_path() self._is_frozen = True def __setattr__(self, name, value): self._perform_setattr(PoeOperData.PoePort, ['intf_name', 'poe_intf_enabled', 'power_used', 'pd_class'], name, value) def clone_ptr(self): self._top_entity = PoeOperData() return self._top_entity
[ "collections.OrderedDict", "ydk.types.Enum.YLeaf", "ydk.types.YLeaf", "ydk.types.YList" ]
[((1251, 1276), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(0)', '"""poe-null"""'], {}), "(0, 'poe-null')\n", (1261, 1276), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1296, 1324), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(1)', '"""poe-unknown"""'], {}), "(1, 'poe-unknown')\n", (1306, 1324), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1342, 1368), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(2)', '"""poe-cisco"""'], {}), "(2, 'poe-cisco')\n", (1352, 1368), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1386, 1412), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(3)', '"""poe-ieee0"""'], {}), "(3, 'poe-ieee0')\n", (1396, 1412), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1430, 1456), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(4)', '"""poe-ieee1"""'], {}), "(4, 'poe-ieee1')\n", (1440, 1456), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1474, 1500), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(5)', '"""poe-ieee2"""'], {}), "(5, 'poe-ieee2')\n", (1484, 1500), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1518, 1544), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(6)', '"""poe-ieee3"""'], {}), "(6, 'poe-ieee3')\n", (1528, 1544), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1562, 1588), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(7)', '"""poe-ieee4"""'], {}), "(7, 'poe-ieee4')\n", (1572, 1588), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1606, 1632), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(8)', '"""poe-ieee5"""'], {}), "(8, 'poe-ieee5')\n", (1616, 1632), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((1663, 1702), 'ydk.types.Enum.YLeaf', 'Enum.YLeaf', (['(9)', '"""poe-ieee-unknown-class"""'], {}), "(9, 'poe-ieee-unknown-class')\n", (1673, 1702), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((2396, 2458), 'collections.OrderedDict', 'OrderedDict', (["[('poe-port', ('poe_port', PoeOperData.PoePort))]"], {}), "([('poe-port', ('poe_port', PoeOperData.PoePort))])\n", (2407, 2458), False, 'from collections import OrderedDict\n'), ((2481, 2494), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2492, 2494), False, 'from collections import OrderedDict\n'), ((2520, 2531), 'ydk.types.YList', 'YList', (['self'], {}), '(self)\n', (2525, 2531), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((3935, 3950), 'collections.OrderedDict', 'OrderedDict', (['[]'], {}), '([])\n', (3946, 3950), False, 'from collections import OrderedDict\n'), ((4022, 4051), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""intf-name"""'], {}), "(YType.str, 'intf-name')\n", (4027, 4051), False, 'from ydk.types 
import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((4102, 4142), 'ydk.types.YLeaf', 'YLeaf', (['YType.boolean', '"""poe-intf-enabled"""'], {}), "(YType.boolean, 'poe-intf-enabled')\n", (4107, 4142), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((4188, 4218), 'ydk.types.YLeaf', 'YLeaf', (['YType.str', '"""power-used"""'], {}), "(YType.str, 'power-used')\n", (4193, 4218), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n'), ((4267, 4303), 'ydk.types.YLeaf', 'YLeaf', (['YType.enumeration', '"""pd-class"""'], {}), "(YType.enumeration, 'pd-class')\n", (4272, 4303), False, 'from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64\n')]
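These generated bindings are typically consumed through YDK services. A hedged sketch of reading PoE operational data from a device; the host and credentials are placeholders:

from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xe import Cisco_IOS_XE_poe_oper as poe_oper

provider = NetconfServiceProvider(address='10.0.0.1', port=830,
                                 username='admin', password='admin')
crud = CRUDService()

poe = crud.read(provider, poe_oper.PoeOperData())
for port in poe.poe_port:
    print(port.intf_name, port.poe_intf_enabled, port.power_used, port.pd_class)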
# -*- coding: utf-8 -*- """ Created on Tue Sep 13 19:00:40 2016 @author: sebalander """ from numpy import zeros, sqrt, array, tan, arctan, prod, cos from cv2 import Rodrigues from lmfit import minimize, Parameters #from calibration import calibrator #xypToZplane = calibrator.xypToZplane # ## %% ========== ========== PARAMETER HANDLING ========== ========== #def formatParameters(rVec, tVec, linearCoeffs, distCoeffs): # params = Parameters() # # if prod(rVec.shape) == 9: # rVec = Rodrigues(rVec)[0] # # rVec = rVec.reshape(3) # # for i in range(3): # params.add('rvec%d'%i, # value=rVec[i], vary=True) # params.add('tvec%d'%i, # value=tVec[i], vary=True) # # # image center # params.add('cameraMatrix0', # value=linearCoeffs[0], vary=False) # params.add('cameraMatrix1', # value=linearCoeffs[1], vary=False) # # # k # params.add('distCoeffs', # value=distCoeffs, vary=False) # # return params # #def retrieveParameters(params): # ''' # # ''' # rvec = zeros((3,1)) # tvec = zeros((3,1)) # for i in range(3): # rvec[i,0] = params['rvec%d'%i].value # tvec[i,0] = params['tvec%d'%i].value # # cameraMatrix = zeros(2) # cameraMatrix[0] = params['cameraMatrix0'].value # cameraMatrix[1] = params['cameraMatrix1'].value # # distCoeffs = params['distCoeffs'].value # # return rvec, tvec, cameraMatrix, distCoeffs # %% ========== ========== DIRECT ========== ========== def radialDistort(rh, k, quot=False, der=False): ''' returns distorted radius using distortion coefficient k optionally it returns the distortion quotioent rpp = rp * q ''' k.shape = 1 th = arctan(rh) tanth = tan(th / 2) rd = k * tanth if der: # rpp wrt rp dDdH = k / cos(th / 2)**2 / 2 / (1 + rh**2) # calculate quotient q = rd / rh # q wrt rpp dQdH = ((dDdH - q) / rh).reshape((1, -1)) # deriv wrt undistorted coords dQdK = (tanth / rh).reshape((1, -1)) if quot: return q, dQdH, dQdK else: return rd, dQdH, dQdK else: if quot: return rd / rh else: return rd ## we asume that intrinsic distortion paramters is just a scalar: distCoeffs=k #def direct(fiducialPoints, rVec, tVec, linearCoeffs, distCoeffs): # # format as matrix # try: # rVec.reshape(3) # rVec = Rodrigues(rVec)[0] # except: # pass # # xyz = rVec.dot(fiducialPoints[0].T)+tVec # # xp = xyz[0]/xyz[2] # yp = xyz[1]/xyz[2] # # rp = sqrt(xp**2 + yp**2) # thetap = arctan(rp) # # rpp = distCoeffs*tan(thetap/2) # # rpp_rp = rpp/rp # # xpp = xp*rpp_rp # ypp = yp*rpp_rp # # u = xpp + linearCoeffs[0] # v = ypp + linearCoeffs[1] # # return array([u,v]).reshape((fiducialPoints.shape[1],1,2)) # #def residualDirect(params, fiducialPoints, imageCorners): # rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params) # # projectedCorners = direct(fiducialPoints, # rVec, # tVec, # linearCoeffs, # distCoeffs) # # return imageCorners[:,0,:] - projectedCorners[:,0,:] # #def calibrateDirect(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs): # initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj # # out = minimize(residualDirect, # initialParams, # args=(fiducialPoints, # imageCorners)) # # rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params) # # return rvecOpt, tvecOpt, out.params # %% ========== ========== INVERSE ========== ========== def radialUndistort(rd, k, quot=False, der=False): ''' takes distorted radius and returns the radius undistorted optionally it returns the undistortion quotient rd = rh * q ''' # polynomial coeffs, grade 7 # # (k1,k2,p1,p2[,k3[,k4,k5,k6[,s1,s2,s3,s4[,τx,τy]]]]) k.shape = -1 thetap = 2 * arctan(rd / k) rh = tan(thetap) 
    retVal = True

    if der:
        # derivative of the direct (distortion) mapping
        q, dQdH, dQdK = radialDistort(rh, k, quot, der)

        if quot:
            return q, retVal, dQdH, dQdK
        else:
            return rh, retVal, dQdH, dQdK
    else:
        if quot:
            # returns q
            return rd / rh, retVal
        else:
            return rh, retVal

#def inverse(imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
#
#    xpp = imageCorners[:,0,0]-linearCoeffs[0]
#    ypp = imageCorners[:,0,1]-linearCoeffs[1]
#    rpp = sqrt(xpp**2 + ypp**2)
#
#    thetap = 2*arctan(rpp/distCoeffs)
#
#    rp = tan(thetap)
#
#    rp_rpp = rp/rpp
#
#    xp = xpp * rp_rpp
#    yp = ypp * rp_rpp
#
#    # project to z=0 plane. perhaps calculate faster with homography function?
#    XYZ = xypToZplane(xp, yp, rVec, tVec)
#
#    return XYZ
#
#
#def residualInverse(params, fiducialPoints, imageCorners):
#    rVec, tVec, linearCoeffs, distCoeffs = retrieveParameters(params)
#
#    projectedFiducialPoints = inverse(imageCorners,
#                                      rVec,
#                                      tVec,
#                                      linearCoeffs,
#                                      distCoeffs)
#
#    return fiducialPoints[0,:,:2] - projectedFiducialPoints[0,:,:2]
#
#def calibrateInverse(fiducialPoints, imageCorners, rVec, tVec, linearCoeffs, distCoeffs):
#    initialParams = formatParameters(rVec, tVec, linearCoeffs, distCoeffs) # generate Parameters obj
#
#    out = minimize(residualInverse,
#                   initialParams,
#                   args=(fiducialPoints,
#                         imageCorners))
#
#    rvecOpt, tvecOpt, _, _ = retrieveParameters(out.params)
#
#    return rvecOpt, tvecOpt, out.params
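# %% A minimal round-trip sketch of the two functions above (k = 400 pixels
# is an illustrative value, not a calibrated one). Since
# rd = k*tan(arctan(rh)/2) and rh = tan(2*arctan(rd/k)) are exact inverses,
# undistorting a distorted radius should recover the input.
if __name__ == '__main__':
    from numpy import allclose
    k = array([400.0])
    rh = array([0.1, 0.5, 1.0, 2.0])
    rd = radialDistort(rh, k)
    rhBack, retVal = radialUndistort(rd, k)
    assert retVal and allclose(rh, rhBack)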
[ "numpy.arctan", "numpy.tan", "numpy.cos" ]
[((1758, 1768), 'numpy.arctan', 'arctan', (['rh'], {}), '(rh)\n', (1764, 1768), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((1781, 1792), 'numpy.tan', 'tan', (['(th / 2)'], {}), '(th / 2)\n', (1784, 1792), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((4240, 4251), 'numpy.tan', 'tan', (['thetap'], {}), '(thetap)\n', (4243, 4251), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((4215, 4229), 'numpy.arctan', 'arctan', (['(rd / k)'], {}), '(rd / k)\n', (4221, 4229), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n'), ((1865, 1876), 'numpy.cos', 'cos', (['(th / 2)'], {}), '(th / 2)\n', (1868, 1876), False, 'from numpy import zeros, sqrt, array, tan, arctan, prod, cos\n')]
from sspipe import p, px, unpipe def test_unpipe_active(): a_pipe = px + 1 | px * 5 func = unpipe(a_pipe) assert func(0) == 5 def test_unpipe_passive(): func = lambda x: (x + 1) * 5 func = unpipe(func) assert func(0) == 5
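# A small additional check, mirroring the pipes above: once unpiped, the
# result should behave like any plain callable, e.g. inside map().
def test_unpipe_usable_in_map():
    func = unpipe(px + 1 | px * 5)
    assert list(map(func, [0, 1])) == [5, 10]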
[ "sspipe.unpipe" ]
[((100, 114), 'sspipe.unpipe', 'unpipe', (['a_pipe'], {}), '(a_pipe)\n', (106, 114), False, 'from sspipe import p, px, unpipe\n'), ((211, 223), 'sspipe.unpipe', 'unpipe', (['func'], {}), '(func)\n', (217, 223), False, 'from sspipe import p, px, unpipe\n')]
# -*- coding: utf-8 -*- # Copyright 2012 codestation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from os.path import exists, join, dirname, abspath def list_generator(list_widget): for i in range(list_widget.count()): yield list_widget.item(i) def parse_dimension(term, dim): query = {} if term[len("%s:" % dim)] == ">": query['%s_type' % dim] = ">" query[dim] = int(term.split(">")[1]) elif term[len("%s:" % dim)] == "<": query['%s_type' % dim] = "<" query[dim] = int(term.split("<")[1]) else: query['%s_type' % dim] = "=" query[dim] = int(term.split(":")[1]) return query def parse_query(text): query = {} query['tags'] = [] if isinstance(text, list): items = text else: items = re.sub(' +', ' ', text).split(' ') try: for item in items: if item.startswith("site:"): query['site'] = item.split(":")[1] elif item.startswith("rating:"): query['rating'] = item.split(":")[1] elif item.startswith("width:"): query.update(parse_dimension(item, "width")) elif item.startswith("height:"): query.update(parse_dimension(item, "height")) elif item.startswith("ratio:"): query['ratio'] = item.split(":", 1)[1] query['ratio_width'] = int(item.split(":")[1]) query['ratio_height'] = int(item.split(":")[2]) elif item.startswith("limit:"): query['limit'] = item.split(":")[1] elif item.startswith("pool:"): query['pool'] = item.split(":")[1] else: query['tags'].append(item) return query except (ValueError, TypeError, IndexError): return item def find_resource(base, filename): base_path = [dirname(abspath(base)), "/usr/local/share/danbooru-daemon", "/usr/share/danbooru-daemon"] for path in base_path: full_path = join(path, filename) if exists(full_path): return full_path raise Exception("%s cannot be found." % filename) def filter_posts(posts, query): if query.get('rating'): posts[:] = [post for post in posts if post['rating'] == query['rating']] if query.get('width'): if query['width_type'] == "=": posts[:] = [post for post in posts if post['width'] == query['width']] if query['width_type'] == "<": posts[:] = [post for post in posts if post['width'] < query['width']] if query['width_type'] == ">": posts[:] = [post for post in posts if post['width'] > query['width']] if query.get('height'): if query['height_type'] == "=": posts[:] = [post for post in posts if post['height'] == query['height']] if query['height_type'] == "<": posts[:] = [post for post in posts if post['height'] < query['height']] if query['height_type'] == ">": posts[:] = [post for post in posts if post['height'] > query['height']] if query.get('ratio'): posts[:] = [post for post in posts if post['width'] * 1.0 / post['height'] == query['ratio_width'] * 1.0 / query['ratio_height']] return posts def remove_duplicates(posts): posts[:] = list(dict((x['id'], x) for x in posts).values()) return sorted(posts, key=lambda k: k['id'], reverse=True)
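# Minimal usage sketch for parse_query (the query string and the exact key
# ordering shown are illustrative):
#
# >>> parse_query("width:>800 rating:safe blue_sky")
# {'tags': ['blue_sky'], 'width_type': '>', 'width': 800, 'rating': 'safe'}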
[ "os.path.abspath", "os.path.join", "os.path.exists", "re.sub" ]
[((2600, 2620), 'os.path.join', 'join', (['path', 'filename'], {}), '(path, filename)\n', (2604, 2620), False, 'from os.path import exists, join, dirname, abspath\n'), ((2633, 2650), 'os.path.exists', 'exists', (['full_path'], {}), '(full_path)\n', (2639, 2650), False, 'from os.path import exists, join, dirname, abspath\n'), ((2436, 2449), 'os.path.abspath', 'abspath', (['base'], {}), '(base)\n', (2443, 2449), False, 'from os.path import exists, join, dirname, abspath\n'), ((1334, 1357), 're.sub', 're.sub', (['""" +"""', '""" """', 'text'], {}), "(' +', ' ', text)\n", (1340, 1357), False, 'import re\n')]
# -*-coding:utf-8 -*-

"""
Created on 2015-05-21

@author: Danny<<EMAIL>>
DannyWork Project
"""

from __future__ import unicode_literals

from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse

from .models import Blog


class BlogSitemap(Sitemap):
    """
    Blog sitemap
    """

    changefreq = 'never'
    priority = 0.8

    def items(self):
        return Blog.objects.filter(is_deleted=False, is_active=True).order_by('-created')

    def lastmod(self, obj):
        return obj.created

    def location(self, obj):
        return reverse('blog_detail', args=[obj.id])
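# A sketch of how this sitemap might be wired into urls.py (the URL pattern,
# the module path, and the dict key below are illustrative assumptions, not
# taken from this project):
#
# from django.conf.urls import url
# from django.contrib.sitemaps.views import sitemap
# from .sitemaps import BlogSitemap
#
# urlpatterns = [
#     url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': BlogSitemap}}),
# ]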
[ "django.core.urlresolvers.reverse" ]
[((571, 608), 'django.core.urlresolvers.reverse', 'reverse', (['"""blog_detail"""'], {'args': '[obj.id]'}), "('blog_detail', args=[obj.id])\n", (578, 608), False, 'from django.core.urlresolvers import reverse\n')]
""" plot.py defines functions for plotting phase diagrams of complex coacervate liquid separation. """ # standard libraries import matplotlib.pyplot as plt from matplotlib import cm # colormap import numpy as np import pandas as pd # custom libraries import pe import salt as nacl # plotting libraries import plotly.graph_objects as go from bokeh.plotting import figure, output_file, show from bokeh.models import ColumnDataSource, Title, Range1d from bokeh.models.tools import HoverTool # CONSTANTS NA = 6.022E23 # Avogadro's number, molecules / mol m3_2_L = 1E3 K_2_C = 273.15 # conversion from Kelvin to Celsius (subtract this) m_2_A = 1E10 def alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M, T_range=[273.15,373.35], cmap_name='plasma', sigma=None, colors=None, marker='o', lw=1, T_cels=False, y_lim=[0.5, 1], square_box=False, tol=0.05, ax=None, show_lgnd=True): """ Plots the volume fraction of supernatant phase I (alpha) vs. the overall density of the varied component. Note: currently eliminates data points with alpha = 1 because they tend to be the result of numerical imprecision T_range : 2-tuple Lower and upper bounds on temperature to consider in degrees Kelvin (even if T_cels is True) tol : float, opt Tolerance of how close volume fraction nearest single-phase region needs to be to 1 to round up to 1 (for plotting dashed line) """ # creates list of colors for each value of the varied density if colors is None: colors = get_colors(cmap_name, len(rho_var_list)) # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) # plots volume fraction of supernatant for each composition for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)): # plots binodal for low polymer concentration [M] rho_p, rho_s = rho_pair results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M) rho_PCI_list = results['rho_PCI'] rho_PCII_list = results['rho_PCII'] rho_CI_list = results['rho_CI'] rho_CII_list = results['rho_CII'] lB_arr = results['lB'] alpha = results['alpha'] T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma) liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \ (np.asarray(alpha) != 1) if T_cels: T_arr -= K_2_C # plots alpha vs. 
        alpha_arr = np.array(alpha)[liq_h2o]
        T_arr = T_arr[liq_h2o]
        ax.plot(T_arr, alpha_arr,
                color=colors[i], marker=marker, lw=lw,
                label=r'$\rho_p = $' + \
                '{0:.2f} M, '.format(rho_p) + r'$\rho_s = $' + \
                '{0:.2f} M'.format(rho_s))

        ### Single Phase
        # plots dashed line to lowest temperature if single phase
        # *checks if lowest plotted temperature reaches y axis
        T_min = np.min(T_arr)
        if T_min > np.min(ax.get_xlim()):
            alpha_single_phase = alpha_arr[np.argmin(T_arr)]
            # rounds up to 1 if volume fraction is close (discontinuous phase sep)
            if np.abs(alpha_single_phase - 1) < tol:
                ax.plot([T_min, T_min], [alpha_single_phase, 1], '-',
                        lw=lw, color=colors[i])
                alpha_single_phase = 1

            # rounds to 0.5 if volume fraction is close (passes through LCST)
            if np.abs(alpha_single_phase - 0.5) < tol:
                alpha_single_phase = 0.5

            # plots horizontal dashed line to indicate single phase at low T
            ax.plot([ax.get_xlim()[0], T_min],
                    [alpha_single_phase, alpha_single_phase], '--',
                    lw=lw, color=colors[i])

    # determines labels and limits of axes
    if T_cels:
        x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
        x_label = r'$T$'
        x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
    y_label = r'$V_{sup}/V_{tot}$'

    # formats plot
    format_binodal(ax, x_label, x_unit, T_range, x_lim=x_lim, y_lim=y_lim,
                   y_label=y_label, square_box=square_box,
                   show_lgnd=show_lgnd)

    return ax
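# Hedged usage sketch for alpha_custom_rho (all values are illustrative
# assumptions): `data` is the dict of binodal DataFrames keyed by Bjerrum
# length used throughout this module.
#
# beads_2_M = pe.get_beads_2_M(4.2e-10, SI=True)  # sigma in meters
# ax = alpha_custom_rho(data, rho_p_list=[0.1, 0.3], rho_s_list=[1.6, 1.8],
#                       beads_2_M=beads_2_M, T_cels=True)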
def alpha_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M,
                   T_range=[273.15,373.35], cmap_name='plasma', sigma=None,
                   colors=None, marker='o', lw=1, T_cels=False,
                   y_lim=[0.5, 1], title=None, square_box=False):
    """
    Plots the volume fraction of supernatant phase I (alpha) vs. temperature
    for each value of the varied component's overall density.

    Note: currently eliminates data points with alpha = 1 because they tend
    to be the result of numerical imprecision.

    T_range : 2-tuple
        Lower and upper bounds on temperature to consider, in Kelvin
        (even if T_cels == True)
    """
    # creates dictionary of values based on which component's density is varied
    d = get_plot_dict_p_s(ch_var)

    # creates list of colors for each value of the varied density
    if colors is None:
        colors = get_colors(cmap_name, len(rho_var_list))

    # creates figure
    fig = plt.figure()
    ax = fig.add_subplot(111)

    for i, rho_var in enumerate(rho_var_list):
        # plots binodal for low polymer concentration [M]
        rho_pair = np.array([rho_var, rho_fix])
        rho_p, rho_s = rho_pair[d['order']]
        results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
        rho_PCI_list = results['rho_PCI']
        rho_PCII_list = results['rho_PCII']
        rho_CI_list = results['rho_CI']
        rho_CII_list = results['rho_CII']
        lB_arr = results['lB']
        alpha = results['alpha']
        T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
        liq_h2o = (T_arr >= T_range[0]) * (T_arr <= T_range[1]) * \
                    (np.asarray(alpha) != 1)
        if T_cels:
            T_arr -= K_2_C

        # plots alpha vs. T for given rho_p
        ax.plot(T_arr[liq_h2o], np.array(alpha)[liq_h2o],
                color=colors[i], marker=marker, lw=lw,
                label=r'$\rho_' + d['ch_var'] + ' = $' + \
                '{0:.2f} M'.format(rho_var))

    # determines labels and limits of axes
    if T_cels:
        x_lim = [T_range[0] - K_2_C, T_range[1] - K_2_C]
        x_label = r'$T$'
        x_unit = r'$^{\circ}$C'
    else:
        x_lim = T_range
        x_label = r'$T$'
        x_unit = 'K'
    y_label = r'$V^{sup}/V^{tot}$'
    if title is None:
        title = 'Effect of Total {0:s} on Supernatant Volume, {1:s} = {2:.2f} M' \
                .format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix)

    # formats plot
    format_binodal(ax, x_label, x_unit, T_range, title=title, x_lim=x_lim,
                   y_lim=y_lim, y_label=y_label, square_box=square_box)

    return ax


def binodal(lB_arr, left_list, right_list, left='rhoPCI', right='rhoPCII',
            x_label='polyanion density', n_tie_lines=3, plot_T=True,
            sigma=None, T_range=[273, 373], beads_2_M=None, title='',
            fix_eps=False, deg_C=False, x_lim=None, y_lim=None, marker=True,
            line=False, c1='blue', c2='red'):
    """
    Plots binodal with polyanion density as x axis and temperature or Bjerrum
    length as y axis using Bokeh interactive plotting methods.

    Parameters
    ----------
    lB_arr : (Nx1) numpy array
        Array of Bjerrum lengths non-dimensionalized by the bead size sigma
        used to define the "data" dictionary.
    left_list : N-element list
        List of x-axis variable in phase I (supernatant) [beads/sigma^3]
    right_list : N-element list
        List of x-axis variable in phase II (coacervate) [beads/sigma^3]
    left : string
        Name of heading in df of the variable given in left_list
    right : string
        Name of heading in df of the variable given in right_list
    x_label : string
        Variable to be plotted along the x-axis (without units)
    n_tie_lines : int
        Number of tie lines to plot
    plot_T : bool
        y axis is temperature [K] if True, Bjerrum length [sigma] if False
    T_range : 2-element list
        Lower and upper bound for temperatures to plot (to limit temperatures
        to those for which water is liquid)
    beads_2_M : float
        Conversion from beads/sigma^3 to moles of monomers / L. If None, no
        conversion is made and the units on the x axis are beads/sigma^3.
    title : string
        Title of plot
    fix_eps : bool
        Fixes epsilon to a constant value if True, or allows it to vary with
        temperature if False.
    deg_C : bool, opt
        If True, temperature is shown in degrees Celsius (assuming it is
        provided in Kelvin), default = False.
    x_lim : 2-element tuple of floats, optional
        Lower and upper bounds of x axis. If None provided, automatically set.
    y_lim : 2-element tuple of floats, optional
        Lower and upper bounds of y axis. If None provided, automatically set.

    Returns
    -------
    p : bokeh plot
        Plot of binodal. Use bokeh's "show(p)" to display. Use
        "output_notebook()" beforehand to show the plot in the same cell
        (instead of a separate browser tab).
    """
""" left_arr = np.copy(left_list) right_arr = np.copy(right_list) # calculates conversion from beads / sigma^3 to mol/L if beads_2_M is not None: left_arr *= beads_2_M right_arr *= beads_2_M units_rho = '[mol/L]' else: units_rho = '[beads/sigma^3]' # computes temperature corresponding to Bjerrum lengths T_arr = pe.lB_2_T_arr(lB_arr, T_range, fix_eps=fix_eps, sigma=sigma) # stores results in dataframe for plotting df_mu = pd.DataFrame(columns=['BJ', 'T', left, right]) liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1]) df_mu['BJ'] = lB_arr[liq_h2o] df_mu['T'] = T_arr[liq_h2o] - deg_C*273 # converts to degrees Celsius if requested df_mu[left] = left_arr[liq_h2o] # monomer density df_mu[right] = right_arr[liq_h2o] # monomer density # plots binodal at fixed chemical potential n_plot = len(df_mu) if n_plot == 0: print('No data to plot in plot.binodal()--error likely.') p = no_salt(df_mu, n_plot, left=left, right=right, x_label=x_label, n_tie_lines=n_tie_lines, plot_T=plot_T, marker=marker, line=line, title=title, units_rho=units_rho, deg_C=deg_C, c1=c1, c2=c2) # sets axis limits if requested if x_lim is not None: p.x_range = Range1d(*x_lim) if y_lim is not None: p.y_range = Range1d(*y_lim) return p def binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M, x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None, T_range=[273.15,373.15], cmap_name='plasma', colors=None, marker='o', fill_left='none', fill_right='full', lw_sup=1, lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False, c_sup='#1414FF', c_co='#FF0000', ls_sup='-', square_box=False, plot_fixed_rho=False, ax=None, show_lgnd=True): """ Like `binodal_vary_rho()` but allows user to customize both rho_p and rho_s (overall) of each condition, rather than fixing one for all conditions. """ # creates list of colors for each value of rho_p if colors is None: if cmap_name is not None: colors = get_colors(cmap_name, len(rho_var_list)) # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)): rho_p, rho_s = rho_pair # plots binodal for low polymer concentration [M] results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M) rho_PCI_list = results['rho_PCI'] rho_PCII_list = results['rho_PCII'] rho_CI_list = results['rho_CI'] rho_CII_list = results['rho_CII'] rho_PAI_list = results['rho_PAI'] rho_PAII_list = results['rho_PAII'] rho_AI_list = results['rho_AI'] rho_AII_list = results['rho_AII'] lB_arr = results['lB'] alpha = results['alpha'] # selects the x-axis data if x_var == 'polycation': left_arr = np.array(rho_PCI_list) right_arr = np.array(rho_PCII_list) elif x_var == 'polyanion': left_arr = np.array(rho_PAI_list) right_arr = np.array(rho_PAII_list) elif x_var == 'cation': left_arr = np.array(rho_CI_list) right_arr = np.array(rho_CII_list) elif x_var == 'anion': left_arr = np.array(rho_AI_list) right_arr = np.array(rho_AII_list) elif x_var == 'solvent': left_arr = pe.calc_rho_solv(rho_PCI_list, rho_CI_list, beads_2_M) right_arr = pe.calc_rho_solv(rho_PCII_list, rho_CII_list, beads_2_M) elif x_var == 'polyelectrolyte': left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list) right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list) elif x_var == 'salt': left_arr = np.array(rho_CI_list) right_arr = np.array(rho_CII_list) else: print('Error. 
def binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
                       x_var='polycation', x_label=r'$\rho_{PSS}$',
                       sigma=None, T_range=[273.15,373.15],
                       cmap_name='plasma', colors=None, marker='o',
                       fill_left='none', fill_right='full', lw_sup=1,
                       lw_co=3, lgnd_out=True, lw=1, x_lim=None,
                       T_cels=False, c_sup='#1414FF', c_co='#FF0000',
                       ls_sup='-', square_box=False, plot_fixed_rho=False,
                       ax=None, show_lgnd=True):
    """
    Like `binodal_vary_rho()` but allows the user to customize both rho_p and
    rho_s (overall) of each condition, rather than fixing one for all
    conditions.
    """
    # creates list of colors for each value of rho_p
    if colors is None:
        if cmap_name is not None:
            colors = get_colors(cmap_name, len(rho_p_list))

    # creates figure
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)

    for i, rho_pair in enumerate(zip(rho_p_list, rho_s_list)):
        rho_p, rho_s = rho_pair
        # plots binodal for low polymer concentration [M]
        results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
        rho_PCI_list = results['rho_PCI']
        rho_PCII_list = results['rho_PCII']
        rho_CI_list = results['rho_CI']
        rho_CII_list = results['rho_CII']
        rho_PAI_list = results['rho_PAI']
        rho_PAII_list = results['rho_PAII']
        rho_AI_list = results['rho_AI']
        rho_AII_list = results['rho_AII']
        lB_arr = results['lB']
        alpha = results['alpha']

        # selects the x-axis data
        if x_var == 'polycation':
            left_arr = np.array(rho_PCI_list)
            right_arr = np.array(rho_PCII_list)
        elif x_var == 'polyanion':
            left_arr = np.array(rho_PAI_list)
            right_arr = np.array(rho_PAII_list)
        elif x_var == 'cation':
            left_arr = np.array(rho_CI_list)
            right_arr = np.array(rho_CII_list)
        elif x_var == 'anion':
            left_arr = np.array(rho_AI_list)
            right_arr = np.array(rho_AII_list)
        elif x_var == 'solvent':
            left_arr = pe.calc_rho_solv(rho_PCI_list, rho_CI_list, beads_2_M)
            right_arr = pe.calc_rho_solv(rho_PCII_list, rho_CII_list,
                                         beads_2_M)
        elif x_var == 'polyelectrolyte':
            left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list)
            right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list)
        elif x_var == 'salt':
            left_arr = np.array(rho_CI_list)
            right_arr = np.array(rho_CII_list)
        else:
            print('Error. Invalid x variable in plot.binodal_custom_rho().')

        # computes temperature and identifies data within range
        T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma)
        liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1])

        # converts temperature from Kelvin to Celsius
        if T_cels:
            T_arr -= K_2_C

        # assigns separate colors to coacervate and supernatant if not specified
        if colors is not None:
            c_sup = colors[i]
            c_co = colors[i]

        # supernatant
        ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup,
                marker=marker, fillstyle=fill_left, ls=ls_sup,
                label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
                r'$\rho_s = $' + '{0:.2f} M, supernatant'.format(rho_s),
                lw=lw_sup)
        # coacervate
        ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co,
                marker=marker, fillstyle=fill_right,
                label=r'$\rho_p = $' + '{0:.2f} M, '.format(rho_p) + \
                r'$\rho_s = $' + '{0:.2f} M, coacervate'.format(rho_s),
                lw=lw_co)

        # plots dashed line indicating fixed density if requested
        if plot_fixed_rho:
            # defines dictionary mapping x variable to corresponding fixed
            # density
            x_var_2_rho_fixed = {'polycation' : rho_p/2, 'cation' : rho_s,
                                 'solvent' : 1 - rho_p - rho_s,
                                 'polyelectrolyte' : rho_p, 'salt' : rho_s}
            # selects appropriate fixed density based on x variable
            rho_fixed = x_var_2_rho_fixed[x_var]
            # determines color based on which branch is closest
            if (rho_fixed - np.max(left_arr[liq_h2o])) > \
                    (np.min(right_arr[liq_h2o]) - rho_fixed):
                # coacervate branch is closest to fixed density
                color = c_co
            else:
                # supernatant branch is closest to fixed density
                color = c_sup

            # plots fixed density as vertical dashed line
            ax.plot([rho_fixed, rho_fixed], ax.get_ylim(), '--', color=color,
                    lw=lw_sup)

    # determines units of density to display on plot
    if beads_2_M is not None:
        units_rho = 'mol/L'
    else:
        units_rho = 'beads/sigma^3'

    # formats plot
    format_binodal(ax, x_label, units_rho, T_range, x_lim=x_lim,
                   T_cels=T_cels, square_box=square_box, show_lgnd=show_lgnd)

    return ax
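# Hedged usage sketch for binodal_custom_rho (values are illustrative
# assumptions): plots supernatant and coacervate branches for two overall
# compositions on one set of axes.
#
# ax = binodal_custom_rho(data, rho_p_list=[0.1, 0.3],
#                         rho_s_list=[1.8, 1.8], beads_2_M=beads_2_M,
#                         x_var='polyelectrolyte', x_label=r'$\rho_p$',
#                         T_cels=True, plot_fixed_rho=True)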
""" # variables defining order of plotted objects back = 0 front = 10 # lists symbols for plotting overall composition sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x'] # creates list of colors for each value of rho_p if colors is None: colors = get_colors(cmap_name, len(lB_list)) # determines units if beads_2_M != 1: units_rho = 'mol/L' else: units_rho = r'beads/$\sigma^3$' # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) # loops through each temperature / Bjerrum length in data for i, lB in enumerate(lB_list): df = data[lB] # loads binodal data for supernatant (I) and coacervate (II) # doubles polycation concentration to include polyanion in polymer # concentration ion_I_list = list(beads_2_M*df['rhoCI']) ion_II_list = list(beads_2_M*df['rhoCII']) polymer_I_list = list(2*beads_2_M*df['rhoPCI']) polymer_II_list = list(2*beads_2_M*df['rhoPCII']) # critical points polymer_c = polymer_I_list[-1] ion_c = ion_I_list[-1] # computes temperature T = pe.lB_2_T(lB, sigma=sigma) if T_cels: T_unit = r'$^{\circ}$C' T -= K_2_C else: T_unit = ' K' # plots tie lines and overall composition for j, rho_pair in enumerate(zip(rho_p_list, rho_s_list)): rho_p, rho_s = rho_pair results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M) rho_PCI_list = results['rho_PCI'] rho_PCII_list = results['rho_PCII'] rho_CI_list = results['rho_CI'] rho_CII_list = results['rho_CII'] lB_arr = np.asarray(results['lB']) alpha = results['alpha'] # converts to arrays of polymer and salt concentrations rho_p_I = 2*np.asarray(rho_PCI_list) rho_s_I = np.asarray(rho_CI_list) rho_p_II = 2*np.asarray(rho_PCII_list) rho_s_II = np.asarray(rho_CII_list) # continues if no T in range has 2 phases for concentration # finds closest match given Bjerrum length try: i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0] except: print('lB = {0:.3f} gives 1 phase for'.format(lB) + \ ' rho_p = {0:.3f} [{1:s}],'.format(rho_p, units_rho) + \ 'rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho)) continue # tie line if show_tie_line: ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]], [rho_s_I[i_tie], rho_s_II[i_tie]], '--', color='k', lw=lw, zorder=back) # supernatant ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i], marker='o', fillstyle='none', zorder=front) # coacervate ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i], marker='o', fillstyle='none', zorder=front) # plots overall composition last time through if i == len(lB_list)-1: short = {'mol/L' : 'M', 'beads/sigma^3' : r'$\sigma^{-3}$'} if sym_list[j] == '*': ms_boost = 4 else: ms_boost = 0 # if provided, can specify marker face color if colors_symbols is not None: mfc = colors_symbols[j] else: mfc = 'w' # plots symbol representing composition ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor=mfc, ms=ms+ms_boost, markeredgecolor='k', markeredgewidth=mew, lw=0, label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p, short[units_rho]) + r', $\rho_s = $ ' + \ '{0:.2f} {1:s}'.format(rho_s, short[units_rho]), zorder=front) # plots binodal, flipping coacervate order to be in order label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit) if show_lB: label += r', $l_B = $ ' + '{0:.3f}'.format(lB) ax.plot(polymer_I_list + polymer_II_list[::-1], ion_I_list + ion_II_list[::-1], color=colors[i], lw=lw, label=label, zorder=back) # plots critical point ax.plot(polymer_c, ion_c, marker='o', fillstyle='full', color=colors[i], zorder=front) # formats plot x_label = r'$\rho_p$' y_label = r'$\rho_s$ [' + units_rho + ']' # determines component with varied concentration 
name_pair = ['Polymer', 'Salt'] format_binodal(ax, x_label, units_rho, T_range, y_label=y_label, x_lim=x_lim, y_lim=y_lim, lgnd_out=lgnd_out, square_box=square_box, show_lgnd=show_lgnd) return ax def binodal_line_3d(data, mode='lines', ms=8, op=0.1, c1='black', c2='black', lw=8, fig=None): """Plots line binodal in 3d plot.""" x1, y1, z1, x2, y2, z2 = data fig = line_3d(x1, y1, z1, mode=mode, ms=ms, op=op, c=c1, lw=lw, fig=fig) fig = line_3d(x2, y2, z2, mode=mode, ms=ms, op=op, c=c2, lw=lw, fig=fig) return fig def binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list, color_list, T_range, sigma, z_name, beads_2_M, lB_list, lB_color_list, T_cels=False, marker='o', show_lB=False, fill_left='none', fill_right='full', lw_sup=1, lw_co=3, lw_lB=2, naming_structure='NA(100)NB(100)*', ext='PD', figsize=None, vertical=True): """ Computes binodal projected onto three different planes (polymer-temperature, salt-temperature, and polymer-salt) at fixed concentration of salt in a saltwater reservoir. show_lB : bool, optional If True, will show Bjerrum length in legend """ ### Formats Figure # creates figure to plot the three 2D projections in a single row if figsize is None: fig = plt.figure() else: fig = plt.figure(figsize=figsize) ### Creates Axes if vertical: h = 3 # 3 plots high w = 1 # 1 plot wide else: h = 1 # 1 plot high w = 3 # 3 plots wide # polymer-T projection ax_pT = fig.add_subplot(h, w, 1) # salt-T projection ax_sT = fig.add_subplot(h, w, 2) # polymer-salt projection ax_ps = fig.add_subplot(h, w, 3) # computes binodal at different saltwater reservoir concentrations # and plots on each of the three projections for rho_salt_M, color in zip(rho_salt_M_list, color_list): # converts mol/L to beads/sigma^3 rho_salt = rho_salt_M / beads_2_M # makes dataframe of binodal for fixed salt reservoir concentration df_mu = nacl.make_df_mu(data, mu_salt_folder, rho_salt, T_range, sigma, naming_structure=naming_structure, ext=ext) rho_p_I, rho_s_I, T_arr, rho_p_II, rho_s_II, _ = nacl.extract_df_mu_data(df_mu, z_name) # computes temperature and identifies data within range liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1]) # converts temperature from Kelvin to Celsius if T_cels: T_arr -= K_2_C # creates labels label_sup = r'$\rho_s^{res} = $' + '{0:.2f} M, supernatant'.format(rho_salt_M) label_co = r'$\rho_s^{res} = $' + '{0:.2f} M, coacervate'.format(rho_salt_M) # polymer-T projection ax_pT.plot(rho_p_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker, fillstyle=fill_left, label=label_sup, lw=lw_sup) ax_pT.plot(rho_p_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker, fillstyle=fill_right, label=label_co, lw=lw_co) # salt-T projection ax_sT.plot(rho_s_I[liq_h2o], T_arr[liq_h2o], color=color, marker=marker, fillstyle=fill_left, label=label_sup, lw=lw_sup) ax_sT.plot(rho_s_II[liq_h2o], T_arr[liq_h2o], color=color, marker=marker, fillstyle=fill_right, label=label_co, lw=lw_co) # polymer-salt projection ax_ps.plot(rho_p_I[liq_h2o], rho_s_I[liq_h2o], color=color, label=label_sup, lw=lw_sup, zorder=10) ax_ps.plot(rho_p_II[liq_h2o], rho_s_II[liq_h2o], color=color, label=label_co, lw=lw_co, zorder=10) # plots isothermal binodal slices in polymer-salt plane for lB, lB_color in zip(lB_list, lB_color_list): df = data[lB] T = pe.lB_2_T(lB, sigma=sigma) # loads binodal data for supernatant (I) and coacervate (II) # doubles polycation concentration to include polyanion in polymer # concentration ion_I_list = list(beads_2_M*df['rhoCI']) ion_II_list = list(beads_2_M*df['rhoCII']) 
        polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
        polymer_II_list = list(2*beads_2_M*df['rhoPCII'])

        # critical points
        polymer_c = polymer_I_list[-1]
        ion_c = ion_I_list[-1]

        # units for temperature
        if T_cels:
            T_unit = r'$^{\circ}$C'
            T -= K_2_C
        else:
            T_unit = ' K'

        # plots binodal, flipping coacervate order to be in order
        label = r'$T = $' + '{0:d}{1:s} '.format(int(T), T_unit)
        if show_lB:
            label += r'$l_B = $ ' + '{0:.3f}'.format(lB)
        ax_ps.plot(polymer_I_list + polymer_II_list[::-1],
                   ion_I_list + ion_II_list[::-1], color=lB_color, lw=lw_lB,
                   label=label, zorder=0)

        # plots critical point
        ax_ps.plot(polymer_c, ion_c, marker='o', fillstyle='full',
                   color=lB_color)

    return fig, ax_pT, ax_sT, ax_ps


def binodal_rho_rho(data, lB_list, rho_var_list, rho_fix, ch_var, beads_2_M,
                    show_tie_line=True, cmap_name='plasma', colors=None,
                    sigma=None, title=None, marker='o', fill_left='none',
                    fill_right='full', lgnd_out=True, tol=1E-4, ms=10,
                    T_cels=False, show_lB=False, T_range=[273.15, 373.15],
                    lw=2, square_box=False, ax=None):
    """
    Plots the binodal as a function of salt density and polyelectrolyte
    density. Different Bjerrum lengths/temperatures are represented by
    different trend lines.

    Returns
    -------
    ax : matplotlib axes
        Axes on which the binodal is plotted.
    """
    # variables defining order of plotted objects
    back = 0
    front = 10

    # lists symbols for plotting overall composition
    sym_list = ['*', '^', 's', '<', '>', 'v', '+', 'x']

    # creates dictionary to order fixed and varied densities properly
    d = get_plot_dict_p_s(ch_var)

    # creates list of colors for each value of rho_p
    if colors is None:
        colors = get_colors(cmap_name, len(lB_list))

    # determines units
    if beads_2_M != 1:
        units_rho = 'mol/L'
    else:
        units_rho = r'beads/$\sigma^3$'

    # creates figure
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111)

    # loops through each temperature / Bjerrum length in data
    for i, lB in enumerate(lB_list):
        df = data[lB]

        # loads binodal data for supernatant (I) and coacervate (II)
        # doubles polycation concentration to include polyanion in polymer
        # concentration
        ion_I_list = list(beads_2_M*df['rhoCI'])
        ion_II_list = list(beads_2_M*df['rhoCII'])
        polymer_I_list = list(2*beads_2_M*df['rhoPCI'])
        polymer_II_list = list(2*beads_2_M*df['rhoPCII'])

        # critical points
        polymer_c = polymer_I_list[-1]
        ion_c = ion_I_list[-1]

        # computes temperature
        T = pe.lB_2_T(lB, sigma=sigma)
        if T_cels:
            T_unit = r'$^{\circ}$C'
            T -= K_2_C
        else:
            T_unit = ' K'

        # plots tie lines and overall composition
        for j, rho_var in enumerate(rho_var_list):
            rho_pair = np.array([rho_var, rho_fix])
            rho_p, rho_s = rho_pair[d['order']]
            results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
            rho_PCI_list = results['rho_PCI']
            rho_PCII_list = results['rho_PCII']
            rho_CI_list = results['rho_CI']
            rho_CII_list = results['rho_CII']
            lB_arr = np.asarray(results['lB'])
            alpha = results['alpha']

            # converts to arrays of polymer and salt concentrations
            rho_p_I = 2*np.asarray(rho_PCI_list)
            rho_s_I = np.asarray(rho_CI_list)
            rho_p_II = 2*np.asarray(rho_PCII_list)
            rho_s_II = np.asarray(rho_CII_list)

            # continues if no T in range has 2 phases for concentration
            # finds closest match given Bjerrum length
            try:
                i_tie = np.where(np.abs(lB_arr - lB) < tol)[0][0]
            except:
                print('lB = {0:.3f} gives 1 phase for'.format(lB) + \
                      ' rho_p = {0:.3f} [{1:s}],'.format(rho_p, units_rho) + \
                      ' rho_s = {0:.3f} [{1:s}].'.format(rho_s, units_rho))
                continue

            # tie line
            if show_tie_line:
                ax.plot([rho_p_I[i_tie], rho_p_II[i_tie]],
                        [rho_s_I[i_tie], rho_s_II[i_tie]], '--', color='k',
                        lw=lw, zorder=back)
            # supernatant
            ax.plot(rho_p_I[i_tie], rho_s_I[i_tie], color=colors[i],
                    marker='o', fillstyle='none', zorder=front)
            # coacervate
            ax.plot(rho_p_II[i_tie], rho_s_II[i_tie], color=colors[i],
                    marker='o', fillstyle='none', zorder=front)

            # plots overall composition last time through
            if i == len(lB_list)-1:
                short = {'mol/L' : 'M', 'beads/sigma^3' : r'$\sigma^{-3}$'}
                if sym_list[j] == '*':
                    ms_boost = 4
                else:
                    ms_boost = 0
                ax.plot(rho_p, rho_s, marker=sym_list[j], markerfacecolor='w',
                        ms=ms+ms_boost, markeredgecolor='k',
                        markeredgewidth=1.5, lw=0,
                        label=r'$\rho_p = $ ' + '{0:.2f} {1:s}'.format(rho_p,
                        short[units_rho]) + r', $\rho_s = $ ' + \
                        '{0:.2f} {1:s}'.format(rho_s, short[units_rho]),
                        zorder=front)

        # plots binodal, flipping coacervate order to be in order
        label = r'$T = $' + '{0:d}{1:s}'.format(int(T), T_unit)
        if show_lB:
            label += r', $l_B = $ ' + '{0:.3f}'.format(lB)
        ax.plot(polymer_I_list + polymer_II_list[::-1],
                ion_I_list + ion_II_list[::-1], color=colors[i], lw=lw,
                label=label, zorder=front)

        # plots critical point
        ax.plot(polymer_c, ion_c, marker='o', fillstyle='full',
                color=colors[i], zorder=front)

    # formats plot
    x_label = r'$\rho_p$'
    y_label = r'$\rho_s$ [' + units_rho + ']'

    # determines component with varied concentration
    name_pair = ['Polymer', 'Salt']
    name_var = name_pair[d['order'][0]]
    if title is None:
        title = 'Vary Overall {0:s} Concentration'.format(name_var)

    format_binodal(ax, x_label, units_rho, T_range, y_label=y_label,
                   title=title, lgnd_out=lgnd_out, square_box=square_box)

    return ax


def binodal_surf_3d(data, mode='markers', ms=4, op=0.01, c1='blue', c2='red',
                    lw=0, fig=None):
    """Plots surface binodal in 3d."""
    x1, y1, z1, x2, y2, z2 = data
    if fig == None:
        fig = go.Figure()

    # plots phase I (supernatant) of full binodal
    fig = fig.add_trace(go.Scatter3d(
        x=x1,
        y=y1,
        z=z1,
        mode=mode,
        marker=dict(
            size=ms,
            opacity=op,
            color=c1
        ),
        line=dict(
            color=c1,
            width=lw,
        ),
    ))

    # plots phase II (coacervate) of full binodal
    fig.add_trace(go.Scatter3d(
        x=x2,
        y=y2,
        z=z2,
        mode=mode,
        marker=dict(
            size=ms,
            opacity=op,
            color=c2
        ),
        line=dict(
            color=c2,
            width=lw,
        ),
    ))

    return fig


def binodal_surf_3d_batch(data_3d, op, ms, lw, mode, fig=None, skip=[]):
    """
    Plots batch of data for a 3d surface binodal.
    """
    # extracts data
    x1_coll, y1_coll, z1_coll, x2_coll, y2_coll, z2_coll = data_3d
    z_arr = np.unique(z1_coll)

    # plots data at each z value
    for (i, z) in enumerate(z_arr):
        # skips indices requested
        if i in skip:
            continue

        # extracts data corresponding to current z value (T or lB)
        x1 = x1_coll[z1_coll==z]
        y1 = y1_coll[z1_coll==z]
        z1 = z1_coll[z1_coll==z]
        x2 = x2_coll[z2_coll==z]
        y2 = y2_coll[z2_coll==z]
        z2 = z2_coll[z2_coll==z]

        # plots data on 3D plot
        fig = binodal_surf_3d((x1, y1, z1, x2, y2, z2), op=op, ms=ms, lw=lw,
                              mode=mode, fig=fig)

    return fig


def binodal_vary_conc(mu_salt_folder, data, rho_salt_list, beads_2_M, qty,
                      x_var='polycation', x_label=r'$\rho_{PSS}$',
                      sigma=None, T_range=[273,373], cmap_name='plasma',
                      colors=None, marker='o', fill_left='none',
                      fill_right='full', lgnd_out=True):
    """
    LEGACY

    Plots the binodal for different average densities of polymer.

    qty : string
        The quantity from df to return. Options include 'rhoPC', 'rhoPA',
        'rhoC', and 'rhoA'. 
""" # creates list of colors for each value of rho_p if colors is None: colors = get_colors(cmap_name, len(rho_salt_list)) # creates figure fig = plt.figure() ax = fig.add_subplot(111) for i, rho_salt in enumerate(rho_salt_list): # plots binodal for low polymer concentration [M] mu_conc = nacl.get_mu_conc(mu_salt_folder, data, rho_salt, beads_2_M=beads_2_M) try: lB_arr, rho_PCI_list, rho_PCII_list = nacl.fixed_conc(mu_conc, data, qty, beads_2_M=beads_2_M) except: continue # selects the x-axis data left_arr = np.array(rho_PCI_list) right_arr = np.array(rho_PCII_list) # computes temperature and identifies data within range T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma) liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1]) # determines units if beads_2_M is not None: units_rho = '[mol/L]' else: units_rho = '[beads/sigma^3]' # left binodal ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=colors[i], marker=marker, fillstyle=fill_left, label=r'$\rho_{salt} = $' + '{0:.2f} {1:s}, supernatant' \ .format(rho_salt, units_rho)) # right binodal ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=colors[i], marker=marker, fillstyle=fill_right, label=r'$\rho_{salt} = $' + \ '{0:.2f} {1:s}, coacervate'.format(rho_salt, units_rho)) # formats plot ax.set_ylim(T_range) ax.set_xlabel(x_label + ' ' + units_rho, fontsize=16) ax.set_ylabel(r'$T$ [K]', fontsize=16) ax.tick_params(axis='both', labelsize=14) ax.set_title('Effect of Salt Reservoir on Binodal', fontsize=16) # put legend outside of plot box if lgnd_out: box = ax.get_position() ax.set_position([box.x0, box.y0, box.width, box.height]) legend_x = 1 legend_y = 0.5 plt.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y), fontsize=12) else: plt.legend(fontsize=12) return ax def binodal_vary_f(data, f_list, color_list, T_cels=True, x_label=r'$\rho_p$', units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4, square_box=True, show_lgnd=False, ax=None): """ Plots binodal projected onto coordinate plane for different charge fractions f. """ # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) for f, color in zip(f_list, color_list): # creates labels label_sup = r'$f$ =' + ' {0:.2f} supernatant'.format(f) label_co = r'$f$ =' + ' {0:.2f} coacervate'.format(f) # extracts data T_arr, rho_p_I, rho_p_II = data[f] # polymer-T projection ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1) ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2) # formats plot format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels, square_box=square_box, show_lgnd=show_lgnd) return ax def binodal_vary_N(data, N_list, color_list, T_cels=True, x_label=r'$\rho_p$', units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4, square_box=True, show_lgnd=False, ax=None): """ Plots binodal projected onto coordinate plane for different degrees of polymerization N. 
""" # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(131) for N, color in zip(N_list, color_list): # extracts data for given N T_arr, rho_p_I, rho_p_II = data[N] # creates labels label_sup = r'$N$ =' + ' {0:d} supernatant'.format(N) label_co = r'$N$ =' + ' {0:d} coacervate'.format(N) # polymer-T projection ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1) ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2) # formats plot format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels, square_box=square_box, show_lgnd=show_lgnd) return ax def binodal_vary_sigma(data, sigma_list, color_list, T_cels=True, x_label=r'$\rho_p$', units_rho='M', T_range=[273.15, 373.15], lw1=1, lw2=4, square_box=True, show_lgnd=False, x_lim=None, ax=None): """ Plots binodal projected onto coordinate plane for different charge fractions f. """ # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) for sigma, color in zip(sigma_list, color_list): # creates labels label_sup = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ supernatant' label_co = r'$\sigma$ =' + ' {0:.1f}'.format(sigma*m_2_A) + r' $\AA$ coacervate' # extracts data T_arr, rho_p_I, rho_p_II = data[sigma] # polymer-T projection ax.plot(rho_p_I, T_arr, color=color, label=label_sup, lw=lw1) ax.plot(rho_p_II, T_arr, color=color, label=label_co, lw=lw2) # formats plot format_binodal(ax, x_label, units_rho, T_range, T_cels=T_cels, x_lim=x_lim, square_box=square_box, show_lgnd=show_lgnd) return ax def binodal_vary_rho(data, rho_var_list, rho_fix, ch_var, beads_2_M, x_var='polycation', x_label=r'$\rho_{PSS}$', sigma=None, T_range=[273.15,373.15], cmap_name='plasma', colors=None, marker='o', fill_left='none', fill_right='full', lw_sup=1, lw_co=3, lgnd_out=True, lw=1, x_lim=None, T_cels=False, title=None, c_sup='#1414FF', c_co='#FF0000', ls_sup='-', square_box=False, ax=None): """ Plots the binodal for different average densities of polymer. 
If T_cels is True, converts the temperature from Kelvin to Celsius """ # creates dictionary of values based on which component's density is varied d = get_plot_dict_p_s(ch_var) # creates list of colors for each value of rho_p if colors is None: if cmap_name is not None: colors = get_colors(cmap_name, len(rho_var_list)) # creates figure if ax is None: fig = plt.figure() ax = fig.add_subplot(111) ### Plots figure for i, rho_var in enumerate(rho_var_list): # plots binodal for low polymer concentration [M] rho_pair = np.array([rho_var, rho_fix]) rho_p, rho_s = rho_pair[d['order']] results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M) rho_PCI_list = results['rho_PCI'] rho_PCII_list = results['rho_PCII'] rho_CI_list = results['rho_CI'] rho_CII_list = results['rho_CII'] rho_PAI_list = results['rho_PAI'] rho_PAII_list = results['rho_PAII'] rho_AI_list = results['rho_AI'] rho_AII_list = results['rho_AII'] lB_arr = results['lB'] alpha = results['alpha'] # selects the x-axis data if x_var == 'polycation': left_arr = np.array(rho_PCI_list) right_arr = np.array(rho_PCII_list) elif x_var == 'polyanion': left_arr = np.array(rho_PAI_list) right_arr = np.array(rho_PAII_list) elif x_var == 'cation': left_arr = np.array(rho_CI_list) right_arr = np.array(rho_CII_list) elif x_var == 'anion': left_arr = np.array(rho_AI_list) right_arr = np.array(rho_AII_list) elif x_var == 'solvent': left_arr = pe.calc_rho_solv(rho_PCI_list, rho_CI_list, beads_2_M) right_arr = pe.calc_rho_solv(rho_PCII_list, rho_CII_list, beads_2_M) elif x_var == 'polyelectrolyte': left_arr = np.array(rho_PCI_list) + np.array(rho_PAI_list) right_arr = np.array(rho_PCII_list) + np.array(rho_PAII_list) elif x_var == 'salt': left_arr = np.array(rho_CI_list) right_arr = np.array(rho_CII_list) else: print('Error. Invalid x variable in plot.binodal_vary_rho().') # computes temperature and identifies data within range T_arr = pe.lB_2_T_arr(lB_arr, T_range, sigma=sigma) liq_h2o = np.logical_and(T_arr >= T_range[0], T_arr <= T_range[1]) # converts temperature from Kelvin to Celsius if T_cels: T_arr -= K_2_C # assigns separate colors to coacervate and supernatant if not specified if colors is not None: c_sup = colors[i] c_co = colors[i] # supernatant ax.plot(left_arr[liq_h2o], T_arr[liq_h2o], color=c_sup, marker=marker, fillstyle=fill_left, ls=ls_sup, label=r'$\rho_' + d['ch_var'] + ' = $' + '{0:.2f} M, supernatant' \ .format(rho_var), lw=lw_sup) # coacervate ax.plot(right_arr[liq_h2o], T_arr[liq_h2o], color=c_co, marker=marker, fillstyle=fill_right, label=r'$\rho_' + d['ch_var'] + ' = $' + \ '{0:.2f} M, coacervate'.format(rho_var), lw=lw_co) # determines units of density to display on plot if beads_2_M is not None: units_rho = 'mol/L' else: units_rho = 'beads/sigma^3' # formats plot if title is None: title = 'Effect of {0:s} on Binodal, {1:s} = {2:.2f} M' \ .format(d['name_var'], r'$\rho_' + d['ch_fix'] + '$', rho_fix) format_binodal(ax, x_label, units_rho, T_range, title=title, x_lim=x_lim, T_cels=T_cels, square_box=square_box) return ax def fig4(data_pred, df_exp, rho_s_raw_list, rho_p_raw, sigma, T_range, lw=3, c_sup='#1414FF', c_co='#FF0000', ms=11, mfc='w', mew=1.5, x_lim=None, x_label=r'$\rho_{PSS}$', conv_vals=False, tol=1E-6, show_lgnd=False, figsize=None, pad=3, vertical=False, plot_errorbars=False): """ Validates fit of sigma to experiments. 
""" # computes conversion from beads/sigma^3 to mol/L beads_2_M = pe.get_beads_2_M(sigma, SI=True) # creates figure if figsize is None: fig = plt.figure() else: fig = plt.figure(figsize=figsize) # determines arrangement of subplots if vertical: h = len(rho_s_raw_list) # many plots high w = 1 # 1 plot wide else: h = 1 # 1 plot high w = len(rho_s_raw_list) # many plots wide # Plots figure for i, rho_s_raw in enumerate(rho_s_raw_list): if conv_vals: rho_p, rho_s = nacl.conv_ali_conc(df_exp, rho_p_raw, rho_s_raw) # creates subplot ax = fig.add_subplot(h, w, i+1) # polymer-temperature plane ax = binodal_custom_rho(data_pred, [rho_p], [rho_s], beads_2_M, x_var='polycation', x_label=x_label, x_lim=x_lim, sigma=sigma, T_range=T_range, marker='', lw=lw, lw_sup=lw, lw_co=lw, colors=None, cmap_name=None, T_cels=True, c_sup=c_sup, c_co=c_co, ls_sup='--', square_box=True, show_lgnd=show_lgnd, ax=ax) # plots experimental results for i in range(len(df_exp)): rho_p_exp, rho_s_exp, T_exp, \ rho_p_sup, rho_p_co, s_rho_p_sup, \ s_rho_p_co = nacl.read_df_exp(df_exp, i, conv_vals=conv_vals, read_sigma=plot_errorbars) if (rho_p_exp == rho_p) and (rho_s_exp == rho_s): # plots supernatant and coacervate compositions rho_pss_sup = rho_p_sup/2 rho_pss_co = rho_p_co/2 if plot_errorbars: s_rho_pss_sup = s_rho_p_sup/2 s_rho_pss_co = s_rho_p_co/2 ax.errorbar(rho_pss_sup, T_exp, xerr=s_rho_pss_sup, lw=0, marker='o', ms=ms, markerfacecolor=mfc, markeredgewidth=mew, elinewidth=1, markeredgecolor=c_sup, label='Ali et al. (2019), supernatant') ax.errorbar(rho_pss_co, T_exp, xerr=s_rho_pss_co, lw=0, marker='o', ms=ms, markerfacecolor=c_co, markeredgewidth=mew, elinewidth=1, markeredgecolor=c_co, label='Ali et al. (2019), coacervate') else: ax.plot(rho_pss_sup, T_exp, lw=0, marker='o', ms=ms, markerfacecolor=mfc, markeredgewidth=mew, markeredgecolor=c_sup, label='Ali et al. (2019), supernatant') ax.plot(rho_pss_co, T_exp, lw=0, marker='o', ms=ms, markerfacecolor=c_co, markeredgewidth=mew, markeredgecolor=c_co, label='Ali et al. 
def figs3(data_folder_N, data_folder_f, data_folder_sigma, mu_salt_folder_N,
          mu_salt_folder_f, mu_salt_folder_sigma, rho_s_M_N, rho_s_M_f,
          rho_s_M_sigma, ext_N, ext_f, ext_sigma, N_list, f_list, sigma_list,
          color_list_N, color_list_f, color_list_sigma, sigma_fixed,
          x_lim_sigma=[0,6], figsize=None, pad=3,
          naming_structure_sigma='NA(100)NB(100)lB(*)', lB_lo=1.3,
          lB_hi=2.398):
    """Plots Figure S3 of SI showing effects of N, f, and sigma on binodal
    projections in polymer-temperature plane."""
    # creates figure
    if figsize is None:
        fig = plt.figure()
    else:
        fig = plt.figure(figsize=figsize)

    ### Effect of varying N
    print('loading N data')

    # adds subplot
    axN = fig.add_subplot(131)

    # extracts data
    data_vary_N = nacl.binodal_vary_N_data(data_folder_N, mu_salt_folder_N,
                                           rho_s_M_N, N_list,
                                           sigma=sigma_fixed, ext=ext_N)

    # plots data
    print('plotting N data')
    _ = binodal_vary_N(data_vary_N, N_list, color_list_N, ax=axN)

    ### Effect of varying charge fraction f
    # adds subplot
    axf = fig.add_subplot(132)

    # extracts data
    print('loading f data')
    data_vary_f = nacl.binodal_vary_f_data(data_folder_f, mu_salt_folder_f,
                                           rho_s_M_f, f_list,
                                           sigma=sigma_fixed, ext=ext_f)

    # plots data
    print('plotting f data')
    _ = binodal_vary_f(data_vary_f, f_list, color_list_f, ax=axf)

    ### Effect of varying sigma
    axsigma = fig.add_subplot(133)

    # loads all data
    print('loading sigma data')
    data = nacl.load_data(data_folder_sigma, ext=ext_sigma,
                          naming_structure=naming_structure_sigma,
                          lB_lo=lB_lo, lB_hi=lB_hi)

    # extracts relevant data
    data_vary_sigma = nacl.binodal_vary_sigma_data(data, mu_salt_folder_sigma,
                                                   rho_s_M_sigma, sigma_list,
                                                   ext=ext_sigma)

    # plots data
    print('plotting sigma data')
    _ = binodal_vary_sigma(data_vary_sigma, sigma_list, color_list_sigma,
                           ax=axsigma, x_lim=x_lim_sigma)

    # pads subplots with whitespace
    fig.tight_layout(pad=pad)

    return fig


def compare_to_exp(data, beads_2_M, rho_p_list=[0.3],
                   rho_s_list=[1.6, 1.85, 1.9], N=100, f=1, sigma=4, t_fs=12,
                   T_range=[273.15, 323.15]):
    """
    Compares predictions from data to the experiment in the Prabhu group.
    """
    # sets x and y axis limits
    x_lim = (-0.05, 1.3) # [mol/L]
    y_lim = (0, 60) # [C]

    # sets temperature range
    T_range = [273, 333]

    for rho_s in rho_s_list:
        for rho_p in rho_p_list:
            # computes polycation concentrations at different temperatures
            # for fixed polymer and salt [mol/L]
            results = nacl.fixed_rho_total(data, rho_p, rho_s, beads_2_M)
            rho_PCI_list = results['rho_PCI']
            rho_PCII_list = results['rho_PCII']
            lB_arr = results['lB']

            # plots binodal
            title = '{0:.2f} M Salt, {1:.2f} M Polymer, N = {2:d}, f = {3:.2f}, sig = {4:.2f} A'.format(rho_s, rho_p, N, f, sigma)
            p = binodal(lB_arr, rho_PCI_list, rho_PCII_list, title=title,
                        beads_2_M=1, n_tie_lines=0, deg_C=True,
                        T_range=T_range, x_lim=x_lim, y_lim=y_lim,
                        marker=False, line=True)
            p.title.text_font_size = '{0:d}pt'.format(t_fs)
            show(p)

    return


def crit_line_3d(data_cp, c_crit, lw_crit, fig):
    """
    Plots critical line in 3D, typically for 3D surface binodal plot.
    LEGACY
    """
    polymer_c_list, salt_c_list, z_arr = data_cp
    fig.add_trace(go.Scatter3d(
        x=polymer_c_list,
        y=salt_c_list,
        z=z_arr,
        mode='lines',
        line=dict(
            color=c_crit,
            width=lw_crit,
        ),
    ),
    )

    return fig


def fig1(data_3d, data_cp, data_z, data_mu, plot_params, fixed_T=True,
         fixed_salt=True, crit_line=True, fixed_comp=False, data_comp=None,
         data_outlines=None, skip=[], plot_axes=True,
         outline_scale_factor=1.02, toc_fig=False, has_ucst=False,
         show_labels=True):
    """
    Plots Figure 1 from CCLS paper: 3d surface binodal, fixed T 2d line
    binodal, fixed salt reservoir concentration 2d line binodal, and
    critical line.
    """
    # if Table of Contents (TOC) figure, removes all but LCST
    if toc_fig:
        fixed_salt = True
        crit_line = True
        fixed_comp = False
        fixed_T = False

    x_range, y_range, z_range, eye_xyz, op, ms_bin, lw_bin, \
    lw_fix, lw_crit, lw_outline, c1_T, c2_T, c1_fix, c2_fix, \
    c_crit, c_outline, mode, width, height, fs, offset = plot_params
    x, y, z = eye_xyz

    # plots 3d surface binodal
    fig = binodal_surf_3d_batch(data_3d, op, ms_bin, lw_bin, mode, skip=skip)

    if crit_line:
        # plots critical line
        fig = line_3d(*data_cp, c=c_crit, lw=lw_crit, fig=fig)

    if fixed_T:
        # plots binodal at fixed z value (temperature or Bjerrum length)
        fig = binodal_line_3d(data_z, fig=fig, lw=lw_fix, c1=c1_T, c2=c2_T)

    if fixed_salt:
        ### FIXED SALT CONCENTRATION ###
        # if there is a UCST, split the binodal in two
        if has_ucst:
            # identifies threshold between UCST and LCST by largest gap in z
            z1 = data_mu[2]
            z1_diff = np.diff(z1)
            i_thresh = np.argmax(z1_diff)
            thresh_ucst = (z1[i_thresh] + z1[i_thresh+1])/2

            # splits data below UCST and above LCST
            ucst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for
                                   x1, y1, z1, x2, y2, z2 in zip(*data_mu)
                                   if z1 < thresh_ucst]))
            lcst_data = list(zip(*[(x1, y1, z1, x2, y2, z2) for
                                   x1, y1, z1, x2, y2, z2 in zip(*data_mu)
                                   if z1 > thresh_ucst]))

            # plots UCST and LCST data separately
            fig = binodal_line_3d(ucst_data, fig=fig, lw=lw_fix, c1=c1_fix,
                                  c2=c2_fix)
            fig = binodal_line_3d(lcst_data, fig=fig, lw=lw_fix, c1=c1_fix,
                                  c2=c2_fix)
        else:
            # plots data for fixed saltwater reservoir concentration
            fig = binodal_line_3d(data_mu, fig=fig, lw=lw_fix, c1=c1_fix,
                                  c2=c2_fix)

    if fixed_comp:
        # plots binodal at fixed overall salt, polymer concentration
#         fig = binodal_line_3d(data_comp, fig=fig, lw=lw_fix, c1=c1_fix, c2=c2_fix)

        # plots outlines of the surface for definition
        if data_outlines is not None:
            for data_outline in data_outlines:
                data_outline_scaled = []
                for coord in data_outline:
                    coord = outline_scale_factor*np.asarray(coord)
                    data_outline_scaled += [coord]
                fig = binodal_line_3d(data_outline_scaled, c1=c_outline,
                                      c2=c_outline, fig=fig)

    if plot_axes:
        # x-axis
        fig = line_3d(x_range, [offset, offset],
                      [z_range[0] + offset, z_range[0] + offset], lw=12,
                      c=c_outline, fig=fig)
        # y-axis
        fig = line_3d([offset, offset], y_range,
                      [z_range[0] + offset, z_range[0] + offset], lw=12,
                      c=c_outline, fig=fig)
        # z-axis
        fig = line_3d([offset, offset], [offset, offset], z_range,
                      c=c_outline, lw=12, fig=fig)

    ### FORMATS FIGURE ###
    fig.update_layout(
        scene = dict(xaxis = dict(range=x_range,),
                     yaxis = dict(range=y_range,),
                     zaxis = dict(range=z_range,),
                     ),
        width = width,
        height = height,
        # changes initial view of figure
        scene_camera = dict(
            eye=dict(x=x, y=y, z=z),
#             center=dict(x=0, y=0.3, z=0.3),
#             up=dict(x=0, y=0, z=1)
        ),
        font = dict(
            family='Arial',
            color='black',
            size=fs)
    )

    ### Cleanup
    # removes legend (too crowded to be of use)
    fig.update_layout(showlegend=False)

    # removes tick labels and axis titles (so I can add them myself)
    if not 
show_labels: fig.update_layout( scene = dict(xaxis = dict(showticklabels=False, title=''), yaxis = dict(showticklabels=False, title=''), zaxis = dict(showticklabels=False, title='', tickmode = 'linear', tick0 = 0, dtick = 50), ), ) return fig def fig2a(rho_salt_M_list_list, data, mu_salt_folder, color_list, T_range, sigma, z_name, beads_2_M, lB_list, lB_color_list, pad, kwargs, units_rho='mol/L', show_lgnd=False, y_lim_T=(0, 100), rho_p_label=r'$\rho_p$', rho_s_label=r'$\rho_s$', y_lim_s=[0, 2.25]): """Plots Figure 2a of binodal projections at different saltwater concentrations.""" for rho_salt_M_list in rho_salt_M_list_list: # plots binodal projections fig, ax_pT, ax_sT, \ ax_ps = binodal_proj_fixed_conc(data, mu_salt_folder, rho_salt_M_list, color_list, T_range, sigma, z_name, beads_2_M, lB_list, lB_color_list, **kwargs) # formats plots ax_pT = format_binodal(ax_pT, rho_p_label, units_rho, T_range, T_cels=kwargs['T_cels'], y_lim=y_lim_T, show_lgnd=show_lgnd) ax_sT = format_binodal(ax_sT, rho_s_label, units_rho, T_range, T_cels=kwargs['T_cels'], y_lim=y_lim_T, show_lgnd=show_lgnd) ax_ps = format_binodal(ax_ps, rho_p_label, units_rho, T_range, y_label=rho_s_label + ' [' + units_rho + ']', show_lgnd=show_lgnd, y_lim=y_lim_s) # pads plots with whitespace fig.tight_layout(pad=pad) return fig def fig2b(data, rho_p_list, rho_s_list, beads_2_M, lB_list, color_list, lB_color_list, kwargs, alpha_y_lim=(0.5,1.05), alpha_yticks=(0.5,0.75,1), figsize=None, pad=3, mew=0.5, show_lgnd=False): """Plots Figure 2b of binodal projections at different overall compositions.""" ### Formats Figure if figsize is None: fig = plt.figure() else: fig = plt.figure(figsize=figsize) ### polymer-temperature plane ### ax1 = fig.add_subplot(221) _ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M, x_var='polyelectrolyte', x_label=r'$\rho_p$', marker='', colors=color_list, plot_fixed_rho=True, ax=ax1, show_lgnd=show_lgnd, **kwargs) ### salt-temperature plane ### ax2 = fig.add_subplot(222) _ = binodal_custom_rho(data, rho_p_list, rho_s_list, beads_2_M, x_var='salt', x_label=r'$\rho_s$', marker='', colors=color_list, plot_fixed_rho=True, ax=ax2, show_lgnd=show_lgnd, **kwargs) ### polymer-salt plane ### ax3 = fig.add_subplot(223) _ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list, beads_2_M, colors=lB_color_list, mew=mew, ax=ax3, show_lgnd=show_lgnd, colors_symbols=color_list, **kwargs) ### volume fraction of supernatant vs. 
temperature ###
    ax4 = fig.add_subplot(224)
    _ = alpha_custom_rho(data, rho_p_list, rho_s_list, beads_2_M,
                         y_lim=alpha_y_lim, marker='', colors=color_list,
                         ax=ax4, show_lgnd=show_lgnd, **kwargs)

    # customizes tick mark locations
    ax4.set_yticks(alpha_yticks)

    # pads subplots with whitespace
    fig.tight_layout(pad=pad)

    return fig


def fig3(data, lB_list, rho_p_fixed, rho_s_fixed, rho_p_varied, rho_s_varied,
         beads_2_M, kwargs, figsize=None, pad=3, vertical=True):
    """Plots Figure 3 of tie lines in polymer-salt plane."""
    # formats figure
    if figsize is None:
        fig = plt.figure()
    else:
        fig = plt.figure(figsize=figsize)

    # determines arrangement of subplots
    if vertical:
        h = 2  # 2 plots high
        w = 1  # 1 plot wide
    else:
        h = 1  # 1 plot high
        w = 2  # 2 plots wide

    ################ VARIES SALT CONCENTRATION ###############
    # creates subplot
    ax1 = fig.add_subplot(h, w, 1)

    # plots binodal
    rho_p_list = rho_p_fixed*np.ones([len(rho_s_varied)])
    rho_s_list = rho_s_varied
    _ = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
                               beads_2_M, ax=ax1, show_lgnd=False, **kwargs)

    ############ VARIES POLYMER CONCENTRATION ####################
    # creates subplot
    ax2 = fig.add_subplot(h, w, 2)

    # plots binodal
    rho_p_list = rho_p_varied
    rho_s_list = rho_s_fixed*np.ones([len(rho_p_varied)])
    ax = binodal_custom_rho_rho(data, lB_list, rho_p_list, rho_s_list,
                                beads_2_M, ax=ax2, show_lgnd=False, **kwargs)

    # pads subplots with whitespace
    fig.tight_layout(pad=pad)

    return fig


def figs1(T_range, sigma, T_room_C=20, T_cels=True, figsize=(5,5),
          gridspec=10, lw=3, y_lim=[5.5,9.5], y_ticks=[6,7,8,9], d=0.5,
          ax_fs=16, tk_fs=16):
    """Plots Figure S1 of the SI of Bjerrum length vs. T for fixed and
    T-dependent dielectric constant."""
    # computes Bjerrum lengths
    T_arr, lB_A_arr, lB_0_A_arr = nacl.lB_comparison(T_range, sigma,
                                                     T_room_C=T_room_C)

    # creates figure
    fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize,
                                   gridspec_kw={'height_ratios': [gridspec,1]},
                                   sharex=True)

    # adjusts temperature based on requested unit
    if T_cels:
        T_arr -= K_2_C
        unit_T = r'$^{\circ}C$'
    else:
        unit_T = 'K'

    # plots Bjerrum lengths
    ax1.plot(T_arr, lB_A_arr, lw=lw, label=r'$\epsilon(T)$')
    ax1.plot(T_arr, lB_0_A_arr, lw=lw, label=r'$\epsilon(T) = \epsilon($' + \
             '{0:d}'.format(int(T_room_C)) + r'$^{\circ}C)$')

    # formats plot
    ax2.set_xlabel(r'$T$ [' + unit_T + ']', fontsize=ax_fs)
    ax1.set_ylabel(r'$l_B$ $[\AA]$', fontsize=ax_fs)
    ax1.tick_params(axis='both', labelsize=tk_fs)
    ax2.tick_params(axis='both', labelsize=tk_fs)

    ### Creates broken axis
    # see: https://matplotlib.org/stable/gallery/subplots_axes_and_figures/broken_axis.html
    # set limits and ticks on upper axis
    ax1.set_ylim(y_lim)
    ax1.set_yticks(y_ticks)

    # lower axis
    ax2.set_ylim([0, 0.5])
    ax2.set_yticks([0])

    # hide the spines between ax and ax2
    ax1.spines['bottom'].set_visible(False)
    ax2.spines['top'].set_visible(False)
    ax1.xaxis.tick_top()
    ax1.tick_params(top=False, labeltop=False)  # don't put ticks or labels at top
    ax2.xaxis.tick_bottom()

    # plots diagonal hatch marks on y-axis--"d" is ratio of height to length
    kwargs = dict(marker=[(-1, -d), (1, d)], markersize=12,
                  linestyle="none", color='k', mec='k', mew=1, clip_on=False)
    ax1.plot([0, 1], [0, 0], transform=ax1.transAxes, **kwargs)
    ax2.plot([0, 1], [1, 1], transform=ax2.transAxes, **kwargs)

    return fig


def format_binodal(ax, x_label, units_rho, T_range, y_label=None, title=None,
                   x_lim=None, y_lim=None, T_cels=False, lgnd_out=True,
                   square_box=True, show_lgnd=True):
    """
    Formats axes of a plot of the binodal projected onto a plane with
    temperature as the vertical axis.
    """
    if x_lim is not None:
        ax.set_xlim(x_lim)
    ax.set_xlabel('{0:s} [{1:s}]'.format(x_label, units_rho), fontsize=18)

    # assumes that the y axis is temperature if another label is not given
    if y_label is None:
        T_unit = 'K'
        if T_cels:
            T_unit = r'$^{\circ}$C'
            T_range = [T - K_2_C for T in T_range]
        if y_lim is None:
            ax.set_ylim(T_range)
        else:
            ax.set_ylim(y_lim)
        ax.set_ylabel(r'$T$' + ' [{0:s}]'.format(T_unit), fontsize=18)
    else:
        ax.set_ylabel(y_label, fontsize=18)
        ax.set_ylim(y_lim)

    ax.tick_params(axis='both', labelsize=16)
    if title is not None:
        ax.set_title(title, fontsize=16)

    # makes box of plot square
    if square_box:
        ax.set_aspect(np.diff(ax.get_xlim()) / np.diff(ax.get_ylim()))

    # places legend outside of plot box
    if show_lgnd:
        if lgnd_out:
            box = ax.get_position()
            ax.set_position([box.x0, box.y0, box.width, box.height])
            legend_x = 1
            legend_y = 0.5
            ax.legend(loc='center left', bbox_to_anchor=(legend_x, legend_y),
                      fontsize=14, frameon=False)
        else:
            ax.legend(fontsize=12, frameon=False)

    return ax


def get_colors(cmap_name, n):
    """Returns list of colors using given colormap."""
    cmap = plt.get_cmap(cmap_name)
    return [cmap(val) for val in np.linspace(0, 1, n)]


def get_lgnd_labels(handles, labels, key):
    """Returns zipped handles and labels for which labels contains key."""
    return [pair for pair in zip(handles, labels) if key in pair[1]]


def get_plot_dict_p_s(ch_var):
    """Returns a dictionary of key parameters for plotting based on varied
    component."""
    d = {}
    # polyelectrolyte density varied
    if ch_var == 'p':
        d = {'ch_var':'p', 'ch_fix':'s', 'order':[0,1], 'name_var':'Polymer'}
    # salt density varied
    elif ch_var == 's':
        d = {'ch_var':'s', 'ch_fix':'p', 'order':[1,0], 'name_var':'Salt'}
    else:
        print('invalid ch_var character: choose s or p.')

    return d


def line_3d(x, y, z, mode='lines', ms=8, op=0.1, c='black', lw=8, fig=None):
    """
    Plots line in 3D plot (plotly).
    """
    if fig is None:
        fig = go.Figure()

    # plots phase I (supernatant) of fixed salt binodal
    fig.add_trace(go.Scatter3d(
        x=x, y=y, z=z,
        mode=mode,
        marker=dict(
            size=ms,
            opacity=op,
            color=c
        ),
        line=dict(
            color=c,
            width=lw,
        ),
    ))

    return fig


def no_salt(df, n_plot, left='rhoPCI', right='rhoPCII',
            x_label='polycation density', p=None, n_tie_lines=0, plot_T=False,
            title='', line=False, marker=True, w=500, h=500,
            units_rho='[beads/sigma^3]', deg_C=False, leg1='supernatant',
            c1='blue', leg2='coacervate', c2='red'):
    """
    Plots the binodal for a polyelectrolyte in solution without salt.
    """
    if plot_T:
        y = 'T'
        if deg_C:
            y_label = 'Temperature [' + r'$^{\circ}$' + 'C]'
        else:
            y_label = 'Temperature [K]'
    else:
        y = 'BJ'
        y_label = 'Bjerrum length'

    # samples a uniform subset of the data
    n = len(df)
    skip = int(n / n_plot)
    sample = df.iloc[::skip]

    # creates figure object if not provided
    if p is None:
        p = figure(plot_width=w, plot_height=h)

    # loads source for plot data
    source = ColumnDataSource(sample)

    if marker:
        # creates circle glyph of polycation concentration in dilute phase
        p.circle(x=left, y=y, source=source, size=10, color=c1,
                 legend_label=leg1)
        # creates circle glyph of polycation concentration in coacervate phase
        p.circle(x=right, y=y, source=source, size=10, color=c2,
                 legend_label=leg2)
    if line:
        # creates line glyph of polycation concentration in dilute phase
        p.line(x=left, y=y, source=source, line_width=6, line_color=c1,
               legend_label=leg1)
        # creates line glyph of polycation concentration in coacervate phase
        p.line(x=right, y=y, source=source, line_width=6, line_color=c2,
               legend_label=leg2)

    # adds tie lines
    if n_tie_lines > 0:
        skip_tie_lines = int(n / n_tie_lines)
        df_tie_lines = df.iloc[::skip_tie_lines]
        for t in range(len(df_tie_lines)):
            p.line([df_tie_lines[left].iloc[t], df_tie_lines[right].iloc[t]],
                   [df_tie_lines[y].iloc[t], df_tie_lines[y].iloc[t]],
                   color='black')

    # adds plot labels
    p.xaxis.axis_label = x_label + ' ' + units_rho
    p.xaxis.axis_label_text_font_size = '18pt'
    p.xaxis.major_label_text_font_size = '14pt'
    p.yaxis.axis_label = y_label
    p.yaxis.axis_label_text_font_size = '18pt'
    p.yaxis.major_label_text_font_size = '14pt'

    # adds title
    p.title.text = title
    p.title.text_font_size = '16pt'

    # formats legend
    p.legend.location = "bottom_right"
    p.legend.label_text_font_size = '14pt'
    p.legend.click_policy = 'hide'

    # creates hover feature to read data
    hover = HoverTool()
    hover.tooltips = [
        (y_label, '@' + y),
        (x_label + ' (I)', '@' + left),
        (x_label + ' (II)', '@' + right)
    ]
    p.add_tools(hover)

    return p


def pt_3d(x, y, z, mode='markers', ms=8, op=1, c='black', fig=None):
    """
    Plots point in 3D plot (plotly).
    """
    if fig is None:
        fig = go.Figure()

    # plots phase I (supernatant) of fixed salt binodal
    fig.add_trace(go.Scatter3d(
        x=[x], y=[y], z=[z],
        mode=mode,
        marker=dict(
            size=ms,
            opacity=op,
            color=c
        ),
    ))

    return fig


def salt(df, n_plot, p=None, n_tie_lines=0):
    """
    Plots the binodal for a polyelectrolyte in solution with salt at a fixed
    Bjerrum length on rho_p vs. rho_s axes.
    """
    # samples a uniform subset of the data
    n = len(df)
    skip = int(n / n_plot)
    sample = df.iloc[::skip]

    # creates figure object if not provided
    if p is None:
        p = figure()

    # loads source for plot data
    source = ColumnDataSource(sample)

    # creates circle glyph of polycation concentration in dilute phase
    p.circle(x='rhoPAI', y='rhoAI', source=source, size=10, color='red',
             legend_label='dilute phase (I)')
    # creates circle glyph of polycation concentration in coacervate phase
    p.circle(x='rhoPAII', y='rhoAII', source=source, size=10, color='blue',
             legend_label='coacervate phase (II)')

    # draws tie lines
    if n_tie_lines > 0:
        skip_tie_lines = int(n / n_tie_lines)
        df_tie_lines = df.iloc[::skip_tie_lines]
        for t in range(len(df_tie_lines)):
            x = [df_tie_lines['rhoPAI'].iloc[t], df_tie_lines['rhoPAII'].iloc[t]]
            y = [df_tie_lines['rhoAI'].iloc[t], df_tie_lines['rhoAII'].iloc[t]]
            p.line(x, y, color='black')

    # adds plot labels
    p.xaxis.axis_label = 'polyanion number density'
    p.xaxis.axis_label_text_font_size = '18pt'
    p.xaxis.major_label_text_font_size = '14pt'
    p.yaxis.axis_label = 'anion number density'
    p.yaxis.axis_label_text_font_size = '18pt'
    p.yaxis.major_label_text_font_size = '14pt'

    # formats legend
    p.legend.location = "top_right"
    p.legend.label_text_font_size = '16pt'
    p.legend.click_policy = 'hide'

    # creates hover feature to read data
    hover = HoverTool()
    hover.tooltips = [
        ('Anion Density (I)', '@rhoAI'),
        ('Anion Density (II)', '@rhoAII'),
        ('Polyanion density (I)', '@rhoPAI'),
        ('Polyanion density (II)', '@rhoPAII')
    ]
    p.add_tools(hover)

    return p


def sort_lgnd_labels(ax, sorted_keys):
    """Sorts legend labels based on order of keywords."""
    # gets handles and labels from legend
    handles, labels = ax.get_legend_handles_labels()

    # sorts by keywords
    lgnd_sorted = []
    for key in sorted_keys:
        lgnd_sorted += get_lgnd_labels(handles, labels, key)

    # removes redundant entries
    lgnd_unique = [(0,0)]  # primer entry
    [lgnd_unique.append(pair) for pair in lgnd_sorted if pair[1] \
        not in list(zip(*lgnd_unique))[1]]
    # removes primer entry
    lgnd_unique = lgnd_unique[1:]
    # unzips
    handles_sorted, labels_sorted = zip(*lgnd_unique)

    # adds legend outside plot
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width, box.height])
    legend_x = 1
    legend_y = 0.5
    ax.legend(handles_sorted, labels_sorted, loc='center left',
              bbox_to_anchor=(legend_x, legend_y), fontsize=14, frameon=False)

    return ax


def validate_fit(data_pred, df_exp, ch_var, rho_var_list, rho_fix, colors,
                 beads_2_M_opt, T_range=[273.15, 323.15], lw=2, sigma=None,
                 conv_vals=False, x_var='polyelectrolyte'):
    """
    Validates fit of sigma to experiments.
    """
    if conv_vals:
        rho_p = df_exp['rho_p [M]'].to_numpy(dtype=float)
        rho_p_conv = df_exp['rho_p (conv) [M]'].to_numpy(dtype=float)
        rho_s = df_exp['rho_s [M]'].to_numpy(dtype=float)
        rho_s_conv = df_exp['rho_s (conv) [M]'].to_numpy(dtype=float)

        # matches polymer and salt values with fixed and varied concentrations
        rho_var_list_conv = []
        if ch_var == 'p':
            for rho_var in rho_var_list:
                i = np.where(rho_var == rho_p)[0][0]
                rho_var_list_conv += [rho_p_conv[i]]
            rho_fix_conv = rho_s_conv[np.where(rho_fix == rho_s)[0][0]]
        elif ch_var == 's':
            for rho_var in rho_var_list:
                i = np.where(rho_var == rho_s)[0][0]
                rho_var_list_conv += [rho_s_conv[i]]
            rho_fix_conv = rho_p_conv[np.where(rho_fix == rho_p)[0][0]]

    # polymer-temperature plane
    if conv_vals:
        ax = binodal_vary_rho(data_pred, rho_var_list_conv, rho_fix_conv,
                              ch_var, beads_2_M_opt, x_var=x_var,
                              x_label=r'$\rho_p$', sigma=sigma,
                              T_range=T_range, marker='', lw=lw,
                              colors=colors, T_cels=True)
    else:
        ax = binodal_vary_rho(data_pred, rho_var_list, rho_fix, ch_var,
                              beads_2_M_opt, x_var=x_var,
                              x_label=r'$\rho_p$', sigma=sigma,
                              T_range=T_range, marker='', lw=lw,
                              colors=colors, T_cels=True)

    # plots experimental results
    for i in range(len(df_exp)):
        rho_p, rho_s, T_exp, rho_p_sup, rho_p_co = nacl.read_df_exp(df_exp, i)
        if ch_var == 'p':
            rho_var_exp = rho_p
            rho_fix_exp = rho_s
        elif ch_var == 's':
            rho_var_exp = rho_s
            rho_fix_exp = rho_p
        else:
            print('Please select s or p as ch_var')

        if (rho_var_exp in rho_var_list) and (rho_fix_exp == rho_fix):
            # determines color
            color = [colors[i] for i in range(len(colors))
                     if rho_var_list[i] == rho_var_exp][0]
            # plots desired species concentration
            if x_var == 'polyanion' or x_var == 'polycation':
                # if just plotting polyanion, divides total polymer
                # concentration in half (assumes symmetric solution)
                rho_sup = rho_p_sup / 2
                rho_co = rho_p_co / 2
            elif x_var == 'polyelectrolyte':
                rho_sup = rho_p_sup
                rho_co = rho_p_co
            # plots supernatant and coacervate compositions
            ax.plot(rho_sup, T_exp, color=color, marker='o',
                    label='supernatant')
            ax.plot(rho_co, T_exp, color=color, marker='^',
                    label='coacervate')
[ "bokeh.models.ColumnDataSource", "pe.get_beads_2_M", "numpy.abs", "numpy.argmax", "salt.extract_df_mu_data", "numpy.argmin", "matplotlib.pyplot.figure", "salt.fixed_conc", "numpy.unique", "pandas.DataFrame", "numpy.copy", "salt.binodal_vary_f_data", "pe.lB_2_T", "numpy.max", "bokeh.plotting.show", "numpy.linspace", "salt.lB_comparison", "matplotlib.pyplot.subplots", "salt.read_df_exp", "salt.fixed_rho_total", "salt.make_df_mu", "matplotlib.pyplot.get_cmap", "plotly.graph_objects.Figure", "salt.get_mu_conc", "matplotlib.pyplot.legend", "bokeh.models.Range1d", "numpy.asarray", "pe.calc_rho_solv", "numpy.min", "bokeh.models.tools.HoverTool", "pe.lB_2_T_arr", "bokeh.plotting.figure", "salt.binodal_vary_N_data", "numpy.logical_and", "salt.binodal_vary_sigma_data", "salt.conv_ali_conc", "numpy.diff", "numpy.array", "numpy.where", "salt.load_data" ]
[((5262, 5274), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5272, 5274), True, 'import matplotlib.pyplot as plt\n'), ((9297, 9315), 'numpy.copy', 'np.copy', (['left_list'], {}), '(left_list)\n', (9304, 9315), True, 'import numpy as np\n'), ((9332, 9351), 'numpy.copy', 'np.copy', (['right_list'], {}), '(right_list)\n', (9339, 9351), True, 'import numpy as np\n'), ((9652, 9712), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'fix_eps': 'fix_eps', 'sigma': 'sigma'}), '(lB_arr, T_range, fix_eps=fix_eps, sigma=sigma)\n', (9665, 9712), False, 'import pe\n'), ((9773, 9819), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['BJ', 'T', left, right]"}), "(columns=['BJ', 'T', left, right])\n", (9785, 9819), True, 'import pandas as pd\n'), ((9834, 9890), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (9848, 9890), True, 'import numpy as np\n'), ((33758, 33776), 'numpy.unique', 'np.unique', (['z1_coll'], {}), '(z1_coll)\n', (33767, 33776), True, 'import numpy as np\n'), ((35080, 35092), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (35090, 35092), True, 'import matplotlib.pyplot as plt\n'), ((45434, 45466), 'pe.get_beads_2_M', 'pe.get_beads_2_M', (['sigma'], {'SI': '(True)'}), '(sigma, SI=True)\n', (45450, 45466), False, 'import pe\n'), ((49207, 49317), 'salt.binodal_vary_N_data', 'nacl.binodal_vary_N_data', (['data_folder_N', 'mu_salt_folder_N', 'rho_s_M_N', 'N_list'], {'sigma': 'sigma_fixed', 'ext': 'ext_N'}), '(data_folder_N, mu_salt_folder_N, rho_s_M_N, N_list,\n sigma=sigma_fixed, ext=ext_N)\n', (49231, 49317), True, 'import salt as nacl\n'), ((49611, 49721), 'salt.binodal_vary_f_data', 'nacl.binodal_vary_f_data', (['data_folder_f', 'mu_salt_folder_f', 'rho_s_M_f', 'f_list'], {'sigma': 'sigma_fixed', 'ext': 'ext_f'}), '(data_folder_f, mu_salt_folder_f, rho_s_M_f, f_list,\n sigma=sigma_fixed, ext=ext_f)\n', (49635, 49721), True, 'import salt as nacl\n'), ((50050, 50170), 'salt.load_data', 'nacl.load_data', (['data_folder_sigma'], {'ext': 'ext_sigma', 'naming_structure': 'naming_structure_sigma', 'lB_lo': 'lB_lo', 'lB_hi': 'lB_hi'}), '(data_folder_sigma, ext=ext_sigma, naming_structure=\n naming_structure_sigma, lB_lo=lB_lo, lB_hi=lB_hi)\n', (50064, 50170), True, 'import salt as nacl\n'), ((50229, 50331), 'salt.binodal_vary_sigma_data', 'nacl.binodal_vary_sigma_data', (['data', 'mu_salt_folder_sigma', 'rho_s_M_sigma', 'sigma_list'], {'ext': 'ext_sigma'}), '(data, mu_salt_folder_sigma, rho_s_M_sigma,\n sigma_list, ext=ext_sigma)\n', (50257, 50331), True, 'import salt as nacl\n'), ((62350, 62403), 'salt.lB_comparison', 'nacl.lB_comparison', (['T_range', 'sigma'], {'T_room_C': 'T_room_C'}), '(T_range, sigma, T_room_C=T_room_C)\n', (62368, 62403), True, 'import salt as nacl\n'), ((62508, 62606), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': 'figsize', 'gridspec_kw': "{'height_ratios': [gridspec, 1]}", 'sharex': '(True)'}), "(2, 1, figsize=figsize, gridspec_kw={'height_ratios': [gridspec,\n 1]}, sharex=True)\n", (62520, 62606), True, 'import matplotlib.pyplot as plt\n'), ((65892, 65915), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap_name'], {}), '(cmap_name)\n', (65904, 65915), True, 'import matplotlib.pyplot as plt\n'), ((68060, 68084), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['sample'], {}), '(sample)\n', (68076, 68084), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((69778, 69789), 
'bokeh.models.tools.HoverTool', 'HoverTool', ([], {}), '()\n', (69787, 69789), False, 'from bokeh.models.tools import HoverTool\n'), ((70839, 70863), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['sample'], {}), '(sample)\n', (70855, 70863), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((72122, 72133), 'bokeh.models.tools.HoverTool', 'HoverTool', ([], {}), '()\n', (72131, 72133), False, 'from bokeh.models.tools import HoverTool\n'), ((1727, 1739), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1737, 1739), True, 'import matplotlib.pyplot as plt\n'), ((2010, 2061), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (2030, 2061), True, 'import salt as nacl\n'), ((2310, 2353), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (2323, 2353), False, 'import pe\n'), ((3028, 3041), 'numpy.min', 'np.min', (['T_arr'], {}), '(T_arr)\n', (3034, 3041), True, 'import numpy as np\n'), ((5429, 5457), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (5437, 5457), True, 'import numpy as np\n'), ((5520, 5571), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (5540, 5571), True, 'import salt as nacl\n'), ((5820, 5863), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (5833, 5863), False, 'import pe\n'), ((10594, 10609), 'bokeh.models.Range1d', 'Range1d', (['*x_lim'], {}), '(*x_lim)\n', (10601, 10609), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((10656, 10671), 'bokeh.models.Range1d', 'Range1d', (['*y_lim'], {}), '(*y_lim)\n', (10663, 10671), False, 'from bokeh.models import ColumnDataSource, Title, Range1d\n'), ((11641, 11653), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11651, 11653), True, 'import matplotlib.pyplot as plt\n'), ((11860, 11911), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (11880, 11911), True, 'import salt as nacl\n'), ((13691, 13734), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (13704, 13734), False, 'import pe\n'), ((13753, 13809), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (13767, 13809), True, 'import numpy as np\n'), ((17449, 17461), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (17459, 17461), True, 'import matplotlib.pyplot as plt\n'), ((18141, 18167), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (18150, 18167), False, 'import pe\n'), ((23245, 23257), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23255, 23257), True, 'import matplotlib.pyplot as plt\n'), ((23282, 23309), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (23292, 23309), True, 'import matplotlib.pyplot as plt\n'), ((24026, 24137), 'salt.make_df_mu', 'nacl.make_df_mu', (['data', 'mu_salt_folder', 'rho_salt', 'T_range', 'sigma'], {'naming_structure': 'naming_structure', 'ext': 'ext'}), '(data, mu_salt_folder, rho_salt, T_range, sigma,\n naming_structure=naming_structure, ext=ext)\n', (24041, 24137), True, 'import salt as nacl\n'), ((24223, 
24261), 'salt.extract_df_mu_data', 'nacl.extract_df_mu_data', (['df_mu', 'z_name'], {}), '(df_mu, z_name)\n', (24246, 24261), True, 'import salt as nacl\n'), ((24345, 24401), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (24359, 24401), True, 'import numpy as np\n'), ((25751, 25777), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (25760, 25777), False, 'import pe\n'), ((28207, 28219), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28217, 28219), True, 'import matplotlib.pyplot as plt\n'), ((28899, 28925), 'pe.lB_2_T', 'pe.lB_2_T', (['lB'], {'sigma': 'sigma'}), '(lB, sigma=sigma)\n', (28908, 28925), False, 'import pe\n'), ((32871, 32882), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (32880, 32882), True, 'import plotly.graph_objects as go\n'), ((35249, 35318), 'salt.get_mu_conc', 'nacl.get_mu_conc', (['mu_salt_folder', 'data', 'rho_salt'], {'beads_2_M': 'beads_2_M'}), '(mu_salt_folder, data, rho_salt, beads_2_M=beads_2_M)\n', (35265, 35318), True, 'import salt as nacl\n'), ((35530, 35552), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (35538, 35552), True, 'import numpy as np\n'), ((35573, 35596), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (35581, 35596), True, 'import numpy as np\n'), ((35678, 35721), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (35691, 35721), False, 'import pe\n'), ((35740, 35796), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (35754, 35796), True, 'import numpy as np\n'), ((36953, 37032), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(legend_x, legend_y)', 'fontsize': '(12)'}), "(loc='center left', bbox_to_anchor=(legend_x, legend_y), fontsize=12)\n", (36963, 37032), True, 'import matplotlib.pyplot as plt\n'), ((37051, 37074), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'fontsize': '(12)'}), '(fontsize=12)\n', (37061, 37074), True, 'import matplotlib.pyplot as plt\n'), ((37468, 37480), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (37478, 37480), True, 'import matplotlib.pyplot as plt\n'), ((38525, 38537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (38535, 38537), True, 'import matplotlib.pyplot as plt\n'), ((39620, 39632), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (39630, 39632), True, 'import matplotlib.pyplot as plt\n'), ((41377, 41389), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (41387, 41389), True, 'import matplotlib.pyplot as plt\n'), ((41571, 41599), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (41579, 41599), True, 'import numpy as np\n'), ((41662, 41713), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (41682, 41713), True, 'import salt as nacl\n'), ((43493, 43536), 'pe.lB_2_T_arr', 'pe.lB_2_T_arr', (['lB_arr', 'T_range'], {'sigma': 'sigma'}), '(lB_arr, T_range, sigma=sigma)\n', (43506, 43536), False, 'import pe\n'), ((43555, 43611), 'numpy.logical_and', 'np.logical_and', (['(T_arr >= T_range[0])', '(T_arr <= T_range[1])'], {}), '(T_arr >= T_range[0], T_arr <= T_range[1])\n', (43569, 43611), True, 'import numpy as np\n'), ((45527, 45539), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (45537, 45539), True, 'import matplotlib.pyplot as plt\n'), ((45564, 45591), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (45574, 45591), True, 'import matplotlib.pyplot as plt\n'), ((48997, 49009), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (49007, 49009), True, 'import matplotlib.pyplot as plt\n'), ((49034, 49061), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (49044, 49061), True, 'import matplotlib.pyplot as plt\n'), ((59037, 59049), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (59047, 59049), True, 'import matplotlib.pyplot as plt\n'), ((59074, 59101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (59084, 59101), True, 'import matplotlib.pyplot as plt\n'), ((60928, 60940), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (60938, 60940), True, 'import matplotlib.pyplot as plt\n'), ((60965, 60992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (60975, 60992), True, 'import matplotlib.pyplot as plt\n'), ((66822, 66833), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (66831, 66833), True, 'import plotly.graph_objects as go\n'), ((67977, 68012), 'bokeh.plotting.figure', 'figure', ([], {'plot_width': 'w', 'plot_height': 'h'}), '(plot_width=w, plot_height=h)\n', (67983, 68012), False, 'from bokeh.plotting import figure, output_file, show\n'), ((70140, 70151), 'plotly.graph_objects.Figure', 'go.Figure', ([], {}), '()\n', (70149, 70151), True, 'import plotly.graph_objects as go\n'), ((70783, 70791), 'bokeh.plotting.figure', 'figure', ([], {}), '()\n', (70789, 70791), False, 'from bokeh.plotting import figure, output_file, show\n'), ((75340, 75367), 'salt.read_df_exp', 'nacl.read_df_exp', (['df_exp', 'i'], {}), '(df_exp, i)\n', (75356, 75367), True, 'import salt as nacl\n'), ((2577, 2592), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (2585, 2592), True, 'import numpy as np\n'), ((12404, 12426), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (12412, 12426), True, 'import numpy as np\n'), ((12451, 12474), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (12459, 12474), True, 'import numpy as np\n'), ((18463, 18514), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (18483, 18514), True, 'import salt as nacl\n'), ((18720, 18745), 'numpy.asarray', 'np.asarray', (["results['lB']"], {}), "(results['lB'])\n", (18730, 18745), True, 'import numpy as np\n'), ((18922, 18945), 'numpy.asarray', 'np.asarray', (['rho_CI_list'], {}), '(rho_CI_list)\n', (18932, 18945), True, 'import numpy as np\n'), ((19020, 19044), 'numpy.asarray', 'np.asarray', (['rho_CII_list'], {}), '(rho_CII_list)\n', (19030, 19044), True, 'import numpy as np\n'), ((29169, 29197), 'numpy.array', 'np.array', (['[rho_var, rho_fix]'], {}), '([rho_var, rho_fix])\n', (29177, 29197), True, 'import numpy as np\n'), ((29270, 29321), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (29290, 29321), True, 'import salt as nacl\n'), ((29527, 29552), 'numpy.asarray', 'np.asarray', (["results['lB']"], {}), "(results['lB'])\n", (29537, 29552), True, 'import numpy as np\n'), ((29729, 29752), 'numpy.asarray', 'np.asarray', 
(['rho_CI_list'], {}), '(rho_CI_list)\n', (29739, 29752), True, 'import numpy as np\n'), ((29827, 29851), 'numpy.asarray', 'np.asarray', (['rho_CII_list'], {}), '(rho_CII_list)\n', (29837, 29851), True, 'import numpy as np\n'), ((35382, 35438), 'salt.fixed_conc', 'nacl.fixed_conc', (['mu_conc', 'data', 'qty'], {'beads_2_M': 'beads_2_M'}), '(mu_conc, data, qty, beads_2_M=beads_2_M)\n', (35397, 35438), True, 'import salt as nacl\n'), ((42206, 42228), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (42214, 42228), True, 'import numpy as np\n'), ((42253, 42276), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (42261, 42276), True, 'import numpy as np\n'), ((45937, 45985), 'salt.conv_ali_conc', 'nacl.conv_ali_conc', (['df_exp', 'rho_p_raw', 'rho_s_raw'], {}), '(df_exp, rho_p_raw, rho_s_raw)\n', (45955, 45985), True, 'import salt as nacl\n'), ((46805, 46880), 'salt.read_df_exp', 'nacl.read_df_exp', (['df_exp', 'i'], {'conv_vals': 'conv_vals', 'read_sigma': 'plot_errorbars'}), '(df_exp, i, conv_vals=conv_vals, read_sigma=plot_errorbars)\n', (46821, 46880), True, 'import salt as nacl\n'), ((51226, 51277), 'salt.fixed_rho_total', 'nacl.fixed_rho_total', (['data', 'rho_p', 'rho_s', 'beads_2_M'], {}), '(data, rho_p, rho_s, beads_2_M)\n', (51246, 51277), True, 'import salt as nacl\n'), ((51869, 51876), 'bokeh.plotting.show', 'show', (['p'], {}), '(p)\n', (51873, 51876), False, 'from bokeh.plotting import figure, output_file, show\n'), ((53964, 53975), 'numpy.diff', 'np.diff', (['z1'], {}), '(z1)\n', (53971, 53975), True, 'import numpy as np\n'), ((53999, 54017), 'numpy.argmax', 'np.argmax', (['z1_diff'], {}), '(z1_diff)\n', (54008, 54017), True, 'import numpy as np\n'), ((65949, 65969), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (65960, 65969), True, 'import numpy as np\n'), ((2443, 2460), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (2453, 2460), True, 'import numpy as np\n'), ((3127, 3143), 'numpy.argmin', 'np.argmin', (['T_arr'], {}), '(T_arr)\n', (3136, 3143), True, 'import numpy as np\n'), ((3243, 3273), 'numpy.abs', 'np.abs', (['(alpha_single_phase - 1)'], {}), '(alpha_single_phase - 1)\n', (3249, 3273), True, 'import numpy as np\n'), ((3523, 3555), 'numpy.abs', 'np.abs', (['(alpha_single_phase - 0.5)'], {}), '(alpha_single_phase - 0.5)\n', (3529, 3555), True, 'import numpy as np\n'), ((5953, 5970), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (5963, 5970), True, 'import numpy as np\n'), ((6099, 6114), 'numpy.array', 'np.array', (['alpha'], {}), '(alpha)\n', (6107, 6114), True, 'import numpy as np\n'), ((12533, 12555), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (12541, 12555), True, 'import numpy as np\n'), ((12580, 12603), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (12588, 12603), True, 'import numpy as np\n'), ((18875, 18899), 'numpy.asarray', 'np.asarray', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (18885, 18899), True, 'import numpy as np\n'), ((18971, 18996), 'numpy.asarray', 'np.asarray', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (18981, 18996), True, 'import numpy as np\n'), ((29682, 29706), 'numpy.asarray', 'np.asarray', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (29692, 29706), True, 'import numpy as np\n'), ((29778, 29803), 'numpy.asarray', 'np.asarray', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (29788, 29803), True, 'import numpy as np\n'), ((42335, 42357), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), 
'(rho_PAI_list)\n', (42343, 42357), True, 'import numpy as np\n'), ((42382, 42405), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (42390, 42405), True, 'import numpy as np\n'), ((12659, 12680), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (12667, 12680), True, 'import numpy as np\n'), ((12705, 12727), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (12713, 12727), True, 'import numpy as np\n'), ((15402, 15427), 'numpy.max', 'np.max', (['left_arr[liq_h2o]'], {}), '(left_arr[liq_h2o])\n', (15408, 15427), True, 'import numpy as np\n'), ((15450, 15476), 'numpy.min', 'np.min', (['right_arr[liq_h2o]'], {}), '(right_arr[liq_h2o])\n', (15456, 15476), True, 'import numpy as np\n'), ((42461, 42482), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (42469, 42482), True, 'import numpy as np\n'), ((42507, 42529), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (42515, 42529), True, 'import numpy as np\n'), ((55198, 55215), 'numpy.asarray', 'np.asarray', (['coord'], {}), '(coord)\n', (55208, 55215), True, 'import numpy as np\n'), ((12782, 12803), 'numpy.array', 'np.array', (['rho_AI_list'], {}), '(rho_AI_list)\n', (12790, 12803), True, 'import numpy as np\n'), ((12828, 12850), 'numpy.array', 'np.array', (['rho_AII_list'], {}), '(rho_AII_list)\n', (12836, 12850), True, 'import numpy as np\n'), ((42584, 42605), 'numpy.array', 'np.array', (['rho_AI_list'], {}), '(rho_AI_list)\n', (42592, 42605), True, 'import numpy as np\n'), ((42630, 42652), 'numpy.array', 'np.array', (['rho_AII_list'], {}), '(rho_AII_list)\n', (42638, 42652), True, 'import numpy as np\n'), ((74127, 74153), 'numpy.where', 'np.where', (['(rho_var == rho_p)'], {}), '(rho_var == rho_p)\n', (74135, 74153), True, 'import numpy as np\n'), ((74251, 74277), 'numpy.where', 'np.where', (['(rho_fix == rho_s)'], {}), '(rho_fix == rho_s)\n', (74259, 74277), True, 'import numpy as np\n'), ((12907, 12961), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCI_list', 'rho_CI_list', 'beads_2_M'], {}), '(rho_PCI_list, rho_CI_list, beads_2_M)\n', (12923, 12961), False, 'import pe\n'), ((13070, 13126), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCII_list', 'rho_CII_list', 'beads_2_M'], {}), '(rho_PCII_list, rho_CII_list, beads_2_M)\n', (13086, 13126), False, 'import pe\n'), ((42709, 42763), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCI_list', 'rho_CI_list', 'beads_2_M'], {}), '(rho_PCI_list, rho_CI_list, beads_2_M)\n', (42725, 42763), False, 'import pe\n'), ((42872, 42928), 'pe.calc_rho_solv', 'pe.calc_rho_solv', (['rho_PCII_list', 'rho_CII_list', 'beads_2_M'], {}), '(rho_PCII_list, rho_CII_list, beads_2_M)\n', (42888, 42928), False, 'import pe\n'), ((74374, 74400), 'numpy.where', 'np.where', (['(rho_var == rho_s)'], {}), '(rho_var == rho_s)\n', (74382, 74400), True, 'import numpy as np\n'), ((74498, 74524), 'numpy.where', 'np.where', (['(rho_fix == rho_p)'], {}), '(rho_fix == rho_p)\n', (74506, 74524), True, 'import numpy as np\n'), ((19223, 19242), 'numpy.abs', 'np.abs', (['(lB_arr - lB)'], {}), '(lB_arr - lB)\n', (19229, 19242), True, 'import numpy as np\n'), ((30030, 30049), 'numpy.abs', 'np.abs', (['(lB_arr - lB)'], {}), '(lB_arr - lB)\n', (30036, 30049), True, 'import numpy as np\n'), ((13277, 13299), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (13285, 13299), True, 'import numpy as np\n'), ((13302, 13324), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (13310, 13324), True, 
'import numpy as np\n'), ((13349, 13372), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (13357, 13372), True, 'import numpy as np\n'), ((13375, 13398), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (13383, 13398), True, 'import numpy as np\n'), ((13452, 13473), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (13460, 13473), True, 'import numpy as np\n'), ((13498, 13520), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (13506, 13520), True, 'import numpy as np\n'), ((43079, 43101), 'numpy.array', 'np.array', (['rho_PCI_list'], {}), '(rho_PCI_list)\n', (43087, 43101), True, 'import numpy as np\n'), ((43104, 43126), 'numpy.array', 'np.array', (['rho_PAI_list'], {}), '(rho_PAI_list)\n', (43112, 43126), True, 'import numpy as np\n'), ((43151, 43174), 'numpy.array', 'np.array', (['rho_PCII_list'], {}), '(rho_PCII_list)\n', (43159, 43174), True, 'import numpy as np\n'), ((43177, 43200), 'numpy.array', 'np.array', (['rho_PAII_list'], {}), '(rho_PAII_list)\n', (43185, 43200), True, 'import numpy as np\n'), ((43254, 43275), 'numpy.array', 'np.array', (['rho_CI_list'], {}), '(rho_CI_list)\n', (43262, 43275), True, 'import numpy as np\n'), ((43300, 43322), 'numpy.array', 'np.array', (['rho_CII_list'], {}), '(rho_CII_list)\n', (43308, 43322), True, 'import numpy as np\n')]
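As a quick illustration of the plotting helpers in the module above, here is a minimal sketch (not from the source) of combining `get_colors` with `format_binodal`; the data, labels, and temperature range are invented for the example, and it assumes the module's functions are in scope alongside matplotlib:

import matplotlib.pyplot as plt

# hypothetical usage of get_colors() and format_binodal() defined above
fig, ax = plt.subplots()
colors = get_colors('plasma', 3)               # one color per curve
for c, rho_s in zip(colors, [0.1, 0.2, 0.3]):  # invented salt densities [M]
    ax.plot([0.0, 0.5, 1.0], [280, 300, 320], color=c,
            label=r'$\rho_s$ = {0:.1f} M'.format(rho_s))
ax = format_binodal(ax, r'$\rho_p$', 'M', [273, 333], lgnd_out=False)
plt.show()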
from django.db import models


class ConceptClass(models.Model):
    concept_class_id = models.CharField(
        primary_key=True,
        max_length=20
    )
    concept_class_name = models.CharField(
        max_length=255
    )
    concept_class_concept_id = models.IntegerField()

    class Meta:
        managed = False
        db_table = 'omop"."concept_class'
[ "django.db.models.CharField", "django.db.models.IntegerField" ]
[((90, 139), 'django.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(20)'}), '(primary_key=True, max_length=20)\n', (106, 139), False, 'from django.db import models\n'), ((188, 220), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (204, 220), False, 'from django.db import models\n'), ((275, 296), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (294, 296), False, 'from django.db import models\n')]
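Since `managed = False` tells Django not to create or migrate this table, the model only reads an existing `omop.concept_class` table. A brief, hypothetical query against it (the app import path and the row contents are invented):

# hypothetical usage, e.g. in a Django shell
from omop_app.models import ConceptClass  # import path is an assumption

row = ConceptClass.objects.order_by('concept_class_id').first()
if row is not None:
    print(row.concept_class_id, row.concept_class_name)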
import typing
import sys

import numpy as np
import numba as nb


@nb.njit
def csgraph_to_directed(g: np.ndarray) -> np.ndarray:
    m = len(g)
    g = np.vstack((g, g))
    g[m:, :2] = g[m:, 1::-1]
    return g


@nb.njit
def sort_csgraph(
    n: int,
    g: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray, np.ndarray]:
    sort_idx = np.argsort(g[:, 0], kind='mergesort')
    g = g[sort_idx]
    edge_idx = np.searchsorted(g[:, 0], np.arange(n + 1))
    original_idx = np.arange(len(g))[sort_idx]
    return g, edge_idx, original_idx


@nb.njit
def euler_tour_edge(
    g: np.ndarray,
    edge_idx: np.ndarray,
    root: int,
) -> typing.Tuple[(np.ndarray, ) * 3]:
    n = g[:, :2].max() + 1
    parent = np.full(n, -1, np.int64)
    depth = np.zeros(n, np.int64)
    tour = np.empty(n * 2, np.int64)
    st = [root]
    for i in range(2 * n):
        u = st.pop()
        tour[i] = u
        if u < 0:
            continue
        st.append(-u - 1)
        for v in g[edge_idx[u]:edge_idx[u + 1], 1][::-1]:
            if v == parent[u]:
                continue
            parent[v] = u
            depth[v] = depth[u] + 1
            st.append(v)
    return tour, parent, depth


@nb.njit
def euler_tour_node(
    g: np.ndarray,
    edge_idx: np.ndarray,
    root: int,
) -> typing.Tuple[(np.ndarray, ) * 4]:
    tour, parent, depth = euler_tour_edge(g, edge_idx, root)
    n = len(tour) >> 1
    tour = tour[:-1]
    first_idx = np.full(n, -1, np.int64)
    for i in range(2 * n - 1):
        u = tour[i]
        if u < 0:
            tour[i] = parent[~u]
            continue
        first_idx[u] = i
    return tour, first_idx, parent, depth


@nb.njit
def uf_build(n: int) -> np.ndarray:
    return np.full(n, -1, np.int64)


@nb.njit
def uf_find(uf: np.ndarray, u: int) -> int:
    if uf[u] < 0:
        return u
    uf[u] = uf_find(uf, uf[u])
    return uf[u]


@nb.njit
def uf_unite(
    uf: np.ndarray,
    u: int,
    v: int,
) -> typing.NoReturn:
    u, v = uf_find(uf, u), uf_find(uf, v)
    if u == v:
        return
    if uf[u] > uf[v]:
        u, v = v, u
    uf[u] += uf[v]
    uf[v] = u


@nb.njit
def lca(
    g: np.ndarray,
    edge_idx: np.ndarray,
    vu: np.ndarray,
) -> np.ndarray:
    m = len(vu)
    tour, parent, _ = euler_tour_edge(g, edge_idx, 0)
    n = len(tour) >> 1
    first_idx = np.full(n, -1, np.int64)
    for i in range(len(tour)):
        u = tour[i]
        if u < 0:
            continue
        first_idx[u] = i
    for i in range(m):
        v, u = vu[i]
        if first_idx[v] < first_idx[u]:
            vu[i] = vu[i, ::-1]
    vu, query_idx, original_idx = sort_csgraph(n, vu)
    _lca = np.empty(m, np.int64)
    uf = uf_build(n)
    ancestor = np.arange(n)
    for v in tour[:-1]:
        if v >= 0:
            continue
        v = ~v
        for j in range(query_idx[v], query_idx[v + 1]):
            u = vu[j, 1]
            _lca[original_idx[j]] = ancestor[uf_find(uf, u)]
        p = parent[v]
        uf_unite(uf, v, p)
        ancestor[uf_find(uf, p)] = p
    return _lca


@nb.njit((nb.i8[:, :], nb.i8[:, :]), cache=True)
def solve(xy: np.ndarray, ab: np.ndarray) -> typing.NoReturn:
    n = len(xy) + 1
    g = csgraph_to_directed(xy)
    g, edge_idx, _ = sort_csgraph(n, g)
    _, _, depth = euler_tour_edge(g, edge_idx, 0)
    _lca = lca(g, edge_idx, ab)
    for i in range(len(ab)):
        u, v = ab[i]
        l = _lca[i]
        d = depth[u] + depth[v] - 2 * depth[l] + 1
        print(d)


def main() -> typing.NoReturn:
    n = int(input())
    I = np.array(
        sys.stdin.read().split(),
        dtype=np.int64,
    )
    xy = I[:2 * (n - 1)].reshape(n - 1, 2) - 1
    ab = I[2 * n - 1:].reshape(-1, 2) - 1
    solve(xy, ab)


main()
[ "numpy.full", "sys.stdin.read", "numpy.empty", "numba.njit", "numpy.zeros", "numpy.argsort", "numpy.arange", "numpy.vstack" ]
[((2746, 2793), 'numba.njit', 'nb.njit', (['(nb.i8[:, :], nb.i8[:, :])'], {'cache': '(True)'}), '((nb.i8[:, :], nb.i8[:, :]), cache=True)\n', (2753, 2793), True, 'import numba as nb\n'), ((154, 171), 'numpy.vstack', 'np.vstack', (['(g, g)'], {}), '((g, g))\n', (163, 171), True, 'import numpy as np\n'), ((337, 374), 'numpy.argsort', 'np.argsort', (['g[:, 0]'], {'kind': '"""mergesort"""'}), "(g[:, 0], kind='mergesort')\n", (347, 374), True, 'import numpy as np\n'), ((691, 715), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (698, 715), True, 'import numpy as np\n'), ((726, 747), 'numpy.zeros', 'np.zeros', (['n', 'np.int64'], {}), '(n, np.int64)\n', (734, 747), True, 'import numpy as np\n'), ((757, 782), 'numpy.empty', 'np.empty', (['(n * 2)', 'np.int64'], {}), '(n * 2, np.int64)\n', (765, 782), True, 'import numpy as np\n'), ((1326, 1350), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (1333, 1350), True, 'import numpy as np\n'), ((1571, 1595), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (1578, 1595), True, 'import numpy as np\n'), ((2130, 2154), 'numpy.full', 'np.full', (['n', '(-1)', 'np.int64'], {}), '(n, -1, np.int64)\n', (2137, 2154), True, 'import numpy as np\n'), ((2402, 2423), 'numpy.empty', 'np.empty', (['m', 'np.int64'], {}), '(m, np.int64)\n', (2410, 2423), True, 'import numpy as np\n'), ((2456, 2468), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2465, 2468), True, 'import numpy as np\n'), ((431, 447), 'numpy.arange', 'np.arange', (['(n + 1)'], {}), '(n + 1)\n', (440, 447), True, 'import numpy as np\n'), ((3213, 3229), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (3227, 3229), False, 'import sys\n')]
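The script above answers path-length queries offline with Tarjan-style LCA (union-find over an Euler tour). A small self-contained check, assuming the functions above are in scope; the tree and queries are invented for the example:

import numpy as np

# edges 0-1, 1-2, 1-3 (a tree rooted at 0), already 0-indexed
xy = np.array([[0, 1], [1, 2], [1, 3]], dtype=np.int64)
# queries: number of nodes on the paths (2, 3) and (0, 2)
ab = np.array([[2, 3], [0, 2]], dtype=np.int64)
solve(xy, ab)  # prints 3 then 3 (paths 2-1-3 and 0-1-2)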
from os import path
import json
import re

from stix_shifter_utils.stix_translation.src.utils.exceptions import DataMappingException
from stix_shifter_utils.modules.base.stix_translation.base_data_mapper import BaseDataMapper


def _fetch_mapping(dialect=''):
    try:
        if dialect != '':
            dialect = dialect + '_'
        basepath = path.dirname(__file__)
        filepath = path.abspath(
            path.join(basepath, "json", dialect + "from_stix_map.json"))
        map_file = open(filepath).read()
        map_data = json.loads(map_file)
        return map_data
    except Exception as ex:
        print('exception in _fetch_mapping():', ex)
        return {}


class DataMapper(BaseDataMapper):

    def map_object(self, stix_object_name):
        self.map_data = _fetch_mapping(self.dialect)
        if stix_object_name in self.map_data and self.map_data[stix_object_name] is not None:
            return self.map_data[stix_object_name]
        else:
            raise DataMappingException(
                "Unable to map object `{}` into SQL".format(stix_object_name))

    def map_field(self, stix_object_name, stix_property_name):
        self.map_data = _fetch_mapping(self.dialect)
        if stix_object_name in self.map_data and stix_property_name in self.map_data[stix_object_name]["fields"]:
            return self.map_data[stix_object_name]["fields"][stix_property_name]
        else:
            return []

    def map_selections(self):
        try:
            filepath = path.abspath(
                path.join(self.basepath, "json", self.dialect + "_event_fields.json"))
            sql_fields_file = open(filepath).read()
            sql_fields_json = json.loads(sql_fields_file)
            # Temporary default selections; this will change based on upcoming
            # config override and the STIX pattern being converted to SQL.
            field_list = sql_fields_json['default']
            sql_select = ", ".join(field_list)
            return sql_select
        except Exception as ex:
            print('Exception while reading sql fields file:', ex)
            return {}
[ "os.path.dirname", "os.path.join", "json.loads" ]
[((349, 371), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (361, 371), False, 'from os import path\n'), ((539, 559), 'json.loads', 'json.loads', (['map_file'], {}), '(map_file)\n', (549, 559), False, 'import json\n'), ((417, 476), 'os.path.join', 'path.join', (['basepath', '"""json"""', "(dialect + 'from_stix_map.json')"], {}), "(basepath, 'json', dialect + 'from_stix_map.json')\n", (426, 476), False, 'from os import path\n'), ((1678, 1705), 'json.loads', 'json.loads', (['sql_fields_file'], {}), '(sql_fields_file)\n', (1688, 1705), False, 'import json\n'), ((1525, 1594), 'os.path.join', 'path.join', (['self.basepath', '"""json"""', "(self.dialect + '_event_fields.json')"], {}), "(self.basepath, 'json', self.dialect + '_event_fields.json')\n", (1534, 1594), False, 'from os import path\n')]
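The loader above expects a `json/<dialect>_from_stix_map.json` file next to the module. A sketch of exercising it directly, assuming such a file exists for a hypothetical `demo` dialect; the mapping contents shown in the comment are illustrative only:

# hypothetical check of the module-level loader defined above
mapping = _fetch_mapping('demo')  # reads json/demo_from_stix_map.json
# e.g. {'ipv4-addr': {'fields': {'value': ['sourceip', 'destinationip']}}}
print(mapping.get('ipv4-addr', {}).get('fields', {}))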
from django.conf.urls import patterns, url

from categories import views

urlpatterns = patterns('',
    # /categories/
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<slug>[-_\w]+)/$', views.CategoryPageView.as_view(), name='categoryPage'),
)
[ "categories.views.IndexView.as_view", "categories.views.CategoryPageView.as_view" ]
[((142, 167), 'categories.views.IndexView.as_view', 'views.IndexView.as_view', ([], {}), '()\n', (165, 167), False, 'from categories import views\n'), ((221, 253), 'categories.views.CategoryPageView.as_view', 'views.CategoryPageView.as_view', ([], {}), '()\n', (251, 253), False, 'from categories import views\n')]
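`patterns()` is the pre-Django-1.10 URLconf style (removed in Django 1.10). For reference, the same routes in the plain-list form that replaced it would look like this (a sketch, assuming the same `categories.views` module):

from django.conf.urls import url

from categories import views

urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^(?P<slug>[-_\w]+)/$', views.CategoryPageView.as_view(), name='categoryPage'),
]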
""" Copyright (c) Facebook, Inc. and its affiliates. """ from .voc import VOCDetection from typing import Iterable import to_coco_api VOC_PATH = "/datasets01/VOC/060817/" class VOCDetection2012(VOCDetection): def __init__(self, image_set: str = "train", transforms: Iterable = None): super(VOCDetection, self).__init__( VOC_PATH, image_set=image_set, year="2012", download=False ) self.prepare = to_coco_api.PrepareInstance() self._transforms = transforms from .voc import make_voc_transforms def build(image_set, args): # if we only use voc2012, then we need to adapt trainval and test to # VOC2012 constraints if image_set == "test": image_set = "val" if image_set == "trainval": image_set = "train" return VOCDetection2012( image_set=image_set, transforms=make_voc_transforms(image_set, args.remove_difficult) )
[ "to_coco_api.PrepareInstance" ]
[((440, 469), 'to_coco_api.PrepareInstance', 'to_coco_api.PrepareInstance', ([], {}), '()\n', (467, 469), False, 'import to_coco_api\n')]
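A brief, hypothetical usage of the builder above, assuming the VOC 2012 data is unpacked under `VOC_PATH` and that `args` carries the `remove_difficult` flag the transforms expect:

from argparse import Namespace

args = Namespace(remove_difficult=False)  # illustrative args object
dataset = build('train', args)            # 'trainval' would be mapped to 'train'
img, target = dataset[0]                  # target layout comes from to_coco_api
print(len(dataset))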
from __future__ import absolute_import

from collections import namedtuple
from time import time

from flask import g
from jsonschema import ValidationError

from reles.references import resolve_field_reference

ProcessingContext = namedtuple(
    'ProcessingContext',
    ('datastore', 'doc_id', 'full_entity', 'full_schema')
)


def _log_access(entity, schema, parent, context):
    if entity:
        raise ValidationError("'log_access' entity cannot be overridden manually")

    if not context.doc_id:
        log = {schema['created']: int(time())}
    else:
        log = {schema['updated']: int(time())}

    # This dependency means that the modifier will only work in a request context.
    log[schema['user']] = g.user['email']
    log[schema['customer']] = g.customer['name']

    return log


def _fill_from_fkey(entity, schema, parent, context):
    # type: (Any, dict, ProcessingContext) -> Union[dict, Sequence, str]
    """
    The *fill from foreign key* modifier pulls a related document - or one of
    its attributes - into the document being processed.

    This can be useful to enrich the indexes for the document being processed
    with data from the related document, essentially denormalizing their
    relationship. This can make certain kinds of queries easier, or be
    required to make them possible at all.
    """
    def denormalize(id):
        source_document = context.datastore.get_document(_index, _doc_type, id)
        if _field:
            docs = resolve_field_reference(_field, None, source_document)
            return docs[0] if docs else None
        else:
            return source_document

    if entity:
        raise ValidationError("'fill_from_fkey' entity can not be set to anything!")

    _index = schema['source']['index']
    _doc_type = schema['source']['doc_type']
    _field = schema['source'].get('field', '')
    _fkey_field = schema['fkey_field']

    _fkey_values = resolve_field_reference(_fkey_field, parent, context.full_entity)
    if _fkey_values:
        # The fkey(s) pointing at the data to be denormalized are set
        denormalized = []
        for _fkey_data in _fkey_values:
            if isinstance(_fkey_data, list):
                denormalized.extend([denormalize(_id) for _id in _fkey_data])
            else:
                denormalized.append(denormalize(_fkey_data))
        return denormalized
    else:
        return None


def _include_parents(entity, schema, parent, context):
    # type: (list, dict, Any, ProcessingContext) -> Sequence
    def _get_parents(child_id):
        while child_id is not None:
            yield child_id
            child_id = context.datastore.get_document(
                index, doc_type, child_id
            ).get(parent_field)

    if not entity:
        # Nothing to expand
        return entity

    index = schema['index']
    doc_type = schema['doc_type']
    parent_field = schema['parent_field']

    parents = set()
    for child_id in entity:
        parents.update([id for id in _get_parents(child_id)])
    return list(parents)


class Processor(object):
    _processors = {
        'x-log-access': _log_access,
        'x-fill-from-fkey': _fill_from_fkey,
        'x-include-parents': _include_parents,
    }

    def __init__(self, schema, datastore, processors=None):
        # type: (dict, DataStore, dict) -> None
        super(Processor, self).__init__()
        self._datastore = datastore
        self.schema = schema
        if processors is not None:
            self._processors = processors

    def _process(self, key, schema, entity, parent, context):
        # type: (str, dict, Union[dict, Sequence, str], Union[dict, Sequence, str], ProcessingContext) -> Union[dict, Sequence, str]
        """
        Recursive helper function for process().

        Applies processors in a *depth first* manner; if no processors are
        applicable it ends up copying the entity.
        """
        # apply any applicable processors on this entity...
        for processor_name, processor in self._processors.items():
            if processor_name in schema:
                entity = processor(entity, schema[processor_name], parent, context)

        # ...then recurse deeper into the schema/entity
        if schema['type'] == 'object' and isinstance(entity, dict):
            for _key, _schema in schema.get('properties', {}).items():
                # If the field has not been sent, `None` is passed down as the entity.
                # Modifiers will be applied, but recursion stops due to type checks.
                processed = self._process(_key, _schema, entity.get(_key), entity, context)
                if processed is not None:
                    entity[_key] = processed
            return entity
        elif schema['type'] == 'array' and isinstance(entity, list):
            return [self._process(key, schema['items'], _entity, entity, context)
                    for _entity in entity]
        else:
            return entity

    def process(self, entity, id=None):
        # type: (Union[dict, Sequence, str]) -> Union[dict, Sequence, str]
        """
        Applies any configured processors to the given entity (document).

        The result will always be a new object and the given entity unmodified,
        even if no processor was applicable.
        """
        context = ProcessingContext(
            datastore=self._datastore,
            doc_id=id,
            full_entity=entity,
            full_schema=self.schema,
        )
        for key, sub_schema in self.schema['properties'].items():
            processed = self._process(key, sub_schema, entity.get(key, None), entity, context)
            if processed is not None:
                entity[key] = processed

        # TODO: eliminate `return` statement, use `entity` as in-out-parameter
        return entity
[ "reles.references.resolve_field_reference", "jsonschema.ValidationError", "collections.namedtuple", "time.time" ]
[((232, 322), 'collections.namedtuple', 'namedtuple', (['"""ProcessingContext"""', "('datastore', 'doc_id', 'full_entity', 'full_schema')"], {}), "('ProcessingContext', ('datastore', 'doc_id', 'full_entity',\n 'full_schema'))\n", (242, 322), False, 'from collections import namedtuple\n'), ((1949, 2014), 'reles.references.resolve_field_reference', 'resolve_field_reference', (['_fkey_field', 'parent', 'context.full_entity'], {}), '(_fkey_field, parent, context.full_entity)\n', (1972, 2014), False, 'from reles.references import resolve_field_reference\n'), ((410, 478), 'jsonschema.ValidationError', 'ValidationError', (['"""\'log_access\' entity cannot be overridden manually"""'], {}), '("\'log_access\' entity cannot be overridden manually")\n', (425, 478), False, 'from jsonschema import ValidationError\n'), ((1687, 1757), 'jsonschema.ValidationError', 'ValidationError', (['"""\'fill_from_fkey\' entity can not be set to anything!"""'], {}), '("\'fill_from_fkey\' entity can not be set to anything!")\n', (1702, 1757), False, 'from jsonschema import ValidationError\n'), ((1508, 1562), 'reles.references.resolve_field_reference', 'resolve_field_reference', (['_field', 'None', 'source_document'], {}), '(_field, None, source_document)\n', (1531, 1562), False, 'from reles.references import resolve_field_reference\n'), ((545, 551), 'time.time', 'time', ([], {}), '()\n', (549, 551), False, 'from time import time\n'), ((602, 608), 'time.time', 'time', ([], {}), '()\n', (606, 608), False, 'from time import time\n')]
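To make the modifier machinery above concrete, here is a sketch of a schema that opts into `x-log-access`; the property and field names are invented, and the commented-out call assumes a Flask request context plus a datastore implementation:

# hypothetical schema using the 'x-log-access' modifier defined above
schema = {
    'type': 'object',
    'properties': {
        'audit': {
            'type': 'object',
            'x-log-access': {
                'created': 'created_at',
                'updated': 'updated_at',
                'user': 'modified_by',
                'customer': 'customer',
            },
        },
    },
}
# processor = Processor(schema, datastore)  # datastore object is assumed
# doc = processor.process({})               # stamps created_at, modified_by, customer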
""" ============================================= Multiclass Classification with NumPy and TMVA ============================================= """ from array import array import numpy as np from numpy.random import RandomState from root_numpy.tmva import add_classification_events, evaluate_reader from root_numpy import ROOT_VERSION import matplotlib.pyplot as plt from ROOT import TMVA, TFile, TCut plt.style.use('ggplot') RNG = RandomState(42) # Construct an example multiclass dataset n_events = 1000 class_0 = RNG.multivariate_normal( [-2, -2], np.diag([1, 1]), n_events) class_1 = RNG.multivariate_normal( [0, 2], np.diag([1, 1]), n_events) class_2 = RNG.multivariate_normal( [2, -2], np.diag([1, 1]), n_events) X = np.concatenate([class_0, class_1, class_2]) y = np.ones(X.shape[0]) w = RNG.randint(1, 10, n_events * 3) y[:class_0.shape[0]] *= 0 y[-class_2.shape[0]:] *= 2 permute = RNG.permutation(y.shape[0]) X = X[permute] y = y[permute] # Split into training and test datasets X_train, y_train, w_train = X[:n_events], y[:n_events], w[:n_events] X_test, y_test, w_test = X[n_events:], y[n_events:], w[n_events:] output = TFile('tmva_output.root', 'recreate') factory = TMVA.Factory('classifier', output, 'AnalysisType=Multiclass:' '!V:Silent:!DrawProgressBar') if ROOT_VERSION >= '6.07/04': data = TMVA.DataLoader('.') else: data = factory for n in range(2): data.AddVariable('f{0}'.format(n), 'F') # Call root_numpy's utility functions to add events from the arrays add_classification_events(data, X_train, y_train, weights=w_train) add_classification_events(data, X_test, y_test, weights=w_test, test=True) # The following line is necessary if events have been added individually: data.PrepareTrainingAndTestTree(TCut('1'), 'NormMode=EqualNumEvents') # Train an MLP if ROOT_VERSION >= '6.07/04': BookMethod = factory.BookMethod else: BookMethod = TMVA.Factory.BookMethod BookMethod(data, 'MLP', 'MLP', 'NeuronType=tanh:NCycles=200:HiddenLayers=N+2,2:' 'TestRate=5:EstimatorType=MSE') factory.TrainAllMethods() # Classify the test dataset with the BDT reader = TMVA.Reader() for n in range(2): reader.AddVariable('f{0}'.format(n), array('f', [0.])) reader.BookMVA('MLP', 'weights/classifier_MLP.weights.xml') class_proba = evaluate_reader(reader, 'MLP', X_test) # Plot the decision boundaries plot_colors = "rgb" plot_step = 0.02 class_names = "ABC" cmap = plt.get_cmap('Paired') fig = plt.figure(figsize=(5, 5)) fig.patch.set_alpha(0) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = evaluate_reader(reader, 'MLP', np.c_[xx.ravel(), yy.ravel()]) Z = np.argmax(Z, axis=1) - 1 Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=cmap, alpha=0.5) plt.axis("tight") # Plot the training points for i, n, c in zip(range(3), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=cmap, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') plt.tight_layout() plt.show()
[ "matplotlib.pyplot.title", "numpy.argmax", "numpy.ones", "ROOT.TFile", "matplotlib.pyplot.style.use", "matplotlib.pyplot.figure", "matplotlib.pyplot.contourf", "numpy.arange", "ROOT.TMVA.DataLoader", "numpy.diag", "matplotlib.pyplot.tight_layout", "root_numpy.tmva.add_classification_events", "numpy.random.RandomState", "array.array", "ROOT.TMVA.Reader", "matplotlib.pyplot.show", "root_numpy.tmva.evaluate_reader", "matplotlib.pyplot.get_cmap", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "ROOT.TCut", "matplotlib.pyplot.ylabel", "numpy.concatenate", "matplotlib.pyplot.xlim", "matplotlib.pyplot.scatter", "ROOT.TMVA.Factory", "matplotlib.pyplot.axis", "numpy.where", "matplotlib.pyplot.xlabel" ]
[((401, 424), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (414, 424), True, 'import matplotlib.pyplot as plt\n'), ((431, 446), 'numpy.random.RandomState', 'RandomState', (['(42)'], {}), '(42)\n', (442, 446), False, 'from numpy.random import RandomState\n'), ((735, 778), 'numpy.concatenate', 'np.concatenate', (['[class_0, class_1, class_2]'], {}), '([class_0, class_1, class_2])\n', (749, 778), True, 'import numpy as np\n'), ((783, 802), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (790, 802), True, 'import numpy as np\n'), ((1147, 1184), 'ROOT.TFile', 'TFile', (['"""tmva_output.root"""', '"""recreate"""'], {}), "('tmva_output.root', 'recreate')\n", (1152, 1184), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1195, 1287), 'ROOT.TMVA.Factory', 'TMVA.Factory', (['"""classifier"""', 'output', '"""AnalysisType=Multiclass:!V:Silent:!DrawProgressBar"""'], {}), "('classifier', output,\n 'AnalysisType=Multiclass:!V:Silent:!DrawProgressBar')\n", (1207, 1287), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1553, 1619), 'root_numpy.tmva.add_classification_events', 'add_classification_events', (['data', 'X_train', 'y_train'], {'weights': 'w_train'}), '(data, X_train, y_train, weights=w_train)\n', (1578, 1619), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((1620, 1694), 'root_numpy.tmva.add_classification_events', 'add_classification_events', (['data', 'X_test', 'y_test'], {'weights': 'w_test', 'test': '(True)'}), '(data, X_test, y_test, weights=w_test, test=True)\n', (1645, 1694), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((2180, 2193), 'ROOT.TMVA.Reader', 'TMVA.Reader', ([], {}), '()\n', (2191, 2193), False, 'from ROOT import TMVA, TFile, TCut\n'), ((2346, 2384), 'root_numpy.tmva.evaluate_reader', 'evaluate_reader', (['reader', '"""MLP"""', 'X_test'], {}), "(reader, 'MLP', X_test)\n", (2361, 2384), False, 'from root_numpy.tmva import add_classification_events, evaluate_reader\n'), ((2481, 2503), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Paired"""'], {}), "('Paired')\n", (2493, 2503), True, 'import matplotlib.pyplot as plt\n'), ((2511, 2537), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (2521, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2899, 2944), 'matplotlib.pyplot.contourf', 'plt.contourf', (['xx', 'yy', 'Z'], {'cmap': 'cmap', 'alpha': '(0.5)'}), '(xx, yy, Z, cmap=cmap, alpha=0.5)\n', (2911, 2944), True, 'import matplotlib.pyplot as plt\n'), ((2945, 2962), 'matplotlib.pyplot.axis', 'plt.axis', (['"""tight"""'], {}), "('tight')\n", (2953, 2962), True, 'import matplotlib.pyplot as plt\n'), ((3182, 3204), 'matplotlib.pyplot.xlim', 'plt.xlim', (['x_min', 'x_max'], {}), '(x_min, x_max)\n', (3190, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3205, 3227), 'matplotlib.pyplot.ylim', 'plt.ylim', (['y_min', 'y_max'], {}), '(y_min, y_max)\n', (3213, 3227), True, 'import matplotlib.pyplot as plt\n'), ((3228, 3257), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (3238, 3257), True, 'import matplotlib.pyplot as plt\n'), ((3258, 3273), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (3268, 3273), True, 'import matplotlib.pyplot as plt\n'), ((3274, 3289), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (3284, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3290, 3320), 
'matplotlib.pyplot.title', 'plt.title', (['"""Decision Boundary"""'], {}), "('Decision Boundary')\n", (3299, 3320), True, 'import matplotlib.pyplot as plt\n'), ((3322, 3340), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3338, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3341, 3351), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3349, 3351), True, 'import matplotlib.pyplot as plt\n'), ((555, 570), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (562, 570), True, 'import numpy as np\n'), ((629, 644), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (636, 644), True, 'import numpy as np\n'), ((704, 719), 'numpy.diag', 'np.diag', (['[1, 1]'], {}), '([1, 1])\n', (711, 719), True, 'import numpy as np\n'), ((1375, 1395), 'ROOT.TMVA.DataLoader', 'TMVA.DataLoader', (['"""."""'], {}), "('.')\n", (1390, 1395), False, 'from ROOT import TMVA, TFile, TCut\n'), ((1801, 1810), 'ROOT.TCut', 'TCut', (['"""1"""'], {}), "('1')\n", (1805, 1810), False, 'from ROOT import TMVA, TFile, TCut\n'), ((2686, 2720), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'plot_step'], {}), '(x_min, x_max, plot_step)\n', (2695, 2720), True, 'import numpy as np\n'), ((2743, 2777), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'plot_step'], {}), '(y_min, y_max, plot_step)\n', (2752, 2777), True, 'import numpy as np\n'), ((2850, 2870), 'numpy.argmax', 'np.argmax', (['Z'], {'axis': '(1)'}), '(Z, axis=1)\n', (2859, 2870), True, 'import numpy as np\n'), ((3057, 3073), 'numpy.where', 'np.where', (['(y == i)'], {}), '(y == i)\n', (3065, 3073), True, 'import numpy as np\n'), ((3078, 3149), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'c': 'c', 'cmap': 'cmap', 'label': "('Class %s' % n)"}), "(X[idx, 0], X[idx, 1], c=c, cmap=cmap, label='Class %s' % n)\n", (3089, 3149), True, 'import matplotlib.pyplot as plt\n'), ((2254, 2271), 'array.array', 'array', (['"""f"""', '[0.0]'], {}), "('f', [0.0])\n", (2259, 2271), False, 'from array import array\n')]
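# Editor's note: the blob above stores only extracted call sites for the TMVA
# multiclass example; the snippet below is a hedged, runnable reconstruction of
# just its dataset-generation step. Only np.diag, np.concatenate, np.ones and
# RandomState appear verbatim in the extraction -- the class means and the
# multivariate_normal sampling call are illustrative assumptions.
import numpy as np
from numpy.random import RandomState

rng = RandomState(42)
class_0 = rng.multivariate_normal([-2, -2], np.diag([1, 1]), 100)
class_1 = rng.multivariate_normal([0, 2], np.diag([1, 1]), 100)
class_2 = rng.multivariate_normal([2, -2], np.diag([1, 1]), 100)
X = np.concatenate([class_0, class_1, class_2])
w = np.ones(X.shape[0])  # unit event weights, as in the record above
y = np.repeat([0, 1, 2], 100)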
from drl_negotiation.scenario import BaseScenario
from drl_negotiation.core import TrainWorld, MySCML2020Agent
from drl_negotiation.myagent import MyComponentsBasedAgent
from drl_negotiation.hyperparameters import *
from negmas.helpers import get_class
from scml.scml2020 import (
    DecentralizingAgent,
    BuyCheapSellExpensiveAgent,
    SCML2020World,
    is_system_agent,
)
from typing import Union
import numpy as np


class Scenario(BaseScenario):
    def make_world(self, config=None) -> TrainWorld:
        # configuration for the SCML scenario
        if config is None:
            agent_types = [get_class(agent_type) for agent_type in TRAINING_AGENT_TYPES]
            n_steps = N_STEPS
            world_configuration = SCML2020World.generate(
                agent_types=agent_types,
                n_steps=n_steps
            )
        else:
            world_configuration = SCML2020World.generate(
                agent_types=config['agent_types'],
                agent_params=config['agent_params'][:-2],
                n_steps=config['n_steps']
            )

        world = TrainWorld(configuration=world_configuration)

        if config is None:
            self.reset_world(world)

        return world

    def reset_world(self, world):
        # reset callback: rebuild the world, agents and factories
        # (agents keep their fixed positions)
        agent_types = world.configuration['agent_types']
        agent_params = world.configuration['agent_params'][:-2]
        n_steps = world.configuration['n_steps']

        reset_configuration = SCML2020World.generate(
            # TODO: [future work] the configuration itself could be re-randomized here
            agent_types=agent_types,
            agent_params=agent_params,
            n_steps=n_steps
        )

        world.__init__(configuration=reset_configuration)

    def benchmark_data(self, agent, world, seller=True):
        # TODO: data for benchmarking purposes (info_callback);
        # will be rendered when display is true.
        # How to compare different companies: ratio analysis, e.g.
        # price-to-earnings ratio and net profit margin,
        # https://www.investopedia.com/ask/answers/032315/how-does-ratio-analysis-make-it-easier-compare-different-companies.asp
        # and margin ratios / return ratios,
        # https://corporatefinanceinstitute.com/resources/knowledge/finance/profitability-ratios/
        profitability = []
        initial_balances = []
        factories = [_ for _ in world.factories if not is_system_agent(_.agent_id)]
        for i, factory in enumerate(factories):
            initial_balances.append(factory.initial_balance)
        normalize = all(_ != 0 for _ in initial_balances)

        for _ in world.agents:
            if world.agents[_].action_callback == "system": continue
            if world.agents[_] in world.heuristic_agents:
                if normalize:
                    profitability.append(
                        (agent.state.f[2] - agent.state.f[0]) / agent.state.f[0] -
                        ([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
                         [f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]) /
                        [f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0]
                    )
                else:
                    profitability.append(
                        (agent.state.f[2] - agent.state.f[0]) -
                        ([f.current_balance for f in factories if f.agent_id == world.agents[_].id][0] -
                         [f.initial_balance for f in factories if f.agent_id == world.agents[_].id][0])
                    )
        return {"profitability": profitability}

    def good_agents(self, world):
        return [agent for agent in world.agents if not agent.adversary]

    def adversaries(self, world):
        return [agent for agent in world.agents if agent.adversary]

    def reward(self, agent, world, seller=True):
        # reward callback. Beware the delayed-reward problem, and keep this in
        # mind when writing reward functions: you get what you incentivize,
        # not what you intend.
        # idea 1: external rewards, e.g. balance - initial balance for the agent,
        #         -(balance - initial balance) for an adversary agent
        # idea 2: intrinsic motivation rewards, see
        # "On Learning Intrinsic Rewards for Policy Gradient Methods", https://arxiv.org/abs/1804.06459
        return self.adversary_reward(agent, world) if agent.adversary else self.agent_reward(agent, world)

    def agent_reward(self, agent, world):
        # 1. Difference of balance between the end of the last step and the begin of the current step
        # 2. Difference of balance with the other agents
        rew = 0
        # the agent starts a negotiation in this world step (except in the initial state)
        if agent.state.o_negotiation_step == agent.awi.current_step:
            rew = (agent.state.f[2] - agent.state.f[1]) / (agent.state.f[0]) * REW_FACTOR
            gap = []
            for entity in world.entities:
                if entity is agent: continue
                if entity.action_callback == "system": continue
                if entity.action_callback is None: continue
                initial_balance = [_.initial_balance for _ in world.factories if _.agent_id == entity.id][0]
                current_balance = [_.current_balance for _ in world.factories if _.agent_id == entity.id][0]
                gap.append((current_balance - initial_balance) / initial_balance)

            rew -= np.mean(np.array(gap))
        return rew

    def adversary_reward(self, agent, world):
        # TODO: keep the good agents near their initial funds
        # (negative/positive reward, e.g. agent.init_f - agent.f)
        rew = 0
        return rew

    def observation(self, agent: Union[MyComponentsBasedAgent, MySCML2020Agent], world: Union[TrainWorld], seller=True):
        # observation callback: gather all observations
        _obs = agent._get_obs(seller=seller)

        # 2. Economic gap with others, extra information
        # economic_gaps = []
        #
        # for entity in world.entities:
        #     if entity is agent: continue
        #     economic_gaps.append(entity.state.f - agent.state.f)
        #
        # economic_gaps = np.array(economic_gaps)

        # return np.concatenate(economic_gaps + o_m.flatten() + o_a + o_u_c + o_u_e + o_u_t + o_q_n.flatten() + o_t_c)
        # return np.concatenate((economic_gaps.flatten(), _obs))
        return _obs

    def done(self, agent, world, seller=True):
        # done callback
        # the simulation has ended
        if world.world_done:
            return True
        # the agent is bankrupt
        return [_.is_bankrupt for _ in world.factories if _.agent_id == agent.id][0]
[ "scml.scml2020.SCML2020World.generate", "scml.scml2020.is_system_agent", "drl_negotiation.core.TrainWorld", "numpy.array", "negmas.helpers.get_class" ]
[((1138, 1183), 'drl_negotiation.core.TrainWorld', 'TrainWorld', ([], {'configuration': 'world_configuration'}), '(configuration=world_configuration)\n', (1148, 1183), False, 'from drl_negotiation.core import TrainWorld, MySCML2020Agent\n'), ((1599, 1694), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': 'agent_types', 'agent_params': 'agent_params', 'n_steps': 'n_steps'}), '(agent_types=agent_types, agent_params=agent_params,\n n_steps=n_steps)\n', (1621, 1694), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((773, 837), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': 'agent_types', 'n_steps': 'n_steps'}), '(agent_types=agent_types, n_steps=n_steps)\n', (795, 837), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((932, 1063), 'scml.scml2020.SCML2020World.generate', 'SCML2020World.generate', ([], {'agent_types': "config['agent_types']", 'agent_params': "config['agent_params'][:-2]", 'n_steps': "config['n_steps']"}), "(agent_types=config['agent_types'], agent_params=\n config['agent_params'][:-2], n_steps=config['n_steps'])\n", (954, 1063), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n'), ((5546, 5559), 'numpy.array', 'np.array', (['gap'], {}), '(gap)\n', (5554, 5559), True, 'import numpy as np\n'), ((645, 666), 'negmas.helpers.get_class', 'get_class', (['agent_type'], {}), '(agent_type)\n', (654, 666), False, 'from negmas.helpers import get_class\n'), ((2523, 2550), 'scml.scml2020.is_system_agent', 'is_system_agent', (['_.agent_id'], {}), '(_.agent_id)\n', (2538, 2550), False, 'from scml.scml2020 import DecentralizingAgent, BuyCheapSellExpensiveAgent, SCML2020World, is_system_agent\n')]
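# Editor's note: a minimal, self-contained check of the normalized
# profitability measure computed in benchmark_data above -- the agent's
# relative balance change minus a competitor's. The balance triple layout
# (initial, previous-step, current) and all numbers are invented for
# illustration.
f = (100.0, 0.0, 130.0)
other_initial, other_current = 100.0, 110.0
profitability = (f[2] - f[0]) / f[0] - (other_current - other_initial) / other_initial
assert abs(profitability - 0.20) < 1e-9  # a 30% gain against a 10% baseline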
# MIT License # # Copyright (c) 2021 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import setuptools import os basedir = os.path.abspath(os.path.dirname(__file__)) with open(basedir + "/README.md", "r") as ld: long_description = ld.read() setuptools.setup( name="st-schema-python", version="2.0.0", author="erickvneri", description="SmartThings Schema Connector Python SDK", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/erickvneri/st-schema-python/", packages=setuptools.find_packages(), install_requires=["marshmallow"], python_requires=">=3.6", classifiers=[ "Development Status :: 4 - Beta", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Topic :: Home Automation", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", ], )
[ "os.path.dirname", "setuptools.find_packages" ]
[((1157, 1182), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1172, 1182), False, 'import os\n'), ((1578, 1604), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (1602, 1604), False, 'import setuptools\n')]
from .featurizer import Featurizer
from dataset import AuxTables
from dataset.dataset import dictify
import pandas as pd
import itertools
import torch
from tqdm import tqdm


# GM
class OccurFeaturizerfusion(Featurizer):
    def specific_setup(self):
        self.name = "OccurFeaturizerfusion"
        if not self.setup_done:
            raise Exception('Featurizer %s is not properly setup.' % self.name)
        if self.tensor is None:
            self.raw_data_dict = self.ds.raw_data.df.set_index(self.ds.key).to_dict('index')
            self.all_attrs = self.ds.get_attributes()
            self.all_attrs.remove(self.ds.src)
            self.all_attrs.remove(self.ds.key)
            self.create_cooccur_stats_dictionary()
        # self.all_attrs = self.ds.get_attributes()
        self.attrs_number = len(self.ds.attr_to_idx)

    def create_tensor(self):
        # Iterate over the tuples in the domain
        tensors = []
        # Set tuple_id index on raw_data
        t = self.ds.aux_table[AuxTables.cell_domain]
        self.create_cooccur_dictionary()
        sorted_domain = t.df.reset_index().sort_values(by=['_vid_'])[['_tid_', 'object', 'attribute', '_vid_', 'domain']]
        records = sorted_domain.to_records()
        for row in tqdm(list(records)):
            # Build the feature tensor for this tuple of the raw dataset
            feat_tensor = self.gen_feat_tensor(row)
            tensors.append(feat_tensor)
        self.tensor = torch.cat(tensors)
        return self.tensor

    def gen_feat_tensor(self, row):
        # dimension is 1 x (max domain size) x (number of attributes)
        tensor = torch.zeros(1, self.classes, self.attrs_number)
        rv_attr = row['attribute']
        object = row['object']
        domain = row['domain'].split('|||')
        rv_domain_idx = {val: idx for idx, val in enumerate(domain)}
        # set the index corresponding to each candidate value to its
        # co-occurrence probability with the other attributes' current values
        for attr in self.all_attrs:
            if attr != rv_attr and attr != self.ds.key:
                attr_idx = self.ds.attr_to_idx[attr]
                co_value = self.dictionary_cooccur[object][attr]
                count1 = self.domain_stats[object][attr][co_value]
                for rv_val in rv_domain_idx:
                    count = self.cooccur_pair_stats[object][attr][rv_attr].get((co_value, rv_val), 0)
                    prob = float(count) / count1
                    tensor[0][rv_domain_idx[rv_val]][attr_idx] = prob
        return tensor

    def create_cooccur_dictionary(self):
        """
        Create the co-occurrence dictionary from the current_init auxiliary
        table, i.e. the dictionary of currently inferred values of the
        dataset's cells.
        """
        self.dictionary_cooccur = {}
        current_init_dict = self.ds.aux_table[AuxTables.current_init].df.to_dict('index')
        for object_key in current_init_dict:
            self.dictionary_cooccur[object_key] = {}
            for attr in self.all_attrs:
                if attr != self.ds.src and attr != self.ds.key:
                    self.dictionary_cooccur[object_key][attr] = current_init_dict[object_key][attr]
        return

    def create_cooccur_stats_dictionary(self):
        """
        Creates the co-occurrence statistics for values, per object.
        """
        # counts how often two observed values occur together for two of an entity's attributes,
        # e.g. self.cooccur_pair_stats[entity][attr1][attr2][(val1, val2)] = n
        # where n is the number of times attr1=val1 and attr2=val2 for that entity
        self.cooccur_pair_stats = {}
        # counts how often each observed value occurs for a particular entity's attributes,
        # e.g. self.domain_stats[entity][attr][val] = n
        # where n is the number of times attr=val for that entity
        self.domain_stats = {}
        # iterate through the provided dataset
        for row in self.ds.raw_data.df.to_dict('records'):
            # if an entity is not yet in the domain_stats object,
            # initialize its dictionaries
            if row[self.ds.key] not in self.domain_stats:
                self.cooccur_pair_stats[row[self.ds.key]] = {}
                self.domain_stats[row[self.ds.key]] = {}

            # create the domain_stats for each value,
            # iterating through the attributes
            for co_attribute in self.all_attrs:
                if co_attribute != self.ds.key and co_attribute != "src":
                    # initialize dictionaries if the attribute hasn't been initialized for this entity
                    if co_attribute not in self.domain_stats[row[self.ds.key]]:
                        self.cooccur_pair_stats[row[self.ds.key]][co_attribute] = {}
                        self.domain_stats[row[self.ds.key]][co_attribute] = {}

                    value = row[co_attribute]
                    if value not in self.domain_stats[row[self.ds.key]][co_attribute]:
                        self.domain_stats[row[self.ds.key]][co_attribute][value] = 0.0
                    self.domain_stats[row[self.ds.key]][co_attribute][value] += 1.0

                    # create the cooccur_pair_stats
                    for co_attribute1 in self.all_attrs:
                        if co_attribute1 != self.ds.key and co_attribute1 != "src" and co_attribute1 != co_attribute:
                            if co_attribute1 not in self.cooccur_pair_stats[row[self.ds.key]][co_attribute]:
                                self.cooccur_pair_stats[row[self.ds.key]][co_attribute][co_attribute1] = {}

                            value2 = row[co_attribute1]
                            assgn_tuple = (value, value2)
                            if assgn_tuple not in self.cooccur_pair_stats[row[self.ds.key]][co_attribute][co_attribute1]:
                                self.cooccur_pair_stats[row[self.ds.key]][co_attribute][co_attribute1][assgn_tuple] = 0.0
                            self.cooccur_pair_stats[row[self.ds.key]][co_attribute][co_attribute1][assgn_tuple] += 1.0
        return

    def feature_names(self):
        return self.all_attrs
[ "torch.zeros", "torch.cat" ]
[((1426, 1444), 'torch.cat', 'torch.cat', (['tensors'], {}), '(tensors)\n', (1435, 1444), False, 'import torch\n'), ((1526, 1573), 'torch.zeros', 'torch.zeros', (['(1)', 'self.classes', 'self.attrs_number'], {}), '(1, self.classes, self.attrs_number)\n', (1537, 1573), False, 'import torch\n')]
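# Editor's note: a toy illustration of the conditional probability filled into
# the tensor by gen_feat_tensor above: P(candidate | co-occurring value) =
# pair count / value count. The city/state data is invented.
domain_stats = {'city': {'NYC': 4.0}}
pair_stats = {'city': {'state': {('NYC', 'NY'): 3.0, ('NYC', 'NJ'): 1.0}}}
co_value = 'NYC'
for rv_val in ('NY', 'NJ', 'CA'):
    count = pair_stats['city']['state'].get((co_value, rv_val), 0)
    print(rv_val, float(count) / domain_stats['city'][co_value])
# -> NY 0.75, NJ 0.25, CA 0.0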
""" dp[i][j][k] = i頂点n辺グラフ長さ最大値kの組み合わせ数 dp[i][j][k] = dp[i - k][n - usededge][_] for _ in range(k + 1) よくわからん """ import math from operator import mul from functools import reduce def comb(n, r): r = min(r, n - r) numer = reduce(mul, range(n, n - r, -1), 1) denom = reduce(mul, range(1, r + 1), 1) return numer // denom MOD = 10 ** 9 + 7 n, m, l = map(int, input().split()) dp = [[[1] * (l + 1) for _ in range(m + 1)] for __ in range(n + 1)] for i in range(n + 1): for j in range(m + 1): for k in range(1, l + 1): if i - k >= 0 and j - k + 1 >= 0: if k > 2: dp[i][j][k] *= (k + 1) * math.factorial(k - 1) / 2 % MOD for _ in range(1, k + 1): dp[i][j][k] *= dp[i - k][j - k + 1][_] dp[i][j][k] %= MOD elif k == 1: dp[i][j][k] *= 2 for _ in range(1, k + 1): dp[i][j][k] *= dp[i - k][j - k + 1][_] + dp[i - k][j - k][_] dp[i][j][k] %= MOD dp[i][j][k] *= comb(i, k) % MOD dp[i][j][k] %= MOD print(dp[n][m][l])
[ "math.factorial" ]
[((669, 690), 'math.factorial', 'math.factorial', (['(k - 1)'], {}), '(k - 1)\n', (683, 690), False, 'import math\n')]
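# Editor's note: the hand-rolled comb() in the record above computes binomial
# coefficients with reduce/mul; since Python 3.8 the standard library provides
# the same value directly:
import math
assert math.comb(10, 3) == 120  # matches comb(10, 3) above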
import os

# Absolute path of the directory containing this file.
dir_path = os.path.dirname(os.path.realpath(__file__))

# Crude platform check: if the path starts with the 'D:' drive letter we are
# on the Windows development machine and use backslash separators; otherwise
# assume a Unix-like system with forward slashes. (The backslashes are now
# properly escaped; the original relied on "\d" etc. being invalid, and
# therefore literal, escape sequences.)
if dir_path.split('\\')[0] == 'D:':
    datasources_path = dir_path + "\\datasources\\"
    enrichment_path = dir_path + "\\enrichment\\"
    pickles_path = dir_path + "\\pickles\\"
    learning_models_path = dir_path + "\\learning_models\\"
    temp_files_path = dir_path + "\\tmp\\"
else:
    datasources_path = dir_path + "/datasources/"
    enrichment_path = dir_path + "/enrichment/"
    pickles_path = dir_path + "/pickles/"
    learning_models_path = dir_path + "/learning_models/"
    temp_files_path = dir_path + "/tmp/"
[ "os.path.realpath" ]
[((37, 63), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (53, 63), False, 'import os\n')]
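# Editor's note: a portable sketch of the path setup above, assuming the
# intent is simply "project subdirectories ending in a separator".
# os.path.join selects the right separator on every platform, removing the
# need to branch on the 'D:' drive letter:
import os

base = os.path.dirname(os.path.realpath(__file__))
datasources_path = os.path.join(base, "datasources", "")  # trailing os.sep
pickles_path = os.path.join(base, "pickles", "")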
import pytest from bitcoinx import ( hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str, ) from bitcoinx.coin import * header_400k = ( b'\x04\x00\x00\x009\xfa\x82\x18Hx\x1f\x02z.m\xfa\xbb\xf6\xbd\xa9 \xd9' b'\xaea\xb64\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\xec\xaeSj0@B\xe3' b'\x15K\xe0\xe3\xe9\xa8"\x0eUh\xc3C:\x9a\xb4\x9a\xc4\xcb\xb7O\x8d\xf8' b'\xe8\xb0\xcc*\xcfV\x9f\xb9\x06\x18\x06e,\'' ) @pytest.mark.parametrize("raw_header,header_hash,version,prev_hash," "merkle_root,timestamp,bits,nonce", ( ( Bitcoin.genesis_header, '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f', 1, '0000000000000000000000000000000000000000000000000000000000000000', '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b', 1231006505, 486604799, 2083236893 ), ( header_400k, '000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f', 4, '0000000000000000030034b661aed920a9bdf6bbfa6d2e7a021f78481882fa39', 'b0e8f88d4fb7cbc49ab49a3a43c368550e22a8e9e3e04b15e34240306a53aeec', 1456417484, 403093919, 657220870 ), )) def test_Bitcoin(raw_header, header_hash, version, prev_hash, merkle_root, timestamp, bits, nonce): header_hash = hex_str_to_hash(header_hash) prev_hash = hex_str_to_hash(prev_hash) merkle_root = hex_str_to_hash(merkle_root) assert Bitcoin.header_hash(raw_header) == header_hash assert Bitcoin.header_prev_hash(raw_header) == prev_hash assert Bitcoin.header_work(raw_header) == bits_to_work(bits) assert Bitcoin.header_timestamp(raw_header) == timestamp header = Bitcoin.deserialized_header(raw_header, 0) assert header.version == version assert header.prev_hash == prev_hash assert header.merkle_root == merkle_root assert header.timestamp == timestamp assert header.bits == bits assert header.nonce == nonce assert header.raw == raw_header assert header.hash == header_hash assert header.height == 0 assert header.work() == Bitcoin.header_work(raw_header) assert header.target() == bits_to_target(bits) assert header.hash_value() == hash_to_value(header_hash) assert header.hex_str() == hash_to_hex_str(header_hash) assert 'height=0' in str(header) def test_from_WIF_byte(): for coin in all_coins: if coin is BitcoinScalingTestnet: # Testnet has the same identifiers as scaling testnet, as the latter is dumbed down. assert Coin.from_WIF_byte(coin.WIF_byte) is BitcoinTestnet else: assert Coin.from_WIF_byte(coin.WIF_byte) is coin with pytest.raises(ValueError): Coin.from_WIF_byte(0x01) def test_lookup_xver_bytes(): for coin in all_coins: if coin is BitcoinScalingTestnet: # Testnet has the same identifiers as scaling testnet, as the latter is dumbed down. assert Coin.lookup_xver_bytes(coin.xpub_verbytes) == (BitcoinTestnet, True) assert Coin.lookup_xver_bytes(coin.xprv_verbytes) == (BitcoinTestnet, False) else: assert Coin.lookup_xver_bytes(coin.xpub_verbytes) == (coin, True) assert Coin.lookup_xver_bytes(coin.xprv_verbytes) == (coin, False) with pytest.raises(ValueError): Coin.lookup_xver_bytes(bytes.fromhex("043587ff")) def test_P2SH_verbyte(): assert Bitcoin.P2SH_verbyte == 0x05 assert BitcoinTestnet.P2SH_verbyte == BitcoinScalingTestnet.P2SH_verbyte == 0xc4
[ "bitcoinx.hash_to_hex_str", "bitcoinx.hash_to_value", "bitcoinx.hex_str_to_hash", "pytest.raises", "bitcoinx.bits_to_target", "pytest.mark.parametrize", "bitcoinx.bits_to_work" ]
[((447, 1117), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raw_header,header_hash,version,prev_hash,merkle_root,timestamp,bits,nonce"""', "((Bitcoin.genesis_header,\n '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f', 1,\n '0000000000000000000000000000000000000000000000000000000000000000',\n '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b', \n 1231006505, 486604799, 2083236893), (header_400k,\n '000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f', 4,\n '0000000000000000030034b661aed920a9bdf6bbfa6d2e7a021f78481882fa39',\n 'b0e8f88d4fb7cbc49ab49a3a43c368550e22a8e9e3e04b15e34240306a53aeec', \n 1456417484, 403093919, 657220870))"], {}), "(\n 'raw_header,header_hash,version,prev_hash,merkle_root,timestamp,bits,nonce'\n , ((Bitcoin.genesis_header,\n '000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f', 1,\n '0000000000000000000000000000000000000000000000000000000000000000',\n '4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b', \n 1231006505, 486604799, 2083236893), (header_400k,\n '000000000000000004ec466ce4732fe6f1ed1cddc2ed4b328fff5224276e3f6f', 4,\n '0000000000000000030034b661aed920a9bdf6bbfa6d2e7a021f78481882fa39',\n 'b0e8f88d4fb7cbc49ab49a3a43c368550e22a8e9e3e04b15e34240306a53aeec', \n 1456417484, 403093919, 657220870)))\n", (470, 1117), False, 'import pytest\n'), ((1388, 1416), 'bitcoinx.hex_str_to_hash', 'hex_str_to_hash', (['header_hash'], {}), '(header_hash)\n', (1403, 1416), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((1433, 1459), 'bitcoinx.hex_str_to_hash', 'hex_str_to_hash', (['prev_hash'], {}), '(prev_hash)\n', (1448, 1459), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((1478, 1506), 'bitcoinx.hex_str_to_hash', 'hex_str_to_hash', (['merkle_root'], {}), '(merkle_root)\n', (1493, 1506), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((1673, 1691), 'bitcoinx.bits_to_work', 'bits_to_work', (['bits'], {}), '(bits)\n', (1685, 1691), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((2232, 2252), 'bitcoinx.bits_to_target', 'bits_to_target', (['bits'], {}), '(bits)\n', (2246, 2252), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((2287, 2313), 'bitcoinx.hash_to_value', 'hash_to_value', (['header_hash'], {}), '(header_hash)\n', (2300, 2313), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((2345, 2373), 'bitcoinx.hash_to_hex_str', 'hash_to_hex_str', (['header_hash'], {}), '(header_hash)\n', (2360, 2373), False, 'from bitcoinx import hex_str_to_hash, bits_to_work, bits_to_target, hash_to_value, hash_to_hex_str\n'), ((2760, 2785), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2773, 2785), False, 'import pytest\n'), ((3375, 3400), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3388, 3400), False, 'import pytest\n')]
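# Editor's note: for readers of the tests above, the compact "bits" field can
# be decoded by hand as mantissa * 256**(exponent - 3); bitcoinx's
# bits_to_target is the authoritative implementation. Using the block-400k
# value from the test data:
bits = 403093919                 # == 0x1806b99f
exponent = bits >> 24            # 0x18
mantissa = bits & 0xFFFFFF       # 0x06b99f
target = mantissa << (8 * (exponent - 3))
assert target == 0x06b99f * 256 ** (0x18 - 3)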
# Copyright 2017 The TensorFlow Lattice Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CalibratedEtl tests."""
# Dependency imports

import numpy as np

from tensorflow_lattice.python.estimators import calibrated_etl
from tensorflow_lattice.python.estimators import hparams as tfl_hparams
from tensorflow_lattice.python.lib import keypoints_initialization
from tensorflow_lattice.python.lib import test_data

from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib
from tensorflow.python.platform import test

_NUM_KEYPOINTS = 50


class CalibratedEtlHParamsTest(test.TestCase):

  def testEmptyMonotonicLatticeRankExpectsError(self):
    hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
    hparams.set_param('monotonic_num_lattices', 2)
    hparams.set_param('monotonic_lattice_size', 2)
    with self.assertRaisesRegexp(
        ValueError,
        'Hyperparameter configuration cannot be used in the calibrated etl '
        'estimator.'):
      calibrated_etl.calibrated_etl_classifier(hparams=hparams)

  def testEmptyMonotonicLatticeSizeExpectsError(self):
    hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
    hparams.set_param('monotonic_num_lattices', 2)
    hparams.set_param('monotonic_lattice_rank', 2)
    with self.assertRaisesRegexp(
        ValueError,
        'Hyperparameter configuration cannot be used in the calibrated etl '
        'estimator.'):
      calibrated_etl.calibrated_etl_classifier(hparams=hparams)

  def testEmptyNonMonotonicLatticeRankExpectsError(self):
    hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
    hparams.set_param('non_monotonic_num_lattices', 2)
    hparams.set_param('non_monotonic_lattice_size', 2)
    with self.assertRaisesRegexp(
        ValueError,
        'Hyperparameter configuration cannot be used in the calibrated etl '
        'estimator.'):
      calibrated_etl.calibrated_etl_classifier(hparams=hparams)

  def testEmptyNonMonotonicLatticeSizeExpectsError(self):
    hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
    hparams.set_param('non_monotonic_num_lattices', 2)
    hparams.set_param('non_monotonic_lattice_rank', 2)
    with self.assertRaisesRegexp(
        ValueError,
        'Hyperparameter configuration cannot be used in the calibrated etl '
        'estimator.'):
      calibrated_etl.calibrated_etl_classifier(hparams=hparams)

  def testWrongLatticeRegularization(self):
    hparams = tfl_hparams.CalibratedEtlHParams(feature_names=['x'])
    hparams.set_param('non_monotonic_num_lattices', 2)
    hparams.set_param('non_monotonic_lattice_size', 2)
    hparams.set_param('non_monotonic_lattice_rank', 2)
    hparams.set_feature_param('x', 'lattice_l1_reg', 0.1)
    hparams.set_feature_param('x', 'lattice_l2_reg', 0.1)
    hparams.set_feature_param('x', 'lattice_l1_torsion_reg', 0.1)
    hparams.set_feature_param('x', 'lattice_l2_torsion_reg', 0.1)
    with self.assertRaisesRegexp(
        ValueError,
        'Hyperparameter configuration cannot be used in the calibrated etl '
        'estimator.'):
      calibrated_etl.calibrated_etl_classifier(hparams=hparams)


class CalibratedEtlTest(test.TestCase):

  def setUp(self):
    super(CalibratedEtlTest, self).setUp()
    self._test_data = test_data.TestData()

  def _CalibratedEtlRegressor(self, feature_names, feature_columns,
                              **hparams_args):

    def init_fn():
      return keypoints_initialization.uniform_keypoints_for_signal(
          _NUM_KEYPOINTS, -1., 1., 0., 1.)

    hparams = tfl_hparams.CalibratedEtlHParams(
        feature_names,
        num_keypoints=_NUM_KEYPOINTS,
        monotonic_num_lattices=1,
        monotonic_lattice_rank=1,
        monotonic_lattice_size=2,
        non_monotonic_num_lattices=1,
        non_monotonic_lattice_rank=1,
        non_monotonic_lattice_size=2,
        **hparams_args)
    # Turn off the monotonic calibrator.
    hparams.set_param('calibration_monotonic', None)
    hparams.set_param('learning_rate', 0.1)

    return calibrated_etl.calibrated_etl_regressor(
        feature_columns=feature_columns,
        hparams=hparams,
        keypoints_initializers_fn=init_fn)

  def _CalibratedEtlClassifier(self, feature_columns, **hparams_args):

    def init_fn():
      return keypoints_initialization.uniform_keypoints_for_signal(
          _NUM_KEYPOINTS, -1., 1., 0., 1.)

    hparams = tfl_hparams.CalibratedEtlHParams(
        num_keypoints=_NUM_KEYPOINTS,
        monotonic_num_lattices=1,
        monotonic_lattice_rank=1,
        monotonic_lattice_size=2,
        non_monotonic_num_lattices=1,
        non_monotonic_lattice_rank=1,
        non_monotonic_lattice_size=2,
        **hparams_args)
    # Turn off the monotonic calibrator.
    hparams.set_param('calibration_monotonic', None)
    hparams.set_param('learning_rate', 0.1)

    return calibrated_etl.calibrated_etl_classifier(
        feature_columns=feature_columns,
        hparams=hparams,
        keypoints_initializers_fn=init_fn)

  def testCalibratedEtlRegressorTraining1D(self):
    feature_columns = [
        feature_column_lib.numeric_column('x'),
    ]
    estimator = self._CalibratedEtlRegressor(
        ['x'], feature_columns, interpolation_type='simplex')
    estimator.train(input_fn=self._test_data.oned_input_fn())
    # Here we only check the successful evaluation.
    # Checking the actual number, accuracy, etc, makes the test too flaky.
    _ = estimator.evaluate(input_fn=self._test_data.oned_input_fn())

  def testCalibratedEtlRegressorTraining2D(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlRegressor(
        ['x0', 'x1'], feature_columns, interpolation_type='hypercube')
    estimator.train(input_fn=self._test_data.twod_input_fn())
    # Here we only check the successful evaluation.
    # Checking the actual number, accuracy, etc, makes the test too flaky.
    _ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())

  def testCalibratedEtlRegressorTraining2DWithCalibrationRegularization(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlRegressor(
        ['x0', 'x1'],
        feature_columns,
        interpolation_type='simplex',
        calibration_l1_reg=1e-2,
        calibration_l2_reg=1e-2,
        calibration_l1_laplacian_reg=0.05,
        calibration_l2_laplacian_reg=0.01)
    estimator.train(input_fn=self._test_data.twod_input_fn())
    # Here we only check the successful evaluation.
    # Checking the actual number, accuracy, etc, makes the test too flaky.
    _ = estimator.evaluate(input_fn=self._test_data.twod_input_fn())

  def testCalibratedEtlRegressorTraining2DWithLatticeRegularizer(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlRegressor(
        ['x0', 'x1'],
        feature_columns,
        interpolation_type='simplex',
        lattice_l1_reg=1.0,
        lattice_l2_reg=1.0,
        lattice_l1_torsion_reg=1.0,
        lattice_l2_torsion_reg=1.0,
        lattice_l1_laplacian_reg=1.0,
        lattice_l2_laplacian_reg=1.0)
    estimator.train(input_fn=self._test_data.twod_input_fn())
    results = estimator.evaluate(input_fn=self._test_data.twod_input_fn())
    # We expect a worse result due to the lattice regularization.
    self.assertGreater(results['average_loss'], 3e-3)
    self.assertLess(results['average_loss'], 4e-2)

  def testCalibratedEtlRegressorTrainingMultiDimensionalFeature(self):
    feature_columns = [
        feature_column_lib.numeric_column('x', shape=(2,)),
    ]
    estimator = self._CalibratedEtlRegressor(['x'], feature_columns)
    estimator.train(input_fn=self._test_data.multid_feature_input_fn())
    results = estimator.evaluate(
        input_fn=self._test_data.multid_feature_input_fn())
    self.assertLess(results['average_loss'], 1e-2)

    # Turn off calibration for feature 'x'; it should turn off for both
    # dimensions, and the results should get much worse.
    estimator = self._CalibratedEtlRegressor(
        ['x'], feature_columns, feature__x__num_keypoints=0)
    estimator.train(input_fn=self._test_data.multid_feature_input_fn())
    results = estimator.evaluate(
        input_fn=self._test_data.multid_feature_input_fn())
    self.assertGreater(results['average_loss'], 1e-2)

  def testCalibratedEtlClassifierTraining(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlClassifier(feature_columns)
    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
    results = estimator.evaluate(
        input_fn=self._test_data.twod_classificer_input_fn())
    self.assertGreater(results['accuracy'], 0.97)

  def testCalibratedEtlClassifierTrainingWithCalibrationRegularizer(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlClassifier(
        feature_columns,
        calibration_l1_reg=1e-2,
        calibration_l2_reg=1e-2,
        calibration_l1_laplacian_reg=1e-1,
        calibration_l2_laplacian_reg=1e-1,
        interpolation_type='hypercube')
    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
    # Here we only check the successful evaluation.
    # Checking the actual number, accuracy, etc, makes the test too flaky.
    _ = estimator.evaluate(
        input_fn=self._test_data.twod_classificer_input_fn())

  def testCalibratedEtlClassifierTrainingWithLatticeRegularizer(self):
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]
    estimator = self._CalibratedEtlClassifier(
        feature_columns,
        lattice_l1_reg=1.0,
        lattice_l2_reg=1.0,
        lattice_l1_torsion_reg=1.0,
        lattice_l2_torsion_reg=1.0,
        lattice_l1_laplacian_reg=1.0,
        lattice_l2_laplacian_reg=1.0,
        interpolation_type='hypercube')
    estimator.train(input_fn=self._test_data.twod_classificer_input_fn())
    results = estimator.evaluate(
        input_fn=self._test_data.twod_classificer_input_fn())
    # Due to the regularizer, we expect worse performance.
    self.assertLess(results['accuracy'], 0.97)
    self.assertGreater(results['accuracy'], 0.8)

  def testCalibratedEtlMonotonicClassifierTraining(self):
    # Construct the following training pairs:
    #
    # Training: (x, y)
    # ([0., 0.], 0.0)
    # ([0., 1.], 1.0)
    # ([1., 0.], 1.0)
    # ([1., 1.], 0.0)
    #
    # which is not a monotonic function. Then check whether forcing
    # monotonicity yields the following ordering:
    # f(0, 0) <= f(0, 1), f(0, 0) <= f(1, 0), f(0, 1) <= f(1, 1),
    # f(1, 0) <= f(1, 1).
    x0 = np.array([0.0, 0.0, 1.0, 1.0])
    x1 = np.array([0.0, 1.0, 0.0, 1.0])
    x_samples = {'x0': x0, 'x1': x1}
    training_y = np.array([[False], [True], [True], [False]])

    train_input_fn = numpy_io.numpy_input_fn(
        x=x_samples, y=training_y, batch_size=4, num_epochs=1000, shuffle=False)
    test_input_fn = numpy_io.numpy_input_fn(x=x_samples, y=None, shuffle=False)

    # Define a monotonic lattice classifier.
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]

    def init_fn():
      return keypoints_initialization.uniform_keypoints_for_signal(
          2, 0., 1., 0., 1.)

    hparams = tfl_hparams.CalibratedEtlHParams(
        num_keypoints=2,
        monotonic_num_lattices=2,
        monotonic_lattice_rank=2,
        monotonic_lattice_size=2)
    hparams.set_param('calibration_monotonic', +1)
    hparams.set_param('lattice_monotonic', True)
    hparams.set_param('learning_rate', 0.1)

    estimator = calibrated_etl.calibrated_etl_classifier(
        feature_columns=feature_columns,
        hparams=hparams,
        keypoints_initializers_fn=init_fn)
    estimator.train(input_fn=train_input_fn)
    predictions = [
        results['logits'][0]
        for results in estimator.predict(input_fn=test_input_fn)
    ]

    self.assertEqual(len(predictions), 4)
    # Check monotonicity. Note that projection has its own precision, so we
    # add a small number.
    self.assertLess(predictions[0], predictions[1] + 1e-6)
    self.assertLess(predictions[0], predictions[2] + 1e-6)
    self.assertLess(predictions[1], predictions[3] + 1e-6)
    self.assertLess(predictions[2], predictions[3] + 1e-6)

  def testCalibratedEtlWithMissingTraining(self):
    # x0 is missing with its own vertex: so it can take very different values,
    # while x1 is missing and calibrated, in this case to the middle of the
    # lattice.
    x0 = np.array([0., 0., 1., 1., -1., -1., 0., 1.])
    x1 = np.array([0., 1., 0., 1., 0., 1., -1., -1.])
    training_y = np.array([1., 3., 7., 11., 23., 27., 2., 9.])
    x_samples = {'x0': x0, 'x1': x1}

    train_input_fn = numpy_io.numpy_input_fn(
        x=x_samples,
        y=training_y,
        batch_size=x0.shape[0],
        num_epochs=2000,
        shuffle=False)
    test_input_fn = numpy_io.numpy_input_fn(
        x=x_samples, y=training_y, shuffle=False)
    feature_columns = [
        feature_column_lib.numeric_column('x0'),
        feature_column_lib.numeric_column('x1'),
    ]

    def init_fn():
      return keypoints_initialization.uniform_keypoints_for_signal(
          2, 0., 1., 0., 1.)

    hparams = tfl_hparams.CalibratedEtlHParams(
        ['x0', 'x1'],
        num_keypoints=2,
        non_monotonic_num_lattices=5,
        non_monotonic_lattice_rank=2,
        non_monotonic_lattice_size=2,
        learning_rate=0.1,
        missing_input_value=-1.)

    estimator = calibrated_etl.calibrated_etl_regressor(
        feature_columns=feature_columns,
        hparams=hparams,
        keypoints_initializers_fn=init_fn)
    estimator.train(input_fn=train_input_fn)
    results = estimator.evaluate(input_fn=test_input_fn)
    self.assertLess(results['average_loss'], 0.1)


if __name__ == '__main__':
  test.main()
[ "tensorflow.python.platform.test.main", "tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams", "tensorflow.python.feature_column.feature_column_lib.numeric_column", "tensorflow_lattice.python.lib.test_data.TestData", "tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier", "tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal", "numpy.array", "tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn", "tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor" ]
[((14983, 14994), 'tensorflow.python.platform.test.main', 'test.main', ([], {}), '()\n', (14992, 14994), False, 'from tensorflow.python.platform import test\n'), ((1306, 1359), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (1338, 1359), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((1750, 1803), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (1782, 1803), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((2197, 2250), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (2229, 2250), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((2652, 2705), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (2684, 2705), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((3093, 3146), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'feature_names': "['x']"}), "(feature_names=['x'])\n", (3125, 3146), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((3906, 3926), 'tensorflow_lattice.python.lib.test_data.TestData', 'test_data.TestData', ([], {}), '()\n', (3924, 3926), False, 'from tensorflow_lattice.python.lib import test_data\n'), ((4189, 4463), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', (['feature_names'], {'num_keypoints': '_NUM_KEYPOINTS', 'monotonic_num_lattices': '(1)', 'monotonic_lattice_rank': '(1)', 'monotonic_lattice_size': '(2)', 'non_monotonic_num_lattices': '(1)', 'non_monotonic_lattice_rank': '(1)', 'non_monotonic_lattice_size': '(2)'}), '(feature_names, num_keypoints=\n _NUM_KEYPOINTS, monotonic_num_lattices=1, monotonic_lattice_rank=1,\n monotonic_lattice_size=2, non_monotonic_num_lattices=1,\n non_monotonic_lattice_rank=1, non_monotonic_lattice_size=2, **hparams_args)\n', (4221, 4463), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((4670, 4798), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor', 'calibrated_etl.calibrated_etl_regressor', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (4709, 4798), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((5038, 5296), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'num_keypoints': '_NUM_KEYPOINTS', 'monotonic_num_lattices': '(1)', 'monotonic_lattice_rank': '(1)', 'monotonic_lattice_size': '(2)', 'non_monotonic_num_lattices': '(1)', 'non_monotonic_lattice_rank': '(1)', 'non_monotonic_lattice_size': '(2)'}), '(num_keypoints=_NUM_KEYPOINTS,\n monotonic_num_lattices=1, monotonic_lattice_rank=1,\n monotonic_lattice_size=2, non_monotonic_num_lattices=1,\n non_monotonic_lattice_rank=1, non_monotonic_lattice_size=2, **hparams_args)\n', (5070, 5296), True, 'from 
tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((5496, 5625), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (5536, 5625), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((11721, 11751), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (11729, 11751), True, 'import numpy as np\n'), ((11761, 11791), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0]'], {}), '([0.0, 1.0, 0.0, 1.0])\n', (11769, 11791), True, 'import numpy as np\n'), ((11846, 11890), 'numpy.array', 'np.array', (['[[False], [True], [True], [False]]'], {}), '([[False], [True], [True], [False]])\n', (11854, 11890), True, 'import numpy as np\n'), ((11913, 12014), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'batch_size': '(4)', 'num_epochs': '(1000)', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, batch_size=4, num_epochs\n =1000, shuffle=False)\n', (11936, 12014), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((12039, 12098), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'None', 'shuffle': '(False)'}), '(x=x_samples, y=None, shuffle=False)\n', (12062, 12098), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((12403, 12534), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', ([], {'num_keypoints': '(2)', 'monotonic_num_lattices': '(2)', 'monotonic_lattice_rank': '(2)', 'monotonic_lattice_size': '(2)'}), '(num_keypoints=2, monotonic_num_lattices=2,\n monotonic_lattice_rank=2, monotonic_lattice_size=2)\n', (12435, 12534), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((12725, 12854), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (12765, 12854), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((13653, 13705), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0, -1.0, -1.0, 0.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0, -1.0, -1.0, 0.0, 1.0])\n', (13661, 13705), True, 'import numpy as np\n'), ((13707, 13759), 'numpy.array', 'np.array', (['[0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -1.0, -1.0]'], {}), '([0.0, 1.0, 0.0, 1.0, 0.0, 1.0, -1.0, -1.0])\n', (13715, 13759), True, 'import numpy as np\n'), ((13769, 13822), 'numpy.array', 'np.array', (['[1.0, 3.0, 7.0, 11.0, 23.0, 27.0, 2.0, 9.0]'], {}), '([1.0, 3.0, 7.0, 11.0, 23.0, 27.0, 2.0, 9.0])\n', (13777, 13822), True, 'import numpy as np\n'), ((13874, 13984), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'batch_size': 'x0.shape[0]', 'num_epochs': '(2000)', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, batch_size=x0.shape[0],\n num_epochs=2000, shuffle=False)\n', (13897, 13984), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((14042, 
14107), 'tensorflow.python.estimator.inputs.numpy_io.numpy_input_fn', 'numpy_io.numpy_input_fn', ([], {'x': 'x_samples', 'y': 'training_y', 'shuffle': '(False)'}), '(x=x_samples, y=training_y, shuffle=False)\n', (14065, 14107), False, 'from tensorflow.python.estimator.inputs import numpy_io\n'), ((14377, 14583), 'tensorflow_lattice.python.estimators.hparams.CalibratedEtlHParams', 'tfl_hparams.CalibratedEtlHParams', (["['x0', 'x1']"], {'num_keypoints': '(2)', 'non_monotonic_num_lattices': '(5)', 'non_monotonic_lattice_rank': '(2)', 'non_monotonic_lattice_size': '(2)', 'learning_rate': '(0.1)', 'missing_input_value': '(-1.0)'}), "(['x0', 'x1'], num_keypoints=2,\n non_monotonic_num_lattices=5, non_monotonic_lattice_rank=2,\n non_monotonic_lattice_size=2, learning_rate=0.1, missing_input_value=-1.0)\n", (14409, 14583), True, 'from tensorflow_lattice.python.estimators import hparams as tfl_hparams\n'), ((14649, 14777), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_regressor', 'calibrated_etl.calibrated_etl_regressor', ([], {'feature_columns': 'feature_columns', 'hparams': 'hparams', 'keypoints_initializers_fn': 'init_fn'}), '(feature_columns=feature_columns,\n hparams=hparams, keypoints_initializers_fn=init_fn)\n', (14688, 14777), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((1622, 1679), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (1662, 1679), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2066, 2123), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (2106, 2123), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2521, 2578), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (2561, 2578), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((2976, 3033), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (3016, 3033), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((3721, 3778), 'tensorflow_lattice.python.estimators.calibrated_etl.calibrated_etl_classifier', 'calibrated_etl.calibrated_etl_classifier', ([], {'hparams': 'hparams'}), '(hparams=hparams)\n', (3761, 3778), False, 'from tensorflow_lattice.python.estimators import calibrated_etl\n'), ((4076, 4170), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['_NUM_KEYPOINTS', '(-1.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(_NUM_KEYPOINTS, -1.0,\n 1.0, 0.0, 1.0)\n', (4129, 4170), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((4925, 5019), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['_NUM_KEYPOINTS', '(-1.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(_NUM_KEYPOINTS, -1.0,\n 1.0, 0.0, 1.0)\n', (4978, 5019), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((5730, 5768), 
'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x"""'], {}), "('x')\n", (5763, 5768), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6225, 6264), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (6258, 6264), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6274, 6313), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (6307, 6313), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6807, 6846), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (6840, 6846), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((6856, 6895), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (6889, 6895), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((7549, 7588), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (7582, 7588), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((7598, 7637), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (7631, 7637), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((8398, 8448), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x"""'], {'shape': '(2,)'}), "('x', shape=(2,))\n", (8431, 8448), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9281, 9320), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (9314, 9320), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9330, 9369), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (9363, 9369), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9768, 9807), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (9801, 9807), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((9817, 9856), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (9850, 9856), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((10524, 10563), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (10557, 10563), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((10573, 10612), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (10606, 10612), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12175, 12214), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 
'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (12208, 12214), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12224, 12263), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (12257, 12263), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((12304, 12380), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['(2)', '(0.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(2, 0.0, 1.0, 0.0, 1.0)\n', (12357, 12380), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n'), ((14149, 14188), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x0"""'], {}), "('x0')\n", (14182, 14188), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((14198, 14237), 'tensorflow.python.feature_column.feature_column_lib.numeric_column', 'feature_column_lib.numeric_column', (['"""x1"""'], {}), "('x1')\n", (14231, 14237), False, 'from tensorflow.python.feature_column import feature_column_lib\n'), ((14278, 14354), 'tensorflow_lattice.python.lib.keypoints_initialization.uniform_keypoints_for_signal', 'keypoints_initialization.uniform_keypoints_for_signal', (['(2)', '(0.0)', '(1.0)', '(0.0)', '(1.0)'], {}), '(2, 0.0, 1.0, 0.0, 1.0)\n', (14331, 14354), False, 'from tensorflow_lattice.python.lib import keypoints_initialization\n')]
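# Editor's note: a standalone restatement of the partial order asserted in
# testCalibratedEtlMonotonicClassifierTraining above -- corner predictions must
# be coordinate-wise non-decreasing up to a small projection tolerance. The
# sample values are invented.
def corners_monotonic(p00, p01, p10, p11, eps=1e-6):
    return (p00 < p01 + eps and p00 < p10 + eps and
            p01 < p11 + eps and p10 < p11 + eps)

assert corners_monotonic(0.1, 0.4, 0.3, 0.7)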
""" Example script demonstrating the training portion of the MLR pipeline. This is mostly to demonstrate how everything ties together To run: PYSPARK_PYTHON=venv/bin/python spark-submit \ --jars /path/to/mjolnir-with-dependencies.jar \ --artifacts 'mjolnir_venv.zip#venv' \ path/to/training_pipeline.py """ from __future__ import absolute_import import argparse import datetime import glob import json import logging import mjolnir.feature_engineering import mjolnir.training.xgboost from mjolnir.utils import hdfs_open_read import os import pickle from pyspark import SparkContext from pyspark.sql import HiveContext import sys def run_pipeline( sc, sqlContext, input_dir, output_dir, wikis, initial_num_trees, final_num_trees, num_cv_jobs, iterations ): with hdfs_open_read(os.path.join(input_dir, 'stats.json')) as f: stats = json.loads(f.read()) wikis_available = set(stats['wikis'].keys()) if wikis: missing = set(wikis).difference(wikis_available) if missing: raise Exception("Wikis not available: " + ", ".join(missing)) wikis = wikis_available.intersection(wikis) else: wikis = stats['wikis'].keys() if not wikis: raise Exception("No wikis provided") for wiki in wikis: config = stats['wikis'][wiki] print('Training wiki: %s' % (wiki)) num_folds = config['num_folds'] if num_cv_jobs is None: num_cv_jobs = num_folds # Add extension matching training type extension = ".xgb" # Add file extensions to all the folds folds = config['folds'] for fold in folds: for partition in fold: for name, path in partition.items(): partition[name] = path + extension # "all" data with no splits all_paths = config['all'] for partition in all_paths: for name, path in partition.items(): partition[name] = path + extension tune_results = mjolnir.training.xgboost.tune( folds, config['stats'], num_cv_jobs=num_cv_jobs, train_matrix="train", initial_num_trees=initial_num_trees, final_num_trees=final_num_trees, iterations=iterations) print('CV test-ndcg@10: %.4f' % (tune_results['metrics']['cv-test'])) print('CV train-ndcg@10: %.4f' % (tune_results['metrics']['cv-train'])) tune_results['metadata'] = { 'wiki': wiki, 'input_dir': input_dir, 'training_datetime': datetime.datetime.now().isoformat(), 'dataset': config['stats'], } # Train a model over all data with best params. best_params = tune_results['params'].copy() print('Best parameters:') for param, value in best_params.items(): print('\t%20s: %s' % (param, value)) model = mjolnir.training.xgboost.train( all_paths, best_params, train_matrix="all") tune_results['metrics'] = { 'train': model.summary.train() } print('train-ndcg@10: %.5f' % (tune_results['metrics']['train'][-1])) # Save the tune results somewhere for later analysis. Use pickle # to maintain the hyperopt.Trials objects as is. It might be nice # to write out a json version, but the Trials objects require # some more work before they can be json encoded. tune_output_pickle = os.path.join(output_dir, 'tune_%s.pickle' % (wiki)) with open(tune_output_pickle, 'wb') as f: # TODO: This includes special hyperopt and mjolnir objects, it would # be nice if those could be converted to something simple like dicts # and output json instead of pickle. This would greatly simplify # post-processing. f.write(pickle.dumps(tune_results)) print('Wrote tuning results to %s' % (tune_output_pickle)) # Generate a feature map so xgboost can include feature names in the dump. # The final `q` indicates all features are quantitative values (floats). 
if 'wiki_features' in config['stats']:
            features = config['stats']['wiki_features'][wiki]
        else:
            features = config['stats']['features']

        json_model_output = os.path.join(output_dir, 'model_%s.json' % (wiki))
        with open(json_model_output, 'w') as f:
            # The 'unused' first feature is because DataWriter creates datafiles
            # that start at index 1 to support xgboost and lightgbm from the same
            # file.
            f.write(model.dump(['unused'] + features))
        print('Wrote xgboost json model to %s' % (json_model_output))

        # Write out the xgboost binary format as well, so it can be re-loaded
        # and evaluated
        model_output = os.path.join(output_dir, 'model_%s.xgb' % (wiki))
        model.saveModelAsLocalFile(model_output)
        print('Wrote xgboost binary model to %s' % (model_output))
        print('')


def arg_parser():
    parser = argparse.ArgumentParser(description='Train XGBoost ranking models')
    parser.add_argument(
        '-i', '--input', dest='input_dir', type=str, required=True,
        help='Input path, prefixed with hdfs://, to dataframe with labels and features')
    parser.add_argument(
        '-o', '--output', dest='output_dir', type=str, required=True,
        help='Path, on local filesystem, to directory to store the results of '
             'model training to.')
    parser.add_argument(
        '-c', '--cv-jobs', dest='num_cv_jobs', default=None, type=int,
        help='Number of cross validation folds to perform in parallel. Defaults to number ' +
             'of folds, to run all in parallel. If this is a multiple of the number ' +
             'of folds multiple cross validations will run in parallel.')
    parser.add_argument(
        '--initial-trees', dest='initial_num_trees', default=100, type=int,
        help='Number of trees to perform hyperparameter tuning with. (Default: 100)')
    parser.add_argument(
        '--final-trees', dest='final_num_trees', default=None, type=int,
        help='Number of trees in the final ensemble. If not provided the value from ' +
             '--initial-trees will be used. (Default: None)')
    parser.add_argument(
        '-t', '--iterations', dest='iterations', default=150, type=int,
        help='The number of hyperparameter tuning iterations to perform')
    parser.add_argument(
        'wikis', metavar='wiki', type=str, nargs='*',
        help='A wiki to perform model training for.')
    return parser


def main(**kwargs):
    # TODO: Set spark configuration? Some can't actually be set here though, so best might be to set all of it
    # on the command line for consistency.
    app_name = "MLR: training pipeline xgboost"
    if kwargs['wikis']:
        app_name += ': ' + ', '.join(kwargs['wikis'])
    sc = SparkContext(appName=app_name)
    sc.setLogLevel('WARN')
    sqlContext = HiveContext(sc)

    output_dir = kwargs['output_dir']
    if os.path.exists(output_dir):
        logging.error('Output directory (%s) already exists' % (output_dir))
        sys.exit(1)

    # Maybe this is a bit early to create the path ... but should be fine.
    # The annoyance might be that an error in training requires deleting
    # this directory to try again.
    os.mkdir(output_dir)

    try:
        run_pipeline(sc, sqlContext, **kwargs)
    except:  # noqa: E722
        # If the directory we created is still empty delete it
        # so it doesn't need to be manually re-created
        if not len(glob.glob(os.path.join(output_dir, '*'))):
            os.rmdir(output_dir)
        raise


if __name__ == "__main__":
    logging.basicConfig()
    kwargs = dict(vars(arg_parser().parse_args()))
    main(**kwargs)
[ "os.mkdir", "logging.error", "argparse.ArgumentParser", "logging.basicConfig", "pyspark.SparkContext", "os.path.exists", "datetime.datetime.now", "pyspark.sql.HiveContext", "os.rmdir", "os.path.join", "sys.exit", "pickle.dumps" ]
[((5126, 5193), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train XGBoost ranking models"""'}), "(description='Train XGBoost ranking models')\n", (5149, 5193), False, 'import argparse\n'), ((7007, 7037), 'pyspark.SparkContext', 'SparkContext', ([], {'appName': 'app_name'}), '(appName=app_name)\n', (7019, 7037), False, 'from pyspark import SparkContext\n'), ((7082, 7097), 'pyspark.sql.HiveContext', 'HiveContext', (['sc'], {}), '(sc)\n', (7093, 7097), False, 'from pyspark.sql import HiveContext\n'), ((7144, 7170), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (7158, 7170), False, 'import os\n'), ((7457, 7477), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (7465, 7477), False, 'import os\n'), ((7821, 7842), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '()\n', (7840, 7842), False, 'import logging\n'), ((3515, 3564), 'os.path.join', 'os.path.join', (['output_dir', "('tune_%s.pickle' % wiki)"], {}), "(output_dir, 'tune_%s.pickle' % wiki)\n", (3527, 3564), False, 'import os\n'), ((4373, 4421), 'os.path.join', 'os.path.join', (['output_dir', "('model_%s.json' % wiki)"], {}), "(output_dir, 'model_%s.json' % wiki)\n", (4385, 4421), False, 'import os\n'), ((4909, 4956), 'os.path.join', 'os.path.join', (['output_dir', "('model_%s.xgb' % wiki)"], {}), "(output_dir, 'model_%s.xgb' % wiki)\n", (4921, 4956), False, 'import os\n'), ((7180, 7246), 'logging.error', 'logging.error', (["('Output directory (%s) already exists' % output_dir)"], {}), "('Output directory (%s) already exists' % output_dir)\n", (7193, 7246), False, 'import logging\n'), ((7257, 7268), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7265, 7268), False, 'import sys\n'), ((819, 856), 'os.path.join', 'os.path.join', (['input_dir', '"""stats.json"""'], {}), "(input_dir, 'stats.json')\n", (831, 856), False, 'import os\n'), ((3907, 3933), 'pickle.dumps', 'pickle.dumps', (['tune_results'], {}), '(tune_results)\n', (3919, 3933), False, 'import pickle\n'), ((7753, 7773), 'os.rmdir', 'os.rmdir', (['output_dir'], {}), '(output_dir)\n', (7761, 7773), False, 'import os\n'), ((2610, 2633), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2631, 2633), False, 'import datetime\n'), ((7708, 7737), 'os.path.join', 'os.path.join', (['output_dir', '"""*"""'], {}), "(output_dir, '*')\n", (7720, 7737), False, 'import os\n')]
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm

from . import models


class UserCreateForm(UserCreationForm):
    class Meta:
        fields = ('username', 'email', 'password1', 'password2')
        model = get_user_model()


class UserProfileForm(forms.ModelForm):
    class Meta:
        fields = ('profile_picture',)
        model = models.UserProfile
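# --- Hedged usage sketch (not part of the original module) ---
# Shows how UserCreateForm would typically be driven from a view; the
# contents of post_data are the caller's responsibility and nothing here
# is tied to a concrete request framework.
def _example_signup(post_data):
    form = UserCreateForm(data=post_data)
    if form.is_valid():
        return form.save()  # persists through get_user_model()'s manager
    return form.errors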
[ "django.contrib.auth.get_user_model" ]
[((273, 289), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (287, 289), False, 'from django.contrib.auth import get_user_model\n')]
# -*- coding: utf-8 -*-

"""
All code for scraping images and videos from posted links goes in this file.
"""

#import BeautifulSoup
import re

import requests

from urllib.parse import urlparse, urlunparse, urljoin

img_extensions = ['jpg', 'jpeg', 'gif', 'png', 'bmp']


def make_abs(url, img_src):
    domain = urlparse(url).netloc
    scheme = urlparse(url).scheme
    baseurl = scheme + '://' + domain
    return urljoin(baseurl, img_src)


def clean_url(url):
    frag = urlparse(url)
    frag = frag._replace(query='', fragment='')
    return urlunparse(frag)


def get_top_img(url, timeout=4):
    """
    Nothing fancy here, we merely check if the page author set a designated
    image or if the url itself is an image. This method could be much better,
    but we are favoring ease of installation, simplicity, and speed.
    """
    if not url:
        return None
    url = clean_url(url)
    # if the url is referencing an img itself, return it
    if url.split('.')[-1].lower() in img_extensions:
        return url
    try:
        # Minimal sketch (assumption): fetch the page and look for a
        # designated og:image meta tag, resolving it to an absolute URL.
        html = requests.get(url, timeout=timeout).text
        match = re.search(
            r'<meta[^>]+property=["\']og:image["\'][^>]+content=["\']([^"\']+)["\']',
            html, re.IGNORECASE)
        if match:
            return make_abs(url, match.group(1))
    except Exception as e:
        print('FAILED WHILE EXTRACTING THREAD IMG', str(e))
        return None
    return None
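# --- Hedged smoke test (not part of the original module) ---
# make_abs and clean_url are pure URL helpers, so they can be exercised
# without any network access; the example.com URLs below are assumptions.
if __name__ == '__main__':
    assert make_abs('https://example.com/a/b', '/img/x.png') == 'https://example.com/img/x.png'
    assert clean_url('https://example.com/a?q=1#frag') == 'https://example.com/a'
    # A URL that is itself an image short-circuits before any HTTP request.
    assert get_top_img('https://example.com/pic.jpg') == 'https://example.com/pic.jpg'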
[ "urllib.parse.urlunparse", "urllib.parse.urljoin", "urllib.parse.urlparse" ]
[((401, 426), 'urllib.parse.urljoin', 'urljoin', (['baseurl', 'img_src'], {}), '(baseurl, img_src)\n', (408, 426), False, 'from urllib.parse import urlparse, urlunparse, urljoin\n'), ((459, 472), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (467, 472), False, 'from urllib.parse import urlparse, urlunparse, urljoin\n'), ((532, 548), 'urllib.parse.urlunparse', 'urlunparse', (['frag'], {}), '(frag)\n', (542, 548), False, 'from urllib.parse import urlparse, urlunparse, urljoin\n'), ((297, 310), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (305, 310), False, 'from urllib.parse import urlparse, urlunparse, urljoin\n'), ((331, 344), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (339, 344), False, 'from urllib.parse import urlparse, urlunparse, urljoin\n')]
from invoke import task, run


@task
def check(c):
    '''
    Checks the project's syntax
    '''
    print("Checking syntax...")
    run("pyflakes code")  # The directory with all of the application code


@task
def test(c):
    '''
    Runs the project's tests
    '''
    print("Running the tests...")
    run("pytest")
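# --- Hedged usage note (not part of the original tasks file) ---
# With this tasks.py at the project root, the tasks run from the shell as:
#   invoke check   # pyflakes over the code/ directory
#   invoke test    # pytest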
[ "invoke.run" ]
[((151, 171), 'invoke.run', 'run', (['"""pyflakes code"""'], {}), "('pyflakes code')\n", (154, 171), False, 'from invoke import task, run\n'), ((336, 349), 'invoke.run', 'run', (['"""pytest"""'], {}), "('pytest')\n", (339, 349), False, 'from invoke import task, run\n')]
# From hacksoft.io/improve-your-tests-django-fakes-and-factories/ import factory from faker import Faker from factory import lazy_attribute fake = Faker() class CategoryFactory(factory.django.DjangoModelFactory): class Meta: model = 'products.Category' id = factory.Sequence(lambda n: n) name = factory.Sequence(lambda n: 'Category{0}' .format(n)) division = factory.Faker('word') class Product_FamilyFactory(factory.django.DjangoModelFactory): class Meta: model = 'products.Product_Family' id = factory.Sequence(lambda n: n) name = factory.Sequence(lambda n: 'Product_Family{0}' .format(n)) brand_name = factory.Faker('word') class ProductFactory(factory.django.DjangoModelFactory): class Meta: model = 'products.Product' id = factory.Sequence(lambda n: n) category = factory.SubFactory(CategoryFactory) product_family = factory.SubFactory(Product_FamilyFactory) name = factory.Sequence(lambda n: 'Test Product {0}' .format(n)) price = factory.Faker("random_int", min=2, max=1500) active = factory.Faker("boolean", chance_of_getting_true=90)
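# --- Hedged usage sketch (not part of the original factories module) ---
# A single ProductFactory() call cascades through its SubFactory
# declarations, creating the related Category and Product_Family rows too;
# this assumes a configured Django test database.
def _example_build():
    product = ProductFactory()
    assert product.category is not None
    assert 2 <= product.price <= 1500  # bounds set by the Faker declaration
    return product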
[ "factory.SubFactory", "factory.Faker", "factory.Sequence", "faker.Faker" ]
[((148, 155), 'faker.Faker', 'Faker', ([], {}), '()\n', (153, 155), False, 'from faker import Faker\n'), ((278, 307), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n)'], {}), '(lambda n: n)\n', (294, 307), False, 'import factory\n'), ((387, 408), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (400, 408), False, 'import factory\n'), ((543, 572), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n)'], {}), '(lambda n: n)\n', (559, 572), False, 'import factory\n'), ((660, 681), 'factory.Faker', 'factory.Faker', (['"""word"""'], {}), "('word')\n", (673, 681), False, 'import factory\n'), ((802, 831), 'factory.Sequence', 'factory.Sequence', (['(lambda n: n)'], {}), '(lambda n: n)\n', (818, 831), False, 'import factory\n'), ((847, 882), 'factory.SubFactory', 'factory.SubFactory', (['CategoryFactory'], {}), '(CategoryFactory)\n', (865, 882), False, 'import factory\n'), ((904, 945), 'factory.SubFactory', 'factory.SubFactory', (['Product_FamilyFactory'], {}), '(Product_FamilyFactory)\n', (922, 945), False, 'import factory\n'), ((1027, 1071), 'factory.Faker', 'factory.Faker', (['"""random_int"""'], {'min': '(2)', 'max': '(1500)'}), "('random_int', min=2, max=1500)\n", (1040, 1071), False, 'import factory\n'), ((1085, 1136), 'factory.Faker', 'factory.Faker', (['"""boolean"""'], {'chance_of_getting_true': '(90)'}), "('boolean', chance_of_getting_true=90)\n", (1098, 1136), False, 'import factory\n')]
from fastapi_utils.api_model import APIModel from tifa.apps.admin.local import g from tifa.apps.admin.router import bp from tifa.models.app import App class TApp(APIModel): id: str name: str @bp.list("/apps", out=TApp, summary="App", tags=["App"]) def apps_list(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.item("/app", out=TApp, summary="App", tags=["App"]) def app_item(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.item("/app/fetch_manifest", out=TApp, summary="App", tags=["App"]) def app_fetch_manifest(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/create", out=TApp, summary="App", tags=["App"]) def app_create(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/update", out=TApp, summary="App", tags=["App"]) def app_update(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/activate", out=TApp, summary="App", tags=["App"]) def app_activate(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/deactivate", out=TApp, summary="App", tags=["App"]) def app_deactivate(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/delete", out=TApp, summary="App", tags=["App"]) def app_delete(): ins = g.adal.first_or_404(App) return {"items": ins} class TAppInstallation(APIModel): id: str name: str @bp.list( "/app/installations", out=TAppInstallation, summary="AppInstallation", tags=["App"] ) def app_installations(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.item( "/app/installation", out=TAppInstallation, summary="AppInstallation", tags=["App"] ) def app_installation(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.item( "/app/delete_failed_installation", out=TAppInstallation, summary="AppInstallation", tags=["App"], ) def app_delete_failed_installation(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/install", out=TApp, summary="App", tags=["App"]) def app_install(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/retry_install", out=TApp, summary="App", tags=["App"]) def app_retry_install(): ins = g.adal.first_or_404(App) return {"items": ins} class TAppToken(APIModel): id: str name: str @bp.op("/app/token_create", out=TAppToken, summary="AppInstallation", tags=["App"]) def app_token_create(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/token_update", out=TAppToken, summary="AppInstallation", tags=["App"]) def app_token_update(): ins = g.adal.first_or_404(App) return {"items": ins} @bp.op("/app/token_verify", out=TAppToken, summary="AppInstallation", tags=["App"]) def app_token_verify(): ins = g.adal.first_or_404(App) return {"items": ins}
[ "tifa.apps.admin.router.bp.op", "tifa.apps.admin.local.g.adal.first_or_404", "tifa.apps.admin.router.bp.item", "tifa.apps.admin.router.bp.list" ]
[((205, 260), 'tifa.apps.admin.router.bp.list', 'bp.list', (['"""/apps"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/apps', out=TApp, summary='App', tags=['App'])\n", (212, 260), False, 'from tifa.apps.admin.router import bp\n'), ((342, 396), 'tifa.apps.admin.router.bp.item', 'bp.item', (['"""/app"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app', out=TApp, summary='App', tags=['App'])\n", (349, 396), False, 'from tifa.apps.admin.router import bp\n'), ((477, 546), 'tifa.apps.admin.router.bp.item', 'bp.item', (['"""/app/fetch_manifest"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/fetch_manifest', out=TApp, summary='App', tags=['App'])\n", (484, 546), False, 'from tifa.apps.admin.router import bp\n'), ((637, 696), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/create"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/create', out=TApp, summary='App', tags=['App'])\n", (642, 696), False, 'from tifa.apps.admin.router import bp\n'), ((779, 838), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/update"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/update', out=TApp, summary='App', tags=['App'])\n", (784, 838), False, 'from tifa.apps.admin.router import bp\n'), ((921, 982), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/activate"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/activate', out=TApp, summary='App', tags=['App'])\n", (926, 982), False, 'from tifa.apps.admin.router import bp\n'), ((1067, 1130), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/deactivate"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/deactivate', out=TApp, summary='App', tags=['App'])\n", (1072, 1130), False, 'from tifa.apps.admin.router import bp\n'), ((1217, 1276), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/delete"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/delete', out=TApp, summary='App', tags=['App'])\n", (1222, 1276), False, 'from tifa.apps.admin.router import bp\n'), ((1421, 1518), 'tifa.apps.admin.router.bp.list', 'bp.list', (['"""/app/installations"""'], {'out': 'TAppInstallation', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/installations', out=TAppInstallation, summary=\n 'AppInstallation', tags=['App'])\n", (1428, 1518), False, 'from tifa.apps.admin.router import bp\n'), ((1609, 1705), 'tifa.apps.admin.router.bp.item', 'bp.item', (['"""/app/installation"""'], {'out': 'TAppInstallation', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/installation', out=TAppInstallation, summary=\n 'AppInstallation', tags=['App'])\n", (1616, 1705), False, 'from tifa.apps.admin.router import bp\n'), ((1795, 1905), 'tifa.apps.admin.router.bp.item', 'bp.item', (['"""/app/delete_failed_installation"""'], {'out': 'TAppInstallation', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/delete_failed_installation', out=TAppInstallation, summary=\n 'AppInstallation', tags=['App'])\n", (1802, 1905), False, 'from tifa.apps.admin.router import bp\n'), ((2022, 2082), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/install"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': "['App']"}), "('/app/install', out=TApp, summary='App', tags=['App'])\n", (2027, 2082), False, 'from tifa.apps.admin.router import bp\n'), ((2166, 2232), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/retry_install"""'], {'out': 'TApp', 'summary': '"""App"""', 'tags': 
"['App']"}), "('/app/retry_install', out=TApp, summary='App', tags=['App'])\n", (2171, 2232), False, 'from tifa.apps.admin.router import bp\n'), ((2377, 2464), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/token_create"""'], {'out': 'TAppToken', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/token_create', out=TAppToken, summary='AppInstallation', tags=[\n 'App'])\n", (2382, 2464), False, 'from tifa.apps.admin.router import bp\n'), ((2548, 2635), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/token_update"""'], {'out': 'TAppToken', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/token_update', out=TAppToken, summary='AppInstallation', tags=[\n 'App'])\n", (2553, 2635), False, 'from tifa.apps.admin.router import bp\n'), ((2719, 2806), 'tifa.apps.admin.router.bp.op', 'bp.op', (['"""/app/token_verify"""'], {'out': 'TAppToken', 'summary': '"""AppInstallation"""', 'tags': "['App']"}), "('/app/token_verify', out=TAppToken, summary='AppInstallation', tags=[\n 'App'])\n", (2724, 2806), False, 'from tifa.apps.admin.router import bp\n'), ((288, 312), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (307, 312), False, 'from tifa.apps.admin.local import g\n'), ((423, 447), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (442, 447), False, 'from tifa.apps.admin.local import g\n'), ((583, 607), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (602, 607), False, 'from tifa.apps.admin.local import g\n'), ((725, 749), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (744, 749), False, 'from tifa.apps.admin.local import g\n'), ((867, 891), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (886, 891), False, 'from tifa.apps.admin.local import g\n'), ((1013, 1037), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1032, 1037), False, 'from tifa.apps.admin.local import g\n'), ((1163, 1187), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1182, 1187), False, 'from tifa.apps.admin.local import g\n'), ((1305, 1329), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1324, 1329), False, 'from tifa.apps.admin.local import g\n'), ((1555, 1579), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1574, 1579), False, 'from tifa.apps.admin.local import g\n'), ((1741, 1765), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1760, 1765), False, 'from tifa.apps.admin.local import g\n'), ((1968, 1992), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (1987, 1992), False, 'from tifa.apps.admin.local import g\n'), ((2112, 2136), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (2131, 2136), False, 'from tifa.apps.admin.local import g\n'), ((2268, 2292), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (2287, 2292), False, 'from tifa.apps.admin.local import g\n'), ((2494, 2518), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (2513, 2518), False, 'from tifa.apps.admin.local import g\n'), ((2665, 2689), 'tifa.apps.admin.local.g.adal.first_or_404', 
'g.adal.first_or_404', (['App'], {}), '(App)\n', (2684, 2689), False, 'from tifa.apps.admin.local import g\n'), ((2836, 2860), 'tifa.apps.admin.local.g.adal.first_or_404', 'g.adal.first_or_404', (['App'], {}), '(App)\n', (2855, 2860), False, 'from tifa.apps.admin.local import g\n')]
# -*- coding: utf-8 -*-
from cadnano.util import to_dot_path

pp = to_dot_path(__file__)

PathNucleicAcidPartItemT = pp + '.nucleicacidpartitem.PathNucleicAcidPartItem'
PathVirtualHelixItemT = pp + '.virtualhelixitem.PathVirtualHelixItem'
PathStrandItemT = pp + '.strand.stranditem.StrandItem'
PathEndpointItemT = pp + '.strand.endpointitem.EndpointItem'
PathXoverItemT = pp + '.strand.xoveritem.XoverItem'
PathRootItemT = pp + '.pathrootitem.PathRootItem'
PathToolManagerT = pp + '.tools.pathtoolmanager.PathToolManager'
AbstractPathToolT = pp + '.tools.abstractpathtool.AbstractPathTool'
PreXoverManagerT = pp + '.prexovermanager.PreXoverManager'
[ "cadnano.util.to_dot_path" ]
[((66, 87), 'cadnano.util.to_dot_path', 'to_dot_path', (['__file__'], {}), '(__file__)\n', (77, 87), False, 'from cadnano.util import to_dot_path\n')]
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

''' Initialize the global configuration for the translator '''

import os

from translator.conf.config import ConfigProvider

CONF_FILENAME = 'translator.conf'


def init_global_conf():
    '''Initialize the configuration provider.

    Allows the configuration to be shared throughout the translator code.
    The file used is translator.conf, and is within the conf/ directory. It
    is a standard ini format, and is processed using the ConfigParser module.
    '''
    conf_path = os.path.dirname(os.path.abspath(__file__))
    conf_file = os.path.join(conf_path, CONF_FILENAME)
    ConfigProvider._load_config(conf_file)


init_global_conf()
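# --- Hedged illustration (standard library only, not part of this module) ---
# translator.conf is plain ini, so outside of ConfigProvider it can also be
# inspected directly with configparser; no section names are assumed here.
def _peek_conf(path):
    import configparser
    parser = configparser.ConfigParser()
    parser.read(path)
    return {section: dict(parser.items(section)) for section in parser.sections()}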
[ "os.path.abspath", "os.path.join", "translator.conf.config.ConfigProvider._load_config" ]
[((1087, 1125), 'os.path.join', 'os.path.join', (['conf_path', 'CONF_FILENAME'], {}), '(conf_path, CONF_FILENAME)\n', (1099, 1125), False, 'import os\n'), ((1130, 1168), 'translator.conf.config.ConfigProvider._load_config', 'ConfigProvider._load_config', (['conf_file'], {}), '(conf_file)\n', (1157, 1168), False, 'from translator.conf.config import ConfigProvider\n'), ((1044, 1069), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1059, 1069), False, 'import os\n')]
import json
import os
from contextlib import contextmanager

import pytest

from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num
from text_normalizer.convert._convert import _numerics

from ..settings import TESTS_PATH

with open(os.path.join(TESTS_PATH, 'convert/data/numerics_ds.json'), encoding='utf-8') as f:
    nums_dataset = json.load(f)


@contextmanager
def does_not_raise():
    yield


@pytest.mark.parametrize('text, num', _numerics.items())
def test_text2int_single(text, num):
    nums_list = text.split()
    assert text2int(*nums_list) == num[0]


@pytest.mark.parametrize('num, text', nums_dataset.items())
def _test_text2int_dataset(text, num):
    nums_list = text.split()
    assert text2int(*nums_list) == int(num)


@pytest.mark.parametrize('text, num', [
    ("десяток", 10),
    ("два десяток", 20),
    ("пять десяток", 50),
    ("сотня", 100),
    ("две сотня", 200),
    ("", 0),
])
def test_text2int_custom(text, num):
    nums_list = text.split()
    assert text2int(*nums_list) == num


@pytest.mark.parametrize('text, num, expected', [
    ('двести', 200, does_not_raise()),
    ('пять десятков', 50, pytest.raises(ValueError)),
    ('сто один двадцать', 0, pytest.raises(ValueError)),
    ('десяток двоек', 0, pytest.raises(ValueError)),
    ('тысяча миллион', 0, pytest.raises(ValueError)),
    ('абвгд', 0, pytest.raises(ValueError)),
])
def test_text2int_raises(text, num, expected):
    nums_list = text.split()
    with expected:
        assert text2int(*nums_list) == num


@pytest.mark.parametrize('inp, outp', [
    ('10-ый', True),
    ('10-го', True),
    ('10-му', True),
    ('10-го', True),
    ('10-ым', True),
    ('10-м', True),
    ('10-ом', True),
    ('10', False),
])
def test_isordfold(inp, outp):
    assert is_ordfold(inp) == outp


@pytest.mark.parametrize('inp, outp, expected', [
    ('10-ый', '10', does_not_raise()),
    ('10-го', '10', does_not_raise()),
    ('10-му', '10', does_not_raise()),
    ('10-го', '10', does_not_raise()),
    ('10-ым', '10', does_not_raise()),
    ('10-м', '10', does_not_raise()),
    ('10-ом', '10', does_not_raise()),
    ('10ый', '10', does_not_raise()),
    ('10го', '10', does_not_raise()),
    ('10му', '10', does_not_raise()),
    ('10го', '10', does_not_raise()),
    ('10ым', '10', does_not_raise()),
    ('10м', '10', does_not_raise()),
    ('10ом', '10', does_not_raise()),
    ("", '', pytest.raises(ValueError)),
    ("asdc", '', pytest.raises(ValueError)),
])
def test_ord_unfold(inp, outp, expected):
    with expected:
        assert ord_unfold(inp) == outp


@pytest.mark.parametrize('inp, outp', zip(MONTHS, range(1, 13)), ids=MONTHS)
def test_month2num(inp, outp):
    assert month2num(inp) == outp
[ "json.load", "text_normalizer.convert.month2num", "text_normalizer.convert.ord_unfold", "text_normalizer.convert._convert._numerics.items", "pytest.raises", "text_normalizer.convert.is_ordfold", "pytest.mark.parametrize", "text_normalizer.convert.text2int", "os.path.join" ]
[((773, 920), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""text, num"""', "[('десяток', 10), ('два десяток', 20), ('пять десяток', 50), ('сотня', 100),\n ('две сотня', 200), ('', 0)]"], {}), "('text, num', [('десяток', 10), ('два десяток', 20),\n ('пять десяток', 50), ('сотня', 100), ('две сотня', 200), ('', 0)])\n", (796, 920), False, 'import pytest\n'), ((1548, 1729), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inp, outp"""', "[('10-ый', True), ('10-го', True), ('10-му', True), ('10-го', True), (\n '10-ым', True), ('10-м', True), ('10-ом', True), ('10', False)]"], {}), "('inp, outp', [('10-ый', True), ('10-го', True), (\n '10-му', True), ('10-го', True), ('10-ым', True), ('10-м', True), (\n '10-ом', True), ('10', False)])\n", (1571, 1729), False, 'import pytest\n'), ((366, 378), 'json.load', 'json.load', (['f'], {}), '(f)\n', (375, 378), False, 'import json\n'), ((469, 486), 'text_normalizer.convert._convert._numerics.items', '_numerics.items', ([], {}), '()\n', (484, 486), False, 'from text_normalizer.convert._convert import _numerics\n'), ((264, 321), 'os.path.join', 'os.path.join', (['TESTS_PATH', '"""convert/data/numerics_ds.json"""'], {}), "(TESTS_PATH, 'convert/data/numerics_ds.json')\n", (276, 321), False, 'import os\n'), ((565, 585), 'text_normalizer.convert.text2int', 'text2int', (['*nums_list'], {}), '(*nums_list)\n', (573, 585), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((737, 757), 'text_normalizer.convert.text2int', 'text2int', (['*nums_list'], {}), '(*nums_list)\n', (745, 757), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((1021, 1041), 'text_normalizer.convert.text2int', 'text2int', (['*nums_list'], {}), '(*nums_list)\n', (1029, 1041), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((1797, 1812), 'text_normalizer.convert.is_ordfold', 'is_ordfold', (['inp'], {}), '(inp)\n', (1807, 1812), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((2720, 2734), 'text_normalizer.convert.month2num', 'month2num', (['inp'], {}), '(inp)\n', (2729, 2734), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((1517, 1537), 'text_normalizer.convert.text2int', 'text2int', (['*nums_list'], {}), '(*nums_list)\n', (1525, 1537), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((1178, 1203), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1191, 1203), False, 'import pytest\n'), ((1238, 1263), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1251, 1263), False, 'import pytest\n'), ((1288, 1313), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1301, 1313), False, 'import pytest\n'), ((1343, 1368), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1356, 1368), False, 'import pytest\n'), ((1380, 1405), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1393, 1405), False, 'import pytest\n'), ((2575, 2590), 'text_normalizer.convert.ord_unfold', 'ord_unfold', (['inp'], {}), '(inp)\n', (2585, 2590), False, 'from text_normalizer.convert import text2int, ord_unfold, is_ordfold, MONTHS, month2num\n'), ((2423, 2448), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2436, 2448), False, 'import pytest\n'), 
((2468, 2493), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2481, 2493), False, 'import pytest\n')]
##############################################################################
# Copyright (c) 2017 ZTE Corporation and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from oslo_config import cfg
from oslo_utils import importutils


OPTS = [
    cfg.StrOpt('type',
               default='sample',
               choices=['sample'],
               help='the component of doctor consumer',
               required=True),
    cfg.StrOpt('ip',
               default='127.0.0.1',
               help='the ip of consumer',
               required=True),
    cfg.IntOpt('port',
               default=12346,
               help='the port of doctor consumer',
               required=True),
]


_consumer_name_class_mapping = {
    'sample': 'doctor_tests.consumer.sample.SampleConsumer'
}


def get_consumer(conf, log):
    consumer_class = _consumer_name_class_mapping.get(conf.consumer.type)
    return importutils.import_object(consumer_class, conf, log)
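# --- Hedged sketch (assumes the usual oslo.config registration pattern) ---
# get_consumer() reads conf.consumer.type, so OPTS must first be registered
# under a 'consumer' group, typically along these lines:
def _register_opts(conf):
    conf.register_opts(OPTS, group='consumer')
    return conf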
[ "oslo_config.cfg.StrOpt", "oslo_utils.importutils.import_object", "oslo_config.cfg.IntOpt" ]
[((530, 647), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""type"""'], {'default': '"""sample"""', 'choices': "['sample']", 'help': '"""the component of doctor consumer"""', 'required': '(True)'}), "('type', default='sample', choices=['sample'], help=\n 'the component of doctor consumer', required=True)\n", (540, 647), False, 'from oslo_config import cfg\n'), ((708, 787), 'oslo_config.cfg.StrOpt', 'cfg.StrOpt', (['"""ip"""'], {'default': '"""127.0.0.1"""', 'help': '"""the ip of consumer"""', 'required': '(True)'}), "('ip', default='127.0.0.1', help='the ip of consumer', required=True)\n", (718, 787), False, 'from oslo_config import cfg\n'), ((838, 928), 'oslo_config.cfg.IntOpt', 'cfg.IntOpt', (['"""port"""'], {'default': '"""12346"""', 'help': '"""the port of doctor consumer"""', 'required': '(True)'}), "('port', default='12346', help='the port of doctor consumer',\n required=True)\n", (848, 928), False, 'from oslo_config import cfg\n'), ((1186, 1238), 'oslo_utils.importutils.import_object', 'importutils.import_object', (['consumer_class', 'conf', 'log'], {}), '(consumer_class, conf, log)\n', (1211, 1238), False, 'from oslo_utils import importutils\n')]
from metaflow import FlowSpec, step, Flow, Parameter, JSONType class ClassifierPredictFlow(FlowSpec): vector = Parameter('vector', type=JSONType, required=True) @step def start(self): run = Flow('ClassifierTrainFlow').latest_run self.train_run_id = run.pathspec self.model = run['end'].task.data.model print("Input vector", self.vector) self.next(self.end) @step def end(self): print('Model', self.model) if __name__ == '__main__': ClassifierPredictFlow()
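# --- Hedged usage note (not part of the original flow) ---
# JSONType parameters are passed as JSON strings on the command line, e.g.:
#   python <this_file>.py run --vector '[1.0, 2.0, 3.0]'
# (the file name placeholder is deliberate; use this script's actual name)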
[ "metaflow.Flow", "metaflow.Parameter" ]
[((117, 166), 'metaflow.Parameter', 'Parameter', (['"""vector"""'], {'type': 'JSONType', 'required': '(True)'}), "('vector', type=JSONType, required=True)\n", (126, 166), False, 'from metaflow import FlowSpec, step, Flow, Parameter, JSONType\n'), ((213, 240), 'metaflow.Flow', 'Flow', (['"""ClassifierTrainFlow"""'], {}), "('ClassifierTrainFlow')\n", (217, 240), False, 'from metaflow import FlowSpec, step, Flow, Parameter, JSONType\n')]
from models import PatchCore from save_utils import saveModelPath import numpy import torch import warnings from torch import tensor from torchvision import transforms import json import numpy from PIL import Image, ImageFilter import os from torch.utils.data import DataLoader,TensorDataset warnings.filterwarnings("ignore") class train_patchcore(): def __init__(self,configPath,train_imgs_folder, resize=None,center_crop=None, f_coreset=.20,backbone_name="wide_resnet50_2", TimeStamp=None): self.configPath=configPath self.train_imgs_folder=train_imgs_folder self.resize=resize self.center_crop=center_crop self.f_coreset=f_coreset self.backbone_name=backbone_name self.TimeStamp=TimeStamp with open(configPath) as json_file: self.data = json.load(json_file) self.model=PatchCore( f_coreset=f_coreset, backbone_name=backbone_name, ) self.train_tar,self.train_path,self.model_path=saveModelPath(self.configPath,self.TimeStamp) IMAGENET_MEAN = tensor([.485, .456, .406]) IMAGENET_STD = tensor([.229, .224, .225]) transfoms_paras = [ transforms.ToTensor(), transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD), ] if resize!=None: transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC)) if center_crop!=None: transfoms_paras.append(transforms.CenterCrop(center_crop)) if self.data!=None: self.scaling_factor=self.data['scaling_factor'] self.median_blur_size=self.data['smooth'] if self.scaling_factor!=1: width = int(self.data['original_imgsz'][0]*self.scaling_factor) height = int(self.data['original_imgsz'][1]*self.scaling_factor) self.resize=[height,width] transfoms_paras.append(transforms.Resize(self.resize, interpolation=transforms.InterpolationMode.BICUBIC)) self.loader=transforms.Compose(transfoms_paras) def genTrainDS(self): train_ims = [] train_labels = [] for img_id in self.data['train_ids']: img_path = os.path.join(self.train_imgs_folder, img_id) train_im = Image.open(img_path).convert('RGB') if self.median_blur_size!=0: train_im = train_im.filter(ImageFilter.MedianFilter(size=self.median_blur_size)) print ('Applying median filter on training image with degree of '+ str(self.median_blur_size)) train_im = self.loader(train_im) train_label = tensor([0]) train_ims.append(train_im.numpy()) train_labels.append(train_label.numpy()) train_ims = numpy.array(train_ims) train_labels = numpy.array(train_labels) print ('Training Tensor Shape is' + str(train_ims.shape)) train_ims = torch.from_numpy(train_ims) train_labels = torch.from_numpy(train_labels) train_data = TensorDataset(train_ims,train_labels) train_ds = DataLoader(train_data) return train_ds def saveTrainConfig(self): self.data['configPath'] = self.configPath self.data['imgsz'] = self.resize self.data['center_crop'] = self.center_crop self.data['scaling_factor'] = self.scaling_factor self.data['train_imgs_folder'] = self.train_imgs_folder self.data['backbone_name'] = self.backbone_name self.data['TimeStamp'] = self.TimeStamp json_string = json.dumps(self.data) json_filePath = os.path.join(self.model_path,'training_config.json') with open(json_filePath, 'w') as outfile: outfile.write(json_string) def run(self): train_ds = self.genTrainDS() tobesaved = self.model.fit(train_ds) torch.save(tobesaved, self.train_tar) torch.save(self.model.state_dict(), self.train_path) self.saveTrainConfig()
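# --- Hedged usage sketch (all paths and values below are illustrative assumptions) ---
if __name__ == '__main__':
    trainer = train_patchcore(
        configPath='configs/example_config.json',
        train_imgs_folder='data/train_imgs',
        f_coreset=0.2,
        backbone_name='wide_resnet50_2',
        TimeStamp='20240101-000000',
    )
    trainer.run()  # builds the dataset, fits PatchCore, saves model + config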
[ "json.dumps", "torch.utils.data.TensorDataset", "torchvision.transforms.Normalize", "os.path.join", "torch.utils.data.DataLoader", "torchvision.transforms.Compose", "torchvision.transforms.CenterCrop", "PIL.ImageFilter.MedianFilter", "torch.from_numpy", "torchvision.transforms.Resize", "save_utils.saveModelPath", "json.load", "warnings.filterwarnings", "PIL.Image.open", "torch.save", "numpy.array", "models.PatchCore", "torch.tensor", "torchvision.transforms.ToTensor" ]
[((295, 328), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (318, 328), False, 'import warnings\n'), ((923, 982), 'models.PatchCore', 'PatchCore', ([], {'f_coreset': 'f_coreset', 'backbone_name': 'backbone_name'}), '(f_coreset=f_coreset, backbone_name=backbone_name)\n', (932, 982), False, 'from models import PatchCore\n'), ((1099, 1145), 'save_utils.saveModelPath', 'saveModelPath', (['self.configPath', 'self.TimeStamp'], {}), '(self.configPath, self.TimeStamp)\n', (1112, 1145), False, 'from save_utils import saveModelPath\n'), ((1170, 1199), 'torch.tensor', 'tensor', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (1176, 1199), False, 'from torch import tensor\n'), ((1220, 1249), 'torch.tensor', 'tensor', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (1226, 1249), False, 'from torch import tensor\n'), ((2202, 2237), 'torchvision.transforms.Compose', 'transforms.Compose', (['transfoms_paras'], {}), '(transfoms_paras)\n', (2220, 2237), False, 'from torchvision import transforms\n'), ((2964, 2986), 'numpy.array', 'numpy.array', (['train_ims'], {}), '(train_ims)\n', (2975, 2986), False, 'import numpy\n'), ((3010, 3035), 'numpy.array', 'numpy.array', (['train_labels'], {}), '(train_labels)\n', (3021, 3035), False, 'import numpy\n'), ((3123, 3150), 'torch.from_numpy', 'torch.from_numpy', (['train_ims'], {}), '(train_ims)\n', (3139, 3150), False, 'import torch\n'), ((3174, 3204), 'torch.from_numpy', 'torch.from_numpy', (['train_labels'], {}), '(train_labels)\n', (3190, 3204), False, 'import torch\n'), ((3226, 3264), 'torch.utils.data.TensorDataset', 'TensorDataset', (['train_ims', 'train_labels'], {}), '(train_ims, train_labels)\n', (3239, 3264), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3283, 3305), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {}), '(train_data)\n', (3293, 3305), False, 'from torch.utils.data import DataLoader, TensorDataset\n'), ((3755, 3776), 'json.dumps', 'json.dumps', (['self.data'], {}), '(self.data)\n', (3765, 3776), False, 'import json\n'), ((3801, 3854), 'os.path.join', 'os.path.join', (['self.model_path', '"""training_config.json"""'], {}), "(self.model_path, 'training_config.json')\n", (3813, 3854), False, 'import os\n'), ((4057, 4094), 'torch.save', 'torch.save', (['tobesaved', 'self.train_tar'], {}), '(tobesaved, self.train_tar)\n', (4067, 4094), False, 'import torch\n'), ((882, 902), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (891, 902), False, 'import json\n'), ((1299, 1320), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1318, 1320), False, 'from torchvision import transforms\n'), ((1346, 1395), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['IMAGENET_MEAN', 'IMAGENET_STD'], {}), '(IMAGENET_MEAN, IMAGENET_STD)\n', (1366, 1395), False, 'from torchvision import transforms\n'), ((2392, 2436), 'os.path.join', 'os.path.join', (['self.train_imgs_folder', 'img_id'], {}), '(self.train_imgs_folder, img_id)\n', (2404, 2436), False, 'import os\n'), ((2830, 2841), 'torch.tensor', 'tensor', (['[0]'], {}), '([0])\n', (2836, 2841), False, 'from torch import tensor\n'), ((1479, 1566), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.resize'], {'interpolation': 'transforms.InterpolationMode.BICUBIC'}), '(self.resize, interpolation=transforms.InterpolationMode.\n BICUBIC)\n', (1496, 1566), False, 'from torchvision import transforms\n'), ((1628, 1662), 
'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['center_crop'], {}), '(center_crop)\n', (1649, 1662), False, 'from torchvision import transforms\n'), ((2097, 2184), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.resize'], {'interpolation': 'transforms.InterpolationMode.BICUBIC'}), '(self.resize, interpolation=transforms.InterpolationMode.\n BICUBIC)\n', (2114, 2184), False, 'from torchvision import transforms\n'), ((2460, 2480), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (2470, 2480), False, 'from PIL import Image, ImageFilter\n'), ((2593, 2645), 'PIL.ImageFilter.MedianFilter', 'ImageFilter.MedianFilter', ([], {'size': 'self.median_blur_size'}), '(size=self.median_blur_size)\n', (2617, 2645), False, 'from PIL import Image, ImageFilter\n')]
# Generated by Django 4.0.1 on 2022-01-13 19:25 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django.db.models.manager import user.models class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('user', '0010_post_interacted'), ] operations = [ migrations.AlterField( model_name='post', name='interacted', field=models.DateTimeField(auto_now_add=True, null=True), ), migrations.CreateModel( name='PlusOne', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('targetType', models.IntegerField(choices=[(1, 'Comment'), (2, 'Post')])), ('targetComment', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='user.comment')), ('targetPost', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='user.post')), ('user', models.ForeignKey(on_delete=models.SET(user.models.get_sentinel_user), to=settings.AUTH_USER_MODEL)), ], managers=[ ('plus_ones', django.db.models.manager.Manager()), ], ), ]
[ "django.db.migrations.swappable_dependency", "django.db.models.ForeignKey", "django.db.models.AutoField", "django.db.models.IntegerField", "django.db.models.SET", "django.db.models.DateTimeField" ]
[((278, 335), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (309, 335), False, 'from django.db import migrations, models\n'), ((516, 566), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (536, 566), False, 'from django.db import migrations, models\n'), ((683, 776), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (699, 776), False, 'from django.db import migrations, models\n'), ((806, 864), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'choices': "[(1, 'Comment'), (2, 'Post')]"}), "(choices=[(1, 'Comment'), (2, 'Post')])\n", (825, 864), False, 'from django.db import migrations, models\n'), ((901, 1027), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""user.comment"""'}), "(blank=True, default=None, null=True, on_delete=django.db.\n models.deletion.DO_NOTHING, to='user.comment')\n", (918, 1027), False, 'from django.db import migrations, models\n'), ((1056, 1179), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'default': 'None', 'null': '(True)', 'on_delete': 'django.db.models.deletion.DO_NOTHING', 'to': '"""user.post"""'}), "(blank=True, default=None, null=True, on_delete=django.db.\n models.deletion.DO_NOTHING, to='user.post')\n", (1073, 1179), False, 'from django.db import migrations, models\n'), ((1230, 1271), 'django.db.models.SET', 'models.SET', (['user.models.get_sentinel_user'], {}), '(user.models.get_sentinel_user)\n', (1240, 1271), False, 'from django.db import migrations, models\n')]
"""Serializer classes for DNSaaS API""" import ipaddress from django.conf import settings from django.contrib.auth import get_user_model from powerdns.utils import find_domain_for_record from powerdns.models import ( RECORD_A_TYPES, CryptoKey, Domain, DomainMetadata, DomainTemplate, Record, RecordRequest, RecordTemplate, RequestStates, Service, SuperMaster, TsigKey, ) from rest_framework import serializers from rest_framework.serializers import ( PrimaryKeyRelatedField, ReadOnlyField, ModelSerializer, SlugRelatedField, ) class OwnerSerializer(ModelSerializer): owner = SlugRelatedField( slug_field='username', queryset=get_user_model().objects.all(), allow_null=True, required=False, ) class DomainSerializer(OwnerSerializer): id = ReadOnlyField() service_name = serializers.SerializerMethodField() def get_service_name(self, obj): return obj.service.name if obj.service else '' class Meta: model = Domain read_only_fields = ('notified_serial',) class ServiceSerializer(ModelSerializer): class Meta: model = Service class RecordRequestSerializer(OwnerSerializer): last_change = serializers.SerializerMethodField() target_owner = SlugRelatedField( slug_field='username', queryset=get_user_model().objects.all(), allow_null=True, required=False, ) created = serializers.DateTimeField( format='%Y-%m-%d %H:%M:%S', read_only=True ) modified = serializers.DateTimeField( format='%Y-%m-%d %H:%M:%S', read_only=True ) class Meta: model = RecordRequest def get_last_change(self, obj): if obj.state == RequestStates.OPEN: return obj._get_json_history(obj.get_object()) else: return obj.last_change_json def _trim_whitespace(data_dict, trim_fields): for field_name in trim_fields: if field_name not in data_dict: continue data_dict[field_name] = data_dict[field_name].strip() return data_dict class RecordSerializer(OwnerSerializer): class Meta: model = Record read_only_fields = ('change_date', 'ordername',) domain = PrimaryKeyRelatedField( queryset=Domain.objects.all(), required=False, allow_null=True, ) service = PrimaryKeyRelatedField( queryset=Service.objects.all(), required=False, allow_null=True, # required by setting REQUIRED_SERVICE_FIELD ) service_uid = SlugRelatedField( slug_field='uid', source='service', queryset=Service.objects.all(), allow_null=True, required=False, many=False, read_only=False, # required by setting REQUIRED_SERVICE_FIELD ) service_name = serializers.SerializerMethodField() modified = serializers.DateTimeField( format='%Y-%m-%d %H:%M:%S', read_only=True ) change_request = serializers.SerializerMethodField( 'get_change_record_request' ) delete_request = serializers.SerializerMethodField( 'get_delete_record_request' ) unrestricted_domain = serializers.BooleanField( source='domain.unrestricted', read_only=True ) def is_valid(self, raise_exception=False): if ( 'service_uid' in self.initial_data and not self.initial_data['service_uid'] ): del self.initial_data['service_uid'] return super(RecordSerializer, self).is_valid(raise_exception) def get_service_name(self, obj): return obj.service.name if obj.service else '' def get_change_record_request(self, record): record_request = record.requests.all() if record_request: return record_request[0].key return None def get_delete_record_request(self, record): delete_request = record.delete_request.all() if delete_request: return delete_request[0].key return None def _clean_txt_content(self, record_type, attrs): """ Remove backslashes form `content` (from `attrs`) inplace when `type`=TXT """ # DNS servers don't accept backslashes (\) in content so we neither 
if record_type == 'TXT': attrs['content'] = attrs['content'].replace('\\', '') def _ensure_owner_is_set(self): if self.instance and not self.instance.has_owner(): raise serializers.ValidationError({ 'owner': [ 'Record requires owner to be editable. Please contact DNS support.' # noqa ] }) def _validate_service(self, attrs): if not settings.REQUIRED_SERVICE_FIELD or self.instance: return if 'service' not in attrs: raise serializers.ValidationError({ 'service': [ 'Service is required. Please provide DNSaaS internal ' 'service ID in field `service` or global service UID in ' 'field `service_uid`.' ] }) def validate(self, attrs): self._ensure_owner_is_set() _trim_whitespace(attrs, ['name', 'content']) domain, content, record_type = ( attrs.get('domain'), attrs.get('content'), attrs.get('type') ) if record_type in RECORD_A_TYPES: try: ipaddress.ip_address(content) except ValueError: raise serializers.ValidationError({ 'content': ['Content should be valid IP address'] }) self._clean_txt_content(record_type, attrs) if ( domain and domain.template and domain.template.is_public_domain and content and record_type == 'A' ): address = ipaddress.ip_address(content) if address.is_private: raise serializers.ValidationError( {'content': ['IP address cannot be private.']} ) if not self.instance: # get domain from name only for creation if not domain: domain = find_domain_for_record(attrs['name']) if not domain: raise serializers.ValidationError({ 'domain': [ 'No domain found for name {}'.format( attrs['name'] ) ] }) attrs['domain'] = domain self._validate_service(attrs) return attrs class CryptoKeySerializer(ModelSerializer): class Meta: model = CryptoKey class DomainMetadataSerializer(ModelSerializer): class Meta: model = DomainMetadata class SuperMasterSerializer(ModelSerializer): class Meta: model = SuperMaster class DomainTemplateSerializer(ModelSerializer): class Meta: model = DomainTemplate class RecordTemplateSerializer(ModelSerializer): class Meta: model = RecordTemplate class TsigKeysTemplateSerializer(ModelSerializer): class Meta: model = TsigKey
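# --- Hedged usage sketch (payload keys and values are assumptions) ---
# Runs a record payload through RecordSerializer's full validation chain
# (whitespace trimming, IP checks, domain lookup, service requirement).
def _example_validate(payload):
    serializer = RecordSerializer(data=payload)
    serializer.is_valid(raise_exception=True)
    return serializer.validated_data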
[ "powerdns.utils.find_domain_for_record", "rest_framework.serializers.SerializerMethodField", "powerdns.models.Service.objects.all", "powerdns.models.Domain.objects.all", "django.contrib.auth.get_user_model", "rest_framework.serializers.ReadOnlyField", "ipaddress.ip_address", "rest_framework.serializers.BooleanField", "rest_framework.serializers.DateTimeField", "rest_framework.serializers.ValidationError" ]
[((853, 868), 'rest_framework.serializers.ReadOnlyField', 'ReadOnlyField', ([], {}), '()\n', (866, 868), False, 'from rest_framework.serializers import PrimaryKeyRelatedField, ReadOnlyField, ModelSerializer, SlugRelatedField\n'), ((888, 923), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (921, 923), False, 'from rest_framework import serializers\n'), ((1258, 1293), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1291, 1293), False, 'from rest_framework import serializers\n'), ((1481, 1550), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%d %H:%M:%S"""', 'read_only': '(True)'}), "(format='%Y-%m-%d %H:%M:%S', read_only=True)\n", (1506, 1550), False, 'from rest_framework import serializers\n'), ((1580, 1649), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%d %H:%M:%S"""', 'read_only': '(True)'}), "(format='%Y-%m-%d %H:%M:%S', read_only=True)\n", (1605, 1649), False, 'from rest_framework import serializers\n'), ((2890, 2925), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (2923, 2925), False, 'from rest_framework import serializers\n'), ((2941, 3010), 'rest_framework.serializers.DateTimeField', 'serializers.DateTimeField', ([], {'format': '"""%Y-%m-%d %H:%M:%S"""', 'read_only': '(True)'}), "(format='%Y-%m-%d %H:%M:%S', read_only=True)\n", (2966, 3010), False, 'from rest_framework import serializers\n'), ((3046, 3108), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', (['"""get_change_record_request"""'], {}), "('get_change_record_request')\n", (3079, 3108), False, 'from rest_framework import serializers\n'), ((3144, 3206), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', (['"""get_delete_record_request"""'], {}), "('get_delete_record_request')\n", (3177, 3206), False, 'from rest_framework import serializers\n'), ((3247, 3317), 'rest_framework.serializers.BooleanField', 'serializers.BooleanField', ([], {'source': '"""domain.unrestricted"""', 'read_only': '(True)'}), "(source='domain.unrestricted', read_only=True)\n", (3271, 3317), False, 'from rest_framework import serializers\n'), ((2327, 2347), 'powerdns.models.Domain.objects.all', 'Domain.objects.all', ([], {}), '()\n', (2345, 2347), False, 'from powerdns.models import RECORD_A_TYPES, CryptoKey, Domain, DomainMetadata, DomainTemplate, Record, RecordRequest, RecordTemplate, RequestStates, Service, SuperMaster, TsigKey\n'), ((2459, 2480), 'powerdns.models.Service.objects.all', 'Service.objects.all', ([], {}), '()\n', (2478, 2480), False, 'from powerdns.models import RECORD_A_TYPES, CryptoKey, Domain, DomainMetadata, DomainTemplate, Record, RecordRequest, RecordTemplate, RequestStates, Service, SuperMaster, TsigKey\n'), ((2695, 2716), 'powerdns.models.Service.objects.all', 'Service.objects.all', ([], {}), '()\n', (2714, 2716), False, 'from powerdns.models import RECORD_A_TYPES, CryptoKey, Domain, DomainMetadata, DomainTemplate, Record, RecordRequest, RecordTemplate, RequestStates, Service, SuperMaster, TsigKey\n'), ((4551, 4665), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["{'owner': ['Record requires owner to be editable. Please contact DNS support.']\n }"], {}), "({'owner': [\n 'Record requires owner to be editable. 
Please contact DNS support.']})\n", (4578, 4665), False, 'from rest_framework import serializers\n'), ((4915, 5098), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["{'service': [\n 'Service is required. Please provide DNSaaS internal service ID in field `service` or global service UID in field `service_uid`.'\n ]}"], {}), "({'service': [\n 'Service is required. Please provide DNSaaS internal service ID in field `service` or global service UID in field `service_uid`.'\n ]})\n", (4942, 5098), False, 'from rest_framework import serializers\n'), ((5961, 5990), 'ipaddress.ip_address', 'ipaddress.ip_address', (['content'], {}), '(content)\n', (5981, 5990), False, 'import ipaddress\n'), ((5524, 5553), 'ipaddress.ip_address', 'ipaddress.ip_address', (['content'], {}), '(content)\n', (5544, 5553), False, 'import ipaddress\n'), ((6048, 6123), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["{'content': ['IP address cannot be private.']}"], {}), "({'content': ['IP address cannot be private.']})\n", (6075, 6123), False, 'from rest_framework import serializers\n'), ((6298, 6335), 'powerdns.utils.find_domain_for_record', 'find_domain_for_record', (["attrs['name']"], {}), "(attrs['name'])\n", (6320, 6335), False, 'from powerdns.utils import find_domain_for_record\n'), ((5607, 5692), 'rest_framework.serializers.ValidationError', 'serializers.ValidationError', (["{'content': ['Content should be valid IP address']}"], {}), "({'content': ['Content should be valid IP address']}\n )\n", (5634, 5692), False, 'from rest_framework import serializers\n'), ((713, 729), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (727, 729), False, 'from django.contrib.auth import get_user_model\n'), ((1379, 1395), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1393, 1395), False, 'from django.contrib.auth import get_user_model\n')]
import graphene from graphene_django.types import DjangoObjectType from cask.accounts.models import Follower from cask.utils import optimize_queryset from .models import CheckIn class CheckInScope(graphene.Enum): class Meta: name = "CheckInScope" public = "public" friends = "friends" class CheckInNode(DjangoObjectType): class Meta: model = CheckIn name = "CheckIn" class Query(object): checkins = graphene.List( CheckInNode, id=graphene.UUID(), scope=graphene.Argument(CheckInScope), created_by=graphene.UUID(), ) def resolve_checkins( self, info, id: str = None, scope: str = None, created_by: str = None ): user = info.context.user qs = CheckIn.objects.all() if id: qs = qs.filter(id=id) if scope == "friends": if not user.is_authenticated: return qs.none() qs = qs.filter(created_by__in=Follower.objects.filter(from_user=user.id)) # there's not yet privacy scope elif scope == "public": pass elif scope: raise NotImplementedError if created_by: qs = qs.filter(created_by=created_by) qs = qs.order_by("-created_at") qs = optimize_queryset(qs, info, "checkins") return qs
[ "cask.utils.optimize_queryset", "graphene.Argument", "graphene.UUID", "cask.accounts.models.Follower.objects.filter" ]
[((1305, 1344), 'cask.utils.optimize_queryset', 'optimize_queryset', (['qs', 'info', '"""checkins"""'], {}), "(qs, info, 'checkins')\n", (1322, 1344), False, 'from cask.utils import optimize_queryset\n'), ((499, 514), 'graphene.UUID', 'graphene.UUID', ([], {}), '()\n', (512, 514), False, 'import graphene\n'), ((530, 561), 'graphene.Argument', 'graphene.Argument', (['CheckInScope'], {}), '(CheckInScope)\n', (547, 561), False, 'import graphene\n'), ((582, 597), 'graphene.UUID', 'graphene.UUID', ([], {}), '()\n', (595, 597), False, 'import graphene\n'), ((985, 1027), 'cask.accounts.models.Follower.objects.filter', 'Follower.objects.filter', ([], {'from_user': 'user.id'}), '(from_user=user.id)\n', (1008, 1027), False, 'from cask.accounts.models import Follower\n')]
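Each tuple in an extract_api field opens with a pair of character offsets into the record's code string, followed by the resolved dotted name of the call. One way to sanity-check a record is therefore to slice the source at those offsets. Below is a minimal sketch of such a check; the file names are hypothetical stand-ins for local copies of the record's two fields, and since every element of the extract_api string is a plain Python literal, ast.literal_eval can parse it directly.

import ast

# Hypothetical file names: local copies of the `code` and `extract_api`
# fields of the checkins record above.
code = open("checkins_query.py").read()
entries = ast.literal_eval(open("checkins_extract_api.txt").read())

for entry in entries:
    (start, end), dotted_name = entry[0], entry[1]
    # The slice should reproduce the call expression the tuple describes,
    # e.g. the optimize_queryset(...) call for the span starting at 1305.
    print(dotted_name, "->", code[start:end])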
import os from download_and_view import download_and_unzip, check_directory_contents, read_random_review, \ remove_unneeded_directories from load_raw_datasets import build_raw_datasets, view_dataset from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset from train_model import train_model from evaluate_model import visualise_training from export_model import export_model imdb_url = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz" expected_top_level_contents = ['train', 'test', 'README', 'imdbEr.txt', 'imdb.vocab'] expected_train_contents = ['labeledBow.feat', 'urls_pos.txt', 'urls_unsup.txt', 'unsup', 'pos', 'unsupBow.feat', 'urls_neg.txt', 'neg'] def create_directory_structure(): dataset_dir = download_and_unzip("aclImdb_v1", imdb_url, 'aclImdb') check_directory_contents(dataset_dir, expected_top_level_contents) train_dir = os.path.join(dataset_dir, 'train') read_random_review(train_dir) remove_unneeded_directories(train_dir, 'unsup') return dataset_dir def main(): dataset_dir = create_directory_structure() raw_train_dataset, raw_validation_dataset, raw_test_dataset = build_raw_datasets(dataset_dir) view_dataset(raw_validation_dataset) vectorised_layer = apply_vectorisation(raw_train_dataset) max_features = len(vectorised_layer.get_vocabulary()) view_sample_vectorisation(raw_train_dataset, vectorised_layer) train_dataset, validation_dataset, test_dataset = \ preprocess_dataset(raw_train_dataset, vectorised_layer), \ preprocess_dataset(raw_validation_dataset, vectorised_layer), \ preprocess_dataset(raw_test_dataset, vectorised_layer) model, history = train_model(train_dataset, validation_dataset, max_features) visualise_training(history, model, test_dataset) model_to_export = export_model(vectorised_layer, model) if __name__ == "__main__": main()
[ "load_raw_datasets.build_raw_datasets", "download_and_view.read_random_review", "download_and_view.check_directory_contents", "download_and_view.download_and_unzip", "download_and_view.remove_unneeded_directories", "evaluate_model.visualise_training", "preprocess_data.apply_vectorisation", "load_raw_datasets.view_dataset", "train_model.train_model", "preprocess_data.view_sample_vectorisation", "preprocess_data.preprocess_dataset", "export_model.export_model", "os.path.join" ]
[((800, 853), 'download_and_view.download_and_unzip', 'download_and_unzip', (['"""aclImdb_v1"""', 'imdb_url', '"""aclImdb"""'], {}), "('aclImdb_v1', imdb_url, 'aclImdb')\n", (818, 853), False, 'from download_and_view import download_and_unzip, check_directory_contents, read_random_review, remove_unneeded_directories\n'), ((858, 924), 'download_and_view.check_directory_contents', 'check_directory_contents', (['dataset_dir', 'expected_top_level_contents'], {}), '(dataset_dir, expected_top_level_contents)\n', (882, 924), False, 'from download_and_view import download_and_unzip, check_directory_contents, read_random_review, remove_unneeded_directories\n'), ((941, 975), 'os.path.join', 'os.path.join', (['dataset_dir', '"""train"""'], {}), "(dataset_dir, 'train')\n", (953, 975), False, 'import os\n'), ((980, 1009), 'download_and_view.read_random_review', 'read_random_review', (['train_dir'], {}), '(train_dir)\n', (998, 1009), False, 'from download_and_view import download_and_unzip, check_directory_contents, read_random_review, remove_unneeded_directories\n'), ((1014, 1061), 'download_and_view.remove_unneeded_directories', 'remove_unneeded_directories', (['train_dir', '"""unsup"""'], {}), "(train_dir, 'unsup')\n", (1041, 1061), False, 'from download_and_view import download_and_unzip, check_directory_contents, read_random_review, remove_unneeded_directories\n'), ((1212, 1243), 'load_raw_datasets.build_raw_datasets', 'build_raw_datasets', (['dataset_dir'], {}), '(dataset_dir)\n', (1230, 1243), False, 'from load_raw_datasets import build_raw_datasets, view_dataset\n'), ((1248, 1284), 'load_raw_datasets.view_dataset', 'view_dataset', (['raw_validation_dataset'], {}), '(raw_validation_dataset)\n', (1260, 1284), False, 'from load_raw_datasets import build_raw_datasets, view_dataset\n'), ((1308, 1346), 'preprocess_data.apply_vectorisation', 'apply_vectorisation', (['raw_train_dataset'], {}), '(raw_train_dataset)\n', (1327, 1346), False, 'from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset\n'), ((1409, 1471), 'preprocess_data.view_sample_vectorisation', 'view_sample_vectorisation', (['raw_train_dataset', 'vectorised_layer'], {}), '(raw_train_dataset, vectorised_layer)\n', (1434, 1471), False, 'from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset\n'), ((1751, 1811), 'train_model.train_model', 'train_model', (['train_dataset', 'validation_dataset', 'max_features'], {}), '(train_dataset, validation_dataset, max_features)\n', (1762, 1811), False, 'from train_model import train_model\n'), ((1816, 1864), 'evaluate_model.visualise_training', 'visualise_training', (['history', 'model', 'test_dataset'], {}), '(history, model, test_dataset)\n', (1834, 1864), False, 'from evaluate_model import visualise_training\n'), ((1887, 1924), 'export_model.export_model', 'export_model', (['vectorised_layer', 'model'], {}), '(vectorised_layer, model)\n', (1899, 1924), False, 'from export_model import export_model\n'), ((1536, 1591), 'preprocess_data.preprocess_dataset', 'preprocess_dataset', (['raw_train_dataset', 'vectorised_layer'], {}), '(raw_train_dataset, vectorised_layer)\n', (1554, 1591), False, 'from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset\n'), ((1603, 1663), 'preprocess_data.preprocess_dataset', 'preprocess_dataset', (['raw_validation_dataset', 'vectorised_layer'], {}), '(raw_validation_dataset, vectorised_layer)\n', (1621, 1663), False, 'from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset\n'), ((1675, 1729), 'preprocess_data.preprocess_dataset', 'preprocess_dataset', (['raw_test_dataset', 'vectorised_layer'], {}), '(raw_test_dataset, vectorised_layer)\n', (1693, 1729), False, 'from preprocess_data import apply_vectorisation, view_sample_vectorisation, preprocess_dataset\n')]
from django.test import TestCase from django.test import Client from django.urls import reverse from tests.factories.gbe_factories import ( ConferenceFactory, GenericEventFactory, PersonaFactory, ProfileFactory, ) from tests.contexts import ( StaffAreaContext, VolunteerContext, ) from scheduler.models import Event from tests.functions.gbe_functions import ( assert_alert_exists, grant_privilege, login_as, ) from settings import GBE_DATE_FORMAT from tests.gbe.scheduling.test_scheduling import TestScheduling class TestRehearsalWizard(TestScheduling): '''Tests for the 2nd stage in the rehearsal wizard view''' view_name = 'rehearsal_wizard' def setUp(self): self.show_volunteer = VolunteerContext() self.current_conference = self.show_volunteer.conference self.url = reverse( self.view_name, args=[self.current_conference.conference_slug], urlconf='gbe.scheduling.urls') self.client = Client() self.privileged_user = ProfileFactory().user_object grant_privilege(self.privileged_user, 'Scheduling Mavens') def test_authorized_user_can_access(self): login_as(self.privileged_user, self) response = self.client.get(self.url) self.assertEqual(response.status_code, 200) self.assert_event_was_picked_in_wizard(response, "rehearsal") self.assertContains(response, str(self.show_volunteer.event.e_title)) self.assertContains(response, "Make New Show") def test_authorized_user_empty_conference(self): other_conf = ConferenceFactory() login_as(self.privileged_user, self) self.url = reverse( self.view_name, args=[other_conf.conference_slug], urlconf='gbe.scheduling.urls') response = self.client.get(self.url) self.assertNotContains(response, str(self.show_volunteer.event.e_title)) self.assertContains(response, "Make New Show") def test_auth_user_can_pick_show(self): login_as(self.privileged_user, self) response = self.client.post( self.url, data={ 'pick_show': True, 'show': self.show_volunteer.sched_event.pk}, follow=True) self.assertRedirects( response, "%s?rehearsal_open=True" % reverse( 'edit_show', urlconf='gbe.scheduling.urls', args=[self.current_conference.conference_slug, self.show_volunteer.sched_event.pk])) def test_invalid_form(self): login_as(self.privileged_user, self) response = self.client.post( self.url, data={ 'pick_show': True, 'show': "boo"}) self.assertContains( response, 'Select a valid choice. boo is not one of the available choices.') def test_auth_user_pick_new_show(self): login_as(self.privileged_user, self) response = self.client.post( self.url, data={ 'pick_show': True, 'show': ""}, follow=True) self.assertRedirects( response, reverse('create_ticketed_event_wizard', urlconf='gbe.scheduling.urls', args=[self.current_conference.conference_slug, "show"])+"?")
[ "tests.contexts.VolunteerContext", "django.test.Client", "tests.factories.gbe_factories.ConferenceFactory", "django.urls.reverse", "tests.functions.gbe_functions.login_as", "tests.functions.gbe_functions.grant_privilege", "tests.factories.gbe_factories.ProfileFactory" ]
[((742, 760), 'tests.contexts.VolunteerContext', 'VolunteerContext', ([], {}), '()\n', (758, 760), False, 'from tests.contexts import StaffAreaContext, VolunteerContext\n'), ((845, 951), 'django.urls.reverse', 'reverse', (['self.view_name'], {'args': '[self.current_conference.conference_slug]', 'urlconf': '"""gbe.scheduling.urls"""'}), "(self.view_name, args=[self.current_conference.conference_slug],\n    urlconf='gbe.scheduling.urls')\n", (852, 951), False, 'from django.urls import reverse\n'), ((1007, 1015), 'django.test.Client', 'Client', ([], {}), '()\n', (1013, 1015), False, 'from django.test import Client\n'), ((1084, 1142), 'tests.functions.gbe_functions.grant_privilege', 'grant_privilege', (['self.privileged_user', '"""Scheduling Mavens"""'], {}), "(self.privileged_user, 'Scheduling Mavens')\n", (1099, 1142), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((1199, 1235), 'tests.functions.gbe_functions.login_as', 'login_as', (['self.privileged_user', 'self'], {}), '(self.privileged_user, self)\n', (1207, 1235), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((1639, 1658), 'tests.factories.gbe_factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (1656, 1658), False, 'from tests.factories.gbe_factories import ConferenceFactory, GenericEventFactory, PersonaFactory, ProfileFactory\n'), ((1667, 1703), 'tests.functions.gbe_functions.login_as', 'login_as', (['self.privileged_user', 'self'], {}), '(self.privileged_user, self)\n', (1675, 1703), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((1723, 1817), 'django.urls.reverse', 'reverse', (['self.view_name'], {'args': '[other_conf.conference_slug]', 'urlconf': '"""gbe.scheduling.urls"""'}), "(self.view_name, args=[other_conf.conference_slug], urlconf=\n    'gbe.scheduling.urls')\n", (1730, 1817), False, 'from django.urls import reverse\n'), ((2143, 2179), 'tests.functions.gbe_functions.login_as', 'login_as', (['self.privileged_user', 'self'], {}), '(self.privileged_user, self)\n', (2151, 2179), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((2720, 2756), 'tests.functions.gbe_functions.login_as', 'login_as', (['self.privileged_user', 'self'], {}), '(self.privileged_user, self)\n', (2728, 2756), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((3085, 3121), 'tests.functions.gbe_functions.login_as', 'login_as', (['self.privileged_user', 'self'], {}), '(self.privileged_user, self)\n', (3093, 3121), False, 'from tests.functions.gbe_functions import assert_alert_exists, grant_privilege, login_as\n'), ((1047, 1063), 'tests.factories.gbe_factories.ProfileFactory', 'ProfileFactory', ([], {}), '()\n', (1061, 1063), False, 'from tests.factories.gbe_factories import ConferenceFactory, GenericEventFactory, PersonaFactory, ProfileFactory\n'), ((2470, 2610), 'django.urls.reverse', 'reverse', (['"""edit_show"""'], {'urlconf': '"""gbe.scheduling.urls"""', 'args': '[self.\n    current_conference.conference_slug, self.show_volunteer.sched_event.pk]'}), "('edit_show', urlconf='gbe.scheduling.urls', args=[self.\n    current_conference.conference_slug, self.show_volunteer.sched_event.pk])\n", (2477, 2610), False, 'from django.urls import reverse\n'), ((3353, 3484), 'django.urls.reverse', 'reverse', (['"""create_ticketed_event_wizard"""'], {'urlconf': '"""gbe.scheduling.urls"""', 'args': "[self.current_conference.conference_slug, 'show']"}), "('create_ticketed_event_wizard', urlconf='gbe.scheduling.urls', args\n    =[self.current_conference.conference_slug, 'show'])\n", (3360, 3484), False, 'from django.urls import reverse\n')]
import itertools from budget_nanny.api_requests import APIRequester, BUDGETS_ENDPOINT, BUDGET_ENDPOINTS DEFAULT_BUDGET = 'Personal' class BudgetRequester: def __init__(self, budget): self.budget = budget self.api_requester = APIRequester() def create_transaction(self, transaction_data): return self.api_requester.post( BUDGET_ENDPOINTS['transactions'].replace('budget_id', self.budget['id']), { 'transaction': transaction_data } ) def create_transactions(self, transactions): return self.api_requester.post( BUDGET_ENDPOINTS['transactions'].replace('budget_id', self.budget['id']), { 'transactions': list(transactions) } ) def get_accounts(self): return self._get_budget_collection('accounts') def get_categories(self): return itertools.chain.from_iterable([ x['categories'] for x in self.api_requester.get( BUDGET_ENDPOINTS['categories'].replace('budget_id', self.budget['id']) )['category_groups'] ]) def get_payees(self): return self._get_budget_collection('payees') def get_transactions(self): return self._get_budget_collection('transactions') def _get_budget_collection(self, collection_key): return self.api_requester.get( BUDGET_ENDPOINTS[collection_key].replace('budget_id', self.budget['id']) )[collection_key] def get_budgets(): return APIRequester().get(BUDGETS_ENDPOINT)['budgets'] budgets = get_budgets() default_budget = [x for x in budgets if x['name'] == DEFAULT_BUDGET][0] default_budget_requester = BudgetRequester(default_budget)
[ "budget_nanny.api_requests.APIRequester" ]
[((249, 263), 'budget_nanny.api_requests.APIRequester', 'APIRequester', ([], {}), '()\n', (261, 263), False, 'from budget_nanny.api_requests import APIRequester, BUDGETS_ENDPOINT, BUDGET_ENDPOINTS\n'), ((1533, 1547), 'budget_nanny.api_requests.APIRequester', 'APIRequester', ([], {}), '()\n', (1545, 1547), False, 'from budget_nanny.api_requests import APIRequester, BUDGETS_ENDPOINT, BUDGET_ENDPOINTS\n')]
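The apis lists read like call targets resolved through each file's imports. The records themselves do not say how they were generated, but a rough approximation of that resolution step can be sketched with the standard-library ast module; this toy version only follows `from X import Y` bindings and deliberately ignores plain `import X` modules and aliases:

import ast


def extract_api_calls(source: str):
    """Roughly resolve called names through top-level `from` imports."""
    tree = ast.parse(source)
    origins = {}  # local name -> module it was imported from
    for node in ast.walk(tree):
        if isinstance(node, ast.ImportFrom) and node.module:
            for alias in node.names:
                origins[alias.asname or alias.name] = node.module
    calls = []
    for node in ast.walk(tree):
        if not isinstance(node, ast.Call):
            continue
        # Rebuild the dotted callee, e.g. 'Domain.objects.all' or 'login_as'.
        parts, target = [], node.func
        while isinstance(target, ast.Attribute):
            parts.append(target.attr)
            target = target.value
        if isinstance(target, ast.Name):
            parts.append(target.id)
            parts.reverse()
            if parts[0] in origins:
                calls.append(origins[parts[0]] + '.' + '.'.join(parts))
    return calls

Applied to the budget_nanny module above, this sketch would emit 'budget_nanny.api_requests.APIRequester' and nothing else, since the remaining calls go through `self` or the plain `import itertools` binding — consistent with that record's apis field.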
import os

import cv2
import numpy as np

from .fs_access import FSAccess


def read_image_file(fname_url):
    # Read the raw bytes and decode them as a grayscale image (flag 0).
    with FSAccess(fname_url, True) as image_f:
        img_buf = image_f.read()
        np_arr = np.frombuffer(img_buf, np.uint8)
        img = cv2.imdecode(np_arr, 0)
        return img


def write_image_file(fname_url, img):
    # Encode using the extension of the target name; cv2.imencode returns
    # a (success, buffer) pair, so unpack it before writing.
    ok, img_buf = cv2.imencode(os.path.splitext(fname_url)[1], img)
    if not ok:
        raise ValueError('Could not encode image for %s' % fname_url)
    with FSAccess(fname_url, True, read=False) as image_f:
        image_f.write(img_buf.tobytes())
[ "numpy.frombuffer", "cv2.imdecode", "numpy.getbuffer" ]
[((330, 347), 'numpy.getbuffer', 'np.getbuffer', (['img'], {}), '(img)\n', (342, 347), True, 'import numpy as np\n'), ((192, 224), 'numpy.frombuffer', 'np.frombuffer', (['img_buf', 'np.uint8'], {}), '(img_buf, np.uint8)\n', (205, 224), True, 'import numpy as np\n'), ((239, 262), 'cv2.imdecode', 'cv2.imdecode', (['np_arr', '(0)'], {}), '(np_arr, 0)\n', (251, 262), False, 'import cv2\n')]
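A hypothetical round trip through the two image helpers in the record above, assuming FSAccess accepts ordinary local paths (a guess; its semantics are not shown here):

# Hypothetical usage; "input.png" and "copy.png" are placeholder paths.
img = read_image_file("input.png")
write_image_file("copy.png", img)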