code
stringlengths
22
1.05M
apis
listlengths
1
3.31k
extract_api
stringlengths
75
3.25M
import torch from cnsproject.network.neural_populations import LIFPopulation, ELIFPopulation, AELIFPopulation from cnsproject.network.monitors import Monitor from cnsproject.network.connections import DenseConnection, RandomConnection from cnsproject.plotting.plotting import plot_current, raster, population_activity from typing import Tuple, Callable, Iterable, Union class phase4(): def __init__( self, N: int, time: int = 100, dt: Union[float, torch.Tensor]= 1, ** kwargs ) -> None: self.shape_exc = (int(N*0.8/2),) self.shape_inh = (int(N*0.2),) self.time = time self.dt = dt def current_maker(self, mean, std, threshold=None, jump=60) -> Callable[[int], int]: torch.manual_seed(16) if threshold != None: a = torch.empty(threshold + 1).normal_(mean=mean+jump, std=std) b = torch.empty(self.time - threshold).normal_(mean=mean, std=std) x = torch.cat((b,a)) else: x = torch.empty(self.time + 1).normal_(mean=mean, std=std) y = torch.Tensor([0]) x = torch.cat((y,x)) def current_rand(t: int): return x[int(t)] return current_rand def pop_maker( self, shape: Iterable[int], model = "LIF", is_inhibitory: bool = False, v_rest: Union[float, torch.Tensor] = -70., threshold: Union[float, torch.Tensor] = -50., tau: Union[float, torch.Tensor] = 15, dt: Union[float, torch.Tensor]= 1, R: Union[float, torch.Tensor] = 1., delta_t: int = 1., theta_rh: float = -55., tau_w: Union[float, torch.Tensor] = 5, w: Union[float, torch.Tensor] = 2, a: Union[float, torch.Tensor] = 5, b: Union[float, torch.Tensor] = 2): if model == "LIF": neuron = LIFPopulation(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest, threshold=threshold, tau=tau, dt=dt, R=R) elif model == "ELIF": neuron = LIFPopulation(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest, threshold=threshold, tau=tau, dt=dt, R=R, theta_rh=theta_rh, delta_t=delta_t) elif model == "AELIF": neuron = AELIFPopulation(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest, threshold=threshold, tau=tau, dt=dt, R=R, theta_rh=theta_rh, 
delta_t=delta_t, tau_w=tau_w, w=w, a=a, b=b) return neuron def Simulation(self, pop_inh, pop_exc1, pop_exc2, mean: list, std: list): current0 = self.current_maker(mean[0], std[0]) current1 = self.current_maker(mean[1], std[1], threshold=50) current2 = self.current_maker(mean[2], std[2]) monitor_exc1 = Monitor(pop_exc1, state_variables=["s", "v"]) monitor_exc1.set_time_steps(self.time, self.dt) monitor_exc1.reset_state_variables() monitor_exc2 = Monitor(pop_exc2, state_variables=["s", "v"]) monitor_exc2.set_time_steps(self.time, self.dt) monitor_exc2.reset_state_variables() monitor_inh = Monitor(pop_inh, state_variables=["s", "v"]) monitor_inh.set_time_steps(self.time, self.dt) monitor_inh.reset_state_variables() connect_exc1_to_inh = DenseConnection(pop_exc1, pop_inh, C=25,control=1) connect_inh_to_exc1 = DenseConnection(pop_inh, pop_exc1, C=13,control=1) connect_exc2_to_inh = DenseConnection(pop_exc2, pop_inh, C=25,control=1) connect_inh_to_exc2 = DenseConnection(pop_inh, pop_exc2, C=13,control=1) connect_exc1_to_exc2 = DenseConnection(pop_exc1, pop_exc2, C=5,control=1) connect_exc2_to_exc1 = DenseConnection(pop_exc2, pop_exc1, C=5,control=1) connect_inside_exc1 = RandomConnection(pop_exc1, pop_exc1, insode=True, C=10,control=1) connect_inside_exc2 = RandomConnection(pop_exc2, pop_exc2, inside=True, C=10,control=1) connect_inside_inh = RandomConnection(pop_inh, pop_inh, inside=True, C=10,control=1) for t in range(self.time): input_I_inh = torch.Tensor([current0(t=t)] * pop_inh.shape[0]) input_I_exc1 = torch.Tensor([current1(t=t)] * pop_exc1.shape[0]) input_I_exc2 = torch.Tensor([current2(t=t)] * pop_exc2.shape[0]) noise_inh = torch.empty(input_I_inh.size(0)).normal_(0, 5) noise_exc1 = torch.empty(input_I_exc1.size(0)).normal_(0, 5) noise_exc2 = torch.empty(input_I_exc2.size(0)).normal_(0, 5) traces_inh = connect_exc1_to_inh.compute(pop_exc1.s) +\ connect_exc2_to_inh.compute(pop_exc2.s) + \ connect_inside_inh.compute(pop_inh.s) traces_exc1 = 
connect_inh_to_exc1.compute(pop_inh.s) +\ connect_exc2_to_exc1.compute(pop_exc2.s) + \ connect_inside_exc1.compute(pop_exc1.s) traces_exc2 = connect_inh_to_exc2.compute(pop_inh.s) +\ connect_exc1_to_exc2.compute(pop_exc1.s) + \ connect_inside_exc2.compute(pop_exc2.s) pop_inh.forward(I=input_I_inh-noise_inh, traces=traces_inh) pop_exc1.forward(I=input_I_exc1-noise_exc1, traces=traces_exc1) pop_exc2.forward(I=input_I_exc2-noise_exc2, traces=traces_exc2) monitor_inh.record() monitor_exc1.record() monitor_exc2.record() s_inh = monitor_inh.get("s") s_exc1 = monitor_exc1.get("s") s_exc2 = monitor_exc2.get("s") population_activity(s_inh, "of inh") population_activity(s_exc1, "of exc1") population_activity(s_exc2, "of exc2") plot_current(current=current0, time=(0, self.time), dt=self.dt, label="inh input current") plot_current(current=current1, time=(0, self.time), dt=self.dt, label="exc1 input current") plot_current(current=current2, time=(0, self.time), dt=self.dt, label="exc2 input current") raster(s_exc1, label="exc1") raster(s_exc2, label="exc2") raster(s_inh=s_inh, label="inh") if __name__ == "__main__": p = phase4(100) pop_exc1=p.pop_maker(p.shape_exc, tau=20) pop_exc2=p.pop_maker(p.shape_exc, tau=20) pop_inh=p.pop_maker(p.shape_inh, tau=10, R=1, is_inhibitory=True) p.Simulation(pop_inh, pop_exc1, pop_exc2, [20, 45, 45], [5, 5, 5])
[ "cnsproject.network.neural_populations.AELIFPopulation", "torch.manual_seed", "cnsproject.network.neural_populations.LIFPopulation", "cnsproject.network.connections.DenseConnection", "torch.cat", "cnsproject.plotting.plotting.population_activity", "cnsproject.network.monitors.Monitor", "torch.empty", "cnsproject.plotting.plotting.plot_current", "torch.Tensor", "cnsproject.plotting.plotting.raster", "cnsproject.network.connections.RandomConnection" ]
[((814, 835), 'torch.manual_seed', 'torch.manual_seed', (['(16)'], {}), '(16)\n', (831, 835), False, 'import torch\n'), ((1158, 1175), 'torch.Tensor', 'torch.Tensor', (['[0]'], {}), '([0])\n', (1170, 1175), False, 'import torch\n'), ((1189, 1206), 'torch.cat', 'torch.cat', (['(y, x)'], {}), '((y, x))\n', (1198, 1206), False, 'import torch\n'), ((3859, 3904), 'cnsproject.network.monitors.Monitor', 'Monitor', (['pop_exc1'], {'state_variables': "['s', 'v']"}), "(pop_exc1, state_variables=['s', 'v'])\n", (3866, 3904), False, 'from cnsproject.network.monitors import Monitor\n'), ((4042, 4087), 'cnsproject.network.monitors.Monitor', 'Monitor', (['pop_exc2'], {'state_variables': "['s', 'v']"}), "(pop_exc2, state_variables=['s', 'v'])\n", (4049, 4087), False, 'from cnsproject.network.monitors import Monitor\n'), ((4224, 4268), 'cnsproject.network.monitors.Monitor', 'Monitor', (['pop_inh'], {'state_variables': "['s', 'v']"}), "(pop_inh, state_variables=['s', 'v'])\n", (4231, 4268), False, 'from cnsproject.network.monitors import Monitor\n'), ((4419, 4470), 'cnsproject.network.connections.DenseConnection', 'DenseConnection', (['pop_exc1', 'pop_inh'], {'C': '(25)', 'control': '(1)'}), '(pop_exc1, pop_inh, C=25, control=1)\n', (4434, 4470), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4501, 4552), 'cnsproject.network.connections.DenseConnection', 'DenseConnection', (['pop_inh', 'pop_exc1'], {'C': '(13)', 'control': '(1)'}), '(pop_inh, pop_exc1, C=13, control=1)\n', (4516, 4552), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4585, 4636), 'cnsproject.network.connections.DenseConnection', 'DenseConnection', (['pop_exc2', 'pop_inh'], {'C': '(25)', 'control': '(1)'}), '(pop_exc2, pop_inh, C=25, control=1)\n', (4600, 4636), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4667, 4718), 'cnsproject.network.connections.DenseConnection', 
'DenseConnection', (['pop_inh', 'pop_exc2'], {'C': '(13)', 'control': '(1)'}), '(pop_inh, pop_exc2, C=13, control=1)\n', (4682, 4718), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4768, 4819), 'cnsproject.network.connections.DenseConnection', 'DenseConnection', (['pop_exc1', 'pop_exc2'], {'C': '(5)', 'control': '(1)'}), '(pop_exc1, pop_exc2, C=5, control=1)\n', (4783, 4819), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4851, 4902), 'cnsproject.network.connections.DenseConnection', 'DenseConnection', (['pop_exc2', 'pop_exc1'], {'C': '(5)', 'control': '(1)'}), '(pop_exc2, pop_exc1, C=5, control=1)\n', (4866, 4902), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((4943, 5009), 'cnsproject.network.connections.RandomConnection', 'RandomConnection', (['pop_exc1', 'pop_exc1'], {'insode': '(True)', 'C': '(10)', 'control': '(1)'}), '(pop_exc1, pop_exc1, insode=True, C=10, control=1)\n', (4959, 5009), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((5040, 5106), 'cnsproject.network.connections.RandomConnection', 'RandomConnection', (['pop_exc2', 'pop_exc2'], {'inside': '(True)', 'C': '(10)', 'control': '(1)'}), '(pop_exc2, pop_exc2, inside=True, C=10, control=1)\n', (5056, 5106), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((5136, 5200), 'cnsproject.network.connections.RandomConnection', 'RandomConnection', (['pop_inh', 'pop_inh'], {'inside': '(True)', 'C': '(10)', 'control': '(1)'}), '(pop_inh, pop_inh, inside=True, C=10, control=1)\n', (5152, 5200), False, 'from cnsproject.network.connections import DenseConnection, RandomConnection\n'), ((6824, 6860), 'cnsproject.plotting.plotting.population_activity', 'population_activity', (['s_inh', '"""of inh"""'], {}), "(s_inh, 'of inh')\n", (6843, 6860), False, 'from cnsproject.plotting.plotting import 
plot_current, raster, population_activity\n'), ((6871, 6909), 'cnsproject.plotting.plotting.population_activity', 'population_activity', (['s_exc1', '"""of exc1"""'], {}), "(s_exc1, 'of exc1')\n", (6890, 6909), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((6919, 6957), 'cnsproject.plotting.plotting.population_activity', 'population_activity', (['s_exc2', '"""of exc2"""'], {}), "(s_exc2, 'of exc2')\n", (6938, 6957), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((6985, 7080), 'cnsproject.plotting.plotting.plot_current', 'plot_current', ([], {'current': 'current0', 'time': '(0, self.time)', 'dt': 'self.dt', 'label': '"""inh input current"""'}), "(current=current0, time=(0, self.time), dt=self.dt, label=\n 'inh input current')\n", (6997, 7080), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((7096, 7192), 'cnsproject.plotting.plotting.plot_current', 'plot_current', ([], {'current': 'current1', 'time': '(0, self.time)', 'dt': 'self.dt', 'label': '"""exc1 input current"""'}), "(current=current1, time=(0, self.time), dt=self.dt, label=\n 'exc1 input current')\n", (7108, 7192), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((7198, 7294), 'cnsproject.plotting.plotting.plot_current', 'plot_current', ([], {'current': 'current2', 'time': '(0, self.time)', 'dt': 'self.dt', 'label': '"""exc2 input current"""'}), "(current=current2, time=(0, self.time), dt=self.dt, label=\n 'exc2 input current')\n", (7210, 7294), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((7320, 7348), 'cnsproject.plotting.plotting.raster', 'raster', (['s_exc1'], {'label': '"""exc1"""'}), "(s_exc1, label='exc1')\n", (7326, 7348), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((7358, 7386), 
'cnsproject.plotting.plotting.raster', 'raster', (['s_exc2'], {'label': '"""exc2"""'}), "(s_exc2, label='exc2')\n", (7364, 7386), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((7396, 7428), 'cnsproject.plotting.plotting.raster', 'raster', ([], {'s_inh': 's_inh', 'label': '"""inh"""'}), "(s_inh=s_inh, label='inh')\n", (7402, 7428), False, 'from cnsproject.plotting.plotting import plot_current, raster, population_activity\n'), ((1041, 1058), 'torch.cat', 'torch.cat', (['(b, a)'], {}), '((b, a))\n', (1050, 1058), False, 'import torch\n'), ((2041, 2157), 'cnsproject.network.neural_populations.LIFPopulation', 'LIFPopulation', ([], {'shape': 'shape', 'is_inhibitory': 'is_inhibitory', 'v_rest': 'v_rest', 'threshold': 'threshold', 'tau': 'tau', 'dt': 'dt', 'R': 'R'}), '(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest,\n threshold=threshold, tau=tau, dt=dt, R=R)\n', (2054, 2157), False, 'from cnsproject.network.neural_populations import LIFPopulation, ELIFPopulation, AELIFPopulation\n'), ((2423, 2580), 'cnsproject.network.neural_populations.LIFPopulation', 'LIFPopulation', ([], {'shape': 'shape', 'is_inhibitory': 'is_inhibitory', 'v_rest': 'v_rest', 'threshold': 'threshold', 'tau': 'tau', 'dt': 'dt', 'R': 'R', 'theta_rh': 'theta_rh', 'delta_t': 'delta_t'}), '(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest,\n threshold=threshold, tau=tau, dt=dt, R=R, theta_rh=theta_rh, delta_t=\n delta_t)\n', (2436, 2580), False, 'from cnsproject.network.neural_populations import LIFPopulation, ELIFPopulation, AELIFPopulation\n'), ((884, 910), 'torch.empty', 'torch.empty', (['(threshold + 1)'], {}), '(threshold + 1)\n', (895, 910), False, 'import torch\n'), ((961, 995), 'torch.empty', 'torch.empty', (['(self.time - threshold)'], {}), '(self.time - threshold)\n', (972, 995), False, 'import torch\n'), ((1090, 1116), 'torch.empty', 'torch.empty', (['(self.time + 1)'], {}), '(self.time + 1)\n', (1101, 1116), False, 'import 
torch\n'), ((2914, 3101), 'cnsproject.network.neural_populations.AELIFPopulation', 'AELIFPopulation', ([], {'shape': 'shape', 'is_inhibitory': 'is_inhibitory', 'v_rest': 'v_rest', 'threshold': 'threshold', 'tau': 'tau', 'dt': 'dt', 'R': 'R', 'theta_rh': 'theta_rh', 'delta_t': 'delta_t', 'tau_w': 'tau_w', 'w': 'w', 'a': 'a', 'b': 'b'}), '(shape=shape, is_inhibitory=is_inhibitory, v_rest=v_rest,\n threshold=threshold, tau=tau, dt=dt, R=R, theta_rh=theta_rh, delta_t=\n delta_t, tau_w=tau_w, w=w, a=a, b=b)\n', (2929, 3101), False, 'from cnsproject.network.neural_populations import LIFPopulation, ELIFPopulation, AELIFPopulation\n')]
from bank_account_app.models import BankAccount from bank_account_app.permissions import (CanChangeBankAccount, CanViewBankAccount) from bank_account_app.serializers import (BankAccountDetailsSerializer, BankAccountShortDetailsSerializer) from django.db import transaction from django.db.models import F from rest_framework import permissions, validators, viewsets from rest_framework.decorators import action class BankAccountViewSet(viewsets.ReadOnlyModelViewSet): ''' retrieve: Get the specified bank account. list: Get a list of all bank accounts. ''' def get_queryset(self): querysets_dict = { 'retrieve': BankAccount.objects.all(), 'list': BankAccount.objects.all(), 'top_up': BankAccount.objects.all(), } queryset = querysets_dict.get(self.action) return queryset.distinct() def get_serializer_class(self): serializers_dict = { 'retrieve': BankAccountDetailsSerializer, 'list': BankAccountShortDetailsSerializer, 'top_up': BankAccountDetailsSerializer, } serializer_class = serializers_dict.get(self.action) return serializer_class def get_permissions(self): base_permissions = [permissions.IsAuthenticated, CanViewBankAccount] permissions_dict = { 'retrieve': [], 'list': [], 'top_up': [CanChangeBankAccount], } base_permissions += permissions_dict.get(self.action, []) return [permission() for permission in base_permissions] @action(methods=['PUT', 'PATCH'], detail=True) def top_up(self, request, pk=None): # Top up balance to bank account try: bank_account = BankAccount.objects.get(pk=pk) amount = self.request.POST.get('amount') amount = self.validate_top_up_amount(amount) with transaction.atomic(): bank_account.balance = F('balance') + amount bank_account.save() return self.retrieve(request, pk=pk) except BankAccount.DoesNotExist: raise validators.ValidationError({ 'bank_account': 'Bank account with specified id doesn\'t exists!', }) def validate_top_up_amount(self, value): if value is None: raise validators.ValidationError({ 'amount': 'This field is required!' 
}) float_value = 0 try: float_value = float(value) except ValueError: raise validators.ValidationError({ 'amount': 'Incorrect value!' }) if float_value < 0: raise validators.ValidationError({ 'amount': 'This field is has to be positive!' }) return float_value
[ "bank_account_app.models.BankAccount.objects.get", "bank_account_app.models.BankAccount.objects.all", "rest_framework.validators.ValidationError", "django.db.models.F", "rest_framework.decorators.action", "django.db.transaction.atomic" ]
[((1686, 1731), 'rest_framework.decorators.action', 'action', ([], {'methods': "['PUT', 'PATCH']", 'detail': '(True)'}), "(methods=['PUT', 'PATCH'], detail=True)\n", (1692, 1731), False, 'from rest_framework.decorators import action\n'), ((754, 779), 'bank_account_app.models.BankAccount.objects.all', 'BankAccount.objects.all', ([], {}), '()\n', (777, 779), False, 'from bank_account_app.models import BankAccount\n'), ((801, 826), 'bank_account_app.models.BankAccount.objects.all', 'BankAccount.objects.all', ([], {}), '()\n', (824, 826), False, 'from bank_account_app.models import BankAccount\n'), ((850, 875), 'bank_account_app.models.BankAccount.objects.all', 'BankAccount.objects.all', ([], {}), '()\n', (873, 875), False, 'from bank_account_app.models import BankAccount\n'), ((1853, 1883), 'bank_account_app.models.BankAccount.objects.get', 'BankAccount.objects.get', ([], {'pk': 'pk'}), '(pk=pk)\n', (1876, 1883), False, 'from bank_account_app.models import BankAccount\n'), ((2456, 2521), 'rest_framework.validators.ValidationError', 'validators.ValidationError', (["{'amount': 'This field is required!'}"], {}), "({'amount': 'This field is required!'})\n", (2482, 2521), False, 'from rest_framework import permissions, validators, viewsets\n'), ((2810, 2885), 'rest_framework.validators.ValidationError', 'validators.ValidationError', (["{'amount': 'This field is has to be positive!'}"], {}), "({'amount': 'This field is has to be positive!'})\n", (2836, 2885), False, 'from rest_framework import permissions, validators, viewsets\n'), ((2012, 2032), 'django.db.transaction.atomic', 'transaction.atomic', ([], {}), '()\n', (2030, 2032), False, 'from django.db import transaction\n'), ((2239, 2337), 'rest_framework.validators.ValidationError', 'validators.ValidationError', (['{\'bank_account\': "Bank account with specified id doesn\'t exists!"}'], {}), '({\'bank_account\':\n "Bank account with specified id doesn\'t exists!"})\n', (2265, 2337), False, 'from rest_framework import 
permissions, validators, viewsets\n'), ((2674, 2732), 'rest_framework.validators.ValidationError', 'validators.ValidationError', (["{'amount': 'Incorrect value!'}"], {}), "({'amount': 'Incorrect value!'})\n", (2700, 2732), False, 'from rest_framework import permissions, validators, viewsets\n'), ((2073, 2085), 'django.db.models.F', 'F', (['"""balance"""'], {}), "('balance')\n", (2074, 2085), False, 'from django.db.models import F\n')]
""" Classes and functions for path exploration on semantic graphs. """ #import pdb import re import warnings from collections import deque, defaultdict from itertools import product from .components import (Pred, links, var_sort) from .util import powerset from .config import IVARG_ROLE from delphin.exceptions import XmrsError # for rebuilding Xmrs from paths from delphin.mrs import Node, Link, Pred, Dmrs TOP = 'TOP' STAR = '*' # flags NODEID = NID = 1 # pred#NID... or #NID... PRED = P = 2 # pred or "pred" or 'pred VARSORT = VS = 4 # pred[e], pred[x], etc. VARPROPS = VP = 8 # pred[@PROP=val] OUTAXES = OUT = 16 # pred:ARG1/NEQ> INAXES = IN = 32 # pred<ARG1/EQ: UNDIRECTEDAXES = UND = 64 # pred:/EQ: SUBPATHS = SP = 128 # pred:ARG1/NEQ>pred2 CARG = C = 256 # pred:CARG>"value" BALANCED = B = 512 CONTEXT = VS | VP | SP ALLAXES = OUT | IN | UND DEFAULT = P | VS | VP | OUT | IN | SP ALL = NID | P | VS | VP | OUT | IN | UND | SP class XmrsPathError(XmrsError): pass # GRAPH WALKING ######################################################## def axis_sort(axis): return ( not axis[-1] == '>', # forward links first not axis[0] == '<', # then backward, then undirected not (len(axis) >= 5 and axis[1:4] == 'LBL'), # LBL before other args (len(axis) >= 6 and axis[1:5] == 'BODY'), # BODY last axis[1:] # otherwise alphabtical ) def step_sort(step): nodeid, axis = step return tuple( list(axis_sort(axis)) + [nodeid] ) def walk(xmrs, start=0, method='headed', sort_key=step_sort): if method not in ('top-down', 'bottom-up', 'headed'): raise XmrsPathError("Invalid path-finding method: {}".format(method)) if not (start == 0 or xmrs.pred(start)): raise XmrsPathError('Start nodeid not in Xmrs graph.') linkdict = _build_linkdict(xmrs) for step in _walk(start, linkdict, set(), method, sort_key): yield step def _walk(nodeid, linkdict, visited, method, sort_key): if nodeid in visited: return visited.add(nodeid) local_links = linkdict.get(nodeid, []) steps = sorted( filter(_axis_filter(method), 
local_links), key=sort_key ) for tgtnid, axis in steps: # if this undirected link was already traversed in the other # direction, just yield this step but don't recurse if axis == ':/EQ:' and tgtnid in visited: #yield (nodeid, tgtnid, axis) continue yield (nodeid, tgtnid, axis) for step in _walk(tgtnid, linkdict, visited, method, sort_key): yield step def _build_linkdict(xmrs): ld = defaultdict(list) for link in links(xmrs): axis = '{}/{}'.format(link.rargname or '', link.post) if link_is_directed(link): ld[link.start].append((link.end, ':{}>'.format(axis))) ld[link.end].append((link.start, '<{}:'.format(axis))) else: # pretend they are directed #ld[link.end]['<{}:'.format(axis)] = link.start ld[link.start].append((link.end, ':{}:'.format(axis))) ld[link.end].append((link.start, ':{}:'.format(axis))) return ld def _axis_filter(method): # top-down: :X/Y> or :X/Y: (the latter only if added) def axis_filter(step): nid, axis = step if method == 'headed' and headed(axis) or \ method == 'top-down' and axis.startswith(':') or \ method == 'bottom-up' and axis.endswith(':'): return True return False return axis_filter def link_is_directed(link): return bool(link.rargname) or link.post != 'EQ' def headed(axis): # quantifiers and X/EQ links are not the heads of their subgraphs if axis == '<RSTR/H:' or axis.endswith('/EQ:'): return True if (axis == ':RSTR/H>' or axis.endswith('/EQ>') or axis.startswith('<')): return False return True # CLASSES ############################################################## class XmrsPathNode(object): __slots__ = ('nodeid', 'pred', 'context', 'links', '_overlapping_links', '_depth', '_order') def __init__(self, nodeid, pred, context=None, links=None): self.nodeid = nodeid self.pred = pred self.context = dict(context or []) self.links = dict(links or []) self._overlapping_links = {} # {overlapping_axis: orig_axis, ...} self._depth = ( max([-1] + [x._depth for x in self.links.values() if x is not None]) + 1 ) self._order = ( sum(x._order for x in 
self.links.values() if x is not None) + 1 ) def __getitem__(self, key): return self.links[key] def __iter__(self): return iter(self.links.items()) def __len__(self): return self._depth def update(self, other): self.nodeid = other.nodeid or self.nodeid self.pred = other.pred or self.pred self.context.update(other.context or []) for axis, tgt in other.links.items(): if not self.links.get(axis): self.links[axis] = tgt else: self[axis].update(tgt) def depth(self): return self._depth def order(self): return self._order # def extend(self, extents): # for axes, extent in extents: # # the final axis may be new information # tgt = self.follow(axes[:-1]) # if axes: # subtgt = tgt.links.get(axes[-1]) # if subtgt is None: # tgt.links[axes[-1]] = extent # continue # else: # tgt = subtgt # tgt.update(extent) # class XmrsPath(XmrsPathNode): # def __init__(self, nodeid, pred, context=None, links=None): # XmrsPathNode.__init__(self, nodeid, pred, context, links) # self.calculate_metrics() # @classmethod # def from_node(cls, node): # return cls(node.nodeid, node.pred, node.context, node.links) # def calculate_metrics(self): # self._distance = {} # self._depth = {} # self._preds = {} # self._calculate_metrics(self, 0, 0) # def _calculate_metrics(self, curnode, depth, distance): # if curnode is None: # return # # add pred index # try: # self._preds[curnode.pred].append(curnode) # except KeyError: # self._preds[curnode.pred] = [] # self._preds[curnode.pred].append(curnode) # _id = id(curnode) # # we may re-update if we're on a shorter path # updated = False # if _id not in self._distance or distance < self._distance[_id]: # self._distance[_id] = distance # updated = True # if _id not in self._depth or abs(depth) < abs(self._depth[_id]): # self._depth[_id] = depth # updated = True # if not updated: # return # for link in curnode.links: # if link.endswith('>'): # self._calculate_metrics(curnode[link], depth+1, distance+1) # elif link.startswith('<'): # 
self._calculate_metrics(curnode[link], depth-1, distance+1) # else: # self._calculate_metrics(curnode[link], depth, distance+1) # def distance(self, node=None): # if node is None: # return max(self._distance.values()) # else: # return self._distance[id(node)] # def depth(self, node=None, direction=max): # if node is None: # return direction(self._depth.values()) # return self._depth[id(node)] # def select(self, pred): # return self._preds.get(pred, []) # # def extend(self, extents, base_axes=None): # # if base_axes is None: # # base_axes = [] # # base = self.follow(base_axes) # # base.extend(extents) # # self.calculate_metrics() # HELPER FUNCTIONS ########################################################## def get_nodeids(node): yield node.nodeid for link, path_node in node: if path_node is None: continue for nid in get_nodeids(path_node): yield nid def get_preds(node): yield node.pred for link, path_node in node: if path_node is None: continue for pred in get_preds(path_node): yield pred def copy(node, depth=-1, flags=ALL): nodeid = node.nodeid if (flags & NODEID) else None pred = node.pred if (flags & PRED) else None context = dict( (k, v) for k, v in node.context.items() if (k == 'varsort' and (flags & VARSORT)) or (k.startswith('@') and (flags & VARPROPS)) or (k[0] in (':', '<') and (flags & SUBPATHS)) ) links = {} if depth != 0: for axis, tgt in node.links.items(): if tgt is None: if _valid_axis(axis, flags): links[axis] = None elif (flags & SUBPATHS): links[axis] = copy(tgt, depth-1, flags=flags) n = XmrsPathNode(nodeid, pred, context=context, links=links) return n def _valid_axis(axis, flags): return ( (axis.endswith('>') and (flags & OUTAXES)) or (axis.startswith('<') and (flags & INAXES)) or (axis.endswith(':') and (flags & UNDIRECTEDAXES)) ) def follow(obj, axes): axes = list(reversed(axes)) while axes: obj = obj[axes.pop()] return obj def merge(base, obj, location=None): """ merge is like XmrsPathNode.update() except it raises errors on unequal non-None 
values. """ # pump object to it's location with dummy nodes while location: axis = location.pop() obj = XmrsPathNode(None, None, links={axis: obj}) if base is None: return obj _merge(base, obj) # if isinstance(base, XmrsPath): # base.calculate_metrics() return base def _merge(basenode, objnode): if basenode is None or objnode is None: return basenode or objnode basenode.nodeid = _merge_atom(basenode.nodeid, objnode.nodeid) basenode.pred = _merge_atom(basenode.pred, objnode.pred) baseside = basenode.context for k, v in objnode.context.items(): if k[0] in (':', '<'): # subpath context; need to recurse baseside[k] = _merge(baseside.get(k), v) else: baseside[k] = _merge_atom(baseside.get(k), v) baseside = basenode.links for axis, tgt in objnode.links.items(): baseside[axis] = _merge(baseside.get(axis), tgt) return basenode def _merge_atom(obj1, obj2): if obj1 is None or obj1 == STAR: return obj2 or obj1 # or obj1 in case obj2 is None and obj1 == STAR elif obj2 is None or obj2 == STAR: return obj1 or obj2 # or obj2 in case obj1 is None and obj2 == STAR elif obj1 == obj2: return obj1 else: raise XmrsPathError( 'Cannot merge MrsPath atoms: {} and {}'.format(obj1, obj2) ) # WRITING PATHS ############################################################# def format(node, sort_key=axis_sort, depth=-1, flags=DEFAULT): if node is None: return '' symbol = '' if (flags & PRED) and node.pred is not None: symbol = str(node.pred) nodeid = '' if (flags & NODEID) and node.nodeid is not None: nodeid = '#{}'.format(node.nodeid) if not (symbol or nodeid): symbol = STAR context = _format_context(node, sort_key, depth, flags) subpath = '' if depth != 0: subpath = _format_subpath(node, sort_key, depth-1, flags) return '{}{}{}{}'.format(symbol, nodeid, context, subpath) def _format_context(node, sort_key, depth, flags): context = '' if (flags & CONTEXT) and node.context: contexts = [] for k in sorted(node.context, key=_context_sort): v = node.context[k] if k == 'varsort': if (flags & VARSORT): 
contexts.append(v) elif k[0] == '@': if (flags & VARPROPS): contexts.append('{}={}'.format(k, v)) elif k[0] in (':', '<'): if v is not None and (flags & SUBPATHS): v = format(v, sort_key, depth-1, flags) elif _valid_axis(k, flags): v = '' else: continue contexts.append('{}{}'.format(k, v)) else: raise XmrsPathError('Invalid context key: {}'.format(k)) if contexts: context = '[{}]'.format(' & '.join(contexts)) return context def _format_subpath(node, sort_key, depth, flags): links = [] axislist = _prepare_axes(node, sort_key) for axis, tgt in axislist: if tgt is not None and (flags & SUBPATHS): tgt = format(tgt, sort_key, depth, flags) elif _valid_axis(axis, flags): tgt = '' else: continue links.append('{}{}'.format(axis, tgt)) if len(links) > 1: subpath = '({})'.format(' & '.join(links)) else: subpath = ''.join(links) # possibly just '' return subpath def _prepare_axes(node, sort_key): """ Sort axes and combine those that point to the same target and go in the same direction. """ links = node.links o_links = node._overlapping_links overlap = {ax2 for ax in links for ax2 in o_links.get(ax, [])} axes = [] for axis in sorted(links.keys(), key=sort_key): if axis in overlap: continue tgt = links[axis] if axis in o_links: s, e = axis[0], axis[-1] axis = '%s%s%s' % ( s, '&'.join(a[1:-1] for a in [axis] + o_links[axis]), e ) axes.append((axis, tgt)) return axes def _context_sort(k): return (k != 'varsort', k[0] in (':', '<'), k) # FINDING PATHS ############################################################# def find_paths( xmrs, nodeids=None, method='top-down', flags=DEFAULT, max_distance=-1, subpath_select=list): warnings.warn('find_paths() is deprecated; use explore()', DeprecationWarning) return explore(xmrs, nodeids, method, flags, max_distance, subpath_select) def explore( xmrs, nodeids=None, method='top-down', flags=DEFAULT, max_distance=-1, subpath_select=list): if nodeids is None: nodeids = [0] + xmrs._nodeids # 0 for TOP stepmap = defaultdict(lambda: 
defaultdict(set)) for startnid in nodeids: if startnid in stepmap: continue # start node already done for start, end, axis in walk(xmrs, start=startnid, method=method): stepmap[start][end].add(axis) # if axis in stepmap.get(start, {}): # continue # current node already done # stepmap[start][axis] = end for nodeid in nodeids: for node in _explore( xmrs, stepmap, nodeid, flags, max_distance, subpath_select, set()): #yield XmrsPath.from_node(node) yield node def _explore( xmrs, stepmap, start, flags, max_distance, subpath_select, visited): if start in visited: return visited = visited.union([start]) ctext = None if start == 0: symbol = TOP else: symbol = xmrs.pred(start) if (flags & CONTEXT): ctext = {} # it's not guaranteed that an EP has an intrinsic variable if IVARG_ROLE in xmrs.args(start): iv = xmrs.args(start)[IVARG_ROLE] varsort = var_sort(iv) ctext['varsort'] = varsort props = xmrs.properties(iv) ctext.update([ ('@{}'.format(k), v) for k, v in props.items() ]) steps = stepmap.get(start, {}) # this is {end_nodeid: set(axes), ...} # remove :/EQ: if necessary and generate mapping for overlapping axes overlap = {} for end, axes in steps.items(): if (':/EQ:' in axes and (not (flags & UNDIRECTEDAXES) or (end in visited and ':/EQ:' in stepmap[end].get(start, [])))): axes.difference_update([':/EQ:']) if len(axes) > 1: # don't sort if this significantly hurts performance axes = sorted(axes, key=axis_sort) s, e = axes[0][0], axes[0][-1] # axis direction characters overlap[axes[0]] = [ ax for ax in axes[1:] if ax[0] == s and ax[-1] == e ] # exclude TOP from being its own path node if start != 0: n = XmrsPathNode( start, symbol, context=ctext, links={axis: None for axes in steps.values() for axis in axes} ) n._overlapping_links = overlap yield n # keep tuples of axes instead of mapping each unique axis. This is # for things like coordination where more than one axis point to the # same thing, and we don't want to enumerate all possibilities. 
subpaths = {} for tgtnid, axes in steps.items(): if tgtnid == 0: # assume only one axis going to TOP (can there be more than 1?) axis = next(iter(axes)) subpaths[(axis,)] = [XmrsPathNode(tgtnid, TOP)] elif (flags & SUBPATHS) and max_distance != 0: if not axes: # maybe an :/EQ: was pruned and nothing remained continue sps = subpath_select(list( _explore(xmrs, stepmap, tgtnid, flags, max_distance-1, subpath_select, visited) )) if not (flags & BALANCED): sps.append(None) subpaths[tuple(axes)] = sps if subpaths: # beware of magic below: # links maps a tuple of axes (usually just one axis, like # (ARG1/NEQ,)) to a list of subpaths. # This gets the product of subpaths for all axes, then remaps # axis tuples to the appropriate subpaths. E.g. if subpaths is # {(':ARG1/NEQ>',): [def], # (':ARG2/NEQ>',':ARG3/EQ>'): [ghi, jkl]} # then alts is # [{(':ARG1/NEQ>',): def, ('ARG2/NEQ>', ':ARG3/EQ>'): ghi}, # {(':ARG1/NEQ>',): def, ('ARG2/NEQ>', ':ARG3/EQ>'): jkl}] alts = list(map( lambda z: dict(zip(subpaths.keys(), z)), product(*subpaths.values()) )) # now enumerate the tupled axes for alt in alts: ld = dict((a, tgt) for axes, tgt in alt.items() for a in axes) # don't output all null axes (already done above) if set(ld.values()) != {None}: n = XmrsPathNode(start, symbol, context=ctext, links=ld) n._overlapping_links = overlap yield n # READING PATHS ############################################################# tokenizer = re.compile( # two kinds of strings: "double quoted", and 'open-single-quoted r'(?P<string>"[^"\\]*(?:\\.[^"\\]*)*"|\'[^ \\]*(?:\\.[^ \\]*)*)' # axes should be like :X/Y>, <X/Y:, :X/Y:, :X/Y&A/B>, etc. r'|(?P<axis>[<:][^/]*/(?:[HN]?EQ|H)(?:&[^/]*/(?:[HN]?EQ|H))*[:>])' r'|(?P<symbol>[^\s#:><@=()\[\]&|]+)' # non-breaking characters r'|(?P<nodeid>#\d+)' # nodeids (e.g. 
#10003) r'|(?P<punc>[@=()\[\]&|])' # meaningful punctuation ) def read_path(path_string): toks = deque((mo.lastgroup, mo.group()) for mo in tokenizer.finditer(path_string)) try: node = _read_node(toks) except IndexError: raise XmrsPathError('Unexpected termination for path: {}' .format(path_string)) if node is None: raise XmrsPathError('Error reading path: {}' .format(path_string)) elif toks: raise XmrsPathError('Unconsumed tokens: {}' .format(', '.join(tok[1] for tok in toks))) #path = XmrsPath.from_node(startnode) #return path return node def _read_node(tokens): if not tokens or tokens[0][0] not in {'string', 'symbol', 'nodeid'}: return None # A node can be a pred, a nodeid, or both (in that order). This # means two 'if's, not 'if-else'. mtype, mtext = tokens.popleft() pred = nodeid = None if mtype in ('string', 'symbol'): if mtext == TOP or mtext == STAR: pred = mtext else: pred = Pred.stringpred(mtext) if tokens and tokens[0][0] == 'nodeid': mtype, mtext = tokens.popleft() if mtype == 'nodeid': nodeid = int(mtext[1:]) # get rid of the initial # character context = _read_context(tokens) links = _read_links(tokens) return XmrsPathNode( nodeid, pred, context=context, links=links ) def _read_context(tokens): if not tokens or tokens[0] != ('punc', '['): return None _, _ = tokens.popleft() # this is the ('punc', '[') # context can be a varsort, an @attribute, or an axis context = {} for token in _read_conjunction(tokens): mtype, mtext = token if mtype == 'symbol': context['varsort'] = mtext elif token == ('punc', '@'): _, attr = tokens.popleft() assert tokens.popleft() == ('punc', '=') _, val = tokens.popleft() context['@{}'.format(attr)] = val elif mtype == 'axis': tgt = _read_node(tokens) start, end = mtext[0], mtext[-1] axes = mtext[1:-1].split('&') for ax in axes: ax = '%s%s%s' % (start, ax.strip(), end) context[ax] = tgt else: raise XmrsPathError( 'Invalid conjunct in context: {}'.format(mtext) ) assert tokens.popleft() == ('punc', ']') return context def 
_read_links(tokens): if not tokens or (tokens[0][0] != 'axis' and tokens[0][1] != '('): return None mtype, mtext = tokens.popleft() # it could be a single :axis if mtype == 'axis': return {mtext: _read_node(tokens)} # or (:many :axes) assert mtext == '(' links = {} for token in _read_conjunction(tokens): mtype, mtext = token if mtype == 'axis': tgt = _read_node(tokens) start, end = mtext[0], mtext[-1] axes = mtext[1:-1].split('&') for ax in axes: ax = '%s%s%s' % (start, ax.strip(), end) links[ax] = tgt else: raise XmrsPathError('Invalid conjunct in axes: {}'.format(mtext)) assert tokens.popleft() == ('punc', ')') return links def _read_conjunction(tokens): yield tokens.popleft() while tokens[0] == ('punc', '&'): tokens.popleft() # the & character yield tokens.popleft() # # SEARCHING PATHS ########################################################### def find_node(base, node=None, nodeid=None, pred=None, context=None): matches = [] if node is None: node = XmrsPathNode(nodeid, pred, context=context) if _nodes_unifiable(base, node): matches.append(([], base)) # there's no cycle detection below because paths are (supposedly) trees agenda = [([a], sp) for a, sp in base.links.items() if sp is not None] while agenda: axes, base = agenda.pop() if _nodes_unifiable(base, node): matches.append((axes, base)) agenda.extend( (axes+[a], sp) for a, sp in base.links.items() if sp is not None ) return matches def _nodes_unifiable(n1, n2): if n1 is None or n2 is None: return True # nodeids same or one/both is None if not (n1.nodeid is None or n2.nodeid is None or n1.nodeid == n2.nodeid): return False # preds same or one/both is None or STAR if not (n1.pred in (None, STAR) or n2.pred in (None, STAR) or n1.pred == n2.pred): return False # context can be properties or subpaths for k, v2 in n2.context.items(): if k[0] in (':', '<'): # subpaths must be recursively unifiable if not _nodes_unifiable(n1.context.get(k), v2): return False else: # properties just need to be equal v1 = 
n1.context.get(k) if not (v1 is None or v2 is None or v1 == v2): return False # links are just like context subpaths if not all(_nodes_unifiable(n1.links.get(axis), sp2) for axis, sp2 in n2.links.items()): return False return True def match(pattern, p, flags=DEFAULT): if (flags & NODEID) and (pattern.nodeid != p.nodeid): return False if (flags & PRED): p1 = pattern.pred p2 = p.pred if not (p1 == STAR or p2 == STAR or p1 == p2): return False if (flags & CONTEXT): c1 = pattern.context c2 = p.context check_sp = flags & SUBPATHS check_vs = flags & VARSORT check_vp = flags & VARPROPS for k, a in c1.items(): if k[0] in (':', '<') and check_sp: b = c2.get(k) if not (a is None or b is None or match(a, b)): return False elif (k == 'varsort' and check_vs) or check_vp: if c2.get(k, a) != a: return False if (flags & SUBPATHS): for axis, pattern_ in pattern.links.items(): p_ = p.links.get(axis) if not (pattern_ is None or p_ is None or match(pattern_, p_)): return False return True def subpaths(p): all_sps = [] sps = list(_subpaths(p)) sps = sps[1:] # the first subpath is the same as the original return sps def _subpaths(p): if p is None: return sps = {ax: list(_subpaths(tgt)) + [None] for ax, tgt in p.links.items()} # this fancy bit is the same as in _explore() alts = list(map( lambda z: dict(zip(sps.keys(), z)), product(*sps.values()) )) for alt in alts: ld = dict((axis, tgt) for axis, tgt in alt.items()) n = XmrsPathNode(p.nodeid, p.pred, context=p.context, links=ld) n._overlapping_links = p._overlapping_links yield n # BUILDING XMRS######################################################### def reify_xmrs(path): #from delphin.mrs import simpledmrs # if hasattr(path, 'start'): # path = path.start if path.pred == TOP: assert len(path.links) == 1 axis, path = list(path.links.items())[0] else: axis = ':/H>' # just pretend there was a TOP:/H> if path is None: return for upath, _, _ in _unique_paths(path, defaultdict(set), 10000): m = _reify_xmrs(upath, top_axis=axis) if 
m.is_well_formed(): yield m #print(simpledmrs.dumps_one(m, pretty_print=True)) def _unique_paths(path, nidmap, nextnid): if path is None: yield (path, nidmap, nextnid) return # first get possibilities for the current node node_repr = format(path, depth=0, flags=PRED|CONTEXT) # if already has nodeid, use it; otherwise create or use from nidmap if path.nodeid is None: nids = [nextnid] # only consider existing nids if they aren't quantifiers because # we expect to see many quantifiers but they are all unique if not path.pred.is_quantifier(): nids += list(nidmap.get(node_repr, [])) nextnid += 1 else: nids = [path.nodeid] alts = [] for nid in nids: alts.append(( _new_node(path, nid), _new_nidmap(nidmap, node_repr, nid), nextnid )) # then for each alternative, find possible descendants agenda = list(path.links.items()) while agenda: _alts = [] axis, tgt = agenda.pop() for node, nm, nn in alts: for subpath, _nm, _nn in _unique_paths(tgt, nm, nn): n = copy(node) n.links[axis] = subpath _alts.append((n, _nm, _nn)) alts = _alts for alt in alts: yield alt def _new_node(node, nid=None): new_node = copy(node, depth=0) if nid is not None: new_node.nodeid = nid return new_node def _new_nidmap(nidmap, node_repr, nid): nm = defaultdict(set, {k: v.copy() for k, v in nidmap.items()}) nm[node_repr].add(nid) return nm def _reify_xmrs(path, top_axis=None): nodes = {} links = [] agenda = [(0, top_axis or ':/H>', path)] while agenda: srcnid, axis, tgt = agenda.pop() if tgt is None: continue # add link to tgt rargname, post = axis.strip(':<>').split('/') if axis.startswith('<'): links.append(Link(tgt.nodeid, srcnid, rargname or None, post)) elif axis.endswith('>'): links.append(Link(srcnid, tgt.nodeid, rargname or None, post)) elif axis == ':/EQ:': links.append(Link(srcnid, tgt.nodeid, None, 'EQ')) else: raise XmrsPathError('Invalid axis: {}'.format(axis)) # add node if necessary (note, currently does not update pred # or sortinfo if encountered twice) if tgt.nodeid not in nodes: sortinfo 
= dict( [('cvarsort', tgt.context.get('varsort') or 'u')] + [(k.lstrip('@'), v) for k, v in tgt.context.items() if k.startswith('@')] ) nodes[tgt.nodeid] = Node(tgt.nodeid, tgt.pred, sortinfo=sortinfo) # add new agenda for tgt for axis, next_tgt in tgt.links.items(): agenda.append((tgt.nodeid, axis, next_tgt)) return Dmrs(list(nodes.values()), links)
[ "delphin.mrs.Node", "delphin.mrs.Pred.stringpred", "collections.defaultdict", "warnings.warn", "delphin.mrs.Link", "re.compile" ]
[((19406, 19647), 're.compile', 're.compile', (['"""(?P<string>"[^"\\\\\\\\]*(?:\\\\\\\\.[^"\\\\\\\\]*)*"|\\\\\'[^ \\\\\\\\]*(?:\\\\\\\\.[^ \\\\\\\\]*)*)|(?P<axis>[<:][^/]*/(?:[HN]?EQ|H)(?:&[^/]*/(?:[HN]?EQ|H))*[:>])|(?P<symbol>[^\\\\s#:><@=()\\\\[\\\\]&|]+)|(?P<nodeid>#\\\\d+)|(?P<punc>[@=()\\\\[\\\\]&|])"""'], {}), '(\n \'(?P<string>"[^"\\\\\\\\]*(?:\\\\\\\\.[^"\\\\\\\\]*)*"|\\\\\\\'[^ \\\\\\\\]*(?:\\\\\\\\.[^ \\\\\\\\]*)*)|(?P<axis>[<:][^/]*/(?:[HN]?EQ|H)(?:&[^/]*/(?:[HN]?EQ|H))*[:>])|(?P<symbol>[^\\\\s#:><@=()\\\\[\\\\]&|]+)|(?P<nodeid>#\\\\d+)|(?P<punc>[@=()\\\\[\\\\]&|])\'\n )\n', (19416, 19647), False, 'import re\n'), ((2731, 2748), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (2742, 2748), False, 'from collections import deque, defaultdict\n'), ((14426, 14504), 'warnings.warn', 'warnings.warn', (['"""find_paths() is deprecated; use explore()"""', 'DeprecationWarning'], {}), "('find_paths() is deprecated; use explore()', DeprecationWarning)\n", (14439, 14504), False, 'import warnings\n'), ((27210, 27226), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (27221, 27226), False, 'from collections import deque, defaultdict\n'), ((14859, 14875), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (14870, 14875), False, 'from collections import deque, defaultdict\n'), ((20893, 20915), 'delphin.mrs.Pred.stringpred', 'Pred.stringpred', (['mtext'], {}), '(mtext)\n', (20908, 20915), False, 'from delphin.mrs import Node, Link, Pred, Dmrs\n'), ((30038, 30083), 'delphin.mrs.Node', 'Node', (['tgt.nodeid', 'tgt.pred'], {'sortinfo': 'sortinfo'}), '(tgt.nodeid, tgt.pred, sortinfo=sortinfo)\n', (30042, 30083), False, 'from delphin.mrs import Node, Link, Pred, Dmrs\n'), ((29308, 29356), 'delphin.mrs.Link', 'Link', (['tgt.nodeid', 'srcnid', '(rargname or None)', 'post'], {}), '(tgt.nodeid, srcnid, rargname or None, post)\n', (29312, 29356), False, 'from delphin.mrs import Node, Link, Pred, Dmrs\n'), ((29416, 29464), 
'delphin.mrs.Link', 'Link', (['srcnid', 'tgt.nodeid', '(rargname or None)', 'post'], {}), '(srcnid, tgt.nodeid, rargname or None, post)\n', (29420, 29464), False, 'from delphin.mrs import Node, Link, Pred, Dmrs\n'), ((29521, 29557), 'delphin.mrs.Link', 'Link', (['srcnid', 'tgt.nodeid', 'None', '"""EQ"""'], {}), "(srcnid, tgt.nodeid, None, 'EQ')\n", (29525, 29557), False, 'from delphin.mrs import Node, Link, Pred, Dmrs\n')]
# for building the exe: # python setup.py py2exe --includes sip from distutils.core import setup from py2exe.build_exe import py2exe from glob import glob import py2exe import sys sys.path.append("C:\\Program Files (x86)\\Microsoft Visual Studio 9.0\\VC\\redist\\x86\\Microsoft.VC90.CRT") data_files = [("Microsoft.VC90.CRT", glob(r'C:\Program Files (x86)\Microsoft Visual Studio 9.0\VC\redist\x86\Microsoft.VC90.CRT\*.*'))] setup( data_files=data_files, console=[{"script": "nirviz.py"}] )
[ "sys.path.append", "glob.glob", "distutils.core.setup" ]
[((191, 309), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 9.0\\\\VC\\\\redist\\\\x86\\\\Microsoft.VC90.CRT"""'], {}), "(\n 'C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 9.0\\\\VC\\\\redist\\\\x86\\\\Microsoft.VC90.CRT'\n )\n", (206, 309), False, 'import sys\n'), ((438, 501), 'distutils.core.setup', 'setup', ([], {'data_files': 'data_files', 'console': "[{'script': 'nirviz.py'}]"}), "(data_files=data_files, console=[{'script': 'nirviz.py'}])\n", (443, 501), False, 'from distutils.core import setup\n'), ((338, 450), 'glob.glob', 'glob', (['"""C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 9.0\\\\VC\\\\redist\\\\x86\\\\Microsoft.VC90.CRT\\\\*.*"""'], {}), "(\n 'C:\\\\Program Files (x86)\\\\Microsoft Visual Studio 9.0\\\\VC\\\\redist\\\\x86\\\\Microsoft.VC90.CRT\\\\*.*'\n )\n", (342, 450), False, 'from glob import glob\n')]
import os import tempfile import unittest import logging from pyidf import ValidationLevel import pyidf from pyidf.idf import IDF from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentDuct log = logging.getLogger(__name__) class TestAirflowNetworkDistributionComponentDuct(unittest.TestCase): def setUp(self): self.fd, self.path = tempfile.mkstemp() def tearDown(self): os.remove(self.path) def test_create_airflownetworkdistributioncomponentduct(self): pyidf.validation_level = ValidationLevel.error obj = AirflowNetworkDistributionComponentDuct() # alpha var_name = "Name" obj.name = var_name # real var_duct_length = 0.0001 obj.duct_length = var_duct_length # real var_hydraulic_diameter = 0.0001 obj.hydraulic_diameter = var_hydraulic_diameter # real var_cross_section_area = 0.0001 obj.cross_section_area = var_cross_section_area # real var_surface_roughness = 0.0001 obj.surface_roughness = var_surface_roughness # real var_coefficient_for_local_dynamic_loss_due_to_fitting = 0.0 obj.coefficient_for_local_dynamic_loss_due_to_fitting = var_coefficient_for_local_dynamic_loss_due_to_fitting # real var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air = 0.0001 obj.overall_heat_transmittance_coefficient_ufactor_from_air_to_air = var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air # real var_overall_moisture_transmittance_coefficient_from_air_to_air = 0.0001 obj.overall_moisture_transmittance_coefficient_from_air_to_air = var_overall_moisture_transmittance_coefficient_from_air_to_air idf = IDF() idf.add(obj) idf.save(self.path, check=False) with open(self.path, mode='r') as f: for line in f: log.debug(line.strip()) idf2 = IDF(self.path) self.assertEqual(idf2.airflownetworkdistributioncomponentducts[0].name, var_name) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].duct_length, var_duct_length) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].hydraulic_diameter, 
var_hydraulic_diameter) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].cross_section_area, var_cross_section_area) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].surface_roughness, var_surface_roughness) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].coefficient_for_local_dynamic_loss_due_to_fitting, var_coefficient_for_local_dynamic_loss_due_to_fitting) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].overall_heat_transmittance_coefficient_ufactor_from_air_to_air, var_overall_heat_transmittance_coefficient_ufactor_from_air_to_air) self.assertAlmostEqual(idf2.airflownetworkdistributioncomponentducts[0].overall_moisture_transmittance_coefficient_from_air_to_air, var_overall_moisture_transmittance_coefficient_from_air_to_air)
[ "os.remove", "tempfile.mkstemp", "pyidf.idf.IDF", "pyidf.natural_ventilation_and_duct_leakage.AirflowNetworkDistributionComponentDuct", "logging.getLogger" ]
[((232, 259), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (249, 259), False, 'import logging\n'), ((382, 400), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {}), '()\n', (398, 400), False, 'import tempfile\n'), ((434, 454), 'os.remove', 'os.remove', (['self.path'], {}), '(self.path)\n', (443, 454), False, 'import os\n'), ((594, 635), 'pyidf.natural_ventilation_and_duct_leakage.AirflowNetworkDistributionComponentDuct', 'AirflowNetworkDistributionComponentDuct', ([], {}), '()\n', (633, 635), False, 'from pyidf.natural_ventilation_and_duct_leakage import AirflowNetworkDistributionComponentDuct\n'), ((1816, 1821), 'pyidf.idf.IDF', 'IDF', ([], {}), '()\n', (1819, 1821), False, 'from pyidf.idf import IDF\n'), ((2013, 2027), 'pyidf.idf.IDF', 'IDF', (['self.path'], {}), '(self.path)\n', (2016, 2027), False, 'from pyidf.idf import IDF\n')]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('syntacticframes', '0006_auto_20141103_0939'), ] operations = [ migrations.AlterModelOptions( name='verbnetclass', options={'ordering': ['levin_class', 'name']}, ), migrations.AlterModelOptions( name='verbnetframeset', options={'ordering': ['tree_id']}, ), migrations.AlterField( model_name='verbnetframeset', name='tree_id', field=models.PositiveSmallIntegerField(), preserve_default=True, ), ]
[ "django.db.migrations.AlterModelOptions", "django.db.models.PositiveSmallIntegerField" ]
[((259, 360), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""verbnetclass"""', 'options': "{'ordering': ['levin_class', 'name']}"}), "(name='verbnetclass', options={'ordering': [\n 'levin_class', 'name']})\n", (287, 360), False, 'from django.db import models, migrations\n'), ((400, 492), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""verbnetframeset"""', 'options': "{'ordering': ['tree_id']}"}), "(name='verbnetframeset', options={'ordering': [\n 'tree_id']})\n", (428, 492), False, 'from django.db import models, migrations\n'), ((643, 677), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {}), '()\n', (675, 677), False, 'from django.db import models, migrations\n')]
import nose.tools as nt import numpy as np import theano import theano.tensor as T import treeano import treeano.nodes as tn from treeano.sandbox.nodes import wta_sparisty as wta fX = theano.config.floatX def test_wta_spatial_sparsity_node_serialization(): tn.check_serialization(wta.WTASpatialSparsityNode("a")) def test_wta_sparsity_node_serialization(): tn.check_serialization(wta.WTASparsityNode("a")) def test_wta_spatial_sparsity_node(): network = tn.SequentialNode( "s", [tn.InputNode("i", shape=(2, 2, 2, 2)), wta.WTASpatialSparsityNode("a")] ).network() fn = network.function(["i"], ["s"]) x = np.arange(16).reshape(2, 2, 2, 2).astype(fX) ans = x.copy() ans[..., 0] = 0 ans[..., 0, :] = 0 np.testing.assert_allclose(fn(x)[0], ans) def test_wta_sparsity_node(): network = tn.SequentialNode( "s", [tn.InputNode("i", shape=(2, 2, 2, 2)), wta.WTASparsityNode("a", percentile=0.5)] ).network() fn = network.function(["i"], ["s"]) x = np.arange(16).reshape(2, 2, 2, 2).astype(fX) ans = x.copy() ans[..., 0] = 0 ans[..., 0, :] = 0 ans[0] = 0 res = fn(x)[0] np.testing.assert_allclose(res, ans)
[ "treeano.sandbox.nodes.wta_sparisty.WTASparsityNode", "treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode", "treeano.nodes.InputNode", "numpy.arange", "numpy.testing.assert_allclose" ]
[((1234, 1270), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['res', 'ans'], {}), '(res, ans)\n', (1260, 1270), True, 'import numpy as np\n'), ((290, 321), 'treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode', 'wta.WTASpatialSparsityNode', (['"""a"""'], {}), "('a')\n", (316, 321), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((396, 420), 'treeano.sandbox.nodes.wta_sparisty.WTASparsityNode', 'wta.WTASparsityNode', (['"""a"""'], {}), "('a')\n", (415, 420), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((517, 554), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': '(2, 2, 2, 2)'}), "('i', shape=(2, 2, 2, 2))\n", (529, 554), True, 'import treeano.nodes as tn\n'), ((565, 596), 'treeano.sandbox.nodes.wta_sparisty.WTASpatialSparsityNode', 'wta.WTASpatialSparsityNode', (['"""a"""'], {}), "('a')\n", (591, 596), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((663, 676), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (672, 676), True, 'import numpy as np\n'), ((934, 971), 'treeano.nodes.InputNode', 'tn.InputNode', (['"""i"""'], {'shape': '(2, 2, 2, 2)'}), "('i', shape=(2, 2, 2, 2))\n", (946, 971), True, 'import treeano.nodes as tn\n'), ((982, 1022), 'treeano.sandbox.nodes.wta_sparisty.WTASparsityNode', 'wta.WTASparsityNode', (['"""a"""'], {'percentile': '(0.5)'}), "('a', percentile=0.5)\n", (1001, 1022), True, 'from treeano.sandbox.nodes import wta_sparisty as wta\n'), ((1089, 1102), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (1098, 1102), True, 'import numpy as np\n')]
# Copyright (c) 2021-2022, <NAME>, Jonxslays # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from pathlib import Path import pytest import len8 from len8.errors import BadLines, InvalidPath TEST_FILE = Path(__file__).parent / "testdata.py" TEST_NON_VALID = TEST_FILE.parent / "nsx_simple_app.nsx" TEST_TOML_CONFIG = TEST_FILE.parent / "test.toml" TEST_LOL_TOML_CONFIG = TEST_FILE.parent / "test_lol.toml" @pytest.fixture() # type: ignore def default_checker() -> len8.Checker: return len8.Checker() @pytest.fixture() # type: ignore def extended_checker() -> len8.Checker: return len8.Checker( exclude=["custom", Path("another")], extend=2, strict=True ) @pytest.fixture() # type: ignore def custom_checker() -> len8.Checker: return len8.Checker(max_code_length=100, max_docs_length=80) @pytest.fixture() # type: ignore def valid_config() -> len8.Config: return len8.Config(TEST_TOML_CONFIG) def test_default_init(default_checker: len8.Checker) -> None: assert isinstance(default_checker, len8.Checker) assert default_checker.exclude == [ Path(".nox"), Path(".venv"), Path("venv"), ] assert default_checker.extend == 0 assert default_checker.bad_lines is None assert default_checker.strict is False assert default_checker.code_length == 79 assert default_checker.docs_length == 72 def test_extended_init(extended_checker: len8.Checker) -> None: assert isinstance(extended_checker, len8.Checker) assert extended_checker.exclude == [ Path(".nox"), Path(".venv"), Path("venv"), Path("custom"), Path("another"), ] assert extended_checker.extend == 2 assert extended_checker.bad_lines is None assert extended_checker.strict is True assert extended_checker.code_length == 99 assert extended_checker.docs_length == 72 def test_custom_init(custom_checker: len8.Checker) -> None: assert isinstance(custom_checker, len8.Checker) assert custom_checker.exclude == [ Path(".nox"), Path(".venv"), Path("venv"), ] assert custom_checker.extend == 0 assert custom_checker.bad_lines is None assert custom_checker.strict is False assert custom_checker.code_length == 100 assert custom_checker.docs_length == 80 
def test_bad_inits(default_checker: len8.Checker) -> None: with pytest.raises(ValueError) as exc: len8.Checker(extend=5) assert f"{exc.value}" == "'extend' should be between 0 and 2 inclusive" with pytest.raises(ValueError) as exc: len8.Checker(max_code_length=-1) assert f"{exc.value}" == "line lengths cannot be less than 0" with pytest.raises(ValueError) as exc: len8.Checker(max_docs_length=-1) assert f"{exc.value}" == "line lengths cannot be less than 0" with pytest.raises(ValueError) as exc: default_checker.extend = 5 assert f"{exc.value}" == "'extend' should be between 0 and 2 inclusive" def test_setting_lengths(default_checker: len8.Checker) -> None: default_checker.set_lengths(code=100, docs=80) assert default_checker.code_length == 100 assert default_checker.docs_length == 80 default_checker.set_lengths(docs=50) assert default_checker.code_length == 100 assert default_checker.docs_length == 50 default_checker.set_lengths(code=None) assert default_checker.code_length == 79 assert default_checker.docs_length == 50 def test_non_strict_output(default_checker: len8.Checker) -> None: output = ( f"\33[1m{TEST_FILE}\33[0m\n" " * Line 4 (76/72)\n" " * Line 5 (83/79)\n" " * Line 11 (78/72)\n\n" f"\33[1m\33[31mFound 3 problems\33[0m" ) assert default_checker.check(TEST_FILE) == output def test_non_strict_output_extended(default_checker: len8.Checker) -> None: default_checker.extend = 2 output = ( f"\33[1m{TEST_FILE}\33[0m\n" " * Line 4 (76/72)\n" " * Line 11 (78/72)\n\n" f"\33[1m\33[31mFound 2 problems\33[0m" ) assert default_checker.check(TEST_FILE) == output assert default_checker.check(TEST_FILE.parent) == output def test_strict_output(default_checker: len8.Checker) -> None: default_checker.strict = True output = ( f"\33[1m{TEST_FILE}\33[0m\n" " * Line 4 (76/72)\n" " * Line 5 (83/79)\n" " * Line 11 (78/72)\n\n" f"\33[1m\33[31mFound 3 problems\33[0m" ) with pytest.raises(BadLines) as exc: assert default_checker.check(TEST_FILE) == output assert f"{exc.value}" == 
output def test_update_excludes(default_checker: len8.Checker) -> None: default_checker.exclude = [Path("custom"), Path("another")] assert default_checker.exclude == [ Path(".nox"), Path(".venv"), Path("venv"), Path("custom"), Path("another"), ] def test_file_validation(default_checker: len8.Checker) -> None: assert default_checker._is_valid(TEST_FILE) assert not default_checker._is_valid(Path("README.md")) default_checker.exclude = [Path(__file__).parent] assert default_checker._is_valid(Path("len8").absolute()) assert not default_checker._is_valid(Path("tests").absolute()) default_checker.exclude = [Path("testdata.py")] assert default_checker._is_valid(Path("checker.py")) assert not default_checker._is_valid(Path("testdata.py")) def test_pathlib_conversion_on_check(default_checker: len8.Checker) -> None: output = ( f"\33[1m{TEST_FILE}\33[0m\n" " * Line 4 (76/72)\n" " * Line 5 (83/79)\n" " * Line 11 (78/72)\n\n" f"\33[1m\33[31mFound 3 problems\33[0m" ) assert default_checker.check(f"{TEST_FILE}") == output default_checker.strict = True with pytest.raises(InvalidPath) as exc: assert default_checker.check(f"invalid_dir") == output assert f"{exc.value}" == f"Error: 'invalid_dir' is not a valid path." 
def test_skip_invalid_files(default_checker: len8.Checker) -> None: try: default_checker.check(TEST_NON_VALID) except UnicodeDecodeError: pytest.fail() def test__check_dir(default_checker: len8.Checker) -> None: default_checker._check(Path("tests")) def test_valid_config_init(valid_config: len8.Config) -> None: assert isinstance(valid_config, len8.Config) assert valid_config.include == ["tests/testdata.py"] assert valid_config.exclude == ["tests/exclude.py"] assert valid_config.code_length == 88 assert valid_config.docs_length == 69 assert valid_config.strict is True assert valid_config.is_configured def test_invalid_config_init() -> None: with pytest.raises(len8.ConfigurationError) as e: _ = len8.Config(TEST_NON_VALID) assert str(e.value) == ( f"'{TEST_NON_VALID}' is not a valid configuration file." ) def test_config_missing_len8() -> None: config = len8.Config(TEST_LOL_TOML_CONFIG) assert not config.is_configured assert config.strict is False assert config.docs_length is None assert config.code_length is None assert config.include is None assert config.exclude is None def test_config_bad_toml_syntax() -> None: p = "./tests/invalid.toml" with open(p, "w") as f: f.write("[tool.invalid_toml_syntax\n") with pytest.raises(len8.ConfigurationError) as e: _ = len8.Config(p) assert "Failed to parse configuration file" in str(e.value) Path(p).unlink() def test_checker_from_invalid_config() -> None: with pytest.raises(len8.ConfigurationError) as e: _ = len8.Checker.from_config(TEST_NON_VALID) assert str(e.value) == ( f"'{TEST_NON_VALID}' is not a valid configuration file." ) def test_checker_from_valid_config(valid_config: len8.Config) -> None: checker = len8.Checker.from_config(valid_config) assert checker.code_length == 88 assert checker.docs_length == 69 assert checker.extend == 0 assert checker.strict is True assert checker.exclude == [ Path(".nox"), Path(".venv"), Path("venv"), Path("tests/exclude.py"), ]
[ "len8.Checker.from_config", "len8.Checker", "pytest.fixture", "pytest.fail", "pathlib.Path", "pytest.raises", "len8.Config" ]
[((1876, 1892), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1890, 1892), False, 'import pytest\n'), ((1977, 1993), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1991, 1993), False, 'import pytest\n'), ((2151, 2167), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2165, 2167), False, 'import pytest\n'), ((2290, 2306), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (2304, 2306), False, 'import pytest\n'), ((1959, 1973), 'len8.Checker', 'len8.Checker', ([], {}), '()\n', (1971, 1973), False, 'import len8\n'), ((2233, 2286), 'len8.Checker', 'len8.Checker', ([], {'max_code_length': '(100)', 'max_docs_length': '(80)'}), '(max_code_length=100, max_docs_length=80)\n', (2245, 2286), False, 'import len8\n'), ((2369, 2398), 'len8.Config', 'len8.Config', (['TEST_TOML_CONFIG'], {}), '(TEST_TOML_CONFIG)\n', (2380, 2398), False, 'import len8\n'), ((8427, 8460), 'len8.Config', 'len8.Config', (['TEST_LOL_TOML_CONFIG'], {}), '(TEST_LOL_TOML_CONFIG)\n', (8438, 8460), False, 'import len8\n'), ((9344, 9382), 'len8.Checker.from_config', 'len8.Checker.from_config', (['valid_config'], {}), '(valid_config)\n', (9368, 9382), False, 'import len8\n'), ((1670, 1684), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1674, 1684), False, 'from pathlib import Path\n'), ((3859, 3884), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3872, 3884), False, 'import pytest\n'), ((3901, 3923), 'len8.Checker', 'len8.Checker', ([], {'extend': '(5)'}), '(extend=5)\n', (3913, 3923), False, 'import len8\n'), ((4010, 4035), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4023, 4035), False, 'import pytest\n'), ((4052, 4084), 'len8.Checker', 'len8.Checker', ([], {'max_code_length': '(-1)'}), '(max_code_length=-1)\n', (4064, 4084), False, 'import len8\n'), ((4161, 4186), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4174, 4186), False, 'import pytest\n'), ((4203, 4235), 'len8.Checker', 
'len8.Checker', ([], {'max_docs_length': '(-1)'}), '(max_docs_length=-1)\n', (4215, 4235), False, 'import len8\n'), ((4312, 4337), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4325, 4337), False, 'import pytest\n'), ((5960, 5983), 'pytest.raises', 'pytest.raises', (['BadLines'], {}), '(BadLines)\n', (5973, 5983), False, 'import pytest\n'), ((6184, 6198), 'pathlib.Path', 'Path', (['"""custom"""'], {}), "('custom')\n", (6188, 6198), False, 'from pathlib import Path\n'), ((6200, 6215), 'pathlib.Path', 'Path', (['"""another"""'], {}), "('another')\n", (6204, 6215), False, 'from pathlib import Path\n'), ((6770, 6789), 'pathlib.Path', 'Path', (['"""testdata.py"""'], {}), "('testdata.py')\n", (6774, 6789), False, 'from pathlib import Path\n'), ((6828, 6846), 'pathlib.Path', 'Path', (['"""checker.py"""'], {}), "('checker.py')\n", (6832, 6846), False, 'from pathlib import Path\n'), ((7293, 7319), 'pytest.raises', 'pytest.raises', (['InvalidPath'], {}), '(InvalidPath)\n', (7306, 7319), False, 'import pytest\n'), ((7732, 7745), 'pathlib.Path', 'Path', (['"""tests"""'], {}), "('tests')\n", (7736, 7745), False, 'from pathlib import Path\n'), ((8186, 8224), 'pytest.raises', 'pytest.raises', (['len8.ConfigurationError'], {}), '(len8.ConfigurationError)\n', (8199, 8224), False, 'import pytest\n'), ((8243, 8270), 'len8.Config', 'len8.Config', (['TEST_NON_VALID'], {}), '(TEST_NON_VALID)\n', (8254, 8270), False, 'import len8\n'), ((8837, 8875), 'pytest.raises', 'pytest.raises', (['len8.ConfigurationError'], {}), '(len8.ConfigurationError)\n', (8850, 8875), False, 'import pytest\n'), ((8894, 8908), 'len8.Config', 'len8.Config', (['p'], {}), '(p)\n', (8905, 8908), False, 'import len8\n'), ((9058, 9096), 'pytest.raises', 'pytest.raises', (['len8.ConfigurationError'], {}), '(len8.ConfigurationError)\n', (9071, 9096), False, 'import pytest\n'), ((9115, 9155), 'len8.Checker.from_config', 'len8.Checker.from_config', (['TEST_NON_VALID'], {}), 
'(TEST_NON_VALID)\n', (9139, 9155), False, 'import len8\n'), ((2564, 2576), 'pathlib.Path', 'Path', (['""".nox"""'], {}), "('.nox')\n", (2568, 2576), False, 'from pathlib import Path\n'), ((2586, 2599), 'pathlib.Path', 'Path', (['""".venv"""'], {}), "('.venv')\n", (2590, 2599), False, 'from pathlib import Path\n'), ((2609, 2621), 'pathlib.Path', 'Path', (['"""venv"""'], {}), "('venv')\n", (2613, 2621), False, 'from pathlib import Path\n'), ((3015, 3027), 'pathlib.Path', 'Path', (['""".nox"""'], {}), "('.nox')\n", (3019, 3027), False, 'from pathlib import Path\n'), ((3037, 3050), 'pathlib.Path', 'Path', (['""".venv"""'], {}), "('.venv')\n", (3041, 3050), False, 'from pathlib import Path\n'), ((3060, 3072), 'pathlib.Path', 'Path', (['"""venv"""'], {}), "('venv')\n", (3064, 3072), False, 'from pathlib import Path\n'), ((3082, 3096), 'pathlib.Path', 'Path', (['"""custom"""'], {}), "('custom')\n", (3086, 3096), False, 'from pathlib import Path\n'), ((3106, 3121), 'pathlib.Path', 'Path', (['"""another"""'], {}), "('another')\n", (3110, 3121), False, 'from pathlib import Path\n'), ((3511, 3523), 'pathlib.Path', 'Path', (['""".nox"""'], {}), "('.nox')\n", (3515, 3523), False, 'from pathlib import Path\n'), ((3533, 3546), 'pathlib.Path', 'Path', (['""".venv"""'], {}), "('.venv')\n", (3537, 3546), False, 'from pathlib import Path\n'), ((3556, 3568), 'pathlib.Path', 'Path', (['"""venv"""'], {}), "('venv')\n", (3560, 3568), False, 'from pathlib import Path\n'), ((6265, 6277), 'pathlib.Path', 'Path', (['""".nox"""'], {}), "('.nox')\n", (6269, 6277), False, 'from pathlib import Path\n'), ((6287, 6300), 'pathlib.Path', 'Path', (['""".venv"""'], {}), "('.venv')\n", (6291, 6300), False, 'from pathlib import Path\n'), ((6310, 6322), 'pathlib.Path', 'Path', (['"""venv"""'], {}), "('venv')\n", (6314, 6322), False, 'from pathlib import Path\n'), ((6332, 6346), 'pathlib.Path', 'Path', (['"""custom"""'], {}), "('custom')\n", (6336, 6346), False, 'from pathlib import Path\n'), ((6356, 
6371), 'pathlib.Path', 'Path', (['"""another"""'], {}), "('another')\n", (6360, 6371), False, 'from pathlib import Path\n'), ((6535, 6552), 'pathlib.Path', 'Path', (['"""README.md"""'], {}), "('README.md')\n", (6539, 6552), False, 'from pathlib import Path\n'), ((6586, 6600), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (6590, 6600), False, 'from pathlib import Path\n'), ((6889, 6908), 'pathlib.Path', 'Path', (['"""testdata.py"""'], {}), "('testdata.py')\n", (6893, 6908), False, 'from pathlib import Path\n'), ((7629, 7642), 'pytest.fail', 'pytest.fail', ([], {}), '()\n', (7640, 7642), False, 'import pytest\n'), ((8982, 8989), 'pathlib.Path', 'Path', (['p'], {}), '(p)\n', (8986, 8989), False, 'from pathlib import Path\n'), ((9563, 9575), 'pathlib.Path', 'Path', (['""".nox"""'], {}), "('.nox')\n", (9567, 9575), False, 'from pathlib import Path\n'), ((9585, 9598), 'pathlib.Path', 'Path', (['""".venv"""'], {}), "('.venv')\n", (9589, 9598), False, 'from pathlib import Path\n'), ((9608, 9620), 'pathlib.Path', 'Path', (['"""venv"""'], {}), "('venv')\n", (9612, 9620), False, 'from pathlib import Path\n'), ((9630, 9654), 'pathlib.Path', 'Path', (['"""tests/exclude.py"""'], {}), "('tests/exclude.py')\n", (9634, 9654), False, 'from pathlib import Path\n'), ((2102, 2117), 'pathlib.Path', 'Path', (['"""another"""'], {}), "('another')\n", (2106, 2117), False, 'from pathlib import Path\n'), ((6646, 6658), 'pathlib.Path', 'Path', (['"""len8"""'], {}), "('len8')\n", (6650, 6658), False, 'from pathlib import Path\n'), ((6712, 6725), 'pathlib.Path', 'Path', (['"""tests"""'], {}), "('tests')\n", (6716, 6725), False, 'from pathlib import Path\n')]
""" this is a program contains Test Class for HW04 Written by <NAME> """ import unittest as ut import HW04_Qi_Zhao as HW04 class TestFraction(ut.TestCase): #Test cases class with extended test cases. def test_simplify(self): self.assertEqual(HW04.Fraction(9, 27).simplify(),HW04.Fraction(1,3)) self.assertEqual(HW04.Fraction(9, -27).simplify(),HW04.Fraction(-1,3)) self.assertEqual(HW04.Fraction(1, -4).simplify(),HW04.Fraction(-1,4)) self.assertFalse(HW04.Fraction(2, -4).simplify()==HW04.Fraction(-1,4)) class TestIteration(ut.TestCase): #Test cases for all the other functions def test_count_vowels(self): #test for the count vowels function self.assertEqual(HW04.count_vowels("Happy Day!"),2) self.assertEqual(HW04.count_vowels("HAppy Deust!"),3) self.assertEqual(HW04.count_vowels("H ppy D st!"),0) self.assertFalse(HW04.count_vowels("H ppy D s3et!")==0) def test_last_occurance(self): #test for the last occurance function for all sequence self.assertEqual(HW04.last_occurrence(42,[10,20,23,42,42]),4) self.assertEqual(HW04.last_occurrence('apple',['10',20,'23','42','apple']),4) self.assertEqual(HW04.last_occurrence('p','apple'),2) self.assertEqual(HW04.last_occurrence('p',[]),None) def test_my_enumerate(self): #test for the generator strA = "hello world!" strB = "hello " self.assertTrue(list(HW04.my_enumerate(strA))==list(enumerate(strA))) self.assertFalse(list(HW04.my_enumerate(strA))==list(enumerate(strB))) if __name__ == '__main__': ut.main(exit=False,verbosity=2)
[ "unittest.main", "HW04_Qi_Zhao.Fraction", "HW04_Qi_Zhao.count_vowels", "HW04_Qi_Zhao.last_occurrence", "HW04_Qi_Zhao.my_enumerate" ]
[((1651, 1683), 'unittest.main', 'ut.main', ([], {'exit': '(False)', 'verbosity': '(2)'}), '(exit=False, verbosity=2)\n', (1658, 1683), True, 'import unittest as ut\n'), ((296, 315), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(1)', '(3)'], {}), '(1, 3)\n', (309, 315), True, 'import HW04_Qi_Zhao as HW04\n'), ((374, 394), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(-1)', '(3)'], {}), '(-1, 3)\n', (387, 394), True, 'import HW04_Qi_Zhao as HW04\n'), ((452, 472), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(-1)', '(4)'], {}), '(-1, 4)\n', (465, 472), True, 'import HW04_Qi_Zhao as HW04\n'), ((734, 765), 'HW04_Qi_Zhao.count_vowels', 'HW04.count_vowels', (['"""Happy Day!"""'], {}), "('Happy Day!')\n", (751, 765), True, 'import HW04_Qi_Zhao as HW04\n'), ((794, 827), 'HW04_Qi_Zhao.count_vowels', 'HW04.count_vowels', (['"""HAppy Deust!"""'], {}), "('HAppy Deust!')\n", (811, 827), True, 'import HW04_Qi_Zhao as HW04\n'), ((856, 888), 'HW04_Qi_Zhao.count_vowels', 'HW04.count_vowels', (['"""H ppy D st!"""'], {}), "('H ppy D st!')\n", (873, 888), True, 'import HW04_Qi_Zhao as HW04\n'), ((1080, 1126), 'HW04_Qi_Zhao.last_occurrence', 'HW04.last_occurrence', (['(42)', '[10, 20, 23, 42, 42]'], {}), '(42, [10, 20, 23, 42, 42])\n', (1100, 1126), True, 'import HW04_Qi_Zhao as HW04\n'), ((1150, 1212), 'HW04_Qi_Zhao.last_occurrence', 'HW04.last_occurrence', (['"""apple"""', "['10', 20, '23', '42', 'apple']"], {}), "('apple', ['10', 20, '23', '42', 'apple'])\n", (1170, 1212), True, 'import HW04_Qi_Zhao as HW04\n'), ((1236, 1270), 'HW04_Qi_Zhao.last_occurrence', 'HW04.last_occurrence', (['"""p"""', '"""apple"""'], {}), "('p', 'apple')\n", (1256, 1270), True, 'import HW04_Qi_Zhao as HW04\n'), ((1298, 1327), 'HW04_Qi_Zhao.last_occurrence', 'HW04.last_occurrence', (['"""p"""', '[]'], {}), "('p', [])\n", (1318, 1327), True, 'import HW04_Qi_Zhao as HW04\n'), ((531, 551), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(-1)', '(4)'], {}), '(-1, 4)\n', (544, 551), True, 'import HW04_Qi_Zhao as 
HW04\n'), ((917, 951), 'HW04_Qi_Zhao.count_vowels', 'HW04.count_vowels', (['"""H ppy D s3et!"""'], {}), "('H ppy D s3et!')\n", (934, 951), True, 'import HW04_Qi_Zhao as HW04\n'), ((264, 284), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(9)', '(27)'], {}), '(9, 27)\n', (277, 284), True, 'import HW04_Qi_Zhao as HW04\n'), ((341, 362), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(9)', '(-27)'], {}), '(9, -27)\n', (354, 362), True, 'import HW04_Qi_Zhao as HW04\n'), ((420, 440), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(1)', '(-4)'], {}), '(1, -4)\n', (433, 440), True, 'import HW04_Qi_Zhao as HW04\n'), ((1481, 1504), 'HW04_Qi_Zhao.my_enumerate', 'HW04.my_enumerate', (['strA'], {}), '(strA)\n', (1498, 1504), True, 'import HW04_Qi_Zhao as HW04\n'), ((1560, 1583), 'HW04_Qi_Zhao.my_enumerate', 'HW04.my_enumerate', (['strA'], {}), '(strA)\n', (1577, 1583), True, 'import HW04_Qi_Zhao as HW04\n'), ((498, 518), 'HW04_Qi_Zhao.Fraction', 'HW04.Fraction', (['(2)', '(-4)'], {}), '(2, -4)\n', (511, 518), True, 'import HW04_Qi_Zhao as HW04\n')]
import os class DocumentFile: def __init__(self, fmtdirstr, file_name, media_root): self.fmtdirstr = fmtdirstr self.file_name = file_name self.media_root = media_root @property def dir_path(self): return os.path.join( self.media_root, self.fmtdirstr ) def __str__(self): return self.abspath def __repr__(self): return self.abspath @property def rootname(self): root, _ = os.path.splitext( os.path.basename(self.file_name) ) return root @property def is_image(self): """ """ ext = os.path.splitext(self.abspath)[1] if ext.lower() in ('.png', '.jpeg', '.jpg'): return True return False @property def abspath(self): return os.path.join( self.dir_path, self.file_name ) @property def exists(self): return os.path.exists(self.abspath)
[ "os.path.join", "os.path.splitext", "os.path.exists", "os.path.basename" ]
[((251, 296), 'os.path.join', 'os.path.join', (['self.media_root', 'self.fmtdirstr'], {}), '(self.media_root, self.fmtdirstr)\n', (263, 296), False, 'import os\n'), ((850, 893), 'os.path.join', 'os.path.join', (['self.dir_path', 'self.file_name'], {}), '(self.dir_path, self.file_name)\n', (862, 893), False, 'import os\n'), ((980, 1008), 'os.path.exists', 'os.path.exists', (['self.abspath'], {}), '(self.abspath)\n', (994, 1008), False, 'import os\n'), ((523, 555), 'os.path.basename', 'os.path.basename', (['self.file_name'], {}), '(self.file_name)\n', (539, 555), False, 'import os\n'), ((664, 694), 'os.path.splitext', 'os.path.splitext', (['self.abspath'], {}), '(self.abspath)\n', (680, 694), False, 'import os\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import import json import requests from v2ex_daily_mission.notifier.abc import Notifier, NotificationSendFailedException class SlackNotifier(Notifier): def __init__(self, config): self.config = config def send_notification(self): url = self.config['notifier']['slack']['url'] data = { "text": "v2ex_daily_mission: sign failed." } try: response = requests.post(url, data=json.dumps(data)) if response.text != 'ok': raise NotificationSendFailedException( "slack notification send failed, response: {}".format(response.text) ) except requests.RequestException as e: raise NotificationSendFailedException( "slack notification send failed, error: {}".format(e) )
[ "json.dumps" ]
[((526, 542), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (536, 542), False, 'import json\n')]
# -*- coding: utf-8 -*- # tab level: 4 #!/usr/bin/env python3 """this module will control all settings """ import logging import logging.config import logging.handlers import json import os from pathlib import Path from mytemplate.errorclass import CustomError as customerr logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) class ConfManage: """this class manage all config on this server. this class use setting data from pyserver/conf/json file in default. Methods ------- convert_path(relative_path) return absolute path load_data_dict set json data return_server_addr return tuple of server data subsclibe_client(client_address) """ def __init__(self, env_path): self.abs_path = None self.confi_set = {} self.env = os.getenv("PY_SERVER_CONF") if env_path is None: self.rel_path = self.env else: self.rel_path = env_path def setup_logging(self, Defaultpath="./conf/logconf.json", Defaultlevel=logging.INFO, Envkey="LOG_CFG" ): """this method setting of logging. default logging level is INFO. if you want to change level,you should change Defaultlevel parameters. Parameters ---------- Defaultpath : str, optional [description], by default "./conf/logconf.json" Defaultlevel : logginLevel, optional [description], by default logging.INFO Envkey : str, optional [description], by default "LOG_CFG" """ log_locate_rel = Defaultpath log_locate_abs = self.convert_path(log_locate_rel) abs_path = Path(log_locate_abs) if abs_path.exists(): c_dict = self.load_jsonfile(str(abs_path)) logging.config.dictConfig(c_dict) else: logging.basicConfig(level=Defaultlevel) def convert_path(self, relative_path): """return boolean parameter. This function return Flase if happend error. 
Parameters ---------- relative_path : str Returns ------- abs_path : str """ try: if self.rel_path is not None: relative_path = self.rel_path rel_path = Path(relative_path) abs_path = rel_path.resolve(strict=True) except FileNotFoundError: raise customerr.PathError except Exception as e: logger.error(e) raise Exception else: return str(abs_path) def return_pathobj(self, abs_path): path = Path(abs_path) return path def output_fileobj(self, abs_path): path = self.return_pathobj(abs_path) try: with path.open("r", encoding="utf-8") as f: obj = self.format_IO(f) except TypeError as e: print(e) return obj def format_IO(self, IO): # bad condition if IO is True: Fobj = json.loads(IO) elif IO is False: Fobj = IO.read() else: raise OSError return Fobj def load_jsonfile(self, abs_path): """this method convert JSON file to fit object of python, So this can only be used for JSON files. Patameters ---------- abs_path: str purpose json file path Returns ------- py_dataobj: Python object """ path = self.return_pathobj(abs_path) try: with path.open(mode="r", encoding="utf-8") as f: py_dataobj = json.load(f) logger.info("%s", "read conf file sucess.") except (OSError, json.JSONDecodeError): logger.error("%s", "cant read conf file. should check there is config file") return None else: return py_dataobj def return_server_addr(self): """this method get path data from its args. which return tuple about this server address data. Returns ------- conf_tuple : tuple exsample ("0.0.0.0",1234) """ conf_dict = self.confi_set.copy() key = conf_dict.keys() ip_port = ("host_ip", "host_port") # bad code if key is ip_port[0] & key is ip_port[1]: conf_tuple = conf_dict[ip_port[0]], conf_dict[ip_port[1]] logger.info("%s", "compleate this app conf") return conf_tuple def get_plaformdata(self): pass
[ "json.load", "json.loads", "logging.basicConfig", "pathlib.Path", "logging.config.dictConfig", "os.getenv", "logging.getLogger" ]
[((287, 314), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (304, 314), False, 'import logging\n'), ((833, 860), 'os.getenv', 'os.getenv', (['"""PY_SERVER_CONF"""'], {}), "('PY_SERVER_CONF')\n", (842, 860), False, 'import os\n'), ((1775, 1795), 'pathlib.Path', 'Path', (['log_locate_abs'], {}), '(log_locate_abs)\n', (1779, 1795), False, 'from pathlib import Path\n'), ((2721, 2735), 'pathlib.Path', 'Path', (['abs_path'], {}), '(abs_path)\n', (2725, 2735), False, 'from pathlib import Path\n'), ((1893, 1926), 'logging.config.dictConfig', 'logging.config.dictConfig', (['c_dict'], {}), '(c_dict)\n', (1918, 1926), False, 'import logging\n'), ((1953, 1992), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'Defaultlevel'}), '(level=Defaultlevel)\n', (1972, 1992), False, 'import logging\n'), ((2386, 2405), 'pathlib.Path', 'Path', (['relative_path'], {}), '(relative_path)\n', (2390, 2405), False, 'from pathlib import Path\n'), ((3118, 3132), 'json.loads', 'json.loads', (['IO'], {}), '(IO)\n', (3128, 3132), False, 'import json\n'), ((3726, 3738), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3735, 3738), False, 'import json\n')]
# -*- coding: utf-8 -*- """ @author: <NAME> @description: Graceful stopping condition @contact: <EMAIL> """ import signal import time import subprocess from pathlib import Path class GracefulKiller: kill_now = False def __init__(self): signal.signal(signal.SIGINT, self.exit_gracefully) signal.signal(signal.SIGTERM, self.exit_gracefully) def exit_gracefully(self,signum, frame): self.kill_now = True if __name__ == '__main__': try: subprocess.call('rm .kill', shell=True, cwd=Path.cwd()) except FileNotFoundError: print("No .kill fn present. You are cleared for takeoff") killer = GracefulKiller() while not killer.kill_now: time.sleep(1) print("Found SIGINT/SIGTERM signal") subprocess.call('touch .kill', shell=True, cwd=Path.cwd()) print("Clean Exit. The flow field has been saved :)")
[ "signal.signal", "pathlib.Path.cwd", "time.sleep" ]
[((254, 304), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'self.exit_gracefully'], {}), '(signal.SIGINT, self.exit_gracefully)\n', (267, 304), False, 'import signal\n'), ((313, 364), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'self.exit_gracefully'], {}), '(signal.SIGTERM, self.exit_gracefully)\n', (326, 364), False, 'import signal\n'), ((712, 725), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (722, 725), False, 'import time\n'), ((818, 828), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (826, 828), False, 'from pathlib import Path\n'), ((533, 543), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (541, 543), False, 'from pathlib import Path\n')]
#!/usr/bin/env python # Roobert V2 - second version of home robot project # ________ ______ _____ # ___ __ \______________ /_______________ /_ # __ /_/ / __ \ __ \_ __ \ _ \_ ___/ __/ # _ _, _// /_/ / /_/ / /_/ / __/ / / /_ # /_/ |_| \____/\____//_.___/\___//_/ \__/ # # Project website: http://roobert.springwald.de # # ############################################## # # Roobert hardware device factory and config # # ############################################## # # Licensed under MIT License (MIT) # # Copyright (c) 2018 <NAME> | <EMAIL> # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. 
from __future__ import division import time, os, sys my_file = os.path.abspath(__file__) my_path ='/'.join(my_file.split('/')[0:-1]) sys.path.insert(0,my_path + "/" ) sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/multitasking" ) sys.path.insert(0,my_path + "/../DanielsRasPiPythonLibs/hardware" ) from MultiProcessing import * from array import array from SharedInts import SharedInts from SharedFloats import SharedFloats from LX16AServos import LX16AServos from SmartServoManager import SmartServoManager from Arms import Arms from Neck import Neck from RgbLeds import RgbLeds import atexit class HardwareDevices(): _bodyLeds = None _arms = None _neck = None _servoManager = None _servos = None __singleton = None _released = False @staticmethod def singleton(): if (HardwareDevices.__singleton == None): HardwareDevices.__singleton = HardwareDevices() return HardwareDevices.__singleton @property def arms(self): return self._arms @property def neck(self): return self._neck @property def BodyLeds(self): return self._bodyLeds def __init__(self): self._servos = LX16AServos(); self._servoManager = SmartServoManager(lX16AServos=self._servos, ramp=0, maxSpeed=1) self._arms = Arms(self._servoManager) self._neck = Neck(self._servoManager) self._servoManager.Start() self._neck.SetLeftRight(0) self._neck.SetUpDown(0) self._bodyLeds = RgbLeds([ my_path + '/../Gfx/Body/hearth2.gif', my_path + '/../../RoobertGifs/e8nZC.gif', my_path + '/../../RoobertGifs/U9LwW86.gif', my_path + '/../../RoobertGifs/Spin_Toad.gif', my_path + '/../../RoobertGifs/haleye.gif', my_path + '/../../RoobertGifs/Yoshi_render.gif' ]) def Release(self): if (self._released == False): self._released = True print("releasing hardware devices") if (self._bodyLeds != None): self._bodyLeds.Release() if (self._arms != None): self._arms.Release() if (self._neck != None): self._neck.Release() if (self._servoManager != None): self._servoManager.Release() if (self._servos != None): self._servos.Release() def 
__del__(self): self.Release() def exit_handler(): devices.Release() if __name__ == "__main__": devices = HardwareDevices.singleton() atexit.register(exit_handler) devices.arms.WaitTillTargetsReached(); time.sleep(5) devices.Release()
[ "atexit.register", "os.path.abspath", "SmartServoManager.SmartServoManager", "sys.path.insert", "RgbLeds.RgbLeds", "time.sleep", "Neck.Neck", "Arms.Arms", "LX16AServos.LX16AServos" ]
[((1831, 1856), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1846, 1856), False, 'import time, os, sys\n'), ((1902, 1935), 'sys.path.insert', 'sys.path.insert', (['(0)', "(my_path + '/')"], {}), "(0, my_path + '/')\n", (1917, 1935), False, 'import time, os, sys\n'), ((1937, 2008), 'sys.path.insert', 'sys.path.insert', (['(0)', "(my_path + '/../DanielsRasPiPythonLibs/multitasking')"], {}), "(0, my_path + '/../DanielsRasPiPythonLibs/multitasking')\n", (1952, 2008), False, 'import time, os, sys\n'), ((2009, 2076), 'sys.path.insert', 'sys.path.insert', (['(0)', "(my_path + '/../DanielsRasPiPythonLibs/hardware')"], {}), "(0, my_path + '/../DanielsRasPiPythonLibs/hardware')\n", (2024, 2076), False, 'import time, os, sys\n'), ((4126, 4155), 'atexit.register', 'atexit.register', (['exit_handler'], {}), '(exit_handler)\n', (4141, 4155), False, 'import atexit\n'), ((4200, 4213), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (4210, 4213), False, 'import time, os, sys\n'), ((2941, 2954), 'LX16AServos.LX16AServos', 'LX16AServos', ([], {}), '()\n', (2952, 2954), False, 'from LX16AServos import LX16AServos\n'), ((2979, 3042), 'SmartServoManager.SmartServoManager', 'SmartServoManager', ([], {'lX16AServos': 'self._servos', 'ramp': '(0)', 'maxSpeed': '(1)'}), '(lX16AServos=self._servos, ramp=0, maxSpeed=1)\n', (2996, 3042), False, 'from SmartServoManager import SmartServoManager\n'), ((3058, 3082), 'Arms.Arms', 'Arms', (['self._servoManager'], {}), '(self._servoManager)\n', (3062, 3082), False, 'from Arms import Arms\n'), ((3098, 3122), 'Neck.Neck', 'Neck', (['self._servoManager'], {}), '(self._servoManager)\n', (3102, 3122), False, 'from Neck import Neck\n'), ((3229, 3520), 'RgbLeds.RgbLeds', 'RgbLeds', (["[my_path + '/../Gfx/Body/hearth2.gif', my_path +\n '/../../RoobertGifs/e8nZC.gif', my_path +\n '/../../RoobertGifs/U9LwW86.gif', my_path +\n '/../../RoobertGifs/Spin_Toad.gif', my_path +\n '/../../RoobertGifs/haleye.gif', my_path +\n 
'/../../RoobertGifs/Yoshi_render.gif']"], {}), "([my_path + '/../Gfx/Body/hearth2.gif', my_path +\n '/../../RoobertGifs/e8nZC.gif', my_path +\n '/../../RoobertGifs/U9LwW86.gif', my_path +\n '/../../RoobertGifs/Spin_Toad.gif', my_path +\n '/../../RoobertGifs/haleye.gif', my_path +\n '/../../RoobertGifs/Yoshi_render.gif'])\n", (3236, 3520), False, 'from RgbLeds import RgbLeds\n')]
from logging import getLogger from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor from libcity.utils import get_evaluator class MapMatchingExecutor(AbstractTraditionExecutor): def __init__(self, config, model): self.model = model self.config = config self.evaluator = get_evaluator(config) self.exp_id = self.config.get('exp_id', None) self.cache_dir = './libcity/cache/{}/model_cache'.format(self.exp_id) self.evaluate_res_dir = './libcity/cache/{}/evaluate_cache'.format(self.exp_id) self._logger = getLogger() def evaluate(self, test_data): """ use model to test data Args: test_data """ result = self.model.run(test_data) batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']} self.evaluator.collect(batch) self.evaluator.save_result(self.evaluate_res_dir) def train(self, train_dataloader, eval_dataloader): """ 对于传统模型,不需要训练 Args: train_dataloader(torch.Dataloader): Dataloader eval_dataloader(torch.Dataloader): Dataloader """ pass # do nothing
[ "libcity.utils.get_evaluator", "logging.getLogger" ]
[((330, 351), 'libcity.utils.get_evaluator', 'get_evaluator', (['config'], {}), '(config)\n', (343, 351), False, 'from libcity.utils import get_evaluator\n'), ((595, 606), 'logging.getLogger', 'getLogger', ([], {}), '()\n', (604, 606), False, 'from logging import getLogger\n')]
# Create program to read email from Gmail account and perform a sentiment analysis on the email. import imaplib import email import email.message import email.utils import re import datetime import sys import os # import pandas as pd # import numpy as np # import matplotlib.pyplot as plt # from matplotlib.backends.backend_pdf import PdfPages from email.parser import Parser from email.header import decode_header from email.utils import parsedate_tz, mktime_tz, parsedate from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.audio import MIMEAudio from email.mime.application import MIMEApplication from email.encoders import encode_base64 from email.utils import make_msgid from email.utils import formatdate from email.utils import formataddr from email.utils import getaddresses from email.utils import parseaddr def main(): # Create a connection to the Gmail server print("\n*** Starting Email Sentiment Analyzer *** \n") print("\n* Connecting to Gmail * \n") mail = imaplib.IMAP4_SSL('imap.gmail.com') mail.login('<EMAIL>', 'ppyaqlzrpmpmyiez') mail.select('inbox') result, data = mail.uid('search', None, "ALL") inbox_item_list = data[0].split() latest_email_uid = inbox_item_list[-1] result, data = mail.uid('fetch', latest_email_uid, '(RFC822)') raw_email = data[0][1] email_message = email.message_from_string(raw_email.decode('utf-8')) email_subject = email_message['subject'] email_from = email_message['from'] email_date = email_message['date'] # email_body = get_body(email_message) email_body_plain = email_message.get_payload() # Define the date format date_format = '%a, %d %b %Y %H:%M:%S %z' date_format_2 = '%d/%b/%Y' # Parse the email date email_date_parsed = datetime.datetime.strptime(email_date, date_format) email_date_parsed = email_date_parsed.strftime(date_format_2) # Print parsed email subject and date print('Email subject: ' + email_subject) 
print('Email date: ' + email_date_parsed) # Evaluate the sentiment of the email # Create a dataframe to store the data # Connect to AWS S3 # ============================================================================= if __name__ == "__main__": main()
[ "imaplib.IMAP4_SSL", "datetime.datetime.strptime" ]
[((1128, 1163), 'imaplib.IMAP4_SSL', 'imaplib.IMAP4_SSL', (['"""imap.gmail.com"""'], {}), "('imap.gmail.com')\n", (1145, 1163), False, 'import imaplib\n'), ((1908, 1959), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['email_date', 'date_format'], {}), '(email_date, date_format)\n', (1934, 1959), False, 'import datetime\n')]
import setuptools

with open('README.md') as readme:
    long_desc = readme.read()

# All packaging metadata gathered in one mapping, then handed to setup().
SETUP_ARGS = dict(
    name='asyncping3',
    use_scm_version={
        "version_scheme": "guess-next-dev",
        "local_scheme": "dirty-tag",
    },
    setup_requires=["setuptools_scm"],
    description='A pure python3 version of ICMP ping implementation using raw socket.',
    long_description=long_desc,
    long_description_content_type='text/markdown',
    url='https://github.com/M-o-a-T/asyncping3',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Topic :: System :: Networking',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
    keywords='python3 ping icmp socket tool',
    packages=["asyncping3"],
    python_requires='>=3',
    install_requires=["anyio >= 3"],
    extras_require={
        'dev': ['build', 'twine', 'pycodestyle'],
    },
    package_data={},
    data_files=[],
    entry_points={
        'console_scripts': ['pping=asyncping3._main:main'],
    },
)

setuptools.setup(**SETUP_ARGS)
[ "setuptools.setup" ]
[((74, 1092), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""asyncping3"""', 'use_scm_version': "{'version_scheme': 'guess-next-dev', 'local_scheme': 'dirty-tag'}", 'setup_requires': "['setuptools_scm']", 'description': '"""A pure python3 version of ICMP ping implementation using raw socket."""', 'long_description': 'long_desc', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/M-o-a-T/asyncping3"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""MIT"""', 'classifiers': "['Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Topic :: System :: Networking',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3']", 'keywords': '"""python3 ping icmp socket tool"""', 'packages': "['asyncping3']", 'python_requires': '""">=3"""', 'install_requires': "['anyio >= 3']", 'extras_require': "{'dev': ['build', 'twine', 'pycodestyle']}", 'package_data': '{}', 'data_files': '[]', 'entry_points': "{'console_scripts': ['pping=asyncping3._main:main']}"}), "(name='asyncping3', use_scm_version={'version_scheme':\n 'guess-next-dev', 'local_scheme': 'dirty-tag'}, setup_requires=[\n 'setuptools_scm'], description=\n 'A pure python3 version of ICMP ping implementation using raw socket.',\n long_description=long_desc, long_description_content_type=\n 'text/markdown', url='https://github.com/M-o-a-T/asyncping3', author=\n '<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Topic :: System :: Networking',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3'], keywords=\n 'python3 ping icmp socket tool', packages=['asyncping3'],\n python_requires='>=3', install_requires=['anyio >= 3'], extras_require=\n {'dev': ['build', 'twine', 
'pycodestyle']}, package_data={}, data_files\n =[], entry_points={'console_scripts': ['pping=asyncping3._main:main']})\n", (90, 1092), False, 'import setuptools\n')]
# Copyright (c) 2016-2019 <NAME>
#
# This file is part of XL-mHG.

"""Contains the `mHGResult` class."""

import sys
import hashlib
import logging

import numpy as np

try:
    # This is a duct-tape fix for the Google App Engine, on which importing
    # the C extension fails.
    from . import mhg_cython
except ImportError:
    print('Warning (xlmhg): Failed to import "mhg_cython" C extension.',
          file=sys.stderr)
    from . import mhg as mhg_cython

logger = logging.getLogger(__name__)


class mHGResult(object):
    """The result of an XL-mHG test.

    This class is used by the `get_xlmhg_test_result` function to represent
    the result of an XL-mHG test.

    Parameters
    ----------
    N: int
        See :attr:`N` attribute.
    indices
        See :attr:`indices` attribute.
    X: int
        See :attr:`X` attribute.
    L: int
        See :attr:`L` attribute.
    stat: float
        See :attr:`stat` attribute.
    cutoff: int
        See :attr:`cutoff` attribute.
    pval: float
        See :attr:`pval` attribute.
    pval_thresh: float, optional
        See :attr:`pval_thresh` attribute.
    escore_pval_thresh: float, optional
        See :attr:`escore_pval_thresh` attribute.
    escore_tol: float, optional
        See :attr:`escore_tol` attribute.

    Attributes
    ----------
    N: int
        The length of the ranked list (i.e., the number of elements in it).
    indices: `numpy.ndarray` with ``ndim=1`` and ``dtype=np.uint16``.
        A sorted (!) list of indices of all the 1's in the ranked list.
    X: int
        The XL-mHG X parameter.
    L: int
        The XL-mHG L parameter.
    stat: float
        The XL-mHG test statistic.
    cutoff: int
        The XL-mHG cutoff.
    pval: float
        The XL-mHG p-value.
    pval_thresh: float or None
        The user-specified significance (p-value) threshold for this test.
    escore_pval_thresh: float or None
        The user-specified p-value threshold used in the E-score calculation.
    escore_tol: float or None
        The floating point tolerance used in the E-score calculation.
    """
    def __init__(self, N, indices, X, L, stat, cutoff, pval,
                 pval_thresh=None, escore_pval_thresh=None, escore_tol=None):
        # NOTE(review): `assert` statements are stripped under `python -O`;
        # kept for backward compatibility, but real input validation should
        # raise TypeError/ValueError instead.
        assert isinstance(N, int)
        assert isinstance(indices, np.ndarray) and indices.ndim == 1 and \
                np.issubdtype(indices.dtype, np.uint16) and \
                indices.flags.c_contiguous
        assert isinstance(X, int)
        assert isinstance(L, int)
        assert isinstance(stat, float)
        assert isinstance(cutoff, int)
        assert isinstance(pval, float)
        if pval_thresh is not None:
            assert isinstance(pval_thresh, float)
        if escore_pval_thresh is not None:
            assert isinstance(escore_pval_thresh, float)
        if escore_tol is not None:
            assert isinstance(escore_tol, float)

        self.indices = indices
        self.N = N
        self.X = X
        self.L = L
        self.stat = stat
        self.cutoff = cutoff
        self.pval = pval
        self.pval_thresh = pval_thresh
        self.escore_pval_thresh = escore_pval_thresh
        self.escore_tol = escore_tol

    def __repr__(self):
        return '<%s object (N=%d, K=%d, pval=%.1e, hash="%s")>' \
                % (self.__class__.__name__, self.N, self.K, self.pval,
                   self.hash)

    def __str__(self):
        return '<%s object (N=%d, K=%d, X=%d, L=%d, pval=%.1e)>' \
                % (self.__class__.__name__, self.N, self.K,
                   self.X, self.L, self.pval)

    def __eq__(self, other):
        if self is other:
            return True
        elif type(self) == type(other):
            return self.hash == other.hash
        else:
            return NotImplemented

    def __ne__(self, other):
        # BUGFIX: the original returned `not self.__eq__(other)`, which
        # negates the `NotImplemented` sentinel instead of propagating it
        # (and raises TypeError on Python >= 3.12, where NotImplemented
        # is no longer truth-testable).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    @property
    def v(self):
        """(property) Returns the list as a `numpy.ndarray`
        (with dtype ``np.uint8``).
        """
        v = np.zeros(self.N, dtype=np.uint8)
        v[self.indices] = 1
        return v

    @property
    def K(self):
        """(property) Returns the number of 1's in the list."""
        return self.indices.size

    @property
    def k(self):
        """(property) Returns the number of 1's above the XL-mHG cutoff."""
        return int(np.sum(self.indices < self.cutoff))

    @property
    def hash(self):
        """(property) Returns a unique hash value for the result."""
        # MD5 is used only as a fingerprint here, not for security.
        data_str = ';'.join(
            [str(repr(var)) for var in
             [self.N, self.K, self.X, self.L, self.stat,
              self.cutoff, self.pval, self.pval_thresh,
              self.escore_pval_thresh]])
        data_str += ';'
        data = data_str.encode('UTF-8') + self.indices.tobytes()
        return str(hashlib.md5(data).hexdigest())

    @property
    def fold_enrichment(self):
        """(property) Returns the fold enrichment at the XL-mHG cutoff."""
        return self.k / (self.K*(self.cutoff/float(self.N)))

    @property
    def escore(self):
        """(property) Returns the E-score associated with the result."""
        hg_pval_thresh = self.escore_pval_thresh or self.pval
        escore_tol = self.escore_tol or mhg_cython.get_default_tol()
        es = mhg_cython.get_xlmhg_escore(
            self.indices, self.N, self.K, self.X, self.L,
            hg_pval_thresh, escore_tol)
        return es
[ "hashlib.md5", "numpy.sum", "numpy.zeros", "logging.getLogger", "numpy.issubdtype" ]
[((473, 500), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (490, 500), False, 'import logging\n'), ((4035, 4067), 'numpy.zeros', 'np.zeros', (['self.N'], {'dtype': 'np.uint8'}), '(self.N, dtype=np.uint8)\n', (4043, 4067), True, 'import numpy as np\n'), ((2366, 2405), 'numpy.issubdtype', 'np.issubdtype', (['indices.dtype', 'np.uint16'], {}), '(indices.dtype, np.uint16)\n', (2379, 2405), True, 'import numpy as np\n'), ((4369, 4403), 'numpy.sum', 'np.sum', (['(self.indices < self.cutoff)'], {}), '(self.indices < self.cutoff)\n', (4375, 4403), True, 'import numpy as np\n'), ((4839, 4856), 'hashlib.md5', 'hashlib.md5', (['data'], {}), '(data)\n', (4850, 4856), False, 'import hashlib\n')]
""" Extend napoleon to provide a `Members` section for C structs and unions similar to the `Attributes` section in python objects. """ from functools import partial from typing import Optional, Any, List, Union, Dict, Callable from sphinx.config import Config from sphinx.ext.autodoc import Options from sphinx.application import Sphinx from sphinx.ext.napoleon import GoogleDocstring # pylint: disable=too-few-public-methods class CAutoDocString(GoogleDocstring): """ A docstring that can handle documenting some extra c sections, in particular, `members` sections of structs and unions and `enumerators` sections of enums. """ def __init__( self, docstring: Union[str, List[str]], config: Optional[Config] = None, app: Optional[Sphinx] = None, what: str = "", name: str = "", obj: Optional[Any] = None, options: Optional[Options] = None, ) -> None: if not hasattr(self, "_sections"): self._sections = self.get_default_sections() super().__init__(docstring, config, app, what, name, obj, options) def get_default_sections(self) -> Dict[str, Callable]: """ Creates the dictionary that should be used in :attr:`_sections` for this instance. If one wants to extend this class simply do:: class MyDocString(CAutoDocString): def get_default_sections(self) -> Dict[str, Callable]: sections = super().get_default_sections() sections["my_custom_section"] = self._some_method Returns: Dict[str, Callable]: The dictionary of sections to methods that should be used :attr:`_sections`. 
""" default_sections: Dict[str, Callable] = { "args": self._parse_parameters_section, "arguments": self._parse_parameters_section, "attention": partial(self._parse_admonition, "attention"), "attributes": self._parse_attributes_section, "caution": partial(self._parse_admonition, "caution"), "danger": partial(self._parse_admonition, "danger"), "enumerators": partial(self._parse_nested_section, "enumerator"), "error": partial(self._parse_admonition, "error"), "example": self._parse_examples_section, "examples": self._parse_examples_section, "hint": partial(self._parse_admonition, "hint"), "important": partial(self._parse_admonition, "important"), "members": partial(self._parse_nested_section, "member"), "note": partial(self._parse_admonition, "note"), "notes": self._parse_notes_section, "parameters": self._parse_parameters_section, "return": self._parse_returns_section, "returns": self._parse_returns_section, "references": self._parse_references_section, "see also": self._parse_see_also_section, "tip": partial(self._parse_admonition, "tip"), "todo": partial(self._parse_admonition, "todo"), "warning": partial(self._parse_admonition, "warning"), "warnings": partial(self._parse_admonition, "warning"), "warns": self._parse_warns_section, "yield": self._parse_yields_section, "yields": self._parse_yields_section, } return default_sections # pylint: disable=unused-argument def _parse_nested_section(self, nested_title: str, section: str) -> List[str]: """ Parse a members section of a comment. The members section is only expected to be seen in processing of C files. Each item will be formatted using the ``.. c:member:: <name>`` syntax. Args: section (str): The name of the parsed section. Unused. nested_title (str): The name to give to the nested items. Returns: List[str]: The list of lines from `section` converted to the appropriate reST. """ # Place a blank line prior to the section this ensures there is a # newline prior to the first `.. 
c:member::` section and thus it # doesn't get treated as a sentence in the same paragraph lines = [""] # Type should be unused, it's not normal in c to do `var (type)` it's # usually `type var` for name, _, desc in self._consume_fields(): lines.extend([f".. c:{nested_title}:: {name}", ""]) fields = self._format_field("", "", desc) lines.extend(self._indent(fields, 3)) lines.append("") return lines def process_autodoc_docstring( app: Any, what: str, name: str, obj: Any, options: Optional[Options], lines: List[str], ) -> None: """ Call back for autodoc's ``autodoc-process-docstring`` event. Args: app (:class:`Sphinx`): The Sphinx application object what (str): The type of the object which the comment belongs to. One of "cmodule", "cmember", "ctype", "cfunction", "cstruct". name (str): The fully qualified name of the object. For C files this may be a little polluted as it will be ``my_file.c.some_item.some_items_member``. obj (any): The object itself options (dict): The options given to the directive. lines (List[str]): The lines of the comment. This is modified in place. """ docstring = CAutoDocString(lines, app.config, app, what, name, obj, options) result_lines = docstring.lines() lines[:] = result_lines[:] def setup(app: Sphinx) -> None: """ Extend sphinx to assist sphinx_c_autodocs to allow Google style docstrings for C constructs. Args: app (:class:`Sphinx`): The Sphinx application object """ app.setup_extension("sphinx.ext.napoleon") app.connect("autodoc-process-docstring", process_autodoc_docstring)
[ "functools.partial" ]
[((1928, 1972), 'functools.partial', 'partial', (['self._parse_admonition', '"""attention"""'], {}), "(self._parse_admonition, 'attention')\n", (1935, 1972), False, 'from functools import partial\n'), ((2055, 2097), 'functools.partial', 'partial', (['self._parse_admonition', '"""caution"""'], {}), "(self._parse_admonition, 'caution')\n", (2062, 2097), False, 'from functools import partial\n'), ((2121, 2162), 'functools.partial', 'partial', (['self._parse_admonition', '"""danger"""'], {}), "(self._parse_admonition, 'danger')\n", (2128, 2162), False, 'from functools import partial\n'), ((2191, 2240), 'functools.partial', 'partial', (['self._parse_nested_section', '"""enumerator"""'], {}), "(self._parse_nested_section, 'enumerator')\n", (2198, 2240), False, 'from functools import partial\n'), ((2263, 2303), 'functools.partial', 'partial', (['self._parse_admonition', '"""error"""'], {}), "(self._parse_admonition, 'error')\n", (2270, 2303), False, 'from functools import partial\n'), ((2432, 2471), 'functools.partial', 'partial', (['self._parse_admonition', '"""hint"""'], {}), "(self._parse_admonition, 'hint')\n", (2439, 2471), False, 'from functools import partial\n'), ((2498, 2542), 'functools.partial', 'partial', (['self._parse_admonition', '"""important"""'], {}), "(self._parse_admonition, 'important')\n", (2505, 2542), False, 'from functools import partial\n'), ((2567, 2612), 'functools.partial', 'partial', (['self._parse_nested_section', '"""member"""'], {}), "(self._parse_nested_section, 'member')\n", (2574, 2612), False, 'from functools import partial\n'), ((2634, 2673), 'functools.partial', 'partial', (['self._parse_admonition', '"""note"""'], {}), "(self._parse_admonition, 'note')\n", (2641, 2673), False, 'from functools import partial\n'), ((3015, 3053), 'functools.partial', 'partial', (['self._parse_admonition', '"""tip"""'], {}), "(self._parse_admonition, 'tip')\n", (3022, 3053), False, 'from functools import partial\n'), ((3075, 3114), 'functools.partial', 
'partial', (['self._parse_admonition', '"""todo"""'], {}), "(self._parse_admonition, 'todo')\n", (3082, 3114), False, 'from functools import partial\n'), ((3139, 3181), 'functools.partial', 'partial', (['self._parse_admonition', '"""warning"""'], {}), "(self._parse_admonition, 'warning')\n", (3146, 3181), False, 'from functools import partial\n'), ((3207, 3249), 'functools.partial', 'partial', (['self._parse_admonition', '"""warning"""'], {}), "(self._parse_admonition, 'warning')\n", (3214, 3249), False, 'from functools import partial\n')]
import ctypes

DWORD = ctypes.c_ulong
MAX_PATH = ctypes.c_int(260)
# Windows path limit plus room for the terminating NUL.
MAX_PATH_NULL = int(MAX_PATH.value) + 1


def decode(s):
    """Return *s* as text, decoding Windows ANSI ('mbcs') bytes if needed.

    BUGFIX: the original tested ``isinstance(s, unicode)``, a Python-2-only
    name that raises NameError on Python 3.  Checking for ``bytes`` is
    equivalent on Python 2 (where ``bytes is str``) and correct on Python 3.
    """
    if isinstance(s, bytes):
        return s.decode('mbcs')
    return s


def GetModuleFileName(handle):
    """Return the path of the module loaded by *handle*.

    Prefers the wide-character API; falls back to the ANSI variant when the
    W call is unavailable or fails (returns 0).
    """
    r = 0
    if hasattr(ctypes.windll.kernel32, "GetModuleFileNameW"):
        name = ctypes.create_unicode_buffer(MAX_PATH_NULL)
        r = ctypes.windll.kernel32.GetModuleFileNameW(handle, name, MAX_PATH_NULL)
    if r == 0:
        name = ctypes.create_string_buffer(MAX_PATH_NULL)
        ctypes.windll.kernel32.GetModuleFileNameA(handle, name, MAX_PATH_NULL)
    return decode(name.value)


def GetTempPath():
    """Return the Windows temporary-files directory as text."""
    r = 0
    if hasattr(ctypes.windll.kernel32, "GetTempPathW"):
        name = ctypes.create_unicode_buffer(MAX_PATH_NULL)
        r = ctypes.windll.kernel32.GetTempPathW(MAX_PATH_NULL, name)
    if r == 0:
        name = ctypes.create_string_buffer(MAX_PATH_NULL)
        ctypes.windll.kernel32.GetTempPathA(MAX_PATH_NULL, name)
    return decode(name.value)


def ShellExecute(hwnd, operation, file, parameters, directory, showCmd):
    """Invoke ShellExecute, choosing the wide API when it is available.

    String arguments are normalized to text for the W variant; the A
    variant receives them unchanged.
    """
    if hasattr(ctypes.windll.shell32, 'ShellExecuteW'):
        SW = ctypes.windll.shell32.ShellExecuteW
        operation = decode(operation)
        file = decode(file)
        parameters = decode(parameters)
        directory = decode(directory)
    else:
        SW = ctypes.windll.shell32.ShellExecuteA
    return SW(hwnd, operation, file, parameters, directory, showCmd)


def GetVolumeInformation(rootPathName):
    """Query volume information for *rootPathName*.

    Returns:
        tuple: (volume name, serial number, maximum component length,
        file-system flags, file-system name).
    """
    volumeSerialNumber = DWORD()
    maximumComponentLength = DWORD()
    fileSystemFlags = DWORD()
    if hasattr(ctypes.windll.kernel32, "GetVolumeInformationW"):
        rootPathName = decode(rootPathName)
        volumeNameBuffer = ctypes.create_unicode_buffer(MAX_PATH_NULL)
        fileSystemNameBuffer = ctypes.create_unicode_buffer(MAX_PATH_NULL)
        GVI = ctypes.windll.kernel32.GetVolumeInformationW
    else:
        volumeNameBuffer = ctypes.create_string_buffer(MAX_PATH_NULL)
        fileSystemNameBuffer = ctypes.create_string_buffer(MAX_PATH_NULL)
        GVI = ctypes.windll.kernel32.GetVolumeInformationA
    GVI(rootPathName, volumeNameBuffer, MAX_PATH_NULL,
        ctypes.byref(volumeSerialNumber), ctypes.byref(maximumComponentLength),
        ctypes.byref(fileSystemFlags), fileSystemNameBuffer, MAX_PATH_NULL)
    return (volumeNameBuffer.value, volumeSerialNumber.value,
            maximumComponentLength.value, fileSystemFlags.value,
            fileSystemNameBuffer.value)


# Direct aliases for frequently used kernel32 entry points.
CloseHandle = ctypes.windll.kernel32.CloseHandle
GetLastError = ctypes.windll.kernel32.GetLastError
GetCurrentProcessId = ctypes.windll.kernel32.GetCurrentProcessId
OpenProcess = ctypes.windll.kernel32.OpenProcess
TerminateProcess = ctypes.windll.kernel32.TerminateProcess
[ "ctypes.c_int", "ctypes.byref", "ctypes.windll.kernel32.GetModuleFileNameW", "ctypes.windll.kernel32.GetModuleFileNameA", "ctypes.create_string_buffer", "ctypes.create_unicode_buffer", "ctypes.windll.kernel32.GetTempPathW", "ctypes.windll.kernel32.GetTempPathA" ]
[((49, 66), 'ctypes.c_int', 'ctypes.c_int', (['(260)'], {}), '(260)\n', (61, 66), False, 'import ctypes\n'), ((320, 363), 'ctypes.create_unicode_buffer', 'ctypes.create_unicode_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (348, 363), False, 'import ctypes\n'), ((376, 446), 'ctypes.windll.kernel32.GetModuleFileNameW', 'ctypes.windll.kernel32.GetModuleFileNameW', (['handle', 'name', 'MAX_PATH_NULL'], {}), '(handle, name, MAX_PATH_NULL)\n', (417, 446), False, 'import ctypes\n'), ((477, 519), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (504, 519), False, 'import ctypes\n'), ((528, 598), 'ctypes.windll.kernel32.GetModuleFileNameA', 'ctypes.windll.kernel32.GetModuleFileNameA', (['handle', 'name', 'MAX_PATH_NULL'], {}), '(handle, name, MAX_PATH_NULL)\n', (569, 598), False, 'import ctypes\n'), ((731, 774), 'ctypes.create_unicode_buffer', 'ctypes.create_unicode_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (759, 774), False, 'import ctypes\n'), ((787, 843), 'ctypes.windll.kernel32.GetTempPathW', 'ctypes.windll.kernel32.GetTempPathW', (['MAX_PATH_NULL', 'name'], {}), '(MAX_PATH_NULL, name)\n', (822, 843), False, 'import ctypes\n'), ((874, 916), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (901, 916), False, 'import ctypes\n'), ((925, 981), 'ctypes.windll.kernel32.GetTempPathA', 'ctypes.windll.kernel32.GetTempPathA', (['MAX_PATH_NULL', 'name'], {}), '(MAX_PATH_NULL, name)\n', (960, 981), False, 'import ctypes\n'), ((1743, 1786), 'ctypes.create_unicode_buffer', 'ctypes.create_unicode_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (1771, 1786), False, 'import ctypes\n'), ((1818, 1861), 'ctypes.create_unicode_buffer', 'ctypes.create_unicode_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (1846, 1861), False, 'import ctypes\n'), ((1958, 2000), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', 
(['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (1985, 2000), False, 'import ctypes\n'), ((2032, 2074), 'ctypes.create_string_buffer', 'ctypes.create_string_buffer', (['MAX_PATH_NULL'], {}), '(MAX_PATH_NULL)\n', (2059, 2074), False, 'import ctypes\n'), ((2197, 2229), 'ctypes.byref', 'ctypes.byref', (['volumeSerialNumber'], {}), '(volumeSerialNumber)\n', (2209, 2229), False, 'import ctypes\n'), ((2231, 2267), 'ctypes.byref', 'ctypes.byref', (['maximumComponentLength'], {}), '(maximumComponentLength)\n', (2243, 2267), False, 'import ctypes\n'), ((2277, 2306), 'ctypes.byref', 'ctypes.byref', (['fileSystemFlags'], {}), '(fileSystemFlags)\n', (2289, 2306), False, 'import ctypes\n')]
import numpy as np
import pytest

from dnnv.nn.converters.tensorflow import *
from dnnv.nn.operations import *


def _check_reshape(original_shape, new_shape, expected_shape=None):
    """Build a Reshape op, convert it to tensorflow, and compare the result
    with numpy's reshape.

    Args:
        original_shape: shape of the random float32 input tensor.
        new_shape: the target shape passed to the Reshape operation.
        expected_shape: shape to use for the numpy reference when it
            differs from ``new_shape`` (ONNX zero/negative-dim semantics).
    """
    data = np.random.random_sample(original_shape).astype(np.float32)
    shape_arr = np.array(new_shape, dtype=np.int64)
    reference_shape = expected_shape if expected_shape is not None else shape_arr
    y = np.reshape(data, reference_shape)

    op = Reshape(data, shape_arr)
    tf_op = TensorflowConverter().visit(op)
    result = tf_op().numpy()
    assert np.allclose(result, y)


def test_Reshape():
    # allowzero=True: a 0 in the target shape is kept literally, so a
    # zero-size input can be reshaped to another zero-size shape.
    original_shape = [0, 3, 4]
    data = np.random.random_sample(original_shape).astype(np.float32)
    new_shape = np.array([3, 4, 0], dtype=np.int64)
    y = np.reshape(data, new_shape)

    op = Reshape(data, new_shape, allowzero=True)
    tf_op = TensorflowConverter().visit(op)
    result = tf_op().numpy()
    assert np.allclose(result, y)

    # The converter must also handle symbolic (placeholder) inputs.
    op = Reshape(
        Input((0, 3, 4), np.dtype(np.float32)),
        Input((3,), np.dtype(np.int64)),
        allowzero=True,
    )
    tf_op = TensorflowConverter().visit(op)
    result = tf_op(data, new_shape).numpy()
    assert np.allclose(result, y)


def test_Reshape_reordered_all_dims():
    _check_reshape([2, 3, 4], [4, 2, 3])


def test_Reshape_reordered_last_dims():
    _check_reshape([2, 3, 4], [2, 4, 3])


def test_Reshape_reduced_dims():
    _check_reshape([2, 3, 4], [2, 12])


def test_Reshape_extended_dims():
    _check_reshape([2, 3, 4], [2, 3, 2, 2])


def test_Reshape_one_dim():
    _check_reshape([2, 3, 4], [24])


def test_Reshape_negative_dim():
    _check_reshape([2, 3, 4], [2, -1, 2])


def test_Reshape_negative_extended_dims():
    _check_reshape([2, 3, 4], [-1, 2, 3, 4])


def test_Reshape_zero_dim():
    # Without allowzero, a 0 copies the corresponding input dimension.
    _check_reshape([2, 3, 4], [2, 0, 4, 1], expected_shape=[2, 3, 4, 1])


def test_Reshape_zero_and_negative_dim():
    _check_reshape([2, 3, 4], [2, 0, 1, -1], expected_shape=[2, 3, 1, -1])
[ "numpy.random.random_sample", "numpy.allclose", "numpy.dtype", "numpy.array", "numpy.reshape" ]
[((250, 285), 'numpy.array', 'np.array', (['[3, 4, 0]'], {'dtype': 'np.int64'}), '([3, 4, 0], dtype=np.int64)\n', (258, 285), True, 'import numpy as np\n'), ((294, 321), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (304, 321), True, 'import numpy as np\n'), ((457, 479), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (468, 479), True, 'import numpy as np\n'), ((717, 739), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (728, 739), True, 'import numpy as np\n'), ((898, 933), 'numpy.array', 'np.array', (['[4, 2, 3]'], {'dtype': 'np.int64'}), '([4, 2, 3], dtype=np.int64)\n', (906, 933), True, 'import numpy as np\n'), ((942, 969), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (952, 969), True, 'import numpy as np\n'), ((1089, 1111), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1100, 1111), True, 'import numpy as np\n'), ((1271, 1306), 'numpy.array', 'np.array', (['[2, 4, 3]'], {'dtype': 'np.int64'}), '([2, 4, 3], dtype=np.int64)\n', (1279, 1306), True, 'import numpy as np\n'), ((1315, 1342), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (1325, 1342), True, 'import numpy as np\n'), ((1462, 1484), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1473, 1484), True, 'import numpy as np\n'), ((1637, 1670), 'numpy.array', 'np.array', (['[2, 12]'], {'dtype': 'np.int64'}), '([2, 12], dtype=np.int64)\n', (1645, 1670), True, 'import numpy as np\n'), ((1679, 1706), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (1689, 1706), True, 'import numpy as np\n'), ((1826, 1848), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (1837, 1848), True, 'import numpy as np\n'), ((2002, 2040), 'numpy.array', 'np.array', (['[2, 3, 2, 2]'], {'dtype': 'np.int64'}), '([2, 3, 2, 2], dtype=np.int64)\n', (2010, 2040), True, 'import 
numpy as np\n'), ((2049, 2076), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2059, 2076), True, 'import numpy as np\n'), ((2196, 2218), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2207, 2218), True, 'import numpy as np\n'), ((2366, 2396), 'numpy.array', 'np.array', (['[24]'], {'dtype': 'np.int64'}), '([24], dtype=np.int64)\n', (2374, 2396), True, 'import numpy as np\n'), ((2405, 2432), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2415, 2432), True, 'import numpy as np\n'), ((2552, 2574), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2563, 2574), True, 'import numpy as np\n'), ((2727, 2763), 'numpy.array', 'np.array', (['[2, -1, 2]'], {'dtype': 'np.int64'}), '([2, -1, 2], dtype=np.int64)\n', (2735, 2763), True, 'import numpy as np\n'), ((2772, 2799), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (2782, 2799), True, 'import numpy as np\n'), ((2919, 2941), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (2930, 2941), True, 'import numpy as np\n'), ((3104, 3143), 'numpy.array', 'np.array', (['[-1, 2, 3, 4]'], {'dtype': 'np.int64'}), '([-1, 2, 3, 4], dtype=np.int64)\n', (3112, 3143), True, 'import numpy as np\n'), ((3152, 3179), 'numpy.reshape', 'np.reshape', (['data', 'new_shape'], {}), '(data, new_shape)\n', (3162, 3179), True, 'import numpy as np\n'), ((3299, 3321), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (3310, 3321), True, 'import numpy as np\n'), ((3470, 3508), 'numpy.array', 'np.array', (['[2, 0, 4, 1]'], {'dtype': 'np.int64'}), '([2, 0, 4, 1], dtype=np.int64)\n', (3478, 3508), True, 'import numpy as np\n'), ((3517, 3547), 'numpy.reshape', 'np.reshape', (['data', '[2, 3, 4, 1]'], {}), '(data, [2, 3, 4, 1])\n', (3527, 3547), True, 'import numpy as np\n'), ((3667, 3689), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, 
y)\n', (3678, 3689), True, 'import numpy as np\n'), ((3851, 3890), 'numpy.array', 'np.array', (['[2, 0, 1, -1]'], {'dtype': 'np.int64'}), '([2, 0, 1, -1], dtype=np.int64)\n', (3859, 3890), True, 'import numpy as np\n'), ((3899, 3930), 'numpy.reshape', 'np.reshape', (['data', '[2, 3, 1, -1]'], {}), '(data, [2, 3, 1, -1])\n', (3909, 3930), True, 'import numpy as np\n'), ((4050, 4072), 'numpy.allclose', 'np.allclose', (['result', 'y'], {}), '(result, y)\n', (4061, 4072), True, 'import numpy as np\n'), ((175, 214), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (198, 214), True, 'import numpy as np\n'), ((524, 544), 'numpy.dtype', 'np.dtype', (['np.float32'], {}), '(np.float32)\n', (532, 544), True, 'import numpy as np\n'), ((567, 585), 'numpy.dtype', 'np.dtype', (['np.int64'], {}), '(np.int64)\n', (575, 585), True, 'import numpy as np\n'), ((823, 862), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (846, 862), True, 'import numpy as np\n'), ((1196, 1235), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1219, 1235), True, 'import numpy as np\n'), ((1562, 1601), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1585, 1601), True, 'import numpy as np\n'), ((1927, 1966), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (1950, 1966), True, 'import numpy as np\n'), ((2291, 2330), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (2314, 2330), True, 'import numpy as np\n'), ((2652, 2691), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (2675, 2691), True, 'import numpy as np\n'), ((3029, 3068), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), 
'(original_shape)\n', (3052, 3068), True, 'import numpy as np\n'), ((3395, 3434), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (3418, 3434), True, 'import numpy as np\n'), ((3776, 3815), 'numpy.random.random_sample', 'np.random.random_sample', (['original_shape'], {}), '(original_shape)\n', (3799, 3815), True, 'import numpy as np\n')]
#!/usr/bin/python
import sys, platform, os
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib import pyplot
import numpy as np
from matplotlib.patches import Ellipse
import camb
from camb import model, initialpower
from pysm.nominal import models
import healpy as hp
import site

# Force a white background for on-screen figures and saved files.
plt.rcParams["figure.facecolor"] = 'w'
plt.rcParams["axes.facecolor"] = 'w'
plt.rcParams["savefig.facecolor"] = 'w'


def plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
    """Draw an `nstd`-sigma confidence ellipse for a 2x2 covariance matrix.

    Args:
        cov: 2x2 covariance matrix.
        pos: (x, y) center of the ellipse.
        nstd: number of standard deviations the ellipse spans.
        ax: axes to draw into (defaults to the current axes).
        **kwargs: forwarded to `matplotlib.patches.Ellipse`.

    Returns:
        The `Ellipse` artist that was added to the axes.
    """
    def eigsorted(cov):
        # Eigen-decompose and sort by descending eigenvalue so the major
        # axis comes first.
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    if ax is None:
        ax = plt.gca()

    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))

    # Width and height are "full" widths, not radius
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)

    ax.add_artist(ellip)
    return ellip


def conf_legend():
    """Add a legend describing the 99/95/68% confidence-level colors."""
    conf = [99, 95, 68]
    concolor = ['orangered', 'red', 'maroon']
    Ep_handle = {}
    Ep_label = {}
    for i in range(0, 3):
        Ep_handle[i] = []
        Ep_label[i] = []
        Ep_handle[i] = [mpatches.Patch(color=concolor[i], alpha=0.6, linewidth=0)]
        Ep_label[i] = [u'${0}\%$ CL'.format(conf[i])]
    handles2 = []
    labels2 = []
    for i in range(0, 3):
        handles2.extend(Ep_handle[i])
        labels2.extend(Ep_label[i])
    # A second legend is added as an artist so it survives the later
    # plt.legend() call (which would otherwise replace it).
    legend22 = plt.legend(handles2, labels2, loc='center right',
                          bbox_to_anchor=[0.9325, 0.61], ncol=2,
                          prop={'size': 12}, numpoints=1)
    pyplot.gca().add_artist(legend22)
    plt.legend(loc='center right', bbox_to_anchor=[0.99, 0.27])


def quantum_levels_legend(colours, l):
    """Add a legend labelling the l=m quantum levels and an axion-mass tag.

    Args:
        colours: five colors, one per level.
        l: five quantum numbers used for the $l=m$ labels.
    """
    p_handle = {}
    p_label = {}
    for i in range(0, 5):
        p_handle[i] = []
        p_label[i] = []
        p_handle[i] = [mpatches.Patch(color=colours[i], alpha=1.0, linewidth=1.5)]
        p_label[i] = [u'$l=m={0}$'.format(l[i])]
    plt.text(13.11, 0.34, r'$\mu_{\rm ax}=10^{-11}eV$', fontsize=15,
             bbox={'facecolor': 'white', 'alpha': 1.0, 'pad': 12})
    handles = []
    labels = []
    for i in range(0, 5):
        handles.extend(p_handle[i])
        labels.extend(p_label[i])
    legend2 = plt.legend(handles, labels, loc='lower right', ncol=2,
                         prop={'size': 12}, numpoints=1)
    pyplot.gca().add_artist(legend2)


def regge_plane_plot(x1, y1, colours, sr_spins, sr_masses, sr_spin_up,
                     sr_spin_low, sr_mass_up, sr_mass_low):
    """Plot superradiance exclusion regions on the black-hole Regge plane
    together with measured black-hole mass/spin data points.
    """
    fig, ax = plt.subplots(figsize=(10, 6))
    # Fill from the highest level down so lower levels draw on top.
    for i in range(4, -1, -1):
        ax.fill_between(x1[i], y1[i], 1, facecolor=colours[i],
                        linewidth=2.0, zorder=2)
    labels = (r'$\rm Continuum\ Fit \ Black$' '\n' r'$\rm Hole \ Data$')
    ax.errorbar(sr_masses, sr_spins, yerr=[sr_spin_up, sr_spin_low],
                xerr=[sr_mass_up, sr_mass_low], fmt='o', color='k',
                label=labels)
    plt.legend(loc='lower right', prop={'size': 12})
    plt.xlabel(r'$\rm Black \ Hole \ Mass \ \left(\rm{M_{\rm BH}} \ / M_{\odot} \right)$',
               ha='center', va='center', size=20, labelpad=15)
    plt.ylabel(r'$\rm Black \ Hole \ Spin \ \left( a_{*}\right)$', size=21)
    plt.ylim(0, 1)
    plt.xlim(0, x1[4].max())


def regge_region_plot(fx, fy, blackholes, rt, xtem, ytem, dytem, dxtem,
                      example_mass, example_spin, example_spin_error,
                      example_mass_error, error_ellipse, bmhu):
    """Plot a single exclusion-region boundary, optionally with black-hole
    data points and their 1/2/3-sigma error ellipses.
    """
    plt.plot(fx, fy, linestyle='-', color='black')
    print(xtem)  # NOTE(review): debug output left in place deliberately
    plt.fill_between(fx, fy, 1, color='deepskyblue', alpha=0.3)
    plt.xlim(fx.min(), fx.max())
    if blackholes == True:
        for i in range(len(ytem)):
            plt.errorbar(xtem[i], ytem[i], yerr=dytem[i], xerr=dxtem[i],
                         fmt='o', color='k')
        plt.errorbar(example_mass, example_spin, yerr=example_spin_error,
                     xerr=example_mass_error, fmt='o', color='k')
        if error_ellipse == True:
            # Nested 1/2/3-sigma ellipses for each example point, assuming
            # uncorrelated mass and spin errors (diagonal covariance).
            for i in range(len(example_mass_error)):
                for nstd in (3, 2, 1):
                    plot_cov_ellipse(
                        [[(example_mass_error[i]) ** 2, 0],
                         [0, (example_spin_error[i]) ** 2]],
                        [example_mass[i], example_spin[i]],
                        nstd=nstd, alpha=0.5, facecolor='none', zorder=1,
                        edgecolor='black', linewidth=0.8)
    plt.xlabel(r'${\rm M_{BH}} \left( M_{\odot} \right)$', ha='center',
               va='center', size=20, labelpad=15)
    plt.ylabel(r'$ a_{*}$', size=21)
    plt.ylim(0, 1)
    plt.xlim(0, 70)


def intersection_plot(nx, ny, indx, indx2):
    """Plot the curve segments between consecutive intersection indices,
    marking the final intersection point in red.

    BUGFIX: the original read ``ny[4][indy2[3]]`` — `indy2` is not defined
    anywhere (NameError on every call).  `indx2[3]` is the index used for
    the matching x coordinate, so it is used for y as well.
    """
    plt.plot(nx[4][indx2[3]], ny[4][indx2[3]], 'ro')
    plt.plot(nx[0][0:indx[0]], ny[0][0:indx[0]])
    plt.plot(nx[1][indx2[0]:indx[1]], ny[1][indx2[0]:indx[1]])
    plt.plot(nx[2][indx2[1]:indx[2]], ny[2][indx2[1]:indx[2]])
    plt.plot(nx[3][indx2[2]:indx[3]], ny[3][indx2[2]:indx[3]])
    plt.plot(nx[4][indx2[3]:-1], ny[4][indx2[3]:-1])


def superradiance_rates_plot(alpha, rates):
    """Plot superradiance growth rates for the five l=m levels on a log
    y-axis against the dimensionless coupling alpha = mu_ax * r_g.
    """
    for i in range(0, 5):
        plt.plot(alpha, rates[i] * 2, linewidth=2)
    plt.yscale('log')
    plt.xlabel(r'$\mu_{\rm ax} r_g$', size=24, labelpad=4.15)
    plt.ylabel(r'$ \log_{10}(M_{\rm BH} \ IM(\omega))$', size=21, labelpad=2)
    plt.xlim(0, 2.55)
    plt.ylim(10 ** -16.5, 10 ** -6.5)
[ "matplotlib.pyplot.xlim", "matplotlib.pyplot.yscale", "matplotlib.pyplot.fill_between", "numpy.arctan2", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylim", "matplotlib.pyplot.legend", "matplotlib.patches.Patch", "matplotlib.pyplot.text", "numpy.linalg.eigh", "matplotlib.pyplot.gca", "matplotlib.pyplot.ylabel", "matplotlib.patches.Ellipse", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots", "matplotlib.pyplot.errorbar", "numpy.sqrt" ]
[((831, 897), 'matplotlib.patches.Ellipse', 'Ellipse', ([], {'xy': 'pos', 'width': 'width', 'height': 'height', 'angle': 'theta'}), '(xy=pos, width=width, height=height, angle=theta, **kwargs)\n', (838, 897), False, 'from matplotlib.patches import Ellipse\n'), ((1348, 1473), 'matplotlib.pyplot.legend', 'plt.legend', (['handles2', 'labels2'], {'loc': '"""center right"""', 'bbox_to_anchor': '[0.9325, 0.61]', 'ncol': '(2)', 'prop': "{'size': 12}", 'numpoints': '(1)'}), "(handles2, labels2, loc='center right', bbox_to_anchor=[0.9325, \n 0.61], ncol=2, prop={'size': 12}, numpoints=1)\n", (1358, 1473), True, 'import matplotlib.pyplot as plt\n'), ((1513, 1572), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center right"""', 'bbox_to_anchor': '[0.99, 0.27]'}), "(loc='center right', bbox_to_anchor=[0.99, 0.27])\n", (1523, 1572), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1942), 'matplotlib.pyplot.text', 'plt.text', (['(13.11)', '(0.34)', '"""$\\\\mu_{\\\\rm ax}=10^{-11}eV$"""'], {'fontsize': '(15)', 'bbox': "{'facecolor': 'white', 'alpha': 1.0, 'pad': 12}"}), "(13.11, 0.34, '$\\\\mu_{\\\\rm ax}=10^{-11}eV$', fontsize=15, bbox={\n 'facecolor': 'white', 'alpha': 1.0, 'pad': 12})\n", (1826, 1942), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2189), 'matplotlib.pyplot.legend', 'plt.legend', (['handles', 'labels'], {'loc': '"""lower right"""', 'ncol': '(2)', 'prop': "{'size': 12}", 'numpoints': '(1)'}), "(handles, labels, loc='lower right', ncol=2, prop={'size': 12},\n numpoints=1)\n", (2109, 2189), True, 'import matplotlib.pyplot as plt\n'), ((2344, 2373), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2356, 2373), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2728), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""', 'prop': "{'size': 12}"}), "(loc='lower right', prop={'size': 12})\n", (2690, 2728), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2881), 
'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\rm Black \\\\ Hole \\\\ Mass \\\\ \\\\left(\\\\rm{M_{\\\\rm BH}} \\\\ / M_{\\\\odot} \\\\right)$"""'], {'ha': '"""center"""', 'va': '"""center"""', 'size': '(20)', 'labelpad': '(15)'}), "(\n '$\\\\rm Black \\\\ Hole \\\\ Mass \\\\ \\\\left(\\\\rm{M_{\\\\rm BH}} \\\\ / M_{\\\\odot} \\\\right)$'\n , ha='center', va='center', size=20, labelpad=15)\n", (2738, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2862, 2938), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$\\\\rm Black \\\\ Hole \\\\ Spin \\\\ \\\\left( a_{*}\\\\right)$"""'], {'size': '(21)'}), "('$\\\\rm Black \\\\ Hole \\\\ Spin \\\\ \\\\left( a_{*}\\\\right)$', size=21)\n", (2872, 2938), True, 'import matplotlib.pyplot as plt\n'), ((2935, 2949), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (2943, 2949), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3172), 'matplotlib.pyplot.plot', 'plt.plot', (['fx', 'fy'], {'linestyle': '"""-"""', 'color': '"""black"""'}), "(fx, fy, linestyle='-', color='black')\n", (3134, 3172), True, 'import matplotlib.pyplot as plt\n'), ((3184, 3243), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['fx', 'fy', '(1)'], {'color': '"""deepskyblue"""', 'alpha': '(0.3)'}), "(fx, fy, 1, color='deepskyblue', alpha=0.3)\n", (3200, 3243), True, 'import matplotlib.pyplot as plt\n'), ((4190, 4300), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""${\\\\rm M_{BH}} \\\\left( M_{\\\\odot} \\\\right)$"""'], {'ha': '"""center"""', 'va': '"""center"""', 'size': '(20)', 'labelpad': '(15)'}), "('${\\\\rm M_{BH}} \\\\left( M_{\\\\odot} \\\\right)$', ha='center', va=\n 'center', size=20, labelpad=15)\n", (4200, 4300), True, 'import matplotlib.pyplot as plt\n'), ((4292, 4324), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ a_{*}$"""'], {'size': '(21)'}), "('$ a_{*}$', size=21)\n", (4302, 4324), True, 'import matplotlib.pyplot as plt\n'), ((4327, 4341), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], 
{}), '(0, 1)\n', (4335, 4341), True, 'import matplotlib.pyplot as plt\n'), ((4342, 4357), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(70)'], {}), '(0, 70)\n', (4350, 4357), True, 'import matplotlib.pyplot as plt\n'), ((4403, 4451), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[4][indx2[3]]', 'ny[4][indy2[3]]', '"""ro"""'], {}), "(nx[4][indx2[3]], ny[4][indy2[3]], 'ro')\n", (4411, 4451), True, 'import matplotlib.pyplot as plt\n'), ((4453, 4497), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[0][0:indx[0]]', 'ny[0][0:indx[0]]'], {}), '(nx[0][0:indx[0]], ny[0][0:indx[0]])\n', (4461, 4497), True, 'import matplotlib.pyplot as plt\n'), ((4498, 4556), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[1][indx2[0]:indx[1]]', 'ny[1][indx2[0]:indx[1]]'], {}), '(nx[1][indx2[0]:indx[1]], ny[1][indx2[0]:indx[1]])\n', (4506, 4556), True, 'import matplotlib.pyplot as plt\n'), ((4557, 4615), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[2][indx2[1]:indx[2]]', 'ny[2][indx2[1]:indx[2]]'], {}), '(nx[2][indx2[1]:indx[2]], ny[2][indx2[1]:indx[2]])\n', (4565, 4615), True, 'import matplotlib.pyplot as plt\n'), ((4616, 4674), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[3][indx2[2]:indx[3]]', 'ny[3][indx2[2]:indx[3]]'], {}), '(nx[3][indx2[2]:indx[3]], ny[3][indx2[2]:indx[3]])\n', (4624, 4674), True, 'import matplotlib.pyplot as plt\n'), ((4675, 4723), 'matplotlib.pyplot.plot', 'plt.plot', (['nx[4][indx2[3]:-1]', 'ny[4][indx2[3]:-1]'], {}), '(nx[4][indx2[3]:-1], ny[4][indx2[3]:-1])\n', (4683, 4723), True, 'import matplotlib.pyplot as plt\n'), ((4835, 4852), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (4845, 4852), True, 'import matplotlib.pyplot as plt\n'), ((4854, 4913), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\mu_{\\\\rm ax} r_g$"""'], {'size': '(24)', 'labelpad': '(4.15)'}), "('$\\\\mu_{\\\\rm ax} r_g$', size=24, labelpad=4.15)\n", (4864, 4913), True, 'import matplotlib.pyplot as plt\n'), ((4913, 4989), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""$ 
\\\\log_{10}(M_{\\\\rm BH} \\\\ IM(\\\\omega))$"""'], {'size': '(21)', 'labelpad': '(2)'}), "('$ \\\\log_{10}(M_{\\\\rm BH} \\\\ IM(\\\\omega))$', size=21, labelpad=2)\n", (4923, 4989), True, 'import matplotlib.pyplot as plt\n'), ((4986, 5003), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(2.55)'], {}), '(0, 2.55)\n', (4994, 5003), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5037), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(10 ** -16.5)', '(10 ** -6.5)'], {}), '(10 ** -16.5, 10 ** -6.5)\n', (5012, 5037), True, 'import matplotlib.pyplot as plt\n'), ((528, 547), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (542, 547), True, 'import numpy as np\n'), ((639, 648), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (646, 648), True, 'import matplotlib.pyplot as plt\n'), ((699, 728), 'numpy.arctan2', 'np.arctan2', (['*vecs[:, 0][::-1]'], {}), '(*vecs[:, 0][::-1])\n', (709, 728), True, 'import numpy as np\n'), ((808, 821), 'numpy.sqrt', 'np.sqrt', (['vals'], {}), '(vals)\n', (815, 821), True, 'import numpy as np\n'), ((3410, 3525), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['example_mass', 'example_spin'], {'yerr': 'example_spin_error', 'xerr': 'example_mass_error', 'fmt': '"""o"""', 'color': '"""k"""'}), "(example_mass, example_spin, yerr=example_spin_error, xerr=\n example_mass_error, fmt='o', color='k')\n", (3422, 3525), True, 'import matplotlib.pyplot as plt\n'), ((4795, 4837), 'matplotlib.pyplot.plot', 'plt.plot', (['alpha', '(rates[i] * 2)'], {'linewidth': '(2)'}), '(alpha, rates[i] * 2, linewidth=2)\n', (4803, 4837), True, 'import matplotlib.pyplot as plt\n'), ((1115, 1172), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'concolor[i]', 'alpha': '(0.6)', 'linewidth': '(0)'}), '(color=concolor[i], alpha=0.6, linewidth=0)\n', (1129, 1172), True, 'import matplotlib.patches as mpatches\n'), ((1478, 1490), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (1488, 1490), False, 'from matplotlib import pyplot\n'), 
((1713, 1771), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': 'colours[i]', 'alpha': '(1.0)', 'linewidth': '(1.5)'}), '(color=colours[i], alpha=1.0, linewidth=1.5)\n', (1727, 1771), True, 'import matplotlib.patches as mpatches\n'), ((2194, 2206), 'matplotlib.pyplot.gca', 'pyplot.gca', ([], {}), '()\n', (2204, 2206), False, 'from matplotlib import pyplot\n'), ((3328, 3413), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['xtem[i]', 'ytem[i]'], {'yerr': 'dytem[i]', 'xerr': 'dxtem[i]', 'fmt': '"""o"""', 'color': '"""k"""'}), "(xtem[i], ytem[i], yerr=dytem[i], xerr=dxtem[i], fmt='o', color='k'\n )\n", (3340, 3413), True, 'import matplotlib.pyplot as plt\n')]
from io import StringIO from tests.src.lsf_config import Config class TestBool: def test_empty_returns_false(self): config = Config({}) assert not config def test_non_empty_returns_true(self): config = Config({1: 1}) assert config class TestDefaultConstructor: def test_no_options_given_returns_empty(self): config = Config() assert not config class TestContains: def test_item_not_in_config(self): stream = StringIO("key: 'foo'") item = "bar" config = Config.from_stream(stream) assert item not in config def test_item_in_config(self): stream = StringIO("key: 'foo'") item = "key" config = Config.from_stream(stream) assert item in config def test_only_keys_are_tested_for_membership(self): stream = StringIO("key: 'foo'") item = "foo" config = Config.from_stream(stream) assert item not in config class TestConcatenateParams: def test_str_returns_str(self): params = "-q queue" actual = Config.concatenate_params(params) expected = params assert actual == expected def test_empty_list_returns_empty_str(self): params = [] actual = Config.concatenate_params(params) expected = "" assert actual == expected def test_list_returns_str(self): params = ["-q queue", "-P project"] actual = Config.concatenate_params(params) expected = "-q queue -P project" assert actual == expected class TestGet: def test_get_empty_returns_default(self): stream = StringIO("") config = Config.from_stream(stream) key = "key" actual = config.get(key) assert actual is None def test_get_key_in_yaml(self): stream = StringIO("key: 'foo'") key = "key" config = Config.from_stream(stream) actual = config.get(key) expected = "foo" assert actual == expected def test_get_key_not_in_yaml_returns_default(self): stream = StringIO("key: 'foo'") key = "bar" default = "default" config = Config.from_stream(stream) actual = config.get(key, default) expected = default assert actual == expected class TestDefaultParams: def test_no_default_returns_empty(self): stream = StringIO("key: 'foo'") config = Config.from_stream(stream) 
actual = config.default_params() expected = "" assert actual == expected def test_default_present_returns_params(self): stream = StringIO("__default__: '-q foo'") config = Config.from_stream(stream) actual = config.default_params() expected = "-q foo" assert actual == expected def test_default_present_params_are_list_returns_params(self): stream = StringIO("__default__:\n - '-q foo'\n - '-P project'") config = Config.from_stream(stream) actual = config.default_params() expected = "-q foo -P project" assert actual == expected def test_default_present_without_underscores_returns_empty(self): stream = StringIO("default:\n - '-q foo'\n - '-P project'") config = Config.from_stream(stream) actual = config.default_params() expected = "" assert actual == expected class TestParamsForRule: def test_no_default_or_rule_returns_empty(self): stream = StringIO("key: 'foo'") config = Config.from_stream(stream) rulename = "a" actual = config.params_for_rule(rulename) expected = "" assert actual == expected def test_default_present_but_not_rule_returns_default_params(self): stream = StringIO("__default__: '-q foo'") config = Config.from_stream(stream) rulename = "a" actual = config.params_for_rule(rulename) expected = "-q foo" assert actual == expected def test_rule_and_default_present_returns_default_and_rule_params(self): stream = StringIO("__default__: '-q foo'\nrule:\n - '-P project'\n") config = Config.from_stream(stream) rulename = "rule" actual = config.params_for_rule(rulename) expected = "-q foo -P project" assert actual == expected def test_rule_present_but_not_default_returns_rule_params(self): stream = StringIO("rule:\n - '-P project'\n - '-q bar'") config = Config.from_stream(stream) rulename = "rule" actual = config.params_for_rule(rulename) expected = "-P project -q bar" assert actual == expected def test_rule_and_default_have_same_params_rule_params_take_precedent(self): stream = StringIO( "__default__: '-q foo'\nrule:\n - '-P project'\n - '-q bar'" ) config = 
Config.from_stream(stream) rulename = "rule" actual = config.params_for_rule(rulename) expected = "-q bar -P project" assert actual == expected def test_args_to_dict(): args = '-W 0:01 -W 0:02 -J "test name"' actual = Config.args_to_dict(args) expected = {"-W": "0:02", "-J": "test name"} assert actual == expected
[ "tests.src.lsf_config.Config.args_to_dict", "io.StringIO", "tests.src.lsf_config.Config.from_stream", "tests.src.lsf_config.Config", "tests.src.lsf_config.Config.concatenate_params" ]
[((5180, 5205), 'tests.src.lsf_config.Config.args_to_dict', 'Config.args_to_dict', (['args'], {}), '(args)\n', (5199, 5205), False, 'from tests.src.lsf_config import Config\n'), ((139, 149), 'tests.src.lsf_config.Config', 'Config', (['{}'], {}), '({})\n', (145, 149), False, 'from tests.src.lsf_config import Config\n'), ((238, 254), 'tests.src.lsf_config.Config', 'Config', (['{(1): 1}'], {}), '({(1): 1})\n', (244, 254), False, 'from tests.src.lsf_config import Config\n'), ((376, 384), 'tests.src.lsf_config.Config', 'Config', ([], {}), '()\n', (382, 384), False, 'from tests.src.lsf_config import Config\n'), ((490, 512), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (498, 512), False, 'from io import StringIO\n'), ((551, 577), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (569, 577), False, 'from tests.src.lsf_config import Config\n'), ((666, 688), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (674, 688), False, 'from io import StringIO\n'), ((727, 753), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (745, 753), False, 'from tests.src.lsf_config import Config\n'), ((859, 881), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (867, 881), False, 'from io import StringIO\n'), ((920, 946), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (938, 946), False, 'from tests.src.lsf_config import Config\n'), ((1095, 1128), 'tests.src.lsf_config.Config.concatenate_params', 'Config.concatenate_params', (['params'], {}), '(params)\n', (1120, 1128), False, 'from tests.src.lsf_config import Config\n'), ((1278, 1311), 'tests.src.lsf_config.Config.concatenate_params', 'Config.concatenate_params', (['params'], {}), '(params)\n', (1303, 1311), False, 'from tests.src.lsf_config import Config\n'), ((1469, 1502), 
'tests.src.lsf_config.Config.concatenate_params', 'Config.concatenate_params', (['params'], {}), '(params)\n', (1494, 1502), False, 'from tests.src.lsf_config import Config\n'), ((1659, 1671), 'io.StringIO', 'StringIO', (['""""""'], {}), "('')\n", (1667, 1671), False, 'from io import StringIO\n'), ((1689, 1715), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (1707, 1715), False, 'from tests.src.lsf_config import Config\n'), ((1855, 1877), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (1863, 1877), False, 'from io import StringIO\n'), ((1915, 1941), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (1933, 1941), False, 'from tests.src.lsf_config import Config\n'), ((2110, 2132), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (2118, 2132), False, 'from io import StringIO\n'), ((2198, 2224), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (2216, 2224), False, 'from tests.src.lsf_config import Config\n'), ((2419, 2441), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (2427, 2441), False, 'from io import StringIO\n'), ((2459, 2485), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (2477, 2485), False, 'from tests.src.lsf_config import Config\n'), ((2654, 2687), 'io.StringIO', 'StringIO', (['"""__default__: \'-q foo\'"""'], {}), '("__default__: \'-q foo\'")\n', (2662, 2687), False, 'from io import StringIO\n'), ((2705, 2731), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (2723, 2731), False, 'from tests.src.lsf_config import Config\n'), ((2922, 2980), 'io.StringIO', 'StringIO', (['"""__default__:\n - \'-q foo\'\n - \'-P project\'"""'], {}), '("""__default__:\n - \'-q foo\'\n - \'-P project\'""")\n', (2930, 2980), False, 'from io import 
StringIO\n'), ((2996, 3022), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (3014, 3022), False, 'from tests.src.lsf_config import Config\n'), ((3227, 3281), 'io.StringIO', 'StringIO', (['"""default:\n - \'-q foo\'\n - \'-P project\'"""'], {}), '("""default:\n - \'-q foo\'\n - \'-P project\'""")\n', (3235, 3281), False, 'from io import StringIO\n'), ((3297, 3323), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (3315, 3323), False, 'from tests.src.lsf_config import Config\n'), ((3520, 3542), 'io.StringIO', 'StringIO', (['"""key: \'foo\'"""'], {}), '("key: \'foo\'")\n', (3528, 3542), False, 'from io import StringIO\n'), ((3560, 3586), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (3578, 3586), False, 'from tests.src.lsf_config import Config\n'), ((3808, 3841), 'io.StringIO', 'StringIO', (['"""__default__: \'-q foo\'"""'], {}), '("__default__: \'-q foo\'")\n', (3816, 3841), False, 'from io import StringIO\n'), ((3859, 3885), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (3877, 3885), False, 'from tests.src.lsf_config import Config\n'), ((4118, 4179), 'io.StringIO', 'StringIO', (['"""__default__: \'-q foo\'\nrule:\n - \'-P project\'\n"""'], {}), '("""__default__: \'-q foo\'\nrule:\n - \'-P project\'\n""")\n', (4126, 4179), False, 'from io import StringIO\n'), ((4196, 4222), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (4214, 4222), False, 'from tests.src.lsf_config import Config\n'), ((4461, 4512), 'io.StringIO', 'StringIO', (['"""rule:\n - \'-P project\'\n - \'-q bar\'"""'], {}), '("""rule:\n - \'-P project\'\n - \'-q bar\'""")\n', (4469, 4512), False, 'from io import StringIO\n'), ((4528, 4554), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (4546, 4554), False, 'from 
tests.src.lsf_config import Config\n'), ((4805, 4878), 'io.StringIO', 'StringIO', (['"""__default__: \'-q foo\'\nrule:\n - \'-P project\'\n - \'-q bar\'"""'], {}), '("""__default__: \'-q foo\'\nrule:\n - \'-P project\'\n - \'-q bar\'""")\n', (4813, 4878), False, 'from io import StringIO\n'), ((4917, 4943), 'tests.src.lsf_config.Config.from_stream', 'Config.from_stream', (['stream'], {}), '(stream)\n', (4935, 4943), False, 'from tests.src.lsf_config import Config\n')]
import numpy as np from base.RecommenderUtils import check_matrix from base.BaseRecommender import RecommenderSystem from tqdm import tqdm import models.MF.Cython.MF_RMSE as mf class IALS_numpy(RecommenderSystem): ''' binary Alternating Least Squares model (or Weighed Regularized Matrix Factorization) Reference: Collaborative Filtering for binary Feedback Datasets (Hu et al., 2008) Factorization model for binary feedback. First, splits the feedback matrix R as the element-wise a Preference matrix P and a Confidence matrix C. Then computes the decomposition of them into the dot product of two matrices X and Y of latent factors. X represent the user latent factors, Y the item latent factors. The model is learned by solving the following regularized Least-squares objective function with Stochastic Gradient Descent \operatornamewithlimits{argmin}\limits_{x*,y*}\frac{1}{2}\sum_{i,j}{c_{ij}(p_{ij}-x_i^T y_j) + \lambda(\sum_{i}{||x_i||^2} + \sum_{j}{||y_j||^2})} ''' # TODO: Add support for multiple confidence scaling functions (e.g. 
linear and log scaling) def __init__(self, num_factors=50, reg=0.011, iters=30, scaling='log', alpha=40, epsilon=1.0, init_mean=0.0, init_std=0.1, rnd_seed=42): super(IALS_numpy, self).__init__() assert scaling in ['linear', 'log'], 'Unsupported scaling: {}'.format(scaling) self.num_factors = num_factors self.reg = reg self.iters = iters self.scaling = scaling self.alpha = alpha self.epsilon = epsilon self.init_mean = init_mean self.init_std = init_std self.rnd_seed = rnd_seed self.parameters = "num_factors={}, reg={}, iters={}, scaling={}, alpha={}, episilon={}, init_mean={}, " \ "init_std={}, rnd_seed={}".format( self.num_factors, self.reg, self.iters, self.scaling, self.alpha, self.epsilon, self.init_mean, self.init_std, self.rnd_seed) def __str__(self): return "WRMF-iALS Implementation" def _linear_scaling(self, R): C = R.copy().tocsr() C.data *= self.alpha C.data += 1.0 return C def _log_scaling(self, R): C = R.copy().tocsr() C.data = 1.0 + self.alpha * np.log(1.0 + C.data / self.epsilon) return C def fit(self, R): self.dataset = R # compute the confidence matrix if self.scaling == 'linear': C = self._linear_scaling(R) else: C = self._log_scaling(R) Ct = C.T.tocsr() M, N = R.shape # set the seed np.random.seed(self.rnd_seed) # initialize the latent factors self.X = np.random.normal(self.init_mean, self.init_std, size=(M, self.num_factors)) self.Y = np.random.normal(self.init_mean, self.init_std, size=(N, self.num_factors)) for it in tqdm(range(self.iters)): self.X = self._lsq_solver_fast(C, self.X, self.Y, self.reg) self.Y = self._lsq_solver_fast(Ct, self.Y, self.X, self.reg) def recommend(self, playlist_id, n=None, exclude_seen=True,export= False): scores = np.dot(self.X[playlist_id], self.Y.T) ranking = scores.argsort()[::-1] # rank items if exclude_seen: ranking = self._filter_seen(playlist_id, ranking) if not export: return ranking[:n] elif export: return str(ranking[:n]).strip("[]") def _lsq_solver(self, C, X, Y, reg): # precompute YtY rows, factors = 
X.shape YtY = np.dot(Y.T, Y) for i in range(rows): # accumulate YtCiY + reg*I in A A = YtY + reg * np.eye(factors) # accumulate Yt*Ci*p(i) in b b = np.zeros(factors) for j, cij in self._nonzeros(C, i): vj = Y[j] A += (cij - 1.0) * np.outer(vj, vj) b += cij * vj X[i] = np.linalg.solve(A, b) return X def _lsq_solver_fast(self, C, X, Y, reg): # precompute YtY rows, factors = X.shape YtY = np.dot(Y.T, Y) for i in range(rows): # accumulate YtCiY + reg*I in A A = YtY + reg * np.eye(factors) start, end = C.indptr[i], C.indptr[i + 1] j = C.indices[start:end] # indices of the non-zeros in Ci ci = C.data[start:end] # non-zeros in Ci Yj = Y[j] # only the factors with non-zero confidence # compute Yt(Ci-I)Y aux = np.dot(Yj.T, np.diag(ci - 1.0)) A += np.dot(aux, Yj) # compute YtCi b = np.dot(Yj.T, ci) X[i] = np.linalg.solve(A, b) return X def _nonzeros(self, R, row): for i in range(R.indptr[row], R.indptr[row + 1]): yield (R.indices[i], R.data[i]) def _get_user_ratings(self, playlist_id): self.dataset = check_matrix(self.dataset, "csr") return self.dataset[playlist_id] def _get_item_ratings(self, track_id): self.dataset = check_matrix(self.dataset, "csc") return self.dataset[:, track_id] def _filter_seen(self, playlist_id, ranking): user_profile = self._get_user_ratings(playlist_id) seen = user_profile.indices unseen_mask = np.in1d(ranking, seen, assume_unique=True, invert=True) return ranking[unseen_mask]
[ "base.RecommenderUtils.check_matrix", "numpy.outer", "numpy.random.seed", "numpy.log", "numpy.eye", "numpy.zeros", "numpy.random.normal", "numpy.dot", "numpy.linalg.solve", "numpy.diag", "numpy.in1d" ]
[((2790, 2819), 'numpy.random.seed', 'np.random.seed', (['self.rnd_seed'], {}), '(self.rnd_seed)\n', (2804, 2819), True, 'import numpy as np\n'), ((2877, 2952), 'numpy.random.normal', 'np.random.normal', (['self.init_mean', 'self.init_std'], {'size': '(M, self.num_factors)'}), '(self.init_mean, self.init_std, size=(M, self.num_factors))\n', (2893, 2952), True, 'import numpy as np\n'), ((2970, 3045), 'numpy.random.normal', 'np.random.normal', (['self.init_mean', 'self.init_std'], {'size': '(N, self.num_factors)'}), '(self.init_mean, self.init_std, size=(N, self.num_factors))\n', (2986, 3045), True, 'import numpy as np\n'), ((3332, 3369), 'numpy.dot', 'np.dot', (['self.X[playlist_id]', 'self.Y.T'], {}), '(self.X[playlist_id], self.Y.T)\n', (3338, 3369), True, 'import numpy as np\n'), ((3755, 3769), 'numpy.dot', 'np.dot', (['Y.T', 'Y'], {}), '(Y.T, Y)\n', (3761, 3769), True, 'import numpy as np\n'), ((4295, 4309), 'numpy.dot', 'np.dot', (['Y.T', 'Y'], {}), '(Y.T, Y)\n', (4301, 4309), True, 'import numpy as np\n'), ((5117, 5150), 'base.RecommenderUtils.check_matrix', 'check_matrix', (['self.dataset', '"""csr"""'], {}), "(self.dataset, 'csr')\n", (5129, 5150), False, 'from base.RecommenderUtils import check_matrix\n'), ((5259, 5292), 'base.RecommenderUtils.check_matrix', 'check_matrix', (['self.dataset', '"""csc"""'], {}), "(self.dataset, 'csc')\n", (5271, 5292), False, 'from base.RecommenderUtils import check_matrix\n'), ((5502, 5557), 'numpy.in1d', 'np.in1d', (['ranking', 'seen'], {'assume_unique': '(True)', 'invert': '(True)'}), '(ranking, seen, assume_unique=True, invert=True)\n', (5509, 5557), True, 'import numpy as np\n'), ((3945, 3962), 'numpy.zeros', 'np.zeros', (['factors'], {}), '(factors)\n', (3953, 3962), True, 'import numpy as np\n'), ((4138, 4159), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (4153, 4159), True, 'import numpy as np\n'), ((4776, 4791), 'numpy.dot', 'np.dot', (['aux', 'Yj'], {}), '(aux, Yj)\n', (4782, 4791), True, 
'import numpy as np\n'), ((4835, 4851), 'numpy.dot', 'np.dot', (['Yj.T', 'ci'], {}), '(Yj.T, ci)\n', (4841, 4851), True, 'import numpy as np\n'), ((4872, 4893), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (4887, 4893), True, 'import numpy as np\n'), ((2442, 2477), 'numpy.log', 'np.log', (['(1.0 + C.data / self.epsilon)'], {}), '(1.0 + C.data / self.epsilon)\n', (2448, 2477), True, 'import numpy as np\n'), ((4740, 4757), 'numpy.diag', 'np.diag', (['(ci - 1.0)'], {}), '(ci - 1.0)\n', (4747, 4757), True, 'import numpy as np\n'), ((3872, 3887), 'numpy.eye', 'np.eye', (['factors'], {}), '(factors)\n', (3878, 3887), True, 'import numpy as np\n'), ((4072, 4088), 'numpy.outer', 'np.outer', (['vj', 'vj'], {}), '(vj, vj)\n', (4080, 4088), True, 'import numpy as np\n'), ((4413, 4428), 'numpy.eye', 'np.eye', (['factors'], {}), '(factors)\n', (4419, 4428), True, 'import numpy as np\n')]
import os, unittest, json

from app import create_app
from instance.config import app_config
from app.connect import QuestionerDB

app = create_app(app_config['testing'])


class UserTestCases(unittest.TestCase):
    """Integration tests for the signup and login endpoints (v2 API).

    Each test uses a fresh test client created in ``setUp``; the test
    database tables are dropped in ``tearDown``.
    """

    def setUp(self):
        """Create a test app/client and the request payload fixtures."""
        self.app = create_app(app_config['testing'])
        self.client = self.app.test_client()
        self.app_context = self.app
        self.app.testing = True
        # Valid signup payload.
        self.user = {
            "firstname": "StandUps",
            "lastname": "Sky",
            "othername": "Tea",
            "email": "<EMAIL>",
            "phone_number": "123456789",
            "is_admin": "True",
            "username": "Scupper",
            "password": "<PASSWORD>"
        }
        # Incomplete payload (missing required fields).
        self.user1 = {
            "firstname": "StandUps"
        }
        # Payload with an invalid phone number.
        self.user2 = {
            "firstname": "Tom",
            "lastname": "Hunter",
            "othername": "Caps",
            "email": "<EMAIL>",
            "phone_number": "123498rttt",
            "is_admin": "False",
            "username": "Awesome",
            "password": "<PASSWORD>"
        }
        # Payload with an invalid email address.
        self.user3 = {
            "firstname": "Truthy",
            "lastname": "Stoway",
            "othername": "Birth",
            "email": "t<EMAIL>",
            "phone_number": "1234534",
            "is_admin": "True",
            "username": "Scupperdf",
            "password": "<PASSWORD>"
        }
        # Payload with a blank first name.
        self.user6 = {
            "firstname": " ",
            "lastname": "Stoway",
            "othername": "Birth",
            "email": "<EMAIL>",
            "phone_number": "1234534",
            "is_admin": "True",
            "username": "Scupperdf",
            "password": "<PASSWORD>"
        }
        # Login fixtures: valid, unknown user, blank username, blank
        # password, and a second unknown user respectively.
        self.login = {
            "username": "Scupper",
            "password": "<PASSWORD>"
        }
        self.login1 = {
            "username": "Champ",
            "password": "<PASSWORD>"
        }
        self.login2 = {
            "username": "",
            "password": "<PASSWORD>"
        }
        self.login3 = {
            "username": "Scuppersds",
            "password": ""
        }
        self.login4 = {
            "username": "Kijana",
            "password": "<PASSWORD>"
        }

    # NOTE: a broken, dead `tear_down` method (it called an undefined
    # `destroy_tests()` and was never invoked by unittest, which only calls
    # `tearDown`) was removed; cleanup happens in `tearDown` below.
    # Several fully commented-out duplicate tests (username-exists and
    # successful-login) were also removed.

    def test_user_signup(self):
        """A valid signup returns 201 and echoes the registered email."""
        check = self.client.post(
            "/api/v2/signup", data=json.dumps(self.user), content_type="application/json")
        result = json.loads(check.data.decode())
        self.assertEqual(check.status_code, 201)
        self.assertEqual(result[1].get("status"), 201)
        self.assertIn("<EMAIL>", result[0].get('email'))

    def test_validate_phone_number(self):
        """A non-numeric phone number is rejected with a 400."""
        response = self.client.post(
            '/api/v2/signup', data=json.dumps(self.user2), content_type="application/json")
        result = json.loads(response.data)
        self.assertTrue(result["message"], "Please input valid phone number")
        self.assertTrue(response.status_code, 400)

    def test_validate_email(self):
        """A malformed email address is rejected with a 400."""
        response = self.client.post(
            '/api/v2/signup', data=json.dumps(self.user3), content_type="application/json")
        result = json.loads(response.data)
        self.assertEqual(result["message"], "Invalid email")
        self.assertEqual(response.status_code, 400)

    def test_user_exists(self):
        """Logging in with an unknown username returns 404."""
        response1 = self.client.post(
            "/api/v2/login", data=json.dumps(self.login1), content_type="application/json")
        result1 = json.loads(response1.data.decode())
        self.assertEqual(response1.status_code, 404)
        self.assertEqual(result1["status"], 404)
        self.assertEqual(result1["message"], "User does not exist")

    def test_username_required(self):
        """Logging in with a blank username returns 400."""
        response2 = self.client.post(
            "/api/v2/login", data=json.dumps(self.login2), content_type="application/json")
        result2 = json.loads(response2.data.decode())
        self.assertEqual(response2.status_code, 400)
        self.assertEqual(result2["status"], 400)
        self.assertEqual(result2["message"], "Username is required")

    def test_password_required(self):
        """Logging in with a blank password returns 400."""
        response3 = self.client.post(
            "/api/v2/login", data=json.dumps(self.login3), content_type="application/json")
        result3 = json.loads(response3.data.decode())
        self.assertEqual(response3.status_code, 400)
        self.assertEqual(result3["status"], 400)
        self.assertEqual(result3["message"], "Password is required")

    def tearDown(self):
        """Drop the test database tables after each test."""
        QuestionerDB.drop_tables()


if __name__ == "__main__":
    unittest.main()
[ "unittest.main", "json.loads", "app.connect.QuestionerDB.drop_tables", "json.dumps", "app.create_app" ]
[((137, 170), 'app.create_app', 'create_app', (["app_config['testing']"], {}), "(app_config['testing'])\n", (147, 170), False, 'from app import create_app\n'), ((5796, 5811), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5809, 5811), False, 'import os, unittest, json\n'), ((320, 353), 'app.create_app', 'create_app', (["app_config['testing']"], {}), "(app_config['testing'])\n", (330, 353), False, 'from app import create_app\n'), ((3043, 3068), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (3053, 3068), False, 'import os, unittest, json\n'), ((3435, 3460), 'json.loads', 'json.loads', (['response.data'], {}), '(response.data)\n', (3445, 3460), False, 'import os, unittest, json\n'), ((5736, 5762), 'app.connect.QuestionerDB.drop_tables', 'QuestionerDB.drop_tables', ([], {}), '()\n', (5760, 5762), False, 'from app.connect import QuestionerDB\n'), ((2554, 2575), 'json.dumps', 'json.dumps', (['self.user'], {}), '(self.user)\n', (2564, 2575), False, 'import os, unittest, json\n'), ((2969, 2991), 'json.dumps', 'json.dumps', (['self.user2'], {}), '(self.user2)\n', (2979, 2991), False, 'import os, unittest, json\n'), ((3361, 3383), 'json.dumps', 'json.dumps', (['self.user3'], {}), '(self.user3)\n', (3371, 3383), False, 'import os, unittest, json\n'), ((4517, 4540), 'json.dumps', 'json.dumps', (['self.login1'], {}), '(self.login1)\n', (4527, 4540), False, 'import os, unittest, json\n'), ((4939, 4962), 'json.dumps', 'json.dumps', (['self.login2'], {}), '(self.login2)\n', (4949, 4962), False, 'import os, unittest, json\n'), ((5366, 5389), 'json.dumps', 'json.dumps', (['self.login3'], {}), '(self.login3)\n', (5376, 5389), False, 'import os, unittest, json\n')]
import numpy as np
import heapq
import math
import time
import pygame


class Node:
    """Search-tree node for the per-robot A* planner.

    `state` is (x, y, theta); `cost` is g + h, `costToCome` is g alone.
    `collision` holds the collision set this node belongs to (or None).
    """

    def __init__(self, state=None, cost=float('inf'), costToCome=float('inf'), parent=None, collision=None):
        self.state = state
        self.parent = parent
        self.cost = cost
        self.costToCome = costToCome
        self.collision = collision


class CoupledPlannerNode:
    """Node type used by the coupled (joint-configuration) planner."""

    def __init__(self, state=None, collision=None, parent=None, f_score=float('inf'), cost_to_come=float('inf')):
        self.state = state
        self.parent = parent
        self.collision = collision
        self.f_score = f_score
        self.cost_to_come = cost_to_come


class CoupledNode:
    """Node type carrying separate f/g/h scores for a coupled search."""

    def __init__(self, state=None, collision=None, parent=None, f_score=None, cost_to_go=None, cost_to_come=None):
        self.state = state
        self.parent = parent
        self.collision = collision
        self.f_score = f_score
        self.cost_to_go = cost_to_go
        self.cost_to_come = cost_to_come


def pointInValidWorkspace(point, res, radiusClearance, scale):
    """Return True if `point` lies outside every clearance-inflated obstacle.

    Each obstacle is an axis-aligned rectangle given as
    (x-corners, y-corners) in map units; corners are scaled by
    `scale / res` and inflated by `radiusClearance / res` on every side.
    np.float32 is kept deliberately so the boundary arithmetic matches the
    original implementation exactly.
    """
    x, y = point
    c = radiusClearance / res
    # (X corners, Y corners) of the ten obstacle rectangles; X = [xmin,
    # xmax, xmax, xmin], Y = [ymin, ymin, ymax, ymax].
    rectangles = [
        ([8, 12.5, 12.5, 8], [9, 9, 9.5, 9.5]),                # plus, horizontal bar
        ([10, 10.5, 10.5, 10], [7, 7, 11.5, 11.5]),            # plus, vertical bar
        ([4, 4.25, 4.25, 4], [8, 8, 10.5, 10.5]),              # left |
        ([1.5, 3, 3, 1.5], [9, 9, 9.25, 9.25]),                # left -
        ([16, 16.25, 16.25, 16], [8, 8, 10.5, 10.5]),          # right |
        ([17, 18.5, 18.5, 17], [9, 9, 9.25, 9.25]),            # right -
        ([9, 11.5, 11.5, 9], [3, 3, 3.25, 3.25]),              # top -
        ([10.15, 10.40, 10.40, 10.15], [0.8, 0.8, 2.3, 2.3]),  # top |
        ([9, 11.5, 11.5, 9], [15, 15, 15.25, 15.25]),          # bottom -
        ([10.15, 10.40, 10.40, 10.15], [16, 16, 17.5, 17.5]),  # bottom |
    ]
    for xCorners, yCorners in rectangles:
        X = np.float32(xCorners) * scale / res
        Y = np.float32(yCorners) * scale / res
        if Y[0] - c <= y <= Y[2] + c and X[3] - c <= x <= X[1] + c:
            return False
    return True


def isSafe(newState, scale, r=1, radiusClearance=0):
    """Return True if a state (or list of per-robot states) is collision-free.

    FIX: the multi-robot branch previously initialised `check = True` and
    then computed `check = check or <out-of-bounds>`, which always
    short-circuited to True on the first robot and let a later
    out-of-bounds robot mask an earlier invalid one.  Every robot must now
    individually be inside the map AND outside the obstacles.
    """
    col = math.floor(800 / r)
    row = math.floor(800 / r)
    newState = list(newState)
    if not isinstance(newState[0], list):
        # Single robot state: bounds check, then workspace check.
        if newState[0] < 0 or newState[0] > col or newState[1] < 0 or newState[1] > row:
            return False
        return pointInValidWorkspace(newState[0:2], r, radiusClearance, scale)
    # Multiple robot states: every one must be valid.
    for state in newState:
        if state[0] < 0 or state[0] > col or state[1] < 0 or state[1] > row:
            return False
        if not pointInValidWorkspace(state[0:2], r, radiusClearance, scale):
            return False
    return True


def printPath(node):
    """Return the list of states from `node` back to the search root."""
    l = []
    current = node
    while current:
        l.append(current.state)
        current = current.parent
    return l


def normalize(startPosition, startOrientation, threshDistance=0.5, threshAngle=30):
    """Snap a pose onto the discretised (distance, angle) grid."""
    x, y = startPosition
    t = startOrientation
    x = round(x / threshDistance) * threshDistance
    y = round(y / threshDistance) * threshDistance
    t = round(t / threshAngle) * threshAngle
    return [x, y, t]


def distance(startPosition, goalPosition):
    """Euclidean distance between two (x, y) points."""
    sx, sy = startPosition
    gx, gy = goalPosition
    return math.sqrt((gx - sx) ** 2 + (gy - sy) ** 2)


def Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale,
          threshDistance=0.5, threshAngle=30, radiusClearance=0):
    """Single-robot A* over the discretised pose grid.

    Returns [True, path] (path root-to-goal reversed via printPath) on
    success, or [False, None] when the open list empties.
    FIX: `count` is now incremented *before* each push so every heap entry
    has a unique tie-breaker; previously the root and the first child both
    used count == 1, and an equal-cost tie would fall through to comparing
    Node objects, raising TypeError.
    """
    sx, sy, st = normalize(startPosition, startOrientation, threshDistance, threshAngle)
    gx, gy, gt = normalize(goalPosition, 0, threshDistance, threshAngle)
    key = str(sx) + str(sy) + str(st)
    root = Node(np.array([sx, sy, st]), 0.0, 0.0, None)
    if key not in nodesExplored:
        nodesExplored[key] = root
    count = 1
    heapq.heappush(q, (root.cost, count, root))
    while len(q) > 0:
        _, _, currentNode = heapq.heappop(q)
        # Goal test uses a radius of 3 * 1.5 grid units around the goal.
        if distance(currentNode.state[0:2], goalPosition) <= 3 * 1.5:
            sol = printPath(currentNode)
            return [True, sol]
        angle = 360 // threshAngle
        for theta in range(angle):
            x, y, t = currentNode.state
            newOrientation = math.radians((threshAngle * theta + t) % 360)
            newPosX = threshDistance * math.cos(newOrientation) + x
            newPosY = threshDistance * math.sin(newOrientation) + y
            newState = np.array(normalize([newPosX, newPosY], newOrientation, threshDistance, threshAngle))
            s = str(newState[0]) + str(newState[1]) + str(newState[2])
            if s not in nodesExplored:
                if isSafe(newState, scale, 1, radiusClearance):
                    newCostToCome = currentNode.costToCome + distance([newState[0], newState[1]], [x, y])
                    newCost = newCostToCome + distance([newState[0], newState[1]], [gx, gy])
                    newNode = Node(state=newState, cost=newCost, costToCome=newCostToCome,
                                   parent=currentNode)
                    nodesExplored[s] = newNode
                    count += 1  # unique tie-breaker (see docstring)
                    heapq.heappush(q, (newNode.cost, count, newNode))
            else:
                # Re-parent an already-seen node if we found a cheaper route,
                # but only when it carries no collision annotation.
                if nodesExplored[s].collision is None or (
                        isinstance(nodesExplored[s].collision, list) and len(nodesExplored[s].collision) == 0):
                    if (nodesExplored[s].cost > currentNode.costToCome
                            + distance([newState[0], newState[1]], [x, y])
                            + distance([newState[0], newState[1]], [gx, gy])):
                        nodesExplored[s].costToCome = currentNode.costToCome + distance(
                            [newState[0], newState[1]], [x, y])
                        nodesExplored[s].cost = nodesExplored[s].costToCome + distance(
                            [newState[0], newState[1]], [gx, gy])
                        nodesExplored[s].parent = currentNode
    return [False, None]


def determineCollision(robotPosition):
    """Return groups of robot indices that currently share the same cell."""
    collisionSet = []
    for i in range(len(robotPosition) - 1):
        collision = []
        for j in range(i + 1, len(robotPosition)):
            if list(robotPosition[i]) == list(robotPosition[j]):
                collision.append(i)
                collision.append(j)
        collision = list(set(collision))
        if collision:
            collisionSet.append(collision)
    return collisionSet


def coupledPlanner(collision, startPosition, startOrientation, goalPosition, coupledNodesExplored,
                   nodesExplored, solPaths1, iterateSolPaths1, scale, threshDistance=0.5,
                   threshAngle=30, radiusClearance=0):
    """Jointly replan the robots in `collision`; replay the others' paths.

    Colliding robots run A* with collision-annotated nodes excluded;
    non-colliding robots just step along their precomputed paths.
    Returns (solution dict keyed by robot index, per-robot nodesExplored).
    FIX: as in Astar, count[i] is incremented before each push so heap
    tie-breakers stay unique.
    """
    nonCollisionRobots = np.array([Node()] * len(startPosition))
    goalChecker = {}
    solution = {}
    solution1 = {}
    nodeE = {}
    co = 0
    currentPos = startPosition.copy()
    count = [0] * len(startPosition)
    q = {}
    col = []
    for i in range(len(startPosition)):
        q[i] = []
        if i not in collision:
            # Non-colliding robot: start it on its existing path.
            s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
                solPaths1[i][iterateSolPaths1[i]][2])
            nonCollisionRobots[i] = coupledNodesExplored[s]
            iterateSolPaths1[i] -= 1
        else:
            # Colliding robot: seed a fresh A* search.
            goalChecker[i] = False
            nodeE[i] = {}
            root = Node(startPosition[i], 0.0, 0.0, None)
            s = str(startPosition[i][0]) + str(startPosition[i][1]) + str(startPosition[i][2])
            nodeE[i][s] = root
            count[i] += 1
            heapq.heappush(q[i], (root.cost, count[i], root))
    while not all(ele for ele in goalChecker.values()):
        co += 1
        if determineCollision(currentPos.copy()):
            # A new collision appeared during replanning: annotate the node,
            # back up to its parent and restart that robot's local search.
            col = determineCollision(currentPos.copy())
            for i in col[0]:
                s = str(currentPos[i][0]) + str(currentPos[i][1]) + str(currentPos[i][2])
                q[i].clear()
                nodesExplored[i][s].collision = col
                # NOTE(review): assumes the colliding node has a parent;
                # a collision at a search root would raise AttributeError.
                count[i] += 1
                heapq.heappush(q[i], (nodesExplored[i][s].parent.cost, count[i], nodesExplored[i][s].parent))
                nodesExplored[i][s].parent = None
                nodeE[i].clear()
        for i in range(len(startPosition)):
            if i in collision:
                if not goalChecker[i]:
                    _, _, currentNode = heapq.heappop(q[i])
                    currentPos[i] = currentNode.state
                    if distance(currentNode.state[0:2], goalPosition[i][0:2]) <= 3 * 1.5:
                        solution[i] = printPath(currentNode)
                        goalChecker[i] = True
                        continue
                    angle = 360 // threshAngle
                    for theta in range(angle):
                        x, y, t = currentNode.state
                        newOrientation = math.radians((threshAngle * theta + t) % 360)
                        newPosX = threshDistance * math.cos(newOrientation) + x
                        newPosY = threshDistance * math.sin(newOrientation) + y
                        newState = np.array(normalize([newPosX, newPosY], newOrientation,
                                                      threshDistance, threshAngle))
                        s = str(newState[0]) + str(newState[1]) + str(newState[2])
                        if s not in nodeE[i]:
                            # Skip states marked as colliding in the global map.
                            if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
                                    s not in nodesExplored[i]):
                                if isSafe(newState, scale, 1, radiusClearance):
                                    newCostToCome = currentNode.costToCome + distance(
                                        [newState[0], newState[1]], [x, y])
                                    newCost = newCostToCome + distance(
                                        [newState[0], newState[1]], goalPosition[i][0:2])
                                    newNode = Node(state=newState, cost=newCost,
                                                   costToCome=newCostToCome, parent=currentNode)
                                    nodesExplored[i][s] = newNode
                                    nodeE[i][s] = newNode
                                    count[i] += 1  # unique tie-breaker
                                    heapq.heappush(q[i], (newNode.cost, count[i], newNode))
                        else:
                            if (s in nodesExplored[i] and not nodesExplored[i][s].collision) or (
                                    s not in nodesExplored[i]):
                                if (nodeE[i][s].cost > currentNode.costToCome
                                        + distance([newState[0], newState[1]], [x, y])
                                        + distance([newState[0], newState[1]], goalPosition[i][0:2])):
                                    nodeE[i][s].costToCome = currentNode.costToCome + distance(
                                        [newState[0], newState[1]], [x, y])
                                    nodeE[i][s].cost = nodeE[i][s].costToCome + distance(
                                        [newState[0], newState[1]], goalPosition[i][0:2])
                                    nodeE[i][s].parent = currentNode
            else:
                # Non-colliding robot keeps stepping along its old path.
                if iterateSolPaths1[i] > 0:
                    s = str(solPaths1[i][iterateSolPaths1[i]][0]) + str(solPaths1[i][iterateSolPaths1[i]][1]) + str(
                        solPaths1[i][iterateSolPaths1[i]][2])
                    nonCollisionRobots[i] = nodesExplored[i][s]
                    currentPos[i] = nonCollisionRobots[i].state.copy()
                    iterateSolPaths1[i] -= 1
                else:
                    goalChecker[i] = True
    return solution, nodesExplored


def updateCollsionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1):
    """Back-propagate collision sets along each robot's path to the root.

    (Name typo `Collsion` kept: callers elsewhere use it.)
    """
    for i, pos in enumerate(previousPos):
        s = str(pos[0]) + str(pos[1]) + str(pos[2])
        while nodesExplored1[i][s].parent is not None:
            for collision in colset:
                if i in collision:
                    if coupledNodesExplored[s].collision:
                        for col in coupledNodesExplored[s].collision:
                            col = list(set(col + [i]))
                    else:
                        coupledNodesExplored[s].collision = colset
                    if nodesExplored[s].collision:
                        for col in nodesExplored[s].collision:
                            col = list(set(col + [i]))
                    else:
                        nodesExplored[s].collision = colset
                    if nodesExplored1[i][s].collision:
                        for col in nodesExplored1[i][s].collision:
                            col = list(set(col + [i]))
                    else:
                        nodesExplored1[i][s].collision = colset
            st = nodesExplored1[i][s].parent.state
            s = str(st[0]) + str(st[1]) + str(st[2])


def subdimensionalExpansion(solPaths, nodesExplored, nodesExplored1, iterateSolPaths, scale,
                            threshDistance, threshAngle, radiusClearance):
    """Replay individual paths until a robot-robot collision is found, then
    invoke the coupled planner on the colliding subset (M* expansion).

    Returns (collision_found, coupled_solution, collision_set, nodeE,
    currentPos).
    NOTE(review): loop nesting reconstructed from a whitespace-mangled
    source; confirm against the project history.
    """
    currentPos = []
    sol = []
    nodeE = []
    startPosition = []
    goalPosition = []
    previousPos = []
    colset = []
    count = -1
    exp = False
    previousNode = [Node()] * len(solPaths)
    node = [Node()] * len(solPaths)
    coupledNodesExplored = {}
    solPaths1 = solPaths.copy()
    iterateSolPaths1 = iterateSolPaths.copy()
    for index, path in enumerate(solPaths):
        startPosition.append(list(path[iterateSolPaths[index]]))
        goalPosition.append(list(path[0]))
    # Step all robots in lockstep along their individual paths.
    while not all(ele == 0 for ele in iterateSolPaths):
        previousPos = currentPos.copy()
        currentPos.clear()
        for index, path in enumerate(solPaths):
            currentPos.append(list(path[iterateSolPaths[index]]))
        colset = determineCollision(currentPos)
        count += 1
        if count == 0:
            previousPos = currentPos
        if not colset:
            # No collision this step: record the joint state.
            for i, pos in enumerate(currentPos):
                s = str(pos[0]) + str(pos[1]) + str(pos[2])
                if count == 0:
                    node[i] = Node(state=pos, collision=colset, parent=None)
                    previousNode[i] = node[i]
                else:
                    previousNode[i] = node[i]
                    node[i] = Node(state=pos, collision=colset, parent=previousNode[i])
                if s not in nodesExplored:
                    nodesExplored[s] = node[i]
                coupledNodesExplored[s] = node[i]
                if iterateSolPaths[i] > 0:
                    iterateSolPaths[i] -= 1
        else:
            exp = True
            break
    # Mark colliding states; fast-forward the non-colliding robots.
    for i, pos in enumerate(currentPos):
        s = str(pos[0]) + str(pos[1]) + str(pos[2])
        for collision in colset:
            if i in collision:
                node[i] = Node(state=pos, collision=colset, parent=None)
                coupledNodesExplored[s] = node[i]
                nodesExplored[s].collision = colset
                nodesExplored1[i][s].collision = colset
            else:
                while iterateSolPaths[i] > 0:
                    s = str(pos[0]) + str(pos[1]) + str(pos[2])
                    previousNode[i] = node[i]
                    node[i] = Node(state=pos, collision=[], parent=previousNode[i])
                    coupledNodesExplored[s] = node[i]
                    iterateSolPaths[i] -= 1
                    pos = solPaths[i][iterateSolPaths[i]]
            break  # only the first collision group is handled per robot
    if exp:
        print('Collision found')
        updateCollsionPath(colset, previousPos, coupledNodesExplored, nodesExplored, nodesExplored1)
        for collision in colset:
            a = time.time()
            sol, nodeE = coupledPlanner(collision, startPosition, 0, goalPosition, coupledNodesExplored,
                                        nodesExplored1, solPaths1, iterateSolPaths1, scale, threshDistance,
                                        30, radiusClearance)
            b = time.time()
            print(b - a)
            return exp, sol, colset[0], nodeE, currentPos
    return exp, sol, colset, nodeE, currentPos


def triangleCoordinates(start, end, triangleSize=5):
    """Corner points of a small arrow-head triangle at `end`, pointing
    along the start->end direction (used to draw explored edges)."""
    rotation = (math.atan2(start[1] - end[1], end[0] - start[0])) + math.pi / 2
    rad = math.pi / 180
    coordinateList = np.array([[end[0], end[1]],
                               [end[0] + triangleSize * math.sin(rotation - 165 * rad),
                                end[1] + triangleSize * math.cos(rotation - 165 * rad)],
                               [end[0] + triangleSize * math.sin(rotation + 165 * rad),
                                end[1] + triangleSize * math.cos(rotation + 165 * rad)]])
    return coordinateList


def _drawObstacles(gameDisplay, scale, color):
    """Draw the static obstacle map (was duplicated inline twice)."""
    pygame.draw.rect(gameDisplay, color, [int(scale * 8), int(scale * 9), int(scale * 4.5), int(scale * 0.5)])    # plus
    pygame.draw.rect(gameDisplay, color, [int(scale * 10), int(scale * 7), int(scale * 0.5), int(scale * 4.5)])   # plus
    pygame.draw.rect(gameDisplay, color, [int(scale * 4), int(scale * 8), int(scale * 0.25), int(scale * 2.5)])   # |
    pygame.draw.rect(gameDisplay, color, [int(scale * 1.5), int(scale * 9), int(scale * 1.5), int(scale * 0.25)]) # -
    pygame.draw.rect(gameDisplay, color, [int(scale * 16), int(scale * 8), int(scale * 0.25), int(scale * 2.5)])  # |
    pygame.draw.rect(gameDisplay, color, [int(scale * 17), int(scale * 9), int(scale * 1.5), int(scale * 0.25)])  # -
    pygame.draw.rect(gameDisplay, color, [int(scale * 9), int(scale * 3), int(scale * 2.5), int(scale * 0.25)])   # -
    pygame.draw.rect(gameDisplay, color, [int(scale * 10.15), int(scale * 0.8), int(scale * 0.25), int(scale * 1.5)])  # |
    pygame.draw.rect(gameDisplay, color, [int(scale * 9), int(scale * 15), int(scale * 2.5), int(scale * 0.25)])  # -
    pygame.draw.rect(gameDisplay, color, [int(scale * 10.15), int(scale * 16), int(scale * 0.25), int(scale * 1.5)])   # |


def visualizeMStar():
    """Run the full M* demo: individual A*, collision detection,
    subdimensional expansion, and a pygame animation of the result.

    NOTE(review): the coupled-planner result is re-loaded from 'sol.npy'
    rather than taken from the in-memory `sol` — presumably a cached run;
    confirm the file is kept in sync.
    """
    ###################################################
    # Parameters
    ###################################################
    clearance = 10
    radius = 0
    stepSize = 11
    threshDistance = stepSize  # step size of movement
    res = 1                    # resolution of grid
    scale = 40                 # scale of grid
    # 3-robot configuration (alternative 1/2/4/5/6/7-robot start/goal sets
    # from the original file can be substituted here).
    start = [[1 * scale, 16 * scale], [1 * scale, 6 * scale], [6 * scale, 1 * scale]]
    goal = [[2 * scale, 8 * scale], [14 * scale, 10 * scale], [9 * scale, 14 * scale]]
    drawing = True
    threshAngle = 90  # angle between actions
    startOrientation = 0
    white = (255, 255, 255)
    black = (0, 0, 0)
    red = (255, 0, 0)
    lred = (255, 102, 102)
    green = (0, 102, 0)
    lgreen = (153, 255, 153)
    orange = (255, 165, 0)
    dorange = (240, 94, 35)
    blue = (0, 0, 255)
    lblue = (153, 204, 255)
    yellow = (255, 255, 0)
    pink = (255, 192, 203)
    dpink = (199, 21, 133)
    gray = (220, 220, 220)
    dgray = (105, 105, 105)
    maroon = (255, 160, 122)
    dmaroon = (128, 0, 0)
    pathColours = [blue, red, green, dmaroon, orange, dpink, dgray]
    colors = [lblue, lred, lgreen, maroon, dorange, pink, gray]
    solutionPaths = []
    size_x = 20
    size_y = 20
    TotalNodesExplored = {}
    TotalNodesExplored1 = {}
    totalTime = 0
    if drawing:
        pygame.init()
        gameDisplay = pygame.display.set_mode((size_x * scale, size_y * scale))
        gameDisplay.fill(white)
        pygame.display.set_caption("M* Algorithm Implementation")
        basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
        _drawObstacles(gameDisplay, scale, black)
        # Display start and end points of the robots.
        for i in range(len(start)):
            pygame.draw.circle(gameDisplay, pathColours[i], start[i], 0.1 * scale)
            pygame.draw.circle(gameDisplay, pathColours[i], goal[i], 0.1 * scale)
            text = basicfont.render('s' + str(i + 1), False, pathColours[i])
            text1 = basicfont.render('g' + str(i + 1), False, pathColours[i])
            gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
            gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
        pygame.display.update()
        pygame.time.delay(500)
    # Per-robot individual A*, drawing explored nodes as we go.
    for i in range(len(start)):
        nodesExplored = {}
        q = []
        startPosition = np.round((np.array(start[i])) / res)
        goalPosition = np.round((np.array(goal[i])) / res)
        if not isSafe(startPosition, scale, res, clearance + radius) or not isSafe(goalPosition, scale, res,
                                                                                  clearance + radius):
            print('Start or goal configuration of robot ' + str(i + 1) + ' is not in a valid workspace')
        else:
            print('Exploring workspace for robot ' + str(i + 1))
            startTime = time.time()
            success, solution = Astar(q, startPosition, startOrientation, goalPosition, nodesExplored, scale,
                                      threshDistance, threshAngle, clearance + radius)
            endTime = time.time()
            TotalNodesExplored.update(nodesExplored)
            TotalNodesExplored1[i] = nodesExplored
            if success:
                solutionPaths.append(solution)
                print('Optimal path found for robot ' + str(i + 1))
                print("Total time taken for exploring nodes " + str(endTime - startTime) + " seconds.")
                totalTime += endTime - startTime
                print('-------------------------')
                if drawing:
                    draw = True
                    while draw:
                        for event in pygame.event.get():
                            if event.type == pygame.QUIT:
                                pygame.quit()
                                quit()
                        # Draw nodesExplored.
                        for s in nodesExplored:
                            if nodesExplored[s].parent:
                                pt = nodesExplored[s].state[0:2]
                                ptParent = nodesExplored[s].parent.state[0:2]
                                x, y = pt * res
                                x2, y2 = ptParent * res
                                pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
                                triangle = triangleCoordinates([x2, y2], [x, y], 5)
                                pygame.draw.polygon(gameDisplay, colors[i],
                                                    [tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
                        # Draw start and goal locations.
                        pygame.draw.rect(gameDisplay, colors[i],
                                         (int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
                                          int(res * scale), int(res * scale)))
                        pygame.draw.circle(gameDisplay, colors[i],
                                           (int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
                                           math.floor(3 * 1.5 * res * scale))
                        pygame.draw.rect(gameDisplay, white,
                                         (int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
                                          int(res * scale), int(res * scale)))
                        pygame.display.update()
                        draw = False
            else:
                solutionPaths.append(success)
    print("Total time " + str(totalTime))
    print("solution Paths " + str(len(solutionPaths)))
    print('Robots following their own individual optimal Paths')
    print()
    print()
    iterateSolutionPaths = []
    for i in range(len(solutionPaths)):
        if solutionPaths[i]:
            iterateSolutionPaths.append(len(solutionPaths[i]) - 1)
        else:
            iterateSolutionPaths.append(-1)
    iterateSolutionPathsCopy = iterateSolutionPaths.copy()
    iterateSolutionPathsCopy1 = iterateSolutionPaths.copy()
    solutionPathsCopy = solutionPaths.copy()
    failure, sol, collision, nodeE, currentPos = subdimensionalExpansion(solutionPathsCopy, TotalNodesExplored,
                                                                        TotalNodesExplored1,
                                                                        iterateSolutionPathsCopy, scale,
                                                                        threshDistance, 45, radius + clearance)
    # Animate the robots along their individual paths until the collision.
    if drawing:
        temp = [True] * len(iterateSolutionPaths)
        while not all(ele == -2 for ele in iterateSolutionPaths) and not all(not p for p in temp):
            for i in range(len(solutionPaths)):
                if list(solutionPaths[i][iterateSolutionPaths[i]]) == currentPos[i] and failure:
                    pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
                    x, y = pt[0] * res, pt[1] * res
                    pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
                                       math.floor(3 * 1.5 * res))
                    pygame.time.delay(50)
                    pygame.display.update()
                    iterateSolutionPaths[i] -= 1
                    temp[i] = False
                else:
                    if iterateSolutionPaths[i] != -2:
                        if iterateSolutionPaths[i] == -1:
                            print("There is no Path for Robot " + str(i + 1))
                            iterateSolutionPaths[i] = -2
                        elif iterateSolutionPaths[i] >= 0:
                            pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
                            x, y = pt[0] * res, pt[1] * res
                            pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
                                               math.floor(3 * 1.5 * res))
                            pygame.time.delay(50)
                            iterateSolutionPaths[i] -= 1
                            if iterateSolutionPaths[i] == 0:
                                print("Robot " + str(i + 1) + " reached its goal")
                                iterateSolutionPaths[i] = -2
            pygame.display.update()
        pygame.time.delay(1000)
    if failure:
        s = ''
        for i in collision:
            s += str(i + 1) + ' '
        print("--------------------")
        print('Robot - Robot collision detected between robots ' + s)
        print('Starting subdimesional Expansion')
        print('Running Back propogation and updating Collision list')
        temp = []
        for i in range(len(iterateSolutionPaths)):
            if i in collision:
                temp.append(False)
            else:
                temp.append(True)
        # Rewind the colliding robots (drawn in yellow) back to the collision.
        if drawing:
            while not all(ele for ele in temp):
                for i in range(len(iterateSolutionPaths)):
                    if i in collision:
                        if iterateSolutionPaths[i] != iterateSolutionPathsCopy1[i]:
                            pt = solutionPaths[i][iterateSolutionPaths[i]][0:2]
                            x, y = pt[0] * res, pt[1] * res
                            iterateSolutionPaths[i] += 1
                            pygame.draw.circle(gameDisplay, yellow, (int(x * res), int(y * res)),
                                               math.floor(3 * 1.5 * res))
                            pygame.time.delay(50)
                            pygame.display.update()
                        else:
                            temp[i] = True
            pygame.time.delay(500)
        print()
        print('Implementing coupled planner for robots ' + s)
        print()
        print('Robots following collision free path')
        if drawing:
            gameDisplay.fill(white)
            pygame.display.set_caption("M* Algorithm Implementation")
            basicfont = pygame.font.SysFont('timesnewroman', 20, bold=True)
            _drawObstacles(gameDisplay, scale, black)
            pygame.display.update()
        solutionPaths2 = solutionPathsCopy.copy()
        sol = list(np.load('sol.npy'))
        sol.reverse()
        for i in range(len(solutionPaths2)):
            if i in collision:
                solutionPaths2[i] = sol.pop(0)
        iterateSolutionPaths2 = []
        if drawing:
            for i in range(len(start)):
                pygame.draw.circle(gameDisplay, black, start[i], 0.1 * scale)
                pygame.draw.circle(gameDisplay, black, goal[i], 0.1 * scale)
                text = basicfont.render('s' + str(i + 1), False, black)
                text1 = basicfont.render('g' + str(i + 1), False, black)
                gameDisplay.blit(text, (start[i][0] + 5, start[i][1] + 5))
                gameDisplay.blit(text1, (goal[i][0] + 5, goal[i][1] + 5))
            pygame.display.update()
            pygame.time.delay(500)
            for i in range(len(start)):
                startPosition = np.round((np.array(start[i])) / res)
                goalPosition = np.round((np.array(goal[i])) / res)
                draw = True
                while draw:
                    for event in pygame.event.get():
                        if event.type == pygame.QUIT:
                            pygame.quit()
                            quit()
                    # Draw explored nodes (yellow for replanned robots).
                    for s in nodeE[i]:
                        if nodeE[i][s].parent:
                            pt = nodeE[i][s].state[0:2]
                            ptParent = nodeE[i][s].parent.state[0:2]
                            x, y = pt * res
                            x2, y2 = ptParent * res
                            if i in collision:
                                pygame.draw.line(gameDisplay, yellow, (x2, y2), (x, y), 1)
                                triangle = triangleCoordinates([x2, y2], [x, y], 5)
                                pygame.draw.polygon(gameDisplay, yellow,
                                                    [tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
                            else:
                                pygame.draw.line(gameDisplay, colors[i], (x2, y2), (x, y), 1)
                                triangle = triangleCoordinates([x2, y2], [x, y], 5)
                                pygame.draw.polygon(gameDisplay, colors[i],
                                                    [tuple(triangle[0]), tuple(triangle[1]), tuple(triangle[2])])
                    # Draw start and goal locations.
                    pygame.draw.rect(gameDisplay, colors[i],
                                     (int(startPosition[0] * res * scale), int(startPosition[1] * res * scale),
                                      int(res * scale), int(res * scale)))
                    pygame.draw.circle(gameDisplay, colors[i],
                                       (int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale)),
                                       math.floor(3 * 1.5 * res * scale))
                    pygame.draw.rect(gameDisplay, white,
                                     (int(goalPosition[0] * res * scale), int(goalPosition[1] * res * scale),
                                      int(res * scale), int(res * scale)))
                    pygame.display.update()
                    draw = False
            for i in range(len(solutionPaths2)):
                iterateSolutionPaths2.append(len(solutionPaths2[i]) - 1)
            print(iterateSolutionPaths2)
            # Draw solution path.
            while not all(ele == -2 for ele in iterateSolutionPaths2):
                for i in range(len(solutionPaths2)):
                    if iterateSolutionPaths2[i] != -2:
                        if iterateSolutionPaths2[i] == -1:
                            print("There is no Path for Robot " + str(i + 1))
                            iterateSolutionPaths2[i] = -2
                        elif iterateSolutionPaths2[i] >= 0:
                            pt = solutionPaths2[i][iterateSolutionPaths2[i]][0:2]
                            x, y = pt[0] * res, pt[1] * res
                            pygame.draw.circle(gameDisplay, pathColours[i], (int(x * res), int(y * res)),
                                               math.floor(3 * 1.5 * res))
                            pygame.time.delay(50)
                            iterateSolutionPaths2[i] -= 1
                            if iterateSolutionPaths2[i] == 0:
                                print("Robot " + str(i + 1) + " reached its goal")
                                iterateSolutionPaths2[i] = -2
                pygame.display.update()
            pygame.time.delay(4000)
    pygame.quit()


def main():
    visualizeMStar()


if __name__ == "__main__":
    main()
[ "numpy.load", "pygame.draw.line", "heapq.heappush", "math.atan2", "pygame.event.get", "pygame.display.update", "pygame.font.SysFont", "math.radians", "pygame.display.set_mode", "math.cos", "pygame.display.set_caption", "pygame.quit", "math.sqrt", "pygame.init", "math.sin", "pygame.draw.circle", "numpy.float32", "pygame.time.delay", "math.floor", "heapq.heappop", "time.time", "numpy.array" ]
[((4574, 4593), 'math.floor', 'math.floor', (['(800 / r)'], {}), '(800 / r)\n', (4584, 4593), False, 'import math\n'), ((4604, 4623), 'math.floor', 'math.floor', (['(800 / r)'], {}), '(800 / r)\n', (4614, 4623), False, 'import math\n'), ((5885, 5927), 'math.sqrt', 'math.sqrt', (['((gx - sx) ** 2 + (gy - sy) ** 2)'], {}), '((gx - sx) ** 2 + (gy - sy) ** 2)\n', (5894, 5927), False, 'import math\n'), ((6526, 6569), 'heapq.heappush', 'heapq.heappush', (['q', '(root.cost, count, root)'], {}), '(q, (root.cost, count, root))\n', (6540, 6569), False, 'import heapq\n'), ((33090, 33113), 'pygame.time.delay', 'pygame.time.delay', (['(1000)'], {}), '(1000)\n', (33107, 33113), False, 'import pygame\n'), ((6400, 6422), 'numpy.array', 'np.array', (['[sx, sy, st]'], {}), '([sx, sy, st])\n', (6408, 6422), True, 'import numpy as np\n'), ((6621, 6637), 'heapq.heappop', 'heapq.heappop', (['q'], {}), '(q)\n', (6634, 6637), False, 'import heapq\n'), ((19612, 19660), 'math.atan2', 'math.atan2', (['(start[1] - end[1])', '(end[0] - start[0])'], {}), '(start[1] - end[1], end[0] - start[0])\n', (19622, 19660), False, 'import math\n'), ((23677, 23690), 'pygame.init', 'pygame.init', ([], {}), '()\n', (23688, 23690), False, 'import pygame\n'), ((23713, 23770), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(size_x * scale, size_y * scale)'], {}), '((size_x * scale, size_y * scale))\n', (23736, 23770), False, 'import pygame\n'), ((23811, 23868), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""M* Algorithm Implementation"""'], {}), "('M* Algorithm Implementation')\n", (23837, 23868), False, 'import pygame\n'), ((23889, 23940), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""timesnewroman"""', '(20)'], {'bold': '(True)'}), "('timesnewroman', 20, bold=True)\n", (23908, 23940), False, 'import pygame\n'), ((26261, 26284), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (26282, 26284), False, 'import pygame\n'), ((26293, 26315), 'pygame.time.delay', 
'pygame.time.delay', (['(500)'], {}), '(500)\n', (26310, 26315), False, 'import pygame\n'), ((1318, 1348), 'numpy.float32', 'np.float32', (['[8, 12.5, 12.5, 8]'], {}), '([8, 12.5, 12.5, 8])\n', (1328, 1348), True, 'import numpy as np\n'), ((1371, 1399), 'numpy.float32', 'np.float32', (['[9, 9, 9.5, 9.5]'], {}), '([9, 9, 9.5, 9.5])\n', (1381, 1399), True, 'import numpy as np\n'), ((1601, 1633), 'numpy.float32', 'np.float32', (['[10, 10.5, 10.5, 10]'], {}), '([10, 10.5, 10.5, 10])\n', (1611, 1633), True, 'import numpy as np\n'), ((1656, 1686), 'numpy.float32', 'np.float32', (['[7, 7, 11.5, 11.5]'], {}), '([7, 7, 11.5, 11.5])\n', (1666, 1686), True, 'import numpy as np\n'), ((1890, 1920), 'numpy.float32', 'np.float32', (['[4, 4.25, 4.25, 4]'], {}), '([4, 4.25, 4.25, 4])\n', (1900, 1920), True, 'import numpy as np\n'), ((1943, 1973), 'numpy.float32', 'np.float32', (['[8, 8, 10.5, 10.5]'], {}), '([8, 8, 10.5, 10.5])\n', (1953, 1973), True, 'import numpy as np\n'), ((2177, 2205), 'numpy.float32', 'np.float32', (['[1.5, 3, 3, 1.5]'], {}), '([1.5, 3, 3, 1.5])\n', (2187, 2205), True, 'import numpy as np\n'), ((2228, 2258), 'numpy.float32', 'np.float32', (['[9, 9, 9.25, 9.25]'], {}), '([9, 9, 9.25, 9.25])\n', (2238, 2258), True, 'import numpy as np\n'), ((2462, 2496), 'numpy.float32', 'np.float32', (['[16, 16.25, 16.25, 16]'], {}), '([16, 16.25, 16.25, 16])\n', (2472, 2496), True, 'import numpy as np\n'), ((2519, 2549), 'numpy.float32', 'np.float32', (['[8, 8, 10.5, 10.5]'], {}), '([8, 8, 10.5, 10.5])\n', (2529, 2549), True, 'import numpy as np\n'), ((2753, 2785), 'numpy.float32', 'np.float32', (['[17, 18.5, 18.5, 17]'], {}), '([17, 18.5, 18.5, 17])\n', (2763, 2785), True, 'import numpy as np\n'), ((2808, 2838), 'numpy.float32', 'np.float32', (['[9, 9, 9.25, 9.25]'], {}), '([9, 9, 9.25, 9.25])\n', (2818, 2838), True, 'import numpy as np\n'), ((3042, 3072), 'numpy.float32', 'np.float32', (['[9, 11.5, 11.5, 9]'], {}), '([9, 11.5, 11.5, 9])\n', (3052, 3072), True, 'import numpy 
as np\n'), ((3095, 3125), 'numpy.float32', 'np.float32', (['[3, 3, 3.25, 3.25]'], {}), '([3, 3, 3.25, 3.25])\n', (3105, 3125), True, 'import numpy as np\n'), ((3329, 3367), 'numpy.float32', 'np.float32', (['[10.15, 10.4, 10.4, 10.15]'], {}), '([10.15, 10.4, 10.4, 10.15])\n', (3339, 3367), True, 'import numpy as np\n'), ((3392, 3424), 'numpy.float32', 'np.float32', (['[0.8, 0.8, 2.3, 2.3]'], {}), '([0.8, 0.8, 2.3, 2.3])\n', (3402, 3424), True, 'import numpy as np\n'), ((3628, 3658), 'numpy.float32', 'np.float32', (['[9, 11.5, 11.5, 9]'], {}), '([9, 11.5, 11.5, 9])\n', (3638, 3658), True, 'import numpy as np\n'), ((3681, 3715), 'numpy.float32', 'np.float32', (['[15, 15, 15.25, 15.25]'], {}), '([15, 15, 15.25, 15.25])\n', (3691, 3715), True, 'import numpy as np\n'), ((3919, 3957), 'numpy.float32', 'np.float32', (['[10.15, 10.4, 10.4, 10.15]'], {}), '([10.15, 10.4, 10.4, 10.15])\n', (3929, 3957), True, 'import numpy as np\n'), ((3982, 4014), 'numpy.float32', 'np.float32', (['[16, 16, 17.5, 17.5]'], {}), '([16, 16, 17.5, 17.5])\n', (3992, 4014), True, 'import numpy as np\n'), ((6919, 6964), 'math.radians', 'math.radians', (['((threshAngle * theta + t) % 360)'], {}), '((threshAngle * theta + t) % 360)\n', (6931, 6964), False, 'import math\n'), ((10473, 10522), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(root.cost, count[i], root)'], {}), '(q[i], (root.cost, count[i], root))\n', (10487, 10522), False, 'import heapq\n'), ((19061, 19072), 'time.time', 'time.time', ([], {}), '()\n', (19070, 19072), False, 'import time\n'), ((19403, 19414), 'time.time', 'time.time', ([], {}), '()\n', (19412, 19414), False, 'import time\n'), ((25803, 25873), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'pathColours[i]', 'start[i]', '(0.1 * scale)'], {}), '(gameDisplay, pathColours[i], start[i], 0.1 * scale)\n', (25821, 25873), False, 'import pygame\n'), ((25886, 25955), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'pathColours[i]', 'goal[i]', '(0.1 * 
scale)'], {}), '(gameDisplay, pathColours[i], goal[i], 0.1 * scale)\n', (25904, 25955), False, 'import pygame\n'), ((27115, 27126), 'time.time', 'time.time', ([], {}), '()\n', (27124, 27126), False, 'import time\n'), ((27412, 27423), 'time.time', 'time.time', ([], {}), '()\n', (27421, 27423), False, 'import time\n'), ((34431, 34453), 'pygame.time.delay', 'pygame.time.delay', (['(500)'], {}), '(500)\n', (34448, 34453), False, 'import pygame\n'), ((34694, 34751), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""M* Algorithm Implementation"""'], {}), "('M* Algorithm Implementation')\n", (34720, 34751), False, 'import pygame\n'), ((34776, 34827), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""timesnewroman"""', '(20)'], {'bold': '(True)'}), "('timesnewroman', 20, bold=True)\n", (34795, 34827), False, 'import pygame\n'), ((36596, 36619), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (36617, 36619), False, 'import pygame\n'), ((36690, 36708), 'numpy.load', 'np.load', (['"""sol.npy"""'], {}), "('sol.npy')\n", (36697, 36708), True, 'import numpy as np\n'), ((37414, 37437), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (37435, 37437), False, 'import pygame\n'), ((37450, 37472), 'pygame.time.delay', 'pygame.time.delay', (['(500)'], {}), '(500)\n', (37467, 37472), False, 'import pygame\n'), ((41394, 41417), 'pygame.time.delay', 'pygame.time.delay', (['(4000)'], {}), '(4000)\n', (41411, 41417), False, 'import pygame\n'), ((41430, 41443), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (41441, 41443), False, 'import pygame\n'), ((11006, 11103), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(nodesExplored[i][s].parent.cost, count[i], nodesExplored[i][s].parent)'], {}), '(q[i], (nodesExplored[i][s].parent.cost, count[i],\n nodesExplored[i][s].parent))\n', (11020, 11103), False, 'import heapq\n'), ((26607, 26625), 'numpy.array', 'np.array', (['start[i]'], {}), '(start[i])\n', (26615, 26625), True, 'import numpy as 
np\n'), ((26667, 26684), 'numpy.array', 'np.array', (['goal[i]'], {}), '(goal[i])\n', (26675, 26684), True, 'import numpy as np\n'), ((36968, 37029), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'black', 'start[i]', '(0.1 * scale)'], {}), '(gameDisplay, black, start[i], 0.1 * scale)\n', (36986, 37029), False, 'import pygame\n'), ((37046, 37106), 'pygame.draw.circle', 'pygame.draw.circle', (['gameDisplay', 'black', 'goal[i]', '(0.1 * scale)'], {}), '(gameDisplay, black, goal[i], 0.1 * scale)\n', (37064, 37106), False, 'import pygame\n'), ((7004, 7028), 'math.cos', 'math.cos', (['newOrientation'], {}), '(newOrientation)\n', (7012, 7028), False, 'import math\n'), ((7072, 7096), 'math.sin', 'math.sin', (['newOrientation'], {}), '(newOrientation)\n', (7080, 7096), False, 'import math\n'), ((7762, 7811), 'heapq.heappush', 'heapq.heappush', (['q', '(newNode.cost, count, newNode)'], {}), '(q, (newNode.cost, count, newNode))\n', (7776, 7811), False, 'import heapq\n'), ((11416, 11435), 'heapq.heappop', 'heapq.heappop', (['q[i]'], {}), '(q[i])\n', (11429, 11435), False, 'import heapq\n'), ((31922, 31943), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (31939, 31943), False, 'import pygame\n'), ((31964, 31987), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (31985, 31987), False, 'import pygame\n'), ((37780, 37798), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (37796, 37798), False, 'import pygame\n'), ((11909, 11954), 'math.radians', 'math.radians', (['((threshAngle * theta + t) % 360)'], {}), '((threshAngle * theta + t) % 360)\n', (11921, 11954), False, 'import math\n'), ((19805, 19835), 'math.sin', 'math.sin', (['(rotation - 165 * rad)'], {}), '(rotation - 165 * rad)\n', (19813, 19835), False, 'import math\n'), ((19893, 19923), 'math.cos', 'math.cos', (['(rotation - 165 * rad)'], {}), '(rotation - 165 * rad)\n', (19901, 19923), False, 'import math\n'), ((19982, 20012), 'math.sin', 'math.sin', 
(['(rotation + 165 * rad)'], {}), '(rotation + 165 * rad)\n', (19990, 20012), False, 'import math\n'), ((20070, 20100), 'math.cos', 'math.cos', (['(rotation + 165 * rad)'], {}), '(rotation + 165 * rad)\n', (20078, 20100), False, 'import math\n'), ((28144, 28162), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (28160, 28162), False, 'import pygame\n'), ((31875, 31900), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (31885, 31900), False, 'import math\n'), ((37596, 37614), 'numpy.array', 'np.array', (['start[i]'], {}), '(start[i])\n', (37604, 37614), True, 'import numpy as np\n'), ((37664, 37681), 'numpy.array', 'np.array', (['goal[i]'], {}), '(goal[i])\n', (37672, 37681), True, 'import numpy as np\n'), ((40000, 40023), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (40021, 40023), False, 'import pygame\n'), ((30029, 30052), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (30050, 30052), False, 'import pygame\n'), ((34271, 34292), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (34288, 34292), False, 'import pygame\n'), ((34321, 34344), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (34342, 34344), False, 'import pygame\n'), ((37882, 37895), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (37893, 37895), False, 'import pygame\n'), ((39685, 39718), 'math.floor', 'math.floor', (['(3 * 1.5 * res * scale)'], {}), '(3 * 1.5 * res * scale)\n', (39695, 39718), False, 'import math\n'), ((12006, 12030), 'math.cos', 'math.cos', (['newOrientation'], {}), '(newOrientation)\n', (12014, 12030), False, 'import math\n'), ((12086, 12110), 'math.sin', 'math.sin', (['newOrientation'], {}), '(newOrientation)\n', (12094, 12110), False, 'import math\n'), ((28254, 28267), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (28265, 28267), False, 'import pygame\n'), ((28790, 28851), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'colors[i]', '(x2, y2)', '(x, y)', 
'(1)'], {}), '(gameDisplay, colors[i], (x2, y2), (x, y), 1)\n', (28806, 28851), False, 'import pygame\n'), ((29698, 29731), 'math.floor', 'math.floor', (['(3 * 1.5 * res * scale)'], {}), '(3 * 1.5 * res * scale)\n', (29708, 29731), False, 'import math\n'), ((32749, 32770), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (32766, 32770), False, 'import pygame\n'), ((33061, 33084), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (33082, 33084), False, 'import pygame\n'), ((34216, 34241), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (34226, 34241), False, 'import math\n'), ((38409, 38467), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'yellow', '(x2, y2)', '(x, y)', '(1)'], {}), '(gameDisplay, yellow, (x2, y2), (x, y), 1)\n', (38425, 38467), False, 'import pygame\n'), ((38805, 38866), 'pygame.draw.line', 'pygame.draw.line', (['gameDisplay', 'colors[i]', '(x2, y2)', '(x, y)', '(1)'], {}), '(gameDisplay, colors[i], (x2, y2), (x, y), 1)\n', (38821, 38866), False, 'import pygame\n'), ((41042, 41063), 'pygame.time.delay', 'pygame.time.delay', (['(50)'], {}), '(50)\n', (41059, 41063), False, 'import pygame\n'), ((41357, 41380), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (41378, 41380), False, 'import pygame\n'), ((13275, 13330), 'heapq.heappush', 'heapq.heappush', (['q[i]', '(newNode.cost, count[i], newNode)'], {}), '(q[i], (newNode.cost, count[i], newNode))\n', (13289, 13330), False, 'import heapq\n'), ((32694, 32719), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (32704, 32719), False, 'import math\n'), ((40987, 41012), 'math.floor', 'math.floor', (['(3 * 1.5 * res)'], {}), '(3 * 1.5 * res)\n', (40997, 41012), False, 'import math\n')]
import goless from goless.backends import current as be from . import BaseTests class RecvCaseTests(BaseTests): chansize = 1 def setUp(self): BaseTests.setUp(self) self.ch = goless.chan(self.chansize) self.ca = goless.rcase(self.ch) def test_ready(self): self.assertFalse(self.ca.ready()) be.run(self.ch.send, 1) self.assertTrue(self.ca.ready()) be.run(self.ch.recv) self.assertFalse(self.ca.ready()) def test_executes(self): be.run(self.ch.send, 'a') x = self.ca.exec_() self.assertEqual(x, 'a') def test_exec_with_no_body(self): be.run(self.ch.send, 'a') ca = goless.rcase(self.ch) self.assertEqual(ca.exec_(), 'a') class RecvCaseUnbufferedTests(RecvCaseTests): chansize = 0 class SendCaseTests(BaseTests): chansize = 1 def setUp(self): BaseTests.setUp(self) self.ch = goless.chan(self.chansize) self.sendval = 1 self.ca = goless.scase(self.ch, self.sendval) def test_ready(self): def assert_default_readiness(): self.assertEquals(self.ca.ready(), self.chansize > 0) assert_default_readiness() be.run(self.ch.send) self.assertFalse(self.ca.ready()) be.run(self.ch.recv) assert_default_readiness() be.run(self.ch.send) self.assertFalse(self.ca.ready()) be.run(self.ch.recv) assert_default_readiness() def test_executes(self): def recv(): a.append(self.ch.recv()) a = [] be.run(recv) self.ca.exec_() self.assertEqual(a, [self.sendval]) def test_exec_no_onselected(self): be.run(self.ch.recv) self.ca.exec_() class SendCaseUnbufferedTests(SendCaseTests): chansize = 0 class SelectTests(BaseTests): def setUp(self): BaseTests.setUp(self) self.chan1 = goless.chan() def test_select_uses_default(self): cases = [goless.rcase(self.chan1), goless.dcase()] result, val = goless.select(cases) self.assertIs(result, cases[1]) self.assertIsNone(val) def test_select_chooses_ready_selection(self): readychan = goless.chan(1) notreadychan = goless.chan(1) readychan.send(3) cases = [goless.rcase(notreadychan), goless.rcase(readychan), goless.dcase()] result, val = goless.select(cases) self.assertIs(result, 
cases[1]) self.assertEqual(val, 3) def test_select_no_default_no_ready_blocks(self): chan1 = goless.chan() chan2 = goless.chan() a = [] cases = [goless.rcase(chan2), goless.rcase(chan1)] def sel(): a.append(goless.select(cases)) be.run(sel) self.assertEqual(a, []) chan1.send(5) be.yield_() self.assertEqual(len(a), 1) chosen, val = a[0] self.assertEqual(chosen, cases[1]) self.assertEqual(val, 5) def test_main_tasklet_can_select(self): chan1 = goless.chan(1) cases = [goless.scase(chan1, 3)] chosen, val = goless.select(cases) self.assertIs(chosen, cases[0]) self.assertIsNone(val) def test_raises_if_multiple_default_cases(self): with self.assertRaises(AssertionError): goless.select([goless.dcase(), goless.dcase()]) def test_select_accepts_args(self): chan1 = goless.chan(1) scase = goless.scase(chan1, 1) chosen, val = goless.select(scase) self.assertIs(chosen, scase) self.assertIsNone(val) def test_select_raises_for_list_and_args(self): chan1 = goless.chan(1) chan2 = goless.chan(1) chan3 = goless.chan(1) cases = [goless.scase(chan1, 1), goless.scase(chan2, 2)] with self.assertRaises(TypeError): goless.select(cases, chan3) def test_select_with_no_args_should_do_nothing(self): goless.select() goless.select([]) def test_raises_deadlock_if_no_goroutines(self): with self.assertRaises(goless.Deadlock): goless.select(goless.rcase(goless.chan()))
[ "goless.dcase", "goless.scase", "goless.rcase", "goless.backends.current.run", "goless.chan", "goless.backends.current.yield_", "goless.select" ]
[((201, 227), 'goless.chan', 'goless.chan', (['self.chansize'], {}), '(self.chansize)\n', (212, 227), False, 'import goless\n'), ((246, 267), 'goless.rcase', 'goless.rcase', (['self.ch'], {}), '(self.ch)\n', (258, 267), False, 'import goless\n'), ((345, 368), 'goless.backends.current.run', 'be.run', (['self.ch.send', '(1)'], {}), '(self.ch.send, 1)\n', (351, 368), True, 'from goless.backends import current as be\n'), ((418, 438), 'goless.backends.current.run', 'be.run', (['self.ch.recv'], {}), '(self.ch.recv)\n', (424, 438), True, 'from goless.backends import current as be\n'), ((519, 544), 'goless.backends.current.run', 'be.run', (['self.ch.send', '"""a"""'], {}), "(self.ch.send, 'a')\n", (525, 544), True, 'from goless.backends import current as be\n'), ((653, 678), 'goless.backends.current.run', 'be.run', (['self.ch.send', '"""a"""'], {}), "(self.ch.send, 'a')\n", (659, 678), True, 'from goless.backends import current as be\n'), ((692, 713), 'goless.rcase', 'goless.rcase', (['self.ch'], {}), '(self.ch)\n', (704, 713), False, 'import goless\n'), ((942, 968), 'goless.chan', 'goless.chan', (['self.chansize'], {}), '(self.chansize)\n', (953, 968), False, 'import goless\n'), ((1012, 1047), 'goless.scase', 'goless.scase', (['self.ch', 'self.sendval'], {}), '(self.ch, self.sendval)\n', (1024, 1047), False, 'import goless\n'), ((1225, 1245), 'goless.backends.current.run', 'be.run', (['self.ch.send'], {}), '(self.ch.send)\n', (1231, 1245), True, 'from goless.backends import current as be\n'), ((1296, 1316), 'goless.backends.current.run', 'be.run', (['self.ch.recv'], {}), '(self.ch.recv)\n', (1302, 1316), True, 'from goless.backends import current as be\n'), ((1360, 1380), 'goless.backends.current.run', 'be.run', (['self.ch.send'], {}), '(self.ch.send)\n', (1366, 1380), True, 'from goless.backends import current as be\n'), ((1431, 1451), 'goless.backends.current.run', 'be.run', (['self.ch.recv'], {}), '(self.ch.recv)\n', (1437, 1451), True, 'from goless.backends import 
current as be\n'), ((1597, 1609), 'goless.backends.current.run', 'be.run', (['recv'], {}), '(recv)\n', (1603, 1609), True, 'from goless.backends import current as be\n'), ((1726, 1746), 'goless.backends.current.run', 'be.run', (['self.ch.recv'], {}), '(self.ch.recv)\n', (1732, 1746), True, 'from goless.backends import current as be\n'), ((1940, 1953), 'goless.chan', 'goless.chan', ([], {}), '()\n', (1951, 1953), False, 'import goless\n'), ((2076, 2096), 'goless.select', 'goless.select', (['cases'], {}), '(cases)\n', (2089, 2096), False, 'import goless\n'), ((2240, 2254), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (2251, 2254), False, 'import goless\n'), ((2278, 2292), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (2289, 2292), False, 'import goless\n'), ((2427, 2447), 'goless.select', 'goless.select', (['cases'], {}), '(cases)\n', (2440, 2447), False, 'import goless\n'), ((2592, 2605), 'goless.chan', 'goless.chan', ([], {}), '()\n', (2603, 2605), False, 'import goless\n'), ((2622, 2635), 'goless.chan', 'goless.chan', ([], {}), '()\n', (2633, 2635), False, 'import goless\n'), ((2781, 2792), 'goless.backends.current.run', 'be.run', (['sel'], {}), '(sel)\n', (2787, 2792), True, 'from goless.backends import current as be\n'), ((2855, 2866), 'goless.backends.current.yield_', 'be.yield_', ([], {}), '()\n', (2864, 2866), True, 'from goless.backends import current as be\n'), ((3067, 3081), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (3078, 3081), False, 'import goless\n'), ((3145, 3165), 'goless.select', 'goless.select', (['cases'], {}), '(cases)\n', (3158, 3165), False, 'import goless\n'), ((3468, 3482), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (3479, 3482), False, 'import goless\n'), ((3499, 3521), 'goless.scase', 'goless.scase', (['chan1', '(1)'], {}), '(chan1, 1)\n', (3511, 3521), False, 'import goless\n'), ((3544, 3564), 'goless.select', 'goless.select', (['scase'], {}), '(scase)\n', (3557, 3564), False, 'import goless\n'), 
((3710, 3724), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (3721, 3724), False, 'import goless\n'), ((3741, 3755), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (3752, 3755), False, 'import goless\n'), ((3772, 3786), 'goless.chan', 'goless.chan', (['(1)'], {}), '(1)\n', (3783, 3786), False, 'import goless\n'), ((4023, 4038), 'goless.select', 'goless.select', ([], {}), '()\n', (4036, 4038), False, 'import goless\n'), ((4047, 4064), 'goless.select', 'goless.select', (['[]'], {}), '([])\n', (4060, 4064), False, 'import goless\n'), ((2012, 2036), 'goless.rcase', 'goless.rcase', (['self.chan1'], {}), '(self.chan1)\n', (2024, 2036), False, 'import goless\n'), ((2038, 2052), 'goless.dcase', 'goless.dcase', ([], {}), '()\n', (2050, 2052), False, 'import goless\n'), ((2336, 2362), 'goless.rcase', 'goless.rcase', (['notreadychan'], {}), '(notreadychan)\n', (2348, 2362), False, 'import goless\n'), ((2364, 2387), 'goless.rcase', 'goless.rcase', (['readychan'], {}), '(readychan)\n', (2376, 2387), False, 'import goless\n'), ((2389, 2403), 'goless.dcase', 'goless.dcase', ([], {}), '()\n', (2401, 2403), False, 'import goless\n'), ((2668, 2687), 'goless.rcase', 'goless.rcase', (['chan2'], {}), '(chan2)\n', (2680, 2687), False, 'import goless\n'), ((2689, 2708), 'goless.rcase', 'goless.rcase', (['chan1'], {}), '(chan1)\n', (2701, 2708), False, 'import goless\n'), ((3099, 3121), 'goless.scase', 'goless.scase', (['chan1', '(3)'], {}), '(chan1, 3)\n', (3111, 3121), False, 'import goless\n'), ((3804, 3826), 'goless.scase', 'goless.scase', (['chan1', '(1)'], {}), '(chan1, 1)\n', (3816, 3826), False, 'import goless\n'), ((3828, 3850), 'goless.scase', 'goless.scase', (['chan2', '(2)'], {}), '(chan2, 2)\n', (3840, 3850), False, 'import goless\n'), ((3916, 3943), 'goless.select', 'goless.select', (['cases', 'chan3'], {}), '(cases, chan3)\n', (3929, 3943), False, 'import goless\n'), ((2751, 2771), 'goless.select', 'goless.select', (['cases'], {}), '(cases)\n', (2764, 2771), 
False, 'import goless\n'), ((3366, 3380), 'goless.dcase', 'goless.dcase', ([], {}), '()\n', (3378, 3380), False, 'import goless\n'), ((3382, 3396), 'goless.dcase', 'goless.dcase', ([], {}), '()\n', (3394, 3396), False, 'import goless\n'), ((4207, 4220), 'goless.chan', 'goless.chan', ([], {}), '()\n', (4218, 4220), False, 'import goless\n')]
""" cuTENSOR Wrapper Use `cupy_backends.cuda.libs.cutensor` directly in CuPy codebase. """ available = True try: from cupy_backends.cuda.libs.cutensor import * # NOQA except ImportError as e: available = False from cupy._environment import _preload_warning _preload_warning('cutensor', e)
[ "cupy._environment._preload_warning" ]
[((277, 308), 'cupy._environment._preload_warning', '_preload_warning', (['"""cutensor"""', 'e'], {}), "('cutensor', e)\n", (293, 308), False, 'from cupy._environment import _preload_warning\n')]
# -*- coding: utf-8 -*- """ Created on Fri Apr 9 20:00:55 2021 @author: oscar """ import numpy as np import math def bin_MUA_data(MUA,bin_res): counter = 0 binned_MUA = np.zeros([math.ceil(len(MUA[:,1])/bin_res),len(MUA[1,:])]) for bin in range(math.ceil(len(MUA[:,1])/bin_res)): if bin != math.ceil(len(MUA[:,1])/bin_res): temp = np.sum(MUA[counter:counter+bin_res,:],0) else: temp = np.sum(MUA[counter:len(MUA[:,1]),:],0) binned_MUA[bin,:] = temp counter = counter + bin_res binned_MUA = binned_MUA.astype(int) return binned_MUA def online_histogram_w_sat_based_nb_of_samples(data_in,sample_val_cutoff, max_firing_rate): # We consider the histogram to be full when "sample_val_cutoff" values have # been entered into it. # Inputs: # data_in = 1d vector of MUA data from 1 channel. # sample_val_cutoff = how mnay values the histogram will measure until we # consider the histogram training period to have ended. # max_firing_rate: S-1, max value that we consider in the MUA data. # Outputs: # approx sorted histogram, how many samples we measure (just for testing purposes) hist = {'0':0} flag_1 = False i = 0 while not flag_1: # the histogram isn't full yet # Saturate the histogram at the max firing rate if data_in[i] >= max_firing_rate: data_in[i] = max_firing_rate symbol = str(data_in[i]) if symbol in hist: # If this symbol is represented in the histogram hist[symbol] += 1 else: # If this symbol is new in the histogram hist[symbol] = 1 # If the histogram is full, end the while loop hist_count = 0 for symbol_hist in hist: hist_count += int(hist.get(str(symbol_hist))) if hist_count > sample_val_cutoff-1: flag_1 = True # If we've exceeded the number of samples in the data, end the while loop if i+1 == len(data_in): flag_1 = True i += 1 # Increment counter return hist, i # Approx sort used in the work, where the histogram is assumed to follow a # unimodal distribution. 
The peak in the histogram is identified and given an # index of 0, and values on either side are iteratively assigned the next # indices. def approx_sort(hist): idx = np.arange(0,len(hist)) p_idx = np.argmax(hist) if (p_idx>len(hist)/2): # peak shows on right half right = np.arange(2,(len(hist)-1-p_idx)*2+1,2) #idx on the right (even or odd doesn't matter) idx = np.delete(idx,right) # remove used idx left = idx else: # peak shows on left half left = np.arange(1,(2*p_idx-1)+1,2) idx = np.delete(idx,left) right = idx idx = np.hstack((np.flip(left),right)) idx = np.argsort(idx) return idx.astype(int), hist[idx.astype(int)]
[ "numpy.sum", "numpy.flip", "numpy.argmax", "numpy.argsort", "numpy.arange", "numpy.delete" ]
[((2571, 2586), 'numpy.argmax', 'np.argmax', (['hist'], {}), '(hist)\n', (2580, 2586), True, 'import numpy as np\n'), ((3054, 3069), 'numpy.argsort', 'np.argsort', (['idx'], {}), '(idx)\n', (3064, 3069), True, 'import numpy as np\n'), ((2778, 2799), 'numpy.delete', 'np.delete', (['idx', 'right'], {}), '(idx, right)\n', (2787, 2799), True, 'import numpy as np\n'), ((2908, 2942), 'numpy.arange', 'np.arange', (['(1)', '(2 * p_idx - 1 + 1)', '(2)'], {}), '(1, 2 * p_idx - 1 + 1, 2)\n', (2917, 2942), True, 'import numpy as np\n'), ((2952, 2972), 'numpy.delete', 'np.delete', (['idx', 'left'], {}), '(idx, left)\n', (2961, 2972), True, 'import numpy as np\n'), ((382, 426), 'numpy.sum', 'np.sum', (['MUA[counter:counter + bin_res, :]', '(0)'], {}), '(MUA[counter:counter + bin_res, :], 0)\n', (388, 426), True, 'import numpy as np\n'), ((3021, 3034), 'numpy.flip', 'np.flip', (['left'], {}), '(left)\n', (3028, 3034), True, 'import numpy as np\n')]
# Copyright 2016-2020 Blue Marble Analytics LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Carbon emissions from each carbonaceous project. """ import csv import os.path from pyomo.environ import Param, Set from gridpath.auxiliary.auxiliary import cursor_to_df, subset_init_by_param_value from gridpath.auxiliary.db_interface import ( update_prj_zone_column, determine_table_subset_by_start_and_column, ) from gridpath.auxiliary.validations import write_validation_to_database, validate_idxs def add_model_components(m, d, scenario_directory, subproblem, stage): """ The following Pyomo model components are defined in this module: +-------------------------------------------------------------------------+ | Sets | +=========================================================================+ | | :code:`CRBN_PRJS` | | | *Within*: :code:`PROJECTS` | | | | Two set of carbonaceous projects we need to track for the carbon cap. | +-------------------------------------------------------------------------+ | +-------------------------------------------------------------------------+ | Required Input Params | +=========================================================================+ | | :code:`carbon_cap_zone` | | | *Defined over*: :code:`CRBN_PRJS` | | | *Within*: :code:`CARBON_CAP_ZONES` | | | | This param describes the carbon cap zone for each carbonaceous project. 
| +-------------------------------------------------------------------------+ | +-------------------------------------------------------------------------+ | Derived Sets | +=========================================================================+ | | :code:`CRBN_PRJS_BY_CARBON_CAP_ZONE` | | | *Defined over*: :code:`CARBON_CAP_ZONES` | | | *Within*: :code:`CRBN_PRJS` | | | | Indexed set that describes the list of carbonaceous projects for each | | carbon cap zone. | +-------------------------------------------------------------------------+ | | :code:`CRBN_PRJ_OPR_TMPS` | | | *Within*: :code:`PRJ_OPR_TMPS` | | | | Two-dimensional set that defines all project-timepoint combinations | | when a carbonaceous project can be operational. | +-------------------------------------------------------------------------+ """ # Sets ########################################################################### m.CRBN_PRJS = Set(within=m.PROJECTS) # Input Params ########################################################################### m.carbon_cap_zone = Param(m.CRBN_PRJS, within=m.CARBON_CAP_ZONES) # Derived Sets ########################################################################### m.CRBN_PRJS_BY_CARBON_CAP_ZONE = Set( m.CARBON_CAP_ZONES, within=m.CRBN_PRJS, initialize=lambda mod, co2_z: subset_init_by_param_value( mod, "CRBN_PRJS", "carbon_cap_zone", co2_z ), ) m.CRBN_PRJ_OPR_TMPS = Set( within=m.PRJ_OPR_TMPS, initialize=lambda mod: [ (p, tmp) for (p, tmp) in mod.PRJ_OPR_TMPS if p in mod.CRBN_PRJS ], ) # Input-Output ############################################################################### def load_model_data(m, d, data_portal, scenario_directory, subproblem, stage): """ :param m: :param d: :param data_portal: :param scenario_directory: :param subproblem: :param stage: :return: """ data_portal.load( filename=os.path.join( scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab" ), select=("project", "carbon_cap_zone"), param=(m.carbon_cap_zone,), ) 
data_portal.data()["CRBN_PRJS"] = { None: list(data_portal.data()["carbon_cap_zone"].keys()) } # Database ############################################################################### def get_inputs_from_database(scenario_id, subscenarios, subproblem, stage, conn): """ :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ subproblem = 1 if subproblem == "" else subproblem stage = 1 if stage == "" else stage c = conn.cursor() project_zones = c.execute( """SELECT project, carbon_cap_zone FROM -- Get projects from portfolio only (SELECT project FROM inputs_project_portfolios WHERE project_portfolio_scenario_id = {} ) as prj_tbl LEFT OUTER JOIN -- Get carbon cap zones for those projects (SELECT project, carbon_cap_zone FROM inputs_project_carbon_cap_zones WHERE project_carbon_cap_zone_scenario_id = {} ) as prj_cc_zone_tbl USING (project) -- Filter out projects whose carbon cap zone is not one included in -- our carbon_cap_zone_scenario_id WHERE carbon_cap_zone in ( SELECT carbon_cap_zone FROM inputs_geography_carbon_cap_zones WHERE carbon_cap_zone_scenario_id = {} ); """.format( subscenarios.PROJECT_PORTFOLIO_SCENARIO_ID, subscenarios.PROJECT_CARBON_CAP_ZONE_SCENARIO_ID, subscenarios.CARBON_CAP_ZONE_SCENARIO_ID, ) ) return project_zones def write_model_inputs( scenario_directory, scenario_id, subscenarios, subproblem, stage, conn ): """ Get inputs from database and write out the model input projects.tab file (to be precise, amend it). :param scenario_directory: string, the scenario directory :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ project_zones = get_inputs_from_database( scenario_id, subscenarios, subproblem, stage, conn ) # Make a dict for easy access prj_zone_dict = dict() for (prj, zone) in project_zones: prj_zone_dict[str(prj)] = "." 
if zone is None else str(zone) with open( os.path.join( scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab" ), "r", ) as projects_file_in: reader = csv.reader(projects_file_in, delimiter="\t", lineterminator="\n") new_rows = list() # Append column header header = next(reader) header.append("carbon_cap_zone") new_rows.append(header) # Append correct values for row in reader: # If project specified, check if BA specified or not if row[0] in list(prj_zone_dict.keys()): row.append(prj_zone_dict[row[0]]) new_rows.append(row) # If project not specified, specify no BA else: row.append(".") new_rows.append(row) with open( os.path.join( scenario_directory, str(subproblem), str(stage), "inputs", "projects.tab" ), "w", newline="", ) as projects_file_out: writer = csv.writer(projects_file_out, delimiter="\t", lineterminator="\n") writer.writerows(new_rows) def process_results(db, c, scenario_id, subscenarios, quiet): """ :param db: :param c: :param subscenarios: :param quiet: :return: """ if not quiet: print("update carbon cap zones") tables_to_update = determine_table_subset_by_start_and_column( conn=db, tbl_start="results_project_", cols=["carbon_cap_zone"] ) for tbl in tables_to_update: update_prj_zone_column( conn=db, scenario_id=scenario_id, subscenarios=subscenarios, subscenario="project_carbon_cap_zone_scenario_id", subsc_tbl="inputs_project_carbon_cap_zones", prj_tbl=tbl, col="carbon_cap_zone", ) # Validation ############################################################################### def validate_inputs(scenario_id, subscenarios, subproblem, stage, conn): """ Get inputs from database and validate the inputs :param subscenarios: SubScenarios object with all subscenario info :param subproblem: :param stage: :param conn: database connection :return: """ project_zones = get_inputs_from_database( scenario_id, subscenarios, subproblem, stage, conn ) # Convert input data into pandas DataFrame df = cursor_to_df(project_zones) zones_w_project = 
df["carbon_cap_zone"].unique() # Get the required carbon cap zones # TODO: make this into a function similar to get_projects()? # could eventually centralize all these db query functions in one place c = conn.cursor() zones = c.execute( """SELECT carbon_cap_zone FROM inputs_geography_carbon_cap_zones WHERE carbon_cap_zone_scenario_id = {} """.format( subscenarios.CARBON_CAP_ZONE_SCENARIO_ID ) ) zones = [z[0] for z in zones] # convert to list # Check that each carbon cap zone has at least one project assigned to it write_validation_to_database( conn=conn, scenario_id=scenario_id, subproblem_id=subproblem, stage_id=stage, gridpath_module=__name__, db_table="inputs_project_carbon_cap_zones", severity="High", errors=validate_idxs( actual_idxs=zones_w_project, req_idxs=zones, idx_label="carbon_cap_zone", msg="Each carbon cap zone needs at least 1 " "project assigned to it.", ), ) # TODO: need validation that projects with carbon cap zones also have fuels
[ "gridpath.auxiliary.auxiliary.subset_init_by_param_value", "csv.reader", "csv.writer", "gridpath.auxiliary.db_interface.determine_table_subset_by_start_and_column", "gridpath.auxiliary.db_interface.update_prj_zone_column", "pyomo.environ.Param", "pyomo.environ.Set", "gridpath.auxiliary.validations.validate_idxs", "gridpath.auxiliary.auxiliary.cursor_to_df" ]
[((3938, 3960), 'pyomo.environ.Set', 'Set', ([], {'within': 'm.PROJECTS'}), '(within=m.PROJECTS)\n', (3941, 3960), False, 'from pyomo.environ import Param, Set\n'), ((4086, 4131), 'pyomo.environ.Param', 'Param', (['m.CRBN_PRJS'], {'within': 'm.CARBON_CAP_ZONES'}), '(m.CRBN_PRJS, within=m.CARBON_CAP_ZONES)\n', (4091, 4131), False, 'from pyomo.environ import Param, Set\n'), ((4496, 4614), 'pyomo.environ.Set', 'Set', ([], {'within': 'm.PRJ_OPR_TMPS', 'initialize': '(lambda mod: [(p, tmp) for p, tmp in mod.PRJ_OPR_TMPS if p in mod.CRBN_PRJS])'}), '(within=m.PRJ_OPR_TMPS, initialize=lambda mod: [(p, tmp) for p, tmp in\n mod.PRJ_OPR_TMPS if p in mod.CRBN_PRJS])\n', (4499, 4614), False, 'from pyomo.environ import Param, Set\n'), ((9042, 9154), 'gridpath.auxiliary.db_interface.determine_table_subset_by_start_and_column', 'determine_table_subset_by_start_and_column', ([], {'conn': 'db', 'tbl_start': '"""results_project_"""', 'cols': "['carbon_cap_zone']"}), "(conn=db, tbl_start=\n 'results_project_', cols=['carbon_cap_zone'])\n", (9084, 9154), False, 'from gridpath.auxiliary.db_interface import update_prj_zone_column, determine_table_subset_by_start_and_column\n'), ((10087, 10114), 'gridpath.auxiliary.auxiliary.cursor_to_df', 'cursor_to_df', (['project_zones'], {}), '(project_zones)\n', (10099, 10114), False, 'from gridpath.auxiliary.auxiliary import cursor_to_df, subset_init_by_param_value\n'), ((7844, 7909), 'csv.reader', 'csv.reader', (['projects_file_in'], {'delimiter': '"""\t"""', 'lineterminator': '"""\n"""'}), "(projects_file_in, delimiter='\\t', lineterminator='\\n')\n", (7854, 7909), False, 'import csv\n'), ((8691, 8757), 'csv.writer', 'csv.writer', (['projects_file_out'], {'delimiter': '"""\t"""', 'lineterminator': '"""\n"""'}), "(projects_file_out, delimiter='\\t', lineterminator='\\n')\n", (8701, 8757), False, 'import csv\n'), ((9206, 9435), 'gridpath.auxiliary.db_interface.update_prj_zone_column', 'update_prj_zone_column', ([], {'conn': 'db', 'scenario_id': 
'scenario_id', 'subscenarios': 'subscenarios', 'subscenario': '"""project_carbon_cap_zone_scenario_id"""', 'subsc_tbl': '"""inputs_project_carbon_cap_zones"""', 'prj_tbl': 'tbl', 'col': '"""carbon_cap_zone"""'}), "(conn=db, scenario_id=scenario_id, subscenarios=\n subscenarios, subscenario='project_carbon_cap_zone_scenario_id',\n subsc_tbl='inputs_project_carbon_cap_zones', prj_tbl=tbl, col=\n 'carbon_cap_zone')\n", (9228, 9435), False, 'from gridpath.auxiliary.db_interface import update_prj_zone_column, determine_table_subset_by_start_and_column\n'), ((11007, 11173), 'gridpath.auxiliary.validations.validate_idxs', 'validate_idxs', ([], {'actual_idxs': 'zones_w_project', 'req_idxs': 'zones', 'idx_label': '"""carbon_cap_zone"""', 'msg': '"""Each carbon cap zone needs at least 1 project assigned to it."""'}), "(actual_idxs=zones_w_project, req_idxs=zones, idx_label=\n 'carbon_cap_zone', msg=\n 'Each carbon cap zone needs at least 1 project assigned to it.')\n", (11020, 11173), False, 'from gridpath.auxiliary.validations import write_validation_to_database, validate_idxs\n'), ((4369, 4439), 'gridpath.auxiliary.auxiliary.subset_init_by_param_value', 'subset_init_by_param_value', (['mod', '"""CRBN_PRJS"""', '"""carbon_cap_zone"""', 'co2_z'], {}), "(mod, 'CRBN_PRJS', 'carbon_cap_zone', co2_z)\n", (4395, 4439), False, 'from gridpath.auxiliary.auxiliary import cursor_to_df, subset_init_by_param_value\n')]
# -*- coding: utf-8 -*- from django.db import models from django.utils.translation import ugettext_lazy as _ from webtopay.signals import payment_was_successful, payment_was_flagged class WebToPayResponse(models.Model): # Non-webtopay params def __unicode__(self): amount = self.amount / 100 if self.amount else 0 return "%s %.2f" % (self.currency, amount) query = models.TextField(blank=True) ipaddress = models.IPAddressField(blank=True) flag = models.BooleanField(blank=True, default=False) flag_info = models.TextField(blank=True) # The thing we got from server projectid = models.BigIntegerField(null=True, help_text="Unikalus projekto numeris. "+\ "Tik patvirtinti projektai gali priimti įmokas") orderid = models.CharField(max_length=40, help_text="Užsakymo numeris iš jūsų sistemos") lang = models.CharField(max_length=3, blank=True, help_text="Galima nurodyti naudotojo kalbą, jeigu tokios kalbos "+\ "mokėjimai.lt nepalaiko bus parinkta kalba pagal "+\ "lankytojo IP adresą arba anglų kalba pagal nutylėjimą. "+\ "(LIT, LAV, EST, RUS, ENG, GER, POL)") amount = models.BigIntegerField(null=True, help_text="Suma centais, kurią klientas turi apmokėti") currency = models.CharField(max_length=3, help_text="Mokėjimo valiuta (LTL, USD, EUR), kuria pageidaujate, "+\ "kad klientas mokėtų. Jeigu nurodyta valiuta per "+\ "pasirinktą mokėjimo būdą negali būti priimta, "+\ "sistema automatiškai pagal dienos kursą konvertuos "+\ "į palaikomą valiutą. Atsakyme į Jūsų svetainę bus "+\ "paduoti payamount ir paycurrency") payment = models.CharField(max_length=20, help_text="Mokėjimo būdas. Parametras, kuriame nieko nenurodoma "+\ "(paliekamas tuščias). Naudotojui bus pateikta lentelė "+\ "su mokėjimo būdų sąrašu, pasirinkimui. Jis naudojamas "+\ "tik tuo atveju, jeigu norima, kad mokėjimas būtų "+\ "atliktas tik per konkretų mokėjimo būdą") country = models.CharField(max_length=2, help_text="Mokėtojo šalis (LT, EE, LV, GB, PL, DE). 
Nurodžius "+\ "šalį, mokėtojui iš karto pateikiami mokėjimo būdai, "+\ "galimi toje šalyje. Jeigu šalis nenurodoma, sistema "+\ "pagal mokėtojo IP adresą nustato jo šalį. Mokėtojui "+\ "paliekama galimybė pasikeisti šalį") paytext = models.TextField( help_text="Mokėjimo paskirtis, kuri matosi darant pavedimą.") _ss2 = models.CharField(blank=True, max_length=255, help_text="Parametras, kurio pagalba yra tikrinama, ar iš mūsų "+\ "serverio gavote atsakymą. Tai aukščiausio patikimumo "+\ "lygio tikrinimo būdas. Atsisiųskite skripto pavyzdį") _ss1 = models.CharField(blank=True, max_length=64, help_text="Parametras, kurio pagalba yra tikrinama, ar iš mūsų "+\ "serverio gavote atsakymą. Tai -- žemesnio nei _ss2 "+\ "patikimumo lygio tikrinimo būdas. Atsisiųskite pavyzdį") name = models.CharField(max_length=255, blank=True, help_text="Mokėtojo vardas, gautas iš mokėjimo sistemos. "+\ "Siunčiamas tik jeigu mokėjimo sistema tokį suteikia") surename = models.CharField(max_length=255, blank=True, help_text="Mokėtojo pavardė, gauta iš mokėjimo sistemos. "+\ "Siunčiamas tik jeigu mokėjimo sistema tokį suteikia") status = models.IntegerField(max_length=255, help_text="Mokėjimo būklė: "+\ "0 - apmokėjimas neįvyko, "+\ "1 - apmokėta sėkmingai, "+\ "2 - mokėjimo nurodymas priimtas, bet dar neįvykdytas", choices=((0, _('payment did not succeed')), (1, _('payment succeeded')), (2, _('payment accepted, but not yet processed'))), default=0 ) # Error codes are stored separately error = models.CharField(max_length=20, blank=True, help_text="Klaidos kodas") test = models.SmallIntegerField(choices=((0, 'Production'), (1, 'Test')), null=True, help_text="Parametras, kuriam esant galima testuoti sujungimą, "+\ "tokiu būdu apmokėjimas nevykdomas ir rezultatas "+\ "grąžinamas iš karto, tartum būtų sumokėta. 
Norint "+\ "testuoti, būtina aktyvuoti testavimo režimą prie "+\ "konkretaus projekto, kai prisijungiate: \"Paslaugų "+\ "valdymas\" -> \"įmokų surinkimas\" (prie konkretaus "+\ "projekto) -> \"Leisti testinius mokėjimus\" (pažymėkite)") # In API 1.4, this field has zero allowed length, therefore textfield... p_email = models.TextField( help_text="Pirkėjo el. paštas privalomas. Jeigu adresas nebus "+\ "gautas, kliento bus prašoma jį įvesti. Šiuo adresu "+\ "mokėjimai.lt sistema informuos mokėtoją apie apmokėjimo"+\ " būklę") requestid = models.CharField(max_length=40, blank=True, help_text="Tai užklausos numeris, kurį gauname, kai žmogus "+\ "nuspaudžia ant banko ir kurį pateikiame į "+\ "\"callbackurl\" laukelyje nurodytą nuorodą") payamount = models.IntegerField(null=True, help_text="Suma centais, kurią pervedė. Gali skirtis jeigu buvo"+\ "konvertuota į kitą valiutą") # This is suspicious. According to the spec, its max length is 0. So in # theory, it should be a text field. paycurrency = models.CharField(max_length=10, help_text="Mokėjimo valiuta (LTL, USD, EUR), kurią pervedė. 
Gali "+\ "skirtis nuo tos kurios prašėte, jeigu pasirinktas "+\ "mokėjimo būdas negalėjo priimti prašomos valiutos") version = models.CharField(max_length=9, help_text="Mokėjimai.lt mokėjimų sistemos specifikacijos (API) "+\ "versijos numeris") """ 0x1 - Mokėjimo suma per maža 0x2 - Mokėjimo suma per didelė 0x3 - Nurodyta valiuta neaptarnaujama 0x4 - Nėra sumos arba valiutos 0x6 - Neįrašytas projectID arba tokio ID nėra 0x7 - Išjungtas testavimo rėžimas, tačiau mėginote atlikti testinį mokėjimą 0x8 - Jūs uždraudėte šį mokėjimo būdą 0x9 - Blogas "paytext" kintamojo kodavimas (turi būti utf-8) 0x10 - Tuščias arba neteisingai užpildytas "orderID" 0x11xError - Toks projektas neegzistuoja 0x11x0 - Projektas nėra patikrintas mūsų administratoriaus 0x11x2 - Projektas yra sustabdytas kliento 0x11x4 - Projektas yra blokuotas mūsų administratoriaus 0x11x5 - Projektas yra ištrintas iš mūsų sistemos 0x12 - Negautas projectid (projekto numeris) parametras, nors jis yra privalomas 0x13 - Accepturl, cancellurl, callbacurl arba referer bazinis adresas skiriasi nuo projekte patvirtintų adresų 0x14 - Klaidingas "sign" parametras 0x15 - Klaidingi kai kurie iš perduotų parametrų 0x15x0 - Neteisingas vienas iš šių parametrų: cancelurl, accepturl, callbackurl """ def set_flag(self, info): self.flag = True self.flag_info += info def send_signals(self): if self.flag: payment_was_flagged.send(sender=self) else: payment_was_successful.send(sender=self)
[ "django.db.models.TextField", "webtopay.signals.payment_was_flagged.send", "django.db.models.BigIntegerField", "django.db.models.IPAddressField", "django.db.models.CharField", "django.db.models.BooleanField", "django.db.models.SmallIntegerField", "django.db.models.IntegerField", "django.utils.translation.ugettext_lazy", "webtopay.signals.payment_was_successful.send" ]
[((398, 426), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (414, 426), False, 'from django.db import models\n'), ((443, 476), 'django.db.models.IPAddressField', 'models.IPAddressField', ([], {'blank': '(True)'}), '(blank=True)\n', (464, 476), False, 'from django.db import models\n'), ((488, 534), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'blank': '(True)', 'default': '(False)'}), '(blank=True, default=False)\n', (507, 534), False, 'from django.db import models\n'), ((551, 579), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (567, 579), False, 'from django.db import models\n'), ((632, 760), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'null': '(True)', 'help_text': "('Unikalus projekto numeris. ' +\n 'Tik patvirtinti projektai gali priimti įmokas')"}), "(null=True, help_text='Unikalus projekto numeris. ' +\n 'Tik patvirtinti projektai gali priimti įmokas')\n", (654, 760), False, 'from django.db import models\n'), ((803, 881), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'help_text': '"""Užsakymo numeris iš jūsų sistemos"""'}), "(max_length=40, help_text='Užsakymo numeris iš jūsų sistemos')\n", (819, 881), False, 'from django.db import models\n'), ((905, 1184), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'blank': '(True)', 'help_text': "('Galima nurodyti naudotojo kalbą, jeigu tokios kalbos ' +\n 'mokėjimai.lt nepalaiko bus parinkta kalba pagal ' +\n 'lankytojo IP adresą arba anglų kalba pagal nutylėjimą. ' +\n '(LIT, LAV, EST, RUS, ENG, GER, POL)')"}), "(max_length=3, blank=True, help_text=\n 'Galima nurodyti naudotojo kalbą, jeigu tokios kalbos ' +\n 'mokėjimai.lt nepalaiko bus parinkta kalba pagal ' +\n 'lankytojo IP adresą arba anglų kalba pagal nutylėjimą. 
' +\n '(LIT, LAV, EST, RUS, ENG, GER, POL)')\n", (921, 1184), False, 'from django.db import models\n'), ((1253, 1347), 'django.db.models.BigIntegerField', 'models.BigIntegerField', ([], {'null': '(True)', 'help_text': '"""Suma centais, kurią klientas turi apmokėti"""'}), "(null=True, help_text=\n 'Suma centais, kurią klientas turi apmokėti')\n", (1275, 1347), False, 'from django.db import models\n'), ((1370, 1745), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(3)', 'help_text': "('Mokėjimo valiuta (LTL, USD, EUR), kuria pageidaujate, ' +\n 'kad klientas mokėtų. Jeigu nurodyta valiuta per ' +\n 'pasirinktą mokėjimo būdą negali būti priimta, ' +\n 'sistema automatiškai pagal dienos kursą konvertuos ' +\n 'į palaikomą valiutą. Atsakyme į Jūsų svetainę bus ' +\n 'paduoti payamount ir paycurrency')"}), "(max_length=3, help_text=\n 'Mokėjimo valiuta (LTL, USD, EUR), kuria pageidaujate, ' +\n 'kad klientas mokėtų. Jeigu nurodyta valiuta per ' +\n 'pasirinktą mokėjimo būdą negali būti priimta, ' +\n 'sistema automatiškai pagal dienos kursą konvertuos ' +\n 'į palaikomą valiutą. Atsakyme į Jūsų svetainę bus ' +\n 'paduoti payamount ir paycurrency')\n", (1386, 1745), False, 'from django.db import models\n'), ((1847, 2182), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'help_text': "('Mokėjimo būdas. Parametras, kuriame nieko nenurodoma ' +\n '(paliekamas tuščias). Naudotojui bus pateikta lentelė ' +\n 'su mokėjimo būdų sąrašu, pasirinkimui. Jis naudojamas ' +\n 'tik tuo atveju, jeigu norima, kad mokėjimas būtų ' +\n 'atliktas tik per konkretų mokėjimo būdą')"}), "(max_length=20, help_text=\n 'Mokėjimo būdas. Parametras, kuriame nieko nenurodoma ' +\n '(paliekamas tuščias). Naudotojui bus pateikta lentelė ' +\n 'su mokėjimo būdų sąrašu, pasirinkimui. 
Jis naudojamas ' +\n 'tik tuo atveju, jeigu norima, kad mokėjimas būtų ' +\n 'atliktas tik per konkretų mokėjimo būdą')\n", (1863, 2182), False, 'from django.db import models\n'), ((2268, 2594), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(2)', 'help_text': "('Mokėtojo šalis (LT, EE, LV, GB, PL, DE). Nurodžius ' +\n 'šalį, mokėtojui iš karto pateikiami mokėjimo būdai, ' +\n 'galimi toje šalyje. Jeigu šalis nenurodoma, sistema ' +\n 'pagal mokėtojo IP adresą nustato jo šalį. Mokėtojui ' +\n 'paliekama galimybė pasikeisti šalį')"}), "(max_length=2, help_text=\n 'Mokėtojo šalis (LT, EE, LV, GB, PL, DE). Nurodžius ' +\n 'šalį, mokėtojui iš karto pateikiami mokėjimo būdai, ' +\n 'galimi toje šalyje. Jeigu šalis nenurodoma, sistema ' +\n 'pagal mokėtojo IP adresą nustato jo šalį. Mokėtojui ' +\n 'paliekama galimybė pasikeisti šalį')\n", (2284, 2594), False, 'from django.db import models\n'), ((2680, 2758), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Mokėjimo paskirtis, kuri matosi darant pavedimą."""'}), "(help_text='Mokėjimo paskirtis, kuri matosi darant pavedimą.')\n", (2696, 2758), False, 'from django.db import models\n'), ((2783, 3020), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(255)', 'help_text': "('Parametras, kurio pagalba yra tikrinama, ar iš mūsų ' +\n 'serverio gavote atsakymą. Tai aukščiausio patikimumo ' +\n 'lygio tikrinimo būdas. Atsisiųskite skripto pavyzdį')"}), "(blank=True, max_length=255, help_text=\n 'Parametras, kurio pagalba yra tikrinama, ar iš mūsų ' +\n 'serverio gavote atsakymą. Tai aukščiausio patikimumo ' +\n 'lygio tikrinimo būdas. Atsisiųskite skripto pavyzdį')\n", (2799, 3020), False, 'from django.db import models\n'), ((3071, 3308), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(64)', 'help_text': "('Parametras, kurio pagalba yra tikrinama, ar iš mūsų ' +\n 'serverio gavote atsakymą. 
Tai -- žemesnio nei _ss2 ' +\n 'patikimumo lygio tikrinimo būdas. Atsisiųskite pavyzdį')"}), "(blank=True, max_length=64, help_text=\n 'Parametras, kurio pagalba yra tikrinama, ar iš mūsų ' +\n 'serverio gavote atsakymą. Tai -- žemesnio nei _ss2 ' +\n 'patikimumo lygio tikrinimo būdas. Atsisiųskite pavyzdį')\n", (3087, 3308), False, 'from django.db import models\n'), ((3359, 3528), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'help_text': "('Mokėtojo vardas, gautas iš mokėjimo sistemos. ' +\n 'Siunčiamas tik jeigu mokėjimo sistema tokį suteikia')"}), "(max_length=255, blank=True, help_text=\n 'Mokėtojo vardas, gautas iš mokėjimo sistemos. ' +\n 'Siunčiamas tik jeigu mokėjimo sistema tokį suteikia')\n", (3375, 3528), False, 'from django.db import models\n'), ((3567, 3736), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'blank': '(True)', 'help_text': "('Mokėtojo pavardė, gauta iš mokėjimo sistemos. ' +\n 'Siunčiamas tik jeigu mokėjimo sistema tokį suteikia')"}), "(max_length=255, blank=True, help_text=\n 'Mokėtojo pavardė, gauta iš mokėjimo sistemos. ' +\n 'Siunčiamas tik jeigu mokėjimo sistema tokį suteikia')\n", (3583, 3736), False, 'from django.db import models\n'), ((4313, 4383), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'blank': '(True)', 'help_text': '"""Klaidos kodas"""'}), "(max_length=20, blank=True, help_text='Klaidos kodas')\n", (4329, 4383), False, 'from django.db import models\n'), ((4408, 4910), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': "((0, 'Production'), (1, 'Test'))", 'null': '(True)', 'help_text': '(\'Parametras, kuriam esant galima testuoti sujungimą, \' +\n \'tokiu būdu apmokėjimas nevykdomas ir rezultatas \' +\n \'grąžinamas iš karto, tartum būtų sumokėta. 
Norint \' +\n \'testuoti, būtina aktyvuoti testavimo režimą prie \' +\n \'konkretaus projekto, kai prisijungiate: "Paslaugų \' +\n \'valdymas" -> "įmokų surinkimas" (prie konkretaus \' +\n \'projekto) -> "Leisti testinius mokėjimus" (pažymėkite)\')'}), '(choices=((0, \'Production\'), (1, \'Test\')), null=\n True, help_text=\'Parametras, kuriam esant galima testuoti sujungimą, \' +\n \'tokiu būdu apmokėjimas nevykdomas ir rezultatas \' +\n \'grąžinamas iš karto, tartum būtų sumokėta. Norint \' +\n \'testuoti, būtina aktyvuoti testavimo režimą prie \' +\n \'konkretaus projekto, kai prisijungiate: "Paslaugų \' +\n \'valdymas" -> "įmokų surinkimas" (prie konkretaus \' +\n \'projekto) -> "Leisti testinius mokėjimus" (pažymėkite)\')\n', (4432, 4910), False, 'from django.db import models\n'), ((5124, 5345), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': "('Pirkėjo el. paštas privalomas. Jeigu adresas nebus ' +\n 'gautas, kliento bus prašoma jį įvesti. Šiuo adresu ' +\n 'mokėjimai.lt sistema informuos mokėtoją apie apmokėjimo' + ' būklę')"}), "(help_text=\n 'Pirkėjo el. paštas privalomas. Jeigu adresas nebus ' +\n 'gautas, kliento bus prašoma jį įvesti. 
Šiuo adresu ' +\n 'mokėjimai.lt sistema informuos mokėtoją apie apmokėjimo' + ' būklę')\n", (5140, 5345), False, 'from django.db import models\n'), ((5422, 5632), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(40)', 'blank': '(True)', 'help_text': '(\'Tai užklausos numeris, kurį gauname, kai žmogus \' +\n \'nuspaudžia ant banko ir kurį pateikiame į \' +\n \'"callbackurl" laukelyje nurodytą nuorodą\')'}), '(max_length=40, blank=True, help_text=\n \'Tai užklausos numeris, kurį gauname, kai žmogus \' +\n \'nuspaudžia ant banko ir kurį pateikiame į \' +\n \'"callbackurl" laukelyje nurodytą nuorodą\')\n', (5438, 5632), False, 'from django.db import models\n'), ((5690, 5826), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)', 'help_text': "('Suma centais, kurią pervedė. Gali skirtis jeigu buvo' +\n 'konvertuota į kitą valiutą')"}), "(null=True, help_text=\n 'Suma centais, kurią pervedė. Gali skirtis jeigu buvo' +\n 'konvertuota į kitą valiutą')\n", (5709, 5826), False, 'from django.db import models\n'), ((5986, 6207), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'help_text': "('Mokėjimo valiuta (LTL, USD, EUR), kurią pervedė. Gali ' +\n 'skirtis nuo tos kurios prašėte, jeigu pasirinktas ' +\n 'mokėjimo būdas negalėjo priimti prašomos valiutos')"}), "(max_length=10, help_text=\n 'Mokėjimo valiuta (LTL, USD, EUR), kurią pervedė. 
Gali ' +\n 'skirtis nuo tos kurios prašėte, jeigu pasirinktas ' +\n 'mokėjimo būdas negalėjo priimti prašomos valiutos')\n", (6002, 6207), False, 'from django.db import models\n'), ((6261, 6388), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(9)', 'help_text': "('Mokėjimai.lt mokėjimų sistemos specifikacijos (API) ' + 'versijos numeris')"}), "(max_length=9, help_text=\n 'Mokėjimai.lt mokėjimų sistemos specifikacijos (API) ' + 'versijos numeris'\n )\n", (6277, 6388), False, 'from django.db import models\n'), ((7739, 7776), 'webtopay.signals.payment_was_flagged.send', 'payment_was_flagged.send', ([], {'sender': 'self'}), '(sender=self)\n', (7763, 7776), False, 'from webtopay.signals import payment_was_successful, payment_was_flagged\n'), ((7803, 7843), 'webtopay.signals.payment_was_successful.send', 'payment_was_successful.send', ([], {'sender': 'self'}), '(sender=self)\n', (7830, 7843), False, 'from webtopay.signals import payment_was_successful, payment_was_flagged\n'), ((4048, 4076), 'django.utils.translation.ugettext_lazy', '_', (['"""payment did not succeed"""'], {}), "('payment did not succeed')\n", (4049, 4076), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4107, 4129), 'django.utils.translation.ugettext_lazy', '_', (['"""payment succeeded"""'], {}), "('payment succeeded')\n", (4108, 4129), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((4160, 4204), 'django.utils.translation.ugettext_lazy', '_', (['"""payment accepted, but not yet processed"""'], {}), "('payment accepted, but not yet processed')\n", (4161, 4204), True, 'from django.utils.translation import ugettext_lazy as _\n')]
from array_shift import insert_shift_array # def test_insert_to_middle(): # expected = [1, 2, 3] # actual = insert_shift_array([1, 3], 2) # assert expected == actual def test_insert_to_middle(): expected = [1, 2, 3, 4, 5, 6] actual = insert_shift_array([1, 2, 3, 5, 6], 4) assert expected == actual
[ "array_shift.insert_shift_array" ]
[((269, 307), 'array_shift.insert_shift_array', 'insert_shift_array', (['[1, 2, 3, 5, 6]', '(4)'], {}), '([1, 2, 3, 5, 6], 4)\n', (287, 307), False, 'from array_shift import insert_shift_array\n')]
from collections import OrderedDict import numpy as np import torch import torch.optim as optim from torch import nn as nn from torch import autograd from torch.autograd import Variable import torch.nn.functional as F from rlkit.core import logger import rlkit.torch.pytorch_util as ptu from rlkit.core.eval_util import create_stats_ordered_dict from rlkit.torch.torch_meta_irl_algorithm import TorchMetaIRLAlgorithm from rlkit.torch.sac.policies import MakeDeterministic from rlkit.core.train_util import linear_schedule from rlkit.torch.core import PyTorchModule from rlkit.torch.sac.policies import PostCondMLPPolicyWrapper from rlkit.data_management.path_builder import PathBuilder from gym.spaces import Dict from rlkit.torch.irl.encoders.aggregators import sum_aggregator from rlkit.torch.distributions import ReparamMultivariateNormalDiag OUTER_RADIUS = 2.0 TASK_RADIUS = 2.0 SAME_COLOUR_RADIUS = 1.0 def concat_trajs(trajs): new_dict = {} for k in trajs[0].keys(): if isinstance(trajs[0][k], dict): new_dict[k] = concat_trajs([t[k] for t in trajs]) else: new_dict[k] = np.concatenate([t[k] for t in trajs], axis=0) return new_dict def subsample_traj(traj, num_samples): traj_len = traj['observations'].shape[0] idxs = np.random.choice(traj_len, size=num_samples, replace=traj_len<num_samples) new_traj = {k: traj[k][idxs,...] 
for k in traj} return new_traj class R2ZMap(PyTorchModule): def __init__( self, r_dim, z_dim, hid_dim, # this makes it be closer to deterministic, makes it easier to train # before we turn on the KL regularization LOG_STD_SUBTRACT_VALUE=2.0 ): self.save_init_params(locals()) super().__init__() self.trunk = nn.Sequential( nn.Linear(r_dim, hid_dim), nn.BatchNorm1d(hid_dim), nn.ReLU(), nn.Linear(hid_dim, hid_dim), nn.BatchNorm1d(hid_dim), nn.ReLU(), ) self.mean_fc = nn.Linear(hid_dim, z_dim) self.log_sig_fc = nn.Linear(hid_dim, z_dim) self.LOG_STD_SUBTRACT_VALUE = LOG_STD_SUBTRACT_VALUE print('LOG STD SUBTRACT VALUE IS FOR APPROX POSTERIOR IS %f' % LOG_STD_SUBTRACT_VALUE) def forward(self, r): trunk_output = self.trunk(r) mean = self.mean_fc(trunk_output) log_sig = self.log_sig_fc(trunk_output) - self.LOG_STD_SUBTRACT_VALUE return mean, log_sig class Encoder(PyTorchModule): def __init__(self, z_dim): self.save_init_params(locals()) super().__init__() HID_DIM = 64 self.encoder_mlp = nn.Sequential( nn.Linear(6, HID_DIM), nn.BatchNorm1d(HID_DIM), nn.ReLU(), nn.Linear(HID_DIM, HID_DIM), nn.BatchNorm1d(HID_DIM), nn.ReLU(), nn.Linear(HID_DIM, HID_DIM) ) self.agg = sum_aggregator self.r2z_map = R2ZMap(HID_DIM, z_dim, HID_DIM) def forward(self, context, mask): N_tasks, N_max_cont, N_dim = context.size(0), context.size(1), context.size(2) context = context.view(-1, N_dim) embedded_context = self.encoder_mlp(context) embed_dim = embedded_context.size(1) embedded_context = embedded_context.view(N_tasks, N_max_cont, embed_dim) agg = self.agg(embedded_context, mask) post_mean, post_log_sig = self.r2z_map(agg) return ReparamMultivariateNormalDiag(post_mean, post_log_sig) class FetchTaskDesign(): def __init__( self, mlp, num_tasks_used_per_update=5, min_context_size=1, max_context_size=5, classification_batch_size_per_task=32, encoder_lr=1e-3, encoder_optimizer_class=optim.Adam, mlp_lr=1e-3, mlp_optimizer_class=optim.Adam, num_update_loops_per_train_call=1000, num_epochs=10000, 
z_dim=16, **kwargs ): self.mlp = mlp self.encoder = Encoder(z_dim) self.num_tasks_used_per_update = num_tasks_used_per_update self.min_context_size = min_context_size self.max_context_size = max_context_size self.classification_batch_size_per_task = classification_batch_size_per_task self.encoder_optimizer = encoder_optimizer_class( self.encoder.parameters(), lr=encoder_lr, betas=(0.9, 0.999) ) self.mlp_optimizer = mlp_optimizer_class( self.mlp.parameters(), lr=mlp_lr, betas=(0.9, 0.999) ) self.bce = nn.BCEWithLogitsLoss() self.num_update_loops_per_train_call = num_update_loops_per_train_call self.num_epochs = num_epochs def _sample_color_within_radius(self, center, radius): new_color = self._uniform_sample_from_sphere(radius) + center while np.linalg.norm(new_color) > OUTER_RADIUS: new_color = self._uniform_sample_from_sphere(radius) + center return new_color def _uniform_sample_from_sphere(self, radius): x = np.random.normal(size=3) x /= np.linalg.norm(x, axis=-1) r = radius u = np.random.uniform() sampled_color = r * (u**(1.0/3.0)) * x return sampled_color def _sample_color_with_min_dist(self, color, min_dist): new_color = self._uniform_sample_from_sphere(OUTER_RADIUS) while np.linalg.norm(new_color - color, axis=-1) < min_dist: new_color = self._uniform_sample_from_sphere(OUTER_RADIUS) return new_color def _get_training_batch(self): task_colors = [] for _ in range(self.num_tasks_used_per_update): task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS)) task_colors = np.array(task_colors) input_batch = [] labels = [] for task in task_colors: for _ in range(self.classification_batch_size_per_task): good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS) bad = self._sample_color_with_min_dist(task, 0.0) # HERE if np.random.uniform() > 0.5: input_batch.append(np.concatenate((good, bad))) labels.append([1.0]) else: input_batch.append(np.concatenate((bad, good))) labels.append([0.0]) input_batch = Variable(ptu.from_numpy(np.array(input_batch))) 
labels = Variable(ptu.from_numpy(np.array(labels))) context = [] mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1)))) for task_num, task in enumerate(task_colors): task_context = [] for _ in range(self.max_context_size): good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS) bad = self._sample_color_with_min_dist(task, 0.0) # HERE # always the same order because it's the context task_context.append(np.concatenate((good, bad))) context.append(task_context) con_size = np.random.randint(self.min_context_size, self.max_context_size+1) mask[task_num,:con_size,:] = 1.0 context = Variable(ptu.from_numpy(np.array(context))) return context, mask, input_batch, labels def _get_eval_batch(self): task_colors = [] for _ in range(self.num_tasks_used_per_update): task_colors.append(self._uniform_sample_from_sphere(TASK_RADIUS)) task_colors = np.array(task_colors) # task_colors = np.zeros((self.num_tasks_used_per_update, 3)) # THIS # task_colors[:,0] = -1.0 # THIS input_batch = [] labels = [] for task in task_colors: for _ in range(self.classification_batch_size_per_task): good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS) bad = self._sample_color_with_min_dist(task, SAME_COLOUR_RADIUS) if np.random.uniform() > 0.5: input_batch.append(np.concatenate((good, bad))) labels.append([1.0]) else: input_batch.append(np.concatenate((bad, good))) labels.append([0.0]) input_batch = Variable(ptu.from_numpy(np.array(input_batch))) labels = Variable(ptu.from_numpy(np.array(labels))) context = [] mask = Variable(ptu.from_numpy(np.zeros((self.num_tasks_used_per_update, self.max_context_size, 1)))) for task_num, task in enumerate(task_colors): task_context = [] for _ in range(self.max_context_size): good = self._sample_color_within_radius(task, SAME_COLOUR_RADIUS) # good = np.zeros(3) # THIS bad = self._sample_color_with_min_dist(task, 0.0) # HERE # bad = np.array([2.0, 0.0, 0.0]) # THIS # always the same order because it's 
the context task_context.append(np.concatenate((good, bad))) context.append(task_context) con_size = np.random.randint(self.min_context_size, self.max_context_size+1) mask[task_num,:con_size,:] = 1.0 context = Variable(ptu.from_numpy(np.array(context))) return context, mask, input_batch, labels def train(self): for e in range(self.num_epochs): self._do_training(e, self.num_update_loops_per_train_call) self.evaluate() def _do_training(self, epoch, num_updates): ''' Train the discriminator ''' self.mlp.train() self.encoder.train() for _ in range(num_updates): self.encoder_optimizer.zero_grad() self.mlp_optimizer.zero_grad() # prep the batches context, mask, input_batch, labels = self._get_training_batch() post_dist = self.encoder(context, mask) # z = post_dist.sample() # N_tasks x Dim z = post_dist.mean repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1)) mlp_input = torch.cat([input_batch, repeated_z], dim=-1) preds = self.mlp(mlp_input) loss = self.bce(preds, labels) loss.backward() self.mlp_optimizer.step() self.encoder_optimizer.step() def evaluate(self): eval_statistics = OrderedDict() self.mlp.eval() self.encoder.eval() for i in range(1, 12): # prep the batches # context, mask, input_batch, labels = self._get_training_batch() context, mask, input_batch, labels = self._get_eval_batch() post_dist = self.encoder(context, mask) # z = post_dist.sample() # N_tasks x Dim z = post_dist.mean repeated_z = z.repeat(1, self.classification_batch_size_per_task).view(-1, z.size(1)) mlp_input = torch.cat([input_batch, repeated_z], dim=-1) preds = self.mlp(mlp_input) class_preds = (preds > 0).type(preds.data.type()) accuracy = (class_preds == labels).type(torch.FloatTensor).mean() eval_statistics['Acc for %d' % i] = np.mean(ptu.get_numpy(accuracy)) # for key, value in eval_statistics.items(): # logger.record_tabular(key, value) # logger.dump_tabular(with_prefix=False, with_timestamp=False) print(np.mean(list(eval_statistics.values()))) def 
cuda(self): self.encoder.cuda() self.mlp.cuda() def cpu(self): self.encoder.cpu() self.mlp.cpu() def _elem_or_tuple_to_variable(elem_or_tuple): if isinstance(elem_or_tuple, tuple): return tuple( _elem_or_tuple_to_variable(e) for e in elem_or_tuple ) return Variable(ptu.from_numpy(elem_or_tuple).float(), requires_grad=False) def _filter_batch(np_batch): for k, v in np_batch.items(): if v.dtype == np.bool: yield k, v.astype(int) else: yield k, v def np_to_pytorch_batch(np_batch): return { k: _elem_or_tuple_to_variable(x) for k, x in _filter_batch(np_batch) if x.dtype != np.dtype('O') # ignore object (e.g. dictionaries) } def log_sum_exp(value, dim=None, keepdim=False): """Numerically stable implementation of the operation value.exp().sum(dim, keepdim).log() """ # TODO: torch.max(value, dim=None) threw an error at time of writing if dim is not None: m, _ = torch.max(value, dim=dim, keepdim=True) value0 = value - m if keepdim is False: m = m.squeeze(dim) return m + torch.log(torch.sum(torch.exp(value0), dim=dim, keepdim=keepdim)) else: m = torch.max(value) sum_exp = torch.sum(torch.exp(value - m)) if isinstance(sum_exp, Number): return m + math.log(sum_exp) else: return m + torch.log(sum_exp)
[ "torch.cat", "rlkit.torch.pytorch_util.from_numpy", "numpy.random.randint", "numpy.linalg.norm", "numpy.random.normal", "torch.exp", "rlkit.torch.distributions.ReparamMultivariateNormalDiag", "numpy.random.choice", "torch.nn.Linear", "torch.log", "torch.nn.BCEWithLogitsLoss", "rlkit.torch.pytorch_util.get_numpy", "torch.nn.BatchNorm1d", "torch.max", "numpy.concatenate", "numpy.random.uniform", "torch.nn.ReLU", "numpy.dtype", "numpy.zeros", "numpy.array", "collections.OrderedDict" ]
[((1297, 1373), 'numpy.random.choice', 'np.random.choice', (['traj_len'], {'size': 'num_samples', 'replace': '(traj_len < num_samples)'}), '(traj_len, size=num_samples, replace=traj_len < num_samples)\n', (1313, 1373), True, 'import numpy as np\n'), ((2060, 2085), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (2069, 2085), True, 'from torch import nn as nn\n'), ((2112, 2137), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'z_dim'], {}), '(hid_dim, z_dim)\n', (2121, 2137), True, 'from torch import nn as nn\n'), ((3509, 3563), 'rlkit.torch.distributions.ReparamMultivariateNormalDiag', 'ReparamMultivariateNormalDiag', (['post_mean', 'post_log_sig'], {}), '(post_mean, post_log_sig)\n', (3538, 3563), False, 'from rlkit.torch.distributions import ReparamMultivariateNormalDiag\n'), ((4729, 4751), 'torch.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (4749, 4751), True, 'from torch import nn as nn\n'), ((5223, 5247), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(3)'}), '(size=3)\n', (5239, 5247), True, 'import numpy as np\n'), ((5261, 5287), 'numpy.linalg.norm', 'np.linalg.norm', (['x'], {'axis': '(-1)'}), '(x, axis=-1)\n', (5275, 5287), True, 'import numpy as np\n'), ((5319, 5338), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (5336, 5338), True, 'import numpy as np\n'), ((5931, 5952), 'numpy.array', 'np.array', (['task_colors'], {}), '(task_colors)\n', (5939, 5952), True, 'import numpy as np\n'), ((7739, 7760), 'numpy.array', 'np.array', (['task_colors'], {}), '(task_colors)\n', (7747, 7760), True, 'import numpy as np\n'), ((10688, 10701), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10699, 10701), False, 'from collections import OrderedDict\n'), ((12832, 12871), 'torch.max', 'torch.max', (['value'], {'dim': 'dim', 'keepdim': '(True)'}), '(value, dim=dim, keepdim=True)\n', (12841, 12871), False, 'import torch\n'), ((13105, 13121), 'torch.max', 'torch.max', (['value'], {}), 
'(value)\n', (13114, 13121), False, 'import torch\n'), ((1134, 1179), 'numpy.concatenate', 'np.concatenate', (['[t[k] for t in trajs]'], {'axis': '(0)'}), '([t[k] for t in trajs], axis=0)\n', (1148, 1179), True, 'import numpy as np\n'), ((1839, 1864), 'torch.nn.Linear', 'nn.Linear', (['r_dim', 'hid_dim'], {}), '(r_dim, hid_dim)\n', (1848, 1864), True, 'from torch import nn as nn\n'), ((1878, 1901), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1892, 1901), True, 'from torch import nn as nn\n'), ((1915, 1924), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1922, 1924), True, 'from torch import nn as nn\n'), ((1938, 1965), 'torch.nn.Linear', 'nn.Linear', (['hid_dim', 'hid_dim'], {}), '(hid_dim, hid_dim)\n', (1947, 1965), True, 'from torch import nn as nn\n'), ((1979, 2002), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['hid_dim'], {}), '(hid_dim)\n', (1993, 2002), True, 'from torch import nn as nn\n'), ((2016, 2025), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2023, 2025), True, 'from torch import nn as nn\n'), ((2715, 2736), 'torch.nn.Linear', 'nn.Linear', (['(6)', 'HID_DIM'], {}), '(6, HID_DIM)\n', (2724, 2736), True, 'from torch import nn as nn\n'), ((2750, 2773), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['HID_DIM'], {}), '(HID_DIM)\n', (2764, 2773), True, 'from torch import nn as nn\n'), ((2787, 2796), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2794, 2796), True, 'from torch import nn as nn\n'), ((2810, 2837), 'torch.nn.Linear', 'nn.Linear', (['HID_DIM', 'HID_DIM'], {}), '(HID_DIM, HID_DIM)\n', (2819, 2837), True, 'from torch import nn as nn\n'), ((2851, 2874), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['HID_DIM'], {}), '(HID_DIM)\n', (2865, 2874), True, 'from torch import nn as nn\n'), ((2888, 2897), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2895, 2897), True, 'from torch import nn as nn\n'), ((2911, 2938), 'torch.nn.Linear', 'nn.Linear', (['HID_DIM', 'HID_DIM'], {}), '(HID_DIM, HID_DIM)\n', (2920, 2938), True, 'from 
torch import nn as nn\n'), ((5013, 5038), 'numpy.linalg.norm', 'np.linalg.norm', (['new_color'], {}), '(new_color)\n', (5027, 5038), True, 'import numpy as np\n'), ((5562, 5604), 'numpy.linalg.norm', 'np.linalg.norm', (['(new_color - color)'], {'axis': '(-1)'}), '(new_color - color, axis=-1)\n', (5576, 5604), True, 'import numpy as np\n'), ((7297, 7364), 'numpy.random.randint', 'np.random.randint', (['self.min_context_size', '(self.max_context_size + 1)'], {}), '(self.min_context_size, self.max_context_size + 1)\n', (7314, 7364), True, 'import numpy as np\n'), ((9353, 9420), 'numpy.random.randint', 'np.random.randint', (['self.min_context_size', '(self.max_context_size + 1)'], {}), '(self.min_context_size, self.max_context_size + 1)\n', (9370, 9420), True, 'import numpy as np\n'), ((10399, 10443), 'torch.cat', 'torch.cat', (['[input_batch, repeated_z]'], {'dim': '(-1)'}), '([input_batch, repeated_z], dim=-1)\n', (10408, 10443), False, 'import torch\n'), ((11226, 11270), 'torch.cat', 'torch.cat', (['[input_batch, repeated_z]'], {'dim': '(-1)'}), '([input_batch, repeated_z], dim=-1)\n', (11235, 11270), False, 'import torch\n'), ((13150, 13170), 'torch.exp', 'torch.exp', (['(value - m)'], {}), '(value - m)\n', (13159, 13170), False, 'import torch\n'), ((6588, 6609), 'numpy.array', 'np.array', (['input_batch'], {}), '(input_batch)\n', (6596, 6609), True, 'import numpy as np\n'), ((6653, 6669), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6661, 6669), True, 'import numpy as np\n'), ((6741, 6809), 'numpy.zeros', 'np.zeros', (['(self.num_tasks_used_per_update, self.max_context_size, 1)'], {}), '((self.num_tasks_used_per_update, self.max_context_size, 1))\n', (6749, 6809), True, 'import numpy as np\n'), ((7450, 7467), 'numpy.array', 'np.array', (['context'], {}), '(context)\n', (7458, 7467), True, 'import numpy as np\n'), ((8539, 8560), 'numpy.array', 'np.array', (['input_batch'], {}), '(input_batch)\n', (8547, 8560), True, 'import numpy as np\n'), ((8604, 
8620), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (8612, 8620), True, 'import numpy as np\n'), ((8692, 8760), 'numpy.zeros', 'np.zeros', (['(self.num_tasks_used_per_update, self.max_context_size, 1)'], {}), '((self.num_tasks_used_per_update, self.max_context_size, 1))\n', (8700, 8760), True, 'import numpy as np\n'), ((9506, 9523), 'numpy.array', 'np.array', (['context'], {}), '(context)\n', (9514, 9523), True, 'import numpy as np\n'), ((11507, 11530), 'rlkit.torch.pytorch_util.get_numpy', 'ptu.get_numpy', (['accuracy'], {}), '(accuracy)\n', (11520, 11530), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12120, 12149), 'rlkit.torch.pytorch_util.from_numpy', 'ptu.from_numpy', (['elem_or_tuple'], {}), '(elem_or_tuple)\n', (12134, 12149), True, 'import rlkit.torch.pytorch_util as ptu\n'), ((12505, 12518), 'numpy.dtype', 'np.dtype', (['"""O"""'], {}), "('O')\n", (12513, 12518), True, 'import numpy as np\n'), ((13290, 13308), 'torch.log', 'torch.log', (['sum_exp'], {}), '(sum_exp)\n', (13299, 13308), False, 'import torch\n'), ((6275, 6294), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6292, 6294), True, 'import numpy as np\n'), ((7203, 7230), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (7217, 7230), True, 'import numpy as np\n'), ((8226, 8245), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (8243, 8245), True, 'import numpy as np\n'), ((9259, 9286), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (9273, 9286), True, 'import numpy as np\n'), ((12998, 13015), 'torch.exp', 'torch.exp', (['value0'], {}), '(value0)\n', (13007, 13015), False, 'import torch\n'), ((6341, 6368), 'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (6355, 6368), True, 'import numpy as np\n'), ((6472, 6499), 'numpy.concatenate', 'np.concatenate', (['(bad, good)'], {}), '((bad, good))\n', (6486, 6499), True, 'import numpy as np\n'), ((8292, 8319), 
'numpy.concatenate', 'np.concatenate', (['(good, bad)'], {}), '((good, bad))\n', (8306, 8319), True, 'import numpy as np\n'), ((8423, 8450), 'numpy.concatenate', 'np.concatenate', (['(bad, good)'], {}), '((bad, good))\n', (8437, 8450), True, 'import numpy as np\n')]
# Generated by Django 3.2 on 2021-04-12 13:54 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('movie', '0006_alter_movie_rating'), ] operations = [ migrations.AlterField( model_name='movie', name='photo', field=models.ImageField(upload_to='media'), ), ]
[ "django.db.models.ImageField" ]
[((330, 366), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""media"""'}), "(upload_to='media')\n", (347, 366), False, 'from django.db import migrations, models\n')]
from builtins import int, range from pyinfrabox import ValidationError from pyinfrabox.utils import * def check_version(v, path): if not isinstance(v, int): raise ValidationError(path, "must be an int") if v != 1: raise ValidationError(path, "unsupported version") def parse_measurement(d, path): check_allowed_properties(d, path, ("name", "unit", "value")) check_required_properties(d, path, ("name", "unit", "value")) check_text(d['unit'], path + ".unit") check_text(d['name'], path + ".name") check_text(d['value'], path + ".value") def parse_measurements(e, path): if not isinstance(e, list): raise ValidationError(path, "must be an array") for i in range(0, len(e)): elem = e[i] path = "%s[%s]" % (path, i) parse_measurement(elem, path) def parse_t(d, path): check_allowed_properties(d, path, ("suite", "name", "status", "duration", "message", "stack", "measurements")) check_required_properties(d, path, ("suite", "name", "status", "duration")) check_text(d['suite'], path + ".suite") check_text(d['name'], path + ".name") check_text(d['status'], path + ".status") check_number(d['duration'], path + ".duration") if 'message' in d: check_text(d['message'], path + ".message") if 'stack' in d: check_text(d['stack'], path + ".stack") if 'measurements' in d: parse_measurements(d['measurements'], path + ".measurements") def parse_ts(e, path): if not isinstance(e, list): raise ValidationError(path, "must be an array") if not e: raise ValidationError(path, "must not be empty") for i in range(0, len(e)): elem = e[i] p = "%s[%s]" % (path, i) parse_t(elem, p) def parse_document(d): check_allowed_properties(d, "#", ("version", "tests")) check_required_properties(d, "#", ("version", "tests")) check_version(d['version'], "#version") parse_ts(d['tests'], "#tests") def validate_result(d): parse_document(d)
[ "pyinfrabox.ValidationError" ]
[((177, 216), 'pyinfrabox.ValidationError', 'ValidationError', (['path', '"""must be an int"""'], {}), "(path, 'must be an int')\n", (192, 216), False, 'from pyinfrabox import ValidationError\n'), ((247, 291), 'pyinfrabox.ValidationError', 'ValidationError', (['path', '"""unsupported version"""'], {}), "(path, 'unsupported version')\n", (262, 291), False, 'from pyinfrabox import ValidationError\n'), ((664, 705), 'pyinfrabox.ValidationError', 'ValidationError', (['path', '"""must be an array"""'], {}), "(path, 'must be an array')\n", (679, 705), False, 'from pyinfrabox import ValidationError\n'), ((1608, 1649), 'pyinfrabox.ValidationError', 'ValidationError', (['path', '"""must be an array"""'], {}), "(path, 'must be an array')\n", (1623, 1649), False, 'from pyinfrabox import ValidationError\n'), ((1679, 1721), 'pyinfrabox.ValidationError', 'ValidationError', (['path', '"""must not be empty"""'], {}), "(path, 'must not be empty')\n", (1694, 1721), False, 'from pyinfrabox import ValidationError\n')]
import numpy as np from plantcv.plantcv.visualize import colorize_label_img def test_colorize_label_img(): """Test for PlantCV.""" label_img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) colored_img = colorize_label_img(label_img) assert (colored_img.shape[0:-1] == label_img.shape) and colored_img.shape[-1] == 3
[ "numpy.array", "plantcv.plantcv.visualize.colorize_label_img" ]
[((153, 196), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (161, 196), True, 'import numpy as np\n'), ((215, 244), 'plantcv.plantcv.visualize.colorize_label_img', 'colorize_label_img', (['label_img'], {}), '(label_img)\n', (233, 244), False, 'from plantcv.plantcv.visualize import colorize_label_img\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- import io import os import sys from shutil import rmtree from setuptools import find_packages, setup, Command NAME = 'substrate-utils' VERSION = '0.1' DESCRIPTION = '' URL = 'https://github.com/stakelink/substrate-utils' EMAIL = '<EMAIL>' AUTHOR = 'STAKELINK' REQUIRES_PYTHON = '>=3.6.0' LICENSE = 'MIT' REQUIRED = [ 'substrate-interface>=0.13', 'cachetools' ] here = os.path.abspath(os.path.dirname(__file__)) with open("README.md", "r", encoding="utf-8") as fh: LONG_DESCRIPTION = fh.read() about = {} if not VERSION: with open(os.path.join(here, NAME, '__version__.py')) as f: exec(f.read(), about) else: about['__version__'] = VERSION setup( name=NAME, version=about['__version__'], description=DESCRIPTION, long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", author=AUTHOR, author_email=EMAIL, python_requires=REQUIRES_PYTHON, url=URL, classifiers=[ "Programming Language :: Python :: 3 :: Only", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], packages=['substrateutils'], entry_points={}, install_requires=REQUIRED, license=LICENSE, project_urls={ 'Bug Reports': 'https://github.com/stakelink/substrate-utils/issues', 'Source': 'https://github.com/stakelink/substrate-utils', }, )
[ "os.path.dirname", "os.path.join", "setuptools.setup" ]
[((720, 1367), 'setuptools.setup', 'setup', ([], {'name': 'NAME', 'version': "about['__version__']", 'description': 'DESCRIPTION', 'long_description': 'LONG_DESCRIPTION', 'long_description_content_type': '"""text/markdown"""', 'author': 'AUTHOR', 'author_email': 'EMAIL', 'python_requires': 'REQUIRES_PYTHON', 'url': 'URL', 'classifiers': "['Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent']", 'packages': "['substrateutils']", 'entry_points': '{}', 'install_requires': 'REQUIRED', 'license': 'LICENSE', 'project_urls': "{'Bug Reports': 'https://github.com/stakelink/substrate-utils/issues',\n 'Source': 'https://github.com/stakelink/substrate-utils'}"}), "(name=NAME, version=about['__version__'], description=DESCRIPTION,\n long_description=LONG_DESCRIPTION, long_description_content_type=\n 'text/markdown', author=AUTHOR, author_email=EMAIL, python_requires=\n REQUIRES_PYTHON, url=URL, classifiers=[\n 'Programming Language :: Python :: 3 :: Only',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent'], packages=['substrateutils'],\n entry_points={}, install_requires=REQUIRED, license=LICENSE,\n project_urls={'Bug Reports':\n 'https://github.com/stakelink/substrate-utils/issues', 'Source':\n 'https://github.com/stakelink/substrate-utils'})\n", (725, 1367), False, 'from setuptools import find_packages, setup, Command\n'), ((442, 467), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (457, 467), False, 'import os\n'), ((598, 640), 'os.path.join', 'os.path.join', (['here', 'NAME', '"""__version__.py"""'], {}), "(here, NAME, '__version__.py')\n", (610, 640), False, 'import os\n')]
from matplotlib import pyplot import numpy x = numpy.linspace(-1, 1, 1000) y1 = numpy.exp(-x**2 * 10) * (1 + 0.05 * numpy.random.rand(len(x))) y2 = (numpy.exp(10*(-(x-0.3)**2 - 0.75*x**4 - 0.25*x**6)) + numpy.piecewise(x, [x < 0.3, x >= 0.3], [lambda x: -(x-0.3)*numpy.sqrt(1+x), 0])) * (1 + 0.05 * numpy.random.rand(len(x))) def plot_max(x, y): x_c = numpy.argmax(y) / len(x) ax_lim = (x_c - 0.1, 0.2, 0.2, 0.2) f = pyplot.plot(x, y) pyplot.xlim(-1, 1) pyplot.arrow(2 * x_c - 1, 0.4, 0, 0.5, head_width=0.025, head_length=0.05) ax = pyplot.axes(ax_lim) ax.plot(x, y) ax.set_xlim(2 * (x_c - 0.56), 2 * (x_c - 0.44)) ax.set_ylim(0.9, 1.1) return f
[ "matplotlib.pyplot.xlim", "matplotlib.pyplot.plot", "numpy.argmax", "matplotlib.pyplot.axes", "numpy.exp", "numpy.linspace", "matplotlib.pyplot.arrow", "numpy.sqrt" ]
[((48, 75), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(1000)'], {}), '(-1, 1, 1000)\n', (62, 75), False, 'import numpy\n'), ((81, 104), 'numpy.exp', 'numpy.exp', (['(-x ** 2 * 10)'], {}), '(-x ** 2 * 10)\n', (90, 104), False, 'import numpy\n'), ((431, 448), 'matplotlib.pyplot.plot', 'pyplot.plot', (['x', 'y'], {}), '(x, y)\n', (442, 448), False, 'from matplotlib import pyplot\n'), ((453, 471), 'matplotlib.pyplot.xlim', 'pyplot.xlim', (['(-1)', '(1)'], {}), '(-1, 1)\n', (464, 471), False, 'from matplotlib import pyplot\n'), ((476, 550), 'matplotlib.pyplot.arrow', 'pyplot.arrow', (['(2 * x_c - 1)', '(0.4)', '(0)', '(0.5)'], {'head_width': '(0.025)', 'head_length': '(0.05)'}), '(2 * x_c - 1, 0.4, 0, 0.5, head_width=0.025, head_length=0.05)\n', (488, 550), False, 'from matplotlib import pyplot\n'), ((560, 579), 'matplotlib.pyplot.axes', 'pyplot.axes', (['ax_lim'], {}), '(ax_lim)\n', (571, 579), False, 'from matplotlib import pyplot\n'), ((150, 215), 'numpy.exp', 'numpy.exp', (['(10 * (-(x - 0.3) ** 2 - 0.75 * x ** 4 - 0.25 * x ** 6))'], {}), '(10 * (-(x - 0.3) ** 2 - 0.75 * x ** 4 - 0.25 * x ** 6))\n', (159, 215), False, 'import numpy\n'), ((358, 373), 'numpy.argmax', 'numpy.argmax', (['y'], {}), '(y)\n', (370, 373), False, 'import numpy\n'), ((264, 281), 'numpy.sqrt', 'numpy.sqrt', (['(1 + x)'], {}), '(1 + x)\n', (274, 281), False, 'import numpy\n')]
#!/usr/bin/python # -*- coding: ascii -*- # Author: @harvie <NAME> # Date: 25 sept 2018 __author__ = "@harvie <NAME>" #__email__ = "" __name__ = _("DragKnife") __version__ = "0.3.0" import math import os.path import re from CNC import CNC,Block from bmath import Vector from bpath import eq, Path, Segment from ToolsPage import Plugin from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod class Tool(Plugin): __doc__ = _("""Drag knife postprocessor""") #<<< This comment will be show as tooltip for the ribbon button def __init__(self, master): Plugin.__init__(self, master,"DragKnife") self.icon = "dragknife" #<<< This is the name of file used as icon for the ribbon button. It will be search in the "icons" subfolder self.group = "CAM" #<<< This is the name of group that plugin belongs #self.oneshot = True #Here we are creating the widgets presented to the user inside the plugin #Name, Type , Default value, Description self.variables = [ #<<< Define a list of components for the GUI ("name" , "db" , "", _("Name")), #used to store plugin settings in the internal database ("offset", "mm", 3, _("dragknife offset"), _("distance from dragknife rotation center to the tip of the blade")), ("angle", "float", 20, _("angle threshold"), _("do not perform pivot action for angles smaller than this")), ("swivelz", "mm", 0, _("swivel height"), _("retract to this height for pivots (useful for thick materials, you should enter number slightly lower than material thickness)")), ("initdir", "X+,Y+,Y-,X-,none", "X+", _("initial direction"), _("direction that knife blade is facing before and after cut. Eg.: if you set this to X+, then the knifes rotation axis should be on the right side of the tip. Meaning that the knife is ready to cut towards right immediately without pivoting. 
If you cut multiple shapes in single operation, it's important to have this set consistently across all of them.")), ("feed", "mm", 200, _("feedrate")), ("simulate", "bool", False, _("simulate"), _("Use this option to simulate cuting of dragknife path. Resulting shape will reflect what shape will actuall be cut. This should reverse the dragknife procedure and give you back the original shape from g-code that was previously processed for dragknife.")), ("simpreci", "mm", 0.5, _("simulation precision"), _("Simulation is currently approximated by using lots of short lines. This is the length of these lines.")) ] self.buttons.append("exe") #<<< This is the button added at bottom to call the execute method below self.help = """DragKnifes are special kind of razor/blade holders that can be fit into spindle of your CNC (do not turn the spindle on!!!). They are often used to cut soft and thin materials like vinyl stickers, fabric, leather, rubber gaskets, paper, cardboard, etc... Dragknife blade is located off center to allow for automatic rotation (kinda like rear wheels of car pivot to the direction of front wheels). This fact introduces the need for preprocessing the g-code to account with that offset. Otherwise it wouldn't be able to cut sharp corners. This plugin does this g-code postprocessing. 
""" # ---------------------------------------------------------------------- # This method is executed when user presses the plugin execute button # ---------------------------------------------------------------------- def execute(self, app): dragoff = self.fromMm("offset") angleth = self["angle"] swivelz = self.fromMm("swivelz") initdir = self["initdir"] CNC.vars["cutfeed"] = self.fromMm("feed") simulate = self["simulate"] simpreci = self["simpreci"] def initPoint(P, dir, offset): P = Vector(P[0], P[1]) if dir == 'X+': P[0]+=offset elif dir == 'X-': P[0]-=offset elif dir == 'Y+': P[1]+=offset elif dir == 'Y-': P[1]-=offset return P blocks = [] for bid in app.editor.getSelectedBlocks(): if len(app.gcode.toPath(bid)) < 1: continue opath = app.gcode.toPath(bid)[0] npath = Path("dragknife %s: %s"%(dragoff,app.gcode[bid].name())) if not simulate: #Entry vector ventry = Segment(Segment.LINE, initPoint(opath[0].A, initdir, -dragoff), opath[0].A) #Exit vector vexit = Segment(Segment.LINE, opath[-1].B, initPoint(opath[-1].B, initdir, dragoff)) opath.append(vexit) prevseg = ventry #Generate path with tangential lag for dragknife operation for i,seg in enumerate(opath): #Get adjacent tangential vectors in this point TA = prevseg.tangentEnd() TB = seg.tangentStart() #Compute difference between tangential vectors of two neighbor segments angle = degrees(acos(TA.dot(TB))) #Compute swivel direction arcdir = ( TA[0] * TB[1] ) - ( TA[1] * TB[0] ) if arcdir < 0: arcdir = Segment.CW else: arcdir = Segment.CCW #Append swivel if needed (also always do entry/exit) if abs(angle) > angleth or (abs(angle) > 1 and ( i == 0 or i == len(opath)-1 )): arca = Segment(arcdir, prevseg.tangentialOffset(dragoff).B, seg.tangentialOffset(dragoff).A, prevseg.B) if swivelz !=0: arca._inside = [swivelz] npath.append(arca) #Append segment with tangential offset if i < len(opath)-1: npath.append(seg.tangentialOffset(dragoff)) prevseg = seg elif simulate: opath = opath.linearize(simpreci, True) prevknife 
= initPoint(opath[0].A, initdir, -dragoff) for seg in opath: dist = sqrt((seg.B[0]-prevknife[0])**2+(seg.B[1]-prevknife[1])**2) move = ( seg.B - prevknife ).unit() * ( dist - dragoff ) newknife = prevknife + move if not eq(newknife, prevknife): npath.append(Segment(Segment.LINE, prevknife, newknife)) prevknife = newknife eblock = app.gcode.fromPath(npath) blocks.append(eblock) #active = app.activeBlock() #if active == 0: active+=1 active=-1 #add to end app.gcode.insBlocks(active, blocks, "Dragknife") #<<< insert blocks over active block in the editor app.refresh() #<<< refresh editor app.setStatus(_("Generated: Dragknife")) #<<< feed back result #app.gcode.blocks.append(block)
[ "bpath.eq", "ToolsPage.Plugin.__init__", "math.sqrt", "bpath.Segment", "bmath.Vector" ]
[((599, 641), 'ToolsPage.Plugin.__init__', 'Plugin.__init__', (['self', 'master', '"""DragKnife"""'], {}), "(self, master, 'DragKnife')\n", (614, 641), False, 'from ToolsPage import Plugin\n'), ((3732, 3750), 'bmath.Vector', 'Vector', (['P[0]', 'P[1]'], {}), '(P[0], P[1])\n', (3738, 3750), False, 'from bmath import Vector\n'), ((5528, 5597), 'math.sqrt', 'sqrt', (['((seg.B[0] - prevknife[0]) ** 2 + (seg.B[1] - prevknife[1]) ** 2)'], {}), '((seg.B[0] - prevknife[0]) ** 2 + (seg.B[1] - prevknife[1]) ** 2)\n', (5532, 5597), False, 'from math import pi, sqrt, sin, cos, asin, acos, atan2, hypot, degrees, radians, copysign, fmod\n'), ((5695, 5718), 'bpath.eq', 'eq', (['newknife', 'prevknife'], {}), '(newknife, prevknife)\n', (5697, 5718), False, 'from bpath import eq, Path, Segment\n'), ((5739, 5781), 'bpath.Segment', 'Segment', (['Segment.LINE', 'prevknife', 'newknife'], {}), '(Segment.LINE, prevknife, newknife)\n', (5746, 5781), False, 'from bpath import eq, Path, Segment\n')]
#Dependencies from bs4 import BeautifulSoup as bs import requests from webdriver_manager.chrome import ChromeDriverManager from splinter import Browser import pandas as pd import os def init_browser(): executable_path = {'executable_path': ChromeDriverManager().install()} return Browser("chrome", **executable_path, headless=False) def scrape(): browser = init_browser() mars_data = {} ##### NASA MARS NEWS ##### #URL of page being scraped url= "https://mars.nasa.gov/news/" browser.visit(url) #HTML Object html= browser.html # Parse HTML with Beautiful Soup soup = bs(html, 'html.parser') #Extract title latest_news = soup.find_all('div', class_="list_text") news = latest_news[0] news_title = news.find('div', class_='content_title').text #Extract paragraph text news_p= news.find('div', class_='article_teaser_body').text #Add to mars_data dictionary news_title= str(news_title) news_p= str(news_p) mars_data["news_title"]= news_title mars_data["news_p"]= news_p ##### JPL MARS SPACE IMAGES ##### #URL of page featured_image_url= 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars' browser.visit(featured_image_url) #HTML Object; Parse HTML with Beautiful Soup image_html = browser.html soup = bs(image_html, 'html.parser') #Extract featured image URL featured_img_url= soup.find('article')['style'].replace('background-image: url(','').replace(');', '')[1:-1] main= "https://www.jpl.nasa.gov" featured_image_url= main + featured_img_url #Add to mars_data dictionary featured_image_url= str(featured_image_url) mars_data["featured_image_url"]= featured_image_url ##### MARS FACTS ##### #URL of page marsfacts_url= 'https://space-facts.com/mars/' #Read tables and convert to pandas df mars_facts= pd.read_html(marsfacts_url) mars_df= mars_facts[0] mars_df.columns= ['Description', 'Value'] mars_df.set_index('Description', inplace=True) #Convert to HTML mars_html= mars_df.to_html() mars_facts_html= mars_html.replace('\n', '') #Add to mars_data dictionary 
mars_data["mars_facts_table"]= mars_facts_html ##### MARS HEMISPHERES ##### #URL of page hemisphere_url= "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars" browser.visit(hemisphere_url) #HTML Object; Parse HTML with Beautiful Soup mars_images_html = browser.html soup = bs(mars_images_html, 'html.parser') #Extract image urls items= soup.find_all('div', class_='item') #Create empty list to store image urls hemisphere_image_urls= [] main_url= "https://astrogeology.usgs.gov" #Loop through items to get each hemisphere url for item in items: # Use Beautiful Soup's find() method to navigate and retrieve attributes title = item.find('h3').text link = item.find('a', class_="itemLink product-item")['href'] #Create HTML Object for individual hemisphere page; Parse HTML with Beautiful Soup browser.visit(main_url + link) hemisphere_html= browser.html soup= bs(hemisphere_html, 'html.parser') #Extract image source url img_url= main_url + soup.find('img', class_='wide-image')['src'] #Create dictionary hemisphere_image_urls.append({ "title": title, "img_url": img_url }) #Add to mars_data dictionary mars_data['hemisphere_image_urls']= hemisphere_image_urls browser.quit() return mars_data
[ "bs4.BeautifulSoup", "splinter.Browser", "pandas.read_html", "webdriver_manager.chrome.ChromeDriverManager" ]
[((290, 342), 'splinter.Browser', 'Browser', (['"""chrome"""'], {'headless': '(False)'}), "('chrome', **executable_path, headless=False)\n", (297, 342), False, 'from splinter import Browser\n'), ((622, 645), 'bs4.BeautifulSoup', 'bs', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (624, 645), True, 'from bs4 import BeautifulSoup as bs\n'), ((1347, 1376), 'bs4.BeautifulSoup', 'bs', (['image_html', '"""html.parser"""'], {}), "(image_html, 'html.parser')\n", (1349, 1376), True, 'from bs4 import BeautifulSoup as bs\n'), ((1904, 1931), 'pandas.read_html', 'pd.read_html', (['marsfacts_url'], {}), '(marsfacts_url)\n', (1916, 1931), True, 'import pandas as pd\n'), ((2536, 2571), 'bs4.BeautifulSoup', 'bs', (['mars_images_html', '"""html.parser"""'], {}), "(mars_images_html, 'html.parser')\n", (2538, 2571), True, 'from bs4 import BeautifulSoup as bs\n'), ((3219, 3253), 'bs4.BeautifulSoup', 'bs', (['hemisphere_html', '"""html.parser"""'], {}), "(hemisphere_html, 'html.parser')\n", (3221, 3253), True, 'from bs4 import BeautifulSoup as bs\n'), ((246, 267), 'webdriver_manager.chrome.ChromeDriverManager', 'ChromeDriverManager', ([], {}), '()\n', (265, 267), False, 'from webdriver_manager.chrome import ChromeDriverManager\n')]
# coding: utf-8 # Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. # This product includes software developed at Datadog (https://www.datadoghq.com/). # Copyright 2019-Present Datadog, Inc. import sys import unittest import datadog_api_client.v2 from datadog_api_client.v2.model.incident_team_create_attributes import IncidentTeamCreateAttributes from datadog_api_client.v2.model.incident_team_relationships import IncidentTeamRelationships from datadog_api_client.v2.model.incident_team_type import IncidentTeamType globals()['IncidentTeamCreateAttributes'] = IncidentTeamCreateAttributes globals()['IncidentTeamRelationships'] = IncidentTeamRelationships globals()['IncidentTeamType'] = IncidentTeamType from datadog_api_client.v2.model.incident_team_create_data import IncidentTeamCreateData class TestIncidentTeamCreateData(unittest.TestCase): """IncidentTeamCreateData unit test stubs""" def setUp(self): pass def tearDown(self): pass def testIncidentTeamCreateData(self): """Test IncidentTeamCreateData""" # FIXME: construct object with mandatory attributes with example values # model = IncidentTeamCreateData() # noqa: E501 pass if __name__ == '__main__': unittest.main()
[ "unittest.main" ]
[((1302, 1317), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1315, 1317), False, 'import unittest\n')]
import tensorflow as tf import cnn_denoiser.input_data as input_data import glob import os # Parameters channels = 1 dataset_path = 'data/pretraining' image_width = 270 image_height = 90 batch_size = 64 image_data = input_data.load_images(dataset_path, image_width, image_height) pretrain_steps_first_layer = 1000 pretrain_steps = 100 training_dropout = 0.5 start_learning_rate = 0.001 steps_before_decay = 1000 decay_rate = 0.995 global_step = tf.Variable(0, trainable=False) alpha = tf.train.exponential_decay(start_learning_rate, global_step, steps_before_decay, decay_rate, staircase=True) def summary_layer(net, name): tf.summary.image(name, tf.expand_dims(tf.transpose(net, [3, 0, 1, 2])[0], 3), max_outputs=1) def conv_layer_dropout(net, layer, out_channels, filter_dims, strides, padding, name, dropout=0.5, act_f = tf.nn.relu, pre=False): net = tf.layers.dropout(net, dropout) return conv_layer(net, layer, out_channels, filter_dims, strides, padding, name, act_f, pre=pre) def conv_layer(net, layer, out_channels, filter_dims, strides, padding, name, act_f = tf.nn.relu, pre=False): net = layer(net, out_channels, filter_dims, strides=strides, padding=padding) net = act_f(net) if not pre: summary_layer(net, name) return net def pre_train_conv_layer(inputs, layer, out_channels, filt, strides, name, act_f = tf.nn.relu, dropout=0.5): forward = layer backward = tf.layers.conv2d_transpose if layer is tf.layers.conv2d else tf.layers.conv2d name_scope = name + '_pretrain_scope' with tf.variable_scope(name_scope) as vs: net = conv_layer_dropout(inputs, forward, out_channels, filt, strides, 'SAME', name, dropout=dropout) out = conv_layer_dropout(net, backward, int(inputs.shape[-1]), filt, strides, 'SAME', name + '_pretrain', pre=True) trainable_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=vs.name) var_list = [v for v in trainable_variables if name in v.name] if len(var_list) != 4: raise Exception("No two unique output layers to pretrain") cost = 
tf.reduce_mean(tf.square(out - inputs)) step = tf.train.GradientDescentOptimizer(alpha).minimize(cost, var_list=var_list, global_step=global_step) return net, step, cost files = glob.glob('./pretrain/*') for f in files: os.remove(f) writer = tf.summary.FileWriter("./pretrain", graph=tf.get_default_graph()) def pretrain(epochs, step, loss, placeholder, name, training): if not training: return sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) sess.run(tf.global_variables_initializer()) summary_op = tf.summary.merge_all() for i in range(epochs): input_, blurred = image_data.next_batch(batch_size) _, cost, summary = sess.run([step, loss, summary_op], feed_dict={placeholder: blurred}) writer.add_summary(summary, i) print(i, "Pretrain " + name, cost) def autoencoder(original, inputs, training): dropout = training_dropout if training else 0.0 # Encoder net, step, loss = pre_train_conv_layer(inputs, tf.layers.conv2d, 512, [5, 5], (3, 3), 'conv1', dropout=0.0) print(net.shape) pretrain(pretrain_steps_first_layer, step, loss, original, 'conv1', training) net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d, 256, [5, 5], (3, 3), 'conv2', dropout=dropout) print(net.shape) pretrain(pretrain_steps, step, loss, original, 'conv2', training) net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d, 128, [5, 5], (1, 1), 'conv3', dropout=dropout) print(net.shape) pretrain(pretrain_steps, step, loss, original, 'conv3', training) # Decoder net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, 128, [5, 5], (1, 1), 'deconv1', dropout=dropout) print(net.shape) pretrain(pretrain_steps, step, loss, original, 'deconv1', training) net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, 256, [5, 5], (3, 3), 'deconv2', dropout=dropout) print(net.shape) pretrain(pretrain_steps, step, loss, original, 'deconv2', training) net, step, loss = pre_train_conv_layer(net, tf.layers.conv2d_transpose, channels, [5, 5], (3, 3), 'deconv3', 
dropout=dropout) print(net.shape) pretrain(pretrain_steps, step, loss, original, 'deconv3', training) # Final tanh activation net = tf.nn.tanh(net) return net
[ "os.remove", "tensorflow.square", "tensorflow.nn.tanh", "tensorflow.get_collection", "tensorflow.global_variables_initializer", "tensorflow.layers.dropout", "tensorflow.variable_scope", "tensorflow.get_default_graph", "tensorflow.transpose", "tensorflow.ConfigProto", "tensorflow.Variable", "glob.glob", "tensorflow.train.GradientDescentOptimizer", "cnn_denoiser.input_data.load_images", "tensorflow.train.exponential_decay", "tensorflow.summary.merge_all" ]
[((219, 282), 'cnn_denoiser.input_data.load_images', 'input_data.load_images', (['dataset_path', 'image_width', 'image_height'], {}), '(dataset_path, image_width, image_height)\n', (241, 282), True, 'import cnn_denoiser.input_data as input_data\n'), ((452, 483), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (463, 483), True, 'import tensorflow as tf\n'), ((492, 604), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['start_learning_rate', 'global_step', 'steps_before_decay', 'decay_rate'], {'staircase': '(True)'}), '(start_learning_rate, global_step,\n steps_before_decay, decay_rate, staircase=True)\n', (518, 604), True, 'import tensorflow as tf\n'), ((2447, 2472), 'glob.glob', 'glob.glob', (['"""./pretrain/*"""'], {}), "('./pretrain/*')\n", (2456, 2472), False, 'import glob\n'), ((1011, 1042), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['net', 'dropout'], {}), '(net, dropout)\n', (1028, 1042), True, 'import tensorflow as tf\n'), ((2493, 2505), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (2502, 2505), False, 'import os\n'), ((2819, 2841), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (2839, 2841), True, 'import tensorflow as tf\n'), ((4566, 4581), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['net'], {}), '(net)\n', (4576, 4581), True, 'import tensorflow as tf\n'), ((1697, 1726), 'tensorflow.variable_scope', 'tf.variable_scope', (['name_scope'], {}), '(name_scope)\n', (1714, 1726), True, 'import tensorflow as tf\n'), ((1999, 2062), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.GLOBAL_VARIABLES'], {'scope': 'vs.name'}), '(tf.GraphKeys.GLOBAL_VARIABLES, scope=vs.name)\n', (2016, 2062), True, 'import tensorflow as tf\n'), ((2558, 2580), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2578, 2580), True, 'import tensorflow as tf\n'), ((2767, 2800), 'tensorflow.global_variables_initializer', 
'tf.global_variables_initializer', ([], {}), '()\n', (2798, 2800), True, 'import tensorflow as tf\n'), ((2266, 2289), 'tensorflow.square', 'tf.square', (['(out - inputs)'], {}), '(out - inputs)\n', (2275, 2289), True, 'import tensorflow as tf\n'), ((2711, 2752), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2725, 2752), True, 'import tensorflow as tf\n'), ((814, 845), 'tensorflow.transpose', 'tf.transpose', (['net', '[3, 0, 1, 2]'], {}), '(net, [3, 0, 1, 2])\n', (826, 845), True, 'import tensorflow as tf\n'), ((2306, 2346), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', (['alpha'], {}), '(alpha)\n', (2339, 2346), True, 'import tensorflow as tf\n')]
import cv2 im = cv2.imread('data/src/lena_square_half.png') th, im_th = cv2.threshold(im, 128, 255, cv2.THRESH_BINARY) print(th) # 128.0 cv2.imwrite('data/dst/opencv_th.jpg', im_th) # True th, im_th_tz = cv2.threshold(im, 128, 255, cv2.THRESH_TOZERO) print(th) # 128.0 cv2.imwrite('data/dst/opencv_th_tz.jpg', im_th_tz) # True # th, im_th_otsu = cv2.threshold(im, 128, 192, cv2.THRESH_OTSU) # error: OpenCV(4.2.0) /tmp/opencv-20200105-17262-cwpzm4/opencv-4.2.0/modules/imgproc/src/thresh.cpp:1529: error: (-215:Assertion failed) src.type() == CV_8UC1 in function 'threshold' im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) th, im_gray_th_otsu = cv2.threshold(im_gray, 128, 192, cv2.THRESH_OTSU) print(th) # 117.0 cv2.imwrite('data/dst/opencv_th_otsu.jpg', im_gray_th_otsu) # True
[ "cv2.cvtColor", "cv2.imread", "cv2.threshold", "cv2.imwrite" ]
[((17, 60), 'cv2.imread', 'cv2.imread', (['"""data/src/lena_square_half.png"""'], {}), "('data/src/lena_square_half.png')\n", (27, 60), False, 'import cv2\n'), ((74, 120), 'cv2.threshold', 'cv2.threshold', (['im', '(128)', '(255)', 'cv2.THRESH_BINARY'], {}), '(im, 128, 255, cv2.THRESH_BINARY)\n', (87, 120), False, 'import cv2\n'), ((141, 185), 'cv2.imwrite', 'cv2.imwrite', (['"""data/dst/opencv_th.jpg"""', 'im_th'], {}), "('data/dst/opencv_th.jpg', im_th)\n", (152, 185), False, 'import cv2\n'), ((209, 255), 'cv2.threshold', 'cv2.threshold', (['im', '(128)', '(255)', 'cv2.THRESH_TOZERO'], {}), '(im, 128, 255, cv2.THRESH_TOZERO)\n', (222, 255), False, 'import cv2\n'), ((276, 326), 'cv2.imwrite', 'cv2.imwrite', (['"""data/dst/opencv_th_tz.jpg"""', 'im_th_tz'], {}), "('data/dst/opencv_th_tz.jpg', im_th_tz)\n", (287, 326), False, 'import cv2\n'), ((594, 630), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2GRAY'], {}), '(im, cv2.COLOR_BGR2GRAY)\n', (606, 630), False, 'import cv2\n'), ((654, 703), 'cv2.threshold', 'cv2.threshold', (['im_gray', '(128)', '(192)', 'cv2.THRESH_OTSU'], {}), '(im_gray, 128, 192, cv2.THRESH_OTSU)\n', (667, 703), False, 'import cv2\n'), ((724, 783), 'cv2.imwrite', 'cv2.imwrite', (['"""data/dst/opencv_th_otsu.jpg"""', 'im_gray_th_otsu'], {}), "('data/dst/opencv_th_otsu.jpg', im_gray_th_otsu)\n", (735, 783), False, 'import cv2\n')]
import numpy as np def cgls(A, b): height, width = A.shape x = np.zeros((height)) while(True): sumA = A.sum() if (sumA < 100): break if (np.linalg.det(A) < 1): A = A + np.eye(height, width) * sumA * 0.000000005 else: x = np.linalg.inv(A).dot(b) break return x
[ "numpy.linalg.det", "numpy.linalg.inv", "numpy.zeros", "numpy.eye" ]
[((72, 88), 'numpy.zeros', 'np.zeros', (['height'], {}), '(height)\n', (80, 88), True, 'import numpy as np\n'), ((186, 202), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (199, 202), True, 'import numpy as np\n'), ((302, 318), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (315, 318), True, 'import numpy as np\n'), ((229, 250), 'numpy.eye', 'np.eye', (['height', 'width'], {}), '(height, width)\n', (235, 250), True, 'import numpy as np\n')]
# Licensed Materials - Property of IBM # Copyright IBM Corp. 2016 import unittest import sys import itertools import threading from streamsx.topology.topology import * from streamsx.topology.tester import Tester from streamsx.topology import schema import streamsx.topology.context from streamsx.topology.context import JobConfig from streamsx.topology.context import ConfigParams import streamsx.spl.op as op class TestSPLWindow(unittest.TestCase): """ Test invocations of SPL operators from Python topology. """ _multiprocess_can_split_ = True # Fake out subTest if sys.version_info.major == 2: def subTest(self, **args): return threading.Lock() def setUp(self): Tester.setup_standalone(self) def test_sliding_count(self): for step in [1, 3]: with self.subTest(step=step): topo = Topology() b = op.Source(topo, "spl.utility::Beacon", 'tuple<uint64 seq>', params = {'iterations':12}) b.seq = b.output('IterationCount()') s = b.stream agg = op.Map('spl.relational::Aggregate', s.last(4).trigger(step), schema = 'tuple<uint64 sum, uint64 max>') agg.sum = agg.output('Sum(seq)') agg.max = agg.output('Max(seq)') expected = [] for i in range(4 + step - 2, 12, step): expected.append({'sum': sum(range(i-3, i+1)), 'max': i}) tester = Tester(topo) tester.contents(agg.stream, expected) tester.test(self.test_ctxtype, self.test_config) def test_sliding_count_stv(self): for step in [1, 3]: with self.subTest(step=step): topo = Topology() b = op.Source(topo, "spl.utility::Beacon", 'tuple<uint64 seq>', params = {'iterations':12}) b.seq = b.output('IterationCount()') s = b.stream count = topo.create_submission_parameter('count', 4) window = s.last(count).trigger(step) agg = op.Map('spl.relational::Aggregate', window, schema = 'tuple<uint64 sum, uint64 max>') agg.sum = agg.output('Sum(seq)') agg.max = agg.output('Max(seq)') expected = [] for i in range(4 + step - 2, 12, step): expected.append({'sum': sum(range(i-3, i+1)), 'max': i}) tester = Tester(topo) tester.contents(agg.stream, expected) 
tester.test(self.test_ctxtype, self.test_config) def test_sliding_count_stv_no_default(self): step =1 topo = Topology() b = op.Source(topo, "spl.utility::Beacon", 'tuple<uint64 seq>', params = {'iterations':12}) b.seq = b.output('IterationCount()') s = b.stream count = topo.create_submission_parameter('count', type_=int) window = s.last(count).trigger(step) agg = op.Map('spl.relational::Aggregate', window, schema = 'tuple<uint64 sum, uint64 max>') agg.sum = agg.output('Sum(seq)') agg.max = agg.output('Max(seq)') expected = [] for i in range(4 + step - 2, 12, step): expected.append({'sum': sum(range(i-3, i+1)), 'max': i}) jc = JobConfig() jc.submission_parameters['count'] = 4 jc.add(self.test_config) tester = Tester(topo) tester.contents(agg.stream, expected) tester.test(self.test_ctxtype, self.test_config) def test_sliding_time_stv(self): topo = Topology() b = op.Source(topo, "spl.utility::Beacon", 'tuple<uint64 seq>', params = {'iterations':12}) b.seq = b.output('IterationCount()') s = b.stream time = topo.create_submission_parameter('time', 2) window = s.lastSeconds(time).trigger(1) agg = op.Map('spl.relational::Aggregate', window, schema = 'tuple<uint64 sum, uint64 max>') agg.sum = agg.output('Sum(seq)') agg.max = agg.output('Max(seq)') tester = Tester(topo) tester.tuple_count(agg.stream, 12) tester.test(self.test_ctxtype, self.test_config) def test_sliding_time_stv_no_default(self): topo = Topology() b = op.Source(topo, "spl.utility::Beacon", 'tuple<uint64 seq>', params = {'iterations':12}) b.seq = b.output('IterationCount()') s = b.stream wtime = topo.create_submission_parameter(name='secs', type_=int) window = s.lastSeconds(wtime).trigger(1) agg = op.Map('spl.relational::Aggregate', window, schema = 'tuple<uint64 sum, uint64 max>') agg.sum = agg.output('Sum(seq)') agg.max = agg.output('Max(seq)') jc = JobConfig() jc.submission_parameters['secs'] = 2 jc.add(self.test_config) tester = Tester(topo) tester.tuple_count(agg.stream, 12) 
tester.test(self.test_ctxtype, self.test_config) class TestDistributedSPLWindow(TestSPLWindow): def setUp(self): Tester.setup_distributed(self) self.test_config[ConfigParams.SSL_VERIFY] = False
[ "streamsx.spl.op.Map", "streamsx.topology.context.JobConfig", "streamsx.topology.tester.Tester.setup_distributed", "streamsx.topology.tester.Tester.setup_standalone", "streamsx.topology.tester.Tester", "threading.Lock", "streamsx.spl.op.Source" ]
[((710, 739), 'streamsx.topology.tester.Tester.setup_standalone', 'Tester.setup_standalone', (['self'], {}), '(self)\n', (733, 739), False, 'from streamsx.topology.tester import Tester\n'), ((2828, 2919), 'streamsx.spl.op.Source', 'op.Source', (['topo', '"""spl.utility::Beacon"""', '"""tuple<uint64 seq>"""'], {'params': "{'iterations': 12}"}), "(topo, 'spl.utility::Beacon', 'tuple<uint64 seq>', params={\n 'iterations': 12})\n", (2837, 2919), True, 'import streamsx.spl.op as op\n'), ((3136, 3224), 'streamsx.spl.op.Map', 'op.Map', (['"""spl.relational::Aggregate"""', 'window'], {'schema': '"""tuple<uint64 sum, uint64 max>"""'}), "('spl.relational::Aggregate', window, schema=\n 'tuple<uint64 sum, uint64 max>')\n", (3142, 3224), True, 'import streamsx.spl.op as op\n'), ((3470, 3481), 'streamsx.topology.context.JobConfig', 'JobConfig', ([], {}), '()\n', (3479, 3481), False, 'from streamsx.topology.context import JobConfig\n'), ((3579, 3591), 'streamsx.topology.tester.Tester', 'Tester', (['topo'], {}), '(topo)\n', (3585, 3591), False, 'from streamsx.topology.tester import Tester\n'), ((3772, 3863), 'streamsx.spl.op.Source', 'op.Source', (['topo', '"""spl.utility::Beacon"""', '"""tuple<uint64 seq>"""'], {'params': "{'iterations': 12}"}), "(topo, 'spl.utility::Beacon', 'tuple<uint64 seq>', params={\n 'iterations': 12})\n", (3781, 3863), True, 'import streamsx.spl.op as op\n'), ((4073, 4161), 'streamsx.spl.op.Map', 'op.Map', (['"""spl.relational::Aggregate"""', 'window'], {'schema': '"""tuple<uint64 sum, uint64 max>"""'}), "('spl.relational::Aggregate', window, schema=\n 'tuple<uint64 sum, uint64 max>')\n", (4079, 4161), True, 'import streamsx.spl.op as op\n'), ((4271, 4283), 'streamsx.topology.tester.Tester', 'Tester', (['topo'], {}), '(topo)\n', (4277, 4283), False, 'from streamsx.topology.tester import Tester\n'), ((4471, 4562), 'streamsx.spl.op.Source', 'op.Source', (['topo', '"""spl.utility::Beacon"""', '"""tuple<uint64 seq>"""'], {'params': "{'iterations': 12}"}), 
"(topo, 'spl.utility::Beacon', 'tuple<uint64 seq>', params={\n 'iterations': 12})\n", (4480, 4562), True, 'import streamsx.spl.op as op\n'), ((4787, 4875), 'streamsx.spl.op.Map', 'op.Map', (['"""spl.relational::Aggregate"""', 'window'], {'schema': '"""tuple<uint64 sum, uint64 max>"""'}), "('spl.relational::Aggregate', window, schema=\n 'tuple<uint64 sum, uint64 max>')\n", (4793, 4875), True, 'import streamsx.spl.op as op\n'), ((4981, 4992), 'streamsx.topology.context.JobConfig', 'JobConfig', ([], {}), '()\n', (4990, 4992), False, 'from streamsx.topology.context import JobConfig\n'), ((5089, 5101), 'streamsx.topology.tester.Tester', 'Tester', (['topo'], {}), '(topo)\n', (5095, 5101), False, 'from streamsx.topology.tester import Tester\n'), ((5280, 5310), 'streamsx.topology.tester.Tester.setup_distributed', 'Tester.setup_distributed', (['self'], {}), '(self)\n', (5304, 5310), False, 'from streamsx.topology.tester import Tester\n'), ((663, 679), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (677, 679), False, 'import threading\n'), ((899, 990), 'streamsx.spl.op.Source', 'op.Source', (['topo', '"""spl.utility::Beacon"""', '"""tuple<uint64 seq>"""'], {'params': "{'iterations': 12}"}), "(topo, 'spl.utility::Beacon', 'tuple<uint64 seq>', params={\n 'iterations': 12})\n", (908, 990), True, 'import streamsx.spl.op as op\n'), ((1545, 1557), 'streamsx.topology.tester.Tester', 'Tester', (['topo'], {}), '(topo)\n', (1551, 1557), False, 'from streamsx.topology.tester import Tester\n'), ((1840, 1931), 'streamsx.spl.op.Source', 'op.Source', (['topo', '"""spl.utility::Beacon"""', '"""tuple<uint64 seq>"""'], {'params': "{'iterations': 12}"}), "(topo, 'spl.utility::Beacon', 'tuple<uint64 seq>', params={\n 'iterations': 12})\n", (1849, 1931), True, 'import streamsx.spl.op as op\n'), ((2197, 2285), 'streamsx.spl.op.Map', 'op.Map', (['"""spl.relational::Aggregate"""', 'window'], {'schema': '"""tuple<uint64 sum, uint64 max>"""'}), "('spl.relational::Aggregate', window, schema=\n 
'tuple<uint64 sum, uint64 max>')\n", (2203, 2285), True, 'import streamsx.spl.op as op\n'), ((2592, 2604), 'streamsx.topology.tester.Tester', 'Tester', (['topo'], {}), '(topo)\n', (2598, 2604), False, 'from streamsx.topology.tester import Tester\n')]
from pampy import match, _ from dataclasses import dataclass from typing import TypeVar, Generic T = TypeVar('T') class Matcher: def __init__(self): self.pattern = [] def __setitem__(self, pat, method): self.pattern.append(pat) self.pattern.append(method) def __call__(self, *x): if len(x)==1: x = x[0] return match(x, *self.pattern) class Multimethod: def __init__(self): pass def __enter__(self): return Matcher() def __exit__(self, exc_type, exc_value, exc_traceback): pass class Case: def __init__(self,**kwargs): self.slot = kwargs class ADTMeta(type): def __init__(cls, clsname, bases, clsdict): annot = clsdict["__annotations__"] for name in list(annot.keys()): try: d = annot[name].slot t = dataclass(type(name, (cls,), {"__annotations__":d})) setattr(cls, name, t) except AttributeError: pass super().__init__(clsname, bases, clsdict)
[ "typing.TypeVar", "pampy.match" ]
[((101, 113), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (108, 113), False, 'from typing import TypeVar, Generic\n'), ((389, 412), 'pampy.match', 'match', (['x', '*self.pattern'], {}), '(x, *self.pattern)\n', (394, 412), False, 'from pampy import match, _\n')]
import nltk import os from flask_script import Manager from app import app from app.dynamodb.utils import create_dynamo_table from app.models.review import Review from app.models.game import Game from app.models.tag import Tag manager = Manager(app) @manager.command def create_tables(): create_dynamo_table(Review) create_dynamo_table(Tag) create_dynamo_table(Game) @manager.command def get_punkt(): nltk.download("punkt") if __name__ == "__main__": manager.run()
[ "flask_script.Manager", "nltk.download", "app.dynamodb.utils.create_dynamo_table" ]
[((239, 251), 'flask_script.Manager', 'Manager', (['app'], {}), '(app)\n', (246, 251), False, 'from flask_script import Manager\n'), ((295, 322), 'app.dynamodb.utils.create_dynamo_table', 'create_dynamo_table', (['Review'], {}), '(Review)\n', (314, 322), False, 'from app.dynamodb.utils import create_dynamo_table\n'), ((327, 351), 'app.dynamodb.utils.create_dynamo_table', 'create_dynamo_table', (['Tag'], {}), '(Tag)\n', (346, 351), False, 'from app.dynamodb.utils import create_dynamo_table\n'), ((356, 381), 'app.dynamodb.utils.create_dynamo_table', 'create_dynamo_table', (['Game'], {}), '(Game)\n', (375, 381), False, 'from app.dynamodb.utils import create_dynamo_table\n'), ((421, 443), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (434, 443), False, 'import nltk\n')]
#!/usr/bin/env python """ @file route.py @author <NAME> @date 2013-10-23 @version $Id$ Route helper functions. SUMO, Simulation of Urban MObility; see http://sumo.dlr.de/ Copyright (C) 2009-2017 DLR (http://www.dlr.de/) and contributors This file is part of SUMO. SUMO is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. """ import os import sys SUMO_HOME = os.environ.get('SUMO_HOME', os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..')) sys.path.append(os.path.join(SUMO_HOME, 'tools')) from sumolib.miscutils import euclidean def _getMinPath(paths): minDist = 1e400 minPath = None for path, dist in paths.iteritems(): if dist < minDist: minPath = path minDist = dist return minPath def mapTrace(trace, net, delta, verbose=False): """ matching a list of 2D positions to consecutive edges in a network """ result = [] paths = {} if verbose: print("mapping trace with %s points" % len(trace)) for pos in trace: newPaths = {} candidates = net.getNeighboringEdges(pos[0], pos[1], delta) if len(candidates) == 0 and verbose: print("Found no candidate edges for %s,%s" % pos) for edge, d in candidates: if paths: minDist = 1e400 minPath = None for path, dist in paths.iteritems(): if dist < minDist: if edge == path[-1]: minPath = path minDist = dist elif edge in path[-1].getOutgoing(): minPath = path + (edge,) minDist = dist else: minPath = path + (edge,) minDist = dist + euclidean( path[-1].getToNode().getCoord(), edge.getFromNode().getCoord()) if minPath: newPaths[minPath] = minDist + d * d else: newPaths[(edge,)] = d * d if not newPaths: if paths: result += [e.getID() for e in _getMinPath(paths)] paths = newPaths if paths: return result + [e.getID() for e in _getMinPath(paths)] return result
[ "os.path.abspath", "os.path.join" ]
[((685, 717), 'os.path.join', 'os.path.join', (['SUMO_HOME', '"""tools"""'], {}), "(SUMO_HOME, 'tools')\n", (697, 717), False, 'import os\n'), ((628, 653), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (643, 653), False, 'import os\n')]
import numpy as np from abc import abstractmethod from init_args_serializer import Serializable from rcsenv import RcsSimEnv, JointLimitException import pyrado from pyrado.environments.sim_base import SimEnv from pyrado.spaces.base import Space from pyrado.spaces.box import BoxSpace from pyrado.spaces.empty import EmptySpace from pyrado.tasks.base import Task from pyrado.utils.data_types import RenderMode def to_pyrado_space(space) -> [BoxSpace, EmptySpace]: """ Convert the box space implementation from RcsPySim to the one of Pyrado. :param space: a space from RcsPySim :return: a Pyrado `BoxSpace` or an Pyrado`EmptySpace` if `None` was given """ if space is None: return EmptySpace return BoxSpace(space.min, space.max, labels=space.names) class RcsSim(SimEnv, Serializable): """ Base class for RcsPySim environments. Uses Serializable to facilitate proper serialization. """ def __init__(self, envType: str, task_args: dict, dt: float = 0.01, max_steps: int = pyrado.inf, init_state: np.ndarray = None, checkJointLimits: bool = False, joint_limit_penalty: float = -1e3, **kwargs): """ Constructor .. note:: The joint type (i.e. position or torque control) is set in the config-xml file in Rcs. :param envType: environment type name as defined on the C++ side :param task_args: arguments for the task construction :param dt: integration step size in seconds :param max_steps: max number of simulation time steps :param domain_param: initial domain param values :param init_state: initial state sampler can be a callable or one fixed state :param checkJointLimits: flags if the joint limits should be ignored or not passed to the C++ constructor :param joint_limit_penalty: cost returned on termination due to joint limits. This is a different from the state bounds since `RcsPySim` return an error when the joint limits are violated. :param kwargs: keyword arguments which are available for `RcsSim` on the C++ side. 
These arguments will not be stored in the environment object, thus are saved e.g. when pickled. """ Serializable._init(self, locals()) # Initialize basic variables super().__init__(dt, max_steps) self._check_joint_limits = checkJointLimits # Create Rcs-based implementation (RcsSimEnv comes from the pybind11 module) self._impl = RcsSimEnv( dt=dt, envType=envType, checkJointLimits=self._check_joint_limits, **kwargs ) # Setup the initial domain parameters self._domain_param = self._unadapt_domain_param(self._impl.domainParam) if joint_limit_penalty > 0: raise pyrado.ValueErr(given=joint_limit_penalty, le_constraint='0') self._joint_limit_penalty = joint_limit_penalty # Initial init state space is taken from C++ self._init_space = to_pyrado_space(self._impl.initStateSpace) # By default, the state space is a subset of the observation space. Set this to customize in subclass. self.state_mask = None # Dummy initialization, must be set by the derived classes self.init_state = None self.task_args = task_args self._task = self._create_task(self.task_args) @property def state_space(self) -> Space: """ Derives the state space from the observation space using _state_from_obs or state_mask. 
""" obs_space = self.obs_space # Check if _state_from_obs was overridden if self._state_from_obs.__func__ != RcsSim._state_from_obs: return BoxSpace(self._state_from_obs(obs_space.bound_lo), self._state_from_obs(obs_space.bound_up), None) # Check if there is a state mask if self.state_mask is not None: return obs_space.subspace(self.state_mask) # Identical to obs space return obs_space @property def obs_space(self) -> Space: return to_pyrado_space(self._impl.observationSpace) @property def init_space(self) -> Space: return self._init_space @init_space.setter def init_space(self, space: Space): assert to_pyrado_space(self._impl.initStateSpace).shape == space.shape self._init_space = space @property def act_space(self) -> Space: return to_pyrado_space(self._impl.actionSpace) @property def task(self) -> Task: return self._task @abstractmethod def _create_task(self, task_args: dict) -> Task: # Needs to implemented by subclasses raise NotImplementedError @property def domain_param(self) -> dict: return self._unadapt_domain_param(self._impl.domainParam) @domain_param.setter def domain_param(self, param: dict): if not isinstance(param, dict): raise pyrado.TypeErr(given=param, expected_type=dict) # Update the internal parameters. The New domain parameters will be applied on reset(). self._domain_param.update(param) # Update task self._task = self._create_task(self.task_args) @classmethod def get_nominal_domain_param(cls): """ Get the nominal a.k.a. default domain parameters. .. note:: It is highly recommended to have the same values as in the associated physics config file (p<NAME>.xml), since the nominal domain parameters are not set explicitly from Pyrado (only when randomizing). """ raise NotImplementedError def _state_from_obs(self, obs: np.ndarray) -> np.ndarray: """ Retrieve the system state from the observation. In most cases, the system state is a part of the observation. This function is to be used when the observations include additional information. 
The default implementation is based off `self.state_mask` which is set in sub-classes of `RcsSim`. :param obs: observation from the environment :return: state of the environment """ if self.state_mask is not None: return obs[self.state_mask] return obs.copy() def _adapt_domain_param(self, params: dict) -> dict: """ Changes the domain parameters before passing them to the Rcs simulation. One use case is for example the rolling friction coefficient which is usually given unit-less but the Vortex physics engine expects it to be multiplied with the body's curvature radius. :param params: domain parameters to adapt :return: adapted parameters """ return params def _unadapt_domain_param(self, params: dict) -> dict: """ Changes the domain parameters coming from to the Rcs simulation. .. note:: This function is called from the constructor. :param params: domain parameters to revert the previously done adaptation :return: unadapted parameters """ return params def _get_state(self, state_dict: dict): state_dict['domain_param'] = self.domain_param state_dict['init_state'] = self.init_state def _set_state(self, state_dict: dict, copying: bool = False): self.domain_param = state_dict['domain_param'] self.init_state = state_dict['init_state'] def _disturbance_generator(self) -> (np.ndarray, None): """ Provide an artificial disturbance. For example a force on a body in the physics simulation. 
""" return None def reset(self, init_state: np.ndarray = None, domain_param: dict = None) -> np.ndarray: # Reset time self._curr_step = 0 # Reset the state if init_state is None: # Sample from the init state space init_state = self._init_space.sample_uniform() else: if not init_state.shape == self._init_space.shape: raise pyrado.ShapeErr(given=init_state, expected_match=self._init_space) # Reset the task self._task.reset(env_spec=self.spec) # Use stored domain parameters if not overwritten if domain_param is None: domain_param = self._domain_param # Forward to C++ implementation obs = self._impl.reset(domainParam=self._adapt_domain_param(domain_param), initState=init_state) self.state = self._state_from_obs(obs) return obs def step(self, act: np.ndarray) -> tuple: # Current reward depending on the state (before step) and the (unlimited) action remaining_steps = self._max_steps - (self._curr_step + 1) if self._max_steps is not pyrado.inf else 0 self._curr_rew = self._task.step_rew(self.state, act, remaining_steps) # Apply actuator limits act = self.limit_act(act) # Get the disturbance to be applied on the Rcs side disturbance = self._disturbance_generator() # Dynamics are calculated in the Rcs simulation try: obs = self._impl.step(act, disturbance) except JointLimitException: # Joint limits exceeded! Return (obs, rew, done, info) directly after this failure. 
return self._impl.lastObservation, self._joint_limit_penalty, True, dict(t=self._curr_step*self._dt) self.state = self._state_from_obs(obs) # only for the Python side info = dict(t=self._curr_step*self._dt) self._curr_step += 1 # Check if the task or the environment is done done = self._task.is_done(self.state) if self._curr_step >= self._max_steps: done = True if done: # Add final reward if done self._curr_rew += self._task.final_rew(self.state, remaining_steps) return obs, self._curr_rew, done, info def render(self, mode: RenderMode = RenderMode(text=True), render_step: int = 1): if self._curr_step%render_step == 0: # Call base class super().render(mode) # Forward to Rcs GUI if mode.video: self._impl.render() def save_config_xml(self, fileName: str): """ Save environment configuration as xml file for use on the C++ side. :param fileName: output file name """ self._impl.saveConfigXML(fileName) def get_body_position(self, bodyName: str, refFrameName: str, refBodyName: str) -> np.ndarray: """ Get the position of a body in the simulators config graph. This function uses code coped from `Rcs` to transform the position depending on a refernce frame and/or body. :param bodyName: name of the body in the graph :param refFrameName: name of the reference frame, pass '' to use world coordinates :param refBodyName: name of the reference body, pass '' to use world coordinates :return: x,y,z positions in a reference frame coordinates relative to a reference bodies """ return self._impl.getBodyPosition(bodyName, refFrameName, refBodyName) def get_body_extents(self, bodyName: str, shapeIdx: int = 0) -> np.ndarray: """ Get the dimensions of a body in the simulators config graph. This function uses code coped from `Rcs` to transform the position depending on a refernce frame and/or body. .. note:: Depending on the kind of shape (e.g. box, sphere, torus, ect.) the extends mean different things. 
:param bodyName: name of the body in the graph :param shapeIdx: index of the shape in the `Body` node, defaults to the first shape of the body :return: x,y,z positions in a reference frame coordinates relative to a reference bodies """ return self._impl.getBodyExtents(bodyName, shapeIdx)
[ "pyrado.spaces.box.BoxSpace", "pyrado.ShapeErr", "pyrado.utils.data_types.RenderMode", "pyrado.TypeErr", "pyrado.ValueErr", "rcsenv.RcsSimEnv" ]
[((737, 787), 'pyrado.spaces.box.BoxSpace', 'BoxSpace', (['space.min', 'space.max'], {'labels': 'space.names'}), '(space.min, space.max, labels=space.names)\n', (745, 787), False, 'from pyrado.spaces.box import BoxSpace\n'), ((2669, 2759), 'rcsenv.RcsSimEnv', 'RcsSimEnv', ([], {'dt': 'dt', 'envType': 'envType', 'checkJointLimits': 'self._check_joint_limits'}), '(dt=dt, envType=envType, checkJointLimits=self._check_joint_limits,\n **kwargs)\n', (2678, 2759), False, 'from rcsenv import RcsSimEnv, JointLimitException\n'), ((10094, 10115), 'pyrado.utils.data_types.RenderMode', 'RenderMode', ([], {'text': '(True)'}), '(text=True)\n', (10104, 10115), False, 'from pyrado.utils.data_types import RenderMode\n'), ((2996, 3057), 'pyrado.ValueErr', 'pyrado.ValueErr', ([], {'given': 'joint_limit_penalty', 'le_constraint': '"""0"""'}), "(given=joint_limit_penalty, le_constraint='0')\n", (3011, 3057), False, 'import pyrado\n'), ((5125, 5172), 'pyrado.TypeErr', 'pyrado.TypeErr', ([], {'given': 'param', 'expected_type': 'dict'}), '(given=param, expected_type=dict)\n', (5139, 5172), False, 'import pyrado\n'), ((8180, 8246), 'pyrado.ShapeErr', 'pyrado.ShapeErr', ([], {'given': 'init_state', 'expected_match': 'self._init_space'}), '(given=init_state, expected_match=self._init_space)\n', (8195, 8246), False, 'import pyrado\n')]
import requests from django.conf import settings from django_ilmoitin.models import NotificationTemplate from django_ilmoitin.utils import render_notification_template NOTIFICATION_SERVICE_API_URL = "NOTIFICATION_SERVICE_API_URL" NOTIFICATION_SERVICE_SENDER_NAME = "NOTIFICATION_SERVICE_SENDER_NAME" NOTIFICATION_SERVICE_TOKEN = "NOTIFICATION_SERVICE_TOKEN" DEFAULT_LANGUAGE = settings.LANGUAGE_CODE class SMSNotificationService: """ The documentation for the API can be found on the GitHub repo: https://github.com/City-of-Helsinki/notification-service-api """ def __init__(self, **kwargs): if "config" in kwargs: self.config = kwargs.get("config") self.api_url = self.config.get(NOTIFICATION_SERVICE_API_URL) self.sender_name = self.config.get(NOTIFICATION_SERVICE_SENDER_NAME) self.token = kwargs.get("token") or self.config.get(NOTIFICATION_SERVICE_TOKEN) assert self.token @staticmethod def get_config_template(): return { NOTIFICATION_SERVICE_API_URL: str, NOTIFICATION_SERVICE_SENDER_NAME: str, NOTIFICATION_SERVICE_TOKEN: str, } def send( self, notification_type: str, context: dict, phone_number: str, language=DEFAULT_LANGUAGE, ): template = NotificationTemplate.objects.get(type=notification_type) message = render_notification_template(template, context, language).body_text return self.send_plain_text(phone_number, message) def send_plain_text(self, phone_number: str, message: str): data = { "sender": self.sender_name, "to": [{"destination": phone_number, "format": "MOBILE"}], "text": message, } return self._do_send(data) def send_batch(self, phone_numbers: list, message: str): data = { "sender": self.sender_name, "to": [ {"destination": phone_number, "format": "MOBILE"} for phone_number in phone_numbers ], "text": message, } return self._do_send(data) def _do_send(self, data): headers = {"Authorization": f"Token {self.token}"} return requests.post(f"{self.api_url}/message/send", json=data, headers=headers)
[ "requests.post", "django_ilmoitin.models.NotificationTemplate.objects.get", "django_ilmoitin.utils.render_notification_template" ]
[((1349, 1405), 'django_ilmoitin.models.NotificationTemplate.objects.get', 'NotificationTemplate.objects.get', ([], {'type': 'notification_type'}), '(type=notification_type)\n', (1381, 1405), False, 'from django_ilmoitin.models import NotificationTemplate\n'), ((2267, 2340), 'requests.post', 'requests.post', (['f"""{self.api_url}/message/send"""'], {'json': 'data', 'headers': 'headers'}), "(f'{self.api_url}/message/send', json=data, headers=headers)\n", (2280, 2340), False, 'import requests\n'), ((1424, 1481), 'django_ilmoitin.utils.render_notification_template', 'render_notification_template', (['template', 'context', 'language'], {}), '(template, context, language)\n', (1452, 1481), False, 'from django_ilmoitin.utils import render_notification_template\n')]
# Generated by Django 3.1.5 on 2021-09-04 23:52 from django.db import migrations, models class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Security_Alert', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('location', models.CharField(max_length=255, verbose_name='location')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('edited', models.DateTimeField(auto_now=True, verbose_name='edited')), ('description', models.CharField(max_length=255, verbose_name='description')), ('confirmation', models.FloatField(default=0.0, verbose_name='confirmation')), ('image', models.ImageField(null=True, upload_to='')), ], options={ 'verbose_name': 'Security_Alert', 'verbose_name_plural': 'Security_Alerts', 'db_table': '', 'managed': True, }, ), migrations.CreateModel( name='Security_alert_type', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=150)), ], options={ 'verbose_name': 'Security_alert_type', 'verbose_name_plural': 'Security_alert_types', 'db_table': '', 'managed': True, }, ), ]
[ "django.db.models.CharField", "django.db.models.FloatField", "django.db.models.AutoField", "django.db.models.ImageField", "django.db.models.DateTimeField" ]
[((310, 403), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (326, 403), False, 'from django.db import migrations, models\n'), ((431, 488), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""location"""'}), "(max_length=255, verbose_name='location')\n", (447, 488), False, 'from django.db import migrations, models\n'), ((519, 582), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""created"""'}), "(auto_now_add=True, verbose_name='created')\n", (539, 582), False, 'from django.db import migrations, models\n'), ((612, 670), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""edited"""'}), "(auto_now=True, verbose_name='edited')\n", (632, 670), False, 'from django.db import migrations, models\n'), ((705, 765), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""description"""'}), "(max_length=255, verbose_name='description')\n", (721, 765), False, 'from django.db import migrations, models\n'), ((801, 860), 'django.db.models.FloatField', 'models.FloatField', ([], {'default': '(0.0)', 'verbose_name': '"""confirmation"""'}), "(default=0.0, verbose_name='confirmation')\n", (818, 860), False, 'from django.db import migrations, models\n'), ((889, 931), 'django.db.models.ImageField', 'models.ImageField', ([], {'null': '(True)', 'upload_to': '""""""'}), "(null=True, upload_to='')\n", (906, 931), False, 'from django.db import migrations, models\n'), ((1286, 1379), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n 
verbose_name='ID')\n", (1302, 1379), False, 'from django.db import migrations, models\n'), ((1403, 1435), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(150)'}), '(max_length=150)\n', (1419, 1435), False, 'from django.db import migrations, models\n')]
#!/usr/bin/env python # -*- coding: utf-8 -*- # # This file is part of Karesansui. # # Copyright (C) 2012 HDE, Inc. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # import os import os.path import sys import re import signal import logging from optparse import OptionParser from ksscommand import KssCommand, KssCommandException, KssCommandOptException import __cmd__ try: import karesansui from karesansui import __version__ from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException from karesansui.lib.utils import load_locale, preprint_r, base64_decode from karesansui.db.access.machine import findby1uniquekey from karesansui.lib.utils import string_from_uuid as StrFromUUID from karesansui.lib.utils import generate_uuid as GenUUID from karesansui.lib.virt.snapshot import KaresansuiVirtSnapshot from karesansui.db.access.snapshot import findbyname_guestby1 as s_findbyname_guestby1 except ImportError as e: print("[Error] some packages not found. 
- %s" % e, file=sys.stderr) sys.exit(1) _ = load_locale() usage = '%prog [options]' def getopts(): optp = OptionParser(usage=usage, version=__version__) optp.add_option('-n', '--name', dest='name', help=_('Domain Name')) optp.add_option('-p', '--pool', dest='pool', help=_('Storage pool name')) #optp.add_option('-d', '--dir', dest='dir', help=_('Directory name')) optp.add_option('-t', '--title',dest='title',default='', help=_('Export title')) optp.add_option('-q', '--quiet',dest='verbose', action="store_false", default=True, help=_("don't print status messages")) return optp.parse_args() def chkopts(opts): reg = re.compile("[^a-zA-Z0-9\./_:-]") if opts.name: if reg.search(opts.name): raise KssCommandOptException('ERROR: Illigal option value. option=%s value=%s' % ('-n or --name', opts.name)) else: raise KssCommandOptException('ERROR: %s option is required.' % '-n or --name') if opts.pool: if reg.search(opts.pool): raise KssCommandOptException('ERROR: Illigal option value. option=%s value=%s' % ('-p or --pool', opts.pool)) else: raise KssCommandOptException('ERROR: %s option is required.' % '-p or --pool') #if not opts.pool and not opts.dir: # raise KssCommandOptException('ERROR: -p/--pool or -d/--dir options are required.') class ExportGuest(KssCommand): def __grab_stdout(self, flag): if flag: self.stdout = sys.stdout sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 0) logf = open("/dev/null", "a") os.dup2(logf.fileno(), 1) logf.close() else: os.dup2(sys.stdout.fileno(), 1) sys.stdout = self.stdout del self.stdout def process(self): (opts, args) = getopts() chkopts(opts) self.up_progress(10) conn = KaresansuiVirtConnection(readonly=False) try: try: src_pool = conn.get_storage_pool_name_bydomain(opts.name, "os") if not src_pool: raise KssCommandException("Source storage pool not found. domain=%s" % (opts.name)) if conn.get_storage_pool_type(src_pool) == 'dir': raise KssCommandException("Storage pool type 'dir' is not. 
domain=%s" % (opts.name)) src_path = conn.get_storage_pool_targetpath(src_pool[0]) self.domain_dir = "%s/%s" % (src_path, opts.name,) if os.path.isdir(self.domain_dir) is False: raise KssCommandException( 'domain directory is not found or not directory. - %s' % (self.domain_dir)) # Model virt_uuid = conn.domname_to_uuid(opts.name) model = findby1uniquekey(self.kss_session, virt_uuid) if not model: raise KssCommandException("Export data does not exist in the database.") database = {} database['attribute'] = model.attribute database['hypervisor'] = model.hypervisor database['icon'] = model.icon database['name'] = model.name database['notebook'] = {"title" : model.notebook.title, "value" : model.notebook.value, } tags = [] for _tag in model.tags: tags.append(_tag.name) database['tags'] = ",".join(tags) database['uniq_key'] = model.uniq_key # Snapshot snapshots = [] kvs = KaresansuiVirtSnapshot(readonly=False) try: guest_id = model.id snapshot_list = kvs.listNames(opts.name)[opts.name] if len(snapshot_list) > 0: for snapshot in snapshot_list: s_model = s_findbyname_guestby1(self.kss_session, snapshot, guest_id) if s_model is not None: name = s_model.name title = s_model.notebook.title value = s_model.notebook.value snapshots.append({"name":name, "title":title, "value":value,}) except: raise KssCommandException("Cannot fetch the information of snapshots correctly.") kvs.finish() # Pool target_dir = "" if opts.pool: inactive_storage_pools = conn.list_inactive_storage_pool() active_storage_pools = conn.list_active_storage_pool() if not (opts.pool in active_storage_pools or opts.pool in inactive_storage_pools): raise KssCommandException('Target storage pool does not exist. - pool=%s' % (opts.pool)) pool = conn.search_kvn_storage_pools(opts.pool) storage_info = pool[0].get_info() if storage_info["type"] == "dir" and storage_info["target"]["path"] != "": target_dir = storage_info["target"]["path"] else: raise KssCommandException("Target storage pool type is not 'dir'. 
pool=%s" % (opts.pool)) elif opts.dir: target_dir = opts.dir self.up_progress(10) progresscb = None if opts.verbose: try: from karesansui.lib.progress import ProgressMeter progresscb = ProgressMeter(command_object=self) except: pass else: try: from karesansui.lib.progress import ProgressMeter progresscb = ProgressMeter(command_object=self,quiet=True) except: pass if opts.title[0:4] == "b64:": title = base64_decode(opts.title[4:]) else: title = opts.title uuid = StrFromUUID(GenUUID()) conn.export_guest(uuid=uuid, name=opts.name, directory=target_dir, database=database, realicon=model.realicon(), title=title, snapshots=snapshots, progresscb=progresscb) self.up_progress(40) self.logger.info('Export guest completed. - pool=%s, uuid=%s' % (opts.pool, uuid)) print(_('Export guest completed. - pool=%s, uuid=%s' % (opts.pool, uuid)), file=sys.stdout) return True except KaresansuiVirtException as e: raise KssCommandException('Failed to export guest. - %s to %s [%s]' \ % (opts.name,target_dir, ''.join(e.args))) except KssCommandException: raise except: raise KssCommandException('Failed to export guest. - %s to %s' \ % (opts.name,target_dir)) finally: conn.close() if __name__ == "__main__": target = ExportGuest() sys.exit(target.run())
[ "karesansui.lib.utils.base64_decode", "karesansui.lib.utils.load_locale", "ksscommand.KssCommandException", "optparse.OptionParser", "re.compile", "karesansui.db.access.machine.findby1uniquekey", "os.path.isdir", "sys.stdout.fileno", "karesansui.lib.virt.snapshot.KaresansuiVirtSnapshot", "karesansui.lib.utils.generate_uuid", "karesansui.lib.virt.virt.KaresansuiVirtConnection", "karesansui.lib.progress.ProgressMeter", "karesansui.db.access.snapshot.findbyname_guestby1", "sys.exit", "ksscommand.KssCommandOptException" ]
[((2097, 2110), 'karesansui.lib.utils.load_locale', 'load_locale', ([], {}), '()\n', (2108, 2110), False, 'from karesansui.lib.utils import load_locale, preprint_r, base64_decode\n'), ((2165, 2211), 'optparse.OptionParser', 'OptionParser', ([], {'usage': 'usage', 'version': '__version__'}), '(usage=usage, version=__version__)\n', (2177, 2211), False, 'from optparse import OptionParser\n'), ((2709, 2742), 're.compile', 're.compile', (['"""[^a-zA-Z0-9\\\\./_:-]"""'], {}), "('[^a-zA-Z0-9\\\\./_:-]')\n", (2719, 2742), False, 'import re\n'), ((2080, 2091), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2088, 2091), False, 'import sys\n'), ((2941, 3013), 'ksscommand.KssCommandOptException', 'KssCommandOptException', (["('ERROR: %s option is required.' % '-n or --name')"], {}), "('ERROR: %s option is required.' % '-n or --name')\n", (2963, 3013), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((3213, 3285), 'ksscommand.KssCommandOptException', 'KssCommandOptException', (["('ERROR: %s option is required.' % '-p or --pool')"], {}), "('ERROR: %s option is required.' % '-p or --pool')\n", (3235, 3285), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((3989, 4029), 'karesansui.lib.virt.virt.KaresansuiVirtConnection', 'KaresansuiVirtConnection', ([], {'readonly': '(False)'}), '(readonly=False)\n', (4013, 4029), False, 'from karesansui.lib.virt.virt import KaresansuiVirtConnection, KaresansuiVirtException\n'), ((2813, 2920), 'ksscommand.KssCommandOptException', 'KssCommandOptException', (["('ERROR: Illigal option value. option=%s value=%s' % ('-n or --name', opts.\n name))"], {}), "('ERROR: Illigal option value. option=%s value=%s' %\n ('-n or --name', opts.name))\n", (2835, 2920), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((3085, 3192), 'ksscommand.KssCommandOptException', 'KssCommandOptException', (["('ERROR: Illigal option value. 
option=%s value=%s' % ('-p or --pool', opts.\n pool))"], {}), "('ERROR: Illigal option value. option=%s value=%s' %\n ('-p or --pool', opts.pool))\n", (3107, 3192), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((3584, 3603), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (3601, 3603), False, 'import sys\n'), ((3768, 3787), 'sys.stdout.fileno', 'sys.stdout.fileno', ([], {}), '()\n', (3785, 3787), False, 'import sys\n'), ((4907, 4952), 'karesansui.db.access.machine.findby1uniquekey', 'findby1uniquekey', (['self.kss_session', 'virt_uuid'], {}), '(self.kss_session, virt_uuid)\n', (4923, 4952), False, 'from karesansui.db.access.machine import findby1uniquekey\n'), ((5794, 5832), 'karesansui.lib.virt.snapshot.KaresansuiVirtSnapshot', 'KaresansuiVirtSnapshot', ([], {'readonly': '(False)'}), '(readonly=False)\n', (5816, 5832), False, 'from karesansui.lib.virt.snapshot import KaresansuiVirtSnapshot\n'), ((4199, 4274), 'ksscommand.KssCommandException', 'KssCommandException', (["('Source storage pool not found. domain=%s' % opts.name)"], {}), "('Source storage pool not found. domain=%s' % opts.name)\n", (4218, 4274), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((4369, 4445), 'ksscommand.KssCommandException', 'KssCommandException', (['("Storage pool type \'dir\' is not. domain=%s" % opts.name)'], {}), '("Storage pool type \'dir\' is not. domain=%s" % opts.name)\n', (4388, 4445), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((4610, 4640), 'os.path.isdir', 'os.path.isdir', (['self.domain_dir'], {}), '(self.domain_dir)\n', (4623, 4640), False, 'import os\n'), ((4677, 4774), 'ksscommand.KssCommandException', 'KssCommandException', (["('domain directory is not found or not directory. - %s' % self.domain_dir)"], {}), "('domain directory is not found or not directory. 
- %s' %\n self.domain_dir)\n", (4696, 4774), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((5009, 5075), 'ksscommand.KssCommandException', 'KssCommandException', (['"""Export data does not exist in the database."""'], {}), "('Export data does not exist in the database.')\n", (5028, 5075), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((8271, 8300), 'karesansui.lib.utils.base64_decode', 'base64_decode', (['opts.title[4:]'], {}), '(opts.title[4:])\n', (8284, 8300), False, 'from karesansui.lib.utils import load_locale, preprint_r, base64_decode\n'), ((8398, 8407), 'karesansui.lib.utils.generate_uuid', 'GenUUID', ([], {}), '()\n', (8405, 8407), True, 'from karesansui.lib.utils import generate_uuid as GenUUID\n'), ((9433, 9520), 'ksscommand.KssCommandException', 'KssCommandException', (["('Failed to export guest. - %s to %s' % (opts.name, target_dir))"], {}), "('Failed to export guest. - %s to %s' % (opts.name,\n target_dir))\n", (9452, 9520), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((6542, 6617), 'ksscommand.KssCommandException', 'KssCommandException', (['"""Cannot fetch the information of snapshots correctly."""'], {}), "('Cannot fetch the information of snapshots correctly.')\n", (6561, 6617), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((7020, 7105), 'ksscommand.KssCommandException', 'KssCommandException', (["('Target storage pool does not exist. - pool=%s' % opts.pool)"], {}), "('Target storage pool does not exist. - pool=%s' % opts.pool\n )\n", (7039, 7105), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((7445, 7531), 'ksscommand.KssCommandException', 'KssCommandException', (['("Target storage pool type is not \'dir\'. pool=%s" % opts.pool)'], {}), '("Target storage pool type is not \'dir\'. 
pool=%s" % opts\n .pool)\n', (7464, 7531), False, 'from ksscommand import KssCommand, KssCommandException, KssCommandOptException\n'), ((7843, 7877), 'karesansui.lib.progress.ProgressMeter', 'ProgressMeter', ([], {'command_object': 'self'}), '(command_object=self)\n', (7856, 7877), False, 'from karesansui.lib.progress import ProgressMeter\n'), ((8093, 8139), 'karesansui.lib.progress.ProgressMeter', 'ProgressMeter', ([], {'command_object': 'self', 'quiet': '(True)'}), '(command_object=self, quiet=True)\n', (8106, 8139), False, 'from karesansui.lib.progress import ProgressMeter\n'), ((6106, 6165), 'karesansui.db.access.snapshot.findbyname_guestby1', 's_findbyname_guestby1', (['self.kss_session', 'snapshot', 'guest_id'], {}), '(self.kss_session, snapshot, guest_id)\n', (6127, 6165), True, 'from karesansui.db.access.snapshot import findbyname_guestby1 as s_findbyname_guestby1\n')]
import jax.numpy as jnp from jax.api import jit @jit def x_rotz(theta): c = jnp.cos(theta) s = jnp.sin(theta) x = jnp.array([[c, s, 0.0, 0.0, 0.0, 0.0], [-s, c, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, c, s, 0.0], [0.0, 0.0, 0.0, -s, c, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]) return x if __name__ == "__main__": import math from jax import make_jaxpr print(make_jaxpr(x_rotz)(math.pi)) print(x_rotz(math.pi))
[ "jax.numpy.array", "jax.numpy.cos", "jax.make_jaxpr", "jax.numpy.sin" ]
[((82, 96), 'jax.numpy.cos', 'jnp.cos', (['theta'], {}), '(theta)\n', (89, 96), True, 'import jax.numpy as jnp\n'), ((105, 119), 'jax.numpy.sin', 'jnp.sin', (['theta'], {}), '(theta)\n', (112, 119), True, 'import jax.numpy as jnp\n'), ((128, 327), 'jax.numpy.array', 'jnp.array', (['[[c, s, 0.0, 0.0, 0.0, 0.0], [-s, c, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, c, s, 0.0], [0.0, 0.0, 0.0, -s, c, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]]'], {}), '([[c, s, 0.0, 0.0, 0.0, 0.0], [-s, c, 0.0, 0.0, 0.0, 0.0], [0.0, \n 0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, c, s, 0.0], [0.0, 0.0, 0.0, -\n s, c, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 1.0]])\n', (137, 327), True, 'import jax.numpy as jnp\n'), ((506, 524), 'jax.make_jaxpr', 'make_jaxpr', (['x_rotz'], {}), '(x_rotz)\n', (516, 524), False, 'from jax import make_jaxpr\n')]
''' Created on 19 Nov 2017 @author: Simon ''' from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm import numpy as np import datetime import pickle from scipy.interpolate import interp1d from boundary import BoundaryConditionCollection1D from diagnostic import DiagnosticModule class ThawSlump(object): # 1D # time_initial only works when forcing is provided def __init__( self, tsmesh, time_step_module=None, output_step_module=None, forcing_module=None, thermal_properties=None, time_initial=None): self.mesh = tsmesh self.variables = {} self.variables_store = [] self.diagnostic_modules = {} self.diagnostic_update_order = [] self.eq = None self.boundary_condition_collection = None self._time = Variable(value=0) self._time_step_module = time_step_module self._timeref = None # will generally be set by forcing_module; otherwise manually if forcing_module is not None: self.initializeForcing(forcing_module) if time_initial is not None: self.time = time_initial if thermal_properties is not None: self.initializeThermalProperties(thermal_properties) self._output_step_module = output_step_module self._output_module = SlumpOutput() if output_step_module is None: self._output_step_module = OutputStep() @property def time(self): return float(self._time.value) @time.setter def time(self, t): # can also handle date objects try: self.date = t except: self._time.setValue(t) @property def timeStep(self): return self._time_step_module.calculate(self) @property def date(self): return self._internal_time_to_date(self.time) def _internal_time_to_date(self, internal_time): return self._timeref + datetime.timedelta(seconds=internal_time) @date.setter def date(self, d): dtsec = self._date_to_internal_time(d) self._time.setValue(dtsec) def _date_to_internal_time(self, d): dt = d - self._timeref dtsec = dt.days * 24 * 3600 + dt.seconds + dt.microseconds * 1e-6 return dtsec def initializeTimeReference(self, timeref): # timeref is a datetime object self._timeref = 
timeref def initializePDE(self, tseq=None): self.eq = tseq def initializeTimeStepModule(self, time_step_module): self._time_step_module = time_step_module def _initializeSourcesZero(self, source_name='S'): self.variables[source_name] = CellVariable( name=source_name, mesh=self.mesh.mesh, value=0.0) def initializeDiagnostic( self, variable, funpointer, default=0.0, face_variable=False, output_variable=True): if not face_variable: self.variables[variable] = CellVariable( name=variable, mesh=self.mesh.mesh, value=default) else: self.variables[variable] = FaceVariable( name=variable, mesh=self.mesh.mesh, value=default) self.diagnostic_modules[variable] = DiagnosticModule(funpointer, self) if output_variable: self.variables_store.append(variable) self.diagnostic_update_order.append(variable) def initializeOutputStepModule(self, output_step_module): self._output_step_module = output_step_module def initializeThermalProperties(self, thermal_properties): self.thermal_properties = thermal_properties self.thermal_properties.initializeVariables(self) self.initializeTright() def initializeForcing(self, forcing_module): self.forcing_module = forcing_module for varj in self.forcing_module.variables: assert varj not in self.variables self.variables[varj] = self.forcing_module.variables[varj] self.initializeTimeReference(self.forcing_module._timeref) def initializeEnthalpyTemperature(self, T_initial, proportion_frozen=None, time=None): # time can be internal time or also a datetime object pf = 0.0 if proportion_frozen is None else proportion_frozen assert pf >= 0.0 and pf <= 1.0 self.variables['T'].setValue(T_initial) self.variables['h'].setValue(self.thermal_properties.enthalpyFromTemperature( self, T=T_initial, proportion_frozen=pf)) self.updateDiagnostics() if time is not None: self.time = time def updateDiagnostic(self, variable): self.variables[variable].setValue(self.diagnostic_modules[variable].evaluate()) def updateDiagnostics(self, variables=None): if variables is not 
None: variablesorder = variables else: variablesorder = self.diagnostic_update_order for variable in variablesorder: self.updateDiagnostic(variable) def specifyBoundaryConditions(self, boundary_condition_collection): self.boundary_condition_collection = boundary_condition_collection self.updateGeometryBoundaryConditions() self.invokeBoundaryConditions() self.initializePDE() def updateGeometryBoundaryConditions(self): self.boundary_condition_collection.updateGeometry(self) def updateBoundaryConditions(self, bc_data, invoke=True): self.boundary_condition_collection.update(bc_data) if invoke: self.invokeBoundaryConditions() def invokeBoundaryConditions(self): self.boundary_condition_collection.invoke(self) def updateGeometry(self): self.boundary_condition_collection.updateGeometry(self) def nextOutput(self): return self._output_step_module.next(self) def updateOutput(self, datanew={}): for v in self.variables_store: datanew[v] = np.copy(self.variables[v].value) # boundary condition outputs: # separate routine: total source, source components, or for basic b.c. just value) datanew.update(self.boundary_condition_collection.output()) self._output_module.update(self.date, datanew) def exportOutput(self, fn): self._output_module.export(fn) def addStoredVariable(self, varname): # varname can also be list if isinstance(varname, str): if varname not in self.variables_store: self.variables_store.append(varname) else: # tuple/list,etc. 
for varnamej in varname: self.addStoredVariable(varnamej) class ThawSlumpEnthalpy(ThawSlump): # both boundary conditions bc_inside and bc_headwall have to be provided, # and they are only activated when forcing and thermal_properties are also given def __init__( self, tsmesh, time_step_module=None, output_step_module=None, h_initial=0.0, T_initial=None, time_initial=None, proportion_frozen_initial=None, forcing_module=None, thermal_properties=None, bc_inside=None, bc_headwall=None): # T_initial only works if thermal_properties are provided ThawSlump.__init__( self, tsmesh, time_step_module=time_step_module, output_step_module=output_step_module, time_initial=time_initial, forcing_module=forcing_module, thermal_properties=thermal_properties) self._initializeSourcesZero(source_name='S') self._initializeSourcesZero(source_name='S_inside') self._initializeSourcesZero(source_name='S_headwall') # specific volumetric enthalpy self.variables['h'] = CellVariable( name='h', mesh=self.mesh.mesh, value=h_initial, hasOld=True) self.addStoredVariable('h') if T_initial is not None: # essentially overrides h_initial self.initializeEnthalpyTemperature( T_initial, proportion_frozen=proportion_frozen_initial) if (bc_inside is not None and bc_headwall is not None and self.thermal_properties is not None and self.forcing_module is not None): bcc = BoundaryConditionCollection1D( bc_headwall=bc_headwall, bc_inside=bc_inside) self.specifyBoundaryConditions(bcc) self._output_module.storeInitial(self) def initializePDE(self): self.eq = (TransientTerm(var=self.variables['h']) == DiffusionTerm(coeff=self.variables['k'], var=self.variables['T']) + self.variables['S'] + self.variables['S_headwall'] + self.variables['S_inside']) def initializeTright(self): extrapol_dist = (self.mesh.mesh.faceCenters[0, self.mesh.mesh.facesRight()][0] -self.mesh.cell_mid_points) self.dxf = CellVariable(mesh=self.mesh.mesh, value=extrapol_dist) self.variables['T_right'] = ( self.variables['T'] + 
self.variables['T'].grad[0] * self.dxf) def updateGeometry(self): ThawSlump.updateGeometry(self) self.initializeTright() def _integrate( self, time_step, max_time_step=None, residual_threshold=1e-3, max_steps=20): apply_max_time_step = False if time_step is None: time_step = self.timeStep if max_time_step is not None and time_step > max_time_step: time_step = max_time_step apply_max_time_step = True residual = residual_threshold + 1 steps = 0 assert self._timeref == self.forcing_module._timeref self.forcing_module.evaluateToVariable(t=self.time) while residual > residual_threshold: residual = self.eq.sweep(var=self.variables['h'], dt=time_step) steps = steps + 1 if steps >= max_steps: raise RuntimeError('Sweep did not converge') self.time = self.time + time_step self.variables['h'].updateOld() self.updateDiagnostics() return time_step, apply_max_time_step def integrate( self, time_end, time_step=None, residual_threshold=1e-2, max_steps=10, time_start=None, viewer=None): # time_end can also be date if time_start is not None: self.time = time_start self.variables['h'].updateOld() try: interval = time_end - self.time time_end_internal = time_end except: time_end_internal = self._date_to_internal_time(time_end) time_output = self.nextOutput() write_output = False write_output_limit = False time_steps = [] while self.time < time_end_internal: max_time_step = time_end_internal - self.time if time_output is not None and time_output < time_end_internal: max_time_step = time_output - self.time write_output_limit = True time_step_actual, apply_max_time_step = self._integrate( time_step, max_time_step=max_time_step) time_steps.append(time_step_actual) if apply_max_time_step and write_output_limit: write_output = True if viewer is not None: viewer.plot() viewer.axes.set_title(self.date) if write_output: time_output = self.nextOutput() write_output = False write_output_limit = False # actually write output datanew = {'nsteps':len(time_steps), 
'mean_time_step':np.mean(time_steps)} self.updateOutput(datanew=datanew) time_steps = [] class SlumpOutput(object): def __init__(self): self.dates = [] self.data = {} self.initial = {} def update(self, date, datanew): records = set(self.data.keys() + datanew.keys()) for record in records: if record in self.data and record in datanew: self.data[record].append(datanew[record]) elif record in self.data: self.data[record].append(None) else: # new record; fill with Nones self.data[record] = [None] * len(self.dates) self.data[record].append(datanew[record]) self.dates.append(date) def storeInitial(self, ts): self.initial['mesh_mid_points'] = ts.mesh.cell_mid_points self.initial['mesh_face_left'] = ts.mesh.face_left_position self.initial['mesh_face_right'] = ts.mesh.face_right_position self.initial['mesh_cell_volumes'] = ts.mesh.cell_volumes self.initial['T_initial'] = np.copy(ts.variables['T'].value) self.initial.update(ts.thermal_properties.output()) def export(self, fn): with open(fn, 'wb') as f: pickle.dump(self.read(), f) def read(self): return (self.dates, self.data, self.initial) # nice way to read pickled SlumpOutput data (read method) class SlumpResults(object): def __init__(self, dates, data, initial, timeref=None): self.dates = dates self.data = data self.initial = initial if timeref is not None: self._timeref = timeref else: self._timeref = self.dates[0] @classmethod def fromFile(cls, fn): dates, data, initial = pickle.load(open(fn, 'rb')) return cls(dates, data, initial) def _date_to_internal_time(self, ds): # ds is list dts = [d - self._timeref for d in ds] dtsec = [dt.days * 24 * 3600 + dt.seconds + dt.microseconds * 1e-6 for dt in dts] return np.array(dtsec) @property def _depths(self): return self.initial['mesh_face_right'] - self.initial['mesh_mid_points'] def readVariable(self, variable_name='T', interp_dates=None, interp_depths=None): vararr = np.array(self.data[variable_name]) if interp_dates is not None: dates_int = 
self._date_to_internal_time(self.dates) interp_dates_int = self._date_to_internal_time(interp_dates) interpolator_dates = interp1d(dates_int, vararr, axis=0) vararr = interpolator_dates(interp_dates_int) if interp_depths is not None: # check dimensions assert len(vararr.shape) == 2 assert vararr.shape[1] == self.initial['mesh_mid_points'].shape[0] # interpolate interpolator_depths = interp1d(self._depths, vararr, axis=1) vararr = interpolator_depths(interp_depths) return vararr class TimeStep(object): def __init__(self): pass def calculate(self, ts): pass class TimeStepConstant(TimeStep): def __init__(self, step=1.0): self.step = step def calculate(self, ts): return self.step class TimeStepCFL(TimeStep): def __init__(self, safety=0.9): self.safety = safety def calculate(self, ts): K = np.array(ts.variables['K']) K = 0.5 * (K[1::] + K[:-1:]) CFL = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / np.array((K / ts.variables['C']))) step = self.safety * CFL return step class TimeStepCFLSources(TimeStep): def __init__( self, safety=0.9, relative_enthalpy_change=0.01, slow_time_scale=3600 * 24 * 30): self.safety = safety self.relative_enthalpy_change = relative_enthalpy_change # internal time scale, should be >> process time scale; to avoid / zero self.slow_time_scale = slow_time_scale def calculate(self, ts): K = np.array(ts.variables['k']) # hack, only works in 1D and is insufficient for highly irregular grids K = 0.5 * (K[1::] + K[:-1:]) CFL = np.min(0.5 * (ts.mesh.cell_volumes) ** 2 / np.array((K / ts.variables['c']))) step = self.safety * CFL S_total = np.abs( ts.variables['S'] + ts.variables['S_headwall'] + ts.variables['S_inside']) denom = (np.abs(ts.variables['h']) / self.slow_time_scale + S_total) step_sources = (self.relative_enthalpy_change * np.min(np.abs(np.array(ts.variables['h'] / denom)))) if step_sources < step: step = step_sources return step class OutputStep(object): def __init__(self): pass def next(self, ts): return None class OutputStepHourly(OutputStep): 
def __init__(self): pass def next(self, ts): d0 = ts.date datenext = (datetime.datetime(d0.year, d0.month, d0.day, d0.hour) + datetime.timedelta(seconds=3600)) return ts._date_to_internal_time(datenext) class Forcing(object): def __init__(self, values_inp, timeref=datetime.datetime(2012, 1, 1), variables=None): if variables is None: self.variables = [vj for vj in values_inp] else: self.variables = variables self._timeref = timeref self.variables = {vj:Variable(value=values_inp[vj]) for vj in self.variables} self.values = {vj: values_inp[vj] for vj in self.variables} def evaluate(self, t=None): return self.values def evaluateToVariable(self, t=None): for vj, ij in self.evaluate(t=t).iteritems(): self.variables[vj].setValue(ij) class ForcingInterpolation(Forcing): def __init__(self, values_inp, t_inp=None, variables=None, key_time='time'): if t_inp is None: t_inp_int = values_inp[key_time] else: t_inp_int = t_inp self.t_inp = t_inp_int self._timeref = t_inp_int[0] t_inp_rel = [tj - self._timeref for tj in self.t_inp] try: self.t_inp_rel = np.array([tj.total_seconds() for tj in t_inp_rel]) except: self.t_inp_rel = np.array(t_inp_rel) if variables is None: self.variables = [vj for vj in values_inp if vj != key_time] else: self.variables = variables self.variables = {vj:Variable(value=values_inp[vj][0]) for vj in self.variables} self.values = {vj: values_inp[vj] for vj in self.variables} def evaluate(self, t=0): try: t_rel = (t - self._timeref).total_seconds() # datetime object except: t_rel = t # slump-internal time vals = {vj:np.interp(t_rel, self.t_inp_rel, self.values[vj]) for vj in self.variables} return vals
[ "fipy.CellVariable", "numpy.abs", "numpy.copy", "fipy.DiffusionTerm", "boundary.BoundaryConditionCollection1D", "numpy.interp", "datetime.datetime", "numpy.mean", "diagnostic.DiagnosticModule", "numpy.array", "fipy.Variable", "datetime.timedelta", "fipy.FaceVariable", "scipy.interpolate.interp1d", "fipy.TransientTerm" ]
[((858, 875), 'fipy.Variable', 'Variable', ([], {'value': '(0)'}), '(value=0)\n', (866, 875), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((2770, 2832), 'fipy.CellVariable', 'CellVariable', ([], {'name': 'source_name', 'mesh': 'self.mesh.mesh', 'value': '(0.0)'}), '(name=source_name, mesh=self.mesh.mesh, value=0.0)\n', (2782, 2832), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((3326, 3360), 'diagnostic.DiagnosticModule', 'DiagnosticModule', (['funpointer', 'self'], {}), '(funpointer, self)\n', (3342, 3360), False, 'from diagnostic import DiagnosticModule\n'), ((7912, 7985), 'fipy.CellVariable', 'CellVariable', ([], {'name': '"""h"""', 'mesh': 'self.mesh.mesh', 'value': 'h_initial', 'hasOld': '(True)'}), "(name='h', mesh=self.mesh.mesh, value=h_initial, hasOld=True)\n", (7924, 7985), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((9095, 9149), 'fipy.CellVariable', 'CellVariable', ([], {'mesh': 'self.mesh.mesh', 'value': 'extrapol_dist'}), '(mesh=self.mesh.mesh, value=extrapol_dist)\n', (9107, 9149), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((13068, 13100), 'numpy.copy', 'np.copy', (["ts.variables['T'].value"], {}), "(ts.variables['T'].value)\n", (13075, 13100), True, 'import numpy as np\n'), ((14118, 14133), 'numpy.array', 'np.array', (['dtsec'], {}), '(dtsec)\n', (14126, 14133), True, 'import numpy as np\n'), ((14372, 14406), 'numpy.array', 'np.array', (['self.data[variable_name]'], {}), '(self.data[variable_name])\n', (14380, 14406), True, 'import numpy as np\n'), ((15526, 15553), 'numpy.array', 'np.array', (["ts.variables['K']"], {}), "(ts.variables['K'])\n", (15534, 15553), True, 'import numpy as np\n'), ((16187, 16214), 'numpy.array', 'np.array', (["ts.variables['k']"], {}), "(ts.variables['k'])\n", (16195, 16214), True, 'import numpy as np\n'), 
((16482, 16568), 'numpy.abs', 'np.abs', (["(ts.variables['S'] + ts.variables['S_headwall'] + ts.variables['S_inside'])"], {}), "(ts.variables['S'] + ts.variables['S_headwall'] + ts.variables[\n 'S_inside'])\n", (16488, 16568), True, 'import numpy as np\n'), ((17407, 17436), 'datetime.datetime', 'datetime.datetime', (['(2012)', '(1)', '(1)'], {}), '(2012, 1, 1)\n', (17424, 17436), False, 'import datetime\n'), ((2027, 2068), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': 'internal_time'}), '(seconds=internal_time)\n', (2045, 2068), False, 'import datetime\n'), ((3062, 3125), 'fipy.CellVariable', 'CellVariable', ([], {'name': 'variable', 'mesh': 'self.mesh.mesh', 'value': 'default'}), '(name=variable, mesh=self.mesh.mesh, value=default)\n', (3074, 3125), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((3199, 3262), 'fipy.FaceVariable', 'FaceVariable', ([], {'name': 'variable', 'mesh': 'self.mesh.mesh', 'value': 'default'}), '(name=variable, mesh=self.mesh.mesh, value=default)\n', (3211, 3262), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((6142, 6174), 'numpy.copy', 'np.copy', (['self.variables[v].value'], {}), '(self.variables[v].value)\n', (6149, 6174), True, 'import numpy as np\n'), ((8402, 8477), 'boundary.BoundaryConditionCollection1D', 'BoundaryConditionCollection1D', ([], {'bc_headwall': 'bc_headwall', 'bc_inside': 'bc_inside'}), '(bc_headwall=bc_headwall, bc_inside=bc_inside)\n', (8431, 8477), False, 'from boundary import BoundaryConditionCollection1D\n'), ((8647, 8685), 'fipy.TransientTerm', 'TransientTerm', ([], {'var': "self.variables['h']"}), "(var=self.variables['h'])\n", (8660, 8685), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((14618, 14653), 'scipy.interpolate.interp1d', 'interp1d', (['dates_int', 'vararr'], {'axis': '(0)'}), '(dates_int, vararr, axis=0)\n', (14626, 14653), False, 
'from scipy.interpolate import interp1d\n'), ((14969, 15007), 'scipy.interpolate.interp1d', 'interp1d', (['self._depths', 'vararr'], {'axis': '(1)'}), '(self._depths, vararr, axis=1)\n', (14977, 15007), False, 'from scipy.interpolate import interp1d\n'), ((17166, 17219), 'datetime.datetime', 'datetime.datetime', (['d0.year', 'd0.month', 'd0.day', 'd0.hour'], {}), '(d0.year, d0.month, d0.day, d0.hour)\n', (17183, 17219), False, 'import datetime\n'), ((17243, 17275), 'datetime.timedelta', 'datetime.timedelta', ([], {'seconds': '(3600)'}), '(seconds=3600)\n', (17261, 17275), False, 'import datetime\n'), ((17660, 17690), 'fipy.Variable', 'Variable', ([], {'value': 'values_inp[vj]'}), '(value=values_inp[vj])\n', (17668, 17690), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((18740, 18773), 'fipy.Variable', 'Variable', ([], {'value': 'values_inp[vj][0]'}), '(value=values_inp[vj][0])\n', (18748, 18773), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n'), ((19082, 19131), 'numpy.interp', 'np.interp', (['t_rel', 'self.t_inp_rel', 'self.values[vj]'], {}), '(t_rel, self.t_inp_rel, self.values[vj])\n', (19091, 19131), True, 'import numpy as np\n'), ((15650, 15681), 'numpy.array', 'np.array', (["(K / ts.variables['C'])"], {}), "(K / ts.variables['C'])\n", (15658, 15681), True, 'import numpy as np\n'), ((16394, 16425), 'numpy.array', 'np.array', (["(K / ts.variables['c'])"], {}), "(K / ts.variables['c'])\n", (16402, 16425), True, 'import numpy as np\n'), ((16596, 16621), 'numpy.abs', 'np.abs', (["ts.variables['h']"], {}), "(ts.variables['h'])\n", (16602, 16621), True, 'import numpy as np\n'), ((18530, 18549), 'numpy.array', 'np.array', (['t_inp_rel'], {}), '(t_inp_rel)\n', (18538, 18549), True, 'import numpy as np\n'), ((11913, 11932), 'numpy.mean', 'np.mean', (['time_steps'], {}), '(time_steps)\n', (11920, 11932), True, 'import numpy as np\n'), ((16752, 16787), 'numpy.array', 
'np.array', (["(ts.variables['h'] / denom)"], {}), "(ts.variables['h'] / denom)\n", (16760, 16787), True, 'import numpy as np\n'), ((8709, 8774), 'fipy.DiffusionTerm', 'DiffusionTerm', ([], {'coeff': "self.variables['k']", 'var': "self.variables['T']"}), "(coeff=self.variables['k'], var=self.variables['T'])\n", (8722, 8774), False, 'from fipy import Variable, FaceVariable, CellVariable, TransientTerm, DiffusionTerm\n')]
# # Copyright (c) 2022 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # -*- coding: utf-8 -*- """ The functions in this file interact with the VPP API to retrieve certain interface metadata. Its base class will never change state. See the derived classes VPPApiDumper() and VPPApiApplier() """ import os import fnmatch import logging import socket from vpp_papi import VPPApiClient class VPPApi: """The VPPApi class is a base class that abstracts the vpp_papi.""" def __init__( self, vpp_api_socket="/run/vpp/api.sock", vpp_json_dir="/usr/share/vpp/api/", clientname="vppcfg", ): self.logger = logging.getLogger("vppcfg.vppapi") self.logger.addHandler(logging.NullHandler()) if not os.path.exists(vpp_api_socket): self.logger.error(f"VPP api socket file not found: {vpp_api_socket}") if not os.path.isdir(vpp_json_dir): self.logger.error(f"VPP api json directory not found: {vpp_json_dir}") self.vpp_api_socket = vpp_api_socket self.vpp_json_dir = vpp_json_dir self.connected = False self.clientname = clientname self.vpp = None self.cache = self.cache_clear() self.cache_read = False self.lcp_enabled = False def connect(self): """Connect to the VPP Dataplane, if we're not already connected""" if self.connected: return True # construct a list of all the json api files jsonfiles = [] for root, _dirnames, filenames in os.walk(self.vpp_json_dir): for filename in fnmatch.filter(filenames, "*.api.json"): jsonfiles.append(os.path.join(root, filename)) if not jsonfiles: self.logger.error("no json 
api files found") return False self.vpp = VPPApiClient(apifiles=jsonfiles, server_address=self.vpp_api_socket) try: self.logger.debug("Connecting to VPP") self.vpp.connect(self.clientname) except: return False # pylint: disable=no-member api_response = self.vpp.api.show_version() self.logger.info(f"VPP version is {api_response.version}") self.connected = True return True def disconnect(self): """Disconnect from the VPP dataplane, if we are still connected.""" if not self.connected: return True self.vpp.disconnect() self.logger.debug("Disconnected from VPP") self.connected = False return True def cache_clear(self): """Remove the cached VPP configuration elements and return an empty dictionary""" self.cache_read = False return { "lcps": {}, "interface_names": {}, "interfaces": {}, "interface_addresses": {}, "bondethernets": {}, "bondethernet_members": {}, "bridgedomains": {}, "vxlan_tunnels": {}, "l2xcs": {}, "taps": {}, } def cache_remove_lcp(self, lcpname): """Removes the LCP and TAP interface, identified by lcpname, from the VPP config cache""" for _idx, lcp in self.cache["lcps"].items(): if lcp.host_if_name == lcpname: ifname = self.cache["interfaces"][lcp.host_sw_if_index].interface_name del self.cache["lcps"][lcp.phy_sw_if_index] return self.cache_remove_interface(ifname) self.logger.warning( f"Trying to remove an LCP which is not in the config: {lcpname}" ) return False def cache_remove_bondethernet_member(self, ifname): """Removes the bonderthernet member interface, identified by name, from the VPP config cache""" if not ifname in self.cache["interface_names"]: self.logger.warning( f"Trying to remove a bondethernet member interface which is not in the config: {ifname}" ) return False iface = self.cache["interface_names"][ifname] for bond_idx, members in self.cache["bondethernet_members"].items(): if iface.sw_if_index in members: self.cache["bondethernet_members"][bond_idx].remove(iface.sw_if_index) return True def cache_remove_l2xc(self, ifname): """Remvoes 
the l2xc from the VPP config cache""" if not ifname in self.cache["interface_names"]: self.logger.warning( f"Trying to remove an L2XC which is not in the config: {ifname}" ) return False iface = self.cache["interface_names"][ifname] self.cache["l2xcs"].pop(iface.sw_if_index, None) return True def cache_remove_vxlan_tunnel(self, ifname): """Removes a vxlan_tunnel from the VPP config cache""" if not ifname in self.cache["interface_names"]: self.logger.warning( f"Trying to remove a VXLAN Tunnel which is not in the config: {ifname}" ) return False iface = self.cache["interface_names"][ifname] self.cache["vxlan_tunnels"].pop(iface.sw_if_index, None) return True def cache_remove_interface(self, ifname): """Removes the interface, identified by name, from the VPP config cache""" if not ifname in self.cache["interface_names"]: self.logger.warning( f"Trying to remove an interface which is not in the config: {ifname}" ) return False iface = self.cache["interface_names"][ifname] del self.cache["interfaces"][iface.sw_if_index] if len(self.cache["interface_addresses"][iface.sw_if_index]) > 0: self.logger.warning(f"Not all addresses were removed on {ifname}") del self.cache["interface_addresses"][iface.sw_if_index] del self.cache["interface_names"][ifname] ## Use my_dict.pop('key', None), as it allows 'key' to be absent if iface.sw_if_index in self.cache["bondethernet_members"]: if len(self.cache["bondethernet_members"][iface.sw_if_index]) != 0: self.logger.warning( f"When removing BondEthernet {ifname}, its members are not empty: {self.cache['bondethernet_members'][iface.sw_if_index]}" ) else: del self.cache["bondethernet_members"][iface.sw_if_index] self.cache["bondethernets"].pop(iface.sw_if_index, None) self.cache["taps"].pop(iface.sw_if_index, None) return True def readconfig(self): """Read the configuration out of a running VPP Dataplane and put it into a VPP config cache""" # pylint: disable=no-member if not self.connected and not self.connect(): self.logger.error("Could 
not connect to VPP") return False self.cache_read = False ## Workaround LCPng and linux-cp, in order. self.lcp_enabled = False try: self.logger.debug("Retrieving LCPs") api_response = self.vpp.api.lcp_itf_pair_get() if isinstance(api_response, tuple) and api_response[0].retval == 0: for lcp in api_response[1]: if lcp.phy_sw_if_index > 65535 or lcp.host_sw_if_index > 65535: ## Work around endianness bug: https://gerrit.fd.io/r/c/vpp/+/35479 ## TODO(pim) - remove this when 22.06 ships lcp = lcp._replace( phy_sw_if_index=socket.ntohl(lcp.phy_sw_if_index) ) lcp = lcp._replace( host_sw_if_index=socket.ntohl(lcp.host_sw_if_index) ) lcp = lcp._replace(vif_index=socket.ntohl(lcp.vif_index)) self.logger.warning( f"LCP workaround for endianness issue on {lcp.host_if_name}" ) self.cache["lcps"][lcp.phy_sw_if_index] = lcp self.lcp_enabled = True except: self.logger.warning( "linux-cp not found, will not reconcile Linux Control Plane" ) self.logger.debug("Retrieving interfaces") api_response = self.vpp.api.sw_interface_dump() for iface in api_response: self.cache["interfaces"][iface.sw_if_index] = iface self.cache["interface_names"][iface.interface_name] = iface self.cache["interface_addresses"][iface.sw_if_index] = [] self.logger.debug(f"Retrieving IPv4 addresses for {iface.interface_name}") ipr = self.vpp.api.ip_address_dump( sw_if_index=iface.sw_if_index, is_ipv6=False ) for addr in ipr: self.cache["interface_addresses"][iface.sw_if_index].append( str(addr.prefix) ) self.logger.debug(f"Retrieving IPv6 addresses for {iface.interface_name}") ipr = self.vpp.api.ip_address_dump( sw_if_index=iface.sw_if_index, is_ipv6=True ) for addr in ipr: self.cache["interface_addresses"][iface.sw_if_index].append( str(addr.prefix) ) self.logger.debug("Retrieving bondethernets") api_response = self.vpp.api.sw_bond_interface_dump() for iface in api_response: self.cache["bondethernets"][iface.sw_if_index] = iface self.cache["bondethernet_members"][iface.sw_if_index] = [] for member in 
self.vpp.api.sw_member_interface_dump( sw_if_index=iface.sw_if_index ): self.cache["bondethernet_members"][iface.sw_if_index].append( member.sw_if_index ) self.logger.debug("Retrieving bridgedomains") api_response = self.vpp.api.bridge_domain_dump() for bridge in api_response: self.cache["bridgedomains"][bridge.bd_id] = bridge self.logger.debug("Retrieving vxlan_tunnels") api_response = self.vpp.api.vxlan_tunnel_v2_dump() for vxlan in api_response: self.cache["vxlan_tunnels"][vxlan.sw_if_index] = vxlan self.logger.debug("Retrieving L2 Cross Connects") api_response = self.vpp.api.l2_xconnect_dump() for l2xc in api_response: self.cache["l2xcs"][l2xc.rx_sw_if_index] = l2xc self.logger.debug("Retrieving TAPs") api_response = self.vpp.api.sw_interface_tap_v2_dump() for tap in api_response: self.cache["taps"][tap.sw_if_index] = tap self.cache_read = True return self.cache_read def phys_exist(self, ifname_list): """Return True if all interfaces in the `ifname_list` exist as physical interface names in VPP. 
Return False otherwise.""" ret = True for ifname in ifname_list: if not ifname in self.cache["interface_names"]: self.logger.warning(f"Interface {ifname} does not exist in VPP") ret = False return ret def get_sub_interfaces(self): """Return all interfaces which have a sub-id and one or more tags""" subints = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].sub_id > 0 and self.cache["interfaces"][x].sub_number_of_tags > 0 ] return subints def get_qinx_interfaces(self): """Return all interfaces which have a sub-id and a non-zero inner vlan tag""" qinx_subints = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].sub_id > 0 and self.cache["interfaces"][x].sub_inner_vlan_id > 0 ] return qinx_subints def get_dot1x_interfaces(self): """Return all interfaces which have only an outer vlan tag (dot1q/dot1ad)""" dot1x_subints = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].sub_id > 0 and self.cache["interfaces"][x].sub_inner_vlan_id == 0 ] return dot1x_subints def get_loopbacks(self): """Return all interfaces of VPP type 'Loopback'""" loopbacks = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].interface_dev_type == "Loopback" ] return loopbacks def get_phys(self): """Return all interfaces for which the super interface has the same sw_if_index and aren't known to be virtual interfaces""" phys = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].sw_if_index == self.cache["interfaces"][x].sup_sw_if_index and self.cache["interfaces"][x].interface_dev_type not in ["virtio", "BVI", "Loopback", "VXLAN", "local", "bond"] ] return phys def get_bondethernets(self): """Return all bondethernet interfaces""" bonds = [ self.cache["bondethernets"][x].interface_name for x in 
self.cache["bondethernets"] ] return bonds def get_vxlan_tunnels(self): """Return all vxlan_tunnel interfaces""" vxlan_tunnels = [ self.cache["interfaces"][x].interface_name for x in self.cache["interfaces"] if self.cache["interfaces"][x].interface_dev_type in ["VXLAN"] ] return vxlan_tunnels def get_lcp_by_interface(self, sw_if_index): """Return the LCP config cache for the interface given by sw_if_index""" for _idx, lcp in self.cache["lcps"].items(): if lcp.phy_sw_if_index == sw_if_index: return lcp return None def tap_is_lcp(self, tap_ifname): """Returns True if the given tap_ifname is a TAP interface belonging to an LCP, or False otherwise.""" if not tap_ifname in self.cache["interface_names"]: return False vpp_iface = self.cache["interface_names"][tap_ifname] if not vpp_iface.interface_dev_type == "virtio": return False for _idx, lcp in self.cache["lcps"].items(): if vpp_iface.sw_if_index == lcp.host_sw_if_index: return True return False
[ "fnmatch.filter", "os.path.isdir", "os.walk", "os.path.exists", "logging.NullHandler", "vpp_papi.VPPApiClient", "os.path.join", "socket.ntohl", "logging.getLogger" ]
[((1155, 1189), 'logging.getLogger', 'logging.getLogger', (['"""vppcfg.vppapi"""'], {}), "('vppcfg.vppapi')\n", (1172, 1189), False, 'import logging\n'), ((2054, 2080), 'os.walk', 'os.walk', (['self.vpp_json_dir'], {}), '(self.vpp_json_dir)\n', (2061, 2080), False, 'import os\n'), ((2343, 2411), 'vpp_papi.VPPApiClient', 'VPPApiClient', ([], {'apifiles': 'jsonfiles', 'server_address': 'self.vpp_api_socket'}), '(apifiles=jsonfiles, server_address=self.vpp_api_socket)\n', (2355, 2411), False, 'from vpp_papi import VPPApiClient\n'), ((1221, 1242), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (1240, 1242), False, 'import logging\n'), ((1260, 1290), 'os.path.exists', 'os.path.exists', (['vpp_api_socket'], {}), '(vpp_api_socket)\n', (1274, 1290), False, 'import os\n'), ((1389, 1416), 'os.path.isdir', 'os.path.isdir', (['vpp_json_dir'], {}), '(vpp_json_dir)\n', (1402, 1416), False, 'import os\n'), ((2110, 2149), 'fnmatch.filter', 'fnmatch.filter', (['filenames', '"""*.api.json"""'], {}), "(filenames, '*.api.json')\n", (2124, 2149), False, 'import fnmatch\n'), ((2184, 2212), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (2196, 2212), False, 'import os\n'), ((8102, 8135), 'socket.ntohl', 'socket.ntohl', (['lcp.phy_sw_if_index'], {}), '(lcp.phy_sw_if_index)\n', (8114, 8135), False, 'import socket\n'), ((8251, 8285), 'socket.ntohl', 'socket.ntohl', (['lcp.host_sw_if_index'], {}), '(lcp.host_sw_if_index)\n', (8263, 8285), False, 'import socket\n'), ((8365, 8392), 'socket.ntohl', 'socket.ntohl', (['lcp.vif_index'], {}), '(lcp.vif_index)\n', (8377, 8392), False, 'import socket\n')]
#!/usr/bin/python3.6 import datetime import time import requests from twilio.rest import Client from config import settings class EventbriteWatcher(object): """ This class is used to watch Eventbrite for new events at a specified organization. """ def __init__(self, watch_time, organizer_id, keyword): """ Args: __watch_time__: Amount of time to watch for the event. __organizer_id__: The ID of the organization to watch. __keyword__: Keyword to alert on. """ self.watch_time = watch_time self.organizer_id = organizer_id self.keyword = keyword def build_headers(self, oauth_token): token = "Bearer {}".format(oauth_token) return {'Authorization': token} def build_payload(self, organizer_id): return {'organizer.id': organizer_id} def check_events(self, events, keyword): results = [] for event in events: event_name = event['name']['text'] event_url = event.get('url', None) if keyword.lower() in event_name.lower(): results.append(event_url) return results def fetch_events(self): response = requests.get( settings.BASE_URL, headers=self.build_headers(settings.OAUTH_TOKEN), params=self.build_payload(self.organizer_id), ) response_status_code = response.status_code if response_status_code != 200: self.send_sms( 'Bad response status code: {}'.format(response_status_code) ) return None r = response.json() return r.get('events', None) def get_end_time(self, watch_time): return datetime.datetime.now() + \ datetime.timedelta(minutes=watch_time) def send_sms(self, message): twilio = settings.TWILIO client = Client( twilio.get('account_sid', None), twilio.get('auth_token', None), ) response = client.messages.create( to=twilio.get('to_number', None), from_=twilio.get('from_number', None), body=message, ) return response def start_watching(self): self.send_sms('Starting to watch...') results_found = 0 no_events_found = 0 end_time = self.get_end_time(self.watch_time) while results_found < 3 and datetime.datetime.now() < end_time: events = self.fetch_events() if not events: 
no_events_found += 1 if no_events_found > 24: self.send_sms('No events found in past 2 hours...') no_events_found = 0 else: results = self.check_events(events, self.keyword) if results: results_found += 1 for result in results: self.send_sms(result) time.sleep(300)
[ "datetime.datetime.now", "datetime.timedelta", "time.sleep" ]
[((1749, 1772), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1770, 1772), False, 'import datetime\n'), ((1789, 1827), 'datetime.timedelta', 'datetime.timedelta', ([], {'minutes': 'watch_time'}), '(minutes=watch_time)\n', (1807, 1827), False, 'import datetime\n'), ((2986, 3001), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (2996, 3001), False, 'import time\n'), ((2440, 2463), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2461, 2463), False, 'import datetime\n')]
from rubrix import _client_instance as client from rubrix.metrics import helpers from rubrix.metrics.models import MetricSummary def f1(name: str) -> MetricSummary: """Computes the single label f1 metric for a dataset Args: name: The dataset name. Returns: The f1 metric summary Examples: >>> from rubrix.metrics.text_classification import f1 >>> summary = f1(name="example-dataset") >>> summary.visualize() # will plot a bar chart with results >>> summary.data # returns the raw result data """ current_client = client() metric = current_client.calculate_metric(name, metric="F1") return MetricSummary.new_summary( data=metric.results, visualization=lambda: helpers.bar( metric.results, title=metric.description, ), ) def f1_multilabel(name: str) -> MetricSummary: """Computes the multi-label label f1 metric for a dataset Args: name: The dataset name. Returns: The f1 metric summary Examples: >>> from rubrix.metrics.text_classification import f1_multilabel >>> summary = f1_multilabel(name="example-dataset") >>> summary.visualize() # will plot a bar chart with results >>> summary.data # returns the raw result data """ current_client = client() metric = current_client.calculate_metric(name, metric="MultiLabelF1") return MetricSummary.new_summary( data=metric.results, visualization=lambda: helpers.bar( { "micro": metric.results["micro"], "macro": metric.results["macro"], **metric.results["per_label"], } if metric.results else metric.results, title=metric.description, ), )
[ "rubrix._client_instance", "rubrix.metrics.helpers.bar" ]
[((602, 610), 'rubrix._client_instance', 'client', ([], {}), '()\n', (608, 610), True, 'from rubrix import _client_instance as client\n'), ((1380, 1388), 'rubrix._client_instance', 'client', ([], {}), '()\n', (1386, 1388), True, 'from rubrix import _client_instance as client\n'), ((773, 826), 'rubrix.metrics.helpers.bar', 'helpers.bar', (['metric.results'], {'title': 'metric.description'}), '(metric.results, title=metric.description)\n', (784, 826), False, 'from rubrix.metrics import helpers\n'), ((1561, 1747), 'rubrix.metrics.helpers.bar', 'helpers.bar', (["({'micro': metric.results['micro'], 'macro': metric.results['macro'], **\n metric.results['per_label']} if metric.results else metric.results)"], {'title': 'metric.description'}), "({'micro': metric.results['micro'], 'macro': metric.results[\n 'macro'], **metric.results['per_label']} if metric.results else metric.\n results, title=metric.description)\n", (1572, 1747), False, 'from rubrix.metrics import helpers\n')]
import FWCore.ParameterSet.Config as cms from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy from Configuration.Eras.Modifier_run2_nanoAOD_94X2016_cff import run2_nanoAOD_94X2016 from PhysicsTools.NanoAOD.common_cff import Var, P4Vars from PhysicsTools.NanoAOD.jets_cff import jetTable from PhysicsTools.PatAlgos.tools.jetCollectionTools import GenJetAdder, RecoJetAdder import copy # # By default, these collections are saved in NanoAODs: # - ak4gen (GenJet in NanoAOD) # - ak8gen (GenJetAK8 in NanoAOD) # Below is a list of genjets that we can save in NanoAOD. Set # "enabled" to true if you want to store the jet collection config_genjets = [ { "jet" : "ak5gen", "enabled" : False, }, { "jet" : "ak6gen", "enabled" : False, }, { "jet" : "ak7gen", "enabled" : False, }, { "jet" : "ak9gen", "enabled" : False, }, { "jet" : "ak10gen", "enabled" : False, }, ] config_genjets = list(filter(lambda k: k['enabled'], config_genjets)) # # GenJets info in NanoAOD # nanoInfo_genjets = { "ak5gen" : { "name" : "GenJetAK5", "doc" : "AK5 jets", }, "ak6gen" : { "name" : "GenJetAK6", "doc" : "AK6 jets", }, "ak7gen" : { "name" : "GenJetAK7", "doc" : "AK9 jets", }, "ak9gen" : { "name" : "GenJetAK9", "doc" : "AK9 jets", }, "ak10gen" : { "name" : "GenJetAK10", "doc" : "AK10 jets", }, } # # By default, these collections are saved in NanoAODs: # - ak4pfchs (Jet in NanoAOD) # - ak8pfpuppi (FatJet in NanoAOD) # By default, the ak4pfchs (Jet) and ak8pfpuppi (FatJet) collections # are saved in NanoAODs. # Below is a list of recojets that we can save in NanoAOD. Set "enabled" # to true if you want to store the recojet collection. 
# config_recojets = [ { "jet" : "ak4pfpuppi", "enabled" : True, "inputCollection" : "slimmedJetsPuppi", #Exist in MiniAOD "genJetsCollection": "slimmedGenJets", }, { "jet" : "ak4calo", "enabled" : True, "inputCollection" : "slimmedCaloJets", #Exist in MiniAOD "genJetsCollection": "slimmedGenJets", }, { "jet" : "ak4pf", "enabled" : True, "inputCollection" : "", "genJetsCollection": "slimmedGenJets", }, { "jet" : "ak8pf", "enabled" : True, "inputCollection" : "", "genJetsCollection": "slimmedGenJetsAK8", }, { "jet" : "ak8pfchs", "enabled" : True, "inputCollection" : "", "genJetsCollection": "slimmedGenJetsAK8", }, { "jet" : "ak6pf", "enabled" : False, "inputCollection" : "", "genJetsCollection": "AK6GenJetsNoNu", }, { "jet" : "ak10pf", "enabled" : False, "inputCollection" : "", "genJetsCollection": "AK10GenJetsNoNu", }, ] config_recojets = list(filter(lambda k: k['enabled'], config_recojets)) # # RecoJets info in NanoAOD # nanoInfo_recojets = { "ak4pfpuppi" : { "name" : "JetPUPPI", "doc" : "AK4PFPUPPI jets", }, "ak4calo" : { "name": "JetCalo", "doc" : "AK4Calo jets", }, "ak4pf" : { "name": "JetPF", "doc" : "AK4PF jets", }, "ak8pf" : { "name": "FatJetPF", "doc" : "AK8PF jets", }, "ak8pfchs" : { "name" : "FatJetCHS", "doc" : "AK8PFCHS jets", }, "ak6pf" : { "name": "JetAK6PF", "doc" : "AK6PF jets", }, "ak10pf" : { "name" : "FatJetAK10PF", "doc" : "AK10PF jets", }, } # # The reco jet names already exists # in NanoAOD. # recojetNameInNano = [ "Jet", "FatJet" ] # # The gen jet names already exists # in NanoAOD. 
# genjetNameInNano = [ "GenJet", "GenJetAK8" ] JETVARS = cms.PSet(P4Vars, HFHEF = Var("HFHadronEnergyFraction()", float, doc = "energy fraction in forward hadronic calorimeter", precision = 6), HFEMEF = Var("HFEMEnergyFraction()", float, doc = "energy fraction in forward EM calorimeter", precision = 6), area = jetTable.variables.area, chHEF = jetTable.variables.chHEF, neHEF = jetTable.variables.neHEF, chEmEF = jetTable.variables.chEmEF, neEmEF = jetTable.variables.neEmEF, muEF = jetTable.variables.muEF, rawFactor = jetTable.variables.rawFactor, jetId = jetTable.variables.jetId, jercCHPUF = jetTable.variables.jercCHPUF, jercCHF = jetTable.variables.jercCHF, ) for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016: modifier.toModify(JETVARS, jetId = Var("userInt('tightId')*2+userInt('looseId')", int, doc = "Jet ID flags bit1 is loose, bit2 is tight") ) #============================================ # # TableGenJetAdder # #============================================ class TableGenJetAdder(object): """ Tool to store gen jet variables in NanoAOD for customized gen jet collections. 
""" def __init__(self): self.main = [] def getSequence(self, proc): """ Tool to add """ tasks = self.main resultSequence = cms.Sequence() for idx, task in enumerate(tasks): if idx == 0: resultSequence = cms.Sequence(getattr(proc, task)) else: resultSequence.insert(idx, getattr(proc, task)) return resultSequence def addTable(self, proc, genJetInfo): currentTasks = [] print("custom_jme_cff::TableGenJetAdder::addTable: Adding Table for GenJet Collection: {}".format(genJetInfo.jet)) name = nanoInfo_genjets[genJetInfo.jet]["name"] doc = nanoInfo_genjets[genJetInfo.jet]["doc"] if name in genjetNameInNano: raise RuntimeError('GenJet collection name (%s) taken in NanoAOD for %s' %(name, genJetInfo.jet)) # # GenJet Table # table = "{}Table".format(genJetInfo.jetTagName) genJetsCollection = "{}{}{}".format(genJetInfo.jetAlgo.upper(), genJetInfo.jetSize, 'GenJetsNoNu') setattr(proc, table, cms.EDProducer("SimpleCandidateFlatTableProducer", src = cms.InputTag(genJetsCollection), cut = cms.string(""), name = cms.string(name), doc = cms.string('{} (generator level)'.format(doc)), singleton = cms.bool(False), extension = cms.bool(False), variables = cms.PSet(P4Vars, area = jetTable.variables.area, ), ) ) currentTasks.append(table) # # GenJet Flavour Table # genFlavour = "{}Flavour".format(genJetInfo.jetTagName) genFlavourTable = "{}Table".format(genFlavour) if genFlavourTable in self.main: raise ValueError("Step '%s' already implemented" % genFlavourTable) setattr(proc, genFlavourTable, cms.EDProducer("GenJetFlavourTableProducer", name = cms.string(name), src = cms.InputTag(genJetsCollection), cut = cms.string(""), deltaR = cms.double(0.1), jetFlavourInfos = cms.InputTag(genFlavour), ) ) currentTasks.append(genFlavourTable) self.main.extend(currentTasks) #============================================ # # TableRecoJetAdder # #============================================ class TableRecoJetAdder(object): """ Tool to store reco jet variables in NanoAOD for customized reco jet 
collections. """ def __init__(self): self.main = [] def getSequence(self, proc): tasks = self.main resultSequence = cms.Sequence() for idx, task in enumerate(tasks): if idx == 0: resultSequence = cms.Sequence(getattr(proc, task)) else: resultSequence.insert(idx, getattr(proc, task)) return resultSequence def addTable(self, proc, recoJetInfo): currentTasks = [] print("custom_jme_cff::TableRecoJetAdder::addTable: Adding Table for Reco Jet Collection: {}".format(recoJetInfo.jet)) name = nanoInfo_recojets[recoJetInfo.jet]["name"] doc = nanoInfo_recojets[recoJetInfo.jet]["doc"] if name in recojetNameInNano: raise RuntimeError('RecoJet collection name (%s) taken in NanoAOD for %s' %(name, recoJetInfo.jet)) table = "{}Table".format(recoJetInfo.jetTagName) if recoJetInfo.skipUserData: if recoJetInfo.doCalo: tableContents = cms.PSet( P4Vars, area = jetTable.variables.area, rawFactor = jetTable.variables.rawFactor, emf = Var("emEnergyFraction()", float, doc = "electromagnetic energy fraction", precision = 10), ) else: tableContents = cms.PSet( P4Vars, area = jetTable.variables.area, rawFactor = jetTable.variables.rawFactor, ) else: tableContents = JETVARS.clone() updatedJets = "updatedJets{}".format(recoJetInfo.jetTagName) setattr(proc, table, cms.EDProducer("SimpleCandidateFlatTableProducer", src = cms.InputTag(updatedJets), cut = cms.string(""), name = cms.string(name), doc = cms.string(doc), singleton = cms.bool(False), extension = cms.bool(False), variables = tableContents, ) ) currentTasks.append(table) tightJetIdLepVeto = "tightJetIdLepVeto{}".format(recoJetInfo.jetTagName) if not recoJetInfo.skipUserData: altTasks = copy.deepcopy(currentTasks) for idx, task in enumerate(altTasks): if task == tightJetIdLepVeto: altTasks[idx] = looseJetId for modifier in run2_miniAOD_80XLegacy, run2_nanoAOD_94X2016: modifier.toReplaceWith(currentTasks, altTasks) self.main.extend(currentTasks) def PrepJMECustomNanoAOD(process): # # Additional variables to AK4GenJets # 
process.genJetTable.variables.area = JETVARS.area # # Additional variables to AK8GenJets # process.genJetAK8Table.variables.area = JETVARS.area # # Additional variables for AK4PFCHS # process.jetTable.variables.HFHEF = JETVARS.HFHEF process.jetTable.variables.HFEMEF = JETVARS.HFEMEF # # Additional variables to AK8PFPUPPI # # These variables are not stored for AK8PFCHS (slimmedJetsAK8) # in MiniAOD if their pt < 170 GeV. Hence the conditional fill. # process.fatJetTable.variables.chHEF = Var("?isPFJet()?chargedHadronEnergyFraction():-1", float, doc="charged Hadron Energy Fraction", precision = 6) process.fatJetTable.variables.neHEF = Var("?isPFJet()?neutralHadronEnergyFraction():-1", float, doc="neutral Hadron Energy Fraction", precision = 6) process.fatJetTable.variables.chEmEF = Var("?isPFJet()?chargedEmEnergyFraction():-1", float, doc="charged Electromagnetic Energy Fraction", precision = 6) process.fatJetTable.variables.neEmEF = Var("?isPFJet()?neutralEmEnergyFraction():-1", float, doc="neutral Electromagnetic Energy Fraction", precision = 6) process.fatJetTable.variables.muEF = Var("?isPFJet()?muonEnergyFraction():-1", float, doc="muon Energy Fraction", precision = 6) process.fatJetTable.variables.HFHEF = Var("?isPFJet()?HFHadronEnergyFraction():-1", float, doc="energy fraction in forward hadronic calorimeter", precision = 6) process.fatJetTable.variables.HFEMEF = Var("?isPFJet()?HFEMEnergyFraction():-1", float, doc="energy fraction in forward EM calorimeter", precision = 6) # # # process.jercVarsFatJet = process.jercVars.clone( srcJet = "updatedJetsAK8", maxDR = 0.8, ) process.jetSequence.insert(process.jetSequence.index(process.updatedJetsAK8WithUserData), process.jercVarsFatJet) process.updatedJetsAK8WithUserData.userFloats.jercCHPUF = cms.InputTag( "%s:chargedHadronPUEnergyFraction" % process.jercVarsFatJet.label() ) process.updatedJetsAK8WithUserData.userFloats.jercCHF = cms.InputTag( "%s:chargedHadronCHSEnergyFraction" % process.jercVarsFatJet.label() ) 
process.fatJetTable.variables.jercCHPUF = JETVARS.jercCHPUF process.fatJetTable.variables.jercCHF = JETVARS.jercCHF # # Remove any pT cuts. # process.finalJets.cut = "" # 15 -> 10 process.finalJetsAK8.cut = "" # 170 -> 170 process.genJetTable.cut = "" # 10 -> 8 process.genJetFlavourTable.cut = "" # 10 -> 8 process.genJetAK8Table.cut = "" # 100 -> 80 process.genJetAK8FlavourTable.cut = "" # 100 -> 80 ###################################################################################################################### # # Add GenJets to NanoAOD # genJA = GenJetAdder() tableGenJA = TableGenJetAdder() for jetConfig in config_genjets: cfg = { k : v for k, v in jetConfig.items() if k != "enabled" } genJetInfo = genJA.addGenJetCollection(process, **cfg) tableGenJA.addTable(process, genJetInfo) process.nanoSequenceMC += genJA.getSequence(process) process.nanoSequenceMC += tableGenJA.getSequence(process) # # Add RecoJets to NanoAOD # recoJA = RecoJetAdder() tableRecoJA = TableRecoJetAdder() for jetConfig in config_recojets: cfg = { k : v for k, v in jetConfig.items() if k != "enabled" } recoJetInfo = recoJA.addRecoJetCollection(process, **cfg) tableRecoJA.addTable(process, recoJetInfo) process.nanoSequenceMC += recoJA.getSequence(process) process.nanoSequenceMC += tableRecoJA.getSequence(process)
[ "FWCore.ParameterSet.Config.string", "copy.deepcopy", "FWCore.ParameterSet.Config.bool", "FWCore.ParameterSet.Config.PSet", "FWCore.ParameterSet.Config.double", "PhysicsTools.PatAlgos.tools.jetCollectionTools.RecoJetAdder", "PhysicsTools.PatAlgos.tools.jetCollectionTools.GenJetAdder", "PhysicsTools.NanoAOD.common_cff.Var", "FWCore.ParameterSet.Config.Sequence", "FWCore.ParameterSet.Config.InputTag" ]
[((10283, 10396), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?chargedHadronEnergyFraction():-1"""', 'float'], {'doc': '"""charged Hadron Energy Fraction"""', 'precision': '(6)'}), "('?isPFJet()?chargedHadronEnergyFraction():-1', float, doc=\n 'charged Hadron Energy Fraction', precision=6)\n", (10286, 10396), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((10452, 10565), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?neutralHadronEnergyFraction():-1"""', 'float'], {'doc': '"""neutral Hadron Energy Fraction"""', 'precision': '(6)'}), "('?isPFJet()?neutralHadronEnergyFraction():-1', float, doc=\n 'neutral Hadron Energy Fraction', precision=6)\n", (10455, 10565), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((10621, 10739), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?chargedEmEnergyFraction():-1"""', 'float'], {'doc': '"""charged Electromagnetic Energy Fraction"""', 'precision': '(6)'}), "('?isPFJet()?chargedEmEnergyFraction():-1', float, doc=\n 'charged Electromagnetic Energy Fraction', precision=6)\n", (10624, 10739), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((10790, 10908), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?neutralEmEnergyFraction():-1"""', 'float'], {'doc': '"""neutral Electromagnetic Energy Fraction"""', 'precision': '(6)'}), "('?isPFJet()?neutralEmEnergyFraction():-1', float, doc=\n 'neutral Electromagnetic Energy Fraction', precision=6)\n", (10793, 10908), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((10959, 11052), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?muonEnergyFraction():-1"""', 'float'], {'doc': '"""muon Energy Fraction"""', 'precision': '(6)'}), "('?isPFJet()?muonEnergyFraction():-1', float, doc='muon Energy Fraction',\n precision=6)\n", (10962, 11052), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((11128, 11253), 
'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?HFHadronEnergyFraction():-1"""', 'float'], {'doc': '"""energy fraction in forward hadronic calorimeter"""', 'precision': '(6)'}), "('?isPFJet()?HFHadronEnergyFraction():-1', float, doc=\n 'energy fraction in forward hadronic calorimeter', precision=6)\n", (11131, 11253), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((11297, 11412), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""?isPFJet()?HFEMEnergyFraction():-1"""', 'float'], {'doc': '"""energy fraction in forward EM calorimeter"""', 'precision': '(6)'}), "('?isPFJet()?HFEMEnergyFraction():-1', float, doc=\n 'energy fraction in forward EM calorimeter', precision=6)\n", (11300, 11412), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((12595, 12608), 'PhysicsTools.PatAlgos.tools.jetCollectionTools.GenJetAdder', 'GenJetAdder', ([], {}), '()\n', (12606, 12608), False, 'from PhysicsTools.PatAlgos.tools.jetCollectionTools import GenJetAdder, RecoJetAdder\n'), ((13015, 13029), 'PhysicsTools.PatAlgos.tools.jetCollectionTools.RecoJetAdder', 'RecoJetAdder', ([], {}), '()\n', (13027, 13029), False, 'from PhysicsTools.PatAlgos.tools.jetCollectionTools import GenJetAdder, RecoJetAdder\n'), ((3873, 3984), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""HFHadronEnergyFraction()"""', 'float'], {'doc': '"""energy fraction in forward hadronic calorimeter"""', 'precision': '(6)'}), "('HFHadronEnergyFraction()', float, doc=\n 'energy fraction in forward hadronic calorimeter', precision=6)\n", (3876, 3984), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((3999, 4100), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""HFEMEnergyFraction()"""', 'float'], {'doc': '"""energy fraction in forward EM calorimeter"""', 'precision': '(6)'}), "('HFEMEnergyFraction()', float, doc=\n 'energy fraction in forward EM calorimeter', precision=6)\n", (4002, 4100), False, 'from PhysicsTools.NanoAOD.common_cff import 
Var, P4Vars\n'), ((5127, 5141), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', ([], {}), '()\n', (5139, 5141), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7499, 7513), 'FWCore.ParameterSet.Config.Sequence', 'cms.Sequence', ([], {}), '()\n', (7511, 7513), True, 'import FWCore.ParameterSet.Config as cms\n'), ((4627, 4732), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""userInt(\'tightId\')*2+userInt(\'looseId\')"""', 'int'], {'doc': '"""Jet ID flags bit1 is loose, bit2 is tight"""'}), '("userInt(\'tightId\')*2+userInt(\'looseId\')", int, doc=\n \'Jet ID flags bit1 is loose, bit2 is tight\')\n', (4630, 4732), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n'), ((9364, 9391), 'copy.deepcopy', 'copy.deepcopy', (['currentTasks'], {}), '(currentTasks)\n', (9377, 9391), False, 'import copy\n'), ((8589, 8680), 'FWCore.ParameterSet.Config.PSet', 'cms.PSet', (['P4Vars'], {'area': 'jetTable.variables.area', 'rawFactor': 'jetTable.variables.rawFactor'}), '(P4Vars, area=jetTable.variables.area, rawFactor=jetTable.variables\n .rawFactor)\n', (8597, 8680), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6073, 6104), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['genJetsCollection'], {}), '(genJetsCollection)\n', (6085, 6104), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6126, 6140), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (6136, 6140), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6162, 6178), 'FWCore.ParameterSet.Config.string', 'cms.string', (['name'], {}), '(name)\n', (6172, 6178), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6268, 6283), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (6276, 6283), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6305, 6320), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (6313, 6320), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6342, 
6388), 'FWCore.ParameterSet.Config.PSet', 'cms.PSet', (['P4Vars'], {'area': 'jetTable.variables.area'}), '(P4Vars, area=jetTable.variables.area)\n', (6350, 6388), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6831, 6847), 'FWCore.ParameterSet.Config.string', 'cms.string', (['name'], {}), '(name)\n', (6841, 6847), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6875, 6906), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['genJetsCollection'], {}), '(genJetsCollection)\n', (6887, 6906), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6934, 6948), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (6944, 6948), True, 'import FWCore.ParameterSet.Config as cms\n'), ((6976, 6991), 'FWCore.ParameterSet.Config.double', 'cms.double', (['(0.1)'], {}), '(0.1)\n', (6986, 6991), True, 'import FWCore.ParameterSet.Config as cms\n'), ((7019, 7043), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['genFlavour'], {}), '(genFlavour)\n', (7031, 7043), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8940, 8965), 'FWCore.ParameterSet.Config.InputTag', 'cms.InputTag', (['updatedJets'], {}), '(updatedJets)\n', (8952, 8965), True, 'import FWCore.ParameterSet.Config as cms\n'), ((8987, 9001), 'FWCore.ParameterSet.Config.string', 'cms.string', (['""""""'], {}), "('')\n", (8997, 9001), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9023, 9039), 'FWCore.ParameterSet.Config.string', 'cms.string', (['name'], {}), '(name)\n', (9033, 9039), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9061, 9076), 'FWCore.ParameterSet.Config.string', 'cms.string', (['doc'], {}), '(doc)\n', (9071, 9076), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9098, 9113), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (9106, 9113), True, 'import FWCore.ParameterSet.Config as cms\n'), ((9135, 9150), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (9143, 9150), 
True, 'import FWCore.ParameterSet.Config as cms\n'), ((8452, 8541), 'PhysicsTools.NanoAOD.common_cff.Var', 'Var', (['"""emEnergyFraction()"""', 'float'], {'doc': '"""electromagnetic energy fraction"""', 'precision': '(10)'}), "('emEnergyFraction()', float, doc='electromagnetic energy fraction',\n precision=10)\n", (8455, 8541), False, 'from PhysicsTools.NanoAOD.common_cff import Var, P4Vars\n')]
# Generated by Django 4.0.3 on 2022-03-13 15:43 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('transactions', '0009_wallets_date_created'), ] operations = [ migrations.RenameField( model_name='transactions', old_name='date', new_name='date_provided', ), ]
[ "django.db.migrations.RenameField" ]
[((234, 331), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""transactions"""', 'old_name': '"""date"""', 'new_name': '"""date_provided"""'}), "(model_name='transactions', old_name='date', new_name\n ='date_provided')\n", (256, 331), False, 'from django.db import migrations\n')]
import pytest from sciwing.datasets.seq_labeling.conll_yago_dataset import ConllYagoDatasetsManager from sciwing.datasets.seq_labeling.conll_yago_dataset import ConllYagoDataset import sciwing.constants as constants import pathlib from sciwing.tokenizers.word_tokenizer import WordTokenizer PATHS = constants.PATHS DATA_DIR = PATHS["DATA_DIR"] DATA_DIR = pathlib.Path(DATA_DIR) @pytest.fixture( params=["conll_yago_ner.train", "conll_yago_ner.dev", "conll_yago_ner.test"], scope="session", ) def conll_yago_dataset(request): train_filename = DATA_DIR.joinpath(request.param) dataset = ConllYagoDataset( filename=str(train_filename), tokenizers={"tokens": WordTokenizer(tokenizer="vanilla")}, column_names=["NER"], ) return dataset @pytest.fixture(scope="session") def conll_yago_dataset_manager(): train_filename = DATA_DIR.joinpath("conll_yago_ner.train") dev_filename = DATA_DIR.joinpath("conll_yago_ner.dev") test_filename = DATA_DIR.joinpath("conll_yago_ner.test") dataset_manager = ConllYagoDatasetsManager( train_filename=str(train_filename), dev_filename=str(dev_filename), test_filename=str(test_filename), ) return dataset_manager class TestConllYagoDataset: def test_get_lines_labels(self, conll_yago_dataset): dataset = conll_yago_dataset try: lines, labels = dataset.get_lines_labels() assert len(lines) > 0 assert len(labels) > 0 except: pytest.fail("Getting Lines and Labels failed") def test_labels_namespace(self, conll_yago_dataset): dataset = conll_yago_dataset lines, labels = dataset.get_lines_labels() for label in labels: namespaces = label.namespace assert len(namespaces) == 1 assert "NER" in namespaces def test_lines_labels_length(self, conll_yago_dataset): dataset = conll_yago_dataset lines, labels = dataset.get_lines_labels() for line, label in zip(lines, labels): line_tokens = line.tokens["tokens"] labels_ner = label.tokens["NER"] assert len(line_tokens) == len(labels_ner) def test_conll_yago_dataset_manager(self, conll_yago_dataset_manager): dataset_manager = 
conll_yago_dataset_manager tokens_vocab = dataset_manager.namespace_to_vocab["tokens"] assert tokens_vocab.get_vocab_len() > 0 def test_context_tokens_has_no_none(self, conll_yago_dataset): dataset = conll_yago_dataset lines, labels = dataset.get_lines_labels() for line in lines: context_tokens = line.tokens["contextual_tokens"] assert "None" not in context_tokens
[ "pathlib.Path", "pytest.fail", "pytest.fixture", "sciwing.tokenizers.word_tokenizer.WordTokenizer" ]
[((356, 378), 'pathlib.Path', 'pathlib.Path', (['DATA_DIR'], {}), '(DATA_DIR)\n', (368, 378), False, 'import pathlib\n'), ((382, 495), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['conll_yago_ner.train', 'conll_yago_ner.dev', 'conll_yago_ner.test']", 'scope': '"""session"""'}), "(params=['conll_yago_ner.train', 'conll_yago_ner.dev',\n 'conll_yago_ner.test'], scope='session')\n", (396, 495), False, 'import pytest\n'), ((786, 817), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (800, 817), False, 'import pytest\n'), ((690, 724), 'sciwing.tokenizers.word_tokenizer.WordTokenizer', 'WordTokenizer', ([], {'tokenizer': '"""vanilla"""'}), "(tokenizer='vanilla')\n", (703, 724), False, 'from sciwing.tokenizers.word_tokenizer import WordTokenizer\n'), ((1533, 1579), 'pytest.fail', 'pytest.fail', (['"""Getting Lines and Labels failed"""'], {}), "('Getting Lines and Labels failed')\n", (1544, 1579), False, 'import pytest\n')]
from django.conf.urls import include, url from django.views.generic import RedirectView from pgweb.util.signals import register_basic_signal_handlers import pgweb.contributors.views import pgweb.core.views import pgweb.docs.views import pgweb.downloads.views import pgweb.events.views import pgweb.featurematrix.views import pgweb.legacyurl.views import pgweb.lists.views import pgweb.misc.views import pgweb.news.views import pgweb.profserv.views import pgweb.pugs.views import pgweb.search.views import pgweb.security.views import pgweb.sponsors.views import pgweb.survey.views from pgweb.core.feeds import VersionFeed from pgweb.news.feeds import NewsFeed from pgweb.events.feeds import EventFeed # Uncomment the next two lines to enable the admin: from django.contrib import admin admin.autodiscover() # Register our save signal handlers register_basic_signal_handlers() urlpatterns = [ url(r'^$', pgweb.core.views.home), url(r'^dyncss/(?P<css>base|docs).css$', pgweb.core.views.dynamic_css), url(r'^about/$', pgweb.core.views.about), url(r'^about/newsarchive/([^/]+/)?$', pgweb.news.views.archive), url(r'^about/news/(\d+)(-.*)?/$', pgweb.news.views.item), url(r'^about/news/taglist.json/$', pgweb.news.views.taglist_json), url(r'^about/events/$', pgweb.events.views.main), url(r'^about/eventarchive/$', pgweb.events.views.archive), url(r'^about/event/(\d+)(-.*)?/$', pgweb.events.views.item), url(r'^about/featurematrix/$', pgweb.featurematrix.views.root), url(r'^about/featurematrix/detail/(\d+)/$', pgweb.featurematrix.views.detail), url(r'^about/privacypolicy/$', RedirectView.as_view(url='/about/policies/privacy/', permanent=True)), url(r'^ftp/(.*/)?$', pgweb.downloads.views.ftpbrowser), url(r'^download/mirrors-ftp/+(.*)$', pgweb.downloads.views.mirrorselect), url(r'^download/product-categories/$', pgweb.downloads.views.categorylist), url(r'^download/products/(\d+)(-.*)?/$', pgweb.downloads.views.productlist), url(r'^applications-v2.xml$', 
pgweb.downloads.views.applications_v2_xml), url(r'^download/uploadftp/', pgweb.downloads.views.uploadftp), url(r'^download/uploadyum/', pgweb.downloads.views.uploadyum), url(r'^download/js/yum.js', pgweb.downloads.views.yum_js), url(r'^docs/$', pgweb.docs.views.root), url(r'^docs/manuals/$', pgweb.docs.views.manuals), url(r'^docs/manuals/archive/$', pgweb.docs.views.manualarchive), url(r'^docs/release/$', pgweb.docs.views.release_notes), url(r'^docs/release/((?P<major_version>(\d+\.\d+)|\d+)\.(?P<minor_version>\d+))/$', pgweb.docs.views.release_notes), # Legacy URLs for accessing the docs page; provides a permanent redirect url(r'^docs/(current|devel|\d+(?:\.\d)?)/(static|interactive)/((.*).html?)?$', pgweb.docs.views.docspermanentredirect), url(r'^docs/(current|devel|\d+(?:\.\d)?)/(.*).html?$', pgweb.docs.views.docpage), url(r'^docs/(current|devel|\d+(?:\.\d)?)/(.*).svg$', pgweb.docs.views.docsvg), url(r'^docs/(current|devel|\d+(?:\.\d)?)/$', pgweb.docs.views.docsrootpage), url(r'^docs/(current|devel|\d+(?:\.\d)?)/$', pgweb.docs.views.redirect_root), url(r'^community/$', pgweb.core.views.community), url(r'^community/contributors/$', pgweb.contributors.views.completelist), url(r'^community/lists/$', RedirectView.as_view(url='/list/', permanent=True)), url(r'^community/lists/subscribe/$', RedirectView.as_view(url='https://lists.postgresql.org/', permanent=True)), url(r'^community/lists/listinfo/$', pgweb.lists.views.listinfo), url(r'^community/survey/vote/(\d+)/$', pgweb.survey.views.vote), url(r'^community/survey[/\.](\d+)(-.*)?/$', pgweb.survey.views.results), url(r'^community/user-groups/$', pgweb.pugs.views.index), url(r'^search/$', pgweb.search.views.search), url(r'^support/security/$', pgweb.security.views.index), url(r'^support/security/(\d\.\d|\d{2})/$', pgweb.security.views.version), url(r'^support/security_archive/$', RedirectView.as_view(url='/support/security/', permanent=True)), url(r'^support/professional_(support|hosting)/$', 
pgweb.profserv.views.root), url(r'^support/professional_(support|hosting)[/_](.*)/$', pgweb.profserv.views.region), url(r'^account/submitbug/$', pgweb.misc.views.submitbug), url(r'^account/submitbug/(\d+)/$', pgweb.misc.views.submitbug_done), url(r'^support/submitbug/$', RedirectView.as_view(url='/account/submitbug/', permanent=True)), url(r'^support/versioning/$', pgweb.core.views.versions), url(r'^bugs_redir/(\d+)/$', pgweb.misc.views.bugs_redir), url(r'^about/sponsors/$', pgweb.sponsors.views.sponsors), url(r'^about/servers/$', pgweb.sponsors.views.servers), url(r'^robots.txt$', pgweb.core.views.robots), ### # RSS feeds ### url(r'^versions.rss$', VersionFeed()), url(r'^news(/(?P<tagurl>[^/]+))?.rss$', NewsFeed()), url(r'^events.rss$', EventFeed()), ### # Special sections ### url(r'^account/', include('pgweb.account.urls')), ### # Sitemap (FIXME: support for >50k urls!) ### url(r'^sitemap.xml', pgweb.core.views.sitemap), url(r'^sitemap_internal.xml', pgweb.core.views.sitemap_internal), ### # Workaround for broken links pushed in press release ### url(r'^downloads/$', RedirectView.as_view(url='/download/', permanent=True)), ### # Legacy URLs from old structurs, but used in places like press releases # so needs to live a bit longer. ### url(r'^about/press/contact/$', RedirectView.as_view(url='/about/press/', permanent=True)), ### # Images that are used from other community sites ### url(r'^layout/images/(?P<f>[a-z0-9_\.]+)$', RedirectView.as_view(url='/media/img/layout/%(f)s', permanent=True)), ### # Handle redirect on incorrect spelling of licence ### url(r'^about/license/$', RedirectView.as_view(url='/about/licence', permanent=True)), ### # Links included in emails on the lists (do we need to check this for XSS?) 
### url(r'^mailpref/([a-z0-9_-]+)/$', pgweb.legacyurl.views.mailpref), # Some basic information about the connection (for debugging purposes) url(r'^system_information/$', pgweb.core.views.system_information), # Sync timestamp, for automirror url(r'^web_sync_timestamp$', pgweb.core.views.sync_timestamp), # API endpoints url(r'^api/varnish/purge/$', pgweb.core.views.api_varnish_purge), # Override some URLs in admin, to provide our own pages url(r'^admin/pending/$', pgweb.core.views.admin_pending), url(r'^admin/purge/$', pgweb.core.views.admin_purge), url(r'^admin/mergeorg/$', pgweb.core.views.admin_mergeorg), # Uncomment the next line to enable the admin: url(r'^admin/', admin.site.urls), # Crash testing URL :-) url(r'^crashtest/$', pgweb.misc.views.crashtest), # Fallback for static pages, must be at the bottom url(r'^(.*)/$', pgweb.core.views.fallback), ]
[ "django.contrib.admin.autodiscover", "django.conf.urls.include", "pgweb.core.feeds.VersionFeed", "pgweb.events.feeds.EventFeed", "django.conf.urls.url", "pgweb.util.signals.register_basic_signal_handlers", "pgweb.news.feeds.NewsFeed", "django.views.generic.RedirectView.as_view" ]
[((789, 809), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (807, 809), False, 'from django.contrib import admin\n'), ((847, 879), 'pgweb.util.signals.register_basic_signal_handlers', 'register_basic_signal_handlers', ([], {}), '()\n', (877, 879), False, 'from pgweb.util.signals import register_basic_signal_handlers\n'), ((901, 933), 'django.conf.urls.url', 'url', (['"""^$"""', 'pgweb.core.views.home'], {}), "('^$', pgweb.core.views.home)\n", (904, 933), False, 'from django.conf.urls import include, url\n'), ((940, 1008), 'django.conf.urls.url', 'url', (['"""^dyncss/(?P<css>base|docs).css$"""', 'pgweb.core.views.dynamic_css'], {}), "('^dyncss/(?P<css>base|docs).css$', pgweb.core.views.dynamic_css)\n", (943, 1008), False, 'from django.conf.urls import include, url\n'), ((1016, 1055), 'django.conf.urls.url', 'url', (['"""^about/$"""', 'pgweb.core.views.about'], {}), "('^about/$', pgweb.core.views.about)\n", (1019, 1055), False, 'from django.conf.urls import include, url\n'), ((1062, 1124), 'django.conf.urls.url', 'url', (['"""^about/newsarchive/([^/]+/)?$"""', 'pgweb.news.views.archive'], {}), "('^about/newsarchive/([^/]+/)?$', pgweb.news.views.archive)\n", (1065, 1124), False, 'from django.conf.urls import include, url\n'), ((1131, 1187), 'django.conf.urls.url', 'url', (['"""^about/news/(\\\\d+)(-.*)?/$"""', 'pgweb.news.views.item'], {}), "('^about/news/(\\\\d+)(-.*)?/$', pgweb.news.views.item)\n", (1134, 1187), False, 'from django.conf.urls import include, url\n'), ((1193, 1257), 'django.conf.urls.url', 'url', (['"""^about/news/taglist.json/$"""', 'pgweb.news.views.taglist_json'], {}), "('^about/news/taglist.json/$', pgweb.news.views.taglist_json)\n", (1196, 1257), False, 'from django.conf.urls import include, url\n'), ((1264, 1311), 'django.conf.urls.url', 'url', (['"""^about/events/$"""', 'pgweb.events.views.main'], {}), "('^about/events/$', pgweb.events.views.main)\n", (1267, 1311), False, 'from django.conf.urls import include, 
url\n'), ((1318, 1374), 'django.conf.urls.url', 'url', (['"""^about/eventarchive/$"""', 'pgweb.events.views.archive'], {}), "('^about/eventarchive/$', pgweb.events.views.archive)\n", (1321, 1374), False, 'from django.conf.urls import include, url\n'), ((1381, 1440), 'django.conf.urls.url', 'url', (['"""^about/event/(\\\\d+)(-.*)?/$"""', 'pgweb.events.views.item'], {}), "('^about/event/(\\\\d+)(-.*)?/$', pgweb.events.views.item)\n", (1384, 1440), False, 'from django.conf.urls import include, url\n'), ((1446, 1507), 'django.conf.urls.url', 'url', (['"""^about/featurematrix/$"""', 'pgweb.featurematrix.views.root'], {}), "('^about/featurematrix/$', pgweb.featurematrix.views.root)\n", (1449, 1507), False, 'from django.conf.urls import include, url\n'), ((1514, 1591), 'django.conf.urls.url', 'url', (['"""^about/featurematrix/detail/(\\\\d+)/$"""', 'pgweb.featurematrix.views.detail'], {}), "('^about/featurematrix/detail/(\\\\d+)/$', pgweb.featurematrix.views.detail)\n", (1517, 1591), False, 'from django.conf.urls import include, url\n'), ((1704, 1757), 'django.conf.urls.url', 'url', (['"""^ftp/(.*/)?$"""', 'pgweb.downloads.views.ftpbrowser'], {}), "('^ftp/(.*/)?$', pgweb.downloads.views.ftpbrowser)\n", (1707, 1757), False, 'from django.conf.urls import include, url\n'), ((1764, 1835), 'django.conf.urls.url', 'url', (['"""^download/mirrors-ftp/+(.*)$"""', 'pgweb.downloads.views.mirrorselect'], {}), "('^download/mirrors-ftp/+(.*)$', pgweb.downloads.views.mirrorselect)\n", (1767, 1835), False, 'from django.conf.urls import include, url\n'), ((1842, 1915), 'django.conf.urls.url', 'url', (['"""^download/product-categories/$"""', 'pgweb.downloads.views.categorylist'], {}), "('^download/product-categories/$', pgweb.downloads.views.categorylist)\n", (1845, 1915), False, 'from django.conf.urls import include, url\n'), ((1922, 1997), 'django.conf.urls.url', 'url', (['"""^download/products/(\\\\d+)(-.*)?/$"""', 'pgweb.downloads.views.productlist'], {}), 
"('^download/products/(\\\\d+)(-.*)?/$', pgweb.downloads.views.productlist)\n", (1925, 1997), False, 'from django.conf.urls import include, url\n'), ((2003, 2074), 'django.conf.urls.url', 'url', (['"""^applications-v2.xml$"""', 'pgweb.downloads.views.applications_v2_xml'], {}), "('^applications-v2.xml$', pgweb.downloads.views.applications_v2_xml)\n", (2006, 2074), False, 'from django.conf.urls import include, url\n'), ((2081, 2141), 'django.conf.urls.url', 'url', (['"""^download/uploadftp/"""', 'pgweb.downloads.views.uploadftp'], {}), "('^download/uploadftp/', pgweb.downloads.views.uploadftp)\n", (2084, 2141), False, 'from django.conf.urls import include, url\n'), ((2148, 2208), 'django.conf.urls.url', 'url', (['"""^download/uploadyum/"""', 'pgweb.downloads.views.uploadyum'], {}), "('^download/uploadyum/', pgweb.downloads.views.uploadyum)\n", (2151, 2208), False, 'from django.conf.urls import include, url\n'), ((2215, 2271), 'django.conf.urls.url', 'url', (['"""^download/js/yum.js"""', 'pgweb.downloads.views.yum_js'], {}), "('^download/js/yum.js', pgweb.downloads.views.yum_js)\n", (2218, 2271), False, 'from django.conf.urls import include, url\n'), ((2279, 2316), 'django.conf.urls.url', 'url', (['"""^docs/$"""', 'pgweb.docs.views.root'], {}), "('^docs/$', pgweb.docs.views.root)\n", (2282, 2316), False, 'from django.conf.urls import include, url\n'), ((2323, 2371), 'django.conf.urls.url', 'url', (['"""^docs/manuals/$"""', 'pgweb.docs.views.manuals'], {}), "('^docs/manuals/$', pgweb.docs.views.manuals)\n", (2326, 2371), False, 'from django.conf.urls import include, url\n'), ((2378, 2440), 'django.conf.urls.url', 'url', (['"""^docs/manuals/archive/$"""', 'pgweb.docs.views.manualarchive'], {}), "('^docs/manuals/archive/$', pgweb.docs.views.manualarchive)\n", (2381, 2440), False, 'from django.conf.urls import include, url\n'), ((2447, 2501), 'django.conf.urls.url', 'url', (['"""^docs/release/$"""', 'pgweb.docs.views.release_notes'], {}), "('^docs/release/$', 
pgweb.docs.views.release_notes)\n", (2450, 2501), False, 'from django.conf.urls import include, url\n'), ((2508, 2633), 'django.conf.urls.url', 'url', (['"""^docs/release/((?P<major_version>(\\\\d+\\\\.\\\\d+)|\\\\d+)\\\\.(?P<minor_version>\\\\d+))/$"""', 'pgweb.docs.views.release_notes'], {}), "('^docs/release/((?P<major_version>(\\\\d+\\\\.\\\\d+)|\\\\d+)\\\\.(?P<minor_version>\\\\d+))/$'\n , pgweb.docs.views.release_notes)\n", (2511, 2633), False, 'from django.conf.urls import include, url\n'), ((2706, 2831), 'django.conf.urls.url', 'url', (['"""^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(static|interactive)/((.*).html?)?$"""', 'pgweb.docs.views.docspermanentredirect'], {}), "('^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(static|interactive)/((.*).html?)?$'\n , pgweb.docs.views.docspermanentredirect)\n", (2709, 2831), False, 'from django.conf.urls import include, url\n'), ((2830, 2917), 'django.conf.urls.url', 'url', (['"""^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(.*).html?$"""', 'pgweb.docs.views.docpage'], {}), "('^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(.*).html?$', pgweb.docs.views.\n docpage)\n", (2833, 2917), False, 'from django.conf.urls import include, url\n'), ((2916, 2995), 'django.conf.urls.url', 'url', (['"""^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(.*).svg$"""', 'pgweb.docs.views.docsvg'], {}), "('^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/(.*).svg$', pgweb.docs.views.docsvg)\n", (2919, 2995), False, 'from django.conf.urls import include, url\n'), ((2999, 3076), 'django.conf.urls.url', 'url', (['"""^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/$"""', 'pgweb.docs.views.docsrootpage'], {}), "('^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/$', pgweb.docs.views.docsrootpage)\n", (3002, 3076), False, 'from django.conf.urls import include, url\n'), ((3080, 3158), 'django.conf.urls.url', 'url', (['"""^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/$"""', 'pgweb.docs.views.redirect_root'], {}), "('^docs/(current|devel|\\\\d+(?:\\\\.\\\\d)?)/$', 
pgweb.docs.views.redirect_root)\n", (3083, 3158), False, 'from django.conf.urls import include, url\n'), ((3163, 3210), 'django.conf.urls.url', 'url', (['"""^community/$"""', 'pgweb.core.views.community'], {}), "('^community/$', pgweb.core.views.community)\n", (3166, 3210), False, 'from django.conf.urls import include, url\n'), ((3217, 3288), 'django.conf.urls.url', 'url', (['"""^community/contributors/$"""', 'pgweb.contributors.views.completelist'], {}), "('^community/contributors/$', pgweb.contributors.views.completelist)\n", (3220, 3288), False, 'from django.conf.urls import include, url\n'), ((3497, 3559), 'django.conf.urls.url', 'url', (['"""^community/lists/listinfo/$"""', 'pgweb.lists.views.listinfo'], {}), "('^community/lists/listinfo/$', pgweb.lists.views.listinfo)\n", (3500, 3559), False, 'from django.conf.urls import include, url\n'), ((3566, 3629), 'django.conf.urls.url', 'url', (['"""^community/survey/vote/(\\\\d+)/$"""', 'pgweb.survey.views.vote'], {}), "('^community/survey/vote/(\\\\d+)/$', pgweb.survey.views.vote)\n", (3569, 3629), False, 'from django.conf.urls import include, url\n'), ((3635, 3707), 'django.conf.urls.url', 'url', (['"""^community/survey[/\\\\.](\\\\d+)(-.*)?/$"""', 'pgweb.survey.views.results'], {}), "('^community/survey[/\\\\.](\\\\d+)(-.*)?/$', pgweb.survey.views.results)\n", (3638, 3707), False, 'from django.conf.urls import include, url\n'), ((3712, 3767), 'django.conf.urls.url', 'url', (['"""^community/user-groups/$"""', 'pgweb.pugs.views.index'], {}), "('^community/user-groups/$', pgweb.pugs.views.index)\n", (3715, 3767), False, 'from django.conf.urls import include, url\n'), ((3775, 3818), 'django.conf.urls.url', 'url', (['"""^search/$"""', 'pgweb.search.views.search'], {}), "('^search/$', pgweb.search.views.search)\n", (3778, 3818), False, 'from django.conf.urls import include, url\n'), ((3826, 3880), 'django.conf.urls.url', 'url', (['"""^support/security/$"""', 'pgweb.security.views.index'], {}), "('^support/security/$', 
pgweb.security.views.index)\n", (3829, 3880), False, 'from django.conf.urls import include, url\n'), ((3887, 3962), 'django.conf.urls.url', 'url', (['"""^support/security/(\\\\d\\\\.\\\\d|\\\\d{2})/$"""', 'pgweb.security.views.version'], {}), "('^support/security/(\\\\d\\\\.\\\\d|\\\\d{2})/$', pgweb.security.views.version)\n", (3890, 3962), False, 'from django.conf.urls import include, url\n'), ((4071, 4146), 'django.conf.urls.url', 'url', (['"""^support/professional_(support|hosting)/$"""', 'pgweb.profserv.views.root'], {}), "('^support/professional_(support|hosting)/$', pgweb.profserv.views.root)\n", (4074, 4146), False, 'from django.conf.urls import include, url\n'), ((4153, 4243), 'django.conf.urls.url', 'url', (['"""^support/professional_(support|hosting)[/_](.*)/$"""', 'pgweb.profserv.views.region'], {}), "('^support/professional_(support|hosting)[/_](.*)/$', pgweb.profserv.\n views.region)\n", (4156, 4243), False, 'from django.conf.urls import include, url\n'), ((4245, 4300), 'django.conf.urls.url', 'url', (['"""^account/submitbug/$"""', 'pgweb.misc.views.submitbug'], {}), "('^account/submitbug/$', pgweb.misc.views.submitbug)\n", (4248, 4300), False, 'from django.conf.urls import include, url\n'), ((4307, 4374), 'django.conf.urls.url', 'url', (['"""^account/submitbug/(\\\\d+)/$"""', 'pgweb.misc.views.submitbug_done'], {}), "('^account/submitbug/(\\\\d+)/$', pgweb.misc.views.submitbug_done)\n", (4310, 4374), False, 'from django.conf.urls import include, url\n'), ((4479, 4534), 'django.conf.urls.url', 'url', (['"""^support/versioning/$"""', 'pgweb.core.views.versions'], {}), "('^support/versioning/$', pgweb.core.views.versions)\n", (4482, 4534), False, 'from django.conf.urls import include, url\n'), ((4541, 4597), 'django.conf.urls.url', 'url', (['"""^bugs_redir/(\\\\d+)/$"""', 'pgweb.misc.views.bugs_redir'], {}), "('^bugs_redir/(\\\\d+)/$', pgweb.misc.views.bugs_redir)\n", (4544, 4597), False, 'from django.conf.urls import include, url\n'), ((4604, 4659), 
'django.conf.urls.url', 'url', (['"""^about/sponsors/$"""', 'pgweb.sponsors.views.sponsors'], {}), "('^about/sponsors/$', pgweb.sponsors.views.sponsors)\n", (4607, 4659), False, 'from django.conf.urls import include, url\n'), ((4666, 4719), 'django.conf.urls.url', 'url', (['"""^about/servers/$"""', 'pgweb.sponsors.views.servers'], {}), "('^about/servers/$', pgweb.sponsors.views.servers)\n", (4669, 4719), False, 'from django.conf.urls import include, url\n'), ((4727, 4771), 'django.conf.urls.url', 'url', (['"""^robots.txt$"""', 'pgweb.core.views.robots'], {}), "('^robots.txt$', pgweb.core.views.robots)\n", (4730, 4771), False, 'from django.conf.urls import include, url\n'), ((5107, 5152), 'django.conf.urls.url', 'url', (['"""^sitemap.xml"""', 'pgweb.core.views.sitemap'], {}), "('^sitemap.xml', pgweb.core.views.sitemap)\n", (5110, 5152), False, 'from django.conf.urls import include, url\n'), ((5159, 5222), 'django.conf.urls.url', 'url', (['"""^sitemap_internal.xml"""', 'pgweb.core.views.sitemap_internal'], {}), "('^sitemap_internal.xml', pgweb.core.views.sitemap_internal)\n", (5162, 5222), False, 'from django.conf.urls import include, url\n'), ((6059, 6123), 'django.conf.urls.url', 'url', (['"""^mailpref/([a-z0-9_-]+)/$"""', 'pgweb.legacyurl.views.mailpref'], {}), "('^mailpref/([a-z0-9_-]+)/$', pgweb.legacyurl.views.mailpref)\n", (6062, 6123), False, 'from django.conf.urls import include, url\n'), ((6206, 6271), 'django.conf.urls.url', 'url', (['"""^system_information/$"""', 'pgweb.core.views.system_information'], {}), "('^system_information/$', pgweb.core.views.system_information)\n", (6209, 6271), False, 'from django.conf.urls import include, url\n'), ((6315, 6375), 'django.conf.urls.url', 'url', (['"""^web_sync_timestamp$"""', 'pgweb.core.views.sync_timestamp'], {}), "('^web_sync_timestamp$', pgweb.core.views.sync_timestamp)\n", (6318, 6375), False, 'from django.conf.urls import include, url\n'), ((6403, 6466), 'django.conf.urls.url', 'url', 
(['"""^api/varnish/purge/$"""', 'pgweb.core.views.api_varnish_purge'], {}), "('^api/varnish/purge/$', pgweb.core.views.api_varnish_purge)\n", (6406, 6466), False, 'from django.conf.urls import include, url\n'), ((6534, 6589), 'django.conf.urls.url', 'url', (['"""^admin/pending/$"""', 'pgweb.core.views.admin_pending'], {}), "('^admin/pending/$', pgweb.core.views.admin_pending)\n", (6537, 6589), False, 'from django.conf.urls import include, url\n'), ((6596, 6647), 'django.conf.urls.url', 'url', (['"""^admin/purge/$"""', 'pgweb.core.views.admin_purge'], {}), "('^admin/purge/$', pgweb.core.views.admin_purge)\n", (6599, 6647), False, 'from django.conf.urls import include, url\n'), ((6654, 6711), 'django.conf.urls.url', 'url', (['"""^admin/mergeorg/$"""', 'pgweb.core.views.admin_mergeorg'], {}), "('^admin/mergeorg/$', pgweb.core.views.admin_mergeorg)\n", (6657, 6711), False, 'from django.conf.urls import include, url\n'), ((6770, 6801), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (6773, 6801), False, 'from django.conf.urls import include, url\n'), ((6837, 6884), 'django.conf.urls.url', 'url', (['"""^crashtest/$"""', 'pgweb.misc.views.crashtest'], {}), "('^crashtest/$', pgweb.misc.views.crashtest)\n", (6840, 6884), False, 'from django.conf.urls import include, url\n'), ((6947, 6988), 'django.conf.urls.url', 'url', (['"""^(.*)/$"""', 'pgweb.core.views.fallback'], {}), "('^(.*)/$', pgweb.core.views.fallback)\n", (6950, 6988), False, 'from django.conf.urls import include, url\n'), ((1628, 1696), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/about/policies/privacy/"""', 'permanent': '(True)'}), "(url='/about/policies/privacy/', permanent=True)\n", (1648, 1696), False, 'from django.views.generic import RedirectView\n'), ((3322, 3372), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/list/"""', 'permanent': '(True)'}), "(url='/list/', 
permanent=True)\n", (3342, 3372), False, 'from django.views.generic import RedirectView\n'), ((3416, 3489), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""https://lists.postgresql.org/"""', 'permanent': '(True)'}), "(url='https://lists.postgresql.org/', permanent=True)\n", (3436, 3489), False, 'from django.views.generic import RedirectView\n'), ((4001, 4063), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/support/security/"""', 'permanent': '(True)'}), "(url='/support/security/', permanent=True)\n", (4021, 4063), False, 'from django.views.generic import RedirectView\n'), ((4409, 4472), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/account/submitbug/"""', 'permanent': '(True)'}), "(url='/account/submitbug/', permanent=True)\n", (4429, 4472), False, 'from django.views.generic import RedirectView\n'), ((4834, 4847), 'pgweb.core.feeds.VersionFeed', 'VersionFeed', ([], {}), '()\n', (4845, 4847), False, 'from pgweb.core.feeds import VersionFeed\n'), ((4894, 4904), 'pgweb.news.feeds.NewsFeed', 'NewsFeed', ([], {}), '()\n', (4902, 4904), False, 'from pgweb.news.feeds import NewsFeed\n'), ((4932, 4943), 'pgweb.events.feeds.EventFeed', 'EventFeed', ([], {}), '()\n', (4941, 4943), False, 'from pgweb.events.feeds import EventFeed\n'), ((5008, 5037), 'django.conf.urls.include', 'include', (['"""pgweb.account.urls"""'], {}), "('pgweb.account.urls')\n", (5015, 5037), False, 'from django.conf.urls import include, url\n'), ((5325, 5379), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/download/"""', 'permanent': '(True)'}), "(url='/download/', permanent=True)\n", (5345, 5379), False, 'from django.views.generic import RedirectView\n'), ((5548, 5605), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/about/press/"""', 'permanent': '(True)'}), "(url='/about/press/', permanent=True)\n", (5568, 
5605), False, 'from django.views.generic import RedirectView\n'), ((5727, 5794), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/media/img/layout/%(f)s"""', 'permanent': '(True)'}), "(url='/media/img/layout/%(f)s', permanent=True)\n", (5747, 5794), False, 'from django.views.generic import RedirectView\n'), ((5897, 5955), 'django.views.generic.RedirectView.as_view', 'RedirectView.as_view', ([], {'url': '"""/about/licence"""', 'permanent': '(True)'}), "(url='/about/licence', permanent=True)\n", (5917, 5955), False, 'from django.views.generic import RedirectView\n')]
import json import os import socket import base64 import logging import subprocess import re from pathlib import Path import pyroute2 from pyroute2 import IPDB, WireGuard, NetlinkError from nacl.public import PrivateKey from platform_agent.cmd.iptables import add_iptable_rules, delete_iptable_rules, add_iptables_forward from platform_agent.cmd.lsmod import module_loaded from platform_agent.cmd.wg_show import get_wg_listen_port from platform_agent.files.tmp_files import get_peer_metadata from platform_agent.lib.ctime import now from platform_agent.routes import Routes from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT logger = logging.getLogger() class WgConfException(Exception): pass def delete_interface(ifname): subprocess.run(['ip', 'link', 'del', ifname], check=False, stderr=subprocess.DEVNULL) def create_interface(ifname): try: subprocess.run(['ip', 'link', 'add', 'dev', ifname, 'type', 'wireguard'], check=True, stderr=subprocess.DEVNULL) except subprocess.CalledProcessError: pass def set_interface_up(ifname): try: subprocess.run(['ip', 'link', 'set', 'up', ifname], check=True, stderr=subprocess.DEVNULL) except subprocess.CalledProcessError: pass def set_interface_ip(ifname, ip): try: subprocess.run(['ip', 'address', 'add', 'dev', ifname, ip], check=True, stderr=subprocess.DEVNULL) except subprocess.CalledProcessError: pass class WgConf(): def __init__(self, client=None): self.wg_kernel = module_loaded('wireguard') self.wg = WireGuard() if self.wg_kernel else WireguardGo() self.ipdb = IPDB() self.routes = Routes() self.client = client def create_syntropy_interfaces(self, ifaces): result = [] if not ifaces: return result for ifname in ifaces.keys(): int_data = self.create_interface("SYNTROPY_" + ifname, ifaces[ifname].get('internal_ip'), listen_port=ifaces[ifname].get('listen_port')) if int_data.get('public_key') != ifaces[ifname].get('public_key') or int_data.get('listen_port') != 
ifaces[ifname].get('listen_port'): result.append( { "fn": "create_interface", "data": int_data } ) return result @staticmethod def get_wg_interfaces(): with IPDB() as ipdb: current_interfaces = [k for k, v in ipdb.by_name.items() if re.match(WG_NAME_PATTERN, k) or k in WG_SYNTROPY_INT] return current_interfaces def clear_interfaces(self, dump, network_dump): remote_interfaces = [d['args']['ifname'] for d in dump if d['fn'] == 'create_interface'] if network_dump: remote_interfaces.extend(["SYNTROPY_" + ifname for ifname in network_dump.keys()]) current_interfaces = self.get_wg_interfaces() remove_interfaces = set(current_interfaces) - set(remote_interfaces) logger.debug( f"Clearing interfaces REMOTE - {remote_interfaces}, CURRENT - {current_interfaces} REMOVE={remove_interfaces}" ) for interface in remove_interfaces: self.remove_interface(interface) def clear_unused_routes(self, dump): remote_peers = [d['args'] for d in dump if d['fn'] == 'add_peer'] remote_interfaces = [d['args']['ifname'] for d in dump if d['fn'] == 'create_interface'] for ifname in remote_interfaces: allowed_ips = [] remote_peers = [allowed_ips.extend(peer['allowed_ips']) for peer in remote_peers if peer and peer['ifname'] == ifname] self.routes.clear_unused_routes(ifname, allowed_ips) def clear_peers(self, dump): remote_peers = [d['args']['public_key'] for d in dump if d['fn'] == 'add_peer'] current_interfaces = self.get_wg_interfaces() for iface in current_interfaces: peers = get_peer_info(iface, self.wg) for peer in peers: if peer not in remote_peers: self.remove_peer(iface, peer) def get_wg_keys(self, ifname): private_key_path = f"/etc/syntropy-agent/privatekey-{ifname}" public_key_path = f"/etc/syntropy-agent/publickey-{ifname}" private_key = Path(private_key_path) public_key = Path(public_key_path) if not private_key.is_file() or not public_key.is_file(): privKey = PrivateKey.generate() pubKey = base64.b64encode(bytes(privKey.public_key)) privKey = base64.b64encode(bytes(privKey)) 
base64_privKey = privKey.decode('ascii') base64_pubKey = pubKey.decode('ascii') private_key.write_text(base64_privKey) public_key.write_text(base64_pubKey) private_key.chmod(0o600) public_key.chmod(0o600) if self.wg_kernel: return public_key.read_text().strip(), private_key.read_text().strip() else: return public_key.read_text().strip(), private_key_path def next_free_port(self, port=1024, max_port=65535): sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) while port <= max_port: try: sock.bind(('', port)) sock.close() return port except OSError: port += 1 raise IOError('no free ports') def create_interface(self, ifname, internal_ip, listen_port=None, **kwargs): public_key, private_key = self.get_wg_keys(ifname) peer_metadata = {'metadata': get_peer_metadata(public_key=public_key)} logger.info( f"[WG_CONF] - Creating interface {ifname}, {internal_ip} - wg_kernel={self.wg_kernel}", extra={'metadata': peer_metadata} ) if self.wg_kernel: create_interface(ifname) else: self.wg.create_interface(ifname) set_interface_up(ifname) set_interface_ip(ifname, internal_ip) self.routes.clear_unused_iface_addrs(ifname, internal_ip.split('/')[0]) if os.environ.get("SYNTROPY_PORT_RANGE") and not listen_port: listen_port = find_free_port() try: self.wg.set( ifname, private_key=private_key, listen_port=listen_port ) except NetlinkError as error: if error.code != 98: raise else: # if port was taken before creating. 
self.wg.set( ifname, private_key=private_key, ) listen_port = self.get_listening_port(ifname) if not listen_port: listen_port = find_free_port() self.wg.set( ifname, private_key=private_key, listen_port=listen_port ) add_iptables_forward(ifname) result = { "public_key": public_key, "listen_port": int(listen_port), "ifname": ifname, "internal_ip": internal_ip } logger.debug( f"[WG_CONF] - interface_created {result}", extra={'metadata': peer_metadata} ) return result def add_peer(self, ifname, public_key, allowed_ips, gw_ipv4, endpoint_ipv4=None, endpoint_port=None): peer_metadata = get_peer_metadata(public_key=public_key) if self.wg_kernel: try: peer_info = get_peer_info(ifname=ifname, wg=self.wg) except ValueError as e: raise WgConfException(str(e)) old_ips = set(peer_info.get(public_key, [])) - set(allowed_ips) self.routes.ip_route_del(ifname, old_ips) peer = {'public_key': public_key, 'persistent_keepalive': 15, 'allowed_ips': allowed_ips} if endpoint_ipv4 and endpoint_port: peer.update( { 'endpoint_addr': endpoint_ipv4, 'endpoint_port': endpoint_port, } ) self.wg.set(ifname, peer=peer) statuses = self.routes.ip_route_add(ifname, allowed_ips, gw_ipv4) add_iptable_rules(allowed_ips) data = { "connection_id": peer_metadata.get('connection_id'), "public_key": public_key, "statuses": statuses, } self.client.batch_send.queue.put({"data": data, "msg_type": 'WG_ROUTE_STATUS'}) def remove_peer(self, ifname, public_key, allowed_ips=None): if ifname not in self.get_wg_interfaces(): logger.debug(f'[WG_CONF] Remove peer - [{ifname}] does not exist') return peer = { 'public_key': public_key, 'remove': True } try: self.wg.set(ifname, peer=peer) if allowed_ips: self.routes.ip_route_del(ifname, allowed_ips) delete_iptable_rules(allowed_ips) except pyroute2.netlink.exceptions.NetlinkError as error: if error.code != 19: raise return def remove_interface(self, ifname): logger.debug(f'[WG_CONF] Removing interfcae - [{ifname}]') delete_interface(ifname) logger.debug(f'[WG_CONF] 
Removed interfcae - [{ifname}]') return def get_listening_port(self, ifname): if self.wg_kernel: wg_info = dict(self.wg.info(ifname)[0]['attrs']) return wg_info['WGDEVICE_A_LISTEN_PORT'] else: wg_info = self.wg.info(ifname) return wg_info['listen_port'] class WireguardGo: def set(self, ifname, peer=None, private_key=None, listen_port=None): full_cmd = f"wg set {ifname}".split(' ') if peer: allowed_ips_cmd = "" endpoint = f"endpoint {peer['endpoint_addr']}:{peer.get('endpoint_port')} " if peer.get('endpoint_addr') else "" if not peer.get('remove'): for ip in peer.get('allowed_ips', []): allowed_ips_cmd += f"allowed-ips {ip} " peer_cmd = f"peer {peer['public_key']} {allowed_ips_cmd}{endpoint}persistent-keepalive 15".split( ' ') else: peer_cmd = f"peer {peer['public_key']} remove".split(' ') full_cmd += peer_cmd if private_key: private_key_cmd = f"private-key {private_key}".split(' ') full_cmd += private_key_cmd if not listen_port: listen_port = find_free_port() if listen_port: listen_port_cmd = f"listen-port {listen_port}".split(' ') full_cmd += listen_port_cmd result_set = subprocess.run(full_cmd, encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE) complete_output = result_set.stdout or result_set.stderr complete_output = complete_output or 'Success' logger.debug(f"[Wireguard-go] - WG SET - {complete_output} , args {full_cmd}") return complete_output def create_interface(self, ifname): try: result_set = subprocess.Popen( ['wireguard-go', ifname], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE, start_new_session=True, ) result_set.wait(timeout=2) except FileNotFoundError: raise WgConfException(f'Wireguard-go missing') complete_output = result_set.stdout or result_set.stderr complete_output = complete_output or 'Success' logger.debug(f"[Wireguard-go] - WG Create - {complete_output.read()} , args {ifname}") return complete_output def info(self, ifname): return { "listen_port": get_wg_listen_port(ifname) }
[ "nacl.public.PrivateKey.generate", "socket.socket", "pyroute2.IPDB", "platform_agent.files.tmp_files.get_peer_metadata", "pathlib.Path", "platform_agent.wireguard.helpers.find_free_port", "platform_agent.cmd.wg_show.get_wg_listen_port", "platform_agent.cmd.iptables.add_iptables_forward", "platform_agent.cmd.iptables.delete_iptable_rules", "subprocess.Popen", "platform_agent.cmd.lsmod.module_loaded", "pyroute2.WireGuard", "re.match", "platform_agent.wireguard.helpers.get_peer_info", "subprocess.run", "platform_agent.cmd.iptables.add_iptable_rules", "os.environ.get", "platform_agent.routes.Routes", "logging.getLogger" ]
[((695, 714), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (712, 714), False, 'import logging\n'), ((796, 886), 'subprocess.run', 'subprocess.run', (["['ip', 'link', 'del', ifname]"], {'check': '(False)', 'stderr': 'subprocess.DEVNULL'}), "(['ip', 'link', 'del', ifname], check=False, stderr=\n subprocess.DEVNULL)\n", (810, 886), False, 'import subprocess\n'), ((932, 1048), 'subprocess.run', 'subprocess.run', (["['ip', 'link', 'add', 'dev', ifname, 'type', 'wireguard']"], {'check': '(True)', 'stderr': 'subprocess.DEVNULL'}), "(['ip', 'link', 'add', 'dev', ifname, 'type', 'wireguard'],\n check=True, stderr=subprocess.DEVNULL)\n", (946, 1048), False, 'import subprocess\n'), ((1149, 1244), 'subprocess.run', 'subprocess.run', (["['ip', 'link', 'set', 'up', ifname]"], {'check': '(True)', 'stderr': 'subprocess.DEVNULL'}), "(['ip', 'link', 'set', 'up', ifname], check=True, stderr=\n subprocess.DEVNULL)\n", (1163, 1244), False, 'import subprocess\n'), ((1348, 1450), 'subprocess.run', 'subprocess.run', (["['ip', 'address', 'add', 'dev', ifname, ip]"], {'check': '(True)', 'stderr': 'subprocess.DEVNULL'}), "(['ip', 'address', 'add', 'dev', ifname, ip], check=True,\n stderr=subprocess.DEVNULL)\n", (1362, 1450), False, 'import subprocess\n'), ((1583, 1609), 'platform_agent.cmd.lsmod.module_loaded', 'module_loaded', (['"""wireguard"""'], {}), "('wireguard')\n", (1596, 1609), False, 'from platform_agent.cmd.lsmod import module_loaded\n'), ((1697, 1703), 'pyroute2.IPDB', 'IPDB', ([], {}), '()\n', (1701, 1703), False, 'from pyroute2 import IPDB, WireGuard, NetlinkError\n'), ((1726, 1734), 'platform_agent.routes.Routes', 'Routes', ([], {}), '()\n', (1732, 1734), False, 'from platform_agent.routes import Routes\n'), ((4374, 4396), 'pathlib.Path', 'Path', (['private_key_path'], {}), '(private_key_path)\n', (4378, 4396), False, 'from pathlib import Path\n'), ((4418, 4439), 'pathlib.Path', 'Path', (['public_key_path'], {}), '(public_key_path)\n', (4422, 4439), False, 'from 
pathlib import Path\n'), ((5213, 5262), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (5226, 5262), False, 'import socket\n'), ((7012, 7040), 'platform_agent.cmd.iptables.add_iptables_forward', 'add_iptables_forward', (['ifname'], {}), '(ifname)\n', (7032, 7040), False, 'from platform_agent.cmd.iptables import add_iptable_rules, delete_iptable_rules, add_iptables_forward\n'), ((7508, 7548), 'platform_agent.files.tmp_files.get_peer_metadata', 'get_peer_metadata', ([], {'public_key': 'public_key'}), '(public_key=public_key)\n', (7525, 7548), False, 'from platform_agent.files.tmp_files import get_peer_metadata\n'), ((8348, 8378), 'platform_agent.cmd.iptables.add_iptable_rules', 'add_iptable_rules', (['allowed_ips'], {}), '(allowed_ips)\n', (8365, 8378), False, 'from platform_agent.cmd.iptables import add_iptable_rules, delete_iptable_rules, add_iptables_forward\n'), ((10902, 10997), 'subprocess.run', 'subprocess.run', (['full_cmd'], {'encoding': '"""utf-8"""', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE'}), "(full_cmd, encoding='utf-8', stdout=subprocess.PIPE, stderr=\n subprocess.PIPE)\n", (10916, 10997), False, 'import subprocess\n'), ((1628, 1639), 'pyroute2.WireGuard', 'WireGuard', ([], {}), '()\n', (1637, 1639), False, 'from pyroute2 import IPDB, WireGuard, NetlinkError\n'), ((2484, 2490), 'pyroute2.IPDB', 'IPDB', ([], {}), '()\n', (2488, 2490), False, 'from pyroute2 import IPDB, WireGuard, NetlinkError\n'), ((4022, 4051), 'platform_agent.wireguard.helpers.get_peer_info', 'get_peer_info', (['iface', 'self.wg'], {}), '(iface, self.wg)\n', (4035, 4051), False, 'from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT\n'), ((4528, 4549), 'nacl.public.PrivateKey.generate', 'PrivateKey.generate', ([], {}), '()\n', (4547, 4549), False, 'from nacl.public import PrivateKey\n'), ((5678, 5718), 
'platform_agent.files.tmp_files.get_peer_metadata', 'get_peer_metadata', ([], {'public_key': 'public_key'}), '(public_key=public_key)\n', (5695, 5718), False, 'from platform_agent.files.tmp_files import get_peer_metadata\n'), ((6192, 6229), 'os.environ.get', 'os.environ.get', (['"""SYNTROPY_PORT_RANGE"""'], {}), "('SYNTROPY_PORT_RANGE')\n", (6206, 6229), False, 'import os\n'), ((6277, 6293), 'platform_agent.wireguard.helpers.find_free_port', 'find_free_port', ([], {}), '()\n', (6291, 6293), False, 'from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT\n'), ((6843, 6859), 'platform_agent.wireguard.helpers.find_free_port', 'find_free_port', ([], {}), '()\n', (6857, 6859), False, 'from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT\n'), ((11311, 11448), 'subprocess.Popen', 'subprocess.Popen', (["['wireguard-go', ifname]"], {'encoding': '"""utf-8"""', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'start_new_session': '(True)'}), "(['wireguard-go', ifname], encoding='utf-8', stdout=\n subprocess.PIPE, stderr=subprocess.PIPE, start_new_session=True)\n", (11327, 11448), False, 'import subprocess\n'), ((11990, 12016), 'platform_agent.cmd.wg_show.get_wg_listen_port', 'get_wg_listen_port', (['ifname'], {}), '(ifname)\n', (12008, 12016), False, 'from platform_agent.cmd.wg_show import get_wg_listen_port\n'), ((7621, 7661), 'platform_agent.wireguard.helpers.get_peer_info', 'get_peer_info', ([], {'ifname': 'ifname', 'wg': 'self.wg'}), '(ifname=ifname, wg=self.wg)\n', (7634, 7661), False, 'from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT\n'), ((9119, 9152), 'platform_agent.cmd.iptables.delete_iptable_rules', 'delete_iptable_rules', (['allowed_ips'], {}), '(allowed_ips)\n', (9139, 9152), False, 'from platform_agent.cmd.iptables import add_iptable_rules, delete_iptable_rules, add_iptables_forward\n'), 
((10729, 10745), 'platform_agent.wireguard.helpers.find_free_port', 'find_free_port', ([], {}), '()\n', (10743, 10745), False, 'from platform_agent.wireguard.helpers import find_free_port, get_peer_info, WG_NAME_PATTERN, WG_SYNTROPY_INT\n'), ((2572, 2600), 're.match', 're.match', (['WG_NAME_PATTERN', 'k'], {}), '(WG_NAME_PATTERN, k)\n', (2580, 2600), False, 'import re\n')]
import discord from discord.ext import commands, tasks from discord.voice_client import VoiceClient import youtube_dl import urllib.parse, urllib.request import re import json from random import choice youtube_dl.utils.bug_reports_message = lambda: '' ytdl_format_options = { 'format': 'bestaudio/best', 'outtmpl': '%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': False, 'noplaylist': False, 'nocheckcertificate': True, 'ignoreerrors': True, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0' # bind to ipv4 } ffmpeg_options = { 'options': '' } ytdl = youtube_dl.YoutubeDL(ytdl_format_options) class YTDLSource(discord.PCMVolumeTransformer): def __init__(self, source, *, data, volume=0.5): super().__init__(source, volume) self.data = data self.title = data.get('title') self.url = data.get('url') @classmethod async def from_url(cls, url, *, loop=None, stream=False): loop = loop or asyncio.get_event_loop() data = await loop.run_in_executor(None, lambda: ytdl.extract_info(url, download=not stream)) if 'entries' in data: # take first item from a playlist data = data['entries'][0] filename = data['url'] if stream else ytdl.prepare_filename(data) return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data) client = commands.Bot(command_prefix='seulgi ') status = ['seulgi queue **url/song name**', 'seulgi view', 'seulgi play ~Song Name~'] queue = [] @client.event async def on_ready(): print('Bot is online!') @client.event async def on_member_join(member): channel = discord.utils.get(member.guild.channels, name='general') await channel.send(f'Welcome {member.mention}! Ready to jam out? 
See `Seulgi help` command for details!') @client.command(name='ping', help='Seulgi returns your latency') async def ping(ctx): await ctx.send(f'**Pong!** Latency: {round(client.latency * 1000)}ms') @client.command(name='hello', help='Seulgi choose a random hello message for you') async def hello(ctx): responses = ['***grumble*** Why did you wake me up?', 'Top of the morning to you lad!', 'Hello, how are you?', 'Hi', '**Wasssuup!**'] await ctx.send(choice(responses)) @client.command(name="quem", help='This returns the true love') async def teamo(ctx, *args): buf="" for x in args: buf+= x + " " buf = buf[:-1] if buf=="e meu amor": await ctx.send('tami') else: await ctx.send("quem?") @client.command(name='join', help='This command makes the bot join the voice channel') async def join(ctx): if not ctx.message.author.voice: await ctx.send("Você não está em um canal, e se nos encontrassemos no Geral? 👉👈") return else: channel = ctx.message.author.voice.channel await channel.connect() @client.command(name='queue', help='This command adds a song to the queue') async def queue_(ctx, url, *args): global queue helptext = url for word in args: helptext+= word queue.append(helptext) await ctx.send(f'`{queue[0]}` added to queue!') @client.command(name='skip', help='This command skip a song') async def skip(ctx): global queue server = ctx.message.guild voice_channel = server.voice_client if (len(queue)): async with ctx.typing(): player = await YTDLSource.from_url(queue[0], loop=client.loop) voice_channel.source = player await ctx.send('Now playing: {}'.format(player.title)) await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title)))) del(queue[0]) else: await ctx.send("**Your queue is empty!**") @client.command(name='remove', help='This command removes an item from the list') async def remove(ctx, number): global queue try: del(queue[int(number)]) await ctx.send(f'Your queue is now `{queue}!`') except: await 
ctx.send('Your queue is either **empty** or the index is **out of range**') @client.command(name='play', help='This command plays songs') async def play(ctx, url=None,*args): global queue try: await ctx.invoke(client.get_command('join')) except: pass #Seulgi is already at the channel server = ctx.message.guild voice_channel = server.voice_client helptext = url for word in args: helptext+= ' '+word queue.append(helptext) if voice_channel.is_playing(): await ctx.send(f'`{queue[len(queue)-1]}` added to queue!') return async with ctx.typing(): player = await YTDLSource.from_url(queue[0], loop=client.loop) voice_channel.play(player, after=lambda e: print('Player error: %s' % e) if e else None) playing.start(ctx) await ctx.send('**Now playing:** {}'.format(player.title)) await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title)))) del(queue[0]) @client.command(name='pause', help='This command pauses the song') async def pause(ctx): server = ctx.message.guild voice_channel = server.voice_client playing.cancel() voice_channel.pause() @client.command(name='resume', help='This command resumes the song!') async def resume(ctx): server = ctx.message.guild voice_channel = server.voice_client playing.start(ctx) voice_channel.resume() @client.command(name='view', help='This command shows the queue') async def view(ctx): indexes = list(range(len(queue))) pretty_display = dict(zip(indexes,queue)) await ctx.send(f'Your queue is now:\n```py\n{json.dumps(pretty_display, indent=4)}```') @client.command(name='leave', help='This command stops makes the bot leave the voice channel') async def leave(ctx): voice_client = ctx.message.guild.voice_client playing.cancel() await voice_client.disconnect() @client.command(name='stop', help='This command stops the song!') async def stop(ctx): server = ctx.message.guild voice_channel = server.voice_client playing.cancel() voice_channel.stop() @tasks.loop(seconds=1) async def 
playing(ctx): global queue voice_channel = ctx.message.guild.voice_client if(voice_channel.is_playing() is False): async with ctx.typing(): player = await YTDLSource.from_url(queue[0], loop=client.loop) voice_channel.play(player, after=lambda e: print('Player error: %s' % e) if e else None) await ctx.send('**Now playing:** {}'.format(player.title)) await client.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=('{}'.format(player.title)))) del (queue[0]) else: pass client.run('') #add your token there and run
[ "discord.utils.get", "random.choice", "json.dumps", "discord.ext.tasks.loop", "youtube_dl.YoutubeDL", "discord.FFmpegPCMAudio", "discord.ext.commands.Bot" ]
[((679, 720), 'youtube_dl.YoutubeDL', 'youtube_dl.YoutubeDL', (['ytdl_format_options'], {}), '(ytdl_format_options)\n', (699, 720), False, 'import youtube_dl\n'), ((1476, 1514), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""seulgi """'}), "(command_prefix='seulgi ')\n", (1488, 1514), False, 'from discord.ext import commands, tasks\n'), ((6286, 6307), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(1)'}), '(seconds=1)\n', (6296, 6307), False, 'from discord.ext import commands, tasks\n'), ((1741, 1797), 'discord.utils.get', 'discord.utils.get', (['member.guild.channels'], {'name': '"""general"""'}), "(member.guild.channels, name='general')\n", (1758, 1797), False, 'import discord\n'), ((1403, 1453), 'discord.FFmpegPCMAudio', 'discord.FFmpegPCMAudio', (['filename'], {}), '(filename, **ffmpeg_options)\n', (1425, 1453), False, 'import discord\n'), ((2342, 2359), 'random.choice', 'choice', (['responses'], {}), '(responses)\n', (2348, 2359), False, 'from random import choice\n'), ((5811, 5847), 'json.dumps', 'json.dumps', (['pretty_display'], {'indent': '(4)'}), '(pretty_display, indent=4)\n', (5821, 5847), False, 'import json\n')]
# Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE from threading import Thread from functools import partial from inspect import signature class ThreadWithStop(Thread): def __init__(self,*args,**kwargs): """An extended version of the thread superclass, it contains a new attribute (_running) and a new method (stop). The '_running' flag can be used to control the state of the 'run' method and the 'stop' method can stop the running by changing its value. 
Raises ------ ValueError the 'target' parameter of the constructor have to be a unbounded function Examples -------- Creating a new subclass of 'ThreadWithStop' superclass: class AThread(ThreadWithStop): def run(self): while sel._running: ... th1 = AThread() th1.start() ... th1.stop() th1.join() An example with local function and witouth subclass definition: def examplesFunc(self,param): while self._running ... th1 = ThreadWithStop(target = examplesFunc, args = (param,)) th1.start() ... th1.stop() th1.join() """ #Check the target parameter definition. If it isn't a bounded method, then we have to give like the first parameter the new object. Thus the run method can access the object's field, (like self._running). if 'target' in kwargs: if not hasattr(kwargs['target'], '__self__'): kwargs['target'] = partial(kwargs['target'],self) else: raise ValueError("target parameter must be a unbounded function") super(ThreadWithStop,self).__init__(*args,**kwargs) self._running = True def stop(self): """This method has role to stop the thread by setting the '_running' flag to false value. """ self._running = False
[ "functools.partial" ]
[((3234, 3265), 'functools.partial', 'partial', (["kwargs['target']", 'self'], {}), "(kwargs['target'], self)\n", (3241, 3265), False, 'from functools import partial\n')]
#!/usr/bin/env python import numpy as np from typing import Callable def rectangle(a: float, b: float, f: Callable[[np.array], np.array], h: float) -> float: return h * np.sum(f(np.arange(a + h / 2, b + h / 2, h))) def trapezoid(a: float, b: float, f: Callable[[np.array], np.array], h: float) -> float: return h / 2 * (f(a) + 2 * np.sum(f(np.arange(a + h, b, h))) + f(b)) def simpson(a: float, b: float, f: Callable[[np.array], np.array], h: float) -> float: return h / 6 * (f(a) + 2 * np.sum(f(np.arange(a + h, b, h))) + 4 * np.sum(f(np.arange(a + h / 2, b + h / 2, h))) + f(b))
[ "numpy.arange" ]
[((193, 227), 'numpy.arange', 'np.arange', (['(a + h / 2)', '(b + h / 2)', 'h'], {}), '(a + h / 2, b + h / 2, h)\n', (202, 227), True, 'import numpy as np\n'), ((370, 392), 'numpy.arange', 'np.arange', (['(a + h)', 'b', 'h'], {}), '(a + h, b, h)\n', (379, 392), True, 'import numpy as np\n'), ((590, 624), 'numpy.arange', 'np.arange', (['(a + h / 2)', '(b + h / 2)', 'h'], {}), '(a + h / 2, b + h / 2, h)\n', (599, 624), True, 'import numpy as np\n'), ((541, 563), 'numpy.arange', 'np.arange', (['(a + h)', 'b', 'h'], {}), '(a + h, b, h)\n', (550, 563), True, 'import numpy as np\n')]
"""Views""" import json from math import log2 from django.contrib.auth import authenticate, login, logout from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt from django.shortcuts import render from django.db import IntegrityError from django.http import JsonResponse, HttpResponseRedirect from django.urls import reverse from .models import User, Game # Index -------------------------------------------------------- def index(request): """Index Page""" if request.method != "GET": return JsonResponse({"error": "GET request required"}, status=405) # view return render(request, "index.html") # Game --------------------------------------------------------- @login_required(login_url="game:loginUser") def play(request): """Play Page""" if request.method != "GET": return JsonResponse({"error": "GET request required"}, status=405) # view return render(request, "game/play.html", {"nav": "play"}) @csrf_exempt @login_required(login_url="game:loginUser") def leaderboard(request): """LeaderBoard Page""" if request.method != "POST" and request.method != "GET": return JsonResponse({"error": "POST or GET request required"}, status=405) if request.method == "POST": content = json.loads(request.body) value = content.get("value") time = content.get("time") value = round(log2(value) - 10) # Check if caule is valid if value > 6 or value < 1: return JsonResponse({"error": "Invalid Value"}, status=400) # Add to DB game = Game() game.user = request.user game.max_value = value game.time = time game.save() return JsonResponse({"saved": True}, status=201) # view return render(request, "game/leaderboard.html", { "nav": "leaderboard", "content": [content.serialize() for content in Game.objects.order_by("-max_value", "time")] }) # Users -------------------------------------------------------- def create(request): """Create Account""" if request.method != "POST" and request.method != "GET": return JsonResponse({"error": "POST or GET 
request required"}, status=405) if request.method == "POST": username = request.POST["username"] email = request.POST["email"] # Ensure password matches confirmation password = request.POST["password"] confirmation = request.POST["confirmation"] if password != confirmation: return render(request, "users/create.html", { "nav": "create", "message": "Passwords must match" }) # Attempt to create new user try: user = User.objects.create_user(username, email, password) user.save() except IntegrityError: return render(request, "users/create.html", { "nav": "create", "message": "Username already taken" }) # Login the user and redirect login(request, user) return HttpResponseRedirect(reverse("game:index")) # view return render(request, "users/create.html", {"nav": "create"}) def login_view(request): """Login View""" if request.method != "POST" and request.method != "GET": return JsonResponse({"error": "POST or GET request required"}, status=405) if request.method == "POST": # Attempt to sign user in username = request.POST["username"] password = request.POST["password"] user = authenticate(request, username=username, password=password) # Check if authentication successful if user is not None: login(request, user) return HttpResponseRedirect(reverse("game:index")) return render(request, "users/login.html", { "nav": "login", "message": "Invalid username and/or password." }) # view return render(request, "users/login.html", {"nav": "login"}) def logout_view(request): """LogOut""" if request.method != "GET": return JsonResponse({"error": "GET request required"}, status=405) logout(request) return HttpResponseRedirect(reverse("game:index"))
[ "django.contrib.auth.decorators.login_required", "json.loads", "django.http.JsonResponse", "django.urls.reverse", "django.contrib.auth.logout", "django.contrib.auth.authenticate", "django.shortcuts.render", "math.log2", "django.contrib.auth.login" ]
[((753, 795), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""game:loginUser"""'}), "(login_url='game:loginUser')\n", (767, 795), False, 'from django.contrib.auth.decorators import login_required\n'), ((1032, 1074), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': '"""game:loginUser"""'}), "(login_url='game:loginUser')\n", (1046, 1074), False, 'from django.contrib.auth.decorators import login_required\n'), ((654, 683), 'django.shortcuts.render', 'render', (['request', '"""index.html"""'], {}), "(request, 'index.html')\n", (660, 683), False, 'from django.shortcuts import render\n'), ((965, 1015), 'django.shortcuts.render', 'render', (['request', '"""game/play.html"""', "{'nav': 'play'}"], {}), "(request, 'game/play.html', {'nav': 'play'})\n", (971, 1015), False, 'from django.shortcuts import render\n'), ((3212, 3267), 'django.shortcuts.render', 'render', (['request', '"""users/create.html"""', "{'nav': 'create'}"], {}), "(request, 'users/create.html', {'nav': 'create'})\n", (3218, 3267), False, 'from django.shortcuts import render\n'), ((4037, 4090), 'django.shortcuts.render', 'render', (['request', '"""users/login.html"""', "{'nav': 'login'}"], {}), "(request, 'users/login.html', {'nav': 'login'})\n", (4043, 4090), False, 'from django.shortcuts import render\n'), ((4248, 4263), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (4254, 4263), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((571, 630), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'GET request required'}"], {'status': '(405)'}), "({'error': 'GET request required'}, status=405)\n", (583, 630), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((882, 941), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'GET request required'}"], {'status': '(405)'}), "({'error': 'GET request required'}, status=405)\n", (894, 941), False, 'from 
django.http import JsonResponse, HttpResponseRedirect\n'), ((1204, 1271), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'POST or GET request required'}"], {'status': '(405)'}), "({'error': 'POST or GET request required'}, status=405)\n", (1216, 1271), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((1324, 1348), 'json.loads', 'json.loads', (['request.body'], {}), '(request.body)\n', (1334, 1348), False, 'import json\n'), ((1771, 1812), 'django.http.JsonResponse', 'JsonResponse', (["{'saved': True}"], {'status': '(201)'}), "({'saved': True}, status=201)\n", (1783, 1812), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((2206, 2273), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'POST or GET request required'}"], {'status': '(405)'}), "({'error': 'POST or GET request required'}, status=405)\n", (2218, 2273), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((3109, 3129), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3114, 3129), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((3392, 3459), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'POST or GET request required'}"], {'status': '(405)'}), "({'error': 'POST or GET request required'}, status=405)\n", (3404, 3459), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((3631, 3690), 'django.contrib.auth.authenticate', 'authenticate', (['request'], {'username': 'username', 'password': 'password'}), '(request, username=username, password=password)\n', (3643, 3690), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((3878, 3983), 'django.shortcuts.render', 'render', (['request', '"""users/login.html"""', "{'nav': 'login', 'message': 'Invalid username and/or password.'}"], {}), "(request, 'users/login.html', {'nav': 'login', 'message':\n 'Invalid username and/or password.'})\n", (3884, 3983), False, 'from 
django.shortcuts import render\n'), ((4183, 4242), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'GET request required'}"], {'status': '(405)'}), "({'error': 'GET request required'}, status=405)\n", (4195, 4242), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((4296, 4317), 'django.urls.reverse', 'reverse', (['"""game:index"""'], {}), "('game:index')\n", (4303, 4317), False, 'from django.urls import reverse\n'), ((1551, 1603), 'django.http.JsonResponse', 'JsonResponse', (["{'error': 'Invalid Value'}"], {'status': '(400)'}), "({'error': 'Invalid Value'}, status=400)\n", (1563, 1603), False, 'from django.http import JsonResponse, HttpResponseRedirect\n'), ((2590, 2684), 'django.shortcuts.render', 'render', (['request', '"""users/create.html"""', "{'nav': 'create', 'message': 'Passwords must match'}"], {}), "(request, 'users/create.html', {'nav': 'create', 'message':\n 'Passwords must match'})\n", (2596, 2684), False, 'from django.shortcuts import render\n'), ((3166, 3187), 'django.urls.reverse', 'reverse', (['"""game:index"""'], {}), "('game:index')\n", (3173, 3187), False, 'from django.urls import reverse\n'), ((3778, 3798), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (3783, 3798), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1444, 1455), 'math.log2', 'log2', (['value'], {}), '(value)\n', (1448, 1455), False, 'from math import log2\n'), ((2923, 3019), 'django.shortcuts.render', 'render', (['request', '"""users/create.html"""', "{'nav': 'create', 'message': 'Username already taken'}"], {}), "(request, 'users/create.html', {'nav': 'create', 'message':\n 'Username already taken'})\n", (2929, 3019), False, 'from django.shortcuts import render\n'), ((3839, 3860), 'django.urls.reverse', 'reverse', (['"""game:index"""'], {}), "('game:index')\n", (3846, 3860), False, 'from django.urls import reverse\n')]
# -*- coding: utf-8 -*-
"""AWS Lambda: fax a Xero invoice's PDF to the invoice contact via Phaxio."""
import os

from phaxio import PhaxioApi
from xero import Xero
from xero.auth import PrivateCredentials

# Setup the Xero Private App
# Ensure your private key is listed in your .gitignore, so it doesn't end up in your repo.
# The Xero API Consumer Key is retrieved via an environment variable.
# Ensure you add the environment variable on your local machine AND in your
# AWS Lambda settings (via config.yaml).
with open('./keys/lambdaprivatekey.pem') as keyfile:
    rsa_key = keyfile.read()

credentials = PrivateCredentials(os.getenv("XERO_CONSUMER_KEY_PHAXIO"), rsa_key)
xero = Xero(credentials)

# Setup PhaxioAPI
# Phaxio API keys are set via environment variables.
# Ensure you add the environment variables on your local machine AND in your
# AWS Lambda settings (via config.yaml).
phaxio = PhaxioApi(os.getenv('PHAXIO_API_KEY'), os.getenv('PHAXIO_API_SECRET'))


def _no_fax_number_response():
    """Build the 400 response used when the contact has no usable fax number."""
    return {
        "statusCode": 400,
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True
        },
        "body": 'Contact does not have a Fax Number, add Fax Number to Contact in Xero and try again.'}


def handler(event, context):
    """Lambda entry point.

    Expects ``event['invoice_id']``; fetches the invoice from Xero, faxes the
    invoice PDF to the contact's fax number via Phaxio, marks the invoice as
    sent in Xero, and returns an API-Gateway-style response dict.
    """
    # invoice_id is the only param sent to the lambda function
    invoice_id = event.get('invoice_id')

    # Use the invoice_id to retrieve the invoice details from the Xero API
    invoice = xero.invoices.get(invoice_id)

    # BUG FIX: fax_number must be initialised *inside* the handler. The old
    # module-level `fax_number = None` did not apply here, because the
    # assignment in the loop below makes `fax_number` a local variable
    # throughout this function — so a contact without a FAX phone entry
    # raised UnboundLocalError instead of returning the intended 400.
    fax_number = None

    # Included in the invoice response is the Contact and their Fax Number; parse it out.
    for phone in invoice[0]['Contact']['Phones']:
        if phone['PhoneType'] == "FAX":
            # The number is only usable when all three parts are present.
            if not all((phone['PhoneCountryCode'], phone['PhoneAreaCode'], phone['PhoneNumber'])):
                return _no_fax_number_response()
            fax_number = phone['PhoneCountryCode'] + phone['PhoneAreaCode'] + phone['PhoneNumber']

    # Sanity check: did the contact have a fax number at all?
    if fax_number is None:
        return _no_fax_number_response()

    # Now lets prepare to send a fax with Phaxio.
    # First, retrieve the PDF version of the invoice from the Xero API.
    invoice_pdf = xero.invoices.get(invoice_id, headers={'Accept': 'application/pdf'})

    # Save the PDF to disk temporarily (Phaxio's client sends from a file path).
    # `with` guarantees the handle is closed even if the write fails.
    with open('/tmp/tmp_fax.pdf', 'wb') as pdf_file:
        pdf_file.write(invoice_pdf)

    # Send the PDF invoice as a fax with Phaxio
    response = phaxio.Fax.send(fax_number,
                               files='/tmp/tmp_fax.pdf',
                               header_text='Xero Invoice Faxed with Phaxio',
                               tags_dict={'invoice_id': invoice_id})

    # Uploading a confirmation of the fax receipt would require a second
    # Lambda function invoked via the Phaxio callback URL, since the fax is
    # not sent immediately. Skipping this feature for now.

    # Mark the invoice as sent
    invoice[0]['SentToContact'] = True
    xero.invoices.save(invoice[0])

    # Return status 200, adding the relevant CORS headers to handle
    # cross-domain scripting.
    return {
        "statusCode": 200,
        "headers": {
            "Content-Type": "application/json",
            "Access-Control-Allow-Origin": "*",
            "Access-Control-Allow-Credentials": True
        },
        "body": 'Fax Sent Successfully'
    }
[ "xero.Xero", "os.getenv" ]
[((596, 613), 'xero.Xero', 'Xero', (['credentials'], {}), '(credentials)\n', (600, 613), False, 'from xero import Xero\n'), ((541, 578), 'os.getenv', 'os.getenv', (['"""XERO_CONSUMER_KEY_PHAXIO"""'], {}), "('XERO_CONSUMER_KEY_PHAXIO')\n", (550, 578), False, 'import os\n'), ((819, 846), 'os.getenv', 'os.getenv', (['"""PHAXIO_API_KEY"""'], {}), "('PHAXIO_API_KEY')\n", (828, 846), False, 'import os\n'), ((848, 878), 'os.getenv', 'os.getenv', (['"""PHAXIO_API_SECRET"""'], {}), "('PHAXIO_API_SECRET')\n", (857, 878), False, 'import os\n')]
from dateutil.relativedelta import relativedelta

from custom.icds_reports.const import AGG_AVAILING_SERVICES_TABLE
from custom.icds_reports.utils.aggregation_helpers import month_formatter
from custom.icds_reports.utils.aggregation_helpers.distributed.base import (
    StateBasedAggregationDistributedHelper,
)


class AvailingServiceFormsAggregationDistributedHelper(StateBasedAggregationDistributedHelper):
    """Aggregates "availing service" form submissions into the monthly
    AGG_AVAILING_SERVICES_TABLE, one state (``self.state_id``) at a time.

    Both query methods return ``(sql, params)`` pairs for psycopg-style
    ``%(name)s`` parameter binding; table names are interpolated via
    ``str.format`` before execution.
    """

    helper_key = 'availing_service-forms'
    # UCR data source the raw form rows are read from.
    ucr_data_source_id = 'static-availing_service_form'
    # Destination table for the monthly aggregate.
    aggregate_parent_table = AGG_AVAILING_SERVICES_TABLE

    def data_from_ucr_query(self):
        """Build the query selecting this month's person-level rows from the UCR.

        SELECT DISTINCT plus the LAST_VALUE window over forms ordered by
        ``timeend`` is intended to collapse multiple submissions for the same
        (supervisor_id, person_case_id) into the most recent ``is_registered``
        flag. NOTE(review): ``timeend AS registration_date`` is per-row (not
        windowed), so duplicates differing only in ``timeend`` can survive the
        DISTINCT — presumably resolved downstream; verify against callers.

        :return: ``(sql, query_params)`` tuple.
        """
        month = self.month.replace(day=1)
        # Half-open window [current_month_start, next_month_start) over timeend.
        current_month_start = month_formatter(self.month)
        next_month_start = month_formatter(self.month + relativedelta(months=1))

        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "current_month_start": current_month_start,
            "next_month_start": next_month_start,
        }

        return """
        SELECT DISTINCT
            %(state_id)s AS state_id,
            supervisor_id,
            awc_id,
            %(month)s::DATE AS month,
            person_case_id AS person_case_id,
            LAST_VALUE(is_registered) OVER w AS is_registered,
            timeend AS registration_date
        FROM "{ucr_tablename}"
        WHERE state_id = %(state_id)s AND
              timeend >= %(current_month_start)s AND
              timeend < %(next_month_start)s AND
              person_case_id IS NOT NULL
        WINDOW w AS (
            PARTITION BY supervisor_id, person_case_id
            ORDER BY timeend
            RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=self.aggregate_parent_table,
        ), query_params

    def aggregation_query(self):
        """Build the INSERT that rolls this month's rows into the aggregate table.

        FULL OUTER JOINs the fresh UCR rows against last month's aggregate so
        a person seen in either month is carried forward; COALESCE prefers the
        current month's values over last month's. The trailing ``coalesce``
        predicates keep rows whose missing side defaults to the expected
        month/state, i.e. they only filter mismatched join partners.

        :return: ``(sql, query_params)`` tuple.
        """
        month = self.month.replace(day=1)
        ucr_query, ucr_query_params = self.data_from_ucr_query()

        query_params = {
            "month": month_formatter(month),
            "state_id": self.state_id,
            "previous_month": month_formatter(month - relativedelta(months=1)),
        }
        # Merge in the params the embedded UCR subquery needs.
        query_params.update(ucr_query_params)

        return """
        INSERT INTO "{tablename}" (
            state_id, supervisor_id, awc_id, month, person_case_id,
            is_registered, registration_date
        ) (
            SELECT
                %(state_id)s AS state_id,
                COALESCE(ucr.supervisor_id, prev_month.supervisor_id) AS supervisor_id,
                COALESCE(ucr.awc_id, prev_month.awc_id) AS awc_id,
                %(month)s::DATE AS month,
                COALESCE(ucr.person_case_id, prev_month.person_case_id) AS person_case_id,
                COALESCE(ucr.is_registered, prev_month.is_registered) as is_registered,
                COALESCE(ucr.registration_date, prev_month.registration_date) as registration_date
            FROM ({ucr_table_query}) ucr
            FULL OUTER JOIN (
                SELECT * FROM "{tablename}"
                WHERE month = %(previous_month)s AND state_id = %(state_id)s
            ) prev_month
            ON ucr.person_case_id = prev_month.person_case_id
               AND ucr.supervisor_id = prev_month.supervisor_id
            WHERE coalesce(ucr.month, %(month)s) = %(month)s
                AND coalesce(prev_month.month, %(previous_month)s) = %(previous_month)s
                AND coalesce(prev_month.state_id, %(state_id)s) = %(state_id)s
        )
        """.format(
            ucr_tablename=self.ucr_tablename,
            tablename=self.aggregate_parent_table,
            ucr_table_query=ucr_query
        ), query_params
[ "custom.icds_reports.utils.aggregation_helpers.month_formatter", "dateutil.relativedelta.relativedelta" ]
[((674, 701), 'custom.icds_reports.utils.aggregation_helpers.month_formatter', 'month_formatter', (['self.month'], {}), '(self.month)\n', (689, 701), False, 'from custom.icds_reports.utils.aggregation_helpers import month_formatter\n'), ((830, 852), 'custom.icds_reports.utils.aggregation_helpers.month_formatter', 'month_formatter', (['month'], {}), '(month)\n', (845, 852), False, 'from custom.icds_reports.utils.aggregation_helpers import month_formatter\n'), ((2041, 2063), 'custom.icds_reports.utils.aggregation_helpers.month_formatter', 'month_formatter', (['month'], {}), '(month)\n', (2056, 2063), False, 'from custom.icds_reports.utils.aggregation_helpers import month_formatter\n'), ((758, 781), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (771, 781), False, 'from dateutil.relativedelta import relativedelta\n'), ((2158, 2181), 'dateutil.relativedelta.relativedelta', 'relativedelta', ([], {'months': '(1)'}), '(months=1)\n', (2171, 2181), False, 'from dateutil.relativedelta import relativedelta\n')]
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# URL routes for the public query/storage/Slack API, mapping regex paths
# to handlers in desktop.api_public and the botserver API.

import sys

from desktop import api_public
from desktop.lib.botserver import api as botserver_api

# Django 2+ exposes re_path from django.urls; on Python 2 (older Django)
# only django.conf.urls.url exists — same call signature, aliased here so
# the patterns below work on both.
if sys.version_info[0] > 2:
  from django.urls import re_path
else:
  from django.conf.urls import url as re_path


# "New" query API (i.e. connector based, lean arguments).
# e.g. https://demo.gethue.com/api/query/execute/hive
urlpatterns = [
  re_path(r'^query/create_notebook/?$', api_public.create_notebook, name='query_create_notebook'),
  re_path(r'^query/autocomplete/?$', api_public.autocomplete, name='query_autocomplete_databases'),
]

# Compatibility with "old" private API.
# e.g. https://demo.gethue.com/notebook/api/execute/hive
urlpatterns += [
  re_path(r'^get_config/?$', api_public.get_config),
  re_path(r'^get_namespaces/(?P<interface>[\w\-]+)/?$', api_public.get_context_namespaces),  # To remove

  # Notebook / session lifecycle.
  re_path(r'^editor/create_notebook/?$', api_public.create_notebook, name='editor_create_notebook'),
  re_path(r'^editor/create_session/?$', api_public.create_session, name='editor_create_session'),
  re_path(r'^editor/close_session/?$', api_public.close_session, name='editor_close_session'),

  # Statement execution and result retrieval.
  re_path(r'^editor/execute(?:/(?P<dialect>.+))?/?$', api_public.execute, name='editor_execute'),
  re_path(r'^editor/check_status/?$', api_public.check_status, name='editor_check_status'),
  re_path(r'^editor/fetch_result_data/?$', api_public.fetch_result_data, name='editor_fetch_result_data'),
  re_path(r'^editor/fetch_result_metadata/?$', api_public.fetch_result_metadata, name='editor_fetch_result_metadata'),
  re_path(r'^editor/fetch_result_size/?$', api_public.fetch_result_size, name='editor_fetch_result_size'),
  re_path(r'^editor/cancel_statement/?$', api_public.cancel_statement, name='editor_cancel_statement'),
  re_path(r'^editor/close_statement/?$', api_public.close_statement, name='editor_close_statement'),
  re_path(r'^editor/get_logs/?$', api_public.get_logs, name='editor_get_logs'),

  # Metadata describe: database / table / column stats.
  re_path(r'^editor/describe/(?P<database>[^/]*)/?$', api_public.describe, name='editor_describe_database'),
  re_path(r'^editor/describe/(?P<database>[^/]*)/(?P<table>[\w_\-]+)/?$', api_public.describe, name='editor_describe_table'),
  re_path(r'^editor/describe/(?P<database>[^/]*)/(?P<table>\w+)/stats(?:/(?P<column>\w+))?/?$', api_public.describe, name='editor_describe_column'),

  # Autocomplete at increasing nesting depth (db -> table -> column -> nested).
  re_path(r'^editor/autocomplete/?$', api_public.autocomplete, name='editor_autocomplete_databases'),
  re_path(
    r"^editor/autocomplete/(?P<database>[^/?]*)/?$",
    api_public.autocomplete,
    name="editor_autocomplete_tables",
  ),
  re_path(
    r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/?$",
    api_public.autocomplete,
    name="editor_autocomplete_columns",
  ),
  re_path(
    r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/?$",
    api_public.autocomplete,
    name="editor_autocomplete_column",
  ),
  re_path(
    r"^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\w_\-]+)/(?P<column>\w+)/(?P<nested>.+)/?$",
    api_public.autocomplete,
    name="editor_autocomplete_nested",
  ),
]

# File-storage endpoints (view / download / upload).
urlpatterns += [
  re_path(r'^storage/view=(?P<path>.*)$', api_public.storage_view, name='storage_view'),
  re_path(r'^storage/download=(?P<path>.*)$', api_public.storage_download, name='storage_download'),
  re_path(r'^storage/upload/file/?$', api_public.storage_upload_file, name='storage_upload_file'),
]

# Slack install API for using CORS by default
urlpatterns += [
  re_path(r'^slack/install/?$', botserver_api.generate_slack_install_link, name='botserver.api.slack_install_link'),
]
[ "django.conf.urls.url" ]
[((1137, 1236), 'django.conf.urls.url', 're_path', (['"""^query/create_notebook/?$"""', 'api_public.create_notebook'], {'name': '"""query_create_notebook"""'}), "('^query/create_notebook/?$', api_public.create_notebook, name=\n 'query_create_notebook')\n", (1144, 1236), True, 'from django.conf.urls import url as re_path\n'), ((1236, 1336), 'django.conf.urls.url', 're_path', (['"""^query/autocomplete/?$"""', 'api_public.autocomplete'], {'name': '"""query_autocomplete_databases"""'}), "('^query/autocomplete/?$', api_public.autocomplete, name=\n 'query_autocomplete_databases')\n", (1243, 1336), True, 'from django.conf.urls import url as re_path\n'), ((1453, 1501), 'django.conf.urls.url', 're_path', (['"""^get_config/?$"""', 'api_public.get_config'], {}), "('^get_config/?$', api_public.get_config)\n", (1460, 1501), True, 'from django.conf.urls import url as re_path\n'), ((1506, 1600), 'django.conf.urls.url', 're_path', (['"""^get_namespaces/(?P<interface>[\\\\w\\\\-]+)/?$"""', 'api_public.get_context_namespaces'], {}), "('^get_namespaces/(?P<interface>[\\\\w\\\\-]+)/?$', api_public.\n get_context_namespaces)\n", (1513, 1600), True, 'from django.conf.urls import url as re_path\n'), ((1612, 1713), 'django.conf.urls.url', 're_path', (['"""^editor/create_notebook/?$"""', 'api_public.create_notebook'], {'name': '"""editor_create_notebook"""'}), "('^editor/create_notebook/?$', api_public.create_notebook, name=\n 'editor_create_notebook')\n", (1619, 1713), True, 'from django.conf.urls import url as re_path\n'), ((1713, 1811), 'django.conf.urls.url', 're_path', (['"""^editor/create_session/?$"""', 'api_public.create_session'], {'name': '"""editor_create_session"""'}), "('^editor/create_session/?$', api_public.create_session, name=\n 'editor_create_session')\n", (1720, 1811), True, 'from django.conf.urls import url as re_path\n'), ((1811, 1906), 'django.conf.urls.url', 're_path', (['"""^editor/close_session/?$"""', 'api_public.close_session'], {'name': 
'"""editor_close_session"""'}), "('^editor/close_session/?$', api_public.close_session, name=\n 'editor_close_session')\n", (1818, 1906), True, 'from django.conf.urls import url as re_path\n'), ((1906, 2004), 'django.conf.urls.url', 're_path', (['"""^editor/execute(?:/(?P<dialect>.+))?/?$"""', 'api_public.execute'], {'name': '"""editor_execute"""'}), "('^editor/execute(?:/(?P<dialect>.+))?/?$', api_public.execute, name\n ='editor_execute')\n", (1913, 2004), True, 'from django.conf.urls import url as re_path\n'), ((2004, 2096), 'django.conf.urls.url', 're_path', (['"""^editor/check_status/?$"""', 'api_public.check_status'], {'name': '"""editor_check_status"""'}), "('^editor/check_status/?$', api_public.check_status, name=\n 'editor_check_status')\n", (2011, 2096), True, 'from django.conf.urls import url as re_path\n'), ((2096, 2203), 'django.conf.urls.url', 're_path', (['"""^editor/fetch_result_data/?$"""', 'api_public.fetch_result_data'], {'name': '"""editor_fetch_result_data"""'}), "('^editor/fetch_result_data/?$', api_public.fetch_result_data, name=\n 'editor_fetch_result_data')\n", (2103, 2203), True, 'from django.conf.urls import url as re_path\n'), ((2203, 2322), 'django.conf.urls.url', 're_path', (['"""^editor/fetch_result_metadata/?$"""', 'api_public.fetch_result_metadata'], {'name': '"""editor_fetch_result_metadata"""'}), "('^editor/fetch_result_metadata/?$', api_public.\n fetch_result_metadata, name='editor_fetch_result_metadata')\n", (2210, 2322), True, 'from django.conf.urls import url as re_path\n'), ((2322, 2429), 'django.conf.urls.url', 're_path', (['"""^editor/fetch_result_size/?$"""', 'api_public.fetch_result_size'], {'name': '"""editor_fetch_result_size"""'}), "('^editor/fetch_result_size/?$', api_public.fetch_result_size, name=\n 'editor_fetch_result_size')\n", (2329, 2429), True, 'from django.conf.urls import url as re_path\n'), ((2429, 2533), 'django.conf.urls.url', 're_path', (['"""^editor/cancel_statement/?$"""', 
'api_public.cancel_statement'], {'name': '"""editor_cancel_statement"""'}), "('^editor/cancel_statement/?$', api_public.cancel_statement, name=\n 'editor_cancel_statement')\n", (2436, 2533), True, 'from django.conf.urls import url as re_path\n'), ((2533, 2634), 'django.conf.urls.url', 're_path', (['"""^editor/close_statement/?$"""', 'api_public.close_statement'], {'name': '"""editor_close_statement"""'}), "('^editor/close_statement/?$', api_public.close_statement, name=\n 'editor_close_statement')\n", (2540, 2634), True, 'from django.conf.urls import url as re_path\n'), ((2634, 2709), 'django.conf.urls.url', 're_path', (['"""^editor/get_logs/?$"""', 'api_public.get_logs'], {'name': '"""editor_get_logs"""'}), "('^editor/get_logs/?$', api_public.get_logs, name='editor_get_logs')\n", (2641, 2709), True, 'from django.conf.urls import url as re_path\n'), ((2715, 2823), 'django.conf.urls.url', 're_path', (['"""^editor/describe/(?P<database>[^/]*)/?$"""', 'api_public.describe'], {'name': '"""editor_describe_database"""'}), "('^editor/describe/(?P<database>[^/]*)/?$', api_public.describe,\n name='editor_describe_database')\n", (2722, 2823), True, 'from django.conf.urls import url as re_path\n'), ((2824, 2951), 'django.conf.urls.url', 're_path', (['"""^editor/describe/(?P<database>[^/]*)/(?P<table>[\\\\w_\\\\-]+)/?$"""', 'api_public.describe'], {'name': '"""editor_describe_table"""'}), "('^editor/describe/(?P<database>[^/]*)/(?P<table>[\\\\w_\\\\-]+)/?$',\n api_public.describe, name='editor_describe_table')\n", (2831, 2951), True, 'from django.conf.urls import url as re_path\n'), ((2950, 3106), 'django.conf.urls.url', 're_path', (['"""^editor/describe/(?P<database>[^/]*)/(?P<table>\\\\w+)/stats(?:/(?P<column>\\\\w+))?/?$"""', 'api_public.describe'], {'name': '"""editor_describe_column"""'}), "(\n '^editor/describe/(?P<database>[^/]*)/(?P<table>\\\\w+)/stats(?:/(?P<column>\\\\w+))?/?$'\n , api_public.describe, name='editor_describe_column')\n", (2957, 3106), True, 'from 
django.conf.urls import url as re_path\n'), ((3100, 3202), 'django.conf.urls.url', 're_path', (['"""^editor/autocomplete/?$"""', 'api_public.autocomplete'], {'name': '"""editor_autocomplete_databases"""'}), "('^editor/autocomplete/?$', api_public.autocomplete, name=\n 'editor_autocomplete_databases')\n", (3107, 3202), True, 'from django.conf.urls import url as re_path\n'), ((3202, 3322), 'django.conf.urls.url', 're_path', (['"""^editor/autocomplete/(?P<database>[^/?]*)/?$"""', 'api_public.autocomplete'], {'name': '"""editor_autocomplete_tables"""'}), "('^editor/autocomplete/(?P<database>[^/?]*)/?$', api_public.\n autocomplete, name='editor_autocomplete_tables')\n", (3209, 3322), True, 'from django.conf.urls import url as re_path\n'), ((3345, 3487), 'django.conf.urls.url', 're_path', (['"""^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/?$"""', 'api_public.autocomplete'], {'name': '"""editor_autocomplete_columns"""'}), "('^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/?$',\n api_public.autocomplete, name='editor_autocomplete_columns')\n", (3352, 3487), True, 'from django.conf.urls import url as re_path\n'), ((3509, 3673), 'django.conf.urls.url', 're_path', (['"""^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/(?P<column>\\\\w+)/?$"""', 'api_public.autocomplete'], {'name': '"""editor_autocomplete_column"""'}), "(\n '^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/(?P<column>\\\\w+)/?$'\n , api_public.autocomplete, name='editor_autocomplete_column')\n", (3516, 3673), True, 'from django.conf.urls import url as re_path\n'), ((3688, 3867), 'django.conf.urls.url', 're_path', (['"""^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/(?P<column>\\\\w+)/(?P<nested>.+)/?$"""', 'api_public.autocomplete'], {'name': '"""editor_autocomplete_nested"""'}), "(\n '^editor/autocomplete/(?P<database>[^/?]*)/(?P<table>[\\\\w_\\\\-]+)/(?P<column>\\\\w+)/(?P<nested>.+)/?$'\n , 
api_public.autocomplete, name='editor_autocomplete_nested')\n", (3695, 3867), True, 'from django.conf.urls import url as re_path\n'), ((3902, 3991), 'django.conf.urls.url', 're_path', (['"""^storage/view=(?P<path>.*)$"""', 'api_public.storage_view'], {'name': '"""storage_view"""'}), "('^storage/view=(?P<path>.*)$', api_public.storage_view, name=\n 'storage_view')\n", (3909, 3991), True, 'from django.conf.urls import url as re_path\n'), ((3991, 4091), 'django.conf.urls.url', 're_path', (['"""^storage/download=(?P<path>.*)$"""', 'api_public.storage_download'], {'name': '"""storage_download"""'}), "('^storage/download=(?P<path>.*)$', api_public.storage_download,\n name='storage_download')\n", (3998, 4091), True, 'from django.conf.urls import url as re_path\n'), ((4092, 4191), 'django.conf.urls.url', 're_path', (['"""^storage/upload/file/?$"""', 'api_public.storage_upload_file'], {'name': '"""storage_upload_file"""'}), "('^storage/upload/file/?$', api_public.storage_upload_file, name=\n 'storage_upload_file')\n", (4099, 4191), True, 'from django.conf.urls import url as re_path\n'), ((4257, 4373), 'django.conf.urls.url', 're_path', (['"""^slack/install/?$"""', 'botserver_api.generate_slack_install_link'], {'name': '"""botserver.api.slack_install_link"""'}), "('^slack/install/?$', botserver_api.generate_slack_install_link,\n name='botserver.api.slack_install_link')\n", (4264, 4373), True, 'from django.conf.urls import url as re_path\n')]
# # Autogenerated by Thrift Compiler (0.9.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py # from thrift.Thrift import TType, TMessageType, TException, TApplicationException from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class ConnectionParams: """ Attributes: - client_id - seq_id - user - password - app_id - app_token - repo_base """ thrift_spec = ( None, # 0 (1, TType.STRING, 'client_id', None, None, ), # 1 (2, TType.STRING, 'seq_id', None, None, ), # 2 (3, TType.STRING, 'user', None, None, ), # 3 (4, TType.STRING, 'password', None, None, ), # 4 (5, TType.STRING, 'app_id', None, None, ), # 5 (6, TType.STRING, 'app_token', None, None, ), # 6 (7, TType.STRING, 'repo_base', None, None, ), # 7 ) def __init__(self, client_id=None, seq_id=None, user=None, password=None, app_id=None, app_token=None, repo_base=None,): self.client_id = client_id self.seq_id = seq_id self.user = user self.password = password self.app_id = app_id self.app_token = app_token self.repo_base = repo_base def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.client_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.seq_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.user = iprot.readString(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.password = iprot.readString(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: 
self.app_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRING: self.app_token = iprot.readString(); else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING: self.repo_base = iprot.readString(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ConnectionParams') if self.client_id is not None: oprot.writeFieldBegin('client_id', TType.STRING, 1) oprot.writeString(self.client_id) oprot.writeFieldEnd() if self.seq_id is not None: oprot.writeFieldBegin('seq_id', TType.STRING, 2) oprot.writeString(self.seq_id) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRING, 3) oprot.writeString(self.user) oprot.writeFieldEnd() if self.password is not None: oprot.writeFieldBegin('password', TType.STRING, 4) oprot.writeString(self.password) oprot.writeFieldEnd() if self.app_id is not None: oprot.writeFieldBegin('app_id', TType.STRING, 5) oprot.writeString(self.app_id) oprot.writeFieldEnd() if self.app_token is not None: oprot.writeFieldBegin('app_token', TType.STRING, 6) oprot.writeString(self.app_token) oprot.writeFieldEnd() if self.repo_base is not None: oprot.writeFieldBegin('repo_base', TType.STRING, 7) oprot.writeString(self.repo_base) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.client_id) value = (value * 31) ^ hash(self.seq_id) value = (value * 31) ^ hash(self.user) value = (value * 31) ^ hash(self.password) value = (value * 31) ^ hash(self.app_id) value = (value * 31) ^ hash(self.app_token) value = (value * 31) ^ hash(self.repo_base) return value def __repr__(self): L = ['%s=%r' % 
(key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Connection: """ Attributes: - client_id - seq_id - user - is_app - repo_base - cursor """ thrift_spec = ( None, # 0 (1, TType.STRING, 'client_id', None, None, ), # 1 (2, TType.STRING, 'seq_id', None, None, ), # 2 (3, TType.STRING, 'user', None, None, ), # 3 (4, TType.BOOL, 'is_app', None, None, ), # 4 (5, TType.STRING, 'repo_base', None, None, ), # 5 (6, TType.I64, 'cursor', None, None, ), # 6 ) def __init__(self, client_id=None, seq_id=None, user=None, is_app=None, repo_base=None, cursor=None,): self.client_id = client_id self.seq_id = seq_id self.user = user self.is_app = is_app self.repo_base = repo_base self.cursor = cursor def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.client_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.seq_id = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.user = iprot.readString(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.BOOL: self.is_app = iprot.readBool(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.repo_base = iprot.readString(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.I64: self.cursor = iprot.readI64(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if 
oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Connection') if self.client_id is not None: oprot.writeFieldBegin('client_id', TType.STRING, 1) oprot.writeString(self.client_id) oprot.writeFieldEnd() if self.seq_id is not None: oprot.writeFieldBegin('seq_id', TType.STRING, 2) oprot.writeString(self.seq_id) oprot.writeFieldEnd() if self.user is not None: oprot.writeFieldBegin('user', TType.STRING, 3) oprot.writeString(self.user) oprot.writeFieldEnd() if self.is_app is not None: oprot.writeFieldBegin('is_app', TType.BOOL, 4) oprot.writeBool(self.is_app) oprot.writeFieldEnd() if self.repo_base is not None: oprot.writeFieldBegin('repo_base', TType.STRING, 5) oprot.writeString(self.repo_base) oprot.writeFieldEnd() if self.cursor is not None: oprot.writeFieldBegin('cursor', TType.I64, 6) oprot.writeI64(self.cursor) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.client_id) value = (value * 31) ^ hash(self.seq_id) value = (value * 31) ^ hash(self.user) value = (value * 31) ^ hash(self.is_app) value = (value * 31) ^ hash(self.repo_base) value = (value * 31) ^ hash(self.cursor) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Tuple: """ Attributes: - cells """ thrift_spec = ( None, # 0 (1, TType.LIST, 'cells', (TType.STRING,None), None, ), # 1 ) def __init__(self, cells=None,): self.cells = cells def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.LIST: self.cells = [] (_etype3, _size0) = iprot.readListBegin() for _i4 in xrange(_size0): _elem5 = iprot.readString(); self.cells.append(_elem5) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Tuple') if self.cells is not None: oprot.writeFieldBegin('cells', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.cells)) for iter6 in self.cells: oprot.writeString(iter6) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.cells) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ResultSet: """ Attributes: - status - con - num_tuples - num_more_tuples - tuples - field_names - field_types """ thrift_spec = ( None, # 0 (1, TType.BOOL, 'status', None, None, ), # 1 (2, TType.STRUCT, 'con', (Connection, Connection.thrift_spec), None, ), # 2 (3, TType.I64, 'num_tuples', None, None, ), # 3 (4, TType.I64, 'num_more_tuples', None, None, ), # 4 (5, TType.LIST, 'tuples', (TType.STRUCT,(Tuple, Tuple.thrift_spec)), None, 
), # 5 (6, TType.LIST, 'field_names', (TType.STRING,None), None, ), # 6 (7, TType.LIST, 'field_types', (TType.STRING,None), None, ), # 7 ) def __init__(self, status=None, con=None, num_tuples=None, num_more_tuples=None, tuples=None, field_names=None, field_types=None,): self.status = status self.con = con self.num_tuples = num_tuples self.num_more_tuples = num_more_tuples self.tuples = tuples self.field_names = field_names self.field_types = field_types def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.status = iprot.readBool(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.con = Connection() self.con.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.num_tuples = iprot.readI64(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I64: self.num_more_tuples = iprot.readI64(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.tuples = [] (_etype10, _size7) = iprot.readListBegin() for _i11 in xrange(_size7): _elem12 = Tuple() _elem12.read(iprot) self.tuples.append(_elem12) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.field_names = [] (_etype16, _size13) = iprot.readListBegin() for _i17 in xrange(_size13): _elem18 = iprot.readString(); self.field_names.append(_elem18) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.field_types = [] (_etype22, _size19) = iprot.readListBegin() for _i23 in xrange(_size19): _elem24 = iprot.readString(); self.field_types.append(_elem24) iprot.readListEnd() else: iprot.skip(ftype) else: 
iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ResultSet') if self.status is not None: oprot.writeFieldBegin('status', TType.BOOL, 1) oprot.writeBool(self.status) oprot.writeFieldEnd() if self.con is not None: oprot.writeFieldBegin('con', TType.STRUCT, 2) self.con.write(oprot) oprot.writeFieldEnd() if self.num_tuples is not None: oprot.writeFieldBegin('num_tuples', TType.I64, 3) oprot.writeI64(self.num_tuples) oprot.writeFieldEnd() if self.num_more_tuples is not None: oprot.writeFieldBegin('num_more_tuples', TType.I64, 4) oprot.writeI64(self.num_more_tuples) oprot.writeFieldEnd() if self.tuples is not None: oprot.writeFieldBegin('tuples', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.tuples)) for iter25 in self.tuples: iter25.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.field_names is not None: oprot.writeFieldBegin('field_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.field_names)) for iter26 in self.field_names: oprot.writeString(iter26) oprot.writeListEnd() oprot.writeFieldEnd() if self.field_types is not None: oprot.writeFieldBegin('field_types', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.field_types)) for iter27 in self.field_types: oprot.writeString(iter27) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.status is None: raise TProtocol.TProtocolException(message='Required field status is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.status) value = (value * 31) ^ hash(self.con) value = (value * 31) ^ hash(self.num_tuples) value = (value * 31) ^ hash(self.num_more_tuples) value = (value * 31) ^ hash(self.tuples) 
value = (value * 31) ^ hash(self.field_names) value = (value * 31) ^ hash(self.field_types) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class DBException(TException): """ Attributes: - error_code - message - details """ thrift_spec = ( None, # 0 (1, TType.I32, 'error_code', None, None, ), # 1 (2, TType.STRING, 'message', None, None, ), # 2 (3, TType.STRING, 'details', None, None, ), # 3 ) def __init__(self, error_code=None, message=None, details=None,): self.error_code = error_code self.message = message self.details = details def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.error_code = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.message = iprot.readString(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.details = iprot.readString(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('DBException') if self.error_code is not None: oprot.writeFieldBegin('error_code', TType.I32, 1) oprot.writeI32(self.error_code) oprot.writeFieldEnd() if 
self.message is not None: oprot.writeFieldBegin('message', TType.STRING, 2) oprot.writeString(self.message) oprot.writeFieldEnd() if self.details is not None: oprot.writeFieldBegin('details', TType.STRING, 3) oprot.writeString(self.details) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __str__(self): return repr(self) def __hash__(self): value = 17 value = (value * 31) ^ hash(self.error_code) value = (value * 31) ^ hash(self.message) value = (value * 31) ^ hash(self.details) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)
[ "thrift.protocol.TProtocol.TProtocolException", "thrift.protocol.fastbinary.encode_binary", "thrift.protocol.fastbinary.decode_binary" ]
[((1479, 1558), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', (['self', 'iprot.trans', '(self.__class__, self.thrift_spec)'], {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))\n', (1503, 1558), False, 'from thrift.protocol import fastbinary\n'), ((5948, 6027), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', (['self', 'iprot.trans', '(self.__class__, self.thrift_spec)'], {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))\n', (5972, 6027), False, 'from thrift.protocol import fastbinary\n'), ((9511, 9590), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', (['self', 'iprot.trans', '(self.__class__, self.thrift_spec)'], {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))\n', (9535, 9590), False, 'from thrift.protocol import fastbinary\n'), ((12451, 12530), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', (['self', 'iprot.trans', '(self.__class__, self.thrift_spec)'], {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))\n', (12475, 12530), False, 'from thrift.protocol import fastbinary\n'), ((16240, 16311), 'thrift.protocol.TProtocol.TProtocolException', 'TProtocol.TProtocolException', ([], {'message': '"""Required field status is unset!"""'}), "(message='Required field status is unset!')\n", (16268, 16311), False, 'from thrift.protocol import TBinaryProtocol, TProtocol\n'), ((17720, 17799), 'thrift.protocol.fastbinary.decode_binary', 'fastbinary.decode_binary', (['self', 'iprot.trans', '(self.__class__, self.thrift_spec)'], {}), '(self, iprot.trans, (self.__class__, self.thrift_spec))\n', (17744, 17799), False, 'from thrift.protocol import fastbinary\n'), ((2977, 3043), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', (['self', '(self.__class__, self.thrift_spec)'], {}), '(self, (self.__class__, self.thrift_spec))\n', (3001, 3043), False, 'from thrift.protocol import fastbinary\n'), ((7290, 7356), 
'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', (['self', '(self.__class__, self.thrift_spec)'], {}), '(self, (self.__class__, self.thrift_spec))\n', (7314, 7356), False, 'from thrift.protocol import fastbinary\n'), ((10332, 10398), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', (['self', '(self.__class__, self.thrift_spec)'], {}), '(self, (self.__class__, self.thrift_spec))\n', (10356, 10398), False, 'from thrift.protocol import fastbinary\n'), ((14554, 14620), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', (['self', '(self.__class__, self.thrift_spec)'], {}), '(self, (self.__class__, self.thrift_spec))\n', (14578, 14620), False, 'from thrift.protocol import fastbinary\n'), ((18645, 18711), 'thrift.protocol.fastbinary.encode_binary', 'fastbinary.encode_binary', (['self', '(self.__class__, self.thrift_spec)'], {}), '(self, (self.__class__, self.thrift_spec))\n', (18669, 18711), False, 'from thrift.protocol import fastbinary\n')]
import unittest from test.std_test_utils import STDIOTest from teagles_advent_2021.day_10.pt1 import main TEST_INPUT = """[({(<(())[]>[[{[]{<()<>> [(()[<>])]({[<{<<[]>>( {([(<{}[<>[]}>{[]{[(<()> (((({<>}<{<{<>}{[]{[]{} [[<[([]))<([[{}[[()]]] [{[{({}]{}}([{[{{{}}([] {<[[]]>}<{[{[{[]{()[[[] [<(<(<(<{}))><([]([]() <{([([[(<>()){}]>(<<{{ <{([{{}}[<[[[<>{}]]]>[]] """ class TestDay10Part1(STDIOTest): def test_with_sample_input(self): self.assert_stdin_n_out(main, TEST_INPUT, "26397\n") if __name__ == '__main__': unittest.main()
[ "unittest.main" ]
[((534, 549), 'unittest.main', 'unittest.main', ([], {}), '()\n', (547, 549), False, 'import unittest\n')]
# -*- coding: utf-8 -*- {{{ # vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et: # # Copyright (c) 2015, Battelle Memorial Institute # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are those # of the authors and should not be interpreted as representing official policies, # either expressed or implied, of the FreeBSD Project. # # This material was prepared as an account of work sponsored by an # agency of the United States Government. 
Neither the United States # Government nor the United States Department of Energy, nor Battelle, # nor any of their employees, nor any jurisdiction or organization # that has cooperated in the development of these materials, makes # any warranty, express or implied, or assumes any legal liability # or responsibility for the accuracy, completeness, or usefulness or # any information, apparatus, product, software, or process disclosed, # or represents that its use would not infringe privately owned rights. # # Reference herein to any specific commercial product, process, or # service by trade name, trademark, manufacturer, or otherwise does # not necessarily constitute or imply its endorsement, recommendation, # r favoring by the United States Government or any agency thereof, # or Battelle Memorial Institute. The views and opinions of authors # expressed herein do not necessarily state or reflect those of the # United States Government or any agency thereof. # # PACIFIC NORTHWEST NATIONAL LABORATORY # operated by BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY # under Contract DE-AC05-76RL01830 #}}} from master_driver.interfaces.fakedriver import Interface import pytest registry_config_string = """Point Name,Volttron Point Name,Units,Units Details,Writable,Starting Value,Type,Notes Float,Float,F,-100 to 300,TRUE,50,float,CO2 Reading 0.00-2000.0 ppm FloatNoDefault,FloatNoDefault,F,-100 to 300,TRUE,,float,CO2 Reading 0.00-2000.0 ppm """ @pytest.mark.revert def test_revert_point(): interface = Interface() interface.configure({}, registry_config_string) value = interface.get_point("Float") assert value == 50.0 interface.set_point("Float", 25.0) value = interface.get_point("Float") assert value == 25.0 interface.revert_point("Float") value = interface.get_point("Float") assert value == 50.0 @pytest.mark.revert def test_revert_device(): interface = Interface() interface.configure({}, registry_config_string) value = interface.get_point("Float") assert value == 50.0 
interface.set_point("Float", 25.0) value = interface.get_point("Float") assert value == 25.0 interface.revert_all() value = interface.get_point("Float") assert value == 50.0 @pytest.mark.revert def test_revert_point_no_default(): interface = Interface() interface.configure({}, registry_config_string) initial_value = interface.get_point("FloatNoDefault") scrape_values = interface.scrape_all() assert scrape_values["FloatNoDefault"] == initial_value test_value = initial_value + 1.0 interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_point("FloatNoDefault") temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value #Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_point("FloatNoDefault") temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value @pytest.mark.revert def test_revert_all_no_default(): interface = Interface() interface.configure({}, registry_config_string) initial_value = interface.get_point("FloatNoDefault") scrape_values = interface.scrape_all() assert scrape_values["FloatNoDefault"] == initial_value test_value = initial_value + 1.0 interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_all() temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value #Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_all() temp_value = interface.get_point("FloatNoDefault") assert temp_value == initial_value @pytest.mark.revert def test_revert_no_default_changing_value(): interface = Interface() 
interface.configure({}, registry_config_string) initial_value = interface.get_point("FloatNoDefault") #Initialize the revert value. interface.scrape_all() new_value = initial_value + 1.0 #Manually update the register values to give us something different to revert to. register = interface.get_register_by_name("FloatNoDefault") register.value = new_value #Update the revert value. interface.scrape_all() test_value = new_value + 1.0 interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_point("FloatNoDefault") temp_value = interface.get_point("FloatNoDefault") assert temp_value == new_value assert temp_value != initial_value #Do it twice to make sure it restores state after revert interface.set_point("FloatNoDefault", test_value) temp_value = interface.get_point("FloatNoDefault") assert temp_value == test_value interface.revert_point("FloatNoDefault") temp_value = interface.get_point("FloatNoDefault") assert temp_value == new_value
[ "master_driver.interfaces.fakedriver.Interface" ]
[((3297, 3308), 'master_driver.interfaces.fakedriver.Interface', 'Interface', ([], {}), '()\n', (3306, 3308), False, 'from master_driver.interfaces.fakedriver import Interface\n'), ((3711, 3722), 'master_driver.interfaces.fakedriver.Interface', 'Interface', ([], {}), '()\n', (3720, 3722), False, 'from master_driver.interfaces.fakedriver import Interface\n'), ((4126, 4137), 'master_driver.interfaces.fakedriver.Interface', 'Interface', ([], {}), '()\n', (4135, 4137), False, 'from master_driver.interfaces.fakedriver import Interface\n'), ((5127, 5138), 'master_driver.interfaces.fakedriver.Interface', 'Interface', ([], {}), '()\n', (5136, 5138), False, 'from master_driver.interfaces.fakedriver import Interface\n'), ((6103, 6114), 'master_driver.interfaces.fakedriver.Interface', 'Interface', ([], {}), '()\n', (6112, 6114), False, 'from master_driver.interfaces.fakedriver import Interface\n')]
from conexiones.auth_client import AuthClient from conexiones.hub_client import HubClient from conexiones.game_client import GameClient """ Clase LogicaCliente es un clase con metodos estaticos. Es la clase que lleva la logica y la que se comunica con la capa de datos. """ class LogicaCliente: """ Registra al usuario actual en el servidor --- Parameters: - username: Nombre del usuario a registrar - password: <PASSWORD> Returns: True si el jugador se ha registrado correctamente, si no, False. """ @staticmethod def registrar_usuario(username,password): return AuthClient.instance().registrar(username,password) """ Loguea al usuario actual --- Parameters: - username: Nombre del usuario a loguear - password: <PASSWORD> Returns: El token de validacion del usuario en caso de que se haya logueado corectamente, si no, None. """ @staticmethod def loguear_usuario(username,password): token = AuthClient.instance().login(username,password) return token """ Obtiene la lista de servidores --- Returns: Un array con la información del servidor escogido """ @staticmethod def obtener_servidores(token): return HubClient.instance().get_servers(token) """ Registra al usuario del token en el servidor pasado por parametro --- Parameters: - token: El token del usuario que realiza la petición - servidor_seleccionado: El servidor seleccionado para registrar al usuario Returns: El servidor de juego en el que se ha registrado el jugador, None si ha surgido un error. """ @staticmethod def registrarse_en_servidor(token,servidor_seleccionado): servidor_de_juego = GameClient(servidor_seleccionado['host'],servidor_seleccionado['port']) if((servidor_de_juego is None) or (not servidor_de_juego.registrar_usuario_en_servidor(token))): print('\nHa ocurrido un error al intentar entrar al servidor. 
Intenta probar en un nuevo servidor.\n') return None else: return servidor_de_juego """ Mantiene el curso de la partida de un jugador --- Parameters: - token: El token del usuario que va a jugar - servidor_seleccionado: el servidor seleccionado en el que va a jugando el usuario - x: Coordenada X donde poner la pieza - token: Coordenda Y donde poner la pieza Returns: El servidor de juego en el que se ha registrado el jugador, None si ha surgido un error. """ @staticmethod def realizar_jugada(token,servidor_de_juego,x,y): se_ha_realizado_jugada = servidor_de_juego.jugar(token,x,y) return se_ha_realizado_jugada """ Manda la señal de que el usuario ha finalizado la partida --- Parameters: - token: El token del usuario que va a jugar - servidor_seleccionado: el servidor seleccionado en el que va a jugando el usuario Returns: True si el servidor ha recibido la petición, si no, False. """ @staticmethod def finalizar_partida(token,servidor_seleccionado): return servidor_seleccionado.finalizar_partida(token) """ Obtiene el score actual --- Returns: El score actual """ @staticmethod def get_score(): return AuthClient.instance().get_score()
[ "conexiones.game_client.GameClient", "conexiones.auth_client.AuthClient.instance", "conexiones.hub_client.HubClient.instance" ]
[((1852, 1924), 'conexiones.game_client.GameClient', 'GameClient', (["servidor_seleccionado['host']", "servidor_seleccionado['port']"], {}), "(servidor_seleccionado['host'], servidor_seleccionado['port'])\n", (1862, 1924), False, 'from conexiones.game_client import GameClient\n'), ((648, 669), 'conexiones.auth_client.AuthClient.instance', 'AuthClient.instance', ([], {}), '()\n', (667, 669), False, 'from conexiones.auth_client import AuthClient\n'), ((1052, 1073), 'conexiones.auth_client.AuthClient.instance', 'AuthClient.instance', ([], {}), '()\n', (1071, 1073), False, 'from conexiones.auth_client import AuthClient\n'), ((1327, 1347), 'conexiones.hub_client.HubClient.instance', 'HubClient.instance', ([], {}), '()\n', (1345, 1347), False, 'from conexiones.hub_client import HubClient\n'), ((3481, 3502), 'conexiones.auth_client.AuthClient.instance', 'AuthClient.instance', ([], {}), '()\n', (3500, 3502), False, 'from conexiones.auth_client import AuthClient\n')]
import pandas as pd import numpy as np class Packing(object): def getMappedFitness(self, chromosome): mappedChromosome = self.items[chromosome] spaces = np.zeros(len(mappedChromosome), dtype=int) result = np.cumsum(mappedChromosome) - self.BIN_CAPACITY index_of_old_bin = 0 binsRequired = 0 spacesLeftOpen = [] consumedSpaces = [] itemsInBin = [] while True: binsRequired += 1 max_accumulate = np.maximum.accumulate(np.flipud(result <= 0)) index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[0] - 1 space_left_open = np.abs(result[index_of_new_bin]) spaces[index_of_new_bin] = space_left_open result += space_left_open spacesLeftOpen.append(space_left_open) consumedSpaces.append(self.BIN_CAPACITY - space_left_open) itemsInBin.append(index_of_new_bin - index_of_old_bin) index_of_old_bin = index_of_new_bin if np.max(result) <= 0: break result -= self.BIN_CAPACITY exec_result = self.fitTree.execute([spacesLeftOpen, consumedSpaces, itemsInBin], [binsRequired, self.BIN_CAPACITY, 1, 2]) return exec_result, binsRequired def toStringMappedFitness(self, chromosome): result = np.cumsum(self.problemSet[chromosome]) - self.BIN_CAPACITY output = '' while True: max_accumulate = np.maximum.accumulate(np.flipud(result <= 0)) index_of_new_bin = self.PROBLEM_SIZE - next((idx for idx, val in np.ndenumerate(max_accumulate) if val == True))[ 0] - 1 space_left_open = np.abs(result[index_of_new_bin]) result += space_left_open output += '|' output += (self.BIN_CAPACITY - space_left_open - 2) * 'X' output += '|' output += '_' * space_left_open output += '\n' if np.max(result) <= 0: break result -= self.BIN_CAPACITY return output def tournamentSelector(self, population, reverse=False): random_indicies = np.random.randint(self.POPULATION_SIZE, size=self.TOURNAMENT_SIZE).tolist() tournament = [] for idx, val in np.ndenumerate(random_indicies): tournament.append(population[val]) results = [] for val in tournament: result, bin = 
self.getMappedFitness(val) results.append(result) results = np.array(results) if not reverse: pos = np.argmin(results) else: pos = np.argmax(results) return population[random_indicies[pos]], random_indicies[pos], results[pos] def multipleSwapCrossover(self, p1, p2, swaps=4): draws = np.random.randint(self.PROBLEM_SIZE, size=swaps) c1 = p1.copy() c2 = p2.copy() for i, val in enumerate(draws): c1item = c1[val] c2item = c2[val] c1 = np.delete(c1, np.where(c1 == c2item)) c2 = np.delete(c2, np.where(c2 == c1item)) c1 = np.insert(c1, val, c2item) c2 = np.insert(c2, val, c1item) return c1, c2 def multipleMutator(self, p, swaps=4): draws = np.random.randint(self.PROBLEM_SIZE, size=(swaps, 2)) child = p.copy() for i, val in enumerate(draws): tmp = child[val[0]] child = np.delete(child, val[0]) child = np.insert(child, val[1], tmp) return child def tryMutate(self, population): draw = np.random.rand() if draw < self.MUTATION_RATE: p, pos, fit = self.tournamentSelector(population) _, kpos, _ = self.tournamentSelector(population, reverse=True) c = self.multipleMutator(p, 1) population[kpos] = c return population def tryCrossover(self, population): draw = np.random.rand() if draw < self.CROSSOVER_RATE: p1, p1pos, p1fit = self.tournamentSelector(population) p2, p2pos, p2fit = self.tournamentSelector(population) if any(p1 != p2): _, k1pos, _ = self.tournamentSelector(population, reverse=True) _, k2pos, _ = self.tournamentSelector(population, reverse=True) c1, c2 = self.multipleSwapCrossover(p1, p2, 3) population[k1pos] = c1 population[k2pos] = c2 else: p1 = self.multipleMutator(p1, swaps=int(self.PROBLEM_SIZE / 5)) population[p1pos] = p1 return population def run(self, fitTree, binFile, minBins): self.problemSet = pd.read_csv(binFile, header=None).values.tolist() self.PROBLEM_SIZE = self.problemSet.pop(0)[0] self.BIN_CAPACITY = self.problemSet.pop(0)[0] self.POPULATION_SIZE = 50 self.TOURNAMENT_SIZE = 4 self.GENERATIONS = 250 self.SAMPLES = 1 self.SAMPLE_RATE = 50 
self.MUTATION_RATE = 0.3 self.CROSSOVER_RATE = 1 self.items = pd.DataFrame(self.problemSet) self.items = np.array(self.items[0]) self.organisedChromosome = np.arange(self.items.size) assert self.PROBLEM_SIZE == len(self.items) self.fitTree = fitTree population = [] chromosome = np.arange(self.PROBLEM_SIZE) for i in range(self.POPULATION_SIZE): np.random.shuffle(chromosome) population.append(chromosome.copy()) foundMin = False # Mutate and crossover for each generation for idx, generation in enumerate(range(self.GENERATIONS)): if foundMin == False: population = self.tryMutate(population) population = self.tryCrossover(population) if idx % self.SAMPLE_RATE == 0: bins = [] fitness = [] for chromosome in population: result, bin = self.getMappedFitness(chromosome) bins.append(bin) fitness.append(result) position = int(np.argmin(fitness)) if bins[position] == minBins: foundMin = True bins = [] fitness = [] for chromosome in population: result, bin = self.getMappedFitness(chromosome) bins.append(bin) fitness.append(np.array(result)) position = int(np.argmin(fitness)) return fitness[position], bins[position]
[ "pandas.DataFrame", "numpy.abs", "numpy.ndenumerate", "numpy.argmax", "pandas.read_csv", "numpy.flipud", "numpy.argmin", "numpy.insert", "numpy.cumsum", "numpy.max", "numpy.random.randint", "numpy.array", "numpy.arange", "numpy.where", "numpy.random.rand", "numpy.delete", "numpy.random.shuffle" ]
[((2352, 2383), 'numpy.ndenumerate', 'np.ndenumerate', (['random_indicies'], {}), '(random_indicies)\n', (2366, 2383), True, 'import numpy as np\n'), ((2591, 2608), 'numpy.array', 'np.array', (['results'], {}), '(results)\n', (2599, 2608), True, 'import numpy as np\n'), ((2876, 2924), 'numpy.random.randint', 'np.random.randint', (['self.PROBLEM_SIZE'], {'size': 'swaps'}), '(self.PROBLEM_SIZE, size=swaps)\n', (2893, 2924), True, 'import numpy as np\n'), ((3352, 3405), 'numpy.random.randint', 'np.random.randint', (['self.PROBLEM_SIZE'], {'size': '(swaps, 2)'}), '(self.PROBLEM_SIZE, size=(swaps, 2))\n', (3369, 3405), True, 'import numpy as np\n'), ((3675, 3691), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3689, 3691), True, 'import numpy as np\n'), ((4027, 4043), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4041, 4043), True, 'import numpy as np\n'), ((5189, 5218), 'pandas.DataFrame', 'pd.DataFrame', (['self.problemSet'], {}), '(self.problemSet)\n', (5201, 5218), True, 'import pandas as pd\n'), ((5240, 5263), 'numpy.array', 'np.array', (['self.items[0]'], {}), '(self.items[0])\n', (5248, 5263), True, 'import numpy as np\n'), ((5300, 5326), 'numpy.arange', 'np.arange', (['self.items.size'], {}), '(self.items.size)\n', (5309, 5326), True, 'import numpy as np\n'), ((5458, 5486), 'numpy.arange', 'np.arange', (['self.PROBLEM_SIZE'], {}), '(self.PROBLEM_SIZE)\n', (5467, 5486), True, 'import numpy as np\n'), ((234, 261), 'numpy.cumsum', 'np.cumsum', (['mappedChromosome'], {}), '(mappedChromosome)\n', (243, 261), True, 'import numpy as np\n'), ((703, 735), 'numpy.abs', 'np.abs', (['result[index_of_new_bin]'], {}), '(result[index_of_new_bin])\n', (709, 735), True, 'import numpy as np\n'), ((1403, 1441), 'numpy.cumsum', 'np.cumsum', (['self.problemSet[chromosome]'], {}), '(self.problemSet[chromosome])\n', (1412, 1441), True, 'import numpy as np\n'), ((1756, 1788), 'numpy.abs', 'np.abs', (['result[index_of_new_bin]'], {}), 
'(result[index_of_new_bin])\n', (1762, 1788), True, 'import numpy as np\n'), ((2651, 2669), 'numpy.argmin', 'np.argmin', (['results'], {}), '(results)\n', (2660, 2669), True, 'import numpy as np\n'), ((2702, 2720), 'numpy.argmax', 'np.argmax', (['results'], {}), '(results)\n', (2711, 2720), True, 'import numpy as np\n'), ((3198, 3224), 'numpy.insert', 'np.insert', (['c1', 'val', 'c2item'], {}), '(c1, val, c2item)\n', (3207, 3224), True, 'import numpy as np\n'), ((3242, 3268), 'numpy.insert', 'np.insert', (['c2', 'val', 'c1item'], {}), '(c2, val, c1item)\n', (3251, 3268), True, 'import numpy as np\n'), ((3525, 3549), 'numpy.delete', 'np.delete', (['child', 'val[0]'], {}), '(child, val[0])\n', (3534, 3549), True, 'import numpy as np\n'), ((3570, 3599), 'numpy.insert', 'np.insert', (['child', 'val[1]', 'tmp'], {}), '(child, val[1], tmp)\n', (3579, 3599), True, 'import numpy as np\n'), ((5545, 5574), 'numpy.random.shuffle', 'np.random.shuffle', (['chromosome'], {}), '(chromosome)\n', (5562, 5574), True, 'import numpy as np\n'), ((6582, 6600), 'numpy.argmin', 'np.argmin', (['fitness'], {}), '(fitness)\n', (6591, 6600), True, 'import numpy as np\n'), ((517, 539), 'numpy.flipud', 'np.flipud', (['(result <= 0)'], {}), '(result <= 0)\n', (526, 539), True, 'import numpy as np\n'), ((1082, 1096), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (1088, 1096), True, 'import numpy as np\n'), ((1553, 1575), 'numpy.flipud', 'np.flipud', (['(result <= 0)'], {}), '(result <= 0)\n', (1562, 1575), True, 'import numpy as np\n'), ((2035, 2049), 'numpy.max', 'np.max', (['result'], {}), '(result)\n', (2041, 2049), True, 'import numpy as np\n'), ((2228, 2294), 'numpy.random.randint', 'np.random.randint', (['self.POPULATION_SIZE'], {'size': 'self.TOURNAMENT_SIZE'}), '(self.POPULATION_SIZE, size=self.TOURNAMENT_SIZE)\n', (2245, 2294), True, 'import numpy as np\n'), ((3102, 3124), 'numpy.where', 'np.where', (['(c1 == c2item)'], {}), '(c1 == c2item)\n', (3110, 3124), True, 'import numpy 
as np\n'), ((3157, 3179), 'numpy.where', 'np.where', (['(c2 == c1item)'], {}), '(c2 == c1item)\n', (3165, 3179), True, 'import numpy as np\n'), ((6540, 6556), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (6548, 6556), True, 'import numpy as np\n'), ((4789, 4822), 'pandas.read_csv', 'pd.read_csv', (['binFile'], {'header': 'None'}), '(binFile, header=None)\n', (4800, 4822), True, 'import pandas as pd\n'), ((6244, 6262), 'numpy.argmin', 'np.argmin', (['fitness'], {}), '(fitness)\n', (6253, 6262), True, 'import numpy as np\n'), ((618, 648), 'numpy.ndenumerate', 'np.ndenumerate', (['max_accumulate'], {}), '(max_accumulate)\n', (632, 648), True, 'import numpy as np\n'), ((1654, 1684), 'numpy.ndenumerate', 'np.ndenumerate', (['max_accumulate'], {}), '(max_accumulate)\n', (1668, 1684), True, 'import numpy as np\n')]
import click from .. import __version__ @click.command() @click.option("--count", default=1, help="Number of greetings.") @click.version_option(version=__version__) def main(count): print(count) click.echo("Hello, Xbot!")
[ "click.version_option", "click.option", "click.echo", "click.command" ]
[((44, 59), 'click.command', 'click.command', ([], {}), '()\n', (57, 59), False, 'import click\n'), ((61, 124), 'click.option', 'click.option', (['"""--count"""'], {'default': '(1)', 'help': '"""Number of greetings."""'}), "('--count', default=1, help='Number of greetings.')\n", (73, 124), False, 'import click\n'), ((126, 167), 'click.version_option', 'click.version_option', ([], {'version': '__version__'}), '(version=__version__)\n', (146, 167), False, 'import click\n'), ((206, 232), 'click.echo', 'click.echo', (['"""Hello, Xbot!"""'], {}), "('Hello, Xbot!')\n", (216, 232), False, 'import click\n')]
import xmltodict, json, collections, typing, random, time f = open("kotus-sanalista_v1.xml") kotus_ = f.read() f.close() kotus = xmltodict.parse(kotus_) def toTn(tn): pass def toLine(item): if "s" not in item or "t" not in item: return (item["s"], -1,"_") word = item["s"] type_ = item["t"] if isinstance(type_, typing.List): result = [] for t_ in type_: tn = int(t_["tn"] if "tn" in t_ else "0") av = t_["av"] if "av" in t_ else "_" result.append((word, tn, av)) return result else: tn = int(type_["tn"] if "tn" in type_ else "0") if "av" not in type_: av = "_" elif isinstance(type_["av"], collections.OrderedDict) and "#text" in type_["av"]: av = type_["av"]["#text"] else: av = type_["av"] if "av" in type_ else "_" return (word, tn, av) def toKey(item): pass simple = [toLine(x) for x in kotus['kotus-sanalista']['st'] if "t" in x and not isinstance(x["t"], typing.List)] complex_ = [toLine(x) for x in kotus['kotus-sanalista']['st'] if "t" in x and isinstance(x["t"], typing.List)] d = collections.defaultdict(list) for s in simple: d[f"{s[1]}{s[2]}"].append({"word": s[0], "tn": s[1], "av": s[2]}) for c in complex_: for s in c: d[f"{s[1]}{s[2]}"].append({"word": s[0], "tn": s[1], "av": s[2]}) samples = [] full = [] for k in d.keys(): for x in d[k]: full.append(x) if(len(d[k]) < 20): for x in d[k]: samples.append(x) else: sample = random.sample(d[k], 20) for x in sample: samples.append(x) f = open(f"kotus_samples_{time.time()}.json", "w+") f.write(json.dumps(samples, sort_keys=True, indent=4)) f.close() f = open(f"kotus_all.json", "w+") f.write(json.dumps(full, sort_keys=True, indent=4)) f.close()
[ "random.sample", "json.dumps", "time.time", "collections.defaultdict", "xmltodict.parse" ]
[((131, 154), 'xmltodict.parse', 'xmltodict.parse', (['kotus_'], {}), '(kotus_)\n', (146, 154), False, 'import xmltodict, json, collections, typing, random, time\n'), ((1172, 1201), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (1195, 1201), False, 'import xmltodict, json, collections, typing, random, time\n'), ((1728, 1773), 'json.dumps', 'json.dumps', (['samples'], {'sort_keys': '(True)', 'indent': '(4)'}), '(samples, sort_keys=True, indent=4)\n', (1738, 1773), False, 'import xmltodict, json, collections, typing, random, time\n'), ((1828, 1870), 'json.dumps', 'json.dumps', (['full'], {'sort_keys': '(True)', 'indent': '(4)'}), '(full, sort_keys=True, indent=4)\n', (1838, 1870), False, 'import xmltodict, json, collections, typing, random, time\n'), ((1588, 1611), 'random.sample', 'random.sample', (['d[k]', '(20)'], {}), '(d[k], 20)\n', (1601, 1611), False, 'import xmltodict, json, collections, typing, random, time\n'), ((1694, 1705), 'time.time', 'time.time', ([], {}), '()\n', (1703, 1705), False, 'import xmltodict, json, collections, typing, random, time\n')]
from rest_framework import serializers class CodeCoverageSerializer(serializers.Serializer): entry = serializers.CharField() # id = serializers.CharField() # inputs = serializers.ListField() # expectedOutputs = serializers.ListField(required=False)
[ "rest_framework.serializers.CharField" ]
[((107, 130), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {}), '()\n', (128, 130), False, 'from rest_framework import serializers\n')]
from part1 import ( gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new, ) """ scenario: test_random_actions uuid: 507857314 """ """ random actions, total chaos """ board = gamma_new(5, 4, 4, 3) assert board is not None assert gamma_move(board, 1, 3, 1) == 1 assert gamma_move(board, 2, 2, 4) == 0 assert gamma_move(board, 2, 1, 0) == 1 board295535068 = gamma_board(board) assert board295535068 is not None assert board295535068 == (".....\n" ".....\n" "...1.\n" ".2...\n") del board295535068 board295535068 = None assert gamma_move(board, 3, 4, 0) == 1 assert gamma_move(board, 4, 3, 2) == 1 assert gamma_golden_possible(board, 4) == 1 assert gamma_move(board, 1, 3, 1) == 0 assert gamma_free_fields(board, 1) == 16 assert gamma_golden_possible(board, 2) == 1 assert gamma_move(board, 3, 3, 3) == 1 assert gamma_golden_possible(board, 4) == 1 assert gamma_move(board, 1, 2, 4) == 0 assert gamma_move(board, 1, 2, 2) == 1 assert gamma_move(board, 2, 1, 4) == 0 assert gamma_move(board, 2, 3, 3) == 0 assert gamma_move(board, 3, 0, 0) == 1 assert gamma_move(board, 3, 3, 0) == 1 board993913590 = gamma_board(board) assert board993913590 is not None assert board993913590 == ("...3.\n" "..14.\n" "...1.\n" "32.33\n") del board993913590 board993913590 = None assert gamma_move(board, 4, 1, 4) == 0 assert gamma_move(board, 1, 3, 1) == 0 assert gamma_move(board, 2, 3, 1) == 0 assert gamma_move(board, 3, 1, 0) == 0 assert gamma_move(board, 4, 3, 1) == 0 assert gamma_move(board, 4, 4, 3) == 1 assert gamma_move(board, 1, 3, 3) == 0 assert gamma_move(board, 2, 0, 2) == 1 board946766181 = gamma_board(board) assert board946766181 is not None assert board946766181 == ("...34\n" "2.14.\n" "...1.\n" "32.33\n") del board946766181 board946766181 = None assert gamma_move(board, 3, 4, 1) == 1 assert gamma_move(board, 4, 4, 2) == 1 assert gamma_move(board, 1, 3, 3) == 0 assert gamma_free_fields(board, 1) == 8 assert 
gamma_move(board, 2, 2, 2) == 0 assert gamma_move(board, 3, 0, 2) == 0 assert gamma_move(board, 4, 2, 3) == 1 assert gamma_move(board, 1, 1, 0) == 0 assert gamma_move(board, 2, 2, 1) == 1 assert gamma_move(board, 2, 0, 2) == 0 assert gamma_move(board, 3, 4, 0) == 0 assert gamma_move(board, 3, 0, 2) == 0 assert gamma_move(board, 4, 0, 0) == 0 assert gamma_golden_move(board, 4, 0, 0) == 1 assert gamma_move(board, 1, 2, 1) == 0 assert gamma_golden_move(board, 1, 0, 4) == 0 assert gamma_move(board, 2, 2, 1) == 0 assert gamma_move(board, 3, 4, 2) == 0 assert gamma_busy_fields(board, 4) == 5 assert gamma_move(board, 1, 1, 0) == 0 assert gamma_free_fields(board, 1) == 6 assert gamma_move(board, 2, 2, 1) == 0 assert gamma_move(board, 2, 0, 2) == 0 assert gamma_golden_possible(board, 2) == 1 assert gamma_move(board, 3, 1, 1) == 1 assert gamma_golden_move(board, 3, 0, 4) == 0 assert gamma_move(board, 4, 1, 0) == 0 assert gamma_move(board, 4, 2, 2) == 0 assert gamma_move(board, 1, 1, 0) == 0 assert gamma_move(board, 1, 2, 2) == 0 assert gamma_move(board, 2, 1, 1) == 0 assert gamma_move(board, 3, 3, 1) == 0 assert gamma_move(board, 3, 1, 0) == 0 assert gamma_move(board, 4, 0, 1) == 1 gamma_delete(board)
[ "part1.gamma_new", "part1.gamma_busy_fields", "part1.gamma_golden_move", "part1.gamma_golden_possible", "part1.gamma_move", "part1.gamma_board", "part1.gamma_free_fields", "part1.gamma_delete" ]
[((283, 304), 'part1.gamma_new', 'gamma_new', (['(5)', '(4)', '(4)', '(3)'], {}), '(5, 4, 4, 3)\n', (292, 304), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((471, 489), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (482, 489), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1228, 1246), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (1239, 1246), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1728, 1746), 'part1.gamma_board', 'gamma_board', (['board'], {}), '(board)\n', (1739, 1746), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3280, 3299), 'part1.gamma_delete', 'gamma_delete', (['board'], {}), '(board)\n', (3292, 3299), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((339, 365), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(1)'], {}), '(board, 1, 3, 1)\n', (349, 365), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((379, 405), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(4)'], {}), '(board, 2, 2, 4)\n', (389, 405), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((419, 445), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(0)'], {}), '(board, 2, 1, 0)\n', (429, 445), False, 'from part1 import 
gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((639, 665), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(0)'], {}), '(board, 3, 4, 0)\n', (649, 665), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((679, 705), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(3)', '(2)'], {}), '(board, 4, 3, 2)\n', (689, 705), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((719, 750), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(4)'], {}), '(board, 4)\n', (740, 750), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((764, 790), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(1)'], {}), '(board, 1, 3, 1)\n', (774, 790), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((804, 831), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (821, 831), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((846, 877), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (867, 877), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((891, 917), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(3)'], {}), '(board, 3, 3, 3)\n', (901, 917), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, 
gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((931, 962), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(4)'], {}), '(board, 4)\n', (952, 962), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((976, 1002), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(4)'], {}), '(board, 1, 2, 4)\n', (986, 1002), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1016, 1042), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(2)'], {}), '(board, 1, 2, 2)\n', (1026, 1042), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1056, 1082), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(4)'], {}), '(board, 2, 1, 4)\n', (1066, 1082), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1096, 1122), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(3)'], {}), '(board, 2, 3, 3)\n', (1106, 1122), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1136, 1162), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(0)'], {}), '(board, 3, 0, 0)\n', (1146, 1162), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1176, 1202), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(0)'], {}), '(board, 3, 3, 0)\n', (1186, 1202), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, 
gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1396, 1422), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(1)', '(4)'], {}), '(board, 4, 1, 4)\n', (1406, 1422), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1436, 1462), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(1)'], {}), '(board, 1, 3, 1)\n', (1446, 1462), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1476, 1502), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(3)', '(1)'], {}), '(board, 2, 3, 1)\n', (1486, 1502), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1516, 1542), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(0)'], {}), '(board, 3, 1, 0)\n', (1526, 1542), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1556, 1582), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(3)', '(1)'], {}), '(board, 4, 3, 1)\n', (1566, 1582), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1596, 1622), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(4)', '(3)'], {}), '(board, 4, 4, 3)\n', (1606, 1622), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1636, 1662), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(3)'], {}), '(board, 1, 3, 3)\n', (1646, 1662), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, 
gamma_golden_possible, gamma_move, gamma_new\n'), ((1676, 1702), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(2)'], {}), '(board, 2, 0, 2)\n', (1686, 1702), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1896, 1922), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(1)'], {}), '(board, 3, 4, 1)\n', (1906, 1922), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1936, 1962), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(4)', '(2)'], {}), '(board, 4, 4, 2)\n', (1946, 1962), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((1976, 2002), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(3)', '(3)'], {}), '(board, 1, 3, 3)\n', (1986, 2002), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2016, 2043), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (2033, 2043), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2057, 2083), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(2)'], {}), '(board, 2, 2, 2)\n', (2067, 2083), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2097, 2123), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(2)'], {}), '(board, 3, 0, 2)\n', (2107, 2123), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, 
gamma_move, gamma_new\n'), ((2137, 2163), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(2)', '(3)'], {}), '(board, 4, 2, 3)\n', (2147, 2163), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2177, 2203), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(0)'], {}), '(board, 1, 1, 0)\n', (2187, 2203), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2217, 2243), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(1)'], {}), '(board, 2, 2, 1)\n', (2227, 2243), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2257, 2283), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(2)'], {}), '(board, 2, 0, 2)\n', (2267, 2283), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2297, 2323), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(0)'], {}), '(board, 3, 4, 0)\n', (2307, 2323), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2337, 2363), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(0)', '(2)'], {}), '(board, 3, 0, 2)\n', (2347, 2363), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2377, 2403), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(0)', '(0)'], {}), '(board, 4, 0, 0)\n', (2387, 2403), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, 
gamma_new\n'), ((2417, 2450), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(4)', '(0)', '(0)'], {}), '(board, 4, 0, 0)\n', (2434, 2450), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2464, 2490), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(1)'], {}), '(board, 1, 2, 1)\n', (2474, 2490), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2504, 2537), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(1)', '(0)', '(4)'], {}), '(board, 1, 0, 4)\n', (2521, 2537), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2551, 2577), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(1)'], {}), '(board, 2, 2, 1)\n', (2561, 2577), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2591, 2617), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(4)', '(2)'], {}), '(board, 3, 4, 2)\n', (2601, 2617), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2631, 2658), 'part1.gamma_busy_fields', 'gamma_busy_fields', (['board', '(4)'], {}), '(board, 4)\n', (2648, 2658), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2672, 2698), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(0)'], {}), '(board, 1, 1, 0)\n', (2682, 2698), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, 
gamma_move, gamma_new\n'), ((2712, 2739), 'part1.gamma_free_fields', 'gamma_free_fields', (['board', '(1)'], {}), '(board, 1)\n', (2729, 2739), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2753, 2779), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(2)', '(1)'], {}), '(board, 2, 2, 1)\n', (2763, 2779), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2793, 2819), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(0)', '(2)'], {}), '(board, 2, 0, 2)\n', (2803, 2819), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2833, 2864), 'part1.gamma_golden_possible', 'gamma_golden_possible', (['board', '(2)'], {}), '(board, 2)\n', (2854, 2864), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2878, 2904), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(1)'], {}), '(board, 3, 1, 1)\n', (2888, 2904), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2918, 2951), 'part1.gamma_golden_move', 'gamma_golden_move', (['board', '(3)', '(0)', '(4)'], {}), '(board, 3, 0, 4)\n', (2935, 2951), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((2965, 2991), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(1)', '(0)'], {}), '(board, 4, 1, 0)\n', (2975, 2991), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, 
gamma_move, gamma_new\n'), ((3005, 3031), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(2)', '(2)'], {}), '(board, 4, 2, 2)\n', (3015, 3031), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3045, 3071), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(1)', '(0)'], {}), '(board, 1, 1, 0)\n', (3055, 3071), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3085, 3111), 'part1.gamma_move', 'gamma_move', (['board', '(1)', '(2)', '(2)'], {}), '(board, 1, 2, 2)\n', (3095, 3111), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3125, 3151), 'part1.gamma_move', 'gamma_move', (['board', '(2)', '(1)', '(1)'], {}), '(board, 2, 1, 1)\n', (3135, 3151), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3165, 3191), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(3)', '(1)'], {}), '(board, 3, 3, 1)\n', (3175, 3191), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3205, 3231), 'part1.gamma_move', 'gamma_move', (['board', '(3)', '(1)', '(0)'], {}), '(board, 3, 1, 0)\n', (3215, 3231), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, gamma_new\n'), ((3245, 3271), 'part1.gamma_move', 'gamma_move', (['board', '(4)', '(0)', '(1)'], {}), '(board, 4, 0, 1)\n', (3255, 3271), False, 'from part1 import gamma_board, gamma_busy_fields, gamma_delete, gamma_free_fields, gamma_golden_move, gamma_golden_possible, gamma_move, 
gamma_new\n')]
import threading import time import logging import pyroute2 import ipaddress import json import re from pyroute2 import WireGuard from platform_agent.cmd.lsmod import module_loaded from platform_agent.cmd.wg_info import WireGuardRead from platform_agent.files.tmp_files import read_tmp_file from platform_agent.routes import Routes from platform_agent.lib.ctime import now from platform_agent.wireguard.helpers import WG_NAME_PATTERN, ping_internal_ips, get_peer_info_all logger = logging.getLogger() def get_routing_info(wg): routing_info = {} peers_internal_ips = [] interfaces = read_tmp_file(file_type='iface_info') res = {k: v for k, v in interfaces.items() if re.match(WG_NAME_PATTERN, k)} for ifname in res.keys(): if not res[ifname].get('internal_ip'): continue internal_ip = res[ifname]['internal_ip'] metadata = res[ifname]['metadata'] peers = get_peer_info_all(ifname, wg, kind=res[ifname]['kind']) for peer in peers: try: peer_internal_ip = next( ( ip for ip in peer['allowed_ips'] if ipaddress.ip_address(ip.split('/')[0]) in ipaddress.ip_network(f"{internal_ip.split('/')[0]}/24", False) ), None ) except ValueError: continue if not peer_internal_ip: continue peers_internal_ips.append(peer_internal_ip.split('/')[0]) peer['allowed_ips'].remove(peer_internal_ip) for allowed_ip in peer['allowed_ips']: if not routing_info.get(allowed_ip): routing_info[allowed_ip] = {'ifaces': {}} routing_info[allowed_ip]['ifaces'][ifname] = { 'internal_ip': peer_internal_ip, 'metadata': metadata } return routing_info, peers_internal_ips def get_interface_internal_ip(ifname): with pyroute2.IPDB() as ipdb: internal_ip = f"{ipdb.interfaces[ifname]['ipaddr'][0]['address']}" return internal_ip def get_fastest_routes(wg): result = {} routing_info, peers_internal_ips = get_routing_info(wg) ping_results = ping_internal_ips(peers_internal_ips, icmp_id=20000) for dest, routes in routing_info.items(): best_route = None best_ping = 9999 for iface, data in routes['ifaces'].items(): int_ip = 
data['internal_ip'].split('/')[0] if ping_results[int_ip]['latency_ms'] < best_ping: best_route = {'iface': iface, 'gw': data['internal_ip'], 'metadata': data.get('metadata')} best_ping = ping_results[int_ip]['latency_ms'] result[dest] = best_route return result, ping_results class Rerouting(threading.Thread): def __init__(self, client, interval=1): logger.debug(f"[REROUTING] Initializing") super().__init__() self.interval = interval self.client = client self.wg = WireGuard() if module_loaded("wireguard") else WireGuardRead() self.routes = Routes() self.stop_rerouting = threading.Event() self.daemon = True def run(self): logger.debug(f"[REROUTING] Running") previous_routes = {} while not self.stop_rerouting.is_set(): new_routes, ping_data = get_fastest_routes(self.wg) for dest, best_route in new_routes.items(): if not best_route or previous_routes.get(dest) == best_route: continue # Do rerouting logic with best_route logger.debug(f"[REROUTING] Rerouting {dest} via {best_route}", extra={'metadata': best_route.get('metadata')}) try: self.routes.ip_route_replace( ifname=best_route['iface'], ip_list=[dest], gw_ipv4=get_interface_internal_ip(best_route['iface']) ) except KeyError: # catch if interface was deleted while executing this code continue previous_routes = new_routes time.sleep(int(self.interval)) def send_latency_data(self, data): self.client.send_log(json.dumps({ 'id': "ID." + str(time.time()), 'executed_at': now(), 'type': 'PEERS_LATENCY_DATA', 'data': data })) def join(self, timeout=None): self.stop_rerouting.set() super().join(timeout)
[ "platform_agent.cmd.lsmod.module_loaded", "pyroute2.WireGuard", "platform_agent.lib.ctime.now", "pyroute2.IPDB", "re.match", "platform_agent.cmd.wg_info.WireGuardRead", "platform_agent.wireguard.helpers.get_peer_info_all", "time.time", "threading.Event", "platform_agent.wireguard.helpers.ping_internal_ips", "platform_agent.files.tmp_files.read_tmp_file", "platform_agent.routes.Routes", "logging.getLogger" ]
[((485, 504), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (502, 504), False, 'import logging\n'), ((600, 637), 'platform_agent.files.tmp_files.read_tmp_file', 'read_tmp_file', ([], {'file_type': '"""iface_info"""'}), "(file_type='iface_info')\n", (613, 637), False, 'from platform_agent.files.tmp_files import read_tmp_file\n'), ((2385, 2437), 'platform_agent.wireguard.helpers.ping_internal_ips', 'ping_internal_ips', (['peers_internal_ips'], {'icmp_id': '(20000)'}), '(peers_internal_ips, icmp_id=20000)\n', (2402, 2437), False, 'from platform_agent.wireguard.helpers import WG_NAME_PATTERN, ping_internal_ips, get_peer_info_all\n'), ((924, 979), 'platform_agent.wireguard.helpers.get_peer_info_all', 'get_peer_info_all', (['ifname', 'wg'], {'kind': "res[ifname]['kind']"}), "(ifname, wg, kind=res[ifname]['kind'])\n", (941, 979), False, 'from platform_agent.wireguard.helpers import WG_NAME_PATTERN, ping_internal_ips, get_peer_info_all\n'), ((2133, 2148), 'pyroute2.IPDB', 'pyroute2.IPDB', ([], {}), '()\n', (2146, 2148), False, 'import pyroute2\n'), ((3266, 3274), 'platform_agent.routes.Routes', 'Routes', ([], {}), '()\n', (3272, 3274), False, 'from platform_agent.routes import Routes\n'), ((3305, 3322), 'threading.Event', 'threading.Event', ([], {}), '()\n', (3320, 3322), False, 'import threading\n'), ((688, 716), 're.match', 're.match', (['WG_NAME_PATTERN', 'k'], {}), '(WG_NAME_PATTERN, k)\n', (696, 716), False, 'import re\n'), ((3196, 3222), 'platform_agent.cmd.lsmod.module_loaded', 'module_loaded', (['"""wireguard"""'], {}), "('wireguard')\n", (3209, 3222), False, 'from platform_agent.cmd.lsmod import module_loaded\n'), ((3181, 3192), 'pyroute2.WireGuard', 'WireGuard', ([], {}), '()\n', (3190, 3192), False, 'from pyroute2 import WireGuard\n'), ((3228, 3243), 'platform_agent.cmd.wg_info.WireGuardRead', 'WireGuardRead', ([], {}), '()\n', (3241, 3243), False, 'from platform_agent.cmd.wg_info import WireGuardRead\n'), ((4498, 4503), 
'platform_agent.lib.ctime.now', 'now', ([], {}), '()\n', (4501, 4503), False, 'from platform_agent.lib.ctime import now\n'), ((4457, 4468), 'time.time', 'time.time', ([], {}), '()\n', (4466, 4468), False, 'import time\n')]
import os
import json
import sys
import PTMCIM_A
import PTMCIM_B
import PTMCIM_C

# Sanity check: show where the PTMCIM_* simulation modules resolve from.
print(sys.path)


class parameters:
    """Container for the spin-lattice and Metropolis-algorithm settings
    passed to the PTMCIM_* simulation routines."""

    def __init__(self):
        # Spin lattice parameters
        self.M = 300  # Number of rows
        self.N = 300  # Number of cols
        self.J = 1  # Coupling between locations
        self.mu = 0.003  # field
        self.k = 20  # coupling between lattices

        # Metropolis Algorithm parameters
        self.numTrials = 1
        self.BURN_IN = 0
        self.STEPS = 5000  # How many time steps
        self.BOLTZ_CONST = 8.617333e-5
        self.T_MIN = 0
        self.T_MAX = 10
        self.numTemp = 150
        self.T_C = 0.44


def main():
    """Sweep defect cases and inter-lattice couplings k, several trials each.

    Builds the directory tree <case>/<case, k = K>/<trial>/, runs the matching
    PTMCIM routine inside each trial directory, and dumps the parameter set to
    params.json alongside the simulation output. Navigation is done with
    os.chdir, so each loop level descends on entry and climbs back out at the
    end of its iteration.
    """
    params = parameters()
    k_values = [1, 2, 3, 5, 10, 20, 30, 50]
    numTrials = 3
    # type = {1: 'point', 2: 'line', 3:'plane'}
    case_type = {1: 'point', 2: 'line'}
    for case in case_type:
        case_name = case_type[case]
        os.mkdir(case_name)
        os.chdir(case_name)
        for k in k_values:
            params.k = k
            trial_folder = case_name + ', ' + 'k = ' + str(k)
            os.mkdir(trial_folder)
            os.chdir(trial_folder)
            for trial in range(numTrials):
                print(case_name+', k = '+str(k)+', trialNum = ' + str(trial))
                os.mkdir(str(trial))
                os.chdir(str(trial))
                # Dispatch on defect geometry.
                if case_name == 'point':
                    PTMCIM_A.routine(params)
                elif case_name == 'line':
                    PTMCIM_B.routine(params)
                elif case_name == 'plane':
                    # NOTE(review): 'plane' is never produced by case_type
                    # above (its entry is commented out), so this branch is
                    # currently dead — confirm intent.
                    PTMCIM_C.routine(params)
                # save parameters to folder with data
                with open("params.json", "w") as outfile:
                    json.dump(params.__dict__, outfile)
                path_parent = os.path.dirname(os.getcwd())  # go up one level to
                os.chdir(path_parent)
            # new k value follows, so leave this k value level
            os.chdir(os.path.dirname(os.getcwd()))
        # new case type follows, so leave this case level and go up one
        os.chdir(os.path.dirname(os.getcwd()))


if __name__ == "__main__":
    main()
[ "os.mkdir", "json.dump", "PTMCIM_B.routine", "os.getcwd", "PTMCIM_A.routine", "os.chdir", "PTMCIM_C.routine" ]
[((931, 950), 'os.mkdir', 'os.mkdir', (['case_name'], {}), '(case_name)\n', (939, 950), False, 'import os\n'), ((959, 978), 'os.chdir', 'os.chdir', (['case_name'], {}), '(case_name)\n', (967, 978), False, 'import os\n'), ((1108, 1130), 'os.mkdir', 'os.mkdir', (['trial_folder'], {}), '(trial_folder)\n', (1116, 1130), False, 'import os\n'), ((1143, 1165), 'os.chdir', 'os.chdir', (['trial_folder'], {}), '(trial_folder)\n', (1151, 1165), False, 'import os\n'), ((1908, 1929), 'os.chdir', 'os.chdir', (['path_parent'], {}), '(path_parent)\n', (1916, 1929), False, 'import os\n'), ((2151, 2162), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2160, 2162), False, 'import os\n'), ((1426, 1450), 'PTMCIM_A.routine', 'PTMCIM_A.routine', (['params'], {}), '(params)\n', (1442, 1450), False, 'import PTMCIM_A\n'), ((1759, 1794), 'json.dump', 'json.dump', (['params.__dict__', 'outfile'], {}), '(params.__dict__, outfile)\n', (1768, 1794), False, 'import json\n'), ((1842, 1853), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1851, 1853), False, 'import os\n'), ((2031, 2042), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2040, 2042), False, 'import os\n'), ((1513, 1537), 'PTMCIM_B.routine', 'PTMCIM_B.routine', (['params'], {}), '(params)\n', (1529, 1537), False, 'import PTMCIM_B\n'), ((1601, 1625), 'PTMCIM_C.routine', 'PTMCIM_C.routine', (['params'], {}), '(params)\n', (1617, 1625), False, 'import PTMCIM_C\n')]
#!/usr/bin/env python3
"""Plot relative per-cell throughput versus the number of backoff slots,
one figure per eNB count, with one error-bar series per (scheme, K) pair.

Reads ../results/var_wifi_load.tsv and writes PDF/PNG figures under
../plots/. Fixes over the original: the scheme loop iterated a set
literal ``{0, 1}`` (order is an implementation detail, which can scramble
series/legend ordering) — replaced with a tuple; and the normalization
constant ``T`` depends only on ``n``, so it is hoisted out of the inner
loops.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

df = pd.read_csv('../results/var_wifi_load.tsv', sep='\t', index_col=False, header=0)

nodes = [1, 2, 5, 10]
num_carriers = 20

for n in nodes:
    data = df[(df['numEnbs'] == n)]
    plt.cla()
    # Normalization constant: invariant w.r.t. scheme and wifi_carriers.
    T = 75 * num_carriers / n
    for wifi_carriers in [5, 10, 15, 20]:
        # Tuple, not a set literal, so series order is deterministic.
        for scheme in (0, 1):
            data_ = data[(data['smart'] == scheme) & (data['numWifiCarriers'] == wifi_carriers)].groupby('txLock')
            # Mean throughput per txLock value.
            t = data_['throughputPerCell']\
                .agg([np.mean])\
                .reset_index()
            # Mean of the reported per-cell standard deviations (error bars).
            s = data_['throughputPerCellStDev'] \
                .agg([np.mean]) \
                .reset_index()
            plt.errorbar(t['txLock'], t['mean'] / T, s['mean'] / T, marker='s',
                         label=f'{"smart" if scheme else "basic"} K={wifi_carriers}')
    plt.grid(True)
    plt.legend(loc='best')
    plt.xlabel('D, backoff slots')
    plt.ylabel('Relative throughput per cell')
    plt.title(f'N={n}')
    plt.savefig(f'../plots/pdf/var_wifiload_{n}.pdf')
    plt.savefig(f'../plots/png/var_wifiload_{n}.png')
    plt.show()
[ "matplotlib.pyplot.title", "matplotlib.pyplot.show", "pandas.read_csv", "matplotlib.pyplot.legend", "matplotlib.pyplot.errorbar", "matplotlib.pyplot.cla", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.grid", "matplotlib.pyplot.savefig" ]
[((101, 186), 'pandas.read_csv', 'pd.read_csv', (['"""../results/var_wifi_load.tsv"""'], {'sep': '"""\t"""', 'index_col': '(False)', 'header': '(0)'}), "('../results/var_wifi_load.tsv', sep='\\t', index_col=False, header=0\n )\n", (112, 186), True, 'import pandas as pd\n'), ((281, 290), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (288, 290), True, 'import matplotlib.pyplot as plt\n'), ((884, 898), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (892, 898), True, 'import matplotlib.pyplot as plt\n'), ((903, 925), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (913, 925), True, 'import matplotlib.pyplot as plt\n'), ((930, 960), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""D, backoff slots"""'], {}), "('D, backoff slots')\n", (940, 960), True, 'import matplotlib.pyplot as plt\n'), ((965, 1007), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Relative throughput per cell"""'], {}), "('Relative throughput per cell')\n", (975, 1007), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1031), 'matplotlib.pyplot.title', 'plt.title', (['f"""N={n}"""'], {}), "(f'N={n}')\n", (1021, 1031), True, 'import matplotlib.pyplot as plt\n'), ((1036, 1085), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../plots/pdf/var_wifiload_{n}.pdf"""'], {}), "(f'../plots/pdf/var_wifiload_{n}.pdf')\n", (1047, 1085), True, 'import matplotlib.pyplot as plt\n'), ((1090, 1139), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""../plots/png/var_wifiload_{n}.png"""'], {}), "(f'../plots/png/var_wifiload_{n}.png')\n", (1101, 1139), True, 'import matplotlib.pyplot as plt\n'), ((1144, 1154), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1152, 1154), True, 'import matplotlib.pyplot as plt\n'), ((751, 884), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (["t['txLock']", "(t['mean'] / T)", "(s['mean'] / T)"], {'marker': '"""s"""', 'label': 'f"""{\'smart\' if scheme else \'basic\'} K={wifi_carriers}"""'}), '(t[\'txLock\'], 
t[\'mean\'] / T, s[\'mean\'] / T, marker=\'s\', label=\n f"{\'smart\' if scheme else \'basic\'} K={wifi_carriers}")\n', (763, 884), True, 'import matplotlib.pyplot as plt\n')]
'''
Multivariate statistics exercises
================================
'''

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

#%matplotlib inline
np.random.seed(seed=42)  # make the example reproducible

'''
### Dot product and Euclidean norm
'''

a = np.array([2,1])
b = np.array([1,1])


def euclidian(x):
    # Euclidean (L2) norm: sqrt(x . x).
    return np.sqrt(np.dot(x, x))


euclidian(a)

euclidian(a - b)

# Scalar projection of b onto the unit vector along a.
np.dot(b, a / euclidian(a))

X = np.random.randn(100, 2)

# Project every row of X onto the direction of a.
np.dot(X, a / euclidian(a))

'''
### Covariance matrix and Mahalanobis norm
'''
N = 100
mu = np.array([1, 1])
Cov = np.array([[1, .8], [.8, 1]])

X = np.random.multivariate_normal(mu, Cov, N)

xbar = np.mean(X, axis=0)
print(xbar)

Xc = (X - xbar)  # center the sample
np.mean(Xc, axis=0)  # should be ~0 after centering

# Unbiased sample covariance (divide by N - 1).
S = 1 / (N - 1) * np.dot(Xc.T, Xc)
print(S)

#import scipy
Sinv = np.linalg.inv(S)


def mahalanobis(x, xbar, Sinv):
    # Mahalanobis distance of x from xbar: sqrt((x-xbar)^T Sinv (x-xbar)).
    xc = x - xbar
    return np.sqrt(np.dot(np.dot(xc, Sinv), xc))


dists = pd.DataFrame(
    [[mahalanobis(X[i, :], xbar, Sinv),
      euclidian(X[i, :] - xbar)] for i in range(X.shape[0])],
    columns = ['Mahalanobis', 'Euclidean'])

print(dists[:10])

x = X[0, :]

import scipy.spatial
# Cross-check against SciPy's implementation. NOTE(review): this asserts
# exact float equality — it happens to hold because both sides evaluate
# the same expression, but it is fragile to any reordering.
assert(mahalanobis(X[0, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[0, :], Sinv))
assert(mahalanobis(X[1, :], xbar, Sinv) == scipy.spatial.distance.mahalanobis(xbar, X[1, :], Sinv))
[ "numpy.random.seed", "numpy.random.randn", "numpy.mean", "numpy.array", "numpy.random.multivariate_normal", "numpy.linalg.inv", "numpy.dot" ]
[((165, 188), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(42)'}), '(seed=42)\n', (179, 188), True, 'import numpy as np\n'), ((271, 287), 'numpy.array', 'np.array', (['[2, 1]'], {}), '([2, 1])\n', (279, 287), True, 'import numpy as np\n'), ((291, 307), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (299, 307), True, 'import numpy as np\n'), ((425, 448), 'numpy.random.randn', 'np.random.randn', (['(100)', '(2)'], {}), '(100, 2)\n', (440, 448), True, 'import numpy as np\n'), ((543, 559), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (551, 559), True, 'import numpy as np\n'), ((566, 596), 'numpy.array', 'np.array', (['[[1, 0.8], [0.8, 1]]'], {}), '([[1, 0.8], [0.8, 1]])\n', (574, 596), True, 'import numpy as np\n'), ((616, 657), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['mu', 'Cov', 'N'], {}), '(mu, Cov, N)\n', (645, 657), True, 'import numpy as np\n'), ((666, 684), 'numpy.mean', 'np.mean', (['X'], {'axis': '(0)'}), '(X, axis=0)\n', (673, 684), True, 'import numpy as np\n'), ((715, 734), 'numpy.mean', 'np.mean', (['Xc'], {'axis': '(0)'}), '(Xc, axis=0)\n', (722, 734), True, 'import numpy as np\n'), ((803, 819), 'numpy.linalg.inv', 'np.linalg.inv', (['S'], {}), '(S)\n', (816, 819), True, 'import numpy as np\n'), ((754, 770), 'numpy.dot', 'np.dot', (['Xc.T', 'Xc'], {}), '(Xc.T, Xc)\n', (760, 770), True, 'import numpy as np\n'), ((345, 357), 'numpy.dot', 'np.dot', (['x', 'x'], {}), '(x, x)\n', (351, 357), True, 'import numpy as np\n'), ((898, 914), 'numpy.dot', 'np.dot', (['xc', 'Sinv'], {}), '(xc, Sinv)\n', (904, 914), True, 'import numpy as np\n')]
""" Finds the amount of reversible numbers below limit_n Author: <NAME> """ import math from time import time # Iterator to create values to run in the algorithm class Range_reverse: def __init__(self, start,limit_n): self.current_value = start self.limit = 10**7 self.maximum = limit_n def __iter__(self): return self def __next__(self): self.current_value += 2 if self.current_value > self.maximum: raise StopIteration if self.current_value > self.limit: self.current_value = self.limit+10**7+1 self.limit += 2*10**7 return self.current_value # Generator of possible values to visit def reverse_generator(start, limit_n): i = start max_gap = 10**7 while i<limit_n: i+=2 if i>max_gap: i=max_gap+10**7+1 max_gap += 2*10**7 yield i """ Finds the amount of reversible numbers below limit_n """ def reversible_numbers(limit_n): total = 0 start = 13 power = 10**7 t0 = time() for n in reverse_generator(start,limit_n): n_str = str(n) if int(n_str[0])%2==0: carry = 0 valid = True for i in range(len(n_str)): val = int(n_str[i])+int(n_str[-1-i])+carry carry = 0 if val>=10: carry = 1 val %= 10 if val%2==0: valid = False break if valid: total += 2 if n > power: print(n) power += 2*10**7 t1 = time() print('Total time to reach the solution: ', t1-t0) return total if __name__ == "__main__": limit_n = 10**8 print('The amount of reversible numbers below limit_n is {0}'.format(reversible_numbers(limit_n)))
[ "time.time" ]
[((1061, 1067), 'time.time', 'time', ([], {}), '()\n', (1065, 1067), False, 'from time import time\n'), ((1648, 1654), 'time.time', 'time', ([], {}), '()\n', (1652, 1654), False, 'from time import time\n')]
import logging
import sys

LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"


def get_logger(name, log_format=LOG_FORMAT):
    """Return a logger at INFO level that writes to stdout.

    Fix: the original attached a fresh StreamHandler on *every* call, so
    calling get_logger twice for the same name made each record print
    twice (and so on). A handler is now added only when the logger does
    not already have one.

    Parameters
    ----------
    name : str
        Logger name (typically ``__name__``).
    log_format : str
        Format string applied to emitted records.
    """
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid stacking duplicate handlers on reuse
        handler = logging.StreamHandler(stream=sys.stdout)
        handler.setFormatter(logging.Formatter(log_format))
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
[ "logging.Formatter", "logging.StreamHandler", "logging.getLogger" ]
[((155, 178), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (172, 178), False, 'import logging\n'), ((193, 233), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (214, 233), False, 'import logging\n'), ((259, 288), 'logging.Formatter', 'logging.Formatter', (['log_format'], {}), '(log_format)\n', (276, 288), False, 'import logging\n')]
# -*- coding: utf-8 -*- # Builtin Modules import json import time import traceback # 3rd-party Modules import memcache import six # Project Modules from worker.utils import toolkit from worker.utils.log_helper import LogHelper def get_config(c): servers = c.get('servers') or None if servers and isinstance(servers, (six.string_types, six.text_type)): servers = servers.split(',') servers = servers or '127.0.0.1:11211' servers = toolkit.as_array(servers) return servers LIMIT_ARGS_DUMP = 200 class MemcachedHelper(object): def __init__(self, logger, config, *args, **kwargs): self.logger = logger self.config = config self.client = memcache.Client(get_config(config)) def __del__(self): pass def check(self): try: self.client.get_stats() except Exception as e: for line in traceback.format_exc().splitlines(): self.logger.error(line) raise Exception(str(e)) def query(self, *args): command = args[0] command_args = args[1:] args_dumps = ', '.join([toolkit.json_dumps(x) for x in command_args]) if len(args_dumps) > LIMIT_ARGS_DUMP: args_dumps = args_dumps[0:LIMIT_ARGS_DUMP-3] + '...' self.logger.debug('[MEMCACHED] Query `{}` <- `{}`'.format(command.upper(), args_dumps)) return getattr(self.client, command.lower())(*command_args) def run(self, *args, **kwargs): command = args[0] command_args = args[1:] args_dumps = ', '.join([toolkit.json_dumps(x) for x in command_args]) if len(args_dumps) > LIMIT_ARGS_DUMP: args_dumps = args_dumps[0:LIMIT_ARGS_DUMP-3] + '...' self.logger.debug('[MEMCACHED] Run `{}` <- `{}`'.format(command.upper(), args_dumps)) return getattr(self.client, command.lower())(*command_args, **kwargs) def get(self, key): return self.run('get', key) def set(self, key, value): return self.run('set', key, value) def add(self, key, value): return self.run('add', key, value) def replace(self, key, value): return self.run('replace', key, value) def delete(self, key): return self.run('delete', key)
[ "worker.utils.toolkit.as_array", "traceback.format_exc", "worker.utils.toolkit.json_dumps" ]
[((459, 484), 'worker.utils.toolkit.as_array', 'toolkit.as_array', (['servers'], {}), '(servers)\n', (475, 484), False, 'from worker.utils import toolkit\n'), ((1137, 1158), 'worker.utils.toolkit.json_dumps', 'toolkit.json_dumps', (['x'], {}), '(x)\n', (1155, 1158), False, 'from worker.utils import toolkit\n'), ((1593, 1614), 'worker.utils.toolkit.json_dumps', 'toolkit.json_dumps', (['x'], {}), '(x)\n', (1611, 1614), False, 'from worker.utils import toolkit\n'), ((898, 920), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (918, 920), False, 'import traceback\n')]
import re

from .sentinel import Sentinel


def kleene(parser):
    """Kleene star: match *parser* zero or more times.

    The returned generator yields every (items, remaining_text) split,
    including the zero-match one, so callers can backtrack.
    NOTE(review): if *parser* can succeed without consuming input the
    recursion never terminates — callers must pass consuming parsers.
    """
    def parse_many(text):
        text = text.lstrip()
        if len(text) > 0:
            for item, text1 in parser(text):
                for items, text2 in parse_many(text1):
                    yield ([item] + items, text2)
            else:
                # for/else with no break: always runs, offering the
                # zero-match alternative as well.
                yield([], text)
        else:
            yield ([], text)
    return parse_many


def max_kleene(parser):
    """Like kleene(), but yields the empty match only when nothing matched."""
    def parse_many(text):
        text = text.lstrip()
        # NOTE(review): local `any` shadows the builtin of the same name.
        any = False
        for item, text1 in parser(text):
            for items, text2 in parse_many(text1):
                any = True
                yield ([item] + items, text2)
        if not any:
            yield ([], text)
    return parse_many


def cat(*parsers):
    """Sequence combinator: match each parser in order.

    Yields (tuple_of_items, remaining_text); items equal to DROP are
    omitted from the tuple.
    """
    def parse_cat(text, parsers=parsers):
        for item1, text1 in parsers[0](text):
            first = (item1,) if item1 is not DROP else ()
            if len(parsers) == 1:
                yield first, text1
            else:
                for rest, text2 in parse_cat(text1, parsers=parsers[1:]):
                    yield first + rest, text2
    return parse_cat


def alt(*parsers):
    """Ordered alternation: yield results of each parser in turn."""
    def parse_alt(text):
        for parser in parsers:
            for item, text1 in parser(text):
                yield item, text1
    return parse_alt


def maybe(parser):
    """Optional match: yields the parser's results, then (None, text)."""
    def parse_maybe(text):
        yield from parser(text)
        yield None, text
    return parse_maybe


def token(string):
    """Match a literal token after skipping leading whitespace."""
    def parse_token(text):
        text = text.lstrip()
        if text.startswith(string):
            yield string, text[len(string):]
    return parse_token


# Sentinel item that cat() silently removes from its result tuples.
DROP = Sentinel("DROP")


def map(parser, func):
    """Apply *func* to each item the parser yields.

    NOTE(review): this name shadows the builtin ``map`` within this module.
    """
    def parse_map(text):
        for item, text1 in parser(text):
            yield func(item), text1
    return parse_map


def drop_token(string):
    """Match a literal token and mark its item for removal by cat()."""
    return map(token(string), lambda _: DROP)


# Sentinel yielded at end of input.
EOS = Sentinel("EOS")


def eos(text):
    """Succeed (yielding EOS) only when the remaining text is all whitespace."""
    text = text.lstrip()
    if len(text) == 0:
        yield (EOS, text)


NATURAL = re.compile(r'^([0-9]+)')


def natural(text):
    """Match a natural number, yielding it as an int."""
    text = text.lstrip()
    match = NATURAL.match(text)
    if match is not None:
        yield int(match.group(1)), text[match.end():]
[ "re.compile" ]
[((2000, 2023), 're.compile', 're.compile', (['"""^([0-9]+)"""'], {}), "('^([0-9]+)')\n", (2010, 2023), False, 'import re\n')]
import platform

if platform.system() == "Windows":
    import os
    import sys

    # Candidate directories holding the native libKriging DLLs.
    shared_lib_paths = [os.path.join(os.path.dirname(__file__), 'shared_libs')]  # cf setup.py
    lk_path = os.environ.get("LIBKRIGING_DLL_PATH")
    if lk_path:
        for path in lk_path.split(os.pathsep):
            shared_lib_paths.append(path)

    # alternative method if lib/site-packages prefix is not reliable (requires update of setup.py)
    # import distutils  # https://docs.python.org/3/distutils/apiref.html#module-distutils.sysconfig
    # shared_lib_path = os.path.join(distutils.sysconfig.PREFIX, 'pylibkriging', 'shared_libs')

    if sys.version_info[:2] < (3, 8):  # < 3.8.0
        # Before Python 3.8, DLL resolution follows PATH, so prepend there.
        for path in shared_lib_paths:
            if os.path.isdir(path):
                os.environ['PATH'] = path + os.pathsep + os.environ['PATH']
    else:
        # Python 3.8+ ignores PATH for extension-module DLLs; the explicit
        # os.add_dll_directory mechanism is required instead.
        for path in shared_lib_paths:
            if os.path.isdir(path):
                os.add_dll_directory(path)

from _pylibkriging import *
from _pylibkriging import __version__, __build_type__

# Type alias to switch to the right binding
Kriging = WrappedPyKriging
LinearRegression = WrappedPyLinearRegression
[ "os.path.isdir", "os.path.dirname", "os.environ.get", "os.add_dll_directory", "platform.system" ]
[((20, 37), 'platform.system', 'platform.system', ([], {}), '()\n', (35, 37), False, 'import platform\n'), ((191, 228), 'os.environ.get', 'os.environ.get', (['"""LIBKRIGING_DLL_PATH"""'], {}), "('LIBKRIGING_DLL_PATH')\n", (205, 228), False, 'import os\n'), ((119, 144), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (134, 144), False, 'import os\n'), ((734, 753), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (747, 753), False, 'import os\n'), ((894, 913), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (907, 913), False, 'import os\n'), ((931, 957), 'os.add_dll_directory', 'os.add_dll_directory', (['path'], {}), '(path)\n', (951, 957), False, 'import os\n')]
# this handles all the reading from file + creating dictionaries
# everything is separated by tabs
import os


def open_table(path, hunting=False):
    """Read a numbered loot-table family from disk.

    Files live at ``loot_tables/[hunting/]<path>/<path>1 .. <path>N`` where
    N is the number of files found under that directory (counted
    recursively, matching the original behavior). Each line is a
    comma-separated row; a trailing ``,\\n`` and any ``,,`` runs are
    stripped before splitting.

    Returns a list with one entry per file; each entry is a list of rows,
    each row a list of string cells.

    Fixes over the original: files are opened with a context manager so
    they are closed even when a read raises, and the os.walk unpacking no
    longer binds the same name to two tuple slots.
    """
    rel_path = ("hunting/" + path) if hunting else path
    file_path = "loot_tables"
    base_dir = file_path + "/" + rel_path

    # One table per file found anywhere under the directory.
    num_tables = sum(len(filenames)
                     for _dirpath, _dirnames, filenames in os.walk(base_dir))

    table = [[] for _ in range(num_tables)]
    for i in range(num_tables):
        with open(base_dir + "/" + path + str(i + 1), "r") as file:
            for line in file.readlines():
                table[i].append(line.replace(',\n', '').replace(',,', '').split(','))
    return table
[ "os.walk" ]
[((331, 367), 'os.walk', 'os.walk', (["(file_path + '/' + _bullshit)"], {}), "(file_path + '/' + _bullshit)\n", (338, 367), False, 'import os\n')]
import unittest
from unittest import mock

from tethys_apps.templatetags import tags as t


class TestTags(unittest.TestCase):
    """Unit tests for the tag helper template tags in tethys_apps."""

    def setUp(self):
        # app_list
        self.app_names = ['app1', 'app2', 'app3', 'app4', 'app5', 'app6']
        self.tag_names = ['tag1', 'tag_2', 'tag 3', 'tag four', 'Tag Five', 'tag6']
        # Expected CSS-class form of each tag (lowercased, spaces -> hyphens).
        self.tag_classes = ['tag1', 'tag_2', 'tag-3', 'tag-four', 'tag-five', 'tag6']
        # Expected (css_class, display_name) pairs.
        self.tag_pairs = [
            ('tag1', 'Tag1'),
            ('tag_2', 'Tag_2'),
            ('tag-3', 'Tag 3'),
            ('tag-four', 'Tag Four'),
            ('tag-five', 'Tag Five'),
            ('tag6', 'Tag6'),
        ]

        # Object apps: the i-th app carries the first i+1 tags, comma-joined,
        # so the last app holds the full tag list.
        self.mock_object_apps = {'configured': []}
        for i, app_name in enumerate(self.app_names):
            mock_app = mock.MagicMock(tags=','.join(self.tag_names[:i+1]))
            mock_app.name = app_name
            self.mock_object_apps['configured'].append(mock_app)

        # Dictionary apps: the same data in plain-dict form.
        self.mock_dict_apps = {'configured': []}
        for i, app_name in enumerate(self.app_names):
            mock_app = dict(tags=','.join(self.tag_names[:i+1]), name=app_name)
            self.mock_dict_apps['configured'].append(mock_app)

    def tearDown(self):
        pass

    def test_get_tag_class(self):
        # The last object app carries all tags; expect every CSS class back.
        ret_tag_str = t.get_tag_class(self.mock_object_apps['configured'][-1])
        ret_tag_list = ret_tag_str.split(' ')
        self.assertEqual(sorted(self.tag_classes), sorted(ret_tag_list))

    def test_get_tag_class_dict(self):
        # Same expectation for the dict-shaped app.
        ret_tag_str = t.get_tag_class(self.mock_dict_apps['configured'][-1])
        ret_tag_list = ret_tag_str.split(' ')
        self.assertEqual(sorted(self.tag_classes), sorted(ret_tag_list))

    def test_get_tags_from_apps(self):
        ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
        self.assertEqual(sorted(self.tag_pairs), sorted(ret_tag_list))

    def test_get_tags_from_apps_dict(self):
        ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
        self.assertEqual(sorted(self.tag_pairs), sorted(ret_tag_list))

    def test_get_tags_from_apps_object_disabled(self):
        # Tags from a disabled app must be excluded.
        self.mock_object_apps['configured'].append(mock.MagicMock(tags='disabled', enabled=False))
        ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
        self.assertNotIn('disabled', ret_tag_list)

    def test_get_tags_from_apps_dict_disabled(self):
        self.mock_dict_apps['configured'].append({'tags': 'disabled', 'enabled': False})
        ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
        self.assertNotIn('disabled', ret_tag_list)

    def test_get_tags_from_apps_object_dont_show(self):
        # Tags from an app hidden from the apps library must be excluded.
        self.mock_object_apps['configured'].append(mock.MagicMock(tags='disabled', show_in_apps_library=False))
        ret_tag_list = t.get_tags_from_apps(self.mock_object_apps)
        self.assertNotIn('disabled', ret_tag_list)

    def test_get_tags_from_apps_dict_dont_show(self):
        self.mock_dict_apps['configured'].append({'tags': 'disabled', 'show_in_apps_library': False})
        ret_tag_list = t.get_tags_from_apps(self.mock_dict_apps)
        self.assertNotIn('disabled', ret_tag_list)
[ "tethys_apps.templatetags.tags.get_tag_class", "tethys_apps.templatetags.tags.get_tags_from_apps", "unittest.mock.MagicMock" ]
[((1321, 1377), 'tethys_apps.templatetags.tags.get_tag_class', 't.get_tag_class', (["self.mock_object_apps['configured'][-1]"], {}), "(self.mock_object_apps['configured'][-1])\n", (1336, 1377), True, 'from tethys_apps.templatetags import tags as t\n'), ((1559, 1613), 'tethys_apps.templatetags.tags.get_tag_class', 't.get_tag_class', (["self.mock_dict_apps['configured'][-1]"], {}), "(self.mock_dict_apps['configured'][-1])\n", (1574, 1613), True, 'from tethys_apps.templatetags import tags as t\n'), ((1796, 1839), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_object_apps'], {}), '(self.mock_object_apps)\n', (1816, 1839), True, 'from tethys_apps.templatetags import tags as t\n'), ((1979, 2020), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_dict_apps'], {}), '(self.mock_dict_apps)\n', (1999, 2020), True, 'from tethys_apps.templatetags import tags as t\n'), ((2270, 2313), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_object_apps'], {}), '(self.mock_object_apps)\n', (2290, 2313), True, 'from tethys_apps.templatetags import tags as t\n'), ((2531, 2572), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_dict_apps'], {}), '(self.mock_dict_apps)\n', (2551, 2572), True, 'from tethys_apps.templatetags import tags as t\n'), ((2816, 2859), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_object_apps'], {}), '(self.mock_object_apps)\n', (2836, 2859), True, 'from tethys_apps.templatetags import tags as t\n'), ((3091, 3132), 'tethys_apps.templatetags.tags.get_tags_from_apps', 't.get_tags_from_apps', (['self.mock_dict_apps'], {}), '(self.mock_dict_apps)\n', (3111, 3132), True, 'from tethys_apps.templatetags import tags as t\n'), ((2199, 2245), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'tags': '"""disabled"""', 'enabled': '(False)'}), "(tags='disabled', enabled=False)\n", 
(2213, 2245), False, 'from unittest import mock\n'), ((2732, 2791), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {'tags': '"""disabled"""', 'show_in_apps_library': '(False)'}), "(tags='disabled', show_in_apps_library=False)\n", (2746, 2791), False, 'from unittest import mock\n')]
import tensorflow as tf import tensorflow.keras as keras import lib_stylegan def get_random_noise(batch_size=8): random_noise = tf.random.normal(shape=(batch_size, 8)) return random_noise[:,:3], random_noise[:,3:6], random_noise[:,6], random_noise[:,7] def make_seed_standard(model): start_dim = model.im_size // (2**(model.n_layers-1)) style_input = inp_style = keras.layers.Input([model.n_layers, model.latent_size]) x = tf.stop_gradient(style_input)[:,0,:1] * 0 + 1 x = keras.layers.Dense(start_dim*start_dim*4*model.channels, activation = 'relu', kernel_initializer = 'random_normal')(x) x = keras.layers.Reshape([start_dim, start_dim, 4*model.channels])(x) return keras.models.Model(inputs = style_input, outputs = x) def make_seed_3d(model): start_dim = model.im_size // (2**(model.n_layers-1)) style_input = keras.layers.Input([model.n_layers, model.latent_size]) inputs_camera = [ keras.layers.Input([3]), keras.layers.Input([3]), keras.layers.Input(batch_shape=(None,)), keras.layers.Input(batch_shape=(None,)), ] random_view = lib_stylegan.lib_3d.layers.CameraStd()(inputs_camera) rays = lib_stylegan.lib_3d.layers.RayTracer()(random_view) hiddens = keras.layers.Dense(model.channels*4,activation="relu")(rays) hiddens = keras.layers.Dense(model.channels*4,activation="relu")(hiddens) hiddens = keras.layers.Dense(model.channels*4,activation="relu")(hiddens) feature_map = lib_stylegan.lib_3d.math_3d.to_feature_map(hiddens) raw_model = keras.models.Model(inputs = inputs_camera, outputs = feature_map) r = get_random_noise(batch_size=tf.shape(style_input)[0]) feature_map_random = raw_model(r) return keras.models.Model(inputs = style_input, outputs = feature_map_random)
[ "tensorflow.keras.layers.Reshape", "lib_stylegan.lib_3d.layers.CameraStd", "tensorflow.keras.layers.Dense", "tensorflow.random.normal", "tensorflow.stop_gradient", "tensorflow.keras.models.Model", "tensorflow.shape", "tensorflow.keras.layers.Input", "lib_stylegan.lib_3d.layers.RayTracer", "lib_stylegan.lib_3d.math_3d.to_feature_map" ]
[((135, 174), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '(batch_size, 8)'}), '(shape=(batch_size, 8))\n', (151, 174), True, 'import tensorflow as tf\n'), ((392, 447), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['[model.n_layers, model.latent_size]'], {}), '([model.n_layers, model.latent_size])\n', (410, 447), True, 'import tensorflow.keras as keras\n'), ((760, 809), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'style_input', 'outputs': 'x'}), '(inputs=style_input, outputs=x)\n', (778, 809), True, 'import tensorflow.keras as keras\n'), ((916, 971), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['[model.n_layers, model.latent_size]'], {}), '([model.n_layers, model.latent_size])\n', (934, 971), True, 'import tensorflow.keras as keras\n'), ((1564, 1615), 'lib_stylegan.lib_3d.math_3d.to_feature_map', 'lib_stylegan.lib_3d.math_3d.to_feature_map', (['hiddens'], {}), '(hiddens)\n', (1606, 1615), False, 'import lib_stylegan\n'), ((1637, 1698), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'inputs_camera', 'outputs': 'feature_map'}), '(inputs=inputs_camera, outputs=feature_map)\n', (1655, 1698), True, 'import tensorflow.keras as keras\n'), ((1819, 1885), 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'style_input', 'outputs': 'feature_map_random'}), '(inputs=style_input, outputs=feature_map_random)\n', (1837, 1885), True, 'import tensorflow.keras as keras\n'), ((518, 640), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(start_dim * start_dim * 4 * model.channels)'], {'activation': '"""relu"""', 'kernel_initializer': '"""random_normal"""'}), "(start_dim * start_dim * 4 * model.channels, activation=\n 'relu', kernel_initializer='random_normal')\n", (536, 640), True, 'import tensorflow.keras as keras\n'), ((679, 743), 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['[start_dim, start_dim, 4 * model.channels]'], {}), '([start_dim, 
start_dim, 4 * model.channels])\n', (699, 743), True, 'import tensorflow.keras as keras\n'), ((1007, 1030), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['[3]'], {}), '([3])\n', (1025, 1030), True, 'import tensorflow.keras as keras\n'), ((1040, 1063), 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['[3]'], {}), '([3])\n', (1058, 1063), True, 'import tensorflow.keras as keras\n'), ((1073, 1112), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': '(None,)'}), '(batch_shape=(None,))\n', (1091, 1112), True, 'import tensorflow.keras as keras\n'), ((1122, 1161), 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'batch_shape': '(None,)'}), '(batch_shape=(None,))\n', (1140, 1161), True, 'import tensorflow.keras as keras\n'), ((1192, 1230), 'lib_stylegan.lib_3d.layers.CameraStd', 'lib_stylegan.lib_3d.layers.CameraStd', ([], {}), '()\n', (1228, 1230), False, 'import lib_stylegan\n'), ((1257, 1295), 'lib_stylegan.lib_3d.layers.RayTracer', 'lib_stylegan.lib_3d.layers.RayTracer', ([], {}), '()\n', (1293, 1295), False, 'import lib_stylegan\n'), ((1324, 1381), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(model.channels * 4)'], {'activation': '"""relu"""'}), "(model.channels * 4, activation='relu')\n", (1342, 1381), True, 'import tensorflow.keras as keras\n'), ((1399, 1456), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(model.channels * 4)'], {'activation': '"""relu"""'}), "(model.channels * 4, activation='relu')\n", (1417, 1456), True, 'import tensorflow.keras as keras\n'), ((1477, 1534), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(model.channels * 4)'], {'activation': '"""relu"""'}), "(model.channels * 4, activation='relu')\n", (1495, 1534), True, 'import tensorflow.keras as keras\n'), ((460, 489), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['style_input'], {}), '(style_input)\n', (476, 489), True, 'import tensorflow as tf\n'), ((1739, 1760), 'tensorflow.shape', 'tf.shape', 
(['style_input'], {}), '(style_input)\n', (1747, 1760), True, 'import tensorflow as tf\n')]
from typing import List, Dict from pydantic import BaseModel, Field from models.domain.resource import ResourceType from models.domain.resource_template import ResourceTemplate, Property def get_sample_workspace_template_object(template_name: str = "tre-workspace-vanilla") -> ResourceTemplate: return ResourceTemplate( id="a7a7a7bd-7f4e-4a4e-b970-dc86a6b31dfb", name=template_name, description="vanilla workspace bundle", version="0.1.0", resourceType=ResourceType.Workspace, current=True, type="object", required=["display_name", "description", "app_id"], properties={ "display_name": Property(type="string"), "description": Property(type="string"), "app_id": Property(type="string"), "address_space": Property(type="string", default="10.2.1.0/24", description="VNet address space for the workspace services") } ) def get_sample_workspace_template() -> dict: return get_sample_workspace_template_object().dict() def get_sample_workspace_template_in_response() -> dict: workspace_template = get_sample_workspace_template_object().dict() workspace_template["system_properties"] = { "tre_id": Property(type="string"), "workspace_id": Property(type="string"), "azure_location": Property(type="string"), } return workspace_template class WorkspaceTemplateNamesInList(BaseModel): templateNames: List[str] class Config: schema_extra = { "example": { "templateNames": ["tre-workspace-vanilla", "tre-workspace-base"] } } class WorkspaceTemplateInCreate(BaseModel): name: str = Field(title="Name of workspace template") version: str = Field(title="Version of workspace template") current: bool = Field(title="Mark this version as current") json_schema: Dict = Field(title="JSON Schema compliant template") class Config: schema_extra = { "example": { "name": "my-tre-workspace", "version": "0.0.1", "current": "true", "json_schema": { "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace.json", "type": "object", 
"title": "My Workspace Template Custom Parameters", "description": "These parameters are specific to my workspace template", "required": [ "vm_size", "no_of_vms" ], "properties": { "vm_size": { "$id": "#/properties/vm_size", "type": "string", "title": "VM size", "description": "Size of the VMs in my workspace", "default": "Standard_A1", "enum": [ "Standard_A1", "Standard_A2", "Standard_A3" ] }, "no_of_vms": { "$id": "#/properties/no_of_vms", "type": "integer", "title": "Number of VMs", "description": "Number of virtual machines to be deployed in the workspace", "default": 0 } } } } } class WorkspaceTemplateInResponse(ResourceTemplate): system_properties: Dict[str, Property] = Field(title="System properties") class Config: schema_extra = { "example": get_sample_workspace_template_in_response() }
[ "models.domain.resource_template.Property", "pydantic.Field" ]
[((1730, 1771), 'pydantic.Field', 'Field', ([], {'title': '"""Name of workspace template"""'}), "(title='Name of workspace template')\n", (1735, 1771), False, 'from pydantic import BaseModel, Field\n'), ((1791, 1835), 'pydantic.Field', 'Field', ([], {'title': '"""Version of workspace template"""'}), "(title='Version of workspace template')\n", (1796, 1835), False, 'from pydantic import BaseModel, Field\n'), ((1856, 1899), 'pydantic.Field', 'Field', ([], {'title': '"""Mark this version as current"""'}), "(title='Mark this version as current')\n", (1861, 1899), False, 'from pydantic import BaseModel, Field\n'), ((1924, 1969), 'pydantic.Field', 'Field', ([], {'title': '"""JSON Schema compliant template"""'}), "(title='JSON Schema compliant template')\n", (1929, 1969), False, 'from pydantic import BaseModel, Field\n'), ((3837, 3869), 'pydantic.Field', 'Field', ([], {'title': '"""System properties"""'}), "(title='System properties')\n", (3842, 3869), False, 'from pydantic import BaseModel, Field\n'), ((1255, 1278), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), "(type='string')\n", (1263, 1278), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((1304, 1327), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), "(type='string')\n", (1312, 1327), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((1355, 1378), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), "(type='string')\n", (1363, 1378), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((678, 701), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), "(type='string')\n", (686, 701), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((730, 753), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), 
"(type='string')\n", (738, 753), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((777, 800), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""'}), "(type='string')\n", (785, 800), False, 'from models.domain.resource_template import ResourceTemplate, Property\n'), ((831, 943), 'models.domain.resource_template.Property', 'Property', ([], {'type': '"""string"""', 'default': '"""10.2.1.0/24"""', 'description': '"""VNet address space for the workspace services"""'}), "(type='string', default='10.2.1.0/24', description=\n 'VNet address space for the workspace services')\n", (839, 943), False, 'from models.domain.resource_template import ResourceTemplate, Property\n')]
import click from rich.console import Console from rich.table import Table from .cmd_group import listener_cmds @click.command("scan", help="scan for Nodes") def scan_and_print(): from HiveMind_presence import LocalDiscovery table = Table(title="HiveMind Devices") table.add_column("Name", justify="center") table.add_column("Protocol", justify="center") table.add_column("Host", justify="center") table.add_column("Port", justify="center") console = Console() console.print("Scanning....") for device in LocalDiscovery().scan(timeout=10): proto = "wss" if device.ssl else "ws" table.add_row(device.friendly_name, proto, device.host, str(device.port)) console.print(table) listener_cmds.add_command(scan_and_print)
[ "rich.console.Console", "HiveMind_presence.LocalDiscovery", "rich.table.Table", "click.command" ]
[((116, 160), 'click.command', 'click.command', (['"""scan"""'], {'help': '"""scan for Nodes"""'}), "('scan', help='scan for Nodes')\n", (129, 160), False, 'import click\n'), ((244, 275), 'rich.table.Table', 'Table', ([], {'title': '"""HiveMind Devices"""'}), "(title='HiveMind Devices')\n", (249, 275), False, 'from rich.table import Table\n'), ((484, 493), 'rich.console.Console', 'Console', ([], {}), '()\n', (491, 493), False, 'from rich.console import Console\n'), ((546, 562), 'HiveMind_presence.LocalDiscovery', 'LocalDiscovery', ([], {}), '()\n', (560, 562), False, 'from HiveMind_presence import LocalDiscovery\n')]
s = b'Hello World' import binascii h = binascii.b2a_hex( s ) print( h ) print( binascii.a2b_hex( h )) import base64 h = base64.b16encode( s ) print( h ) print( base64.b16decode( h ) ) print( h.decode('ascii'))
[ "binascii.a2b_hex", "base64.b16encode", "base64.b16decode", "binascii.b2a_hex" ]
[((41, 60), 'binascii.b2a_hex', 'binascii.b2a_hex', (['s'], {}), '(s)\n', (57, 60), False, 'import binascii\n'), ((126, 145), 'base64.b16encode', 'base64.b16encode', (['s'], {}), '(s)\n', (142, 145), False, 'import base64\n'), ((82, 101), 'binascii.a2b_hex', 'binascii.a2b_hex', (['h'], {}), '(h)\n', (98, 101), False, 'import binascii\n'), ((166, 185), 'base64.b16decode', 'base64.b16decode', (['h'], {}), '(h)\n', (182, 185), False, 'import base64\n')]
import taichi as ti from taichi_course01_final.Types import HitResult, HittableObject, HittableObjectType, MaterialType import taichi_course01_final.HittableObject.Sphere as Sphere import taichi_course01_final.HittableObject.Plane as Plane import taichi_course01_final.HittableObject.Ellipse as Ellipse @ti.data_oriented class Scene: MAX_OBJECTS = 100 def __init__(self): self.objects = HittableObject.field(shape=self.MAX_OBJECTS) self._obj_count = ti.field(dtype=ti.i32, shape=()) self._portal_id_tmp = None def add(self, obj): if obj.material == MaterialType.PORTAL: if self._portal_id_tmp: self.objects[self._portal_id_tmp].portal_id = self._obj_count[None] obj.portal_id = self._portal_id_tmp self._portal_id_tmp = None else: self._portal_id_tmp = self._obj_count[None] self.objects[self._obj_count[None]] = obj self._obj_count[None] += 1 @ti.kernel def write_portal_id(self, i: ti.i32, id: ti.i32): self.objects[i].portal_id = id @ti.func def hit(self, ray, t_min=0.001, t_max=10e8): res = res_tmp = HitResult( did_hit=False, root=0., color=ti.Vector([0., 0., 0.]), hit_point=ti.Vector([0., 0., 0.]), hit_point_normal=ti.Vector([0., 0., 0.]), front_face=False, material=-1, id=-1, ) for i in range(self._obj_count[None]): if self.objects[i].type == HittableObjectType.SPHERE: res_tmp = Sphere.hit(self.objects[i], ray, t_min, t_max) elif self.objects[i].type == HittableObjectType.PLANE: res_tmp = Plane.hit(self.objects[i], ray, t_min, t_max) elif self.objects[i].type == HittableObjectType.ELLIPSE: res_tmp = Ellipse.hit(self.objects[i], ray, t_min, t_max) if res_tmp.did_hit: res = res_tmp res.id = i t_max = res.root return res
[ "taichi.field", "taichi_course01_final.Types.HittableObject.field", "taichi_course01_final.HittableObject.Plane.hit", "taichi.Vector", "taichi_course01_final.HittableObject.Ellipse.hit", "taichi_course01_final.HittableObject.Sphere.hit" ]
[((407, 451), 'taichi_course01_final.Types.HittableObject.field', 'HittableObject.field', ([], {'shape': 'self.MAX_OBJECTS'}), '(shape=self.MAX_OBJECTS)\n', (427, 451), False, 'from taichi_course01_final.Types import HitResult, HittableObject, HittableObjectType, MaterialType\n'), ((478, 510), 'taichi.field', 'ti.field', ([], {'dtype': 'ti.i32', 'shape': '()'}), '(dtype=ti.i32, shape=())\n', (486, 510), True, 'import taichi as ti\n'), ((1272, 1298), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1281, 1298), True, 'import taichi as ti\n'), ((1319, 1345), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1328, 1345), True, 'import taichi as ti\n'), ((1373, 1399), 'taichi.Vector', 'ti.Vector', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1382, 1399), True, 'import taichi as ti\n'), ((1622, 1668), 'taichi_course01_final.HittableObject.Sphere.hit', 'Sphere.hit', (['self.objects[i]', 'ray', 't_min', 't_max'], {}), '(self.objects[i], ray, t_min, t_max)\n', (1632, 1668), True, 'import taichi_course01_final.HittableObject.Sphere as Sphere\n'), ((1762, 1807), 'taichi_course01_final.HittableObject.Plane.hit', 'Plane.hit', (['self.objects[i]', 'ray', 't_min', 't_max'], {}), '(self.objects[i], ray, t_min, t_max)\n', (1771, 1807), True, 'import taichi_course01_final.HittableObject.Plane as Plane\n'), ((1903, 1950), 'taichi_course01_final.HittableObject.Ellipse.hit', 'Ellipse.hit', (['self.objects[i]', 'ray', 't_min', 't_max'], {}), '(self.objects[i], ray, t_min, t_max)\n', (1914, 1950), True, 'import taichi_course01_final.HittableObject.Ellipse as Ellipse\n')]
from kafka import KafkaConsumer import time Topic ='pi_test' consumer = KafkaConsumer(Topic,auto_offset_reset='earliest', enable_auto_commit=False) i=0 DATA_DICT = {"u","v","t","s"} for message in consumer: text = message.value.decode("utf-8") text = text.translate('b') print (message.topic, text) appendFile = open('Data.txt','a') if i==0: appendFile.write('\n') appendFile.write(message.topic) appendFile.write(' ') appendFile.write('u=') DATA_DICT[0]="%s"%text i=i+1 elif i==1: appendFile.write(' ') appendFile.write('v=') DATA_DICT[1]="%s"%text i=i+1 elif i==2: appendFile.write(' ') appendFile.write('t=') DATA_DICT[2]="%s"%text i=i+1 elif i==3: appendFile.write(' ') appendFile.write('s=') DATA_DICT[3]="%s"%text i=i-3 appendFile.write(text) appendFile.close() time.sleep(0.1)
[ "kafka.KafkaConsumer", "time.sleep" ]
[((72, 148), 'kafka.KafkaConsumer', 'KafkaConsumer', (['Topic'], {'auto_offset_reset': '"""earliest"""', 'enable_auto_commit': '(False)'}), "(Topic, auto_offset_reset='earliest', enable_auto_commit=False)\n", (85, 148), False, 'from kafka import KafkaConsumer\n'), ((960, 975), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (970, 975), False, 'import time\n')]
import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import itertools from memory import State device = torch.device("cuda" if torch.cuda.is_available() else "cpu") class DRRN(torch.nn.Module): """ Deep Reinforcement Relevance Network - He et al. '16 """ def __init__(self, vocab_size, embedding_dim, hidden_dim): super(DRRN, self).__init__() self.embedding = nn.Embedding(vocab_size, embedding_dim) self.obs_encoder = nn.GRU(embedding_dim, hidden_dim) self.look_encoder = nn.GRU(embedding_dim, hidden_dim) self.inv_encoder = nn.GRU(embedding_dim, hidden_dim) self.act_encoder = nn.GRU(embedding_dim, hidden_dim) self.hidden = nn.Linear(4 * hidden_dim, hidden_dim) self.act_scorer = nn.Linear(hidden_dim, 1) def packed_rnn(self, x, rnn): """ Runs the provided rnn on the input x. Takes care of packing/unpacking. x: list of unpadded input sequences Returns a tensor of size: len(x) x hidden_dim """ lengths = torch.tensor([len(n) for n in x], dtype=torch.long, device=device) # Sort this batch in descending order by seq length lengths, idx_sort = torch.sort(lengths, dim=0, descending=True) _, idx_unsort = torch.sort(idx_sort, dim=0) idx_sort = torch.autograd.Variable(idx_sort) idx_unsort = torch.autograd.Variable(idx_unsort) padded_x = pad_sequences(x) x_tt = torch.from_numpy(padded_x).type(torch.long).to(device) x_tt = x_tt.index_select(0, idx_sort) # Run the embedding layer embed = self.embedding(x_tt).permute(1, 0, 2) # Time x Batch x EncDim # Pack padded batch of sequences for RNN module packed = nn.utils.rnn.pack_padded_sequence(embed, lengths.cpu()) # Run the RNN out, _ = rnn(packed) # Unpack out, _ = nn.utils.rnn.pad_packed_sequence(out) # Get the last step of each sequence idx = (lengths - 1).view(-1, 1).expand(len(lengths), out.size(2)).unsqueeze(0) out = out.gather(0, idx).squeeze(0) # Unsort out = out.index_select(0, idx_unsort) return out def forward(self, state_batch, act_batch, poss_acts, detach=False, cond_weight=0, cclm=None, 
cond_threshold=0, args=None, testing_flag=False): """ Batched forward pass. obs_id_batch: iterable of unpadded sequence ids act_batch: iterable of lists of unpadded admissible command ids Returns a tuple of tensors containing q-values for each item in the batch """ # Zip the state_batch into an easy access format state = State(*zip(*state_batch)) # This is number of admissible commands in each element of the batch act_sizes = [len(a) for a in act_batch] # Combine next actions into one long list act_batch = list(itertools.chain.from_iterable(act_batch)) act_out = self.packed_rnn(act_batch, self.act_encoder) # Encode the various aspects of the state obs_out = self.packed_rnn(state.obs, self.obs_encoder) look_out = self.packed_rnn(state.description, self.look_encoder) inv_out = self.packed_rnn(state.inventory, self.inv_encoder) state_out = torch.cat((obs_out, look_out, inv_out), dim=1) # Expand the state to match the batches of actions state_out = torch.cat([state_out[i].repeat(j, 1) for i, j in enumerate(act_sizes)], dim=0) z = torch.cat((state_out, act_out), dim=1) # Concat along hidden_dim z = F.relu(self.hidden(z)) act_values = self.act_scorer(z).squeeze(-1) if detach: act_values = act_values.detach().cpu() q_values = act_values.split(act_sizes) if cclm: cond_probs = cclm.get_probs(state_batch, poss_acts, cond_threshold=cond_threshold, testing_flag=testing_flag) if cond_weight > 0: if args.adaptive_cond_weight: new_q_values = [] for vals, probs in zip(q_values, cond_probs): probs = probs.int() scaling = (cond_weight * probs) + (1 - probs) new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0)) q_values = new_q_values elif args.adaptive_cond_weight2: new_q_values = [] for vals, probs in zip(q_values, cond_probs): probs = probs.int() # basically an indicator for whether the action is immoral mult_weight = torch.minimum(2 * F.softmax(vals, dim=0).pow(0.5), torch.ones_like(vals)) scaling = (mult_weight * probs) + (1 - probs) 
new_q_values.append(torch.log(F.softmax(vals, dim=0) * scaling) + torch.logsumexp(vals, dim=0)) q_values = new_q_values else: q_values = [vals - cond_weight * probs for vals, probs in zip(q_values, cond_probs)] else: raise NotImplementedError else: cond_probs = q_values if cond_weight > 0: raise NotImplementedError q_values = [vals - cond_weight * torch.rand(vals.shape, device=device) for vals, probs in zip(q_values, cond_probs)] # Split up the q-values by batch return q_values, cond_probs @torch.no_grad() def act(self, states, poss_acts_tokenized, poss_acts, lm=None, eps=None, alpha=0, k=-1, argmax=False, cond_weight=0, cclm=None, cond_threshold=0, args=None): """ Returns an action-string, optionally sampling from the distribution of Q-Values. """ valid_ids = poss_acts_tokenized q_values, cond_probs = self.forward(states, valid_ids, poss_acts, detach=False, cond_weight=cond_weight, cclm=cclm, cond_threshold=cond_threshold, args=args) # detach only when using two GPUs if alpha > 0 or (eps is not None and k != -1): # need to use lm_values lm_values = [torch.tensor(lm.score(state.obs, act_ids), device=device) for state, act_ids in zip(states, valid_ids)] act_values = [q_value * (1 - alpha) + bert_value * alpha for q_value, bert_value in zip(q_values, lm_values)] else: act_values = q_values if eps is None: # sample ~ softmax(act_values) if argmax: sampling_func = torch.argmax else: sampling_func = lambda vals: torch.multinomial(F.softmax(vals, dim=0), num_samples=1) act_idxs = [sampling_func(vals).item() for vals in act_values] else: # w.p. eps, ~ softmax(act_values) | uniform(top_k(act_values)), w.p. 
(1-eps) arg max q_values raise NotImplementedError if k == 0: # soft sampling act_idxs = [torch.multinomial(F.softmax(vals, dim=0), num_samples=1).item() for vals in lm_values] elif k == -1: act_idxs = [np.random.choice(range(len(vals))) for vals in q_values] else: # hard (uniform) sampling act_idxs = [np.random.choice(vals.topk(k=min(k, len(vals)), dim=0).indices.tolist()) for vals in lm_values] act_idxs = [vals.argmax(dim=0).item() if np.random.rand() > eps else idx for idx, vals in zip(act_idxs, q_values)] return act_idxs, act_values, cond_probs def pad_sequences(sequences, maxlen=None, dtype='int32', value=0.): lengths = [len(s) for s in sequences] nb_samples = len(sequences) if maxlen is None: maxlen = np.max(lengths) # take the sample shape from the first non empty sequence # checking for consistency in the main loop below. sample_shape = tuple() for s in sequences: if len(s) > 0: sample_shape = np.asarray(s).shape[1:] break x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype) for idx, s in enumerate(sequences): if len(s) == 0: continue # empty list was found # pre truncating trunc = s[-maxlen:] # check `trunc` has expected shape trunc = np.asarray(trunc, dtype=dtype) if trunc.shape[1:] != sample_shape: raise ValueError('Shape of sample %s of sequence at position %s is different from expected shape %s' % (trunc.shape[1:], idx, sample_shape)) # post padding x[idx, :len(trunc)] = trunc return x
[ "torch.nn.Embedding", "torch.cat", "numpy.ones", "torch.nn.utils.rnn.pad_packed_sequence", "torch.no_grad", "numpy.max", "torch.nn.Linear", "torch.nn.GRU", "torch.logsumexp", "torch.autograd.Variable", "numpy.asarray", "torch.cuda.is_available", "torch.rand", "torch.sort", "torch.from_numpy", "torch.ones_like", "torch.nn.functional.softmax", "numpy.random.rand", "itertools.chain.from_iterable" ]
[((5580, 5595), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5593, 5595), False, 'import torch\n'), ((162, 187), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (185, 187), False, 'import torch\n'), ((434, 473), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (446, 473), True, 'import torch.nn as nn\n'), ((501, 534), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (507, 534), True, 'import torch.nn as nn\n'), ((563, 596), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (569, 596), True, 'import torch.nn as nn\n'), ((624, 657), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (630, 657), True, 'import torch.nn as nn\n'), ((685, 718), 'torch.nn.GRU', 'nn.GRU', (['embedding_dim', 'hidden_dim'], {}), '(embedding_dim, hidden_dim)\n', (691, 718), True, 'import torch.nn as nn\n'), ((741, 778), 'torch.nn.Linear', 'nn.Linear', (['(4 * hidden_dim)', 'hidden_dim'], {}), '(4 * hidden_dim, hidden_dim)\n', (750, 778), True, 'import torch.nn as nn\n'), ((805, 829), 'torch.nn.Linear', 'nn.Linear', (['hidden_dim', '(1)'], {}), '(hidden_dim, 1)\n', (814, 829), True, 'import torch.nn as nn\n'), ((1239, 1282), 'torch.sort', 'torch.sort', (['lengths'], {'dim': '(0)', 'descending': '(True)'}), '(lengths, dim=0, descending=True)\n', (1249, 1282), False, 'import torch\n'), ((1307, 1334), 'torch.sort', 'torch.sort', (['idx_sort'], {'dim': '(0)'}), '(idx_sort, dim=0)\n', (1317, 1334), False, 'import torch\n'), ((1354, 1387), 'torch.autograd.Variable', 'torch.autograd.Variable', (['idx_sort'], {}), '(idx_sort)\n', (1377, 1387), False, 'import torch\n'), ((1409, 1444), 'torch.autograd.Variable', 'torch.autograd.Variable', (['idx_unsort'], {}), '(idx_unsort)\n', (1432, 1444), False, 'import torch\n'), ((1924, 1961), 
'torch.nn.utils.rnn.pad_packed_sequence', 'nn.utils.rnn.pad_packed_sequence', (['out'], {}), '(out)\n', (1956, 1961), True, 'import torch.nn as nn\n'), ((3343, 3389), 'torch.cat', 'torch.cat', (['(obs_out, look_out, inv_out)'], {'dim': '(1)'}), '((obs_out, look_out, inv_out), dim=1)\n', (3352, 3389), False, 'import torch\n'), ((3560, 3598), 'torch.cat', 'torch.cat', (['(state_out, act_out)'], {'dim': '(1)'}), '((state_out, act_out), dim=1)\n', (3569, 3598), False, 'import torch\n'), ((7911, 7926), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (7917, 7926), True, 'import numpy as np\n'), ((8485, 8515), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (8495, 8515), True, 'import numpy as np\n'), ((2963, 3003), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['act_batch'], {}), '(act_batch)\n', (2992, 3003), False, 'import itertools\n'), ((8196, 8240), 'numpy.ones', 'np.ones', (['((nb_samples, maxlen) + sample_shape)'], {}), '((nb_samples, maxlen) + sample_shape)\n', (8203, 8240), True, 'import numpy as np\n'), ((8145, 8158), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (8155, 8158), True, 'import numpy as np\n'), ((1496, 1522), 'torch.from_numpy', 'torch.from_numpy', (['padded_x'], {}), '(padded_x)\n', (1512, 1522), False, 'import torch\n'), ((6804, 6826), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (6813, 6826), True, 'import torch.nn.functional as F\n'), ((7581, 7597), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7595, 7597), True, 'import numpy as np\n'), ((5412, 5449), 'torch.rand', 'torch.rand', (['vals.shape'], {'device': 'device'}), '(vals.shape, device=device)\n', (5422, 5449), False, 'import torch\n'), ((4356, 4384), 'torch.logsumexp', 'torch.logsumexp', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4371, 4384), False, 'import torch\n'), ((4776, 4797), 'torch.ones_like', 'torch.ones_like', (['vals'], {}), '(vals)\n', 
(4791, 4797), False, 'import torch\n'), ((7151, 7173), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (7160, 7173), True, 'import torch.nn.functional as F\n'), ((4959, 4987), 'torch.logsumexp', 'torch.logsumexp', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4974, 4987), False, 'import torch\n'), ((4320, 4342), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4329, 4342), True, 'import torch.nn.functional as F\n'), ((4743, 4765), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4752, 4765), True, 'import torch.nn.functional as F\n'), ((4923, 4945), 'torch.nn.functional.softmax', 'F.softmax', (['vals'], {'dim': '(0)'}), '(vals, dim=0)\n', (4932, 4945), True, 'import torch.nn.functional as F\n')]
import test.xcursor_test as xcursor_test import test.toolbox.parameters_test as parameters_test import test.helper_test as helper_test if __name__ == "__main__": xcursor_test.run_tests() parameters_test.run_tests() helper_test.run_tests()
[ "test.helper_test.run_tests", "test.xcursor_test.run_tests", "test.toolbox.parameters_test.run_tests" ]
[((169, 193), 'test.xcursor_test.run_tests', 'xcursor_test.run_tests', ([], {}), '()\n', (191, 193), True, 'import test.xcursor_test as xcursor_test\n'), ((198, 225), 'test.toolbox.parameters_test.run_tests', 'parameters_test.run_tests', ([], {}), '()\n', (223, 225), True, 'import test.toolbox.parameters_test as parameters_test\n'), ((230, 253), 'test.helper_test.run_tests', 'helper_test.run_tests', ([], {}), '()\n', (251, 253), True, 'import test.helper_test as helper_test\n')]
# Generated by Django 3.1 on 2020-09-16 06:52 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('package', '0006_auto_20200916_1440'), ] operations = [ migrations.RenameField( model_name='packageline', old_name='fulfilmentline', new_name='fulfillmentline', ), ]
[ "django.db.migrations.RenameField" ]
[((237, 344), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""packageline"""', 'old_name': '"""fulfilmentline"""', 'new_name': '"""fulfillmentline"""'}), "(model_name='packageline', old_name='fulfilmentline',\n new_name='fulfillmentline')\n", (259, 344), False, 'from django.db import migrations\n')]
from plane import Plane def test_create(): assert Plane() is not None assert Plane().points_count == 0 assert Plane().maximum_distance_between_points == 0 def test_add_first_point(): plane = Plane() plane.add_point((1,2)) assert plane.points_count == 1 assert plane.maximum_distance_between_points == 0 assert plane.maximum_distance_points[0].point.x_coordinate == 1 assert plane.maximum_distance_points[0].point.y_coordinate == 2 assert plane.maximum_distance_points[1].point.x_coordinate == 1 assert plane.maximum_distance_points[1].point.y_coordinate == 2 def test_add_two_points(): plane = Plane() plane.add_point((1,2)) plane.add_point((2,2)) assert plane.points_count == 2 assert plane.maximum_distance_between_points > 0 def test_statment_one(): ''' 1 1 - 1 1 2 1 - 1 2 1 3 - 2 3 ''' plane = Plane() plane.add_point((1, 1)) assert 1 in plane.get_points_with_maximal_manhattan_distance() assert 1 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((2, 1)) assert 1 in plane.get_points_with_maximal_manhattan_distance() assert 2 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((1, 3)) assert 3 in plane.get_points_with_maximal_manhattan_distance() assert 2 in plane.get_points_with_maximal_manhattan_distance() def test_statment_two(): ''' 2 2 - 1 1 1 3 - 1 2 1 1 - 1 3 3 1 - 4 2 3 3 - 4 2 ''' plane = Plane() plane.add_point((2, 2)) assert 1 in plane.get_points_with_maximal_manhattan_distance() assert 1 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((1, 2)) assert 1 in plane.get_points_with_maximal_manhattan_distance() assert 2 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((1, 1)) assert 1 in plane.get_points_with_maximal_manhattan_distance() assert 3 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((3, 1)) assert 4 in plane.get_points_with_maximal_manhattan_distance() assert 2 in plane.get_points_with_maximal_manhattan_distance() plane.add_point((3, 3)) assert 3 in 
plane.get_points_with_maximal_manhattan_distance() assert 5 in plane.get_points_with_maximal_manhattan_distance()
[ "plane.Plane" ]
[((219, 226), 'plane.Plane', 'Plane', ([], {}), '()\n', (224, 226), False, 'from plane import Plane\n'), ((667, 674), 'plane.Plane', 'Plane', ([], {}), '()\n', (672, 674), False, 'from plane import Plane\n'), ((929, 936), 'plane.Plane', 'Plane', ([], {}), '()\n', (934, 936), False, 'from plane import Plane\n'), ((1574, 1581), 'plane.Plane', 'Plane', ([], {}), '()\n', (1579, 1581), False, 'from plane import Plane\n'), ((60, 67), 'plane.Plane', 'Plane', ([], {}), '()\n', (65, 67), False, 'from plane import Plane\n'), ((92, 99), 'plane.Plane', 'Plane', ([], {}), '()\n', (97, 99), False, 'from plane import Plane\n'), ((130, 137), 'plane.Plane', 'Plane', ([], {}), '()\n', (135, 137), False, 'from plane import Plane\n')]
import sqlite3 def sql_insert_now(sql_datensatz): dsatz = sql_datensatz vorname = dsatz[0] nachname = dsatz[1] gruppenleiter = dsatz[2] bearbeitet = dsatz[3] grund = dsatz[4] abwesend_seit = dsatz[5] gemeldet = dsatz[6] gemeldet_time =dsatz[7] meldepflicht = dsatz[8] meldepflicht_time = dsatz[9] prognose = dsatz[10] notiz = dsatz[11] if notiz == "": notiz = "Noch keine Notiz hinterlegt." status = dsatz[12] #connect to db verbindung = sqlite3.connect("datenbank/abwesenheiten.db") zeiger = verbindung.cursor() zeiger.execute("INSERT INTO mitarbeiter (vorname, nachname, gruppenleiter, bearbeitet, grund, abwesend_seit, gemeldet, gemeldet_time, meldepflicht, meldepflicht_time, prognose, notiz, status) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)", (vorname, nachname, gruppenleiter, bearbeitet, grund, abwesend_seit, gemeldet, gemeldet_time, meldepflicht, meldepflicht_time, prognose, notiz, status)) verbindung.commit() verbindung.close()
[ "sqlite3.connect" ]
[((517, 562), 'sqlite3.connect', 'sqlite3.connect', (['"""datenbank/abwesenheiten.db"""'], {}), "('datenbank/abwesenheiten.db')\n", (532, 562), False, 'import sqlite3\n')]