| id (string, lengths 3-8) | content (string, lengths 100-981k) |
|---|---|
448459
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import tensorflow as tf
def simple_super_resolution_3d(inputs,
num_convolutions=1,
filters=(16, 32, 64),
upsampling_factor=(2, 2, 2),
mode=tf.estimator.ModeKeys.EVAL,
use_bias=False,
activation=tf.nn.relu6,
kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None):
"""Simple super resolution network with num_convolutions per feature
extraction block. Each convolution in a block b has a filter size
of filters[b].
Args:
inputs (tf.Tensor): Input feature tensor to the network (rank 5
required).
num_convolutions (int, optional): Number of convolutions per block.
filters (tuple, optional): Number of filters per block.
upsampling_factor (tuple, optional): Upsampling factor of the low
resolution to the high resolution image.
mode (TYPE, optional): One of the tf.estimator.ModeKeys strings: TRAIN,
EVAL or PREDICT
use_bias (bool, optional): Boolean, whether the layer uses a bias.
activation (optional): A function to use as activation function.
kernel_initializer (TYPE, optional): An initializer for the convolution
kernel.
bias_initializer (TYPE, optional): An initializer for the bias vector.
If None, no bias will be applied.
kernel_regularizer (None, optional): Optional regularizer for the
convolution kernel.
bias_regularizer (None, optional): Optional regularizer for the bias
vector.
Returns:
dict: dictionary of output tensors
"""
outputs = {}
assert len(inputs.get_shape().as_list()) == 5, \
'inputs are required to have a rank of 5.'
assert len(upsampling_factor) == 3, \
'upsampling factor is required to be of length 3.'
conv_op = tf.layers.conv3d
tp_conv_op = tf.layers.conv3d_transpose
conv_params = {'padding': 'same',
'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer}
x = inputs
tf.logging.info('Input tensor shape {}'.format(x.get_shape()))
# Convolutional feature encoding blocks with num_convolutions at different
# resolution scales res_scales
for unit in range(0, len(filters)):
for i in range(0, num_convolutions):
with tf.variable_scope('enc_unit_{}_{}'.format(unit, i)):
x = conv_op(inputs=x,
filters=filters[unit],
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
**conv_params)
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = activation(x)
tf.logging.info('Encoder at unit_{}_{} tensor '
'shape: {}'.format(unit, i, x.get_shape()))
# Upsampling
with tf.variable_scope('upsampling_unit'):
# Adjust the strided tp conv kernel size to prevent losing information
k_size = [u * 2 for u in upsampling_factor]
x = tp_conv_op(inputs=x,
filters=inputs.get_shape().as_list()[-1],
kernel_size=k_size,
strides=upsampling_factor,
**conv_params)
tf.logging.info('Output tensor shape: {}'.format(x.get_shape()))
outputs['x_'] = x
return outputs
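# Minimal usage sketch (a sketch only; shapes are illustrative and a TF 1.x
# graph/session setup is assumed, matching the tf.layers/tf.estimator API used above):
# lr_volume = tf.placeholder(tf.float32, shape=(1, 16, 16, 16, 1))
# net = simple_super_resolution_3d(lr_volume, upsampling_factor=(2, 2, 2))
# sr_volume = net['x_']  # upsampled output, spatially 2x the input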
|
448461
|
from pyradioconfig.parts.ocelot.calculators.calc_longrange import CALC_longrange_ocelot
class Calc_Longrange_Bobcat(CALC_longrange_ocelot):
pass
|
448477
|
from State import AI_Board
import os
import random
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers.convolutional import Conv2D
from keras.layers.core import Dense, Activation, Flatten
from keras.optimizers import Adam
import cv2
def build_network(num_actions):
print("Initializing model ....")
model = Sequential()
model.add(Conv2D(32, (8, 8), padding='same',
strides=(4, 4), input_shape=(80, 160, 3)))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), padding='same', strides=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same', strides=(1, 1)))
model.add(Activation('relu'))
model.add(Conv2D(64, (4, 4), padding='same', strides=(2, 2)))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3), padding='same', strides=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(num_actions))
model.add(Activation('softmax'))
if os.path.exists("dqn.h5"):
print("Loading weights from dqn.h5 .....")
model.load_weights("dqn.h5")
print("Weights loaded successfully.")
adam = Adam(lr=1e-4)
model.compile(loss='mse', optimizer=adam)
print("Finished building model.")
return model
def process(input):
# resize image from 288x404 to 80x160 (cv2.resize takes (width, height))
image = cv2.resize(input, (160, 80))
# scale pixel values down to [0, 1]
image = image / 255.0
return image
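# Illustrative check of the preprocessing (input shape is hypothetical):
# process(np.zeros((404, 288, 3), dtype=np.uint8)).shape == (80, 160, 3)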
def train_network():
game = AI_Board()
model = build_network(game.action_num)
num_actions = game.action_num # number of valid actions
discount = 0.99 # decay rate of past observations
observe = 200 # timesteps to observe before training
explore = 3000000 # frames over which to anneal epsilon
FINAL_EPSILON = 0.0001 # final value of epsilon
INITIAL_EPSILON = 0.1 # starting value of epsilon
replay_memory = 300 # number of previous transitions to remember
epsilon = INITIAL_EPSILON
timestep = 0
loss = 0
# initialize an instance of game
# store the previous observations in replay memory
replay = deque()
image, _, reward, alive = game.next(0)
# preprocess the image and add a batch dimension, giving shape (1, 80, 160, 3)
input_image = process(image)
input_image = input_image.reshape(
1, input_image.shape[0], input_image.shape[1], input_image.shape[2])
while (True):
if random.random() <= epsilon:
action = random.randrange(num_actions)  # random action in [0, num_actions)
else:
q = model.predict(input_image)
action = np.argmax(q)
# decay epsilon linearly
if epsilon > FINAL_EPSILON and timestep > observe:
epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / explore
image1, _, reward, alive = game.next(action)
image1 = process(image1)
input_image1 = image1.reshape(1, image1.shape[0], image1.shape[1], image1.shape[2])
replay.append((input_image, action, reward, input_image1, alive))
if len(replay) > replay_memory:
replay.popleft()
if timestep > observe:
try:
# sample a minibatch of size 16 from replay memory
minibatch = random.sample(replay, 16)
s, a, r, s1, alive = zip(*minibatch)
s = np.concatenate(s)
s1 = np.concatenate(s1)
targets = model.predict(s)
print(s.shape, s1.shape, targets.shape)
targets[range(16), a] = r + discount * \
np.max(model.predict(s1), axis=1)*alive
loss += model.train_on_batch(s, targets)
except Exception as e:
print(e)
continue
input_image = input_image1
timestep = timestep + 1
if timestep % 400 == 0:
model.save_weights("dqn.h5", overwrite=True)
print("TIMESTEP: " + str(timestep) + ", EPSILON: " + str(epsilon) +
", ACTION: " + str(action) + ", REWARD: " + str(reward) + ", Loss: " + str(loss))
loss = 0
if __name__ == "__main__":
train_network()
|
448490
|
from spake2 import six
from hashlib import sha256
from itertools import count
class PRG:
# this returns a callable which, when invoked with an integer N, will
# return N pseudorandom bytes derived from the seed
def __init__(self, seed):
self.generator = self.block_generator(seed)
def __call__(self, numbytes):
return b"".join([six.next(self.generator) for i in range(numbytes)])
def block_generator(self, seed):
assert isinstance(seed, type(b""))
for counter in count():
cseed = b"".join([b"prng-",
str(counter).encode("ascii"),
b"-",
seed])
block = sha256(cseed).digest()
for i in range(len(block)):
yield block[i:i+1]
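# Usage sketch (illustrative): the stream is deterministic for a given seed, so
# two PRGs built from the same seed yield identical bytes.
# prg = PRG(b"seed")
# assert prg(8) == PRG(b"seed")(8)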
|
448535
|
from yacs.config import CfgNode as CN
_C = CN()
_C.EXP = "" # Experiment name
_C.DEBUG = False
_C.SYSTEM = CN()
_C.SYSTEM.SEED = 0
_C.SYSTEM.FP16 = True
_C.SYSTEM.OPT_L = "O2"
_C.SYSTEM.CUDA = True
_C.SYSTEM.MULTI_GPU = False
_C.SYSTEM.NUM_WORKERS = 8
_C.DIRS = CN()
_C.DIRS.DATA = "data/rsna/"
_C.DIRS.TRAIN = "stage_1_train_images/"
_C.DIRS.VALID = "stage_1_test_images/"
_C.DIRS.TEST = "stage_2_test_images/"
_C.DIRS.TRAIN_CSV = "stage_1_train_metadata.csv"
_C.DIRS.VALID_CSV = "stage_1_test_metadata.csv"
_C.DIRS.TEST_CSV = "stage_2_test_metadata.csv"
_C.DIRS.WEIGHTS = "./weights/"
_C.DIRS.OUTPUTS = "./outputs/"
_C.DIRS.LOGS = "./logs/"
_C.DATA = CN()
_C.DATA.CUTMIX = True
_C.DATA.MIXUP = False
_C.DATA.CM_ALPHA = 1.0
_C.DATA.MEAN = []
_C.DATA.STD = []
_C.DATA.IMG_SIZE = 512
_C.DATA.INP_CHANNEL = 3
_C.DATA.NUM_SLICES = 20
_C.TRAIN = CN()
_C.TRAIN.EPOCHS = 40
_C.TRAIN.BATCH_SIZE = 8
_C.INFER = CN()
_C.INFER.TTA = False
_C.OPT = CN()
_C.OPT.OPTIMIZER = "adamw"
_C.OPT.SCHED = "cosine_warmup"
_C.OPT.GD_STEPS = 1
_C.OPT.WARMUP_EPOCHS = 4
_C.OPT.BASE_LR = 1e-3
_C.OPT.WEIGHT_DECAY = 1e-2
_C.OPT.WEIGHT_DECAY_BIAS = 0.0
_C.OPT.EPSILON = 1e-3
_C.LOSS = CN()
_C.LOSS.WEIGHTS = [2., 1., 1., 1., 1., 1.]
_C.MODEL = CN()
_C.MODEL.ENCODER = CN()
_C.MODEL.ENCODER.NAME = "se_resnext50_32x4d"
_C.MODEL.DECODER = CN()
_C.MODEL.DECODER.NAME = "lstm"
_C.MODEL.DECODER.NUM_LAYERS = 2
_C.MODEL.DECODER.IN_FEATURES = 2048
_C.MODEL.DECODER.HIDDEN_SIZE = 512
_C.MODEL.DECODER.BIDIRECT = True
_C.MODEL.DECODER.DROPOUT = 0.3
_C.MODEL.NUM_CLASSES = 6
_C.CONST = CN()
_C.CONST.LABELS = [
"any",
"intraparenchymal", "intraventricular",
"subarachnoid", "subdural", "epidural"
]
|
448550
|
from autode.thermochemistry.igm import calculate_thermo_cont
from autode.thermochemistry.symmetry import symmetry_number
__all__ = ['calculate_thermo_cont',
'symmetry_number']
|
448555
|
import re
pattern = r"(hold\d+)"
cset = set()
count = 0
with open("reports/gf12/bp_single/min_delay_report_osta.rpt", "r") as f:
while True:
line = f.readline()
if not line:
break
m = re.search(pattern, line)
if m:
name = m.group(0)
if name not in cset:
cset.add(name)
count += 1
else:
continue
print(count)
|
448608
|
def duplicate(s):
if(len(s)<=1):
return s
if s[0]==s[1]:
return duplicate(s[1:])
return s[0]+duplicate(s[1:])
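# Illustrative behaviour: runs of adjacent duplicate characters collapse to one,
# e.g. duplicate("aabbbcd") returns "abcd".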
s = input()
print(duplicate(s))
|
448614
|
from copy import deepcopy
from datetime import datetime
from django.test import TestCase
from xml.dom.minidom import parseString
from httmock import HTTMock
from authorizenet.cim import extract_form_data, extract_payment_form_data, \
add_profile
from .utils import xml_to_dict
from .mocks import cim_url_match, customer_profile_success
from .test_data import create_profile_success
class ExtractFormDataTests(TestCase):
"""Tests for utility functions converting form data to CIM data"""
def test_extract_form_data(self):
new_data = extract_form_data({'word': "1", 'multi_word_str': "2"})
self.assertEqual(new_data, {'word': "1", 'multiWordStr': "2"})
def test_extract_payment_form_data(self):
data = extract_payment_form_data({
'card_number': "1111",
'expiration_date': datetime(2020, 5, 1),
'card_code': "123",
})
self.assertEqual(data, {
'cardNumber': "1111",
'expirationDate': "2020-05",
'cardCode': "123",
})
class AddProfileTests(TestCase):
"""Tests for add_profile utility function"""
def setUp(self):
self.payment_form_data = {
'card_number': "5586086832001747",
'expiration_date': datetime(2020, 5, 1),
'card_code': "123",
}
self.billing_form_data = {
'first_name': "Danielle",
'last_name': "Thompson",
'company': "",
'address': "101 Broadway Avenue",
'city': "San Diego",
'state': "CA",
'country': "US",
'zip': "92101",
}
self.request_data = deepcopy(create_profile_success)
profile = self.request_data['createCustomerProfileRequest']['profile']
del profile['paymentProfiles']['billTo']['phoneNumber']
del profile['paymentProfiles']['billTo']['faxNumber']
def test_add_profile_minimal(self):
"""Success test with minimal complexity"""
@cim_url_match
def request_handler(url, request):
request_xml = parseString(request.body)
self.assertEqual(xml_to_dict(request_xml), self.request_data)
return customer_profile_success.format('createCustomerProfileResponse')
with HTTMock(request_handler):
result = add_profile(42, self.payment_form_data,
self.billing_form_data)
response = result.pop('response')
self.assertEqual(result, {
'profile_id': '6666',
'payment_profile_ids': ['7777'],
'shipping_profile_ids': [],
})
self.assertEqual(response.result, 'Ok')
self.assertEqual(response.result_code, 'I00001')
self.assertEqual(response.result_text, 'Successful.')
self.assertIsNone(response.transaction_response)
|
448653
|
from itertools import chain, izip
from string import ascii_lowercase as low, ascii_uppercase as up, digits
KEYS = {a: i for i, a in enumerate(chain(
up, low, chain.from_iterable(izip(xrange(10), digits))))}
def unusual_sort(array):
return sorted(array, key=lambda a: KEYS[a])
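# Illustrative ordering (Python 2): uppercase letters sort before lowercase ones,
# and an int digit sorts just before its string counterpart, e.g.
# unusual_sort(['b', 'A', '1', 1]) -> ['A', 'b', 1, '1']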
|
448700
|
from setuptools import setup, Extension
import platform
version = '1.3.0'
setup(name='sololink',
zip_safe=True,
version=version,
description='Python interface for SoloLink',
long_description='Python interface for SoloLink',
url='https://github.com/OpenSolo/sololink-python',
author='3D Robotics',
install_requires=[
'posix_ipc',
],
author_email='<EMAIL>, <EMAIL>,',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
],
packages=[
'sololink'
],
ext_modules=[])
|
448708
|
from migen import *
from migen.genlib.fsm import FSM, NextState
from migen.genlib.record import Record, DIR_M_TO_S
from misoc.interconnect.stream import Endpoint
from misoc.interconnect.csr import AutoCSR, CSRStorage, CSRStatus
from ovhw.constants import *
from ovhw.ov_types import ULPI_DATA_D, ULPI_DATA_TAG
class RXCmdFilter(Module):
# Merges/drops unnecessary RXCMDs for packet parsing
def __init__(self):
self.sink = Endpoint(ULPI_DATA_D)
self.source = Endpoint(ULPI_DATA_TAG)
is_sop = Signal()
is_eop = Signal()
is_ovf = Signal()
is_nop = Signal()
is_active = Signal()
is_nactive = Signal()
is_error = Signal()
ts_counter = Signal(len(self.source.payload.ts))
self.comb += [
is_sop.eq(self.sink.payload.rxcmd & (self.sink.payload.d == RXCMD_MAGIC_SOP)),
is_eop.eq(self.sink.payload.rxcmd & (self.sink.payload.d == RXCMD_MAGIC_EOP)),
is_ovf.eq(self.sink.payload.rxcmd & (self.sink.payload.d == RXCMD_MAGIC_OVF)),
is_nop.eq(self.sink.payload.rxcmd & (self.sink.payload.d == RXCMD_MAGIC_NOP)),
is_active.eq(self.sink.payload.rxcmd &
~self.sink.payload.d[6] &
(self.sink.payload.d[4:6] == 0x1)),
is_nactive.eq(self.sink.payload.rxcmd &
~self.sink.payload.d[6] &
(self.sink.payload.d[4:6] == 0x0)),
is_error.eq(self.sink.payload.rxcmd &
~self.sink.payload.d[6] &
(self.sink.payload.d[4:6] == 0x3)),
self.source.payload.d.eq(self.sink.payload.d),
self.source.payload.ts.eq(ts_counter)
]
self.sync += If(self.sink.ack, ts_counter.eq(ts_counter + 1))
self.submodules.fsm = FSM()
def pass_(state):
return send(state, 0, 0, 0, 0)
def send(state, is_start, is_end, is_err, is_ovf):
return [
self.source.stb.eq(1),
self.source.payload.is_start.eq(is_start),
self.source.payload.is_end.eq(is_end),
self.source.payload.is_err.eq(is_err),
self.source.payload.is_ovf.eq(is_ovf),
If(self.source.ack,
self.sink.ack.eq(1),
NextState(state)
)
]
def skip(state):
return [
self.sink.ack.eq(1),
NextState(state)
]
def act(state, *args):
self.fsm.act(state,
If(self.sink.stb,
If(~self.sink.payload.rxcmd,
pass_(state)
).Elif(is_nop,
self.sink.ack.eq(1),
).Else(*args)))
act("NO_PACKET",
If(is_sop | is_active,
send("PACKET", 1, 0, 0, 0)
).Else(
skip("NO_PACKET")
))
act("PACKET",
If(is_eop | is_nactive,
send("NO_PACKET", 0, 1, 0, 0)
).Elif(is_error,
send("NO_PACKET", 0, 0, 1, 0)
).Elif(is_ovf,
send("NO_PACKET", 0, 0, 0, 1)
).Else(
skip("PACKET")
))
class TestFilt(Module):
def __init__(self, clock):
self.submodules.tr = RXCmdFilter()
self.comb += self.tr.source.ack.eq(self.tr.source.stb)
self.byte_list = [(1,0x40), (0,0xCA), (1,0x10), (0, 0xFE), (1, 0x41)]
def do_simulation(self, s):
if s.cycle_counter > 5 and s.cycle_counter %2 and self.byte_list:
b = self.byte_list[0]
print("WR %s" % repr(b))
self.byte_list = self.byte_list[1:]
s.wr(self.tr.sink.stb, 1)
s.wr(self.tr.sink.payload.d, b[1])
s.wr(self.tr.sink.payload.rxcmd, b[0])
else:
s.wr(self.tr.sink.stb,0)
if s.rd(self.tr.source.stb):
print("%02x %d" % (s.rd(self.tr.source.payload.d), s.rd(self.tr.source.payload.rxcmd)))
if __name__ == "__main__":
from migen.sim.generic import Simulator, TopLevel
tl = TopLevel("sdram.vcd")
test = TestFilt(tl.clock_domains[0])
sim = Simulator(test, tl)
sim.run(500)
|
448724
|
from __future__ import absolute_import
from . import main as main_module  # aliased so the main() wrapper below does not shadow it
# function to set path to current folder (py 2 to 3)
def import_modify():
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append(path.abspath(path.join(path.dirname(__file__), '..')))
def main():
import_modify()
main_module.main()
|
448755
|
from typing import Any, Sequence
from dataclasses import dataclass
import jax.numpy as jnp
from jax.tree_util import register_pytree_node_class
import operator
import itertools
import functools
from jax._src.scipy.ndimage import (
_nonempty_prod,
_nonempty_sum,
_INDEX_FIXERS,
_round_half_away_from_zero,
_nearest_indices_and_weights,
_linear_indices_and_weights,
)
Array = Any
def bilinear_interpolate(arr, x, y, clip_to_bounds=False):
assert len(arr.shape) == 3
H, W, _ = arr.shape
x = jnp.asarray(x)
y = jnp.asarray(y)
x0 = jnp.floor(x).astype(int)
x1 = x0 + 1
y0 = jnp.floor(y).astype(int)
y1 = y0 + 1
if clip_to_bounds:
x0 = jnp.clip(x0, 0, W-1)
x1 = jnp.clip(x1, 0, W-1)
y0 = jnp.clip(y0, 0, H-1)
y1 = jnp.clip(y1, 0, H-1)
Ia = arr[y0, x0, :]
Ib = arr[y1, x0, :]
Ic = arr[y0, x1, :]
Id = arr[y1, x1, :]
wa = ((x1-x) * (y1-y))[..., None]
wb = ((x1-x) * (y-y0))[..., None]
wc = ((x-x0) * (y1-y))[..., None]
wd = ((x-x0) * (y-y0))[..., None]
return wa*Ia + wb*Ib + wc*Ic + wd*Id
def map_coordinates(input, coordinates, order, mode='constant', cval=0.0):
"""
Adapted from jax.scipy.map_coordinates, but with a few key differences.
1.) interpolations are always broadcasted along the last dimension of the `input`
i.e. a 3 channel rgb image with shape [H, W, 3] will be interpolated with 2d
coordinates and broadcasted across the channel dimension
2.) `input` isn't required to be jax `DeviceArray` -- it can be any type that
supports numpy fancy indexing
Note on interpolation: `map_coordinates` indexes in the order of the axes,
so for an image it indexes the coordinates as [y, x]
"""
coordinates = [jnp.asarray(c) for c in coordinates]
cval = jnp.asarray(cval, input.dtype)
if len(coordinates) != input.ndim-1:
raise ValueError('coordinates must be a sequence of length input.ndim - 1, but '
'{} != {}'.format(len(coordinates), input.ndim - 1))
index_fixer = _INDEX_FIXERS.get(mode)
if index_fixer is None:
raise NotImplementedError(
'map_coordinates does not support mode {}. '
'Currently supported modes are {}.'.format(mode, set(_INDEX_FIXERS)))
if mode == 'constant':
is_valid = lambda index, size: (0 <= index) & (index < size)
else:
is_valid = lambda index, size: True
if order == 0:
interp_fun = _nearest_indices_and_weights
elif order == 1:
interp_fun = _linear_indices_and_weights
else:
raise NotImplementedError(
'map_coordinates currently requires order<=1')
valid_1d_interpolations = []
for coordinate, size in zip(coordinates, input.shape[:-1]):
interp_nodes = interp_fun(coordinate)
valid_interp = []
for index, weight in interp_nodes:
fixed_index = index_fixer(index, size)
valid = is_valid(index, size)
valid_interp.append((fixed_index, valid, weight))
valid_1d_interpolations.append(valid_interp)
outputs = []
for items in itertools.product(*valid_1d_interpolations):
indices, validities, weights = zip(*items)
if all(valid is True for valid in validities):
# fast path
contribution = input[(*indices, Ellipsis)]
else:
all_valid = functools.reduce(operator.and_, validities)
contribution = jnp.where(all_valid[..., None], input[(*indices, Ellipsis)], cval)
outputs.append(_nonempty_prod(weights)[..., None] * contribution)
result = _nonempty_sum(outputs)
if jnp.issubdtype(input.dtype, jnp.integer):
result = _round_half_away_from_zero(result)
return result.astype(input.dtype)
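# Minimal sketch of the channel-broadcast behaviour described in the docstring
# (shapes and values are illustrative):
# img = jnp.zeros((4, 5, 3))                     # H, W, C image
# ys = jnp.array([0.5, 1.5])
# xs = jnp.array([2.0, 3.0])
# out = map_coordinates(img, [ys, xs], order=1)  # -> shape (2, 3)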
@dataclass
@register_pytree_node_class
class Interpolate:
arr: Array
order: int
mode: str
cval: float = 0.0
def __call__(self, coords, normalized=True):
coords = [jnp.asarray(c) for c in coords]
assert len(coords) == (self.arr.ndim - 1)
if normalized:
# un-normalize
coords = [c * (s-1) for c, s in zip(coords, self.arr.shape)]
return map_coordinates(self.arr, coords, order=self.order, mode=self.mode, cval=self.cval)
def tree_flatten(self):
# children are the array leaves; order/mode/cval are static aux data
return (self.arr,), (self.order, self.mode, self.cval)
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(children[0], *aux_data)
|
448763
|
import marshal
import mmap
import os
import os.path
import sys
import logging
# from profilehooks import profile
from cog.cache import Cache
import xxhash
RECORD_SEP = b'\xFD'
UNIT_SEP = b'\xAC'
class TableMeta:
def __init__(self, name, namespace, db_instance_id, column_mode):
self.name = name
self.namespace = namespace
self.db_instance_id = db_instance_id
self.column_mode = column_mode
class Table:
def __init__(self, name, namespace, db_instance_id, config, column_mode=False, shared_cache=None):
self.logger = logging.getLogger('table')
self.config = config
self.shared_cache = shared_cache
self.table_meta = TableMeta(name, namespace, db_instance_id, column_mode)
self.indexer = self.__create_indexer()
self.store = self.__create_store(shared_cache)
def __create_indexer(self):
return Indexer(self.table_meta, self.config, self.logger)
def __create_store(self, shared_cache):
return Store(self.table_meta, self.config, self.logger, shared_cache=shared_cache)
def close(self):
self.indexer.close()
self.store.close()
self.logger.info("closed table: "+self.table_meta.name)
class Record:
RECORD_LINK_LEN = 16
RECORD_LINK_NULL = -1
VALUE_LINK_NULL = -1
def __init__(self, key, value, tombstone='0', store_position=None, value_type="s", key_link=-1, value_link=-1):
self.key = key
self.value = value
self.tombstone = tombstone
self.store_position = store_position
self.key_link = key_link
self.value_link = value_link
self.value_type = value_type
def set_store_position(self, pos):
if type(pos) is not int:
raise ValueError("store position must be int but provided : "+str(pos))
self.store_position = pos
def set_value_link(self, pos):
self.value_link = pos
def set_value(self, value):
self.value = value
def is_equal_val(self, other_record):
return self.key == other_record.key and self.value == other_record.value
def get_kv_tuple(self):
return self.key, self.value
def serialize(self):
return marshal.dumps((self.key, self.value))
def marshal(self):
key_link_bytes = str(self.key_link).encode().rjust(Record.RECORD_LINK_LEN)
serialized = self.serialize()
# print("string:" + str(self) + " serialized: " + str(serialized))
m_record = key_link_bytes \
+ self.tombstone.encode() \
+ self.value_type.encode() \
+ str(len(serialized)).encode() \
+ UNIT_SEP \
+ serialized
if self.value_type == "l":
if self.value_link is not None:
m_record += str(self.value_link).encode()
m_record += RECORD_SEP
# print("marshall: "+str(m_record))
return m_record
def is_empty(self):
return self.key is None and self.value is None
def __str__(self):
return "key: {}, value: {}, tombstone: {}, store_position: {}, key_link: {}, value_link: {}, value_type: {}".format(self.key, self.value, self.tombstone, self.store_position, self.key_link, self.value_link, self.value_type)
@classmethod
def __read_until(cls, start, sbytes, separator=UNIT_SEP):
buff = b''
i = 0 # default
for i in range(start, len(sbytes)):
s_byte = sbytes[i: i + 1]
if s_byte == separator:
break
buff += s_byte
return buff, i
@classmethod
def unmarshal(cls, store_bytes):
"""reads from bytes and creates object
"""
base_pos = 0
key_link = int(store_bytes[base_pos: base_pos+Record.RECORD_LINK_LEN])
next_base_pos = Record.RECORD_LINK_LEN
tombstone = store_bytes[next_base_pos:next_base_pos + 1].decode()
value_type = store_bytes[next_base_pos + 1: next_base_pos + 2].decode()
value_len, end_pos = cls.__read_until(next_base_pos + 2, store_bytes)
value_len = int(value_len.decode())
value = store_bytes[end_pos+1: end_pos+1 + value_len]
record = marshal.loads(value)
value_link = None
if value_type == 'l':
value_link, end_pos = cls.__read_until(end_pos + value_len + 1, store_bytes, RECORD_SEP)
value_link = int(value_link.decode())
return cls(record[0], record[1], tombstone, store_position=None, value_type=value_type, key_link=key_link, value_link=value_link)
@classmethod
def __load_value(cls, store_pointer, val_list, store):
"""loads value from the store"""
while store_pointer != Record.VALUE_LINK_NULL:
rec = Record.unmarshal(store.read(store_pointer))
val_list.append(rec.value)
store_pointer = rec.value_link
return val_list
@classmethod
# @profile
def load_from_store(cls, position: int, store):
record = cls.unmarshal(store.read(position))
if record.value_type == 'l':
values = cls.__load_value(record.value_link, [record.value], store)
record.set_value(values)
return record
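# Round-trip sketch (illustrative values; relies only on the defaults above):
# rec = Record("k1", "v1")
# raw = rec.marshal()
# assert Record.unmarshal(raw).key == "k1"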
class Index:
def __init__(self, table_meta, config, logger, index_id=0):
self.logger = logging.getLogger('index')
self.table = table_meta
self.config = config
self.name = self.config.cog_index(table_meta.namespace, table_meta.name, table_meta.db_instance_id, index_id)
self.empty_block = '-1'.zfill(self.config.INDEX_BLOCK_LEN).encode()
if not os.path.exists(self.name):
self.logger.info("creating index...")
f = open(self.name, 'wb+')
i = 0
e_blocks = []
while i < config.INDEX_CAPACITY:
e_blocks.append(self.empty_block)
i += 1
f.write(b''.join(e_blocks))
self.file_limit = f.tell()
f.close()
self.logger.info("new index with capacity" + str(config.INDEX_CAPACITY) + "created: " + self.name)
else:
self.logger.info("Index: "+self.name+" already exists.")
self.db = open(self.name, 'r+b')
self.db_mem = mmap.mmap(self.db.fileno(), 0)
self.db_mem.seek(0)
current_block = self.db_mem.read(self.config.INDEX_BLOCK_LEN)
def close(self):
self.db.close()
def get_index_key(self, int_store_position):
return str(int_store_position).encode().rjust(self.config.INDEX_BLOCK_LEN)
# @profile
def put(self, key, store_position, store):
"""
key chain
:param key:
:param store_position:
:param store:
:return:
"""
"""
k5 -> k4 -> k3 -> k2 -> k1
add: k6
k6 -> k5 -> k4 -> k3 -> k2 -> k1
add/update: k4
1. k4 -> k6 -> k5 -> k4 -> k3 -> k2 -> k1
2. k4 -> k6 -> k5 -> k3 -> k2 -> k1
"""
orig_position, orig_hash = self.get_index(key)
data_at_prob_position = self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN]
self.logger.debug('writing : '+str(key) + ' current data at store position: '+ str(data_at_prob_position))
if data_at_prob_position == self.empty_block:
# point next link to record null
store.update_record_link_inplace(store_position, Record.RECORD_LINK_NULL)
self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN] = self.get_index_key(store_position)
else:
# read existing record and update pointers
record = Record.load_from_store(int(data_at_prob_position), store)
record.set_store_position(int(data_at_prob_position))
if record.key == key:
""" update existing record """
store.update_record_link_inplace(store_position, int(record.key_link))
else:
# set next link to the record at the top of the bucket
store.update_record_link_inplace(store_position, record.store_position)
# check if this record exists in the bucket, if yes remove pointer.
prev_record = None
while record.key_link != Record.RECORD_LINK_NULL:
record = Record.load_from_store(record.key_link, store)
record.set_store_position(record.key_link)
if record.key == key and prev_record is not None:
"""
if same key found in bucket, update previous record in chain to point to key_link of this record
prev_rec -> current rec.key_link
curr_rec will not be linked in the bucket anymore.
"""
# update in place the key link pointer of the previous record (fixed-length padding required)
store.update_record_link_inplace(prev_record.store_position, record.key_link)
prev_record = record
self.db_mem[orig_position: orig_position + self.config.INDEX_BLOCK_LEN] = self.get_index_key(store_position)
def get_index(self, key):
num = self.cog_hash(key) % ((sys.maxsize + 1) * 2)
self.logger.debug("hash for: " + key + " : " + str(num))
# there may be diff when using mem slice vs write (+1 needed)
index = (self.config.INDEX_BLOCK_LEN *
(max((num % self.config.INDEX_CAPACITY) - 1, 0)))
self.logger.debug("offset : " + key + " : " + str(index))
return index, num
def cog_hash(self, string):
return xxhash.xxh32(string, seed=2).intdigest() % self.config.INDEX_CAPACITY
# @profile
def get(self, key, store):
self.logger.debug("GET: Reading index: " + self.name)
index_position, raw_hash = self.get_index(key)
data_at_index_position = self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN]
if data_at_index_position == self.empty_block:
return None
data_at_index_position = int(data_at_index_position)
record = Record.load_from_store(data_at_index_position, store)
record.set_store_position(data_at_index_position)
self.logger.debug("read record " + str(record))
if record.key == key:
return record
else:
while record.key_link != Record.RECORD_LINK_NULL:
self.logger.debug("record.key_link: "+str(record.key_link))
record = Record.load_from_store(record.key_link, store)
record.set_store_position(record.key_link)
if record.key == key:
return record
return None
'''
Iterates through the records referenced by this index in the given store.
'''
def scanner(self,store):
scan_cursor = 0
while True:
data_at_position = self.db_mem[scan_cursor:scan_cursor + self.config.INDEX_BLOCK_LEN]
if len(data_at_position) == 0:#EOF index
self.logger.info("Index EOF reached! Scan terminated.")
return
if data_at_position == self.empty_block:
scan_cursor += self.config.INDEX_BLOCK_LEN
self.logger.debug("GET: skipping empty block during iteration.")
continue
record = Record.load_from_store(int(data_at_position), store)
if record is None:#EOF store
self.logger.error("Store EOF reached! Iteration terminated.")
return
yield Record(record.key, record.value, record.tombstone)
scan_cursor += self.config.INDEX_BLOCK_LEN
def delete(self, key, store):
"""
k5 -> k4 -> k3 -> k2 -> k1
del: k3
k6 -> k5 -> k4 -> k2 -> k1
"""
self.logger.debug("GET: Reading index: " + self.name)
index_position, raw_hash = self.get_index(key)
data_at_index_position = self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN]
if data_at_index_position == self.empty_block:
return False
data_at_index_position = int(data_at_index_position)
record = Record.load_from_store(data_at_index_position, store)
record.set_store_position(data_at_index_position)
self.logger.debug("read record " + str(record))
if record.key == key:
"""delete bucket => map hash table to empty block"""
self.db_mem[index_position:index_position + self.config.INDEX_BLOCK_LEN] = self.empty_block
else:
"""search bucket"""
prev_record = None
while record.key_link != Record.RECORD_LINK_NULL:
record = Record.load_from_store(record.key_link, store)
record.set_store_position(record.key_link)
if record.key == key:
"""
if same key found in bucket, update previous record in chain to point to key_link of this record
prev_rec -> current rec.key_link
curr_rec will not be linked in the bucket anymore.
"""
# update in place the key link pointer of the previous record (fixed-length padding required)
store.update_record_link_inplace(prev_record.store_position, record.key_link)
prev_record = record
return True
def flush(self):
self.db_mem.flush()
class Store:
def __init__(self, tablemeta, config, logger, caching_enabled=True, shared_cache=None):
self.caching_enabled = caching_enabled
self.logger = logging.getLogger('store')
self.tablemeta = tablemeta
self.config = config
self.empty_block = '-1'.zfill(self.config.INDEX_BLOCK_LEN).encode()
self.store = self.config.cog_store(
tablemeta.namespace, tablemeta.name, tablemeta.db_instance_id)
self.store_cache = Cache(self.store, shared_cache)
temp = open(self.store, 'a') # create if not exist
temp.close()
self.store_file = open(self.store, 'rb+')
logger.info("Store for file init: " + self.store)
def close(self):
self.store_file.close()
def save(self, record):
"""
Store data
"""
self.store_file.seek(0, 2)
store_position = self.store_file.tell()
record.set_store_position(store_position)
marshalled_record = record.marshal()
self.store_file.write(marshalled_record)
self.store_file.flush()
if self.caching_enabled:
self.store_cache.put(store_position, marshalled_record)
return store_position
def update_record_link_inplace(self, start_pos, int_value):
"""updates record link in store file in place"""
if type(int_value) is not int:
raise ValueError("store position must be int but provided : "+str(start_pos))
byte_value = str(int_value).encode().rjust(Record.RECORD_LINK_LEN)
self.logger.debug('update_record_link_inplace: ' + str(byte_value))
self.store_file.seek(start_pos)
self.store_file.write(byte_value)
if self.caching_enabled:
self.store_cache.partial_update_from_zero_index(start_pos, byte_value)
self.store_file.flush()
# @profile
def read(self, position):
self.logger.debug("store read request at position: "+str(position))
if self.caching_enabled:
cached_record = self.store_cache.get(position)
if cached_record is not None:
return cached_record
self.store_file.seek(position)
record = self.__read_until(RECORD_SEP)
if self.caching_enabled:
self.store_cache.put(position, record)
return record
# @profile
def __read_until(self, separator):
data = None
while True:
chunk = self.store_file.read(self.config.STORE_READ_BUFFER_SIZE)
if len(chunk) == 0:
return data
# raise Exception("EOF store file! Data read error.")
i = chunk.find(RECORD_SEP)
if i > 0:
chunk = chunk[:i+1]
if data is None:
data = chunk
else:
data += chunk
break
if data is None:
data = chunk
else:
data += chunk
self.logger.debug("store __read_until: "+str(data))
return data
class Indexer:
'''
Manages indexes. Creates a new index when the current one is full.
Searches all indexes for get requests.
Provides the same get/put/del methods as a single index, but over multiple files.
'''
def __init__(self, tablemeta, config, logger):
self.tablemeta = tablemeta
self.config = config
self.logger = logging.getLogger('indexer')
self.index_list = [] #future range index.
self.index_id = 0
self.load_indexes()
# if no index currently exists, create a new live index.
if len(self.index_list) == 0:
self.index_list.append(Index(tablemeta, config, logger, self.index_id))
self.live_index = self.index_list[self.index_id]
def close(self):
for idx in self.index_list:
idx.close()
def load_indexes(self):
for f in os.listdir(self.config.cog_data_dir(self.tablemeta.namespace)):
if self.config.INDEX in f:
if self.tablemeta.name == self.config.get_table_name(f):
self.logger.info("loading index file: "+f)
id = self.config.index_id(f)
index = Index(self.tablemeta, self.config, self.logger, id)
self.index_list.append(index)
#make the latest index the live index.
if id >= self.index_id:
self.index_id = id
self.live_index = index
def put(self, key, store_position, store):
resp = self.live_index.put(key, store_position, store)
self.logger.debug("Key: "+key+" indexed in: "+self.live_index.name)
# @profile
def get(self, key, store):
idx = self.index_list[0] # only one index file.
return idx.get(key, store)
def scanner(self, store):
for idx in self.index_list:
self.logger.debug("SCAN: index: "+idx.name)
for r in idx.scanner(store):
yield r
def delete(self, key, store):
for idx in self.index_list:
if idx.delete(key, store):
return True
return False
|
448768
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from orchestra.forms.widgets import SpanWidget
from .. import settings
from ..forms import SaaSPasswordForm
from .options import SoftwareService
class MoodleForm(SaaSPasswordForm):
admin_username = forms.CharField(label=_("Admin username"), required=False,
widget=SpanWidget(display='admin'))
class MoodleService(SoftwareService):
name = 'moodle'
verbose_name = "Moodle"
form = MoodleForm
description_field = 'site_name'
icon = 'orchestra/icons/apps/Moodle.png'
site_domain = settings.SAAS_MOODLE_DOMAIN
allow_custom_url = settings.SAAS_MOODLE_ALLOW_CUSTOM_URL
db_name = settings.SAAS_MOODLE_DB_NAME
db_user = settings.SAAS_MOODLE_DB_USER
|
448790
|
from ._sizemin import SizeminValidator
from ._sizemax import SizemaxValidator
from ._opacity import OpacityValidator
from ._color import ColorValidator
from ._border import BorderValidator
from ._blend import BlendValidator
|
448799
|
import os.path
from typing import Dict
from jinja2 import Environment, FileSystemLoader
def apply_template(file: str, params: Dict[str, str]) -> str:
"""
Applies Jinja2 template
:param str file: Jinja2 template file
:param Dict[str, str] params: parameters
:return: processed content
:rtype: str
"""
jinja_profile_env = Environment(loader=FileSystemLoader(os.path.dirname(file)))
jinja_profile_env.globals.update()
template = jinja_profile_env.get_template(os.path.basename(file))
return template.render(params)
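# Usage sketch (file name and parameters are illustrative):
# rendered = apply_template("templates/profile.j2", {"name": "alice"})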
|
448821
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import math
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def create_window_3D(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t())
_3D_window = _1D_window.mm(_2D_window.reshape(1, -1)).reshape(window_size, window_size,
window_size).float().unsqueeze(0).unsqueeze(0)
window = Variable(_3D_window.expand(channel, 1, window_size, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
def _ssim_3D(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv3d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv3d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv3d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv3d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv3d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
class SSIM3D(torch.nn.Module):
def __init__(self, window_size=11, size_average=True):
super(SSIM3D, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window_3D(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window_3D(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return 1-_ssim_3D(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
def ssim3D(img1, img2, window_size=11, size_average=True):
(_, channel, _, _, _) = img1.size()
window = create_window_3D(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim_3D(img1, img2, window, window_size, channel, size_average)
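# Usage sketch (illustrative shapes; inputs are assumed to be scaled to [0, 1],
# which the constants C1/C2 above presume):
# a = torch.rand(1, 1, 64, 64)
# b = torch.rand(1, 1, 64, 64)
# print(ssim(a, a).item())  # 1.0 for identical images
# print(ssim(a, b).item())  # lower value for unrelated noise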
class Grad(torch.nn.Module):
"""
N-D gradient loss.
"""
def __init__(self, penalty='l1', loss_mult=None):
super(Grad, self).__init__()
self.penalty = penalty
self.loss_mult = loss_mult
def forward(self, y_pred, y_true):
dy = torch.abs(y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :])
dx = torch.abs(y_pred[:, :, :, 1:] - y_pred[:, :, :, :-1])
#dz = torch.abs(y_pred[:, :, :, :, 1:] - y_pred[:, :, :, :, :-1])
if self.penalty == 'l2':
dy = dy * dy
dx = dx * dx
#dz = dz * dz
d = torch.mean(dx) + torch.mean(dy)# + torch.mean(dz)
grad = d / 2.0
if self.loss_mult is not None:
grad *= self.loss_mult
return grad
class Grad3d(torch.nn.Module):
"""
N-D gradient loss.
"""
def __init__(self, penalty='l1', loss_mult=None):
super(Grad3d, self).__init__()
self.penalty = penalty
self.loss_mult = loss_mult
def forward(self, y_pred, y_true):
dy = torch.abs(y_pred[:, :, 1:, :, :] - y_pred[:, :, :-1, :, :])
dx = torch.abs(y_pred[:, :, :, 1:, :] - y_pred[:, :, :, :-1, :])
dz = torch.abs(y_pred[:, :, :, :, 1:] - y_pred[:, :, :, :, :-1])
if self.penalty == 'l2':
dy = dy * dy
dx = dx * dx
dz = dz * dz
d = torch.mean(dx) + torch.mean(dy) + torch.mean(dz)
grad = d / 3.0
if self.loss_mult is not None:
grad *= self.loss_mult
return grad
class Grad3DiTV(torch.nn.Module):
"""
N-D gradient loss.
"""
def __init__(self):
super(Grad3DiTV, self).__init__()
a = 1
def forward(self, y_pred, y_true):
dy = torch.abs(y_pred[:, :, 1:, 1:, 1:] - y_pred[:, :, :-1, 1:, 1:])
dx = torch.abs(y_pred[:, :, 1:, 1:, 1:] - y_pred[:, :, 1:, :-1, 1:])
dz = torch.abs(y_pred[:, :, 1:, 1:, 1:] - y_pred[:, :, 1:, 1:, :-1])
dy = dy * dy
dx = dx * dx
dz = dz * dz
d = torch.mean(torch.sqrt(dx+dy+dz+1e-6))
grad = d / 3.0
return grad
class NCC(torch.nn.Module):
"""
Local (over window) normalized cross correlation loss.
"""
def __init__(self, win=None):
super(NCC, self).__init__()
self.win = win
def forward(self, y_pred, y_true):
I = y_true
J = y_pred
# get dimension of volume
# assumes I, J are sized [batch_size, *vol_shape, nb_feats]
ndims = len(list(I.size())) - 2
assert ndims in [1, 2, 3], "volumes should be 1 to 3 dimensions. found: %d" % ndims
# set window size
win = [9] * ndims if self.win is None else self.win
# compute filters
sum_filt = torch.ones([1, 1, *win]).to("cuda")
pad_no = math.floor(win[0]/2)
if ndims == 1:
stride = (1)
padding = (pad_no)
elif ndims == 2:
stride = (1,1)
padding = (pad_no, pad_no)
else:
stride = (1,1,1)
padding = (pad_no, pad_no, pad_no)
# get convolution function
conv_fn = getattr(F, 'conv%dd' % ndims)
# compute CC squares
I2 = I * I
J2 = J * J
IJ = I * J
I_sum = conv_fn(I, sum_filt, stride=stride, padding=padding)
J_sum = conv_fn(J, sum_filt, stride=stride, padding=padding)
I2_sum = conv_fn(I2, sum_filt, stride=stride, padding=padding)
J2_sum = conv_fn(J2, sum_filt, stride=stride, padding=padding)
IJ_sum = conv_fn(IJ, sum_filt, stride=stride, padding=padding)
win_size = np.prod(win)
u_I = I_sum / win_size
u_J = J_sum / win_size
cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
cc = cross * cross / (I_var * J_var + 1e-5)
return -torch.mean(cc)
class MutualInformation(torch.nn.Module):
"""
Mutual Information
"""
def __init__(self, sigma_ratio=1, minval=0., maxval=1., num_bin=32):
super(MutualInformation, self).__init__()
"""Create bin centers"""
bin_centers = np.linspace(minval, maxval, num=num_bin)
vol_bin_centers = Variable(torch.linspace(minval, maxval, num_bin), requires_grad=False).cuda()
num_bins = len(bin_centers)
"""Sigma for Gaussian approx."""
sigma = np.mean(np.diff(bin_centers)) * sigma_ratio
print(sigma)
self.preterm = 1 / (2 * sigma**2)
self.bin_centers = bin_centers
self.max_clip = maxval
self.num_bins = num_bins
self.vol_bin_centers = vol_bin_centers
def mi(self, y_true, y_pred):
y_pred = torch.clamp(y_pred, 0., self.max_clip)
y_true = torch.clamp(y_true, 0, self.max_clip)
y_true = y_true.view(y_true.shape[0], -1)
y_true = torch.unsqueeze(y_true, 2)
y_pred = y_pred.view(y_pred.shape[0], -1)
y_pred = torch.unsqueeze(y_pred, 2)
nb_voxels = y_pred.shape[1] # total num of voxels
"""Reshape bin centers"""
o = [1, 1, np.prod(self.vol_bin_centers.shape)]
vbc = torch.reshape(self.vol_bin_centers, o).cuda()
"""compute image terms by approx. Gaussian dist."""
I_a = torch.exp(- self.preterm * torch.square(y_true - vbc))
I_a = I_a / torch.sum(I_a, dim=-1, keepdim=True)
I_b = torch.exp(- self.preterm * torch.square(y_pred - vbc))
I_b = I_b / torch.sum(I_b, dim=-1, keepdim=True)
# compute probabilities
pab = torch.bmm(I_a.permute(0, 2, 1), I_b)
pab = pab/nb_voxels
pa = torch.mean(I_a, dim=1, keepdim=True)
pb = torch.mean(I_b, dim=1, keepdim=True)
papb = torch.bmm(pa.permute(0, 2, 1), pb) + 1e-6
mi = torch.sum(torch.sum(pab * torch.log(pab / papb + 1e-6), dim=1), dim=1)
return mi.mean() #average across batch
def forward(self, y_true, y_pred):
return -self.mi(y_true, y_pred)
class localMutualInformation(torch.nn.Module):
"""
Local Mutual Information for non-overlapping patches
"""
def __init__(self, sigma_ratio=1, minval=0., maxval=1., num_bin=32, patch_size=5):
super(localMutualInformation, self).__init__()
"""Create bin centers"""
bin_centers = np.linspace(minval, maxval, num=num_bin)
vol_bin_centers = Variable(torch.linspace(minval, maxval, num_bin), requires_grad=False).cuda()
num_bins = len(bin_centers)
"""Sigma for Gaussian approx."""
sigma = np.mean(np.diff(bin_centers)) * sigma_ratio
self.preterm = 1 / (2 * sigma**2)
self.bin_centers = bin_centers
self.max_clip = maxval
self.num_bins = num_bins
self.vol_bin_centers = vol_bin_centers
self.patch_size = patch_size
def local_mi(self, y_true, y_pred):
y_pred = torch.clamp(y_pred, 0., self.max_clip)
y_true = torch.clamp(y_true, 0, self.max_clip)
"""Reshape bin centers"""
o = [1, 1, np.prod(self.vol_bin_centers.shape)]
vbc = torch.reshape(self.vol_bin_centers, o).cuda()
"""Making image paddings"""
if len(list(y_pred.size())[2:]) == 3:
ndim = 3
x, y, z = list(y_pred.size())[2:]
# compute padding sizes
x_r = -x % self.patch_size
y_r = -y % self.patch_size
z_r = -z % self.patch_size
padding = (z_r // 2, z_r - z_r // 2, y_r // 2, y_r - y_r // 2, x_r // 2, x_r - x_r // 2, 0, 0, 0, 0)
elif len(list(y_pred.size())[2:]) == 2:
ndim = 2
x, y = list(y_pred.size())[2:]
# compute padding sizes
x_r = -x % self.patch_size
y_r = -y % self.patch_size
padding = (y_r // 2, y_r - y_r // 2, x_r // 2, x_r - x_r // 2, 0, 0, 0, 0)
else:
raise Exception('Supports 2D and 3D but not {}'.format(list(y_pred.size())))
y_true = F.pad(y_true, padding, "constant", 0)
y_pred = F.pad(y_pred, padding, "constant", 0)
"""Reshaping images into non-overlapping patches"""
if ndim == 3:
y_true_patch = torch.reshape(y_true, (y_true.shape[0], y_true.shape[1],
(x + x_r) // self.patch_size, self.patch_size,
(y + y_r) // self.patch_size, self.patch_size,
(z + z_r) // self.patch_size, self.patch_size))
y_true_patch = y_true_patch.permute(0, 1, 2, 4, 6, 3, 5, 7)
y_true_patch = torch.reshape(y_true_patch, (-1, self.patch_size ** 3, 1))
y_pred_patch = torch.reshape(y_pred, (y_pred.shape[0], y_pred.shape[1],
(x + x_r) // self.patch_size, self.patch_size,
(y + y_r) // self.patch_size, self.patch_size,
(z + z_r) // self.patch_size, self.patch_size))
y_pred_patch = y_pred_patch.permute(0, 1, 2, 4, 6, 3, 5, 7)
y_pred_patch = torch.reshape(y_pred_patch, (-1, self.patch_size ** 3, 1))
else:
y_true_patch = torch.reshape(y_true, (y_true.shape[0], y_true.shape[1],
(x + x_r) // self.patch_size, self.patch_size,
(y + y_r) // self.patch_size, self.patch_size))
y_true_patch = y_true_patch.permute(0, 1, 2, 4, 3, 5)
y_true_patch = torch.reshape(y_true_patch, (-1, self.patch_size ** 2, 1))
y_pred_patch = torch.reshape(y_pred, (y_pred.shape[0], y_pred.shape[1],
(x + x_r) // self.patch_size, self.patch_size,
(y + y_r) // self.patch_size, self.patch_size))
y_pred_patch = y_pred_patch.permute(0, 1, 2, 4, 3, 5)
y_pred_patch = torch.reshape(y_pred_patch, (-1, self.patch_size ** 2, 1))
"""Compute MI"""
I_a_patch = torch.exp(- self.preterm * torch.square(y_true_patch - vbc))
I_a_patch = I_a_patch / torch.sum(I_a_patch, dim=-1, keepdim=True)
I_b_patch = torch.exp(- self.preterm * torch.square(y_pred_patch - vbc))
I_b_patch = I_b_patch / torch.sum(I_b_patch, dim=-1, keepdim=True)
pab = torch.bmm(I_a_patch.permute(0, 2, 1), I_b_patch)
pab = pab / self.patch_size ** ndim
pa = torch.mean(I_a_patch, dim=1, keepdim=True)
pb = torch.mean(I_b_patch, dim=1, keepdim=True)
papb = torch.bmm(pa.permute(0, 2, 1), pb) + 1e-6
mi = torch.sum(torch.sum(pab * torch.log(pab / papb + 1e-6), dim=1), dim=1)
return mi.mean()
def forward(self,y_true, y_pred):
return -self.local_mi(y_true, y_pred)
|
448887
|
import sys
import json
# Open secret.config file
configFileNotFound = False
try:
configFile = open('secret.config')
except Exception as e:
print("File secret.config could not be opened in current directory.")
print(e)
configFileNotFound = True
# Script will exit after checking if the Vault request is valid
# Decode json result
try:
rawInput = ''.join(sys.stdin.readlines())
decodedJson = json.loads(rawInput)
except Exception as e:
print("Unable to retrieve secrets from Vault and obtain valid json result.")
print("Please ensure you are authenticated and have supplied the correct path argument.")
exit()
# Extract the data field containing the secrets
if "data" in decodedJson and "data" in decodedJson["data"]:
data = decodedJson["data"]["data"]
else:
print("Unable to access the field data:{data:{}} from result which should contain the secrets.")
print("Please ensure you are authenticated and have supplied the correct path argument.")
exit()
# Even if the config file is not found, it is useful to still indicate if the Vault request has any problems before exiting
if configFileNotFound:
exit()
# Read all the secret file locations from secret.config
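# (each non-empty line maps a secret key to a destination file path, e.g.,
#  illustratively: db_password=/run/secrets/db_password)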
locations = {}
for line in configFile:
key, val = line.rstrip().partition('=')[::2]
if key in locations:
print("Key <{keyName}> appeared more than once on configuration file. Ignoring second instance of the key.".format(keyName=key))
else:
locations[key] = val
configFile.close()
# Write values to the secret file corresponding to their keys
for key in data:
if key in locations:
try:
f = open(locations[key], 'w')
f.write(data[key])
f.close()
except Exception as e:
print("Could not write the values for key <{keyName}> to location <{locName}>".format(keyName=key, locName=locations[key]))
print(e)
else:
print("File location for key <{keyName}> was not found.".format(keyName=key))
|
448898
|
from __future__ import print_function
import argparse
import os
import imp
import algorithms as alg
from dataloader import DataLoader, GenericDataset
import numpy as np
import torch
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--exp', type=str, required=True, default='', help='config file with parameters of the experiment')
parser.add_argument('--save_folder', type=str, required=True, default='', help='root folder to save checkpoints')
parser.add_argument('--evaluate', default=False, action='store_true')
parser.add_argument('--checkpoint', type=int, default=0, help='checkpoint (epoch id) that will be loaded')
parser.add_argument('--num_workers', type=int, default=8, help='number of data loading workers')
parser.add_argument('--cuda', type=bool, default=True, help='enables cuda')
parser.add_argument('--disp_step', type=int, default=50, help='display step during training')
parser.add_argument('--eval_data', type=str, default="", help='eval identifier')
args_opt = parser.parse_args()
exp_config_file = os.path.join('.', 'config', args_opt.exp + '.py')
os.makedirs(args_opt.save_folder, exist_ok=True)
exp_directory = os.path.join(args_opt.save_folder, 'experiments', args_opt.exp)
# Load the configuration params of the experiment
print('Launching experiment: %s' % exp_config_file)
config = imp.load_source("", exp_config_file).config
config['exp_dir'] = exp_directory # the place where logs, models, and other stuff will be stored
print("Loading experiment %s from file: %s" % (args_opt.exp, exp_config_file))
print("Generated logs, snapshots, and model files will be stored on %s" % (config['exp_dir']))
# Set train and test datasets and the corresponding data loaders
data_train_opt = config['data_train_opt']
data_test_opt = config['data_test_opt']
data_test_p_opt = config['data_test_p_opt']
num_imgs_per_cat = data_train_opt['num_imgs_per_cat'] if ('num_imgs_per_cat' in data_train_opt) else None
dataset_train = GenericDataset(
dataset_name=data_train_opt['dataset_name'],
split=data_train_opt['split'],
file_list=data_train_opt['file_list'],
random_sized_crop=data_train_opt['random_sized_crop'],
num_imgs_per_cat=num_imgs_per_cat)
dataset_test = GenericDataset(
dataset_name=data_test_opt['dataset_name'],
split=data_test_opt['split'],
file_list=data_test_opt['file_list'],
random_sized_crop=data_test_opt['random_sized_crop'])
dataset_p_test = GenericDataset(
dataset_name=data_test_p_opt['dataset_name'],
split=data_test_p_opt['split'],
file_list=data_test_p_opt['file_list'],
random_sized_crop=data_test_p_opt['random_sized_crop'])
dloader_train = DataLoader(
dataset=dataset_train,
batch_size=data_train_opt['batch_size'],
unsupervised=data_train_opt['unsupervised'],
epoch_size=data_train_opt['epoch_size'],
num_workers=args_opt.num_workers,
shuffle=True)
dloader_test = DataLoader(
dataset=dataset_test,
batch_size=data_test_opt['batch_size'],
unsupervised=data_test_opt['unsupervised'],
epoch_size=data_test_opt['epoch_size'],
num_workers=args_opt.num_workers,
shuffle=False)
dloader_p_test = DataLoader(
dataset=dataset_p_test,
batch_size=data_test_p_opt['batch_size'],
unsupervised=data_test_p_opt['unsupervised'],
epoch_size=data_test_p_opt['epoch_size'],
num_workers=args_opt.num_workers,
shuffle=False)
config['disp_step'] = args_opt.disp_step
algorithm = getattr(alg, config['algorithm_type'])(config)
if args_opt.cuda: # enable cuda
algorithm.load_to_gpu()
if args_opt.checkpoint > 0: # load checkpoint
algorithm.load_checkpoint(args_opt.checkpoint, train=(not args_opt.evaluate))
if not args_opt.evaluate: # train the algorithm
algorithm.solve(dloader_train, dloader_test, dloader_p_test)
else:
eval_stats, pred_var_stack_all, labels_var_stack = algorithm.evaluate(dloader_test) # evaluate the algorithm
eval_stats_p, pred_var_p_stack_all, labels_var_p_stack = algorithm.evaluate(dloader_p_test) # evaluate the algorithm on poisoned data
for layer_id in range(5):
pred_var_stack = torch.argmax(pred_var_stack_all[layer_id], dim=1)
pred_var_p_stack = torch.argmax(pred_var_p_stack_all[layer_id], dim=1)
print(eval_stats)
print(eval_stats_p)
# create confusion matrix ROWS ground truth COLUMNS pred
conf_matrix_clean = np.zeros((int(labels_var_stack.max())+1, int(labels_var_stack.max())+1))
conf_matrix_poisoned = np.zeros((int(labels_var_stack.max())+1, int(labels_var_stack.max())+1))
for i in range(pred_var_stack.size(0)):
# update confusion matrix
conf_matrix_clean[int(labels_var_stack[i]), int(pred_var_stack[i])] += 1
for i in range(pred_var_p_stack.size(0)):
# update confusion matrix
conf_matrix_poisoned[int(labels_var_p_stack[i]), int(pred_var_p_stack[i])] += 1
# load imagenet metadata
with open("imagenet_metadata.txt","r") as f:
data = [l.strip() for l in f.readlines()]
imagenet_metadata_dict = {}
for line in data:
wnid, classname = line.split('\t')[0], line.split('\t')[1]
imagenet_metadata_dict[wnid] = classname
with open('imagenet100_classes.txt', 'r') as f:
class_dir_list = [l.strip() for l in f.readlines()]
class_dir_list = sorted(class_dir_list)
save_folder = os.path.join(config['exp_dir'], "linear", args_opt.eval_data)
os.makedirs(save_folder, exist_ok=True)
np.save("{}/conf_matrix_clean_layer_{}.npy".format(save_folder, layer_id+1), conf_matrix_clean)
np.save("{}/conf_matrix_poisoned_layer_{}.npy".format(save_folder, layer_id+1), conf_matrix_poisoned)
with open("{}/conf_matrix_layer_{}.csv".format(save_folder, layer_id+1), "w") as f:
f.write("Model {},,Clean val,,,,Pois. val,,\n".format(""))
f.write("Data {},,acc1,,,,acc1,,\n".format(""))
f.write(",,{:.2f},,,,{:.2f},,\n".format(eval_stats['prec1_c{}'.format(layer_id+1)], eval_stats_p['prec1_c{}'.format(layer_id+1)]))
f.write("class name,class id,TP,FP,,TP,FP\n")
for target in range(len(class_dir_list)):
f.write("{},{},{},{},,".format(imagenet_metadata_dict[class_dir_list[target]].replace(",",";"), target, conf_matrix_clean[target][target], conf_matrix_clean[:, target].sum() - conf_matrix_clean[target][target]))
f.write("{},{}\n".format(conf_matrix_poisoned[target][target], conf_matrix_poisoned[:, target].sum() - conf_matrix_poisoned[target][target]))
|
448947
|
from functools import partial, update_wrapper
class SelfWrapper:
"""Wraps class attributes that could be overruled by instance attributes"""
_ATTRIBUTES = "Attributes"
def __init__(self, wrapped, attributelist):
self._attributelist = attributelist
self._wrapped = wrapped
self._cls = type(wrapped)
def _get_prop(self, attr):
try:
return self._cls.__dict__[attr]
except KeyError:
for base in self._cls.__bases__:
try:
return base.__dict__[attr]
except KeyError:
pass
raise KeyError(attr) from None
def __getattr__(self, attr):
if attr not in self._attributelist:
raise AttributeError(attr)
prop = self._get_prop(attr)
if callable(prop):
wprop = partial(prop, self._wrapped)
update_wrapper(wrapped=prop, wrapper=wprop)
return wprop
elif isinstance(prop, property):
return prop.fget(self._wrapped)
else:
return prop
def __dir__(self):
return self._attributelist
def __str__(self):
return self._ATTRIBUTES + " of " + str(self._wrapped)
def __repr__(self):
return str(self)
class ChildrenWrapper(SelfWrapper):
_ATTRIBUTES = "Children"
def _get_prop(self, attr):
return self._wrapped._get_path((attr,))
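
# --- Illustration (added, not part of the original module) ---
# A minimal, hypothetical example of how SelfWrapper exposes a whitelisted set of
# class-level attributes of a wrapped instance; `Thing` exists only for this sketch.
class Thing:
    colour = "red"                # plain class attribute

    def shout(self):              # method: returned as a partial bound to the wrapped instance
        return "I am " + self.colour


if __name__ == "__main__":
    w = SelfWrapper(Thing(), ["colour", "shout"])
    print(w.colour)   # -> red
    print(w.shout())  # -> I am red
    print(dir(w))     # -> ['colour', 'shout']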
|
448966
|
import re
import json
import os
import logging
import time
from logging.handlers import RotatingFileHandler
from logging import Formatter
from fuzzywuzzy import fuzz
from hashlib import md5
from flask import Flask, jsonify, send_from_directory, send_file, render_template, request, redirect, make_response
import imgur
from memegenerator import gen_meme
APP_ROOT = os.path.dirname(__file__)
MEME_PATH = os.path.join(APP_ROOT, 'static/memes/')
TEMPLATES_PATH = os.path.join(APP_ROOT, 'templates/memes/')
IMAGE_EXTENSIONS = ('png', 'jpeg', 'jpg', 'gif')
SUPPORTED_EXTENSIONS = IMAGE_EXTENSIONS + ('json',)
ERROR_BACKGROUND = 'blank-colored-background'
UPLOAD_FOLDER = os.path.join(APP_ROOT, 'user_uploads/')
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Limit uploads to 2mb. This limit is also enforced in the client
app.config['MAX_CONTENT_LENGTH'] = 2 * 1024 * 1024
# Logging
handler = RotatingFileHandler(os.path.join(
APP_ROOT, 'urlmeme.log'), maxBytes=10000, backupCount=1)
handler.setFormatter(Formatter('%(asctime)s %(levelname)s: %(message)s'))
handler.setLevel(logging.INFO)
app.logger.setLevel(logging.INFO)
app.logger.addHandler(handler)
# Maps meme's file name to its common names
with open(os.path.join(APP_ROOT, 'memes.json')) as data_file:
MEMES = json.load(data_file)
def get_hash_from_custom_image(meme_name):
m = re.match(r"^uploaded-([a-f0-9]{32})$", meme_name)
return m and m.groups()[0]
def is_custom_image(meme_name):
return get_hash_from_custom_image(meme_name) is not None
def get_template_path(meme_name):
if(is_custom_image(meme_name)):
hash = get_hash_from_custom_image(meme_name)
return os.path.join(UPLOAD_FOLDER, hash)
return os.path.join(TEMPLATES_PATH, meme_name)
def get_ext(filename):
return "." in filename and filename.rsplit('.', 1)[1].lower()
def allowed_file(filename):
return get_ext(filename) in ALLOWED_EXTENSIONS
def replace_underscore(string):
return re.sub(r'_', ' ', string)
def tokenize(string):
return re.sub(r' ', '', string.lower())
def parse_meme_url(path):
"""
Given a URL path, returns a named tuple representing the meme in question
(meme_name, top_text, bottom_text, extension)
"""
ext = 'jpg' # Default extension
if path.endswith(tuple('.%s' % e for e in SUPPORTED_EXTENSIONS)):
path, ext = os.path.splitext(path)
ext = ext[1:]
path = replace_underscore(path)
path_parts = path.split('/')[:3]
while(len(path_parts) < 3):
path_parts.append('')
path_parts.append(ext)
return tuple(path_parts)
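# Worked examples (added) of the parsing above; the meme names are only placeholders:
#   parse_meme_url('one_does_not_simply/top_text/bottom_text.gif')
#     -> ('one does not simply', 'top text', 'bottom text', 'gif')
#   parse_meme_url('success_kid/hello')
#     -> ('success kid', 'hello', '', 'jpg')   # missing parts default to '', ext to jpg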
def guess_meme_image(meme_name):
    '''
    Guess which meme image they mean by finding the alias with the greatest
    fuzzy string similarity
    '''
meme_name = tokenize(meme_name)
best = ''
best_score = None
for guess_image, names in MEMES.items():
for guess in names:
guess = tokenize(guess)
if(meme_name == guess):
score = 100
else:
# Add spaces to prefer matches that have word boundaries
score = fuzz.partial_ratio(guess, " " + meme_name + " ")
if best_score is None or score > best_score:
best_score = score
best = guess_image
app.logger.debug(
'New best meme for "%s": "%s" (Score: %s)', meme_name, guess, score)
app.logger.info('Picked meme "%s" for name "%s" (Score: %s)',
best, meme_name, best_score)
return best
def derive_meme_path(meme_image, top, bottom, ext):
""" Generate a hash filename for this meme image """
token = "%s|%s|%s" % (meme_image, top, bottom)
meme_id = md5(token.encode('utf-8')).hexdigest()
file_path = '%s.%s' % (meme_id, ext)
return MEME_PATH + file_path
def meme_image_path(meme_image, top, bottom, ext):
file_path = derive_meme_path(meme_image, top, bottom, ext)
app.logger.debug('Looking for file: "%s"', file_path)
if os.path.exists(file_path):
app.logger.info('Found meme in cache: "%s"', file_path)
else:
app.logger.info('Generating "%s"', file_path)
meme_path = get_template_path(meme_image)
gen_meme(meme_path + '.jpg', top, bottom, file_path)
return file_path
def error_image_response(top, bottom, status=500):
app.logger.error('Sending error response: %s, %s (%s)',
top, bottom, status)
image_path = meme_image_path(ERROR_BACKGROUND, top, bottom, 'jpg')
return send_file(image_path), status
@app.route("/")
def help():
return render_template('help.html', base_url=request.base_url)
@app.route('/favicon.ico')
def favicon():
path = os.path.join(app.root_path, 'static')
mimetype = 'image/vnd.microsoft.icon'
return send_from_directory(path, 'favicon.ico', mimetype=mimetype)
# TODO: Add some kind of real security
@app.route('/%s/recent' % os.environ.get('ADMIN_SECRET'))
def recent():
if not os.environ.get('ADMIN_SECRET'):
return make_response(jsonify({"reason": "Server misconfigured"}), 500)
recents = []
for filename in os.listdir(MEME_PATH):
if filename.endswith(".json"):
with open(os.path.join(MEME_PATH, filename), 'r') as f:
recents.append(json.load(f))
return render_template('recent.html', recents=recents[0:100])
@app.route('/<path:path>')
def meme(path):
app.logger.info('New request for meme: "%s"', path)
meme_name, top, bottom, ext = parse_meme_url(path)
if is_custom_image(meme_name):
meme_image = meme_name
else:
meme_image = guess_meme_image(meme_name)
app.logger.info('Meme: "%s" / "%s" / "%s" . "%s"',
meme_image, top, bottom, ext)
if ext == 'json':
app.logger.info('Serving JSON')
return json.dumps({'image': meme_image, 'top': top, 'bottom': bottom, 'custom': is_custom_image(meme_name)})
elif ext in IMAGE_EXTENSIONS:
image_path = meme_image_path(meme_image, top, bottom, ext)
json_path = derive_meme_path(meme_name, top, bottom, 'json')
with open(json_path, 'w') as f:
json.dump({
"source": request.args.get('source'),
"meme_image": meme_image,
"meme_query": meme_name,
"route": path,
"top": top,
"bottom": bottom,
"ext": ext
}, f, indent=2)
host = request.args.get('host', None)
if host == 'imgur':
try:
imgur_url = imgur.upload(image_path)
app.logger.info('Uploaded: "%s" as "%s"',
image_path, imgur_url)
app.logger.info('Redirecting to: "%s"', imgur_url)
return redirect(imgur_url, code=301)
except imgur.ImgurException as e:
                return error_image_response('Error uploading "%s" to Imgur' % image_path, e.message)
app.logger.info('Serving: "%s"', image_path)
return send_file(image_path)
@app.route('/upload', methods=['POST'])
def upload_file():
if 'file' not in request.files:
app.logger.error('No selected file')
return make_response(jsonify({"reason": "No selected file"}), 400)
file = request.files['file']
if file.filename == '':
app.logger.error('No selected filename')
return make_response(jsonify({"reason": "Missing a filename"}), 400)
if file and allowed_file(file.filename):
ext = get_ext(file.filename)
hash = md5(file.stream.read()).hexdigest()
file.seek(0)
meme_name = "uploaded-%s" % hash
filepath = "%s.%s" % (get_template_path(meme_name), "jpg")
file.save(filepath)
image_info = {
"meme_name": meme_name,
"filename": file.filename,
"md5": hash,
"upload_time": time.time(),
"ip": request.remote_addr
}
metadata_path = os.path.join(UPLOAD_FOLDER, "%s.json" % hash)
with open(metadata_path, 'w') as f:
json.dump(image_info, f, indent=2)
return jsonify(image_info)
if __name__ == "__main__":
""" Only runs in dev """
app.logger.setLevel(logging.DEBUG)
app.run(debug=True)
|
448973
|
from six import PY2
from apitools.base.py.exceptions import HttpNotFoundError
import pytest
from ruamel.yaml import YAML
from cloud_foundation_toolkit.dm_utils import API
from cloud_foundation_toolkit.dm_utils import get_deployment
if PY2:
import mock
else:
import unittest.mock as mock
class Message():
def __init__(self, **kwargs):
[setattr(self, k, v) for k, v in kwargs.items()]
def test_get_deployment():
with mock.patch.object(API.client.deployments, 'Get') as m:
m.side_effect = HttpNotFoundError('a', 'b', 'c')
d = get_deployment('some-deployment', 'some-project')
assert d is None
|
448996
|
class Entity(object):
'''Class to represent an entity. Callable to update the entity's position. Very esoteric ;)'''
def __init__(self, size, x, y):
self.x, self.y = x, y
self.size = size
def __call__(self, x, y):
'''Change the position of the entity.'''
self.x, self.y = x, y
def __str__(self):
'''Return some info about this Entity'''
return "coordinates: x {0}, y {1}".format(self.x, self.y)
e = Entity(24, 8, 3)
print(e)
e(12,2)
print(e)
|
449023
|
from sqlalchemy.dialects import registry
registry.register("dremio", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
registry.register("dremio.pyodbc", "sqlalchemy_dremio.pyodbc", "DremioDialect_pyodbc")
from sqlalchemy.testing.plugin.pytestplugin import *
|
449050
|
from rest_framework import viewsets, mixins, filters
from auditable.views import AuditableMixin
from api.models.DefaultCarbonIntensityCategory import \
DefaultCarbonIntensityCategory
from api.permissions.CreditCalculation import \
CreditCalculationPermissions
from api.serializers.DefaultCarbonIntensity import \
DefaultCarbonIntensityDetailSerializer, \
DefaultCarbonIntensitySerializer, \
DefaultCarbonIntensityUpdateSerializer
class DefaultCarbonIntensityViewSet(
AuditableMixin, mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, viewsets.GenericViewSet):
"""
This viewset automatically provides `list`
"""
permission_classes = (CreditCalculationPermissions,)
http_method_names = ['get', 'put']
queryset = DefaultCarbonIntensityCategory.objects.all()
filter_backends = (filters.OrderingFilter,)
ordering_fields = '__all__'
ordering = ('name',)
serializer_class = DefaultCarbonIntensitySerializer
serializer_classes = {
'list': DefaultCarbonIntensitySerializer,
'default': DefaultCarbonIntensitySerializer,
'update': DefaultCarbonIntensityUpdateSerializer,
'retrieve': DefaultCarbonIntensityDetailSerializer
}
def get_serializer_class(self):
if self.action in list(self.serializer_classes.keys()):
return self.serializer_classes[self.action]
return self.serializer_classes['default']
|
449067
|
import os
from hachoir_parser import createParser
from hachoir_metadata import extractMetadata
from core.movieinfo import TMDB
from core import sqldb
import PTN
import datetime
import logging
logging = logging.getLogger(__name__)
class ImportDirectory(object):
def __init__(self):
self.tmdb = TMDB()
self.sql = sqldb.SQL()
return
def scan_dir(self, directory, minsize=500, recursive=True):
''' Scans directory for movie files
directory: str base directory of movie library
minsize: int minimum filesize in MB <default 500>
recursive: bool scan recursively or just root directory <default True>
Returns list of files
'''
logging.info(u'Scanning {} for movies.'.format(directory))
files = []
try:
if recursive:
files = self._walk(directory)
else:
files = [os.path.join(directory, i) for i in os.listdir(directory) if os.path.isfile(os.path.join(directory, i))]
except Exception, e: #noqa
return {'error': str(e)}
files = [unicode(i) for i in files if os.path.getsize(i) >= (minsize * 1024**2)]
return {'files': files}
def fake_search_result(self, movie):
''' Generated fake search result for imported movies
movie: dict of movie info
        Returns dict to match SEARCHRESULTS table
'''
result = {'status': 'Finished',
'info_link': '#',
'pubdate': None,
'title': None,
'imdbid': movie['imdbid'],
'torrentfile': None,
'indexer': 'Library Import',
'date_found': str(datetime.date.today()),
'score': None,
'type': 'import',
'downloadid': None,
'guid': None,
'resolution': movie.get('resolution'),
'size': movie.get('size', ''),
'freeleech': 0
}
title = u'{}.{}.{}.{}.{}.{}.{}'.format(movie['title'],
movie['year'],
result['resolution'],
movie['source'],
movie['audiocodec'],
movie['videocodec'],
movie['releasegroup']
)
while len(title) > 0 and title[-1] == '.':
title = title[:-1]
while '..' in title:
title = title.replace('..', '.')
result['title'] = title
result['guid'] = movie.get('guid') or u'IMPORT{}'.format(title.encode("hex").zfill(16)[:16])
return result
def _walk(self, directory):
''' Recursively gets all files in dir
dir: directory to scan for files
Returns list of absolute file paths
'''
files = []
dir_contents = os.listdir(directory)
for i in dir_contents:
logging.info(u'Scanning {}{}{}'.format(directory, os.sep, i))
full_path = os.path.join(directory, i)
if os.path.isdir(full_path):
files = files + self._walk(full_path)
else:
files.append(full_path)
return files
class Metadata(object):
def __init__(self):
self.tmdb = TMDB()
return
def get_metadata(self, filepath):
''' Gets video metadata using hachoir_parser
filepath: str absolute path to movie file
On failure can return empty dict
Returns dict
'''
        logging.info(u'Gathering metadata for {}.'.format(filepath))
data = {
'title': '',
'year': '',
'resolution': '',
'releasegroup': '',
'audiocodec': '',
'videocodec': '',
'source': '',
'imdbid': '',
'size': '',
'path': filepath
}
titledata = self.parse_filename(filepath)
data.update(titledata)
filedata = self.parse_media(filepath)
data.update(filedata)
if data.get('resolution'):
if data['resolution'].upper() in ['4K', '1080P', '720P']:
data['resolution'] = u'{}-{}'.format(data['source'] or 'BluRay', data['resolution'].upper())
else:
data['resolution'] = 'DVD-SD'
if data.get('title') and not data.get('imdbid'):
tmdbdata = self.tmdb.search('{} {}'.format(data['title'], data.get('year', '')), single=True)
if tmdbdata:
data['year'] = tmdbdata['release_date'][:4]
data.update(tmdbdata)
data['imdbid'] = self.tmdb.get_imdbid(data['id'])
else:
                logging.warning('Unable to get data from TMDB for {}'.format(data['title']))
return data
return data
def parse_media(self, filepath):
''' Uses Hachoir-metadata to parse the file header to metadata
filepath: str absolute path to file
Attempts to get resolution from media width
Returns dict of metadata
'''
metadata = {}
try:
# with createParser(filepath) as parser:
parser = createParser(filepath)
extractor = extractMetadata(parser)
filedata = extractor.exportDictionary(human=False)
parser.stream._input.close()
except Exception, e: #noqa
logging.error(u'Unable to parse metadata from file header.', exc_info=True)
return metadata
if filedata:
if filedata.get('Metadata'):
width = filedata['Metadata'].get('width')
            elif filedata.get('video[1]'):
width = filedata['video[1]'].get('width')
else:
width = None
if width:
width = int(width)
                if width > 1920:
                    metadata['resolution'] = '4K'
                elif 1920 >= width > 1440:
                    metadata['resolution'] = '1080P'
                elif 1440 >= width > 720:
                    metadata['resolution'] = '720P'
                else:
                    metadata['resolution'] = 'SD'
if filedata.get('audio[1]'):
metadata['audiocodec'] = filedata['audio[1]'].get('compression').replace('A_', '')
if filedata.get('video[1]'):
metadata['videocodec'] = filedata['video[1]'].get('compression').split('/')[0].replace('V_', '')
return metadata
def parse_filename(self, filepath):
''' Uses PTN to get as much info as possible from path
filepath: str absolute path to file
Returns dict of Metadata
'''
logging.info(u'Parsing {} for movie information.'.format(filepath))
        # This is our base dict. Contains all necessary keys, though they can all be empty if not found.
metadata = {
'title': '',
'year': '',
'resolution': '',
'releasegroup': '',
'audiocodec': '',
'videocodec': '',
'source': '',
'imdbid': ''
}
titledata = PTN.parse(os.path.basename(filepath))
# this key is useless
if 'excess' in titledata:
titledata.pop('excess')
if len(titledata) < 2:
logging.info(u'Parsing filename doesn\'t look accurate. Parsing parent folder name.')
path_list = os.path.split(filepath)[0].split(os.sep)
titledata = PTN.parse(path_list[-1])
logging.info(u'Found {} in parent folder.'.format(titledata))
else:
logging.info(u'Found {} in filename.'.format(titledata))
title = titledata.get('title')
if title and title[-1] == '.':
titledata['title'] = title[:-1]
# Make sure this matches our key names
if 'codec' in titledata:
titledata['videocodec'] = titledata.pop('codec')
if 'audio' in titledata:
titledata['audiocodec'] = titledata.pop('audio')
if 'quality' in titledata:
titledata['source'] = titledata.pop('quality')
if 'group' in titledata:
titledata['releasegroup'] = titledata.pop('group')
metadata.update(titledata)
return metadata
def convert_to_db(self, movie):
''' Takes movie data and converts to a database-writable dict
movie: dict of movie information
Used to prepare movie data for write into MOVIES
Makes sure all keys match and are present.
Sorts out alternative titles and digital release dates
Returns dict ready to sql.write into MOVIES
'''
if not movie.get('imdbid'):
movie['imdbid'] = movie.pop('imdb_id')
if movie.get('release_date'):
movie['year'] = movie['release_date'][:4]
else:
movie['year'] = 'N/A'
movie['poster'] = u'images/poster/{}.jpg'.format(movie['imdbid'])
movie['plot'] = movie['overview']
movie['url'] = u'https://www.themoviedb.org/movie/{}'.format(movie['id'])
movie['score'] = movie['vote_average']
if movie.get('status') != 'Disabled':
movie['status'] = u'Wanted'
movie['added_date'] = str(datetime.date.today())
movie['backlog'] = 0
movie['tmdbid'] = movie['id']
a_t = []
for i in movie['alternative_titles']['titles']:
if i['iso_3166_1'] == 'US':
a_t.append(i['title'])
movie['alternative_titles'] = ','.join(a_t)
dates = [None]
for i in movie['release_dates']['results']:
for d in i['release_dates']:
if d['type'] == 4:
dates.append(d['release_date'])
digital_date = max(dates)
if digital_date:
movie['digital_release_date'] = digital_date[:10]
if movie.get('quality') is None:
movie['quality'] = 'Default'
required_keys = ('added_date', 'alternative_titles', 'digital_release_date', 'imdbid', 'tmdbid', 'title', 'year', 'poster', 'plot', 'url', 'score', 'release_date', 'rated', 'status', 'quality', 'addeddate', 'backlog')
for i in movie.keys():
if i not in required_keys:
del movie[i]
return movie
|
449071
|
from ._feature_extractor import _FeatureExtractor
from .. import submodules, weight_init
class MobileNetV2(_FeatureExtractor):
def __init__(self, feature_channels=1280):
super().__init__(feature_channels)
self.initial = submodules.conv(3, 32, stride=2)
self.block1 = submodules.inverted_residuals(32, 16, expansion=1)
self.block2 = submodules.inverted_residuals(16, 24, stride=2, blocks=2)
self.block3 = submodules.inverted_residuals(24, 32, stride=2, blocks=3)
self.block4a = submodules.inverted_residuals(32, 64, stride=2, blocks=4)
self.block4b = submodules.inverted_residuals(64, 96, blocks=3)
self.block5a = submodules.inverted_residuals(
96, 160, stride=2, blocks=3
)
self.block5b = submodules.inverted_residuals(160, 320)
self.final = submodules.conv(320, self.feature_channels, 1)
weight_init.init(self.modules())
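    # Note (added): with five stride-2 stages (initial conv, block2, block3, block4a,
    # block5a) the overall spatial downsampling is 32x, so a 3 x H x W input yields a
    # feature_channels x H/32 x W/32 map (assuming H and W are divisible by 32 and the
    # submodules use 'same'-style padding).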
def forward(self, input_):
initial = self.initial(input_)
block1 = self.block1(initial)
block2 = self.block2(block1)
block3 = self.block3(block2)
block4a = self.block4a(block3)
block4b = self.block4b(block4a)
block5a = self.block5a(block4b)
block5b = self.block5b(block5a)
final = self.final(block5b)
return final
|
449078
|
import os
from collections import namedtuple
from typing import Sequence, Set, TypeVar
from anoncreds.protocol.utils import toDictWithStrValues, \
fromDictWithStrValues, encodeAttr, crypto_int_to_str, to_crypto_int, isCryptoInteger, \
intToArrayBytes, bytesToInt
from config.config import cmod
from typing import NamedTuple
import uuid
class AttribType:
def __init__(self, name: str, encode: bool):
self.name = name
self.encode = encode
def __eq__(self, y):
return self.__dict__ == y.__dict__
def __lt__(self, other):
return self.name < other.name
def __repr__(self):
return str(self.__dict__)
class AttribDef:
def __init__(self, name, attrTypes):
if isinstance(name, str):
self.names = [name]
self.attrTypes = [attrTypes]
else:
self.names = name
self.attrTypes = attrTypes
@property
def name(self):
return ', '.join(self.names)
def __getattr__(self, item):
for attr_types in self.attrTypes:
for at in attr_types:
if item == at.name:
return at
raise AttributeError
def __add__(self, other):
return AttribDef(self.names + other.names,
self.attrTypes + other.attrTypes)
def attribs(self, **vals):
for k in vals:
            # Check that keys provided in vals match the attribute names
# in the schema definition
assert k in self.attribNames()
return Attribs(self, **vals)
def attribNames(self):
return [at.name
for attr_types in self.attrTypes
for at in attr_types]
def __eq__(self, y):
return sorted(self.names) == sorted(y.names) \
and sorted(self.attrTypes) == sorted(y.attrTypes)
def __repr__(self):
return str(self.__dict__)
class Attribs:
def __init__(self, credType: AttribDef = None, **vals):
self.credType = credType if credType else AttribDef([], [])
self._vals = vals
def encoded(self):
"""
This function will encode all the attributes to 256 bit integers
:return:
"""
encoded = {}
for i in range(len(self.credType.names)):
self.credType.names[i]
attr_types = self.credType.attrTypes[i]
for at in attr_types:
attrName = at.name
if attrName in self._vals:
if at.encode:
encoded[attrName] = encodeAttr(self._vals[attrName])
else:
encoded[attrName] = self._vals[at.name]
return encoded
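    # Usage sketch (added; attribute names are hypothetical): for an AttribDef built from
    # [AttribType('name', encode=True), AttribType('age', encode=False)], calling
    # attribs(name='Alice', age=25).encoded() returns {'name': encodeAttr('Alice'), 'age': 25},
    # i.e. only attributes whose type requests encoding are turned into large integers.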
def __add__(self, other):
vals = self._vals.copy()
vals.update(other._vals)
return Attribs(self.credType + other.credType, **vals)
def __iter__(self):
return self._vals.__iter__()
def __getitem__(self, key):
return self._vals[key]
def keys(self):
return self._vals.keys()
def values(self):
return self._vals.values()
def items(self):
return self._vals.items()
def __repr__(self):
return str(self.__dict__)
def __eq__(self, y):
return self.credType == y.credType \
and self._vals == y._vals
PublicParams = namedtuple('PublicParams', 'Gamma, rho, g, h')
T = TypeVar('T')
VType = Set[int]
TimestampType = int
class Tails:
def __init__(self):
self.g = {}
self.gprime = {}
def addValue(self, index, gVal, gprimeVal):
self.g[index] = gVal
self.gprime[index] = gprimeVal
class NamedTupleStrSerializer:
def toStrDict(self):
return toDictWithStrValues(self._asdict())
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
return cls(**d)
class StrSerializer:
def toStrDict(self):
return toDictWithStrValues(self.__dict__)
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
return cls(**d)
class SchemaKey(
namedtuple('SchemaKey', 'name, version, issuerId'),
NamedTupleStrSerializer):
def __new__(cls, name=None, version=None, issuerId=None):
return super(SchemaKey, cls).__new__(cls, name, version, issuerId)
def __hash__(self):
keys = (self.name, self.version, self.issuerId)
return hash(keys)
def __str__(self):
rtn = list()
rtn.append('Schema Key')
rtn.append(" Name: {}".format(str(self.name)))
rtn.append(" Version: {}".format(str(self.version)))
rtn.append(" IssuerId: {}".format(str(self.issuerId)))
return os.linesep.join(rtn)
class ID(namedtuple('ID', 'schemaKey, schemaId, seqId')):
def __new__(cls, schemaKey: SchemaKey = None, schemaId=None,
seqId=None):
return super(ID, cls).__new__(cls, schemaKey, schemaId, seqId)
class Schema(namedtuple('Schema',
'name, version, attrNames, issuerId, seqId'),
NamedTupleStrSerializer):
def __new__(cls, name, version, attrNames, issuerId, seqId=None):
return super(Schema, cls).__new__(cls,
name,
version,
attrNames,
issuerId,
seqId)
def getKey(self):
return SchemaKey(self.name, self.version, self.issuerId)
class PublicKey(namedtuple('PublicKey', 'N, Rms, Rctxt, R, S, Z, seqId'),
NamedTupleStrSerializer):
def __new__(cls, N, Rms, Rctxt, R, S, Z, seqId=None):
return super(PublicKey, cls).__new__(cls, N, Rms, Rctxt, R, S, Z, seqId)
def __eq__(self, other):
return self.N == other.N and self.Rms == other.Rms \
and self.Rctxt == other.Rctxt and self.S == other.S \
and self.Z == other.Z and self.seqId == other.seqId \
and dict(self.R) == dict(other.R)
def to_str_dict(self):
public_key = {
'n': str(crypto_int_to_str(self.N)),
's': str(crypto_int_to_str(self.S)),
'rms': str(crypto_int_to_str(self.Rms)),
'rctxt': str(crypto_int_to_str(self.Rctxt)),
'z': str(crypto_int_to_str(self.Z)),
'r': {k: str(crypto_int_to_str(v)) for k, v in self.R.items()}
}
return public_key
@classmethod
def from_str_dict(cls, data):
N = to_crypto_int(data['n'])
Rms = to_crypto_int(data['rms'], data['n'])
Rctxt = to_crypto_int(data['rctxt'], data['n'])
S = to_crypto_int(data['s'], data['n'])
Z = to_crypto_int(data['z'], data['n'])
R = {k: to_crypto_int(v, data['n']) for k, v in data['r'].items()}
return cls(N, Rms, Rctxt, R, S, Z)
class SecretKey(namedtuple('SecretKey', 'pPrime, qPrime'),
NamedTupleStrSerializer):
pass
class RevocationPublicKey(namedtuple('RevocationPublicKey',
'qr, g, gprime, h, h0, h1, h2, htilde, hhat, u, pk, y, seqId'),
NamedTupleStrSerializer):
def __new__(cls, qr, g, gprime, h, h0, h1, h2, htilde, hhat, u, pk, y, seqId=None):
return super(RevocationPublicKey, cls).__new__(cls, qr, g, gprime, h, h0, h1,
h2, htilde, hhat, u, pk, y,
seqId)
class RevocationSecretKey(namedtuple('RevocationSecretKey', 'x, sk'),
NamedTupleStrSerializer):
pass
class AccumulatorPublicKey(namedtuple('AccumulatorPublicKey', 'z, seqId'),
NamedTupleStrSerializer):
def __new__(cls, z, seqId=None):
return super(AccumulatorPublicKey, cls).__new__(cls, z, seqId)
class AccumulatorSecretKey(
namedtuple('AccumulatorSecretKey', 'gamma'), NamedTupleStrSerializer):
pass
class Predicate(namedtuple('Predicate', 'attrName, value, type, schema_seq_no, issuer_did'),
NamedTupleStrSerializer):
def __new__(cls, attrName, value, type, schema_seq_no=None, issuer_did=None):
return super(Predicate, cls).__new__(cls, attrName, value, type, schema_seq_no, issuer_did)
def __key(self):
return self.attrName, self.value, self.type
def __eq__(self, y):
return self.__key() == y.__key()
def __hash__(self):
return hash(self.__key())
def to_str_dict(self):
return {
'attr_name': self.attrName,
'value': self.value,
'p_type': self.type,
'schema_seq_no': self.schema_seq_no,
'issuer_did': self.issuer_did
}
@classmethod
def from_str_dict(cls, d):
attrName = d['attr_name']
value = d['value']
type = d['p_type']
schema_seq_no = int(d['schema_seq_no']) if (
('schema_seq_no' in d) and d['schema_seq_no']) else None
issuer_did = int(d['issuer_did']) if (
('issuer_did' in d) and d['issuer_did']) else None
return PredicateGE(attrName=attrName, value=value, type=type,
schema_seq_no=schema_seq_no, issuer_did=issuer_did)
# TODO: now we consider only >= predicates. Support other types of predicates
class PredicateGE(Predicate):
def __new__(cls, attrName, value, type='GE', schema_seq_no=None, issuer_did=None):
return super(PredicateGE, cls).__new__(cls, attrName, value, type, schema_seq_no, issuer_did)
class Accumulator:
def __init__(self, iA, acc, V: VType, L):
self.iA = iA
self.acc = acc
self.V = V
self.L = L
self.currentI = 1
def isFull(self):
return self.currentI > self.L
def __eq__(self, other):
return self.iA == other.iA and self.acc == other.acc \
and self.V == other.V and self.L == other.L \
and self.currentI == other.currentI
ClaimInitDataType = namedtuple('ClaimInitDataType', 'U, vPrime')
class ClaimRequest(namedtuple('ClaimRequest', 'userId, U, Ur'),
NamedTupleStrSerializer):
def __new__(cls, userId, U, Ur=None):
return super(ClaimRequest, cls).__new__(cls, userId, U, Ur)
def to_str_dict(self):
return {
'prover_did': str(self.userId),
'u': str(crypto_int_to_str(self.U)),
'ur': self.Ur
}
@classmethod
def from_str_dict(cls, data, n):
u = to_crypto_int(data['u'], str(n))
return cls(userId=data['prover_did'], U=u, Ur=data['ur'])
# Accumulator = namedtuple('Accumulator', ['iA', 'acc', 'V', 'L'])
class PrimaryClaim(
namedtuple('PrimaryClaim', 'm2, A, e, v'),
NamedTupleStrSerializer):
def to_str_dict(self):
return {
'm2': str(crypto_int_to_str(self.m2)),
'a': str(crypto_int_to_str(self.A)),
'e': str(self.e),
'v': str(self.v)
}
@classmethod
def from_str_dict(cls, data, n):
m2 = to_crypto_int(data['m2'])
a = to_crypto_int(data['a'], str(n))
e = int(data['e'])
v = int(data['v'])
return cls(m2=m2, A=a, e=e, v=v)
class Witness(namedtuple('Witness', 'sigmai, ui, gi, omega, V'),
NamedTupleStrSerializer):
pass
class NonRevocationClaim(
namedtuple('NonRevocationClaim', 'iA, sigma, c, v, witness,i, m2'),
NamedTupleStrSerializer):
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
witness = Witness(**d['witness'])
result = cls(**d)
return result._replace(witness=witness)
def to_str_dict(self):
return {
}
class Claims(namedtuple('Claims', 'primaryClaim, nonRevocClaim'),
NamedTupleStrSerializer):
def __new__(cls, primaryClaim, nonRevocClaim=None):
return super(Claims, cls).__new__(cls, primaryClaim, nonRevocClaim)
@classmethod
def fromStrDict(cls, d):
primary = PrimaryClaim.fromStrDict(d['primaryClaim'])
nonRevoc = None
if 'nonRevocClaim' in d:
nonRevoc = NonRevocationClaim.fromStrDict(d['nonRevocClaim'])
return Claims(primaryClaim=primary, nonRevocClaim=nonRevoc)
def to_str_dict(self):
return {
'primary_claim': self.primaryClaim.to_str_dict(),
'non_revocation_claim': self.nonRevocClaim.to_str_dict() if self.nonRevocClaim else None
}
@classmethod
def from_str_dict(cls, data, n):
primary = PrimaryClaim.from_str_dict(data['primary_claim'], n)
nonRevoc = None
if 'non_revocation_claim' in data and data['non_revocation_claim']:
nonRevoc = NonRevocationClaim.fromStrDict(
data['non_revocation_claim'])
return cls(primaryClaim=primary, nonRevocClaim=nonRevoc)
class ClaimsPair(dict):
def __str__(self):
rtn = list()
rtn.append('Claims')
for schema_key, claim_attrs in self.items():
rtn.append('')
rtn.append(schema_key.name)
rtn.append(str(schema_key))
rtn.append('Attributes:')
for attr_name, attr_raw_enc in claim_attrs.items():
rtn.append(' {}: {}'.format(str(attr_name),
str(attr_raw_enc)))
return os.linesep.join(rtn)
class AttributeInfo(
namedtuple('AttributeInfo', 'name, schema_seq_no, issuer_did'),
NamedTupleStrSerializer):
def __new__(cls, name=None, schema_seq_no=None, issuer_did=None):
return super(AttributeInfo, cls).__new__(cls, name, schema_seq_no, issuer_did)
def to_str_dict(self):
return {
'name': self.name,
'schema_seq_no': self.schema_seq_no,
'issuer_did': self.issuer_did
}
@classmethod
def from_str_dict(cls, d):
schema_seq_no = int(d['schema_seq_no']) if d['schema_seq_no'] else None
issuer_did = int(d['issuer_did']) if (
('issuer_did' in d) and d['issuer_did']) else None
name = d['name']
return AttributeInfo(name, schema_seq_no, issuer_did)
class ProofClaims(
namedtuple('ProofClaims', 'claims, revealedAttrs, predicates')):
def __new__(cls, claims=None, revealedAttrs=None, predicates=None):
return super(ProofClaims, cls).__new__(cls, claims, revealedAttrs or [],
predicates or [])
class NonRevocProofXList(
namedtuple('NonRevocProofXList',
'rho, r, rPrime, rPrimePrime, rPrimePrimePrime, o, oPrime, m, mPrime, t, tPrime, m2, s, c'),
NamedTupleStrSerializer):
def __new__(cls, rho=None, r=None, rPrime=None, rPrimePrime=None,
rPrimePrimePrime=None, o=None, oPrime=None,
m=None, mPrime=None, t=None, tPrime=None, m2=None, s=None,
c=None, group=None):
return super(NonRevocProofXList, cls).__new__(cls,
rho=cls._setValue(rho,
group),
r=cls._setValue(
r, group),
rPrime=cls._setValue(
rPrime, group),
rPrimePrime=cls._setValue(
rPrimePrime, group),
rPrimePrimePrime=cls._setValue(
rPrimePrimePrime,
group),
o=cls._setValue(
o, group),
oPrime=cls._setValue(
oPrime, group),
m=cls._setValue(
m, group),
mPrime=cls._setValue(
mPrime, group),
t=cls._setValue(
t, group),
tPrime=cls._setValue(
tPrime, group),
m2=cls._setValue(m2,
group),
s=cls._setValue(
s, group),
c=cls._setValue(c, group))
@staticmethod
def _setValue(v=None, group=None):
return v if v else group.random(cmod.ZR) if group else None
def asList(self):
return [self.rho, self.o, self.c, self.oPrime, self.m, self.mPrime,
self.t, self.tPrime,
self.m2, self.s, self.r, self.rPrime, self.rPrimePrime,
self.rPrimePrimePrime]
@staticmethod
def fromList(values: Sequence):
rho, o, c, oPrime, m, mPrime, t, tPrime, m2, s, r, rPrime, rPrimePrime, rPrimePrimePrime = tuple(
values)
return NonRevocProofXList(rho=rho, o=o, c=c, oPrime=oPrime, m=m,
mPrime=mPrime, t=t, tPrime=tPrime,
m2=m2, s=s, r=r, rPrime=rPrime,
rPrimePrime=rPrimePrime,
rPrimePrimePrime=rPrimePrimePrime)
class NonRevocProofCList(
namedtuple('NonRevocProofCList', 'E, D, A, G, W, S, U'),
NamedTupleStrSerializer):
def asList(self):
return [self.E, self.D, self.A, self.G, self.W, self.S, self.U]
class NonRevocProofTauList(
namedtuple('NonRevocProofTauList', 'T1, T2, T3, T4, T5, T6, T7, T8'),
NamedTupleStrSerializer):
def asList(self):
return [self.T1, self.T2, self.T3, self.T4, self.T5, self.T6, self.T7,
self.T8]
class NonRevocInitProof(namedtuple('NonRevocInitProof',
'CList, TauList, CListParams, TauListParams'),
NamedTupleStrSerializer):
def asCList(self):
return self.CList.asList()
def asTauList(self):
return self.TauList.asList()
class PrimaryEqualInitProof(namedtuple('PrimaryEqualInitProof',
'c1, Aprime, T, eTilde, ePrime, vTilde, vPrime, \
mTilde, m1Tilde, m2Tilde, unrevealedAttrs, revealedAttrs'),
NamedTupleStrSerializer):
def asCList(self):
return [self.Aprime]
def asTauList(self):
return [self.T]
class PrimaryPrecicateGEInitProof(
namedtuple('PrimaryPrecicateGEInitProof',
'CList, TauList, u, uTilde, r, rTilde, alphaTilde, predicate, T'),
NamedTupleStrSerializer):
def asCList(self):
return self.CList
def asTauList(self):
return self.TauList
class PrimaryInitProof(namedtuple('PrimaryInitProof', 'eqProof, geProofs'),
NamedTupleStrSerializer):
def asCList(self):
CList = self.eqProof.asCList()
for geProof in self.geProofs:
CList += geProof.asCList()
return CList
def asTauList(self):
TauList = self.eqProof.asTauList()
for geProof in self.geProofs:
TauList += geProof.asTauList()
return TauList
class InitProof(namedtuple('InitProof', 'nonRevocInitProof, primaryInitProof'),
NamedTupleStrSerializer):
def __new__(cls, nonRevocInitProof: NonRevocInitProof = None,
primaryInitProof: PrimaryInitProof = None):
return super(InitProof, cls).__new__(cls, nonRevocInitProof,
primaryInitProof)
class PrimaryEqualProof(namedtuple('PrimaryEqualProof',
'e, v, m, m1, m2, Aprime, revealedAttrs'),
NamedTupleStrSerializer):
def to_str_dict(self):
return {
'e': str(crypto_int_to_str(self.e)),
'v': str(crypto_int_to_str(self.v)),
'm1': str(crypto_int_to_str(self.m1)),
'm2': str(crypto_int_to_str(self.m2)),
'm': {k: str(crypto_int_to_str(v)) for k, v in self.m.items()},
'revealed_attrs': {k: str(v) for k, v in self.revealedAttrs.items()},
'a_prime': str(crypto_int_to_str(self.Aprime))
}
@classmethod
def from_str_dict(cls, d, n):
e = to_crypto_int(d['e'])
v = to_crypto_int(d['v'])
m1 = to_crypto_int(d['m1'])
m2 = to_crypto_int(d['m2'])
Aprime = to_crypto_int(d['a_prime'], str(n))
revealedAttrs = {k: to_crypto_int(v)
for k, v in d['revealed_attrs'].items()}
m = {k: to_crypto_int(v) for k, v in d['m'].items()}
return PrimaryEqualProof(e=e, v=v, m1=m1, m2=m2, m=m, Aprime=Aprime, revealedAttrs=revealedAttrs)
class PrimaryPredicateGEProof(
namedtuple('PrimaryPredicateGEProof', 'u, r, alpha, mj, T, predicate'),
NamedTupleStrSerializer):
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
predicate = PredicateGE(**d['predicate'])
result = cls(**d)
return result._replace(predicate=predicate)
def to_str_dict(self):
return {
'alpha': str(crypto_int_to_str(self.alpha)),
'mj': str(crypto_int_to_str(self.mj)),
'u': {k: str(crypto_int_to_str(v)) for k, v in self.u.items()},
'r': {k: str(crypto_int_to_str(v)) for k, v in self.r.items()},
't': {k: str(crypto_int_to_str(v)) for k, v in self.T.items()},
'predicate': self.predicate.to_str_dict()
}
@classmethod
def from_str_dict(cls, d, n):
alpha = to_crypto_int(d['alpha'])
mj = to_crypto_int(d['mj'])
u = {k: to_crypto_int(v) for k, v in d['u'].items()}
r = {k: to_crypto_int(v) for k, v in d['r'].items()}
T = {k: to_crypto_int(v, str(n)) for k, v in d['t'].items()}
predicate = PredicateGE.from_str_dict(d['predicate'])
return PrimaryPredicateGEProof(alpha=alpha, mj=mj, u=u, r=r, T=T, predicate=predicate)
class NonRevocProof(namedtuple('NonRevocProof', 'XList CProof'),
NamedTupleStrSerializer):
@classmethod
def fromStrDict(cls, d):
XList = NonRevocProofXList.fromStrDict(d['XList'])
CProof = NonRevocProofCList.fromStrDict(d['CProof'])
return NonRevocProof(XList=XList, CProof=CProof)
class PrimaryProof(namedtuple('PrimaryProof', 'eqProof, geProofs'),
NamedTupleStrSerializer):
def __new__(cls, eqProof: PrimaryEqualProof,
geProofs: Sequence[PrimaryPredicateGEProof]):
return super(PrimaryProof, cls).__new__(cls, eqProof, geProofs)
@classmethod
def fromStrDict(cls, d):
eqProof = PrimaryEqualProof.fromStrDict(d['eqProof'])
geProofs = [PrimaryPredicateGEProof.fromStrDict(
v) for v in d['geProofs']]
return PrimaryProof(eqProof=eqProof, geProofs=geProofs)
def to_str_dict(self):
return {
'eq_proof': self.eqProof.to_str_dict(),
'ge_proofs': [p.to_str_dict() for p in self.geProofs]
}
@classmethod
def from_str_dict(cls, d, n):
eqProof = PrimaryEqualProof.from_str_dict(d['eq_proof'], n)
geProofs = [PrimaryPredicateGEProof.from_str_dict(
p, n) for p in d['ge_proofs']]
return PrimaryProof(eqProof=eqProof, geProofs=geProofs)
class Proof(namedtuple('Proof', 'primaryProof, nonRevocProof'),
NamedTupleStrSerializer):
def __new__(cls, primaryProof: PrimaryProof,
nonRevocProof: NonRevocProof = None):
return super(Proof, cls).__new__(cls, primaryProof, nonRevocProof)
@classmethod
def fromStrDict(cls, d):
primaryProof = PrimaryProof.fromStrDict(d['primaryProof'])
nonRevocProof = None
if 'nonRevocProof' in d:
nonRevocProof = NonRevocProof.fromStrDict(d['nonRevocProof'])
return Proof(primaryProof=primaryProof, nonRevocProof=nonRevocProof)
def to_str_dict(self):
return {
'primary_proof': self.primaryProof.to_str_dict()
}
@classmethod
def from_str_dict(cls, d, n):
primaryProof = PrimaryProof.from_str_dict(d['primary_proof'], n)
return Proof(primaryProof=primaryProof)
class ProofInfo(namedtuple('ProofInfo', 'proof, schema_seq_no, issuer_did'),
NamedTupleStrSerializer):
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
proof = Proof.fromStrDict(d['proof'])
result = cls(**d)
return result._replace(proof=proof)
def to_str_dict(self):
return {
'proof': self.proof.to_str_dict(),
'schema_seq_no': self.schema_seq_no,
'issuer_did': self.issuer_did
}
@classmethod
def from_str_dict(cls, d, n):
proof = Proof.from_str_dict(d['proof'], n)
schema_seq_no = d['schema_seq_no']
issuer_did = d['issuer_did']
return ProofInfo(proof=proof, schema_seq_no=schema_seq_no, issuer_did=issuer_did)
class FullProof(namedtuple('FullProof', 'proofs, aggregatedProof, requestedProof'),
NamedTupleStrSerializer):
def getCredDefs(self):
return self.proofs.keys()
@classmethod
def fromStrDict(cls, d):
d = fromDictWithStrValues(d)
aggregatedProof = AggregatedProof.fromStrDict(d['aggregatedProof'])
requestedProof = RequestedProof.fromStrDict(d['requestedProof'])
proofs = {k: ProofInfo.fromStrDict(v) for k, v in d['proofs'].items()}
return FullProof(aggregatedProof=aggregatedProof, proofs=proofs, requestedProof=requestedProof)
def to_str_dict(self):
return {
'aggregated_proof': self.aggregatedProof.to_str_dict(),
'proofs': {k: v.to_str_dict() for k, v in self.proofs.items()},
'requested_proof': self.requestedProof.to_str_dict()
}
@classmethod
def from_str_dict(cls, d, n):
aggregatedProof = AggregatedProof.from_str_dict(d['aggregated_proof'])
requestedProof = RequestedProof.from_str_dict(d['requested_proof'])
proofs = {item[0]: ProofInfo.from_str_dict(
item[1], n[i]) for i, item in enumerate(d['proofs'].items())}
return FullProof(aggregatedProof=aggregatedProof, requestedProof=requestedProof, proofs=proofs)
class AggregatedProof(namedtuple('AggregatedProof', 'cHash, CList'),
NamedTupleStrSerializer):
def to_str_dict(self):
return {
'c_hash': str(self.cHash),
'c_list': [intToArrayBytes(v) for v in self.CList if isCryptoInteger(v)]
}
@classmethod
def from_str_dict(cls, d):
cHash = int(d['c_hash'])
CList = [bytesToInt(v) for v in d['c_list']]
return AggregatedProof(cHash=cHash, CList=CList)
class RequestedProof(namedtuple('RequestedProof', 'revealed_attrs, unrevealed_attrs, self_attested_attrs, predicates'),
NamedTupleStrSerializer):
def __new__(cls, revealed_attrs=None, unrevealed_attrs=None, self_attested_attrs=None, predicates=None):
return super(RequestedProof, cls).__new__(cls, revealed_attrs or {}, unrevealed_attrs or {},
self_attested_attrs or {}, predicates or {})
@classmethod
def fromStrDict(cls, d):
revealed_attrs = {k: [v[0], v[1], v[2]]
for k, v in d['revealed_attrs'].items()}
predicates = {k: v for k, v in d['predicates'].items()}
return RequestedProof(revealed_attrs=revealed_attrs, predicates=predicates)
def to_str_dict(self):
return {
'revealed_attrs': self.revealed_attrs,
'unrevealed_attrs': self.unrevealed_attrs,
'self_attested_attrs': self.self_attested_attrs,
'predicates': self.predicates
}
@classmethod
def from_str_dict(cls, d):
revealed_attrs = d['revealed_attrs']
unrevealed_attrs = d['unrevealed_attrs']
self_attested_attrs = d['self_attested_attrs']
predicates = d['predicates']
return RequestedProof(revealed_attrs=revealed_attrs, unrevealed_attrs=unrevealed_attrs,
self_attested_attrs=self_attested_attrs, predicates=predicates)
class ClaimAttributeValues(namedtuple('ClaimAttributeValues', 'raw, encoded'),
NamedTupleStrSerializer):
def __new__(cls, raw=None, encoded=None):
return super(ClaimAttributeValues, cls).__new__(cls, raw, encoded)
def __str__(self):
return self.raw
def to_str_dict(self):
return [str(self.raw), str(self.encoded)]
@classmethod
def from_str_dict(cls, d):
raw = d[0]
encoded = int(to_crypto_int(d[1]))
return ClaimAttributeValues(raw=raw, encoded=encoded)
AvailableClaim = NamedTuple("AvailableClaim", [("name", str),
("version", str),
("origin", str)])
class ProofRequest:
def __init__(self, name, version, nonce, attributes={}, verifiableAttributes={}, predicates={}):
self.name = name
self.version = version
self.nonce = nonce
self.attributes = attributes
self.verifiableAttributes = \
{str(uuid.uuid4()): AttributeInfo(name=a) for a in verifiableAttributes} if \
isinstance(verifiableAttributes, list) else verifiableAttributes
self.predicates = {str(uuid.uuid4()): PredicateGE(attrName=p['attrName'], value=p['value']) for p in
predicates} if isinstance(predicates, list) else predicates
self.fulfilledByClaims = []
self.selfAttestedAttrs = {}
self.ts = None
self.seqNo = None
        # TODO _F_ need to add support for predicates on unrevealed attributes
def __eq__(self, other):
return self.__dict__ == other.__dict__
@property
def toDict(self):
return {
"name": self.name,
"version": self.version,
"nonce": self.nonce,
"attributes": self.attributes,
"verifiableAttributes": self.verifiableAttributes
}
def to_str_dict(self):
return {
"name": self.name,
"version": self.version,
"nonce": str(self.nonce),
"requested_attrs": {k: v.to_str_dict() for k, v in self.verifiableAttributes.items()},
"requested_predicates": {k: v.to_str_dict() for k, v in self.predicates.items()}
}
@staticmethod
def from_str_dict(d):
return ProofRequest(name=d['name'],
version=d['version'],
nonce=int(d['nonce']),
attributes=d['attributes'] if 'attributes' in d else {
},
verifiableAttributes={k: AttributeInfo.from_str_dict(v) for k, v in
d['requested_attrs'].items()},
predicates={k: PredicateGE.from_str_dict(v) for k, v in d['requested_predicates'].items()})
@property
def attributeValues(self):
return \
'Attributes:' + '\n ' + \
format("\n ".join(
['{}: {}'.format(k, v)
for k, v in self.attributes.items()])) + '\n'
@property
def verifiableClaimAttributeValues(self):
return \
'Verifiable Attributes:' + '\n ' + \
format("\n ".join(
['{}'.format(v.name)
for k, v in self.verifiableAttributes.items()])) + '\n'
@property
def predicateValues(self):
return \
'Predicates:' + '\n ' + \
format("\n ".join(
['{}'.format(v.attrName)
for k, v in self.predicates.items()])) + '\n'
@property
def fixedInfo(self):
return 'Status: Requested' + '\n' + \
'Name: ' + self.name + '\n' + \
'Version: ' + self.version + '\n'
def __str__(self):
return 'Proof Request\n' + \
self.fixedInfo + \
self.attributeValues + \
self.verifiableClaimAttributeValues
|
449087
|
import common
import numpy as np
import util
def fit_phis(adj, superclusters, supervars, method, iterations, parallel):
if method == 'debug':
# Bypass cache when debugging.
return _fit_phis(adj, superclusters, supervars, method, iterations, parallel)
key = (hash(adj.tobytes()), iterations)
if key not in fit_phis.cache:
fit_phis.cache[key] = _fit_phis(adj, superclusters, supervars, method, iterations, parallel)
fit_phis.cache_misses += 1
else:
fit_phis.cache_hits += 1
return fit_phis.cache[key]
fit_phis.cache = {}
fit_phis.cache_hits = 0
fit_phis.cache_misses = 0
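# Note (added): fit_phis memoizes its result on function attributes. The cache key is
# the raw bytes of the adjacency matrix plus the iteration count, so trees with an
# identical adjacency structure reuse the same fitted (phi, eta); the 'debug' method
# bypasses the cache entirely.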
# Used only for `rprop_cached`.
last_eta = ['mle']
def _calc_llh(phi, V, N, omega_v, epsilon=1e-5):
import scipy
K, S = phi.shape
for arr in V, N, omega_v:
assert arr.shape == (K-1, S)
assert np.allclose(1, phi[0])
P = omega_v * phi[1:]
P = np.maximum(P, epsilon)
P = np.minimum(P, 1 - epsilon)
phi_llh = scipy.stats.binom.logpmf(V, N, P) / np.log(2)
assert not np.any(np.isnan(phi_llh))
assert not np.any(np.isinf(phi_llh))
llh_per_sample = -np.sum(phi_llh, axis=0) / K
nlglh = np.sum(llh_per_sample) / S
return (phi_llh, llh_per_sample, nlglh)
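# Note (added): phi_llh is the per-(cluster, sample) binomial log-likelihood in bits
# (log base 2); llh_per_sample negates it and normalizes by the number of clusters,
# and nlglh further averages over samples, so smaller values indicate a better fit.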
def _fit_phis(adj, superclusters, supervars, method, iterations, parallel):
# Calling `import` on each function call should be cheap, as Python caches a
# reference to the module after the first load.
if method in ('graddesc_old', 'rprop_old'):
import phi_fitter_iterative
eta = phi_fitter_iterative.fit_etas(adj, superclusters, supervars, method[:-4], iterations, parallel)
elif method == 'rprop':
import phi_fitter_lol
eta = phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init='mle')
elif method == 'projection':
import phi_fitter_projection
eta = phi_fitter_projection.fit_etas(adj, superclusters, supervars)
elif method == 'proj_rprop':
import phi_fitter_projection
import phi_fitter_lol
eta_proj = phi_fitter_projection.fit_etas(adj, superclusters, supervars)
eta = phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init=eta_proj)
elif method == 'debug':
import phi_fitter_iterative
import phi_fitter_projection
import phi_fitter_lol
import time
fitters = {
#'rprop_init_mle': lambda: phi_fitter_iterative.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init=None),
'lol_init_mle': lambda: phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init='mle'),
'lol_init_dirichlet': lambda: phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init='dirichlet'),
'projection': lambda: phi_fitter_projection.fit_etas(adj, superclusters, supervars),
}
#fitters['lol_init_proj'] = lambda: phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init=fitters['projection']())
#fitters['lol_init_prev'] = lambda: phi_fitter_lol.fit_etas(adj, superclusters, supervars, 'rprop', iterations, parallel, eta_init=last_eta[0])
Z = util.make_ancestral_from_adj(adj)
svids = common.extract_vids(supervars)
total_reads = np.array([supervars[svid]['total_reads'] for svid in svids])
var_reads = np.array([supervars[svid]['var_reads'] for svid in svids])
omega = np.array([supervars[svid]['omega_v'] for svid in svids])
etas = {}
scores = {}
times = {}
zeros = {}
l1_dists = {}
l2_dists = {}
for name, F in fitters.items():
time_start = time.perf_counter_ns()
etas[name] = F()
time_end = time.perf_counter_ns()
phi = np.dot(Z, etas[name])
scores[name] = _calc_llh(phi, var_reads, total_reads, omega)
times[name] = (time_end - time_start)/1e6
zeros[name] = np.sum(phi == 0)
l1_dists[name] = util.lpdist(var_reads/(total_reads * omega), phi[1:], p=1)
l2_dists[name] = util.lpdist(var_reads/(total_reads * omega), phi[1:], p=2)
eta = etas['lol_init_mle']
last_eta[0] = np.copy(eta)
names = sorted(etas.keys())
sep = '\t'
if True and not hasattr(_fit_phis, 'printed_header'):
print(*names, sep=sep)
_fit_phis.printed_header = True
print(
*['%.3f' % scores[name][2] for name in names],
np.nan,
*['%.3f' % times[name] for name in names],
np.nan,
*[zeros[name] for name in names],
np.nan,
*['%.3f' % l1_dists[name] for name in names],
np.nan,
*['%.3f' % l2_dists[name] for name in names],
sep=sep,
flush=True
)
else:
raise Exception('Unknown phi fitter %s' % method)
assert np.allclose(1, np.sum(eta, axis=0))
Z = util.make_ancestral_from_adj(adj)
phi = np.dot(Z, eta)
return (phi, eta)
|
449124
|
class Calc:
"""Simple calculator."""
def __init__(self, a, b):
self.a = a
self.b = b
def do(self):
"""Perform calculation."""
return self.a + self.b
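# Usage sketch (added): Calc(2, 3).do() returns 5.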
|
449130
|
from django import template
from django.utils import dateparse
register = template.Library()
@register.filter
def mfl_bool_none_date_filter(value):
"""
Coverts None to Not Applicable
"""
if value is True:
return "Yes"
elif value is False:
return "No"
elif value is None:
return "Not Applicable"
elif dateparse.parse_datetime(str(value)):
obj = dateparse.parse_datetime(str(value))
return "{0} - {1} - {2}".format(obj.year, obj.month, obj.day)
elif dateparse.parse_date(str(value)):
obj = dateparse.parse_date(str(value))
return "{0} - {1} - {2}".format(obj.year, obj.month, obj.day)
else:
return value
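# Usage sketch (added; the template variable below is hypothetical):
#   {% load <this templatetag module> %}
#   {{ facility.is_approved|mfl_bool_none_date_filter }}        -> "Yes" / "No" / "Not Applicable"
#   {{ facility.date_established|mfl_bool_none_date_filter }}   -> e.g. "2015 - 3 - 1"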
|
449145
|
import random
# playables
class wizard (object):
hp = 100
stregth = 12
defence = 12
magic = 30
class warrior (object):
hp = 100
stregth = 30
defence = 18
magic = 10
class elf (object):
hp = 100
stregth = 20
defence = 18
magic = 18
# enemies
class goblin (object):
name = "Goblin"
hp = 60
stregth = 10
defence = 10
magic = 8
loot = random.randint(0,2)
class witch (object):
name = "Witch"
hp = 90
stregth = 15
defence = 15
magic = 20
loot = random.randint(0,2)
class orc (object):
name = "Orc"
hp = 100
stregth = 20
defence = 18
magic = 10
loot = random.randint(0,2)
def death(character):
if character.hp < 1:
print("YOU DIED!")
print(".\n.\n.\n.")
exit()
# selecting hero
def heroselect():
print("select your character: ")
selection = input("1. Wizard \n2. Warrior \n3. Elf\n")
if selection == "1":
character = wizard
print("So, you're a warrior!\nYour stats:")
print("Health: ", character.hp)
print("Health: ", character.stregth)
print("Health: ", character.defence)
print("Health: ", character.magic)
return character
elif selection == "2":
character = warrior
print("So, you're a warrior!\nYour stats:")
print("Health: ", character.hp)
print("Health: ", character.stregth)
print("Health: ", character.defence)
print("Health: ", character.magic)
return character
elif selection == "3":
character = elf
print("So, you're an Elf!\nYour stats:")
print("Health: ", character.hp)
print("Health: ", character.stregth)
print("Health: ", character.defence)
print("Health: ", character.magic)
return character
else:
print("\n Only press 1, 2 or 3\n")
heroselect()
# spawn enemy
def enemyselect(goblin, witch, orc):
enemylist = [goblin, witch, orc]
encounter = random.randint(0,2)
enemy = enemylist[encounter]
return enemy
#spawn loot
def loot():
loot = ["weapon","armor","potion"]
lootRate = random.randint(0,2)
lootDrop = loot[lootRate]
return lootDrop
def battleState():
enemy = enemyselect(goblin, witch, orc)
print("A wild", enemy.name, "has appeared!")
while enemy.hp > 0:
action = input("Take action:\n1. Weapon \n2. Magic \n3.Run!\n>> ")
### Option 1
if action == "1":
print("You swing your sword, attacking the", enemy.name, " enemy!")
hitrate = random.randint(0, 10)
if hitrate > 3:
enemy.hp = enemy.hp - character.stregth
print ("You hit the enemy!\n", enemy.name, "health: ", enemy.hp)
if enemy.hp > 0:
character.hp = character.hp - (enemy.stregth/character.hp)
print("The ", enemy.name, "enemy attacks you! You are hit!")
print("You got", character.hp, "health left.\n")
death(character)
else:
if enemy.name == "Goblin":
enemy.hp = 60
elif enemy.name == "Witch":
enemy.hp = 150
elif enemy.name == "Orc":
enemy.hp = 150
print("You have defeated the ", enemy.name, "\nIt dropped an item!")
lootDrop = loot()
print("You found a", lootDrop)
break
else:
print("You missed your hit!")
print("The ", enemy.name, "hits you directly!")
character.hp = character.hp - enemy.stregth
print("Your health stat is: ", character.hp)
death(character)
### Option 2
elif action == "2":
print("You cast your spell, attacking the", enemy.name, " enemy!")
hitrate = random.randint(0, 10)
if hitrate > 3:
enemy.hp = enemy.hp - character.magic
print ("You hit the enemy!\n", enemy.name, "health: ", enemy.hp)
if enemy.hp > 0:
character.hp = character.hp - (enemy.magic/character.hp)
print("The ", enemy.name, "enemy attacks you! You are hit!")
print("You got", character.hp, "health left.\n")
death(character)
else:
if enemy.name == "Goblin":
enemy.hp = 60
elif enemy.name == "Witch":
enemy.hp = 150
elif enemy.name == "Orc":
enemy.hp = 150
print("You have defeated the ", enemy.name, "\nIt dropped an item!")
lootDrop = loot()
print("You found a", lootDrop)
break
else:
print("You missed your hit!")
print("The ", enemy.name, "hits you directly!")
character.hp = character.hp - enemy.magic
print("Your health stat is: ", character.hp)
death(character)
### Option 3
elif action == "3":
print("You try to run")
hitrate = random.randint(0, 10)
if hitrate > 4:
print ("You got away!")
break
else:
print ("You fail to escape.")
print ("The enemy hits you directly!")
character.hp = character.hp - enemy.stregth
print("You got", character.hp, "health left.\n")
death(character)
else:
print("Please press: 1, 2 or 3 only.")
character = heroselect()
battleState()
|
449152
|
import torch
from kornia.augmentation import RandomAffine,\
RandomCrop,\
CenterCrop, \
RandomResizedCrop
from kornia.filters import GaussianBlur2d
from torch import nn
import numpy as np
import glob
import gzip
import shutil
from pathlib import Path
import os
EPS = 1e-6
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def select_at_indexes(indexes, tensor):
"""Returns the contents of ``tensor`` at the multi-dimensional integer
array ``indexes``. Leading dimensions of ``tensor`` must match the
dimensions of ``indexes``.
"""
dim = len(indexes.shape)
assert indexes.shape == tensor.shape[:dim]
num = indexes.numel()
t_flat = tensor.view((num,) + tensor.shape[dim:])
s_flat = t_flat[torch.arange(num, device=tensor.device), indexes.view(-1)]
return s_flat.view(tensor.shape[:dim] + tensor.shape[dim + 1:])
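# Example (added): for a (B, A) tensor of per-action values and a (B,) tensor of chosen
# action indexes, this picks one value per batch element:
#   q = torch.arange(6.).view(2, 3)      # [[0., 1., 2.], [3., 4., 5.]]
#   idx = torch.tensor([2, 0])
#   select_at_indexes(idx, q)            # -> tensor([2., 3.])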
def get_augmentation(augmentation, imagesize):
if isinstance(augmentation, str):
augmentation = augmentation.split("_")
transforms = []
for aug in augmentation:
if aug == "affine":
transformation = RandomAffine(5, (.14, .14), (.9, 1.1), (-5, 5))
elif aug == "rrc":
transformation = RandomResizedCrop((imagesize, imagesize), (0.8, 1))
elif aug == "blur":
transformation = GaussianBlur2d((5, 5), (1.5, 1.5))
elif aug == "shift" or aug == "crop":
transformation = nn.Sequential(nn.ReplicationPad2d(4), RandomCrop((84, 84)))
elif aug == "intensity":
transformation = Intensity(scale=0.05)
elif aug == "none":
continue
else:
raise NotImplementedError()
transforms.append(transformation)
return transforms
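# Usage sketch (added): the augmentation spec may be a list or an underscore-joined
# string, e.g. get_augmentation("shift_intensity", 84) returns
# [nn.Sequential(ReplicationPad2d(4), RandomCrop((84, 84))), Intensity(scale=0.05)];
# note that "shift"/"crop" use a hard-coded 84x84 crop regardless of imagesize.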
class Intensity(nn.Module):
def __init__(self, scale):
super().__init__()
self.scale = scale
def forward(self, x):
r = torch.randn((x.size(0), 1, 1, 1), device=x.device)
noise = 1.0 + (self.scale * r.clamp(-2.0, 2.0))
return x * noise
def maybe_transform(image, transform, p=0.8):
processed_images = transform(image)
if p >= 1:
return processed_images
else:
mask = torch.rand((processed_images.shape[0], 1, 1, 1),
device=processed_images.device)
mask = (mask < p).float()
processed_images = mask * processed_images + (1 - mask) * image
return processed_images
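# A brief usage sketch (batch shape assumed): apply the Intensity augmentation
# defined above to roughly 80% of a batch of stacked frames and leave the rest
# untouched; the output keeps the input shape.
#   >>> imgs = torch.rand(8, 4, 84, 84)
#   >>> out = maybe_transform(imgs, Intensity(scale=0.05), p=0.8)
#   >>> out.shape                                          # torch.Size([8, 4, 84, 84])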
def renormalize(tensor, first_dim=-3):
if first_dim < 0:
first_dim = len(tensor.shape) + first_dim
flat_tensor = tensor.view(*tensor.shape[:first_dim], -1)
max = torch.max(flat_tensor, first_dim, keepdim=True).values
min = torch.min(flat_tensor, first_dim, keepdim=True).values
flat_tensor = (flat_tensor - min)/(max - min)
return flat_tensor.view(*tensor.shape)
def to_categorical(value, limit=300):
value = value.float() # Avoid any fp16 shenanigans
value = value.clamp(-limit, limit)
distribution = torch.zeros(value.shape[0], (limit*2+1), device=value.device)
lower = value.floor().long() + limit
upper = value.ceil().long() + limit
upper_weight = value % 1
lower_weight = 1 - upper_weight
distribution.scatter_add_(-1, lower.unsqueeze(-1), lower_weight.unsqueeze(-1))
distribution.scatter_add_(-1, upper.unsqueeze(-1), upper_weight.unsqueeze(-1))
return distribution
def from_categorical(distribution, limit=300, logits=True):
distribution = distribution.float() # Avoid any fp16 shenanigans
if logits:
distribution = torch.softmax(distribution, -1)
num_atoms = distribution.shape[-1]
weights = torch.linspace(-limit, limit, num_atoms, device=distribution.device).float()
return distribution @ weights
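# A short worked example (values assumed): with limit=3 the support is
# [-3, ..., 3]; a scalar of 2.4 is split two-hot between the atoms for 2 and 3
# with weights 0.6 and 0.4, and from_categorical recovers it as the expectation.
#   >>> dist = to_categorical(torch.tensor([2.4]), limit=3)    # shape [1, 7]
#   >>> from_categorical(dist, limit=3, logits=False)          # tensor([2.4000])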
def extract_epoch(filename):
"""
    Get the epoch from a model save path formatted as {model_save}_{seed}_epoch_{epoch}.pt
    :param filename: Model save filename
:return: epoch (int)
"""
if "epoch" not in filename.lower():
return 0
epoch = int(filename.lower().split("epoch_")[-1].replace(".pt", ""))
return epoch
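# Doctest-style examples (filenames are hypothetical):
#   >>> extract_epoch("run_3_epoch_12.pt")   # 12
#   >>> extract_epoch("run_3.pt")            # 0 (no epoch tag in the name)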
def get_last_save(base_pattern, retry=True):
files = glob.glob(base_pattern+"*.pt")
epochs = [extract_epoch(path) for path in files]
inds = np.argsort(-np.array(epochs))
for ind in inds:
try:
print("Attempting to load {}".format(files[ind]))
state_dict = torch.load(Path(files[ind]))
epoch = epochs[ind]
return state_dict, epoch
except Exception as e:
if retry:
print("Loading failed: {}".format(e))
else:
raise e
def delete_all_but_last(base_pattern, num_to_keep=3):
files = glob.glob(base_pattern+"*.pt")
epochs = [extract_epoch(path) for path in files]
order = np.argsort(np.array(epochs))
for i in order[:-num_to_keep]:
os.remove(files[i])
print("Deleted old save {}".format(files[i]))
def save_model_fn(folder, model_save, seed, use_epoch=True, save_only_last=False):
def save_model(model, optim, epoch):
if use_epoch:
path = Path(f'{folder}/{model_save}_{seed}_epoch_{epoch}.pt')
else:
path = Path(f'{folder}/{model_save}_{seed}.pt')
torch.save({"model": model, "optim": optim}, path)
print("Saved model at {}".format(path))
if save_only_last:
delete_all_but_last(f'{folder}/{model_save}_{seed}')
return save_model
def find_weight_norm(parameters, norm_type=1.0) -> torch.Tensor:
r"""Finds the norm of an iterable of parameters.
    The norm is computed over all parameters together, as if they were
concatenated into a single vector.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor to find norms of
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].device
if norm_type == np.inf:
total_norm = max(p.data.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.data.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
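# A quick sanity check (toy tensor): the L1 norm of a 2x2 tensor of ones is 4.
#   >>> find_weight_norm([torch.ones(2, 2)], norm_type=1.0)   # tensor(4.)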
def minimal_quantile_loss(pred_values, target_values, taus, kappa=1.0):
if len(pred_values.shape) == 3:
output_shape = pred_values.shape[:2]
target_values = target_values.expand_as(pred_values)
pred_values = pred_values.flatten(0, 1)
target_values = target_values.flatten(0, 1)
else:
output_shape = pred_values.shape[:1]
if pred_values.shape[0] != taus.shape[0]:
# somebody has added states along the batch dimension,
# probably to do multiple timesteps' losses simultaneously.
# Since the standard in this codebase is to put time on dimension 1 and
# then flatten 0 and 1, we can do the same here to get the right shape.
expansion_factor = pred_values.shape[0]//taus.shape[0]
taus = taus.unsqueeze(1).expand(-1, expansion_factor, -1,).flatten(0, 1)
td_errors = pred_values.unsqueeze(-1) - target_values.unsqueeze(1)
assert not taus.requires_grad
batch_size, N, N_dash = td_errors.shape
# Calculate huber loss element-wisely.
element_wise_huber_loss = calculate_huber_loss(td_errors, kappa)
assert element_wise_huber_loss.shape == (
batch_size, N, N_dash)
# Calculate quantile huber loss element-wisely.
element_wise_quantile_huber_loss = torch.abs(
taus[..., None] - (td_errors.detach() < 0).float()
) * element_wise_huber_loss / kappa
assert element_wise_quantile_huber_loss.shape == (
batch_size, N, N_dash)
# Quantile huber loss.
batch_quantile_huber_loss = element_wise_quantile_huber_loss.sum(
dim=1).mean(dim=1, keepdim=True)
assert batch_quantile_huber_loss.shape == (batch_size, 1)
loss = batch_quantile_huber_loss.squeeze(1)
# Just use the regular loss as the error for PER, at least for now.
return loss.view(*output_shape), loss.detach().view(*output_shape)
def scalar_backup(n, returns, nonterminal, qs, discount, select_action=False, selection_values=None):
"""
:param qs: q estimates
:param n: n-step
:param nonterminal:
:param returns: Returns, already scaled by discount/nonterminal
:param discount: discount in [0, 1]
:return:
"""
if select_action:
if selection_values is None:
selection_values = qs
next_a = selection_values.mean(-1).argmax(-1)
qs = select_at_indexes(next_a, qs)
while len(returns.shape) < len(qs.shape):
returns = returns.unsqueeze(-1)
while len(nonterminal.shape) < len(qs.shape):
nonterminal = nonterminal.unsqueeze(-1)
discount = discount ** n
qs = nonterminal*qs*discount + returns
return qs
def calculate_huber_loss(td_errors, kappa=1.0):
return torch.where(
td_errors.abs() <= kappa,
0.5 * td_errors.pow(2),
kappa * (td_errors.abs() - 0.5 * kappa))
def c51_backup(n_step,
returns,
nonterminal,
target_ps,
select_action=False,
V_max=10.,
               V_min=-10.,
n_atoms=51,
discount=0.99,
selection_values=None):
z = torch.linspace(V_min, V_max, n_atoms, device=target_ps.device)
if select_action:
if selection_values is None:
selection_values = target_ps
target_qs = torch.tensordot(selection_values, z, dims=1) # [B,A]
next_a = torch.argmax(target_qs, dim=-1) # [B]
target_ps = select_at_indexes(next_a.to(target_ps.device), target_ps) # [B,P']
delta_z = (V_max - V_min) / (n_atoms - 1)
# Make 2-D tensor of contracted z_domain for each data point,
# with zeros where next value should not be added.
next_z = z * (discount ** n_step) # [P']
next_z = nonterminal.unsqueeze(-1)*next_z.unsqueeze(-2) # [B,P']
ret = returns.unsqueeze(-1) # [B,1]
num_extra_dims = len(ret.shape) - len(next_z.shape)
next_z = next_z.view(*([1]*num_extra_dims), *next_z.shape)
next_z = torch.clamp(ret + next_z, V_min, V_max) # [B,P']
z_bc = z.view(*([1]*num_extra_dims), 1, -1, 1) # [1,P,1]
next_z_bc = next_z.unsqueeze(-2) # [B,1,P']
abs_diff_on_delta = abs(next_z_bc - z_bc) / delta_z
projection_coeffs = torch.clamp(1 - abs_diff_on_delta, 0, 1) # Most 0.
# projection_coeffs is a 3-D tensor: [B,P,P']
# dim-0: independent data entries
# dim-1: base_z atoms (remains after projection)
# dim-2: next_z atoms (summed in projection)
target_ps = target_ps.unsqueeze(-2) # [B,1,P']
if not select_action and len(projection_coeffs.shape) != len(target_ps.shape):
projection_coeffs = projection_coeffs.unsqueeze(-3)
target_p = (target_ps * projection_coeffs).sum(-1) # [B,P]
target_p = torch.clamp(target_p, EPS, 1)
return target_p
class DataWriter:
def __init__(self,
save_data=True,
data_dir="/project/rrg-bengioy-ad/schwarzm/atari",
save_name="",
checkpoint_size=1000000,
game="Pong",
imagesize=(84, 84),
mmap=True):
self.save_name = save_name
self.save_data = save_data
if not self.save_data:
return
self.pointer = 0
self.checkpoint = 0
self.checkpoint_size = checkpoint_size
self.imagesize = imagesize
self.dir = Path(data_dir) / game.replace("_", " ").title().replace(" ", "")
os.makedirs(self.dir, exist_ok=True)
self.mmap = mmap
self.reset()
def reset(self):
self.pointer = 0
obs_data = np.zeros((self.checkpoint_size, *self.imagesize), dtype=np.uint8)
action_data = np.zeros((self.checkpoint_size,), dtype=np.int32)
reward_data = np.zeros((self.checkpoint_size,), dtype=np.float32)
terminal_data = np.zeros((self.checkpoint_size,), dtype=np.uint8)
self.arrays = []
self.filenames = []
for data, filetype in [(obs_data, 'observation'),
(action_data, 'action'),
(reward_data, 'reward'),
(terminal_data, 'terminal')]:
filename = Path(self.dir / f'{filetype}_{self.checkpoint}{self.save_name}.npy')
if self.mmap:
np.save(filename, data)
data_ = np.memmap(filename, mode="w+", dtype=data.dtype, shape=data.shape,)
del data
else:
data_ = data
self.arrays.append(data_)
self.filenames.append(filename)
def save(self):
for data, filename in zip(self.arrays, self.filenames):
if not self.mmap:
np.save(filename, data)
del data # Flushes memmap
with open(filename, 'rb') as f_in:
new_filename = os.path.join(self.dir, Path(os.path.basename(filename)[:-4]+".gz"))
with gzip.open(new_filename, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(filename)
def write(self, samples):
if not self.save_data:
return
self.arrays[0][self.pointer] = samples.env.observation[0, 0, -1, 0]
self.arrays[1][self.pointer] = samples.agent.action
self.arrays[2][self.pointer] = samples.env.reward
self.arrays[3][self.pointer] = samples.env.done
self.pointer += 1
if self.pointer == self.checkpoint_size:
self.checkpoint += 1
self.save()
self.reset()
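# A hedged usage sketch (directory, game and the `samples` structure are
# assumptions; `write` expects rlpyt-style samples exposing env.observation,
# agent.action, env.reward and env.done):
#   >>> writer = DataWriter(data_dir="/tmp/atari", game="Pong", checkpoint_size=1000)
#   >>> # writer.write(samples)   # called once per environment step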
def update_state_dict_compat(osd, nsd):
updated_osd = {k.replace("head.advantage", "head.goal_advantage").
replace("head.value", "head.goal_value").
replace("head.secondary_advantage_head", "head.rl_advantage").
replace("head.secondary_value_head", "head.rl_value")
: v for k, v in osd.items()}
filtered_osd = {k: v for k, v in updated_osd.items() if k in nsd}
missing_items = [k for k, v in updated_osd.items() if k not in nsd]
if len(missing_items) > 0:
print("Could not load into new model: {}".format(missing_items))
nsd.update(filtered_osd)
return nsd
def calculate_true_values(states,
goal,
distance,
gamma,
final_value,
nonterminal,
distance_scale,
reward_scale=10.,
all_to_all=False):
"""
:param states: (batch, jumps, dim)
:param goal: (batch, dim)
:param distance: distance function (state X state X scale -> R).
:param gamma: rl discount gamma in [0, 1]
:param nonterminal: 1 - done, (batch, jumps).
:return: returns: discounted sum of rewards up to t, (batch, jumps);
has shape (batch, batch, jumps) if all_to_all enabled
"""
nonterminal = nonterminal.transpose(0, 1)
if all_to_all:
states = states.unsqueeze(1)
goal = goal.unsqueeze(0)
nonterminal = nonterminal.unsqueeze(1)
goal = goal.unsqueeze(-2)
distances = distance(states, goal, distance_scale)
deltas = distances[..., 0:-1] - distances[..., 1:]
deltas = deltas*reward_scale
final_values = torch.zeros_like(deltas)
# final_values[..., -1] = final_value
# import ipdb; ipdb.set_trace()
for i in reversed(range(final_values.shape[1]-1)):
final_values[..., i] = deltas[..., i] + gamma*nonterminal[..., i]*final_values[..., i+1]
if all_to_all:
final_values = final_values.flatten(0, 1)
return final_values.transpose(0, 1)
@torch.no_grad()
def sanity_check_gcrl(states,
nonterminal,
actions,
distance,
gamma,
distance_scale,
reward_scale,
network,
window=50,
conv_goal=True
):
reps = network.encode_targets(states.flatten(2, 3))
goal_latents = (reps[1] if conv_goal else reps[0])
goal = goal_latents[window]
input_latents = reps[1].view(*reps[1].shape[:-1], -1, 7, 7)
input_latents = input_latents[:-1]
spatial_goal = goal.unsqueeze(0)
spatial_goal = spatial_goal.view(*spatial_goal.shape[:-1], -1, 7, 7).expand_as(input_latents)
pred_values = network.head_forward(input_latents.flatten(0, 1), None, None, spatial_goal.flatten(0, 1))
pred_values = pred_values.view(input_latents.shape[0], input_latents.shape[1], *pred_values.shape[1:])
actions = actions.contiguous()
pred_values = pred_values.contiguous()
pred_values = select_at_indexes(actions[:-1], pred_values)
pred_values = from_categorical(pred_values, limit=10, logits=False)
returns = calculate_true_values(goal_latents.transpose(0, 1),
goal,
distance,
gamma,
pred_values[-1],
nonterminal[:window],
distance_scale,
reward_scale)
return pred_values, returns
def discount_return_n_step(reward, done, n_step, discount, return_dest=None,
done_n_dest=None, do_truncated=False):
"""Time-major inputs, optional other dimension: [T], [T,B], etc. Computes
n-step discounted returns within the timeframe of the of given rewards. If
`do_truncated==False`, then only compute at time-steps with full n-step
future rewards are provided (i.e. not at last n-steps--output shape will
change!). Returns n-step returns as well as n-step done signals, which is
True if `done=True` at any future time before the n-step target bootstrap
would apply (bootstrap in the algo, not here)."""
rlen = reward.shape[0]
if not do_truncated:
rlen -= (n_step - 1)
return_ = torch.zeros(
(rlen,) + reward.shape[1:], dtype=reward.dtype, device=reward.device)
done_n = torch.zeros(
(rlen,) + reward.shape[1:], dtype=done.dtype, device=done.device)
return_[:] = reward[:rlen].float() # 1-step return is current reward.
done_n[:] = done[:rlen].float() # True at time t if done any time by t + n - 1
done_dtype = done.dtype
done_n = done_n.type(reward.dtype)
done = done.type(reward.dtype)
if n_step > 1:
if do_truncated:
for n in range(1, n_step):
return_[:-n] += (discount ** n) * reward[n:n + rlen] * (1 - done_n[:-n])
done_n[:-n] = torch.max(done_n[:-n], done[n:n + rlen])
else:
for n in range(1, n_step):
return_ += (discount ** n) * reward[n:n + rlen] * (1 - done_n)
done_n = torch.max(done_n, done[n:n + rlen]) # Supports tensors.
done_n = done_n.type(done_dtype)
return return_, done_n
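# A small worked example (inputs assumed): 3-step returns over 4 rewards of 1.0
# with no terminals and discount 0.9 keep only the first 2 time-steps, e.g.
# return_[0] = 1 + 0.9 + 0.81 = 2.71.
#   >>> r, d = torch.ones(4), torch.zeros(4)
#   >>> ret, done_n = discount_return_n_step(r, d, n_step=3, discount=0.9)
#   >>> ret                                                 # tensor([2.7100, 2.7100])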
|
449200
|
import collections
FieldRaw = collections.namedtuple("FieldRaw", "name value")
MediaField = collections.namedtuple("MediaField", "media port number_of_ports proto fmt")
OriginField = collections.namedtuple(
"OriginField", "username session_id session_type nettype addrtype unicast_address"
)
TimingField = collections.namedtuple("TimingField", "start_time stop_time")
RepeatTimesField = collections.namedtuple(
"RepeatTimesField", "repeat_interval active_duration offsets"
)
TimeDescription = collections.namedtuple("TimeDescription", "timing repeat_times")
MediaDescription = collections.namedtuple(
"MediaDescription",
"media media_title connection_information bandwidth_information encryption_key media_attributes",
)
ConnectionDataField = collections.namedtuple(
"ConnectionDataField", "net_type addr_type connection_address"
)
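# A hedged usage sketch (field semantics assumed to mirror SDP, RFC 4566): an
# "m=audio 49170 RTP/AVP 0" line and a "t=0 0" line could be represented as
#   >>> MediaField(media="audio", port="49170", number_of_ports=None, proto="RTP/AVP", fmt=["0"])
#   >>> TimingField(start_time="0", stop_time="0")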
|
449226
|
import json
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.views.decorators.http import require_POST
from django.views.generic import TemplateView
from djstripe.models import Price
from .stripe_gateway import stripe_gateway
@login_required
def subscriptions_index(request):
"""Show the subscription plan options."""
livemode = settings.STRIPE_LIVE_MODE
context = {
"monthly_price": Price.objects.get(
nickname=settings.ACCOUNTS_MONTHLY_PRICE_NICKNAME, livemode=livemode
),
"annual_price": Price.objects.get(
nickname=settings.ACCOUNTS_ANNUAL_PRICE_NICKNAME, livemode=livemode
),
"stripe_publishable_key": settings.STRIPE_PUBLISHABLE_KEY,
}
return render(request, "accounts/subscriptions_index.html", context)
@login_required
@require_POST
def create_checkout_session(request):
"""Create a checkout session for Stripe."""
data = json.loads(request.body)
price_id = data.get("price_id")
if not Price.objects.filter(
id=price_id, nickname__in=settings.ACCOUNTS_PRICE_NICKNAMES
).exists():
messages.add_message(
request,
messages.ERROR,
"That plan price is not available. Please contact support for help.",
)
return HttpResponseRedirect(reverse("subscriptions:index"))
session_id = stripe_gateway.create_checkout_session(price_id, request.account)
return JsonResponse({"session_id": session_id})
class SuccessView(LoginRequiredMixin, TemplateView):
"""The landing page after the user signs up for School Desk"""
template_name = "accounts/subscriptions_success.html"
class StripeCancelView(LoginRequiredMixin, TemplateView):
"""The return page when a user cancels the request to create a subscription"""
template_name = "accounts/subscriptions_stripe_cancel.html"
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context["support_email"] = settings.SUPPORT_EMAIL
return context
@login_required
@require_POST
def create_billing_portal_session(request):
"""Create a billing portal session for a customer."""
portal_url = stripe_gateway.create_billing_portal_session(request.account)
return JsonResponse({"url": portal_url})
|
449248
|
import sys,os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__))).replace('\\','/')+'/..')
from decimal import Decimal
from decimal import getcontext
from common.constants import MAX_PRECISION
from common.constants import LAMBERT_POS2_EXTENT
from common.constants import LAMBERT_POS2_SAMPLES
from common.functions import lambertPos3
from common.functions import binarySearch
from FormulaSolidityPort import optimalLog
from FormulaSolidityPort import generalLog
from FormulaSolidityPort import OPT_LOG_MAX_VAL
getcontext().prec = 100
LAMBERT_CONV_RADIUS = int(Decimal(-1).exp()*2**MAX_PRECISION)
LAMBERT_POS2_SAMPLE = LAMBERT_POS2_EXTENT*2**MAX_PRECISION//(LAMBERT_POS2_SAMPLES-1)
LAMBERT_POS2_MAXVAL = LAMBERT_CONV_RADIUS+LAMBERT_POS2_SAMPLE*(LAMBERT_POS2_SAMPLES-1)
LAMBERT_POS3_MAXVAL = binarySearch(lambertPos3,[optimalLog,generalLog,OPT_LOG_MAX_VAL,2**MAX_PRECISION])
maxLen = len(hex(max([LAMBERT_CONV_RADIUS,LAMBERT_POS2_SAMPLE,LAMBERT_POS2_MAXVAL,LAMBERT_POS3_MAXVAL])))
print(' uint256 private constant LAMBERT_CONV_RADIUS = {0:#0{1}x};'.format(LAMBERT_CONV_RADIUS,maxLen))
print(' uint256 private constant LAMBERT_POS2_SAMPLE = {0:#0{1}x};'.format(LAMBERT_POS2_SAMPLE,maxLen))
print(' uint256 private constant LAMBERT_POS2_MAXVAL = {0:#0{1}x};'.format(LAMBERT_POS2_MAXVAL,maxLen))
print(' uint256 private constant LAMBERT_POS3_MAXVAL = {0:#0{1}x};'.format(LAMBERT_POS3_MAXVAL,maxLen))
|
449303
|
import uuid
from cassandra.cqlengine import columns as cassandra_columns
from django_cassandra_engine.models import DjangoCassandraModel
class CassandraThing(DjangoCassandraModel):
id = cassandra_columns.UUID(primary_key=True, default=uuid.uuid4)
data_abstract = cassandra_columns.Text(max_length=10)
|
449308
|
from typing import Any
def __getattr__(attr: Any) -> Any: ...
# 0: return value
# ? 0: return value
|
449318
|
import os, sys
import datetime
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
import argparse
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from data.goesr import GOESL1b, L1bBand
import interpolate
def main(args):
''' Plot mesoscale given band, datetime'''
dayofyear = datetime.datetime(args.year, args.month, args.day).timetuple().tm_yday
product = args.product #'ABI-L1b-RadM'
T = args.step_size #10
checkpoint = args.checkpoint #f'../model_weights/slomo-ind-v2/Channel-{band:02g}/best.flownet.pth.tar'
geo = GOESL1b(channels=[args.band,], product=product)
files = geo.local_files(args.year, dayofyear, hour=args.hour)
files = files.xs(args.spatial, level='spatial')
l1b = L1bBand(files.values[0,0])
l1b.open_dataset()
l1b_10 = L1bBand(files.values[T,0])
l1b_10.open_dataset()
interp = interpolate.Interpolate(checkpoint)
X1 = l1b.data['Rad'].values
X2 = l1b_10.data['Rad'].values
Xt = interp.predict(X1[np.newaxis], X2[np.newaxis], 0.5)
l1b_5 = L1bBand(files.values[5,0])
l1b_5.open_dataset()
Xt_true = l1b_5.data['Rad'].values
fig, axs = plt.subplots(2,2, figsize=(12,12))
axs = np.ravel(axs)
l1b.plot(ax=axs[0])
l1b_10.plot(ax=axs[1])
axs[2].imshow(Xt)
axs[2].set_title("Predicted Frame")
axs[2].axis('off')
axs[3].imshow(((Xt-Xt_true)**2)**0.5)
axs[3].axis('off')
axs[3].set_title('Squared Error')
plt.tight_layout()
plt.savefig(f'figures/severe_event_map_{args.year}{args.month:02g}{args.day:02g}_{args.spatial}.png',
dpi=300)
plt.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint",
default="../model_weights/interp-ind/Channel-13/best.flownet.pth.tar", type=str)
parser.add_argument("--model_name", default="unet-medium", type=str)
parser.add_argument("--band", default=13, type=int)
parser.add_argument("--year", default=2017, type=int)
parser.add_argument("--day", default=10, type=int)
parser.add_argument("--month", default=9, type=int)
parser.add_argument("--hour", default=22, type=int)
parser.add_argument("--step_size", default=10, type=int)
parser.add_argument("--product", default='ABI-L1b-RadM', type=str)
parser.add_argument("--spatial", default='RadM1', type=str)
args = parser.parse_args()
main(args)
|
449327
|
import netCDF4
import numpy as np
import matplotlib.pyplot as plt
import datetime
import time
from matplotlib import gridspec
from matplotlib import pylab
########################################################################
# Boxplot Example
########################################################################
stn = '067'
yeardate = '2011'
# Comment out the URL that you are not using
# CDIP Archived Dataset URL
data_url = 'http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/archive/' + stn + 'p1/' + stn + 'p1_historic.nc'
# CDIP Realtime Dataset URL
# data_url = 'http://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/realtime/' + stn + 'p1_rt.nc'
nc = netCDF4.Dataset(data_url)
ncTime = nc.variables['sstTime'][:]
timeall = [datetime.datetime.fromtimestamp(t) for t in ncTime] # Convert ncTime variable to datetime stamps
Hs = nc.variables['waveHs']
# Create a variable of the buoy name, to use in plot title
buoyname = nc.variables['metaStationName'][:]
buoytitle = buoyname[:-40].data.tostring().decode()
# Find nearest value in numpy array to inputted value
def find_nearest(array,value):
idx = (np.abs(array-value)).argmin()
return array[idx]
# Convert human-formatted date to UNIX timestamp
def getUnixTimestamp(humanTime,dateFormat):
unixTimestamp = int(time.mktime(datetime.datetime.strptime(humanTime, dateFormat).timetuple()))
return unixTimestamp
# Create array of month numbers to cycle through to grab Hs data
months = ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11', '12']
# Create array of lists of Hs data for each month
timeindex_start = []
timeindex_end = []
monthcount = 0
for monthi in months:
startdate = months[monthcount] + "/" + "01/" + str(yeardate) # Set start and end dates of each month, using the above 'months' array
enddate = months[monthcount] + "/" + "28/" + str(yeardate) # Set the end date to Day 28, to account for February's short length
unixstart = getUnixTimestamp(startdate,"%m/%d/%Y")
nearest_date = find_nearest(ncTime, unixstart) # Find the closest unix timestamp
near_index = np.where(ncTime==nearest_date)[0][0] # Grab the index number of found date
unixend = getUnixTimestamp(enddate,"%m/%d/%Y")
future_date = find_nearest(ncTime, unixend) # Find the closest unix timestamp
future_index = np.where(ncTime==future_date)[0][0] # Grab the index number of found date
monthcount = monthcount+1
timeindex_start.append(near_index) # Append 'month start date' and 'month end date' index numbers for each month to corresponding array
timeindex_end.append(future_index)
box_data = []
i = 0
for Hsi in range(len(Hs[timeindex_start])):
monthHs = Hs[timeindex_start[i]:timeindex_end[i]]
i = i+1
box_data.append(monthHs)
means = np.asarray([(np.mean(m)) for m in box_data])
meansround = [round(k,2) for k in means] # Round each monthly mean value to 2 decimal points, for plotting
monthlengths = []
j = 0
for Hsj in range(len(Hs[timeindex_start])):
monthlenHs = len(Hs[timeindex_start[j]:timeindex_end[j]])
j = j+1
monthlengths.append(monthlenHs)
#########################
# Plot data Boxplot
#
# Create boxplot graph of month-long datasets
# Each box includes:
# Median (red line)
# 25th and 75th percentiles (top and bottom of box)
# Remaining data within 1.5L above and below (whiskers) the quartiles, where L = length (m) from 25th to 75th percentile
# Outliers (red crosses) - data beyond whiskers
# Mean position (green line) and value (green number)
# Adjust colors and labels of graphical display
# Include a second plot of a sample 'legend' boxplot
#########################
# Create overall figure and specify size, and grid to specify positions of subplots
fig = plt.figure(figsize=(12,15))
gs = gridspec.GridSpec(2,2,height_ratios=[5,1])
# Create a dataset for sample 'legend' boxplot, to go underneath actual boxplot
bp_sample2 = np.random.normal(2.5,0.5,500)
# Create two subplots - actual monthly-averaged data (top) and example 'legend' boxplot (bottom)
# Subplot of monthly-averaged boxplot data
bp = plt.subplot(gs[0,:])
bp_data = bp.boxplot(box_data) # Add 'meanlineprops' to include the above-defined properties
bp.scatter(months,means,marker="_",color='g',linewidths=2.5,s=900) # Overlay monthly means as green lines using 'scatter' function.
# Subplot to show example 'legend' boxplot below actual monthly-averaged boxplot graph
bp2 = plt.subplot(gs[1,:])
bp2_example = bp2.boxplot(bp_sample2,vert=False) # Plot horizontal example boxplot with labels
bp2.scatter(2.3,1,marker="|",color='g',linewidths=2.5,s=400)
# Add values of monthly means as text
for i, txt in enumerate(meansround):
bp.annotate(txt, (months[i],means[i]),fontsize=12,horizontalalignment='center',verticalalignment='bottom',color='g')
# Get positions of Median, Quartiles and Outliers to use in 'legend' text labels
for line in bp2_example['medians']:
xm, ym = line.get_xydata()[0] # location of Median line
for line in bp2_example['boxes']:
xb, yb = line.get_xydata()[0] # location of Box edges (Quartiles)
for line in bp2_example['whiskers']:
xw, yw = line.get_xydata()[0] # location of Whisker ends (Outliers)
# Add text labels for 'Median', Mean', '25th/75th %iles' and 'Outliers' to subplot2, to create sample 'legend' boxplot
bp2.annotate("Median",[xm,ym-0.3*ym],fontsize=14,color='r')
bp2.annotate("Mean",[2.2,0.65],fontsize=14,color='g')
bp2.annotate("25%ile",[xb-0.01*xb,yb-0.15*yb],fontsize=12)
bp2.annotate("75%ile",[xb+0.2*xb,yb-0.15*yb],fontsize=12)
bp2.annotate("Outliers",[xw+0.38*xw,yw-0.3*yw],fontsize=14,color='r')
# Set colors of box aspects for top subplot
pylab.setp(bp_data['boxes'], color='black')
pylab.setp(bp_data['whiskers'], color='black')
pylab.setp(bp_data['fliers'], color='r')
# Set colors of box aspects for bottom (sample) subplot
pylab.setp(bp2_example['boxes'], color='black')
pylab.setp(bp2_example['whiskers'], color='black')
pylab.setp(bp2_example['fliers'], color='r')
# Set Titles
plt.suptitle(buoytitle, fontsize=30, y=0.97) # Overall plot title using 'buoytitle' variable
bp.set_title("Significant Wave Height by month for " + yeardate, fontsize=20, y=1.01) # Subtitle for top plot
bp2.set_title("Sample Boxplot", fontsize=16, y=1.02) # Subtitle for bottom plot
# Set axes labels and ticks
bp.set_xticklabels(['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'],fontsize=12)
bp.set_ylabel('Significant Wave Height, Hs (m)', fontsize=20)
bp.tick_params(axis='y', which='major', labelsize=12, right='off')
bp.tick_params(axis='x', which='major', labelsize=12, top='off')
# Create a second row of x-axis labels for top subplot
newax = bp.twiny()
newax.xaxis.set_ticks_position('bottom')
newax.xaxis.set_label_position('bottom')
newax.spines['bottom'].set_position(('outward',25))
newax.set_xticklabels(monthlengths,fontsize=10)
# Plot horizontal gridlines onto top subplot
bp.grid(axis='y', which='major', color='b', linestyle='-', alpha=0.25)
# Remove tickmarks from bottom subplot
bp2.axes.get_xaxis().set_visible(False)
bp2.axes.get_yaxis().set_visible(False)
|
449342
|
from sinling.core.joiner import *
from sinling.core.stemmer import *
from sinling.core.tokenizer import *
from sinling.core.tagger import *
|
449352
|
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, BatchNormalization
from keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard
from PIL import Image
import numpy as np
import csv
# Create CNN Model
print("Creating CNN model...")
inputs = Input((60, 200, 3))
out = inputs
out = Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(out)
out = Conv2D(filters=32, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.5)(out)
out = Conv2D(filters=64, kernel_size=(3, 3), padding='same', activation='relu')(out)
out = Conv2D(filters=64, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.5)(out)
out = Conv2D(filters=128, kernel_size=(3, 3), padding='same', activation='relu')(out)
out = Conv2D(filters=128, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Dropout(0.5)(out)
out = Conv2D(filters=256, kernel_size=(3, 3), activation='relu')(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(2, 2))(out)
out = Flatten()(out)
out = Dropout(0.5)(out)
out = Dense(1, name='6digit', activation='sigmoid')(out)
model = Model(inputs=inputs, outputs=out)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
print("Reading training data...")
traincsv = open('./data/56_imitate_train_set/len_train.csv', 'r', encoding = 'utf8')
train_data = np.stack([np.array(Image.open("./data/56_imitate_train_set/" + row[0] + ".jpg"))/255.0 for row in csv.reader(traincsv)])
traincsv = open('./data/56_imitate_train_set/len_train.csv', 'r', encoding = 'utf8')
train_label = np.asarray([1 if row[1] == '6' else 0 for row in csv.reader(traincsv)])
print("Shape of train data:", train_data.shape)
print("Reading validation data...")
valicsv = open('./data/56_imitate_vali_set/len_vali.csv', 'r', encoding = 'utf8')
vali_data = np.stack([np.array(Image.open('./data/56_imitate_vali_set/' + row[0] + ".jpg"))/255.0 for row in csv.reader(valicsv)])
valicsv = open('./data/56_imitate_vali_set/len_vali.csv', 'r', encoding = 'utf8')
vali_label = np.asarray([1 if row[1] == '6' else 0 for row in csv.reader(valicsv)])
print("Shape of validation data:", vali_data.shape)
filepath="./data/model/imitate_56_model.h5"
checkpoint = ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
earlystop = EarlyStopping(monitor='val_acc', patience=10, verbose=1, mode='auto')
tensorBoard = TensorBoard(log_dir = "./logs", histogram_freq = 1)
callbacks_list = [checkpoint, earlystop, tensorBoard]
model.fit(train_data, train_label, batch_size=400, epochs=100, verbose=1, validation_data=(vali_data, vali_label), callbacks=callbacks_list)
|
449359
|
import logging
import anndata as ad
import scipy.spatial
import scipy.sparse
import numpy as np
from sklearn.preprocessing import normalize
from sklearn.decomposition import TruncatedSVD
from sklearn.neighbors import NearestNeighbors
## VIASH START
# Anything within this block will be removed by `viash` and will be
# replaced with the parameters as specified in your config.vsh.yaml.
par = {
"input_train_mod1": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod1.h5ad",
"input_train_mod2": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_mod2.h5ad",
"input_train_sol": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.train_sol.h5ad",
"input_test_mod1": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod1.h5ad",
"input_test_mod2": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.test_mod2.h5ad",
"output": "resources_test/match_modality/openproblems_bmmc_multiome_starter/openproblems_bmmc_multiome_starter.prediction.h5ad",
"n_svd": 100,
}
## VIASH END
logging.basicConfig(level=logging.INFO)
logging.info("Load datasets")
input_train_mod1 = ad.read_h5ad(par["input_train_mod1"])
input_train_mod2 = ad.read_h5ad(par["input_train_mod2"])
# input_train_sol = ad.read_h5ad(par["input_train_sol"])
input_test_mod1 = ad.read_h5ad(par["input_test_mod1"])
input_test_mod2 = ad.read_h5ad(par["input_test_mod2"])
# This method runs PCA on each modality individually, then uses the Procrustes method to identify
# a linear transform that best superimposes the points from modality 1 onto modality 2.
# concatenate train and test data
mod1 = ad.concat(
{
"train": input_train_mod1,
"test": input_test_mod1
},
index_unique="-",
label="group"
)
mod2 = ad.concat(
{
"train": input_train_mod2,
"test": input_test_mod2
},
index_unique="-",
label="group"
)
# Create helper views to access the test data later
mod1te = mod1[mod1.obs["group"] == "test", :]
mod2te = mod2[mod2.obs["group"] == "test", :]
logging.info("Running PCA")
n_svd = min(par["n_svd"], mod1.n_obs, mod2.n_obs, mod1.n_vars, mod2.n_vars)
# Use TruncatedSVD for fast decomposition of the data
mod1.obsm["X_pca"] = TruncatedSVD(n_svd).fit_transform(mod1.X)
mod2.obsm["X_pca"] = TruncatedSVD(n_svd).fit_transform(mod2.X)
logging.info("Running Procrustes Alignment")
# This function takes in two matrices of points A and B, standardizes both, and applies a linear to
# matrix B to minimize the disparity measured as the sum of the squares of the pointwise distances
# between the two input datasets
mod1.obsm["X_pro"], mod2.obsm["X_pro"], disparity = scipy.spatial.procrustes(
mod1.obsm["X_pca"],
mod2.obsm["X_pca"],
)
logging.info("> Disparity value is: %0.3f" % disparity)
logging.info("Perform nearest neighbors")
# To get the matching matrix, for each point in the transformed mod2_test we take its
# n_neighbors (up to 1000) nearest neighbors among the transformed mod1_test points
n_neighbors = min(1000, mod1te.n_obs, mod1te.n_vars, mod2te.n_obs, mod2te.n_vars)
nn = NearestNeighbors(n_neighbors=n_neighbors).fit(mod1te.obsm["X_pro"])
distances, indices = nn.kneighbors(X=mod2te.obsm["X_pro"])
logging.info("Create pairing matrix")
# Translate the neighborhood assignments to a pairing matrix that is (n_obs, n_obs)
# NOTE: `pairing_matrix` must have NO MORE than 1000*n_obs non-zero entries for fast metric computation
ind_i = np.tile(np.arange(mod1te.n_obs), (n_neighbors, 1)).T.flatten()
ind_j = indices.flatten()
ind_dist = distances.flatten()
ind_x = 2 * max(ind_dist) - ind_dist
pairing_matrix = scipy.sparse.csr_matrix(
(ind_x, (ind_i, ind_j)),
shape=(input_test_mod1.n_obs, input_test_mod2.n_obs)
)
# row normalise
prob_matrix = normalize(pairing_matrix, norm="l1")
print("Write prediction output")
prediction = ad.AnnData(
X=prob_matrix,
uns={
"dataset_id": input_train_mod1.uns["dataset_id"],
"method_id": "baseline_procrustes_knn"
}
)
prediction.write_h5ad(par["output"])
|
449374
|
from django.contrib.auth import get_user_model
from django.core.management import call_command
from django.test import TestCase
from accounts.models import Major, School, Student
class UpdateMajorsTestCase(TestCase):
def test_update_academics(self):
call_command("update_academics")
self.assertNotEquals(0, Major.objects.all().count())
self.assertNotEquals(0, School.objects.all().count())
class PopulateUsersTestCase(TestCase):
def test_populate_users(self):
call_command("populate_users")
self.assertTrue(get_user_model().objects.all().count() > 0)
self.assertTrue(Major.objects.all().count() > 0)
self.assertTrue(Student.objects.all().count() > 0)
def test_populate_twice(self):
call_command("populate_users")
call_command("populate_users")
self.assertEqual(get_user_model().objects.all().count(), 10)
|
449378
|
import numpy as np
import sqlalchemy as sa
import pytest
from . import data, models
@pytest.fixture
def engine(postgresql):
return sa.create_engine('postgresql://', poolclass=sa.pool.StaticPool,
creator=lambda: postgresql)
@pytest.fixture
def session(engine):
with sa.orm.Session(engine) as session:
yield session
@pytest.fixture
def cursor(session):
return session.connection().connection.cursor()
@pytest.fixture
def tables(engine):
models.Base.metadata.create_all(engine)
@pytest.fixture
def random_galaxies(cursor, tables):
return data.get_random_galaxies(40_000, cursor)
@pytest.fixture(params=np.geomspace(1, 10_000, 10, dtype=int).tolist())
def random_fields(cursor, tables, request):
return data.get_random_fields(request.param, cursor)
@pytest.fixture
def random_sky_map(cursor, tables):
return data.get_random_sky_map(20_000, cursor)
|
449399
|
import os
import sys
import semver
from github import Github
PLUGIN_TYPES = "[vim | nvim | sublime]"
ORG_NAME = "typeintandem"
VIM_REPO = "https://github.com/{}/vim".format(ORG_NAME)
NVIM_REPO = "https://github.com/{}/nvim".format(ORG_NAME)
SUBLIME_REPO = "https://github.com/{}/sublime".format(ORG_NAME)
def error(msg):
print("ERROR: {}.".format(msg))
exit(1)
def main():
if len(sys.argv) < 2:
error("Pass in plugin type as the first argument. "
"Choose from: {}".format(PLUGIN_TYPES))
elif len(sys.argv) < 3:
error("You must also pass in repository SHA as the argument")
repo_type = sys.argv[1].lower()
if repo_type == "sublime":
repo_url = SUBLIME_REPO
elif repo_type == "vim":
repo_url = VIM_REPO
elif repo_type == "nvim":
repo_url = NVIM_REPO
else:
error("Please pass in one of {} as the plugin type"
.format(PLUGIN_TYPES))
master_SHA = sys.argv[2]
bot_username = os.environ.get("RELEASE_BOT_USERNAME")
bot_password = os.environ.get("RELEASE_BOT_PASSWORD")
g = Github(bot_username, bot_password)
release_repo = None
for repo in g.get_organization(ORG_NAME).get_repos():
if repo.html_url == repo_url:
release_repo = repo
break
if release_repo is None:
error("{} repo not found".format(repo_type))
tags = release_repo.get_tags()
last_tag = None
for t in tags:
last_tag = t
break
if (last_tag is None):
last_tag = '0.0.0'
else:
if last_tag.commit.sha == master_SHA:
error("Cannot create release with same SHA")
last_tag = last_tag.name
tag = semver.bump_minor(last_tag)
release_repo.create_git_tag_and_release(
tag,
"Release version {}".format(tag),
"v{}".format(tag),
"Release version {}".format(tag),
master_SHA,
"commit",
)
print("Succesfully created release v{}".format(tag))
if __name__ == "__main__":
main()
|
449451
|
class Formatter:
env = None
@classmethod
def get_format(cls, path):
i = path.find("{{")
if i > 0:
i = path.rfind("/", 0, i)
if i > 0:
return [path[0:i], path[i+1:]]
return [path, None]
@classmethod
def format(cls, f, value):
env = cls.get_environment()
return env.from_string(f).render(value if isinstance(value, dict) else {"x": value})
@classmethod
def get_environment(cls):
if cls.env is None:
from jinja2 import Environment # pip install jinja2
import filters
cls.env = Environment()
filters.register_filters(cls.env)
return cls.env
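# A brief usage sketch (template path is hypothetical): get_format() splits a
# path at the last "/" before the first "{{", separating the static prefix from
# the Jinja2 template part; format() renders a template against a value, which
# is exposed as "x" when it is not a dict (rendering needs jinja2 and the local
# `filters` module).
#   >>> Formatter.get_format("data/{{ x }}/file.txt")   # ['data', '{{ x }}/file.txt']
#   >>> Formatter.format("{{ x | upper }}", "abc")       # 'ABC'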
|
449475
|
from PySide import QtGui, QtCore
import shiboken
import maya.OpenMayaUI as omui
mainWin = None
class TestUI(QtGui.QMainWindow):
def __init__(self, parent):
super(TestUI, self).__init__(parent=parent)
self.centralWidget = QtGui.QWidget()
self.setCentralWidget(self.centralWidget)
self.layout = QtGui.QVBoxLayout()
self.centralWidget.setLayout(self.layout)
self.lineEdit = QtGui.QLineEdit('defaultText')
self.layout.addWidget(self.lineEdit)
self.font = self.lineEdit.font()
self.tableWidget = QtGui.QTableWidget()
self.layout.addWidget(self.tableWidget)
self.horHeader = self.tableWidget.horizontalHeader()
self.button = QtGui.QPushButton('Click Me')
self.layout.addWidget(self.button)
self.button.connect(self.button, QtCore.SIGNAL('clicked()'), self.onButtonClicked)
print 'try to access variables right after creation:'
self.onButtonClicked()
print
def onButtonClicked(self):
print 'QTableWidget -> QHeaderView: runtime: sortSection =', self.tableWidget.horizontalHeader().sortIndicatorSection()
try:
print 'QTableWidget -> QHeaderView: cached: sortSection =', self.horHeader.sortIndicatorSection()
except Exception as e:
print 'C++ object is dead. Exception:', str(e)
print 'QLineEdit -> QFont: runtime: font is italic =', self.lineEdit.font().italic()
try:
print 'QLineEdit -> QFont: cached: font is italic =', self.font.italic()
except Exception as e:
print 'C++ object is dead. Exception:', str(e)
def run():
global mainWin
if not mainWin:
ptr = omui.MQtUtil.mainWindow()
if ptr:
mayaQMainWindow = shiboken.wrapInstance(long(ptr), QtGui.QMainWindow)
else:
raise Exception('Cannot find Maya main window.')
mainWin = TestUI(parent=mayaQMainWindow)
# mainWin = TestUI(parent=None)
mainWin.show()
mainWin.raise_()
|
449477
|
import os
from functools import reduce
def get_directory_structure(rootdir):
"""
Creates a nested dictionary that represents the folder structure of rootdir
"""
dir = {}
rootdir = rootdir.rstrip(os.sep)
start = rootdir.rfind(os.sep) + 1
for path, dirs, files in os.walk(rootdir):
folders = path[start:].split(os.sep)
subdir = dict.fromkeys(files)
parent = reduce(dict.get, folders[:-1], dir)
parent[folders[-1]] = subdir
return dir
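# A minimal usage sketch (hypothetical tree): for a directory "proj" containing
# "a.txt" and a subfolder "sub" with "b.txt", the result is
# {'proj': {'a.txt': None, 'sub': {'b.txt': None}}}.
if __name__ == "__main__":
    from pprint import pprint
    pprint(get_directory_structure("."))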
|
449478
|
import unittest
import catalogue
import scrubadub
import scrubadub.detectors.catalogue
class DetectorConfigTestCase(unittest.TestCase):
def test_register_detector(self):
class NewDetector(scrubadub.detectors.Detector):
name = 'new_detector'
scrubadub.detectors.catalogue.register_detector(NewDetector, autoload=False)
self.assertTrue(NewDetector.name in scrubadub.detectors.catalogue.detector_catalogue)
self.assertFalse(NewDetector.autoload)
self.assertEqual(scrubadub.detectors.catalogue.detector_catalogue.get(NewDetector.name), NewDetector)
scrubadub.detectors.catalogue.remove_detector(NewDetector)
with self.assertRaises(catalogue.RegistryError):
scrubadub.detectors.catalogue.detector_catalogue.get(NewDetector.name)
scrubadub.detectors.catalogue.register_detector(NewDetector, autoload=True)
self.assertTrue(NewDetector.name in scrubadub.detectors.catalogue.detector_catalogue)
self.assertTrue(NewDetector.autoload)
self.assertEqual(scrubadub.detectors.catalogue.detector_catalogue.get(NewDetector.name), NewDetector)
scrubadub.detectors.catalogue.remove_detector(NewDetector)
|
449704
|
import json
import time
# print("load============================================================")
def doload(file, count):
with open(file, "rb") as f:
str = f.read()
ti = time.time()
for i in range(count):
json.loads(str)
print("{}, seconds: {}".format(file, time.time() - ti))
# doload("test_float.json", 30)
# doload("test_int.json", 30)
# doload("test_string.json", 90)
# doload("test_string2.json", 200)
# doload("twitter.json", 60)
# doload("citm_catalog.json", 30)
doload("test_word.json", 100)
# doload("test_space.json", 200)
# doload("test_utf8escape.json", 100)
print("dump============================================================")
def dodump(file, count):
with open(file, "rb") as f:
obj = json.loads(f.read())
ti = time.time()
for i in range(count):
json.dumps(obj)
print("{}, seconds: {}".format(file, time.time() - ti))
# dodump("test_float.json", 10)
# dodump("test_int.json", 30)
# dodump("test_string.json", 90)
# dodump("twitter.json", 60)
# dodump("citm_catalog.json", 30)
# dodump("test_word.json", 100)
|
449705
|
import numpy as np
import anndata as ad
def remove_values_from_list(the_list, val):
return([value for value in the_list if value != val])
def imputation_feature(adata, variable_to_input, imputation_variable, var_category=None):
"""
impute the missing methylation feature according to the clusters of interest.
"""
if var_category:
        col_index = adata.var[var_category].tolist().index(variable_to_input)
else:
col_index = adata.var_names.tolist().index(variable_to_input)
X = adata.X[:, [col_index]]
guess_imputed_value = np.median(X)
# get the new imputer value per louvain cluster:
new_imputed = {}
for cluster in list(set(adata.obs[imputation_variable])):
tmp_adata = adata[adata.obs[imputation_variable]==cluster,:].copy()
tmp_X = tmp_adata.X[:, [col_index]]
tmp_X = remove_values_from_list(tmp_X, guess_imputed_value)
new_imputed[cluster] = np.mean(tmp_X)
annot = []
annot2 = []
index = 0
for cluster in adata.obs[imputation_variable]:
if X[index] == guess_imputed_value:
annot.append(new_imputed[cluster])
annot2.append(np.nan)
else:
annot.append(X[index][0])
annot2.append(X[index][0])
index +=1
adata.obs[variable_to_input+'_no_input'] = annot2
adata.obs[variable_to_input+'_imputed'] = annot
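# A hedged usage sketch (column and cluster names are assumptions): fill the
# placeholder (median) entries of one methylation feature with the per-cluster
# mean of the observed values, grouped by e.g. a Louvain clustering column.
#   >>> imputation_feature(adata, "gene_body_mCH", "louvain")
#   >>> adata.obs["gene_body_mCH_imputed"]    # observed values with gaps filled per cluster
#   >>> adata.obs["gene_body_mCH_no_input"]   # observed values with gaps left as NaN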
|
449706
|
from ...models import Topology
from . import BaseSaveSnapshotCommand
class Command(BaseSaveSnapshotCommand):
topology_model = Topology
|
449710
|
import os
import os.path as osp
from typing import Optional, Tuple
import torch
from torch import Tensor
from pyg_lib import get_home_dir
def get_sparse_matrix(
group: str,
name: str,
dtype: torch.dtype = torch.long,
device: Optional[torch.device] = None,
) -> Tuple[Tensor, Tensor]:
r"""Returns a sparse matrix :obj:`(rowptr, col)` from the
`Suite Sparse Matrix Collection <https://sparse.tamu.edu>`_.
Args:
group (string): The group of the sparse matrix.
name (string): The name of the sparse matrix.
dtype (torch.dtype, optional): The desired data type of returned
tensors. (default: :obj:`torch.long`)
device (torch.device, optional): the desired device of returned
tensors. (default: :obj:`None`)
Returns:
(torch.Tensor, torch.Tensor): Compressed source node indices and target
node indices of the sparse matrix.
"""
path = osp.join(get_home_dir(), f'{name}.mat')
if not osp.exists(path):
os.makedirs(get_home_dir(), exist_ok=True)
        import urllib.request
url = f'https://sparse.tamu.edu/mat/{group}/{name}.mat'
print(f'Downloading {url}...', end='')
data = urllib.request.urlopen(url)
with open(path, 'wb') as f:
f.write(data.read())
print(' Done!')
from scipy.io import loadmat
mat = loadmat(path)['Problem'][0][0][2].tocsr()
rowptr = torch.from_numpy(mat.indptr).to(device, dtype)
col = torch.from_numpy(mat.indices).to(device, dtype)
return rowptr, col
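# A minimal usage sketch (the group/name pair is an assumption; substitute any
# valid Suite Sparse Matrix Collection entry; downloading requires network
# access and scipy):
#   >>> rowptr, col = get_sparse_matrix('DIMACS10', 'citationCiteseer')
#   >>> rowptr.numel() - 1   # number of rows of the CSR matrix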
|
449758
|
import sys; sys.path.insert(0, sys.argv[1])
import mpi4py
if len(sys.argv) > 2:
lfn = "runtests-mpi4py-child"
mpe = sys.argv[2] == 'mpe'
vt = sys.argv[2] == 'vt'
if mpe: mpi4py.profile('mpe', logfile=lfn)
if vt: mpi4py.profile('vt', logfile=lfn)
from mpi4py import MPI
parent = MPI.Comm.Get_parent()
parent.Barrier()
parent.Disconnect()
assert parent == MPI.COMM_NULL
parent = MPI.Comm.Get_parent()
assert parent == MPI.COMM_NULL
|
449763
|
import os
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='nuscenes')
parser.add_argument('--step', type=int, default='-1')
parser.add_argument('--metric', type=str, default='label_aps')
parser.add_argument('--detailed', action='store_true')
args = parser.parse_args()
dataset = args.dataset
base_dir = f'models/{dataset}'
classes = [
# # official class order
# 'car', 'truck', 'bus', 'trailer', 'construction_vehicle', 'pedestrian', 'motorcycle', 'bicycle', 'traffic_cone', 'barrier'
# # sorted by percentage
'car', 'pedestrian', 'barrier', 'traffic_cone', 'truck', 'bus', 'trailer', 'construction_vehicle', 'motorcycle', 'bicycle'
# 'car', 'car[vehicle.moving]', 'car[vehicle.stopped]', 'car[vehicle.parked]',
# 'pedestrian', 'pedestrian[pedestrian.moving]', 'pedestrian[pedestrian.sitting_lying_down]', 'pedestrian[pedestrian.standing]',
# 'barrier',
# 'traffic_cone',
# 'truck', 'truck[vehicle.moving]', 'truck[vehicle.stopped]', 'truck[vehicle.parked]',
# 'bus', 'bus[vehicle.moving]', 'bus[vehicle.stopped]', 'bus[vehicle.parked]',
# 'trailer', 'trailer[vehicle.moving]', 'trailer[vehicle.stopped]', 'trailer[vehicle.parked]',
# 'construction_vehicle', 'construction_vehicle[vehicle.moving]', 'construction_vehicle[vehicle.stopped]', 'construction_vehicle[vehicle.parked]',
# 'motorcycle', 'motorcycle[cycle.with_rider]', 'motorcycle[cycle.without_rider]',
# 'bicycle', 'bicycle[cycle.with_rider]', 'bicycle[cycle.without_rider]'
]
if args.dataset == 'nuscenes':
methods = [
# 'all_pp_mhead_nodbs_cont_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp0_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp2_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp4_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp6_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp8_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp0_nots_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp2_nots_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp4_nots_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp6_nots_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp8_nots_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_nots_d8_ep50_ev5',
'all_pp_mhead_d8_ep50_ev5',
'all_pp_mhead_d2',
'all_pp_mhead_swpdb_d8_ep50_ev5.bak',
'all_pp_mhead_swpdb_d8_ep50_ev5',
'all_pp_mhead_opn_d8_ep50_ev5',
'all_pp_mhead_opn_d2_ep50_ev5',
'all_pp_mhead_vpn_swpdb_d8_ep50_ev5.bak',
'all_pp_mhead_vpn_swpdb_d8_ep50_ev5',
'all_pp_mhead_vpn_swpdb_d2_ep50_ev5',
'all_pp_mhead_vpn_swpdb_raystop_d8_ep50_ev5',
'all_pp_mhead_vpn_swpdb_cthruwall_d8_ep50_ev5',
'all_pp_mhead_vpn_swpdb_learning_d8_ep50_ev5',
'all_pp_mhead_vpn_swpdb_learning_d2_ep50_ev5',
'all_pp_mhead_vpn_swpdb_learning_d1_ep50_ev5',
# 'all_pp_mhead_swpdb_d2_ep50_ev5.bak',
# 'all_pp_mhead_d2',
# 'all_pp_mhead_occ_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_vis_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_opn_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_aug_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_vfn_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_vfn_vpnonly_nodbs_d8_ep50_ev5',
# 'all_pp_mhead_nodbs_swp0_d8_ep50_ev5',
# 'all_pp_mhead_vfn_vpnonly_nodbs_swp0_d8_ep50_ev5',
# 'all_pp_mhead_moredbs_d8_ep50_ev5',
# 'all_pp_mhead_moredbs_swpdb_d8_ep50_ev5.bak',
# 'all_pp_mhead_moredbs_swpdb_d8_ep50_ev5',
# 'all_pp_mhead_vpn_moredbs_swpdb_d8_ep50_ev5.bak',
# 'all_pp_mhead_vpn_moredbs_swpdb_d8_ep50_ev5',
# 'all_pp_mhead_vpn_moredbs_swpdb_d2_ep50_ev5',
# 'all_pp_mhead_vpn_moredbs_swpdb_raystop_d8_ep50_ev5',
# 'all_pp_mhead_moredbs_swpdb_d2_ep50_ev5.bak',
# 'all_pp_mhead_moredbs_d2_ep50_ev5',
# 'all_fhd', 'all_pp_lowa', 'all_pp_mida', 'all_pp_largea',
# 'all_megvii_d8_ep20_ev2', 'all_megvii_d8_ep50_ev5', 'all_megvii_d2_ep20_ev2'
# 'megvii',
# 'pointpillars',
# 'mapillary',
]
names = [
# 'pp+10-contT',
# 'pp+0',
# 'pp+2',
# 'pp+4',
# 'pp+6',
# 'pp+8',
# 'pp+10',
# 'pp+0-NoT',
# 'pp+2-NoT',
# 'pp+4-NoT',
# 'pp+6-NoT',
# 'pp+8-NoT',
# 'pp+10-NoT',
'pp w/ aug',
'pp w/ aug d2',
'pp w/ swpaug old',
'pp w/ swpaug',
'pp w/ opn aug',
'pp w/ opn aug d2',
'pp w/ vpn swpaug old',
'pp w/ vpn swpaug',
'pp w/ vpn swpaug d2',
'pp w/ vpn swpaug raystop',
'pp w/ vpn swpaug cthruwall',
'pp w/ vpn swpaug learning',
'pp w/ vpn swpaug learning d2',
'pp w/ vpn swpaug learning d1',
# 'pp w/ swpaug d2 old',
# 'pp w/ aug d2',
# 'pp + early',
# 'pp + late',
# 'pp + late-opn',
# 'pp + occ-aug',
# 'pp + late-vfn',
# 'pp + vfn-vonly',
# 'pp w/ more aug',
# 'pp w/ more swpaug old',
# 'pp w/ more swpaug',
# 'pp w/ vpn more swpaug old',
# 'pp w/ vpn more swpaug',
# 'pp w/ vpn more swpaug d2',
# 'pp w/ vpn more swpaug raystop',
# 'pp w/ more swpaug d2 old',
# 'pp (1f)',
# 'pp + vfn-vonly (1f)',
# 'megvii',
# 'pointpillars',
# 'mapillary',
]
elif args.dataset == 'kitti':
methods = [
'all8_fhd', 'all8_fhd_gt_fgm'
]
cache = {}
# for method in os.listdir(base_dir):
for method in methods:
res_dir = f'{base_dir}/{method}/results'
if args.step == -1: # use the final checkpoint
all_steps = [
int(d.split('_')[1]) for d in os.listdir(res_dir)
]
step = max(all_steps)
else:
step = args.step
print(res_dir, step)
res_file = f'{res_dir}/step_{step}/metrics_summary.json'
if os.path.exists(res_file):
with open(res_file, 'r') as f:
summary = json.load(f)
cache[method] = summary
metric = args.metric
if args.detailed:
for cls in classes:
print('{:16}\t{:5}\t{:5}\t{:5}\t{:5}\t{:5}'.format(
            cls, 0.5, 1.0, 2.0, 4.0, 'avg'
))
# for method in cache:
for method in methods:
n = cache[method][metric][cls]
if metric == 'label_aps':
print('{:16}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.3f}'.format(
method, n['0.5'], n['1.0'], n['2.0'], n['4.0'], sum(n.values())/len(n)
))
delim = ''
print('{:16}\t'.format('method'), end=delim)
for cls in classes:
print('{:5}\t'.format(cls[:5]), end=delim)
print('{:5}'.format('avg'))
for name, method in zip(names, methods):
print('{:16}\t'.format(name), end=delim)
APs = []
for cls in classes:
n = cache[method][metric][cls]
AP = sum(n.values())/len(n)
APs.append(AP)
print('{:.3f}\t'.format(AP), end=delim)
mAP = sum(APs)/len(APs)
print('{:.3f}'.format(mAP))
|
449785
|
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
def log_synonym_existed(value):
logger.info("Collection already has a value '{}'. Skipped".format(value.encode('utf-8')))
def log_synonym_for_entity_does_not_exist(entity_value, end_type, raise_exception):
message = "'{s}' for end {e} does not exist in read-only SynonymsCollection".format(
s=entity_value,
e=end_type)
if raise_exception:
raise Exception(message)
else:
logger.info(message)
def log_opinion_already_exist(opinion, raise_exception, display_log):
message = "'{s}->{t}' already exists in collection".format(s=opinion.SourceValue,
t=opinion.TargetValue).encode('utf-8')
if raise_exception:
raise Exception(message)
elif display_log:
logger.info(message + ' [REJECTED]')
|
449794
|
import sys
import os
import importlib
import numpy as np
import torch
import torch.nn as nn
from core.classificationnet import ClassificationNet
from core.utils import build_backbone_info, build_imagedataloaders
from core.utils import save_checkpoint, load_checkpoint, save_val_record
from core.utils import build_optimizers, build_schedulers
from algorithms import train_epoch, test_epoch, ModelCheckpoint
def run_trainval(model, train_type, dataset, max_epoch, device, checkpoint_dir,
train_loader, val_loader, optimizers, schedulers, save_opt):
title_str = '== TRAINVAL {} on {} =='.format(train_type, dataset)
bound_str = '=' * len(title_str)
print(bound_str + '\n' + title_str + '\n' + bound_str)
print('Checkpoint Directory: {}'.format(checkpoint_dir))
output_dir, inner_chkpt = os.path.split(checkpoint_dir)
model_checkpoint = ModelCheckpoint(-1, checkpoint_dir, save_opt, max_epoch)
for epoch_idx in range(1, max_epoch+1):
train_loss, train_acc = train_epoch(
model, device, train_loader, optimizers, epoch_idx)
val_loss, val_acc = test_epoch(
model, device, val_loader, epoch_idx)
model_checkpoint(val_acc, epoch_idx, model)
schedulers.step()
return
def main(*args, **kwargs):
# ---------------------------------
# Loading the config
# ---------------------------------
config_module = importlib.import_module('configs.'+sys.argv[1])
args = config_module.args
print(args)
# ---------------------------------
# General settings
# ---------------------------------
device = 'cuda'
torch.manual_seed(args.rng_seed)
torch.cuda.manual_seed(args.rng_seed)
torch.cuda.manual_seed_all(args.rng_seed)
np.random.seed(args.rng_seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
assert(args.train_type in ['baseline', 'finetune'])
assert(args.save_opt in ['best', 'last'])
# ---------------------------------
# Dataset settings
# ---------------------------------
image_size = args.image_size
batch_size = args.batch_size
padding = args.padding
transform_name = args.transform_name
# ---------------------------------
# Optimizer and Scheduler settings
#----------------------------------
param_types = args.param_types
max_epoch = args.max_epoch
optimizer_infos = args.optimizer_infos
scheduler_infos = args.scheduler_infos
# ---------------------------------
# Backbone settings
# ---------------------------------
backbone_info = build_backbone_info(args.backbone, 'standard', image_size)
# ---------------------------------
# Method settings
# ---------------------------------
experiment_dir = 'CHECKPOINTS/Individual/{}/{}/{}'.format(
args.exp_name, args.backbone, args.dataset)
if args.pretrain != '':
assert(args.train_type != 'baseline'), 'Cannot use pretrain in baseline train_type'
print('Load from the pretrained model!')
model, _ = load_checkpoint(args.pretrain)
else:
assert(args.train_type != 'finetune'), 'Cannot use finetune train_type without pretrain'
model = ClassificationNet(backbone_info, args.num_classes)
# ---------------------------------
# Build the parallel model
# ---------------------------------
model = nn.DataParallel(model.to(device))
# ---------------------------------
# Run trainval or evaluate
# ---------------------------------
# Build the train and validation dataloaders
train_loader, val_loader = build_imagedataloaders(
'trainval', os.path.join(args.exp_name, args.dataset), transform_name,
image_size, batch_size, padding, args.save_opt, args.workers)
# Get the checkpoint directory name
inner_chkpt = args.train_type + args.chkpt_postfix
checkpoint_dir = os.path.join(experiment_dir, inner_chkpt)
# Get the optimizers and schedulers
optimizers = build_optimizers(model.module, param_types, optimizer_infos)
schedulers = build_schedulers(optimizers, scheduler_infos)
# Run training and validation loops
run_trainval(
model, args.train_type, args.dataset, max_epoch, device, checkpoint_dir,
train_loader, val_loader, optimizers, schedulers, args.save_opt)
return
if __name__ == '__main__':
main()
|
449822
|
from .scroll_frame import ScrollFrame
from .label import Label
from .button import Button
from .box import Box
from .drop_target import DropTarget
from .seperator import Sep
from .widget import Widget
from .splitpane import SplitPane
from .checkbox import CheckBox
from .progressbar import ProgressBar
from .spinbox import SpinBox
from .sheet import Sheet
from .radiobutton import RadioButton
from .notebook import Notebook
from .calendar import CalendarWidget
from .imageviewer import ImageViewer
from .tree import TreeDisplay
from .htmlview import HTMLLabel, HTMLScrolledText, HTMLText
__all__ = [
"Widget", "SplitPane", "Label",
"Box", "ScrollFrame", "Button", "DropTarget", "CalendarWidget",
"Sep", "CheckBox", "ProgressBar", "SpinBox", "Sheet", "RadioButton", "Notebook",
"ImageViewer", "TreeDisplay", "HTMLLabel", "HTMLScrolledText", "HTMLText"
]
try:
from .input import Input, FileInput
from .combobox import ComboBox
from .textarea import TextArea
from .scale import Scale
__all__ = __all__ + [ "FileInput", "Input", "ComboBox", "TextArea", "Scale" ]
except ImportError:
# ttkwidgets not installed
pass
|
449825
|
import asyncio
import os
from FIREX.utils import admin_cmd
from userbot import ALIVE_NAME, bot
from userbot.cmdhelp import CmdHelp
eviral = str(ALIVE_NAME) if ALIVE_NAME else "Du"
running_processes: dict = {}
@bot.on(admin_cmd(pattern="term(?: |$|\n)([\s\S]*)"))
async def dc(event):
await event.edit(f"{eviral}: Running Terminal.....")
message = str(event.chat_id) + ":" + str(event.message.id)
if running_processes.get(message, False):
await event.edit("A process for this event is already running!")
return
cmd = event.pattern_match.group(1).strip()
if not cmd:
await event.edit(" Give a command or use .help terminal.")
return
if cmd in ("userbot.session", "env", "printenv"):
return await event.edit(f"{eviral}: Privacy Error, This command not permitted")
process = await asyncio.create_subprocess_shell(
cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
)
running_processes.update({message: process})
stdout, stderr = await process.communicate()
not_killed = running_processes.get(message, False)
if not_killed:
del running_processes[message]
text = f"Terminal Command: {cmd}\nReturn code: {process.returncode}\n\n"
if stdout:
text += "\n[stdout]\n" + stdout.decode("UTF-8").strip() + "\n"
if stderr:
text += "\n[stderr]\n" + stderr.decode("UTF-8").strip() + "\n"
if stdout or stderr:
if not len(text) > 4096:
return await event.edit(text)
output = open("term.txt", "w+")
output.write(text)
output.close()
await event.client.send_file(
event.chat_id,
"term.txt",
reply_to=event.id,
caption=f"{eviral}: Output too large, sending as file",
)
os.remove("term.txt")
return
CmdHelp("terminal").add_command("term", None, "Run a shell command in the terminal").add()
|
449868
|
import FWCore.ParameterSet.Config as cms
lsNumberFilter = cms.EDFilter("LSNumberFilter",
minLS = cms.untracked.uint32(21)
)
|
449899
|
from math import factorial
while True:
try:
num = int(input('Enter a positive integer: '))
print(f'{num}! = {factorial(num)}')
break
except ValueError:
print('Not a positive integer, try again')
|
449907
|
from migen import *
from migen.build.generic_platform import *
from migen.build.platforms.versaecp55g import Platform
from migen.genlib.io import CRG
from migen.genlib.cdc import MultiReg
from microscope import *
from ..gateware.platform.lattice_ecp5 import *
from ..gateware.serdes import *
from ..gateware.phy import *
from ..vendor.pads import *
from ..vendor.uart import *
class LTSSMTestbench(Module):
def __init__(self, **kwargs):
self.platform = Platform(**kwargs)
self.platform.add_extension([
("tp0", 0, Pins("X3:5"), IOStandard("LVCMOS33")),
])
self.clock_domains.cd_serdes = ClockDomain()
self.submodules.serdes = serdes = \
LatticeECP5PCIeSERDES(self.platform.request("pcie_x1"))
self.comb += [
self.cd_serdes.clk.eq(serdes.rx_clk_o),
serdes.rx_clk_i.eq(self.cd_serdes.clk),
serdes.tx_clk_i.eq(self.cd_serdes.clk),
]
with open("top.sdc", "w") as f:
f.write("define_clock -name {n:serdes_ref_clk} -freq 100.000\n")
f.write("define_clock -name {n:serdes_rx_clk_o} -freq 150.000\n")
self.platform.add_source("top.sdc")
# self.platform.add_platform_command("""FREQUENCY NET "serdes_ref_clk" 100 MHz;""")
# self.platform.add_platform_command("""FREQUENCY NET "serdes_rx_clk_o" 125 MHz;""")
self.submodules.aligner = aligner = \
ClockDomainsRenamer("rx")(PCIeSERDESAligner(serdes.lane))
self.submodules.phy = phy = \
ClockDomainsRenamer("rx")(PCIePHY(aligner, ms_cyc=125000))
led_att1 = self.platform.request("user_led")
led_att2 = self.platform.request("user_led")
led_sta1 = self.platform.request("user_led")
led_sta2 = self.platform.request("user_led")
led_err1 = self.platform.request("user_led")
led_err2 = self.platform.request("user_led")
led_err3 = self.platform.request("user_led")
led_err4 = self.platform.request("user_led")
self.comb += [
led_att1.eq(~(phy.rx.ts.link.valid)),
led_att2.eq(~(phy.rx.ts.lane.valid)),
led_sta1.eq(~(phy.rx.ts.valid)),
led_sta2.eq(~(0)),
led_err1.eq(~(~serdes.lane.rx_present)),
led_err2.eq(~(~serdes.lane.rx_locked)),
led_err3.eq(~(~serdes.lane.rx_aligned)),
led_err4.eq(~(phy.rx.error)),
]
tp0 = self.platform.request("tp0")
self.comb += tp0.eq(phy.rx.ts.link.valid)
uart_pads = Pads(self.platform.request("serial"))
self.submodules += uart_pads
self.submodules.uart = uart = ClockDomainsRenamer("rx")(
UART(uart_pads, bit_cyc=uart_bit_cyc(125e6, 115200)[0])
)
self.comb += [
uart.rx_ack.eq(uart.rx_rdy),
]
index = Signal(max=phy.ltssm_log.depth)
offset = Signal(8)
size = Signal(16)
entry = Signal(phy.ltssm_log.width)
self.comb += [
size.eq(phy.ltssm_log.width * phy.ltssm_log.depth // 8),
entry.eq(Cat(phy.ltssm_log.data_o, phy.ltssm_log.time_o)),
]
self.submodules.uart_fsm = ClockDomainsRenamer("rx")(FSM())
self.uart_fsm.act("WAIT",
NextValue(uart.tx_ack, 0),
If(uart.rx_rdy,
NextValue(phy.ltssm_log.trigger, 1),
NextValue(offset, 1),
NextState("WIDTH")
)
)
self.uart_fsm.act("WIDTH",
NextValue(uart.tx_ack, 0),
If(uart.tx_rdy & ~uart.tx_ack,
NextValue(uart.tx_data, size.part(offset << 3, 8)),
NextValue(uart.tx_ack, 1),
If(offset == 0,
NextValue(offset, phy.ltssm_log.width // 8 - 1),
NextValue(index, phy.ltssm_log.depth - 1),
NextState("DATA")
).Else(
NextValue(offset, offset - 1)
)
)
)
self.uart_fsm.act("DATA",
NextValue(uart.tx_ack, 0),
If(uart.tx_rdy & ~uart.tx_ack,
NextValue(uart.tx_data, entry.part(offset << 3, 8)),
NextValue(uart.tx_ack, 1),
If(offset == 0,
phy.ltssm_log.next.eq(1),
NextValue(offset, phy.ltssm_log.width // 8 - 1),
If(index == 0,
NextValue(phy.ltssm_log.trigger, 0),
NextState("WAIT")
).Else(
NextValue(index, index - 1)
)
).Else(
NextValue(offset, offset - 1)
)
)
)
# -------------------------------------------------------------------------------------------------
import sys
import serial
import struct
import subprocess
if __name__ == "__main__":
for arg in sys.argv[1:]:
if arg == "build":
toolchain = "diamond"
if toolchain == "trellis":
toolchain_path = "/usr/local/share/trellis"
elif toolchain == "diamond":
toolchain_path = "/usr/local/diamond/3.10_x64/bin/lin64"
design = LTSSMTestbench(toolchain=toolchain)
design.platform.build(design, toolchain_path=toolchain_path)
if arg == "load":
subprocess.call(["/home/whitequark/Projects/prjtrellis/tools/bit_to_svf.py",
"build/top.bit",
"build/top.svf"])
subprocess.call(["openocd",
"-f", "/home/whitequark/Projects/"
"prjtrellis/misc/openocd/ecp5-versa5g.cfg",
"-c", "init; svf -quiet build/top.svf; exit"])
if arg == "sample":
design = LTSSMTestbench()
design.finalize()
port = serial.Serial(port='/dev/ttyUSB1', baudrate=115200)
port.write(b"\x00")
length, = struct.unpack(">H", port.read(2))
data = port.read(length)
offset = 0
start = None
while offset < len(data):
time, state = struct.unpack_from(">LB", data, offset)
offset += struct.calcsize(">LB")
if start is not None:
delta = time - start
else:
delta = 0
print("%+10d cyc (%+10d us): %s" %
(delta, delta / 125, design.phy.ltssm.decoding[state]))
start = time
|
449965
|
import socket
def link_udp_client():
socket_client = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
content = b"i am student"
addr = ("127.0.0.1",10002)
socket_client.sendto(content,addr)
socket_client.close()
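# Illustrative companion sketch (not part of the original file): a minimal UDP
# receiver bound to the same address/port the client above sends to. It reuses
# the `socket` import at the top of this module; run it in a separate process
# before calling link_udp_client().
def link_udp_server():
    socket_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    socket_server.bind(("127.0.0.1", 10002))
    content, addr = socket_server.recvfrom(1024)
    print("received {!r} from {}".format(content, addr))
    socket_server.close()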
if __name__ == '__main__':
link_udp_client()
|
449980
|
from __future__ import print_function
from easydict import EasyDict as edict
from lib.cfgs import c as dcfgs
import lib.cfgs as cfgs
import os
os.environ['JOBLIB_TEMP_FOLDER']=dcfgs.shm
import argparse
os.environ['GLOG_minloglevel'] = '3'
import os.path as osp
import pickle
import sys
from multiprocessing import Process, Queue
import matplotlib.pyplot as plt
import numpy as np
from IPython import embed
from lib.decompose import *
from lib.net import Net, load_layer, caffe_test
from lib.utils import *
from lib.worker import Worker
import google.protobuf.text_format # added to fix missing protobuf properties -by Mario
sys.path.insert(0, osp.dirname(__file__)+'/lib')
def step0(pt, model):
net = Net(pt, model=model, noTF=1) # lib/net.Net instantiate the NetBuilder -by Mario
WPQ, pt, model = net.preprocess_resnet() # WPQ stores pruned values, which will be later saved to the caffemodel -by Mario
return {"WPQ": WPQ, "pt": pt, "model": model}
def step1(pt, model, WPQ, check_exist=False):
print(pt)
net = Net(pt, model, noTF=1)
model = net.finalmodel(WPQ) # loads weights into the caffemodel - by Mario
    if 1:  # TODO: Consider adding a configuration parameter to cfgs.py in order to control whether or not to prune the last conv layer -by Mario
convs = net.convs
redprint("including last conv layer!")
else:
convs = net.convs[:-1]
redprint("ignoring last conv layer!")
if dcfgs.dic.option == 1:
if DEBUG: redprint("This line executed because dcfgs.dic.option is set to 1 [train.step1()]")
sums = net.type2names('Eltwise')[:-1]
newsums = []
for i in sums:
if not i.endswith('block8_sum'):
newsums.append(i)
newconvs = []
for i in convs:
if i.endswith('_proj'):
newconvs.insert(0,i)
else:
newconvs.append(i)
convs = newsums + newconvs
else:
convs += net.type2names('Eltwise')[:-1] # I guess Element-wise operations are included in ResNet or Xception -by Mario
if dcfgs.dic.fitfc:
convs += net.type2names('InnerProduct')
if dcfgs.model in [cfgs.Models.xception,cfgs.Models.resnet]:
for i in net.bns:
if 'branch1' in i:
convs += [i]
net.freeze_images(check_exist=check_exist, convs=convs)
return {"model":model}
def combine():
net = Net(dcfgs.prototxt, dcfgs.weights)
net.combineHP()
def c3(pt=cfgs.vgg.model,model=cfgs.vgg.weights): # TODO: Consider changing cfgs.vgg.model and cfgs.vgg.weights (paths to the .prototxt and .caffemodel files) for a generic model reference -by Mario
dcfgs.splitconvrelu=True
cfgs.accname='accuracy@5' # name of layer in the prototxt -by Mario
def solve(pt, model):
net = Net(pt, model=model)
net.load_frozen() # this method can load images from memory if we pass a feats_dic. For what? -by Mario
WPQ, new_pt = net.R3()
return {"WPQ": WPQ, "new_pt": new_pt}
def stepend(new_pt, model, WPQ):
net = Net(new_pt, model=model)
net.WPQ = WPQ
net.finalmodel(save=False) # load weights into the caffemodel -by Mario
net.dis_memory()
#final = net.finalmodel(WPQ, prefix='3r')
new_pt, new_model = net.save(prefix='3c')
print('caffe test -model',new_pt, '-weights',new_model)
return {"final": None}
worker = Worker()
outputs = worker.do(step0, pt=pt, model=model)
printstage("freeze")
pt = outputs['pt']
outputs = worker.do(step1,**outputs)
printstage("speed", dcfgs.dic.keep)
outputs['pt'] = mem_pt(pt)
if 0:
outputs = solve(**outputs)
else:
outputs = worker.do(solve, **outputs)
printstage("saving")
outputs = worker.do(stepend, model=model, **outputs)
def splitrelu():
net = Net(dcfgs.prototxt, model=dcfgs.weights)
print(net.seperateConvReLU())
def addbn(pt='../resnet-cifar10-caffe/resnet-56/prb_mem_bn_trainval.prototxt', model="../resnet-cifar10-caffe/resnet-56/snapshot/prb_VH_bn__iter_64000.caffemodel"):
""" Restore BatchNorm for finetuning
"""
worker=Worker()
def ad(pt, model):
net = Net(pt, model=model, noTF=1)
#net.computation()
pt, WPQ = net.add_bn()
return {'new_pt': pt, 'model':model, 'WPQ':WPQ}
outs = worker.do(ad, pt=pt, model=model)
worker.do(stepend, **outs)
#stepend(**outs)
def compute(pt='../resnet-cifar10-caffe/resnet-56/trainval.prototxt', model="../resnet-cifar10-caffe/resnet-56/snapshot/_iter_64000.caffemodel"):
net = Net(pt, model=model, noTF=1)
net.computation()
def parse_args():
parser = argparse.ArgumentParser("experiment")
parser.add_argument('-tf', dest='tf_vis', help='tf devices', default=None, type=str)
parser.add_argument('-caffe', dest='caffe_vis', help='caffe devices', default=None, type=str)
parser.add_argument('-action', dest='action', help='action', default='train', type=str)
attrs = ['dic', 'an', 'res']
for d in attrs:
for i in dcfgs[d]:
parser.add_argument('-'+d+'.'+i, dest=d+'DOT'+i, help=d+'.'+i, default=None,type=str)
for i in dcfgs:
if i not in attrs:
parser.add_argument('-'+i, dest=i, help=i, default=None,type=str)
args = parser.parse_args()
if args.tf_vis is not None: cfgs.tf_vis = args.tf_vis
if args.caffe_vis is not None: cfgs.caffe_vis = args.caffe_vis
for d in attrs:
for i in dcfgs[d]:
att = getattr(args, d+'DOT'+i)
if att is not None:
if 0:
print(d,i, att)
dcfgs[d][i]=type(dcfgs[d][i])(att)
for i in dcfgs:
if i in attrs:
continue
att = getattr(args, i)
if att is not None:
dcfgs[i]=type(dcfgs[i])(att)
dcfgs.Action = args.action
if args.model is not None:
netmodel = getattr(cfgs, args.model)
cfgs.accname = netmodel.accname
if args.prototxt is None:
dcfgs.prototxt = netmodel.model
if args.weights is None:
dcfgs.weights = netmodel.weights
return args
if __name__ == '__main__':
args = parse_args()
cfgs.set_nBatches(dcfgs.nBatches)
dcfgs.dic.option=1
DEBUG = 0
if args.action == cfgs.Action.addbn:
addbn(pt=dcfgs.prototxt, model=dcfgs.weights)
elif args.action == cfgs.Action.splitrelu:
splitrelu()
elif args.action == cfgs.Action.c3:
c3()
elif args.action == cfgs.Action.combine:
combine()
else:
pass
|
449986
|
import math
import os
import numpy as np
from monai.data import Dataset, SmartCacheDataset
from skimage.transform import resize
from image_reader import WSIReader
class PatchWSIDataset(Dataset):
"""
Load whole slide images and associated class labels and create patches
"""
def __init__(self, data, region_size, grid_size, patch_size, image_reader_name="CuImage", transform=None):
if type(region_size) == int:
self.region_size = (region_size, region_size)
else:
self.region_size = region_size
if type(grid_size) == int:
self.grid_size = (grid_size, grid_size)
else:
self.grid_size = grid_size
self.sub_region_size = (self.region_size[0] / self.grid_size[0], self.region_size[1] / self.grid_size[1])
self.patch_size = patch_size
self.transform = transform
self.image_base_path = data[0]["image"]
self.samples = self.load_samples(data[0]["label"])
self.image_path_list = {x[0] for x in self.samples}
self.num_samples = len(self.samples)
self.image_reader_name = image_reader_name
self.image_reader = WSIReader(image_reader_name)
self.cu_image_dict = {}
self._fetch_cu_images()
def _fetch_cu_images(self):
for image_path in self.image_path_list:
self.cu_image_dict[image_path] = self.image_reader.read(image_path)
def process_label_row(self, row):
row = row.strip("\n").split(",")
# create full image path
image_name = row[0] + ".tif"
image_path = os.path.join(self.image_base_path, image_name)
# change center locations to upper left location
location = (int(row[1]) - self.region_size[0] // 2, int(row[2]) - self.region_size[1] // 2)
# convert labels to float32 and add empty HxW channel to label
labels = tuple(int(lbl) for lbl in row[3:])
labels = np.array(labels, dtype=np.float32)[:, np.newaxis, np.newaxis]
return image_path, location, labels
def load_samples(self, loc_path):
with open(loc_path) as label_file:
rows = [self.process_label_row(row) for row in label_file.readlines()]
return rows
def __len__(self):
return self.num_samples
def __getitem__(self, index):
image_path, location, labels = self.samples[index]
if self.image_reader_name == 'openslide':
img_obj = self.image_reader.read(image_path)
else:
img_obj = self.cu_image_dict[image_path]
images = self.image_reader.get_data(
img_obj=img_obj,
location=location,
size=self.region_size,
grid_shape=self.grid_size,
patch_size=self.patch_size,
)
samples = [{"image": images[i], "label": labels[i]} for i in range(labels.shape[0])]
if self.transform:
samples = self.transform(samples)
return samples
class SmartCachePatchWSIDataset(SmartCacheDataset):
"""
Add SmartCache functionality to PatchWSIDataset
"""
def __init__(
self,
data,
region_size,
grid_size,
patch_size,
transform,
replace_rate,
cache_num,
cache_rate=1.0,
num_init_workers=None,
num_replace_workers=0,
image_reader_name="CuImage",
):
extractor = PatchWSIDataset(data, region_size, grid_size, patch_size, image_reader_name)
super().__init__(
data=extractor,
transform=transform,
replace_rate=replace_rate,
cache_num=cache_num,
cache_rate=cache_rate,
num_init_workers=num_init_workers,
num_replace_workers=num_replace_workers,
)
class SlidingWindowWSIDataset(Dataset):
"""
Load image patches in a sliding window manner with foreground mask
Parameters include image and mask paths, and patch_size
Output will be at same level as the foreground mask
"""
def __init__(self, data, patch_size, image_reader_name="CuImage", transform=None):
if type(patch_size) == int:
self.patch_size = (patch_size, patch_size)
else:
self.patch_size = patch_size
self.image_reader = WSIReader(image_reader_name)
self.down_ratio = int(np.ceil(self.patch_size[0] / 32) - 6)
self.transform = transform
self.coords = []
self.info = {}
for wsi_sample in data:
image_name, img, num_idx, x_idx, y_idx, level, ratio, mask_dims, image_dims = self._preprocess(wsi_sample)
self.info[image_name] = {
"img": img,
"mask_dims": mask_dims,
"image_dims": image_dims,
"num_idx": num_idx,
"level": level,
"ratio": ratio,
"counter": 0,
}
coords = zip([image_name] * num_idx, x_idx, y_idx)
self.coords.extend(coords)
self.total_n_patches = len(self.coords)
def _preprocess(self, sample):
image_path = sample["image"]
mask_path = sample["label"]
image_name = os.path.splitext(os.path.basename(image_path))[0]
img = self.image_reader.read(image_path)
msk = np.load(mask_path)
dim_y_img, dim_x_img, _ = img.shape
dim_x_msk, dim_y_msk = msk.shape
ratio_x = dim_x_img / dim_x_msk
ratio_y = dim_y_img / dim_y_msk
level_x = math.log2(ratio_x)
if ratio_x != ratio_y:
raise Exception(
"{}: Image/Mask dimension does not match ,"
" dim_x_img / dim_x_msk : {} / {},"
" dim_y_img / dim_y_msk : {} / {}".format(image_name, dim_x_img, dim_x_msk, dim_y_img, dim_y_msk)
)
else:
if not level_x.is_integer():
raise Exception(
"{}: Mask not at regular level (ratio not power of 2),"
" image / mask ratio: {},".format(image_name, ratio_x)
)
else:
ratio = ratio_x
level = level_x
print("{}: Mask at level {}, with ratio {}".format(image_name, int(level), int(ratio)))
print("Downsample ratio {}".format(self.down_ratio))
msk_down = resize(msk, (int(dim_x_msk / self.down_ratio), int(dim_y_msk / self.down_ratio)))
# get all indices for tissue region from the foreground mask
x_idx, y_idx = np.where(msk_down)
# output same size as the foreground mask
# attention: not original wsi image size
self.x_idx = x_idx * self.down_ratio
self.y_idx = y_idx * self.down_ratio
num_idx = len(x_idx)
return image_name, img, num_idx, x_idx, y_idx, level, ratio, (dim_x_msk, dim_y_msk), (dim_x_img, dim_y_img)
def _load_sample(self, index):
"""
Load patch for sliding window inference on WSI
Read ROI with patch_size at patch_loc into a dictionary of {'image': array, "name": str}.
"""
name, x_msk, y_msk = self.coords[index]
ratio = self.info[name]["ratio"]
# convert to image space
x_img = int((x_msk + 0.5) * ratio - self.patch_size[0] / 2)
y_img = int((y_msk + 0.5) * ratio - self.patch_size[1] / 2)
location = (x_img, y_img)
image = self.image_reader.get_data(img_obj=self.info[name]["img"], location=location, size=self.patch_size)
sample = {"image": image, "name": name, "location": (x_msk, y_msk), "ratio": ratio}
return sample
def __len__(self):
return self.total_n_patches
def __getitem__(self, index):
sample = self._load_sample(index)
if self.transform:
sample = self.transform(sample)
return sample
|
450011
|
import time
# timestamp -- used for calculations
# print(time.time())  # e.g. 1481321748.481654 seconds
# struct_time --- local time
# print(time.localtime(1531242343))
# t=time.localtime()
# print(t.tm_year)
# print(t.tm_wday)
# #----- struct_time --- UTC
# print(time.gmtime())
#----- convert struct_time to a timestamp
# print(time.mktime(time.localtime()))
#------ convert struct_time to a time string with strftime
#print(time.strftime("%Y---%m-%d %X",time.localtime()))
#------ convert a time string to struct_time with strptime
#print(time.strptime("2016:12:24:17:50:36","%Y:%m:%d:%X"))
# print(time.asctime())
# print(time.ctime())
# import datetime
# print(datetime.datetime.now())
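# A small runnable sketch (added for illustration) of the conversions outlined
# in the comments above; it reuses the `time` module imported at the top.
ts = time.time()                               # timestamp (float seconds)
local = time.localtime(ts)                     # struct_time in the local zone
back_to_ts = time.mktime(local)                # struct_time -> timestamp
as_text = time.strftime("%Y-%m-%d %X", local)  # struct_time -> string
parsed = time.strptime(as_text, "%Y-%m-%d %X") # string -> struct_time
print(ts, back_to_ts, as_text, parsed.tm_year)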
|
450024
|
from typing import Union
import coloredlogs
# Note: be sure to call this before importing any application modules!
def configure_logging(level: Union[int, str]): # pragma: no cover
coloredlogs.install(
level=level,
fmt="[%(asctime)s][%(name)s][%(levelname)s] - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
field_styles={
"asctime": {"color": "cyan"},
"hostname": {"color": "magenta"},
"levelname": {"bold": True, "color": "black"},
"name": {"color": "blue"},
"programname": {"color": "cyan"},
"username": {"color": "yellow"},
},
level_styles={
"debug": {"color": "magenta"},
"info": {"color": "green"},
"warning": {"color": "yellow"},
"error": {"color": "red"},
"critical": {"color": "red"},
},
)
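# Hypothetical usage sketch (not part of the original module): call
# configure_logging() once at start-up, before importing application modules,
# as the note above advises.
if __name__ == "__main__":  # pragma: no cover
    import logging
    configure_logging("DEBUG")
    logging.getLogger(__name__).info("colored logging configured")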
|
450091
|
from decimal import Decimal
from datetime import timedelta
from time import sleep
from math import log10
import logging
from requests.packages.urllib3.exceptions import ProtocolError
from .base import OandaBrokerBase
from ..lib.oandapy import OandaError
from ..portfolio import Position
log = logging.getLogger('pyFx')
class OandaRealtimeBroker(OandaBrokerBase):
def __init__(self, api, account_id):
super(OandaRealtimeBroker, self).__init__(api)
self._account_id = account_id
self.last_transaction_id = None
def get_account_balance(self):
ret = self._api.get_account(self._account_id)
if 'balance' in ret:
return ret['balance']
return False
def get_price(self, instrument):
params = {
'instruments': str(instrument),
}
ret = self._api.get_prices(**params)
if ret and 'prices' in ret and len(ret['prices']) > 0:
return ret['prices'][0]
return None
def open_order(self, instrument, units, side, order_type,
price=None, expiry=None, stop_loss=None, take_profit=None):
params = {
'instrument': str(instrument),
'units': units,
'side': side,
'type': order_type,
}
log.debug("[{}] Broker received open order.".format(instrument))
# Available order_type's are:
# 'limit', 'stop', 'marketIfTouched' or 'market'.
if order_type in ['limit', 'stop', 'marketIfTouched']:
if not price:
raise ValueError(
"Price is required for OrderType {0:s}".format(order_type))
if not expiry:
expiry = (self._tick + timedelta(seconds=300)).isoformat()
# raise ValueError("Expiration time is required for OrderType {0:s}".format(order_type))
params['expiry'] = expiry
if price:
prec = abs(int(log10(float(instrument.pip))))
new_price = round(price, prec)
params['price'] = new_price
#if stop_loss:
# params['stopLoss'] = stop_loss
if take_profit:
params['takeProfit'] = take_profit
#else:
# if price:
# stop_loss_pips = (60 * float(instrument.pip))
# if side == 'buy':
# stop_loss_price = new_price - stop_loss_pips
# else:
# stop_loss_price = new_price + stop_loss_pips
# params['stopLoss'] = stop_loss_price
#print params
ret = None
for _ in range(3):
try:
ret = self._api.create_order(self._account_id, **params)
if ret:
break
except OandaError as e:
print "[!] Error while creating {} oder: {}. Retrying...".format(instrument, e)
if not ret:
return None
ret_detail = ret.get('orderOpened', None)
if not ret_detail:
ret_detail = ret.get('tradeOpened', None)
        print(ret, ret_detail)
if ret and 'price' in ret:
if ret_detail and 'id' in ret_detail:
pos = Position(
side=side,
instrument=instrument,
open_price=Decimal(ret['price']),
open_time=ret['time'],
order_id=ret_detail['id'],
order_type=order_type,
)
return pos
return None
def close_trade(self, position):
# This method will block the rest, but it's important
# that trades get closed immediately
# while True:
for _ in range(3):
try:
ret = self._api.close_trade(
self._account_id,
position.transaction_id
)
if all(k in ret for k in ('id', 'price', 'time', 'profit')):
position.close_price = ret['price']
position.profit_cash = ret['profit']
position.close_time = ret['time']
return position
# TODO Check transactions
else:
pass
# TODO What if transaction was closed by broker?
# TODO Handle Oanda exceptions properly
except (ProtocolError, OandaError) as e:
print "[!] Error during CLOSE action ({}). Trying again...".format(
e)
sleep(3)
return position
return False
def delete_pending_order(self, position):
try:
ret = self._api.close_order(
account_id=self._account_id,
order_id=position.order_id
)
except OandaError as e:
print "[!] OandaError {}: {}".format(e.errno, e.strerror)
return True
def sync_transactions(self, position):
# TODO Refactor this just to use api.get_transactions()
# 1. Check if order is still PENDING
# Since OANDA handles limit/market and stoploss/takeprofit orders
# differently, we've to check both API endpoints.
ret = None
try:
if position.order_type in ['limit', 'market', 'marketIfTouched']:
ret = self._api.get_order(
account_id=self._account_id,
order_id=position.order_id)
# TODO Handle 'takeprofit' properly in Portfolio Class
if position.order_type in ['stop', 'takeprofit']:
ret = self._api.get_trade(
account_id=self._account_id,
trade_id=position.order_id)
except OandaError as e:
            print(e)
pass
if ret and 'id' in ret:
return "PENDING"
# 2. Since no order/trade was found, check if it was executed
ret = self._api.get_transaction_history(
account_id=self._account_id)
if ret and "transactions" in ret:
for trans in ret['transactions']:
transaction_id = trans.get('id', None)
transaction_type = trans.get('type', None)
transaction_price = trans.get('price', None)
transaction_stoploss = trans.get('stopLossPrice', None)
order_id = trans.get('orderId', None)
if position.order_type == 'market':
if transaction_id == position.order_id:
if transaction_type in ['MARKET_ORDER_CREATE',]:
position.open_price = transaction_price
position.transaction_id = transaction_id
position.stop_loss = transaction_stoploss
return "CONFIRMED"
else:
if order_id and order_id == position.order_id:
if transaction_type in ['ORDER_FILLED',
'STOP_LOSS_FILLED',
'TAKE_PROFIT_FILLED',
'TRAILING_STOP_FILLED']:
if transaction_id:
position.open_price = transaction_price
position.transaction_id = transaction_id
position.stop_loss = transaction_stoploss
return "CONFIRMED"
# 3. Lastly, if ID isn't showing up anymore, return the info
return "NOTFOUND"
|
450125
|
from node.behaviors import BoundContext
from node.interfaces import IBoundContext
from node.tests import NodeTestCase
from plumber import plumbing
from zope.interface import implementer
from zope.interface import Interface
class IBoundInterface(Interface):
pass
class BoundClass(object):
pass
@plumbing(BoundContext)
class ContextAware(object):
@classmethod
def unbind_context(cls):
cls.__bound_context_interfaces__ = ()
cls.__bound_context_classes__ = ()
class TestContext(NodeTestCase):
def test_BoundContext_bind_context(self):
self.assertEqual(ContextAware.__bound_context_interfaces__, ())
self.assertEqual(ContextAware.__bound_context_classes__, ())
ContextAware.bind_context(None)
self.assertEqual(ContextAware.__bound_context_interfaces__, ())
self.assertEqual(ContextAware.__bound_context_classes__, ())
ca = ContextAware()
self.assertTrue(IBoundContext.providedBy(ca))
ContextAware.bind_context(IBoundInterface)
self.assertEqual(
ContextAware.__bound_context_interfaces__,
(IBoundInterface,)
)
self.assertEqual(ContextAware.__bound_context_classes__, ())
ContextAware.unbind_context()
ContextAware.bind_context(BoundClass)
self.assertEqual(ContextAware.__bound_context_interfaces__, ())
self.assertEqual(
ContextAware.__bound_context_classes__,
(BoundClass,)
)
ContextAware.unbind_context()
ContextAware.bind_context(IBoundInterface, BoundClass)
self.assertEqual(
ContextAware.__bound_context_interfaces__,
(IBoundInterface,)
)
self.assertEqual(
ContextAware.__bound_context_classes__,
(BoundClass,)
)
with self.assertRaises(RuntimeError):
ContextAware.bind_context(object)
ContextAware.unbind_context()
with self.assertRaises(ValueError):
ContextAware.bind_context(lambda: 1)
def test_BoundContext_context_matches(self):
@implementer(IBoundInterface)
class BoundInterface(object):
pass
ContextAware.unbind_context()
inst = ContextAware()
self.assertTrue(inst.context_matches(object()))
ContextAware.bind_context(BoundClass)
inst = ContextAware()
self.assertFalse(inst.context_matches(object()))
self.assertTrue(inst.context_matches(BoundClass()))
ContextAware.unbind_context()
ContextAware.bind_context(IBoundInterface)
inst = ContextAware()
self.assertFalse(inst.context_matches(object()))
self.assertTrue(inst.context_matches(BoundInterface()))
ContextAware.unbind_context()
ContextAware.bind_context(IBoundInterface, BoundClass)
inst = ContextAware()
self.assertFalse(inst.context_matches(object()))
self.assertTrue(inst.context_matches(BoundClass()))
self.assertTrue(inst.context_matches(BoundInterface()))
|
450129
|
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from wagtail_wordpress_import.importers.wordpress import WordpressImporter
from wagtail_wordpress_import.logger import Logger
LOG_DIR = "log"
class Command(BaseCommand):
help = """Run the import process on all items in the XML file and make
them child pages of a specific page."""
"""
./manage.py import_xml path/to/xml/file.xml parent_page_id [--app] [--model] [--type] [--status]
The default is:
Import all `post` and `page` types of status `draft` and `publish` as children
of the page with id of parent_id
"""
def add_arguments(self, parser):
parser.add_argument("xml_file", type=str, help="The full path to your xml file")
parser.add_argument(
"parent_id",
type=int,
help="The page ID of the parent page to use when creating imported pages",
)
parser.add_argument(
"-a",
"--app",
type=str,
help="The app which contains your page models for the import",
default="pages",
)
parser.add_argument(
"-m",
"--model",
type=str,
help="The page model to use for the imported pages",
default="PostPage",
)
parser.add_argument(
"-t",
"--type",
type=str,
help="The wordpress post type/s to import. Use a comma to separate multiple types",
default="page,post",
)
parser.add_argument(
"-s",
"--status",
type=str,
help="The wordpress post statuse/s to import. Use a comma to separate multiple types",
default="publish,draft",
)
def handle(self, **options):
if not getattr(settings, "WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN", ""):
self.stdout.write(
self.style.ERROR(
"WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN: is missing in your site settings"
)
)
exit()
xml_file_path = self.get_xml_file(f"{options['xml_file']}")
logger = Logger(LOG_DIR)
importer = WordpressImporter(xml_file_path)
importer.run(
page_types=options["type"].split(","),
page_statuses=options["status"].split(","),
app_for_pages=options["app"],
model_for_pages=options["model"],
parent_id=options["parent_id"],
logger=logger,
)
logger.output_import_summary()
logger.save_csv_import_report()
logger.save_csv_images_report()
logger.save_csv_pagelink_errors_report()
def get_xml_file(self, xml_file):
if os.path.exists(xml_file):
return xml_file
self.stdout.write(
self.style.ERROR(f"The xml file cannot be found at: {xml_file}")
)
exit()
|
450135
|
from streamlink.plugins.streamable import Streamable
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlStreamable(PluginCanHandleUrl):
__plugin__ = Streamable
should_match = [
'https://streamable.com/example',
]
|
450136
|
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="imax",
version="0.0.1-beta4",
author="<NAME>",
author_email="<EMAIL>",
description="Image augmentation library for Jax",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/4rtemi5/imax",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
python_requires=">=3.6",
install_requires=[
"jax",
"jaxlib",
],
)
|
450167
|
from arjuna import *
@test
def check_ext_dep_dir(request):
print(C("project.root.dir"))
print(C("deps.dir"))
from sample.mod import hello
hello()
|
450179
|
from .markdowntoc.autorunner import AutoRunner
from .markdowntoc.markdowntoc_insert import MarkdowntocInsert
from .markdowntoc.markdowntoc_update import MarkdowntocUpdate
|
450216
|
def _dump_llvm(f, output_func):
d = f.inspect_llvm()
if isinstance(d, dict):
for ty, code in d.items():
output_func("\n===== {}\n{}".format(ty, code))
else:
output_func("\n" + d)
def dump_numba_llvm(func):
out = print
module = __import__(func.__module__)
if hasattr(module, "_logger"):
out = module._logger.debug
if hasattr(func, "inspect_llvm"):
_dump_llvm(func, output_func=out)
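# Illustrative usage sketch (assumes numba is installed; not part of the
# original module): compile a trivial function with numba, call it once so
# LLVM IR exists, then dump the generated IR with the helper above.
if __name__ == "__main__":
    from numba import njit
    @njit
    def _add_one(x):
        return x + 1
    _add_one(1)  # trigger compilation so inspect_llvm() has something to show
    dump_numba_llvm(_add_one)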
|
450260
|
from distutils.core import setup
setup(
name='filesearch',
version='',
packages=['', 'enjarify', 'enjarify.jvm', 'enjarify.jvm.constants', 'enjarify.jvm.optimization',
'enjarify.typeinference'],
url='',
license='',
author='<NAME>',
author_email='',
description=''
)
|
450266
|
import re
import sys
import os
from typing import Optional, List
from deca.errors import *
from deca.db_processor import VfsProcessor, vfs_structure_new, vfs_structure_open, vfs_structure_empty, VfsNode
from deca.db_view import VfsView
from deca.builder import Builder
from deca.util import Logger, to_unicode
from deca.cmds.tool_make_web_map import ToolMakeWebMap
from deca.export_import import \
nodes_export_raw, nodes_export_contents, nodes_export_processed, nodes_export_gltf, nodes_export_map
from .main_window import Ui_MainWindow
from .deca_interfaces import IVfsViewSrc
from .vfsdirwidget import VfsDirWidget
from .vfsnodetablewidget import VfsNodeTableWidget
from PySide2.QtCore import Slot, QUrl, Signal, QEvent
from PySide2.QtWidgets import QApplication, QMainWindow, QMessageBox, QFileDialog, QStyle
from PySide2.QtGui import QDesktopServices, QKeyEvent
window_title = 'decaGUI: v0.2.13'
class MainWindowDataSource(IVfsViewSrc):
def __init__(self, main_window, *args, **kwargs):
super().__init__(*args, **kwargs)
self.main_window: MainWindow = main_window
def vfs_get(self):
return self.main_window.vfs
def vfs_view_get(self):
return self.main_window.vfs_view_current()
def archive_open(self, selection):
return self.main_window.slot_archive_open(selection)
class MainWindow(QMainWindow):
signal_visible_changed = Signal(VfsView)
signal_selection_changed = Signal(VfsView)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.data_source = MainWindowDataSource(self)
self.vfs: Optional[VfsProcessor] = None
self.logger = Logger('./')
self.builder = Builder()
self.current_uids = None
self.vfs_view_root: Optional[VfsView] = None
self.tab_nodes_deletable = set()
self.signal_visible_changed.connect(self.slot_visible_changed)
self.signal_selection_changed.connect(self.slot_selection_changed)
# Configure Actions
self.ui.action_project_new.triggered.connect(self.project_new)
self.ui.action_project_open.triggered.connect(self.project_open)
self.ui.action_file_gz_open.triggered.connect(self.file_gz_open)
self.ui.action_external_add.triggered.connect(self.external_add)
self.ui.action_external_add.setEnabled(False)
# self.ui.action_external_manage.triggered.connect(self.external_manage)
self.ui.action_exit.triggered.connect(self.exit_app)
self.ui.action_make_web_map.triggered.connect(self.tool_make_web_map)
# filter
self.ui.filter_edit.textChanged.connect(self.filter_text_changed)
self.ui.filter_edit.installEventFilter(self)
self.ui.filter_set_bt.clicked.connect(self.filter_text_accepted)
self.ui.filter_clear_bt.clicked.connect(self.filter_text_cleared)
self.ui.vhash_to_vpath_in_edit.textChanged.connect(self.vhash_to_vpath_text_changed)
self.ui.chkbx_export_raw_extract.setChecked(True)
self.ui.chkbx_export_contents_extract.setChecked(False)
self.ui.chkbx_export_text_extract.setChecked(False)
self.ui.chkbx_export_processed_extract.setChecked(False)
self.ui.chkbx_export_raw_mods.setChecked(True)
self.ui.chkbx_export_contents_mods.setChecked(False)
self.ui.chkbx_export_processed_mods.setChecked(False)
self.ui.chkbx_mod_build_subset.setChecked(False)
self.ui.chkbx_mod_build_subset.clicked.connect(self.slot_mod_build_subset_clicked)
self.ui.chkbx_export_save_to_one_dir.setChecked(False)
self.ui.bt_extract.setEnabled(False)
self.ui.bt_extract.clicked.connect(self.slot_extract_clicked)
self.ui.bt_extract_folder_show.setEnabled(False)
self.ui.bt_extract_folder_show.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.ui.bt_extract_folder_show.clicked.connect(self.slot_folder_show_clicked)
self.ui.bt_extract_gltf_3d.setEnabled(False)
self.ui.bt_extract_gltf_3d.clicked.connect(self.slot_extract_gltf_clicked)
self.ui.bt_extract_gltf_3d_folder_show.setEnabled(False)
self.ui.bt_extract_gltf_3d_folder_show.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.ui.bt_extract_gltf_3d_folder_show.clicked.connect(self.slot_folder_show_clicked)
self.ui.bt_mod_prep.setEnabled(False)
self.ui.bt_mod_prep.clicked.connect(self.slot_mod_prep_clicked)
self.ui.bt_mod_folder_show.setEnabled(False)
self.ui.bt_mod_folder_show.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.ui.bt_mod_folder_show.clicked.connect(self.slot_folder_show_clicked)
self.ui.bt_mod_build_folder_show.setIcon(self.style().standardIcon(QStyle.SP_DirIcon))
self.ui.bt_mod_build_folder_show.clicked.connect(self.slot_folder_show_clicked)
self.ui.bt_mod_build.clicked.connect(self.slot_mod_build_clicked)
self.ui.tabs_nodes.tabCloseRequested.connect(self.slot_nodes_tab_close)
self.ui.tabs_nodes.currentChanged.connect(self.slot_nodes_tab_current_changed)
self.ui.data_view.data_source_set(self.data_source)
self.ui.filter_edit.setText('.*')
def eventFilter(self, source, event):
if event.type() == QEvent.KeyRelease and source is self.ui.filter_edit:
self.filter_text_key_release(source, event)
return super().eventFilter(source, event)
def error_dialog(self, s):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle("DECA: ERROR")
msg.setText(s)
# msg.setInformativeText("This is additional information")
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
def dialog_good(self, s):
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("DECA")
msg.setText(s)
# msg.setInformativeText("This is additional information")
# msg.setDetailedText("The details are as follows:")
msg.setStandardButtons(QMessageBox.Ok)
retval = msg.exec_()
def vfs_view_create(self, *args, **kwargs):
vfs_view = VfsView(*args, **kwargs)
vfs_view.signal_visible_changed.connect(
self, lambda x: x.signal_visible_changed.emit(vfs_view))
vfs_view.signal_selection_changed.connect(
self, lambda x: x.signal_selection_changed.emit(vfs_view))
return vfs_view
def vfs_set(self, vfs):
self.vfs = vfs
self.vfs_view_root = self.vfs_view_create(vfs, None, b'^.*$')
# Configure VFS dir table
widget = self.tab_nodes_add(VfsDirWidget, self.vfs_view_root, 'Directory')
# Configure VFS Node table (non-mapped nodes)
widget = self.tab_nodes_add(VfsNodeTableWidget, self.vfs_view_root, 'Non-Mapped List')
widget.show_all_set(False)
# Configure VFS Node table (all nodes)
widget = self.tab_nodes_add(VfsNodeTableWidget, self.vfs_view_root, 'Raw List')
widget.show_all_set(True)
self.ui.action_external_add.setEnabled(True)
self.setWindowTitle("{}: Archive: {}".format(window_title, vfs.game_info.game_dir))
self.ui.statusbar.showMessage("LOAD COMPLETE")
def vfs_view_current(self):
widget = self.ui.tabs_nodes.currentWidget()
if widget is None:
return None
return widget.vfs_view_get()
def tab_nodes_add(self, widget_class, vfs_view, name, deletable=False):
# self.tab_extract = QtWidgets.QWidget()
# self.tab_extract.setObjectName("tab_extract")
# self.gridLayout = QtWidgets.QGridLayout(self.tab_extract)
# self.gridLayout.setObjectName("gridLayout")
widget = widget_class(vfs_view, self.ui.tabs_nodes)
self.ui.tabs_nodes.addTab(widget, name)
widget.vnode_2click_selected = self.vnode_2click_selected
if deletable:
self.tab_nodes_deletable.add(widget)
return widget
def slot_archive_open(self, vnode: VfsNode):
vfs_view = self.vfs_view_create(self.vfs_view_current(), parent_id=vnode.uid)
self.tab_nodes_add(VfsDirWidget, vfs_view, to_unicode(vnode.v_path), True)
def slot_nodes_tab_close(self, index):
widget = self.ui.tabs_nodes.widget(index)
if widget in self.tab_nodes_deletable:
self.tab_nodes_deletable.remove(widget)
if widget is not None:
widget.deleteLater()
self.ui.tabs_nodes.removeTab(index)
def slot_nodes_tab_current_changed(self, index):
widget = self.ui.tabs_nodes.widget(index)
self.update_select_state(self.vfs_view_current())
def slot_visible_changed(self, vfs_view: VfsView):
self.update_select_state(vfs_view)
def slot_selection_changed(self, vfs_view: VfsView):
self.update_select_state(vfs_view)
def update_select_state(self, vfs_view):
if vfs_view == self.vfs_view_current():
any_selected = vfs_view.paths_count() > 0
if not self.ui.filter_edit.hasFocus():
self.ui.filter_edit.setText(to_unicode(vfs_view.mask))
self.ui.bt_extract.setEnabled(any_selected)
self.ui.bt_extract_gltf_3d.setEnabled(any_selected)
self.ui.bt_mod_prep.setEnabled(any_selected)
self.ui.bt_extract_folder_show.setEnabled(any_selected)
self.ui.bt_extract_gltf_3d_folder_show.setEnabled(any_selected)
self.ui.bt_mod_folder_show.setEnabled(any_selected)
str_vpaths = self.vfs_view_current().paths_summary_str()
self.ui.bt_extract.setText('EXTRACT: {}'.format(str_vpaths))
self.ui.bt_extract_gltf_3d.setText('EXTRACT 3D/GLTF2: {}'.format(str_vpaths))
self.ui.bt_mod_prep.setText('PREP MOD: {}'.format(str_vpaths))
if self.ui.chkbx_mod_build_subset.isChecked():
self.ui.bt_mod_build.setText('Build Mod Subset: {}'.format(str_vpaths))
self.ui.bt_mod_build.setEnabled(any_selected)
else:
self.ui.bt_mod_build.setText('Build Mod All')
def vnode_2click_selected(self, uids: List[int]):
self.current_uids = uids
self.ui.data_view.vnode_2click_selected(uids)
def extract(
self, eid, extract_dir, export_raw, export_contents, save_to_processed, save_to_text,
export_map_full, export_map_tiles):
if self.vfs_view_current().node_selected_count() > 0:
try:
if export_raw:
nodes_export_raw(self.vfs, self.vfs_view_current(), extract_dir)
if export_contents:
nodes_export_contents(self.vfs, self.vfs_view_current(), extract_dir)
if export_map_full or export_map_tiles:
nodes_export_map(self.vfs, self.vfs_view_current(), extract_dir, export_map_full, export_map_tiles)
nodes_export_processed(
self.vfs, self.vfs_view_current(), extract_dir,
allow_overwrite=False,
save_to_processed=save_to_processed,
save_to_text=save_to_text)
except EDecaFileExists as exce:
self.error_dialog('{} Canceled: File Exists: {}'.format(eid, exce.args))
def extract_gltf(self, eid, extract_dir, save_to_one_dir, include_skeleton, texture_format):
if self.vfs_view_current().node_selected_count() > 0:
try:
nodes_export_gltf(
self.vfs, self.vfs_view_current(), extract_dir,
allow_overwrite=False,
save_to_one_dir=save_to_one_dir,
include_skeleton=include_skeleton,
texture_format=texture_format,
)
except EDecaFileExists as exce:
self.error_dialog('{} Canceled: File Exists: {}'.format(eid, exce.args))
def slot_folder_show_clicked(self, checked):
if self.vfs_view_current().node_selected_count() > 0:
path = self.vfs_view_current().common_prefix()
if self.sender() == self.ui.bt_extract_folder_show:
root = self.vfs.working_dir + 'extracted/'
elif self.sender() == self.ui.bt_extract_gltf_3d_folder_show:
root = self.vfs.working_dir + 'gltf2_3d/'
elif self.sender() == self.ui.bt_mod_folder_show:
root = self.vfs.working_dir + 'mod/'
elif self.sender() == self.ui.bt_mod_build_folder_show:
root = self.vfs.working_dir + 'build/'
path = None
else:
root = None
if root:
if path:
path = os.path.join(root, path)
else:
path = root
path = path.replace('\\', '/')
if not os.path.isdir(path):
path, _ = os.path.split(path)
if os.path.isdir(path):
QDesktopServices.openUrl(QUrl(f'{path}'))
else:
self.vfs.logger.warning(f'Folder does not exist: {path}')
def slot_extract_clicked(self, checked):
self.extract(
'Extraction', self.vfs.working_dir + 'extracted/',
export_raw=self.ui.chkbx_export_raw_extract.isChecked(),
export_contents=self.ui.chkbx_export_contents_extract.isChecked(),
save_to_processed=self.ui.chkbx_export_processed_extract.isChecked(),
save_to_text=self.ui.chkbx_export_text_extract.isChecked(),
export_map_full=self.ui.cmbbx_map_format.currentText().find('Full') > -1,
export_map_tiles=self.ui.cmbbx_map_format.currentText().find('Tiles') > -1,
)
def slot_extract_gltf_clicked(self, checked):
self.extract_gltf(
'GLTF2 / 3D', self.vfs.working_dir + 'gltf2_3d/',
save_to_one_dir=self.ui.chkbx_export_save_to_one_dir.isChecked(),
include_skeleton=self.ui.chkbx_export_3d_include_skeleton.isChecked(),
texture_format=self.ui.cmbbx_texture_format.currentText(),
)
def slot_mod_build_subset_clicked(self, checked):
self.update_select_state(self.vfs_view_current())
def slot_mod_prep_clicked(self, checked):
self.extract(
'Mod Prep', self.vfs.working_dir + 'mod/',
export_raw=self.ui.chkbx_export_raw_mods.isChecked(),
export_contents=self.ui.chkbx_export_contents_mods.isChecked(),
save_to_processed=self.ui.chkbx_export_processed_mods.isChecked(),
save_to_text=False,
export_map_full=False,
export_map_tiles=False,
)
def slot_mod_build_clicked(self, checked):
try:
subset = None
if self.ui.chkbx_mod_build_subset.isChecked():
subset = self.vfs_view_current().nodes_selected_uids_get()
self.builder.build_dir(
self.vfs,
self.vfs.working_dir + 'mod/',
self.vfs.working_dir + 'build/',
subset=subset,
symlink_changed_file=False,
do_not_build_archive=self.ui.chkbx_mod_do_not_build_archives.isChecked()
)
self.dialog_good('BUILD SUCCESS')
except EDecaFileExists as ex:
self.error_dialog('Build Failed: File Exists: {}'.format(ex.args))
except EDecaBuildError as ex:
self.error_dialog('Build Failed: {}'.format(ex.args))
def filter_text_get(self):
txt = self.ui.filter_edit.text()
if len(txt) == 0:
txt = '^.*$'
else:
if txt[0] != '^':
txt = '^' + txt
if txt[-1] != '$':
txt = txt + '$'
return txt
def filter_text_changed(self):
txt = self.filter_text_get()
same = False
try:
valid = True
re.compile(txt) # test compile
except re.error as err:
valid = False
if self.vfs_view_current():
same = txt == to_unicode(self.vfs_view_current().mask)
if not valid:
ss = 'QLineEdit {background-color: red;}'
elif same:
ss = ''
else:
ss = 'QLineEdit {background-color: yellow;}'
self.ui.filter_edit.setStyleSheet(ss)
self.ui.filter_set_bt.setEnabled(valid and not same)
self.ui.filter_clear_bt.setEnabled(not same)
def filter_text_key_release(self, source, event: QKeyEvent):
if event.text() == '\r':
if self.ui.filter_set_bt.isEnabled():
self.filter_text_accepted(True)
elif event.text() == '\x1b':
self.filter_text_cleared(True)
def filter_text_accepted(self, checked):
if self.vfs_view_current():
txt = self.filter_text_get()
self.vfs_view_current().mask_set(txt.encode('ascii'))
self.filter_text_changed()
def filter_text_cleared(self, checked):
if self.vfs_view_current():
txt = to_unicode(self.vfs_view_current().mask)
else:
txt = '^.*$'
self.ui.filter_edit.setText(txt)
def vhash_to_vpath_text_changed(self):
txt_in = self.ui.vhash_to_vpath_in_edit.text()
txt_out = ''
if self.vfs is not None:
try:
val_in = int(txt_in, 0)
strings = self.vfs.hash_string_match(hash32=val_in)
for s in strings:
if len(s) > 0:
txt_out = s[1].decode('utf-8')
except ValueError:
pass
self.ui.vhash_to_vpath_out_edit.setText(txt_out)
@Slot()
def project_new(self, checked):
if os.name == 'nt':
            game_loc = 'C:/Program Files (x86)/Steam/steamapps/common/'
else:
game_loc = os.path.expanduser('~/.steam/steamapps/common')
filename = QFileDialog.getOpenFileName(self, 'Create Project ...', game_loc, 'Game EXE (*.exe *.EXE)')
if filename is not None and len(filename[0]) > 0:
vfs = vfs_structure_new(filename)
if vfs is None:
self.logger.log('Unknown Game {}'.format(filename))
else:
self.vfs_set(vfs)
else:
self.logger.log('Cannot Create {}'.format(filename))
@Slot()
def project_open(self, checked):
filename = QFileDialog.getOpenFileName(self, 'Open Project ...', '../work', 'Project File (project.json)')
if filename is not None and len(filename[0]) > 0:
project_file = filename[0]
vfs = vfs_structure_open(project_file)
self.vfs_set(vfs)
else:
self.logger.log('Cannot Open {}'.format(filename))
@Slot()
def external_add(self, checked):
filenames, selected_filter = QFileDialog.getOpenFileNames(self, 'Open External File ...', '.', 'Any File (*)')
if filenames:
for filename in filenames:
if len(filename) > 0:
self.vfs.external_file_add(filename)
else:
self.logger.log('Cannot Open {}'.format(filenames))
@Slot()
def file_gz_open(self, checked):
filenames, selected_filter = QFileDialog.getOpenFileNames(self, 'Open GZ File ...', '../work', 'Any File (*)')
if filenames and len(filenames[0]) > 0:
path, _ = os.path.split(filenames[0])
vfs = vfs_structure_empty(path, 'GenerationZero')
self.vfs_set(vfs)
for filename in filenames:
if len(filename) > 0:
self.vfs.external_file_add(filename)
else:
self.logger.log('Cannot Open {}'.format(filenames))
@Slot()
def exit_app(self, checked):
self.close()
@Slot()
def tool_make_web_map(self, checked):
tool = ToolMakeWebMap(self.vfs)
tool.make_web_map(self.vfs.working_dir, True)
def main():
# options = argparse.ArgumentParser()
# options.add_argument("-f", "--file", type=str, required=True)
# args = options.parse_args()
# Qt Application
app = QApplication(sys.argv)
window = MainWindow()
window.setWindowTitle(window_title)
window.show()
app.exec_()
return window.vfs
|
450272
|
import fault
import aetherling.helpers.fault_helpers as fault_helpers
from aetherling.space_time import *
from aetherling.space_time.reshape_st import DefineReshape_ST
import magma as m
import json
@cache_definition
def Module_0() -> DefineCircuitKind:
class _Module_0(Circuit):
name = "top"
IO = ['I', In(ST_SSeq(4, ST_Int(8, False)).magma_repr()),'O', Out(ST_SSeq(4, ST_Int(8, False)).magma_repr())] + ClockInterface(has_ce=False,has_reset=False) + valid_ports
st_in_t = [ST_SSeq(4, ST_Int(8, False))]
st_out_t = ST_SSeq(4, ST_Int(8, False))
binary_op = False
@classmethod
def definition(cls):
n14 = DefineConst(ST_SSeq(4, ST_Int(8, False)), ((1,2,3,4,),), has_valid=True, delay=1)()
wire(cls.valid_up, n14.valid_up)
n2 = DefineFIFO(ST_SSeq(4, ST_Int(8, False)), 1, has_valid=True)()
wire(cls.I, n2.I)
wire(cls.valid_up, n2.valid_up)
n4 = DefineMap2_S(4, DefineAtomTupleCreator(ST_Int(8, False), ST_Int(8, False), has_valid=True),True)()
wire(n14.O, n4.I0)
wire(n2.O, n4.I1)
wire(n14.valid_down & n2.valid_down, n4.valid_up)
n10 = DefineMap_S(4, DefineAdd_Atom(True),True)()
wire(n4.O, n10.I)
wire(n4.valid_down, n10.valid_up)
n11 = DefineFIFO(ST_SSeq(4, ST_Int(8, False)), 1, has_valid=True)()
wire(n10.O, n11.I)
wire(n10.valid_down, n11.valid_up)
n12 = DefineFIFO(ST_SSeq(4, ST_Int(8, False)), 1, has_valid=True)()
wire(n11.O, n12.I)
wire(n11.valid_down, n12.valid_up)
n13 = DefineFIFO(ST_SSeq(4, ST_Int(8, False)), 1, has_valid=True)()
wire(n12.O, n13.I)
wire(n12.valid_down, n13.valid_up)
wire(n13.O, cls.O)
wire(n13.valid_down, cls.valid_down)
return _Module_0
Main = Module_0
fault_helpers.compile(Main(), "v./home/durst/dev/embeddedHaskellAetherling//test/no_bench/magma_examples/tuple_sum/tuple_sum_4 % 1thr.py")
|
450277
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fontforge # noqa
import os
import multiprocessing as mp
import argparse
# conda deactivate
# apt install python3-fontforge
def convert_mp(opts):
"""Useing multiprocessing to convert all fonts to sfd files"""
alphabet_chars = opts.alphabet
fonts_file_path = opts.ttf_path
sfd_path = opts.sfd_path
for root, dirs, files in os.walk(os.path.join(opts.ttf_path, opts.split)):
ttf_fnames = files
#print(ttf_fnames)
font_num = len(ttf_fnames)
process_nums = mp.cpu_count() - 2
if font_num // process_nums < 1:
process_nums = font_num
font_num_per_process = min(font_num // process_nums, 1)
else:
font_num_per_process = font_num // process_nums
def process(process_id, font_num_p_process):
for i in range(process_id * font_num_p_process, (process_id + 1) * font_num_p_process):
if i >= font_num:
break
font_id = ttf_fnames[i].split('.')[0]
split = opts.split
font_name = ttf_fnames[i]
font_file_path = os.path.join(fonts_file_path, split, font_name)
try:
cur_font = fontforge.open(font_file_path)
except Exception as e:
print('Cannot open', font_name)
print(e)
continue
target_dir = os.path.join(sfd_path, split, "{}".format(font_id))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
for char_id, char in enumerate(alphabet_chars):
char_description = open(os.path.join(target_dir, '{}_{:02d}.txt'.format(font_id, char_id)), 'w')
cur_font.selection.select(char)
cur_font.copy()
new_font_for_char = fontforge.font()
new_font_for_char.selection.select(char)
new_font_for_char.paste()
new_font_for_char.fontname = "{}_".format(font_id) + font_name
new_font_for_char.save(os.path.join(target_dir, '{}_{:02d}.sfd'.format(font_id, char_id)))
char_description.write(str(ord(char)) + '\n')
char_description.write(str(new_font_for_char[char].width) + '\n')
char_description.write(str(new_font_for_char[char].vwidth) + '\n')
char_description.write('{:02d}'.format(char_id) + '\n')
char_description.write('{}'.format(font_id))
char_description.close()
cur_font.close()
processes = [mp.Process(target=process, args=(pid, font_num_per_process)) for pid in range(process_nums + 1)]
for p in processes:
p.start()
for p in processes:
p.join()
def main():
parser = argparse.ArgumentParser(description="Convert ttf fonts to sfd fonts")
parser.add_argument("--alphabet", type=str, default='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz')
parser.add_argument("--ttf_path", type=str, default='font_ttfs')
parser.add_argument('--sfd_path', type=str, default='font_sfds')
parser.add_argument('--split', type=str, default='train')
opts = parser.parse_args()
convert_mp(opts)
if __name__ == "__main__":
main()
|
450285
|
class Node:
def __init__(self, data):
self.data = data
self.left = None
self.right = None
def insertNode(root, data):
if root == None:
root = Node(data)
else:
if root.data > data:
root.left = insertNode(root.left, data)
elif root.data < data:
root.right = insertNode(root.right, data)
return root
def minValue(root):
curr = root
while curr.left is not None:
curr = curr.left
return curr.data
def search(root, data):
if root == None:
return "Element Not Found :("
elif root.data == data:
return "Element Found"
elif root.data > data:
result = search(root.left, data)
else:
result = search(root.right, data)
return result
def delete(root, data):
if root is None:
return root
elif root.data > data:
root.left = delete(root.left, data)
elif root.data < data:
root.right = delete(root.right, data)
else:
if root.left == None:
temp = root.right
root = None
return temp
elif root.right == None:
temp = root.left
root = None
return temp
else:
value = minValue(root.right)
root.data = value
root.right = delete(root.right, value)
return root
def postorder(root):
if root is not None:
postorder(root.left)
postorder(root.right)
print(root.data, end=" "),
def preorder(root):
if root is not None:
print(root.data, end=" "),
preorder(root.left)
preorder(root.right)
def inorder(root):
if root is not None:
inorder(root.left)
print(root.data, end=" "),
inorder(root.right)
def levelOrder(root):
if root is None:
return None
else:
q = []
q.append(root)
while(len(q)):
temp = q.pop(0)
print(temp.data, end=" ")
if temp.left is not None:
q.append(temp.left)
if temp.right is not None:
q.append(temp.right)
def height(root):
if root is None:
return -1
return 1+max(height(root.left), height(root.right))
def MinHeight(root):
if not root:
return -1
if not root.left or not root.right:
return max(MinHeight(root.left), MinHeight(root.right))+1
else:
return min(MinHeight(root.left), MinHeight(root.right))+1
def FindMin(root):
if root is None:
return
if root.left is None:
return root.data
else:
return FindMin(root.left)
r = insertNode(None, 15)
r = insertNode(r, 10)
r = insertNode(r, 20)
r = insertNode(r, 5)
r = insertNode(r, 13)
r = insertNode(r, 11)
r = insertNode(r, 14)
r = insertNode(r, 0)
# print(search(r, 20))
# print('InOrder Traversal : ', end=" ")
# inorder(r)
# r = delete(r, 10)
# print('\nInOrder Traversal : ', end=" ")
# inorder(r)
# print('Min in BST :', FindMin(r))
# print('Height of BST : ', height(r))
# print('Level Order Traversal :', end=" ")
# levelOrder(r)
preorder(r)
print('\n')
postorder(r)
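# Expected output for the tree built above (inserting 15, 10, 20, 5, 13, 11, 14, 0):
#   preorder : 15 10 5 0 13 11 14 20
#   postorder: 0 5 11 14 13 10 20 15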
|
450357
|
from collections import deque
import logging
import elasticsearch
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import Q, Search
from mayan.apps.common.utils import parse_range
from ..classes import SearchBackend, SearchModel
from ..exceptions import DynamicSearchException
from ..settings import setting_results_limit
from .literals import (
DEFAULT_ELASTICSEARCH_CLIENT_MAXSIZE,
DEFAULT_ELASTICSEARCH_CLIENT_SNIFF_ON_START,
DEFAULT_ELASTICSEARCH_CLIENT_SNIFF_ON_CONNECTION_FAIL,
DEFAULT_ELASTICSEARCH_CLIENT_SNIFFER_TIMEOUT, DEFAULT_ELASTICSEARCH_HOST,
DEFAULT_ELASTICSEARCH_INDICES_NAMESPACE,
DJANGO_TO_ELASTICSEARCH_FIELD_MAP
)
logger = logging.getLogger(name=__name__)
class ElasticSearchBackend(SearchBackend):
_client = None
_search_model_mappings = {}
field_map = DJANGO_TO_ELASTICSEARCH_FIELD_MAP
def __init__(self, **kwargs):
self.client_kwargs = {}
self.indices_namespace = kwargs.pop(
'indices_namespace', DEFAULT_ELASTICSEARCH_INDICES_NAMESPACE
)
host = kwargs.pop('client_host', DEFAULT_ELASTICSEARCH_HOST)
hosts = kwargs.pop('client_hosts', None)
if not hosts:
hosts = (host,)
self.client_kwargs['hosts'] = hosts
self.client_kwargs['http_auth'] = kwargs.pop('client_http_auth', None)
self.client_kwargs['port'] = kwargs.pop('client_port', None)
self.client_kwargs['scheme'] = kwargs.pop('client_scheme', None)
self.client_kwargs['maxsize'] = kwargs.pop(
'client_maxsize', DEFAULT_ELASTICSEARCH_CLIENT_MAXSIZE
)
self.client_kwargs['sniff_on_start'] = kwargs.pop(
'client_sniff_on_start', DEFAULT_ELASTICSEARCH_CLIENT_SNIFF_ON_START
)
self.client_kwargs['sniff_on_connection_fail'] = kwargs.pop(
'client_sniff_on_connection_fail', DEFAULT_ELASTICSEARCH_CLIENT_SNIFF_ON_CONNECTION_FAIL
)
self.client_kwargs['sniffer_timeout'] = kwargs.pop(
'client_sniffer_timeout', DEFAULT_ELASTICSEARCH_CLIENT_SNIFFER_TIMEOUT
)
super().__init__(**kwargs)
def _get_status(self):
client = self.get_client()
result = []
title = 'Elastic Search search model indexing status'
result.append(title)
result.append(len(title) * '=')
stats = client.indices.stats()
for search_model in SearchModel.all():
index_name = self.get_index_name(search_model=search_model)
index_stats = stats['indices'].get(index_name, {})
if index_stats:
count = index_stats['total']['docs']['count']
else:
count = '-1'
result.append(
'{}: {}'.format(search_model.label, count)
)
return '\n'.join(result)
def _initialize(self):
self.update_mappings()
def _search(
self, query, search_model, user, global_and_search=False,
ignore_limit=False
):
client = self.get_client()
index_name = self.get_index_name(
search_model=search_model
)
search = Search(index=index_name, using=client)
final_elasticsearch_query = None
for key, value in query.items():
elasticsearch_query = Q(
Q(
name_or_query='fuzzy', _expand__to_dot=False, **{key: value}
) | Q(
name_or_query='match', _expand__to_dot=False, **{key: value}
) | Q(
name_or_query='regexp', _expand__to_dot=False, **{key: value}
) | Q(
name_or_query='wildcard', _expand__to_dot=False, **{key: value}
)
)
if final_elasticsearch_query is None:
final_elasticsearch_query = elasticsearch_query
else:
if global_and_search:
final_elasticsearch_query &= elasticsearch_query
else:
final_elasticsearch_query |= elasticsearch_query
search = search.source(None).query(final_elasticsearch_query)
client.indices.refresh(index=index_name)
if ignore_limit:
limit = None
else:
limit = setting_results_limit.value
response = search[0:limit].execute()
id_list = []
for hit in response:
id_list.append(hit['id'])
return search_model.get_queryset().filter(
pk__in=id_list
).distinct()
def close(self):
self.get_client().transport.close()
self.__class__._client = None
def deindex_instance(self, instance):
search_model = SearchModel.get_for_model(instance=instance)
client = self.get_client()
client.delete(
id=instance.pk,
index=self.get_index_name(search_model=search_model)
)
def get_client(self):
try:
if self.__class__._client is None:
self.__class__._client = Elasticsearch(**self.client_kwargs)
except Exception as exception:
raise DynamicSearchException(
'Unable to instantiate client; {}'.format(self.client_kwargs)
) from exception
return self.__class__._client
def get_index_name(self, search_model):
return '{}-{}'.format(
self.indices_namespace, search_model.model_name.lower()
)
def get_search_model_mappings(self, search_model):
try:
return self.__class__._search_model_mappings[search_model]
except KeyError:
mappings = {}
field_map = self.get_resolved_field_map(search_model=search_model)
for field_name, search_field_data in field_map.items():
mappings[field_name] = {'type': search_field_data['field'].name}
self.__class__._search_model_mappings[search_model] = mappings
return mappings
def index_instance(self, instance, exclude_model=None, exclude_kwargs=None):
search_model = SearchModel.get_for_model(instance=instance)
document = search_model.populate(
backend=self, instance=instance, exclude_model=exclude_model,
exclude_kwargs=exclude_kwargs
)
self.get_client().index(
index=self.get_index_name(search_model=search_model),
id=instance.pk, document=document
)
def index_search_model(self, search_model, range_string=None):
client = self.get_client()
index_name = self.get_index_name(search_model=search_model)
def generate_actions():
queryset = search_model.get_queryset()
if range_string:
queryset = queryset.filter(pk__in=list(parse_range(range_string=range_string)))
for instance in queryset:
kwargs = search_model.populate(
backend=self, instance=instance
)
kwargs['_id'] = kwargs['id']
yield kwargs
bulk_indexing_generator = helpers.streaming_bulk(
client=client, index=index_name, actions=generate_actions(),
yield_ok=False
)
deque(iterable=bulk_indexing_generator, maxlen=0)
def reset(self, search_model=None):
self.tear_down(search_model=search_model)
self.update_mappings(search_model=search_model)
def tear_down(self, search_model=None):
client = self.get_client()
if search_model:
client.indices.delete(
index=self.get_index_name(search_model=search_model)
)
else:
client.indices.delete(
index='{}-*'.format(self.indices_namespace)
)
def update_mappings(self, search_model=None):
client = self.get_client()
if search_model:
search_models = (search_model,)
else:
search_models = SearchModel.all()
for search_model in search_models:
index_name = self.get_index_name(search_model=search_model)
mappings = self.get_search_model_mappings(search_model=search_model)
try:
client.indices.create(
index=index_name,
body={'mappings': {'properties': mappings}}
)
except elasticsearch.exceptions.RequestError:
try:
client.indices.put_mapping(
index=index_name,
body={'properties': mappings}
)
except elasticsearch.exceptions.RequestError:
"""There a mapping changes that were not allowed.
Example: Text to Keyword.
Boot up regardless and allow user to reindex to delete
old indices.
"""
|
450417
|
class Solution:
def longestDupSubstring(self, S: str) -> str:
nums = [ord(S[i])-ord('a') for i in range(len(S))]
left, right = 1, len(S)-1
while left<=right:
mid = left+(right-left)//2
if self.helper(mid, nums)!=-1:
left = mid + 1
else:
right = mid - 1
start = self.helper(left-1, nums)
return S[start: start+left-1] if start != -1 else ""
def helper(self, pos, nums):
h = 0
for i in range(pos):
h = (h*26 + nums[i])%(2**32)
aL = 26**pos %(2**32)
visited = set()
visited.add(h)
for start in range(1, len(nums)-pos+1):
h = ((h*26 - nums[start-1]*aL)+nums[start+pos-1])%(2**32)
if h in visited:
return start
visited.add(h)
return -1
# https://leetcode-cn.com/problems/longest-duplicate-substring/solution/zui-chang-zhong-fu-zi-chuan-by-leetcode/
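# Minimal usage sketch (added for illustration, not part of the original solution):
# "banana" contains the duplicated substring "ana".
if __name__ == '__main__':
    print(Solution().longestDupSubstring("banana"))  # expected: "ana"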
|
450421
|
from django.contrib import admin
from .models import RecProduct, RecVote
admin.site.register(RecProduct)
admin.site.register(RecVote)
|
450488
|
from qgl2.qgl2 import qgl2decl, qgl2main, qreg
from qgl2.qgl2 import QRegister
from qgl2.qgl1 import X, Y, Z, Id, Utheta
from itertools import product
@qgl2decl
def t1():
"""
Expected: [X(q1), X(q1)]
"""
q1 = QRegister('q1')
l1 = list()
l1 += [ 0 ]
l1 += [ 1 ]
l1 += [ 2 ]
if l1 == [0, 1, 2]:
X(q1)
else:
Y(q1)
if len(l1) == 3:
X(q1)
else:
Y(q1)
@qgl2decl
def t2():
"""
Expected: [X(q1), X(q1), X(q1), X(q1)]
"""
q1 = QRegister('q1')
l1 = [0, 1, 2, 3]
l1 = l1[:2] + l1[2:]
if l1 == [0, 1, 2, 3]:
X(q1)
else:
Y(q1)
l1 = l1[2:] + l1[:2]
if l1 == [2, 3, 0, 1]:
X(q1)
else:
Y(q1)
l1 = l1[3:] + l1[:3]
if l1 == [1, 2, 3, 0]:
X(q1)
else:
Y(q1)
l1 = l1[1:] + l1[:1]
if l1 == [2, 3, 0, 1]:
X(q1)
else:
Y(q1)
@qgl2decl
def t3():
"""
Expected: [X(q1), X(q1), X(q1)]
"""
q1 = QRegister('q1')
total = 0
if total == 0:
X(q1)
else:
Y(q1)
total += 2
if total == 2:
total += 2
X(q1)
else:
total += 1
Y(q1)
if total == 4:
X(q1)
else:
Y(q1)
@qgl2decl
def t4():
"""
Expected: [X(q1), Y(q1), Y(q1), Z(q1), Z(q1), Z(q1), Z(q1)]
TODO: currently fails; instance methods confuse the evaluator.
"""
q1 = QRegister('q1')
l1 = list()
l1.append('a')
for _ in l1:
X(q1)
l1.append('b')
for _ in l1:
Y(q1)
l1.append('c')
l1.append('d')
for _ in l1:
Z(q1)
@qgl2decl
def t5():
"""
Expected: [X(q1), Y(q1), Y(q1), Z(q1), Z(q1), Z(q1), Z(q1)]
Like t4, but uses operators that work properly right now.
"""
q1 = QRegister('q1')
l1 = list()
l1 += ['a']
for _ in l1:
X(q1)
l1 += ['b']
for _ in l1:
Y(q1)
l1 += ['c', 'd']
for _ in l1:
Z(q1)
|
450560
|
import os
from aws_xray_sdk import global_sdk_config
import pytest
from aws_xray_sdk.core import lambda_launcher
from aws_xray_sdk.core.models.subsegment import Subsegment
TRACE_ID = '1-5759e988-bd862e3fe1be46a994272793'
PARENT_ID = '53995c3f42cd8ad8'
HEADER_VAR = "Root=%s;Parent=%s;Sampled=1" % (TRACE_ID, PARENT_ID)
os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY] = HEADER_VAR
context = lambda_launcher.LambdaContext()
@pytest.fixture(autouse=True)
def setup():
yield
global_sdk_config.set_sdk_enabled(True)
def test_facade_segment_generation():
segment = context.get_trace_entity()
assert segment.id == PARENT_ID
assert segment.trace_id == TRACE_ID
assert segment.sampled
def test_put_subsegment():
segment = context.get_trace_entity()
subsegment = Subsegment('name', 'local', segment)
context.put_subsegment(subsegment)
assert context.get_trace_entity().id == subsegment.id
subsegment2 = Subsegment('name', 'local', segment)
context.put_subsegment(subsegment2)
assert context.get_trace_entity().id == subsegment2.id
assert subsegment.subsegments[0] is subsegment2
assert subsegment2.parent_id == subsegment.id
assert subsegment.parent_id == segment.id
assert subsegment2.parent_segment is segment
context.end_subsegment()
assert context.get_trace_entity().id == subsegment.id
context.end_subsegment()
assert context.get_trace_entity().id == segment.id
def test_disable():
context.clear_trace_entities()
segment = context.get_trace_entity()
assert segment.sampled
context.clear_trace_entities()
global_sdk_config.set_sdk_enabled(False)
segment = context.get_trace_entity()
assert not segment.sampled
def test_non_initialized():
# Context that hasn't been initialized by lambda container should not add subsegments to the facade segment.
temp_header_var = os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY]
del os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY]
temp_context = lambda_launcher.LambdaContext()
facade_segment = temp_context.get_trace_entity()
subsegment = Subsegment("TestSubsegment", "local", facade_segment)
temp_context.put_subsegment(subsegment)
assert temp_context.get_trace_entity() == facade_segment
# "Lambda" container added metadata now. Should see subsegment now.
os.environ[lambda_launcher.LAMBDA_TRACE_HEADER_KEY] = temp_header_var
temp_context.put_subsegment(subsegment)
assert temp_context.get_trace_entity() == subsegment
|
450660
|
from django.db import transaction
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from openbook_common.utils.model_loaders import get_emoji_group_model, get_post_model
# TODO: Use the post uuid internally too, not only as the API resource identifier.
# This is a hotfix to prevent an enumerable posts API while in alpha.
from openbook_moderation.permissions import IsNotSuspended
from openbook_posts.views.post_reactions.serializers import GetPostReactionsSerializer, \
GetPostReactionsEmojiCountSerializer, PostEmojiCountSerializer, PostReactionEmojiGroupSerializer, \
ReactToPostSerializer
from openbook_posts.views.post_reaction.serializers import PostReactionSerializer
def get_post_id_for_post_uuid(post_uuid):
Post = get_post_model()
post = Post.objects.values('id').get(uuid=post_uuid)
return post['id']
class PostReactions(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request, post_uuid):
request_data = self._get_request_data(request, post_uuid)
serializer = GetPostReactionsSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
post_uuid = data.get('post_uuid')
emoji_id = data.get('emoji_id')
max_id = data.get('max_id')
count = data.get('count', 10)
user = request.user
post_id = get_post_id_for_post_uuid(post_uuid)
post_reactions = user.get_reactions_for_post_with_id(post_id=post_id, max_id=max_id,
emoji_id=emoji_id).order_by(
'-created')[
:count]
post_reactions_serializer = PostReactionSerializer(post_reactions, many=True, context={"request": request})
return Response(post_reactions_serializer.data, status=status.HTTP_200_OK)
def put(self, request, post_uuid):
request_data = self._get_request_data(request, post_uuid)
serializer = ReactToPostSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
emoji_id = data.get('emoji_id')
post_uuid = data.get('post_uuid')
user = request.user
post_id = get_post_id_for_post_uuid(post_uuid)
with transaction.atomic():
post_reaction = user.react_to_post_with_id(post_id=post_id, emoji_id=emoji_id,)
post_reaction_serializer = PostReactionSerializer(post_reaction, context={"request": request})
return Response(post_reaction_serializer.data, status=status.HTTP_201_CREATED)
def _get_request_data(self, request, post_uuid):
request_data = request.data.copy()
query_params = request.query_params.dict()
request_data.update(query_params)
request_data['post_uuid'] = post_uuid
return request_data
class PostReactionsEmojiCount(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request, post_uuid):
request_data = self._get_request_data(request, post_uuid)
serializer = GetPostReactionsEmojiCountSerializer(data=request_data)
serializer.is_valid(raise_exception=True)
data = serializer.validated_data
post_uuid = data.get('post_uuid')
user = request.user
post_id = get_post_id_for_post_uuid(post_uuid)
post_emoji_counts = user.get_emoji_counts_for_post_with_id(post_id)
post_reactions_serializer = PostEmojiCountSerializer(post_emoji_counts, many=True, context={"request": request})
return Response(post_reactions_serializer.data, status=status.HTTP_200_OK)
def _get_request_data(self, request, post_uuid):
request_data = request.data.copy()
query_params = request.query_params.dict()
request_data.update(query_params)
request_data['post_uuid'] = post_uuid
return request_data
class PostReactionEmojiGroups(APIView):
permission_classes = (IsAuthenticated, IsNotSuspended)
def get(self, request):
EmojiGroup = get_emoji_group_model()
emoji_groups = EmojiGroup.objects.filter(is_reaction_group=True).all().order_by('order')
serializer = PostReactionEmojiGroupSerializer(emoji_groups, many=True, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK)
|
450701
|
from selenium.webdriver import Firefox
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support.expected_conditions import (
visibility_of_element_located,
invisibility_of_element_located
)
url = 'https://selenium.dunossauro.live/aula_10_b.html'
browser = Firefox()
browser.get(url)
wdw = WebDriverWait(browser, 60)
locator = (By.TAG_NAME, 'h1')
# wdw.until_not(
#     invisibility_of_element_located(locator),
#     'h1 was not found on the page after waiting 60 seconds.'
# )
wdw.until(
    visibility_of_element_located(locator),
    'h1 was not found on the page after waiting 60 seconds.'
)
print('h1 available')
|
450729
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class PreActBlock(nn.Module):
"""
Pre-activation version of the BasicBlock for Resnets.
Arguments:
in_planes (int): number of input planes.
planes (int): number of output filters.
stride (int): stride of convolution.
"""
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(PreActBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out += shortcut
return out
class PreActBottleneck(nn.Module):
"""
Pre-activation version of the original Bottleneck module for Resnets.
Arguments:
in_planes (int): number of input planes.
planes (int): number of output filters.
stride (int): stride of convolution.
"""
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(PreActBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
out = F.relu(self.bn1(x))
shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
out = self.conv1(out)
out = self.conv2(F.relu(self.bn2(out)))
out = self.conv3(F.relu(self.bn3(out)))
out += shortcut
return out
class PreActResNet(nn.Module):
"""
Pre-activation Resnet model
"""
def __init__(self, block, num_blocks, num_classes=10):
super(PreActResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.bn = nn.BatchNorm2d(512 * block.expansion)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.relu(self.bn(out))
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def preact_resnet(name, num_classes=10):
"""
Returns suitable Resnet model from its name.
Arguments:
name (str): name of resnet architecture.
num_classes (int): number of target classes.
Returns:
torch.nn.Module.
"""
if name == 'preact-resnet18':
return PreActResNet(PreActBlock, [2,2,2,2], num_classes=num_classes)
elif name == 'preact-resnet34':
return PreActResNet(PreActBlock, [3,4,6,3], num_classes=num_classes)
elif name == 'preact-resnet50':
return PreActResNet(PreActBottleneck, [3,4,6,3], num_classes=num_classes)
elif name == 'preact-resnet101':
return PreActResNet(PreActBottleneck, [3,4,23,3], num_classes=num_classes)
    raise ValueError('Only preact-resnet18, preact-resnet34, preact-resnet50 and preact-resnet101 are supported!')
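# Minimal usage sketch (added for illustration, not part of the original module):
# build a preact-resnet18 for 10 classes and run a dummy CIFAR-sized forward pass.
if __name__ == '__main__':
    model = preact_resnet('preact-resnet18', num_classes=10)
    dummy = torch.randn(2, 3, 32, 32)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])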
|
450756
|
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
# Create a new chat bot named Charlie
chatbot = ChatBot('Charlie')
trainer = ListTrainer(chatbot)
trainer.train([
"Hi, can I help you?",
"Sure, I'd like to book a flight to Iceland.",
"Your flight has been booked."
])
# Get a response to the input text 'I would like to book a flight.'
response = chatbot.get_response('I would like to book a flight.')
print(response)
|
450792
|
import cgi
import unittest
from openid.consumer import consumer
from openid import message
from openid.test import support
class DummyEndpoint(object):
preferred_namespace = None
local_id = None
server_url = None
is_op_identifier = False
def preferredNamespace(self):
return self.preferred_namespace
def getLocalID(self):
return self.local_id
def isOPIdentifier(self):
return self.is_op_identifier
class DummyAssoc(object):
handle = "assoc-handle"
class TestAuthRequestMixin(support.OpenIDTestMixin):
"""Mixin for AuthRequest tests for OpenID 1 and 2; DON'T add
unittest.TestCase as a base class here."""
preferred_namespace = None
immediate = False
expected_mode = 'checkid_setup'
def setUp(self):
self.endpoint = DummyEndpoint()
self.endpoint.local_id = 'http://server.unittest/joe'
self.endpoint.claimed_id = 'http://joe.vanity.example/'
self.endpoint.server_url = 'http://server.unittest/'
self.endpoint.preferred_namespace = self.preferred_namespace
self.realm = 'http://example/'
self.return_to = 'http://example/return/'
self.assoc = DummyAssoc()
self.authreq = consumer.AuthRequest(self.endpoint, self.assoc)
def failUnlessAnonymous(self, msg):
for key in ['claimed_id', 'identity']:
self.failIfOpenIDKeyExists(msg, key)
def failUnlessHasRequiredFields(self, msg):
self.failUnlessEqual(self.preferred_namespace,
self.authreq.message.getOpenIDNamespace())
self.failUnlessEqual(self.preferred_namespace,
msg.getOpenIDNamespace())
self.failUnlessOpenIDValueEquals(msg, 'mode',
self.expected_mode)
# Implement these in subclasses because they depend on
# protocol differences!
self.failUnlessHasRealm(msg)
self.failUnlessIdentifiersPresent(msg)
# TESTS
def test_checkNoAssocHandle(self):
self.authreq.assoc = None
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failIfOpenIDKeyExists(msg, 'assoc_handle')
def test_checkWithAssocHandle(self):
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessOpenIDValueEquals(msg, 'assoc_handle',
self.assoc.handle)
def test_addExtensionArg(self):
self.authreq.addExtensionArg('bag:', 'color', 'brown')
self.authreq.addExtensionArg('bag:', 'material', 'paper')
self.failUnless('bag:' in self.authreq.message.namespaces)
self.failUnlessEqual(self.authreq.message.getArgs('bag:'),
{'color': 'brown',
'material': 'paper'})
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
# XXX: this depends on the way that Message assigns
# namespaces. Really it doesn't care that it has alias "0",
# but that is tested anyway
post_args = msg.toPostArgs()
self.failUnlessEqual('brown', post_args['openid.ext0.color'])
self.failUnlessEqual('paper', post_args['openid.ext0.material'])
def test_standard(self):
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessHasIdentifiers(
msg, self.endpoint.local_id, self.endpoint.claimed_id)
class TestAuthRequestOpenID2(TestAuthRequestMixin, unittest.TestCase):
preferred_namespace = message.OPENID2_NS
def failUnlessHasRealm(self, msg):
# check presence of proper realm key and absence of the wrong
# one.
self.failUnlessOpenIDValueEquals(msg, 'realm', self.realm)
self.failIfOpenIDKeyExists(msg, 'trust_root')
def failUnlessIdentifiersPresent(self, msg):
identity_present = msg.hasKey(message.OPENID_NS, 'identity')
claimed_present = msg.hasKey(message.OPENID_NS, 'claimed_id')
self.failUnlessEqual(claimed_present, identity_present)
def failUnlessHasIdentifiers(self, msg, op_specific_id, claimed_id):
self.failUnlessOpenIDValueEquals(msg, 'identity', op_specific_id)
self.failUnlessOpenIDValueEquals(msg, 'claimed_id', claimed_id)
# TESTS
def test_setAnonymousWorksForOpenID2(self):
"""OpenID AuthRequests should be able to set 'anonymous' to true."""
self.failUnless(self.authreq.message.isOpenID2())
self.authreq.setAnonymous(True)
self.authreq.setAnonymous(False)
    def test_userAnonymousIgnoresIdentifier(self):
self.authreq.setAnonymous(True)
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessHasRequiredFields(msg)
self.failUnlessAnonymous(msg)
def test_opAnonymousIgnoresIdentifier(self):
self.endpoint.is_op_identifier = True
self.authreq.setAnonymous(True)
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessHasRequiredFields(msg)
self.failUnlessAnonymous(msg)
def test_opIdentifierSendsIdentifierSelect(self):
self.endpoint.is_op_identifier = True
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessHasRequiredFields(msg)
self.failUnlessHasIdentifiers(
msg, message.IDENTIFIER_SELECT, message.IDENTIFIER_SELECT)
class TestAuthRequestOpenID1(TestAuthRequestMixin, unittest.TestCase):
preferred_namespace = message.OPENID1_NS
def setUpEndpoint(self):
TestAuthRequestBase.setUpEndpoint(self)
self.endpoint.preferred_namespace = message.OPENID1_NS
def failUnlessHasIdentifiers(self, msg, op_specific_id, claimed_id):
"""Make sure claimed_is is *absent* in request."""
self.failUnlessOpenIDValueEquals(msg, 'identity', op_specific_id)
self.failIfOpenIDKeyExists(msg, 'claimed_id')
def failUnlessIdentifiersPresent(self, msg):
self.failIfOpenIDKeyExists(msg, 'claimed_id')
self.failUnless(msg.hasKey(message.OPENID_NS, 'identity'))
def failUnlessHasRealm(self, msg):
# check presence of proper realm key and absence of the wrong
# one.
self.failUnlessOpenIDValueEquals(msg, 'trust_root', self.realm)
self.failIfOpenIDKeyExists(msg, 'realm')
# TESTS
def test_setAnonymousFailsForOpenID1(self):
"""OpenID 1 requests MUST NOT be able to set anonymous to True"""
self.failUnless(self.authreq.message.isOpenID1())
self.failUnlessRaises(ValueError, self.authreq.setAnonymous, True)
self.authreq.setAnonymous(False)
def test_identifierSelect(self):
"""Identfier select SHOULD NOT be sent, but this pathway is in
here in case some special discovery stuff is done to trigger
it with OpenID 1. If it is triggered, it will send
identifier_select just like OpenID 2.
"""
self.endpoint.is_op_identifier = True
msg = self.authreq.getMessage(self.realm, self.return_to,
self.immediate)
self.failUnlessHasRequiredFields(msg)
self.failUnlessEqual(message.IDENTIFIER_SELECT,
msg.getArg(message.OPENID1_NS, 'identity'))
class TestAuthRequestOpenID1Immediate(TestAuthRequestOpenID1):
immediate = True
expected_mode = 'checkid_immediate'
class TestAuthRequestOpenID2Immediate(TestAuthRequestOpenID2):
immediate = True
expected_mode = 'checkid_immediate'
if __name__ == '__main__':
unittest.main()
|
450852
|
import unittest
from unittest.mock import MagicMock
from heap import Heap
class HeapTests(unittest.TestCase):
def setUp(self):
self.heap = Heap()
def test_get_max_works(self):
self.heap.insert(6)
self.heap.insert(8)
self.heap.insert(10)
self.heap.insert(9)
self.heap.insert(1)
self.heap.insert(9)
self.heap.insert(9)
self.heap.insert(5)
self.assertEqual(self.heap.get_max(), 10)
def test_get_max_after_delete(self):
self.heap.insert(6)
self.heap.insert(8)
self.heap.insert(10)
self.heap.insert(9)
self.heap.insert(1)
self.heap.insert(9)
self.heap.insert(9)
self.heap.insert(5)
self.heap.delete()
self.assertEqual(self.heap.get_max(), 9)
def test_delete_elements_in_order(self):
self.heap.insert(6)
self.heap.insert(7)
self.heap.insert(5)
self.heap.insert(8)
self.heap.insert(10)
self.heap.insert(1)
self.heap.insert(2)
self.heap.insert(5)
descending_order = []
while self.heap.get_size() > 0:
descending_order.append(self.heap.delete())
self.assertEqual(descending_order, [10, 8, 7, 6, 5, 5, 2, 1])
def test_bubble_up_was_called(self):
self.heap._bubble_up = MagicMock(name='bubble up')
self.heap.insert(5)
self.assertTrue(self.heap._bubble_up.called)
def test_sift_down_was_called(self):
self.heap._sift_down = MagicMock(name='sift down')
self.heap.insert(10)
self.heap.insert(11)
self.heap.delete()
self.assertTrue(self.heap._sift_down.called)
if __name__ == '__main__':
unittest.main()
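# Note (added): these tests assume a max-heap `Heap` exposing insert(), delete(),
# get_max() and get_size(), plus internal _bubble_up()/_sift_down() helpers that
# the last two tests patch with MagicMock.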
|
450864
|
import sys
sys.path.append('../build/src')
import pypangolin as pango
from OpenGL.GL import *
def a_callback():
print("a pressed")
def main():
win = pango.CreateWindowAndBind("pySimpleDisplay", 640, 480)
glEnable(GL_DEPTH_TEST)
    pm = pango.ProjectionMatrix(640, 480, 420, 420, 320, 240, 0.1, 1000)
mv = pango.ModelViewLookAt(-0, 0.5, -3, 0, 0, 0, pango.AxisY)
s_cam = pango.OpenGlRenderState(pm, mv)
ui_width = 180
handler=pango.Handler3D(s_cam)
d_cam = pango.CreateDisplay().SetBounds(pango.Attach(0),
pango.Attach(1),
pango.Attach.Pix(ui_width),
pango.Attach(1),
-640.0/480.0).SetHandler(handler)
pango.CreatePanel("ui").SetBounds( pango.Attach(0),
pango.Attach(1),
pango.Attach(0),
pango.Attach.Pix(ui_width))
var_ui=pango.Var("ui")
var_ui.A_Button=False
var_ui.B_Button=True
var_ui.B_Double=1
var_ui.B_Str="sss"
ctrl=-96
pango.RegisterKeyPressCallback(ctrl+ord('a'), a_callback)
while not pango.ShouldQuit():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
d_cam.Activate(s_cam)
pango.glDrawColouredCube()
pango.FinishFrame()
if __name__ == "__main__":
main()
|
450865
|
import os
import sys
from setuptools import find_packages, setup
try:
from pypandoc import convert
README = convert("README.md", "rst")
except (ImportError, OSError):
README = open(os.path.join(os.path.dirname(__file__), "README.md"), "r").read()
install_requires = ["sqlparse", "wrapt"]
if sys.version_info[0] == 2:
install_requires.append("futures")
tests_require = [
"botocore==1.12.162",
"fakeredis==1.0.3",
"jmespath>=0.7.1,<1.0.0",
"mock",
"mongomock==3.17.0",
"more-itertools<6.0.0",
"mysqlclient==1.4.4",
"psycopg2-binary==2.8.3",
"pymongo==3.8.0",
"PyMySQL==0.9.3",
"pytest==4.6.6",
"pytest-benchmark==3.2.0",
"redis==3.3.4",
"requests",
]
# Lambda doesn't come with sqlite3 support, which is a dependency of pytest-cov, so we
# have to define these dependencies outside the test suite.
coverage_requires = tests_require + ["coverage==5.0a2", "pytest-cov==2.6.1"]
setup(
name="iopipe",
version="1.10.2",
description="IOpipe agent for serverless Application Performance Monitoring",
long_description=README,
author="IOpipe",
author_email="<EMAIL>",
url="https://github.com/iopipe/iopipe-python",
packages=find_packages(exclude=("tests", "tests.*")),
extras_require={
"coverage": coverage_requires,
"dev": tests_require + ["black==19.3b0", "pre-commit"],
"local": install_requires
+ ["botocore==1.12.162", "jmespath>=0.7.1,<1.0.0", "requests"],
},
install_requires=install_requires,
setup_requires=["pytest-runner==4.2"],
tests_require=tests_require,
zip_safe=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
)
|
450905
|
from django.contrib.auth import get_user_model
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from notifications.models import Notification
from applications.complaint_system.models import Caretaker, StudentComplain, Supervisor, Workers
from applications.globals.models import ExtraInfo,User
class StudentComplainSerializers(serializers.ModelSerializer):
class Meta:
model=StudentComplain
fields=('__all__')
class WorkersSerializers(serializers.ModelSerializer):
class Meta:
model = Workers
fields=('__all__')
class CaretakerSerializers(serializers.ModelSerializer):
class Meta:
model = Caretaker
fields=('__all__')
class SupervisorSerializers(serializers.ModelSerializer):
class Meta:
model=Supervisor
fields=('__all__')
class ExtraInfoSerializers(serializers.ModelSerializer):
class Meta:
model=ExtraInfo
fields=('__all__')
class UserSerializers(serializers.ModelSerializer):
class Meta:
model=User
fields=('__all__')
|
450910
|
import os
from os.path import join, exists
import argparse
import pathlib
import click
import numpy as np
import pandas as pd
import scipy.stats
import download_data
import dataframe
import plotter
from plotter import transform_acc, inv_transform_acc
from model_types import ModelTypes, model_types_map, NatModelTypes, nat_model_types_map
cur_model_types, cur_model_types_map = None, None
def get_model_type(df_row):
return cur_model_types_map[df_row.name]
def show_in_plot(df_row):
model_name, model_type = df_row.name.lower(), df_row.model_type
return 'subsample' not in model_name and model_type != cur_model_types.STANDARD # and df_row.val >= 55
def use_for_line_fit(df_row):
model_name, model_type, in_plot = df_row.name.lower(), df_row.model_type, df_row.show_in_plot
return 'aws' not in model_name and 'batch64' not in model_name and 'subsample' not in model_name and model_type is cur_model_types.STANDARD
def format_eff_robust(df, x_axis, y_axis, x_axis_fit, y_axis_fit, transform):
    # Fit a linear trend (in transformed-accuracy space) through the models flagged
    # use_for_line_fit and store each model's deviation from that fit as its
    # effective robustness; fall back to a piecewise fit around a pivot when some
    # y_axis_fit accuracies are exactly zero.
    df_line = df[df.use_for_line_fit == True]
if (df_line[y_axis_fit] == 0).any():
pivot = df_line[df_line[y_axis_fit] == 0][x_axis_fit][0]
df_line1 = df_line[df_line[x_axis_fit] < pivot]
x_acc_line_trans = transform_acc(df_line1[x_axis_fit], transform)
y_acc_line_trans = transform_acc(df_line1[y_axis_fit], transform)
lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)
intercept, slope = lin_fit[1], lin_fit[0]
lin_fit_ys_trans = transform_acc(df[x_axis_fit], transform) * slope + intercept
lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)
df['eff_robust_y'] = df[y_axis_fit] - lin_fit_ys
df_line2 = df_line[df_line[x_axis_fit] > pivot]
x_acc_line_trans = transform_acc(df_line2[x_axis_fit], transform)
y_acc_line_trans = transform_acc(df_line2[y_axis_fit], transform)
lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)
intercept, slope = lin_fit[1], lin_fit[0]
lin_fit_ys_trans = transform_acc(df[x_axis_fit], transform) * slope + intercept
lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)
df.loc[df[x_axis_fit] > pivot, 'eff_robust_y'] = (df[y_axis_fit] - lin_fit_ys)[df[x_axis_fit] > pivot]
else:
x_acc_line_trans = transform_acc(df_line[x_axis_fit], transform)
y_acc_line_trans = transform_acc(df_line[y_axis_fit], transform)
lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)
intercept, slope = lin_fit[1], lin_fit[0]
lin_fit_ys_trans = transform_acc(df[x_axis_fit], transform) * slope + intercept
lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)
df['eff_robust_y'] = df[y_axis_fit] - lin_fit_ys
x_acc_line_trans = transform_acc(df_line[x_axis], transform)
y_acc_line_trans = transform_acc(df_line[y_axis], transform)
lin_fit = scipy.stats.linregress(x_acc_line_trans, y_acc_line_trans)
intercept, slope = lin_fit[1], lin_fit[0]
lin_fit_ys_trans = transform_acc(df[x_axis], transform) * slope + intercept
lin_fit_ys = inv_transform_acc(lin_fit_ys_trans, transform)
df['eff_robust_x'] = df[y_axis] - lin_fit_ys
return df
def generate_xy_plot(x_axis, y_axis, x_axis_fit, y_axis_fit, transform, num_bootstrap_samples, output_dir,
output_file_dir, skip_download, x_label, y_label):
if skip_download:
filename = join(output_dir, 'grid_df.pkl')
if not exists(filename):
raise Exception(f'Downloaded data not found at {filename}. Please run python src/plotting/download_data.py first')
df = pd.read_pickle(filename)
else:
df = download_data.download_plotting_data(output_dir, store_data=True, verbose=True)
df, df_metadata = dataframe.extract_metadata(df)
df, df_metadata = dataframe.replace_10percent_with_metadata(df, df_metadata)
df, df_metadata = dataframe.aggregate_corruptions_with_metadata(df, df_metadata)
df = prepare_df_for_plotting(df, df_metadata, [x_axis, y_axis, x_axis_fit, y_axis_fit])
df = plotter.add_plotting_data(df, [x_axis, y_axis, x_axis_fit, y_axis_fit])
df = format_eff_robust(df, x_axis, y_axis, x_axis_fit, y_axis_fit, transform)
# dfp = df[df.show_in_plot][['eff_robust_x', 'eff_robust_y']].dropna()
# print("PEARSONR:", scipy.stats.pearsonr(dfp['eff_robust_x'], dfp['eff_robust_y'])[0])
# auto set xlim and ylim based on visible points
df_visible = df[df.show_in_plot == True]
xlim = [df_visible['eff_robust_x'].min() - 1, df_visible['eff_robust_x'].max() + 1]
ylim = [df_visible['eff_robust_y'].min() - 0.5, df_visible['eff_robust_y'].values.max() + 0.5]
fig, _, legend = plotter.simple_scatter_plot(df, 'eff_robust_x', 'eff_robust_y', xlim, ylim, cur_model_types,
title='Effective Robustness Scatterplot',
x_tick_multiplier=5, y_tick_multiplier=1,
x_label=f'{x_label} Effective Robustness', y_label=f'{y_label}\nEffective Robustness',
figsize=(12, 8), include_legend=False, return_separate_legend=True)
os.makedirs(output_file_dir, exist_ok=True)
name = f'eff_robust_legend.pdf' if len(cur_model_types) == 3 else f'eff_robust_legend2.pdf'
legend.savefig(join(output_file_dir, name), dpi='figure', bbox_inches='tight', pad_inches=0.1)
print(f"Legend saved to {join(output_file_dir, name)}")
fig_name = f'eff_robust_{y_axis.split("_")[1]}_{y_axis_fit.replace("1.0", "1")}.pdf'
fig.savefig(join(output_file_dir, fig_name), dpi='figure', bbox_inches='tight', pad_inches=0.1)
print(f"Plot saved to {join(output_file_dir, fig_name)}")
def prepare_df_for_plotting(df, df_metadata, columns):
assert set(columns).issubset(set(df.columns))
columns = list(set(columns))
df = df[columns]
df_metadata = df_metadata[[x+'_dataset_size' for x in columns]]
df = df.merge(df_metadata, right_index=True, left_index=True)
df = df.dropna()
df['model_type'] = df.apply(get_model_type, axis=1)
df['show_in_plot'] = df.apply(show_in_plot, axis=1)
df['use_for_line_fit'] = df.apply(use_for_line_fit, axis=1)
return df
if __name__ == '__main__':
for y_axis in ['avg_pgd', 'avg_corruptions']:
cur_model_types, cur_model_types_map = NatModelTypes, nat_model_types_map
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val',
y_axis_fit='imagenetv2-matched-frequency-format-val',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNetV2',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val-on-objectnet-classes',
y_axis_fit='objectnet-1.0-beta',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ObjectNet',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val-on-imagenet-a-classes',
y_axis_fit='imagenet-a',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNet-A',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val-on-vid-robust-classes',
y_axis_fit='imagenet-vid-robust_pm0',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNet-Vid-Robust (pm-0)',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val-on-ytbb-robust-classes',
y_axis_fit='ytbb-robust_pm0',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='YTBB-Robust (pm-0)',
)
cur_model_types, cur_model_types_map = ModelTypes, model_types_map
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='imagenet-vid-robust_pm0',
y_axis_fit='imagenet-vid-robust_pm10',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNet-Vid-Robust (pm-10)',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='ytbb-robust_pm0',
y_axis_fit='ytbb-robust_pm10',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='YTBB-Robust (pm-10)',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val-on-imagenet-r-classes',
y_axis_fit='imagenet-r',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNet-R',
)
generate_xy_plot(x_axis='val',
y_axis=y_axis,
x_axis_fit='val',
y_axis_fit='imagenet-sketch',
transform='logit',
num_bootstrap_samples=1000,
output_dir=str((pathlib.Path(__file__).parent / '../outputs').resolve()),
output_file_dir=str((pathlib.Path(__file__).parent / '../paper/appendix').resolve()),
skip_download=True,
x_label='Lp Attacks' if 'pgd' in y_axis else 'Corruptions Averaged',
y_label='ImageNet-Sketch',
)
|
450928
|
from .argo_client import ArgoClient
from .argo_options import ArgoOptions
from .dsl import CronWorkflow, Workflow, WorkflowTemplate
import argo_workflow_tools.dsl.dsl_decorators as dsl
from .dsl.condition import Condition
from .exceptions.workflow_not_found_exception import WorkflowNotFoundException
from .workflow_result import WorkflowResult
from .workflow_status import WorkflowStatus
|
450993
|
from .common import InfoExtractor
class MyChannelsIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?mychannels\.com/.*(?P<id_type>video|production)_id=(?P<id>[0-9]+)'
_TEST = {
'url': 'https://mychannels.com/missholland/miss-holland?production_id=3416',
'md5': 'b8993daad4262dd68d89d651c0c52c45',
'info_dict': {
'id': 'wUUDZZep6vQD',
'ext': 'mp4',
'title': '<NAME> joins VOTE LEAVE',
'description': '<NAME> | #13 Not a potato',
'uploader': '<NAME>',
}
}
def _real_extract(self, url):
id_type, url_id = self._match_valid_url(url).groups()
webpage = self._download_webpage(url, url_id)
video_data = self._html_search_regex(r'<div([^>]+data-%s-id="%s"[^>]+)>' % (id_type, url_id), webpage, 'video data')
def extract_data_val(attr, fatal=False):
return self._html_search_regex(r'data-%s\s*=\s*"([^"]+)"' % attr, video_data, attr, fatal=fatal)
minoto_id = extract_data_val('minoto-id') or self._search_regex(r'/id/([a-zA-Z0-9]+)', extract_data_val('video-src', True), 'minoto id')
return {
'_type': 'url_transparent',
'url': 'minoto:%s' % minoto_id,
'id': url_id,
'title': extract_data_val('title', True),
'description': extract_data_val('description'),
'thumbnail': extract_data_val('image'),
'uploader': extract_data_val('channel'),
}
|