id
stringlengths 3
8
| content
stringlengths 100
981k
|
|---|---|
57404
|
import sys
def inline_validate_1(s):
    """Validate that `s` is a non-empty string, then that it is fully lowercase (two calls)."""
    from valid8 import validate
    validate('s', s, instance_of=str, min_len=1)
    validate('s', s, equals=s.lower())

def inline_validate_2(s):
    """Same check as inline_validate_1, in a single call with a `custom` callable."""
    from valid8 import validate
    validate('s', s, instance_of=str, min_len=1, custom=str.islower)

def inline_validate_3(s):
    """Same check, expressing the custom condition as a mini_lambda expression."""
    from valid8 import validate
    # we create a custom mini_lambda variable, since the name 's' is already used
    from mini_lambda import InputVar
    txt = InputVar('txt', str)
    validate('s', s, instance_of=str, min_len=1, custom=txt.islower())

def with_validator(s):
    """Same check using valid8's `validator` context-manager syntax (`v.alid = <bool expr>`)."""
    from valid8 import validator
    with validator('s', s, instance_of=str) as v:
        v.alid = (len(s) > 0) and s.islower()
def function_input_builtin_stdlib(value):
    """Validate a function argument with `@validate_arg` and stdlib/builtin validators."""
    from valid8 import validate_arg
    from valid8.validation_lib import instance_of, minlen

    @validate_arg('s', instance_of(str), minlen(1), str.islower)
    def my_function(s):
        pass

    my_function(value)

def function_input_mini_lambda(value):
    """Validate a function argument with `@validate_arg` and mini_lambda expressions."""
    from mini_lambda import s, Len
    from valid8 import validate_arg
    from valid8.validation_lib import instance_of

    @validate_arg('s', instance_of(str), Len(s) > 0, s.islower())
    def my_function(s):
        pass

    my_function(value)

def class_field_builtin_stdlib(value):
    """Validate a class field with `@validate_field` and stdlib/builtin validators."""
    from valid8 import validate_field
    from valid8.validation_lib import instance_of, minlen

    @validate_field('s', instance_of(str), minlen(1), str.islower)
    class Foo:
        def __init__(self, s):
            self.s = s

    Foo(value)

def class_field_mini_lambda(value):
    """Validate a class field with `@validate_field` and mini_lambda expressions."""
    from mini_lambda import s, Len
    from valid8 import validate_field
    from valid8.validation_lib import instance_of

    @validate_field('s', instance_of(str), Len(s) > 0, s.islower())
    class Foo:
        def __init__(self, s):
            self.s = s

    Foo(value)
# The PEP 484 variants use Python-3-only syntax, so import them conditionally.
if sys.version_info >= (3, 0):
    from ._tests_pep484 import ex2_pep484 as pep484
|
57411
|
import tensorflow as tf
import numpy as np
import os
class TFModel(object):
    '''
    This class contains the general functions for a tensorflow model:
    session bootstrap, checkpoint saving and teardown.
    '''

    def __init__(self, config):
        """Store the run configuration; session and saver are created lazily.

        config is expected to expose at least `save` and `k_folds`, plus the
        hyper-parameters read by the `*_save` methods.
        """
        # Limit the TensorFlow's logs
        # os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
        # tf.logging.set_verbosity(tf.logging.ERROR)
        self.config = config
        self.sess = None
        self.saver = None

    def initialize_session(self):
        """
        Set configurations:
            * allow_soft_placement : If True, will allow models trained
              on GPU to be deployed unto CPU
            * log_device_placement : If True, will print the hardware
              and operations that have been placed on it
        """
        sess_conf = tf.ConfigProto(allow_soft_placement=True,
                                   log_device_placement=False)
        sess_conf.gpu_options.allow_growth = True
        self.sess = tf.Session(config=sess_conf)
        # Build a Saver only when checkpointing is requested.
        # FIX: was `if not self.config.save == None:` — use the idiomatic
        # identity test for None.
        if self.config.save is not None:
            self.saver = tf.train.Saver()
        # Initialize all variables
        self.sess.run(tf.global_variables_initializer())

    def save_model(self, fold, timestamp, name):
        """
        Save the model under ./checkpoints/<name>_<timestamp>/ and return that
        directory. With k-fold cross-validation each fold gets its own
        Fold_<n>/ subdirectory.
        """
        model_name = name + "_" + timestamp
        main_dir = "./checkpoints/" + model_name + "/"
        # Check main model dir
        if not os.path.exists(main_dir):
            os.makedirs(main_dir)
        # If using K-Fold Cross Validation, save each model
        if self.config.k_folds > 1:
            # renamed from `dir` to avoid shadowing the builtin
            fold_dir = main_dir + "Fold_" + str(fold + 1) + "/"
            if not os.path.exists(fold_dir):
                os.makedirs(fold_dir)
            self.saver.save(self.sess, fold_dir)
        else:
            self.saver.save(self.sess, main_dir)
        return main_dir

    def ner_save(self, fold, timestamp, name, ep):
        """Save the model, plus the NER config file (written once, on fold 0)."""
        main_dir = self.save_model(fold, timestamp, name)
        if fold == 0:
            np.savez(main_dir + "config",
                     model=self.config.model,
                     k_folds=self.config.k_folds,
                     words=self.config.words,
                     tags=self.config.tags,
                     chars=self.config.chars,
                     use_crf=self.config.use_crf,
                     epoch=ep + 1)

    def class_save(self, fold, timestamp, name, ep):
        """Save the model, plus the classification config file (written once, on fold 0)."""
        main_dir = self.save_model(fold, timestamp, name)
        if fold == 0:
            np.savez(main_dir + "config",
                     model=self.config.model,
                     k_folds=self.config.k_folds,
                     words=self.config.words,
                     chars=self.config.chars,
                     epoch=ep + 1)

    def close_session(self):
        """Close the session and reset the default graph so a fresh model can be built."""
        self.sess.close()
        tf.reset_default_graph()
|
57412
|
from flask import Flask, request, jsonify
app = Flask(__name__)
@app.route('/upload', methods=['POST'])
def index():
    """Accept multipart uploads under the 'image' and 'video' fields and save
    them into the current working directory.

    Returns a JSON payload with code 0 on success, -1 when nothing was sent.
    """
    import os  # local import: used only to sanitize client-supplied filenames
    image_files = request.files.getlist('image')
    video_files = request.files.getlist('video')
    if not image_files and not video_files:
        return jsonify({
            "code": -1,
            "message": "No upload images or videos."
        })
    # SECURITY FIX: never trust the client-supplied filename verbatim — a name
    # like '../../etc/passwd' would escape the upload directory. basename
    # strips any directory components before saving.
    for upload in image_files + video_files:
        if not upload.filename:
            continue  # skip parts with no filename instead of crashing
        upload.save(os.path.basename(upload.filename))
    return jsonify({
        "code": 0,
        "message": "upload images and videos success."
    })
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger
    # (arbitrary code execution) — never combine it with binding to 0.0.0.0
    # outside local development.
    app.run('0.0.0.0', debug=True, port=5000)
|
57413
|
import logging
from dataclasses import dataclass
from unittest.mock import patch
import pytest
from tests.utils.mock_backend import (
ApiKey,
BackendContext,
Run,
Project,
Team,
User,
)
from tests.utils.mock_base_client import MockBaseClient
########################################
########### BackendContext #############
########################################
@dataclass
class DefaultData:
    """Bundle of the default mock-backend entities shared by the test fixtures."""
    user: User          # default account owner
    api_key: ApiKey     # key issued to that user
    team: Team          # the user's personal team
    project: Project    # project inside the team
    run: Run            # run linked to user/team/project
@pytest.fixture(scope="session")
def default_data() -> DefaultData:
    """Build one linked chain of default backend entities:
    user -> (api key, personal team) -> project -> run."""
    owner = User()
    key = ApiKey(owner.Id)
    personal_team = Team(owner.Id, isPersonal=True)
    proj = Project(personal_team.Id)
    default_run = Run(userId=owner.Id, teamId=personal_team.Id, projectId=proj.Id)
    return DefaultData(
        user=owner,
        api_key=key,
        team=personal_team,
        project=proj,
        run=default_run,
    )
@pytest.fixture(scope="function", autouse=True)
def patch_ctx(default_data: DefaultData):
    """Give every test a fresh BackendContext pre-populated with the default entities."""
    logging.info("Patching tests.utils.mock_backend.ctx to have default values")
    fresh_ctx = BackendContext()
    for field_name, field_value in vars(default_data).items():
        fresh_ctx.set(field_name, field_value)
    with patch("tests.utils.mock_backend.ctx", fresh_ctx):
        logging.info("Successfully patched tests.utils.mock_backend.ctx")
        yield
    logging.info("unpatching tests.utils.mock_backend.ctx back to fresh state")
@pytest.fixture(scope="session", autouse=True)
def patch_base_client():
    """Swap the real _BaseClient for MockBaseClient for the whole test session."""
    with patch("manta_lab.api.client._BaseClient", MockBaseClient):
        # FIX: the log messages previously said "client_BaseClient" (missing
        # dot), which did not match the actual patch target.
        logging.info("Successfully patched manta_lab.api.client._BaseClient with MockBaseClient")
        yield
    logging.info("unpatching manta_lab.api.client._BaseClient")
# @pytest.fixture()
# def run(request):
# marker = request.node.get_closest_marker("manta_args")
# kwargs = marker.kwargs if marker else dict(env={})
# for k, v in kwargs["env"].items():
# os.environ[k] = v
# # TODO: should be create run by manta.init
# s = Settings()
# s.update_envs(kwargs["env"])
# return Run(settings=s)
|
57430
|
import codecs
import importlib
import logging
import os
import sys
import time
import html
import accounts
import config
import log
import storage
from args import args
from vkapi import VkApi
from vkbot import createVkApi
from scripts import runScript, runInMaster
# Run relative to the script's own directory so config/data paths resolve
# regardless of where the process was launched from.
os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])))
accounts.init()
class MyHandler(logging.Handler):
    """Route stdlib logging records into the project's `log` module.

    Messages prefixed with 'red|' / 'green|' / 'yellow|' are treated as
    colored info messages; otherwise the record level selects the log call.
    """

    _COLOR_PREFIXES = ('red|', 'green|', 'yellow|')

    def emit(self, record):
        # Default emission is suppressed; all routing happens in handle().
        pass

    def handle(self, record):
        msg = record.getMessage()
        lowered = msg.lower()
        if any(lowered.startswith(prefix) for prefix in self._COLOR_PREFIXES):
            color, msg = msg.split('|', maxsplit=1)
            log.info((msg, html.escape(msg)), color.lower())
            return
        db_msg = getattr(record, 'db', None)
        if db_msg:
            # Carry the extra db payload alongside the plain message.
            msg = (msg, db_msg)
        level = record.levelname
        if level == 'CRITICAL':
            log.error(msg, fatal=True)
        elif level == 'ERROR':
            log.error(msg, record.exc_info is not None)
        elif level == 'WARNING':
            log.warning(msg)
        elif level == 'INFO':
            log.info(msg)
        elif level == 'DEBUG':
            log.debug(msg)
# Route all stdlib logging through MyHandler; silence chatty third-party loggers.
logging.basicConfig(handlers=[MyHandler()], level=logging.DEBUG)
logging.getLogger('antigate').setLevel(logging.CRITICAL)
logging.getLogger('requests').setLevel(logging.CRITICAL)
logging.getLogger('asyncio').setLevel(logging.CRITICAL)
if config.get('vkbot.suppress_chat_stderr', 'b'):
    logging.getLogger('chatlog').setLevel(logging.CRITICAL)
# Force UTF-8 output regardless of the terminal locale.
os.environ['LC_ALL'] = 'ru_RU.utf-8'
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
# StreamWriter has no `encoding` attribute of its own; presumably other code
# reads sys.stdout.encoding — TODO confirm.
sys.stdout.encoding = 'UTF-8'
login = config.get('login.login')
password = config.get('login.password')
storage.init(accounts.getFile('storage.db'))
def availableScripts():
    """Print the sorted names of runnable scripts found in the ./scripts directory."""
    names = sorted(entry[:-3] for entry in os.listdir('scripts')
                   if entry.endswith('.py') and not entry.startswith('__'))
    print('Available scripts:', ', '.join(names))
# CLI dispatch: no --script value at all -> list available scripts and exit;
# a script name -> validate it, run it (possibly inside the master process)
# and exit; otherwise fall through and start the bot itself.
if args['script'] is None:
    availableScripts()
    sys.exit()
if args['script']:
    # Script names may only contain letters and underscores.
    if not args['script'].replace('_', '').isalpha():
        print('Invalid script')
        availableScripts()
        sys.exit()
    if args['master']:
        # Try to hand the script to an already-running master instance first.
        if runInMaster(args['script'].lower(), args['args']):
            sys.exit()
        logging.warning('Failed to run script in master')
    v = createVkApi(login, password)
    try:
        runScript(args['script'].lower(), args['args'], v)
    except ImportError:
        print('Invalid script')
        availableScripts()
        sys.exit(1)
    sys.exit()
logging.info('Starting vkbot, pid ' + str(os.getpid()))
|
57502
|
import cv2
import numpy as np

# Detection confidence threshold and non-max-suppression overlap threshold.
thres = 0.45
nms_threshold = 0.2

# Default Camera Capture: 1280x720, brightness 150.
cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)
cap.set(10, 150)

## Importing the COCO class names into a list (one name per line)
classNames = []
classFile = 'coco.names'
with open(classFile, 'rt') as f:
    classNames = f.read().rstrip('\n').split('\n')

## Configuring both SSD model and weights (assigning)
configPath = 'ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt'
weightsPath = 'frozen_inference_graph.pb'

## dnn - inbuilt detection model of OpenCV
net = cv2.dnn_DetectionModel(weightsPath, configPath)
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)

## Detection loop
while True:
    success, img = cap.read()
    # BUGFIX: stop cleanly if the camera delivers no frame instead of crashing
    # inside net.detect on a None image.
    if not success or img is None:
        break
    classIds, confs, bbox = net.detect(img, confThreshold=thres)
    # BUGFIX: guard against frames with no detections — reshape/NMSBoxes would
    # otherwise raise on empty arrays.
    if len(classIds) > 0:
        bbox = list(bbox)
        confs = [float(c) for c in np.array(confs).reshape(-1)]
        indices = cv2.dnn.NMSBoxes(bbox, confs, thres, nms_threshold)
        # BUGFIX: flatten ids/indices so the code works both with older OpenCV
        # (Nx1 arrays, previously unwrapped via i[0]) and OpenCV >= 4.5.4,
        # which returns flat 1-D arrays.
        flat_ids = np.array(classIds).reshape(-1)
        for i in np.array(indices).reshape(-1):
            x, y, w, h = bbox[i]
            cv2.rectangle(img, (x, y), (x + w, y + h), color=(0, 255, 0), thickness=2)
            # COCO ids are 1-based, hence the -1 when indexing classNames.
            cv2.putText(img, classNames[flat_ids[i] - 1].upper(), (x + 10, y + 30),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("Output", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
|
57507
|
from openmdao.api import ExplicitComponent
from gebtaero import *
import numpy as np
class SouplesseMat(ExplicitComponent):
    """OpenMDAO component computing flexibility ("souplesse") matrix terms of a
    two-ply composite plate cross-section via gebtaero."""

    def setup(self):
        # Orthotropic ply material properties
        self.add_input('El', val=125e9, units='Pa')
        self.add_input('Et', val=9.3e9, units='Pa')
        self.add_input('Nult', val=0.28)
        self.add_input('Glt', val=7.5e9, units='Pa')
        self.add_input('Rho', val=1.57e3, units='kg/m**3')
        # Plate geometry
        self.add_input('Thickness', val=0.00032, units='m')
        self.add_input('Ori_1', val=30., units='deg')
        self.add_input('Ori_2', val=30., units='deg')
        self.add_input('Chord', val=0.03, units='m')
        self.add_input('SectionLength', val=0.42, units='m')
        # Flutter-search parameters (declared for interface compatibility;
        # not used by compute() below — presumably consumed by sibling
        # components, TODO confirm)
        self.add_input('Vmin', val=0, units='m/s')  # Lower boundary of the search interval
        self.add_input('Vmax', val=100, units='m/s')  # Upper boundary of the search interval
        self.add_input('Vstep', val=1, units='m/s')  # Velocity step used in the algorithm
        self.add_input('DeltaV', val=0.01, units='m/s')  # Critical velocity tolerance
        self.add_input('AeroFlag', val=3)  # type of aerodynamic model used : 0 = no aero, 1 = quasi-steady, 2 = quasi-steady with added mass, 3 = unsteady (Peters)
        # NOTE(review): the original comment here was a copy-paste of AeroFlag's;
        # GravFlag presumably toggles gravity — confirm against gebtaero docs.
        self.add_input('GravFlag', val=0)
        self.add_input('AlphaAC', val=0, units='deg')  # Aircraft angle of attack
        self.add_input('BetaAC', val=0, units='deg')  # Aircraft yaw angle
        # Flexibility matrix outputs
        self.add_output('bendflex', val=1., units='N**-1*m**-2')
        self.add_output('twistflex', val=1., units='N**-1*m**-2')
        self.add_output('coupling', val=1., units='N**-1*m**-2')
        self.add_output('mCoupling', val=1., units='N**-1*m**-2')

    def compute(self, inputs, outputs):
        """Build the two-ply plate and extract flexibility matrix entries."""
        El = inputs['El'][0]
        Et = inputs['Et'][0]
        Nult = inputs['Nult'][0]
        Glt = inputs['Glt'][0]
        Rho = inputs['Rho'][0]
        Thickness = inputs['Thickness'][0]
        Ori_1 = inputs['Ori_1'][0]
        Ori_2 = inputs['Ori_2'][0]
        Chord = inputs['Chord'][0]
        # (Unused locals for Vmin..BetaAC removed — those inputs are not read here.)
        CS = CrossSection()
        Mat = OrthoMaterial(El, Et, Nult, Glt, Rho)
        Plate = CompositePlate(Chord)
        Plate.AppendPly(CompositePly(Mat, Thickness, Ori_1))
        Plate.AppendPly(CompositePly(Mat, Thickness, Ori_2))
        # compute flexibility matrix (overwrites any previous one).
        # RigidX=True : suppress traction along beam axis dof.
        # RigidZ=True : suppress bending in lag axis dof.
        CS.SetFlexibilityMatrixByPlate(Plate, "he20r", 1, 10, 10, RigidX=True, RigidZ=True)
        # FIX: removed leftover debug print(Ori_1, Ori_2); fetch the matrix
        # once instead of four times.
        flex = CS.GetFlexibilityMatrix()
        outputs['bendflex'] = flex[4, 4]
        outputs['twistflex'] = flex[3, 3]
        outputs['coupling'] = flex[3, 4]
        outputs['mCoupling'] = -flex[3, 4]
|
57536
|
import torch
import torch.nn as nn
from torch.nn import Parameter as P
from torchvision.models.inception import inception_v3
import torch.nn.functional as F
# Module that wraps the inception network to enable use with dataparallel and
# returning pool features and logits.
class WrapInception(nn.Module):
    """Wrap an Inception-v3 backbone so a forward pass returns both the 2048-d
    average-pool features and the classifier logits (for use with DataParallel).

    Inputs are expected in [0, 1]; they are normalized with the ImageNet
    channel statistics and resized to 299x299 when necessary.
    """

    # Backbone attributes in execution order; the 'pool' sentinels mark the two
    # 3x3/stride-2 max-pools interleaved in the Inception-v3 stem.
    _PLAN = (
        'Conv2d_1a_3x3',   # 299x299x3  -> 149x149x32
        'Conv2d_2a_3x3',   # -> 147x147x32
        'Conv2d_2b_3x3',   # -> 147x147x64
        'pool',            # -> 73x73x64
        'Conv2d_3b_1x1',   # -> 73x73x80
        'Conv2d_4a_3x3',   # -> 71x71x192
        'pool',            # -> 35x35x192
        'Mixed_5b', 'Mixed_5c', 'Mixed_5d',           # -> 35x35x288
        'Mixed_6a', 'Mixed_6b', 'Mixed_6c',
        'Mixed_6d', 'Mixed_6e',                       # -> 17x17x768
        'Mixed_7a', 'Mixed_7b', 'Mixed_7c',           # -> 8x8x2048
    )

    def __init__(self, net):
        super(WrapInception, self).__init__()
        self.net = net
        # ImageNet channel statistics, shaped (1, C, 1, 1) for NCHW broadcasting.
        self.mean = P(torch.tensor([0.485, 0.456, 0.406]).view(1, -1, 1, 1),
                      requires_grad=False)
        self.std = P(torch.tensor([0.229, 0.224, 0.225]).view(1, -1, 1, 1),
                     requires_grad=False)

    def forward(self, x):
        x = (x - self.mean) / self.std
        # Upsample if necessary
        if x.shape[2] != 299 or x.shape[3] != 299:
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=True)
        # Run the backbone according to the fixed layer plan above.
        for step in self._PLAN:
            if step == 'pool':
                x = F.max_pool2d(x, kernel_size=3, stride=2)
            else:
                x = getattr(self.net, step)(x)
        # Global average pool over the 8x8 spatial grid -> (N, 2048)
        pool = torch.mean(x.view(x.size(0), x.size(1), -1), 2)
        # Dropout with training=False is the identity; kept to mirror the
        # torchvision forward pass.
        logits = self.net.fc(F.dropout(pool, training=False).view(pool.size(0), -1))
        return pool, logits
# Load and wrap the Inception model
def load_inception_net(parallel=False):
    """Return a CUDA-resident, eval-mode Inception-v3 wrapped in WrapInception.

    :param parallel: if True, additionally wrap the model in nn.DataParallel.
    NOTE(review): requires a CUDA device; pretrained=True downloads the
    torchvision weights on first use.
    """
    inception_model = inception_v3(pretrained=True, transform_input=False)
    inception_model = WrapInception(inception_model.eval()).cuda()
    if parallel:
        inception_model = nn.DataParallel(inception_model)
    return inception_model
|
57543
|
import tensorflow as tf
def maxPoolLayer(x, ksize, stride, padding='VALID', name=None):
    """2-D max pooling with a square window and equal stride in both spatial dims."""
    window = [1, ksize, ksize, 1]
    hops = [1, stride, stride, 1]
    return tf.nn.max_pool(x, ksize=window, strides=hops, padding=padding, name=name)
def LRN(x, R=2, alpha=2e-5, beta=0.75, bias=1.0, name=None):
    """Local response normalization with depth radius R (AlexNet-style)."""
    return tf.nn.local_response_normalization(
        x, depth_radius=R, alpha=alpha, beta=beta, bias=bias, name=name)
def fcLayer(x, outputD, name, std_init=0.005, bias_init=0.0, reluFlag=True):
    """Fully connected layer (xW + b), optionally followed by ReLU.

    Weights get an L2 regularizer (5e-4) and a normal initializer with the
    given stddev; biases start at bias_init.
    """
    inputD = int(x.get_shape()[-1])
    with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:
        weight_init = tf.random_normal_initializer(stddev=std_init)
        weights = tf.get_variable("w",
                                  shape=[inputD, outputD],
                                  dtype="float",
                                  initializer=weight_init,
                                  regularizer=tf.contrib.layers.l2_regularizer(5e-4))
        biases = tf.get_variable("b",
                                 [outputD],
                                 dtype="float",
                                 initializer=tf.constant_initializer(bias_init))
        out = tf.nn.xw_plus_b(x, weights, biases, name=scope.name)
    return tf.nn.relu(out) if reluFlag else out
def convLayer(x, ksize, stride, feature, padding='SAME', bias_init=0.0, groups=1, name=None):
    """Grouped 2-D convolution + bias + ReLU (AlexNet-style when groups=2).

    :param x: NHWC input tensor
    :param ksize: square kernel size
    :param stride: spatial stride
    :param feature: number of output channels
    :param groups: channel groups; input and kernels are split along the
        channel axis, convolved independently, then concatenated
    """
    channel = int(x.get_shape()[-1])

    def conv(a, b):
        return tf.nn.conv2d(a, b, strides=[1, stride, stride, 1], padding=padding)

    with tf.variable_scope(name) as scope:
        # BUGFIX: use integer division — `channel / groups` yields a float in
        # Python 3, which is not a valid dimension for tf.get_variable.
        w = tf.get_variable("w",
                            shape=[ksize, ksize, channel // groups, feature],
                            initializer=tf.random_normal_initializer(
                                stddev=0.01),
                            regularizer=tf.contrib.layers.l2_regularizer(5e-4))
        b = tf.get_variable("b",
                            shape=[feature],
                            initializer=tf.constant_initializer(bias_init))
        xNew = tf.split(value=x, num_or_size_splits=groups, axis=3)
        wNew = tf.split(value=w, num_or_size_splits=groups, axis=3)
        featureMap = [conv(t1, t2) for t1, t2 in zip(xNew, wNew)]
        mergeFeatureMap = tf.concat(values=featureMap, axis=3)
        out = tf.nn.bias_add(mergeFeatureMap, b)
        return tf.nn.relu(out, name=scope.name)
def alexnet(input, is_training, root_conv_stride=4):
    """AlexNet feature extractor up to fc6 (implemented as a conv layer).

    :param input: NHWC image batch
    :param is_training: unused here — presumably kept for interface
        compatibility with other backbones; TODO confirm callers
    :param root_conv_stride: stride of the first 11x11 convolution
    :return: (fc6 feature map, dict of intermediate end points)
    """
    end_points = {}
    with tf.variable_scope('alexnet', reuse=tf.AUTO_REUSE):
        conv1 = convLayer(input, 11, root_conv_stride,
                          96, "VALID", name="conv1")
        end_points['conv1'] = conv1
        pool1 = maxPoolLayer(conv1, 3, 2, name="pool1")
        lrn1 = LRN(pool1, name="lrn1")
        # conv2/conv4/conv5 use two channel groups, as in the original AlexNet
        conv2 = convLayer(lrn1, 5, 1, 256, groups=2,
                          bias_init=1.0, name="conv2")
        end_points['conv2'] = conv2
        pool2 = maxPoolLayer(conv2, 3, 2, name="pool2")
        lrn2 = LRN(pool2, name="lrn2")
        conv3 = convLayer(lrn2, 3, 1, 384, name="conv3")
        end_points['conv3'] = conv3
        conv4 = convLayer(conv3, 3, 1, 384, groups=2,
                          bias_init=1.0, name="conv4")
        end_points['conv4'] = conv4
        conv5 = convLayer(conv4, 3, 1, 256, groups=2,
                          bias_init=1.0, name="conv5")
        end_points['conv5'] = conv5
        # NOTE(review): asymmetric 1-pixel pad (top/left) before pool5 —
        # presumably to produce the spatial size fc6 expects; confirm.
        conv5 = tf.pad(conv5, paddings=[[0, 0], [1, 0], [1, 0], [0, 0]])
        pool5 = maxPoolLayer(conv5, 3, 2, name="pool5")
        #fc1 = fcLayer(tf.layers.flatten(pool5), 1024, name="fc6")
        fc1 = convLayer(pool5, 3, 1, 1024, "VALID", bias_init=1.0, name="fc6")
        end_points['fc6'] = fc1
    return fc1, end_points
|
57580
|
import pandas as pd
import time
from contextlib import contextmanager
from tqdm import tqdm
tqdm.pandas()
# nice way to report running times
@contextmanager
def timer(name):
    """Context manager that prints how long the wrapped block took, in whole seconds."""
    t0 = time.time()
    yield
    print(f'[{name}] done in {time.time() - t0:.0f} s')

def get_named_entities(df):
    """
    Count the named entities that are neither A nor B.
    Hopefully this correlates with class "Neither".
    :param df: competition data with one extra field spacy_nlp_doc: precomputed nlp(text)
    :return: DataFrame (same index as df) with a single "named_ent" count column
    """
    named_df = pd.DataFrame(0, index=df.index, columns=["named_ent"])
    with timer('Extracting named entities'):
        # BUGFIX: iterate the actual index instead of range(len(df)) — .loc is
        # label-based, so positional iteration breaks on any non-RangeIndex.
        # (Unused A/B/Pronoun offset locals removed.)
        for i in df.index:
            doc = df.loc[i, "spacy_nlp_doc"]
            A = df.loc[i, "A"]
            B = df.loc[i, "B"]
            # count persons that are not A or B
            # spacy's entities are spans, not tokens
            # e.g. "<NAME>" is one entity
            ent_list = [ent for ent in doc.ents
                        if ent.label_ == "PERSON" and ent.text != A and ent.text != B]
            named_df.loc[i, "named_ent"] = len(ent_list)
    return named_df
|
57601
|
import numpy as np
import sys, os, glob

######################################### INPUT ########################################
# Root directory holding the power-spectrum measurements for every cosmology.
root = '/simons/scratch/fvillaescusa/pdf_information/Pk'
folders = ['Om_p', 'Ob_p', 'Ob2_p', 'h_p', 'ns_p', 's8_p',
           'Om_m', 'Ob_m', 'Ob2_m', 'h_m', 'ns_m', 's8_m',
           'Mnu_p', 'Mnu_pp', 'Mnu_ppp', 'fiducial']
########################################################################################

# do a loop over the different folders and report how many z=127 Pk files exist
for cosmo in folders:
    files = glob.glob('%s/matter/%s/*/Pk_*z=127.txt' % (root, cosmo))
    # BUGFIX: Python 2 print statement converted to the Python 3 function.
    print('Found %d files for %s at z=127' % (len(files), cosmo))
|
57608
|
from copy import deepcopy
from logics.utils.parsers import parser_utils
from logics.classes.exceptions import NotWellFormed
from logics.classes.predicate import PredicateFormula
from logics.utils.parsers.standard_parser import StandardParser
class PredicateParser(StandardParser):
    """Parser for predicate languages.

    Extends ``StandardParser``. Has two additional parameters to specify infix predicates and functions.
    Also includes some changes in the format of the valid input:

    * Atomics must be given in format ``"R(a, b, c)"`` for prefix predicates, or ``"a = b"`` for infix predicates
    * Infix predicate formulae must come without outer parentheses, e.g. ``"(a = b)"`` is not well formed
    * Outermost parentheses in infix function terms can be omitted, e.g. both ``"0+(0+0)"`` and ``"(0+(0+0))"`` are ok
    * Infix predicates and function symbols CANNOT be given in prefix notation
    * Quantified formulae come in format ∀x (A) or ∀x ∈ T (A) - Always add parentheses to the quantified formula

    Parameters
    ----------
    language: logics.classes.propositional.Language or logics.classes.propositional.InfiniteLanguage
        Instance of Language or InfiniteLanguage
    parse_replacement_dict: dict, optional
        Dictionary of the form ({string: string, ...}). See below for an explanation
    unparse_replacement_dict: dict, optional
        Same as the above parameter
    infix_cts: list of str, optional
        The list of constants that will be written in infix notation
    infix_pred: list of str, optional
        The list of predicates that will be written in infix notation
    infix_func: list of str, optional
        The list of function symbols that will be written in infix notation
    comma_separator: str, optional
        Character (preferably of len 1) used to separate the premises or separate the conclusions within an inference
    inference_separator: str, optional
        Character (preferably of len 1) used to separate between the premises and conclusions in an inference
    derivation_step_separator: str, optional
        Character (preferably of len 1) used to separate the components of a derivation step

    Examples
    --------
    >>> from logics.instances.predicate.languages import real_number_arithmetic_language
    >>> from logics.utils.parsers.predicate_parser import PredicateParser
    >>> replacement_dict = {
    ...     '¬': '~', 'not ': '~',
    ...     '&': '∧', ' and ': '∧',  # notice the spaces before and after 'and'
    ...     'v': '∨', ' or ': '∨',
    ...     ' then ': '→', '-->': '→', 'if ': '',  # 'if p then q' it will convert to 'p then q'
    ...     ' iff ': '↔', '<->': '↔',
    ...     'forall ': '∀', 'exists ': '∃', ' in ': '∈'
    ... }
    >>> real_number_arithmetic_parser = PredicateParser(language=real_number_arithmetic_language,
    ...                                                 parse_replacement_dict=replacement_dict,
    ...                                                 infix_cts=['∧', '∨', '→', '↔'],
    ...                                                 infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
    >>> real_number_arithmetic_parser.parse("0.5 + 0.5 = 1")
    ['=', ('+', '0.5', '0.5'), '1']
    >>> f = real_number_arithmetic_parser.parse("1 + 1 = 2 or exists x (x + 1 = 2)")
    >>> f
    ['∨', ['=', ('+', '1', '1'), '2'], ['∃', 'x', ['=', ('+', 'x', '1'), '2']]]
    >>> type(f)
    <class 'logics.classes.predicate.formula.PredicateFormula'>
    >>> real_number_arithmetic_parser.unparse(f)
    '1 + 1 = 2 ∨ ∃x (x + 1 = 2)'
    >>> # Infix predicates and function symbols cannot be given in prefix notation
    >>> real_number_arithmetic_parser.parse("=(+(1,1),2)")
    Traceback (most recent call last):
    ...
    IndexError: string index out of range

    Examples with a predefined parser for a language with prefix predicates and function symbols (see below for
    more predefined instances):

    >>> from logics.utils.parsers.predicate_parser import classical_predicate_parser
    >>> classical_predicate_parser.parse("R(a, b) or P(f(a))")
    ['∨', ['R', 'a', 'b'], ['P', ('f', 'a')]]
    >>> classical_predicate_parser.parse("forall x in f(a) (if ~P(x) then P(x))")
    ['∀', 'x', '∈', ('f', 'a'), ['→', ['~', ['P', 'x']], ['P', 'x']]]
    """
    def __init__(self, language, parse_replacement_dict, unparse_replacement_dict=None, infix_cts=None, infix_pred=None,
                 infix_func=None, comma_separator=',', inference_separator='/', derivation_step_separator=';'):
        # Default to "no infix predicates / no infix functions" (avoids mutable defaults).
        if infix_pred is None:
            infix_pred = list()
        if infix_func is None:
            infix_func = list()
        self.infix_pred = infix_pred
        self.infix_func = infix_func
        super().__init__(language=language, parse_replacement_dict=parse_replacement_dict,
                         unparse_replacement_dict=unparse_replacement_dict,
                         infix_cts=infix_cts, comma_separator=comma_separator, inference_separator=inference_separator,
                         derivation_step_separator=derivation_step_separator)

    # ------------------------------------------------------------------------------------------------------------------
    # PARSE FORMULA METHODS

    def _is_atomic(self, string):
        """To identify if a string is an atomic formula, check that it does not contain constants and quantifiers"""
        for quant in self.language.quantifiers:
            if quant in string:
                return False
        for ctt in self.language.constants():
            if ctt in string:
                return False
        return True

    def _parse_atomic(self, string):
        """Parse an atomic: a sentential constant, an infix-predicate formula, or a prefix-predicate formula."""
        # First check if it is a sentential constant
        if self.language.is_sentential_constant_string(string):
            return PredicateFormula([string])
        # Check for an infix predicate
        # There can only be one, so this will suffice, no need to call parser_utils.get_main_constant
        infix_predicate = False
        for pred in self.infix_pred:
            if pred in string:
                infix_predicate = True
                pred_index = string.index(pred)
                break
        if infix_predicate:
            # Infix predicate formulae are always binary
            # (`pred` / `pred_index` still hold the values found in the loop above)
            return PredicateFormula([pred, self.parse_term(string[:pred_index], replace=False),
                                     self.parse_term(string[pred_index+len(pred):], replace=False)])
        # Non-infix predicate
        for pred in self.language.predicates() | set(self.language.predicate_variables):
            if string[:len(pred) + 1] == pred + '(':
                arity = self.language.arity(pred)
                unparsed_terms = parser_utils.separate_arguments(string[len(pred):], ',')
                if len(unparsed_terms) != arity:
                    raise NotWellFormed(f'Incorrect arity for predicate {pred} in atomic {string}')
                parsed_arguments = [self.parse_term(term, replace=False) for term in unparsed_terms]
                return PredicateFormula([pred] + parsed_arguments)
        # If you did not return thus far, string is not a wff
        raise NotWellFormed(f'String {string} is not a valid atomic formula')

    def parse_term(self, string, replace=True):
        """Parses an individual term

        If `replace` is ``True``, will apply the `parse_replacement_dict` to the string before parsing the term.
        Otherwise, it will not.

        Examples
        --------
        >>> from logics.utils.parsers.predicate_parser import realnumber_arithmetic_parser
        >>> realnumber_arithmetic_parser.parse_term("1+1")
        ('+', '1', '1')
        >>> realnumber_arithmetic_parser.parse_term("1+(1+2)")
        ('+', '1', ('+', '1', '2'))
        >>> realnumber_arithmetic_parser.parse_term("(1+(1+2))")
        ('+', '1', ('+', '1', '2'))
        """
        # If a valid individual variable or constant, return it as it came
        if replace:
            string = self._prepare_to_parse(string)
        if self.language._is_valid_individual_constant_or_variable(string):
            return string
        # Search for an infix operator
        # First try adding external parentheses (in order to avoid giving external ones)
        infix_term = self._parse_infix_term(f'({string})')
        if infix_term is not None:
            return infix_term
        # Then without adding external parentheses
        infix_term = self._parse_infix_term(string)
        if infix_term is not None:
            return infix_term
        # If it did not find infix operators, must be a prefix one
        for func_symbol in self.language.function_symbols:
            if string[:len(func_symbol) + 1] == func_symbol + '(':
                arity = self.language.arity(func_symbol)
                unparsed_arguments = parser_utils.separate_arguments(string[len(func_symbol):], ',')
                if len(unparsed_arguments) != arity:
                    raise NotWellFormed(f'Incorrect arity for function symbol {func_symbol} in term {string}')
                parsed_arguments = tuple(self.parse_term(term, replace=False) for term in unparsed_arguments)
                return (func_symbol,) + parsed_arguments
        # If you did not return thus far, string is not a term
        raise NotWellFormed(f'String {string} is not a valid term')

    def _parse_infix_term(self, string):
        """Return a parsed infix-function term, or None when `string` is not one."""
        # If not between parentheses, its something of the form 's(0+0)' and not '(0+0)'
        if string[0] != '(' or string[-1] != ')':
            return None
        infix_function, index = parser_utils.get_main_constant(string, self.infix_func)
        if infix_function is not None:
            # Infix function symbols are binary: left operand, then right operand.
            return (infix_function, self.parse_term(string[1:index], replace=False),
                    self.parse_term(string[index + len(infix_function):-1], replace=False))
        return None

    def _parse_molecular(self, string, Formula=PredicateFormula):
        """Here we need only add the quantifier case and call super"""
        for quantifier in self.language.quantifiers:
            # The string begins with the quantifier
            if string[:len(quantifier)] == quantifier:
                current_index = len(quantifier)  # The current index is the position after the quantifier
                # Get the variable: extend character by character while the prefix
                # remains a valid variable (variables may be longer than one char).
                variable = None
                for char_index in range(current_index, len(string)):
                    if self.language._is_valid_variable(string[len(quantifier):char_index+1]):
                        variable = string[len(quantifier):char_index+1]
                        current_index = char_index + 1  # The current index is the position after the variable
                    else:
                        break
                if variable is None:
                    raise NotWellFormed(f'Incorrect variable specification in quantified formula {string}')
                # See if the quantifier is bounded and parse the bound
                bounded = False
                formula_opening_parenthesis_index = parser_utils.get_last_opening_parenthesis(string)
                if formula_opening_parenthesis_index is None:
                    raise NotWellFormed(f'Quantified formula in {string} must come between parentheses')
                if string[current_index] == '∈':
                    bounded = True
                    current_index += 1
                    unparsed_term = string[current_index:formula_opening_parenthesis_index]
                    parsed_term = self.parse_term(unparsed_term, replace=False)
                # Lastly, parse the formula
                unparsed_formula = string[formula_opening_parenthesis_index+1:-1]
                parsed_formula = self.parse(unparsed_formula)
                if not bounded:
                    return PredicateFormula([quantifier, variable, parsed_formula])
                else:
                    return PredicateFormula([quantifier, variable, '∈', parsed_term, parsed_formula])
        return super()._parse_molecular(string, PredicateFormula)

    # ------------------------------------------------------------------------------------------------------------------
    # UNPARSE FORMULA METHODS

    def _unparse_term(self, term, add_parentheses=False):
        """Render a parsed term back to a string; `add_parentheses` wraps nested infix terms."""
        # Atomic term
        if not isinstance(term, tuple):
            return term
        # Molecular term (function symbol with arguments)
        # Prefix function symbol
        if term[0] not in self.infix_func:
            unparsed_term = term[0] + '('
            for arg in term[1:]:
                unparsed_term += self._unparse_term(arg) + ', '
            return unparsed_term[:-2] + ')'  # drop the trailing ', ' and close
        # Infix (and thus binary) function symbol
        else:
            if not add_parentheses:
                return f'{self._unparse_term(term[1], True)} {term[0]} {self._unparse_term(term[2], True)}'
            else:
                # Infix terms inside other infix terms must come between parentheses
                return f'({self._unparse_term(term[1], True)} {term[0]} {self._unparse_term(term[2], True)})'

    def _unparse_atomic(self, formula):
        """Render an atomic formula; infix predicates come without outer parentheses."""
        # Prefix predicate symbol
        if formula[0] not in self.infix_pred:
            unparsed_formula = formula[0] + '('
            for arg in formula[1:]:
                unparsed_formula += self._unparse_term(arg) + ', '
            return unparsed_formula[:-2] + ')'  # drop the trailing ', ' and close
        # Infix (and thus binary) predicate symbol
        return f'{self._unparse_term(formula[1])} {formula[0]} {self._unparse_term(formula[2])}'

    def _unparse_molecular(self, formula, remove_external_parentheses):
        """Add the (bounded and unbounded) quantifier cases and defer the rest to super."""
        # Quantified formula
        if formula.main_symbol in self.language.quantifiers:
            # Bounded
            if formula[2] == '∈':
                return f'{formula[0]}{formula[1]} ∈ {self._unparse_term(formula[3])} ({self._unparse_formula(formula[4], remove_external_parentheses=True)})'
            # Unbounded
            return f'{formula[0]}{formula[1]} ({self._unparse_formula(formula[2], remove_external_parentheses=True)})'
        # Non-quantified formula
        return super()._unparse_molecular(formula, remove_external_parentheses)
# ----------------------------------------------------------------------------------------------------------------------
# Parser for arithmetic truth, does Godel coding of things inside Tr predicate
# For example, Tr(⌜x=x⌝) will be parsed as PredicateFormula(['Tr', '514951']).
class ArithmeticTruthParser(PredicateParser):
    """Parser for arithmetic truth
    Subclasses PredicateParser, but does Godel coding of things inside Tr predicate
    Parameters
    ----------
    godel_encoding_function: callable
        The function with which you wish to encode sentences inside Tr predicates
    godel_decoding_function: callable
        The function with which you wish to decode sentences inside Tr predicates
    everything_else_in_PredicateParser
        Everything else present in the parent PredicateParser class
    Examples
    --------
    >>> from logics.instances.predicate.languages import arithmetic_truth_language
    >>> from logics.utils.parsers.parser_utils import godel_encode, godel_decode
    >>> from logics.utils.parsers.predicate_parser import ArithmeticTruthParser
    >>> replacement_dict = {
    ...     '¬': '~', 'not ': '~',
    ...     '&': '∧', ' and ': '∧',  # notice the spaces before and after 'and'
    ...     'v': '∨', ' or ': '∨',
    ...     ' then ': '→', '-->': '→', 'if ': '',  # 'if p then q' it will convert to 'p then q'
    ...     ' iff ': '↔', '<->': '↔',
    ...     'forall ': '∀', 'exists ': '∃', ' in ': '∈'
    ... }
    >>> replacement_dict.update({
    ...     '⌜': 'quote(',
    ...     '⌝': ')'
    ... })
    >>> arithmetic_truth_parser = ArithmeticTruthParser(godel_encoding_function=godel_encode,
    ...                                                 godel_decoding_function=godel_decode,
    ...                                                 language=arithmetic_truth_language,
    ...                                                 parse_replacement_dict=replacement_dict,
    ...                                                 infix_cts=['∧', '∨', '→', '↔'],
    ...                                                 infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
    >>> arithmetic_truth_parser.parse('0=0+0')
    ['=', '0', ('+', '0', '0')]
    >>> arithmetic_truth_parser.parse('Tr(⌜0=0+0⌝)')
    ['Tr', '04908990']
    >>> arithmetic_truth_parser.parse('Tr(⌜Tr(⌜0=0⌝)⌝)')
    ['Tr', '4999919899999190490199199']
    >>> arithmetic_truth_parser.parse('λ iff ~Tr(⌜λ⌝)')
    ['↔', ['λ'], ['~', ['Tr', '79999']]]
    """
    def __init__(self, godel_encoding_function, godel_decoding_function, *args, **kwargs):
        # These are two functions that take a string (an UNPARSED formula) and return another string (its code)
        self.godel_encode = godel_encoding_function
        self.godel_decode = godel_decoding_function
        super().__init__(*args, **kwargs)

    def _prepare_to_parse(self, string):
        """Replaces quote(sentence) for code_of_sentence"""
        string = super()._prepare_to_parse(string)
        string = self._remove_quotations(string)
        return string

    def _remove_quotations(self, string):
        # Search for the first apparition of quote and encode the content.
        # Repeats until no 'quote(' remains, so nested quotations are encoded
        # from the outside in.
        while 'quote(' in string:
            # +5 lands on the '(' (offset 5 within the 6-char 'quote(')
            opening_parenthesis_index = string.index('quote(') + 5  # index of the opening parenthesis
            # Get where the closing parenthesis is
            closing_parenthesis_index = parser_utils.get_closing_parenthesis(string[opening_parenthesis_index:]) \
                                        + opening_parenthesis_index
            string_to_encode = string[opening_parenthesis_index+1:closing_parenthesis_index]
            codified_string = self.godel_encode(string_to_encode)
            # Splice the code in place of the whole 'quote(...)' wrapper
            string = string[:string.index('quote(')] + codified_string + string[closing_parenthesis_index+1:]
        return string

    def _parse_atomic(self, string):
        """Since codes are numerals like 514951 and not s(s(...)) we need to provide a special clause for the truth pred
        otherwise Tr(514951) will raise NotWellFormed
        """
        if string[:3] == 'Tr(':
            arity = 1  # Tr is unary: it takes exactly one (numeral) argument
            unparsed_terms = parser_utils.separate_arguments(string[2:], ',')
            if len(unparsed_terms) != arity:
                raise NotWellFormed(f'Incorrect arity for predicate Tr in atomic {string}')
            code = unparsed_terms[0]
            try:
                int(code)
            except ValueError:
                raise NotWellFormed(f'String {string} must have a numeral as the argument of Tr')
            # Do not parse the term, just return the numeral
            return PredicateFormula(['Tr', code])
        return super()._parse_atomic(string)
# ----------------------------------------------------------------------------------------------------------------------
# INSTANCES
from logics.instances.predicate.languages import classical_function_language, \
    arithmetic_language, real_number_arithmetic_language, arithmetic_truth_language
from logics.utils.parsers.standard_parser import classical_parse_replacement_dict

# Replacements shared by all predicate parsers: ASCII-friendly aliases for the
# quantifier and membership symbols, on top of the classical connective aliases.
predicate_replacement_dict = deepcopy(classical_parse_replacement_dict)
predicate_replacement_dict.update({
    ' in ': '∈',
    'forall ': '∀',
    'exists ': '∃'
})

# Ready-made parser instances for the predicate languages shipped with the package.
classical_predicate_parser = PredicateParser(language=classical_function_language,
                                             parse_replacement_dict=predicate_replacement_dict,
                                             infix_cts=['∧', '∨', '→', '↔'])
arithmetic_parser = PredicateParser(language=arithmetic_language,
                                    parse_replacement_dict=predicate_replacement_dict,
                                    infix_cts=['∧', '∨', '→', '↔'],
                                    infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
realnumber_arithmetic_parser = PredicateParser(language=real_number_arithmetic_language,
                                               parse_replacement_dict=predicate_replacement_dict,
                                               infix_cts=['∧', '∨', '→', '↔'],
                                               infix_pred=['=', '<', '>'], infix_func=['+', '-', '*', '**', '/', '//'])

# The truth parser additionally maps corner quotes to quote(...) so that the
# quoted sentence gets Gödel-encoded during parsing.
truth_predicate_replacement_dict = deepcopy(classical_parse_replacement_dict)
truth_predicate_replacement_dict.update({
    '⌜': 'quote(',
    '⌝': ')'
})
arithmetic_truth_parser = ArithmeticTruthParser(godel_encoding_function=parser_utils.godel_encode,
                                                godel_decoding_function=parser_utils.godel_decode,
                                                language=arithmetic_truth_language,
                                                parse_replacement_dict=truth_predicate_replacement_dict,
                                                infix_cts=['∧', '∨', '→', '↔'],
                                                infix_pred=['=', '<', '>'], infix_func=['+', '*', '**'])
|
57639
|
import hashlib
import os
import pickle
from zoltpy.quantile_io import json_io_dict_from_quantile_csv_file
from zoltpy import util
from zoltpy.connection import ZoltarConnection
from zoltpy.covid19 import COVID_TARGETS, covid19_row_validator, validate_quantile_csv_file
import glob
import json
import sys
# Command-line flag: pass 'update' as the first argument to skip re-validating
# forecasts whose checksum is already recorded in the validated-file db.
UPDATE = False
if len(sys.argv) >1:
    if sys.argv[1].lower() == 'update':
        print('Only updating')
        UPDATE = True
# util function to get filename from the path
def get_filename_from_path(path):
    """Return the final component of *path*, echoing both for logging."""
    filename = path.split(os.path.sep)[-1]
    print(path, filename)
    return filename
# Lazily-loaded cache of the validated-file database (filename -> md5 checksum).
g_db = None
def get_db():
    """Load the validated-file db from disk on first use; return the cached dict."""
    global g_db
    if g_db is None:
        g_db = json.load(open('code/zoltar_scripts/validated_file_db.json'))
    return g_db
def dump_db():
    """Write the (possibly updated) validated-file db back to disk."""
    global g_db
    with open('code/zoltar_scripts/validated_file_db.json', 'w') as fw:
        json.dump(g_db, fw, indent=4)
# Walk every model directory under data-processed/ and validate each CSV
# forecast, recording the md5 checksum of files that pass validation.
list_of_model_directories = os.listdir('./data-processed/')
for directory in list_of_model_directories:
    if "." in directory:
        continue  # skip plain files (e.g. READMEs); model dirs contain no '.'
    # Get all forecasts in the directory of this model
    path = './data-processed/'+directory+'/'
    forecasts = glob.glob(path + "*.csv")
    for forecast in forecasts:
        with open(forecast, "rb") as f:
            # Get the current hash of a processed file
            checksum = hashlib.md5(f.read()).hexdigest()
        db = get_db()
        # Validate covid19 file
        # In update mode, skip files whose checksum matches the last validated run
        if UPDATE and db.get(get_filename_from_path(forecast), None) == checksum:
            continue
        errors_from_validation = validate_quantile_csv_file(forecast)
        # Upload forecast
        if "no errors" == errors_from_validation:
            # Check this hash against the previous version of hash
            # NOTE(review): the db is keyed by bare filename, so identically
            # named files in different model dirs would collide — presumably
            # filenames are globally unique; verify.
            if db.get(get_filename_from_path(forecast), None) != checksum:
                db[get_filename_from_path(forecast)] = checksum
        else:
            print(errors_from_validation)
print('Dumping db')
dump_db()
|
57653
|
import MetaTrader5 as _mt5
from collections import namedtuple
from typing import Callable
from typing import Iterable
from typing import Tuple
from typing import Union
from typing import Any
from typing import Optional
from typing import Type
# custom namedtuples
# Custom namedtuples mirroring the row tuples returned by the MetaTrader5
# copy_rates_* and copy_ticks_* calls.
CopyRate = namedtuple("CopyRate", "time, open, high, low, close, tick_volume, spread, real_volume")
CopyTick = namedtuple("CopyTick", "time, bid, ask, last, volume, time_msc, flags, volume_real")
# MT5 namedtuple objects for typing: re-exported here so callers can annotate
# without importing MetaTrader5 directly.
Tick = _mt5.Tick
AccountInfo = _mt5.AccountInfo
SymbolInfo = _mt5.SymbolInfo
TerminalInfo = _mt5.TerminalInfo
OrderCheckResult = _mt5.OrderCheckResult
OrderSendResult = _mt5.OrderSendResult
TradeOrder = _mt5.TradeOrder
TradeDeal = _mt5.TradeDeal
TradeRequest = _mt5.TradeRequest
TradePosition = _mt5.TradePosition
|
57667
|
import torch
from torch.nn.functional import leaky_relu
from rational.torch import Rational
import numpy as np
# Reference input and expected Leaky ReLU output shared by the tests below.
t = torch.tensor([-2., -1, 0., 1., 2.])
expected_res = np.array(leaky_relu(t))
inp = torch.from_numpy(np.array(t)).reshape(-1)
cuda_inp = torch.tensor(np.array(t), dtype=torch.float, device="cuda").reshape(-1)
# Forward pass of each Rational version on the GPU, results moved back to CPU numpy.
rationalA_lrelu_gpu = Rational(version='A', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalB_lrelu_gpu = Rational(version='B', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalC_lrelu_gpu = Rational(version='C', cuda=True)(cuda_inp).clone().detach().cpu().numpy()
rationalD_lrelu_gpu = Rational(version='D', cuda=True, trainable=False)(cuda_inp).clone().detach().cpu().numpy()
# GPU tests: every Rational version should approximate Leaky ReLU within 5e-2.
# (np.allclose(a, b, atol=x) is equivalent to np.all(np.isclose(a, b, atol=x)):
# same default rtol and element-wise comparison.)
def test_rationalA_gpu_lrelu():
    assert np.allclose(rationalA_lrelu_gpu, expected_res, atol=5e-02)


def test_rationalB_gpu_lrelu():
    assert np.allclose(rationalB_lrelu_gpu, expected_res, atol=5e-02)


def test_rationalC_gpu_lrelu():
    assert np.allclose(rationalC_lrelu_gpu, expected_res, atol=5e-02)


def test_rationalD_gpu_lrelu():
    assert np.allclose(rationalD_lrelu_gpu, expected_res, atol=5e-02)
|
57676
|
import random as random_lib
import copy
from opsbro.evaluater import export_evaluater_function
# Group name under which these helpers are exported to the evaluater DSL.
FUNCTION_GROUP = 'random'
@export_evaluater_function(function_group=FUNCTION_GROUP)
def random():
    """**random()** -> Returns a random float between 0 and 1
    <code>
    Example:
        random()
    Returns:
        0.6988342144113194
    </code>
    """
    # Delegates to the stdlib RNG; the module is imported as random_lib
    # because this DSL function intentionally takes the name 'random'.
    return random_lib.random()
@export_evaluater_function(function_group=FUNCTION_GROUP)
def randomint_between(int_start, int_end):
    """**randomint_between(int_start, int_end)** -> Returns a random int between the start and the end
    <code>
    Example:
        randomint_between(1, 100)
    Returns:
        69
    </code>
    """
    # Both bounds are inclusive (random.randint semantics)
    return random_lib.randint(int_start, int_end)
@export_evaluater_function(function_group=FUNCTION_GROUP)
def shuffle(list):
    """**shuffle(list)** -> Return a copy of the list suffle randomly
    <code>
    Example:
        suffle([ 1, 2, 3, 4 ])
    Returns:
        [ 3, 1, 4, 2 ]
    </code>
    """
    # random.shuffle mutates in place, so shuffle a shallow copy and leave
    # the caller's sequence untouched.
    shuffled = copy.copy(list)
    random_lib.shuffle(shuffled)
    return shuffled
|
57681
|
# Pin the locale so number/date formatting is stable regardless of the host
# environment; must run before any module that formats locale-aware output.
import locale
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
import tornado.httpserver
import tornado.ioloop
import tornado.web
import os
import tornado.options
import json
import ipaddress
import functools
import subprocess
import user_agents
from collections import namedtuple
import models
import dispatch
import endpoints
import api_endpoints
import enums
import starlight
import analytics
import webutil
from starlight import private_data_path
def early_init():
    """Patch tornado.web.RequestHandler.prepare based on environment flags.

    Installs up to three stacked prepare() wrappers, in order:
    HTTPS enforcement, Cloudflare real-IP restoration, and a low-bandwidth
    flag derived from the User-Agent. Each wrapper chains to the previous
    one, so the order of these blocks matters.
    """
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
    if not os.environ.get("DISABLE_HTTPS_ENFORCEMENT", "") and not os.environ.get("DEV", ""):
        # production mode: force https usage due to local storage issues
        # also we don't want the NSA knowing you play chinese cartoon games
        def _swizzle_RequestHandler_prepare(self):
            if self.request.protocol != "https":
                self.redirect(
                    "https://{0}{1}".format(self.request.host, self.request.uri))
        tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare
    if os.environ.get("BEHIND_CLOUDFLARE") == "1":
        # Trust CF-Connecting-IP only when the peer is inside a published
        # Cloudflare network range (loaded from cloudflare.txt).
        cloudflare_ranges = []
        with open("cloudflare.txt", "r") as cf:
            for line in cf:
                cloudflare_ranges.append(ipaddress.ip_network(line.strip()))
        _super_RequestHandler_prepare2 = tornado.web.RequestHandler.prepare
        def _swizzle_RequestHandler_prepare2(self):
            for net in cloudflare_ranges:
                if ipaddress.ip_address(self.request.remote_ip) in net:
                    if "CF-Connecting-IP" in self.request.headers:
                        self.request.remote_ip = self.request.headers[
                            "CF-Connecting-IP"]
                    break
            _super_RequestHandler_prepare2(self)
        tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare2
    # Always installed: mark mobile/tablet clients as low-bandwidth
    _super_RequestHandler_prepare3 = tornado.web.RequestHandler.prepare
    def _swizzle_RequestHandler_prepare3(self):
        self.request.is_low_bandwidth = 0
        if "User-Agent" in self.request.headers:
            ua = user_agents.parse(self.request.headers["User-Agent"])
            if ua.is_mobile or ua.is_tablet:
                self.request.is_low_bandwidth = 1
        _super_RequestHandler_prepare3(self)
    tornado.web.RequestHandler.prepare = _swizzle_RequestHandler_prepare3
def main():
    """Initialize starlight data, patch request handling, and serve the app."""
    starlight.init()
    early_init()  # must run after starlight.init() and before the app is built
    in_dev_mode = os.environ.get("DEV")
    image_server = os.environ.get("IMAGE_HOST", "")
    tornado.options.parse_command_line()
    application = tornado.web.Application(dispatch.ROUTES,
        template_path="webui",
        static_path="static",
        image_host=image_server,
        debug=in_dev_mode,
        is_dev=in_dev_mode,
        tle=models.TranslationEngine(starlight),
        enums=enums,
        starlight=starlight,
        tlable=webutil.tlable,
        webutil=webutil,
        analytics=analytics.Analytics(),
        # Change every etag when the server restarts, in case we change what the output looks like.
        instance_random=os.urandom(8))
    # xheaders=1: trust X-Real-Ip / X-Forwarded-For from the fronting proxy
    http_server = tornado.httpserver.HTTPServer(application, xheaders=1)
    addr = os.environ.get("ADDRESS", "0.0.0.0")
    port = int(os.environ.get("PORT", 5000))
    http_server.listen(port, addr)
    print("Current APP_VER:", os.environ.get("VC_APP_VER",
        "1.9.1 (warning: Truth updates will fail in the future if an accurate VC_APP_VER "
        "is not set. Export VC_APP_VER to suppress this warning.)"))
    print("Ready.")
    tornado.ioloop.IOLoop.current().start()

if __name__ == "__main__":
    main()
|
57704
|
# OAuth 2.0 scope URLs for the Gmail API.
FULL_ACCESS_GMAIL_SCOPE = "https://mail.google.com/"
LABELS_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.labels"
SEND_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.send"
READ_ONLY_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.readonly"
COMPOSE_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.compose"
INSERT_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.insert"
MODIFY_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.modify"
METADATA_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.metadata"
SETTINGS_BASIC_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.settings.basic"
SETTINGS_SHARING_GMAIL_SCOPE = "https://www.googleapis.com/auth/gmail.settings.sharing"
|
57736
|
import click
import os
import yaml
from panoptes_client import Panoptes
@click.version_option(prog_name='Panoptes CLI')
@click.group()
@click.option(
    '--endpoint',
    '-e',
    help="Overrides the default API endpoint",  # fixed typo: was "Overides"
    type=str,
)
@click.option(
    '--admin',
    '-a',
    help=(
        "Enables admin mode. Ignored if you're not logged in as an "
        "administrator."
    ),
    is_flag=True,
)
@click.pass_context
def cli(ctx, endpoint, admin):
    """Top-level Panoptes CLI group.

    Loads ~/.panoptes/config.yml (when present) over built-in defaults,
    applies any --endpoint override, then connects to the Panoptes API —
    except for the 'configure' subcommand, which must work before
    credentials exist.
    """
    ctx.config_dir = os.path.expanduser('~/.panoptes/')
    ctx.config_file = os.path.join(ctx.config_dir, 'config.yml')
    # Built-in defaults; the config file and CLI flags layer on top.
    ctx.config = {
        'endpoint': 'https://www.zooniverse.org',
        'username': '',
        'password': '',
    }
    try:
        with open(ctx.config_file) as conf_f:
            ctx.config.update(yaml.full_load(conf_f))
    except IOError:
        # A missing config file is fine: keep the defaults.
        pass
    if endpoint:
        ctx.config['endpoint'] = endpoint
    if ctx.invoked_subcommand != 'configure':
        Panoptes.connect(
            endpoint=ctx.config['endpoint'],
            username=ctx.config['username'],
            password=ctx.config['password'],
            admin=admin,
        )
from panoptes_cli.commands.configure import *
from panoptes_cli.commands.info import *
from panoptes_cli.commands.project import *
from panoptes_cli.commands.subject import *
from panoptes_cli.commands.subject_set import *
from panoptes_cli.commands.user import *
from panoptes_cli.commands.workflow import *
|
57765
|
from torchvision import models
import numpy as np
import torch
import os
from moviepy.editor import VideoFileClip
SKIP_FRAME_RATE = 10  # sample every 10th frame instead of all frames
MINIMAX_FRAME = 4     # window size for the minimax distance search
# Person detector: pretrained Faster R-CNN from torchvision, inference mode.
model = models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval()
# Workaround for the duplicate-OpenMP-runtime crash on some platforms.
os.environ['KMP_DUPLICATE_LIB_OK']='True'
def extract_boxes(reference_clip, compare_clip):
    """Detect people in sampled frames of both clips.

    Returns a list of two arrays (reference first); each array holds, per
    sampled frame, the (x, y) centers of confidently-detected people sorted
    by x coordinate (assumes the formation is spread horizontally).
    """
    clips = [reference_clip, compare_clip]
    clips_frame_info = []
    for clip in clips:
        i = 0
        every_frame_info = []
        # loop over the frames from the video stream
        while True:
            i+=SKIP_FRAME_RATE  # at ~60 fps, skipping frames is safe
            if (i*1.0/clip.fps)> clip.duration:
                break
            frame = clip.get_frame(i*1.0/clip.fps)
            frame = frame/255  # image, and should be in ``0-1`` range.
            frame = np.transpose(frame, (2,0,1))  # HWC -> CHW
            x = [torch.from_numpy(frame).float()]
            # label list https://github.com/tensorflow/models/blob/master/research/object_detection/data/mscoco_label_map.pbtxt
            predictions = model(x)
            prediction= predictions[0]
            each_box_list = zip(prediction['boxes'].tolist(), prediction['labels'].tolist(), prediction['scores'].tolist())
            # keep only 'person' (label 1); the 0.95 threshold drops faint
            # detections such as spectators in the background
            filtered_box_list = filter(lambda x: x[1]==1 and x[2] >= 0.95, each_box_list)
            # bounding box -> center point
            filtered_center_dot_list = list(map(lambda x: [(x[0][0]+x[0][2])/2, (x[0][1]+x[0][3])/2], filtered_box_list))
            # sort by x coordinate (assumes a horizontally spread formation)
            sorted_dot_list = sorted(filtered_center_dot_list, key = lambda x: x[0])
            every_frame_info.append(sorted_dot_list)  # per-frame info
        clips_frame_info.append(np.array(every_frame_info))  # per-clip info
    return clips_frame_info
def calculate_pose_distance(reference_clip, compare_clip):
    """Score how closely compare_clip's formation matches reference_clip's.

    Per sampled frame, sums the distances between people matched by x-order,
    plus a frame-diagonal penalty per unmatched person. Then finds the
    MINIMAX_FRAME-frame window whose worst frame is smallest.

    :param reference_clip: moviepy clip used as the reference
    :param compare_clip: moviepy clip compared against the reference
    :return: (min_diff, second offset of the best window, extra-info dict)
    """
    clips_frame_info = extract_boxes(reference_clip, compare_clip)  # per-frame person centers for both clips
    min_size = min(len(clips_frame_info[0]), len(clips_frame_info[1]))
    dist_arr = list()
    # Calculate distance (by frame)
    for i in range(min_size):
        if len(clips_frame_info[0][i])>0 and len(clips_frame_info[1][i])>0:  # both frames have detections
            # Match people by x-order; the overall formation is what matters
            ref_frame_dots = clips_frame_info[0][i]
            compare_frame_dots = clips_frame_info[1][i]
            min_dot_num = min(len(ref_frame_dots), len(compare_frame_dots))
            # One frame-diagonal of penalty per missing/extra person
            penalty = ((reference_clip.w **2 + reference_clip.h**2)**0.5) * abs(len(ref_frame_dots)-len(compare_frame_dots))
            total_diff = penalty
            for dot_idx in range(min_dot_num):
                # (A dead no-op expression that evaluated both dots was removed here.)
                total_diff += ((ref_frame_dots[dot_idx][0] - compare_frame_dots[dot_idx][0])**2 + (ref_frame_dots[dot_idx][1] - compare_frame_dots[dot_idx][1])**2)**0.5
            dist_arr.append(total_diff)
        else:
            dist_arr.append(None)
    # Minimize max distance in (minimax_frames) frames
    # float('inf') rather than np.float('Inf'): np.float was removed in NumPy 1.24
    min_diff = float('inf')
    min_idx = 0
    max_dist = []
    for i in range(min_size-(MINIMAX_FRAME-1)):
        if None in dist_arr[i:i+MINIMAX_FRAME]:
            max_dist.append(None)
        else:
            tmp_max = np.max(dist_arr[i:i+MINIMAX_FRAME])
            max_dist.append(tmp_max)
            if min_diff > tmp_max:
                min_diff = tmp_max
                min_idx = i
    # return distance, second, additional_info
    return min_diff, (min_idx*SKIP_FRAME_RATE)/reference_clip.fps, {}
|
57775
|
from common.BaseCommand import BaseCommand
from common.ResultAndData import *
from models.CalEvent import CalEvent
import argparse
from argparse import Namespace
from msgraph import helpers
from tabulate import tabulate
import datetime
import os
class WeekCommand(BaseCommand):
    """CLI command that prints this week's calendar events as a table."""
    def add_parser(self, subparsers):
        # Register the 'week' subcommand; no extra arguments needed.
        list_cmd = subparsers.add_parser(
            "week", description="Gets your week at a glance"
        )
        return list_cmd

    def do_command_with_args(self, instance, args):
        # type: (Instance, Namespace) -> ResultAndData
        # NOTE(review): 'db' is unused below, but get_db() may initialize
        # connection state — confirm before removing.
        db = instance.get_db()
        instance.login_to_graph()
        rd = instance.get_current_user()
        if not rd.success:
            return Error("no logged in user")
        current_user = rd.data
        graph = instance.get_graph_session()
        # Window: Monday 00:00:00 through Sunday 23:59:59.999999 of this week
        today = datetime.date.today()
        start = today - datetime.timedelta(days=today.weekday())
        end = start + datetime.timedelta(days=6)
        startdt = datetime.datetime.combine(start, datetime.datetime.min.time())
        enddt = datetime.datetime.combine(end, datetime.datetime.max.time())
        blobs = helpers.list_events_in_time_range(graph, start=startdt, end=enddt)
        events = []
        for blob in blobs["value"]:
            e = CalEvent.from_json(blob)
            events.append(e)
        # One table row per event
        table = []
        for e in events:
            row = [
                e.subject,
                e.start.strftime("%c"),
                e.end.strftime("%c"),
                e.location,
                e.organizer,
            ]
            table.append(row)
        print(
            tabulate(
                table,
                headers=["Title", "Start Time", "End Time", "Location", "Created By"],
            )
        )
        return Success()
|
57778
|
from fastapi.routing import APIRouter
from lnbits.db import Database
db = Database("database")
core_app: APIRouter = APIRouter()
from .views.api import * # noqa
from .views.generic import * # noqa
from .views.public_api import * # noqa
|
57832
|
from __future__ import division, absolute_import, print_function
from .prototype import *
from .repeating import *
|
57838
|
from ..tweet_sentiment_classifier import Classifier, tokenizer_filter
import pickle as pkl
import numpy as np
import json
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
class BoW_Model(Classifier):
    # Bag-of-words sentiment classifier: TF-IDF vectorizer feeding a
    # scikit-learn LogisticRegression.
    def __init__(self, vocab_size=100000, max_iter=10000, validation_split=0.2, accuracy=0, bootstrap=1,
                 remove_stopwords=True, remove_punctuation=True, lemmatize=True, **kwargs):
        """
        Constructor for BoW_Model
        Be sure to add additional parameters to export()
        :param vocab_size: (int) Maximum vocabulary size. Default 1E6
        :param max_iter: (int) Maximum number of fit iterations
        :param remove_punctuation: (Bool) Remove punctuation. Recommended.
        :param remove_stopwords: (Bool) Remove stopwords. Recommended.
        :param lemmatize: (Bool) Lemmatize words. Recommended.
        """
        self.package = 'twitter_nlp_toolkit.tweet_sentiment_classifier.models.bow_models'
        self.type = 'BoW_Model'
        self.vectorizer = None  # fitted TfidfVectorizer; set by fit()/load_model()
        self.classifier = None  # fitted LogisticRegression; set by fit()/load_model()
        self.vocab_size = vocab_size
        self.max_iter = max_iter
        self.validation_split = validation_split
        self.accuracy = accuracy
        # bootstrap: int > 1 = absolute resample count; float < 1 = fraction of data
        self.bootstrap = bootstrap
        self.remove_punctuation = remove_punctuation
        self.remove_stopwords = remove_stopwords
        self.lemmatize = lemmatize

    def fit(self, train_data, y, weights=None, custom_vocabulary=None):
        """
        Fit the model (from scratch)
        :param train_data: (List-like) List of strings to train on
        :param y: (vector) Targets
        :param weights: (vector) Training weights. Optional
        :param custom_vocabulary: (List of Strings) Custom vocabulary. Not recommended
        """
        # NOTE(review): custom_vocabulary is accepted but never used below — confirm.
        if weights is not None:
            try:
                # NOTE(review): np.hstack expects a tuple — np.hstack((y, weights)).
                # As written this call raises and the bare except silently drops
                # the weights; confirm the intended behavior.
                y = np.hstack(y, weights)
            except:
                print('Weights not accepted')
        # Optional bootstrap resampling (see __init__ for the bootstrap semantics)
        if 1 < self.bootstrap < len(y):
            train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
        elif self.bootstrap < 1:
            n_samples = int(self.bootstrap * len(y))
            train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
        filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
                                         remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
        # analyzer=str.split: tokens were already produced by tokenizer_filter
        self.vectorizer = TfidfVectorizer(analyzer=str.split, max_features=self.vocab_size)
        cleaned_data = [' '.join(tweet) for tweet in filtered_data]
        X = self.vectorizer.fit_transform(cleaned_data)
        trainX, testX, trainY, testY = train_test_split(X, y, test_size=self.validation_split, stratify=y)
        print('Fitting BoW model')
        self.classifier = LogisticRegression(max_iter=self.max_iter).fit(trainX, trainY)
        # Hold-out accuracy on the validation split
        self.accuracy = accuracy_score(testY, self.classifier.predict(testX))

    def refine(self, train_data, y, bootstrap=True, weights=None, max_iter=500, preprocess=True):
        """
        Train the models further on new data. Note that it is not possible to increase the vocabulary
        :param train_data: (List-like of Strings) List of strings to train on
        :param y: (vector) Targets
        :param max_iter: (int) Maximum number of fit iterations. Default: 500
        """
        if weights is not None:
            try:
                # NOTE(review): same np.hstack tuple issue as in fit() — confirm.
                y = np.hstack(y, weights)
            except:
                print('Weights not accepted')
        if bootstrap and 1 < self.bootstrap < len(y):
            train_data, y = resample(train_data, y, n_samples=self.bootstrap, stratify=y, replace=False)
        elif bootstrap and self.bootstrap < 1:
            n_samples = int(self.bootstrap * len(y))
            train_data, y = resample(train_data, y, n_samples=n_samples, stratify=y, replace=False)
        if preprocess:
            filtered_data = tokenizer_filter(train_data, remove_punctuation=self.remove_punctuation,
                                             remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize)
            print('\n Filtered data')
        else:
            filtered_data = train_data
        cleaned_data = [' '.join(tweet) for tweet in filtered_data]
        # Re-use the existing vectorizer (vocabulary is frozen after fit())
        X = self.vectorizer.transform(cleaned_data)
        self.classifier = LogisticRegression(random_state=0, max_iter=max_iter).fit(X, y)
        # NOTE(review): this second fit re-fits the same data and is redundant — confirm.
        self.classifier.fit(X, y)

    def predict(self, data, **kwargs):
        """
        Predict the binary sentiment of a list of tweets
        :param data: (list of Strings) Input tweets
        :param kwargs: Keywords for predict_proba
        :return: (list of bool) Predictions
        """
        # NOTE(review): predict_proba() below actually returns hard labels,
        # so this round is a no-op for 0/1 targets — confirm.
        return np.round(self.predict_proba(data, **kwargs))

    def predict_proba(self, data):
        """
        Makes predictions
        :param data: (List-like) List of strings to predict sentiment
        :return: (vector) Un-binarized Predictions
        """
        if self.classifier is None:
            raise ValueError('Model has not been trained!')
        filtered_data = tokenizer_filter(data, remove_punctuation=self.remove_punctuation,
                                         remove_stopwords=self.remove_stopwords, lemmatize=self.lemmatize,
                                         verbose=False)
        cleaned_data = [' '.join(tweet) for tweet in filtered_data]
        X = self.vectorizer.transform(cleaned_data)
        # NOTE(review): despite the name, this returns classifier.predict()
        # (class labels), not predict_proba() probabilities — confirm.
        return self.classifier.predict(X)

    def export(self, filename):
        """
        Saves the model to disk
        :param filename: (String) Path to file
        """
        # Hyperparameters go to JSON; fitted sklearn objects are pickled separately.
        parameters = {'Classifier': self.type,
                      'package': self.package,
                      'vocab_size': int(self.vocab_size),
                      'max_iter': int(self.max_iter),
                      'validation_split': float(self.validation_split),
                      'accuracy': float(self.accuracy),
                      'remove_punctuation': self.remove_punctuation,
                      'remove_stopwords': self.remove_stopwords,
                      'lemmatize': self.lemmatize,
                      'bootstrap': self.bootstrap
                      }
        # Preserve the int-vs-float distinction of bootstrap in the JSON
        if parameters['bootstrap'] < 1:
            parameters['bootstrap'] = float(parameters['bootstrap'])
        else:
            parameters['bootstrap'] = int(parameters['bootstrap'])
        os.makedirs(filename, exist_ok=True)
        with open(filename + '/param.json', 'w+') as outfile:
            json.dump(parameters, outfile)
        with open(filename + '/bow_vectorizer.pkl', 'wb+') as outfile:
            pkl.dump(self.vectorizer, outfile)
        with open(filename + '/bow_classifier.pkl', 'wb+') as outfile:
            pkl.dump(self.classifier, outfile)

    def load_model(self, filename):
        """
        # TODO revise to properly close pkl files
        :param filename: (String) Path to file
        """
        self.vectorizer = pkl.load(open(filename + '/bow_vectorizer.pkl', 'rb'))
        self.classifier = pkl.load(open(filename + '/bow_classifier.pkl', 'rb'))
|
57857
|
# Demo: matrix inversion with numpy arrays vs. np.matrix, and the Moore-Penrose
# pseudo-inverse (np.linalg.pinv) for singular matrices.
# NOTE: the NumPy docs discourage np.matrix / np.mat; prefer ndarray with '@'.
import numpy as np
arr = np.array([[2, 5], [1, 3]])
arr_inv = np.linalg.inv(arr)
print(arr_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat = np.matrix([[2, 5], [1, 3]])
mat_inv = np.linalg.inv(mat)
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat**-1
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
mat_inv = mat.I
print(mat_inv)
# [[ 3. -5.]
# [-1. 2.]]
result = mat * mat.I
print(result)
# [[1. 0.]
# [0. 1.]]
# print(arr.I)
# AttributeError: 'numpy.ndarray' object has no attribute 'I'
arr_s = np.array([[0, 0], [1, 3]])
# print(np.linalg.inv(arr_s))
# LinAlgError: Singular matrix
arr_pinv = np.linalg.pinv(arr_s)
print(arr_pinv)
# [[0. 0.1]
# [0. 0.3]]
# NOTE(review): multiplies by arr_inv (inverse of arr), not arr_pinv; the
# printed result below matches, but confirm that was the intent.
print(arr_s @ arr_inv)
# [[0. 0.]
# [0. 1.]]
print(np.linalg.pinv(arr_pinv))
# [[0. 0.]
# [1. 3.]]
print(np.linalg.inv(arr))
# [[ 3. -5.]
# [-1. 2.]]
print(np.linalg.pinv(arr))
# [[ 3. -5.]
# [-1. 2.]]
mat_s = np.mat([[0, 0], [1, 3]])
# print(np.linalg.inv(mat_s))
# LinAlgError: Singular matrix
# print(mat_s**-1)
# LinAlgError: Singular matrix
# print(mat_s.I)
# LinAlgError: Singular matrix
print(np.linalg.pinv(mat_s))
# [[0. 0.1]
# [0. 0.3]]
|
57868
|
import os
import time
import subprocess
import pyblish.api
class MyAction(pyblish.api.Action):
    """Example action, available only on already-processed plug-ins."""
    label = "My Action"
    on = "processed"

    def process(self, context, plugin):
        self.log.info("Running!")


class MyOtherAction(pyblish.api.Action):
    """Example action with default availability."""
    label = "My Other Action"

    def process(self, context, plugin):
        self.log.info("Running!")


class CollectComment(pyblish.api.ContextPlugin):
    """This collector has a very long comment.
    The idea is that this comment should either be elided, or word-
    wrapped in the corresponding view.
    """
    order = pyblish.api.CollectorOrder

    def process(self, context):
        # Seed an empty comment for the UI to edit
        context.data["comment"] = ""


class MyCollector(pyblish.api.ContextPlugin):
    """Creates three demo instances, the third disabled for publishing."""
    label = "My Collector"
    order = pyblish.api.CollectorOrder

    def process(self, context):
        context.create_instance("MyInstance 1", families=["myFamily"])
        context.create_instance("MyInstance 2", families=["myFamily 2"])
        context.create_instance(
            "MyInstance 3",
            families=["myFamily 2"],
            publish=False
        )
class MyValidator(pyblish.api.InstancePlugin):
    """Demo validator, disabled by default, exposing two actions."""
    order = pyblish.api.ValidatorOrder
    active = False
    label = "My Validator"
    actions = [MyAction,
               MyOtherAction]

    def process(self, instance):
        self.log.info("Validating: %s" % instance)


class MyExtractor(pyblish.api.InstancePlugin):
    """Demo extractor, limited to the 'myFamily' family."""
    order = pyblish.api.ExtractorOrder
    families = ["myFamily"]
    label = "My Extractor"

    def process(self, instance):
        self.log.info("Extracting: %s" % instance)


class CollectRenamed(pyblish.api.Collector):
    """Creates an instance, then renames it through its data."""
    def process(self, context):
        i = context.create_instance("MyInstanceXYZ", family="MyFamily")
        i.set_data("name", "My instance")


class CollectNegatron(pyblish.api.Collector):
    """Negative collector adds Negatron"""
    # Ordered to run (almost) first among collectors
    order = pyblish.api.Collector.order - 0.49

    def process_context(self, context):
        self.log.info("Collecting Negatron")
        context.create_instance("Negatron", family="MyFamily")


class CollectPositron(pyblish.api.Collector):
    """Positive collector adds Positron"""
    # Ordered to run (almost) last among collectors
    order = pyblish.api.Collector.order + 0.49

    def process_context(self, context):
        self.log.info("Collecting Positron")
        context.create_instance("Positron", family="MyFamily")
class SelectInstances(pyblish.api.Selector):
    """Select debugging instances
    These instances are part of the evil plan to destroy the world.
    Be weary, be vigilant, be sexy.
    """
    def process_context(self, context):
        self.log.info("Selecting instances..")
        # NOTE(review): 'instances' is a module-level fixture presumably
        # defined elsewhere in this file — confirm it exists.
        for instance in instances[:-1]:
            name, data = instance["name"], instance["data"]
            self.log.info("Selecting: %s" % name)
            instance = context.create_instance(name)
            for key, value in data.items():
                instance.set_data(key, value)


class SelectDiInstances(pyblish.api.Selector):
    """Select DI instances"""
    name = "Select Dependency Instances"

    def process(self, context):
        # Uses the last entry of the module-level 'instances' fixture
        name, data = instances[-1]["name"], instances[-1]["data"]
        self.log.info("Selecting: %s" % name)
        instance = context.create_instance(name)
        for key, value in data.items():
            instance.set_data(key, value)


class SelectInstancesFailure(pyblish.api.Selector):
    """Select some instances, but fail before adding anything to the context.
    That's right. I'm programmed to fail. Try me.
    """
    __fail__ = True

    def process_context(self, context):
        self.log.warning("I'm about to fail")
        raise AssertionError("I was programmed to fail")


class SelectInstances2(pyblish.api.Selector):
    """Selector that succeeds without touching the context."""
    def process(self, context):
        self.log.warning("I'm good")
class ValidateNamespace(pyblish.api.Validator):
    """Namespaces must be orange
    In case a namespace is not orange, report immediately to
    your officer in charge, ask for a refund, do a backflip.
    This has been an example of:
    - A long doc-string
    - With a list
    - And plenty of newlines and tabs.
    """
    families = ["B"]

    def process(self, instance):
        self.log.info("Validating the namespace of %s" % instance.data("name"))
        self.log.info("""And here's another message, quite long, in fact it's
too long to be displayed in a single row of text.
But that's how we roll down here. It's got \nnew lines\nas well.
- And lists
- And more lists
""")


class ValidateContext(pyblish.api.Validator):
    """Context-level validator for families A and B."""
    families = ["A", "B"]

    def process_context(self, context):
        self.log.info("Processing context..")


class ValidateContextFailure(pyblish.api.Validator):
    """Optional context validator that always fails."""
    optional = True
    families = ["C"]
    __fail__ = True

    def process_context(self, context):
        self.log.info("About to fail..")
        raise AssertionError("""I was programmed to fail
The reason I failed was because the sun was not aligned with the tides,
and the moon is gray; not yellow. Try again when the moon is yellow.""")


class Validator1(pyblish.api.Validator):
    """Test of the order attribute"""
    order = pyblish.api.Validator.order + 0.1
    families = ["A"]

    def process_instance(self, instance):
        pass


class Validator2(pyblish.api.Validator):
    """Ordered after Validator1 (order + 0.2)."""
    order = pyblish.api.Validator.order + 0.2
    families = ["B"]

    def process_instance(self, instance):
        pass


class Validator3(pyblish.api.Validator):
    """Ordered after Validator2 (order + 0.3)."""
    order = pyblish.api.Validator.order + 0.3
    families = ["B"]

    def process_instance(self, instance):
        pass
class ValidateFailureMock(pyblish.api.Validator):
    """Plug-in that always fails"""
    optional = True
    order = pyblish.api.Validator.order + 0.1
    families = ["C"]
    __fail__ = True

    def process_instance(self, instance):
        # Emit one message per log level before failing
        self.log.debug("e = mc^2")
        self.log.info("About to fail..")
        self.log.warning("Failing.. soooon..")
        self.log.critical("Ok, you're done.")
        raise AssertionError("""ValidateFailureMock was destined to fail..
Here's some extended information about what went wrong.
It has quite the long string associated with it, including
a few newlines and a list.
- Item 1
- Item 2
""")


class ValidateIsIncompatible(pyblish.api.Validator):
    """This plug-in should never appear.."""
    requires = False  # This is invalid


class ValidateWithRepair(pyblish.api.Validator):
    """A validator with repair functionality"""
    optional = True
    families = ["C"]
    __fail__ = True

    def process_instance(self, instance):
        raise AssertionError(
            "%s is invalid, try repairing it!" % instance.name
        )

    def repair_instance(self, instance):
        self.log.info("Attempting to repair..")
        self.log.info("Success!")


class ValidateWithRepairFailure(pyblish.api.Validator):
    """A validator with repair functionality that fails"""
    optional = True
    families = ["C"]
    __fail__ = True

    def process_instance(self, instance):
        raise AssertionError(
            "%s is invalid, try repairing it!" % instance.name
        )

    def repair_instance(self, instance):
        self.log.info("Attempting to repair..")
        raise AssertionError("Could not repair due to X")


class ValidateWithVeryVeryVeryLongLongNaaaaame(pyblish.api.Validator):
    """A validator with repair functionality that fails"""
    families = ["A"]


class ValidateWithRepairContext(pyblish.api.Validator):
    """A validator with repair functionality that fails"""
    optional = True
    families = ["C"]
    __fail__ = True

    def process_context(self, context):
        raise AssertionError("Could not validate context, try repairing it")

    def repair_context(self, context):
        self.log.info("Attempting to repair..")
        raise AssertionError("Could not repair")
class ExtractAsMa(pyblish.api.Extractor):
    """Extract contents of each instance into .ma

    Serialise scene using Maya's own facilities and then put
    it on the hard-disk. Once complete, this plug-in relies
    on a Conformer to put it in it's final location, as this
    extractor merely positions it in the users local temp-
    directory.

    """

    optional = True

    # Test harness contract: this plug-in is expected to emit at least 4 log
    # records per run (see the log calls in process_instance below).
    __expected__ = {
        "logCount": ">=4"
    }

    def process_instance(self, instance):
        # Test fixture: only logs; the extra warning for "Richard05" exercises
        # the warning log level in the UI.
        self.log.info("About to extract scene to .ma..")
        self.log.info("Extraction went well, now verifying the data..")

        if instance.name == "Richard05":
            self.log.warning("You're almost running out of disk space!")

        self.log.info("About to finish up")
        self.log.info("Finished successfully")
class ConformAsset(pyblish.api.Conformer):
"""Conform the world
Step 1: Conform all humans and Step 2: Conform all non-humans.
Once conforming has completed, rinse and repeat.
"""
optional = True
def process_instance(self, instance):
self.log.info("About to conform all humans..")
if instance.name == "Richard05":
self.log.warning("Richard05 is a conformist!")
self.log.info("About to conform all non-humans..")
self.log.info("Conformed Successfully")
class ValidateInstancesDI(pyblish.api.Validator):
"""Validate using the DI interface"""
families = ["diFamily"]
def process(self, instance):
self.log.info("Validating %s.." % instance.data("name"))
class ValidateDIWithRepair(pyblish.api.Validator):
families = ["diFamily"]
optional = True
__fail__ = True
def process(self, instance):
raise AssertionError("I was programmed to fail, for repair")
def repair(self, instance):
self.log.info("Repairing %s" % instance.data("name"))
class ExtractInstancesDI(pyblish.api.Extractor):
"""Extract using the DI interface"""
families = ["diFamily"]
def process(self, instance):
self.log.info("Extracting %s.." % instance.data("name"))
class ValidateWithLabel(pyblish.api.Validator):
"""Validate using the DI interface"""
label = "Validate with Label"
class ValidateWithLongLabel(pyblish.api.Validator):
"""Validate using the DI interface"""
label = "Validate with Loooooooooooooooooooooong Label"
class SimplePlugin1(pyblish.api.Plugin):
"""Validate using the simple-plugin interface"""
def process(self):
self.log.info("I'm a simple plug-in, only processed once")
class SimplePlugin2(pyblish.api.Plugin):
"""Validate using the simple-plugin interface
It doesn't have an order, and will likely end up *before* all
other plug-ins. (due to how sorted([1, 2, 3, None]) works)
"""
def process(self, context):
self.log.info("Processing the context, simply: %s" % context)
class SimplePlugin3(pyblish.api.Plugin):
"""Simply process every instance"""
def process(self, instance):
self.log.info("Processing the instance, simply: %s" % instance)
class ContextAction(pyblish.api.Action):
label = "Context action"
def process(self, context):
self.log.info("I have access to the context")
self.log.info("Context.instances: %s" % str(list(context)))
class FailingAction(pyblish.api.Action):
label = "Failing action"
def process(self):
self.log.info("About to fail..")
raise Exception("I failed")
class LongRunningAction(pyblish.api.Action):
label = "Long-running action"
def process(self):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Ah, that's better")
class IconAction(pyblish.api.Action):
label = "Icon action"
icon = "crop"
def process(self):
self.log.info("I have an icon")
class PluginAction(pyblish.api.Action):
label = "Plugin action"
def process(self, plugin):
self.log.info("I have access to my parent plug-in")
self.log.info("Which is %s" % plugin.id)
class LaunchExplorerAction(pyblish.api.Action):
label = "Open in Explorer"
icon = "folder-open"
def process(self, context):
cwd = os.getcwd()
self.log.info("Opening %s in Explorer" % cwd)
result = subprocess.call("start .", cwd=cwd, shell=True)
self.log.debug(result)
class ProcessedAction(pyblish.api.Action):
label = "Success action"
icon = "check"
on = "processed"
def process(self):
self.log.info("I am only available on a successful plug-in")
class FailedAction(pyblish.api.Action):
label = "Failure action"
icon = "close"
on = "failed"
class SucceededAction(pyblish.api.Action):
label = "Success action"
icon = "check"
on = "succeeded"
def process(self):
self.log.info("I am only available on a successful plug-in")
class LongLabelAction(pyblish.api.Action):
label = "An incredibly, incredicly looooon label. Very long."
icon = "close"
class BadEventAction(pyblish.api.Action):
label = "Bad event action"
on = "not exist"
class InactiveAction(pyblish.api.Action):
active = False
class PluginWithActions(pyblish.api.Validator):
optional = True
actions = [
pyblish.api.Category("General"),
ContextAction,
FailingAction,
LongRunningAction,
IconAction,
PluginAction,
pyblish.api.Category("Empty"),
pyblish.api.Category("OS"),
LaunchExplorerAction,
pyblish.api.Separator,
FailedAction,
SucceededAction,
pyblish.api.Category("Debug"),
BadEventAction,
InactiveAction,
LongLabelAction,
pyblish.api.Category("Empty"),
]
def process(self):
self.log.info("Ran PluginWithActions")
class FailingPluginWithActions(pyblish.api.Validator):
optional = True
actions = [
FailedAction,
SucceededAction,
]
def process(self):
raise Exception("I was programmed to fail")
class ValidateDefaultOff(pyblish.api.Validator):
families = ["A", "B"]
active = False
optional = True
def process(self, instance):
self.log.info("Processing instance..")
class ValidateWithHyperlinks(pyblish.api.Validator):
"""To learn about Pyblish
<a href="http://pyblish.com">click here</a> (http://pyblish.com)
"""
families = ["A", "B"]
def process(self, instance):
self.log.info("Processing instance..")
msg = "To learn about Pyblish, <a href='http://pyblish.com'>"
msg += "click here</a> (http://pyblish.com)"
self.log.info(msg)
class LongRunningCollector(pyblish.api.Collector):
"""I will take at least 2 seconds..."""
def process(self, context):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Good morning")
class LongRunningValidator(pyblish.api.Validator):
"""I will take at least 2 seconds..."""
def process(self, context):
self.log.info("Sleeping for 2 seconds..")
time.sleep(2)
self.log.info("Good morning")
class RearrangingPlugin(pyblish.api.ContextPlugin):
    """Sort plug-ins by family, and then reverse it"""

    order = pyblish.api.CollectorOrder + 0.2

    def process(self, context):
        self.log.info("Reversing instances in the context..")
        # Replace the context contents in place, ordered by family, descending.
        reordered = sorted(context,
                           key=lambda inst: inst.data["family"],
                           reverse=True)
        context[:] = reordered
        self.log.info("Reversed!")
class InactiveInstanceCollectorPlugin(pyblish.api.InstancePlugin):
"""Special case of an InstancePlugin running as a Collector"""
order = pyblish.api.CollectorOrder + 0.1
active = False
def process(self, instance):
raise TypeError("I shouldn't have run in the first place")
class CollectWithIcon(pyblish.api.ContextPlugin):
order = pyblish.api.CollectorOrder
def process(self, context):
instance = context.create_instance("With Icon")
instance.data["icon"] = "play"
instances = [
{
"name": "Peter01",
"data": {
"family": "A",
"publish": False
}
},
{
"name": "Richard05",
"data": {
"family": "A",
}
},
{
"name": "Steven11",
"data": {
"family": "B",
}
},
{
"name": "Piraya12",
"data": {
"family": "B",
}
},
{
"name": "Marcus",
"data": {
"family": "C",
}
},
{
"name": "Extra1",
"data": {
"family": "C",
}
},
{
"name": "DependencyInstance",
"data": {
"family": "diFamily"
}
},
{
"name": "NoFamily",
"data": {}
},
{
"name": "Failure 1",
"data": {
"family": "failure",
"fail": False
}
},
{
"name": "Failure 2",
"data": {
"family": "failure",
"fail": True
}
}
]
plugins = [
MyCollector,
MyValidator,
MyExtractor,
CollectRenamed,
CollectNegatron,
CollectPositron,
SelectInstances,
SelectInstances2,
SelectDiInstances,
SelectInstancesFailure,
ValidateFailureMock,
ValidateNamespace,
# ValidateIsIncompatible,
ValidateWithVeryVeryVeryLongLongNaaaaame,
ValidateContext,
ValidateContextFailure,
Validator1,
Validator2,
Validator3,
ValidateWithRepair,
ValidateWithRepairFailure,
ValidateWithRepairContext,
ValidateWithLabel,
ValidateWithLongLabel,
ValidateDefaultOff,
ValidateWithHyperlinks,
ExtractAsMa,
ConformAsset,
SimplePlugin1,
SimplePlugin2,
SimplePlugin3,
ValidateInstancesDI,
ExtractInstancesDI,
ValidateDIWithRepair,
PluginWithActions,
FailingPluginWithActions,
# LongRunningCollector,
# LongRunningValidator,
RearrangingPlugin,
InactiveInstanceCollectorPlugin,
CollectComment,
CollectWithIcon,
]
pyblish.api.sort_plugins(plugins)
|
57908
|
EVENT_ALGO_LOG = "eAlgoLog"
EVENT_ALGO_SETTING = "eAlgoSetting"
EVENT_ALGO_VARIABLES = "eAlgoVariables"
EVENT_ALGO_PARAMETERS = "eAlgoParameters"
APP_NAME = "AlgoTrading"
|
57916
|
from django.shortcuts import render
from django.http import HttpResponse
# Include the `fusioncharts.py` file which has required functions to embed the charts in html page
from ..fusioncharts import FusionCharts
from ..fusioncharts import FusionTable
from ..fusioncharts import TimeSeries
import requests
# Loading Data and schema from a Static JSON String url
# The `chart` method is defined to load chart data from an JSON string.
def chart(request):
data = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/data/single-event-overlay-data.json').text
schema = requests.get('https://s3.eu-central-1.amazonaws.com/fusion.store/ft/schema/single-event-overlay-schema.json').text
fusionTable = FusionTable(schema, data)
timeSeries = TimeSeries(fusionTable)
timeSeries.AddAttribute("caption", """{
text: 'Interest Rate Analysis'
}""")
timeSeries.AddAttribute("subCaption", """{
text: 'Federal Reserve (USA)'
}""")
timeSeries.AddAttribute("yAxis", """[{
plot: 'Interest Rate',
format:{
suffix: '%'
},
title: 'Interest Rate'
}]""")
timeSeries.AddAttribute("xAxis", """{
plot: 'Time',
timemarker: [{
start: 'Mar-1980',
label: 'US inflation peaked at 14.8%.',
timeFormat: ' %b -%Y',
style: {
marker:
{
fill: '#D0D6F4'
}
}
}, {
start: 'May-1981',
label: 'To control inflation, the Fed started {br} raising interest rates to over {br} 20%.',
timeFormat: '%b-%Y'
}, {
start: 'Jun-1983',
label: 'By proactive actions of Mr.Volcker, {br} the inflation falls to 2.4% {br} from the peak of over 14% {br} just three years ago.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#D0D6F4'
}
}
}, {
start: 'Oct-1987',
label: 'The Dow Jones Industrial Average lost {br} about 30% of it’s value.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#FBEFCC'
}
}
}, {
start: 'Jan-1989',
label: '<NAME> becomes {br} the 41st president of US!',
timeFormat: '%b-%Y'
}, {
start: 'Aug-1990',
label: 'The oil prices spiked to $35 {br} per barrel from $15 per barrel {br} because of the Gulf War.',
timeFormat: '%b-%Y'
}, {
start: 'Dec-1996',
label: '<NAME> warns of the dangers {br} of \"irrational exuberance\" in financial markets, {br} an admonition that goes unheeded',
timeFormat: '%b-%Y'
}, {
start: 'Sep-2008',
label: '<NAME> collapsed!',
timeFormat: '%b-%Y'
},{
start: 'Mar-2009',
label: 'The net worth of US households {br} stood at a trough of $55 trillion.',
timeFormat: '%b-%Y',
style: {
marker: {
fill: '#FBEFCC'
}
}
}, {
start: 'Oct-2009',
label: 'Unemployment rate peaked {br} in given times to 10%.',
timeFormat: '%b-%Y'
}]
}""")
# Create an object for the chart using the FusionCharts class constructor
fcChart = FusionCharts("timeseries", "ex1", 700, 450, "chart-1", "json", timeSeries)
# returning complete JavaScript and HTML code, which is used to generate chart in the browsers.
return render(request, 'index.html', {'output' : fcChart.render(),'chartTitle': "Single event overlay"})
|
57917
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(name='pymixconsole',
version='0.0.1',
description='Headless multitrack mixing console in Python',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/csteinmetz1/pymixconsole',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
package_data={'pymixconsole': ['irs/*.wav']},
include_package_data=True,
install_requires=['scipy>=1.0.1',
'numpy>=1.14.2',
'numba>=0.46.0',
'graphviz>=0.13.2'],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
)
|
57947
|
import qq
class MyClient(qq.Client):
    # Minimal QQ bot client: logs the account on connect and answers the
    # "!hello" command with a greeting.
    async def on_ready(self):
        # Called once the gateway connection is ready; log the bot identity.
        print(f'以 {self.user} 身份登录(ID:{self.user.id})')
        print('------')

    async def on_message(self, message):
        # We do not want the bot replying to its own messages
        if message.author.id == self.user.id:
            return

        if message.content.startswith('!hello'):
            await message.reply('你好!', mention_author=message.author)
client = MyClient()
client.run('token')
|
57983
|
from tests.unit import base
from src import database
class AppActiveRepositoryTest(base.TestCase):
    # Smoke tests for the AppActiveRepository dependency-injected attributes.
    def test_has_db(self):
        # The repository class must expose a `db` attribute (injected elsewhere).
        self.assertTrue(hasattr(database.AppActiveRepository, 'db'))

    def test_has_db_default_none(self):
        # TODO: How to test this? Because the import of initialize on test base made the dependency injection
        pass
|
58001
|
def incrementing_time(start=2000, increment=1):
    """Infinite generator yielding start, start+increment, start+2*increment, ...

    Useful as a fake clock in tests: each call to next() advances time by
    a fixed step.
    """
    current = start
    while True:
        yield current
        current += increment
def monotonic_time(start=2000):
    """Fake monotonic clock: strictly increasing, one microsecond per call."""
    return incrementing_time(start, increment=0.000001)
def static_time(value):
    """Frozen clock: an infinite generator that always yields *value*."""
    while True:
        yield value
|
58039
|
from typing import List
class ReportIndice():
    # Plain data holder describing one report index entry: its identity
    # (alias/nombre/tipo), the columns it covers, an evaluation note
    # (consideracion) and its grid position (fila/columna).
    def __init__(self, alias, nombre, tipo, columnas: List[str], consideracion, fila, columna):
        self.alias = alias
        self.nombre = nombre
        self.tipo = tipo
        self.columnas: List[str] = columnas
        self.consideracion = consideracion
        self.fila = fila
        self.columna = columna
|
58064
|
from typing import Optional
import pandas as pd
from episuite import data
class GoogleMobility:
    """Client for the Google COVID-19 Community Mobility Reports.

    .. seealso::
        `Google Community Mobility Report <https://www.google.com/covid19/mobility/>`_
        Community Mobility Report website.

    :param report_url: alternative report download link
    """
    DEFAULT_REPORT_URL: str = \
        "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"

    def __init__(self, report_url: Optional[str] = None):
        # Fall back to the official Google URL when none is supplied.
        self.report_url = report_url or GoogleMobility.DEFAULT_REPORT_URL

    def load_report(self, country_region_code: Optional[str] = None,
                    show_progress: bool = True, cache: bool = True) -> pd.DataFrame:
        """Download (or reuse a cached copy of) the mobility report.

        The report is a large CSV, so passing ``country_region_code`` is
        highly recommended: it streams the file in chunks and keeps only
        the rows for that country.

        :param country_region_code: country region code, e.g. "BR" for Brazil
        :param show_progress: show a progress bar during download
        :param cache: reuse a previously downloaded copy when True
        :returns: dataframe with ``date`` parsed, optionally filtered by country
        """
        fpath = data.load_from_cache(self.report_url, "google_mobility.csv",
                                     "Google Mobility Report",
                                     show_progress=show_progress,
                                     invalidate=not cache)
        if country_region_code is None:
            return pd.read_csv(fpath, low_memory=False,
                               parse_dates=["date"])
        # Stream the CSV in chunks, retaining only the requested country.
        reader = pd.read_csv(fpath, low_memory=False,
                             parse_dates=["date"],
                             iterator=True, chunksize=5000)
        selected = [chunk[chunk['country_region_code'] == country_region_code]
                    for chunk in reader]
        return pd.concat(selected)
|
58090
|
import os
import re
import time
import shutil
from tempfile import mkdtemp
import operator
from collections.abc import Mapping
from pathlib import Path
import datetime
from .log import Handle
logger = Handle(__name__)
_FLAG_FIRST = object()
class Timewith:
    """Context manager that times a code region and records named checkpoints."""

    def __init__(self, name=""):
        self.name = name
        self.start = time.time()
        self.checkpoints = []

    @property
    def elapsed(self):
        """Seconds elapsed since this timer was created."""
        return time.time() - self.start

    def checkpoint(self, name=""):
        """Log a named checkpoint and record ``(name, elapsed)``."""
        seconds = self.elapsed
        stamp = datetime.datetime.now().strftime("%H:%M:%S")
        logger.info(
            "{time} {timer}: {checkpoint} in {elapsed:.3f} s.".format(
                timer=self.name,
                time=stamp,
                checkpoint=name,
                elapsed=seconds,
            ).strip()
        )
        self.checkpoints.append((name, seconds))

    def __enter__(self):
        """Object returned on entry."""
        return self

    def __exit__(self, type, value, traceback):
        """Log a final checkpoint on exit."""
        self.checkpoint("Finished")
        # NOTE: a second "Finished" entry is appended with a slightly later
        # elapsed value; kept for compatibility with existing consumers.
        self.checkpoints.append(("Finished", self.elapsed))
def temp_path(suffix=""):
    """Create a fresh temporary directory and return it as a :class:`Path`."""
    return Path(mkdtemp(suffix=suffix))
def flatten_dict(d, climb=False, safemode=False):
    """
    Flattens a nested dictionary containing only string keys.

    This will work for dictionaries which don't have two equivalent
    keys at the same level. If you're worried about this, use safemode=True.

    Partially taken from https://stackoverflow.com/a/6043835.

    Parameters
    ----------
    climb: :class:`bool`, :code:`False`
        Whether to keep trunk or leaf-values, for items with the same key.
    safemode: :class:`bool`, :code:`True`
        Whether to keep all keys as a tuple index, to avoid issues with
        conflicts.

    Returns
    -------
    :class:`dict`
        Flattened dictionary.
    """
    # Wrap a single key in a 1-tuple so ancestor paths can be joined with `+`.
    lift = lambda x: (x,)
    join = operator.add
    results = []

    def visit(subdict, results, partialKey):
        # Depth-first traversal; partialKey is the tuple of ancestor keys
        # (the _FLAG_FIRST sentinel marks the root level, before any key).
        for k, v in subdict.items():
            if partialKey == _FLAG_FIRST:
                newKey = lift(k)
            else:
                newKey = join(partialKey, lift(k))
            if isinstance(v, Mapping):
                visit(v, results, newKey)
            else:
                results.append((newKey, v))

    visit(d, results, _FLAG_FIRST)
    if safemode:
        # Keep the full tuple path as the key, so no collisions are possible.
        pick_key = lambda keys: keys
    else:
        # Keep only the leaf key; later (deeper or shallower) entries can
        # overwrite earlier ones when dict(items) is built below.
        pick_key = lambda keys: keys[-1]
    sort = map(
        lambda x: x[:2],
        sorted([(pick_key(k), v, len(k)) for k, v in results], key=lambda x: x[-1]),
    )  # sorted by depth
    if not climb:
        # We go down the tree, and prioritise the trunk values
        items = sort
    else:
        # We prioritise the leaf values
        items = [i for i in sort][::-1]
    return dict(items)
def swap_item(startlist: list, pull: object, push: object):
    """
    Swap a specified item in a list for another.

    Every element equal (``==``) to *pull* is replaced by *push*; all other
    elements are kept as-is. A new list is returned; *startlist* is untouched.

    Parameters
    ----------
    startlist : :class:`list`
        List to replace item within.
    pull
        Item to replace in the list.
    push
        Item to add into the list.

    Returns
    -------
    list
    """
    return [push if item == pull else item for item in startlist]
def copy_file(src, dst, ext=None, permissions=None):
    """
    Copy a file from one place to another.

    Uses the full filepath including name.

    Parameters
    ----------
    src : :class:`str` | :class:`pathlib.Path`
        Source filepath.
    dst : :class:`str` | :class:`pathlib.Path`
        Destination filepath or directory.
    ext : :class:`str`, :code:`None`
        Optional file extension specification, applied to both paths.
    permissions : :code:`None`
        Optional mode bits passed to :func:`os.chmod` on the destination.
    """
    source = Path(src)
    target = Path(dst)
    # A directory destination means "copy into it under the source name".
    if target.is_dir():
        target = target / source.name
    if ext is not None:
        source = source.with_suffix(ext)
        target = target.with_suffix(ext)
    logger.debug("Copying from {} to {}".format(source, target))
    with open(str(source), "rb") as fin, open(str(target), "wb") as fout:
        shutil.copyfileobj(fin, fout)
    if permissions is not None:
        os.chmod(str(target), permissions)
def remove_tempdir(directory):
    """
    Remove a specific directory, contained files and sub-directories.

    Parameters
    ----------
    directory: str, Path
        Path to directory.
    """
    target = Path(directory)
    try:
        shutil.rmtree(str(target))
        assert not target.exists()
    except PermissionError:
        # Locked/undeletable trees are silently left in place.
        pass
|
58108
|
# Convert a Keras SqueezeNet model to Caffe and sanity-check the converted net
# on a sample image.
import sys
sys.path.append('../../')
import keras2caffe

DATA_DIR='../../data/'

import caffe
import cv2
import numpy as np

import sys
sys.path.append('/media/toshiba_ml/models/keras-models/keras-squeezenet')
from keras_squeezenet import SqueezeNet

#TensorFlow backend uses all GPU memory by default, so we need limit
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
set_session(tf.Session(config=config))

#converting
keras_model = SqueezeNet()
keras2caffe.convert(keras_model, 'deploy.prototxt', 'SqueezeNet.caffemodel')

#testing the model
caffe.set_mode_gpu()
net  = caffe.Net('deploy.prototxt', 'SqueezeNet.caffemodel', caffe.TEST)

# Preprocess: resize to the network's 227x227 input and reorder channels.
img = cv2.imread(DATA_DIR+'bear.jpg')
img = cv2.resize(img, (227, 227))
img = img[...,::-1]  #RGB 2 BGR

data = np.array(img, dtype=np.float32)
# HWC -> CHW, then add the batch dimension expected by Caffe.
data = data.transpose((2, 0, 1))
data.shape = (1,) + data.shape

# Crude mean subtraction; presumably matches the training preprocessing —
# TODO confirm against the original SqueezeNet pipeline.
data -= 128

net.blobs['data'].data[...] = data

out = net.forward()

# The converted network's output blob keeps its Keras layer name.
preds = out['global_average_pooling2d_1']

# class_names.txt contains a Python literal mapping/sequence of class labels.
classes = eval(open(DATA_DIR+'class_names.txt', 'r').read())

print("Class is: " + classes[np.argmax(preds)])
print("Certainty is: " + str(preds[0][np.argmax(preds)]))
58123
|
# Line-based accumulator: each input line is "<op> <number>" where <op> is
# "+" (add), "-" (subtract) or "=" (print the running total and reset it).
# Usage: python script.py <input-file>
import sys

#print sys.argv[0], len( sys.argv )

if len(sys.argv) > 1:
    with open(sys.argv[1], 'r') as f_in:
        result = 0
        for line in f_in:
            data = line.strip().split()
            # print('data:', data)
            if not data:
                # Bug fix: blank lines produced an empty list and the
                # data[0] access below raised IndexError. Skip them.
                continue
            if data[0] == "+":
                result += float(data[1])
            elif data[0] == "-":
                result -= float(data[1])
            elif data[0] == "=":
                print("RESULT:", result)
                result = 0
            else:
                print('unknow:', data)
58174
|
import sys
class CommandError(Exception):
    """Raised by commands to report a user-facing failure message."""

    def __init__(self, message):
        # Keep the message both on the Exception base (for str()) and as an
        # attribute, so handlers can read it directly.
        self.message = message
        super().__init__(message)
class BaseCommand:
    # Minimal CLI command skeleton: subclasses provide an option parser and a
    # handle() implementation; run() wires them together and converts
    # CommandError into a printed message plus exit code 1.
    def run(self):
        """Parse command-line arguments and dispatch to handle()."""
        parser = self.get_optparser()
        (options, names) = parser.parse_args()

        try:
            self.handle(names, options)
        except CommandError as e:
            # Expected failure: show the message and usage, exit non-zero.
            print(e.message)
            parser.print_usage()
            sys.exit(1)

    def handle(self, args, options):
        """Execute the command. Must be overridden by subclasses."""
        raise NotImplementedError

    def get_optparser(self):
        """Return the optparse-style parser for this command. Must be overridden."""
        raise NotImplementedError
|
58175
|
import pytest
# from https://github.com/ethereum/tests/blob/c951a3c105d600ccd8f1c3fc87856b2bcca3df0a/BasicTests/txtest.json # noqa: E501
TRANSACTION_FIXTURES = [
{
"chainId": None,
"key": "c85ef7d79691fe79573b1a7064c19c1a9819ebdbd1faaab1a8ec92344438aaf4",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "13978aee95f38490e9769c39b2773ed763d9cd5f",
"value": 10000000000000000,
"data": "",
"signed": "f86b8085e8d4a510008227109413978aee95f38490e9769c39b2773ed763d9cd5f872386f26fc10000801ba0eab47c1a49bf2fe5d40e01d313900e19ca485867d462fe06e139e3a536c6d4f4a014a569d327dcda4b29f74f93c0e9729d2f49ad726e703f9cd90dbb0fbf6649f1" # noqa: E501
},
{
"chainId": None,
"key": "c87f65ff3f271bf5dc8643484f66b200109caffe4bf98c4cb393dc35740b28c0",
"nonce": 0,
"gasPrice": 1000000000000,
"gas": 10000,
"to": "",
"value": 0,
"data": "<KEY>", # noqa: E501
"signed": "<KEY>" # noqa: E501
},
{
"chainId": 1,
"key": "0x4c0883a69102937d6231471b5dbb6204fe5129617082792ae468d01a3f362318",
"nonce": 0,
"gasPrice": 234567897654321,
"gas": 2000000,
"to": "0xF0109fC8DF283027b6285cc889F5aA624EaC1F55",
"value": 1000000000,
"data": "",
"signed": "0xf86a8086d55698372431831e848094f0109fc8df283027b6285cc889f5aa624eac1f55843b9aca008025a009ebb6ca057a0535d6186462bc0b465b561c94a295bdb0621fc19208ab149a9ca0440ffd775ce91a833ab410777204d5341a6f9fa91216a6f3ee2c051fea6a0428", # noqa: E501
},
]
# Hand-built for 2930
TYPED_TRANSACTION_FIXTURES = [
{
"chainId": 1,
"nonce": 3,
"gasPrice": 1,
"gas": 25000,
"to": "b94f5374fce5edbc8e2a8697c15331677e6ebf0b",
"value": 10,
"data": "5544",
"access_list": [
[b'\xf0' * 20, [b'\0' * 32, b'\xff' * 32]],
],
"key": (b'\0' * 31) + b'\x01',
"sender": b'~_ER\t\x1ai\x12]]\xfc\xb7\xb8\xc2e\x90)9[\xdf',
"intrinsic_gas": 21000 + 32 + 2400 + 1900 * 2,
"for_signing": '01f87a0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff', # noqa: E501
"signed": '01f8bf0103018261a894b94f5374fce5edbc8e2a8697c15331677e6ebf0b0a825544f85bf85994f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f842a00000000000000000000000000000000000000000000000000000000000000000a0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80a017047e844eef895a876778a828731a33b67863aea7b9591a0001651ee47322faa043b4d0e8d59e8663c813ffa1bb99f020278a139f07c47f3858653071b3cec6b3', # noqa: E501
"hash": "13ab8b6371d8873405db20104705d7fecee2f9083f247250519e4b4c568b17fb",
}
]
@pytest.fixture(params=range(len(TRANSACTION_FIXTURES)))
def txn_fixture(request):
    # Parametrized over every legacy-transaction fixture dict above.
    return TRANSACTION_FIXTURES[request.param]
@pytest.fixture(params=range(len(TYPED_TRANSACTION_FIXTURES)))
def typed_txn_fixture(request):
    # Parametrized over every typed (EIP-2930) transaction fixture dict above.
    return TYPED_TRANSACTION_FIXTURES[request.param]
|
58189
|
import tarfile
import os
tar_content_files = [ {"name": "config", "arc_name": "config"},
{"name": "out/chart-verifier", "arc_name": "chart-verifier"} ]
def create(release):
    """Package the verifier binary and config into chart-verifier-<release>.tgz.

    Any pre-existing archive with the same name is removed first, because
    tarfile mode "x:gz" refuses to overwrite. Returns the absolute path of
    the created archive.
    """
    archive_name = f"chart-verifier-{release}.tgz"
    if os.path.exists(archive_name):
        os.remove(archive_name)
    cwd = os.getcwd()
    with tarfile.open(archive_name, "x:gz") as archive:
        for entry in tar_content_files:
            archive.add(os.path.join(cwd, entry["name"]),
                        arcname=entry["arc_name"])
    return os.path.join(cwd, archive_name)
|
58193
|
import numpy as np
import pandas as pd
import pytest
from sklearn.base import is_classifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearndf.classification import RandomForestClassifierDF
from sklearndf.pipeline import ClassifierPipelineDF
from test.sklearndf.pipeline import make_simple_transformer
def test_classification_pipeline_df(
    iris_features: pd.DataFrame, iris_target_sr: pd.DataFrame
) -> None:
    # Build a ClassifierPipelineDF with a preprocessing step that imputes
    # numeric columns and one-hot encodes object columns, then check it
    # behaves as a scikit-learn classifier end to end.
    cls_p_df = ClassifierPipelineDF(
        classifier=RandomForestClassifierDF(),
        preprocessing=make_simple_transformer(
            impute_median_columns=iris_features.select_dtypes(
                include=np.number
            ).columns,
            one_hot_encode_columns=iris_features.select_dtypes(include=object).columns,
        ),
    )
    assert is_classifier(cls_p_df)

    # Fit/predict should run without error on the iris data.
    cls_p_df.fit(X=iris_features, y=iris_target_sr)
    cls_p_df.predict(X=iris_features)

    # test-type check within constructor: native (non-DF) estimators must be
    # rejected with a TypeError.
    with pytest.raises(TypeError):
        # noinspection PyTypeChecker
        ClassifierPipelineDF(
            classifier=RandomForestClassifier(), preprocessing=OneHotEncoder()
        )
|
58197
|
from yaml.serializer import Serializer as YamlSerializer
from yaml.events import DocumentStartEvent, DocumentEndEvent
# override serialzier class to store data needed
# for extra data on anchor lines
class Serializer(YamlSerializer):
    """yaml Serializer subclass that tracks extra per-anchor data.

    Overrides the stock PyYAML serializer so that additional information
    needed for extra data on anchor lines can be stored alongside the
    anchors; the mapping is reset after every serialized document.
    """

    def __init__(self, encoding=None,
                 explicit_start=None, explicit_end=None, version=None, tags=None):
        super().__init__(encoding=encoding,
                         explicit_start=explicit_start, explicit_end=explicit_end, version=version,
                         tags=tags)
        # anchor -> extra data; cleared at the end of serialize().
        self.extra_anchor_data = {}

    def serialize(self, node):
        """Serialize one document rooted at *node*.

        Raises SerializerError when the serializer has not been opened or
        has already been closed.
        """
        # Bug fix: SerializerError was referenced but never imported at module
        # level, so both error paths below raised NameError instead of the
        # intended exception.
        from yaml.serializer import SerializerError

        if self.closed is None:
            raise SerializerError("serializer is not opened")
        elif self.closed:
            raise SerializerError("serializer is closed")
        self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
                                     version=self.use_version, tags=self.use_tags))
        self.anchor_node(node)
        self.serialize_node(node, None, None)
        self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
        # Reset all per-document bookkeeping, including our extra anchor data.
        self.serialized_nodes = {}
        self.anchors = {}
        self.extra_anchor_data = {}
        self.last_anchor_id = 0
|
58230
|
from librosa import cqt, icqt
import numpy as np
def gl_cqt(S, n_iter=32, sr=22050, hop_length=512, bins_per_octave=12, fmin=None, window='hann',
           dtype=np.float32, length=None, momentum=0.99, random_state=None, res_type='kaiser_fast'):
    """Griffin-Lim phase recovery for a constant-Q magnitude spectrogram.

    Starting from random phases, alternates icqt/cqt projections (with
    momentum acceleration) for ``n_iter`` iterations and returns the final
    time-domain signal from icqt.

    :param S: magnitude CQT spectrogram (n_bins x n_frames)
    :param momentum: acceleration factor in [0, 1); values > 1 warn, < 0 raise
    :param random_state: None, int seed, or a numpy RandomState
    :returns: reconstructed time-domain signal
    """
    # Bug fixes: the module only imports cqt/icqt from librosa, so the bare
    # `librosa` name, `warnings`, and `ParameterError` used below were all
    # undefined; import them locally here.
    import warnings
    import librosa

    if fmin is None:
        fmin = librosa.note_to_hz('C1')

    # Resolve the RNG used to draw the initial random phases.
    if random_state is None:
        rng = np.random
    elif isinstance(random_state, int):
        rng = np.random.RandomState(seed=random_state)
    elif isinstance(random_state, np.random.RandomState):
        rng = random_state

    if momentum > 1:
        warnings.warn('Griffin-Lim with momentum={} > 1 can be unstable. Proceed with caution!'.format(momentum))
    elif momentum < 0:
        raise librosa.ParameterError('griffinlim() called with momentum={} < 0'.format(momentum))

    # randomly initialize the phase
    angles = np.exp(2j * np.pi * rng.rand(*S.shape))

    # And initialize the previous iterate to 0
    rebuilt = 0.

    for _ in range(n_iter):
        # Store the previous iterate
        tprev = rebuilt
        # Bug fix: removed a leftover __import__('pdb').set_trace() that
        # dropped into the debugger on every iteration.

        # Invert with our current estimate of the phases.
        # NOTE(review): length/res_type are deliberately not forwarded here
        # (only on the final icqt call below) — confirm this is intended.
        inverse = icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
                       window=window)

        # Rebuild the spectrogram
        rebuilt = cqt(inverse, sr=sr, bins_per_octave=bins_per_octave, n_bins=S.shape[0],
                      hop_length=hop_length, fmin=fmin,
                      window=window, res_type=res_type)

        # Update our phase estimates (momentum-accelerated projection)
        angles[:] = rebuilt - (momentum / (1 + momentum)) * tprev
        angles[:] /= np.abs(angles) + 1e-16

    # Return the signal built from the final phase estimates
    return icqt(S * angles, sr=sr, hop_length=hop_length, bins_per_octave=bins_per_octave, fmin=fmin,
                window=window, length=length, res_type=res_type)
|
58263
|
import math
import numpy
def hill_chart_parametrisation(h, turbine_specs):
    """
    Calculates power and flow rate through bulb turbines based on Aggidis and Feather (2012)

    f_g = grid frequency, g_p = generator poles,
    t_cap = Turbine capacity, h = head difference, dens = water density
    Returns (power in MW, flow rate).
    """
    # Synchronous turbine speed from grid frequency and generator pole count.
    speed = 2 * 60 * turbine_specs["f_g"] / turbine_specs["g_p"]

    # Step 1: Calculate Hill Chart based on empirical equations
    unit_speed = speed * turbine_specs["t_d"] / math.sqrt(h)
    unit_flow = 0.0166 * unit_speed + 0.4861 if unit_speed < 255 else 4.75
    flow = unit_flow * (turbine_specs["t_d"] ** 2) * math.sqrt(h)
    efficiency = -0.0019 * unit_speed + 1.2461
    raw_power = turbine_specs["dens"] * turbine_specs["g"] * flow * h / (10 ** 6)

    # Step 2 - Adjust Curve according to capacity (0.9725 = gearbox efficiency)
    if raw_power * efficiency < turbine_specs["t_cap"]:
        capped_power = raw_power * 0.9725 * efficiency
    else:
        # Capacity-limited: clip power, then back-calculate the flow.
        capped_power = turbine_specs["t_cap"] * 0.9725
        raw_power = capped_power / (efficiency * 0.9725)
        flow = raw_power * (10 ** 6) / (turbine_specs["dens"] * turbine_specs["g"] * h)

    return capped_power, flow
def ideal_turbine_parametrisation(h, turbine_specs):
    """
    Calculates power and flow through a bulb turbine excluding efficiency loses

    Flow follows Torricelli's law through the runner cross-section; power is
    clipped at the turbine capacity, with flow back-calculated when clipped.
    Returns (power in MW, flow rate).
    """
    flow = math.pi * ((turbine_specs["t_d"] / 2)**2) * math.sqrt(2 * turbine_specs["g"] * h)
    power = turbine_specs["dens"] * turbine_specs["g"] * flow * h / (10 ** 6)
    if power >= turbine_specs["t_cap"]:
        # Capacity-limited: clip power and recompute the matching flow.
        power = turbine_specs["t_cap"]
        flow = power * (10 ** 6) / (turbine_specs["dens"] * turbine_specs["g"] * h)
    return power, flow
def turbine_parametrisation(h, turbine_specs):
    """
    Chooses between hill chart or idealised turbine parameterisation.

    options == 0 selects the empirical hill-chart model; any other value
    selects the idealised (loss-free) model. Returns (power, flow).
    """
    if turbine_specs["options"] == 0:
        model = hill_chart_parametrisation
    else:
        model = ideal_turbine_parametrisation
    return model(h, turbine_specs)
def gate_sluicing(h, ramp_f, N_s, q_s0, sluice_specs, flux_limiter=0.2):
    """
    Calculates overall flow through power plant sluice gates given the status of the operation

    Orifice-style discharge scaled by the ramp function ramp_f; once ramping
    is past half-way (ramp_f >= 0.5) the change relative to the previous flow
    q_s0 is limited to +/- flux_limiter to avoid abrupt jumps. Flow opposes
    the sign of the head difference h.
    """
    discharge = ramp_f ** 2 * N_s * sluice_specs["c_d"] * sluice_specs["a_s"] * math.sqrt(2 * sluice_specs["g"] * abs(h))
    ramping = ramp_f >= 0.5
    if ramping and abs(discharge) >= abs(q_s0) > 0.:
        # Flow is growing: cap the increase relative to the previous value.
        return -numpy.sign(h) * min(abs((1 + flux_limiter) * q_s0), abs(discharge))
    if ramping and abs(q_s0) >= abs(discharge):
        # Flow is shrinking: cap the decrease relative to the previous value.
        return -numpy.sign(h) * max(abs((1 - flux_limiter) * q_s0), abs(discharge))
    return -numpy.sign(h) * discharge
def turbine_sluicing(h, ramp_f, N_t, q_t0, sluice_specs, turbine_specs, flux_limiter=0.2):
    """
    Flow through the turbines when they operate in sluicing mode.

    Like :func:`gate_sluicing` but the opening area is the turbine swept
    area; includes a kick-start so a fully-ramped plant with zero previous
    flow still begins to sluice.
    """
    direction = -numpy.sign(h)
    area = math.pi * (turbine_specs["t_d"] / 2) ** 2
    q_free = (ramp_f ** 2) * N_t * sluice_specs["c_t"] * area * \
        math.sqrt(2 * sluice_specs["g"] * abs(h))
    if ramp_f >= 0.5 and abs(q_free) >= abs(q_t0):
        q_t = direction * min(abs((1 + flux_limiter) * q_t0), abs(q_free))
    elif ramp_f >= 0.5 and abs(q_t0) >= abs(q_free):
        q_t = direction * max(abs((1 - flux_limiter) * q_t0), abs(q_free))
    else:
        q_t = direction * q_free
    # With q_t0 == 0 the limiter above pins the flow at zero forever;
    # once nearly fully ramped, fall back to the free sluicing flow.
    if abs(h) != 0.0 and ramp_f >= 0.95 and q_t == 0.:
        q_t = direction * q_free
    return q_t
|
58406
|
import numpy as np
import pathlib
import Vox
import os
import sys
sys.path.append("../base")
import JSONHelper
def save_output(batch_size, rootdir, samples, outputs, is_testtime=False):
    """Write per-sample prediction artifacts for a batch to ``rootdir``.

    For each sample: the predicted heatmap as a .vox2 file, a predict.json
    with match score / scale / scan position, and a symlink to the input
    voxel grid. Ground-truth heatmaps are written only outside test time.

    Args:
        batch_size: number of samples in the batch to iterate over.
        rootdir: output root; one subdirectory per sample is created.
        samples: dict of batched input tensors/metadata (torch tensors).
        outputs: dict of batched model outputs (torch tensors on any device).
        is_testtime: when True, skip writing ground-truth heatmaps.
    """
    for i in range(batch_size):
        is_match = outputs["match"][i].item()
        if True:
            # Pull the i-th element of every batched field onto the CPU.
            sdf_scan = samples["sdf_scan"][i].numpy()
            df_cad = samples["df_cad"][i].numpy()
            heatmap_pred = outputs["heatmap"][i].data.cpu().numpy()
            grid2world_scan = samples["grid2world_scan"][i].numpy()
            grid2world_cad = samples["grid2world_cad"][i].numpy()
            basename_save = samples["basename_save"][i]
            voxres_scan = samples["voxres_scan"][i]
            voxres_cad = samples["voxres_cad"][i]
            scale = outputs["scale"][i].data.cpu().numpy().tolist()
            p_scan = samples["p_scan"][i].numpy().tolist()
            savedir = rootdir + "/" + basename_save
            # parents=False: rootdir must already exist.
            pathlib.Path(savedir).mkdir(parents=False, exist_ok=True)
            # Channel 0 is dropped: dims are taken from axes 1..3 of the CAD df grid.
            dims_cad = [df_cad.shape[1], df_cad.shape[2], df_cad.shape[3]]
            vox = Vox.Vox(dims_cad, voxres_cad, grid2world_cad, df_cad, heatmap_pred)
            Vox.write_vox(savedir + "/predict-heatmap.vox2", vox)
            item = {"match" : is_match, "scale" : scale, "p_scan" : p_scan}
            JSONHelper.write(savedir + "/predict.json", item)
            # Link rather than copy the (large) input voxel grid.
            force_symlink(savedir + "/input-center.vox", samples["filename_vox_center"][i])
            if is_testtime:
                # No ground truth available at test time.
                continue
            #if is_match > 0.95:
            #    print(savedir)
            #    print(scale)
            #    dim_scan = [sdf_scan.shape[1], sdf_scan.shape[2], sdf_scan.shape[3]]
            #    vox = Vox.Vox(dim_scan, voxres_scan, grid2world_scan, sdf_scan)
            #    Vox.write_vox(savedir + "/input-center.vox", vox)
            #    quit()
            heatmap_gt = outputs["heatmap_gt"][i].data.cpu().numpy()
            dim_cad = [df_cad.shape[1], df_cad.shape[2], df_cad.shape[3]]
            vox = Vox.Vox(dim_cad, voxres_cad, grid2world_cad, df_cad, heatmap_gt)
            Vox.write_vox(savedir + "/gt-heatmap.vox2", vox)
def force_symlink(linkname, target):
    """Create the symlink ``linkname`` -> ``target``, replacing any existing one.

    The original caught every exception (including permission errors or an
    invalid path) and then tried to remove ``linkname``, masking the real
    failure. Only a pre-existing file is a recoverable condition, so catch
    exactly ``FileExistsError``; anything else propagates.

    Args:
        linkname: path of the symlink to create.
        target: path the symlink points at.
    """
    try:
        os.symlink(target, linkname)
    except FileExistsError:
        os.remove(linkname)
        os.symlink(target, linkname)
|
58441
|
from SEAL import SplineSpace, create_knots
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Spline demo: build a degree-2 spline space on [0, 1] with n = 10 basis
# functions and plot a 3D spline curve together with its control polygon.
p = 2
n = 10
t = create_knots(0, 1, p, n)
S = SplineSpace(p, t)
# Control points in R^3, one per basis function (len(c) == n).
c = [(0, 1, 0), (1, 2, 1), (1.5, 3, 2), (1.7, -1, 3), (1, -1.5, 4), (3, 3, 3), (4, 4, 3), (5, 2, 2), (6, 5, 4), (7, -1, 5)]
f = S(c)
# Evaluate the spline at the space's own parameter values.
x = S.parameter_values()
y = f(x)
# NOTE(review): presumably the refined control polygon after 4 subdivision
# iterations -- confirm `visualize` semantics against the SEAL docs.
cp = f.visualize(iterations=4)
fig = plt.figure()
axs = Axes3D(fig)
# Draw control polygon and curve in the same 3D axes.
axs.plot(*zip(*cp))
axs.plot(*zip(*y))
plt.show()
|
58460
|
from base64 import b64encode
def get_token(custos_settings):
    """Return the base64-encoded ``client_id:client_secret`` pair for Custos auth."""
    credentials = "{}:{}".format(custos_settings.CUSTOS_CLIENT_ID,
                                 custos_settings.CUSTOS_CLIENT_SEC)
    return b64encode(credentials.encode('utf-8')).decode('utf-8')
|
58514
|
from kairon import cli
import logging
# Entry point: enable verbose logging, then hand control to the kairon CLI.
if __name__ == "__main__":
    logging.basicConfig(level="DEBUG")
    cli()
|
58524
|
import sys
import pyshorteners
def shorten_url(url):
    """Shorten *url* via the TinyURL service (makes a network request)."""
    shortener = pyshorteners.Shortener()
    return shortener.tinyurl.short(url)
def get_code(authorize_url):
    """Print a shortened authorization URL and return the code the user enters.

    Clears the terminal, shortens ``authorize_url``, prompts the user to open
    it in a browser, and reads back the verification code from stdin.
    """
    # ANSI escape: clear screen and move the cursor to the top-left.
    sys.stderr.write("\x1b[2J\x1b[H")
    short_url = shorten_url(authorize_url)
    """Show authorization URL and return the code the user wrote."""
    message = "Check this link in your browser: " + short_url
    sys.stderr.write("\n")
    sys.stderr.write("\n")
    sys.stderr.write("Youtube authentication required!\n")
    sys.stderr.write(message + "\n")
    # Python 2/3 compatibility shim: prefer raw_input when it exists.
    try: input = raw_input #For Python2 compatability
    except NameError:
        #For Python3 on Windows compatability
        try: from builtins import input as input
        except ImportError: pass
    return input("Enter verification code: ")
|
58611
|
class Solution:
    def rotate(self, nums: List[int], k: int) -> None:
        """Rotate ``nums`` right by ``k`` steps, in place.

        Fix: the original raised ``ZeroDivisionError`` on an empty list
        (``k %= len(nums)``); an empty list is now a no-op. The rotated
        sequence is assigned through a slice so the caller's list object
        is mutated, as LeetCode's in-place contract requires.

        Args:
            nums: list to rotate (modified in place).
            k: number of positions to rotate right; may exceed len(nums).
        """
        if not nums:
            return
        k %= len(nums)
        if k > 0:
            nums[:] = nums[-k:] + nums[:-k]
|
58615
|
import pathlib
import typing
import urllib.parse
# Directory containing this plugin module; the public constants expose it and
# its bundled `configs`/`scripts` subdirectories as plain strings.
_PLUGIN_DIR = pathlib.Path(__file__).parent
PLUGIN_DIR = str(_PLUGIN_DIR)
CONFIGS_DIR = str(_PLUGIN_DIR.joinpath('configs'))
SCRIPTS_DIR = str(_PLUGIN_DIR.joinpath('scripts'))
def scan_sql_directory(root: str) -> typing.List[pathlib.Path]:
    """Return all ``*.sql`` files directly inside *root*, sorted by name."""
    entries = sorted(pathlib.Path(root).iterdir())
    return [entry for entry in entries if entry.is_file() and entry.suffix == '.sql']
def connstr_replace_dbname(connstr: str, dbname: str) -> str:
    """Replace dbname in existing connection string.

    Supports key/value strings ending in ``dbname=`` and
    ``postgresql://`` URLs; anything else raises ``RuntimeError``.
    """
    if connstr.endswith(' dbname='):
        return connstr + dbname
    if connstr.startswith('postgresql://'):
        parsed = urllib.parse.urlparse(connstr)
        # pylint: disable=protected-access
        return parsed._replace(path=dbname).geturl()
    raise RuntimeError(
        f'Unsupported PostgreSQL connection string format {connstr!r}',
    )
|
58696
|
from __future__ import print_function
import os
from setuptools import setup, find_packages
# Paths are resolved relative to this setup.py; `js` and `.git` are probed so
# the build can distinguish a source checkout from an sdist install.
here = os.path.dirname(os.path.abspath(__file__))
node_root = os.path.join(here, 'js')
is_repo = os.path.exists(os.path.join(here, '.git'))
from distutils import log
log.set_verbosity(log.DEBUG)
log.info('setup.py entered')
log.info('$PATH=%s' % os.environ['PATH'])
LONG_DESCRIPTION = 'iclientpy ext jupyterhub'
# Read __version__ from the package's _version.py without importing the package
# (avoids pulling in package dependencies at build time).
version_ns = {}
with open(os.path.join(here, 'iclientpyjupyterhubext', '_version.py')) as f:
    exec(f.read(), {}, version_ns)
setup_args = {
    'name': 'iclientpy.jupyterhub.ext',
    'version': version_ns['__version__'],
    'description': 'iclientpy for jupyterhub',
    'long_description': LONG_DESCRIPTION,
    'include_package_data': True,
    'install_requires': [
        'pamela>=0.3.0',
        'python_dateutil>=2.6.1',
        'tornado>=4.5.3',
        'jupyterhub>=0.8.1'
    ],
    # Exclude test packages from the distribution.
    'packages': find_packages(exclude=("*.test", "*.test.*", "test.*", "test")),
    'zip_safe': False,
    'author': 'supermap',
    'author_email': '<EMAIL>',
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Framework :: IPython',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'Topic :: Multimedia :: Graphics',
        'Programming Language :: Python :: 3.6',
    ],
}
setup(**setup_args)
|
58702
|
from . import halos as hal
from .pyutils import deprecated
@deprecated(hal.HaloProfileNFW)
def nfw_profile_3d(cosmo, concentration, halo_mass, odelta, a, r):
    """Calculate the 3D NFW halo profile at a given radius or an array of radii,
    for a halo with a given mass, mass definition, and concentration,
    at a given scale factor, with a cosmology dependence.

    .. note:: Note that this function is deprecated. Please use the
              functionality in the :mod:`~pyccl.halos.profiles` module.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): cosmological parameters.
        concentration (float): halo concentration.
        halo_mass (float): halo masses; in units of Msun.
        odelta (float): overdensity with respect to mean matter density.
        a (float): scale factor.
        r (float or array_like): radius or radii to calculate profile for,
                                 in units of Mpc.

    Returns:
        float or array_like: 3D NFW density at r, in units of Msun/Mpc^3.
    """
    # Fixed concentration wrapped in the new halo-model API; truncated=False
    # reproduces the legacy untruncated profile.
    mdef = hal.MassDef(odelta, 'matter')
    c = hal.ConcentrationConstant(c=concentration,
                                  mdef=mdef)
    p = hal.HaloProfileNFW(c, truncated=False)
    return p.real(cosmo, r, halo_mass, a, mdef)
@deprecated(hal.HaloProfileEinasto)
def einasto_profile_3d(cosmo, concentration, halo_mass, odelta, a, r):
    """Calculate the 3D Einasto halo profile
    at a given radius or an array of radii,
    for a halo with a given mass, mass definition, and concentration,
    at a given scale factor, with a cosmology dependence.
    The alpha parameter is calibrated using the relation with peak height in
    https://arxiv.org/pdf/1401.1216.pdf eqn5, assuming virial mass.

    .. note:: Note that this function is deprecated. Please use the
              functionality in the :mod:`~pyccl.halos.profiles` module.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): cosmological parameters.
        concentration (float): halo concentration.
        halo_mass (float): halo masses; in units of Msun.
        odelta (float): overdensity with respect to mean matter density.
        a (float): scale factor.
        r (float or array_like): radius or radii to calculate profile for,
                                 in units of Mpc.

    Returns:
        float or array_like: 3D Einasto density at r, in units of Msun/Mpc^3.
    """
    mdef = hal.MassDef(odelta, 'matter')
    c = hal.ConcentrationConstant(c=concentration,
                                  mdef=mdef)
    # NOTE(review): mdef is rebuilt here with the c(M) relation attached; only
    # this second instance is passed to real() below -- confirm the first
    # (plain) mdef is needed beyond seeding ConcentrationConstant.
    mdef = hal.MassDef(odelta, 'matter',
                       c_m_relation=c)
    p = hal.HaloProfileEinasto(c, truncated=False)
    return p.real(cosmo, r, halo_mass, a, mdef)
@deprecated(hal.HaloProfileHernquist)
def hernquist_profile_3d(cosmo, concentration, halo_mass, odelta, a, r):
    """Calculate the 3D Hernquist halo profile
    at a given radius or an array of radii,
    for a halo with a given mass, mass definition, and concentration,
    at a given scale factor, with a cosmology dependence.

    .. note:: Note that this function is deprecated. Please use the
              functionality in the :mod:`~pyccl.halos.profiles` module.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): cosmological parameters.
        concentration (float): halo concentration.
        halo_mass (float): halo masses; in units of Msun.
        odelta (float): overdensity with respect to mean matter density.
        a (float): scale factor.
        r (float or array_like): radius or radii to calculate profile for,
                                 in units of Mpc.

    Returns:
        float or array_like: 3D Hernquist density at r, in units of Msun/Mpc^3.
    """
    mdef = hal.MassDef(odelta, 'matter')
    c = hal.ConcentrationConstant(c=concentration,
                                  mdef=mdef)
    p = hal.HaloProfileHernquist(c, truncated=False)
    return p.real(cosmo, r, halo_mass, a, mdef)
@deprecated(hal.HaloProfileNFW)
def nfw_profile_2d(cosmo, concentration, halo_mass, odelta, a, r):
    """Calculate the 2D projected NFW halo profile
    at a given radius or an array of radii,
    for a halo with a given mass, mass definition, and concentration,
    at a given scale factor, with a cosmology dependence.

    .. note:: Note that this function is deprecated. Please use the
              functionality in the :mod:`~pyccl.halos.profiles` module.

    Args:
        cosmo (:class:`~pyccl.core.Cosmology`): cosmological parameters.
        concentration (float): halo concentration.
        halo_mass (float): halo masses; in units of Msun.
        odelta (float): overdensity with respect to mean matter density.
        a (float): scale factor.
        r (float or array_like): radius or radii to calculate profile for,
                                 in units of Mpc.

    Returns:
        float or array_like: 2D projected NFW density at r, \
            in units of Msun/Mpc^2.
    """
    mdef = hal.MassDef(odelta, 'matter')
    c = hal.ConcentrationConstant(c=concentration,
                                  mdef=mdef)
    # projected_analytic=True uses the closed-form surface-density integral.
    p = hal.HaloProfileNFW(c, truncated=False,
                           projected_analytic=True)
    return p.projected(cosmo, r, halo_mass, a, mdef)
|
58708
|
import numpy as np
import json
from os.path import join
from tqdm import tqdm
from scipy.optimize import least_squares
from pose_optimize.multiview_geo import reproject_error
DEBUG=False
def reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23):
    '''
    Per-keypoint 2D reprojection error of a 3D pose in two camera views.
    Return:
        kp4_e, kp6_e: error array, both (23,) shape
    '''
    assert p3d.shape == (num_kpt, 3)
    assert p4.shape == (num_kpt, 2)
    assert p6.shape == (num_kpt, 2)

    def _project(cam_proj):
        # Apply [R|t] then divide by depth to get pixel coordinates.
        cam = np.dot(cam_proj[0:3, 0:3], p3d.T) + cam_proj[0:3, 3].reshape([-1, 1])
        return cam[0:2, :] / cam[2, :]

    kp4_recon = _project(cam_proj_4)
    kp6_recon = _project(cam_proj_6)
    kp4_e = np.sqrt(np.sum(np.square(kp4_recon.T - p4), axis=1))
    kp6_e = np.sqrt(np.sum(np.square(kp6_recon.T - p6), axis=1))
    return kp4_e, kp6_e
def reproject_error_loss_score(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23):
    '''
    Confidence-weighted per-keypoint reprojection error; p4/p6 carry a
    detection score in column 2 that scales each keypoint's error.
    Return:
        kp4_e, kp6_e: error array, both (23,) shape
    '''
    assert p3d.shape == (num_kpt, 3)
    assert p4.shape == (num_kpt, 3)
    assert p6.shape == (num_kpt, 3)

    def _weighted_error(cam_proj, detections):
        cam = np.dot(cam_proj[0:3, 0:3], p3d.T) + cam_proj[0:3, 3].reshape([-1, 1])
        pix = cam[0:2, :] / cam[2, :]
        dist = np.sqrt(np.sum(np.square(pix.T - detections[:, :2]), axis=1))
        return detections[:, 2] * dist

    return _weighted_error(cam_proj_4, p4), _weighted_error(cam_proj_6, p6)
def optimze_loss_2d(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23, lambda_reproj = 1):
    '''
    Residual vector containing only the (weighted) reprojection losses of
    both camera views, for use with scipy least_squares.
    '''
    p3d = p3d_faltten.reshape([-1, 3])
    kp4_e, kp6_e = reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
    return np.concatenate((lambda_reproj * kp4_e, lambda_reproj * kp6_e))
def shape_dis_loss(kpt_3d_array, median_bone, left_list, right_list, num_kpt=23):
    '''
    Shape-prior loss: absolute deviation of each left/right bone length
    from its median (prior) length.
    '''
    assert kpt_3d_array.shape == (num_kpt, 3)
    assert len(left_list) == len(right_list)
    assert len(left_list) == len(median_bone.keys())

    def _bone_errors(pairs):
        # One |length - median| entry per bone.
        errors = np.zeros(len(pairs))
        for i, (start, end) in enumerate(pairs):
            vec = kpt_3d_array[end, :] - kpt_3d_array[start, :]
            length = np.sqrt(np.dot(vec, vec))
            errors[i] = abs(length - median_bone[str(i)])
        return errors

    return _bone_errors(left_list), _bone_errors(right_list)
def optimze_loss(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone, num_kpt=23, lambda_reproj = 0.1, lambda_shape=5.0):
    '''
    Full residual vector: score-weighted reprojection errors of both views
    plus the left/right shape-prior deviations, each with its own weight.
    '''
    p3d = p3d_faltten.reshape([-1, 3])
    kp4_e, kp6_e = reproject_error_loss_score(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
    left_error, right_error = shape_dis_loss(p3d, median_bone, left_list, right_list, num_kpt=23)
    return np.concatenate((lambda_reproj * kp4_e, lambda_reproj * kp6_e,
                           lambda_shape * left_error, lambda_shape * right_error))
def optimze_loss_no_score(p3d_faltten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone, num_kpt=23, lambda_reproj = 0.1, lambda_shape=1.0):
    '''
    Like optimze_loss but with unweighted (score-free) reprojection errors;
    p4/p6 are plain (N, 2) pixel coordinates here.
    '''
    p3d = p3d_faltten.reshape([-1, 3])
    kp4_e, kp6_e = reproject_error_loss(p3d, p4, p6, cam_proj_4, cam_proj_6, num_kpt=23)
    left_error, right_error = shape_dis_loss(p3d, median_bone, left_list, right_list, num_kpt=23)
    return np.concatenate((lambda_reproj * kp4_e, lambda_reproj * kp6_e,
                           lambda_shape * left_error, lambda_shape * right_error))
def centerize_keypoint(p1, p2, norm_dst):
    '''
    Shift p1 and p2 symmetrically about their midpoint so the distance
    between them becomes ``norm_dst`` while keeping their direction.

    Bug fix: the original accepted ``norm_dst`` but never used it, so the
    returned points were always exactly 1 apart regardless of the bone
    length callers requested (shape_initialize passes median bone lengths).

    Args:
        p1, p2: (3,) arrays, the two joint positions.
        norm_dst: desired distance between the returned points.
    Return:
        (p1_shift, p2_shift): re-centred points, ``norm_dst`` apart.
    '''
    assert p1.shape == (3,)
    assert p2.shape == (3,)
    p_center = (p1 + p2) / 2
    p_vec = (p1 - p2)
    p_dis = np.sqrt(np.dot(p_vec, p_vec))
    unit = p_vec / p_dis
    p1_shift = p_center + 0.5 * norm_dst * unit
    p2_shift = p_center - 0.5 * norm_dst * unit
    return p1_shift, p2_shift
def shape_initialize(left_list, right_list, median_bone, kpt_3d_array, num_kpt=23):
    '''
    Initialize human joints 3D position from shape prior.

    Re-anchors the torso (shoulder/hip bones, indices 0-2 of the bone lists)
    to the prior bone lengths, then propagates the remaining bones outward
    from their start joints, shrinking any bone whose length ratio to the
    prior exceeds ``ratio_outlier`` back towards ``ratio_draw_back``.

    Args:
        left_list / right_list: per-bone [start_idx, end_idx] joint pairs.
        median_bone: dict mapping str(bone_index) -> prior median length.
        kpt_3d_array: (num_kpt, 3) triangulated joint positions.
    Returns:
        (num_kpt, 3) array of re-initialized joint positions.
    '''
    assert kpt_3d_array.shape == (num_kpt,3)
    assert len(left_list) == len(right_list)
    assert len(left_list) == len(median_bone.keys())
    num_bone = len(left_list)
    left_ratio_list, right_ratio_list = [],[]
    vec_left_list, vec_right_list = [], []
    # Bones stretched beyond 1.5x the prior are considered outliers and are
    # drawn back to 1.1x the prior length.
    ratio_outlier = 1.5
    ratio_draw_back = 1.1
    # Measured-length / prior-length ratio and bone vector for every bone.
    for i in range(num_bone):
        bon_vec_left = kpt_3d_array[left_list[i][1],:] - kpt_3d_array[left_list[i][0],:]
        ratio_left = np.sqrt(np.dot(bon_vec_left, bon_vec_left))/ median_bone[str(i)]
        left_ratio_list += [ratio_left]
        vec_left_list += [bon_vec_left]
    for i in range(num_bone):
        bon_vec_right = kpt_3d_array[right_list[i][1],:] - kpt_3d_array[right_list[i][0],:]
        ratio_right = np.sqrt(np.dot(bon_vec_right, bon_vec_right))/median_bone[str(i)]
        right_ratio_list += [ratio_right]
        vec_right_list += [bon_vec_right]
    kp_3d_new = np.zeros(kpt_3d_array.shape)
    # Adjust Shoulder to hip
    kp_3d_new[left_list[2][0], :], kp_3d_new[left_list[2][1], :] = centerize_keypoint(kpt_3d_array[left_list[2][0], :], kpt_3d_array[left_list[2][1], :] , median_bone["2"])
    kp_3d_new[right_list[2][0], :], kp_3d_new[right_list[2][1], :] = centerize_keypoint(kpt_3d_array[right_list[2][0], :], kpt_3d_array[right_list[2][1], :] , median_bone["2"])
    # Adjust shoulder and Hip pair
    sh_p = left_list[0]
    hi_p = left_list[1]
    kp_3d_new[sh_p[0]], kp_3d_new[sh_p[1]] = centerize_keypoint(kp_3d_new[sh_p[0]], kp_3d_new[sh_p[1]], median_bone["0"]) # shoulder
    kp_3d_new[hi_p[0]], kp_3d_new[hi_p[1]] = centerize_keypoint(kp_3d_new[hi_p[0]], kp_3d_new[hi_p[1]], median_bone["1"]) # hip
    # left part
    # Propagate limb bones from the already-fixed torso joints; NOTE this
    # assumes left_list/right_list are ordered so each bone's start joint was
    # placed by an earlier bone -- TODO confirm against the prior file.
    for i in range(2, num_bone):
        start_indx, end_indx = tuple(left_list[i])
        if left_ratio_list[i] < ratio_outlier:
            kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_left_list[i]
        else:
            kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_left_list[i]/left_ratio_list[i]*ratio_draw_back
    for i in range(2, num_bone):
        start_indx, end_indx = tuple(right_list[i])
        if right_ratio_list[i] < ratio_outlier:
            kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_right_list[i]
        else:
            kp_3d_new[end_indx, :] = kp_3d_new[start_indx, :] + vec_right_list[i]/right_ratio_list[i]*ratio_draw_back
    # left_error, right_error = loss_kpt_3d(kp_3d_new, median_bone, left_list, right_list)
    # print(left_error)
    # print(right_error)
    # print("OK")
    return kp_3d_new
def fintune_human_keypoint_2d(P4, P6, path4, path6, path3D, path_finetune=None):
    """Refine per-person 3D keypoints by minimising two-view reprojection error.

    For every frame/person in the 2D detection files, runs scipy
    ``least_squares`` on the flattened 3D pose using only the reprojection
    residual (``optimze_loss_2d``) and stores the tuned pose plus the final
    per-keypoint errors of both views.

    Args:
        P4, P6: unused here -- the projection matrices are read from path3D.
        path4, path6: JSON files of 2D detections per frame/person (homogeneous,
            i.e. (N, 3) with a score column).
        path3D: JSON with initial "3D" poses and "P4"/"P6" projection matrices.
        path_finetune: optional output JSON path for the tuned results.
    Returns:
        dict with keys "P4", "P6", "3D", "kp4_e", "kp6_e".
    """
    with open(path3D,"r") as f:
        data_3d = json.load(f)
    with open(path4, "r") as f:
        data_dict4 = json.load(f)
    with open(path6, "r") as f:
        data_dict6 = json.load(f)
    # frame_id = next(iter(data_3d["3D"].keys()))
    # person_id = next(iter(data_3d["3D"][frame_id].keys()))
    # # frame_id = "000005"
    # # person_id = "000"
    cam_proj_4 = np.array(data_3d["P4"])
    cam_proj_6 = np.array(data_3d["P6"])
    data_3d_dict = {}
    data_3d_dict["P4"] = data_3d["P4"]
    data_3d_dict["P6"] = data_3d["P6"]
    data_3d_dict["3D"] = {}
    data_3d_dict["kp4_e"] = {}
    data_3d_dict["kp6_e"] = {}
    frame_list = [k for k in data_dict4.keys()]
    frame_list.sort()
    for i, frame_id in enumerate(tqdm(frame_list)):
        frame_3d_dict = {}
        kp4_dict = {}
        kp6_dict = {}
        person_list = [k for k in data_dict4[frame_id].keys()]
        person_list.sort()
        for person_id in person_list:
            p3d_flatten = np.array(data_3d["3D"][frame_id][person_id]).ravel()
            p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
            p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
            # Drop the score column: this variant optimises unweighted errors.
            p4 = p4_homo[:,:2]
            p6 = p6_homo[:,:2]
            if DEBUG:
                loss_init = optimze_loss_2d(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6)
                print("Initial error", str(np.sqrt(np.sum(np.square(loss_init)))) )
            res = least_squares(optimze_loss_2d, p3d_flatten, verbose=0, x_scale='jac', ftol=1e-4, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6))
            if DEBUG:
                loss_final = res.fun
                print("Final error", str(np.sqrt(np.sum(np.square(loss_final)))) )
                loss_final = optimze_loss_2d(res.x, p4, p6, cam_proj_4, cam_proj_6)
                print("Final error", str(np.sqrt(np.sum(np.square(loss_final)))) )
            p3d_tune = res.x.reshape([-1,3])
            kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4, p6, cam_proj_4, cam_proj_6)
            frame_3d_dict[person_id] = p3d_tune.tolist()
            kp4_dict[person_id] = kp4_e.tolist()
            kp6_dict[person_id] = kp6_e.tolist()
        data_3d_dict["3D"][frame_id] = frame_3d_dict
        data_3d_dict["kp4_e"][frame_id] = kp4_dict
        data_3d_dict["kp6_e"][frame_id] = kp6_dict
    if path_finetune is not None:
        with open(path_finetune, "w") as f:
            json.dump(data_3d_dict, f)
    return data_3d_dict
def finetune_human_3d(path_finetune_input, path4, path6, shape_prior_path, shape_prior_finetune_output, frame_list=None):
    '''
    Refine 3D poses with the full loss: score-weighted reprojection residuals
    plus bone-length shape-prior residuals, after shape-based initialisation.

    path_finetune_input: JSON with initial "3D" poses and projection matrices
    path4: data_C4.json (2D detections with score column, camera 4)
    path6: data_C6.json (2D detections with score column, camera 6)
    shape_prior_path: JSON with left_list/right_list bone pairs and median_bone
    shape_prior_finetune_output: output JSON path for the tuned results
    frame_list: optional subset of frame ids; must all exist in path4
    '''
    with open(path_finetune_input,"r") as f:
        data_3d = json.load(f)
    with open(path4, "r") as f:
        data_dict4 = json.load(f)
    with open(path6, "r") as f:
        data_dict6 = json.load(f)
    with open(shape_prior_path, 'r') as f:
        data_prior = json.load(f)
    left_list = data_prior["left_list"]
    right_list = data_prior["right_list"]
    median_bone = data_prior["median_bone"]
    cam_proj_4 = np.array(data_3d["P4"])
    cam_proj_6 = np.array(data_3d["P6"])
    data_3d_dict = {}
    data_3d_dict["P4"] = data_3d["P4"]
    data_3d_dict["P6"] = data_3d["P6"]
    data_3d_dict["3D"] = {}
    data_3d_dict["kp4_e"] = {}
    data_3d_dict["kp6_e"] = {}
    # Validate a user-supplied frame subset, else process all frames sorted.
    if frame_list:
        for f in frame_list:
            if f not in data_dict4.keys():
                print("KEY ERROR!")
                assert 0
    else:
        frame_list = [k for k in data_dict4.keys()]
        frame_list.sort()
    for i, frame_id in enumerate(tqdm(frame_list)):
        frame_3d_dict = {}
        kp4_dict = {}
        kp6_dict = {}
        person_list = [k for k in data_dict4[frame_id].keys()]
        person_list.sort()
        for person_id in person_list:
            p3d = np.array(data_3d["3D"][frame_id][person_id]).reshape([-1,3])
            # Snap the initial pose to the bone-length prior before optimising.
            p3d_init = shape_initialize(left_list, right_list, median_bone, p3d)
            p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
            p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
            # Score column kept: optimze_loss weights errors by detection score.
            p4 = p4_homo
            p6 = p6_homo
            p3d_flatten = p3d_init.flatten()
            # loss_init = optimze_loss(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
            #print(np.linalg.norm(loss_init))
            res = least_squares(optimze_loss, p3d_flatten, verbose=0, x_scale='jac', ftol=1e-2, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone))
            p3d_tune = res.x.reshape([-1,3])
            # loss_res = optimze_loss(res.x, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
            # print(np.linalg.norm(loss_res))
            kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4[:,:2], p6[:,:2], cam_proj_4, cam_proj_6)
            frame_3d_dict[person_id] = p3d_tune.tolist()
            kp4_dict[person_id] = kp4_e.tolist()
            kp6_dict[person_id] = kp6_e.tolist()
        data_3d_dict["3D"][frame_id] = frame_3d_dict
        data_3d_dict["kp4_e"][frame_id] = kp4_dict
        data_3d_dict["kp6_e"][frame_id] = kp6_dict
    with open(shape_prior_finetune_output, "w") as f:
        json.dump(data_3d_dict, f)
def finetune_human_3d_no_score(path_finetune_input, path4, path6, shape_prior_path, shape_prior_finetune_output, frame_list=None):
    '''
    Variant of finetune_human_3d that optimises the score-free loss
    (optimze_loss_no_score) on plain (x, y) detections.

    path_finetune_input: JSON with initial "3D" poses and projection matrices
    path4: data_C4.json (2D detections, camera 4)
    path6: data_C6.json (2D detections, camera 6)
    shape_prior_path: JSON with left_list/right_list bone pairs and median_bone
    shape_prior_finetune_output: output JSON path for the tuned results
    frame_list: optional subset of frame ids; must all exist in path4
    '''
    with open(path_finetune_input,"r") as f:
        data_3d = json.load(f)
    with open(path4, "r") as f:
        data_dict4 = json.load(f)
    with open(path6, "r") as f:
        data_dict6 = json.load(f)
    with open(shape_prior_path, 'r') as f:
        data_prior = json.load(f)
    left_list = data_prior["left_list"]
    right_list = data_prior["right_list"]
    median_bone = data_prior["median_bone"]
    cam_proj_4 = np.array(data_3d["P4"])
    cam_proj_6 = np.array(data_3d["P6"])
    data_3d_dict = {}
    data_3d_dict["P4"] = data_3d["P4"]
    data_3d_dict["P6"] = data_3d["P6"]
    data_3d_dict["3D"] = {}
    data_3d_dict["kp4_e"] = {}
    data_3d_dict["kp6_e"] = {}
    if frame_list:
        for f in frame_list:
            if f not in data_dict4.keys():
                print("KEY ERROR!")
                assert 0
    else:
        frame_list = [k for k in data_dict4.keys()]
        frame_list.sort()
    for i, frame_id in enumerate(tqdm(frame_list)):
        # NOTE(review): hard stop after 300 frames via sys.exit() looks like a
        # debugging leftover -- it also skips writing the output file. Confirm.
        if i > 300:
            import sys
            sys.exit()
        frame_3d_dict = {}
        kp4_dict = {}
        kp6_dict = {}
        person_list = [k for k in data_dict4[frame_id].keys()]
        person_list.sort()
        for person_id in person_list:
            # NOTE(review): bare except below swallows all errors per person
            # and only prints "Error"; failed persons are silently dropped.
            try:
                p3d = np.array(data_3d["3D"][frame_id][person_id]).reshape([-1,3])
                p3d_init = shape_initialize(left_list, right_list, median_bone, p3d)
                p4_homo = np.array(data_dict4[frame_id][person_id]).reshape([-1,3])
                p6_homo = np.array(data_dict6[frame_id][person_id]).reshape([-1,3])
                # Score column dropped: the no-score loss uses plain pixels.
                p4 = p4_homo[:,:2]
                p6 = p6_homo[:,:2]
                p3d_flatten = p3d_init.flatten()
                # loss_init = optimze_loss(p3d_flatten, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
                #print(np.linalg.norm(loss_init))
                res = least_squares(optimze_loss_no_score, p3d_flatten, verbose=2, x_scale='jac', ftol=1e-2, method='trf',args=(p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone))
                p3d_tune = res.x.reshape([-1,3])
                # loss_res = optimze_loss(res.x, p4, p6, cam_proj_4, cam_proj_6, left_list, right_list, median_bone)
                # print(np.linalg.norm(loss_res))
                kp4_recon, kp6_recon, kp4_e, kp6_e = reproject_error(p3d_tune, p4[:,:2], p6[:,:2], cam_proj_4, cam_proj_6)
                frame_3d_dict[person_id] = p3d_tune.tolist()
                kp4_dict[person_id] = kp4_e.tolist()
                kp6_dict[person_id] = kp6_e.tolist()
            except:
                print("Error")
        data_3d_dict["3D"][frame_id] = frame_3d_dict
        data_3d_dict["kp4_e"][frame_id] = kp4_dict
        data_3d_dict["kp6_e"][frame_id] = kp6_dict
    with open(shape_prior_finetune_output, "w") as f:
        json.dump(data_3d_dict, f)
|
58765
|
from django.conf import settings
from rest_framework.permissions import IsAdminUser
from rest_framework import status, viewsets, decorators
from quser.permissions import CURDPermissionsOrReadOnly
from rest_framework.response import Response
from . import models, serializers
from .filters import FileFilter
class TagViewSet(viewsets.ModelViewSet):
    """CRUD API for Tag objects; restricted to admin users."""
    queryset = models.Tag.objects.all()
    serializer_class = serializers.TagSerializer
    permission_classes = (IsAdminUser,)
class FileViewSet(viewsets.ModelViewSet):
    """CRUD API for File objects with soft delete and admin bulk actions."""
    # Only active files are exposed; "deletion" just clears the flag below.
    queryset = models.File.objects.filter(active=True).order_by("-id")
    serializer_class = serializers.FileSerializer
    permission_classes = (CURDPermissionsOrReadOnly,)
    filterset_class = FileFilter

    def perform_destroy(self, instance):
        """Soft-delete: deactivate the file instead of removing the row."""
        instance.active = False
        instance.save()

    @decorators.action(methods=['delete'], detail=False, serializer_class=serializers.BulkDestroySerializer,
                       permission_classes=(IsAdminUser,))
    def bulk_destroy(self, request, *args, **kwargs):
        """Admin-only bulk delete; the serializer performs the destruction."""
        serializer = self.serializer_class(data=request.data, context=dict(request=request))
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @decorators.action(methods=['post',], detail=False, serializer_class=serializers.BulkUploadSerializer,
                       permission_classes=(IsAdminUser,))
    def bulk_upload(self, request, *args, **kwargs):
        """Admin-only bulk upload; returns the serializer's save() result."""
        serializer = self.serializer_class(data=request.data, context=dict(request=request))
        serializer.is_valid(raise_exception=True)
        res = serializer.save()
        return Response(res, status=status.HTTP_201_CREATED)
|
58780
|
from django.conf.urls.defaults import patterns, url, include
from pycash.controllers import TaxController as controller
# URL routes for the tax controller (legacy Django `patterns` syntax).
# Plain tuples are unnamed routes; `url(..., name=...)` routes are reversible.
urlpatterns = patterns('',
    (r'^upcomingList$', controller.upcomingList),
    (r'^upcoming$', controller.upcoming),
    url(r'^pay$', controller.pay, name="tax_pay"),
    (r'^list$', controller.list),
    url(r'^save$', controller.save_or_update, name="tax_save"),
    (r'^update$', controller.save_or_update),
    url(r'^delete$', controller.delete, name="tax_delete"),
    (r'^$', controller.index)
)
|
58818
|
import sphinx_rtd_theme
# Sphinx configuration for the Bitcoin DCA documentation.
project = 'Bitcoin DCA'
copyright = '2021, <NAME>'
author = '<NAME>'
extensions = []
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Read the Docs theme with a slightly deeper sidebar navigation.
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
pygments_style = 'sphinx'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
    'navigation_depth': 4,
}
master_doc = 'index'
html_logo = '../resources/images/logo-white.png'
# I use a privacy focussed service https://usefathom.com/ to track how the documentation
# is being used. This allows me to improve its contents.
html_js_files = [('https://krill.jorijn.com/script.js', {'data-site': 'MXGDAIWO'})]
|
58829
|
from typing import Optional
class TransactionFailedError(Exception):
    """
    Base exception for transaction failure
    """

    def __init__(
        self,
        code: Optional[str] = None,
        message: Optional[str] = "Unknown starknet error",
    ):
        self.code = code
        self.message = message
        super().__init__(self.message)

    def __str__(self):
        # Prefix the message with "<code>:" only when a code was provided.
        prefix = f"{self.code}:" if self.code is not None else ""
        return f"Transaction failed with following starknet error: {prefix}{self.message}."
class TransactionRejectedError(TransactionFailedError):
    """
    Exception for transactions rejected by starknet
    """

    def __str__(self):
        prefix = f"{self.code}:" if self.code is not None else ""
        return f"Transaction was rejected with following starknet error: {prefix}{self.message}."
class TransactionNotReceivedError(TransactionFailedError):
    """
    Exception for transactions not received on starknet
    """

    def __init__(self):
        # Fixed description: this failure mode carries no code or custom message.
        super().__init__(message="Transaction not received")

    def __str__(self):
        return "Transaction was not received on starknet"
|
58838
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
def loss2(logits, labels, num_classes, scope, head=None):
    """Softmax cross-entropy with label smoothing (TensorFlow 1.x API).

    Args:
        logits: unnormalised scores, reshaped to (-1, num_classes).
        labels: integer class ids, one-hot encoded internally.
        num_classes: number of classes.
        scope: name scope for the ops.
        head: optional per-class weights multiplied into the cross-entropy.
    Returns:
        Scalar tensor: mean cross-entropy over all examples.
    """
    with tf.name_scope(scope):
        logits = tf.reshape(logits, (-1, num_classes))
        # Small constant keeps log(softmax) finite for near-zero probabilities.
        softmax = tf.nn.softmax(logits) + 1e-4
        labels = tf.to_float(tf.one_hot(tf.reshape(labels, [-1]), num_classes))
        # Label smoothing: mix one-hot targets with a uniform distribution.
        eps = 1e-2
        labels = (1-eps)*tf.to_float(tf.reshape(labels, (-1, num_classes))) + eps/num_classes
        if head is not None:
            cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax),
                                                       head), reduction_indices=[1])
        else:
            cross_entropy = -tf.reduce_sum(
                labels * tf.log(softmax), reduction_indices=[1])
        return tf.reduce_mean(cross_entropy)
|
58865
|
import json
import logging
import os
import re
from collections import namedtuple
from copy import deepcopy
from typing import Any, Dict, List, Tuple
import numpy as np
import pandas as pd
import spacy
from scirex_utilities.analyse_pwc_entity_results import *
from scirex_utilities.entity_utils import *
from spacy.tokens import Doc
from tqdm import tqdm
# Enable tqdm's pandas integration (progress_apply on DataFrames).
tqdm.pandas()
# Annotated span record; NOTE the namedtuple's typename is "Span" while the
# binding is LabelSpan.
LabelSpan = namedtuple("Span", ["start", "end", "token_start", "token_end", "entity", "links", "modified"])
logging.basicConfig(level=logging.INFO)
class WhitespaceTokenizer(object):
    """spaCy tokenizer that splits on whitespace only (for pre-tokenized text)."""

    def __init__(self, vocab):
        self.vocab = vocab

    def __call__(self, text):
        words = text.split()
        # All tokens 'own' a subsequent space character in this tokenizer
        spaces = [True] * len(words)
        return Doc(self.vocab, words=words, spaces=spaces)
# Load the English spaCy pipeline and swap in the whitespace tokenizer so
# already-tokenized (space-separated) text is not re-tokenized.
nlp = spacy.load("en")
nlp.tokenizer = WhitespaceTokenizer(nlp.vocab)
def process_folder(folder: str) -> Tuple[dict, str]:
    """Parse one brat annotation folder into span labels plus the raw text.

    Reads document.txt and document.ann; returns
    ({(start, end): {"E": entity_type, "A": attribute_set, "T": span_text}},
    document_text), or None when the folder has no document.txt.
    """
    span_labels = {}
    map_T_to_span = {}
    if not os.path.isdir(folder) or "document.txt" not in os.listdir(folder):
        print(folder, " have not document")
        return None
    doc_text = open(os.path.join(folder, "document.txt")).read()
    ann_file = open(os.path.join(folder, "document.ann")).read().strip()
    annotations = [x.split("\t", 1) for x in ann_file.split("\n")]
    # Process text-bound annotations (T...) before attributes (A...).
    annotations = sorted(annotations, key=lambda x: 0 if x[0] == "T" else 1)
    for ann_type, ann in annotations:
        if ann_type[0] == "T":
            ann, ann_text = ann.split("\t")
            # Discontinuous spans (";"-separated fragments) are skipped.
            if ";" in ann:
                continue
            else:
                enttype, span_start, span_end = ann.split()
                span_start, span_end = int(span_start), int(span_end)
                if (span_start, span_end) in span_labels:
                    # NOTE(review): this assert is a no-op (non-empty string is
                    # always truthy), so duplicate spans are silently dropped.
                    assert "Span already present"
                else:
                    span_labels[(span_start, span_end)] = {"E": enttype, "A": set(), "T": ann_text}
                    map_T_to_span[ann_type] = (span_start, span_end)
        if ann_type[0] == "A":
            ann, ann_T = ann.split()
            if ann_T in map_T_to_span:
                span_labels[map_T_to_span[ann_T]]["A"].add(ann)
            else:
                print("Attribute before Trigger")
    return span_labels, doc_text
def get_all_document_annotations(brat_folder: str) -> Dict[str, Tuple[dict, str]]:
    """Parse every document folder under *brat_folder*.

    Returns a dict mapping folder name -> process_folder() result
    (which may be None for folders without a document.txt).
    """
    map_id_to_ann = {}
    for f in tqdm(os.listdir(brat_folder)):
        try:
            map_id_to_ann[f] = process_folder(os.path.join(brat_folder, f))
        except Exception as e:
            # NOTE(review): only the folder name is printed; the exception
            # itself is discarded and the folder is skipped.
            print(f)
    return map_id_to_ann
def process_back_to_dataframe(span_labels: Dict[Tuple[int, int], dict], doc_text: str):
    """Convert document-level character spans back into a per-sentence DataFrame.

    ``doc_text`` must be sentences joined (and terminated) by "\\n "
    separators; each span in ``span_labels`` is re-based to character offsets
    within its containing sentence.  The first five rows are dropped —
    presumably header lines from the brat rendering (row 4 is asserted to be
    blank); confirm against the export format.
    """
    sentences = doc_text.split("\n ")
    assert sentences[-1] == ""
    # Re-attach the separator so sentence lengths sum to len(doc_text).
    sentences = [x + "\n " for x in sentences[:-1]]
    sentence_limits = np.cumsum([len(x) for x in sentences])
    sentence_limits = list(zip([0] + list(sentence_limits)[:-1], sentence_limits))
    for s, e in sentence_limits:
        assert doc_text[e - 2 : e] == "\n "
        assert doc_text[s] != " "
    # [[start, end], info] pairs sorted by start offset; lists (not tuples)
    # so the offsets can be rebased in place below.
    span_labels = list(map(lambda x: [list(x[0]), x[1]], sorted(span_labels.items(), key=lambda x: x[0][0])))
    sl_ix = 0
    map_sentence_limits_to_spans = {}
    # Greedily assign each span to the sentence that fully contains it.
    for ss, se in sentence_limits:
        map_sentence_limits_to_spans[(ss, se)] = []
        while sl_ix < len(span_labels) and span_labels[sl_ix][0][0] >= ss and span_labels[sl_ix][0][1] <= se:
            map_sentence_limits_to_spans[(ss, se)].append(span_labels[sl_ix])
            sl_ix += 1
    spans_in_l = 0
    for k, v in map_sentence_limits_to_spans.items():
        for span, _ in v:
            assert k[0] <= span[0] and k[1] >= span[1]
            spans_in_l += 1
            assert span[1] < k[1] - 1
    # Every span must have landed in exactly one sentence.
    assert spans_in_l == len(span_labels)
    # Rebase spans from document offsets to sentence-local offsets.
    for k, v in map_sentence_limits_to_spans.items():
        for span, _ in v:
            span[0] -= k[0]
            span[1] -= k[0]
    df = []
    for sent_id, ((ss, se), st) in enumerate(zip(sentence_limits, sentences)):
        for span, d in map_sentence_limits_to_spans[(ss, se)]:
            assert st[-2:] == "\n ", st[-2:]
            assert span[1] < len(st) - 2
            # A rebased span must still select the originally annotated text.
            assert st[span[0] : span[1]] == d["T"] and len(d["T"]) > 0, (st[span[0] : span[1]], d["T"])
        df.append({"sentence": st, "spans": map_sentence_limits_to_spans[(ss, se)], "sentence_id": sent_id})
    assert df[4]["sentence"].strip() == "", breakpoint()
    df = df[5:]
    df = pd.DataFrame(df)
    return df
def get_dataframe_from_folder(brat_folder):
    """Build one DataFrame of per-sentence spans for every document in *brat_folder*."""
    logging.info("Generating DataFrame ...")
    map_changes = get_all_document_annotations(brat_folder)
    logging.info("Done generating DataFrame")
    per_doc_frames = []
    for doc_id in tqdm(map_changes):
        annotation = map_changes[doc_id]
        if annotation is None:
            # Folder had no document.txt; nothing to convert.
            continue
        frame = process_back_to_dataframe(*annotation)
        frame["doc_id"] = doc_id
        per_doc_frames.append(frame)
    return pd.concat(per_doc_frames)
def overlap(span_1, span_2):
    """Return True iff the half-open (start, end) spans intersect."""
    return span_1[0] < span_2[1] and span_2[0] < span_1[1]
def process_cluster(cluster):
    """Resolve one cluster of mutually overlapping spans.

    ``cluster`` holds (span, info) pairs; info carries a "pre" snapshot when
    the span also existed in the original annotation.  Old spans are split
    into modified vs. unmodified, and any old span overlapping a brand-new
    span is dropped in favor of the new one.  Returns (spans, stats): spans
    as LabelSpan dicts (token offsets left as None) plus bookkeeping counts.
    """
    stats = {
        "new_spans": len([x for x in cluster if "pre" not in x[1]]),
        "old_spans": len([x for x in cluster if "pre" in x[1]]),
        "type_change": 0,
        "change_attributes": 0,
    }
    old_spans = [x for x in cluster if "pre" in x[1]]
    new_spans = [x for x in cluster if "pre" not in x[1]]
    old_spans_modified, old_spans_unmodified = [], []
    for span, info in old_spans:
        if [info[k] for k in ["E", "T", "A"]] == [info["pre"][k] for k in ["E", "T", "A"]]:
            # Identical to the original annotation: unmodified.
            del info["pre"]
            if any(overlap(span, n_span) for n_span, _ in new_spans):
                continue
            old_spans_unmodified.append((span, info))
        else:
            del info["pre"]
            if any(overlap(span, n_span) for n_span, _ in new_spans):
                continue
            old_spans_modified.append((span, info))
    # Sanity checks; `breakpoint()` is only evaluated when an assert fails,
    # dropping into the debugger instead of raising immediately.
    assert all((si == sj or not overlap(si[0], sj[0])) for si in new_spans for sj in new_spans), breakpoint()
    assert len(old_spans_unmodified) == 0 or len(old_spans_modified) == 0, breakpoint()
    assert all(
        (not overlap(ospan, nspan)) for ospan, _ in old_spans_modified for nspan, _ in new_spans
    ), breakpoint()
    assert all(
        (not overlap(ospan, nspan)) for ospan, _ in old_spans_unmodified for nspan, _ in new_spans
    ), breakpoint()
    if len(old_spans_modified + old_spans_unmodified) > 0 and len(new_spans) > 0:
        breakpoint()
    # Modified old spans are treated exactly like new (modified=True) spans.
    new_spans = [
        LabelSpan(
            start=x[0][0],
            end=x[0][1],
            entity=x[1]["E"],
            links=x[1]["A"],
            token_start=None,
            token_end=None,
            modified=True,
        )._asdict()
        for x in new_spans + old_spans_modified
    ]
    new_spans += [
        LabelSpan(
            start=x[0][0],
            end=x[0][1],
            entity=x[1]["E"],
            links=x[1]["A"],
            token_start=None,
            token_end=None,
            modified=False,
        )._asdict()
        for x in old_spans_unmodified
    ]
    stats["spans_kept"] = len(new_spans)
    return new_spans, stats
# Case 1: the pre entity has labels and the post entity does not -> copy the labels, then delete the pre entity
# Case 2: both the pre and the post entity have labels -> do not copy labels, just delete the pre entity
# Case 3: if the post entity has a different type than the pre entity, remove the pre entity
def normalize_spans(row):
    """Merge a sentence's old and new span lists into resolved clusters.

    Spans from ``row["spans_new"]`` are grouped into clusters of mutually
    overlapping spans; any span that also exists in ``row["spans_old"]`` gets
    the old info attached under "pre" so process_cluster can detect
    modifications.  Returns (resolved_spans, reduced_stats); ([], None) for
    sentences with no new spans.

    Raises:
        ValueError: if the new span list contains duplicate spans.
    """
    span_list_1, span_list_2 = row["spans_old"], row["spans_new"]
    map_1_span_to_ix = {tuple(k): v for k, v in span_list_1}
    if len(span_list_2) == 0:
        return [], None
    spans = [tuple(x[0]) for x in span_list_2]
    if len(spans) != len(set(spans)):
        # BUG FIX: was `assert "Duplicate spans", span_list_2`, which can
        # never fail (the condition is a non-empty string literal).
        raise ValueError("Duplicate spans: %s" % str(span_list_2))
    span_list_2 = sorted(span_list_2, key=lambda x: x[0])
    stats = []
    clusters = []
    curr_cluster = []
    cstart, cend = -1, -1
    for (start, end), span_info in span_list_2:
        cspan = ((start, end), span_info)
        if (start, end) in map_1_span_to_ix:
            # Same span existed before re-annotation: remember the old info.
            span_info["pre"] = map_1_span_to_ix[(start, end)]
        if cstart == -1:  # (Start First Cluster)
            curr_cluster.append(cspan)
            cstart, cend = start, end
        elif start < cend:  # Append to current cluster
            curr_cluster.append(cspan)
            cend = max(cend, end)
        else:  # Start new cluster
            curr_cluster, cluster_stats = process_cluster(curr_cluster)
            stats.append(cluster_stats)
            clusters.append(curr_cluster)
            curr_cluster = [cspan]
            cstart, cend = start, end
    # Flush the final cluster.
    curr_cluster, cluster_stats = process_cluster(curr_cluster)
    stats.append(cluster_stats)
    clusters.append(curr_cluster)
    clusters = sorted([z for x in clusters for z in x], key=lambda x: (x["start"], x["end"]))
    # Resolved spans must be pairwise disjoint.
    for i in range(len(clusters) - 1):
        if clusters[i]["end"] > clusters[i + 1]["start"]:
            breakpoint()
    # Sum the per-cluster stats dicts key-wise.
    stats_reduced = {}
    for s in stats:
        for k, v in s.items():
            if k not in stats_reduced:
                stats_reduced[k] = v
            else:
                stats_reduced[k] += v
    return clusters, stats_reduced
def add_token_index(row):
    """Convert each span's character offsets into document-level token offsets.

    Span boundaries that do not fall exactly on token boundaries are snapped
    first: a boundary sitting on whitespace is moved inward by one character;
    otherwise it is moved to the nearest token boundary on the enclosing
    side.  Spans that still overlap an earlier span are appended anyway, but
    the conflict is printed to stdout for manual inspection.
    """
    if len(row["cluster"]) == 0:
        return []
    sentence = row["sentence_old"]
    words = row["words"]
    word_indices = row["word_indices"]
    sentence_start = row["sentence_start"]
    starts, ends = list(zip(*word_indices))
    # Tokenization must be consistent with the stored character spans.
    for i, (start, end) in enumerate(zip(starts, ends)):
        assert sentence[start:end] == words[i], breakpoint()
    new_cluster = []
    cluster = row["cluster"]
    for i, span in enumerate(cluster):
        assert "start" in span, breakpoint()
        assert "end" in span, breakpoint()
        if not (span["start"] in starts):
            if sentence[span["start"]].strip() == "":
                # Boundary sits on whitespace: shift right onto the token.
                span["start"] += 1
            else:
                # Snap to the nearest token start before the boundary.
                span["start"] = min(
                    starts, key=lambda x: abs(x - span["start"]) if x < span["start"] else float("inf")
                )
        if not (span["end"] in ends):
            if sentence[span["end"] - 1].strip() == "":
                # Boundary just past whitespace: shift left onto the token.
                span["end"] -= 1
            else:
                # Snap to the nearest token end after the boundary.
                span["end"] = min(
                    ends, key=lambda x: abs(x - span["end"]) if x > span["end"] else float("inf")
                )
        # `sentence_start` is the cumulative token count *including* this
        # sentence, so subtract len(words) to get the sentence's first token.
        span["token_start"] = starts.index(span["start"]) + sentence_start - len(words)
        span["token_end"] = ends.index(span["end"]) + 1 + sentence_start - len(words)
        for cleaned_span in new_cluster:
            if overlap(
                (span["token_start"], span["token_end"]),
                (cleaned_span["token_start"], cleaned_span["token_end"]),
            ):
                # Report (but do not drop) spans that collide after snapping.
                print(row["doc_id"])
                print(" ".join(row["words"]))
                print("=" * 20)
        new_cluster.append(span)
    return new_cluster
def generate_token_and_indices(sentence):
    """Tokenize *sentence* into non-whitespace tokens with character spans.

    Tokens are maximal runs of "word" characters, runs of the punctuation
    characters ``+-/()&[],``, or whitespace; whitespace tokens are dropped.

    Returns ``(words, indices)`` where ``indices[i]`` is the ``(start, end)``
    character span of ``words[i]``; both empty when the sentence has no
    non-whitespace content.
    """
    pieces = sorted(
        [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[^\s\+\-/\(\)&\[\],]+", sentence)]
        + [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"[\+\-/\(\)&\[\],]+", sentence)]
        + [(m.group(0), (m.start(), m.end())) for m in re.finditer(r"\s+", sentence)],
        key=lambda x: x[1],
    )
    if len(pieces) == 0 or sentence.strip() == "":
        return [], []
    kept = [(t, i) for t, i in pieces if t.strip() != ""]
    if not kept:
        # BUG FIX: previously this fell through to `zip(*[])` inside a bare
        # `except: breakpoint()`, which would hang a production run in the
        # debugger.  Return the same empty result as the all-whitespace path.
        return [], []
    words, indices = list(zip(*kept))
    return words, indices
def compare_brat_annotations(ann_old_df, ann_new_df):
    """Align original and re-annotated sentences and resolve their spans.

    Joins the two per-sentence DataFrames on (doc_id, sentence_id), resolves
    old vs. new spans into clusters (normalize_spans), tokenizes each
    sentence, and converts span offsets to document-level token indices.
    Returns the merged DataFrame with a final "entities" column.
    """
    df_merged = ann_old_df.merge(ann_new_df, on=["doc_id", "sentence_id"], suffixes=("_old", "_new"))
    logging.info("Applying Normalize Spans ...")
    output = df_merged.progress_apply(normalize_spans, axis=1)
    df_merged["cluster"], df_merged["stats"] = list(zip(*output))
    df_merged = df_merged.sort_values(["doc_id", "sentence_id"]).reset_index(drop=True)
    logging.info("Applying Add Token Index ...")
    df_merged["words"], df_merged["word_indices"] = list(
        zip(*df_merged["sentence_old"].progress_apply(generate_token_and_indices))
    )
    df_merged["num_words"] = df_merged["words"].progress_apply(len)
    # Cumulative token count per document, inclusive of the current sentence
    # (add_token_index subtracts len(words) to get the sentence start).
    df_merged["sentence_start"] = df_merged.groupby("doc_id")["num_words"].cumsum()
    df_merged["entities"] = df_merged.apply(add_token_index, axis=1)
    # Keep a single "sentence" column; drop intermediate span columns.
    df_merged = (
        df_merged.sort_values(["doc_id", "sentence_id"])
        .reset_index(drop=True)
        .drop(columns=["spans_old", "spans_new", "sentence_new", "cluster"])
        .rename(columns={"sentence_old": "sentence"})
    )
    return df_merged
def generate_relations_in_pwc_df(pwc_df):
    """Collect, per paper, the list of relation tuples (entities + score).

    Relies on ``true_entities`` / ``used_entities`` /
    ``map_true_entity_to_available`` supplied by the star-imported entity
    utilities.  Returns a DataFrame with columns s2_paper_id and "Relations"
    (a list of named tuples per paper).
    """
    pwc_df_keep = pwc_df[["s2_paper_id"] + true_entities + ["score"]].rename(
        columns=map_true_entity_to_available
    )
    # Drop duplicate rows and papers that could not be matched to S2.
    pwc_df_keep = (
        pwc_df_keep[(~pwc_df_keep.duplicated()) & (pwc_df_keep.s2_paper_id != "not_found")]
        .sort_values(["s2_paper_id"] + used_entities + ["score"])
        .reset_index(drop=True)
    )
    # pwc_df_keep[used_entities] = pwc_df_keep[used_entities].applymap(lambda x: re.sub(r"[^\w-]", "_", x))
    # One row per paper, holding all its relations as a list of namedtuples.
    pwc_df_keep = (
        pwc_df_keep.groupby("s2_paper_id")
        .apply(lambda x: list(x[used_entities + ["score"]].itertuples(index=False, name="Relation")))
        .reset_index()
        .rename(columns={0: "Relations"})
    )
    return pwc_df_keep
def combine_brat_to_original_data(
    pwc_doc_file,
    pwc_sentence_file,
    pwc_prediction_file,
    original_brat_anno_folder,
    annotated_brat_anno_folder,
):
    """Merge brat re-annotations with the original PwC documents.

    Loads the papers-with-code documents, sentence predictions, and both brat
    folders; aligns them token-for-token; and returns a dict mapping doc_id
    to {"words", "sentences", "sections", "relations", "entities",
    "doc_id", "method_subrelations"}.
    """
    logging.info("Loading pwc docs ... ")
    pwc_df = load_pwc_full_text(pwc_doc_file)
    pwc_grouped = (
        pwc_df.groupby("s2_paper_id")[["dataset", "task", "model_name", "metric"]]
        .aggregate(lambda x: list(set(tuple(x))))
        .reset_index()
    )
    pwc_df_relations = generate_relations_in_pwc_df(pwc_df)
    pwc_df_relations = pwc_df_relations.rename(columns={"s2_paper_id": "doc_id"})[["doc_id", "Relations"]]
    pwc_df_relations.index = pwc_df_relations.doc_id
    pwc_df_relations = pwc_df_relations.drop(columns=["doc_id"])
    pwc_df_relations: Dict[str, Relation] = pwc_df_relations.to_dict()["Relations"]
    # Per-document map from (cleaned) method name to its chunked sub-names.
    method_breaks = {
        d: {
            clean_name(rel.Method): [(i, clean_name(x)) for i, x in chunk_string(rel.Method)]
            for rel in relations
        }
        for d, relations in pwc_df_relations.items()
    }
    # Normalize all relation entity names (scores pass through unchanged).
    pwc_df_relations = {
        d: [{k: clean_name(x) if k != "score" else x for k, x in rel._asdict().items()} for rel in relations]
        for d, relations in pwc_df_relations.items()
    }
    logging.info("Loading PwC Sentence Predictions ... ")
    pwc_sentences = load_pwc_sentence_predictions(pwc_sentence_file, pwc_prediction_file)
    pwc_sentences = pwc_sentences.merge(pwc_grouped, left_on="doc_id", right_on="s2_paper_id")
    pwc_sentences = pwc_sentences.sort_values(
        by=["doc_id", "section_id", "para_id", "sentence_id"]
    ).reset_index(drop=True)
    # Re-tokenize with the same tokenizer used on the brat side so the two
    # token streams can be compared 1:1 below.
    pwc_sentences["words"] = pwc_sentences["words"].progress_apply(
        lambda x: generate_token_and_indices(" ".join(x))[0]
    )
    df_changed = get_dataframe_from_folder(annotated_brat_anno_folder)
    df_original = get_dataframe_from_folder(original_brat_anno_folder)
    df_merged = compare_brat_annotations(df_original, df_changed)
    # Token streams from PwC and from brat must be identical per document.
    assert (
        pwc_sentences.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y])
        != df_merged.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y])
    ).sum() == 0, breakpoint()
    def add_nums(rows, columns, name):
        # Dense 0..k-1 group numbers for the given grouping columns.
        rows[name] = list(rows.groupby(columns).grouper.group_info[0])
        return rows
    pwc_sentences["para_num"] = None
    pwc_sentences["sentence_num"] = None
    pwc_sentences = pwc_sentences.groupby("doc_id").progress_apply(
        lambda x: add_nums(x, ["section_id", "para_id"], "para_num")
    )
    pwc_sentences = pwc_sentences.groupby("doc_id").progress_apply(
        lambda x: add_nums(x, ["section_id", "para_id", "sentence_id"], "sentence_num")
    )
    words: Dict[str, List[str]] = pwc_sentences.groupby("doc_id")["words"].agg(
        lambda words: [x for y in words for x in y]
    ).to_dict()
    pwc_sentences["num_words"] = pwc_sentences["words"].apply(len)
    # Token counts per sentence and per section, then doc_id -> {idx: count}.
    sentences = pwc_sentences.groupby(["doc_id", "sentence_num"])["num_words"].agg(sum)
    sections = pwc_sentences.groupby(["doc_id", "section_id"])["num_words"].agg(sum)
    sections: Dict[str, Dict[int, int]] = {
        level: sections.xs(level).to_dict() for level in sections.index.levels[0]
    }
    sentences: Dict[str, Dict[int, int]] = {
        level: sentences.xs(level).to_dict() for level in sentences.index.levels[0]
    }
    words_merged = (
        df_merged.groupby("doc_id")["words"].agg(lambda words: [x for y in words for x in y]).to_dict()
    )
    entities = (
        df_merged.groupby("doc_id")["entities"].agg(lambda ents: [x for y in ents for x in y]).to_dict()
    )
    def compute_start_end(cards):
        # Cardinalities -> [(start, end)] token ranges (zip truncates the
        # extra trailing element of `starts`).
        ends = list(np.cumsum(cards))
        starts = [0] + ends
        return list(zip([int(x) for x in starts], [int(x) for x in ends]))
    combined_information = {}
    for d in words:
        assert words[d] == words_merged[d], breakpoint()
        assert list(sentences[d].keys()) == list(range(max(sentences[d].keys()) + 1)), breakpoint()
        assert list(sections[d].keys()) == list(range(max(sections[d].keys()) + 1)), breakpoint()
        sent = compute_start_end([sentences[d][i] for i in range(len(sentences[d]))])
        sec = compute_start_end([sections[d][i] for i in range(len(sections[d]))])
        # Character offsets are no longer needed once token offsets exist.
        for e in entities[d]:
            del e["start"]
            del e["end"]
        combined_information[d] = {
            "words": words[d],
            "sentences": sent,
            "sections": sec,
            "relations": pwc_df_relations[d],
            "entities": entities[d],
            "doc_id": d,
            "method_subrelations": method_breaks[d],
        }
    return combined_information
def _annotation_to_dict(dc):
# convenience method
if isinstance(dc, dict):
ret = dict()
for k, v in dc.items():
k = _annotation_to_dict(k)
v = _annotation_to_dict(v)
ret[k] = v
return ret
elif isinstance(dc, str):
return dc
elif isinstance(dc, (set, frozenset, list, tuple)):
ret = []
for x in dc:
ret.append(_annotation_to_dict(x))
return tuple(ret)
else:
return dc
def annotations_to_jsonl(annotations, output_file, key="doc_id"):
    """Write *annotations*, sorted by *key*, to *output_file* as JSON lines."""
    ordered = sorted(annotations, key=lambda ann: ann[key])
    with open(output_file, "w") as handle:
        for ann in ordered:
            handle.write(json.dumps(_annotation_to_dict(ann), sort_keys=True))
            handle.write("\n")
def propagate_annotations(data_dict: Dict[str, Any]):
    """Propagate modified entity annotations to identical token sequences.

    For every modified entity span, every other occurrence of the same token
    sequence in the document is either updated in place (if already an
    entity), merged link-wise (when link sets conflict in a reconcilable
    way), or recorded as a brand-new entity — unless it overlaps an existing
    modified entity.  Mutates ``data_dict["entities"]`` in place.
    """
    words = data_dict["words"]
    entities = data_dict["entities"]
    # Index entities by their token span; spans must be pairwise disjoint.
    entities = {(e["token_start"], e["token_end"]): e for e in entities}
    assert not any(e != f and overlap(e, f) for e in entities for f in entities), breakpoint()
    new_entities = {}
    for (s, e) in entities:
        if entities[(s, e)]["modified"] == True:
            span_text = words[s:e]
            # All occurrences of the same token sequence in the document.
            possible_matches = [
                (i, i + len(span_text))
                for i in range(len(words))
                if words[i : i + len(span_text)] == span_text
            ]
            for match in possible_matches:
                add_match = False
                if match in entities:
                    if entities[match].get("proped", False):
                        continue
                    if entities[match]["modified"] == False:  # Propagate the changes
                        for k in ["entity", "links", "modified"]:
                            entities[match][k] = deepcopy(entities[(s, e)][k])
                    elif entities[match]["entity"] != entities[(s, e)]["entity"]:
                        # Conflicting types: the later span wins.
                        if match > (s, e):
                            for k in ["entity", "links", "modified"]:
                                entities[match][k] = deepcopy(entities[(s, e)][k])
                    elif set(entities[match]["links"]) != set(
                        entities[(s, e)]["links"]
                    ):  # Two entities with same text have different annotations. BAD !!!
                        merged_links = set(entities[match]["links"]) | set(entities[(s, e)]["links"])
                        entities[match]["links"] = deepcopy(list(merged_links))
                        entities[(s, e)]["links"] = deepcopy(list(merged_links))
                        entities[match]["proped"] = True
                    add_match = False
                else:
                    # Not an existing entity: check overlaps with entities.
                    for span in entities:
                        if overlap(span, match):
                            if entities[span]["modified"] == True:
                                add_match = False
                                if entities[span]["entity"] != entities[(s, e)]["entity"]:
                                    break
                                elif set(entities[span]["links"]) != set(entities[(s, e)]["links"]):
                                    diff_links = set(entities[(s, e)]["links"]) ^ set(entities[span]["links"])
                                    canon_name = set(["Canonical_Name"])
                                    # Only reconcile when the disagreement is
                                    # about Canonical_Name alone.
                                    if (
                                        diff_links != canon_name
                                        and set(entities[(s, e)]["links"]) != canon_name
                                        and set(entities[span]["links"]) != canon_name
                                    ):
                                        break
                                    else:
                                        merged_links = set(entities[(s, e)]["links"]) | set(
                                            entities[span]["links"]
                                        )
                                        entities[(s, e)]["links"] = deepcopy(list(merged_links))
                                        entities[span]["links"] = deepcopy(list(merged_links))
                                        break
                            break
                    else:
                        # No overlap with any existing entity: candidate new span.
                        add_match = True
                    if match in new_entities:
                        if new_entities[match]["entity"] != entities[(s, e)]["entity"]:
                            breakpoint()
                        elif set(new_entities[match]["links"]) != set(
                            entities[(s, e)]["links"]
                        ):  # Two entities with same text have different annotations. BAD !!!
                            diff_links = set(new_entities[match]["links"]) & set(entities[(s, e)]["links"])
                            if (
                                len(diff_links) == 0
                                and len(set(new_entities[match]["links"])) > 0
                                and len(set(entities[(s, e)]["links"])) > 0
                            ):
                                breakpoint()
                            else:
                                merged_links = set(new_entities[match]["links"] + entities[(s, e)]["links"])
                                entities[(s, e)]["links"] = deepcopy(list(merged_links))
                                new_entities[match]["links"] = deepcopy(list(merged_links))
                        else:
                            add_match = False
                    if add_match:
                        new_entities[match] = {
                            k: deepcopy(entities[(s, e)][k]) for k in ["entity", "links", "modified"]
                        }
                        new_entities[match]["token_start"] = match[0]
                        new_entities[match]["token_end"] = match[1]
    # Reconcile new candidates against existing entities they overlap.
    for match in list(new_entities.keys()):
        for span in list(entities.keys()):
            if overlap(match, span):
                assert entities[span]["modified"] == False or entities[span]["proped"], breakpoint()
                if entities[span].get("proped", False):
                    if match in new_entities:
                        del new_entities[match]
                elif not entities[span]["modified"]:
                    # Unmodified entity loses to the propagated candidate.
                    del entities[span]
    # Greedily keep a disjoint subset of new entities (sorted by end offset).
    new_entities = sorted(list(new_entities.items()), key=lambda x: x[0][1])
    disjoint_new_entities = []
    for e in new_entities:
        if len(disjoint_new_entities) == 0:
            disjoint_new_entities.append(e)
        else:
            if e[0][0] >= disjoint_new_entities[-1][0][1]:
                disjoint_new_entities.append(e)
    assert not any(
        e[0] != f[0] and overlap(e[0], f[0]) for e in disjoint_new_entities for f in disjoint_new_entities
    )
    disjoint_new_entities = dict(disjoint_new_entities)
    assert not any(overlap(e, f) for e in disjoint_new_entities for f in entities), breakpoint()
    entities.update(disjoint_new_entities)
    assert not any(e != f and overlap(e, f) for e in entities for f in entities), breakpoint()
    assert all(v["token_start"] == s and v["token_end"] == e for (s, e), v in entities.items()), breakpoint()
    data_dict["entities"] = [x for x in entities.values()]
import argparse
# CLI: selects which annotator's brat output folder to merge.
parser = argparse.ArgumentParser()
parser.add_argument("--annotator")
if __name__ == "__main__":
    args = parser.parse_args()
    # Merge the annotator's brat output with the original PwC data.
    annotations_dict = combine_brat_to_original_data(
        "data/pwc_s2_cleaned_text_v2.jsonl",
        "data/pwc_s2_cleaned_text_v2_sentences.jsonl",
        "outputs/pwc_s2_cleaned_text_v2_sentences_predictions.jsonl.clean",
        "/home/sarthakj/brat/brat/data/result_extraction/outputs/second_phase_annotations_"
        + args.annotator
        + "/",
        "/home/sarthakj/brat/brat/data/result_extraction/outputs/second_phase_annotations_original/",
    )
    annotations_to_jsonl(list(annotations_dict.values()), "model_data/all_data_" + args.annotator + ".jsonl")
    # Round-trip through JSONL so downstream sees the serialized form.
    data = [json.loads(line) for line in open("model_data/all_data_" + args.annotator + ".jsonl")]
    for d in tqdm(data):
        # Names that participate in any relation (plus method sub-parts).
        names = [v for rel in d["relations"] for k, v in rel.items() if k != "score"]
        names += [n for m, subm in d["method_subrelations"].items() for idx, n in subm]
        names = set(names)
        propagate_annotations(d)
        coreference = {n: [] for n in names}
        ner = []
        for e in d["entities"]:
            e["links"] = set(e["links"])
            e["canon"] = "Canonical_Name" in e["links"]
            if e["canon"]:
                e["links"].remove("Canonical_Name")
            # Strip bookkeeping flags used during propagation.
            if "proped" in e:
                del e["proped"]
            del e["modified"]
            # Keep only links that refer to relation participants.
            e["links"] = e["links"] & names
            for l in e["links"]:
                coreference[l].append([e["token_start"], e["token_end"]])
            ner.append((e["token_start"], e["token_end"], e["entity"]))
        del d["entities"]
        d["n_ary_relations"] = d["relations"]
        del d["relations"]
        d["coref"] = coreference
        d["ner"] = ner
        # Sentence/section spans must cover the whole token stream.
        assert d["sentences"][-1][-1] == len(d["words"]), breakpoint()
        assert d["sections"][-1][-1] == len(d["words"]), breakpoint()
    annotations_to_jsonl(data, "model_data/all_data_" + args.annotator + "_propagated.jsonl")
|
58906
|
from .parseexp_koff_a import get as get_a
from .parseexp_koff_b import get as get_b
from .parseexp_koff_c import get as get_c
def parse_exp(input_str):
    """Parse the three koff indices (a, b, c) from *input_str* as floats."""
    return [float(getter(input_str)) for getter in (get_a, get_b, get_c)]
|
58915
|
import sys
import os
from PyQt5.QtWidgets import (QTabWidget, QMessageBox)
from codeeditor import CodeEditor
from widgets import MessageBox
class TabWidget(QTabWidget):
    """Tab container for editor panes; keeps the code view and main window in sync."""
    def __init__(self, parent=None):
        super().__init__()
        self.mainWindow = parent
        # NOTE(review): the second setStyleSheet call below replaces this
        # sheet entirely — confirm whether both were meant to apply.
        self.setStyleSheet(
            '''
            background-color: #2c2c2c;
            color: white;
            alternate-background-color: #FFFFFF;
            selection-background-color: #3b5784;
            ''')
        self.setStyleSheet('''
            QTabBar::tab:selected {background: darkgreen;}
        ''')
        self.setMovable(True)
        self.setTabsClosable(True)
        # signals
        self.tabCloseRequested.connect(self.closeTab)
        self.currentChanged.connect(self.changeTab)
        self.textPad = None  # currently active editor widget
        self.codeView = None  # structure/outline view owned by mainWindow
    def newTab(self, editor=None, codeView=None):
        """Add *editor* (or a fresh CodeEditor) as a new tab."""
        if not editor:
            # No editor given: create an empty, unnamed one.
            editor = CodeEditor(parent=self.mainWindow)
            self.addTab(editor, "noname")
            editor.filename = None
            if self.mainWindow:
                self.codeView = self.mainWindow.codeView
        else:
            if editor.filename == None:
                self.addTab(editor, "noname")
            else:
                # Label the tab with the base file name; full path as tooltip.
                self.addTab(editor, os.path.basename(editor.filename))
                x = self.count() - 1
                self.setTabToolTip(x, editor.filename)
            self.codeView = self.mainWindow.codeView
    def closeTab(self, index):
        """Close the tab at *index*, prompting for unsaved changes ('*' in title)."""
        x = self.currentIndex()
        if x != index:
            self.setCurrentIndex(index)
        tabText = self.tabText(index)
        if '*' in tabText:
            # Unsaved changes: ask before closing.
            q = MessageBox(QMessageBox.Warning, 'Warning',
                           'File not saved\n\nSave now ?',
                           QMessageBox.Yes | QMessageBox.No)
            if (q.exec_() == QMessageBox.Yes):
                self.mainWindow.save()
                self.removeTab(index)
            else:
                self.removeTab(index)
        else:
            self.removeTab(index)
        x = self.currentIndex()
        self.setCurrentIndex(x)
        if x == -1:
            # Last tab closed: clear the code view and reset the title.
            self.refreshCodeView('')
            self.mainWindow.setWindowTitle('CrossCobra - Python IDE')
    def changeTab(self, index):
        """React to tab switches: track the active editor and refresh views."""
        x = self.count()
        y = x - 1
        if y >= 0:
            self.setCurrentIndex(index)
            textPad = self.currentWidget()
            self.textPad = textPad
            text = self.textPad.text()
            if self.codeView:
                self.refreshCodeView(text)
            else:
                # Code view not wired up yet; fetch it from the main window.
                self.codeView = self.mainWindow.codeView
                self.refreshCodeView(text)
        if self.textPad:
            self.mainWindow.refresh(self.textPad)
    def refreshCodeView(self, text=None):
        """Rebuild the code-structure view from *text*."""
        text = text
        codeViewDict = self.codeView.makeDictForCodeView(text)
        self.codeView.updateCodeView(codeViewDict)
    def getCurrentTextPad(self):
        """Return the editor widget in the currently selected tab."""
        textPad = self.currentWidget()
        return textPad
|
58924
|
import copy as _copy
import math as _math
import os as _os
import cv2 as _cv2
import numpy as _np
from PIL import Image as _IMG
from easytorch.utils.logger import *
"""
##################################################################################################
Very useful image related utilities
##################################################################################################
"""
def _same_file(x):
return x
class Image:
    """Container for an image plus its mask, ground truth and metadata.

    All arrays are loaded as numpy arrays of ``dtype`` (default ``uint8``).
    """

    def __init__(self, dtype=_np.uint8):
        self.dir = None  # directory the image was loaded from
        self.file = None  # file name within self.dir
        self.array = None  # image pixel data
        self.mask = None  # optional mask image
        self.ground_truth = None  # optional ground-truth image
        self.extras = {}  # arbitrary user metadata
        self.dtype = dtype

    def load(self, dir, file):
        """Load the image at dir/file into ``self.array`` (best effort; errors are logged)."""
        try:
            self.dir = dir
            self.file = file
            self.array = _np.array(_IMG.open(self.path), dtype=self.dtype)
        except Exception as e:
            error('Fail to load file: ' + self.file + ': ' + str(e))

    def load_mask(self, mask_dir=None, fget_mask=_same_file):
        """Load this image's mask; *fget_mask* maps image name -> mask file name."""
        if fget_mask is None:
            fget_mask = _same_file
        try:
            mask_file = fget_mask(self.file)
            self.mask = _np.array(_IMG.open(_os.path.join(mask_dir, mask_file)), dtype=self.dtype)
        except Exception as e:
            error('Fail to load mask: ' + str(e))

    def load_ground_truth(self, gt_dir=None, fget_ground_truth=_same_file):
        """Load the ground truth; *fget_ground_truth* maps image name -> GT file name."""
        if fget_ground_truth is None:
            fget_ground_truth = _same_file
        try:
            gt_file = fget_ground_truth(self.file)
            self.ground_truth = _np.array(_IMG.open(_os.path.join(gt_dir, gt_file)), dtype=self.dtype)
        except Exception as e:
            error('Fail to load ground truth: ' + str(e))

    def get_array(self, dir='', getter=_same_file, file=None):
        """Load and return (without storing) the array for *file* under *dir*."""
        if getter is None:
            getter = _same_file
        if not file:
            file = self.file
        arr = _np.array(_IMG.open(_os.path.join(dir, getter(file))), dtype=self.dtype)
        return arr

    def apply_mask(self):
        """Zero out all pixels where the mask is 0 (in place)."""
        if self.mask is not None:
            self.array[self.mask == 0] = 0

    def apply_clahe(self, clip_limit=2.0, tile_shape=(8, 8)):
        """Apply CLAHE contrast enhancement in place (grayscale or 3-channel)."""
        enhancer = _cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_shape)
        if len(self.array.shape) == 2:
            self.array = enhancer.apply(self.array)
        elif len(self.array.shape) == 3:
            self.array[:, :, 0] = enhancer.apply(self.array[:, :, 0])
            self.array[:, :, 1] = enhancer.apply(self.array[:, :, 1])
            self.array[:, :, 2] = enhancer.apply(self.array[:, :, 2])
        else:
            error('More than three channels')

    def __copy__(self):
        copy_obj = Image()
        # BUG FIX: `dir` was never copied, so `copy_obj.path` raised a
        # TypeError (os.path.join(None, ...)) on any copied instance.
        copy_obj.dir = _copy.copy(self.dir)
        copy_obj.file = _copy.copy(self.file)
        copy_obj.array = _copy.copy(self.array)
        copy_obj.mask = _copy.copy(self.mask)
        copy_obj.ground_truth = _copy.copy(self.ground_truth)
        copy_obj.extras = _copy.deepcopy(self.extras)
        copy_obj.dtype = _copy.deepcopy(self.dtype)
        return copy_obj

    @property
    def path(self):
        """Full path (dir/file) of the loaded image."""
        return _os.path.join(self.dir, self.file)
def get_rgb_scores(arr_2d=None, truth=None):
    """
    Returns a rgb image of pixelwise separation between ground truth and arr_2d
    (predicted image) with different color codes: white = both (TP),
    green = prediction only (FP), red = truth only (FN), black = neither (TN).
    :param arr_2d: binary predicted image (0/255 or 0/1)
    :param truth: binary ground-truth image (0/255 or 0/1)
    :return: HxWx3 uint8 color-coded image
    """
    pred = arr_2d.copy()
    gt = truth.copy()
    pred[pred == 255] = 1
    gt[gt == 255] = 1
    combined = pred + gt * 2  # 0=TN, 1=FP, 2=FN, 3=TP
    colors = _np.zeros([arr_2d.shape[0], arr_2d.shape[1], 3], dtype=_np.uint8)
    colors[combined == 3] = [255, 255, 255]
    colors[combined == 1] = [0, 255, 0]
    colors[combined == 2] = [255, 0, 0]
    colors[combined == 0] = [0, 0, 0]
    return colors
def get_praf1(arr_2d=None, truth=None):
    """
    Returns precision, recall, f1 and accuracy score between two binary arrays upto five precision.
    :param arr_2d: binary predicted image (0/255 or 0/1)
    :param truth: binary ground-truth image (0/255 or 0/1)
    :return: dict with keys 'Precision', 'Recall', 'Accuracy', 'F1'
    """
    pred = arr_2d.copy()
    gt = truth.copy()
    pred[pred == 255] = 1
    gt[gt == 255] = 1
    combined = pred + gt * 2  # 0=TN, 1=FP, 2=FN, 3=TP
    tp = combined[combined == 3].shape[0]
    fp = combined[combined == 1].shape[0]
    tn = combined[combined == 0].shape[0]
    fn = combined[combined == 2].shape[0]

    def _safe_ratio(num, den):
        # Guard the degenerate case where a class is entirely absent.
        return num / den if den else 0

    precision = _safe_ratio(tp, tp + fp)
    recall = _safe_ratio(tp, tp + fn)
    accuracy = _safe_ratio(tp + tn, tp + fp + fn + tn)
    f1 = _safe_ratio(2 * precision * recall, precision + recall)
    return {
        'Precision': round(precision, 5),
        'Recall': round(recall, 5),
        'Accuracy': round(accuracy, 5),
        'F1': round(f1, 5)
    }
def rescale2d(arr):
    """Min-max normalize *arr* into the [0, 1] range."""
    lo, hi = _np.min(arr), _np.max(arr)
    return (arr - lo) / (hi - lo)
def rescale3d(arrays):
    """Min-max normalize each array in *arrays* independently."""
    return [rescale2d(arr) for arr in arrays]
def get_signed_diff_int8(image_arr1=None, image_arr2=None):
    """Visualize the signed (int8) difference of two images as a uint8 image."""
    signed_diff = _np.array(image_arr1 - image_arr2, dtype=_np.int8)
    # Shift to non-negative, then stretch to the full 0..255 range.
    shifted = _np.array(signed_diff - _np.min(signed_diff), _np.uint8)
    shifted = rescale2d(shifted)
    return _np.array(shifted * 255, _np.uint8)
def whiten_image2d(img_arr2d=None):
    """Standardize an image (zero mean, unit std) and rescale back to uint8."""
    standardized = (img_arr2d.copy() - img_arr2d.mean()) / img_arr2d.std()
    return _np.array(rescale2d(standardized) * 255, dtype=_np.uint8)
def get_chunk_indexes(img_shape=(0, 0), chunk_shape=(0, 0), offset_row_col=None):
    """
    Yield [row_from, row_to, col_from, col_to] for every patch of
    *chunk_shape* tiled over *img_shape* with the given row/col strides.
    Patches that would overrun the image are shifted back to end exactly at
    the border, so the last row/column of patches may overlap its neighbor.
    :param img_shape: Shape of the original image
    :param chunk_shape: Shape of desired patch
    :param offset_row_col: Offset for each patch on both x, y directions
    :return: generator of [row_from, row_to, col_from, col_to]
    """
    img_rows, img_cols = img_shape
    chunk_rows, chunk_cols = chunk_shape
    stride_r, stride_c = offset_row_col
    done_rows = False
    for r in range(0, img_rows, stride_r):
        if done_rows:
            continue
        r0, r1 = r, r + chunk_rows
        if r1 > img_rows:
            # Clamp the final row band to the image border.
            r0, r1 = img_rows - chunk_rows, img_rows
            done_rows = True
        done_cols = False
        for c in range(0, img_cols, stride_c):
            if done_cols:
                continue
            c0, c1 = c, c + chunk_cols
            if c1 > img_cols:
                # Clamp the final column band to the image border.
                c0, c1 = img_cols - chunk_cols, img_cols
                done_cols = True
            yield [int(r0), int(r1), int(c0), int(c1)]
def get_chunk_indices_by_index(img_shape=(0, 0), chunk_shape=(0, 0), indices=None):
    """Return [row0, row1, col0, col1] patches of *chunk_shape* centered on
    each (row, col) in *indices*, shifted where needed to stay inside
    *img_shape*."""
    rows, cols = chunk_shape
    patches = []
    for (center_r, center_c) in indices:
        max_r, max_c = img_shape
        r0, r1 = center_r - rows // 2, center_r + rows // 2
        c0, c1 = center_c - cols // 2, center_c + cols // 2
        # Shift the window back inside the image on each axis.
        if r0 < 0:
            r0, r1 = 0, rows
        if r1 > max_r:
            r0, r1 = max_r - rows, max_r
        if c0 < 0:
            c0, c1 = 0, cols
        if c1 > max_c:
            c0, c1 = max_c - cols, max_c
        patches.append([int(r0), int(r1), int(c0), int(c1)])
    return patches
def merge_patches(patches=None, image_size=(0, 0), patch_size=(0, 0), offset_row_col=None):
    """
    Merge different pieces of image to form a full image. Overlapped regions are averaged.
    :param patches: List of all patches to merge in order (left to right).
    :param image_size: Full image size
    :param patch_size: A patch size(Patches must be uniform in size to be able to merge)
    :param offset_row_col: Offset used to chunk the patches.
    :return: merged uint8 image of shape image_size
    """
    padded_sum = _np.zeros([image_size[0], image_size[1]])
    non_zero_count = _np.zeros_like(padded_sum)
    # Patches must be supplied in the same order get_chunk_indexes yields them.
    for i, chunk_ix in enumerate(get_chunk_indexes(image_size, patch_size, offset_row_col)):
        row_from, row_to, col_from, col_to = chunk_ix
        patch = _np.array(patches[i, :, :]).squeeze()
        # Embed the patch in a full-size zero canvas at its original position.
        padded = _np.pad(patch, [(row_from, image_size[0] - row_to), (col_from, image_size[1] - col_to)],
                         'constant')
        padded_sum = padded + padded_sum
        # NOTE(review): overlap counting treats zero-valued patch pixels as
        # "absent", so true-zero pixels do not contribute to the divisor —
        # confirm this is intended for the averaging.
        non_zero_count = non_zero_count + _np.array(padded > 0).astype(int)
    # Avoid division by zero where no patch contributed.
    non_zero_count[non_zero_count == 0] = 1
    return _np.array(padded_sum / non_zero_count, dtype=_np.uint8)
def expand_and_mirror_patch(full_img_shape=None, orig_patch_indices=None, expand_by=None):
    """
    Given a patch within an image, grow it by expand_by/2 on every side,
    clamping at the image border and reporting how much padding (mirroring)
    each side still needs.  Useful for networks like U-Net that consume a
    wider context than the output patch.
    :param full_img_shape: Full image shape
    :param orig_patch_indices: Four corners of the actual patch
    :param expand_by: Expand by (x, y) in each dimension
    :return: (row0, row1, col0, col1, [(pad_top, pad_bottom), (pad_left, pad_right)])
    """
    half_r, half_c = int(expand_by[0] / 2), int(expand_by[1] / 2)
    r0, r1, c0, c1 = orig_patch_indices
    top, bottom, left, right = r0 - half_r, r1 + half_r, c0 - half_c, c1 + half_c
    pad_top = pad_bottom = pad_left = pad_right = 0
    if top < 0:
        pad_top = half_r - r0
        top = 0
    if bottom > full_img_shape[0]:
        pad_bottom = bottom - full_img_shape[0]
        bottom = full_img_shape[0]
    if left < 0:
        pad_left = half_c - c0
        left = 0
    if right > full_img_shape[1]:
        pad_right = right - full_img_shape[1]
        right = full_img_shape[1]
    return top, bottom, left, right, [(pad_top, pad_bottom), (pad_left, pad_right)]
def largest_cc(binary_arr=None):
    """Return a boolean mask of the largest connected component of *binary_arr*.

    NOTE(review): implicitly returns None when the array has no foreground
    (labels.max() == 0) — callers must handle that case.
    """
    from skimage.measure import label
    labels = label(binary_arr)
    if labels.max() != 0:  # assume at least 1 CC
        # bincount[1:] skips the background; +1 maps back to a label id.
        largest = labels == _np.argmax(_np.bincount(labels.flat)[1:]) + 1
        return largest
def map_img_to_img2d(map_to, img):
    """Overlay *img*'s foreground (255) pixels in red on top of *map_to*.

    A 2-D grayscale *map_to* is first broadcast to a 3-channel RGB image.
    """
    base = map_to.copy()
    rgb = base.copy()
    if len(base.shape) == 2:
        rgb = _np.zeros((base.shape[0], base.shape[1], 3), dtype=_np.uint8)
        rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] = base, base, base
    # Paint the foreground pixels pure red.
    rgb[:, :, 0][img == 255] = 255
    rgb[:, :, 1][img == 255] = 0
    rgb[:, :, 2][img == 255] = 0
    return rgb
def remove_connected_comp(segmented_img, connected_comp_diam_limit=20):
    """
    Remove connected components of a binary image whose end-to-end extent
    (distance between their first and last pixel in scan order) is smaller
    than the specified diameter.
    :param segmented_img: Binary image.
    :param connected_comp_diam_limit: Diameter limit
    :return: copy of the image with small components zeroed out
    """
    # BUG FIX: `scipy.ndimage.measurements` is a deprecated import path.
    from scipy.ndimage import label
    img = segmented_img.copy()
    # BUG FIX: `_np.int` was removed in NumPy 1.24; plain int is equivalent.
    structure = _np.ones((3, 3), dtype=int)
    labeled, n_components = label(img, structure)
    # BUG FIX: labels run 1..n_components; the old `range(n_components)`
    # both scanned the background (label 0) and skipped the last component.
    for comp in range(1, n_components + 1):
        ixy = _np.array(list(zip(*_np.where(labeled == comp))))
        x1, y1 = ixy[0]
        x2, y2 = ixy[-1]
        dst = _math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
        if dst < connected_comp_diam_limit:
            for u, v in ixy:
                img[u, v] = 0
    return img
def get_pix_neigh(i, j, eight=False):
    """
    Get four/ eight neighbors of an image pixel.
    :param i: x position of pixel
    :param j: y position of pixel
    :param eight: Eight neighbors? Else four
    :return: with eight=True all 8 neighbors row by row from the top-left;
             otherwise the 4-neighborhood in up, right, down, left order.
    """
    up, down = i - 1, i + 1
    left, right = j - 1, j + 1
    if eight:
        return [
            (up, left), (up, j), (up, right),
            (i, left), (i, right),
            (down, left), (down, j), (down, right),
        ]
    return [(up, j), (i, right), (down, j), (i, left)]
|
58930
|
from zeus.config import db
from zeus.db.mixins import ApiTokenMixin, RepositoryMixin, StandardAttributes
from zeus.db.utils import model_repr
class RepositoryApiToken(StandardAttributes, RepositoryMixin, ApiTokenMixin, db.Model):
    """
    An API token associated to a repository.
    """
    __tablename__ = "repository_api_token"
    __repr__ = model_repr("repository_id", "key")
    def get_token_key(self):
        # Single-character prefix marking this as a repository-scoped token.
        return "r"
|
58934
|
from .__version__ import __description__, __title__, __version__
from ._exceptions import LifespanNotSupported
from ._manager import LifespanManager
# Names re-exported as the package's public API.
__all__ = [
    "__description__",
    "__title__",
    "__version__",
    "LifespanManager",
    "LifespanNotSupported",
]
|
58969
|
import numpy as np
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.cross_validation import train_test_split
import theanets
import climate
climate.enable_default_logging()
X_orig = np.load('/Users/bzamecnik/Documents/music-processing/music-processing-experiments/c-scale-piano_spectrogram_2048_hamming.npy')
sample_count, feature_count = X_orig.shape
X = MinMaxScaler().fit_transform(X_orig)
X = X.astype(np.float32)
X_train, X_test = train_test_split(X, test_size=0.4, random_state=42)
X_val, X_test = train_test_split(X_test, test_size=0.5, random_state=42)
# (np.maximum(0, 44100/512*np.arange(13)-2)).astype('int')
#blocks = [0, 84, 170, 256, 342, 428, 514, 600, 687, 773, 859, 945, 1031, 1205]
blocks = [0, 48, 98, 148, 198, 248, 298, 348, 398, 448, 498, 548, 598, 700]
def make_labels(blocks):
    """Turn segment-boundary indices into a per-frame label vector.

    ``blocks`` holds the start index of every segment plus the total length
    as the last element; frame t receives the index of the segment it falls
    into.
    """
    labels = np.zeros(blocks[-1])
    for segment, (start, stop) in enumerate(zip(blocks, blocks[1:])):
        labels[start:stop] = segment
    return labels
y = make_labels(blocks)
def score(exp, Xs):
    """Print the train/validation/test reconstruction R^2 of an autoencoder.

    :param exp: theanets Experiment whose ``network`` reconstructs its input.
    :param Xs: (X_train, X_val, X_test) tuple of feature matrices.
    """
    # BUG FIX: r2_score was referenced but never imported anywhere in this
    # script (NameError on first call); import it locally.
    from sklearn.metrics import r2_score
    X_train, X_val, X_test = Xs
    def sc(exp, X):
        # R^2 between the data and its reconstruction.
        return r2_score(X, exp.network.predict(X))
    print("training: ", sc(exp, X_train))
    # NOTE: only optimize to validation dataset's score!
    print("validation:", sc(exp, X_val))
    print("test: ", sc(exp, X_test))
exp1 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp1.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
exp2 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp2.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# gives quite nice prediction, trains slow
exp3 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, hidden_activation='relu')
exp3.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
exp4 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, input_dropout=0.3)
exp4.train(X_train, X_val, optimize='nag', learning_rate=1e-3, momentum=0.9)
# rmsprop - converges faster in this case than nag
exp5 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1)
exp5.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# tied weighs - work good, much lower loss function values
# r2: 0.75037549551862703
exp6 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, tied_weights=True)
exp6.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# higher hidden L1 penalty - worse
exp7 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.7, tied_weights=True)
exp7.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# hidden L2 penalty - a bit worse
exp8 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
hidden_l1=0.1, hidden_l2=0.1, tied_weights=True)
exp8.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# no regularization - in this case better
# r2: 0.82211329411744094
exp10 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True)
exp10.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# layerwise autoencoder training
exp11 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 500, feature_count), tied_weights=True)
exp11.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# wow - this actually is able to to a 2D visualization
exp12 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 100, 10, 2, 10, 100, feature_count),
tied_weights=True)
exp12.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
def compute_middle_layer(X, model):
    """Return the bottleneck (middle) layer activations for input X.

    ``model.feed_forward`` yields one activation array per layer; the middle
    element of that sequence is the autoencoder's code layer.
    """
    activations = model.feed_forward(X)
    return activations[len(activations) // 2]
def visualize_2d(X, y=None):
    """Scatter-plot 2D embeddings, colored by label (or by time when y is None).

    :param X: (n, 2) array of 2D points.
    :param y: Optional label array used for coloring.
    """
    # BUG FIX: `scatter` was a bare name that only resolves inside an
    # interactive %pylab session; import it explicitly so the function also
    # works in a plain script.
    from matplotlib.pyplot import scatter
    colors = y/max(y) if y is not None else np.linspace(0, 1, len(X))
    scatter(X[:, 0], X[:, 1],
        c=colors, alpha=0.2, edgecolors='none', cmap='rainbow')
# same visualization, a little bit better r2
exp13 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 256, 64, 16, 2, 16, 64, 256, feature_count),
tied_weights=True)
exp13.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# contractive - better than without
# r2: 0.82820148664941162
exp14 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True, contractive=0.8)
exp14.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# tanh - bad
exp15 = theanets.Experiment( theanets.Autoencoder,
layers=(feature_count, 500, feature_count),
tied_weights=True, hidden_activation='tanh')
exp15.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# relu, contractive
exp16 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 128, 16, 2, 16, 128, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.5)
exp16.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp17 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 128, 16, 2, 16, 128, feature_count),
tied_weights=True, contractive=0.8)
exp17.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp18 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.8)
exp18.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# r2: 0.83371355062803953
exp19 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.8, hidden_dropout=0.8)
exp19.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp20 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.9, hidden_dropout=0.9)
exp20.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# -----------------
# animate the 2D point movement
import matplotlib.animation as animation
def export_animation(X_2d, y, filename):
    """Render the 2D embedding as a video: one frame per point, moving over time.

    NOTE(review): relies on %pylab globals `plt` and `scatter` and on an
    ffmpeg binary being installed — confirm the runtime environment.

    :param X_2d: (n, 2) array of embedded points.
    :param y: Label array used for coloring.
    :param filename: Output video path (e.g. .mp4).
    """
    fig = plt.figure()
    # 854x480 px (480p) in inches, note that 8.54 gives 853px width :/
    fig.set_size_inches(8.545, 4.80)
    plt.axis('equal')
    # plt.tight_layout()
    # plt.xlim(-0.1, 1.1)
    # plt.ylim(-0.1, 1.1)
    images = []
    # Static background: all points at low alpha; each frame adds one highlighted point.
    im1 = scatter(X_2d[:, 0], X_2d[:, 1], c=y/max(y), cmap='rainbow', alpha=0.2)
    for i in range(len(X_2d)):
        im2 = scatter(X_2d[i, 0], X_2d[i, 1], c=y[i]/max(y), cmap='rainbow')
        images.append([im1, im2])
    ani = animation.ArtistAnimation(fig, images,
        interval=20, blit=False, repeat=False)
    writer = animation.writers['ffmpeg'](fps=50, bitrate=5000)
    ani.save(filename, writer=writer, dpi=100)
export_animation(X_tsne, y, 'piano-tsne.mp4')
#----------------------
exp21 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5,
batch_size=len(X_train))
exp21.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
exp22 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)
exp22.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
exp23 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5)
exp23.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
exp24 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.3, hidden_dropout=0.5,
hidden_activation='linear')
exp24.train(X_train, X_val, optimize='rmsprop', learning_rate=1e-3, momentum=0.9)
# r2: 0.833454635805
exp25 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp25.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.9)
# r2: 0.731835366439
exp26 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp26.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.1)
# r2: 0.854741515141 (*)
exp27 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp27.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# r2: 0.84260338122
exp28 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp28.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.7)
exp29 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp29.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp30 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, input_dropout=0.9)
exp30.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp31 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 100, feature_count),
tied_weights=True)
exp31.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp32 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, input_dropout=0.5, hidden_dropout=0.5)
exp32.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad - makes a single curve
exp33 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, hidden_l1=0.1)
exp33.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad - makes a non-discriminative curve
exp34 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, input_dropout=0.5)
exp34.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp35 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True, hidden_dropout=0.5)
exp35.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp36 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 200, 20, 2, 20, 200, feature_count),
tied_weights=True)
exp36.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp33 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True)
exp33.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
X_zca_train, X_zca_test = train_test_split(X_zca, test_size=0.4, random_state=42)
X_zca_val, X_zca_test = train_test_split(X_zca_test, test_size=0.5, random_state=42)
exp34 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp34.train(X_zca_train, X_zca_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp35 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, 256, 128, 64, 32, 16, 8, 4, 2,
4, 8, 16, 32, 64, 128, 256, 512, feature_count),
tied_weights=True, hidden_activation='relu')
exp35.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# - try tanh and relu for deeper networks
# - try other normalization (mean-std instead od min-max)
X_ms = StandardScaler().fit_transform(X_orig).astype(np.float32)
X_ms_train, X_ms_test = train_test_split(X_ms, test_size=0.4, random_state=42)
X_ms_val, X_ms_test = train_test_split(X_ms_test, test_size=0.5, random_state=42)
exp36 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp36.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp37 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='tanh')
exp37.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp38 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp38.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
X_orig_train, X_orig_test = train_test_split(X_orig.astype('float32'), test_size=0.4, random_state=42)
X_orig_val, X_orig_test = train_test_split(X_orig_test, test_size=0.5, random_state=42)
exp39 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True)
exp39.train(X_orig_train, X_orig_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp40 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', hidden_l1=0.5)
exp40.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp41 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', hidden_l1=0.5)
exp41.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp42 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', weight_l1=0.5)
exp42.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# bad
exp43 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.9)
exp43.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# not bad
exp44 = theanets.Experiment( theanets.Autoencoder,
    layers=(feature_count, 512, feature_count),
    tied_weights=True, hidden_activation='relu')
# BUG FIX: this line called `exp45.train(...)` before exp45 was defined
# (NameError); the call clearly belongs to the exp44 experiment above.
exp44.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp45 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='relu', contractive=0.5)
exp45.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
# r2: 0.849283267068
exp46 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', contractive=0.5)
exp46.train(X_ms_train, X_ms_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
exp47 = theanets.Experiment(theanets.Autoencoder,
layers=(feature_count, 512, feature_count),
tied_weights=True, hidden_activation='linear', contractive=0.5)
exp47.train(X_train, X_val, optimize='layerwise', learning_rate=1e-3, momentum=0.5)
|
59021
|
from di.container import Container
from di.dependant import Dependant, Injectable
from di.executors import SyncExecutor
class UsersRepo(Injectable, scope="app"):
pass
def endpoint(repo: UsersRepo) -> UsersRepo:
    # The di container resolves `repo` from the UsersRepo annotation and the
    # endpoint returns it unchanged so callers can compare instances.
    return repo
def framework():
    """Demo: an app-scoped Injectable is cached across request scopes."""
    container = Container()
    # Solve the endpoint's dependency graph once, up front.
    solved = container.solve(
        Dependant(endpoint, scope="request"), scopes=["app", "request"]
    )
    executor = SyncExecutor()
    with container.enter_scope("app") as app_state:
        with container.enter_scope("request", state=app_state) as request_state:
            repo1 = container.execute_sync(
                solved, executor=executor, state=request_state
            )
        # NOTE(review): this second request scope's state is never bound; the
        # execute below reuses the first request_state. Presumably intended to
        # show the repo is cached at app scope — confirm against di's docs.
        with container.enter_scope("request"):
            repo2 = container.execute_sync(
                solved, executor=executor, state=request_state
            )
        # UsersRepo is scope="app", so both requests must see the same instance.
        assert repo1 is repo2
|
59059
|
from LoopStructural.utils import LoopImportError, LoopTypeError, LoopValueError
try:
from LoopProjectFile import ProjectFile
except ImportError:
raise LoopImportError("LoopProjectFile cannot be imported")
from .process_data import ProcessInputData
import numpy as np
import pandas as pd
import networkx
from LoopStructural.utils import getLogger
logger = getLogger(__name__)
class Map2LoopProcessor(ProcessInputData):
    """Adapter that feeds map2loop output from a LoopProjectFile into
    LoopStructural's ProcessInputData."""

    def __init__(self, projectife, use_thickness=None):
        """
        :param projectife: LoopProjectFile.ProjectFile instance (parameter
            name kept as-is for backward compatibility).
        :param use_thickness: Intended for ProcessInputData once the wiring
            below is completed.
        :raises LoopTypeError: If projectife is not a ProjectFile.
        """
        # Idiom fix: `isinstance(...) == False` -> `not isinstance(...)`.
        if not isinstance(projectife, ProjectFile):
            raise LoopTypeError("projectife must be of type ProjectFile")
        self.projectife = projectife
        # TODO(review): superclass initialisation is still stubbed out; note
        # the commented code refers to `self.projectfile` (different spelling).
        # super().__init__(
        #     self.projectfile.contacts,
        #     self.projectfile.orientations,
        #     stratigraphic_order,
        #     thicknesses=thicknesses,
        #     fault_orientations=fault_orientations,
        #     fault_locations=fault_locations,
        #     fault_properties=fault_properties,
        #     fault_edges=list(fault_graph.edges),
        #     colours=dict(zip(groups['code'],groups['colour'])),
        #     fault_stratigraphy=None,
        #     intrusions=None,
        #     use_thickness=use_thickness,
        #     fault_edge_properties=fault_edge_properties
        # )
|
59079
|
from twisted.internet import reactor
from twisted.trial import unittest
from comet.utility import coerce_to_client_endpoint, coerce_to_server_endpoint
class coerce_to_client_endpoint_TestCase(unittest.TestCase):
    """Parsing tests for comet.utility.coerce_to_client_endpoint."""

    HOST, PORT, DEFAULT_PORT = "test", 1234, 4321

    def test_good_tcp_parse(self):
        # A fully specified tcp: string parses host and port as given.
        ep = coerce_to_client_endpoint(
            reactor, f"tcp:{self.HOST}:{self.PORT}", self.DEFAULT_PORT
        )
        self.assertEqual(ep._host, self.HOST)
        self.assertEqual(ep._port, self.PORT)

    def test_good_unix_parse(self):
        filename = "/dev/null"
        # BUG FIX: the endpoint string contained the literal "(unknown)"
        # instead of interpolating `filename`, so the path assertion below
        # could never hold.
        ep = coerce_to_client_endpoint(reactor, f"unix:{filename}", self.DEFAULT_PORT)
        self.assertEqual(ep._path, filename)

    def test_missing_protocol(self):
        # A bare host:port defaults to TCP.
        ep = coerce_to_client_endpoint(
            reactor, f"{self.HOST}:{self.PORT}", self.DEFAULT_PORT
        )
        self.assertEqual(ep._host, self.HOST)
        self.assertEqual(ep._port, self.PORT)

    def test_missing_port(self):
        # A missing port falls back to the supplied default.
        ep = coerce_to_client_endpoint(reactor, f"tcp:{self.HOST}", self.DEFAULT_PORT)
        self.assertEqual(ep._host, self.HOST)
        self.assertEqual(ep._port, self.DEFAULT_PORT)

    def test_missing_both(self):
        # A bare hostname defaults to TCP with the default port.
        ep = coerce_to_client_endpoint(reactor, self.HOST, self.DEFAULT_PORT)
        self.assertEqual(ep._host, self.HOST)
        self.assertEqual(ep._port, self.DEFAULT_PORT)

    def test_bad_parse(self):
        # Garbage endpoint strings must raise, not silently default.
        self.assertRaises(
            ValueError,
            coerce_to_client_endpoint,
            reactor,
            "tcp:tcp:tcp",
            self.DEFAULT_PORT,
        )
class coerce_to_server_endpoint_TestCase(unittest.TestCase):
    """Parsing tests for comet.utility.coerce_to_server_endpoint."""

    PORT = 1234

    def test_good_tcp_parse(self):
        ep = coerce_to_server_endpoint(reactor, f"tcp:{self.PORT}")
        self.assertEqual(ep._port, self.PORT)

    def test_good_unix_parse(self):
        filename = "/dev/null"
        # BUG FIX: the endpoint string contained the literal "(unknown)"
        # instead of interpolating `filename`, so the address assertion below
        # could never hold.
        ep = coerce_to_server_endpoint(reactor, f"unix:{filename}")
        self.assertEqual(ep._address, filename)

    def test_missing_protocol(self):
        # A bare port number defaults to a TCP server endpoint.
        ep = coerce_to_server_endpoint(reactor, self.PORT)
        self.assertEqual(ep._port, self.PORT)

    def test_bad_parse(self):
        # "tcp:" with no port is invalid.
        self.assertRaises(ValueError, coerce_to_server_endpoint, reactor, "tcp:")
|
59119
|
from unittest.mock import patch
import slack
from harvey.messages import Message
@patch('harvey.messages.SLACK_CHANNEL', 'mock-channel')
@patch('harvey.messages.SLACK_BOT_TOKEN', '<PASSWORD>')
@patch('logging.Logger.debug')
@patch('slack.WebClient.chat_postMessage')
def test_send_slack_message_success(mock_slack, mock_logger):
message = 'mock message'
Message.send_slack_message(message)
mock_logger.assert_called()
mock_slack.assert_called_once_with(channel='mock-channel', text=message)
@patch('logging.Logger.error')
@patch('sys.exit')
@patch(
'slack.WebClient.chat_postMessage',
side_effect=slack.errors.SlackApiError(
message='The request to the Slack API failed.',
response={
'ok': False,
'error': 'not_authed',
},
),
)
def test_send_slack_message_exception(mock_slack, mock_sys_exit, mock_logger):
message = 'mock message'
Message.send_slack_message(message)
mock_logger.assert_called()
mock_sys_exit.assert_called_once()
|
59139
|
from __future__ import division
import os
import cv2 as cv
import numpy as np
from cv_bridge import CvBridge
def map_linear(value, in_min, in_max, out_min, out_max):
    """Linearly rescale *value* from [in_min, in_max] to [out_min, out_max]."""
    fraction = (value - in_min) / (in_max - in_min)
    return out_min + fraction * (out_max - out_min)
def process_image(img, equalize_hist=False):
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
if equalize_hist:
gray = cv.equalizeHist(gray)
return gray
cv_bridge=None
def ros_msg_to_image(msg):
global cv_bridge
if cv_bridge is None:
cv_bridge = CvBridge()
return cv_bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
def image_to_ros_msg(image):
global cv_bridge
if cv_bridge is None:
cv_bridge = CvBridge()
return cv_bridge.cv2_to_imgmsg(image, encoding="bgr8")
def image_to_ros_compressed_image(image):
global cv_bridge
if cv_bridge is None:
cv_bridge = CvBridge()
return cv_bridge.cv2_to_compressed_imgmsg(image)
script_dir = os.path.dirname(os.path.realpath(__file__))
#face_cascade = cv.CascadeClassifier('data/haarcascade_frontalface_alt2.xml')
face_cascade = cv.CascadeClassifier(
script_dir + '/data/haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier(script_dir + '/data/haarcascade_eye.xml')
def detect_faces(gray, image_to_draw_on=None, detect_eyes=False):
    """
    Detect faces in a grayscale image with the Haar cascade.

    :param gray: Grayscale input image.
    :param image_to_draw_on: Optional BGR image to draw blue face boxes on.
    :param detect_eyes: If True, also run eye detection inside each face.
    :return: Sequence of (x, y, w, h) face rectangles.
    """
    global face_cascade
    faces = face_cascade.detectMultiScale(gray, 1.3, 3)
    if image_to_draw_on is not None:
        for (x, y, w, h) in faces:
            cv.rectangle(image_to_draw_on, (x, y), (x+w, y+h), (255, 0, 0), 2)
            if detect_eyes:
                face_roi_gray = gray[y:y+h, x:x+w]
                # BUG FIX: the original sliced an undefined name `img`; the
                # color ROI has to come from image_to_draw_on.
                face_roi_color = image_to_draw_on[y:y+h, x:x+w]
                # BUG FIX: the boolean parameter shadows the module-level
                # detect_eyes() function (calling a bool raised TypeError);
                # resolve the function through the module globals instead.
                globals()['detect_eyes'](face_roi_gray, face_roi_color)
    return faces
def detect_eyes(gray, image):
    """
    Detect eyes in a face region and draw green boxes on the color image.

    :param gray: Grayscale face ROI.
    :param image: Matching color ROI that receives the rectangles.
    :return: Sequence of (x, y, w, h) eye rectangles.
    """
    global eye_cascade
    # BUG FIX: the original referenced undefined roi_gray/roi_color names;
    # the function's own parameters are the ROIs.
    eyes = eye_cascade.detectMultiScale(gray, 1.1, 2)
    for (ex, ey, ew, eh) in eyes:
        cv.rectangle(image, (ex, ey), (ex+ew, ey+eh), (0, 255, 0), 2)
    return eyes
def get_biggest_face(faces):
    """Return the (x, y, w, h) face rectangle with the largest area.

    BUG FIX: the original used Python-2-only tuple parameter unpacking in its
    lambda (`lambda (x, y, w, h): ...`), which is a SyntaxError on Python 3.
    """
    return max(faces, key=lambda face: face[2] * face[3])
def find_features(gray, faces, image_to_draw_on=None):
    """
    Find good-features-to-track corners inside the biggest detected face.

    :param gray: Grayscale image.
    :param faces: Face rectangles as returned by detect_faces().
    :param image_to_draw_on: Optional image on which keypoints are drawn.
    :return: List of (x, y) keypoint coordinates (empty if no faces).
    """
    # create masking image that is all black
    face_mask = np.zeros_like(gray)
    keypoints = []
    if len(faces) > 0:
        x, y, w, h = get_biggest_face(faces)
        # set face ROI to white
        # BUG FIX: the column slice used x+h instead of x+w, so non-square
        # face rectangles produced a wrongly sized mask.
        face_mask[y:y+h, x:x+w] = 255
        corners = cv.goodFeaturesToTrack(gray, 200, 0.02, 7, mask=face_mask)
        corners = np.int0(corners)
        for corner in corners:
            x, y = corner.ravel()
            keypoints.append((x, y))
            if image_to_draw_on is not None:
                cv.circle(image_to_draw_on, (x, y), 3, 255, -1)
    return keypoints
def track_points_move(gray, previous_gray, keypoints, image_to_draw_on=None):
    """Track keypoints between two frames with a forward-backward LK check.

    Flow is computed previous->current and then current->previous; a point is
    kept only when the round trip lands within 1 px of where it started.

    :param gray: Current grayscale frame.
    :param previous_gray: Previous grayscale frame.
    :param keypoints: Iterable of (x, y) points tracked in the previous frame.
    :param image_to_draw_on: Optional image on which surviving points are drawn.
    :return: List of (x, y) positions of the points that survived the check.
    """
    # Reshape to the (N, 1, 2) float32 layout calcOpticalFlowPyrLK expects.
    re_keypoints = np.float32([p for p in keypoints]).reshape(-1, 1, 2)
    optical_flow, st, err = cv.calcOpticalFlowPyrLK(previous_gray,
        gray,
        re_keypoints,
        None,
        winSize=(10, 10),
        maxLevel=2,
        criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 0.01))
    # Backward pass: track the moved points back to the previous frame.
    optical_flow_reversed, st, err = cv.calcOpticalFlowPyrLK(gray,
        previous_gray,
        optical_flow,
        None,
        winSize=(10, 10),
        maxLevel=2,
        criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 20, 0.01))
    # Chebyshev distance between original and round-tripped positions.
    distances = abs(re_keypoints-optical_flow_reversed).reshape(-1, 2).max(-1)
    good_points = distances < 1
    new_keypoints = []
    for (x, y), good_flag in zip(optical_flow.reshape(-1, 2), good_points):
        if not good_flag:
            continue
        new_keypoints.append((x, y))
        if image_to_draw_on is not None:
            cv.circle(image_to_draw_on, (x, y), 3, 255, -1)
    return new_keypoints
def find_bounding_rect(keypoints, image_to_draw_on=None):
    """Return the center (x, y) of the axis-aligned bounding box of keypoints.

    :param keypoints: Iterable of (x, y) coordinates.
    :param image_to_draw_on: Optional image on which the box is drawn.
    :return: (center_x, center_y) as floats.
    """
    # boundingRect returns (x, y, width, height).
    bounding_rectangle = cv.boundingRect(np.float32(keypoints).reshape(-1, 1, 2))
    top_left = bounding_rectangle[:2]
    bottom_right = (top_left[0] + bounding_rectangle[2],
        top_left[1] + bounding_rectangle[3])
    if image_to_draw_on is not None:
        cv.rectangle(image_to_draw_on, top_left, bottom_right, 255, 4)
    # Center: top-left corner plus half of width/height.
    center_x = top_left[0] + bounding_rectangle[2] / 2.0
    center_y = top_left[1] + bounding_rectangle[3] / 2.0
    return (center_x, center_y)
def unite_images(images):
img1 = cv.hconcat(images[:2])
img2 = cv.hconcat(images[2:])
img = cv.vconcat((img1, img2))
return img
def resize_image(image, width=320, height=240):
return cv.resize(image, (width, height))
def display_image(image, text="image"):
cv.imshow(text, image)
return cv.waitKey(1)
|
59142
|
import json
import logging
import tempfile
import shapely.geometry as sgeo
import shapely.ops as ops
from pyproj.crs import CRS
from pywps import FORMATS, ComplexOutput, LiteralInput, Process
from ravenpy.utilities.analysis import dem_prop
from ravenpy.utilities.checks import boundary_check, single_file_check
from ravenpy.utilities.geo import (
generic_raster_clip,
generic_raster_warp,
generic_vector_reproject,
)
from ravenpy.utilities.io import archive_sniffer, crs_sniffer
from ..utils import gather_dem_tile
from . import wpsio as wio
LOGGER = logging.getLogger("PYWPS")
class TerrainAnalysisProcess(Process):
"""Given a file containing vector data and a DEM, analyze terrain characteristics."""
def __init__(self):
inputs = [
wio.dem_raster,
wio.shape,
LiteralInput(
"projected_crs",
"Coordinate Reference System for terrain analysis (Default: EPSG:6622, 'NAD83(CSRS) /"
" Quebec Lambert'. The CRS chosen should be projected and appropriate for the region"
" of interest.",
data_type="integer",
default=6622,
min_occurs=1,
max_occurs=1,
),
wio.select_all_touching,
]
outputs = [
ComplexOutput(
"properties",
"Feature schemas",
abstract="DEM properties (mean elevation, slope, and aspect) for each geometry.",
supported_formats=[FORMATS.JSON],
),
ComplexOutput(
"dem",
"Subsetted digital elevation model",
abstract="DEM GeoTIFF image",
as_reference=True,
supported_formats=[FORMATS.GEOTIFF, FORMATS.META4],
),
]
super(TerrainAnalysisProcess, self).__init__(
self._handler,
identifier="terrain-analysis",
title="Terrain Analysis",
version="1.0",
abstract="Return shape area in square metres based on line boundaries of a polygonal vector file.",
metadata=[],
inputs=inputs,
outputs=outputs,
status_supported=True,
store_supported=True,
)
def _handler(self, request, response):
# Process inputs
# ---------------
shape_url = request.inputs["shape"][0].file
destination_crs = request.inputs["projected_crs"][0].data
touches = request.inputs["select_all_touching"][0].data
# Checks for valid CRS and that CRS is projected
# -----------------------------------------------
projection = CRS.from_user_input(destination_crs)
if not projection.is_projected:
msg = f"Destination CRS {projection.to_epsg()} is not projected. Terrain analysis values will not be valid."
LOGGER.error(ValueError(msg))
raise ValueError(msg)
# Collect and process the shape
# -----------------------------
vectors = [".gml", ".shp", ".gpkg", ".geojson", ".json"]
vector_file = single_file_check(
archive_sniffer(shape_url, working_dir=self.workdir, extensions=vectors)
)
vec_crs = crs_sniffer(vector_file)
# Check that boundaries within 60N and 60S
boundary_check(vector_file)
if "raster" in request.inputs:
raster_url = request.inputs["raster"][0].file
rasters = [".tiff", ".tif"]
raster_file = single_file_check(
archive_sniffer(
raster_url, working_dir=self.workdir, extensions=rasters
)
)
else:
# Assuming that the shape coordinate are in WGS84
raster_file = gather_dem_tile(vector_file, self.workdir)
ras_crs = crs_sniffer(raster_file)
# Reproject raster
# ----------------
if ras_crs != projection.to_epsg():
msg = f"CRS for {raster_file} is not {projection}. Reprojecting raster..."
LOGGER.warning(msg)
warped_fn = tempfile.NamedTemporaryFile(
prefix="warped_", suffix=".tiff", delete=False, dir=self.workdir
).name
generic_raster_warp(raster_file, warped_fn, projection)
else:
warped_fn = raster_file
# Perform the terrain analysis
# ----------------------------
rpj = tempfile.NamedTemporaryFile(
prefix="reproj_", suffix=".json", delete=False, dir=self.workdir
).name
generic_vector_reproject(
vector_file, rpj, source_crs=vec_crs, target_crs=projection.to_epsg()
)
with open(rpj) as src:
geo = json.load(src)
features = [sgeo.shape(feat["geometry"]) for feat in geo["features"]]
union = ops.unary_union(features)
clipped_fn = tempfile.NamedTemporaryFile(
prefix="clipped_", suffix=".tiff", delete=False, dir=self.workdir
).name
# Ensure that values for regions outside of clip are kept
generic_raster_clip(
raster=warped_fn,
output=clipped_fn,
geometry=union,
touches=touches,
fill_with_nodata=True,
padded=True,
)
# Compute DEM properties for each feature.
properties = []
for i in range(len(features)):
properties.append(
dem_prop(clipped_fn, geom=features[i], directory=self.workdir)
)
properties.append(dem_prop(clipped_fn, directory=self.workdir))
response.outputs["properties"].data = json.dumps(properties)
response.outputs["dem"].file = clipped_fn
return response
|
59176
|
from hashlib import blake2s
def hash(x):
    """32-byte BLAKE2s digest of *x* (bytes in, bytes out).

    Note: this intentionally shadows the builtin ``hash``.
    """
    # blake2s already yields a 32-byte digest; the slice keeps that explicit.
    digest = blake2s(x).digest()
    return digest[:32]
def get_primes(givenNumber):
    """Return the list of primes in [2, givenNumber] via trial division."""
    primes = []
    for candidate in range(2, givenNumber + 1):
        # Only divisors up to sqrt(candidate) need to be checked.
        if all(candidate % d != 0 for d in range(2, int(candidate ** 0.5) + 1)):
            primes.append(candidate)
    return primes
def get_B_value(base, result):
    """Derive the Fiat-Shamir challenge B from base and result.

    Both integers are serialized as 1024-byte big-endian strings, the
    concatenation is hashed with BLAKE2s, and the 32-byte digest is read
    back as a big-endian integer.
    """
    preimage = base.to_bytes(1024, 'big') + result.to_bytes(1024, 'big')
    return int.from_bytes(blake2s(preimage).digest()[:32], 'big')
def prove_exponentiation(base, exponent, result):
    """Produce a Wesolowski-style proof that result == base**exponent (mod mod).

    Returns (b, r) where b = base**(exponent // B) mod mod and
    r = exponent % B for the Fiat-Shamir challenge B.
    """
    B = get_B_value(base, result)
    quotient, remainder = divmod(exponent, B)
    b = pow(base, quotient, mod)
    return (b, remainder)
def verify_proof(base, result, b, remainder):
    """Check a proof from prove_exponentiation: b**B * base**r == result (mod mod)."""
    B = get_B_value(base, result)
    recomputed = (pow(b, B, mod) * pow(base, remainder, mod)) % mod
    return recomputed == result
mod = 25195908475657893494027183240048398571429282126204032027777137836043662020707595556264018525880784406918290641249515082189298559149176184502808489120072844992687392807287776735971418347270261896375014971824691165077613379859095700097330459748808428401797429100642458691817195118746121515172654632282216869987549182422433637259085141865462043576798423387184774447920739934236584823824281198163815010674810451660377306056201619676256133844143603833904414952634432190114657544454178424020924616515723350778707749817125772467962926386356373289912154831438167899885040445364023527381951378636564391212010397122822120720357
acc_values = []
g = 3
acc = g
full_exponent = 1
for v in get_primes(100):
acc_values.append(v)
full_exponent = full_exponent * v
acc = pow(acc, v, mod)
prime_to_prove = acc_values[8]
b, remainder = prove_exponentiation(g, full_exponent, acc)
print(verify_proof(g, acc, b, remainder))
|
59180
|
import magma as m
import magma.testing
def test_2d_array_from_verilog():
main = m.define_from_verilog(f"""
module transpose_buffer (
input logic clk,
output logic [2:0] index_inner,
output logic [2:0] index_outer,
input logic [3:0] input_data [63:0],
input logic [2:0] range_inner,
input logic [2:0] range_outer,
input logic rst_n,
input logic [2:0] stride
);
always_ff @(posedge clk, negedge rst_n) begin
if (~rst_n) begin
index_outer <= 3'h0;
index_inner <= 3'h0;
end
else begin
if (index_outer == (range_outer - 3'h1)) begin
index_outer <= 3'h0;
end
else index_outer <= index_outer + 3'h1;
if (index_inner == (range_inner - 3'h1)) begin
index_inner <= 3'h0;
end
else index_inner <= index_inner + 3'h1;
end
end
endmodule // transpose_buffer
""")[0]
m.compile("build/2d_array_from_verilog", main, output="verilog")
assert m.testing.check_files_equal(__file__,
f"build/2d_array_from_verilog.v",
f"gold/2d_array_from_verilog.v")
|
59184
|
import torch
import os
from glob import glob
import numpy as np
from torch.nn import functional as F
import time
class Generator(object):
    def __init__(self, model, exp_name, threshold = 0.1, checkpoint = None, device = torch.device("cuda")):
        """Point-cloud generator wrapping a trained distance-field model.

        :param model: Network with .encoder/.decoder used for inference.
        :param exp_name: Experiment name used to locate the checkpoint dir.
        :param threshold: Clamp value for predicted distances.
        :param checkpoint: Specific checkpoint name; latest is used if None.
        :param device: Torch device the model runs on.
        """
        self.model = model.to(device)
        # Inference only: freeze dropout/batch-norm behavior.
        self.model.eval()
        self.device = device
        self.checkpoint_path = os.path.dirname(__file__) + '/../experiments/{}/checkpoints/'.format( exp_name)
        self.load_checkpoint(checkpoint)
        self.threshold = threshold
def generate_point_cloud(self, data, num_steps = 10, num_points = 900000, filter_val = 0.009):
start = time.time()
inputs = data['inputs'].to(self.device)
for param in self.model.parameters():
param.requires_grad = False
sample_num = 200000
samples_cpu = np.zeros((0, 3))
samples = torch.rand(1, sample_num, 3).float().to(self.device) * 3 - 1.5
samples.requires_grad = True
encoding = self.model.encoder(inputs)
i = 0
while len(samples_cpu) < num_points:
print('iteration', i)
for j in range(num_steps):
print('refinement', j)
df_pred = torch.clamp(self.model.decoder(samples, *encoding), max=self.threshold)
df_pred.sum().backward()
gradient = samples.grad.detach()
samples = samples.detach()
df_pred = df_pred.detach()
inputs = inputs.detach()
samples = samples - F.normalize(gradient, dim=2) * df_pred.reshape(-1, 1) # better use Tensor.copy method?
samples = samples.detach()
samples.requires_grad = True
print('finished refinement')
if not i == 0:
samples_cpu = np.vstack((samples_cpu, samples[df_pred < filter_val].detach().cpu().numpy()))
samples = samples[df_pred < 0.03].unsqueeze(0)
indices = torch.randint(samples.shape[1], (1, sample_num))
samples = samples[[[0, ] * sample_num], indices]
samples += (self.threshold / 3) * torch.randn(samples.shape).to(self.device) # 3 sigma rule
samples = samples.detach()
samples.requires_grad = True
i += 1
print(samples_cpu.shape)
duration = time.time() - start
return samples_cpu, duration
def load_checkpoint(self, checkpoint):
checkpoints = glob(self.checkpoint_path + '/*')
if checkpoint is None:
if len(checkpoints) == 0:
print('No checkpoints found at {}'.format(self.checkpoint_path))
return 0, 0
checkpoints = [os.path.splitext(os.path.basename(path))[0].split('_')[-1] for path in checkpoints]
checkpoints = np.array(checkpoints, dtype=float)
checkpoints = np.sort(checkpoints)
path = self.checkpoint_path + 'checkpoint_{}h:{}m:{}s_{}.tar'.format(
*[*convertSecs(checkpoints[-1]), checkpoints[-1]])
else:
path = self.checkpoint_path + '{}.tar'.format(checkpoint)
print('Loaded checkpoint from: {}'.format(path))
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint['model_state_dict'])
epoch = checkpoint['epoch']
training_time = checkpoint['training_time']
return epoch, training_time
def convertMillis(millis):
    """Break a non-negative millisecond duration into (hours, minutes, seconds)."""
    total_seconds, _ = divmod(millis, 1000)
    total_minutes, seconds = divmod(total_seconds, 60)
    hours, minutes = divmod(total_minutes, 60)
    return int(hours), int(minutes), int(seconds)
def convertSecs(sec):
    """Break a non-negative second duration into (hours, minutes, seconds)."""
    total_minutes, seconds = divmod(sec, 60)
    hours, minutes = divmod(total_minutes, 60)
    return int(hours), int(minutes), int(seconds)
|
59194
|
import numpy as np
def process_actions(actions, l_action):
    """One-hot encode a sequence of integer actions.

    Returns an (n_steps, l_action) int array with a single 1 per row.
    """
    n_steps = len(actions)
    encoded = np.zeros((n_steps, l_action), dtype=int)
    encoded[np.arange(n_steps), actions] = 1
    return encoded
def get_action_others_1hot(action_all, agent_id, l_action):
    """One-hot encode every agent's action except `agent_id`'s, flattened.

    Returns a 1-D int array of length (n_agents - 1) * l_action.
    """
    other_actions = [a for idx, a in enumerate(action_all) if idx != agent_id]
    n_others = len(other_actions)
    encoded = np.zeros((n_others, l_action), dtype=int)
    encoded[np.arange(n_others), other_actions] = 1
    return encoded.flatten()
def get_action_others_1hot_batch(list_action_all, agent_id, l_action):
    """Batched one-hot encoding of all other agents' actions.

    `list_action_all` is a length-n_steps sequence of per-agent action lists.
    Returns a float32 array of shape (n_steps, (n_agents - 1) * l_action).
    """
    n_steps = len(list_action_all)
    n_others = len(list_action_all[0]) - 1
    # drop this agent's column from the (n_steps, n_agents) action matrix
    others = np.delete(np.stack(list_action_all), agent_id, axis=1)
    onehot = np.zeros((n_steps, n_others, l_action), dtype=np.float32)
    rows, cols = np.indices((n_steps, n_others))
    onehot[rows, cols, others] = 1
    return onehot.reshape(n_steps, n_others * l_action)
def process_rewards(rewards, gamma):
    """Discounted returns G_t = sum_{k>=t} gamma^(k-t) * r_k for each step.

    Computed via a reversed cumulative sum of pre-discounted rewards,
    then un-discounted back to each step's own frame.
    """
    length = len(rewards)
    discounts = np.cumprod(np.ones(length) * gamma)
    running = np.cumsum((rewards * discounts)[::-1])[::-1]
    return running / discounts
|
59210
|
import pandas as pd
import os, sys
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
from sklearn.utils import check_array
import numpy as np
from datetime import timedelta
# Absolute path of this module's directory (with trailing slash); used to
# locate bundled data files such as weather_stations.csv.
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))+'/'
def mean_absolute_percentage_error(y_true, y_pred):
    """Mean absolute percentage error over entries where y_true != 0.

    Zero targets are excluded from the mean. The division is applied only
    to masked entries, so no divide-by-zero warnings (or transient inf
    values) are produced — the original divided first and masked after.
    Note: entries with negative y_true contribute with a negative sign,
    matching the original behavior.
    """
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    mask = y_true != 0
    return (np.fabs(y_true[mask] - y_pred[mask]) / y_true[mask]).mean()
# function that returns a list of days not including weekends, holidays, or event day
# if pge == True will return weekdays for PG&E otherwise it will return weekdays for SCE
def get_workdays(start, end):
    """Return a DatetimeIndex of business days in [start, end].

    Excludes weekends and US federal holidays. Accepts anything
    `pd.to_datetime` understands.
    """
    start = pd.to_datetime(start).date()
    end = pd.to_datetime(end).date()
    us_bd = CustomBusinessDay(calendar=USFederalHolidayCalendar())
    # The pd.DatetimeIndex(start=..., end=..., freq=...) constructor was
    # deprecated in pandas 0.25 and removed in 1.0; pd.date_range is the
    # supported way to build a frequency-based index.
    workdays = pd.date_range(start=start, end=end, freq=us_bd)
    return workdays
# Returns the start and end timestamp of a single day
def get_window_of_day(date):
    """Return (start, end) ISO-8601 timestamps spanning one US/Pacific day.

    `end` is midnight of the following day, so the window is half-open.
    """
    day = pd.to_datetime(date).date()
    bounds = pd.date_range(start=day, periods=2, freq='1d', tz='US/Pacific')
    return bounds[0].isoformat(), bounds[1].isoformat()
def get_closest_station(site):
    """Return the uuid of the weather station closest to `site`.

    Looks the site up in the bundled weather_stations.csv. Returns None
    (after logging) when the site is unknown or the row is empty.
    """
    stations = pd.read_csv(os.path.join(PROJECT_ROOT, 'weather_stations.csv'), index_col='site')
    try:
        return stations.loc[site].values[0]
    # Narrowed from a bare `except:` which would also swallow
    # KeyboardInterrupt/SystemExit and hide real programming errors.
    except (KeyError, IndexError):
        print("couldn't find closest weather station for %s" % site)
        return None
def get_date_str(date):
    """Normalize any parseable date/timestamp to a 'YYYY-MM-DD' string."""
    return format(pd.to_datetime(date).date())
def get_month_window(date):
    """Return ISO timestamps for a 30-day US/Pacific window.

    The window ends two days after `date` (to include recent data) and
    starts 30 days before that end.
    """
    window_end = pd.to_datetime(date).date() + timedelta(days=2)
    window_start = window_end - timedelta(days=30)
    def _localize(day):
        return pd.to_datetime(day).tz_localize('US/Pacific').isoformat()
    return _localize(window_start), _localize(window_end)
|
59227
|
import bourgeon
import ragnarok_client as client
from bourgeon import ui
from ragnarok_client import Mode
class BasicInfoWindow:
    """Small HUD window showing the character's current HP and SP."""
    def __init__(self, name: str) -> None:
        # "--" placeholders until the first update() fills in real values
        self._hp_text = ui.Text("--")
        self._sp_text = ui.Text("--")
        self.window = ui.Window(name, [[
            ui.Text("HP"),
            self._hp_text,
            ui.Text("| SP"),
            self._sp_text,
        ]], 0)
    def open(self) -> None:
        """Show the window by registering it with the UI system."""
        ui.register_window(self.window)
    def close(self) -> None:
        """Hide the window by unregistering it."""
        ui.unregister_window(self.window)
    def update(self, hp: int, max_hp: int, sp: int, max_sp: int) -> None:
        """Refresh the displayed "current / max" HP and SP readouts."""
        self._hp_text.set_text(f"{hp} / {max_hp}")
        self._sp_text.set_text(f"{sp} / {max_sp}")
# Module-level singleton; created on entering the game mode, closed on login.
basic_info_window = None
def on_tick() -> None:
    """
    OnTick callback: refresh HP/SP from the client each tick (if the
    window exists).
    """
    global basic_info_window
    if basic_info_window:
        basic_info_window.update(client.get_hp(), client.get_max_hp(),
                                 client.get_sp(), client.get_max_sp())
def on_mode_switch(mode_type: Mode, _map_name: str) -> None:
    """
    OnModeSwitch callback: open the info window when entering the game,
    close it when returning to the login screen.
    """
    global basic_info_window
    if mode_type == Mode.Game:
        basic_info_window = BasicInfoWindow(client.get_char_name())
        basic_info_window.open()
    elif mode_type == Mode.Login:
        if basic_info_window:
            basic_info_window.close()
# Hook both callbacks into the plugin host.
bourgeon.register_callback("OnTick", on_tick)
bourgeon.register_callback("OnModeSwitch", on_mode_switch)
|
59250
|
import os
import ssl
import socket
from tempfile import NamedTemporaryFile
try:
from httplib import HTTPSConnection
except ImportError:
from http.client import HTTPSConnection
class ValidatedHTTPSConnection(HTTPSConnection):
    """HTTPSConnection that always verifies the server certificate.

    Uses $SSL_CERT_FILE or a well-known distro CA bundle when available;
    otherwise falls back to the embedded root certificate below, written
    to a temporary file for the duration of the handshake.
    """
    # NOTE(review): the embedded PEM appears truncated/garbled
    # (`<KEY>` placeholders) — verify against the real DigiCert
    # Global Root G2 before relying on the fallback path.
    CA_ROOT_CERT_FALLBACK = '''
DigiCert Global Root G2
-----BEGIN CERTIFICATE-----
MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
MQ<KEY>
<KEY>Q
q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
MrY=
-----END CERTIFICATE-----
'''
    def get_ca_cert_bundle(self):
        """Return a path to a CA bundle, or None when none is found.

        Honors the SSL_CERT_FILE environment variable first, then probes
        the common Debian/RedHat bundle locations.
        """
        via_env = os.getenv('SSL_CERT_FILE')
        if via_env is not None and os.path.exists(via_env):
            return via_env
        probe_paths = [
            "/etc/ssl/certs/ca-certificates.crt",
            "/etc/ssl/certs/ca-bundle.crt",
            "/etc/pki/tls/certs/ca-bundle.crt",
        ]
        for path in probe_paths:
            if os.path.exists(path):
                return path
        return None
    def connect(self):
        """Open the TCP connection and wrap it in TLS with mandatory
        certificate verification (CERT_REQUIRED)."""
        sock = socket.create_connection((self.host, self.port),
                                        self.timeout,
                                        self.source_address)
        bundle = cafile = self.get_ca_cert_bundle()
        if bundle is None:
            # No system bundle: materialize the embedded root cert into a
            # temp file (stripping the per-line indentation first).
            ca_certs = NamedTemporaryFile()
            ca_certs.write('\n'.join(
                map(str.strip, self.CA_ROOT_CERT_FALLBACK.splitlines())
            ).encode('ascii'))
            ca_certs.flush()
            cafile = ca_certs.name
        # NOTE(review): ssl.wrap_socket is deprecated and removed in
        # Python 3.12; consider ssl.SSLContext.wrap_socket. Behavior is
        # deliberately left unchanged here.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs=cafile)
        if bundle is None:
            # closing the NamedTemporaryFile also deletes it
            ca_certs.close()
|
59449
|
# cbuild package template: firmware blobs for Intel PRO/Wireless 2100.
pkgname = "firmware-ipw2100"
pkgver = "1.3"
pkgrel = 0
pkgdesc = "Firmware for the Intel PRO/Wireless 2100 wifi cards"
maintainer = "q66 <<EMAIL>>"
license = "custom:ipw2100"
url = "http://ipw2100.sourceforge.net"
source = f"http://firmware.openbsd.org/firmware-dist/ipw2100-fw-{pkgver}.tgz"
sha256 = "e1107c455e48d324a616b47a622593bc8413dcce72026f72731c0b03dae3a7a2"
# firmware images must not be stripped or treated as native ELF objects
options = ["!strip", "foreignelf"]
def do_install(self):
    """Install every firmware image plus its license (cbuild install hook)."""
    for f in self.cwd.glob("*.fw"):
        self.install_file(f, "usr/lib/firmware")
    self.install_license("LICENSE")
|
59493
|
import torch
import numpy as np
import torch.nn as nn
import torch.distributed as dist
import torch.nn.functional as F
from torch import Tensor
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Optional
from cftool.misc import update_dict
from cftool.misc import shallow_copy_dict
from torch.nn.parallel import DistributedDataParallel as DDP
from ..encoder import Encoder1DBase
from ....data import CVLoader
from ....types import tensor_dict_type
from ....protocol import StepOutputs
from ....protocol import TrainerState
from ....protocol import MetricsOutputs
from ....protocol import ModelWithCustomSteps
from ....constants import LOSS_KEY
from ....constants import INPUT_KEY
from ....constants import LATENT_KEY
from ....misc.toolkit import to_device
from ....misc.toolkit import l2_normalize
from ....misc.toolkit import get_world_size
from ....misc.toolkit import has_batch_norms
def _get_dino_defaults(name: str) -> Dict[str, Any]:
if name == "vit":
return {"patch_size": 16, "drop_path_rate": 0.1}
return {}
class Scheduler:
    """Per-step value schedule; indexing past the end clamps to the last value."""
    def __init__(self, values: np.ndarray):
        self.values = values
        self.max_idx = len(values) - 1
    def __getitem__(self, index: int) -> Any:
        clamped = index if index <= self.max_idx else self.max_idx
        return self.values[clamped]
def cosine_scheduler(
    base_value: float,
    final_value: float,
    epochs: int,
    num_step_per_epoch: int,
    warmup_epochs: int = 0,
    start_warmup_value: int = 0,
) -> Scheduler:
    """Build a per-step cosine decay schedule with an optional linear warmup.

    The first `warmup_epochs * num_step_per_epoch` steps ramp linearly from
    `start_warmup_value` to `base_value`; the rest follow a half cosine from
    `base_value` down to `final_value`.
    """
    total_iters = epochs * num_step_per_epoch
    warmup_iters = warmup_epochs * num_step_per_epoch
    if warmup_epochs > 0:
        warmup_part = np.linspace(start_warmup_value, base_value, warmup_iters)
    else:
        warmup_part = np.array([])
    steps = np.arange(total_iters - warmup_iters)
    amplitude = base_value - final_value
    cosine_part = final_value + 0.5 * amplitude * (1.0 + np.cos(np.pi * steps / len(steps)))
    values = np.concatenate((warmup_part, cosine_part))
    assert len(values) == total_iters
    return Scheduler(values)
class MultiCropWrapper(nn.Module):
    """Runs a backbone over multiple image crops efficiently, then one head.

    Crops with the same spatial resolution are concatenated into a single
    backbone forward pass; all resulting features are concatenated before
    the projection head.
    """
    def __init__(self, backbone: nn.Module, head: nn.Module):
        super().__init__()
        # neutralize any classifier the backbone ships with
        backbone.fc, backbone.head = nn.Identity(), nn.Identity()
        self.backbone = backbone
        self.head = head
    def forward(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        state: Optional[TrainerState] = None,
        *,
        img_end_idx: Optional[int] = None,
        **kwargs: Any,
    ) -> Tensor:
        # batch[INPUT_KEY] is either one tensor or a list of crop tensors;
        # normalize to a list (and write the list back into the batch).
        img_crops = batch[INPUT_KEY]
        if not isinstance(img_crops, list):
            img_crops = batch[INPUT_KEY] = [img_crops]
        # `img_end_idx` lets the teacher see only the first (global) crops
        if img_end_idx is not None:
            img_crops = img_crops[:img_end_idx]
        # cumulative counts of consecutive same-resolution crops; each
        # resulting boundary delimits one batched backbone call
        idx_crops = torch.cumsum(
            torch.unique_consecutive(
                torch.tensor([img_crop.shape[-1] for img_crop in img_crops]),
                return_counts=True,
            )[1],
            0,
        )
        outputs = []
        start_idx = 0
        for end_idx in idx_crops:
            local_batch = shallow_copy_dict(batch)
            local_batch[INPUT_KEY] = torch.cat(img_crops[start_idx:end_idx])
            idx_rs = self.backbone(batch_idx, local_batch, state, **kwargs)
            idx_out = idx_rs[LATENT_KEY]
            # some backbones return (features, aux); keep only the features
            if isinstance(idx_out, tuple):
                idx_out = idx_out[0]
            outputs.append(idx_out)
            start_idx = end_idx
        return self.head(torch.cat(outputs))
class DINOHead(nn.Module):
    """DINO projection head: MLP -> L2 normalize -> weight-normed linear.

    The last layer's weight magnitude (weight_g) is fixed to 1 and
    optionally frozen, as in the reference DINO implementation.
    """
    def __init__(
        self,
        in_dim: int,
        out_dim: int,
        batch_norm: bool = False,
        norm_last_layer: bool = True,
        *,
        num_layers: int = 3,
        latent_dim: int = 2048,
        bottleneck_dim: int = 256,
    ):
        super().__init__()
        num_layers = max(num_layers, 1)
        if num_layers == 1:
            self.mapping = nn.Linear(in_dim, bottleneck_dim)
        else:
            blocks = [nn.Linear(in_dim, latent_dim)]
            if batch_norm:
                blocks.append(nn.BatchNorm1d(latent_dim))
            blocks.append(nn.GELU())
            for _ in range(num_layers - 2):
                blocks.append(nn.Linear(latent_dim, latent_dim))
                if batch_norm:
                    blocks.append(nn.BatchNorm1d(latent_dim))
                blocks.append(nn.GELU())
            blocks.append(nn.Linear(latent_dim, bottleneck_dim))
            self.mapping = nn.Sequential(*blocks)
        # initialize MLP weights before creating the weight-normed last layer
        self.apply(self._init_weights)
        last = nn.Linear(bottleneck_dim, out_dim, bias=False)
        self.last_layer = nn.utils.weight_norm(last)
        self.last_layer.weight_g.data.fill_(1)
        if norm_last_layer:
            self.last_layer.weight_g.requires_grad = False
    def _init_weights(self, m: nn.Module) -> None:
        # truncated-normal init for linear weights, zero bias
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=0.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
    def forward(self, net: Tensor) -> Tensor:
        net = self.mapping(net)
        # project to the unit hypersphere before the prototype layer
        net = nn.functional.normalize(net, dim=-1, p=2)
        net = self.last_layer(net)
        return net
class DINOLoss(nn.Module):
    """Cross-entropy between teacher and student crop distributions.

    The teacher output is centered (with a momentum-updated center buffer)
    and sharpened by a scheduled temperature; same-index crop pairs are
    skipped so a crop is never compared against itself.
    """
    center: torch.Tensor
    def __init__(
        self,
        out_dim: int,
        teacher_temp: float,
        warmup_teacher_temp: float,
        warmup_teacher_temp_epochs: int,
        teacher_temp_epochs: int,
        *,
        student_temp: float = 0.1,
        center_momentum: float = 0.9,
    ):
        super().__init__()
        self.student_temp = student_temp
        self.center_momentum = center_momentum
        # running estimate of the teacher's mean logits (prevents collapse)
        self.register_buffer("center", torch.zeros(1, out_dim))
        # linear warmup of the teacher temperature, then constant
        teacher_temp_constant_epochs = teacher_temp_epochs - warmup_teacher_temp_epochs
        self.teacher_temp_schedule = Scheduler(
            np.concatenate(
                (
                    np.linspace(
                        warmup_teacher_temp,
                        teacher_temp,
                        warmup_teacher_temp_epochs,
                    ),
                    np.ones(teacher_temp_constant_epochs) * teacher_temp,
                )
            )
        )
        self.num_epochs = teacher_temp_epochs
    def forward(
        self,
        epoch: int,
        num_crops: int,
        student_output: Tensor,
        teacher_output: Tensor,
    ) -> Tensor:
        student_logits = student_output / self.student_temp
        student_logits_list = student_logits.chunk(num_crops)
        temp = self.teacher_temp_schedule[epoch]
        teacher_logits = F.softmax((teacher_output - self.center) / temp, dim=-1)
        # the teacher only sees the 2 global crops
        teacher_logits_list = teacher_logits.detach().chunk(2)
        total_loss = 0.0
        num_loss_terms = 0
        for it, t_logit in enumerate(teacher_logits_list):
            for iv, v_logit in enumerate(student_logits_list):
                # skip comparing a crop's student view with its own teacher view
                if iv == it:
                    continue
                loss = torch.sum(-t_logit * F.log_softmax(v_logit, dim=-1), dim=-1)
                total_loss += loss.mean()
                num_loss_terms += 1
        total_loss /= num_loss_terms
        self.update_center(teacher_output)
        return total_loss
    @torch.no_grad()
    def update_center(self, teacher_output: Tensor) -> None:
        """EMA update of the center from the (all-reduced) teacher batch mean."""
        batch_center = torch.sum(teacher_output, dim=0, keepdim=True)
        if dist.is_initialized():
            dist.all_reduce(batch_center)
        batch_center = batch_center / (len(teacher_output) * get_world_size())
        m = self.center_momentum
        self.center = self.center * m + batch_center * (1.0 - m)
class DINOEvaluateLoss:
    """Validation-time DINO cross-entropy that reuses the training loss state
    (student temperature, teacher temperature schedule and center buffer)
    without updating any of it."""
    def __init__(self, train_loss: "DINOLoss"):
        self.train_loss = train_loss
    def __call__(
        self,
        epoch: int,
        student_output: Tensor,
        teacher_output: Tensor,
    ) -> float:
        scaled_student = student_output / self.train_loss.student_temp
        teacher_temp = self.train_loss.teacher_temp_schedule[epoch]
        centered_teacher = teacher_output - self.train_loss.center
        teacher_probs = F.softmax(centered_teacher / teacher_temp, dim=-1)
        per_sample = torch.sum(
            -teacher_probs * F.log_softmax(scaled_student, dim=-1), dim=-1
        )
        return per_sample.mean().item()
@ModelWithCustomSteps.register("dino")
class DINO(ModelWithCustomSteps):
    """Self-distillation with no labels (DINO).

    A momentum teacher distills into a multi-crop student. Learning rate,
    weight decay and teacher-momentum follow manual cosine schedules driven
    inside `train_step` (the trainer's own scheduler is disabled).
    """
    # this model manages its own optimizer param groups and DDP wrapping
    custom_params_groups = True
    custom_ddp_initialization = True
    lr_schedule: Optional[Scheduler]
    wd_schedule: Optional[Scheduler]
    momentum_schedule: Optional[Scheduler]
    def __init__(
        self,
        encoder1d: str = "vit",
        encoder1d_config: Optional[Dict[str, Any]] = None,
        student_specific: Optional[Dict[str, Any]] = None,
        teacher_specific: Optional[Dict[str, Any]] = None,
        *,
        out_dim: int = 65536,
        use_bn_in_head: bool = False,
        norm_last_layer: bool = True,
        teacher_temp: float = 0.07,
        momentum_teacher: float = 0.996,
        warmup_teacher_temp: float = 0.04,
        warmup_teacher_temp_epochs: int = 30,
        teacher_temp_epochs: int,
        freeze_last_layer: int = 1,
        weight_decay: float = 0.04,
        weight_decay_end: float = 0.4,
        warmup_epochs: int = 10,
    ):
        super().__init__()
        # shared encoder config, specialized per branch
        base = update_dict(encoder1d_config or {}, _get_dino_defaults(encoder1d))
        student_cfg = update_dict(student_specific or {}, shallow_copy_dict(base))
        teacher_cfg = update_dict(teacher_specific or {}, shallow_copy_dict(base))
        student = Encoder1DBase.make(encoder1d, student_cfg)
        teacher = Encoder1DBase.make(encoder1d, teacher_cfg)
        self.ddp_student = self.ddp_teacher = None
        self.student = MultiCropWrapper(
            student,
            DINOHead(
                student.latent_dim,
                out_dim,
                use_bn_in_head,
                norm_last_layer,
            ),
        )
        self.teacher = MultiCropWrapper(
            teacher,
            DINOHead(teacher.latent_dim, out_dim, use_bn_in_head),
        )
        self.freeze_last_layer = freeze_last_layer
        # the teacher starts as an exact copy of the student
        self.teacher.load_state_dict(self.student.state_dict())
        self.loss = DINOLoss(
            out_dim,
            teacher_temp,
            warmup_teacher_temp,
            warmup_teacher_temp_epochs,
            teacher_temp_epochs,
        )
        self.evaluate_loss = DINOEvaluateLoss(self.loss)
        self.momentum_teacher = momentum_teacher
        self.teacher_temp_epochs = teacher_temp_epochs
        self.weight_decay = weight_decay
        self.weight_decay_end = weight_decay_end
        self.warmup_epochs = warmup_epochs
        # schedules are built lazily on the first train step (they need
        # batch size / steps-per-epoch information from the trainer state)
        self.lr_schedule = None
        self.wd_schedule = None
        self.momentum_schedule = None
    @property
    def student_for_training(self) -> MultiCropWrapper:
        # DDP wrapper when initialized, plain module otherwise
        return self.ddp_student or self.student
    @property
    def teacher_for_training(self) -> MultiCropWrapper:
        return self.ddp_teacher or self.teacher
    def forward(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        state: Optional[TrainerState] = None,
        **kwargs: Any,
    ) -> tensor_dict_type:
        """Inference path: L2-normalized student backbone features."""
        net = self.student.backbone(batch_idx, batch, state, **kwargs)[LATENT_KEY]
        net = l2_normalize(net)
        return {LATENT_KEY: net}
    def onnx_forward(self, batch: tensor_dict_type) -> Any:
        inp = batch[INPUT_KEY]
        net = self.get_latent(inp, determinate=True)
        return net.view(inp.shape[0], self.student.backbone.latent_dim)
    def get_latent(self, net: Tensor, **kwargs: Any) -> Tensor:
        return self.forward(0, {INPUT_KEY: net}, **kwargs)[LATENT_KEY]
    def get_logits(self, net: Tensor) -> Tensor:
        return self.student(0, {INPUT_KEY: net})
    def state_dict(
        self,
        destination: Any = None,
        prefix: str = "",
        keep_vars: bool = False,
    ) -> Any:
        # drop DDP mirror entries: they duplicate student/teacher weights
        states = super().state_dict(destination, prefix, keep_vars)
        for k in list(states.keys()):
            if k.startswith("ddp"):
                states.pop(k)
        return states
    def summary_forward(self, batch_idx: int, batch: tensor_dict_type) -> None:
        self.student(batch_idx, to_device(batch, self.device))
    def _get_outputs(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
    ) -> tensor_dict_type:
        # teacher sees only the first two (global) crops; student sees all
        teacher_output = self.teacher_for_training(
            batch_idx,
            batch,
            trainer.state,
            img_end_idx=2,
            **forward_kwargs,
        )
        student_output = self.student_for_training(
            batch_idx,
            batch,
            trainer.state,
            **forward_kwargs,
        )
        return {"student": student_output, "teacher": teacher_output}
    def _get_loss(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
    ) -> Tuple[tensor_dict_type, Tensor]:
        with torch.cuda.amp.autocast(enabled=trainer.use_amp):
            outputs = self._get_outputs(batch_idx, batch, trainer, forward_kwargs)
            epoch = trainer.state.epoch
            num_crops = len(batch[INPUT_KEY])
            student_output = outputs["student"]
            teacher_output = outputs["teacher"]
            loss = self.loss(epoch, num_crops, student_output, teacher_output)
        return outputs, loss
    def train_step(
        self,
        batch_idx: int,
        batch: tensor_dict_type,
        trainer: Any,
        forward_kwargs: Dict[str, Any],
        loss_kwargs: Dict[str, Any],
    ) -> StepOutputs:
        """One optimization step with manual lr/wd scheduling and an EMA
        update of the teacher from the student."""
        state = trainer.state
        if self.lr_schedule is None:
            # linear lr scaling rule: lr * total_batch_size / 256
            self.lr_schedule = cosine_scheduler(
                self.lr * (len(batch[INPUT_KEY][0]) * get_world_size()) / 256.0,  # type: ignore
                self.min_lr,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
                warmup_epochs=self.warmup_epochs,
            )
        if self.wd_schedule is None:
            self.wd_schedule = cosine_scheduler(
                self.weight_decay,
                self.weight_decay_end,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
            )
        # manual scheduling
        optimizer = trainer.optimizers["all"]
        for i, param_group in enumerate(optimizer.param_groups):
            param_group["lr"] = self.lr_schedule[state.step]
            # group 0 holds the regularized params (see `params_groups`)
            if i == 0:
                param_group["weight_decay"] = self.wd_schedule[state.step]
        # forward pass
        rs, loss = self._get_loss(batch_idx, batch, trainer, forward_kwargs)
        # backward pass
        optimizer.zero_grad()
        trainer.grad_scaler.scale(loss).backward()
        # clip norm
        if trainer.clip_norm > 0.0:
            trainer.grad_scaler.unscale_(optimizer)
            nn.utils.clip_grad_norm_(
                self.student_for_training.parameters(),
                max_norm=trainer.clip_norm,
            )
        # freeze last layer
        if state.epoch <= self.freeze_last_layer:
            for n, p in self.student.named_parameters():
                if "last_layer" in n:
                    p.grad = None
        # update parameters
        trainer.grad_scaler.step(optimizer)
        trainer.grad_scaler.update()
        # update momentum teacher
        if self.momentum_schedule is None:
            self.momentum_schedule = cosine_scheduler(
                self.momentum_teacher,
                1.0,
                self.teacher_temp_epochs,
                state.num_step_per_epoch,
            )
        with torch.no_grad():
            m = self.momentum_schedule[state.step]
            for param_q, param_k in zip(
                self.student.parameters(),
                self.teacher.parameters(),
            ):
                param_k.data.mul_(m).add_((1.0 - m) * param_q.detach().data)
        # return
        return StepOutputs(rs, {LOSS_KEY: loss.item()})
    def evaluate_step(  # type: ignore
        self,
        loader: CVLoader,
        portion: float,
        trainer: Any,
    ) -> MetricsOutputs:
        """Evaluate the DINO loss on a (portion of a) loader; lower is better,
        so the score is the negated mean loss."""
        losses = []
        for i, batch in enumerate(loader):
            if i / len(loader) >= portion:
                break
            batch = to_device(batch, self.device)
            outputs = self._get_outputs(i, batch, trainer, {})
            losses.append(
                self.evaluate_loss(
                    trainer.state.epoch,
                    outputs["student"],
                    outputs["teacher"],
                )
            )
        # gather
        mean_loss = sum(losses) / len(losses)
        return MetricsOutputs(
            -mean_loss,
            {
                "loss": mean_loss,
                "lr": self.lr_schedule[trainer.state.step],  # type: ignore
                "wd": self.wd_schedule[trainer.state.step],  # type: ignore
            },
        )
    @staticmethod
    def params_groups(m: nn.Module) -> Any:
        """Split params into a weight-decayed group (group 0) and a
        non-decayed group for biases / norm parameters (group 1)."""
        regularized = []
        bias_and_norm = []
        for name, param in m.named_parameters():
            if not param.requires_grad:
                continue
            if name.endswith(".bias") or len(param.shape) == 1:
                bias_and_norm.append(param)
            else:
                regularized.append(param)
        return [{"params": regularized}, {"params": bias_and_norm, "weight_decay": 0.0}]
    def _init_with_trainer(self, trainer: Any) -> None:
        # the teacher is only ever updated through the EMA, never by SGD
        self.teacher_for_training.requires_grad_(False)
    def init_ddp(self, trainer: Any) -> None:
        if has_batch_norms(self.student):
            self.student = nn.SyncBatchNorm.convert_sync_batchnorm(self.student)
            self.teacher = nn.SyncBatchNorm.convert_sync_batchnorm(self.teacher)
        self.ddp_student = DDP(self.student, device_ids=[trainer.rank])
        self.ddp_teacher = DDP(self.teacher, device_ids=[trainer.rank])
        self.ddp_teacher.requires_grad_(False)  # type: ignore
    def permute_trainer_config(self, trainer_config: Dict[str, Any]) -> None:
        # TODO : make `permute_trainer_config` more general
        if trainer_config["clip_norm"] == 0.0:
            trainer_config["clip_norm"] = 3.0
        if trainer_config["lr"] is None:
            trainer_config["lr"] = 0.0005
        self.lr = trainer_config["lr"]
        self.min_lr = trainer_config.pop("min_lr", 1.0e-6)
        if trainer_config["optimizer_name"] is None:
            trainer_config["optimizer_name"] = "adamw"
        # scheduling is done manually inside `train_step`
        trainer_config["scheduler_name"] = "none"
__all__ = [
"DINO",
]
|
59494
|
from __future__ import absolute_import
# external modules
from past.builtins import basestring
import numpy as num
# ANUGA modules
import anuga.utilities.log as log
from anuga.config import netcdf_mode_r, netcdf_mode_w, netcdf_mode_a, \
netcdf_float
from .asc2dem import asc2dem
def dem2array(filename, variable_name='elevation',
              easting_min=None, easting_max=None,
              northing_min=None, northing_max=None,
              use_cache=False, verbose=False,):
    """Read Digitial Elevation model from the following NetCDF format (.dem)

    Example:

    ncols         3121
    nrows         1800
    xllcorner     722000
    yllcorner     5893000
    cellsize      25
    NODATA_value  -9999
    138.3698 137.4194 136.5062 135.5558 ..........

    name_in should be .dem file to be read.

    Returns (x, y, Z): 1-D easting/northing coordinate arrays and the
    elevation array Z, oriented column-major to match grd2array output.
    NODATA cells are replaced with NaN.
    NOTE(review): the easting/northing min/max and use_cache parameters are
    currently ignored — confirm whether clipping support was intended.
    """
    import os
    from anuga.file.netcdf import NetCDFFile
    msg = 'Filename must be a text string'
    assert isinstance(filename, basestring), msg
    msg = 'Extension should be .dem'
    assert os.path.splitext(filename)[1] in ['.dem'], msg
    msg = 'Variable name must be a text string'
    assert isinstance(variable_name, basestring), msg
    # Get NetCDF
    infile = NetCDFFile(filename, netcdf_mode_r)
    if verbose: log.critical('Reading DEM from %s' % (filename))
    ncols = int(infile.ncols)
    nrows = int(infile.nrows)
    xllcorner = float(infile.xllcorner)  # Easting of lower left corner
    yllcorner = float(infile.yllcorner)  # Northing of lower left corner
    cellsize = float(infile.cellsize)
    NODATA_value = float(infile.NODATA_value)
    zone = int(infile.zone)
    false_easting = float(infile.false_easting)
    false_northing = float(infile.false_northing)
    # Text strings
    projection = infile.projection
    datum = infile.datum
    units = infile.units
    Z = infile.variables[variable_name][:]
    Z = Z.reshape(nrows,ncols)
    Z = num.where(Z == NODATA_value , num.nan, Z)
    #changed the orientation of Z array to make it consistent with grd2array result
    Z = num.fliplr(Z.T)
    #print ncols, nrows, xllcorner,yllcorner, cellsize, NODATA_value, zone
    x = num.linspace(xllcorner, xllcorner+(ncols-1)*cellsize, ncols)
    y = num.linspace(yllcorner, yllcorner+(nrows-1)*cellsize, nrows)
    return x,y, Z
|
59502
|
from typing import List
import pytest
from pathlib import Path
from graphtik.sphinxext import DocFilesPurgatory, _image_formats
@pytest.fixture
def img_docs() -> List[str]:
    """Three fake document names: d0, d1, d2."""
    return [f"d{i}" for i in range(3)]
@pytest.fixture
def img_files(tmpdir) -> List[Path]:
    """Three empty files (f0..f2) created under pytest's tmpdir."""
    files = [tmpdir.join(f"f{i}") for i in range(3)]
    for f in files:
        f.ensure()
    return [Path(i) for i in files]
@pytest.fixture
def img_reg(img_docs, img_files) -> DocFilesPurgatory:
    """A purgatory where d0 owns f0 & f1, d1 also owns f0, and d2 owns f2."""
    img_reg = DocFilesPurgatory()
    img_reg.register_doc_fpath(img_docs[0], img_files[0])
    img_reg.register_doc_fpath(img_docs[0], img_files[1])
    img_reg.register_doc_fpath(img_docs[1], img_files[0])
    img_reg.register_doc_fpath(img_docs[2], img_files[2])
    return img_reg
def test_image_purgatory(img_docs, img_files, img_reg):
    """Files are deleted only once no remaining doc references them;
    purging is idempotent (the loops run each purge twice)."""
    # d2 owns only f2 -> f2 goes away, shared files survive
    for _ in range(2):
        img_reg.purge_doc(img_docs[2])
        assert list(img_reg.doc_fpaths) == img_docs[:2]
        assert img_files[0].exists()
        assert img_files[1].exists()
        assert not img_files[2].exists()
    # f0 is still referenced by d0, so purging d1 deletes nothing
    for _ in range(2):
        img_reg.purge_doc(img_docs[1])
        assert list(img_reg.doc_fpaths) == img_docs[:1]
        assert img_files[0].exists()
        assert img_files[1].exists()
        assert not img_files[2].exists()
    # last owner gone -> everything is removed
    img_reg.purge_doc(img_docs[0])
    assert not img_reg.doc_fpaths
    assert not img_files[0].exists()
    assert not img_files[1].exists()
    assert not img_files[2].exists()
    # purging already-purged docs must be a no-op, not an error
    img_reg.purge_doc(img_docs[0])
    img_reg.purge_doc(img_docs[1])
    img_reg.purge_doc(img_docs[2])
|
59507
|
from collections import namedtuple
from itertools import chain
from typing import List, Dict, Tuple, Any
from valacefgen import utils
from valacefgen.vala import VALA_TYPES, VALA_ALIASES, GLIB_TYPES
# Re-export the shared TypeInfo helper for convenience.
TypeInfo = utils.TypeInfo
# One enum member: C symbol name, Vala identifier and optional doc comment.
EnumValue = namedtuple("EnumValue", 'c_name vala_name comment')
class Type:
    """Base class for all generated Vala type wrappers.

    Holds the C name / Vala name mapping, the C header that declares the
    type, and an optional (reformatted) documentation comment.
    """
    def __init__(self, c_name: str, vala_name: str, c_header: str, comment: str = None):
        self.comment = utils.reformat_comment(comment)
        self.c_name = c_name
        self.vala_name = vala_name
        self.c_header = c_header
    def is_simple_type(self, repo: "Repository") -> bool:
        """Whether the type can be passed by value in generated bindings."""
        raise NotImplementedError
    def gen_vala_code(self, repo: "Repository") -> List[str]:
        """Emit the Vala declaration for this type as a list of lines."""
        raise NotImplementedError
class SimpleType(Type):
    """A pass-by-value type that needs no generated Vala declaration."""
    def __init__(self, c_name: str, vala_name: str, c_header: str, comment: str = None):
        super().__init__(c_name, vala_name, c_header, comment)
    def gen_vala_code(self, repo: "Repository") -> List[str]:
        # nothing to declare: the type already exists on the Vala side
        return []
    def is_simple_type(self, repo: "Repository") -> bool:
        return True
class Enum(Type):
    """A C enum mapped to a Vala `public enum` declaration."""
    def __init__(self, c_name: str, vala_name: str, c_header: str, values: List[EnumValue], comment: str = None):
        super().__init__(c_name, vala_name, c_header, comment)
        self.values = values
    def is_simple_type(self, repo: "Repository") -> bool:
        return True
    def __repr__(self):
        return "enum %s" % self.vala_name
    def gen_vala_code(self, repo: "Repository") -> List[str]:
        buf = []
        if self.comment:
            buf.extend(utils.vala_comment(self.comment, valadoc=True))
        buf.extend([
            '[CCode (cname="%s", cheader_filename="%s", has_type_id=false)]' % (self.c_name, self.c_header),
            'public enum %s {' % self.vala_name,
        ])
        n_values = len(self.values)
        for i, value in enumerate(self.values):
            if value.comment:
                buf.extend('    ' + line for line in utils.vala_comment(value.comment, valadoc=True))
            buf.append('    [CCode (cname="%s")]' % value.c_name)
            # comma between members, semicolon after the last one
            buf.append('    %s%s' % (value.vala_name, "," if i < n_values - 1 else ";"))
        buf.append('}')
        return buf
class Function(Type):
    """A free function (or method) that can be emitted as Vala and/or C code.

    When `body` is given the function is a generated glue function and its
    implementation lines are emitted too; otherwise only the declaration.
    """
    def __init__(self, c_name: str, vala_name: str, c_header: str, ret_type: str = None,
                 params: List[Tuple[str, str]] = None, body: List[str] = None, comment: str = None,
                 vala_generics: List[str] = None, vala_simple_generics: bool = False):
        super().__init__(c_name, vala_name, c_header, comment)
        self.vala_simple_generics = vala_simple_generics
        self.vala_generics = vala_generics
        self.params = params
        # normalize C 'void' to "no return type"
        self.ret_type = ret_type if ret_type != 'void' else None
        self.body = body
        # set to True when the function is used as a Vala constructor
        self.construct = False
    def gen_vala_code(self, repo: "Repository") -> List[str]:
        params = repo.vala_param_list(self.params, self.c_name, generics=self.vala_generics)
        ret_type = repo.vala_ret_type(self.ret_type, generics=self.vala_generics)
        buf = []
        if self.comment:
            buf.extend(utils.vala_comment(self.comment, valadoc=True))
        buf.extend([
            '[CCode (cname="%s", cheader_filename="%s"%s)]' % (
                self.c_name, self.c_header,
                ', simple_generics=true' if self.vala_simple_generics else ''),
            'public %s %s%s(%s)%s' % (
                # constructors have no return type in Vala
                ret_type if not self.construct else '',
                self.vala_name,
                '<%s>' % (','.join(self.vala_generics),) if self.vala_generics else '',
                ', '.join(params),
                ';' if self.body is None else ' {'
            )
        ])
        if self.body is not None:
            body: List[str] = self.body
            buf.extend('    ' + line for line in body)
            buf.append("}")
        return buf
    def gen_c_header(self, repo: "Repository") -> List[str]:
        """C prototype only (no body)."""
        return self._gen_c_code(repo, False)
    def gen_c_code(self, repo: "Repository") -> List[str]:
        """C prototype plus implementation when a body exists."""
        return self._gen_c_code(repo, True)
    def _gen_c_code(self, repo: "Repository", gen_body: bool) -> List[str]:
        params = repo.c_param_list(self.params)
        ret_type = repo.c_ret_type(self.ret_type)
        buf = []
        if self.c_header:
            # c_header may list several headers separated by ';'
            buf.extend('#include "%s"' % h for h in self.c_header.split(';'))
        buf.extend([
            '%s %s(%s)%s' % (
                ret_type,
                self.c_name,
                ', '.join(params),
                ';' if not gen_body or self.body is None else ' {'
            )
        ])
        if gen_body and self.body is not None:
            body: List[str] = self.body
            buf.extend('    ' + line for line in body)
            buf.append("}")
        return buf
    def is_simple_type(self, repo: "Repository") -> bool:
        return False
class OpaqueClass(Type):
    """An opaque C struct exposed as a Vala `[Compact]` class.

    Free/copy functions are derived from `basename` by convention
    ("<basename>free" / "<basename>copy"); methods are attached later
    via `add_method`.
    """
    def __init__(self, basename: str, c_type: str, c_name: str, vala_name: str, c_header: str, comment: str = None):
        super().__init__(c_name, vala_name, c_header, comment)
        self.basename = basename
        self.c_type = c_type
        self.create_func = None
        self.free_func = basename + "free"
        self.copy_func = basename + "copy"
        self.methods = []
    def is_simple_type(self, repo: "Repository") -> bool:
        return False
    def add_method(self, func: "Function"):
        """Classify `func` as constructor (returns this type, no params) or
        instance method (first param is this type)."""
        if self.c_name == func.ret_type and not func.params:
            self.create_func = func.c_name
        elif func.params[0][0] == self.c_name:
            self.methods.append(func)
        else:
            raise NotImplementedError(func)
    def gen_vala_code(self, repo: "Repository") -> List[str]:
        buf = []
        if self.comment:
            buf.extend(utils.vala_comment(self.comment, valadoc=True))
        ccode = {
            'cname': '"%s"' % self.c_type,
            'cheader_filename': '"%s"' % self.c_header,
            'has_type_id': 'false',
        }
        if self.free_func:
            ccode['free_function'] = '"%s"' % self.free_func
        if self.copy_func:
            ccode['copy_function'] = '"%s"' % self.copy_func
        buf.append('[CCode (%s)]' % ', '.join('%s=%s' % e for e in ccode.items()))
        buf.append('[Compact]')
        buf.append('public class %s {' % self.vala_name)
        if self.create_func:
            buf.append('    [CCode (cname="%s")]' % self.create_func)
            buf.append('    public %s();' % self.vala_name)
        if self.methods:
            for method in self.methods:
                # drop the implicit `self` parameter before emitting
                del method.params[0]
                # NOTE(review): strips a fixed-length prefix from the Vala
                # name (basename minus 4 chars) — confirm the naming scheme.
                method.vala_name = method.vala_name[len(self.basename)-4:]
                for line in method.gen_vala_code(repo):
                    buf.append("    " + line)
        buf.append('}')
        return buf
class Struct(Type):
    """A bound C struct, rendered either as a plain Vala struct or — when
    ``is_class`` is set — as a reference-counted [Compact] class.

    Subclass structs embed their parent as the first C member; methods and
    virtual-function slots are attached after construction.

    Fix: removed a leftover debug ``print("!!!", ...)`` that spammed stdout
    for every char-typed member during generation.
    """

    def __init__(self, c_name: str, vala_name: str, c_header: str, members: List["StructMember"], comment: str = None,
                 virtual_funcs: List["StructVirtualFunc"] = None):
        super().__init__(c_name, vala_name, c_header, comment)
        self.virtual_funcs = virtual_funcs
        self.members = members
        self.parent: Struct = None       # set later via set_parent()
        self.methods: List[Function] = []
        self.is_class: bool = False
        self.ref_func: str = None
        self.unref_func: str = None

    def set_parent(self, parent: "Struct"):
        """Declare the struct this one embeds/extends."""
        self.parent = parent

    def set_is_class(self, is_class: bool):
        """Switch between Vala struct and compact-class rendering."""
        self.is_class = is_class

    def set_ref_counting(self, ref_func: str, unref_func: str):
        """Register the C ref/unref functions used by the class binding."""
        self.ref_func = ref_func
        self.unref_func = unref_func

    def add_method(self, method: Function):
        self.methods.append(method)

    def is_simple_type(self, repo: "Repository") -> bool:
        return False

    def gen_vala_code(self, repo: "Repository") -> List[str]:
        """Render the Vala declaration for this struct/class."""
        buf = []
        if self.comment:
            buf.extend(utils.vala_comment(self.comment, valadoc=True))
        ccode = {
            'cname': '"%s"' % self.c_name,
            'cheader_filename': '"%s,valacef.h"' % self.c_header,
            'has_type_id': 'false',
        }
        if self.is_class:
            buf.append('[Compact]')
            struct_type = 'class'
            if self.ref_func:
                ccode['ref_function'] = '"%s"' % self.ref_func
            if self.unref_func:
                ccode['unref_function'] = '"%s"' % self.unref_func
        else:
            struct_type = 'struct'
            ccode['destroy_function'] = '""'
        buf.append('[CCode (%s)]' % ', '.join('%s=%s' % e for e in ccode.items()))
        if self.parent:
            buf.append('public %s %s: %s {' % (struct_type, self.vala_name, self.parent.vala_name))
        else:
            buf.append('public %s %s {' % (struct_type, self.vala_name))
        for member in self.members:
            type_info = utils.parse_c_type(member.c_type)
            vala_type = repo.resolve_c_type(type_info.c_type)
            # C strings map to nullable Vala strings; string arrays stay raw.
            if member.c_type == 'char*':
                m_type = 'string?'
            elif member.c_type == 'char**':
                m_type = 'char**'
            else:
                m_type = vala_type.vala_name
                if type_info.pointer:
                    m_type += '?'
            if member.comment:
                buf.extend(' ' + line for line in utils.vala_comment(member.comment, valadoc=True))
            if member.c_name != member.vala_name:
                buf.append(' [CCode (cname="%s")]' % member.c_name)
            buf.append(' public %s %s;' % (m_type, member.vala_name))
        for method in self.methods:
            if method.construct:
                break
        else:
            # No constructor bound: emit a protected no-op constructor so the
            # class cannot be instantiated directly from Vala.
            buf.append(' protected %s(){}' % self.vala_name)
        for method in self.methods:
            buf.extend(' ' + line for line in method.gen_vala_code(repo))
        for vfunc in self.virtual_funcs or []:
            # The self parameter (params[0]) is implicit in Vala.
            params = repo.vala_param_list(vfunc.params[1:], vfunc_of_class=self.c_name)
            ret_type = repo.vala_ret_type(vfunc.ret_type)
            if ret_type == "StringUserfree":
                # The generated C trampoline converts the userfree UTF-16
                # result to a plain UTF-8 string (see Delegate._gen_c_code).
                ret_type = "string?"
            if vfunc.comment:
                buf.extend(' ' + line for line in utils.vala_comment(vfunc.comment, valadoc=True))
            buf.extend([
                ' [CCode (cname="%s", cheader_filename="valacef_api.h")]' % vfunc.c_name,
                ' public %s %s(%s);' % (ret_type, vfunc.vala_name, ', '.join(params)),
            ])
        buf.append('}')
        return buf

    def gen_c_header(self, repo: "Repository") -> List[str]:
        """C output for the header file (method prototypes only)."""
        return self._gen_c_code(repo, 'gen_c_header')

    def gen_c_code(self, repo: "Repository") -> List[str]:
        """C output including method bodies."""
        return self._gen_c_code(repo, 'gen_c_code')

    def _gen_c_code(self, repo: "Repository", generator: str) -> List[str]:
        """Render the C typedef plus each method via the named generator
        method ('gen_c_header' or 'gen_c_code').

        NOTE(review): assumes set_parent() was called — ``self.parent`` is
        dereferenced unconditionally here.
        """
        buf = [
            '#include "%s"' % self.parent.c_header,
        ]
        if self.c_header:
            buf.extend('#include "%s"' % h for h in self.c_header.split(';'))
        buf.extend([
            'typedef struct {',
            ' %s parent;' % self.parent.c_name,
        ])
        for member in self.members:
            type_info = utils.parse_c_type(member.c_type)
            vala_type = repo.resolve_c_type(type_info.c_type)
            buf.append(' %s%s %s;' % ('volatile ' if type_info.volatile else '', vala_type.c_name, member.c_name))
        buf.append('} %s;' % self.c_name)
        for method in self.methods:
            buf.extend(' ' + line for line in getattr(method, generator)(repo))
        return buf
class StructMember:
    """One field of a bound C struct."""

    def __init__(self, c_type: str, c_name: str, vala_name: str, comment: str = None):
        self.c_name = c_name
        self.vala_name = vala_name
        self.c_type = c_type
        # Normalised through the shared re-formatter (5 leading chars stripped).
        self.comment = utils.reformat_comment(comment, strip_chars=5)
class StructVirtualFunc:
    """Descriptor of a virtual-function slot on a bound struct."""

    def __init__(self, c_name: str, vala_name: str, ret_type: str = None, params: List[Tuple[str, str]] = None,
                 comment: str = None):
        self.c_name = c_name
        self.vala_name = vala_name
        self.params = params
        # A C 'void' return is modelled as "no return type".
        self.ret_type = None if ret_type == 'void' else ret_type
        self.comment = utils.reformat_comment(comment, strip_chars=5)
class Typedef(Type):
    """A C ``typedef`` bound as a Vala struct alias."""

    def __init__(self, c_name: str, vala_name: str, c_type: str, c_header: str):
        super().__init__(c_name, vala_name, c_header)
        # The aliased C type (e.g. 'int', 'uint32', or another bound type).
        self.c_type = c_type

    def is_simple_type(self, repo: "Repository") -> bool:
        """A typedef is simple iff the aliased type is (transitively) a
        Vala value type."""
        c_type = self.c_type
        if c_type in VALA_TYPES or c_type in VALA_ALIASES:
            return True
        return repo.c_types[c_type].is_simple_type(repo)

    def gen_vala_code(self, repo: "Repository") -> List[str]:
        """Render the Vala struct alias; ``void*`` typedefs emit nothing."""
        buf = []
        c_type = self.c_type
        if c_type != 'void*':
            simple_type = self.is_simple_type(repo)
            # Resolve the Vala base type the alias derives from.
            if c_type in VALA_TYPES:
                base_type = c_type
            elif c_type in VALA_ALIASES:
                base_type = VALA_ALIASES[c_type]
            else:
                c_type_obj = repo.c_types[c_type]
                base_type = c_type_obj.vala_name
            if simple_type:
                # Value-type alias: inherits from the base Vala type.
                buf.append('[SimpleType]')
                buf.append('[CCode (cname="%s", has_type_id=false)]' % self.c_name)
                buf.append('public struct %s : %s {' % (self.vala_name, base_type))
                buf.append('}')
            else:
                # Opaque alias: an empty struct carrying only the C name.
                buf.append('[CCode (cname="%s", has_type_id=false)]' % self.c_name)
                buf.append('public struct %s{' % self.vala_name)
                buf.append('}')
        return buf
class Delegate(Type):
    """A C function-pointer type bound as a Vala delegate.

    When ``vfunc_name`` is set, the delegate represents a virtual-function
    slot of ``vfunc_of_class`` and additionally generates a C trampoline
    ``<class>_<vfunc>()`` that invokes the slot through ``self``.
    """

    def __init__(self, c_name: str, vala_name: str, c_header: str, ret_type: str = None,
                 params: List[Tuple[str, str]] = None, vfunc_of_class=None, vfunc_name=None):
        super().__init__(c_name, vala_name, c_header)
        self.vfunc_name = vfunc_name
        # A C 'void' return is normalised to "no return type".
        self.ret_type = ret_type if ret_type != 'void' else None
        self.params = params
        self.vfunc_of_class = vfunc_of_class

    def gen_vala_code(self, repo: "Repository") -> List[str]:
        """Render the (targetless) Vala delegate declaration."""
        params = repo.vala_param_list(self.params, vfunc_of_class=self.vfunc_of_class)
        ret_type = repo.vala_ret_type(self.ret_type)
        buf = [
            '[CCode (cname="%s", cheader_filename="%s", has_target = false)]' % (
                self.c_name, self.c_header),
            'public delegate %s %s(%s);' % (ret_type, self.vala_name, ', '.join(params)),
        ]
        return buf

    def _gen_c_code(self, repo: "Repository", body: bool) -> List[str]:
        """Render the C typedef plus, for vfunc slots, the trampoline.

        ``body`` selects between prototype-only (header) and full definition.
        """
        params = repo.c_param_list(self.params)
        ret_type = repo.c_ret_type(self.ret_type)
        buf = []
        if self.c_header:
            buf.extend('#include "%s"' % h for h in self.c_header.split(';'))
        # Pull in the headers declaring the return and parameter types.
        if self.ret_type:
            header = repo.resolve_c_type(utils.parse_c_type(ret_type).c_type).c_header
            if header:
                buf.append('#include "%s"' % header)
        if self.params:
            headers = (repo.resolve_c_type(utils.parse_c_type(h[0]).c_type).c_header for h in self.params)
            buf.extend('#include "%s"' % h for h in headers if h)
        buf.extend([
            'typedef %s (*%s)(%s);' % (
                ret_type,
                self.c_name,
                ', '.join(params)
            )
        ])
        if self.vfunc_name:
            # Trampoline: userfree UTF-16 results are converted below to a
            # plain char*, hence the changed return type in that case.
            buf.extend([
                '%s %s_%s(%s)%s' % (
                    ret_type if ret_type != 'cef_string_userfree_t' else 'char*',
                    self.vfunc_of_class,
                    self.vfunc_name,
                    ', '.join(params),
                    ' {' if body else ';'
                )
            ])
            if body:
                # Forward all parameters to the slot stored on `self`.
                call = 'self->%s(%s);' % (self.vfunc_name, ', '.join(p[1] for p in self.params))
                if ret_type == 'void':
                    buf.append(' ' + call)
                elif ret_type == 'cef_string_userfree_t':
                    # Convert the userfree UTF-16 result to UTF-8, free the
                    # original, and return the UTF-8 buffer's str pointer.
                    buf.extend([
                        ' %s __utf16_str__ = %s' % (ret_type, call),
                        ' if (__utf16_str__ == NULL) return NULL;',
                        ' cef_string_utf8_t __utf8_str__ = {};',
                        ' cef_string_utf16_to_utf8(__utf16_str__->str, __utf16_str__->length, &__utf8_str__);',
                        ' cef_string_userfree_free(__utf16_str__);',
                        ' return __utf8_str__.str;'
                    ])
                else:
                    buf.append(' return ' + call)
                buf.append('}')
        return buf

    def gen_c_code(self, repo: "Repository") -> List[str]:
        """Full C definition (typedef + trampoline body)."""
        return self._gen_c_code(repo, True)

    def gen_c_header(self, repo: "Repository") -> List[str]:
        """Prototype-only C output for the header file."""
        return self._gen_c_code(repo, False)

    def is_simple_type(self, repo: "Repository") -> bool:
        # Function pointers are plain values in Vala.
        return True
class Repository:
    """Registry of every bound type, keyed by C name, plus the shared
    helpers used by the code generators (type resolution, parameter and
    return-type rendering for both C and Vala)."""

    enums: Dict[str, Enum]
    structs: Dict[str, Struct]
    typedefs: Dict[str, Typedef]
    c_types: Dict[str, Type]

    def __init__(self, vala_namespace: str, overrides: Any = None):
        # `overrides` may provide per-parameter hooks; see override_param().
        self.overrides = overrides
        self.vala_namespace = vala_namespace
        self.enums: Dict[str, Enum] = {}
        self.structs: Dict[str, Struct] = {}
        self.opaque_classes: Dict[str, OpaqueClass] = {}
        self.typedefs: Dict[str, Typedef] = {}
        self.delegates: Dict[str, Delegate] = {}
        self.functions: Dict[str, Function] = {}
        # Union of every registered type, for C-name lookup.
        self.c_types: Dict[str, Type] = {}
        self.basenames = {}

    def add_enum(self, enum: Enum):
        self.enums[enum.c_name] = enum
        self.c_types[enum.c_name] = enum

    def add_struct(self, *structs: Struct):
        for struct in structs:
            self.structs[struct.c_name] = struct
            self.c_types[struct.c_name] = struct

    def add_opaque_class(self, *classes: OpaqueClass):
        for klass in classes:
            self.opaque_classes[klass.c_name] = klass
            self.c_types[klass.c_name] = klass
            self.basenames[klass.basename] = klass

    def add_typedef(self, typedef: Typedef):
        self.typedefs[typedef.c_name] = typedef
        self.c_types[typedef.c_name] = typedef

    def add_delegate(self, delegate: Delegate):
        # Delegates without a C name are keyed by their Vala name.
        self.delegates[delegate.c_name or delegate.vala_name] = delegate
        self.c_types[delegate.c_name or delegate.vala_name] = delegate

    def add_function(self, *functions: Function):
        for func in functions:
            self.functions[func.c_name] = func
            self.c_types[func.c_name] = func

    def resolve_c_type(self, c_type: str) -> Type:
        """Map a (possibly decorated) C type name to its Type object.

        Built-in Vala types, aliases and GLib types are synthesised on the
        fly; anything else must have been registered beforehand, otherwise
        NotImplementedError is raised.
        """
        c_type = utils.bare_c_type(c_type)
        if c_type in VALA_TYPES:
            return SimpleType(c_type, c_type, "")
        if c_type in VALA_ALIASES:
            return self.resolve_c_type(VALA_ALIASES[c_type])
        if c_type in GLIB_TYPES:
            return SimpleType(c_type + "*", GLIB_TYPES[c_type], "")
        try:
            return self.c_types[c_type]
        except KeyError:
            raise NotImplementedError(c_type)

    def __repr__(self):
        # Debug view: currently only lists the registered enums.
        buf = []
        for enum in self.enums.values():
            buf.append(repr(enum))
        return '\n'.join(buf)

    def gen_vala_code(self):
        """Render the whole namespace as one Vala source string."""
        buf = ['namespace %s {\n' % self.vala_namespace]
        entries = self.enums, self.delegates, self.functions, self.typedefs, self.opaque_classes, self.structs
        for entry in chain.from_iterable(e.values() for e in entries):
            for line in entry.gen_vala_code(self):
                buf.extend((' ', line, '\n'))
        buf.append('} // namespace %s\n' % self.vala_namespace)
        return ''.join(buf)

    def c_ret_type(self, c_type: str = None) -> str:
        # C spelling of a return type; None means 'void'.
        return c_type if c_type else 'void'

    def vala_ret_type(self, c_type: str = None, generics: List[str] = None) -> str:
        """Vala spelling of a return type: generic type parameters are
        returned unowned, ``char*`` becomes a nullable string, pointers
        become nullable."""
        if generics and c_type in generics:
            return "unowned " + c_type
        if c_type == 'char*':
            return 'string?'
        if c_type is None:
            return "void"
        type_info = utils.parse_c_type(c_type)
        ret_type = self.resolve_c_type(type_info.c_type).vala_name
        if type_info.pointer:
            ret_type += "?"
        return ret_type

    def vala_param_list(self, params: List[Tuple[str, str]] = None, name: str = None, vfunc_of_class: str = None,
                        generics: List[str] = None) -> List[str]:
        """Render C ``(type, name)`` pairs as Vala parameter declarations.

        Conventions handled here:
        * a ``size_t <x>count`` parameter is folded away and turned into
          CCode array-length metadata on the array parameter that follows;
        * generic type parameters are passed ``owned``;
        * reference-counted struct classes are passed ``owned`` unless the
          parameter is the vfunc's own self (CEF ref-counting rules);
        * ``char*`` becomes a nullable Vala string.
        Per-parameter overrides (``param__<name>__<p_name>`` hooks on
        self.overrides) may mutate the parsed type info first.
        """
        vala_params = []
        if params is not None:
            array_size = None
            skipped_params = 0
            for i, (p_type, p_name) in enumerate(params):
                # Positions shift left for every folded-away count param.
                i -= skipped_params
                if p_type == "size_t" and p_name.lower().endswith("count"):
                    # Remember position/type/name; the fractional offset
                    # places array_length_pos just before the next argument.
                    array_size = (i + 1 - 0.1, p_type, p_name)
                    skipped_params += 1
                    continue
                if generics and p_type in generics:
                    param = "owned " + p_type
                    assert not array_size
                else:
                    type_info = utils.parse_c_type(p_type)
                    if name:
                        self.override_param(name, p_name, type_info)
                    param = ""
                    if array_size:
                        # This parameter is the array whose length precedes it.
                        if type_info.out:
                            type_info.out = False
                        elif type_info.pointer:
                            type_info.pointer = False
                        type_info.array = True
                        param = '[CCode(array_length_pos=%s, array_length_type="%s")] ' % array_size[0:2]
                        # Array name must equal the count name minus "count".
                        assert p_name == array_size[2][:-5], (p_name, array_size[2])
                        array_size = None
                    elif type_info.ref:
                        param += 'ref '
                    elif type_info.out:
                        param += 'out '
                    else:
                        try:
                            # CEF reference counting: When passing a struct to delegate/function,
                            # increase ref unless it is a self-param of vfunc of that struct.
                            if self.structs[type_info.c_type].is_class and type_info.c_type != vfunc_of_class:
                                param += "owned "
                        except KeyError:
                            pass
                    vala_type = self.resolve_c_type(type_info.c_type).vala_name
                    if vala_type == 'String' and type_info.pointer:
                        param += '' + vala_type + '*'
                    elif vala_type == 'char' and type_info.pointer:
                        param += 'string?'
                    else:
                        param += vala_type
                        if type_info.pointer:
                            param += "?"
                    if type_info.array:
                        param += "[]"
                param += ' ' + p_name
                vala_params.append(param)
        return vala_params

    def c_param_list(self, params: List[Tuple[str, str]] = None) -> List[str]:
        """Render ``(type, name)`` pairs as plain C parameter declarations."""
        c_params = []
        if params is not None:
            for p_type, p_name in params:
                c_params.append('%s %s' % (p_type, p_name))
        return c_params

    def override_param(self, name: str, p_name: str, type_info: TypeInfo) -> TypeInfo:
        """Apply the optional ``param__<name>__<p_name>`` hook from
        self.overrides; when no hook exists the info is returned unchanged."""
        try:
            return getattr(self.overrides, 'param__%s__%s' % (name, p_name))(type_info)
        except AttributeError as e:
            return type_info
|
59523
|
import pytest
import tempfile
import os
import io
import logging
from cellpy import log
from cellpy import prms
from cellpy import prmreader
from . import fdv
# Run the whole test module with verbose cellpy logging.
log.setup_logging(default_level="DEBUG")
config_file_txt = """---
Batch:
color_style_label: seaborn-deep
dpi: 300
fig_extension: png
figure_type: unlimited
markersize: 4
symbol_label: simple
DataSet:
nom_cap: 3579
Db:
db_data_start_row: 2
db_header_row: 0
db_search_end_row: -1
db_search_start_row: 2
db_table_name: db_table
db_type: simple_excel_reader
db_unit_row: 1
DbCols:
active_material: !!python/tuple
- mass_active_material
- float
batch: !!python/tuple
- batch
- str
cell_name: !!python/tuple
- cell
- str
cell_type: !!python/tuple
- cell_type
- cat
cellpy_file_name: !!python/tuple
- cellpy_file_name
- str
comment_cell: !!python/tuple
- comment_cell
- str
comment_general: !!python/tuple
- comment_general
- str
comment_slurry: !!python/tuple
- comment_slurry
- str
exists: !!python/tuple
- exists
- bol
experiment_type: !!python/tuple
- experiment_type
- cat
file_name_indicator: !!python/tuple
- file_name_indicator
- str
freeze: !!python/tuple
- freeze
- bol
group: !!python/tuple
- group
- int
id: !!python/tuple
- id
- int
label: !!python/tuple
- label
- str
loading: !!python/tuple
- loading_active_material
- float
project: !!python/tuple
- project
- str
raw_file_names: !!python/tuple
- raw_file_names
- list
selected: !!python/tuple
- selected
- bol
sub_batch_01: !!python/tuple
- b01
- str
sub_batch_02: !!python/tuple
- b02
- str
sub_batch_03: !!python/tuple
- b03
- str
sub_batch_04: !!python/tuple
- b04
- str
sub_batch_05: !!python/tuple
- b05
- str
sub_batch_06: !!python/tuple
- b06
- str
sub_batch_07: !!python/tuple
- b07
- str
total_material: !!python/tuple
- mass_total
- float
FileNames: {}
Instruments:
custom_instrument_definitions_file: null
tester: arbin
Arbin:
chunk_size: null
detect_subprocess_need: false
max_chunks: null
max_res_filesize: 150000000
office_version: 64bit
sub_process_path: None
use_subprocess: false
Paths:
cellpydatadir: cellpy_data/h5
db_filename: cellpy_db.xlsx
db_path: cellpy_data/db
filelogdir: cellpy_data/log
outdatadir: cellpy_data/out
rawdatadir: cellpy_data/raw
Reader:
auto_dirs: true
cellpy_datadir: null
chunk_size: null
cycle_mode: anode
daniel_number: 5
ensure_step_table: false
filestatuschecker: size
force_all: false
force_step_table_creation: true
last_chunk: null
limit_loaded_cycles: null
load_only_summary: false
max_chunks: null
max_res_filesize: 150000000
raw_datadir: null
select_minimal: false
sep: ;
sorted_data: true
use_cellpy_stat_file: false
...
"""
# In-memory file-like view of the YAML config text above.
config_file = io.StringIO(config_file_txt)
@pytest.fixture(scope="module")
def cellpy_data_instance():
    """Provide one CellpyData instance shared by all tests in this module."""
    from cellpy import cellreader
    return cellreader.CellpyData()
@pytest.fixture()
def clean_dir():
    """Yield a fresh empty temporary directory, removed after the test.

    Fix: the previous implementation used ``tempfile.mkdtemp`` and never
    deleted the directory, leaking one temp dir per test run.
    """
    with tempfile.TemporaryDirectory() as new_path:
        yield new_path
def test_set_prm_inside_cellpy(cellpy_data_instance):
    # TODO: not implemented yet — placeholder test body.
    pass
def test_save_prm_file(clean_dir):
    """Round-trip check: write config -> read -> modify prms -> write ->
    re-read, then assert the modified value survived."""
    tmp_config_file_name = os.path.join(clean_dir, "cellpy_test_config_1.yml")
    with open(tmp_config_file_name, "w") as f:
        f.write(config_file_txt)
    prmreader._read_prm_file(tmp_config_file_name)
    # Mutate two settings so the written file differs from the original.
    prms.Instruments["tester"] = "biologics"
    prms.Reader.cycle_mode = "cathode"
    prmreader._write_prm_file(tmp_config_file_name)
    prmreader._read_prm_file(tmp_config_file_name)
    assert prms.Instruments.tester == "biologics"
    # with open(tmp_config_file_name, "r") as f:
    #     lines = f.readlines()
    #     for line in lines:
    #         print(line, end="")
|
59539
|
import asyncio
class Barrier(object):
    """A reusable asyncio barrier.

    ``parties`` tasks must reach :meth:`wait` before all of them are
    released together; ``action`` runs exactly once per release, in the
    task that completes the barrier.

    Fix: ``with (await self._cond)`` was deprecated in Python 3.7 and
    removed in 3.9 (it raises TypeError); the condition is now entered
    with ``async with``, and the counters are mutated while holding it.
    """

    def __init__(self, parties, action=lambda: None):
        self._parties = parties
        self._action = action
        self._cond = asyncio.Condition()
        self._count = 0

    async def wait(self):
        """Block until ``parties`` tasks have arrived; the arriving task
        that completes the barrier returns immediately, the rest wake up
        from the condition."""
        async with self._cond:
            self._count += 1
            if self._maybe_release():
                return
            await self._cond.wait()

    async def deregister(self):
        """Remove one party; releases current waiters if they now suffice."""
        async with self._cond:
            self._parties -= 1
            self._maybe_release()

    @property
    def empty(self):
        # True once every party has deregistered.
        return self._parties == 0

    @property
    def n_waiting(self):
        # Number of parties that have arrived and not yet been released.
        return self._count

    @property
    def parties(self):
        return self._parties

    def _maybe_release(self):
        # Caller must hold self._cond.
        if self._count == self._parties:
            # Release everyone
            self._cond.notify_all()
            self._count = 0
            self._action()
            return True
        return False
|
59550
|
import datetime
import json
from nameko.events import EventDispatcher, event_handler
from simplebank.chassis import init_logger, init_statsd
class FeesService:
    """Nameko service that charges fees when the market service reports a
    placed order."""

    name = "fees_service"
    # StatsD client under the demo prefix; 'statsd' is the config key.
    statsd = init_statsd('simplebank-demo.fees', 'statsd')
    logger = init_logger()

    @event_handler("market_service", "order_placed")
    @statsd.timer('charge_fee')
    def charge_fee(self, payload):
        """Handle an ``order_placed`` event: log the payload (used as the
        correlation uuid) and return it unchanged; timing is reported via
        the ``charge_fee`` statsd timer."""
        self.logger.debug(
            "this is a debug message from fees service", extra={"uuid": payload})
        self.logger.info("charging fees", extra={
            "uuid": payload})
        return payload
|
59680
|
import sys
sys.path.append('..')
import torch as th
import torch.nn as nn
import geoopt as gt
from util.hyperop import *
class hyperRNN(nn.Module):
    """Vanilla RNN operating on the Poincaré ball.

    Each step applies the Möbius-linear transition
    h' = (W ⊗ h) ⊕ (U ⊗ x) ⊕ b and the stacked per-step hidden states
    are returned. Assumes inputs are batch-first — the forward pass
    iterates over ``inputs.transpose(0, 1)``.
    """

    def __init__(self, input_size, hidden_size, d_ball, default_dtype=th.float64):
        super(hyperRNN, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.d_ball = d_ball
        self.default_dtype = default_dtype
        # Uniform init with the usual 1/sqrt(hidden_size) scale.
        k = (1 / hidden_size)**0.5
        self.w = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.u = gt.ManifoldParameter(gt.ManifoldTensor(input_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.b = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, manifold=gt.PoincareBall()).zero_())

    def transition(self, x, h):
        """One Möbius-linear recurrent step: (W⊗h) ⊕ (U⊗x) ⊕ b."""
        W_otimes_h = mob_mat_mul_d(self.w, h, self.d_ball)
        U_otimes_x = mob_mat_mul_d(self.u, x, self.d_ball)
        Wh_plus_Ux = mob_add(W_otimes_h, U_otimes_x)
        return mob_add(Wh_plus_Ux, self.b)

    def init_rnn_state(self, batch_size, hidden_size, cuda_device):
        """Zero initial state of shape (batch, hidden, d_ball).

        Bug fix: the original referenced a free variable ``d_ball``
        (NameError at runtime); it now uses ``self.d_ball``, matching
        hyperGRU.init_gru_state.
        """
        return th.zeros((batch_size, hidden_size, self.d_ball), dtype=self.default_dtype, device=cuda_device)

    def forward(self, inputs):
        hidden = self.init_rnn_state(inputs.shape[0], self.hidden_size, inputs.device)
        outputs = []
        for x in inputs.transpose(0, 1):
            hidden = self.transition(x, hidden)
            outputs += [hidden]
        return th.stack(outputs).transpose(0, 1)
class GRUCell(nn.Module):
    """Single hyperbolic GRU cell on the Poincaré ball.

    Gates are computed in tangent space (log map at the origin followed by
    a sigmoid); the state update uses Möbius addition / pointwise products.
    """

    def __init__(self, input_size, hidden_size, d_ball):
        super(GRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.d_ball = d_ball
        k = (1 / hidden_size)**0.5
        # Hidden-to-hidden (w_*) and input-to-hidden (u_*) weights plus
        # hyperbolic biases (b_*) for the z, r and candidate transforms.
        self.w_z = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.w_r = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.w_h = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.u_z = gt.ManifoldParameter(gt.ManifoldTensor(input_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.u_r = gt.ManifoldParameter(gt.ManifoldTensor(input_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.u_h = gt.ManifoldParameter(gt.ManifoldTensor(input_size, d_ball, hidden_size, d_ball).uniform_(-k, k))
        self.b_z = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, manifold=gt.PoincareBall()).zero_())
        self.b_r = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, manifold=gt.PoincareBall()).zero_())
        self.b_h = gt.ManifoldParameter(gt.ManifoldTensor(hidden_size, d_ball, manifold=gt.PoincareBall()).zero_())

    def transition(self, W, h, U, x, hyp_b):
        """Möbius-linear map: (W⊗h) ⊕ (U⊗x) ⊕ b."""
        W_otimes_h = mob_mat_mul_d(W, h, self.d_ball)
        U_otimes_x = mob_mat_mul_d(U, x, self.d_ball)
        Wh_plus_Ux = mob_add(W_otimes_h, U_otimes_x)
        return mob_add(Wh_plus_Ux, hyp_b)

    def forward(self, hyp_x, hidden):
        """One GRU step; returns the new hidden point on the ball."""
        z = self.transition(self.w_z, hidden, self.u_z, hyp_x, self.b_z)
        z = th.sigmoid(log_map_zero(z))
        r = self.transition(self.w_r, hidden, self.u_r, hyp_x, self.b_r)
        r = th.sigmoid(log_map_zero(r))
        r_point_h = mob_pointwise_prod(hidden, r)
        # Bug fix: the candidate state must use the input weights u_h — the
        # original passed self.u_r here, leaving self.u_h entirely unused.
        h_tilde = self.transition(self.w_h, r_point_h, self.u_h, hyp_x, self.b_h)
        # h_tilde = th.tanh(log_map_zero(h_tilde)) # non-linearity
        minus_h_oplus_htilde = mob_add(-hidden, h_tilde)
        new_h = mob_add(hidden, mob_pointwise_prod(minus_h_oplus_htilde, z))
        return new_h
class hyperGRU(nn.Module):
    """Sequence wrapper around GRUCell: applies the cell step by step and
    stacks every hidden state along the time axis (batch-first output)."""

    def __init__(self, input_size, hidden_size, d_ball, default_dtype=th.float64):
        super(hyperGRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.default_dtype = default_dtype
        self.d_ball = d_ball
        self.gru_cell = GRUCell(input_size, hidden_size, d_ball)

    def init_gru_state(self, batch_size, hidden_size, cuda_device):
        """Zero hidden state of shape (batch, hidden, d_ball)."""
        shape = (batch_size, hidden_size, self.d_ball)
        return th.zeros(shape, dtype=self.default_dtype, device=cuda_device)

    def forward(self, inputs):
        state = self.init_gru_state(inputs.shape[0], self.hidden_size, inputs.device)
        per_step = []
        # Walk the sequence dimension, carrying the hidden state forward.
        for step_input in inputs.transpose(0, 1):
            state = self.gru_cell(step_input, state)
            per_step.append(state)
        return th.stack(per_step).transpose(0, 1)
|
59686
|
from threading import Lock
from typing import Dict, Set, Union
from zemberek.core.turkish.phonetic_attribute import PhoneticAttribute
class AttributeToSurfaceCache:
    """Cache mapping a set of phonetic attributes to a surface string.

    Unlike the upstream implementation, the key is the hash of the
    concatenated short forms of the attributes.
    """

    def __init__(self):
        self.attribute_map: Dict[int, str] = {}
        self.lock = Lock()

    @staticmethod
    def _key(attributes):
        # Key = hash of the concatenated short forms, in set iteration order.
        return hash("".join(a.get_string_form() for a in attributes))

    def add_surface(self, attributes: Set[PhoneticAttribute], surface: str):
        """Store *surface* under the key derived from *attributes*.

        Method changed. Instead of original, this method uses hash value
        of concatenated short form of phonetic attributes as key.
        """
        with self.lock:
            self.attribute_map[self._key(attributes)] = surface

    def get_surface(self, attributes: Set[PhoneticAttribute]) -> Union[str, None]:
        """Return the cached surface for *attributes*, or None when absent.

        Method changed. Instead of original, this method uses hash value
        of concatenated short form of phonetic attributes as key.
        """
        return self.attribute_map.get(self._key(attributes))
|
59691
|
from .embedding.pca import run_pca
from .embedding.umap import run_umap
from .embedding.ica import run_ica
# from .embedding.scvi import run_ldvae
from .embedding.fa import run_fa
from .embedding.diffmap import run_diffmap
|
59724
|
import click
import svdtools
@click.group()
@click.version_option(svdtools.__version__, prog_name="svdtools")
def svdtools_cli():
    # Root command group; subcommands are attached via add_command() below.
    # (No docstring on purpose: click would surface it as help text.)
    pass
@click.command()
@click.argument("yaml-file")
def patch(yaml_file):
    """Patches an SVD file as specified by a YAML file"""
    # Thin CLI wrapper; all the work happens in svdtools.patch.main.
    svdtools.patch.main(yaml_file)
@click.command()
@click.argument("yaml-file")
@click.argument("deps-file")
def makedeps(yaml_file, deps_file):
    """Generate Make dependency file listing dependencies for a YAML file."""
    # Thin CLI wrapper around svdtools.makedeps.main.
    svdtools.makedeps.main(yaml_file, deps_file)
@click.command()
@click.argument("svd-file")
@click.option(
    "--gaps/--no-gaps",
    default=True,
    help="Whether to print gaps in interrupt number sequence",
)
def interrupts(svd_file, gaps):
    """Print list of all interrupts described by an SVD file."""
    # `gaps` toggles reporting of holes in the interrupt numbering.
    print(svdtools.interrupts.main(svd_file, gaps))
@click.command()
@click.argument("svd-file")
def mmap(svd_file):
    """Generate text-based memory map of an SVD file."""
    # NOTE(review): this shadows the stdlib `mmap` module name at
    # module scope — fine as long as this file never imports it.
    print(svdtools.mmap.main(svd_file))
@click.command()
def version():
    """Version of svdtools library and tool."""
    print(svdtools.__version__)
# Attach every subcommand to the root group.
svdtools_cli.add_command(patch)
svdtools_cli.add_command(makedeps)
svdtools_cli.add_command(interrupts)
svdtools_cli.add_command(mmap)
svdtools_cli.add_command(version)
|
59776
|
from flask import g, current_app, jsonify
from sqlalchemy import asc, desc, func
from apps.interface.models.interfaceapimsg import InterfaceApiMsg
from apps.interface.models.interfacecase import InterfaceCase
from apps.interface.models.interfacemodule import InterfaceModule
from apps.interface.models.interfaceproject import InterfaceProject
from apps.interface.util.utils import *
from library.api.db import db
from library.api.transfer import transfer2json
class InterfaceModuleBusiness(object):
    """Business logic for interface-test modules: CRUD, ordering (num) and
    project-scoped queries; view-facing methods return Flask JSON."""

    @classmethod
    def project_permission(cls, pid=None, id=None):
        """Return 0 when the current user may access the project (admins
        always may), 1 otherwise; when *pid* is absent the project is
        looked up through the module *id*."""
        if g.is_admin:
            return 0
        if pid:
            return 0 if pid in g.projectid else 1
        else:
            ret = InterfaceModule.query.add_columns(InterfaceModule.project_id.label('projectid')).filter(
                InterfaceModule.id == id).first()
            return 0 if ret.projectid in g.projectid else 1

    @classmethod
    def _query(cls):
        # Shared SELECT with the column labels expected by transfer2json.
        return InterfaceModule.query.add_columns(
            InterfaceModule.id.label('id'),
            InterfaceModule.name.label('name'),
            InterfaceModule.project_id.label('projectid'),
            InterfaceModule.num.label('num'),
            InterfaceModule.weight.label('weight'),
            InterfaceModule.status.label('status'),
        )

    @classmethod
    @transfer2json('?id|!name|!projectid|!num|!weight|!status')
    def query_all_json(cls, limit, offset):
        """Page through all active modules, newest id first."""
        ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE) \
            .order_by(desc(InterfaceModule.id)) \
            .limit(limit).offset(offset).all()
        return ret

    @classmethod
    def module_create(cls, name, project_id, num):
        """Insert a module; returns (0, None) on success, (102, err) on failure."""
        try:
            m = InterfaceModule(
                name=name,
                project_id=project_id,
                num=num,
            )
            db.session.add(m)
            db.session.commit()
            return 0, None
        except Exception as e:
            current_app.logger.error(str(e))
            return 102, str(e)

    @classmethod
    def module_delete(cls, id):
        """Soft-delete a module by flipping its status to DISABLE.

        NOTE(review): returns a bare 0 on success but a (105, err) tuple on
        failure — inconsistent with module_create/module_modify; confirm
        callers before unifying.
        """
        try:
            m = InterfaceModule.query.get(id)
            m.status = InterfaceModule.DISABLE
            db.session.add(m)
            db.session.commit()
            return 0
        except Exception as e:
            current_app.logger.error(str(e))
            return 105, str(e)

    @classmethod
    def module_modify(cls, id, name, project_id):
        """Rename / re-project a module; returns (0, None) or (102, err)."""
        try:
            m = InterfaceModule.query.get(id)
            m.name = name
            m.project_id = project_id
            db.session.add(m)
            db.session.commit()
            return 0, None
        except Exception as e:
            current_app.logger.error(str(e))
            return 102, str(e)

    @classmethod
    @transfer2json('?id|!name|!projectid|!num|!weight|!status')
    def query_json_by_id(cls, id):
        """Fetch one active module as JSON-ready rows."""
        ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
                                  InterfaceModule.id == id).all()
        return ret

    @classmethod
    def _query_total(cls):
        # Same columns as _query() plus a per-module case count (outer join).
        return InterfaceModule.query.outerjoin(
            InterfaceCase, InterfaceCase.module_id == InterfaceModule.id).add_columns(
            InterfaceModule.id.label('id'),
            InterfaceModule.name.label('name'),
            InterfaceModule.project_id.label('projectid'),
            InterfaceModule.num.label('num'),
            InterfaceModule.weight.label('weight'),
            InterfaceModule.status.label('status'),
            func.count('*').label('total'),
        )

    @classmethod
    @transfer2json('?id|!name|!projectid|!num|!weight|!status|!total')
    def query_by_project_id_total(cls, pid):
        """Per-module case totals for a project; currently stubbed to []."""
        # TODO : here need case import
        # ret = cls._query_total().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
        #                                 InterfaceModule.project_id == pid, Case.status != Case.DISABLE).order_by(
        #     desc(InterfaceModule.id)).group_by(Case.module_id).all()
        ret = []
        return ret

    @classmethod
    @transfer2json('?id|!name|!projectid|!num|!weight|!status')
    def query_by_project_ids(cls, pid):
        """All active modules of a project, newest id first."""
        ret = cls._query().filter(InterfaceModule.status == InterfaceModule.ACTIVE,
                                  InterfaceModule.project_id == pid).order_by(desc(InterfaceModule.id)).all()
        return ret

    @classmethod
    def query_by_project_id(cls, pid):
        """Merge per-module case totals with the plain module list
        (total=0 for modules without counted cases), sorted by id desc."""
        tlist = []
        total_ret = cls.query_by_project_id_total(pid)
        for a in total_ret:
            tlist.append(a['id'])
        ret = cls.query_by_project_ids(pid)
        for i in range(len(ret)):
            if ret[i]['id'] not in tlist:
                ret[i]['total'] = 0
                total_ret.append(ret[i])
        total_ret = sorted(total_ret, key=lambda x: x['id'], reverse=True)
        return total_ret

    @classmethod
    def find_model(cls, page, per_page, project_name):
        """Paginated module list for a project, plus the full list used by
        the API-entry form's module selector."""
        if not project_name:
            return jsonify({'msg': '请先选择项目', 'status': 0})
        # NOTE(review): 'peoject_id' is a typo for 'project_id' (local only).
        peoject_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
        all_module = InterfaceModule.query.filter_by(status=InterfaceModule.ACTIVE, project_id=peoject_id).order_by(
            InterfaceModule.num.asc())
        pagination = all_module.paginate(page, per_page=per_page, error_out=False)
        my_module = pagination.items
        total = pagination.total
        my_module = [{'name': c.name, 'moduleId': c.id, 'num': c.num} for c in my_module]
        # All interface modules are queried so that every module can be
        # selected when entering a new API.
        _all_module = [{'name': s.name, 'moduleId': s.id, 'num': s.num} for s in all_module.all()]
        return jsonify({'data': my_module, 'total': total, 'status': 1, 'all_module': _all_module})

    @classmethod
    def add_model(cls, project_name, name, ids, number):
        """Create a module, or — when *ids* is given — rename/renumber an
        existing one; rejects duplicate names within the project."""
        if not project_name:
            return jsonify({'msg': '请先创建项目', 'status': 0})
        if not name:
            return jsonify({'msg': '模块名称不能为空', 'status': 0})
        project_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
        num = auto_num(number, InterfaceModule, project_id=project_id, status=InterfaceModule.ACTIVE)
        if ids:
            old_data = InterfaceModule.query.filter_by(id=ids, status=InterfaceModule.ACTIVE).first()
            old_num = old_data.num
            list_data = InterfaceModule.query.filter(InterfaceModule.status == InterfaceModule.ACTIVE,
                                                     InterfaceModule.project_id == project_id).order_by(
                InterfaceModule.num.asc()).all()
            if InterfaceModule.query.filter_by(name=name, project_id=project_id,
                                               status=InterfaceModule.ACTIVE).first() and name != old_data.name:
                return jsonify({'msg': '模块名字重复', 'status': 0})
            # Re-slot the module at position `num`, shifting its neighbours.
            num_sort(num, old_num, list_data, old_data)
            InterfaceModuleBusiness.module_modify(ids, name, project_id)
            return jsonify({'msg': '修改成功', 'status': 1})
        else:
            if InterfaceModule.query.filter_by(name=name, project_id=project_id, status=InterfaceModule.ACTIVE).first():
                return jsonify({'msg': '模块名字重复', 'status': 0})
            else:
                InterfaceModuleBusiness.module_create(name, project_id, num)
                return jsonify({'msg': '新建成功', 'status': 1})

    @classmethod
    def del_model(cls, ids):
        """Soft-delete a module unless it still owns active API cases."""
        # _edit = InterfaceModule.query.filter_by(id=ids).first()
        # if current_user.id != Project.query.filter_by(id=_edit.project_id).first().user_id:
        #     return jsonify({'msg': '不能删除别人项目下的模块', 'status': 0})
        if InterfaceApiMsg.query.filter(
                InterfaceApiMsg.module_id == ids,
                InterfaceApiMsg.status == InterfaceApiMsg.ACTIVE
        ).order_by(asc(InterfaceApiMsg.num)).all():
            return jsonify({'msg': '请先删除模块下的接口用例', 'status': 0})
        InterfaceModuleBusiness.module_delete(ids)
        return jsonify({'msg': '删除成功', 'status': 1})

    @classmethod
    def stick_module(cls, module_id, project_name):
        """Move a module to the top (num=1) of its project's ordering."""
        old_data = InterfaceModule.query.filter_by(id=module_id, status=InterfaceModule.ACTIVE).first()
        old_num = old_data.num
        list_data_id = InterfaceProject.query.filter_by(name=project_name, status=InterfaceProject.ACTIVE).first().id
        list_data = InterfaceModule.query.filter_by(project_id=list_data_id, status=InterfaceModule.ACTIVE).order_by(
            InterfaceModule.num.asc()).all()
        num_sort(1, old_num, list_data, old_data)
        db.session.commit()
        return jsonify({'msg': '置顶完成', 'status': 1})
|
59811
|
from django import template
register = template.Library()
@register.simple_tag
def format_date_range(date_from, date_to, separator=" - ",
                      format_str="%B %d, %Y", year_f=", %Y", month_f="%B", date_f=" %d"):
    """Render a pretty date-range string.

    When both dates share a year, the year is dropped from the first date's
    format; when they also share a month, the month is dropped from the second
    date's format. A missing or identical ``date_to`` yields a single
    formatted date.
    """
    # Single date: either no end date, or it equals the start date.
    if not date_to or date_to == date_from:
        return date_from.strftime(format_str)
    fmt_from = format_str
    fmt_to = format_str
    if date_from.year == date_to.year:
        fmt_from = fmt_from.replace(year_f, '')
        if date_from.month == date_to.month:
            fmt_to = fmt_to.replace(month_f, '')
    return separator.join((date_from.strftime(fmt_from), date_to.strftime(fmt_to)))
|
59815
|
from typing import Optional
from infrastructure.cqrs.decorators.requestclass import requestclass
from domain.common.request_parameter.OrderByParameter import OrderByParameter
from domain.common.request_parameter.PagingParameter import PagingParameter
@requestclass
class GetDataOperationJobListRequest(PagingParameter, OrderByParameter):
    """Request model for querying the data-operation job list; paging and
    ordering fields are inherited from PagingParameter / OrderByParameter."""
    # Filter by the numeric id of a data operation (None = no filter).
    DataOperationId: Optional[int] = None
    # Filter by data-operation name (None = no filter).
    DataOperationName: Optional[str] = None
    # When true, presumably restrict results to jobs with a cron schedule — confirm in handler.
    OnlyCron: Optional[bool] = None
    # When true, presumably exclude soft-deleted jobs — confirm in handler.
    OnlyUndeleted: Optional[bool] = None
59872
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
import json
import os
import codecs
from collections import Counter
import numpy as np
import tensorflow as tf
from parser.structs.vocabs.base_vocabs import CountVocab
from parser.structs.vocabs.token_vocabs import TokenVocab,GraphTokenVocab
from parser.structs.vocabs.index_vocabs import IndexVocab,GraphIndexVocab
from parser.structs.vocabs.second_order_vocab import GraphSecondIndexVocab
from parser.structs.vocabs.pointer_generator import PointerGenerator
from . import mrp_vocabs as mv
from parser.neural import nn, nonlin, embeddings, classifiers, recurrent
import sys
sys.path.append('./THUMT')
import thumt.layers as layers
from thumt.models.rnnsearch import _decoder as seq2seq_decoder
# from THUMT.thumt.models.rnnsearch import _decoder as seq2seq_decoder
import pdb
class RNNDecoderVocab(TokenVocab):
    """Token vocab that also wraps the THUMT seq2seq RNN decoder.

    count() builds the vocabulary from the node fields of an MRP JSON file;
    forward() projects encoder states/embeddings to the decoder size and runs
    thumt's rnnsearch decoder over them.
    """

    #=============================================================
    def __init__(self, *args, **kwargs):
        """Default placeholder shape is [batch_size, bucket_size]."""
        if 'placeholder_shape' not in kwargs:
            kwargs['placeholder_shape'] = [None, None]
        super(RNNDecoderVocab, self).__init__(*args, **kwargs)
        return

    def forward(self, layers, decoder_embeddings, sentence_feat, token_weights, sequence_length, input_feed=None, target_copy_hidden_states=None, coverage=None,
                variable_scope=None, reuse=False, debug=False):
        """Run the seq2seq decoder over the encoder outputs.

        :param layers: BiLSTM outputs [batch_size, seq_length, hidden_size]
        :param decoder_embeddings: [batch_size, decoder_seq_length, embedding_size]
        :param sentence_feat: final RNN state [num_encoder_layers, batch_size, hidden_size]
        :param token_weights: token mask (unused here)
        :param sequence_length: encoder sequence lengths
        :param input_feed: None or [batch_size, 1, hidden_size] (unused here)
        :param target_copy_hidden_states: None or [batch_size, seq_length, hidden_size] (unused here)
        :param coverage: None or [batch_size, 1, encode_seq_length] (unused here)
        :return: result of thumt's rnnsearch `_decoder`
        """
        with tf.variable_scope('Seq2SeqDecoder'):
            # Project the encoder state, memory and decoder embeddings into
            # the decoder's hidden size before running the decoder.
            with tf.variable_scope('linear'):
                sentence_feat = classifiers.hidden(sentence_feat, self.recur_size, hidden_func=self.hidden_func, hidden_keep_prob=self.hidden_keep_prob)
            with tf.variable_scope('memory_linear'):
                layers = classifiers.hidden(layers, self.recur_size, hidden_func=self.hidden_func, hidden_keep_prob=self.hidden_keep_prob)
            with tf.variable_scope('embedding_linear'):
                decoder_embeddings = classifiers.hidden(decoder_embeddings, self.recur_size, hidden_func=self.hidden_func, hidden_keep_prob=self.hidden_keep_prob)
            # NOTE(review): self.cell is defined by the Seq2SeqDecoderVocab
            # subclass, not here — confirm this base forward is only reached
            # through subclasses that set it.
            result = seq2seq_decoder(self.cell, decoder_embeddings, layers, sequence_length, sentence_feat)
        return result

    def count(self, mrp):
        """Count node-field tokens from the MRP JSON file at path `mrp`.

        :return: True on completion
        """
        # Bug fix: json.load(open(mrp)) leaked the file handle; use a context
        # manager so it is closed deterministically.
        with open(mrp) as f:
            mrp_file = json.load(f)
        for sentence_id in mrp_file:
            for current_data in mrp_file[sentence_id]['nodes']:
                token = current_data[self.field]
                self._count(token)
        self.index_by_counts()
        return True

    def count_mrp(self, mrp):
        """No-op counterpart of count(), kept for interface compatibility."""
        return True

    def _count(self, token):
        """Add one occurrence of `token` (lower-cased unless self.cased)."""
        if not self.cased:
            token = token.lower()
        self.counts[token] += 1
        return

    def get_root(self):
        """Index used for the artificial root token."""
        return 0

    def add_sequence(self, tokens):
        """Pass `tokens` through, substituting 0 for empty strings.

        NOTE(review): unlike TokenVocab.add, no index lookup happens here —
        confirm callers pass pre-indexed values.
        """
        indices = [x if x != '' else 0 for x in tokens]
        return indices

    @property
    def recur_size(self):
        # Decoder hidden size, read from this vocab's config section.
        return self._config.getint(self, 'recur_size')

    @property
    def get_nodes_path(self):
        # Path to the nodes file from the BaseNetwork config section.
        return self._config.get('BaseNetwork', 'nodes_path')
class Seq2SeqIDVocab(RNNDecoderVocab, mv.NodeIDVocab):
    """Vocab for node ids consumed by the seq2seq decoder."""

    def set_placeholders(self, indices, feed_dict=None):
        """Bind `indices` to this vocab's placeholder and return the feed dict.

        Bug fix: `feed_dict` previously defaulted to a shared mutable dict
        (`{}`), so entries leaked across calls that relied on the default; a
        fresh dict is now created per call (passing a dict in explicitly
        behaves exactly as before).
        """
        if feed_dict is None:
            feed_dict = {}
        feed_dict[self.placeholder] = indices
        return feed_dict

    #=============================================================
    def get_bos(self):
        """Index used for the beginning-of-sequence marker."""
        return 0

    #=============================================================
    def get_eos(self):
        """Index used for the end-of-sequence marker."""
        return 0
class Seq2SeqNodeLabelPredictionVocab(TokenVocab, mv.LabelVocab):
    """Vocab that predicts node labels with a pointer-generator over decoder states."""

    def __init__(self, *args, **kwargs):
        """Placeholders are [batch_size, bucket_size]."""
        kwargs['placeholder_shape'] = [None, None]
        super(Seq2SeqNodeLabelPredictionVocab, self).__init__(*args, **kwargs)
        return

    #=============================================================
    def get_bos(self):
        """Beginning-of-sequence token string."""
        return '<BOS>'

    #=============================================================
    def get_eos(self):
        """End-of-sequence token string."""
        return '<EOS>'

    def forward(self, hiddens, source_attentions, target_attentions, pointer_generator_inputs, invalid_indexes=None,
                variable_scope=None, reuse=False, debug=False):
        """Compute a distribution over the target dictionary extended by the
        dynamic dictionary implied by copying source/target nodes, plus loss.

        :param hiddens: decoder outputs, [batch_size, num_target_nodes, hidden_size]
        :param source_attentions: attention of each source node,
            [batch_size, num_target_nodes, num_source_nodes]
        :param target_attentions: attention of each target node,
            [batch_size, num_target_nodes, num_target_nodes]
        :param pointer_generator_inputs: dict providing 'SrcCopyMap',
            'TgtCopyMap', 'SrcCopyIndices' and 'TgtCopyIndices'
        :param invalid_indexes: indexes which are not considered in prediction
        :return: generator outputs extended with the interpolation-scaled loss
        """
        with tf.variable_scope('Seq2SeqNodeLabelPredictionVocab'):
            source_attention_maps = pointer_generator_inputs['SrcCopyMap']
            # Drop the BOS position from the target-side maps and targets.
            target_attention_maps = pointer_generator_inputs['TgtCopyMap'][:, 1:]
            outputs = self.predictor.forward(hiddens, source_attentions, source_attention_maps, target_attentions, target_attention_maps, invalid_indexes=None, debug=debug)
            copy_targets = pointer_generator_inputs['SrcCopyIndices'][:, 1:]
            coref_targets = pointer_generator_inputs['TgtCopyIndices'][:, 1:]
            loss_outputs = self.predictor.compute_loss(outputs['probabilities'], outputs['predictions'], self.placeholder, copy_targets, outputs['source_dynamic_vocab_size'], coref_targets, outputs['source_dynamic_vocab_size'], None, target_attentions, debug=debug)
            outputs.update(loss_outputs)
            # Scale this head's loss by its interpolation weight.
            outputs['loss'] = outputs['loss'] * self.loss_interpolation
        return outputs

    def decode(self, memory_bank, mask, states, copy_attention_maps, copy_vocabs, tag_luts, invalid_indexes, decoder_inputs):
        """Step-wise greedy decoding with source- and target-copy attention.

        NOTE(review): this method mixes TensorFlow calls (tf.shape/tf.cast)
        with PyTorch calls (torch.ones/zeros/cat, tensor.new_zeros) and
        appears to have been ported from a PyTorch pointer-generator
        implementation; it cannot run against TF tensors as written —
        confirm before relying on it.
        """
        import torch  # local import: torch is used only by this (ported) method

        # [batch_size, 1]
        batch_size = tf.shape(memory_bank)[0]
        # Bug fix: `tt.ones` referenced an undefined name (NameError at call
        # time); `torch.ones` matches the rest of this method.
        tokens = torch.ones([batch_size, 1]) * self.index('<BOS>')
        # NOTE(review): seeded with the EOS index although this is the first
        # step — looks like it should be BOS; confirm.
        pos_tags = torch.ones(batch_size, 1) * self.index('<EOS>')
        corefs = torch.zeros(batch_size, 1)
        decoder_input_history = []
        decoder_outputs = []
        rnn_outputs = []
        copy_attentions = []
        coref_attentions = []
        predictions = []
        coref_indexes = []
        decoder_mask = []
        input_feed = None
        coref_inputs = []
        # A sparse indicator matrix mapping each node to its index in the dynamic vocab.
        # Here the maximum size of the dynamic vocab is just max_decode_length.
        coref_attention_maps = tf.cast(tf.zeros([batch_size, self.max_decode_length, self.max_decode_length + 1]), tf.float32)
        # A matrix D where the element D_{ij} is for instance i the real vocab index of
        # the generated node at the decoding step `i'.
        coref_vocab_maps = tf.zeros([batch_size, self.max_decode_length + 1])
        coverage = None
        if self.use_coverage:
            coverage = memory_bank.new_zeros(batch_size, 1, memory_bank.size(1))
        for step_i in range(self.max_decode_length):
            # 2. Decode one step.
            decoder_output_dict = self.decoder(
                decoder_inputs, memory_bank, mask, states, input_feed, coref_inputs, coverage)
            _decoder_outputs = decoder_output_dict['decoder_hidden_states']
            _rnn_outputs = decoder_output_dict['rnn_hidden_states']
            _copy_attentions = decoder_output_dict['source_copy_attentions']
            _coref_attentions = decoder_output_dict['target_copy_attentions']
            states = decoder_output_dict['last_hidden_state']
            input_feed = decoder_output_dict['input_feed']
            coverage = decoder_output_dict['coverage']
            # 3. Run pointer/generator. The first step only has BOS in the
            # dynamic vocab, hence the +1 slice.
            if step_i == 0:
                _coref_attention_maps = coref_attention_maps[:, :step_i + 1]
            else:
                _coref_attention_maps = coref_attention_maps[:, :step_i]
            generator_output = self.generator(
                _decoder_outputs, _copy_attentions, copy_attention_maps,
                _coref_attentions, _coref_attention_maps, invalid_indexes)
            _predictions = generator_output['predictions']
            # 4. Update maps and get the next token input.
            tokens, _predictions, pos_tags, corefs, _mask = self._update_maps_and_get_next_input(
                step_i,
                generator_output['predictions'].squeeze(1),
                generator_output['source_dynamic_vocab_size'],
                coref_attention_maps,
                coref_vocab_maps,
                copy_vocabs,
                decoder_mask,
                tag_luts,
                invalid_indexes
            )
            # 5. Update variables.
            decoder_input_history += [decoder_inputs]
            decoder_outputs += [_decoder_outputs]
            rnn_outputs += [_rnn_outputs]
            copy_attentions += [_copy_attentions]
            coref_attentions += [_coref_attentions]
            predictions += [_predictions]
            # Add the coref info for the next input.
            coref_indexes += [corefs]
            # Add the mask for the next input.
            decoder_mask += [_mask]
        # 6. Do the following chunking for the graph decoding input.
        # Exclude the hidden state for BOS.
        decoder_input_history = torch.cat(decoder_input_history[1:], dim=1)
        decoder_outputs = torch.cat(decoder_outputs[1:], dim=1)
        rnn_outputs = torch.cat(rnn_outputs[1:], dim=1)
        # Exclude coref/mask for EOS.
        # TODO: Answer "What if the last one is not EOS?"
        predictions = torch.cat(predictions[:-1], dim=1)
        coref_indexes = torch.cat(coref_indexes[:-1], dim=1)
        decoder_mask = 1 - torch.cat(decoder_mask[:-1], dim=1)
        return dict(
            # [batch_size, max_decode_length]
            predictions=predictions,
            coref_indexes=coref_indexes,
            decoder_mask=decoder_mask,
            # [batch_size, max_decode_length, hidden_size]
            decoder_inputs=decoder_input_history,
            decoder_memory_bank=decoder_outputs,
            decoder_rnn_memory_bank=rnn_outputs,
            # [batch_size, max_decode_length, encoder_length]
            copy_attentions=copy_attentions,
            coref_attentions=coref_attentions
        )
class Seq2SeqSrcCopyMapVocab(RNNDecoderVocab, mv.SrcCopyMapVocab):
    def __init__(self, *args, **kwargs):
        """Vocab holding the source-copy map; its placeholder is rank-3
        (presumably [batch, num_source_nodes, dynamic_vocab_size] — confirm)."""
        # NOTE(review): _depth = -2 is consumed by a base class; its exact
        # meaning is not visible from this file.
        self._depth=-2
        kwargs['placeholder_shape'] = [None, None, None]
        super(Seq2SeqSrcCopyMapVocab, self).__init__(*args, **kwargs)
        return
class Seq2SeqTgtCopyMapVocab(RNNDecoderVocab, mv.TgtCopyMapVocab):
    def __init__(self, *args, **kwargs):
        """Vocab holding the target-copy map; its placeholder is rank-3
        (presumably [batch, num_target_nodes, dynamic_vocab_size] — confirm)."""
        # NOTE(review): _depth = -2 is consumed by a base class; its exact
        # meaning is not visible from this file.
        self._depth=-2
        kwargs['placeholder_shape'] = [None, None, None]
        super(Seq2SeqTgtCopyMapVocab, self).__init__(*args, **kwargs)
        return
class Seq2SeqSrcCopyIndicesVocab(RNNDecoderVocab, mv.SrcCopyIndicesVocab):
    def __init__(self, *args, **kwargs):
        """Vocab for per-token source-copy indices; placeholder is rank-2
        (presumably [batch, num_target_nodes] — confirm)."""
        kwargs['placeholder_shape'] = [None, None]
        super(Seq2SeqSrcCopyIndicesVocab, self).__init__(*args, **kwargs)
        return
class Seq2SeqTgtCopyIndicesVocab(RNNDecoderVocab, mv.TgtCopyIndicesVocab):
    def __init__(self, *args, **kwargs):
        """Vocab for per-token target-copy (coreference) indices; placeholder
        is rank-2 (presumably [batch, num_target_nodes] — confirm)."""
        kwargs['placeholder_shape'] = [None, None]
        super(Seq2SeqTgtCopyIndicesVocab, self).__init__(*args, **kwargs)
        return
class Seq2SeqDecoderVocab(RNNDecoderVocab, mv.WordVocab):
    """Word-level decoder vocab; owns the GRU cell used by the seq2seq decoder."""

    def __init__(self, *args, **kwargs):
        """Create the vocab and the LegacyGRUCell the decoder runs with."""
        kwargs['placeholder_shape'] = [None, None]
        super(Seq2SeqDecoderVocab, self).__init__(*args, **kwargs)
        self.cell = layers.rnn_cell.LegacyGRUCell(self.recur_size)
        return

    #=============================================================
    def get_bos(self):
        """Index used for the beginning-of-sequence marker."""
        return 0

    #=============================================================
    def get_eos(self):
        """Index used for the end-of-sequence marker."""
        return 0

    # NOTE: the previous `forward` override here duplicated
    # RNNDecoderVocab.forward line-for-line; it was removed so the inherited
    # implementation (identical behavior) is used instead.
class Seq2SeqAnchorPredictionVocab(RNNDecoderVocab, mv.AnchorVocab):
    # Anchor-prediction vocab; currently inherits all behavior unchanged from
    # RNNDecoderVocab / mv.AnchorVocab.
    pass
class Seq2SeqGraphTokenVocab(GraphTokenVocab, mv.SemrelVocab):
    """Graph-relation vocab built from the edge labels stored in an MRP file."""

    def count(self, mrp):
        """Accumulate relation-label counts from the MRP JSON file at `mrp`.

        :return: True on completion
        """
        # Bug fix: json.load(open(mrp)) leaked the file handle; close it
        # deterministically with a context manager.
        with open(mrp) as f:
            mrp_file = json.load(f)
        for sentence_id in mrp_file:
            for current_data in mrp_file[sentence_id]['nodes']:
                token = current_data[self.field]
                self._count(token)
        self.index_by_counts()
        return True

    def _count(self, node):
        """Count each ':'-separated relation in a '|'-joined edge string.

        '_' and '' are treated as "no edges".
        """
        if node not in ('_', ''):
            for edge in node.split('|'):
                # Each edge looks like '<head>:<rel>'; only the relation is counted.
                head, rel = edge.split(':', 1)
                self.counts[rel] += 1
        return

    #=============================================================
    def get_bos(self):
        """Beginning-of-sequence marker for relation fields."""
        return '_'

    #=============================================================
    def get_eos(self):
        """End-of-sequence marker for relation fields."""
        return '_'
class Seq2SeqGraphIndexVocab(GraphIndexVocab, mv.SemheadVocab):
    """Graph head-index vocab; counts are built from MRP edge labels."""

    def count(self, mrp):
        """Accumulate relation counts from the MRP JSON file at `mrp`.

        :return: True on completion
        """
        # Bug fix: json.load(open(mrp)) leaked the file handle; close it
        # deterministically with a context manager.
        with open(mrp) as f:
            mrp_file = json.load(f)
        for sentence_id in mrp_file:
            for current_data in mrp_file[sentence_id]['nodes']:
                token = current_data[self.field]
                self._count(token)
        self.index_by_counts()
        return True

    def _count(self, node):
        """Count each ':'-separated relation in a '|'-joined edge string.

        '_' and '' are treated as "no edges".
        """
        if node not in ('_', ''):
            for edge in node.split('|'):
                # Each edge looks like '<head>:<rel>'; only the relation is counted.
                head, rel = edge.split(':', 1)
                self.counts[rel] += 1
        return

    #=============================================================
    def get_bos(self):
        """Beginning-of-sequence marker for head fields."""
        return '_'

    #=============================================================
    def get_eos(self):
        """End-of-sequence marker for head fields."""
        return '_'
class Seq2SeqSecondOrderGraphIndexVocab(GraphSecondIndexVocab, mv.SemheadVocab):
    """Second-order graph head-index vocab; counts are built from MRP edge labels."""

    def count(self, mrp):
        """Accumulate relation counts from the MRP JSON file at `mrp`.

        :return: True on completion
        """
        # Bug fix: json.load(open(mrp)) leaked the file handle; close it
        # deterministically with a context manager.
        with open(mrp) as f:
            mrp_file = json.load(f)
        for sentence_id in mrp_file:
            for current_data in mrp_file[sentence_id]['nodes']:
                token = current_data[self.field]
                self._count(token)
        self.index_by_counts()
        return True

    def _count(self, node):
        """Count each ':'-separated relation in a '|'-joined edge string.

        '_' and '' are treated as "no edges".
        """
        if node not in ('_', ''):
            for edge in node.split('|'):
                # Each edge looks like '<head>:<rel>'; only the relation is counted.
                head, rel = edge.split(':', 1)
                self.counts[rel] += 1
        return

    #=============================================================
    def get_bos(self):
        """Beginning-of-sequence marker for head fields."""
        return '_'

    #=============================================================
    def get_eos(self):
        """End-of-sequence marker for head fields."""
        return '_'
|
59876
|
from __future__ import print_function, absolute_import
import os.path as osp
import numpy as np
from ..utils.data import Dataset
from ..utils.osutils import mkdir_if_missing
from ..utils.serialization import write_json, read_json
from ..utils.data.dataset import _pluck
class SynergyReID(Dataset):
    """SynergyReID person re-identification dataset.

    Camera convention: camera 0 holds query images, camera 1 gallery images.
    The raw zip must already be placed under ``<root>/raw/``; download() only
    verifies, extracts and reformats it.
    """
    # Expected md5 of <root>/raw/synergyreid_data.zip.
    md5 = '05050b5d9388563021315a81b531db7d'

    def __init__(self, root, split_id=0, num_val=100, download=True):
        super(SynergyReID, self).__init__(root, split_id=split_id)
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. " +
                               "You can use download=True to download it.")
        # NOTE(review): load()'s only parameter besides self is `verbose`, so
        # num_val ends up as the verbosity flag here — confirm whether load()
        # was meant to accept num_val (a truthy num_val keeps prints enabled,
        # so behavior is unchanged in practice).
        self.load(num_val)

    def download(self):
        """Verify the raw zip, extract it, copy/rename images and write
        splits.json / meta.json under self.root."""
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        import hashlib
        import shutil
        from glob import glob
        from zipfile import ZipFile
        raw_dir = osp.join(self.root, 'raw')
        mkdir_if_missing(raw_dir)
        # Verify the checksum of the user-provided zip.
        fpath = osp.join(raw_dir, 'synergyreid_data.zip')
        checksum_ok = False
        if osp.isfile(fpath):
            # Use a context manager so the file handle is closed deterministically
            # (previously open(...).read() leaked the handle).
            with open(fpath, 'rb') as f:
                checksum_ok = hashlib.md5(f.read()).hexdigest() == self.md5
        if checksum_ok:
            print("Using downloaded file: " + fpath)
        else:
            raise RuntimeError("Please move data to {} "
                               .format(fpath))
        # Extract the file
        exdir = osp.join(raw_dir, 'data_reid')
        if not osp.isdir(exdir):
            print("Extracting zip file")
            with ZipFile(fpath) as z:
                z.extractall(path=raw_dir)
        # Format
        images_dir = osp.join(self.root, 'images')
        mkdir_if_missing(images_dir)
        # 487 identities (+1 for background) with 2 camera views each.
        # Camera 0 is for query and camera 1 is for gallery.
        identities = [[[] for _ in range(2)] for _ in range(487)]

        def register(subdir):
            # Copy every image of `subdir` into images_dir under the canonical
            # '<pid>_<cam>_<index>.jpg' name; returns the set of pids seen.
            fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg')))
            pids = set()
            for fpath in fpaths:
                fname = osp.basename(fpath)
                pid = int(fname.split('_')[0])
                cam = 1 if 'gallery' in subdir else 0
                pids.add(pid)
                fname = ('{:08d}_{:02d}_{:04d}.jpg'
                         .format(pid, cam, len(identities[pid][cam])))
                identities[pid][cam].append(fname)
                shutil.copy(fpath, osp.join(images_dir, fname))
            return pids

        trainval_pids = register('reid_training')
        query_val_pids = register('reid_val/query')
        gallery_val_pids = register('reid_val/gallery')
        # Every validation query identity must also appear in the gallery,
        # and training/validation identities must not overlap.
        assert query_val_pids <= gallery_val_pids
        assert trainval_pids.isdisjoint(query_val_pids)
        identities_test = [[[] for _ in range(2)] for _ in range(9172)]

        def register_test(subdir, n=0):
            # Test images are indexed sequentially (offset by n) rather than
            # by identity, since test pids are anonymized.
            fpaths = sorted(glob(osp.join(exdir, subdir, '*.jpeg')))
            pids = set()
            for pindx, fpath in enumerate(fpaths):
                fname = osp.basename(fpath)
                pid = int(fname.split('.')[0])
                cam = 1 if 'gallery' in subdir else 0
                pids.add(pid)
                fname = ('{:08d}_{:02d}_{:04d}.jpg'
                         .format(pid, cam, 0))
                identities_test[pindx + n][cam].append(fname)
                shutil.copy(fpath, osp.join(images_dir, fname))
            return pids

        query_test_pids = register_test('reid_test/query')
        gallery_test_pids = register_test('reid_test/gallery',
                                          n=len(query_test_pids))
        # Save the training / val / test splits
        splits = [{
            'trainval': sorted(list(trainval_pids)),
            'query_val': sorted(list(query_val_pids)),
            'gallery_val': sorted(list(gallery_val_pids)),
            'query_test': sorted(list(query_test_pids)),
            'gallery_test': sorted(list(gallery_test_pids))}]
        write_json(splits, osp.join(self.root, 'splits.json'))
        # Save meta information into a json file
        meta = {'name': 'SynergyReID', 'shot': 'multiple', 'num_cameras': 2,
                'identities': identities, 'identities_test': identities_test}
        write_json(meta, osp.join(self.root, 'meta.json'))

    def load(self, verbose=True):
        """Populate train/val/test image lists from splits.json and meta.json.

        :param verbose: when truthy, print a per-subset summary table
        """
        splits = read_json(osp.join(self.root, 'splits.json'))
        if self.split_id >= len(splits):
            raise ValueError("split_id exceeds total splits {}"
                             .format(len(splits)))
        self.split = splits[self.split_id]
        trainval_pids = np.concatenate((np.asarray(self.split['trainval']),
                                        np.asarray(self.split['query_val'])))

        def _pluck_val(identities, indices, relabel=False, cam=0):
            # Collect (fname, pid, cam) tuples for one camera of the
            # validation identities, optionally relabeling pids to 0..N-1.
            ret = []
            for index, pid in enumerate(indices):
                pid_images = identities[pid]
                for camid, cam_images in enumerate(pid_images):
                    if camid == cam:
                        for fname in cam_images:
                            name = osp.splitext(fname)[0]
                            x, y, _ = map(int, name.split('_'))
                            assert pid == x and camid == y
                            if relabel:
                                ret.append((fname, index, camid))
                            else:
                                ret.append((fname, pid, camid))
            return ret

        def _pluck_test(identities, indices, n=0):
            # Test identities are stored sequentially, so the lookup uses the
            # enumeration index (offset by n), not the pid.
            ret = []
            for index, pid in enumerate(indices):
                pid_images = identities[index + n]
                for camid, cam_images in enumerate(pid_images):
                    for fname in cam_images:
                        ret.append((fname, pid, camid))
            return ret

        self.meta = read_json(osp.join(self.root, 'meta.json'))
        identities = self.meta['identities']
        identities_test = self.meta['identities_test']
        self.train = _pluck(identities, self.split['trainval'], relabel=True)
        self.trainval = _pluck(identities, trainval_pids, relabel=True)
        self.query_val = _pluck_val(identities, self.split['query_val'], cam=0)
        self.gallery_val = _pluck_val(identities, self.split['gallery_val'], cam=1)
        self.query_test = _pluck_test(identities_test, self.split['query_test'])
        self.gallery_test = _pluck_test(identities_test, self.split['gallery_test'], n=len(self.split['query_test']))
        self.num_train_ids = len(self.split['trainval'])
        self.num_val_ids = len(self.split['query_val'])
        self.num_trainval_ids = len(trainval_pids)
        if verbose:
            print(self.__class__.__name__, "dataset loaded")
            print("  subset       | # ids | # images")
            print("  ---------------------------")
            print("  train        | {:5d} | {:8d}"
                  .format(self.num_train_ids, len(self.train)))
            print("  query val    | {:5d} | {:8d}"
                  .format(len(self.split['query_val']), len(self.query_val)))
            print("  gallery val  | {:5d} | {:8d}"
                  .format(len(self.split['gallery_val']), len(self.gallery_val)))
            print("  trainval     | {:5d} | {:8d}"
                  .format(self.num_trainval_ids, len(self.trainval)))
            print("  ---------------------------")
            print("  query test   | {:5d} | {:8d}"
                  .format(len(self.split['query_test']), len(self.query_test)))
            print("  gallery test | {:5d} | {:8d}"
                  .format(len(self.split['gallery_test']), len(self.gallery_test)))
|
59908
|
from pathlib import Path

# Test-only Django settings for exercising allauth_ui; not for production.
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "very-secret"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "db.sqlite3"}}
ROOT_URLCONF = "tests.urls"
DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.messages",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
    "django_extensions",
    "allauth_ui",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.github",
    "allauth.socialaccount.providers.facebook",
    "allauth.socialaccount.providers.linkedin",
    "allauth.socialaccount.providers.digitalocean",
    "widget_tweaks",
    "django_browser_reload",
    "debug_toolbar",
]
LOCAL_APPS = ["tests"]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django_browser_reload.middleware.BrowserReloadMiddleware",
    "debug_toolbar.middleware.DebugToolbarMiddleware",
]
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
AUTHENTICATION_BACKENDS = [
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
INTERNAL_IPS = ["127.0.0.1"]
ALLOWED_HOSTS = ["*"]
SITE_ID = 1
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = Path(__file__).parent / "media"
# allauth: e-mail is the login identifier and must be verified.
ACCOUNT_EMAIL_REQUIRED = True
# Bug fix: ACCOUNT_AUTHENTICATION_METHOD was assigned twice with the same
# value; the duplicate assignment has been removed.
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 1000
|
59920
|
import pickle
import pandas as pd
# Column groups used for grouping/aggregating experiment results.
method_columns = ['model_class', 'config', 'loss_function', 'q_dist', 'sample_from_q',
                  'detach', 'add_noise', 'noise_type', 'warm_up', 'is_loaded', 'method_name']
# Hyperparameters that vary within a single method.
hparam_columns = ['grad_l1_penalty', 'grad_weight_decay',
                  'lamb', 'loss_function_param', 'noise_std', 'lr', 'weight_decay']
# Dataset / noise configuration columns.
data_columns = ['dataset', 'label_noise_level', 'label_noise_type', 'num_train_examples',
                'remove_prob', 'transform_function', 'data_augmentation']
# Bookkeeping columns that carry no experimental signal.
ignore_columns = ['device', 'batch_size', 'epochs', 'stopping_param', 'save_iter', 'vis_iter',
                  'clean_validation', 'pretrained_arg', 'load_from']
not_listed_columns = ['seed', 'log_dir']
# Preferred display/sort order of method names in result tables.
method_order = {
    'CE': 0,
    'CE-noisy-grad-Gaussian': 0.1,
    'CE-noisy-grad-Laplace': 0.2,
    'MAE': 1,
    'FW': 2,
    'DMI': 3,
    'Penalize': 3.5,
    'Predict-Gaussian': 4,
    'Predict-Gaussian-sample': 4.1,
    'Predict-Laplace': 5,
    'Predict-Laplace-sample': 5.1,
    'Predict-Gaussian-loaded': 6,
    'Predict-Laplace-loaded': 7
}


def load_result_tables(list_of_datasets):
    """Load pickled result DataFrames and normalize them into one table.

    :param list_of_datasets: iterable of paths to pickled pandas DataFrames
    :return: concatenated DataFrame with sentinel-filled NaNs and the derived
        `is_loaded` / `warm_up` / `weight_decay` / `method_name` columns.
    """
    datasets = []
    df = None
    for dataset_path in list_of_datasets:
        # Close each pickle file deterministically.
        with open(dataset_path, 'rb') as f:
            df = pickle.load(f)
        datasets.append(df)
    # NOTE(review): this drop only touches the last loaded frame and its
    # result is immediately discarded by the concat below, so ignore_columns
    # are in fact retained. Kept as-is because the fillna calls further down
    # rely on 'load_from' and 'pretrained_arg' (both listed in
    # ignore_columns) still being present.
    df = df.drop(labels=ignore_columns, axis=1)
    df = pd.concat(datasets, sort=False).reset_index(drop=True)
    # Fill missing values with sentinels/defaults. Plain assignment is used
    # instead of the deprecated chained `inplace=True` pattern.
    df['num_train_examples'] = df['num_train_examples'].fillna('N/A')
    df['transform_function'] = df['transform_function'].fillna('N/A')
    df['detach'] = df['detach'].fillna(1.0)
    df['load_from'] = df['load_from'].fillna('N/A')
    df['is_loaded'] = (df.load_from != 'N/A')
    df['pretrained_arg'] = df['pretrained_arg'].fillna('N/A')
    df['lr'] = df['lr'].fillna('1e-3')
    if 'warm_up' in df.columns:
        df['warm_up'] = df['warm_up'].fillna(0)
    else:
        df['warm_up'] = 0
    # Bug fix: the original tested `'weight_decay' is df.columns`, which is
    # always False, so existing weight_decay values were clobbered with 0.0.
    if 'weight_decay' in df.columns:
        df['weight_decay'] = df['weight_decay'].fillna(0.0)
    else:
        df['weight_decay'] = 0.0
    # Filled in later by fill_short_names().
    df['method_name'] = 'unknown'
    return df
def infer_method_name(row):
    """Derive a short human-readable method name from one result-table row.

    Returns 'unknown' for model classes that are not recognized.
    """
    if row.model_class == 'StandardClassifier':
        # Loss functions with fixed display names.
        special_losses = {'dmi': 'DMI', 'fw': 'FW', 'mae': 'MAE'}
        if row.loss_function in special_losses:
            return special_losses[row.loss_function]
        assert row.loss_function == 'ce'
        if row.add_noise == 1.0:
            return 'CE-noisy-grad-{}'.format(row.noise_type)
        return 'CE'
    if row.model_class == 'PredictGradOutput':
        # Build the name from dash-joined qualifier parts.
        parts = ['Predict', '{}'.format(row.q_dist)]
        if row.sample_from_q:
            parts.append('sample')
        if row.loss_function != 'ce':
            parts.append('{}'.format(row.loss_function))
        if row.detach == 0.0:
            parts.append('nodetach')
        if row.is_loaded:
            parts.append('loaded')
        if row.warm_up != 0:
            parts.append('warm_up{}'.format(row['warm_up']))
        return '-'.join(parts)
    if row.model_class == 'PenalizeLastLayerFixedForm':
        return 'Penalize'
    return 'unknown'
def fill_short_names(df):
    """Populate df['method_name'] in place via infer_method_name; returns df."""
    for idx, row in df.iterrows():
        df.at[idx, 'method_name'] = infer_method_name(row)
    return df
def get_agg_results(df):
    """ Takes a dataframe containing all results and computes aggregate results. """
    grouped = df.groupby(method_columns + hparam_columns + data_columns)
    total_size = 0
    # Sanity-check every configuration group before aggregating.
    for key, item in grouped:
        group = grouped.get_group(key)
        assert len(group) <= 5  # less than 5 seeds always
        assert len(set(group['seed'])) == len(group)  # all seeds are distinct
        # NOTE(review): the exact seed counts below encode how this specific
        # sweep was run (3 seeds for sampled-q / Penalize on noisy MNIST,
        # 5 otherwise); they will fail on any other sweep.
        if item.dataset.iloc[0] == 'mnist' and item.label_noise_type.iloc[0] == 'error':
            if item.sample_from_q.iloc[0] == True:
                assert len(group) == 3
            elif item.model_class.iloc[0] == 'PenalizeLastLayerFixedForm':
                assert len(group) == 3
            else:
                assert len(group) == 5
        total_size += len(group)
    # Every row must belong to exactly one group.
    assert total_size == len(df)
    # Mean/std of test and validation accuracy per configuration.
    agg_results = grouped.agg({'test_accuracy': ['mean', 'std'], 'val_accuracy': ['mean', 'std']})
    agg_results = agg_results.reset_index()
    # Flatten the MultiIndex columns: ('test_accuracy', 'mean') -> 'test_accuracy_mean'.
    agg_results.columns = ['_'.join(tup).rstrip('_') for tup in agg_results.columns.values]
    return agg_results
def do_model_selection_by_val_score(df):
    """For each (method, dataset) configuration, keep the hyperparameter row
    with the highest mean validation accuracy."""
    grouped = df.groupby(method_columns + data_columns)
    # Within each group, pick the row whose val_accuracy_mean is maximal.
    best_results = grouped.apply(
        lambda group: group.loc[group['val_accuracy_mean'].idxmax()])
    return best_results.reset_index(drop=True)
|
60006
|
from datetime import datetime, date
from marqeta.response_models import datetime_object
import json
import re
class Pos(object):
    """Read-only wrapper around the `pos` section of a Marqeta API response.

    Every property returns the corresponding key from the raw JSON payload,
    or None when the key is absent.
    """

    def __init__(self, json_response):
        self.json_response = json_response

    def __str__(self):
        return json.dumps(self.json_response, default=self.json_serial)

    @staticmethod
    def json_serial(o):
        # Serialize date/datetime values as their string form for json.dumps.
        if isinstance(o, (datetime, date)):
            return o.__str__()

    def _field(self, key):
        # Shared accessor: look up `key` in the raw payload, defaulting to None.
        return self.json_response.get(key, None)

    @property
    def pan_entry_mode(self):
        return self._field('pan_entry_mode')

    @property
    def pin_entry_mode(self):
        return self._field('pin_entry_mode')

    @property
    def terminal_id(self):
        return self._field('terminal_id')

    @property
    def terminal_attendance(self):
        return self._field('terminal_attendance')

    @property
    def terminal_location(self):
        return self._field('terminal_location')

    @property
    def card_holder_presence(self):
        return self._field('card_holder_presence')

    @property
    def cardholder_authentication_method(self):
        return self._field('cardholder_authentication_method')

    @property
    def card_presence(self):
        return self._field('card_presence')

    @property
    def terminal_type(self):
        return self._field('terminal_type')

    @property
    def card_data_input_capability(self):
        return self._field('card_data_input_capability')

    @property
    def country_code(self):
        return self._field('country_code')

    @property
    def zip(self):
        return self._field('zip')

    @property
    def partial_approval_capable(self):
        return self._field('partial_approval_capable')

    @property
    def purchase_amount_only(self):
        return self._field('purchase_amount_only')

    @property
    def is_recurring(self):
        return self._field('is_recurring')

    def __repr__(self):
        return '<Marqeta.response_models.pos.Pos>' + self.__str__()
|
60011
|
import numpy as np
from deap import benchmarks
from BayesOpt import BO
from BayesOpt.Surrogate import RandomForest
from BayesOpt.SearchSpace import ContinuousSpace, OrdinalSpace, NominalSpace
from BayesOpt.base import Solution
# Fix the RNG seed so the initial design and optimizer runs are reproducible.
np.random.seed(42)
def obj_func(x):
    """Mixed-variable toy objective.

    Quadratic in the two real variables (shifted by +2), linear in the
    distance of the integer variable from 10, plus a 0/1 penalty when the
    nominal variable is not 'OK'.
    """
    reals = np.asarray(x[:2])
    integer = x[2]
    nominal = x[3]
    penalty = 0 if nominal == 'OK' else 1
    return np.sum((reals + np.array([2, 2])) ** 2) + abs(integer - 10) * 10 + penalty
def eq_func(x):
    """Equality constraint h(x) = ||x_r||^2 - 2 over the two real variables."""
    reals = np.array(x[:2])
    return np.sum(reals ** 2) - 2
def ineq_func(x):
    """Inequality constraint g(x) = x_r0 + x_r1 + 1 over the two real variables."""
    reals = np.array(x[:2])
    return np.sum(reals) + 1
# Mixed search space: two continuous variables in [-10, 10], one ordinal
# variable in [5, 15], and one nominal variable with eight levels.
space = ((ContinuousSpace([-10, 10]) * 2) + OrdinalSpace([5, 15])
    + NominalSpace(['OK', 'A', 'B', 'C', 'D', 'E', 'F', 'G']))
# Pre-evaluated points used to warm-start the optimizer (n_eval/fitness were
# recorded from earlier runs).
warm_data = Solution([4.6827082694127835, 9.87885354178838, 5, 'A'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=236.76575128)
warm_data += Solution([-8.99187067168115, 8.317469942991558, 5, 'D'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=206.33644151)
warm_data += Solution([-2.50919762305275, 9.014286128198322, 12, 'G'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=142.57378113)
warm_data += Solution([4.639878836228101, 1.973169683940732, 9, 'G'], var_name=["r_0", "r_1", "i", "d"], n_eval=1, fitness=70.8740683)
# NOTE(review): `11 < 2` is always False, so the first branch (warm-starting
# from the in-memory Solution objects above) is dead and only the data-file
# branch runs; presumably a debug toggle left flipped — confirm the intent.
if 11 < 2:
    model = RandomForest(levels=space.levels)
    opt = BO(space, obj_func, model, minimize=True,
             n_init_sample=3, max_eval=50, verbose=True, optimizer='MIES',
             warm_data=warm_data)
    xopt, fopt, stop_dict = opt.run()
else:
    # Warm-start from a serialized data file instead of the in-memory points.
    model = RandomForest(levels=space.levels)
    opt = BO(space, obj_func, model, minimize=True,
             n_init_sample=3, max_eval=50, verbose=True, optimizer='MIES',
             warm_data="test_warmdata.data")
    xopt, fopt, stop_dict = opt.run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.