content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File:
import numpy as np
import unittest
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, RotatedBoxes, BoxMode
from detectron2.utils.visualizer import Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
mask = np.zeros_like(img[:, :, 0], dtype=np.bool)
mask[:10, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {'annotations': [{'bbox': [368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685],
'bbox_mode': BoxMode.XYWH_ABS,
'category_id': 0,
'iscrowd': 1,
'segmentation': {'counts': '_jh52m?2N2N2N2O100O10O001N1O2MceP2',
'size': [512, 512]}}],
'height': 512,
'image_id': 1,
'width': 512}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
# Test 2x scaling
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
# Test overlay masks
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
| tests/test_visualizer.py | 5,414 | -*- coding: utf-8 -*- Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved File: Test 2x scaling Test overlay masks | 131 | en | 0.873946 |
from selenium.webdriver.common.by import By
# For maintainability we separate web-element locators by page name.
class MainPageLocators(object):
    # Selenium locator tuples (By strategy, selector) for the main page.
    LOGO = (By.ID, 'nav-logo')                 # site logo in the nav bar
    ACCOUNT = (By.ID, 'nav-link-accountList')  # account / sign-in menu entry
    SIGNUP = (By.CSS_SELECTOR, '#nav-signin-tooltip > div > a')  # sign-up link in the tooltip
    LOGIN = (By.CSS_SELECTOR, '#nav-signin-tooltip > a')         # sign-in link in the tooltip
    SEARCH = (By.ID, 'twotabsearchtextbox')    # search input box
    SEARCH_LIST = (By.CSS_SELECTOR, 'div[data-component-type="s-search-result"]')  # result cards
class LoginPageLocators(object):
    # Selenium locator tuples (By strategy, selector) for the login page.
    EMAIL = (By.ID, 'ap_email')                # e-mail input field
    PASSWORD = (By.ID, 'ap_password')          # password input field
    SUBMIT = (By.ID, 'signInSubmit-input')     # sign-in submit button
    ERROR_MESSAGE = (By.ID, 'message_error')   # error banner shown on failed login
| utils/locators.py | 659 | for maintainability we can seperate web objects by page name | 60 | en | 0.806989 |
#
# builder.py - PJSIP test scenarios builder
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import ccdash
import os
import platform
import re
import subprocess
import sys
import time
class Operation:
    """One ccdash operation (update / configure / build / test) to perform.

    encode() turns the operation into the argument list that is later fed
    to ccdash.main().
    """
    # Operation types understood by ccdash:
    UPDATE = "update"        # Update operation
    CONFIGURE = "configure"  # Configure operation
    BUILD = "build"          # Build operation
    TEST = "test"            # Unit test operation

    def __init__(self, type, cmdline, name="", wdir=""):
        """
        :param type: one of UPDATE / CONFIGURE / BUILD / TEST
        :param cmdline: command line to execute (ignored for UPDATE)
        :param name: test name; mandatory for TEST operations
        :param wdir: working directory, relative to the project base dir
        :raises ValueError: if a TEST operation is created without a name
        """
        self.type = type
        self.cmdline = cmdline
        self.name = name
        self.wdir = wdir
        if self.type == self.TEST and not self.name:
            # FIX: the original raised a bare string, which is a TypeError
            # on every modern Python; raise a real exception instead.
            raise ValueError("name required for tests")

    def encode(self, base_dir):
        """Encode this operation as a ccdash command-line argument list.

        :param base_dir: project base directory; combined with self.wdir
            to form the "-w" working-directory argument.
        :return: list of argument strings
        """
        s = [self.type]
        if self.type == self.TEST:
            s.append(self.name)
        if self.type != self.UPDATE:
            s.append(self.cmdline)
        s.append("-w")
        if self.wdir:
            s.append(base_dir + "/" + self.wdir)
        else:
            s.append(base_dir)
        return s
#
# Update operation
#
# A single source-update run; every builder prepends this to its command list.
update_ops = [Operation(Operation.UPDATE, "")]

#
# The standard library tests (e.g. pjlib-test, pjsip-test, etc.)
#
# "$SUFFIX" is substituted by each builder with its platform/target suffix
# before the command line is executed.
std_test_ops = [
    Operation(Operation.TEST, "./pjlib-test$SUFFIX", name="pjlib test",
              wdir="pjlib/bin"),
    Operation(Operation.TEST, "./pjlib-util-test$SUFFIX",
              name="pjlib-util test", wdir="pjlib-util/bin"),
    Operation(Operation.TEST, "./pjnath-test$SUFFIX", name="pjnath test",
              wdir="pjnath/bin"),
    Operation(Operation.TEST, "./pjmedia-test$SUFFIX", name="pjmedia test",
              wdir="pjmedia/bin"),
    Operation(Operation.TEST, "./pjsip-test$SUFFIX", name="pjsip test",
              wdir="pjsip/bin")
]
#
# These are pjsua Python based unit test operations
#
def build_pjsua_test_ops(pjsua_exe=""):
    """Build the list of pjsua Python test Operations.

    Runs "python runall.py --list" inside ../pjsua to enumerate the tests
    (one per line, written to a temporary file named "list"), then creates
    one TEST Operation per entry.

    :param pjsua_exe: optional pjsua executable name; when given it is
        forwarded to run.py via the -e option.
    :return: list of Operation objects
    """
    ops = []
    if pjsua_exe:
        exe = " -e ../../pjsip-apps/bin/" + pjsua_exe
    else:
        exe = ""
    cwd = os.getcwd()
    os.chdir("../pjsua")
    os.system("python runall.py --list > list")
    f = open("list", "r")
    for e in f:
        e = e.rstrip("\r\n ")
        # Each line is "<module> <param>". NOTE(review): split(None, 2)
        # would yield three fields if a param contained whitespace and this
        # unpack would then raise ValueError — presumably the list file
        # always has exactly two fields; confirm against runall.py.
        (mod, param) = e.split(None, 2)
        # Derive a test name like "<module-tail>_<param-stem>".
        name = mod[4:mod.find(".py")] + "_" + \
               param[param.find("/")+1:param.find(".py")]
        ops.append(Operation(Operation.TEST, "python run.py" + exe + " " + \
                   e, name=name, wdir="tests/pjsua"))
    f.close()
    os.remove("list")
    os.chdir(cwd)
    return ops
#
# Get gcc version
#
def gcc_version(gcc):
    """Return "gcc-<version>" for the given gcc executable.

    Parses the output of "<gcc> -v" (stderr merged into stdout) and takes
    the third token of the "gcc version ..." line.  Returns "gcc-" with an
    empty version if the line is not found.  NOTE(review): written for
    Python 2 — on Python 3 proc.stdout yields bytes, so s.find("gcc
    version") would raise TypeError.
    """
    proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    ver = ""
    while True:
        s = proc.stdout.readline()
        if not s:
            break
        if s.find("gcc version") >= 0:
            # Version string is the third whitespace-separated token.
            ver = s.split(None, 3)[2]
            break
    proc.wait()
    return "gcc-" + ver
#
# Get Visual Studio version
#
def vs_get_version():
    """Return a short Visual Studio tag ("vs6", "vs2003", ... "vs-NN").

    Runs the MSVC compiler driver "cl" (must already be on PATH, e.g. via
    vcvars32.bat) and maps the compiler's major version to the product
    name; returns "vs-unknown" if no Version line is found.  NOTE(review):
    Python 2 code — on Python 3 the bytes/str comparison on readline()
    output would misbehave.
    """
    proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    while True:
        s = proc.stdout.readline()
        if s=="":
            break
        pos = s.find("Version")
        if pos > 0:
            proc.wait()
            # The version number follows "Version "; its first two digits
            # are the compiler major version.
            s = s[pos+8:]
            ver = s.split(None, 1)[0]
            major = ver[0:2]
            if major=="12":
                return "vs6"
            elif major=="13":
                return "vs2003"
            elif major=="14":
                return "vs2005"
            elif major=="15":
                return "vs2008"
            else:
                return "vs-" + major
    proc.wait()
    return "vs-unknown"
#
# Test config
#
class BaseConfig:
    """Value object holding the common ccdash test configuration.

    Attributes mirror the constructor arguments and are read directly by
    TestBuilder subclasses when assembling ccdash command lines.
    """

    def __init__(self, base_dir, url, site, group, options=None):
        # Store every setting verbatim; no validation is performed here.
        self.base_dir, self.url = base_dir, url
        self.site, self.group = site, group
        self.options = options
#
# Base class for test configurator
#
class TestBuilder:
    """Base class for the per-toolchain test configurators.

    Subclasses override build_tests() to fill self.ccdash_args with one
    ccdash argument list per operation; execute() then runs them all,
    saving/restoring user.mak and config_site.h around the run.
    NOTE(review): this module is Python 2 code (print statements,
    old-style except clauses).
    """
    def __init__(self, config, build_config_name="",
                 user_mak="", config_site="", exclude=[], not_exclude=[]):
        self.config = config                        # BaseConfig instance
        self.build_config_name = build_config_name  # Optional build suffix
        self.user_mak = user_mak                    # To be put in user.mak
        self.config_site = config_site              # To be put in config_site.h
        self.saved_user_mak = ""                    # To restore user.mak
        self.saved_config_site = ""                 # To restore config_site.h
        self.exclude = exclude                      # List of exclude patterns
        self.not_exclude = not_exclude              # List of include patterns
        self.ccdash_args = []                       # ccdash cmd lines

    def stamp(self):
        # Build timestamp used as the ccdash "-T" argument.
        return time.strftime("%Y%m%d-%H%M", time.localtime())

    def pre_action(self):
        # Override user.mak, saving the current contents first (if any).
        name = self.config.base_dir + "/user.mak"
        if os.access(name, os.F_OK):
            f = open(name, "r")
            self.saved_user_mak = f.read()
            f.close()
        if True:
            f = open(name, "w")
            f.write(self.user_mak)
            f.close()
        # Override config_site.h, saving the current contents first (if any).
        name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
        if os.access(name, os.F_OK):
            f = open(name, "r")
            self.saved_config_site = f.read()
            f.close()
        if True:
            f = open(name, "wt")
            f.write(self.config_site)
            f.close()

    def post_action(self):
        # Restore user.mak
        name = self.config.base_dir + "/user.mak"
        f = open(name, "wt")
        f.write(self.saved_user_mak)
        f.close()
        # Restore config_site.h
        name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
        f = open(name, "wt")
        f.write(self.saved_config_site)
        f.close()

    def build_tests(self):
        # This should be overridden by subclasses to fill self.ccdash_args.
        pass

    def execute(self):
        """Run every ccdash argument list, honoring the exclusion lists.

        Stops early if a mandatory operation (update/configure/build)
        fails; test failures do not stop the run.
        """
        if len(self.ccdash_args)==0:
            self.build_tests()
        self.pre_action()
        mandatory_op = ["update", "configure", "build"]
        counter = 0
        for a in self.ccdash_args:
            # Check if this test is in the exclusion list; a not_exclude
            # match overrides an exclude match.
            fullcmd = " ".join(a)
            excluded = False
            included = False
            for pat in self.exclude:
                if pat and re.search(pat, fullcmd) != None:
                    excluded = True
                    break
            if excluded:
                for pat in self.not_exclude:
                    if pat and re.search(pat, fullcmd) != None:
                        included = True
                        break
            if excluded and not included:
                if len(fullcmd)>60:
                    fullcmd = fullcmd[0:60] + ".."
                print "Skipping '%s'" % (fullcmd)
                continue
            # Prepend the program name expected by ccdash.main().
            b = ["ccdash.py"]
            b.extend(a)
            a = b
            #print a
            try:
                rc = ccdash.main(a)
            except Exception, e:
                errmsg = str(e)
                print "**** Error: ccdash got exception %s ****" % errmsg
                rc = -1
            except:
                print "**** Error: ccdash got unknown exception ****"
                rc = -1
            # a[1] is the operation type (after the prepended program name).
            if rc!=0 and a[1] in mandatory_op:
                print "Stopping because of error.."
                break
            counter = counter + 1
        self.post_action()
#
# GNU test configurator
#
class GNUTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for GNU targets.
    """
    def __init__(self, config, build_config_name="", user_mak="",
                 config_site="", cross_compile="", exclude=[], not_exclude=[]):
        """\
        Parameters:
          config              - BaseConfig instance
          build_config_name   - Optional name to be added as suffix to the build
                                name. Sample: "min-size", "O4", "TLS", etc.
          user_mak            - Contents to be put on user.mak
          config_site         - Contents to be put on config_site.h
          cross_compile       - Optional cross-compile prefix. Must include the
                                trailing dash, e.g. "arm-unknown-linux-"
          exclude             - List of regular expression patterns for tests
                                that will be excluded from the run
          not_exclude         - List of regular expression patterns for tests
                                that will be run regardless of whether they
                                match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             user_mak=user_mak, config_site=config_site,
                             exclude=exclude, not_exclude=not_exclude)
        self.cross_compile = cross_compile
        if self.cross_compile and self.cross_compile[-1] != '-':
            # FIX: strings have no append() method; the original
            # self.cross_compile.append("-") raised AttributeError whenever
            # the prefix was passed without its trailing dash.
            self.cross_compile += "-"

    def build_tests(self):
        """Fill self.ccdash_args with the full GNU update/configure/build/test
        pipeline, one ccdash argument list per operation."""
        if self.cross_compile:
            suffix = "-" + self.cross_compile[0:-1]
            build_name = self.cross_compile + \
                         gcc_version(self.cross_compile + "gcc")
        else:
            # Native build: derive the platform triple from config.guess.
            proc = subprocess.Popen("sh "+self.config.base_dir+"/config.guess",
                                    shell=True, stdout=subprocess.PIPE)
            plat = proc.stdout.readline().rstrip(" \r\n")
            build_name = plat + "-"+gcc_version(self.cross_compile + "gcc")
            suffix = "-" + plat
        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name
        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "sh ./configure"))
        if sys.platform == "win32":
            # Don't build python module on Mingw
            cmds.append(Operation(Operation.BUILD,
                        "sh -c 'make distclean && make dep && make'"))
        else:
            cmds.append(Operation(Operation.BUILD,
                        "sh -c 'make distclean && make dep && make" + \
                        " && cd pjsip-apps/src/python && " + \
                        "python setup.py clean build'"))
        cmds.extend(std_test_ops)
        cmds.extend(build_pjsua_test_ops())
        self.ccdash_args = []
        for c in cmds:
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            # NOTE(review): config.options may be None (BaseConfig default),
            # which would raise TypeError here — callers appear to always
            # supply a list; confirm before relying on the default.
            args.extend(self.config.options)
            self.ccdash_args.append(args)
#
# MSVC test configurator
#
class MSVCTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for Visual Studio builds.
    You need to set the MSVC environment variables (typically by calling
    vcvars32.bat) prior to running this class.
    """
    def __init__(self, config, target="Release|Win32", build_config_name="",
                 config_site="", exclude=[], not_exclude=[]):
        """\
        Parameters:
          config              - BaseConfig instance
          target              - Visual Studio build configuration to build.
                                Sample: "Debug|Win32", "Release|Win32".
          build_config_name   - Optional name to be added as suffix to the build
                                name. Sample: "Debug", "Release", "IPv6", etc.
          config_site         - Contents to be put on config_site.h
          exclude             - List of regular expression patterns for tests
                                that will be excluded from the run
          not_exclude         - List of regular expression patterns for tests
                                that will be run regardless of whether they
                                match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             config_site=config_site, exclude=exclude,
                             not_exclude=not_exclude)
        self.target = target.lower()

    def build_tests(self):
        """Fill self.ccdash_args with the VS8 update/configure/build/test
        pipeline for the configured target."""
        # FIX: the platform part of the target used to be unpacked into a
        # local named "sys", shadowing the imported sys module; use a
        # distinct name instead.
        (vsbuild, platform_name) = self.target.split("|", 2)
        build_name = platform_name + "-" + vs_get_version() + "-" + vsbuild
        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name
        vccmd = "vcbuild.exe /nologo /nohtmllog /nocolor /rebuild " + \
                "pjproject-vs8.sln " + " \"" + self.target + "\""
        suffix = "-i386-win32-vc8-" + vsbuild
        pjsua = "pjsua_vc8"
        if vsbuild == "debug":
            # Debug binaries carry a "d" suffix.
            pjsua = pjsua + "d"
        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
        cmds.append(Operation(Operation.BUILD, vccmd))
        cmds.extend(std_test_ops)
        cmds.extend(build_pjsua_test_ops(pjsua))
        self.ccdash_args = []
        for c in cmds:
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            args.extend(self.config.options)
            self.ccdash_args.append(args)
#
# Symbian test configurator
#
class SymbianTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for Symbian builds. You need to
    set the command line build settings prior to running this class (typically
    that involves setting the EPOCROOT variable and current device).
    """
    def __init__(self, config, target="gcce urel", build_config_name="",
                 config_site="", exclude=[], not_exclude=[]):
        """\
        Parameters:
          config              - BaseConfig instance
          target              - Symbian target to build. Default is "gcce urel".
          build_config_name   - Optional name to be added as suffix to the build
                                name. Sample: "APS", "VAS", etc.
          config_site         - Contents to be put on config_site.h
          exclude             - List of regular expression patterns for tests
                                that will be excluded from the run
          not_exclude        - List of regular expression patterns for tests
                                that will be run regardless of whether they
                                match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             config_site=config_site, exclude=exclude,
                             not_exclude=not_exclude)
        self.target = target.lower()

    def build_tests(self):
        # Check that EPOCROOT is set
        if not "EPOCROOT" in os.environ:
            print "Error: EPOCROOT environment variable is not set"
            sys.exit(1)
        epocroot = os.environ["EPOCROOT"]
        # EPOCROOT must have a trailing backslash
        if epocroot[-1] != "\\":
            epocroot = epocroot + "\\"
            os.environ["EPOCROOT"] = epocroot
        # SDK name is the last path component of EPOCROOT.
        sdk1 = epocroot.split("\\")[-2]
        # Check that the correct device is set (the "devices" tool lists the
        # configured SDKs; the default one is marked "- default").
        proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        sdk2 = ""
        while True:
            # NOTE(review): loops forever if no "- default" line appears
            # before EOF (readline() returning "" is not checked).
            line = proc.stdout.readline()
            if line.find("- default") > 0:
                sdk2 = line.split(":",1)[0]
                break
        proc.wait()
        if sdk1 != sdk2:
            print "Error: default SDK in device doesn't match EPOCROOT"
            print "Default device SDK =", sdk2
            print "EPOCROOT SDK =", sdk1
            sys.exit(1)
        build_name = sdk2.replace("_", "-") + "-" + \
                     self.target.replace(" ", "-")
        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name
        cmdline = "cmd /C \"cd build.symbian && bldmake bldfiles && abld build %s\"" % (self.target)
        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
        cmds.extend([Operation(Operation.BUILD, cmdline)])
        # No unit tests are run on Symbian; only update/configure/build.
        self.ccdash_args = []
        suffix = ""
        for c in cmds:
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            args.extend(self.config.options)
            self.ccdash_args.append(args)
| pjproject-2.2.1/tests/cdash/builder.py | 17,919 | builder.py - PJSIP test scenarios builder Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Types: Update operation Configure operation Build operation Unit test operation Update operation The standard library tests (e.g. pjlib-test, pjsip-test, etc.) These are pjsua Python based unit test operations Get gcc version Get Visual Studio version Test config Base class for test configurator BaseConfig instance Optional build suffix To be put in user.mak To be put in config_s.. To restore user.mak To restore config_s.. List of exclude pattern List of include pattern ccdash cmd line Override user.mak Override config_site.h Restore user.mak Restore config_site.h This should be overridden by subclasses Check if this test is in exclusion listprint a GNU test configurator Don't build python module on Mingw MSVC test configurator Symbian test configurator Check that EPOCROOT is set EPOCROOT must have trailing backslash Check that correct device is set | 1,635 | en | 0.787422 |
import copy
import gym
import numpy as np
import torch.nn as nn
import railrl.misc.hyperparameter as hyp
import railrl.torch.pytorch_util as ptu
from railrl.data_management.obs_dict_replay_buffer import \
ObsDictReplayBuffer
from railrl.launchers.launcher_util import run_experiment
# from railrl.samplers.data_collector import MdpPathCollector
# from railrl.samplers.data_collector.step_collector import MdpStepCollector
from railrl.samplers.data_collector.path_collector import ObsDictPathCollector
from railrl.samplers.data_collector.step_collector import ObsDictStepCollector
from railrl.visualization.video import VideoSaveFunctionBullet
from railrl.misc.buffer_save import BufferSaveFunction
from railrl.torch.networks import (
CNN,
MlpQfWithObsProcessor,
Split,
FlattenEach,
Concat,
Flatten,
)
from railrl.torch.sac.policies import (
MakeDeterministic, TanhGaussianPolicyAdapter,
)
from railrl.torch.sac.sac import SACTrainer
from railrl.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
TorchOnlineRLAlgorithm,
)
import os.path as osp
from experiments.avi.env_wrappers import FlatEnv
# NOTE(review): hard-coded local path to the google-research checkout;
# adjust PARENT_DIR for your machine.
PARENT_DIR = '/media/avi/data/Work/github/'
import sys
env_file = osp.join(PARENT_DIR, 'avisingh599/google-research/dql_grasping/')
# Make the dql_grasping package importable before importing the env below.
sys.path.insert(1, env_file)
from grasping_env import KukaGraspingProceduralEnv
def experiment(variant):
    """Train SAC with CNN Q-functions/policy on KukaGraspingProceduralEnv.

    :param variant: dict of hyperparameters (trainer_kwargs, algo_kwargs,
        cnn_params, qf_kwargs, policy_kwargs, collection_mode, ...)
        assembled by the sweep in the __main__ block.
    """
    env_params = dict(
        block_random=0.3,
        camera_random=0,
        simple_observations=False,
        continuous=True,
        remove_height_hack=True,
        render_mode="DIRECT",
        # render_mode="GUI",
        num_objects=5,
        max_num_training_models=900,
        target=False,
        test=False,
    )
    # Exploration and evaluation share the same env instance.
    expl_env = FlatEnv(KukaGraspingProceduralEnv(**env_params))
    eval_env = expl_env
    img_width, img_height = eval_env.image_shape
    num_channels = 3
    action_dim = int(np.prod(eval_env.action_space.shape))
    cnn_params = variant['cnn_params']
    cnn_params.update(
        input_width=img_width,
        input_height=img_height,
        input_channels=num_channels,
        added_fc_input_size=0,
        output_conv_channels=True,
        output_size=None,
    )
    # Q-functions: CNN trunk feeding an MLP over (conv features, action).
    qf_cnn = CNN(**cnn_params)
    qf_obs_processor = nn.Sequential(
        qf_cnn,
        Flatten(),
    )
    qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
    qf_kwargs['obs_processor'] = qf_obs_processor
    qf_kwargs['output_size'] = 1
    qf_kwargs['input_size'] = (
        action_dim + qf_cnn.conv_output_flat_size
    )
    qf1 = MlpQfWithObsProcessor(**qf_kwargs)
    qf2 = MlpQfWithObsProcessor(**qf_kwargs)
    # Target networks get their own CNN trunk (not shared with qf1/qf2).
    target_qf_cnn = CNN(**cnn_params)
    target_qf_obs_processor = nn.Sequential(
        target_qf_cnn,
        Flatten(),
    )
    target_qf_kwargs = copy.deepcopy(variant['qf_kwargs'])
    target_qf_kwargs['obs_processor'] = target_qf_obs_processor
    target_qf_kwargs['output_size'] = 1
    target_qf_kwargs['input_size'] = (
        action_dim + target_qf_cnn.conv_output_flat_size
    )
    target_qf1 = MlpQfWithObsProcessor(**target_qf_kwargs)
    target_qf2 = MlpQfWithObsProcessor(**target_qf_kwargs)
    # (action_dim recomputed here; same value as above.)
    action_dim = int(np.prod(eval_env.action_space.shape))
    policy_cnn = CNN(**cnn_params)
    policy_obs_processor = nn.Sequential(
        policy_cnn,
        Flatten(),
    )
    policy = TanhGaussianPolicyAdapter(
        policy_obs_processor,
        policy_cnn.conv_output_flat_size,
        action_dim,
        **variant['policy_kwargs']
    )
    observation_key = 'image'
    # Evaluation uses the deterministic (mean) policy.
    eval_policy = MakeDeterministic(policy)
    eval_path_collector = ObsDictPathCollector(
        eval_env,
        eval_policy,
        observation_key=observation_key,
        **variant['eval_path_collector_kwargs']
    )
    replay_buffer = ObsDictReplayBuffer(
        variant['replay_buffer_size'],
        expl_env,
        observation_key=observation_key,
    )
    trainer = SACTrainer(
        env=eval_env,
        policy=policy,
        qf1=qf1,
        qf2=qf2,
        target_qf1=target_qf1,
        target_qf2=target_qf2,
        **variant['trainer_kwargs']
    )
    # 'batch' collects whole paths between updates; 'online' interleaves
    # single environment steps with training.
    if variant['collection_mode'] == 'batch':
        expl_path_collector = ObsDictPathCollector(
            expl_env,
            policy,
            observation_key=observation_key,
            **variant['expl_path_collector_kwargs']
        )
        algorithm = TorchBatchRLAlgorithm(
            trainer=trainer,
            exploration_env=expl_env,
            evaluation_env=eval_env,
            exploration_data_collector=expl_path_collector,
            evaluation_data_collector=eval_path_collector,
            replay_buffer=replay_buffer,
            **variant['algo_kwargs']
        )
    elif variant['collection_mode'] == 'online':
        expl_path_collector = ObsDictStepCollector(
            expl_env,
            policy,
            observation_key=observation_key,
            **variant['expl_path_collector_kwargs']
        )
        algorithm = TorchOnlineRLAlgorithm(
            trainer=trainer,
            exploration_env=expl_env,
            evaluation_env=eval_env,
            exploration_data_collector=expl_path_collector,
            evaluation_data_collector=eval_path_collector,
            replay_buffer=replay_buffer,
            **variant['algo_kwargs']
        )
    else:
        raise NotImplementedError
    # Save rollout videos after each epoch.
    video_func = VideoSaveFunctionBullet(variant)
    algorithm.post_train_funcs.append(video_func)
    # dump_buffer_func = BufferSaveFunction(variant)
    # algorithm.post_train_funcs.append(dump_buffer_func)
    algorithm.to(ptu.device)
    algorithm.train()
if __name__ == "__main__":
    # Default hyperparameters; the sweep below may override entries.
    variant = dict(
        trainer_kwargs=dict(
            discount=0.99,
            # soft_target_tau=5e-3,
            # target_update_period=1,
            soft_target_tau=1.0,
            target_update_period=1000,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            use_automatic_entropy_tuning=True,
        ),
        algo_kwargs=dict(
            batch_size=256,
            max_path_length=15,
            num_epochs=5000,
            num_eval_steps_per_epoch=45,
            num_expl_steps_per_train_loop=300,
            num_trains_per_train_loop=300,
            min_num_steps_before_training=10*300,
            # max_path_length=10,
            # num_epochs=100,
            # num_eval_steps_per_epoch=100,
            # num_expl_steps_per_train_loop=100,
            # num_trains_per_train_loop=100,
            # min_num_steps_before_training=100,
        ),
        cnn_params=dict(
            kernel_sizes=[3, 3],
            n_channels=[4, 4],
            strides=[1, 1],
            hidden_sizes=[32, 32],
            paddings=[1, 1],
            pool_type='max2d',
            pool_sizes=[2, 2],
            pool_strides=[2, 2],
            pool_paddings=[0, 0],
        ),
        # replay_buffer_size=int(1E6),
        qf_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        policy_kwargs=dict(
            hidden_sizes=[256, 256],
        ),
        dump_video_kwargs=dict(
            imsize=48,
            save_video_period=1,
        ),
        logger_config=dict(
            snapshot_gap=10,
        ),
        dump_buffer_kwargs=dict(
            dump_buffer_period=50,
        ),
        replay_buffer_size=int(5E5),
        expl_path_collector_kwargs=dict(),
        eval_path_collector_kwargs=dict(),
        shared_qf_conv=False,
        use_robot_state=False,
        randomize_env=True,
    )

    import argparse
    parser = argparse.ArgumentParser()
    # parser.add_argument("--env", type=str, required=True,
    #                     choices=('SawyerReach-v0', 'SawyerGraspOne-v0'))
    # parser.add_argument("--obs", required=True, type=str, choices=('pixels', 'pixels_debug'))
    parser.add_argument("--gpu", type=int, default=1)
    args = parser.parse_args()
    variant['env'] = 'KukaGraspingProceduralEnv'
    variant['obs'] = 'pixels'

    n_seeds = 1
    mode = 'local'
    exp_prefix = 'dev-{}'.format(
        __file__.replace('/', '-').replace('_', '-').split('.')[0]
    )
    # NOTE(review): the assignment below overwrites the 'dev-...' prefix
    # computed above, making it dead code.
    exp_prefix = 'railrl-bullet-{}-{}'.format(variant['env'], variant['obs'])

    # n_seeds = 5
    # mode = 'ec2'
    # exp_prefix = 'railrl-bullet-sawyer-image-reach'

    # Hyperparameter grid; only the uncommented values are swept.
    search_space = {
        'shared_qf_conv': [
            True,
            # False,
        ],
        'collection_mode': [
            # 'batch',
            'online',
        ]
    }
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    # Launch n_seeds runs for every point of the grid.
    for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
        for _ in range(n_seeds):
            run_experiment(
                experiment,
                exp_name=exp_prefix,
                mode=mode,
                variant=variant,
                use_gpu=True,
                gpu_id=args.gpu,
                unpack_variant=False,
            )
| experiments/avi/eric_grasp_sac_pixel.py | 8,992 | from railrl.samplers.data_collector import MdpPathCollector from railrl.samplers.data_collector.step_collector import MdpStepCollector render_mode="GUI", dump_buffer_func = BufferSaveFunction(variant) algorithm.post_train_funcs.append(dump_buffer_func) soft_target_tau=5e-3, target_update_period=1, max_path_length=10, num_epochs=100, num_eval_steps_per_epoch=100, num_expl_steps_per_train_loop=100, num_trains_per_train_loop=100, min_num_steps_before_training=100, replay_buffer_size=int(1E6), parser.add_argument("--env", type=str, required=True, choices=('SawyerReach-v0', 'SawyerGraspOne-v0')) parser.add_argument("--obs", required=True, type=str, choices=('pixels', 'pixels_debug')) n_seeds = 5 mode = 'ec2' exp_prefix = 'railrl-bullet-sawyer-image-reach' False, 'batch', | 796 | en | 0.303823 |
"""
.. module:: Katna.image_filters.text_detector
:platform: OS X
:synopsis: This module is implementation of text detector filter
"""
import os
import cv2
import numpy as np
import time
import requests
import random
from imutils.object_detection import non_max_suppression
from Katna.image_filters.filter import Filter
import Katna.config as config
class TextDetector(Filter):
"""TextDetector Class: Class for implementation of text detector filter, inherit from Filter class
"""
def __init__(self, weight=1.0):
    """Initialize the text-detector filter.

    If not already present, downloads the text-detector DNN weights file
    from a public URL into USER_HOME/.katna (or /tmp/.katna when HOME is
    not writable), then loads the network with cv2.dnn and initializes
    detection parameters such as min_confidence.

    :param weight: filter weight passed to the base Filter class
    :raises FileNotFoundError: if the weights cannot be obtained or loaded
    """
    super().__init__(weight)
    # Detection thresholds / model settings all come from Katna.config.
    self.min_confidence = config.TextDetector.min_confidence
    self.merge_threshold = config.TextDetector.merge_threshold
    self.layerNames = config.TextDetector.layerNames
    self.frozen_weights = config.TextDetector.frozen_weights
    self.cache_subdir = config.TextDetector.cache_subdir
    try:
        # Prefer ~/.katna; fall back to /tmp/.katna when HOME is read-only.
        self.network_folder_path = os.path.join(os.path.expanduser("~"), ".katna")
        if not os.access(self.network_folder_path, os.W_OK):
            self.network_folder_path = os.path.join("/tmp", ".katna")
        self.datadir = os.path.join(self.network_folder_path, self.cache_subdir)
        if not os.path.exists(self.datadir):
            os.makedirs(self.datadir)
        self.network_file_path = os.path.join(self.datadir, self.frozen_weights)
        if not os.path.exists(self.network_file_path):
            # First run: fetch the frozen weights from the public URL.
            self.download_data()
        self.net = cv2.dnn.readNet(self.network_file_path)
    except Exception:
        # NOTE(review): any failure above (including download/network
        # errors) is reported as a missing-weights problem.
        raise FileNotFoundError(
            self.frozen_weights
            + " seems to be missing.\
            Download the file and specify the full path\
            while initializing TextDetector class"
        )
def download_data(self):
    """Download the text-detector network weights from the public URL.

    Streams the file in 1 MB chunks into self.datadir/self.frozen_weights.
    Troubleshooting tip: if you get a FileNotFound error during text
    detector initialization, initialize the text detector and call this
    function directly to fetch the model file.

    :raises requests.HTTPError: if the model URL answers with a non-2xx
        status (FIX: the original wrote the error body straight into the
        weights file, silently corrupting the model).
    """
    # create response object
    link = config.TextDetector.model_download_link
    r = requests.get(link, stream=True)
    # Fail loudly on a bad HTTP status instead of saving an error page
    # as the weights file.
    r.raise_for_status()
    # download started
    print("Downloading model file...")
    # Use a context manager so the file is closed even if a chunk fails.
    with open(os.path.join(self.datadir, self.frozen_weights), "wb") as f:
        for chunk in r.iter_content(chunk_size=1024 * 1024):
            if chunk:
                f.write(chunk)
    print("Model file downloaded.")
def __decode_predictions(self, scores, geometry):
    """Turn raw text-detector network output into box/confidence lists.

    Walks every cell of the score map; cells whose score clears
    self.min_confidence are converted from the network's edge-distance
    encoding into an axis-aligned (startX, startY, endX, endY) rectangle.

    :param scores: text-probability volume, shape (1, 1, rows, cols)
    :param geometry: box-geometry volume, shape (1, 5, rows, cols)
    :return: tuple (rects, confidences)
    """
    num_rows, num_cols = scores.shape[2:4]
    rects = []
    confidences = []
    for row in range(num_rows):
        row_scores = scores[0, 0, row]
        # Per-cell distances to the box's top/right/bottom/left edges,
        # plus the box rotation angle.
        dist_top = geometry[0, 0, row]
        dist_right = geometry[0, 1, row]
        dist_bottom = geometry[0, 2, row]
        dist_left = geometry[0, 3, row]
        angles = geometry[0, 4, row]
        for col in range(num_cols):
            confidence = row_scores[col]
            # Skip cells below the probability threshold.
            if confidence < self.min_confidence:
                continue
            # Feature maps are 4x smaller than the network input image.
            offset_x, offset_y = col * 4.0, row * 4.0
            cos_a = np.cos(angles[col])
            sin_a = np.sin(angles[col])
            box_h = dist_top[col] + dist_bottom[col]
            box_w = dist_right[col] + dist_left[col]
            # Rotate the edge distances back into image coordinates to get
            # the bottom-right corner; the top-left follows from w/h.
            end_x = int(offset_x + (cos_a * dist_right[col]) + (sin_a * dist_bottom[col]))
            end_y = int(offset_y - (sin_a * dist_right[col]) + (cos_a * dist_bottom[col]))
            start_x = int(end_x - box_w)
            start_y = int(end_y - box_h)
            rects.append((start_x, start_y, end_x, end_y))
            confidences.append(confidence)
    return (rects, confidences)
def __merge_boxes(self, rects):
"""main function to detect text boxes from image
:param rects: list of
:type rects: numpy array
:param rectsUsed: image file in numpy array/opencv format
:type rectsUsed: numpy array
:return: output image with the list of text boxes
:rtype: file, list
"""
def grouper(iterable, interval=2):
prev = None
group = []
for item in iterable:
if not prev or abs(item[1] - prev[1]) <= interval:
group.append(item)
else:
yield group
group = [item]
prev = item
if group:
yield group
rects_used = []
heights = list()
for bbox in rects:
heights.append(bbox[3] - bbox[1])
heights = sorted(heights) # Sort heights
median_height = heights[len(heights) // 2] / 2 # Find half of the median height
bboxes_list = sorted(
rects, key=lambda k: k[1]
) # Sort the bounding boxes based on y1 coordinate ( y of the left-top coordinate )
combined_bboxes = grouper(
bboxes_list, median_height
) # Group the bounding boxes
for group in combined_bboxes:
x_min = min(group, key=lambda k: k[0])[0] # Find min of x1
x_max = max(group, key=lambda k: k[2])[2] # Find max of x2
y_min = min(group, key=lambda k: k[1])[1] # Find min of y1
y_max = max(group, key=lambda k: k[3])[3] # Find max of y2
rects_used.append([x_min, y_min, x_max, y_max])
return rects_used
    def __detect_text(self):
        """Internal function to detect text bounding boxes in ``self.image``.

        Runs the detector network on a 320x320 input, decodes and
        non-max-suppresses the predictions, scales the surviving boxes back
        to the original resolution, draws them on ``self.image``, and merges
        boxes lying on the same line.

        :return: list of merged text boxes as [x1, y1, x2, y2] in original
            image coordinates
        :rtype: list
        """
        (H, W) = self.image.shape[:2]
        # Scale factors mapping 320x320 network coordinates back to the
        # original resolution.
        rW = W / 320
        rH = H / 320
        image = cv2.resize(self.image, (320, 320))
        (H, W) = image.shape[:2]
        # construct a blob from the image and then perform a forward pass of
        # the model to obtain the two output layer sets
        # NOTE(review): the blob is built from self.image (full size), not the
        # resized `image`; blobFromImage resizes to (W, H) itself, so the
        # cv2.resize above only serves to set W = H = 320 -- confirm intent.
        blob = cv2.dnn.blobFromImage(
            self.image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False
        )
        self.net.setInput(blob)
        (scores, geometry) = self.net.forward(self.layerNames)
        rects, confidences = self.__decode_predictions(scores, geometry)
        # apply non-maxima suppression to suppress weak, overlapping bounding
        # boxes
        boxes = non_max_suppression(np.array(rects), probs=confidences)
        text_rects = []
        # loop over the bounding boxes
        for (startX, startY, endX, endY) in boxes:
            # scale the bounding box coordinates based on the respective
            # ratios
            startX = int(startX * rW)
            startY = int(startY * rH)
            endX = int(endX * rW)
            endY = int(endY * rH)
            # Draw the detection on the working image (side effect).
            cv2.rectangle(self.image, (startX, startY), (endX, endY), (0, 0, 255), 3)
            text_rects.append([startX, startY, endX, endY])
        text_rects = sorted(text_rects, key=lambda item: item[0])
        final_rects = text_rects
        if len(text_rects) > 0:
            final_rects = self.__merge_boxes(text_rects)
        return final_rects
def set_image(self, image):
"""Public set_image function, This will detect all text boxes in input image and
will saves them as internal list of text_rect to be used in get_filter_result
:param image: input image from which needs to be cropped
:type image: numpy array(opencv)
"""
if image is None:
return None
self.image = image
self.text_rects = self.__detect_text()
def get_filter_result(self, crop):
"""Main public function of TextDetector filter class,
this filter Returns false if crop contains no text, additionally
checks for overlap between input crop rectangle and the detected
text bounding box, returns True if No overlap (Filter will not discard input crop)
otherwise returns False (signal for discarding input crop).
:param crop: input crop rectangle to test
:type crop: crop_rect
:return: True if No overlap (Filter will not discard input crop) otherwise returns False
:rtype: bool
"""
# rect: xs,ys,xe,ye
# crop: x,y,w,h
if self.text_rects is None or len(self.text_rects) == 0:
return True
for rect in self.text_rects:
if not (
(rect[2]) <= (crop.x + crop.w)
and (rect[0]) >= (crop.x)
and (rect[1]) >= (crop.y)
and (rect[3]) <= (crop.y + crop.h)
):
return False
else:
return True
return True
| Katna/image_filters/text_detector.py | 10,936 | TextDetector Class: Class for implementation of text detector filter, inherit from Filter class
Internal Function for getting bounding box and confidence values
from text detector dnn network output (scores, geometry)
function takes the number of rows and columns from the scores volume, then
initializes set of bounding box rectangles and corresponding confidence scores
Internal function to detect text bounding boxes from input image.
Returns list of bounding boxes of each detected text field in input image.
:param image: image file in numpy array/opencv format
:type image: numpy array
:param output_image: image file in numpy array/opencv format
:type output_image: numpy array
:return: output image with the list of text boxes
:rtype: file, list
Constructor for this class does the following tasks: if not already downloaded, it first downloads the text-detector DNN weights file from a public URL and saves it in the USER_HOME/.katna directory, or the /tmp/.katna directory. After this, the initializer code initializes the internal parameter: min_confidence (for text detection)
main function to detect text boxes from image
:param rects: list of
:type rects: numpy array
:param rectsUsed: image file in numpy array/opencv format
:type rectsUsed: numpy array
:return: output image with the list of text boxes
:rtype: file, list
Public function for downloading the network weight from the URL link, to be used for
text detection functionality.
Troubleshooting tip: If you get FileNotFound error during text detector initialization,
initialize the text detector and call this function directly to download the model file from public URL link.
Main public function of TextDetector filter class,
this filter Returns false if crop contains no text, additionally
checks for overlap between input crop rectangle and the detected
text bounding box, returns True if No overlap (Filter will not discard input crop)
otherwise returns False (signal for discarding input crop).
:param crop: input crop rectangle to test
:type crop: crop_rect
:return: True if No overlap (Filter will not discard input crop) otherwise returns False
:rtype: bool
Public set_image function. This will detect all text boxes in the input image and
will save them as an internal list of text_rects to be used in get_filter_result
:param image: input image from which needs to be cropped
:type image: numpy array(opencv)
.. module:: Katna.image_filters.text_detector
:platform: OS X
:synopsis: This module is implementation of text detector filter
create response object download started if not os.path.isfile(self.network_file_path) or not os.path.exists(self.network_file_path): loop over the number of rows extract the scores (probabilities), followed by the geometrical data used to derive potential bounding box coordinates that surround text loop over the number of columns if our score does not have sufficient probability, ignore it compute the offset factor as our resulting feature maps will be 4x smaller than the input image extract the rotation angle for the prediction and then compute the sin and cosine use the geometry volume to derive the width and height of the bounding box compute both the starting and ending (x, y)-coordinates for the text prediction bounding box add the bounding box coordinates and probability score to our respective lists return a tuple of the bounding boxes and associated confidences Sort heights Find half of the median height Sort the bounding boxes based on y1 coordinate ( y of the left-top coordinate ) Group the bounding boxes Find min of x1 Find max of x2 Find min of y1 Find max of y2 construct a blob from the image and then perform a forward pass of the model to obtain the two output layer sets apply non-maxima suppression to suppress weak, overlapping bounding boxes loop over the bounding boxes scale the bounding box coordinates based on the respective ratios rect: xs,ys,xe,ye crop: x,y,w,h | 3,955 | en | 0.738474 |
from distutils.core import setup

# Optional dependency groups, installable as e.g. ``pip install terra[celery]``.
extra_requires = {
    'celery': ["celery[redis]"],
    'flower': ["flower"]
}

# NOTE(review): distutils is deprecated and removed in Python 3.12; consider
# migrating to ``from setuptools import setup``.
setup(name="terra",
      packages=["terra"],
      description="Terra",
      # Bug fix: the keyword is ``extras_require``; the original spelling
      # ``extra_requires`` is not a recognized setup() keyword and the
      # optional dependency groups were silently ignored.
      extras_require=extra_requires,
      install_requires=[
          "pyyaml",
          "jstyleson",
          # signal and task from celery are used unconditionally
          "celery",
          "filelock"
      ]
      )
| setup.py | 391 | I use signal and task from celery, no matter what | 49 | en | 0.914871 |
from lightning_conceptnet.uri import concept_uri
from wordfreq import simple_tokenize
from wordfreq.preprocess import preprocess_text
STOPWORDS = [
    'the', 'a', 'an'
]

DROP_FIRST = ['to']


def english_filter(tokens):
    """
    Given a list of tokens, remove a small list of English stopwords,
    then drop a leading 'to'. Falls back to the original token list
    when filtering would leave nothing.
    """
    kept = [tok for tok in tokens if tok not in STOPWORDS]
    while kept and kept[0] in DROP_FIRST:
        kept = kept[1:]
    return kept if kept else tokens
def standardized_concept_uri(lang, text, *more):
    """
    Make the appropriate URI for a concept in a particular language, including
    removing English stopwords, normalizing the text in a way appropriate
    to that language (using the text normalization from wordfreq), and joining
    its tokens with underscores in a concept URI.

    This text normalization can smooth over some writing differences: for
    example, it removes vowel points from Arabic words, and it transliterates
    Serbian written in the Cyrillic alphabet to the Latin alphabet so that it
    can match other words written in Latin letters.

    'more' contains information to distinguish word senses, such as a part
    of speech or a WordNet domain. The items in 'more' get lowercased and
    joined with underscores, but skip many of the other steps -- for example,
    they won't have stopwords removed.

    >>> standardized_concept_uri('en', 'this is a test')
    '/c/en/this_is_test'
    >>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')
    '/c/en/this_is_test/n/example_phrase'
    >>> standardized_concept_uri('sh', 'симетрија')
    '/c/sh/simetrija'
    """
    lang = lang.lower()
    # English is the only language with a stopword filter.
    token_filter = english_filter if lang == 'en' else None

    def _joined_tokens(raw):
        # Tokenize, optionally filter stopwords, join with underscores.
        pieces = simple_tokenize(raw)
        if token_filter is not None:
            pieces = token_filter(pieces)
        return '_'.join(pieces)

    # The main text is additionally normalized by wordfreq's preprocess_text;
    # the disambiguation items in `more` are only tokenized.
    norm_text = _joined_tokens(preprocess_text(text.replace('_', ' '), lang))
    more_text = [
        _joined_tokens(item.replace('_', ' '))
        for item in more
        if item is not None
    ]
    return concept_uri(lang, norm_text, *more_text)
| lightning_conceptnet/nodes.py | 2,392 | Given a list of tokens, remove a small list of English stopwords.
Make the appropriate URI for a concept in a particular language, including
removing English stopwords, normalizing the text in a way appropriate
to that language (using the text normalization from wordfreq), and joining
its tokens with underscores in a concept URI.
This text normalization can smooth over some writing differences: for
example, it removes vowel points from Arabic words, and it transliterates
Serbian written in the Cyrillic alphabet to the Latin alphabet so that it
can match other words written in Latin letters.
'more' contains information to distinguish word senses, such as a part
of speech or a WordNet domain. The items in 'more' get lowercased and
joined with underscores, but skip many of the other steps -- for example,
they won't have stopwords removed.
>>> standardized_concept_uri('en', 'this is a test')
'/c/en/this_is_test'
>>> standardized_concept_uri('en', 'this is a test', 'n', 'example phrase')
'/c/en/this_is_test/n/example_phrase'
>>> standardized_concept_uri('sh', 'симетрија')
'/c/sh/simetrija' | 1,104 | en | 0.844263 |
# Copyright 2014 DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DreamHost Neutron Extensions
# @author: Murali Raju, New Dream Network, LLC (DreamHost)
# @author: Rosario Disomma, New Dream Network, LLC (DreamHost)
import logging
from openstack_dashboard.api import nova
from openstack_dashboard.api import neutron
from openstack_dashboard.api.neutron import neutronclient
from neutronclient.common.exceptions import PortNotFoundClient
from akanda.horizon.common import (
NEW_PROTOCOL_CHOICES_DICT, POLICY_CHOICES_DICT)
LOG = logging.getLogger(__name__)
def get_protocol(value):
    """Return the human-readable label for protocol key ``value``."""
    return NEW_PROTOCOL_CHOICES_DICT[value]
class Port(object):
    """Value object describing a named port alias."""

    def __init__(self, alias_name, protocol, port, id=None):
        self.id = id
        self.alias_name = alias_name
        self.protocol = protocol
        self.port = port

    def display_protocol(self):
        """Human-readable protocol label for table display."""
        return get_protocol(self.protocol)
class AddressGroup(object):
    """Value object describing a named group of addresses."""

    def __init__(self, name, id=None):
        self.id = id
        self.name = name
class Network(object):
    """Value object describing a network alias (name plus CIDR)."""

    def __init__(self, alias_name, cidr, id=None):
        self.id = id
        self.alias_name = alias_name
        self.cidr = cidr
class FilterRule(object):
    """Value object describing a firewall filter rule for display."""

    def __init__(self, source, source_public_port,
                 destination, destination_public_port,
                 protocol, policy, request, id=None):
        self.id = id
        self.request = request
        self.policy = policy
        self.protocol = protocol
        self.source = source
        self.source_public_port = source_public_port
        self.destination = destination
        self.destination_public_port = destination_public_port

    def display_policy(self):
        """Human-readable label for this rule's policy."""
        return POLICY_CHOICES_DICT[self.policy]

    def display_source_group(self):
        """Name of the source address group, or '' when unset."""
        return self.source['name'] if self.source else ''

    def display_destination_group(self):
        """Name of the destination address group, or '' when unset."""
        return self.destination['name'] if self.destination else ''

    def display_source_port(self):
        """Protocol label followed by the source public port."""
        return "%s %s" % (get_protocol(self.protocol),
                          self.source_public_port)

    def display_destination_port(self):
        """Protocol label followed by the destination public port."""
        return "%s %s" % (get_protocol(self.protocol),
                          self.destination_public_port)
class PortForwardingRule(object):
    """Value object describing a port-forwarding rule for display."""

    def __init__(self, rule_name, public_port,
                 protocol, private_port, port,
                 request, id=None):
        self.rule_name = rule_name
        self.public_port = public_port
        self.protocol = protocol
        self.private_port = private_port
        self.port = port
        self.request = request
        self.id = id

    def display_public_port(self):
        """Protocol label followed by the public port."""
        return "%s %s" % (get_protocol(self.protocol),
                          self.public_port)

    def display_private_port(self):
        """Protocol label followed by the private port."""
        return "%s %s" % (get_protocol(self.protocol),
                          self.private_port)

    def display_instance(self):
        """Name of the instance that owns this rule's port, or '--'.

        Best-effort lookup against nova; any lookup failure is rendered
        as '--' rather than propagated to the table renderer.
        """
        try:
            instance = nova.server_get(self.request, self.port['device_id'])
            return instance.name
        except Exception:
            # Bug fix: the original bare ``except:`` also swallowed
            # SystemExit and KeyboardInterrupt.
            return '--'
def _mk_url(*args):
    """Join path components into a URL path with a single leading slash.

    The original re-checked ``startswith('/')`` after ``lstrip('/')``,
    but that branch could never be False (lstrip removes every leading
    slash), so the dead conditional is removed.
    """
    return '/' + '/'.join(args).lstrip('/')
def _list(request, path):
    """GET the collection resource at ``path``."""
    return neutronclient(request).get(_mk_url(path))
def _get(request, path, obj_id):
    """GET the single object ``obj_id`` under ``path``."""
    return neutronclient(request).get(_mk_url(path, obj_id))
def _create(request, path, body):
    """POST ``body`` to create a new object under ``path``."""
    return neutronclient(request).post(_mk_url(path), body=body)
def _put(request, path, obj_id, body):
    """PUT ``body`` to update object ``obj_id`` under ``path``."""
    return neutronclient(request).put(_mk_url(path, obj_id), body=body)
def _delete(request, path, obj_id):
    """DELETE the object ``obj_id`` under ``path``."""
    return neutronclient(request).delete(_mk_url(path, obj_id))
def portalias_list(request):
    """Return all port aliases as ``Port`` objects."""
    r = _list(request, 'dhportalias')
    return [Port(item['name'], item['protocol'], item['port'], item['id'])
            for item in r.get('portaliases', {})]


def portalias_get(request, obj_id):
    """Return the raw port-alias dict for ``obj_id``."""
    r = _get(request, 'dhportalias', obj_id)
    return r.get('portalias', {})


def portalias_create(request, body):
    """Create a port alias from form data ``body``."""
    portalias = {'portalias': {
        'name': body['alias_name'],
        'protocol': body['protocol'],
        'port': body['port'],
    }}
    # Lazy %-style args: the body is only formatted when DEBUG is enabled.
    LOG.debug("portalias_create(): body = %s", body)
    return _create(request, 'dhportalias', portalias)


def portalias_update(request, body):
    """Update the port alias identified by ``body['id']``."""
    obj_id = body.pop('id', '')
    portalias = {'portalias': {
        'name': body['alias_name'],
        'protocol': body['protocol'],
        'port': body['port'],
    }}
    LOG.debug("portalias_update(): body = %s", body)
    return _put(request, 'dhportalias', obj_id, portalias)


def portalias_delete(request, obj_id):
    """Delete the port alias ``obj_id``."""
    return _delete(request, 'dhportalias', obj_id)
def addressgroup_list(request):
    """Return all address groups as ``AddressGroup`` objects."""
    r = _list(request, 'dhaddressgroup')
    return [AddressGroup(item['name'], item['id'])
            for item in r.get('addressgroups', {})]


def addressgroup_get(request, obj_id):
    """Return the raw address-group dict for ``obj_id``."""
    r = _get(request, 'dhaddressgroup', obj_id)
    return r.get('addressgroup', {})


def addressgroup_create(request, body):
    """Create an address group from form data ``body``."""
    addressgroup = {'addressgroup': {
        'name': body['name'],
    }}
    # Lazy %-style args: the body is only formatted when DEBUG is enabled.
    LOG.debug("addressgroup_create(): body = %s", body)
    return _create(request, 'dhaddressgroup', addressgroup)


def addressgroup_update(request, body):
    """Update the address group identified by ``body['id']``."""
    obj_id = body.pop('id', '')
    addressgroup = {'addressgroup': {
        'name': body['name'],
    }}
    LOG.debug("addressgroup_update(): body = %s", body)
    return _put(request, 'dhaddressgroup', obj_id, addressgroup)


def addressgroup_delete(request, obj_id):
    """Delete the address group ``obj_id``."""
    return _delete(request, 'dhaddressgroup', obj_id)
def networkalias_list(request):
    """Return all network aliases (address entries) as ``Network`` objects."""
    r = _list(request, 'dhaddressentry')
    return [Network(item['name'], item['cidr'], item['id'])
            for item in r.get('addressentries', {})]


def networkalias_get(request, obj_id):
    """Return the raw address-entry dict for ``obj_id``."""
    r = _get(request, 'dhaddressentry', obj_id)
    return r.get('addressentry', {})


def networkalias_create(request, body):
    """Create a network alias (address entry) from form data ``body``."""
    networkalias = {'addressentry': {
        'name': body['name'],
        'cidr': body['cidr'],
        'group_id': body['group']
    }}
    # Lazy %-style args: the body is only formatted when DEBUG is enabled.
    LOG.debug("networkalias_create(): body = %s", body)
    return _create(request, 'dhaddressentry', networkalias)


def networkalias_update(request, body):
    """Update the network alias identified by ``body['id']``."""
    obj_id = body.pop('id', '')
    networkalias = {'addressentry': {
        'name': body['name'],
        'cidr': body['cidr'],
    }}
    LOG.debug("networkalias_update(): body = %s", body)
    return _put(request, 'dhaddressentry', obj_id, networkalias)


def networkalias_delete(request, obj_id):
    """Delete the network alias ``obj_id``."""
    return _delete(request, 'dhaddressentry', obj_id)
def filterrule_list(request):
    """Return all filter rules as ``FilterRule`` objects."""
    r = _list(request, 'dhfilterrule')
    return [FilterRule(item.get('source'), item['source_port'],
                       item.get('destination'), item['destination_port'],
                       item['protocol'], item['action'], request, item['id'])
            for item in r.get('filterrules', {})]


def filterrule_get(request, obj_id):
    """Return the raw filter-rule dict for ``obj_id``."""
    r = _get(request, 'dhfilterrule', obj_id)
    return r.get('filterrule', {})


def filterrule_create(request, body):
    """Create a filter rule from form data ``body``."""
    filterrule = {'filterrule': {
        'source_id': body['source_id'],
        'destination_id': body['destination_id'],
        'source_port': body['source_public_port'],
        'destination_port': body['destination_public_port'],
        'protocol': body['source_protocol'],
        'action': body['policy'],
    }}
    # Lazy %-style args: the body is only formatted when DEBUG is enabled.
    LOG.debug("filterrule_create(): body = %s", body)
    return _create(request, 'dhfilterrule', filterrule)


def filterrule_update(request, body):
    """Update the filter rule identified by ``body['id']``."""
    obj_id = body.pop('id', '')
    filterrule = {'filterrule': {
        'source_id': body['source_id'],
        'destination_id': body['destination_id'],
        'source_port': body['source_public_port'],
        'destination_port': body['destination_public_port'],
        'protocol': body['source_protocol'],
        'action': body['policy'],
    }}
    LOG.debug("filterrule_update(): body = %s", body)
    return _put(request, 'dhfilterrule', obj_id, filterrule)


def filterrule_delete(request, obj_id):
    """Delete the filter rule ``obj_id``."""
    return _delete(request, 'dhfilterrule', obj_id)
def portforward_list(request):
    """Return all port-forwarding rules as ``PortForwardingRule`` objects."""
    r = _list(request, 'dhportforward')
    return [PortForwardingRule(item['name'], item['public_port'],
                               item['protocol'], item['private_port'],
                               item['port'], request, item['id'])
            for item in r.get('portforwards', {})]


def portforward_get(request, obj_id):
    """Return the raw port-forward dict for ``obj_id``."""
    r = _get(request, 'dhportforward', obj_id)
    return r.get('portforward', {})


def portforward_create(request, body):
    """Create a port-forwarding rule bound to the instance's first port.

    :raises PortNotFoundClient: if the instance has no ports
    """
    port_list = neutron.port_list(request, device_id=body['instance'])
    try:
        port = port_list[0]
    except IndexError:
        raise PortNotFoundClient
    portforward = {'portforward': {
        'name': body['rule_name'],
        'protocol': body['public_protocol'],
        'public_port': body['public_port'],
        'private_port': body['private_port'],
        'port_id': port.id
    }}
    # Lazy %-style args: the body is only formatted when DEBUG is enabled.
    LOG.debug("portforward_create(): body = %s", body)
    return _create(request, 'dhportforward', portforward)


def portforward_update(request, body):
    """Update the port-forwarding rule identified by ``body['id']``.

    :raises PortNotFoundClient: if the instance has no ports
    """
    obj_id = body.pop('id', '')
    port_list = neutron.port_list(request, device_id=body['instance'])
    try:
        port = port_list[0]
    except IndexError:
        raise PortNotFoundClient
    # NOTE(review): unlike portforward_create, this payload also carries
    # 'instance_id' -- confirm the server expects it on update only.
    portforward = {'portforward': {
        'name': body['rule_name'],
        'instance_id': body['instance'],
        'protocol': body['public_protocol'],
        'public_port': body['public_port'],
        'private_port': body['private_port'],
        'port_id': port.id
    }}
    LOG.debug("portforward_update(): body = %s", body)
    return _put(request, 'dhportforward', obj_id, portforward)


def portforward_delete(request, obj_id):
    """Delete the port-forwarding rule ``obj_id``."""
    return _delete(request, 'dhportforward', obj_id)
| akanda/horizon/api/neutron_extensions_client.py | 10,516 | Copyright 2014 DreamHost, LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. DreamHost Neutron Extensions @author: Murali Raju, New Dream Network, LLC (DreamHost) @author: Rosario Disomma, New Dream Network, LLC (DreamHost) | 697 | en | 0.824197 |
"""Django models utilities."""
#Django
from django.db import models
class CRideModel(models.Model):
    """
    CRideModel acts as an abstract base class from
    which every other model in the project will inherit.
    This class provides every table with the following
    attributes:
        + created (DateTime): stores the datetime the object was created.
        + modified (DateTime): stores the last datetime the object was modified.
    """
    created = models.DateTimeField(
        'created at',
        auto_now_add=True,  # set automatically once, when the row is first created
        help_text='Date time on which the object was created.'
    )
    modified = models.DateTimeField(
        'modified at',
        auto_now=True,  # refreshed automatically on every save
        help_text='Date time on which the object was last modified.'
    )
    class Meta:
        """Meta options."""
        abstract = True
        # Default query configuration shared by every inheriting model.
        get_latest_by = 'created'
        ordering = ['-created', '-modified']
| cride/utils/models.py | 1,042 | CRideModel acts as an abstract base class from
which every other model in the project will inherit.
This class provides every table with the following
attributes:
+ created (Datetime): store the datetime the object was created
+ modified (Datetime): store the last datetime the object was modified
Meta options.
Django models utilities.
Django set the date auto when the model is created set the date when the model is called Set class config when it is called | 469 | en | 0.930129 |
#!/usr/bin/env python3
# Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de>
# SPDX-License-Identifier: BSD-2-Clause
import os
import os.path
import subprocess
import test as schedtest
import plot
def hostname():
    """Return this machine's hostname (output of the ``hostname`` command)."""
    return subprocess.getoutput("hostname")
if __name__ == "__main__":
cwd = os.getcwd()
testname = os.path.basename(cwd)
host = os.environ if "SCHED_HOST" in os.environ else hostname()
for testtype in ["sim","exp"]:
test = schedtest.SchedTest.loadTest(testtype, testname=testname, resultdir=cwd, host=host)
if test != None and test.loadTestLog():
test.generate_report()
else:
print("log for",testtype,"not found")
| scripts/report_gen.py | 681 | !/usr/bin/env python3 Copyright 2019, Alex Wiens <awiens@mail.upb.de>, Achim Lösch <achim.loesch@upb.de> SPDX-License-Identifier: BSD-2-Clause | 142 | de | 0.48939 |
# Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy, os, re
from collections import OrderedDict
import itertools, pathlib
import hashlib
import pickle
from functools import lru_cache
from . import environment
from . import dependencies
from . import mlog
from .mesonlib import (
File, MesonException, listify, extract_as_list, OrderedSet,
typeslistify, stringlistify, classify_unity_sources,
get_filenames_templates_dict, substitute_values,
for_windows, for_darwin, for_cygwin, for_android, has_path_sep
)
from .compilers import is_object, clink_langs, sort_clink, lang_suffixes, get_macos_dylib_install_name
from .interpreterbase import FeatureNew
# Keywords for precompiled-header support.
pch_kwargs = set(['c_pch', 'cpp_pch'])

# Per-language compile-argument keywords accepted by build targets.
lang_arg_kwargs = set([
    'c_args',
    'cpp_args',
    'd_args',
    'd_import_dirs',
    'd_unittest',
    'd_module_versions',
    'd_debug',
    'fortran_args',
    'java_args',
    'objc_args',
    'objcpp_args',
    'rust_args',
    'vala_args',
    'cs_args',
])

# Language-specific extra keywords.
vala_kwargs = set(['vala_header', 'vala_gir', 'vala_vapi'])
rust_kwargs = set(['rust_crate_type'])
cs_kwargs = set(['resources', 'cs_args'])

# Keywords shared by every build-target type.
buildtarget_kwargs = set([
    'build_by_default',
    'build_rpath',
    'dependencies',
    'extra_files',
    'gui_app',
    'link_with',
    'link_whole',
    'link_args',
    'link_depends',
    'implicit_include_directories',
    'include_directories',
    'install',
    'install_rpath',
    'install_dir',
    'install_mode',
    'name_prefix',
    'name_suffix',
    'native',
    'objects',
    'override_options',
    'sources',
    'gnu_symbol_visibility',
])

known_build_target_kwargs = (
    buildtarget_kwargs |
    lang_arg_kwargs |
    pch_kwargs |
    vala_kwargs |
    rust_kwargs |
    cs_kwargs)

# Per-target-type keyword sets: the common set plus type-specific extras.
known_exe_kwargs = known_build_target_kwargs | {'implib', 'export_dynamic', 'pie'}
known_shlib_kwargs = known_build_target_kwargs | {'version', 'soversion', 'vs_module_defs', 'darwin_versions'}
known_shmod_kwargs = known_build_target_kwargs
known_stlib_kwargs = known_build_target_kwargs | {'pic'}
known_jar_kwargs = known_exe_kwargs | {'main_class'}
@lru_cache(maxsize=None)
def get_target_macos_dylib_install_name(ld):
    """Return the macOS dylib install name for shared-library target ``ld``.

    Cached: derived only from the target's prefix, name, suffix and
    soversion.
    """
    return get_macos_dylib_install_name(ld.prefix, ld.name, ld.suffix, ld.soversion)
class InvalidArguments(MesonException):
    """Exception for invalid arguments; a MesonException subclass."""
    pass
class Build:
    """A class that holds the status of one build including
    all dependencies and so on.
    """

    def __init__(self, environment):
        self.project_name = 'name of master project'
        self.project_version = None
        self.environment = environment
        self.projects = {}
        self.targets = OrderedDict()
        # Coredata holds the state. This is just here for convenience.
        self.compilers = environment.coredata.compilers
        self.cross_compilers = environment.coredata.cross_compilers
        # Global and per-project compile/link arguments, for the native and
        # the cross environment, keyed by language (and project name).
        self.global_args = {}
        self.projects_args = {}
        self.global_link_args = {}
        self.projects_link_args = {}
        self.cross_global_args = {}
        self.cross_projects_args = {}
        self.cross_global_link_args = {}
        self.cross_projects_link_args = {}
        self.tests = []
        self.benchmarks = []
        self.headers = []
        self.man = []
        self.data = []
        # Static linkers are detected lazily; see ensure_static_linker().
        self.static_linker = None
        self.static_cross_linker = None
        self.subprojects = {}
        self.subproject_dir = ''
        self.install_scripts = []
        self.postconf_scripts = []
        self.dist_scripts = []
        self.install_dirs = []
        self.dep_manifest_name = None
        self.dep_manifest = {}
        self.cross_stdlibs = {}
        self.test_setups = {}
        self.test_setup_default_name = None
        self.find_overrides = {}
        self.searched_programs = set() # The list of all programs that have been searched for.

    def copy(self):
        """Return a working copy: container attributes are shallow-copied,
        while 'compilers'/'cross_compilers' remain shared with coredata."""
        other = Build(self.environment)
        for k, v in self.__dict__.items():
            if k in ['compilers', 'cross_compilers']:
                # These alias coredata's fields of the same name, and must not
                # become copies.
                continue
            if isinstance(v, (list, dict, set, OrderedDict)):
                other.__dict__[k] = v.copy()
            else:
                other.__dict__[k] = v
        return other

    def merge(self, other):
        """Overwrite every attribute with the values from ``other``."""
        for k, v in other.__dict__.items():
            self.__dict__[k] = v

    def ensure_static_linker(self, compiler):
        # Lazily detect a static linker the first time a compiler needs one.
        if self.static_linker is None and compiler.needs_static_linker():
            self.static_linker = self.environment.detect_static_linker(compiler)

    def ensure_static_cross_linker(self, compiler):
        # Cross-environment counterpart of ensure_static_linker().
        if self.static_cross_linker is None and compiler.needs_static_linker():
            self.static_cross_linker = self.environment.detect_static_linker(compiler)

    def get_project(self):
        # '' is the key of the master project.
        return self.projects['']

    def get_subproject_dir(self):
        return self.subproject_dir

    def get_targets(self):
        return self.targets

    def get_tests(self):
        return self.tests

    def get_benchmarks(self):
        return self.benchmarks

    def get_headers(self):
        return self.headers

    def get_man(self):
        return self.man

    def get_data(self):
        return self.data

    def get_install_subdirs(self):
        return self.install_dirs

    def get_global_args(self, compiler, for_cross):
        """Global compile args for the compiler's language ([] if none)."""
        d = self.cross_global_args if for_cross else self.global_args
        return d.get(compiler.get_language(), [])

    def get_project_args(self, compiler, project, for_cross):
        """Per-project compile args for the compiler's language ([] if none)."""
        d = self.cross_projects_args if for_cross else self.projects_args
        args = d.get(project)
        if not args:
            return []
        return args.get(compiler.get_language(), [])

    def get_global_link_args(self, compiler, for_cross):
        """Global link args for the compiler's language ([] if none)."""
        d = self.cross_global_link_args if for_cross else self.global_link_args
        return d.get(compiler.get_language(), [])

    def get_project_link_args(self, compiler, project, for_cross):
        """Per-project link args for the compiler's language ([] if none)."""
        d = self.cross_projects_link_args if for_cross else self.projects_link_args
        link_args = d.get(project)
        if not link_args:
            return []
        return link_args.get(compiler.get_language(), [])
class IncludeDirs:
    """Include-directory set for a target: a source subdirectory, the
    directory names inside it, and whether they are system include dirs."""

    def __init__(self, curdir, dirs, is_system, extra_build_dirs=None):
        self.curdir = curdir
        self.incdirs = dirs
        self.is_system = is_system
        # The interpreter has already validated that all given directories
        # actually exist; extra build dirs default to an empty list.
        self.extra_build_dirs = [] if extra_build_dirs is None else extra_build_dirs

    def __repr__(self):
        r = '<{} {}/{}>'
        return r.format(self.__class__.__name__, self.curdir, self.incdirs)

    def get_curdir(self):
        return self.curdir

    def get_incdirs(self):
        return self.incdirs

    def get_extra_build_dirs(self):
        return self.extra_build_dirs
class ExtractedObjects:
    '''
    Holds a list of sources for which the objects must be extracted
    '''
    def __init__(self, target, srclist=None, genlist=None, objlist=None, recursive=True):
        self.target = target
        self.recursive = recursive
        # Bug fix: the original used mutable default arguments ([]), which
        # are shared between every call; normalize None to a fresh list so
        # instances never alias each other's lists.
        self.srclist = [] if srclist is None else srclist
        self.genlist = [] if genlist is None else genlist
        self.objlist = [] if objlist is None else objlist
        if self.target.is_unity:
            self.check_unity_compatible()

    def __repr__(self):
        r = '<{0} {1!r}: {2}>'
        return r.format(self.__class__.__name__, self.target.name, self.srclist)

    def classify_all_sources(self, sources, generated_sources):
        """Return the given sources (plus generated outputs), filtered to
        compilable sources and grouped by compiler."""
        # Merge sources and generated sources
        sources = list(sources)
        for gensrc in generated_sources:
            for s in gensrc.get_outputs():
                # We cannot know the path where this source will be generated,
                # but all we need here is the file extension to determine the
                # compiler.
                sources.append(s)
        # Filter out headers and all non-source files
        sources = [s for s in sources if environment.is_source(s) and not environment.is_header(s)]
        return classify_unity_sources(self.target.compilers.values(), sources)

    def check_unity_compatible(self):
        """Raise if the extraction would split a compiler's unified source.

        In a Unity build one object is produced per compiler from a single
        unified source file, so for each compiler either all of its sources
        are extracted or none are.
        """
        cmpsrcs = self.classify_all_sources(self.target.sources, self.target.generated)
        extracted_cmpsrcs = self.classify_all_sources(self.srclist, self.genlist)
        for comp, srcs in extracted_cmpsrcs.items():
            if set(srcs) != set(cmpsrcs[comp]):
                raise MesonException('Single object files can not be extracted '
                                     'in Unity builds. You can only extract all '
                                     'the object files for each compiler at once.')
class EnvironmentVariables:
    """Records environment-variable operations (set/append/prepend) so they
    can be replayed on top of a concrete environment later."""
    def __init__(self):
        # Each entry is a (method, name, value-list, kwargs) tuple, applied
        # in order by get_env().
        self.envvars = []
    def __repr__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self.envvars)
    def get_value(self, values, kwargs):
        """Return (separator, joined-value) for a list of values.

        The joined value has separator characters trimmed from both ends,
        exactly as if each value had been concatenated with a leading
        separator and the result stripped."""
        sep = kwargs.get('separator', os.pathsep)
        return sep, (sep + sep.join(values)).strip(sep)
    def set(self, env, name, values, kwargs):
        # 'env' and 'name' are unused; kept for a uniform method signature.
        _, joined = self.get_value(values, kwargs)
        return joined
    def append(self, env, name, values, kwargs):
        # New values go after any pre-existing value of the variable.
        sep, joined = self.get_value(values, kwargs)
        if name not in env:
            return joined
        return env[name] + sep + joined
    def prepend(self, env, name, values, kwargs):
        # New values go before any pre-existing value of the variable.
        sep, joined = self.get_value(values, kwargs)
        if name not in env:
            return joined
        return joined + sep + env[name]
    def get_env(self, full_env):
        """Return a copy of full_env with all recorded operations applied."""
        env = full_env.copy()
        for entry in self.envvars:
            method, name, values, kwargs = entry
            env[name] = method(full_env, name, values, kwargs)
        return env
class Target:
    """Base class for everything that appears as a target in the build
    (build targets, custom targets, run targets, ...)."""
    def __init__(self, name, subdir, subproject, build_by_default):
        if has_path_sep(name):
            # Fix failing test 53 when this becomes an error.
            mlog.warning('''Target "%s" has a path separator in its name.
This is not supported, it can cause unexpected failures and will become
a hard error in the future.''' % name)
        self.name = name
        self.subdir = subdir
        self.subproject = subproject
        self.build_by_default = build_by_default
        self.install = False
        self.build_always_stale = False
        self.option_overrides = {}
        # Subclasses must define 'typename' before calling up to us.
        if not hasattr(self, 'typename'):
            raise RuntimeError('Target type is not set for target class "{}". This is a bug'.format(type(self).__name__))
    def get_install_dir(self, environment):
        """Return (outdirs, bool) where the bool says whether the first
        output's install dir was explicitly customized."""
        default_dir = self.get_default_install_dir(environment)
        outdirs = self.get_custom_install_dir()
        first = outdirs[0]
        # A first entry that is neither unset (None/True) nor equal to the
        # default was customized -- either a path, or False meaning "do not
        # install this particular output".
        is_custom = first is not None and first is not True and first != default_dir
        if not is_custom:
            # Fill in the default in place so callers see a concrete dir.
            outdirs[0] = default_dir
        return outdirs, is_custom
    def get_basename(self):
        return self.name
    def get_subdir(self):
        return self.subdir
    def get_typename(self):
        return self.typename
    @staticmethod
    def _get_id_hash(target_id):
        # We don't really need cryptographic security here.
        # Small-digest hash function with unlikely collision is good enough.
        digest = hashlib.sha256()
        digest.update(target_id.encode(encoding='utf-8', errors='replace'))
        # This ID should be case-insensitive and should work in Visual Studio,
        # e.g. it should not start with leading '-'.
        return digest.hexdigest()[:7]
    @staticmethod
    def construct_id_from_path(subdir, name, type_suffix):
        """Construct target ID from subdir, name and type suffix.
        This helper function is made public mostly for tests."""
        # This ID must also be a valid file name on all OSs.
        # It should also avoid shell metacharacters for obvious
        # reasons. '@' is not used as often as '_' in source code names.
        # In case of collisions consider using checksums.
        # FIXME replace with assert when slash in names is prohibited
        safe_name = name.replace('/', '@').replace('\\', '@')
        assert not has_path_sep(type_suffix)
        base_id = safe_name + type_suffix
        if not subdir:
            return base_id
        # Keep the readable part last for better debuggability.
        return Target._get_id_hash(subdir) + '@@' + base_id
    def get_id(self):
        return self.construct_id_from_path(
            self.subdir, self.name, self.type_suffix())
    def process_kwargs(self, kwargs):
        if 'build_by_default' in kwargs:
            self.build_by_default = kwargs['build_by_default']
            if not isinstance(self.build_by_default, bool):
                raise InvalidArguments('build_by_default must be a boolean value.')
        elif kwargs.get('install', False):
            # For backward compatibility, if build_by_default is not explicitly
            # set, use the value of 'install' if it's enabled.
            self.build_by_default = True
        self.option_overrides = self.parse_overrides(kwargs)
    def parse_overrides(self, kwargs):
        """Parse 'override_options' entries of the form key=value into a dict."""
        overrides = {}
        for entry in stringlistify(kwargs.get('override_options', [])):
            if '=' not in entry:
                raise InvalidArguments('Overrides must be of form "key=value"')
            key, value = entry.split('=', 1)
            overrides[key.strip()] = value.strip()
        return overrides
    def is_linkable_target(self):
        # Only library-like subclasses can be linked against.
        return False
class BuildTarget(Target):
known_kwargs = known_build_target_kwargs
def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
super().__init__(name, subdir, subproject, True)
self.is_cross = is_cross
unity_opt = environment.coredata.get_builtin_option('unity')
self.is_unity = unity_opt == 'on' or (unity_opt == 'subprojects' and subproject != '')
self.environment = environment
self.sources = []
self.compilers = OrderedDict()
self.objects = []
self.external_deps = []
self.include_dirs = []
self.link_targets = []
self.link_whole_targets = []
self.link_depends = []
self.name_prefix_set = False
self.name_suffix_set = False
self.filename = 'no_name'
# The list of all files outputted by this target. Useful in cases such
# as Vala which generates .vapi and .h besides the compiled output.
self.outputs = [self.filename]
self.need_install = False
self.pch = {}
self.extra_args = {}
self.generated = []
self.extra_files = []
self.d_features = {}
self.pic = False
self.pie = False
# Sources can be:
# 1. Pre-existing source files in the source tree
# 2. Pre-existing sources generated by configure_file in the build tree
# 3. Sources files generated by another target or a Generator
self.process_sourcelist(sources)
# Objects can be:
# 1. Pre-existing objects provided by the user with the `objects:` kwarg
# 2. Compiled objects created by and extracted from another target
self.process_objectlist(objects)
self.process_kwargs(kwargs, environment)
self.check_unknown_kwargs(kwargs)
self.process_compilers()
if not any([self.sources, self.generated, self.objects, self.link_whole]):
raise InvalidArguments('Build target %s has no sources.' % name)
self.process_compilers_late()
self.validate_sources()
self.validate_cross_install(environment)
self.check_module_linking()
    def __lt__(self, other):
        # Order targets by their unique ID for stable, deterministic sorting.
        return self.get_id() < other.get_id()
    def __repr__(self):
        repr_str = "<{0} {1}: {2}>"
        return repr_str.format(self.__class__.__name__, self.get_id(), self.filename)
    def validate_cross_install(self, environment):
        # A natively-built target must not end up in a cross build's install set.
        if environment.is_cross_build() and not self.is_cross and self.need_install:
            raise InvalidArguments('Tried to install a natively built target in a cross build.')
    def check_unknown_kwargs(self, kwargs):
        # Override this method in derived classes that have more
        # keywords.
        self.check_unknown_kwargs_int(kwargs, self.known_kwargs)
    def check_unknown_kwargs_int(self, kwargs, known_kwargs):
        # Warn (do not raise) about keyword arguments not in known_kwargs.
        unknowns = []
        for k in kwargs:
            if k not in known_kwargs:
                unknowns.append(k)
        if len(unknowns) > 0:
            mlog.warning('Unknown keyword argument(s) in target %s: %s.' %
                         (self.name, ', '.join(unknowns)))
    def process_objectlist(self, objects):
        """Validate and store entries of the 'objects:' kwarg.

        Only pre-built object files (strings, Files) or ExtractedObjects are
        accepted; generated files must be passed as sources instead.
        """
        assert(isinstance(objects, list))
        for s in objects:
            # Unpack interpreter holder objects.
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, (str, File, ExtractedObjects)):
                self.objects.append(s)
            elif isinstance(s, (GeneratedList, CustomTarget)):
                msg = 'Generated files are not allowed in the \'objects\' kwarg ' + \
                    'for target {!r}.\nIt is meant only for '.format(self.name) + \
                    'pre-built object files that are shipped with the\nsource ' + \
                    'tree. Try adding it in the list of sources.'
                raise InvalidArguments(msg)
            else:
                msg = 'Bad object of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
                raise InvalidArguments(msg)
    def process_sourcelist(self, sources):
        """Split sources into pre-existing files (self.sources) and generated
        ones (self.generated), de-duplicating plain files."""
        sources = listify(sources)
        added_sources = {} # If the same source is defined multiple times, use it only once.
        for s in sources:
            # Holder unpacking. Ugly.
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, File):
                if s not in added_sources:
                    self.sources.append(s)
                    added_sources[s] = True
            elif isinstance(s, (GeneratedList, CustomTarget, CustomTargetIndex)):
                self.generated.append(s)
            else:
                msg = 'Bad source of type {!r} in target {!r}.'.format(type(s).__name__, self.name)
                raise InvalidArguments(msg)
@staticmethod
def can_compile_remove_sources(compiler, sources):
removed = False
for s in sources[:]:
if compiler.can_compile(s):
sources.remove(s)
removed = True
return removed
    def process_compilers_late(self):
        """Processes additional compilers after kwargs have been evaluated.
        This can add extra compilers that might be required by keyword
        arguments, such as link_with or dependencies. It will also try to guess
        which compiler to use if one hasn't been selected already.
        """
        # Populate list of compilers
        if self.is_cross:
            compilers = self.environment.coredata.cross_compilers
        else:
            compilers = self.environment.coredata.compilers
        # If this library is linked against another library we need to consider
        # the languages of those libraries as well.
        if self.link_targets or self.link_whole_targets:
            extra = set()
            for t in itertools.chain(self.link_targets, self.link_whole_targets):
                for name, compiler in t.compilers.items():
                    if name in clink_langs:
                        extra.add((name, compiler))
            # Add inherited languages in clink_langs priority order so the
            # resulting compiler set stays deterministic.
            for name, compiler in sorted(extra, key=lambda p: sort_clink(p[0])):
                self.compilers[name] = compiler
        if not self.compilers:
            # No source files or parent targets, target consists of only object
            # files of unknown origin. Just add the first clink compiler
            # that we have and hope that it can link these objects
            for lang in clink_langs:
                if lang in compilers:
                    self.compilers[lang] = compilers[lang]
                    break
    def process_compilers(self):
        '''
        Populate self.compilers, which is the list of compilers that this
        target will use for compiling all its sources.
        We also add compilers that were used by extracted objects to simplify
        dynamic linker determination.
        '''
        # Nothing to detect from if the target has no inputs at all.
        if not self.sources and not self.generated and not self.objects:
            return
        # Populate list of compilers
        if self.is_cross:
            compilers = self.environment.coredata.cross_compilers
        else:
            compilers = self.environment.coredata.compilers
        # Pre-existing sources
        sources = list(self.sources)
        # All generated sources
        for gensrc in self.generated:
            for s in gensrc.get_outputs():
                # Generated objects can't be compiled, so don't use them for
                # compiler detection. If our target only has generated objects,
                # we will fall back to using the first c-like compiler we find,
                # which is what we need.
                if not is_object(s):
                    sources.append(s)
        # Sources contributed by external dependencies (declare_dependency()).
        for d in self.external_deps:
            if hasattr(d, 'held_object'):
                d = d.held_object
            for s in d.sources:
                if isinstance(s, (str, File)):
                    sources.append(s)
        # Sources that were used to create our extracted objects
        for o in self.objects:
            if not isinstance(o, ExtractedObjects):
                continue
            for s in o.srclist:
                # Don't add Vala sources since that will pull in the Vala
                # compiler even though we will never use it since we are
                # dealing with compiled C code.
                if not s.endswith(lang_suffixes['vala']):
                    sources.append(s)
        if sources:
            # For each source, try to add one compiler that can compile it.
            # It's ok if no compilers can do so, because users are expected to
            # be able to add arbitrary non-source files to the sources list.
            for s in sources:
                for lang, compiler in compilers.items():
                    if compiler.can_compile(s):
                        if lang not in self.compilers:
                            self.compilers[lang] = compiler
                        break
            # Re-sort according to clink_langs
            self.compilers = OrderedDict(sorted(self.compilers.items(),
                                                key=lambda t: sort_clink(t[0])))
        # If all our sources are Vala, our target also needs the C compiler but
        # it won't get added above.
        if 'vala' in self.compilers and 'c' not in self.compilers:
            self.compilers['c'] = compilers['c']
    def validate_sources(self):
        """For single-language targets (C#, Java) verify every source is of
        that language; raises InvalidArguments otherwise."""
        if not self.sources:
            return
        for lang in ('cs', 'java'):
            if lang in self.compilers:
                check_sources = list(self.sources)
                compiler = self.compilers[lang]
                if not self.can_compile_remove_sources(compiler, check_sources):
                    m = 'No {} sources found in target {!r}'.format(lang, self.name)
                    raise InvalidArguments(m)
                if check_sources:
                    # Anything left over could not be compiled by this language.
                    m = '{0} targets can only contain {0} files:\n'.format(lang.capitalize())
                    m += '\n'.join([repr(c) for c in check_sources])
                    raise InvalidArguments(m)
                # CSharp and Java targets can't contain any other file types
                assert(len(self.compilers) == 1)
                return
    def process_link_depends(self, sources, environment):
        """Process the link_depends keyword argument.
        This is designed to handle strings, Files, and the output of Custom
        Targets. Notably it doesn't handle generator() returned objects, since
        adding them as a link depends would inherently cause them to be
        generated twice, since the output needs to be passed to the ld_args and
        link_depends.
        """
        sources = listify(sources)
        for s in sources:
            if hasattr(s, 'held_object'):
                s = s.held_object
            if isinstance(s, File):
                self.link_depends.append(s)
            elif isinstance(s, str):
                # Plain strings are resolved relative to this target's subdir.
                self.link_depends.append(
                    File.from_source_file(environment.source_dir, self.subdir, s))
            elif hasattr(s, 'get_outputs'):
                # Output-producing object (e.g. a custom target): depend on
                # all of its outputs.
                self.link_depends.extend(
                    [File.from_built_file(s.subdir, p) for p in s.get_outputs()])
            else:
                raise InvalidArguments(
                    'Link_depends arguments must be strings, Files, '
                    'or a Custom Target, or lists thereof.')
    def get_original_kwargs(self):
        # The kwargs as captured (and holder-unpacked) by copy_kwargs().
        return self.kwargs
    def unpack_holder(self, d):
        """Recursively replace interpreter holder objects with the underlying
        build-level objects so the values can be pickled."""
        d = listify(d)
        newd = []
        for i in d:
            if isinstance(i, list):
                i = self.unpack_holder(i)
            elif hasattr(i, 'held_object'):
                i = i.held_object
            # Also unpack known holder-valued attributes of the item itself.
            for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
                if hasattr(i, t):
                    setattr(i, t, self.unpack_holder(getattr(i, t)))
            newd.append(i)
        return newd
    def copy_kwargs(self, kwargs):
        # Keep a (shallow) copy of the original kwargs for introspection.
        self.kwargs = copy.copy(kwargs)
        # This sucks quite badly. Arguments
        # are holders but they can't be pickled
        # so unpack those known.
        for k, v in self.kwargs.items():
            if isinstance(v, list):
                self.kwargs[k] = self.unpack_holder(v)
            if hasattr(v, 'held_object'):
                self.kwargs[k] = v.held_object
        for t in ['dependencies', 'link_with', 'include_directories', 'sources']:
            if t in self.kwargs:
                self.kwargs[t] = self.unpack_holder(self.kwargs[t])
    def extract_objects(self, srclist):
        """Return an ExtractedObjects for the named source files.

        Raises MesonException if a name is not a string or is not one of this
        target's (pre-existing) sources.
        """
        obj_src = []
        for src in srclist:
            if not isinstance(src, str):
                raise MesonException('Object extraction arguments must be strings.')
            src = File(False, self.subdir, src)
            # FIXME: It could be a generated source
            if src not in self.sources:
                raise MesonException('Tried to extract unknown source %s.' % src)
            obj_src.append(src)
        return ExtractedObjects(self, obj_src)
    def extract_all_objects(self, recursive=True):
        # Extract objects for every source, generated source and object.
        return ExtractedObjects(self, self.sources, self.generated, self.objects,
                                recursive)
    def get_all_link_deps(self):
        return self.get_transitive_link_deps()
    @lru_cache(maxsize=None)
    def get_transitive_link_deps(self):
        # Link dependencies of all targets we link with, recursively.
        result = []
        for i in self.link_targets:
            result += i.get_all_link_deps()
        return result
    def get_link_deps_mapping(self, prefix, environment):
        return self.get_transitive_link_deps_mapping(prefix, environment)
    @lru_cache(maxsize=None)
    def get_transitive_link_deps_mapping(self, prefix, environment):
        result = {}
        for i in self.link_targets:
            mapping = i.get_link_deps_mapping(prefix, environment)
            # we are merging two dictionaries, while keeping the earlier one dominant
            result_tmp = mapping.copy()
            result_tmp.update(result)
            result = result_tmp
        return result
    @lru_cache(maxsize=None)
    def get_link_dep_subdirs(self):
        # Subdirectories of every (transitive) link target, as an ordered set.
        result = OrderedSet()
        for i in self.link_targets:
            result.add(i.get_subdir())
            result.update(i.get_link_dep_subdirs())
        return result
    def get_default_install_dir(self, environment):
        # Build targets go to libdir by default; subclasses may override.
        return environment.get_libdir()
    def get_custom_install_dir(self):
        return self.install_dir
    def get_custom_install_mode(self):
        return self.install_mode
def process_kwargs(self, kwargs, environment):
super().process_kwargs(kwargs)
self.copy_kwargs(kwargs)
kwargs.get('modules', [])
self.need_install = kwargs.get('install', self.need_install)
llist = extract_as_list(kwargs, 'link_with')
for linktarget in llist:
# Sorry for this hack. Keyword targets are kept in holders
# in kwargs. Unpack here without looking at the exact type.
if hasattr(linktarget, "held_object"):
linktarget = linktarget.held_object
if isinstance(linktarget, dependencies.ExternalLibrary):
raise MesonException('''An external library was used in link_with keyword argument, which
is reserved for libraries built as part of this project. External
libraries must be passed using the dependencies keyword argument
instead, because they are conceptually "external dependencies",
just like those detected with the dependency() function.''')
self.link(linktarget)
lwhole = extract_as_list(kwargs, 'link_whole')
for linktarget in lwhole:
self.link_whole(linktarget)
c_pchlist, cpp_pchlist, clist, cpplist, cslist, valalist, objclist, objcpplist, fortranlist, rustlist \
= extract_as_list(kwargs, 'c_pch', 'cpp_pch', 'c_args', 'cpp_args', 'cs_args', 'vala_args', 'objc_args',
'objcpp_args', 'fortran_args', 'rust_args')
self.add_pch('c', c_pchlist)
self.add_pch('cpp', cpp_pchlist)
compiler_args = {'c': clist, 'cpp': cpplist, 'cs': cslist, 'vala': valalist, 'objc': objclist, 'objcpp': objcpplist,
'fortran': fortranlist, 'rust': rustlist
}
for key, value in compiler_args.items():
self.add_compiler_args(key, value)
if not isinstance(self, Executable) or 'export_dynamic' in kwargs:
self.vala_header = kwargs.get('vala_header', self.name + '.h')
self.vala_vapi = kwargs.get('vala_vapi', self.name + '.vapi')
self.vala_gir = kwargs.get('vala_gir', None)
dlist = stringlistify(kwargs.get('d_args', []))
self.add_compiler_args('d', dlist)
dfeatures = dict()
dfeature_unittest = kwargs.get('d_unittest', False)
if dfeature_unittest:
dfeatures['unittest'] = dfeature_unittest
dfeature_versions = kwargs.get('d_module_versions', [])
if dfeature_versions:
dfeatures['versions'] = dfeature_versions
dfeature_debug = kwargs.get('d_debug', [])
if dfeature_debug:
dfeatures['debug'] = dfeature_debug
if 'd_import_dirs' in kwargs:
dfeature_import_dirs = extract_as_list(kwargs, 'd_import_dirs', unholder=True)
for d in dfeature_import_dirs:
if not isinstance(d, IncludeDirs):
raise InvalidArguments('Arguments to d_import_dirs must be include_directories.')
dfeatures['import_dirs'] = dfeature_import_dirs
if dfeatures:
self.d_features = dfeatures
self.link_args = extract_as_list(kwargs, 'link_args')
for i in self.link_args:
if not isinstance(i, str):
raise InvalidArguments('Link_args arguments must be strings.')
for l in self.link_args:
if '-Wl,-rpath' in l or l.startswith('-rpath'):
mlog.warning('''Please do not define rpath with a linker argument, use install_rpath or build_rpath properties instead.
This will become a hard error in a future Meson release.''')
self.process_link_depends(kwargs.get('link_depends', []), environment)
# Target-specific include dirs must be added BEFORE include dirs from
# internal deps (added inside self.add_deps()) to override them.
inclist = extract_as_list(kwargs, 'include_directories')
self.add_include_dirs(inclist)
# Add dependencies (which also have include_directories)
deplist = extract_as_list(kwargs, 'dependencies')
self.add_deps(deplist)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs.get('install_dir', [None]),
(str, bool))
self.install_mode = kwargs.get('install_mode', None)
main_class = kwargs.get('main_class', '')
if not isinstance(main_class, str):
raise InvalidArguments('Main class must be a string')
self.main_class = main_class
if isinstance(self, Executable):
self.gui_app = kwargs.get('gui_app', False)
if not isinstance(self.gui_app, bool):
raise InvalidArguments('Argument gui_app must be boolean.')
elif 'gui_app' in kwargs:
raise InvalidArguments('Argument gui_app can only be used on executables.')
extra_files = extract_as_list(kwargs, 'extra_files')
for i in extra_files:
assert(isinstance(i, File))
trial = os.path.join(environment.get_source_dir(), i.subdir, i.fname)
if not(os.path.isfile(trial)):
raise InvalidArguments('Tried to add non-existing extra file %s.' % i)
self.extra_files = extra_files
self.install_rpath = kwargs.get('install_rpath', '')
if not isinstance(self.install_rpath, str):
raise InvalidArguments('Install_rpath is not a string.')
self.build_rpath = kwargs.get('build_rpath', '')
if not isinstance(self.build_rpath, str):
raise InvalidArguments('Build_rpath is not a string.')
resources = extract_as_list(kwargs, 'resources')
for r in resources:
if not isinstance(r, str):
raise InvalidArguments('Resource argument is not a string.')
trial = os.path.join(environment.get_source_dir(), self.subdir, r)
if not os.path.isfile(trial):
raise InvalidArguments('Tried to add non-existing resource %s.' % r)
self.resources = resources
if 'name_prefix' in kwargs:
name_prefix = kwargs['name_prefix']
if isinstance(name_prefix, list):
if name_prefix:
raise InvalidArguments('name_prefix array must be empty to signify null.')
elif not isinstance(name_prefix, str):
raise InvalidArguments('name_prefix must be a string.')
self.prefix = name_prefix
self.name_prefix_set = True
if 'name_suffix' in kwargs:
name_suffix = kwargs['name_suffix']
if isinstance(name_suffix, list):
if name_suffix:
raise InvalidArguments('name_suffix array must be empty to signify null.')
else:
if not isinstance(name_suffix, str):
raise InvalidArguments('name_suffix must be a string.')
if name_suffix == '':
raise InvalidArguments('name_suffix should not be an empty string. '
'If you want meson to use the default behaviour '
'for each platform pass `[]` (empty array)')
self.suffix = name_suffix
self.name_suffix_set = True
if isinstance(self, StaticLibrary):
# You can't disable PIC on OS X. The compiler ignores -fno-PIC.
# PIC is always on for Windows (all code is position-independent
# since library loading is done differently)
if for_darwin(self.is_cross, self.environment) or for_windows(self.is_cross, self.environment):
self.pic = True
else:
self.pic = self._extract_pic_pie(kwargs, 'pic')
if isinstance(self, Executable):
# Executables must be PIE on Android
if for_android(self.is_cross, self.environment):
self.pie = True
else:
self.pie = self._extract_pic_pie(kwargs, 'pie')
self.implicit_include_directories = kwargs.get('implicit_include_directories', True)
if not isinstance(self.implicit_include_directories, bool):
raise InvalidArguments('Implicit_include_directories must be a boolean.')
self.gnu_symbol_visibility = kwargs.get('gnu_symbol_visibility', '')
if not isinstance(self.gnu_symbol_visibility, str):
raise InvalidArguments('GNU symbol visibility must be a string.')
if self.gnu_symbol_visibility != '':
permitted = ['default', 'internal', 'hidden', 'protected', 'inlineshidden']
if self.gnu_symbol_visibility not in permitted:
raise InvalidArguments('GNU symbol visibility arg %s not one of: %s',
self.symbol_visibility, ', '.join(permitted))
    def _extract_pic_pie(self, kwargs, arg):
        """Resolve the boolean 'pic' or 'pie' kwarg, also honoring manually
        passed -fpic/-fPIC/-fpie/-fPIE flags (with a warning)."""
        # Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags
        all_flags = self.extra_args['c'] + self.extra_args['cpp']
        if '-f' + arg.lower() in all_flags or '-f' + arg.upper() in all_flags:
            mlog.warning("Use the '{}' kwarg instead of passing '{}' manually to {!r}".format(arg, '-f' + arg, self.name))
            return True
        val = kwargs.get(arg, False)
        if not isinstance(val, bool):
            raise InvalidArguments('Argument {} to {!r} must be boolean'.format(arg, self.name))
        return val
    def get_filename(self):
        # Primary output filename ('no_name' until a subclass sets it).
        return self.filename
    def get_outputs(self):
        # Every file produced by this target (filename plus extras like .vapi).
        return self.outputs
    def get_extra_args(self, language):
        # Per-language extra compiler args; [] if none were added.
        return self.extra_args.get(language, [])
    def get_dependencies(self, exclude=None, internal=True):
        """Return the transitive list of linked targets, preserving order
        and skipping anything in *exclude*."""
        transitive_deps = []
        if exclude is None:
            exclude = []
        if internal:
            link_targets = itertools.chain(self.link_targets, self.link_whole_targets)
        else:
            # We don't want the 'internal' libraries when generating the
            # `Libs:` and `Libs.private:` lists in pkg-config files.
            link_targets = self.link_targets
        for t in link_targets:
            if t in transitive_deps or t in exclude:
                continue
            transitive_deps.append(t)
            if isinstance(t, StaticLibrary):
                # Static libraries do not carry their own link-time deps, so
                # recurse into them.
                transitive_deps += t.get_dependencies(transitive_deps + exclude, internal)
        return transitive_deps
    def get_source_subdir(self):
        return self.subdir
    def get_sources(self):
        return self.sources
    def get_objects(self):
        return self.objects
    def get_generated_sources(self):
        return self.generated
    def should_install(self):
        return self.need_install
    def has_pch(self):
        return len(self.pch) > 0
    def get_pch(self, language):
        # PCH spec ([header] or [header, source]) for the language, or [].
        try:
            return self.pch[language]
        except KeyError:
            return[]
    def get_include_dirs(self):
        return self.include_dirs
    def add_deps(self, deps):
        """Attach dependency objects to this target.

        InternalDependency contributions (sources, include dirs, libraries)
        are merged in directly; external Dependency objects are recorded in
        self.external_deps. Anything else raises InvalidArguments.
        """
        deps = listify(deps)
        for dep in deps:
            if hasattr(dep, 'held_object'):
                dep = dep.held_object
            if isinstance(dep, dependencies.InternalDependency):
                # Those parts that are internal.
                self.process_sourcelist(dep.sources)
                self.add_include_dirs(dep.include_directories)
                for l in dep.libraries:
                    self.link(l)
                for l in dep.whole_libraries:
                    self.link_whole(l)
                if dep.compile_args or dep.link_args:
                    # Those parts that are external.
                    extpart = dependencies.InternalDependency('undefined',
                                                              [],
                                                              dep.compile_args,
                                                              dep.link_args,
                                                              [], [], [], [])
                    self.external_deps.append(extpart)
                # Deps of deps.
                self.add_deps(dep.ext_deps)
            elif isinstance(dep, dependencies.Dependency):
                self.external_deps.append(dep)
                self.process_sourcelist(dep.get_sources())
            elif isinstance(dep, BuildTarget):
                raise InvalidArguments('''Tried to use a build target as a dependency.
You probably should put it in link_with instead.''')
            else:
                # This is a bit of a hack. We do not want Build to know anything
                # about the interpreter so we can't import it and use isinstance.
                # This should be reliable enough.
                if hasattr(dep, 'project_args_frozen') or hasattr(dep, 'global_args_frozen'):
                    raise InvalidArguments('Tried to use subproject object as a dependency.\n'
                                           'You probably wanted to use a dependency declared in it instead.\n'
                                           'Access it by calling get_variable() on the subproject object.')
                raise InvalidArguments('Argument is of an unacceptable type {!r}.\nMust be '
                                       'either an external dependency (returned by find_library() or '
                                       'dependency()) or an internal dependency (returned by '
                                       'declare_dependency()).'.format(type(dep).__name__))
    def get_external_deps(self):
        return self.external_deps
    def link(self, target):
        """Link with the given target(s); validates linkability, PIC and
        cross/native consistency."""
        for t in listify(target, unholder=True):
            if not isinstance(t, Target):
                raise InvalidArguments('{!r} is not a target.'.format(t))
            if not t.is_linkable_target():
                raise InvalidArguments('Link target {!r} is not linkable.'.format(t))
            if isinstance(self, SharedLibrary) and isinstance(t, StaticLibrary) and not t.pic:
                msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
                msg += "Use the 'pic' option to static_library to build with PIC."
                raise InvalidArguments(msg)
            if self.is_cross != t.is_cross:
                raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
            self.link_targets.append(t)
    def link_whole(self, target):
        """Like link(), but the whole archive is pulled in; only static
        libraries are accepted."""
        for t in listify(target, unholder=True):
            if not isinstance(t, StaticLibrary):
                raise InvalidArguments('{!r} is not a static library.'.format(t))
            if isinstance(self, SharedLibrary) and not t.pic:
                msg = "Can't link non-PIC static library {!r} into shared library {!r}. ".format(t.name, self.name)
                msg += "Use the 'pic' option to static_library to build with PIC."
                raise InvalidArguments(msg)
            if self.is_cross != t.is_cross:
                raise InvalidArguments('Tried to mix cross built and native libraries in target {!r}'.format(self.name))
            self.link_whole_targets.append(t)
    def add_pch(self, language, pchlist):
        """Validate and store a precompiled-header spec for *language*.

        Accepts [header] or [header, source] (in either order); both files
        must exist and live in the same directory.
        """
        if not pchlist:
            return
        elif len(pchlist) == 1:
            if not environment.is_header(pchlist[0]):
                raise InvalidArguments('PCH argument %s is not a header.' % pchlist[0])
        elif len(pchlist) == 2:
            if environment.is_header(pchlist[0]):
                if not environment.is_source(pchlist[1]):
                    raise InvalidArguments('PCH definition must contain one header and at most one source.')
            elif environment.is_source(pchlist[0]):
                if not environment.is_header(pchlist[1]):
                    raise InvalidArguments('PCH definition must contain one header and at most one source.')
                # Normalize to [header, source] order.
                pchlist = [pchlist[1], pchlist[0]]
            else:
                raise InvalidArguments('PCH argument %s is of unknown type.' % pchlist[0])
            if (os.path.dirname(pchlist[0]) != os.path.dirname(pchlist[1])):
                raise InvalidArguments('PCH files must be stored in the same folder.')
        elif len(pchlist) > 2:
            raise InvalidArguments('PCH definition may have a maximum of 2 files.')
        for f in pchlist:
            if not os.path.isfile(os.path.join(self.environment.source_dir, self.subdir, f)):
                raise MesonException('File %s does not exist.' % f)
        self.pch[language] = pchlist
    def add_include_dirs(self, args):
        # Append IncludeDirs objects, unpacking interpreter holders first.
        ids = []
        for a in args:
            # FIXME same hack, forcibly unpack from holder.
            if hasattr(a, 'held_object'):
                a = a.held_object
            if not isinstance(a, IncludeDirs):
                raise InvalidArguments('Include directory to be added is not an include directory object.')
            ids.append(a)
        self.include_dirs += ids
    def add_compiler_args(self, language, args):
        # Record extra per-language compiler arguments (strings or Files).
        args = listify(args)
        for a in args:
            if not isinstance(a, (str, File)):
                raise InvalidArguments('A non-string passed to compiler args.')
        if language in self.extra_args:
            self.extra_args[language] += args
        else:
            self.extra_args[language] = args
    def get_aliases(self):
        # No alias filenames by default; library subclasses override this.
        return {}
    def get_langs_used_by_deps(self):
        '''
        Sometimes you want to link to a C++ library that exports C API, which
        means the linker must link in the C++ stdlib, and we must use a C++
        compiler for linking. The same is also applicable for objc/objc++, etc,
        so we can keep using clink_langs for the priority order.
        See: https://github.com/mesonbuild/meson/issues/1653
        '''
        langs = []
        # Check if any of the external libraries were written in this language
        for dep in self.external_deps:
            if dep.language is None:
                continue
            if dep.language not in langs:
                langs.append(dep.language)
        # Check if any of the internal libraries this target links to were
        # written in this language
        for link_target in itertools.chain(self.link_targets, self.link_whole_targets):
            for language in link_target.compilers:
                if language not in langs:
                    langs.append(language)
        return langs
    def get_clink_dynamic_linker_and_stdlibs(self):
        '''
        We use the order of languages in `clink_langs` to determine which
        linker to use in case the target has sources compiled with multiple
        compilers. All languages other than those in this list have their own
        linker.
        Note that Vala outputs C code, so Vala sources can use any linker
        that can link compiled C. We don't actually need to add an exception
        for Vala here because of that.
        '''
        # Populate list of all compilers, not just those being used to compile
        # sources in this target
        if self.is_cross:
            all_compilers = self.environment.coredata.cross_compilers
        else:
            all_compilers = self.environment.coredata.compilers
        # Languages used by dependencies
        dep_langs = self.get_langs_used_by_deps()
        # Pick a compiler based on the language priority-order
        for l in clink_langs:
            if l in self.compilers or l in dep_langs:
                try:
                    linker = all_compilers[l]
                except KeyError:
                    raise MesonException(
                        'Could not get a dynamic linker for build target {!r}. '
                        'Requires a linker for language "{}", but that is not '
                        'a project language.'.format(self.name, l))
                stdlib_args = []
                added_languages = set()
                # NOTE(review): added_languages is filled but never consulted,
                # so a language appearing in both self.compilers and dep_langs
                # adds its stdlib flags twice -- confirm whether dedup was
                # intended here.
                for dl in itertools.chain(self.compilers, dep_langs):
                    if dl != linker.language:
                        stdlib_args += all_compilers[dl].language_stdlib_only_link_flags()
                        added_languages.add(dl)
                return linker, stdlib_args
        m = 'Could not get a dynamic linker for build target {!r}'
        raise AssertionError(m.format(self.name))
    def get_using_msvc(self):
        '''
        Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
        and SharedLibrary for deciding when to use MSVC-specific file naming
        and debug filenames.
        If at least some code is built with MSVC and the final library is
        linked with MSVC, we can be sure that some debug info will be
        generated. We only check the dynamic linker here because the static
        linker is guaranteed to be of the same type.
        Interesting cases:
        1. The Vala compiler outputs C code to be compiled by whatever
        C compiler we're using, so all objects will still be created by the
        MSVC compiler.
        2. If the target contains only objects, process_compilers guesses and
        picks the first compiler that smells right.
        '''
        linker, _ = self.get_clink_dynamic_linker_and_stdlibs()
        # Mixing many languages with MSVC is not supported yet so ignore stdlibs.
        if linker and linker.get_id() in ['msvc', 'clang-cl', 'llvm', 'dmd']:
            return True
        return False
def check_module_linking(self):
'''
Warn if shared modules are linked with target: (link_with) #2865
'''
for link_target in self.link_targets:
if isinstance(link_target, SharedModule):
if for_darwin(self.is_cross, self.environment):
raise MesonException('''target links against shared modules.
This is not permitted on OSX''')
else:
mlog.warning('''target links against shared modules. This is not
recommended as it is not supported on some platforms''')
return
class Generator:
    """Wrapper for an executable that transforms input files into outputs.

    Corresponds to the generator() function in meson.build. The actual
    transformation of a list of files is performed by process_files(),
    which returns a GeneratedList.
    """

    def __init__(self, args, kwargs):
        if len(args) != 1:
            raise InvalidArguments('Generator requires exactly one positional argument: the executable')
        exe = args[0]
        # Unwrap an interpreter holder object to get the underlying
        # build-level object.
        if hasattr(exe, 'held_object'):
            exe = exe.held_object
        if not isinstance(exe, (Executable, dependencies.ExternalProgram)):
            raise InvalidArguments('First generator argument must be an executable.')
        self.exe = exe
        # Optional depfile template; set by process_kwargs() when given.
        self.depfile = None
        # Whether the command's stdout is captured as the output.
        self.capture = False
        self.process_kwargs(kwargs)

    def __repr__(self):
        repr_str = "<{0}: {1}>"
        return repr_str.format(self.__class__.__name__, self.exe)

    def get_exe(self):
        return self.exe

    def process_kwargs(self, kwargs):
        """Validate and store 'arguments', 'output', 'depfile' and 'capture'.

        Raises InvalidArguments for any malformed keyword argument.
        """
        if 'arguments' not in kwargs:
            raise InvalidArguments('Generator must have "arguments" keyword argument.')
        args = kwargs['arguments']
        if isinstance(args, str):
            args = [args]
        if not isinstance(args, list):
            raise InvalidArguments('"Arguments" keyword argument must be a string or a list of strings.')
        for a in args:
            if not isinstance(a, str):
                raise InvalidArguments('A non-string object in "arguments" keyword argument.')
        self.arglist = args
        if 'output' not in kwargs:
            raise InvalidArguments('Generator must have "output" keyword argument.')
        outputs = listify(kwargs['output'])
        for rule in outputs:
            if not isinstance(rule, str):
                raise InvalidArguments('"output" may only contain strings.')
            # Each output name must be derived from the input name.
            if '@BASENAME@' not in rule and '@PLAINNAME@' not in rule:
                raise InvalidArguments('Every element of "output" must contain @BASENAME@ or @PLAINNAME@.')
            if has_path_sep(rule):
                raise InvalidArguments('"outputs" must not contain a directory separator.')
        if len(outputs) > 1:
            # @OUTPUT@ is ambiguous with more than one output file.
            for o in outputs:
                if '@OUTPUT@' in o:
                    raise InvalidArguments('Tried to use @OUTPUT@ in a rule with more than one output.')
        self.outputs = outputs
        if 'depfile' in kwargs:
            depfile = kwargs['depfile']
            if not isinstance(depfile, str):
                raise InvalidArguments('Depfile must be a string.')
            if os.path.basename(depfile) != depfile:
                raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
            self.depfile = depfile
        if 'capture' in kwargs:
            capture = kwargs['capture']
            if not isinstance(capture, bool):
                raise InvalidArguments('Capture must be boolean.')
            self.capture = capture

    def get_base_outnames(self, inname):
        """Return the output filenames for input 'inname' with @BASENAME@
        and @PLAINNAME@ substituted."""
        plainname = os.path.basename(inname)
        basename = os.path.splitext(plainname)[0]
        bases = [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.outputs]
        return bases

    def get_dep_outname(self, inname):
        """Return the depfile name for input 'inname'.

        Raises InvalidArguments if no depfile was defined for this rule.
        """
        if self.depfile is None:
            raise InvalidArguments('Tried to get dep name for rule that does not have dependency file defined.')
        plainname = os.path.basename(inname)
        basename = os.path.splitext(plainname)[0]
        return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)

    def get_arglist(self, inname):
        """Return the command arguments with input-name templates expanded."""
        plainname = os.path.basename(inname)
        basename = os.path.splitext(plainname)[0]
        return [x.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname) for x in self.arglist]

    def is_parent_path(self, parent, trial):
        """Return True if 'trial' lies at or below directory 'parent'.

        PurePath.relative_to() raises ValueError when 'trial' is not inside
        'parent' (it never synthesizes '..' segments), so the success of the
        call is itself the containment test. Previously the ValueError was
        allowed to escape to the caller instead of reporting False, which
        turned the intended InvalidArguments error in process_files() into
        an unhandled ValueError.
        """
        try:
            pathlib.PurePath(trial).relative_to(parent)
        except ValueError:
            return False
        return True

    def process_files(self, name, files, state, preserve_path_from=None, extra_args=None):
        """Build a GeneratedList from 'files' (strings or File objects).

        'extra_args' defaults to None rather than a mutable [] so the
        default is not shared between calls.
        """
        output = GeneratedList(self, state.subdir, preserve_path_from,
                               extra_args=extra_args if extra_args is not None else [])
        for f in files:
            if isinstance(f, str):
                f = File.from_source_file(state.environment.source_dir, state.subdir, f)
            elif not isinstance(f, File):
                raise InvalidArguments('{} arguments must be strings or files not {!r}.'.format(name, f))
            if preserve_path_from:
                abs_f = f.absolute_path(state.environment.source_dir, state.environment.build_dir)
                if not self.is_parent_path(preserve_path_from, abs_f):
                    raise InvalidArguments('When using preserve_path_from, all input files must be in a subdirectory of the given dir.')
            output.add_file(f, state)
        return output
class GeneratedList:
    """Result of generator.process(): the input files fed to a Generator
    together with the output filenames that will be produced from them."""

    def __init__(self, generator, subdir, preserve_path_from=None, extra_args=None):
        # Unwrap an interpreter holder object if necessary.
        if hasattr(generator, 'held_object'):
            generator = generator.held_object
        self.generator = generator
        self.name = self.generator.exe
        self.subdir = subdir
        self.infilelist = []
        self.outfilelist = []
        # Maps each input file to the list of outputs generated from it.
        self.outmap = {}
        self.extra_depends = []
        self.preserve_path_from = preserve_path_from
        # Create a fresh list per instance: the previous mutable default
        # argument ([]) was a single shared list aliased by every
        # GeneratedList constructed without an explicit extra_args.
        self.extra_args = extra_args if extra_args is not None else []

    def add_preserved_path_segment(self, infile, outfiles, state):
        """Prefix each output name with infile's directory relative to
        self.preserve_path_from (which must be an absolute path)."""
        result = []
        in_abs = infile.absolute_path(state.environment.source_dir, state.environment.build_dir)
        assert(os.path.isabs(self.preserve_path_from))
        rel = os.path.relpath(in_abs, self.preserve_path_from)
        path_segment = os.path.dirname(rel)
        for of in outfiles:
            result.append(os.path.join(path_segment, of))
        return result

    def add_file(self, newfile, state):
        """Record an input File and the outputs it will generate."""
        self.infilelist.append(newfile)
        outfiles = self.generator.get_base_outnames(newfile.fname)
        if self.preserve_path_from:
            outfiles = self.add_preserved_path_segment(newfile, outfiles, state)
        self.outfilelist += outfiles
        self.outmap[newfile] = outfiles

    def get_inputs(self):
        return self.infilelist

    def get_outputs(self):
        return self.outfilelist

    def get_outputs_for(self, filename):
        return self.outmap[filename]

    def get_generator(self):
        return self.generator

    def get_extra_args(self):
        return self.extra_args
class Executable(BuildTarget):
    """Build target for an executable program.

    Computes the platform/compiler-appropriate filename (the 'exe' suffix on
    Windows/Cygwin and for C#/Mono, 'axf' for ARM toolchains, 'abs' for
    Renesas CC-RX) and, when 'export_dynamic' is in use, the name of the
    import library other targets may link against.
    """
    known_kwargs = known_exe_kwargs

    def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        self.typename = 'executable'
        # Inherit the project-wide b_pie (position-independent executable)
        # option unless 'pie' was given explicitly on this target.
        if 'pie' not in kwargs and 'b_pie' in environment.coredata.base_options:
            kwargs['pie'] = environment.coredata.base_options['b_pie'].value
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        # Unless overridden, executables have no suffix or prefix. Except on
        # Windows and with C#/Mono executables where the suffix is 'exe'.
        if not hasattr(self, 'prefix'):
            self.prefix = ''
        if not hasattr(self, 'suffix'):
            # Executable for Windows or C#/Mono
            if (for_windows(is_cross, environment) or
                    for_cygwin(is_cross, environment) or 'cs' in self.compilers):
                self.suffix = 'exe'
            # ARM toolchains produce .axf images
            elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('arm') or
                  'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('arm')):
                self.suffix = 'axf'
            # Renesas CC-RX produces .abs images
            elif ('c' in self.compilers and self.compilers['c'].get_id().startswith('ccrx') or
                  'cpp' in self.compilers and self.compilers['cpp'].get_id().startswith('ccrx')):
                self.suffix = 'abs'
            else:
                self.suffix = ''
        self.filename = self.name
        if self.suffix:
            self.filename += '.' + self.suffix
        self.outputs = [self.filename]

        # The import library this target will generate
        self.import_filename = None
        # The import library that Visual Studio would generate (and accept)
        self.vs_import_filename = None
        # The import library that GCC would generate (and prefer)
        self.gcc_import_filename = None

        # Check for export_dynamic
        self.export_dynamic = False
        if kwargs.get('export_dynamic'):
            if not isinstance(kwargs['export_dynamic'], bool):
                raise InvalidArguments('"export_dynamic" keyword argument must be a boolean')
            self.export_dynamic = True
        # Requesting an implib implies export_dynamic.
        if kwargs.get('implib'):
            self.export_dynamic = True
        if self.export_dynamic and kwargs.get('implib') is False:
            # Message grammar fixed (was: '... must not be false for if ...').
            raise InvalidArguments('"implib" keyword argument must not be false when "export_dynamic" is true')
        # If using export_dynamic, set the import library name
        if self.export_dynamic:
            implib_basename = self.name + '.exe'
            # A string 'implib' value overrides the default basename.
            if not isinstance(kwargs.get('implib', False), bool):
                implib_basename = kwargs['implib']
            if for_windows(is_cross, environment) or for_cygwin(is_cross, environment):
                self.vs_import_filename = '{0}.lib'.format(implib_basename)
                self.gcc_import_filename = 'lib{0}.a'.format(implib_basename)
                if self.get_using_msvc():
                    self.import_filename = self.vs_import_filename
                else:
                    self.import_filename = self.gcc_import_filename

        # Only linkwithable if using export_dynamic
        self.is_linkwithable = self.export_dynamic

    def get_default_install_dir(self, environment):
        return environment.get_bindir()

    def description(self):
        '''Human friendly description of the executable'''
        return self.name

    def type_suffix(self):
        # Suffix used to build the unique target id.
        return "@exe"

    def get_import_filename(self):
        """
        The name of the import library that will be outputted by the compiler

        Returns None if there is no import library required for this platform
        """
        return self.import_filename

    def get_import_filenameslist(self):
        # Both toolchain flavours of the import library name, when one exists.
        if self.import_filename:
            return [self.vs_import_filename, self.gcc_import_filename]
        return []

    def is_linkable_target(self):
        return self.is_linkwithable
class StaticLibrary(BuildTarget):
    """Build target for a static library (archive)."""
    known_kwargs = known_stlib_kwargs

    def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        self.typename = 'static library'
        # Pick up the project-wide b_staticpic option unless 'pic' was given
        # explicitly on this target.
        if 'pic' not in kwargs and 'b_staticpic' in environment.coredata.base_options:
            kwargs['pic'] = environment.coredata.base_options['b_staticpic'].value
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        if 'cs' in self.compilers:
            raise InvalidArguments('Static libraries not supported for C#.')
        if 'rust' in self.compilers:
            crate = getattr(self, 'rust_crate_type', None)
            if crate is None or crate == 'lib':
                # If no crate type is specified, or it's the generic lib type, use rlib
                mlog.debug('Defaulting Rust static library target crate type to rlib')
                self.rust_crate_type = 'rlib'
            elif crate not in ('rlib', 'staticlib'):
                # Don't let configuration proceed with a non-static crate type
                raise InvalidArguments('Crate type "{0}" invalid for static libraries; must be "rlib" or "staticlib"'.format(self.rust_crate_type))
        # By default a static library is named libfoo.a even on Windows because
        # MSVC does not have a consistent convention for what static libraries
        # are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses
        # it and GCC only looks for static libraries called foo.lib and
        # libfoo.a. However, we cannot use foo.lib because that's the same as
        # the import library. Using libfoo.a is ok because people using MSVC
        # always pass the library filename while linking anyway.
        if not hasattr(self, 'prefix'):
            self.prefix = 'lib'
        if not hasattr(self, 'suffix'):
            if 'rust' in self.compilers:
                # rust_crate_type was normalized above to rlib or staticlib.
                if getattr(self, 'rust_crate_type', 'rlib') == 'rlib':
                    # default Rust static library suffix
                    self.suffix = 'rlib'
                elif self.rust_crate_type == 'staticlib':
                    self.suffix = 'a'
            else:
                self.suffix = 'a'
        self.filename = self.prefix + self.name + '.' + self.suffix
        self.outputs = [self.filename]

    def get_link_deps_mapping(self, prefix, environment):
        # Static libraries need no install-name remapping.
        return {}

    def get_default_install_dir(self, environment):
        return environment.get_static_lib_dir()

    def type_suffix(self):
        # Suffix used to build the unique target id.
        return "@sta"

    def process_kwargs(self, kwargs, environment):
        super().process_kwargs(kwargs, environment)
        if 'rust_crate_type' in kwargs:
            rust_crate_type = kwargs['rust_crate_type']
            if not isinstance(rust_crate_type, str):
                raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
            self.rust_crate_type = rust_crate_type

    def is_linkable_target(self):
        return True
class SharedLibrary(BuildTarget):
    """Build target for a shared library (.so/.dylib/.dll).

    Handles platform-specific naming, library versioning (ltversion,
    soversion, darwin_versions), import libraries on Windows/Cygwin, and
    the symlink aliases needed for versioned .so/.dylib libraries.
    """
    known_kwargs = known_shlib_kwargs
    def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        self.typename = 'shared library'
        # Version info; filled in by process_kwargs() if the kwargs are given.
        self.soversion = None
        self.ltversion = None
        # Max length 2, first element is compatibility_version, second is current_version
        self.darwin_versions = []
        # Visual Studio module-definitions (.def) file, if any.
        self.vs_module_defs = None
        # The import library this target will generate
        self.import_filename = None
        # The import library that Visual Studio would generate (and accept)
        self.vs_import_filename = None
        # The import library that GCC would generate (and prefer)
        self.gcc_import_filename = None
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        if 'rust' in self.compilers:
            # If no crate type is specified, or it's the generic lib type, use dylib
            if not hasattr(self, 'rust_crate_type') or self.rust_crate_type == 'lib':
                mlog.debug('Defaulting Rust dynamic library target crate type to "dylib"')
                self.rust_crate_type = 'dylib'
            # Don't let configuration proceed with a non-dynamic crate type
            elif self.rust_crate_type not in ['dylib', 'cdylib']:
                raise InvalidArguments('Crate type "{0}" invalid for dynamic libraries; must be "dylib" or "cdylib"'.format(self.rust_crate_type))
        if not hasattr(self, 'prefix'):
            self.prefix = None
        if not hasattr(self, 'suffix'):
            self.suffix = None
        # Template for the unversioned filename; also used for aliases.
        self.basic_filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        self.determine_filenames(is_cross, environment)
    def get_link_deps_mapping(self, prefix, environment):
        # Map this library's macOS dylib install name to its installed
        # location, merged with the transitive mappings of dependencies.
        # NOTE(review): the install name is computed unconditionally, not only
        # when targeting macOS — presumably harmless elsewhere; confirm.
        result = {}
        mappings = self.get_transitive_link_deps_mapping(prefix, environment)
        old = get_target_macos_dylib_install_name(self)
        if old not in mappings:
            fname = self.get_filename()
            outdirs, _ = self.get_install_dir(self.environment)
            new = os.path.join(prefix, outdirs[0], fname)
            result.update({old: new})
            mappings.update(result)
        return mappings
    def get_default_install_dir(self, environment):
        return environment.get_shared_lib_dir()
    def determine_filenames(self, is_cross, env):
        """
        See https://github.com/mesonbuild/meson/pull/417 for details.

        First we determine the filename template (self.filename_tpl), then we
        set the output filename (self.filename).

        The template is needed while creating aliases (self.get_aliases),
        which are needed while generating .so shared libraries for Linux.

        Besides this, there's also the import library name, which is only used
        on Windows since on that platform the linker uses a separate library
        called the "import library" during linking instead of the shared
        library (DLL). The toolchain will output an import library in one of
        two formats: GCC or Visual Studio.

        When we're building with Visual Studio, the import library that will be
        generated by the toolchain is self.vs_import_filename, and with
        MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
        always contain the import library name this target will generate.
        """
        prefix = ''
        suffix = ''
        self.filename_tpl = self.basic_filename_tpl
        # NOTE: manual prefix/suffix override is currently only tested for C/C++
        # C# and Mono
        if 'cs' in self.compilers:
            prefix = ''
            suffix = 'dll'
            self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        # C, C++, Swift, Vala
        # Only Windows uses a separate import library for linking
        # For all other targets/platforms import_filename stays None
        elif for_windows(is_cross, env):
            suffix = 'dll'
            self.vs_import_filename = '{0}{1}.lib'.format(self.prefix if self.prefix is not None else '', self.name)
            self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
            if self.get_using_msvc():
                # Shared library is of the form foo.dll
                prefix = ''
                # Import library is called foo.lib
                self.import_filename = self.vs_import_filename
            # Assume GCC-compatible naming
            else:
                # Shared library is of the form libfoo.dll
                prefix = 'lib'
                # Import library is called libfoo.dll.a
                self.import_filename = self.gcc_import_filename
            # Shared library has the soversion if it is defined
            if self.soversion:
                self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
            else:
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        elif for_cygwin(is_cross, env):
            suffix = 'dll'
            self.gcc_import_filename = '{0}{1}.dll.a'.format(self.prefix if self.prefix is not None else 'lib', self.name)
            # Shared library is of the form cygfoo.dll
            # (ld --dll-search-prefix=cyg is the default)
            prefix = 'cyg'
            # Import library is called libfoo.dll.a
            self.import_filename = self.gcc_import_filename
            if self.soversion:
                self.filename_tpl = '{0.prefix}{0.name}-{0.soversion}.{0.suffix}'
            else:
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        elif for_darwin(is_cross, env):
            prefix = 'lib'
            suffix = 'dylib'
            # On macOS, the filename can only contain the major version
            if self.soversion:
                # libfoo.X.dylib
                self.filename_tpl = '{0.prefix}{0.name}.{0.soversion}.{0.suffix}'
            else:
                # libfoo.dylib
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        elif for_android(is_cross, env):
            prefix = 'lib'
            suffix = 'so'
            # Android doesn't support shared_library versioning
            self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        else:
            prefix = 'lib'
            suffix = 'so'
            if self.ltversion:
                # libfoo.so.X[.Y[.Z]] (.Y and .Z are optional)
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.ltversion}'
            elif self.soversion:
                # libfoo.so.X
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}.{0.soversion}'
            else:
                # No versioning, libfoo.so
                self.filename_tpl = '{0.prefix}{0.name}.{0.suffix}'
        # Manual prefix/suffix overrides win over the platform defaults.
        if self.prefix is None:
            self.prefix = prefix
        if self.suffix is None:
            self.suffix = suffix
        self.filename = self.filename_tpl.format(self)
        self.outputs = [self.filename]
    @staticmethod
    def _validate_darwin_versions(darwin_versions):
        # Normalize and validate the 'darwin_versions' kwarg into a
        # two-element list of 'X[.Y[.Z]]' version strings
        # (compatibility_version, current_version).
        try:
            if isinstance(darwin_versions, int):
                darwin_versions = str(darwin_versions)
            if isinstance(darwin_versions, str):
                darwin_versions = 2 * [darwin_versions]
            if not isinstance(darwin_versions, list):
                raise InvalidArguments('Shared library darwin_versions: must be a string, integer,'
                                       'or a list, not {!r}'.format(darwin_versions))
            if len(darwin_versions) > 2:
                raise InvalidArguments('Shared library darwin_versions: list must contain 2 or fewer elements')
            if len(darwin_versions) == 1:
                darwin_versions = 2 * darwin_versions
            for i, v in enumerate(darwin_versions[:]):
                if isinstance(v, int):
                    v = str(v)
                if not isinstance(v, str):
                    raise InvalidArguments('Shared library darwin_versions: list elements '
                                           'must be strings or integers, not {!r}'.format(v))
                if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', v):
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z where '
                                           'X, Y, Z are numbers, and Y and Z are optional')
                # int() below may raise ValueError; caught by the outer try.
                parts = v.split('.')
                if len(parts) in (1, 2, 3) and int(parts[0]) > 65535:
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
                                           'where X is [0, 65535] and Y, Z are optional')
                if len(parts) in (2, 3) and int(parts[1]) > 255:
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
                                           'where Y is [0, 255] and Y, Z are optional')
                if len(parts) == 3 and int(parts[2]) > 255:
                    raise InvalidArguments('Shared library darwin_versions: must be X.Y.Z '
                                           'where Z is [0, 255] and Y, Z are optional')
                darwin_versions[i] = v
        except ValueError:
            raise InvalidArguments('Shared library darwin_versions: value is invalid')
        return darwin_versions
    def process_kwargs(self, kwargs, environment):
        """Handle shared-library specific kwargs: version/soversion,
        darwin_versions, vs_module_defs and rust_crate_type."""
        super().process_kwargs(kwargs, environment)
        if not for_android(self.is_cross, self.environment):
            supports_versioning = True
        else:
            supports_versioning = False
        if supports_versioning:
            # Shared library version
            if 'version' in kwargs:
                self.ltversion = kwargs['version']
                if not isinstance(self.ltversion, str):
                    raise InvalidArguments('Shared library version needs to be a string, not ' + type(self.ltversion).__name__)
                if not re.fullmatch(r'[0-9]+(\.[0-9]+){0,2}', self.ltversion):
                    raise InvalidArguments('Invalid Shared library version "{0}". Must be of the form X.Y.Z where all three are numbers. Y and Z are optional.'.format(self.ltversion))
            # Try to extract/deduce the soversion
            if 'soversion' in kwargs:
                self.soversion = kwargs['soversion']
                if isinstance(self.soversion, int):
                    self.soversion = str(self.soversion)
                if not isinstance(self.soversion, str):
                    raise InvalidArguments('Shared library soversion is not a string or integer.')
            elif self.ltversion:
                # library version is defined, get the soversion from that
                # We replicate what Autotools does here and take the first
                # number of the version by default.
                self.soversion = self.ltversion.split('.')[0]
            # macOS and iOS dylib compatibility_version and current_version
            if 'darwin_versions' in kwargs:
                self.darwin_versions = self._validate_darwin_versions(kwargs['darwin_versions'])
            elif self.soversion:
                # If unspecified, pick the soversion
                self.darwin_versions = 2 * [self.soversion]
        # Visual Studio module-definitions file
        if 'vs_module_defs' in kwargs:
            path = kwargs['vs_module_defs']
            if hasattr(path, 'held_object'):
                path = path.held_object
            if isinstance(path, str):
                if os.path.isabs(path):
                    self.vs_module_defs = File.from_absolute_file(path)
                else:
                    self.vs_module_defs = File.from_source_file(environment.source_dir, self.subdir, path)
                self.link_depends.append(self.vs_module_defs)
            elif isinstance(path, File):
                # When passing a generated file.
                self.vs_module_defs = path
                self.link_depends.append(path)
            elif hasattr(path, 'get_filename'):
                # When passing output of a Custom Target
                path = File.from_built_file(path.subdir, path.get_filename())
                self.vs_module_defs = path
                self.link_depends.append(path)
            else:
                raise InvalidArguments(
                    'Shared library vs_module_defs must be either a string, '
                    'a file object or a Custom Target')
        if 'rust_crate_type' in kwargs:
            rust_crate_type = kwargs['rust_crate_type']
            if isinstance(rust_crate_type, str):
                self.rust_crate_type = rust_crate_type
            else:
                raise InvalidArguments('Invalid rust_crate_type "{0}": must be a string.'.format(rust_crate_type))
    def get_import_filename(self):
        """
        The name of the import library that will be outputted by the compiler

        Returns None if there is no import library required for this platform
        """
        return self.import_filename
    def get_import_filenameslist(self):
        # Both toolchain flavours of the import library name, when one exists.
        if self.import_filename:
            return [self.vs_import_filename, self.gcc_import_filename]
        return []
    def get_all_link_deps(self):
        # This library plus everything it links against, transitively.
        return [self] + self.get_transitive_link_deps()
    def get_aliases(self):
        """
        If the versioned library name is libfoo.so.0.100.0, aliases are:
        * libfoo.so.0 (soversion) -> libfoo.so.0.100.0
        * libfoo.so (unversioned; for linking) -> libfoo.so.0
        Same for dylib:
        * libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
        """
        aliases = {}
        # Aliases are only useful with .so and .dylib libraries. Also if
        # there's no self.soversion (no versioning), we don't need aliases.
        if self.suffix not in ('so', 'dylib') or not self.soversion:
            return {}
        # With .so libraries, the minor and micro versions are also in the
        # filename. If ltversion != soversion we create an soversion alias:
        # libfoo.so.0 -> libfoo.so.0.100.0
        # Where libfoo.so.0.100.0 is the actual library
        if self.suffix == 'so' and self.ltversion and self.ltversion != self.soversion:
            alias_tpl = self.filename_tpl.replace('ltversion', 'soversion')
            ltversion_filename = alias_tpl.format(self)
            aliases[ltversion_filename] = self.filename
        # libfoo.so.0/libfoo.0.dylib is the actual library
        else:
            ltversion_filename = self.filename
        # Unversioned alias:
        # libfoo.so -> libfoo.so.0
        # libfoo.dylib -> libfoo.0.dylib
        aliases[self.basic_filename_tpl.format(self)] = ltversion_filename
        return aliases
    def type_suffix(self):
        # Suffix used to build the unique target id.
        return "@sha"
    def is_linkable_target(self):
        return True
# A shared library that is meant to be used with dlopen rather than linking
# into something else.
class SharedModule(SharedLibrary):
    """A shared library meant to be loaded with dlopen() rather than linked
    against; versioning kwargs are therefore rejected."""
    known_kwargs = known_shmod_kwargs

    def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        # Library versioning makes no sense for dlopen()ed modules.
        for forbidden in ('version', 'soversion'):
            if forbidden in kwargs:
                raise MesonException('Shared modules must not specify the {} kwarg.'.format(forbidden))
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        self.typename = 'shared module'

    def get_default_install_dir(self, environment):
        return environment.get_shared_module_dir()
class CustomTarget(Target):
known_kwargs = set([
'input',
'output',
'command',
'capture',
'install',
'install_dir',
'install_mode',
'build_always',
'build_always_stale',
'depends',
'depend_files',
'depfile',
'build_by_default',
'override_options',
'console',
])
def __init__(self, name, subdir, subproject, kwargs, absolute_paths=False):
self.typename = 'custom'
super().__init__(name, subdir, subproject, False)
self.dependencies = []
self.extra_depends = []
self.depend_files = [] # Files that this target depends on but are not on the command line.
self.depfile = None
self.process_kwargs(kwargs)
self.extra_files = []
# Whether to use absolute paths for all files on the commandline
self.absolute_paths = absolute_paths
unknowns = []
for k in kwargs:
if k not in CustomTarget.known_kwargs:
unknowns.append(k)
if len(unknowns) > 0:
mlog.warning('Unknown keyword arguments in target %s: %s' %
(self.name, ', '.join(unknowns)))
def get_default_install_dir(self, environment):
return None
def __lt__(self, other):
return self.get_id() < other.get_id()
def __repr__(self):
repr_str = "<{0} {1}: {2}>"
return repr_str.format(self.__class__.__name__, self.get_id(), self.command)
def get_id(self):
return self.name + self.type_suffix()
def get_target_dependencies(self):
deps = self.dependencies[:]
deps += self.extra_depends
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, (BuildTarget, CustomTarget)):
deps.append(c)
return deps
def get_transitive_build_target_deps(self):
'''
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
'''
bdeps = set()
deps = self.get_target_dependencies()
for d in deps:
if isinstance(d, BuildTarget):
bdeps.add(d)
elif isinstance(d, CustomTarget):
bdeps.update(d.get_transitive_build_target_deps())
return bdeps
def flatten_command(self, cmd):
cmd = listify(cmd, unholder=True)
final_cmd = []
for c in cmd:
if isinstance(c, str):
final_cmd.append(c)
elif isinstance(c, File):
self.depend_files.append(c)
final_cmd.append(c)
elif isinstance(c, dependencies.ExternalProgram):
if not c.found():
m = 'Tried to use not-found external program {!r} in "command"'
raise InvalidArguments(m.format(c.name))
path = c.get_path()
if os.path.isabs(path):
# Can only add a dependency on an external program which we
# know the absolute path of
self.depend_files.append(File.from_absolute_file(path))
final_cmd += c.get_command()
elif isinstance(c, (BuildTarget, CustomTarget)):
self.dependencies.append(c)
final_cmd.append(c)
elif isinstance(c, list):
final_cmd += self.flatten_command(c)
else:
raise InvalidArguments('Argument {!r} in "command" is invalid'.format(c))
return final_cmd
def process_kwargs(self, kwargs):
super().process_kwargs(kwargs)
self.sources = extract_as_list(kwargs, 'input', unholder=True)
if 'output' not in kwargs:
raise InvalidArguments('Missing keyword argument "output".')
self.outputs = listify(kwargs['output'])
# This will substitute values from the input into output and return it.
inputs = get_sources_string_names(self.sources)
values = get_filenames_templates_dict(inputs, [])
for i in self.outputs:
if not(isinstance(i, str)):
raise InvalidArguments('Output argument not a string.')
if i == '':
raise InvalidArguments('Output must not be empty.')
if i.strip() == '':
raise InvalidArguments('Output must not consist only of whitespace.')
if has_path_sep(i):
raise InvalidArguments('Output {!r} must not contain a path segment.'.format(i))
if '@INPUT@' in i or '@INPUT0@' in i:
m = 'Output cannot contain @INPUT@ or @INPUT0@, did you ' \
'mean @PLAINNAME@ or @BASENAME@?'
raise InvalidArguments(m)
# We already check this during substitution, but the error message
# will be unclear/confusing, so check it here.
if len(inputs) != 1 and ('@PLAINNAME@' in i or '@BASENAME@' in i):
m = "Output cannot contain @PLAINNAME@ or @BASENAME@ when " \
"there is more than one input (we can't know which to use)"
raise InvalidArguments(m)
self.outputs = substitute_values(self.outputs, values)
self.capture = kwargs.get('capture', False)
if self.capture and len(self.outputs) != 1:
raise InvalidArguments('Capturing can only output to a single file.')
self.console = kwargs.get('console', False)
if not isinstance(self.console, bool):
raise InvalidArguments('"console" kwarg only accepts booleans')
if self.capture and self.console:
raise InvalidArguments("Can't both capture output and output to console")
if 'command' not in kwargs:
raise InvalidArguments('Missing keyword argument "command".')
if 'depfile' in kwargs:
depfile = kwargs['depfile']
if not isinstance(depfile, str):
raise InvalidArguments('Depfile must be a string.')
if os.path.basename(depfile) != depfile:
raise InvalidArguments('Depfile must be a plain filename without a subdirectory.')
self.depfile = depfile
self.command = self.flatten_command(kwargs['command'])
if self.capture:
for c in self.command:
if isinstance(c, str) and '@OUTPUT@' in c:
raise InvalidArguments('@OUTPUT@ is not allowed when capturing output.')
if 'install' in kwargs:
self.install = kwargs['install']
if not isinstance(self.install, bool):
raise InvalidArguments('"install" must be boolean.')
if self.install:
if 'install_dir' not in kwargs:
raise InvalidArguments('"install_dir" must be specified '
'when installing a target')
if isinstance(kwargs['install_dir'], list):
FeatureNew('multiple install_dir for custom_target', '0.40.0').use(self.subproject)
# If an item in this list is False, the output corresponding to
# the list index of that item will not be installed
self.install_dir = typeslistify(kwargs['install_dir'], (str, bool))
self.install_mode = kwargs.get('install_mode', None)
else:
self.install = False
self.install_dir = [None]
self.install_mode = None
if 'build_always' in kwargs and 'build_always_stale' in kwargs:
raise InvalidArguments('build_always and build_always_stale are mutually exclusive. Combine build_by_default and build_always_stale.')
elif 'build_always' in kwargs:
mlog.deprecation('build_always is deprecated. Combine build_by_default and build_always_stale instead.')
if 'build_by_default' not in kwargs:
self.build_by_default = kwargs['build_always']
self.build_always_stale = kwargs['build_always']
elif 'build_always_stale' in kwargs:
self.build_always_stale = kwargs['build_always_stale']
if not isinstance(self.build_always_stale, bool):
raise InvalidArguments('Argument build_always_stale must be a boolean.')
extra_deps, depend_files = extract_as_list(kwargs, 'depends', 'depend_files', pop = False)
for ed in extra_deps:
while hasattr(ed, 'held_object'):
ed = ed.held_object
if not isinstance(ed, (CustomTarget, BuildTarget)):
raise InvalidArguments('Can only depend on toplevel targets: custom_target or build_target (executable or a library) got: %s(%s)'
% (type(ed), ed))
self.extra_depends.append(ed)
for i in depend_files:
if isinstance(i, (File, str)):
self.depend_files.append(i)
else:
mlog.debug(i)
raise InvalidArguments('Unknown type {!r} in depend_files.'.format(type(i).__name__))
def get_dependencies(self):
return self.dependencies
def should_install(self):
return self.install
def get_custom_install_dir(self):
return self.install_dir
def get_custom_install_mode(self):
return self.install_mode
def get_outputs(self):
return self.outputs
def get_filename(self):
return self.outputs[0]
def get_sources(self):
return self.sources
def get_generated_lists(self):
genlists = []
for c in self.sources:
if hasattr(c, 'held_object'):
c = c.held_object
if isinstance(c, GeneratedList):
genlists.append(c)
return genlists
def get_generated_sources(self):
return self.get_generated_lists()
def get_dep_outname(self, infilenames):
if self.depfile is None:
raise InvalidArguments('Tried to get depfile name for custom_target that does not have depfile defined.')
if len(infilenames):
plainname = os.path.basename(infilenames[0])
basename = os.path.splitext(plainname)[0]
return self.depfile.replace('@BASENAME@', basename).replace('@PLAINNAME@', plainname)
else:
if '@BASENAME@' in self.depfile or '@PLAINNAME@' in self.depfile:
raise InvalidArguments('Substitution in depfile for custom_target that does not have an input file.')
return self.depfile
def type_suffix(self):
return "@cus"
def __getitem__(self, index):
    """Index into the target's outputs, returning a proxy for one output file."""
    return CustomTargetIndex(self, self.outputs[index])
def __setitem__(self, index, value):
    # Outputs are fixed at construction time; item assignment is unsupported.
    raise NotImplementedError
def __delitem__(self, index):
    # Outputs are fixed at construction time; item deletion is unsupported.
    raise NotImplementedError
class RunTarget(Target):
    """A target that only runs a command; it produces no build artifacts and is
    never installed."""

    def __init__(self, name, command, args, dependencies, subdir, subproject):
        self.typename = 'run'
        super().__init__(name, subdir, subproject, False)
        self.command = command
        self.args = args
        self.dependencies = dependencies

    def __lt__(self, other):
        return self.get_id() < other.get_id()

    def __repr__(self):
        return '<{} {}: {}>'.format(self.__class__.__name__, self.get_id(), self.command)

    def get_dependencies(self):
        return self.dependencies

    def get_generated_sources(self):
        # Run targets never generate sources.
        return []

    def get_sources(self):
        return []

    def should_install(self):
        # Nothing is produced, so there is nothing to install.
        return False

    def get_filename(self):
        return self.name

    def get_outputs(self):
        if isinstance(self.name, str):
            return [self.name]
        if isinstance(self.name, list):
            return self.name
        raise RuntimeError('RunTarget: self.name is neither a list nor a string. This is a bug')

    def type_suffix(self):
        return "@run"
class Jar(BuildTarget):
    """A Java jar build target.  All sources must be .java files and all link
    targets must themselves be jars."""

    known_kwargs = known_jar_kwargs

    def __init__(self, name, subdir, subproject, is_cross, sources, objects, environment, kwargs):
        self.typename = 'jar'
        super().__init__(name, subdir, subproject, is_cross, sources, objects, environment, kwargs)
        for src in self.sources:
            if not src.endswith('.java'):
                raise InvalidArguments('Jar source %s is not a java file.' % src)
        for linked in self.link_targets:
            if not isinstance(linked, Jar):
                raise InvalidArguments('Link target %s is not a jar target.' % linked)
        self.filename = self.name + '.jar'
        self.outputs = [self.filename]
        self.java_args = kwargs.get('java_args', [])

    def get_main_class(self):
        return self.main_class

    def type_suffix(self):
        return "@jar"

    def get_java_args(self):
        return self.java_args

    def validate_cross_install(self, environment):
        # All jar targets are installable.
        pass

    def is_linkable_target(self):
        return True

    def get_classpath_args(self):
        # Build a single -cp argument out of the paths of all linked jars.
        paths = [os.path.join(l.get_subdir(), l.get_filename()) for l in self.link_targets]
        classpath = os.pathsep.join(paths)
        return ['-cp', classpath] if classpath else []
class CustomTargetIndex:
    """A special opaque object returned by indexing a CustomTarget. This object
    exists in meson, but acts as a proxy in the backends, making targets depend
    on the CustomTarget it's derived from, but only adding one source file to
    the sources.
    """

    def __init__(self, target, output):
        self.typename = 'custom'
        self.target = target
        self.output = output

    def __repr__(self):
        position = self.target.get_outputs().index(self.output)
        return '<CustomTargetIndex: {!r}[{}]>'.format(self.target, position)

    def get_outputs(self):
        # Exactly one output: the indexed file.
        return [self.output]

    def get_subdir(self):
        return self.target.get_subdir()
class ConfigureFile:
    """Metadata for a configure_file() invocation: the source template, the
    target filename, and the configuration data used for substitution."""

    def __init__(self, subdir, sourcename, targetname, configuration_data):
        self.subdir = subdir
        self.sourcename = sourcename
        self.targetname = targetname
        self.configuration_data = configuration_data

    def __repr__(self):
        source_path = os.path.join(self.subdir, self.sourcename)
        target_path = os.path.join(self.subdir, self.targetname)
        return '<{}: {} -> {}>'.format(self.__class__.__name__, source_path, target_path)

    def get_configuration_data(self):
        return self.configuration_data

    def get_subdir(self):
        return self.subdir

    def get_source_name(self):
        return self.sourcename

    def get_target_name(self):
        return self.targetname
class ConfigurationData:
    """Mutable store of configuration values; each entry maps a name to a
    (value, description) tuple."""

    def __init__(self):
        super().__init__()
        self.values = {}

    def __repr__(self):
        return repr(self.values)

    def __contains__(self, value):
        return value in self.values

    def get(self, name):
        # Entries are (value, description) pairs.
        return self.values[name]

    def keys(self):
        return self.values.keys()
# A bit poorly named, but this represents plain data files to copy
# during install.
class Data:
    """Plain data files copied verbatim into the install tree."""

    def __init__(self, sources, install_dir, install_mode=None, rename=None):
        self.install_dir = install_dir
        self.install_mode = install_mode
        self.sources = listify(sources)
        for src in self.sources:
            assert(isinstance(src, File))
        if rename is None:
            # Default: install each file under its original basename.
            self.rename = [os.path.basename(f.fname) for f in self.sources]
        else:
            self.rename = stringlistify(rename)
            if len(self.rename) != len(self.sources):
                raise MesonException('Size of rename argument is different from number of sources')
class RunScript(dict):
    """A script invocation serialized as a dict with 'exe' and 'args' keys."""

    def __init__(self, script, args):
        super().__init__()
        assert(isinstance(script, list))
        assert(isinstance(args, list))
        self.update(exe=script, args=args)
class TestSetup:
    """Options for one named test setup: executable wrapper, gdb flag, timeout
    multiplier and environment overrides."""

    def __init__(self, *, exe_wrapper=None, gdb=None, timeout_multiplier=None, env=None):
        for attr, value in (('exe_wrapper', exe_wrapper),
                            ('gdb', gdb),
                            ('timeout_multiplier', timeout_multiplier),
                            ('env', env)):
            setattr(self, attr, value)
def get_sources_string_names(sources):
    '''
    For the specified list of @sources which can be strings, Files, or targets,
    get all the output basenames.
    '''
    names = []
    for item in sources:
        # Unwrap interpreter holder objects.
        item = getattr(item, 'held_object', item)
        if isinstance(item, str):
            names.append(item)
        elif isinstance(item, (BuildTarget, CustomTarget, CustomTargetIndex, GeneratedList)):
            names += item.get_outputs()
        elif isinstance(item, File):
            names.append(item.fname)
        else:
            raise AssertionError('Unknown source type: {!r}'.format(item))
    return names
def load(build_dir):
    """Load the serialized Build object from <build_dir>/meson-private/build.dat.

    Raises MesonException when the file is missing, cannot be unpickled, or
    does not contain a Build instance.
    """
    filename = os.path.join(build_dir, 'meson-private', 'build.dat')
    load_fail_msg = 'Build data file {!r} is corrupted. Try with a fresh build tree.'.format(filename)
    # Note: {!r} already wraps the path in quotes, so no literal quotes here
    # (the previous message produced doubled quoting like "'path'").
    nonexisting_fail_msg = 'No such build data file as {!r}.'.format(filename)
    try:
        with open(filename, 'rb') as f:
            obj = pickle.load(f)
    except FileNotFoundError:
        raise MesonException(nonexisting_fail_msg)
    except pickle.UnpicklingError:
        raise MesonException(load_fail_msg)
    if not isinstance(obj, Build):
        raise MesonException(load_fail_msg)
    return obj
def save(obj, filename):
    """Pickle *obj* into *filename*, overwriting any existing file."""
    with open(filename, 'wb') as out:
        pickle.dump(obj, out)
| mesonbuild/build.py | 99,028 | A class that holds the status of one build including
all dependencies and so on.
A special opaque object returned by indexing a CustomTarget. This object
exists in meson, but acts as a proxy in the backends, making targets depend
on the CustomTarget it's derived from, but only adding one source file to
the sources.
Holds a list of sources for which the objects must be extracted
Warn if shared modules are linked with target: (link_with) #2865
Construct target ID from subdir, name and type suffix.
This helper function is made public mostly for tests.
Human friendly description of the executable
See https://github.com/mesonbuild/meson/pull/417 for details.
First we determine the filename template (self.filename_tpl), then we
set the output filename (self.filename).
The template is needed while creating aliases (self.get_aliases),
which are needed while generating .so shared libraries for Linux.
Besides this, there's also the import library name, which is only used
on Windows since on that platform the linker uses a separate library
called the "import library" during linking instead of the shared
library (DLL). The toolchain will output an import library in one of
two formats: GCC or Visual Studio.
When we're building with Visual Studio, the import library that will be
generated by the toolchain is self.vs_import_filename, and with
MinGW/GCC, it's self.gcc_import_filename. self.import_filename will
always contain the import library name this target will generate.
If the versioned library name is libfoo.so.0.100.0, aliases are:
* libfoo.so.0 (soversion) -> libfoo.so.0.100.0
* libfoo.so (unversioned; for linking) -> libfoo.so.0
Same for dylib:
* libfoo.dylib (unversioned; for linking) -> libfoo.0.dylib
We use the order of languages in `clink_langs` to determine which
linker to use in case the target has sources compiled with multiple
compilers. All languages other than those in this list have their own
linker.
Note that Vala outputs C code, so Vala sources can use any linker
that can link compiled C. We don't actually need to add an exception
for Vala here because of that.
The name of the import library that will be produced by the compiler.
Returns None if no import library is required for this platform.
The name of the import library that will be produced by the compiler.
Returns None if no import library is required for this platform.
Sometimes you want to link to a C++ library that exports C API, which
means the linker must link in the C++ stdlib, and we must use a C++
compiler for linking. The same is also applicable for objc/objc++, etc,
so we can keep using clink_langs for the priority order.
See: https://github.com/mesonbuild/meson/issues/1653
For the specified list of @sources which can be strings, Files, or targets,
get all the output basenames.
Recursively fetch the build targets that this custom target depends on,
whether through `command:`, `depends:`, or `sources:` The recursion is
only performed on custom targets.
This is useful for setting PATH on Windows for finding required DLLs.
F.ex, if you have a python script that loads a C module that links to
other DLLs in your project.
Check if the dynamic linker is MSVC. Used by Executable, StaticLibrary,
and SharedLibrary for deciding when to use MSVC-specific file naming
and debug filenames.
If at least some code is built with MSVC and the final library is
linked with MSVC, we can be sure that some debug info will be
generated. We only check the dynamic linker here because the static
linker is guaranteed to be of the same type.
Interesting cases:
1. The Vala compiler outputs C code to be compiled by whatever
C compiler we're using, so all objects will still be created by the
MSVC compiler.
2. If the target contains only objects, process_compilers guesses and
picks the first compiler that smells right.
Populate self.compilers, which is the list of compilers that this
target will use for compiling all its sources.
We also add compilers that were used by extracted objects to simplify
dynamic linker determination.
Processes additional compilers after kwargs have been evaluated.
This can add extra compilers that might be required by keyword
arguments, such as link_with or dependencies. It will also try to guess
which compiler to use if one hasn't been selected already.
Process the link_depends keyword argument.
This is designed to handle strings, Files, and the output of Custom
Targets. Notably it doesn't handle generator() returned objects, since
adding them as a link depends would inherently cause them to be
generated twice, since the output needs to be passed to the ld_args and
link_depends.
Copyright 2012-2017 The Meson development team Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Coredata holds the state. This is just here for convenience. The list of all programs that have been searched for. These alias coredata's fields of the same name, and must not become copies. Interpreter has validated that all given directories actually exist. Merge sources and generated sources We cannot know the path where this source will be generated, but all we need here is the file extension to determine the compiler. Filter out headers and all non-source files Figure out if the extracted object list is compatible with a Unity build. When we're doing a Unified build, we go through the sources, and create a single source file from each subset of the sources that can be compiled with a specific compiler. Then we create one object from each unified source file. So for each compiler we can either extra all its sources or none. Fix failing test 53 when this becomes an error. Find the installation directory. Either the value is set to a non-default value, or is set to False (which means we want this specific output out of many outputs to not be installed). We don't really need cryptographic security here. Small-digest hash function with unlikely collision is good enough. This ID should be case-insensitive and should work in Visual Studio, e.g. it should not start with leading '-'. This ID must also be a valid file name on all OSs. It should also avoid shell metacharacters for obvious reasons. 
'@' is not used as often as '_' in source code names. In case of collisions consider using checksums. FIXME replace with assert when slash in names is prohibited preserve myid for better debuggability For backward compatibility, if build_by_default is not explicitly set, use the value of 'install' if it's enabled. The list of all files outputted by this target. Useful in cases such as Vala which generates .vapi and .h besides the compiled output. Sources can be: 1. Pre-existing source files in the source tree 2. Pre-existing sources generated by configure_file in the build tree 3. Sources files generated by another target or a Generator Objects can be: 1. Pre-existing objects provided by the user with the `objects:` kwarg 2. Compiled objects created by and extracted from another target Override this method in derived classes that have more keywords. If the same source is defined multiple times, use it only once. Holder unpacking. Ugly. Populate list of compilers If this library is linked against another library we need to consider the languages of those libraries as well. No source files or parent targets, target consists of only object files of unknown origin. Just add the first clink compiler that we have and hope that it can link these objects Populate list of compilers Pre-existing sources All generated sources Generated objects can't be compiled, so don't use them for compiler detection. If our target only has generated objects, we will fall back to using the first c-like compiler we find, which is what we need. Sources that were used to create our extracted objects Don't add Vala sources since that will pull in the Vala compiler even though we will never use it since we are dealing with compiled C code. For each source, try to add one compiler that can compile it. It's ok if no compilers can do so, because users are expected to be able to add arbitrary non-source files to the sources list. 
Re-sort according to clink_langs If all our sources are Vala, our target also needs the C compiler but it won't get added above. CSharp and Java targets can't contain any other file types This sucks quite badly. Arguments are holders but they can't be pickled so unpack those known. FIXME: It could be a generated sourcewe are merging two dictionaries, while keeping the earlier one dominant Sorry for this hack. Keyword targets are kept in holders in kwargs. Unpack here without looking at the exact type. Target-specific include dirs must be added BEFORE include dirs from internal deps (added inside self.add_deps()) to override them. Add dependencies (which also have include_directories) If an item in this list is False, the output corresponding to the list index of that item will not be installed You can't disable PIC on OS X. The compiler ignores -fno-PIC. PIC is always on for Windows (all code is position-independent since library loading is done differently) Executables must be PIE on Android Check if we have -fPIC, -fpic, -fPIE, or -fpie in cflags We don't want the 'internal' libraries when generating the `Libs:` and `Libs.private:` lists in pkg-config files. Those parts that are internal. Those parts that are external. Deps of deps. This is a bit of a hack. We do not want Build to know anything about the interpreter so we can't import it and use isinstance. This should be reliable enough. FIXME same hack, forcibly unpack from holder. Check if any of the external libraries were written in this language Check if any of the internal libraries this target links to were written in this language Populate list of all compilers, not just those being used to compile sources in this target Languages used by dependencies Pick a compiler based on the language priority-order Mixing many languages with MSVC is not supported yet so ignore stdlibs. For subdirs we can only go "down". Unless overridden, executables have no suffix or prefix. 
Except on Windows and with C/Mono executables where the suffix is 'exe' Executable for Windows or C/Mono The import library this target will generate The import library that Visual Studio would generate (and accept) The import library that GCC would generate (and prefer) Check for export_dynamic If using export_dynamic, set the import library name Only linkwithable if using export_dynamic If no crate type is specified, or it's the generic lib type, use rlib Don't let configuration proceed with a non-static crate type By default a static library is named libfoo.a even on Windows because MSVC does not have a consistent convention for what static libraries are called. The MSVC CRT uses libfoo.lib syntax but nothing else uses it and GCC only looks for static libraries called foo.lib and libfoo.a. However, we cannot use foo.lib because that's the same as the import library. Using libfoo.a is ok because people using MSVC always pass the library filename while linking anyway. default Rust static library suffix Max length 2, first element is compatibility_version, second is current_version The import library this target will generate The import library that Visual Studio would generate (and accept) The import library that GCC would generate (and prefer) If no crate type is specified, or it's the generic lib type, use dylib Don't let configuration proceed with a non-dynamic crate type NOTE: manual prefix/suffix override is currently only tested for C/C++ C and Mono C, C++, Swift, Vala Only Windows uses a separate import library for linking For all other targets/platforms import_filename stays None Shared library is of the form foo.dll Import library is called foo.lib Assume GCC-compatible naming Shared library is of the form libfoo.dll Import library is called libfoo.dll.a Shared library has the soversion if it is defined Shared library is of the form cygfoo.dll (ld --dll-search-prefix=cyg is the default) Import library is called libfoo.dll.a On macOS, the filename can only 
contain the major version libfoo.X.dylib libfoo.dylib Android doesn't support shared_library versioning libfoo.so.X[.Y[.Z]] (.Y and .Z are optional) libfoo.so.X No versioning, libfoo.so Shared library version Try to extract/deduce the soversion library version is defined, get the soversion from that We replicate what Autotools does here and take the first number of the version by default. macOS and iOS dylib compatibility_version and current_version If unspecified, pick the soversion Visual Studio module-definitions file When passing a generated file. When passing output of a Custom Target Aliases are only useful with .so and .dylib libraries. Also if there's no self.soversion (no versioning), we don't need aliases. With .so libraries, the minor and micro versions are also in the filename. If ltversion != soversion we create an soversion alias: libfoo.so.0 -> libfoo.so.0.100.0 Where libfoo.so.0.100.0 is the actual library libfoo.so.0/libfoo.0.dylib is the actual library Unversioned alias: libfoo.so -> libfoo.so.0 libfoo.dylib -> libfoo.0.dylib A shared library that is meant to be used with dlopen rather than linking into something else. Files that this target depends on but are not on the command line. Whether to use absolute paths for all files on the commandline Can only add a dependency on an external program which we know the absolute path of This will substitute values from the input into output and return it. We already check this during substitution, but the error message will be unclear/confusing, so check it here. If an item in this list is False, the output corresponding to the list index of that item will not be installed All jar targets are installable. (val, desc) A bit poorly named, but this represents plain data files to copy during install. | 14,324 | en | 0.896549 |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import array
import asyncio
import collections.abc
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
ForwardRef,
Generic,
Iterable,
Iterator,
List,
Literal,
Mapping,
Optional,
Protocol,
Sequence,
Tuple,
Type,
TypeVar,
Union,
overload,
TYPE_CHECKING,
)
import unicodedata
from base64 import b64encode
from bisect import bisect_left
import datetime
import functools
from inspect import isawaitable as _isawaitable, signature as _signature
from operator import attrgetter
import json
import re
import sys
import types
import warnings
from .errors import InvalidArgument
try:
import orjson
except ModuleNotFoundError:
HAS_ORJSON = False
else:
HAS_ORJSON = True
# Names this utility module exports publicly.
__all__ = (
    "oauth_url",
    "snowflake_time",
    "time_snowflake",
    "find",
    "get",
    "sleep_until",
    "utcnow",
    "remove_markdown",
    "escape_markdown",
    "escape_mentions",
    "as_chunks",
    "format_dt",
)

# 2015-01-01T00:00:00 UTC in milliseconds since the Unix epoch; Discord
# snowflake timestamps are offsets from this value.
DISCORD_EPOCH = 1420070400000
class _MissingSentinel:
    """Falsy singleton used to distinguish "argument not supplied" from ``None``."""

    def __eq__(self, other):
        # Never equal to anything, not even itself.
        return False

    def __bool__(self):
        return False

    def __repr__(self):
        return "..."


MISSING: Any = _MissingSentinel()
class _cached_property:
    """Descriptor that computes a value once per instance, then stores it on the
    instance under the same name so later lookups bypass the descriptor."""

    def __init__(self, function):
        self.function = function
        self.__doc__ = getattr(function, "__doc__")

    def __get__(self, instance, owner):
        if instance is None:
            # Class-level access returns the descriptor itself.
            return self
        result = self.function(instance)
        setattr(instance, self.function.__name__, result)
        return result
if TYPE_CHECKING:
    # Static-analysis-only imports; never executed at runtime.
    from functools import cached_property as cached_property
    from typing_extensions import ParamSpec
    from .permissions import Permissions
    from .abc import Snowflake
    from .invite import Invite
    from .template import Template

    class _RequestLike(Protocol):
        # Minimal structural type: only `.headers` is read from request objects.
        headers: Mapping[str, Any]

    P = ParamSpec("P")
else:
    # At runtime, fall back to the local descriptor implementation above.
    cached_property = _cached_property
# Generic type variables shared by the helpers in this module.
T = TypeVar("T")
T_co = TypeVar("T_co", covariant=True)

# Either a synchronous or an asynchronous iterator of T.
_Iter = Union[Iterator[T], AsyncIterator[T]]
class CachedSlotProperty(Generic[T, T_co]):
    """Like ``cached_property`` but for ``__slots__`` classes: the computed value
    is cached under the pre-declared slot *name* instead of ``__dict__``."""

    def __init__(self, name: str, function: Callable[[T], T_co]) -> None:
        self.name = name
        self.function = function
        self.__doc__ = getattr(function, "__doc__")

    @overload
    def __get__(self, instance: None, owner: Type[T]) -> CachedSlotProperty[T, T_co]:
        ...

    @overload
    def __get__(self, instance: T, owner: Type[T]) -> T_co:
        ...

    def __get__(self, instance: Optional[T], owner: Type[T]) -> Any:
        if instance is None:
            return self
        try:
            # Fast path: the slot has already been filled.
            return getattr(instance, self.name)
        except AttributeError:
            pass
        value = self.function(instance)
        setattr(instance, self.name, value)
        return value
class classproperty(Generic[T_co]):
    """Read-only property evaluated against the owning *class* rather than an
    instance."""

    def __init__(self, fget: Callable[[Any], T_co]) -> None:
        self.fget = fget

    def __get__(self, instance: Optional[Any], owner: Type[Any]) -> T_co:
        # Always dispatch on the class, even when accessed through an instance.
        return self.fget(owner)

    def __set__(self, instance, value) -> None:
        raise AttributeError("cannot set attribute")
def cached_slot_property(name: str) -> Callable[[Callable[[T], T_co]], CachedSlotProperty[T, T_co]]:
    """Decorator factory caching the decorated method's result in slot *name*."""

    def decorator(func: Callable[[T], T_co]) -> CachedSlotProperty[T, T_co]:
        return CachedSlotProperty(name, func)

    return decorator
class SequenceProxy(Generic[T_co], collections.abc.Sequence):
    """Read-only proxy of a Sequence."""

    def __init__(self, proxied: Sequence[T_co]):
        # Name-mangled attribute keeps the wrapped sequence private.
        self.__wrapped = proxied

    def __len__(self) -> int:
        return len(self.__wrapped)

    def __getitem__(self, idx: int) -> T_co:
        return self.__wrapped[idx]

    def __iter__(self) -> Iterator[T_co]:
        return iter(self.__wrapped)

    def __reversed__(self) -> Iterator[T_co]:
        return reversed(self.__wrapped)

    def __contains__(self, item: Any) -> bool:
        return item in self.__wrapped

    def index(self, value: Any, *args, **kwargs) -> int:
        return self.__wrapped.index(value, *args, **kwargs)

    def count(self, value: Any) -> int:
        return self.__wrapped.count(value)
@overload
def parse_time(timestamp: None) -> None:
    ...


@overload
def parse_time(timestamp: str) -> datetime.datetime:
    ...


@overload
def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
    ...


def parse_time(timestamp: Optional[str]) -> Optional[datetime.datetime]:
    """Parse an ISO-8601 timestamp; falsy input (None or "") yields None."""
    if not timestamp:
        return None
    return datetime.datetime.fromisoformat(timestamp)
def copy_doc(original: Callable) -> Callable[[T], T]:
    """Decorator factory that copies *original*'s docstring and signature onto
    the decorated object."""

    def decorator(overriden: T) -> T:
        overriden.__doc__ = original.__doc__
        overriden.__signature__ = _signature(original)  # type: ignore
        return overriden

    return decorator
def deprecated(instead: Optional[str] = None) -> Callable[[Callable[P, T]], Callable[P, T]]:
    """Decorator factory that emits a DeprecationWarning on every call of the
    wrapped callable, optionally naming a replacement via *instead*."""

    def actual_decorator(func: Callable[P, T]) -> Callable[P, T]:
        @functools.wraps(func)
        def decorated(*args: P.args, **kwargs: P.kwargs) -> T:
            warnings.simplefilter("always", DeprecationWarning)  # turn off filter
            if instead:
                message = "{0.__name__} is deprecated, use {1} instead.".format(func, instead)
            else:
                message = "{0.__name__} is deprecated.".format(func)
            warnings.warn(message, stacklevel=3, category=DeprecationWarning)
            warnings.simplefilter("default", DeprecationWarning)  # reset filter
            return func(*args, **kwargs)

        return decorated

    return actual_decorator
def oauth_url(
    client_id: Union[int, str],
    *,
    permissions: Permissions = MISSING,
    guild: Snowflake = MISSING,
    redirect_uri: str = MISSING,
    scopes: Iterable[str] = MISSING,
    disable_guild_select: bool = False,
) -> str:
    """A helper function that returns the OAuth2 URL for inviting the bot
    into guilds.

    Parameters
    -----------
    client_id: Union[:class:`int`, :class:`str`]
        The client ID for your bot.
    permissions: :class:`~discord.Permissions`
        The permissions you're requesting. If not given then you won't be requesting any
        permissions.
    guild: :class:`~discord.abc.Snowflake`
        The guild to pre-select in the authorization screen, if available.
    redirect_uri: :class:`str`
        An optional valid redirect URI.
    scopes: Iterable[:class:`str`]
        An optional valid list of scopes. Defaults to ``('bot',)``.

        .. versionadded:: 1.7
    disable_guild_select: :class:`bool`
        Whether to disallow the user from changing the guild dropdown.

        .. versionadded:: 2.0

    Returns
    --------
    :class:`str`
        The OAuth2 URL for inviting the bot into guilds.
    """
    # MISSING is falsy, so `scopes or ("bot",)` covers the omitted case too.
    parts = [f"https://discord.com/oauth2/authorize?client_id={client_id}"]
    parts.append("&scope=" + "+".join(scopes or ("bot",)))
    if permissions is not MISSING:
        parts.append(f"&permissions={permissions.value}")
    if guild is not MISSING:
        parts.append(f"&guild_id={guild.id}")
    if redirect_uri is not MISSING:
        from urllib.parse import urlencode

        parts.append("&response_type=code&" + urlencode({"redirect_uri": redirect_uri}))
    if disable_guild_select:
        parts.append("&disable_guild_select=true")
    return "".join(parts)
def snowflake_time(id: int) -> datetime.datetime:
    """
    Parameters
    -----------
    id: :class:`int`
        The snowflake ID.

    Returns
    --------
    :class:`datetime.datetime`
        An aware datetime in UTC representing the creation time of the snowflake.
    """
    # The upper 42 bits of a snowflake are milliseconds since DISCORD_EPOCH.
    millis = (id >> 22) + DISCORD_EPOCH
    return datetime.datetime.fromtimestamp(millis / 1000, tz=datetime.timezone.utc)
def time_snowflake(dt: datetime.datetime, high: bool = False) -> int:
    """Returns a numeric snowflake pretending to be created at the given date.

    When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
    to be inclusive, ``high=True`` to be exclusive.

    When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
    to be inclusive, ``high=False`` to be exclusive.

    Parameters
    -----------
    dt: :class:`datetime.datetime`
        A datetime object to convert to a snowflake.
        If naive, the timezone is assumed to be local time.
    high: :class:`bool`
        Whether or not to set the lower 22 bit to high or low.

    Returns
    --------
    :class:`int`
        The snowflake representing the time given.
    """
    millis_since_epoch = int(dt.timestamp() * 1000 - DISCORD_EPOCH)
    # The low 22 bits are worker/process/sequence; all-ones gives the largest
    # snowflake for that millisecond, all-zeros the smallest.
    low_bits = (1 << 22) - 1 if high else 0
    return (millis_since_epoch << 22) + low_bits
def find(predicate: Callable[[T], Any], seq: Iterable[T]) -> Optional[T]:
    """A helper to return the first element found in the sequence
    that meets the predicate. For example: ::

        member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)

    would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
    If an entry is not found, then ``None`` is returned.

    This is different from :func:`py:filter` due to the fact it stops the moment it finds
    a valid entry.

    Parameters
    -----------
    predicate
        A function that returns a boolean-like result.
    seq: :class:`collections.abc.Iterable`
        The iterable to search through.
    """
    # Lazy generator + next() stops at the first match.
    return next((item for item in seq if predicate(item)), None)
def get(iterable: Iterable[T], **attrs: Any) -> Optional[T]:
    r"""A helper that returns the first element in the iterable that meets
    all the traits passed in ``attrs``. This is an alternative for
    :func:`~discord.utils.find`.

    When multiple attributes are specified, they are checked using
    logical AND, not logical OR. Meaning they have to meet every
    attribute passed in and not one of them.

    To have a nested attribute search (i.e. search by ``x.y``) then
    pass in ``x__y`` as the keyword argument.

    If nothing is found that matches the attributes passed, then
    ``None`` is returned.

    Examples
    ---------

    Basic usage:

    .. code-block:: python3

        member = discord.utils.get(message.guild.members, name='Foo')

    Multiple attribute matching:

    .. code-block:: python3

        channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)

    Nested attribute matching:

    .. code-block:: python3

        channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')

    Parameters
    -----------
    iterable
        An iterable to search through.
    \*\*attrs
        Keyword arguments that denote attributes to search with.
    """
    # Single-attribute searches skip building the predicate list entirely.
    if len(attrs) == 1:
        key, expected = attrs.popitem()
        getter = attrgetter(key.replace("__", "."))
        return next((elem for elem in iterable if getter(elem) == expected), None)

    # Pre-resolve every dotted attribute path once, outside the scan loop.
    checks = [(attrgetter(name.replace("__", ".")), expected) for name, expected in attrs.items()]
    for elem in iterable:
        if all(getter(elem) == expected for getter, expected in checks):
            return elem
    return None
def _unique(iterable: Iterable[T]) -> List[T]:
    """De-duplicate *iterable*, preserving first-seen order (dicts keep
    insertion order)."""
    return list(dict.fromkeys(iterable))
def _get_as_snowflake(data: Any, key: str) -> Optional[int]:
    """Fetch ``data[key]`` coerced to int; returns None when the key is absent,
    and passes falsy values (None, 0, "") through unconverted."""
    try:
        raw = data[key]
    except KeyError:
        return None
    return raw and int(raw)
def _get_mime_type_for_image(data: bytes):
    """Sniff an image MIME type from its magic bytes; raises InvalidArgument
    for anything other than PNG, JPEG, GIF, or WEBP."""
    if data.startswith(b"\x89\x50\x4E\x47\x0D\x0A\x1A\x0A"):
        return "image/png"
    if data[0:3] == b"\xff\xd8\xff" or data[6:10] in (b"JFIF", b"Exif"):
        return "image/jpeg"
    if data.startswith((b"\x47\x49\x46\x38\x37\x61", b"\x47\x49\x46\x38\x39\x61")):
        return "image/gif"
    if data.startswith(b"RIFF") and data[8:12] == b"WEBP":
        return "image/webp"
    raise InvalidArgument("Unsupported image type given")
def _bytes_to_base64_data(data: bytes) -> str:
    """Encode raw image bytes as a ``data:`` URI with the sniffed MIME type."""
    mime = _get_mime_type_for_image(data)
    payload = b64encode(data).decode("ascii")
    return f"data:{mime};base64,{payload}"
if HAS_ORJSON:
    # Prefer orjson when installed; orjson.dumps returns bytes, so decode to
    # keep the same str-returning signature as the stdlib path below.
    def _to_json(obj: Any) -> str:  # type: ignore
        return orjson.dumps(obj).decode("utf-8")

    _from_json = orjson.loads  # type: ignore
else:
    # stdlib fallback; compact separators keep wire payloads small.
    def _to_json(obj: Any) -> str:
        return json.dumps(obj, separators=(",", ":"), ensure_ascii=True)

    _from_json = json.loads
def _parse_ratelimit_header(request: Any, *, use_clock: bool = False) -> float:
reset_after: Optional[str] = request.headers.get("X-Ratelimit-Reset-After")
if use_clock or not reset_after:
utc = datetime.timezone.utc
now = datetime.datetime.now(utc)
reset = datetime.datetime.fromtimestamp(float(request.headers["X-Ratelimit-Reset"]), utc)
return (reset - now).total_seconds()
else:
return float(reset_after)
async def maybe_coroutine(f, *args, **kwargs):
    """Call *f* with the given arguments, awaiting the result if it is
    awaitable; plain return values are passed straight through."""
    result = f(*args, **kwargs)
    if not _isawaitable(result):
        return result
    return await result
async def async_all(gen, *, check=_isawaitable):
    """Awaitable-aware ``all()``: items that *check* flags as awaitable are
    awaited before their truthiness is tested.  Short-circuits on the first
    falsy value."""
    for item in gen:
        value = await item if check(item) else item
        if not value:
            return False
    return True
async def sane_wait_for(futures, *, timeout):
    """Await every future, raising :exc:`asyncio.TimeoutError` if any of them
    is still pending once *timeout* elapses.  Returns the set of done tasks."""
    tasks = [asyncio.ensure_future(f) for f in futures]
    done, pending = await asyncio.wait(tasks, timeout=timeout, return_when=asyncio.ALL_COMPLETED)
    if pending:
        raise asyncio.TimeoutError()
    return done
def get_slots(cls: Type[Any]) -> Iterator[str]:
    """Yield every ``__slots__`` name across *cls*'s MRO, base classes first.

    Classes without ``__slots__`` (e.g. :class:`object`) are skipped.
    """
    for klass in reversed(cls.__mro__):
        slots = getattr(klass, "__slots__", None)
        if slots is not None:
            yield from slots
def compute_timedelta(dt: datetime.datetime):
    """Seconds from now until *dt*, clamped at zero for past times.

    A naive *dt* is interpreted as local time.
    """
    aware = dt if dt.tzinfo is not None else dt.astimezone()
    remaining = (aware - datetime.datetime.now(datetime.timezone.utc)).total_seconds()
    return max(remaining, 0)
async def sleep_until(when: datetime.datetime, result: Optional[T] = None) -> Optional[T]:
    """|coro|

    Sleep until the given timestamp, yielding immediately if it is already
    in the past.

    .. versionadded:: 1.3

    Parameters
    -----------
    when: :class:`datetime.datetime`
        The timestamp to sleep until.  A naive datetime is assumed to be
        local time.
    result: Any
        If provided, returned to the caller when the coroutine completes.
    """
    return await asyncio.sleep(compute_timedelta(when), result)
def utcnow() -> datetime.datetime:
    """Return the current time as an aware UTC datetime.

    Prefer this over :meth:`datetime.datetime.utcnow`, which yields a naive
    datetime and is therefore easy to misuse in arithmetic.

    .. versionadded:: 2.0

    Returns
    --------
    :class:`datetime.datetime`
        The current aware datetime in UTC.
    """
    return datetime.datetime.now(tz=datetime.timezone.utc)
def valid_icon_size(size: int) -> bool:
    """True when *size* is a power of two in the inclusive range [16, 4096]."""
    is_power_of_two = size & (size - 1) == 0
    return is_power_of_two and 16 <= size <= 4096
class SnowflakeList(array.array):
    """Sorted array of snowflake IDs backed by an unsigned 64-bit C array.

    Design goals:

    - minimal memory footprint (unboxed ``Q`` values)
    - O(n) iteration
    - O(n log n) construction when the input is unsorted
    - O(log n) lookup via binary search
    - O(n) insertion (array shift)
    """

    __slots__ = ()

    if TYPE_CHECKING:

        def __init__(self, data: Iterable[int], *, is_sorted: bool = False):
            ...

    def __new__(cls, data: Iterable[int], *, is_sorted: bool = False):
        # Sort up-front unless the caller vouches for the ordering.
        return array.array.__new__(cls, "Q", data if is_sorted else sorted(data))  # type: ignore

    def add(self, element: int) -> None:
        """Insert *element* at its sorted position."""
        self.insert(bisect_left(self, element), element)

    def get(self, element: int) -> Optional[int]:
        """Return *element* if it is stored, otherwise ``None``."""
        idx = bisect_left(self, element)
        found = idx != len(self) and self[idx] == element
        return self[idx] if found else None

    def has(self, element: int) -> bool:
        """Binary-search membership test."""
        idx = bisect_left(self, element)
        return idx != len(self) and self[idx] == element
_IS_ASCII = re.compile(r"^[\x00-\x7f]+$")
def _string_width(string: str, *, _IS_ASCII=_IS_ASCII) -> int:
"""Returns string's width."""
match = _IS_ASCII.match(string)
if match:
return match.endpos
UNICODE_WIDE_CHAR_TYPE = "WFA"
func = unicodedata.east_asian_width
return sum(2 if func(char) in UNICODE_WIDE_CHAR_TYPE else 1 for char in string)
def resolve_invite(invite: Union[Invite, str]) -> str:
    """
    Resolve an invite code from a :class:`~discord.Invite`, an invite URL,
    or a bare code string.

    Parameters
    -----------
    invite: Union[:class:`~discord.Invite`, :class:`str`]
        The invite.

    Returns
    --------
    :class:`str`
        The invite code.
    """
    from .invite import Invite  # circular import

    if isinstance(invite, Invite):
        return invite.code
    found = re.match(r"(?:https?\:\/\/)?discord(?:\.gg|(?:app)?\.com\/invite)\/(.+)", invite)
    return found.group(1) if found else invite
def resolve_template(code: Union[Template, str]) -> str:
    """
    Resolve a template code from a :class:`~discord.Template`, a template
    URL, or a bare code string.

    .. versionadded:: 1.4

    Parameters
    -----------
    code: Union[:class:`~discord.Template`, :class:`str`]
        The code.

    Returns
    --------
    :class:`str`
        The template code.
    """
    from .template import Template  # circular import

    if isinstance(code, Template):
        return code.code
    found = re.match(r"(?:https?\:\/\/)?discord(?:\.new|(?:app)?\.com\/template)\/(.+)", code)
    return found.group(1) if found else code
# For each markdown character c, match c only when a closing counterpart
# appears later in the text (lookahead), so lone characters are left alone.
_MARKDOWN_ESCAPE_SUBREGEX = "|".join(r"\{0}(?=([\s\S]*((?<!\{0})\{0})))".format(c) for c in ("*", "`", "_", "~", "|"))
# Block quotes ("> " / ">>> ") and [text](link) constructs.
_MARKDOWN_ESCAPE_COMMON = r"^>(?:>>)?\s|\[.+\]\(.+\)"
_MARKDOWN_ESCAPE_REGEX = re.compile(
    fr"(?P<markdown>{_MARKDOWN_ESCAPE_SUBREGEX}|{_MARKDOWN_ESCAPE_COMMON})", re.MULTILINE
)
# Bare URLs (<...> form, http(s)://, steam://) captured as group "url" so
# substitution callbacks can leave them untouched.
_URL_REGEX = r"(?P<url><[^: >]+:\/[^ >]+>|(?:https?|steam):\/\/[^\s<]+[^<.,:;\"\'\]\s])"
# Single markdown tokens, used for unconditional removal/escaping.
_MARKDOWN_STOCK_REGEX = fr"(?P<markdown>[_\\~|\*`]|{_MARKDOWN_ESCAPE_COMMON})"
def remove_markdown(text: str, *, ignore_links: bool = True) -> str:
    """A helper function that strips markdown characters from *text*.

    .. versionadded:: 1.7

    .. note::
        This function is not markdown aware and may remove meaning from the
        original text. For example, if the input contains ``10 * 5`` then it
        will be converted into ``10 5``.

    Parameters
    -----------
    text: :class:`str`
        The text to remove markdown from.
    ignore_links: :class:`bool`
        Whether to leave links alone when removing markdown. For example,
        if a URL in the text contains characters such as ``_`` then it will
        be left alone. Defaults to ``True``.

    Returns
    --------
    :class:`str`
        The text with the markdown special characters removed.
    """

    def strip_match(match):
        # URLs survive via their named group; markdown tokens are dropped.
        return match.groupdict().get("url", "")

    pattern = _MARKDOWN_STOCK_REGEX
    if ignore_links:
        pattern = f"(?:{_URL_REGEX}|{pattern})"
    return re.sub(pattern, strip_match, text, 0, re.MULTILINE)
def escape_markdown(text: str, *, as_needed: bool = False, ignore_links: bool = True) -> str:
    r"""A helper function that escapes Discord's markdown in *text*.

    Parameters
    -----------
    text: :class:`str`
        The text to escape markdown from.
    as_needed: :class:`bool`
        Whether to escape the markdown characters as needed. This
        means that it does not escape extraneous characters if it's
        not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
        instead of ``\*\*hello\*\*``. Note however that this can open
        you up to some clever syntax abuse. Defaults to ``False``.
    ignore_links: :class:`bool`
        Whether to leave links alone when escaping markdown. For example,
        if a URL in the text contains characters such as ``_`` then it will
        be left alone. This option is not supported with ``as_needed``.
        Defaults to ``True``.

    Returns
    --------
    :class:`str`
        The text with the markdown special characters escaped with a slash.
    """
    if as_needed:
        # Double existing backslashes first so the escape pass cannot be fooled.
        doubled = re.sub(r"\\", r"\\\\", text)
        return _MARKDOWN_ESCAPE_REGEX.sub(r"\\\1", doubled)

    def escape(match):
        url = match.groupdict().get("url")
        if url:
            return url
        return "\\" + match.groupdict()["markdown"]

    pattern = _MARKDOWN_STOCK_REGEX
    if ignore_links:
        pattern = f"(?:{_URL_REGEX}|{pattern})"
    return re.sub(pattern, escape, text, 0, re.MULTILINE)
def escape_mentions(text: str) -> str:
    """Neutralise @everyone, @here, role, and user mentions in *text* by
    inserting a zero-width space after the ``@``.

    .. note::
        Channel mentions are not affected.

    .. note::
        For granular control over which mentions fire inside messages, use
        :class:`~discord.AllowedMentions` instead.

    Parameters
    -----------
    text: :class:`str`
        The text to escape mentions from.

    Returns
    --------
    :class:`str`
        The text with the mentions removed.
    """
    mention_pattern = r"@(everyone|here|[!&]?[0-9]{17,20})"
    return re.sub(mention_pattern, "@\u200b\\1", text)
def _chunk(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
ret = []
n = 0
for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
async def _achunk(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
ret = []
n = 0
async for item in iterator:
ret.append(item)
n += 1
if n == max_size:
yield ret
ret = []
n = 0
if ret:
yield ret
# Typing-only overload stubs: a sync Iterator chunks into an Iterator of
# lists, and an AsyncIterator chunks into an AsyncIterator of lists.
@overload
def as_chunks(iterator: Iterator[T], max_size: int) -> Iterator[List[T]]:
    ...
@overload
def as_chunks(iterator: AsyncIterator[T], max_size: int) -> AsyncIterator[List[T]]:
    ...
def as_chunks(iterator: _Iter[T], max_size: int) -> _Iter[List[T]]:
    """Collect an iterator (sync or async) into chunks of a given size.

    .. versionadded:: 2.0

    Parameters
    ----------
    iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
        The iterator to chunk, can be sync or async.
    max_size: :class:`int`
        The maximum chunk size.

    .. warning::
        The last chunk collected may not be as large as ``max_size``.

    Returns
    --------
    Union[:class:`Iterator`, :class:`AsyncIterator`]
        A new iterator which yields chunks of a given size.
    """
    if max_size <= 0:
        raise ValueError("Chunk sizes must be greater than 0.")
    chunker = _achunk if isinstance(iterator, AsyncIterator) else _chunk
    return chunker(iterator, max_size)
# True on Python 3.10+; gates use of newer typing features (e.g. types.UnionType).
PY_310 = sys.version_info >= (3, 10)
def flatten_literal_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
    """Expand nested ``Literal[...]`` members of *parameters* into a flat
    tuple of their arguments; non-Literal entries pass through unchanged."""
    literal_type = type(Literal[0])
    flattened: List[Any] = []
    for param in parameters:
        if isinstance(param, literal_type):
            flattened.extend(param.__args__)
        else:
            flattened.append(param)
    return tuple(flattened)
def normalise_optional_params(parameters: Iterable[Any]) -> Tuple[Any, ...]:
    """Rebuild a Union parameter tuple with ``NoneType`` moved to the end,
    normalising it to the canonical ``Optional[...]`` form."""
    NoneType = type(None)
    without_none = tuple(p for p in parameters if p is not NoneType)
    return without_none + (NoneType,)
def evaluate_annotation(
    tp: Any,
    globals: Dict[str, Any],
    locals: Dict[str, Any],
    cache: Dict[str, Any],
    *,
    implicit_str: bool = True,
):
    """Recursively evaluate a (possibly stringified) annotation into runtime
    type objects, resolving ForwardRefs, normalising Optional ordering, and
    flattening/validating Literal arguments.

    *cache* maps previously-evaluated annotation strings to their results so
    repeated references are only eval'd once.  *implicit_str* controls
    whether bare ``str`` values are treated as forward references.
    """
    if isinstance(tp, ForwardRef):
        tp = tp.__forward_arg__
        # ForwardRefs always evaluate their internals
        implicit_str = True
    if implicit_str and isinstance(tp, str):
        if tp in cache:
            return cache[tp]
        # Evaluates the string in the caller's namespaces — annotations are
        # trusted source code here, not external input.
        evaluated = eval(tp, globals, locals)
        cache[tp] = evaluated
        # NOTE(review): implicit_str is not forwarded on this recursion, so it
        # resets to the default True — presumably intentional since the eval
        # result may itself contain forward references; confirm.
        return evaluate_annotation(evaluated, globals, locals, cache)
    if hasattr(tp, "__args__"):
        implicit_str = True
        is_literal = False
        args = tp.__args__
        if not hasattr(tp, "__origin__"):
            # PEP 604 unions (X | Y) lack __origin__; convert to typing.Union
            # and re-evaluate so downstream logic sees a uniform shape.
            if PY_310 and tp.__class__ is types.UnionType:  # type: ignore
                converted = Union[args]  # type: ignore
                return evaluate_annotation(converted, globals, locals, cache)
            return tp
        if tp.__origin__ is Union:
            try:
                # Move NoneType to the end (canonical Optional form) only if
                # it is present and not already last; absence raises ValueError.
                if args.index(type(None)) != len(args) - 1:
                    args = normalise_optional_params(tp.__args__)
            except ValueError:
                pass
        if tp.__origin__ is Literal:
            if not PY_310:
                args = flatten_literal_params(tp.__args__)
            # Literal members are values, not forward references.
            implicit_str = False
            is_literal = True
        evaluated_args = tuple(
            evaluate_annotation(arg, globals, locals, cache, implicit_str=implicit_str) for arg in args
        )
        if is_literal and not all(isinstance(x, (str, int, bool, type(None))) for x in evaluated_args):
            raise TypeError("Literal arguments must be of type str, int, bool, or NoneType.")
        if evaluated_args == args:
            # Nothing changed — return the original object to preserve identity.
            return tp
        try:
            return tp.copy_with(evaluated_args)
        except AttributeError:
            # Older/alternate generic aliases lack copy_with; re-subscript instead.
            return tp.__origin__[evaluated_args]
    # Plain, non-generic annotation — already a runtime object.
    return tp
def resolve_annotation(
    annotation: Any,
    globalns: Dict[str, Any],
    localns: Optional[Dict[str, Any]],
    cache: Optional[Dict[str, Any]],
) -> Any:
    """Resolve a possibly-stringified annotation to runtime type objects.

    ``None`` maps to ``NoneType``; strings are wrapped in ``ForwardRef`` and
    delegated to :func:`evaluate_annotation`.  When *localns* is missing the
    global namespace doubles as the local one; a missing *cache* starts empty.
    """
    if annotation is None:
        return type(None)
    if isinstance(annotation, str):
        annotation = ForwardRef(annotation)
    return evaluate_annotation(
        annotation,
        globalns,
        globalns if localns is None else localns,
        {} if cache is None else cache,
    )
# Valid Discord timestamp style specifiers.
TimestampStyle = Literal["f", "F", "d", "D", "t", "T", "R"]


def format_dt(dt: datetime.datetime, /, style: Optional[TimestampStyle] = None) -> str:
    """Format a :class:`datetime.datetime` as a Discord timestamp token.

    The client renders the token in the viewer's locale; with ``en-GB``:

    +-------------+----------------------------+-----------------+
    | Style       | Example Output             | Description     |
    +=============+============================+=================+
    | t           | 22:57                      | Short Time      |
    +-------------+----------------------------+-----------------+
    | T           | 22:57:58                   | Long Time       |
    +-------------+----------------------------+-----------------+
    | d           | 17/05/2016                 | Short Date      |
    +-------------+----------------------------+-----------------+
    | D           | 17 May 2016                | Long Date       |
    +-------------+----------------------------+-----------------+
    | f (default) | 17 May 2016 22:57          | Short Date Time |
    +-------------+----------------------------+-----------------+
    | F           | Tuesday, 17 May 2016 22:57 | Long Date Time  |
    +-------------+----------------------------+-----------------+
    | R           | 5 years ago                | Relative Time   |
    +-------------+----------------------------+-----------------+

    .. versionadded:: 2.0

    Parameters
    -----------
    dt: :class:`datetime.datetime`
        The datetime to format.
    style: :class:`str`
        The style to format the datetime with.

    Returns
    --------
    :class:`str`
        The formatted string.
    """
    timestamp = int(dt.timestamp())
    if style is None:
        return f"<t:{timestamp}>"
    return f"<t:{timestamp}:{style}>"
| discord/utils.py | 29,531 | Read-only proxy of a Sequence.
Internal data storage class to efficiently store a list of snowflakes.
This should have the following characteristics:
- Low memory usage
- O(n) iteration (obviously)
- O(n log n) initial creation if data is unsorted
- O(log n) search and indexing
- O(n) insertion
Returns string's width.
A helper function that collects an iterator into chunks of a given size.
.. versionadded:: 2.0
Parameters
----------
iterator: Union[:class:`collections.abc.Iterator`, :class:`collections.abc.AsyncIterator`]
The iterator to chunk, can be sync or async.
max_size: :class:`int`
The maximum chunk size.
.. warning::
The last chunk collected may not be as large as ``max_size``.
Returns
--------
Union[:class:`Iterator`, :class:`AsyncIterator`]
A new iterator which yields chunks of a given size.
A helper function that escapes Discord's markdown.
Parameters
-----------
text: :class:`str`
The text to escape markdown from.
as_needed: :class:`bool`
Whether to escape the markdown characters as needed. This
means that it does not escape extraneous characters if it's
not necessary, e.g. ``**hello**`` is escaped into ``\*\*hello**``
instead of ``\*\*hello\*\*``. Note however that this can open
you up to some clever syntax abuse. Defaults to ``False``.
ignore_links: :class:`bool`
Whether to leave links alone when escaping markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. This option is not supported with ``as_needed``.
Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters escaped with a slash.
A helper function that escapes everyone, here, role, and user mentions.
.. note::
This does not include channel mentions.
.. note::
For more granular control over what mentions should be escaped
within messages, refer to the :class:`~discord.AllowedMentions`
class.
Parameters
-----------
text: :class:`str`
The text to escape mentions from.
Returns
--------
:class:`str`
The text with the mentions removed.
A helper to return the first element found in the sequence
that meets the predicate. For example: ::
member = discord.utils.find(lambda m: m.name == 'Mighty', channel.guild.members)
would find the first :class:`~discord.Member` whose name is 'Mighty' and return it.
If an entry is not found, then ``None`` is returned.
This is different from :func:`py:filter` due to the fact it stops the moment it finds
a valid entry.
Parameters
-----------
predicate
A function that returns a boolean-like result.
seq: :class:`collections.abc.Iterable`
The iterable to search through.
A helper function to format a :class:`datetime.datetime` for presentation within Discord.
This allows for a locale-independent way of presenting data using Discord specific Markdown.
+-------------+----------------------------+-----------------+
| Style | Example Output | Description |
+=============+============================+=================+
| t | 22:57 | Short Time |
+-------------+----------------------------+-----------------+
| T | 22:57:58 | Long Time |
+-------------+----------------------------+-----------------+
| d | 17/05/2016 | Short Date |
+-------------+----------------------------+-----------------+
| D | 17 May 2016 | Long Date |
+-------------+----------------------------+-----------------+
| f (default) | 17 May 2016 22:57 | Short Date Time |
+-------------+----------------------------+-----------------+
| F | Tuesday, 17 May 2016 22:57 | Long Date Time |
+-------------+----------------------------+-----------------+
| R | 5 years ago | Relative Time |
+-------------+----------------------------+-----------------+
Note that the exact output depends on the user's locale setting in the client. The example output
presented is using the ``en-GB`` locale.
.. versionadded:: 2.0
Parameters
-----------
dt: :class:`datetime.datetime`
The datetime to format.
style: :class:`str`
The style to format the datetime with.
Returns
--------
:class:`str`
The formatted string.
A helper that returns the first element in the iterable that meets
all the traits passed in ``attrs``. This is an alternative for
:func:`~discord.utils.find`.
When multiple attributes are specified, they are checked using
logical AND, not logical OR. Meaning they have to meet every
attribute passed in and not one of them.
To have a nested attribute search (i.e. search by ``x.y``) then
pass in ``x__y`` as the keyword argument.
If nothing is found that matches the attributes passed, then
``None`` is returned.
Examples
---------
Basic usage:
.. code-block:: python3
member = discord.utils.get(message.guild.members, name='Foo')
Multiple attribute matching:
.. code-block:: python3
channel = discord.utils.get(guild.voice_channels, name='Foo', bitrate=64000)
Nested attribute matching:
.. code-block:: python3
channel = discord.utils.get(client.get_all_channels(), guild__name='Cool', name='general')
Parameters
-----------
iterable
An iterable to search through.
\*\*attrs
Keyword arguments that denote attributes to search with.
A helper function that returns the OAuth2 URL for inviting the bot
into guilds.
Parameters
-----------
client_id: Union[:class:`int`, :class:`str`]
The client ID for your bot.
permissions: :class:`~discord.Permissions`
The permissions you're requesting. If not given then you won't be requesting any
permissions.
guild: :class:`~discord.abc.Snowflake`
The guild to pre-select in the authorization screen, if available.
redirect_uri: :class:`str`
An optional valid redirect URI.
scopes: Iterable[:class:`str`]
An optional valid list of scopes. Defaults to ``('bot',)``.
.. versionadded:: 1.7
disable_guild_select: :class:`bool`
Whether to disallow the user from changing the guild dropdown.
.. versionadded:: 2.0
Returns
--------
:class:`str`
The OAuth2 URL for inviting the bot into guilds.
A helper function that removes markdown characters.
.. versionadded:: 1.7
.. note::
This function is not markdown aware and may remove meaning from the original text. For example,
if the input contains ``10 * 5`` then it will be converted into ``10 5``.
Parameters
-----------
text: :class:`str`
The text to remove markdown from.
ignore_links: :class:`bool`
Whether to leave links alone when removing markdown. For example,
if a URL in the text contains characters such as ``_`` then it will
be left alone. Defaults to ``True``.
Returns
--------
:class:`str`
The text with the markdown special characters removed.
Resolves an invite from a :class:`~discord.Invite`, URL or code.
Parameters
-----------
invite: Union[:class:`~discord.Invite`, :class:`str`]
The invite.
Returns
--------
:class:`str`
The invite code.
Resolves a template code from a :class:`~discord.Template`, URL or code.
.. versionadded:: 1.4
Parameters
-----------
code: Union[:class:`~discord.Template`, :class:`str`]
The code.
Returns
--------
:class:`str`
The template code.
Parameters
-----------
id: :class:`int`
The snowflake ID.
Returns
--------
:class:`datetime.datetime`
An aware datetime in UTC representing the creation time of the snowflake.
Returns a numeric snowflake pretending to be created at the given date.
When using as the lower end of a range, use ``time_snowflake(high=False) - 1``
to be inclusive, ``high=True`` to be exclusive.
When using as the higher end of a range, use ``time_snowflake(high=True) + 1``
to be inclusive, ``high=False`` to be exclusive.
Parameters
-----------
dt: :class:`datetime.datetime`
A datetime object to convert to a snowflake.
If naive, the timezone is assumed to be local time.
high: :class:`bool`
Whether or not to set the lower 22 bit to high or low.
Returns
--------
:class:`int`
The snowflake representing the time given.
A helper function to return an aware UTC datetime representing the current time.
This should be preferred to :meth:`datetime.datetime.utcnow` since it is an aware
datetime, compared to the naive datetime in the standard library.
.. versionadded:: 2.0
Returns
--------
:class:`datetime.datetime`
The current aware datetime in UTC.
Icons must be power of 2 within [16, 4096].
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
type: ignore turn off filter reset filter global -> local Special case the single element call type: ignore type: ignore type: ignore circular import circular import ForwardRefs always evaluate their internals type: ignore type: ignore | 9,872 | en | 0.663573 |
# Implements I/O over asynchronous sockets
from time import time
from sys import exc_info
from traceback import format_exception
from asyncore import socket_map
from asyncore import loop
from pysnmp.carrier.base import AbstractTransportDispatcher
from pysnmp.error import PySnmpError
class AsyncoreDispatcher(AbstractTransportDispatcher):
    """Transport dispatcher that drives registered SNMP transports with the
    (legacy) asyncore event loop, using a private socket map."""
    def __init__(self):
        self.__sockMap = {} # use own map for MT safety
        # Default poll period in seconds for each loop() iteration.
        self.timeout = 0.5
        AbstractTransportDispatcher.__init__(self)
    def getSocketMap(self): return self.__sockMap
    def setSocketMap(self, sockMap=socket_map): self.__sockMap = sockMap
    def registerTransport(self, tDomain, t):
        # Register with the base dispatcher, then hook the transport's socket
        # into our private asyncore map so loop() will service it.
        AbstractTransportDispatcher.registerTransport(self, tDomain, t)
        t.registerSocket(self.__sockMap)
    def unregisterTransport(self, tDomain):
        self.getTransport(tDomain).unregisterSocket(self.__sockMap)
        AbstractTransportDispatcher.unregisterTransport(self, tDomain)
    def transportsAreWorking(self):
        # Returns 1 while any registered transport still has queued output.
        for transport in self.__sockMap.values():
            if transport.writable():
                return 1
        return 0
    def runDispatcher(self, timeout=0.0):
        # Poll until no jobs are pending and nothing remains to be written.
        # A zero/omitted timeout falls back to self.timeout (the
        # "timeout and timeout or self.timeout" form is the pre-ternary idiom).
        while self.jobsArePending() or self.transportsAreWorking():
            try:
                loop(timeout and timeout or self.timeout,
                     use_poll=True, map=self.__sockMap, count=1)
            except KeyboardInterrupt:
                # Let Ctrl-C propagate untouched.
                raise
            except:
                # Deliberate blanket catch: any asyncore failure is re-raised
                # as a PySnmpError carrying the formatted traceback.
                raise PySnmpError('poll error: %s' % ';'.join(format_exception(*exc_info())))
            self.handleTimerTick(time())
| scalyr_agent/third_party/pysnmp/carrier/asyncore/dispatch.py | 1,605 | Implements I/O over asynchronous sockets use own map for MT safety | 66 | en | 0.609869 |
import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import math
import time
from . import framework
import network
class policy_agent(framework.re_model):
    """Policy network for RL-based instance selection: a CNN sentence encoder
    feeding a binary (keep/discard) instance classifier.  The loss is scaled
    by a scalar placeholder so the RL reward can weight each update."""

    def __init__(self, train_data_loader, batch_size, max_length=120):
        framework.re_model.__init__(self, train_data_loader, batch_size, max_length)
        # Scalar multiplier on the loss; fed with the reward at update time.
        self.weights = tf.placeholder(tf.float32, shape=(), name="weights_scalar")

        embedded = network.embedding.word_position_embedding(
            self.word, self.word_vec_mat, self.pos1, self.pos2)
        # Two encoder branches over the same embedding: dropout for training,
        # none for inference.
        encoded_train = network.encoder.cnn(embedded, keep_prob=0.5)
        encoded_test = network.encoder.cnn(embedded, keep_prob=1.0)
        self._train_logit = network.selector.instance(encoded_train, 2, keep_prob=0.5)
        self._test_logit = network.selector.instance(encoded_test, 2, keep_prob=1.0)
        self._loss = network.classifier.softmax_cross_entropy(
            self._train_logit, self.ins_label, 2, weights=self.weights)

    def loss(self):
        """Weighted softmax cross-entropy over the two actions."""
        return self._loss

    def train_logit(self):
        """Logits from the dropout (training) branch."""
        return self._train_logit

    def test_logit(self):
        """Logits from the no-dropout (inference) branch."""
        return self._test_logit
class rl_re_framework(framework.re_framework):
    """RL training framework: a policy agent decides which (possibly noisy)
    instances to keep, and the main relation-extraction model trains on the
    filtered batches.  The agent is rewarded from the main model's loss."""
    def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
        framework.re_framework.__init__(self, train_data_loader, test_data_loader, max_length, batch_size)
    def agent_one_step(self, sess, agent_model, batch_data, run_array, weights=1):
        """Run one session step of the policy agent on a batch.

        ``weights`` is the scalar loss multiplier (set to the scaled RL
        reward during policy updates, 1 otherwise).
        """
        feed_dict = {
            agent_model.word: batch_data['word'],
            agent_model.pos1: batch_data['pos1'],
            agent_model.pos2: batch_data['pos2'],
            agent_model.ins_label: batch_data['agent_label'],
            agent_model.length: batch_data['length'],
            agent_model.weights: weights
        }
        # Feed the mask only when both the batch and the model support it
        # (e.g. PCNN-style encoders).
        if 'mask' in batch_data and hasattr(agent_model, "mask"):
            feed_dict.update({agent_model.mask: batch_data['mask']})
        result = sess.run(run_array, feed_dict)
        return result
    def pretrain_main_model(self, max_epoch):
        """Pre-train the main relation model on all instances (no selection)."""
        for epoch in range(max_epoch):
            print('###### Epoch ' + str(epoch) + ' ######')
            # Running accuracy counters over the whole epoch.
            tot_correct = 0
            tot_not_na_correct = 0
            tot = 0
            tot_not_na = 0
            i = 0
            time_sum = 0
            for i, batch_data in enumerate(self.train_data_loader):
                time_start = time.time()
                iter_loss, iter_logit, _train_op = self.one_step(self.sess, self.model, batch_data, [self.model.loss(), self.model.train_logit(), self.train_op])
                time_end = time.time()
                t = time_end - time_start
                time_sum += t
                iter_output = iter_logit.argmax(-1)
                iter_label = batch_data['rel']
                iter_correct = (iter_output == iter_label).sum()
                # "not NA": correct predictions on instances whose gold label
                # is a real relation (label 0 is NA).
                iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
                tot_correct += iter_correct
                tot_not_na_correct += iter_not_na_correct
                tot += iter_label.shape[0]
                tot_not_na += (iter_label != 0).sum()
                if tot_not_na > 0:
                    sys.stdout.write("[pretrain main model] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                    sys.stdout.flush()
                i += 1
            # NOTE(review): raises ZeroDivisionError if the loader is empty.
            print("\nAverage iteration time: %f" % (time_sum / i))
    def pretrain_agent_model(self, max_epoch):
        """Pre-train the policy agent to imitate the distant-supervision
        labels: action 1 = keep (instance has a relation), 0 = discard."""
        # Pre-train policy agent
        for epoch in range(max_epoch):
            print('###### [Pre-train Policy Agent] Epoch ' + str(epoch) + ' ######')
            tot_correct = 0
            tot_not_na_correct = 0
            tot = 0
            tot_not_na = 0
            time_sum = 0
            for i, batch_data in enumerate(self.train_data_loader):
                time_start = time.time()
                # Binarise the instance labels: any relation -> action 1.
                # ("+ 0" copies the array so the original is not clobbered.)
                batch_data['agent_label'] = batch_data['ins_rel'] + 0
                batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
                iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
                time_end = time.time()
                t = time_end - time_start
                time_sum += t
                iter_output = iter_logit.argmax(-1)
                iter_label = batch_data['ins_rel']
                iter_correct = (iter_output == iter_label).sum()
                iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
                tot_correct += iter_correct
                tot_not_na_correct += iter_not_na_correct
                tot += iter_label.shape[0]
                tot_not_na += (iter_label != 0).sum()
                if tot_not_na > 0:
                    sys.stdout.write("[pretrain policy agent] epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                    sys.stdout.flush()
                # NOTE(review): 'i' comes from the enumerate loop; with an
                # empty loader this line would raise NameError.
                i += 1
    def train(self,
              model, # The main model
              agent_model, # The model of policy agent
              model_name,
              ckpt_dir='./checkpoint',
              summary_dir='./summary',
              test_result_dir='./test_result',
              learning_rate=0.5,
              max_epoch=60,
              pretrain_agent_epoch=1,
              pretrain_model=None,
              test_epoch=1,
              optimizer=tf.train.GradientDescentOptimizer):
        """Joint RL training: alternately update the policy agent (rewarded by
        the main model's loss on the batches it filters) and the model itself.

        ``model``/``agent_model`` are constructor callables, not instances.
        NOTE(review): the ``pretrain_*_epoch`` parameters are not used below —
        the pre-train calls hard-code max_epoch=5 and 1; confirm intent.
        """
        print("Start training...")
        # Init
        self.model = model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
        model_optimizer = optimizer(learning_rate)
        grads = model_optimizer.compute_gradients(self.model.loss())
        self.train_op = model_optimizer.apply_gradients(grads)
        # Init policy agent
        self.agent_model = agent_model(self.train_data_loader, self.train_data_loader.batch_size, self.train_data_loader.max_length)
        agent_optimizer = optimizer(learning_rate)
        agent_grads = agent_optimizer.compute_gradients(self.agent_model.loss())
        self.agent_train_op = agent_optimizer.apply_gradients(agent_grads)
        # Session, writer and saver
        self.sess = tf.Session()
        summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
        saver = tf.train.Saver(max_to_keep=None)
        if pretrain_model is None:
            self.sess.run(tf.global_variables_initializer())
        else:
            saver.restore(self.sess, pretrain_model)
        self.pretrain_main_model(max_epoch=5) # Pre-train main model
        self.pretrain_agent_model(max_epoch=1) # Pre-train policy agent
        # Train
        tot_delete = 0
        batch_count = 0
        instance_count = 0
        reward = 0.0
        best_metric = 0
        best_prec = None
        best_recall = None
        not_best_count = 0 # Stop training after several epochs without improvement.
        for epoch in range(max_epoch):
            print('###### Epoch ' + str(epoch) + ' ######')
            tot_correct = 0
            tot_not_na_correct = 0
            tot = 0
            tot_not_na = 0
            i = 0
            time_sum = 0
            batch_stack = []
            # Update policy agent
            for i, batch_data in enumerate(self.train_data_loader):
                # Make action
                batch_data['agent_label'] = batch_data['ins_rel'] + 0
                batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
                batch_stack.append(batch_data)
                iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
                action_result = iter_logit.argmax(-1)
                # Calculate reward
                batch_delete = np.sum(np.logical_and(batch_data['ins_rel'] != 0, action_result == 0))
                # Discard instances the agent voted against (in place).
                batch_data['ins_rel'][action_result == 0] = 0
                iter_loss = self.one_step(self.sess, self.model, batch_data, [self.model.loss()])[0]
                reward += iter_loss
                tot_delete += batch_delete
                batch_count += 1
                # Update parameters of policy agent
                alpha = 0.1
                if batch_count == 100:
                    # Reward is derived from the average main-model loss over
                    # the last 100 batches; lower loss -> higher reward.
                    reward = reward / float(batch_count)
                    average_loss = reward
                    reward = - math.log(1 - math.e ** (-reward))
                    sys.stdout.write('tot delete : %f | reward : %f | average loss : %f\r' % (tot_delete, reward, average_loss))
                    sys.stdout.flush()
                    # Replay the buffered batches, scaling the agent loss by
                    # the (possibly negative) reward.
                    for batch_data in batch_stack:
                        self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_train_op], weights=reward * alpha)
                    batch_count = 0
                    reward = 0
                    tot_delete = 0
                    batch_stack = []
                i += 1
            # Train the main model
            for i, batch_data in enumerate(self.train_data_loader):
                batch_data['agent_label'] = batch_data['ins_rel'] + 0
                batch_data['agent_label'][batch_data['agent_label'] > 0] = 1
                time_start = time.time()
                # Make actions
                iter_logit = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.train_logit()])[0]
                action_result = iter_logit.argmax(-1)
                batch_data['ins_rel'][action_result == 0] = 0
                # Real training
                # NOTE(review): this section is labelled "train the main model"
                # but steps agent_model/agent_train_op — confirm whether
                # self.model/self.train_op was intended here.
                iter_loss, iter_logit, _train_op = self.agent_one_step(self.sess, self.agent_model, batch_data, [self.agent_model.loss(), self.agent_model.train_logit(), self.agent_train_op])
                time_end = time.time()
                t = time_end - time_start
                time_sum += t
                iter_output = iter_logit.argmax(-1)
                # NOTE(review): tot/tot_not_na and the *_correct counters are
                # never updated in this loop, so tot_not_na stays 0 and this
                # progress line never prints.
                if tot_not_na > 0:
                    sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                    sys.stdout.flush()
                i += 1
            print("\nAverage iteration time: %f" % (time_sum / i))
            if (epoch + 1) % test_epoch == 0:
                metric = self.test(model)
                if metric > best_metric:
                    best_metric = metric
                    best_prec = self.cur_prec
                    best_recall = self.cur_recall
                    print("Best model, storing...")
                    if not os.path.isdir(ckpt_dir):
                        os.mkdir(ckpt_dir)
                    path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
                    print("Finish storing")
                    not_best_count = 0
                else:
                    not_best_count += 1
            if not_best_count >= 20:
                break
        print("######")
        print("Finish training " + model_name)
        print("Best epoch auc = %f" % (best_metric))
        if (not best_prec is None) and (not best_recall is None):
            if not os.path.isdir(test_result_dir):
                os.mkdir(test_result_dir)
            # Persist the best precision/recall curve for later plotting.
            np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
            np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
| nrekit/rl.py | 11,771 | Pre-train policy agent The main model The model of policy agent Init Init policy agent Session, writer and saver Pre-train main model Pre-train policy agent Train Stop training after several epochs without improvement. Update policy agent Make action Calculate reward Update parameters of policy agent Train the main model Make actions Real training | 350 | en | 0.827664 |
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import six
import mock
from jinja2.exceptions import UndefinedError
from st2common import log as logging
from st2common.content.loader import MetaLoader
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerDB
from st2common.models.db.trigger import TriggerInstanceDB
from st2common.models.system.common import ResourceReference
from st2common.persistence.reactor import Rule, TriggerInstance, Trigger
from st2reactor.rules.enforcer import RuleEnforcer
from st2reactor.rules.matcher import RulesMatcher
__all__ = [
'RuleTester'
]
LOG = logging.getLogger(__name__)
class RuleTester(object):
    """
    Standalone helper which evaluates a single trigger instance against a
    single rule. Inputs can come either from definition files on disk or
    from the database (by reference / id).
    """

    def __init__(self, rule_file_path=None, rule_ref=None, trigger_instance_file_path=None,
                 trigger_instance_id=None):
        """
        :param rule_file_path: Path to the file containing rule definition.
        :type rule_file_path: ``str``

        :param rule_ref: Reference of an existing rule in the database.
        :type rule_ref: ``str``

        :param trigger_instance_file_path: Path to the file containing trigger instance definition.
        :type trigger_instance_file_path: ``str``

        :param trigger_instance_id: ID of an existing trigger instance in the database.
        :type trigger_instance_id: ``str``
        """
        self._rule_file_path = rule_file_path
        self._rule_ref = rule_ref
        self._trigger_instance_file_path = trigger_instance_file_path
        self._trigger_instance_id = trigger_instance_id
        self._meta_loader = MetaLoader()

    def evaluate(self):
        """
        Evaluate trigger instance against the rule.

        :return: ``True`` if the rule matches, ``False`` otherwise.
        :rtype: ``boolean``
        """
        rule_db = self._get_rule_db()
        trigger_instance_db, trigger_db = self._get_trigger_instance_db()

        # The trigger check needs to be performed here as that is not performed
        # by RulesMatcher.
        if rule_db.trigger != trigger_db.ref:
            LOG.info('rule.trigger "%s" and trigger.ref "%s" do not match.',
                     rule_db.trigger, trigger_db.ref)
            return False

        # Check if rule matches criteria.
        matcher = RulesMatcher(trigger_instance=trigger_instance_db, trigger=trigger_db,
                               rules=[rule_db], extra_info=True)
        matching_rules = matcher.get_matching_rules()

        # Rule does not match so early exit.
        if len(matching_rules) < 1:
            return False

        # Check if rule can be enforced.
        enforcer = RuleEnforcer(trigger_instance=trigger_instance_db, rule=rule_db)

        # Mock objects stand in for DB models which are intentionally not
        # loaded - this tool is meant to work without a database (see note
        # on partial parameter resolution below).
        runner_type_db = mock.Mock()
        runner_type_db.runner_parameters = {}
        action_db = mock.Mock()
        action_db.parameters = {}
        params = rule_db.action.parameters  # pylint: disable=no-member

        context, additional_contexts = enforcer.get_action_execution_context(action_db=action_db,
                                                                             trace_context=None)

        # Note: We only return partially resolved parameters.
        # To be able to return all parameters we would need access to corresponding ActionDB,
        # RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the
        # tool is meant to be used standalone.
        try:
            params = enforcer.get_resolved_parameters(action_db=action_db,
                                                      runnertype_db=runner_type_db,
                                                      params=params,
                                                      context=context,
                                                      additional_contexts=additional_contexts)

            LOG.info('Action parameters resolved to:')
            for param in six.iteritems(params):
                LOG.info('\t%s: %s', param[0], param[1])
            return True
        except (UndefinedError, ValueError) as e:
            LOG.error('Failed to resolve parameters\n\tOriginal error : %s', six.text_type(e))
            return False
        except Exception:
            # Fix: was a bare "except:" which would also swallow SystemExit
            # and KeyboardInterrupt.
            LOG.exception('Failed to resolve parameters.')
            return False

    def _get_rule_db(self):
        """Load the rule model from a file or from the database."""
        if self._rule_file_path:
            return self._get_rule_db_from_file(
                file_path=os.path.realpath(self._rule_file_path))
        elif self._rule_ref:
            return Rule.get_by_ref(self._rule_ref)
        raise ValueError('One of _rule_file_path or _rule_ref should be specified.')

    def _get_trigger_instance_db(self):
        """Load the trigger instance (and its trigger) from a file or the database."""
        if self._trigger_instance_file_path:
            return self._get_trigger_instance_db_from_file(
                file_path=os.path.realpath(self._trigger_instance_file_path))
        elif self._trigger_instance_id:
            trigger_instance_db = TriggerInstance.get_by_id(self._trigger_instance_id)
            trigger_db = Trigger.get_by_ref(trigger_instance_db.trigger)
            return trigger_instance_db, trigger_db
        # Fix: added the missing space between the concatenated string
        # literals (message previously read "...or_trigger_instance_id...").
        raise ValueError('One of _trigger_instance_file_path or '
                         '_trigger_instance_id should be specified.')

    def _get_rule_db_from_file(self, file_path):
        """Build an unsaved RuleDB model from a rule definition file."""
        data = self._meta_loader.load(file_path=file_path)
        pack = data.get('pack', 'unknown')
        name = data.get('name', 'unknown')
        trigger = data['trigger']['type']
        criteria = data.get('criteria', None)
        action = data.get('action', {})
        rule_db = RuleDB(pack=pack, name=name, trigger=trigger, criteria=criteria, action=action,
                         enabled=True)
        rule_db.id = 'rule_tester_rule'
        return rule_db

    def _get_trigger_instance_db_from_file(self, file_path):
        """Build unsaved TriggerInstanceDB and TriggerDB models from a definition file."""
        data = self._meta_loader.load(file_path=file_path)
        instance = TriggerInstanceDB(**data)
        instance.id = 'rule_tester_instance'
        trigger_ref = ResourceReference.from_string_reference(instance['trigger'])
        trigger_db = TriggerDB(pack=trigger_ref.pack, name=trigger_ref.name, type=trigger_ref.ref)
        return instance, trigger_db
| st2reactor/st2reactor/rules/tester.py | 6,580 | :param rule_file_path: Path to the file containing rule definition.
:type rule_file_path: ``str``
:param trigger_instance_file_path: Path to the file containg trigger instance definition.
:type trigger_instance_file_path: ``str``
Evaluate trigger instance against the rule.
:return: ``True`` if the rule matches, ``False`` otherwise.
:rtype: ``boolean``
Copyright 2020 The StackStorm Authors. Copyright 2019 Extreme Networks, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. The trigger check needs to be performed here as that is not performed by RulesMatcher. Check if rule matches criteria. Rule does not match so early exit. Check if rule can be enforced pylint: disable=no-member Note: We only return partially resolved parameters. To be able to return all parameters we would need access to corresponding ActionDB, RunnerTypeDB and ConfigDB object, but this would add a dependency on the database and the tool is meant to be used standalone. | 1,429 | en | 0.865998 |
"""
Support for the Dyson 360 eye vacuum cleaner robot.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/vacuum.dyson/
"""
import logging
from homeassistant.components.vacuum import (
SUPPORT_BATTERY, SUPPORT_FAN_SPEED, SUPPORT_PAUSE, SUPPORT_RETURN_HOME,
SUPPORT_STATUS, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
VacuumDevice)
from homeassistant.helpers.icon import icon_for_battery_level
from . import DYSON_DEVICES
_LOGGER = logging.getLogger(__name__)
ATTR_CLEAN_ID = 'clean_id'
ATTR_FULL_CLEAN_TYPE = 'full_clean_type'
ATTR_POSITION = 'position'
DEPENDENCIES = ['dyson']
DYSON_360_EYE_DEVICES = "dyson_360_eye_devices"
SUPPORT_DYSON = SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PAUSE | \
SUPPORT_RETURN_HOME | SUPPORT_FAN_SPEED | SUPPORT_STATUS | \
SUPPORT_BATTERY | SUPPORT_STOP
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dyson 360 Eye robot vacuum platform."""
    from libpurecoollink.dyson_360_eye import Dyson360Eye

    _LOGGER.debug("Creating new Dyson 360 Eye robot vacuum")
    hass.data.setdefault(DYSON_360_EYE_DEVICES, [])

    # Wrap every 360 Eye device registered by the parent Dyson component.
    for parent_device in hass.data[DYSON_DEVICES]:
        if not isinstance(parent_device, Dyson360Eye):
            continue
        hass.data[DYSON_360_EYE_DEVICES].append(
            Dyson360EyeDevice(parent_device))

    add_entities(hass.data[DYSON_360_EYE_DEVICES])
    return True
class Dyson360EyeDevice(VacuumDevice):
    """Dyson 360 Eye robot vacuum device."""

    def __init__(self, device):
        """Dyson 360 Eye robot vacuum device."""
        _LOGGER.debug("Creating device %s", device.name)
        self._device = device

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self.hass.async_add_job(
            self._device.add_message_listener, self.on_message)

    def on_message(self, message):
        """Handle a new messages that was received from the vacuum."""
        _LOGGER.debug("Message received for %s device: %s", self.name, message)
        self.schedule_update_ha_state()

    @property
    def should_poll(self) -> bool:
        """Return True if entity has to be polled for state.

        False if entity pushes its state to HA.
        """
        return False

    @property
    def name(self):
        """Return the name of the device."""
        return self._device.name

    @property
    def status(self):
        """Return the status of the vacuum cleaner."""
        from libpurecoollink.const import Dyson360EyeMode

        # Map library states to human readable labels; unknown states are
        # passed through unchanged.
        state_labels = {
            Dyson360EyeMode.INACTIVE_CHARGING: "Stopped - Charging",
            Dyson360EyeMode.INACTIVE_CHARGED: "Stopped - Charged",
            Dyson360EyeMode.FULL_CLEAN_PAUSED: "Paused",
            Dyson360EyeMode.FULL_CLEAN_RUNNING: "Cleaning",
            Dyson360EyeMode.FULL_CLEAN_ABORTED: "Returning home",
            Dyson360EyeMode.FULL_CLEAN_INITIATED: "Start cleaning",
            Dyson360EyeMode.FAULT_USER_RECOVERABLE: "Error - device blocked",
            Dyson360EyeMode.FAULT_REPLACE_ON_DOCK:
                "Error - Replace device on dock",
            Dyson360EyeMode.FULL_CLEAN_FINISHED: "Finished",
            Dyson360EyeMode.FULL_CLEAN_NEEDS_CHARGE: "Need charging"
        }
        current_state = self._device.state.state
        return state_labels.get(current_state, current_state)

    @property
    def battery_level(self):
        """Return the battery level of the vacuum cleaner."""
        return self._device.state.battery_level

    @property
    def fan_speed(self):
        """Return the fan speed of the vacuum cleaner."""
        from libpurecoollink.const import PowerMode

        labels = {PowerMode.MAX: "Max", PowerMode.QUIET: "Quiet"}
        return labels[self._device.state.power_mode]

    @property
    def fan_speed_list(self):
        """Get the list of available fan speed steps of the vacuum cleaner."""
        return ["Quiet", "Max"]

    @property
    def device_state_attributes(self):
        """Return the specific state attributes of this vacuum cleaner."""
        return {ATTR_POSITION: str(self._device.state.position)}

    @property
    def is_on(self) -> bool:
        """Return True if entity is on."""
        from libpurecoollink.const import Dyson360EyeMode

        active_states = (
            Dyson360EyeMode.FULL_CLEAN_INITIATED,
            Dyson360EyeMode.FULL_CLEAN_ABORTED,
            Dyson360EyeMode.FULL_CLEAN_RUNNING,
        )
        return self._device.state.state in active_states

    @property
    def available(self) -> bool:
        """Return True if entity is available."""
        return True

    @property
    def supported_features(self):
        """Flag vacuum cleaner robot features that are supported."""
        return SUPPORT_DYSON

    @property
    def battery_icon(self):
        """Return the battery icon for the vacuum cleaner."""
        from libpurecoollink.const import Dyson360EyeMode

        is_charging = (
            self._device.state.state == Dyson360EyeMode.INACTIVE_CHARGING)
        return icon_for_battery_level(
            battery_level=self.battery_level, charging=is_charging)

    def turn_on(self, **kwargs):
        """Turn the vacuum on."""
        from libpurecoollink.const import Dyson360EyeMode

        _LOGGER.debug("Turn on device %s", self.name)
        # A paused clean is resumed; anything else starts a fresh clean.
        if self._device.state.state == Dyson360EyeMode.FULL_CLEAN_PAUSED:
            self._device.resume()
        else:
            self._device.start()

    def turn_off(self, **kwargs):
        """Turn the vacuum off and return to home."""
        _LOGGER.debug("Turn off device %s", self.name)
        self._device.pause()

    def stop(self, **kwargs):
        """Stop the vacuum cleaner."""
        _LOGGER.debug("Stop device %s", self.name)
        self._device.pause()

    def set_fan_speed(self, fan_speed, **kwargs):
        """Set fan speed."""
        from libpurecoollink.const import PowerMode

        _LOGGER.debug("Set fan speed %s on device %s", fan_speed, self.name)
        mode_map = {"Quiet": PowerMode.QUIET, "Max": PowerMode.MAX}
        self._device.set_power_mode(mode_map[fan_speed])

    def start_pause(self, **kwargs):
        """Start, pause or resume the cleaning task."""
        from libpurecoollink.const import Dyson360EyeMode

        state = self._device.state.state
        if state == Dyson360EyeMode.FULL_CLEAN_PAUSED:
            _LOGGER.debug("Resume device %s", self.name)
            self._device.resume()
        elif state in (Dyson360EyeMode.INACTIVE_CHARGED,
                       Dyson360EyeMode.INACTIVE_CHARGING):
            _LOGGER.debug("Start device %s", self.name)
            self._device.start()
        else:
            _LOGGER.debug("Pause device %s", self.name)
            self._device.pause()

    def return_to_base(self, **kwargs):
        """Set the vacuum cleaner to return to the dock."""
        _LOGGER.debug("Return to base device %s", self.name)
        self._device.abort()
| homeassistant/components/dyson/vacuum.py | 7,243 | Dyson 360 Eye robot vacuum device.
Dyson 360 Eye robot vacuum device.
Return True if entity is available.
Return the battery icon for the vacuum cleaner.
Return the battery level of the vacuum cleaner.
Return the specific state attributes of this vacuum cleaner.
Return the fan speed of the vacuum cleaner.
Get the list of available fan speed steps of the vacuum cleaner.
Return True if entity is on.
Return the name of the device.
Handle a new messages that was received from the vacuum.
Set the vacuum cleaner to return to the dock.
Set fan speed.
Set up the Dyson 360 Eye robot vacuum platform.
Return True if entity has to be polled for state.
False if entity pushes its state to HA.
Start, pause or resume the cleaning task.
Return the status of the vacuum cleaner.
Stop the vacuum cleaner.
Flag vacuum cleaner robot features that are supported.
Turn the vacuum off and return to home.
Turn the vacuum on.
Support for the Dyson 360 eye vacuum cleaner robot.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/vacuum.dyson/
Get Dyson Devices from parent component | 1,129 | en | 0.855951 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the PyMVPA package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Unit tests for PyMVPA misc.plot"""
from mvpa2.testing import *
skip_if_no_external("pylab")
import pylab as pl
from matplotlib.figure import Figure
from mvpa2.misc.plot.base import plot_dataset_chunks
import numpy as np
from glob import glob
from mock import patch
from os.path import join as pjoin
# Random fixture arrays of increasing dimensionality.
# NOTE(review): none of these appear to be referenced by the test below —
# possibly kept for interactive experimentation; confirm before removing.
data2d = np.random.randn(2, 4, 4)
data3d = np.random.randn(3, 4, 4)
data2d_3d = np.random.randn(2, 4, 4, 4)
data2d_4d = np.random.randn(2, 4, 4, 4, 2)
data2d_5d = np.random.randn(2, 4, 4, 4, 2, 3)
from mvpa2.testing.datasets import datasets
@sweepargs(dsp=list(datasets.items()))
def test_plot_dataset_chunks(dsp):
    """Smoke test: plot_dataset_chunks should work on every stock dataset."""
    name, ds = dsp
    # Only non-float targets with chunk assignments can be plotted.
    if ds.targets.dtype.kind == "f" or "chunks" not in ds.sa:
        return
    print(name)
    # Only two features can be visualized at a time.
    plot_dataset_chunks(ds[:, :2])
    pl.close(pl.gcf())
    if ds.nfeatures > 2:
        assert_raises(ValueError, plot_dataset_chunks, ds)
| mvpa2/tests/test_misc_plot.py | 1,363 | Unit tests for PyMVPA misc.plot
emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- vi: set ft=python sts=4 ts=4 sw=4 et: See COPYING file distributed along with the PyMVPA package for the copyright and license terms. smoke test for now nothing to plot in this one could only plot two | 348 | en | 0.770396 |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from collections import OrderedDict
import numpy as np
import warnings
import scipy.constants
import re
__author__ = "Sudarsan Surendralal"
__copyright__ = (
"Copyright 2021, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Sudarsan Surendralal"
__email__ = "surendralal@mpie.de"
__status__ = "production"
__date__ = "Sep 1, 2017"
# Conversion factor applied to the kBar stresses parsed from OUTCAR when
# computing pressures below; presumably kBar -> eV/Angstrom^3 given the
# name — TODO confirm the derivation of the 1e22 divisor.
KBAR_TO_EVA = (
    scipy.constants.physical_constants["joule-electron volt relationship"][0] / 1e22
)
class Outcar(object):
"""
This module is used to parse VASP OUTCAR files.
Attributes:
parse_dict (dict): A dictionary with all the useful quantities parsed from an OUTCAR file after from_file() is
executed
"""
def __init__(self):
self.parse_dict = dict()
    def from_file(self, filename="OUTCAR"):
        """
        Parse and store relevant quantities from the OUTCAR file into parse_dict.

        The file is read once and the raw lines are passed to every individual
        getter to avoid repeated disk access.

        Args:
            filename (str): Filename of the OUTCAR file to parse
        """
        with open(filename, "r") as f:
            lines = f.readlines()
        # Parse each quantity from the cached lines.
        energies = self.get_total_energies(filename=filename, lines=lines)
        energies_int = self.get_energy_without_entropy(filename=filename, lines=lines)
        energies_zero = self.get_energy_sigma_0(filename=filename, lines=lines)
        scf_energies = self.get_all_total_energies(filename=filename, lines=lines)
        n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
        forces = self.get_forces(filename=filename, lines=lines, n_atoms=n_atoms)
        positions = self.get_positions(filename=filename, lines=lines, n_atoms=n_atoms)
        cells = self.get_cells(filename=filename, lines=lines)
        steps = self.get_steps(filename=filename, lines=lines)
        temperatures = self.get_temperatures(filename=filename, lines=lines)
        time = self.get_time(filename=filename, lines=lines)
        fermi_level = self.get_fermi_level(filename=filename, lines=lines)
        scf_moments = self.get_dipole_moments(filename=filename, lines=lines)
        kin_energy_error = self.get_kinetic_energy_error(filename=filename, lines=lines)
        # si_unit=False -> stresses in kBar (converted to pressure below).
        stresses = self.get_stresses(filename=filename, si_unit=False, lines=lines)
        n_elect = self.get_nelect(filename=filename, lines=lines)
        e_fermi_list, vbm_list, cbm_list = self.get_band_properties(filename=filename, lines=lines)
        elastic_constants = self.get_elastic_constants(filename=filename, lines=lines)
        # k-point parsing is best-effort: failures are reported, not raised.
        try:
            irreducible_kpoints = self.get_irreducible_kpoints(
                filename=filename, lines=lines
            )
        except ValueError:
            print("irreducible kpoints not parsed !")
            irreducible_kpoints = None
        magnetization, final_magmom_lst = self.get_magnetization(
            filename=filename, lines=lines
        )
        broyden_mixing = self.get_broyden_mixing_mesh(filename=filename, lines=lines)
        # Collect everything in parse_dict for to_hdf()/callers.
        self.parse_dict["energies"] = energies
        self.parse_dict["energies_int"] = energies_int
        self.parse_dict["energies_zero"] = energies_zero
        self.parse_dict["scf_energies"] = scf_energies
        self.parse_dict["forces"] = forces
        self.parse_dict["positions"] = positions
        self.parse_dict["cells"] = cells
        self.parse_dict["steps"] = steps
        self.parse_dict["temperatures"] = temperatures
        self.parse_dict["time"] = time
        self.parse_dict["fermi_level"] = fermi_level
        self.parse_dict["scf_dipole_moments"] = scf_moments
        self.parse_dict["kin_energy_error"] = kin_energy_error
        self.parse_dict["stresses"] = stresses
        self.parse_dict["irreducible_kpoints"] = irreducible_kpoints
        self.parse_dict["magnetization"] = magnetization
        self.parse_dict["final_magmoms"] = final_magmom_lst
        self.parse_dict["broyden_mixing"] = broyden_mixing
        self.parse_dict["n_elect"] = n_elect
        self.parse_dict["e_fermi_list"] = e_fermi_list
        self.parse_dict["vbm_list"] = vbm_list
        self.parse_dict["cbm_list"] = cbm_list
        self.parse_dict["elastic_constants"] = elastic_constants
        # Pressure = mean of the diagonal stress components, converted from
        # kBar; an empty stress array (IndexError) yields zero pressures.
        try:
            self.parse_dict["pressures"] = (
                np.average(stresses[:, 0:3], axis=1) * KBAR_TO_EVA
            )
        except IndexError:
            self.parse_dict["pressures"] = np.zeros(len(steps))
def to_hdf(self, hdf, group_name="outcar"):
"""
Store output in an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
hdf5_output[key] = self.parse_dict[key]
def to_hdf_minimal(self, hdf, group_name="outcar"):
"""
Store minimal output in an HDF5 file (output unique to OUTCAR)
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
unique_quantities = [
"kin_energy_error",
"broyden_mixing",
"stresses",
"irreducible_kpoints",
]
with hdf.open(group_name) as hdf5_output:
for key in self.parse_dict.keys():
if key in unique_quantities:
hdf5_output[key] = self.parse_dict[key]
def from_hdf(self, hdf, group_name="outcar"):
"""
Load output from an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
"""
with hdf.open(group_name) as hdf5_output:
for key in hdf5_output.list_nodes():
self.parse_dict[key] = hdf5_output[key]
def get_positions_and_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces and positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=True,
)
def get_positions(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of positions in $\AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=True,
force_flag=False,
)
def get_forces(self, filename="OUTCAR", lines=None, n_atoms=None):
"""
Gets the forces for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
if n_atoms is None:
n_atoms = self.get_number_of_atoms(filename=filename, lines=lines)
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="TOTAL-FORCE (eV/Angst)"
)
return self._get_positions_and_forces_parser(
lines=lines,
trigger_indices=trigger_indices,
n_atoms=n_atoms,
pos_flag=False,
force_flag=True,
)
def get_cells(self, filename="OUTCAR", lines=None):
"""
Gets the cell size and shape for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="VOLUME and BASIS-vectors are now :"
)
return self._get_cells_praser(lines=lines, trigger_indices=trigger_indices)
@staticmethod
def get_stresses(filename="OUTCAR", lines=None, si_unit=True):
"""
Args:
filename (str): Input filename
lines (list/None): lines read from the file
si_unit (bool): True SI units are used
Returns:
numpy.ndarray: An array of stress values
"""
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FORCE on cell =-STRESS in cart. coord. units (eV):",
)
pullay_stress_lst = []
for j in trigger_indices:
try:
if si_unit:
pullay_stress_lst.append(
[float(l) for l in lines[j + 13].split()[1:7]]
)
else:
pullay_stress_lst.append(
[float(l) for l in lines[j + 14].split()[2:8]]
)
except ValueError:
if si_unit:
pullay_stress_lst.append([float("NaN")] * 6)
else:
pullay_stress_lst.append([float("NaN")] * 6)
return np.array(pullay_stress_lst)
    @staticmethod
    def get_irreducible_kpoints(
        filename="OUTCAR", reciprocal=True, weight=True, planewaves=True, lines=None
    ):
        """
        Function to extract the irreducible kpoints from the OUTCAR file

        Args:
            filename (str): Filename of the OUTCAR file to parse
            reciprocal (bool): Get either the reciprocal or the cartesian coordinates
            weight (bool): Get the weight assigned to the irreducible kpoints
            planewaves (bool): Get the planewaves assigned to the irreducible kpoints
            lines (list/None): lines read from the file

        Returns:
            numpy.ndarray: An array of k-points; depending on the flags, a
            tuple with additional weight and/or plane-wave-count arrays
        """
        kpoint_lst = []
        weight_lst = []
        planewaves_lst = []
        trigger_number_str = "Subroutine IBZKPT returns following result:"
        trigger_plane_waves_str = "k-point  1 :"
        trigger_number = 0
        trigger_plane_waves = 0
        lines = _get_lines_from_file(filename=filename, lines=lines)
        # First pass: locate the IBZKPT block and (optionally) the per-kpoint
        # plane-wave listing. If the triggers occur multiple times, the LAST
        # occurrence wins.
        for i, line in enumerate(lines):
            line = line.strip()
            if trigger_number_str in line:
                trigger_number = int(i)
            elif planewaves:
                if trigger_plane_waves_str in line:
                    trigger_plane_waves = int(i)
        # The number of irreducible k-points is stated 3 lines below the
        # IBZKPT trigger.
        number_irr_kpoints = int(lines[trigger_number + 3].split()[1])
        # Reciprocal coordinates start 7 lines below the trigger; the
        # cartesian table follows after the reciprocal one.
        if reciprocal:
            trigger_start = trigger_number + 7
        else:
            trigger_start = trigger_number + 10 + number_irr_kpoints
        for line in lines[trigger_start : trigger_start + number_irr_kpoints]:
            line = line.strip()
            line = _clean_line(line)
            kpoint_lst.append([float(l) for l in line.split()[0:3]])
            if weight:
                # 4th column is the k-point weight.
                weight_lst.append(float(line.split()[3]))
        # Plane-wave counts are only collected when the trigger was found.
        if planewaves and trigger_plane_waves != 0:
            for line in lines[
                trigger_plane_waves : trigger_plane_waves + number_irr_kpoints
            ]:
                line = line.strip()
                line = _clean_line(line)
                planewaves_lst.append(float(line.split()[-1]))
        # Return shape depends on which optional quantities were requested.
        if weight and planewaves:
            return np.array(kpoint_lst), np.array(weight_lst), np.array(planewaves_lst)
        elif weight:
            return np.array(kpoint_lst), np.array(weight_lst)
        elif planewaves:
            return np.array(kpoint_lst), np.array(planewaves_lst)
        else:
            return np.array(kpoint_lst)
@staticmethod
def get_total_energies(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_total_energies_from_line(line):
return float(_clean_line(line.strip()).split()[-2])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_total_energies_from_line(lines[j + 2]) for j in trigger_indices]
)
@staticmethod
def get_energy_without_entropy(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_without_entropy_from_line(line):
return float(_clean_line(line.strip()).split()[3])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[
get_energy_without_entropy_from_line(lines[j + 4])
for j in trigger_indices
]
)
@staticmethod
def get_energy_sigma_0(filename="OUTCAR", lines=None):
"""
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
"""
def get_energy_sigma_0_from_line(line):
return float(_clean_line(line.strip()).split()[-1])
trigger_indices, lines = _get_trigger(
lines=lines,
filename=filename,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
)
return np.array(
[get_energy_sigma_0_from_line(lines[j + 4]) for j in trigger_indices]
)
@staticmethod
def get_all_total_energies(filename="OUTCAR", lines=None):
"""
Gets the energy at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of energie for every electronic step at every ionic step
"""
ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
electronic_trigger = "free energy TOTEN ="
scf_energies = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_energies = list()
for i, line in enumerate(lines):
line = line.strip()
if ionic_trigger in line:
scf_energies.append(np.array(istep_energies))
istep_energies = list()
if electronic_trigger in line:
line = _clean_line(line)
ene = float(line.split()[-2])
istep_energies.append(ene)
return scf_energies
    @staticmethod
    def get_magnetization(filename="OUTCAR", lines=None):
        """
        Gets the magnetization

        Args:
            filename (str): Filename of the OUTCAR file to parse
            lines (list/None): lines read from the file

        Returns:
            tuple: (per-electronic-step magnetization arrays grouped by ionic
            step, final per-atom magnetic moments as a nested list — empty if
            no local moments were printed)
        """
        ionic_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
        electronic_trigger = "eigenvalue-minimisations"
        nion_trigger = "NIONS ="
        mag_lst = list()
        # Set once "Atomic Wigner-Seitz radii" is seen: from then on the file
        # contains per-atom "magnetization (x/y/z)" tables.
        local_spin_trigger = False
        n_atoms = None
        # Per-atom moments per ionic step, one list per cartesian direction
        # (only "x" is filled for collinear runs).
        mag_dict = dict()
        mag_dict["x"] = list()
        mag_dict["y"] = list()
        mag_dict["z"] = list()
        lines = _get_lines_from_file(filename=filename, lines=lines)
        istep_energies = list()
        final_magmom_lst = list()
        for i, line in enumerate(lines):
            line = line.strip()
            if ionic_trigger in line:
                # Ionic step done: store the magnetization of its SCF steps.
                mag_lst.append(np.array(istep_energies))
                istep_energies = list()
            if "Atomic Wigner-Seitz radii" in line:
                local_spin_trigger = True

            if electronic_trigger in line:
                try:
                    # Total magnetization is printed two lines below; one
                    # value (collinear) or three (non-collinear).
                    line = lines[i + 2].split("magnetization")[-1]
                    if line != " \n":
                        spin_str_lst = line.split()
                        spin_str_len = len(spin_str_lst)
                        if spin_str_len == 1:
                            ene = float(line)
                        elif spin_str_len == 3:
                            ene = [
                                float(spin_str_lst[0]),
                                float(spin_str_lst[1]),
                                float(spin_str_lst[2]),
                            ]
                        else:
                            # Bail out: unknown layout, return what we have.
                            warnings.warn("Unrecognized spin configuration.")
                            return mag_lst, final_magmom_lst
                        istep_energies.append(ene)
                except ValueError:
                    warnings.warn("Something went wrong in parsing the magnetization")
            if n_atoms is None:
                if nion_trigger in line:
                    n_atoms = int(line.split(nion_trigger)[-1])
            if local_spin_trigger:
                try:
                    # Per-atom moments: one table per direction, values start
                    # 4 lines below the header (last column).
                    for ind_dir, direc in enumerate(["x", "y", "z"]):
                        if "magnetization ({})".format(direc) in line:
                            mag_dict[direc].append(
                                [
                                    float(lines[i + 4 + atom_index].split()[-1])
                                    for atom_index in range(n_atoms)
                                ]
                            )
                except ValueError:
                    warnings.warn(
                        "Something went wrong in parsing the magnetic moments"
                    )
        if len(mag_dict["x"]) > 0:
            if len(mag_dict["y"]) == 0:
                # Collinear run: only the x table exists.
                final_mag = np.array(mag_dict["x"])
            else:
                # Non-collinear: stack the three directions into (steps, atoms, 3).
                n_ionic_steps = np.array(mag_dict["x"]).shape[0]
                final_mag = np.abs(np.zeros((n_ionic_steps, n_atoms, 3)))
                final_mag[:, :, 0] = np.array(mag_dict["x"])
                final_mag[:, :, 1] = np.array(mag_dict["y"])
                final_mag[:, :, 2] = np.array(mag_dict["z"])
            final_magmom_lst = final_mag.tolist()
        return mag_lst, final_magmom_lst
@staticmethod
def get_broyden_mixing_mesh(filename="OUTCAR", lines=None):
"""
Gets the Broyden mixing mesh size
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
int: Mesh size
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="gives a total of "
)
if len(trigger_indices) > 0:
line_ngx = lines[trigger_indices[0] - 2]
else:
warnings.warn(
"Unable to parse the Broyden mixing mesh. Returning 0 instead"
)
return 0
# Exclude all alphabets, and spaces. Then split based on '='
str_list = re.sub(
r"[a-zA-Z]", r"", line_ngx.replace(" ", "").replace("\n", "")
).split("=")
return np.prod([int(val) for val in str_list[1:]])
@staticmethod
def get_temperatures(filename="OUTCAR", lines=None):
"""
Gets the temperature at each ionic step (applicable for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of temperatures in Kelvin
"""
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger="kin. lattice EKIN_LAT= "
)
temperatures = []
if len(trigger_indices) > 0:
for j in trigger_indices:
line = lines[j].strip()
line = _clean_line(line)
temperatures.append(float(line.split()[-2]))
else:
temperatures = np.zeros(
len(
_get_trigger(
lines=lines,
trigger="FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)",
return_lines=False,
)
)
)
return np.array(temperatures)
@staticmethod
def get_steps(filename="OUTCAR", lines=None):
"""
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: Steps during the simulation
"""
nblock_trigger = "NBLOCK ="
trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
trigger_indices = list()
read_nblock = True
n_block = 1
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
trigger_indices.append(i)
if read_nblock is None:
if nblock_trigger in line:
line = _clean_line(line)
n_block = int(line.split(nblock_trigger)[-1])
return n_block * np.linspace(0, len(trigger_indices))
def get_time(self, filename="OUTCAR", lines=None):
"""
Time after each simulation step (for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of time values in fs
"""
potim_trigger = "POTIM ="
read_potim = True
potim = 1.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if read_potim is None:
if potim_trigger in line:
line = _clean_line(line)
potim = float(line.split(potim_trigger)[0])
return potim * self.get_steps(filename)
@staticmethod
def get_kinetic_energy_error(filename="OUTCAR", lines=None):
"""
Get the kinetic energy error
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The kinetic energy error in eV
"""
trigger = "kinetic energy error for atom="
e_kin_err = list()
n_species_list = list()
nion_trigger = "ions per type ="
tot_kin_error = 0.0
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if trigger in line:
e_kin_err.append(float(line.split()[5]))
if nion_trigger in line:
n_species_list = [
float(val) for val in line.split(nion_trigger)[-1].strip().split()
]
if len(n_species_list) > 0 and len(n_species_list) == len(e_kin_err):
tot_kin_error = np.sum(np.array(n_species_list) * np.array(e_kin_err))
return tot_kin_error
@staticmethod
def get_fermi_level(filename="OUTCAR", lines=None):
"""
Getting the Fermi-level (Kohn_Sham) from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The Kohn-Sham Fermi level in eV
"""
trigger = "E-fermi :"
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=trigger
)
if len(trigger_indices) != 0:
try:
return float(lines[trigger_indices[-1]].split(trigger)[-1].split()[0])
except ValueError:
return
else:
return
@staticmethod
def get_dipole_moments(filename="OUTCAR", lines=None):
"""
Get the electric dipole moment at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of dipole moments in (eA) for each electronic step
"""
moment_trigger = "dipolmoment"
istep_trigger = "FREE ENERGIE OF THE ION-ELECTRON SYSTEM (eV)"
dip_moms = list()
lines = _get_lines_from_file(filename=filename, lines=lines)
istep_mom = list()
for i, line in enumerate(lines):
line = line.strip()
if istep_trigger in line:
dip_moms.append(np.array(istep_mom))
istep_mom = list()
if moment_trigger in line:
line = _clean_line(line)
mom = np.array([float(val) for val in line.split()[1:4]])
istep_mom.append(mom)
return dip_moms
@staticmethod
def get_nelect(filename="OUTCAR", lines=None):
"""
Returns the number of electrons in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
float: The number of electrons in the simulation
"""
nelect_trigger = "NELECT"
lines = _get_lines_from_file(filename=filename, lines=lines)
for i, line in enumerate(lines):
line = line.strip()
if nelect_trigger in line:
return float(line.split()[2])
@staticmethod
def get_number_of_atoms(filename="OUTCAR", lines=None):
"""
Returns the number of ions in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
int: The number of ions in the simulation
"""
ions_trigger = "NIONS ="
trigger_indices, lines = _get_trigger(
lines=lines, filename=filename, trigger=ions_trigger
)
if len(trigger_indices) != 0:
return int(lines[trigger_indices[0]].split(ions_trigger)[-1])
else:
raise ValueError()
    @staticmethod
    def get_band_properties(filename="OUTCAR", lines=None):
        """
        Parse Fermi levels and band-edge levels from the OUTCAR file.

        Args:
            filename (str): Filename of the OUTCAR file to parse
            lines (list/None): lines read from the file

        Returns:
            tuple: three numpy arrays — Fermi level per electronic structure
                block, VBM levels per spin channel, CBM levels per spin channel
        """
        fermi_trigger = "E-fermi"
        fermi_trigger_indices, lines = _get_trigger(
            lines=lines, filename=filename, trigger=fermi_trigger
        )
        fermi_level_list = list()
        vbm_level_dict = OrderedDict()
        cbm_level_dict = OrderedDict()
        # One Fermi level per "E-fermi" block; third token of the line is the value.
        for ind in fermi_trigger_indices:
            fermi_level_list.append(float(lines[ind].strip().split()[2]))
        band_trigger = "band No. band energies occupation"
        is_spin_polarized = False
        for n, ind in enumerate(fermi_trigger_indices):
            # Restrict the band-table search to the window of this Fermi block only.
            if n == len(fermi_trigger_indices) - 1:
                trigger_indices, lines_new = _get_trigger(
                    lines=lines[ind:-1], filename=filename, trigger=band_trigger
                )
            else:
                trigger_indices, lines_new = _get_trigger(
                    lines=lines[ind:fermi_trigger_indices[n+1]], filename=filename, trigger=band_trigger
                )
            band_data = list()
            # NOTE(review): this loop variable shadows the outer `ind`; behavior is
            # unchanged because the outer value is not used again inside this pass.
            for ind in trigger_indices:
                if "spin component" in lines_new[ind-3]:
                    is_spin_polarized = True
                # Collect (energy, occupation) rows until the 3-column table ends.
                for line in lines_new[ind+1:]:
                    data = line.strip().split()
                    if len(data) != 3:
                        break
                    band_data.append([float(d) for d in data[1:]])
            if is_spin_polarized:
                # First half of the collected rows is spin up, second half spin down.
                band_data_per_spin = [np.array(band_data[0:int(len(band_data)/2)]).tolist(),
                                      np.array(band_data[int(len(band_data)/2):]).tolist()]
            else:
                band_data_per_spin = [band_data]
            for spin, band_data in enumerate(band_data_per_spin):
                if spin in cbm_level_dict.keys():
                    pass
                else:
                    cbm_level_dict[spin] = list()
                if spin in vbm_level_dict.keys():
                    pass
                else:
                    vbm_level_dict[spin] = list()
                if len(band_data) > 0:
                    band_energy, band_occ = [np.array(band_data)[:, i] for i in range(2)]
                    # Sort states by energy so band edges can be read off directly.
                    args = np.argsort(band_energy)
                    band_occ = band_occ[args]
                    band_energy = band_energy[args]
                    # States with (near-)zero occupation are conduction-band candidates.
                    cbm_bool = np.abs(band_occ) < 1e-6
                    if any(cbm_bool):
                        cbm_level_dict[spin].append(band_energy[np.abs(band_occ) < 1e-6][0])
                    else:
                        cbm_level_dict[spin].append(band_energy[-1])
                    # If spin channel is completely empty, setting vbm=cbm
                    if all(cbm_bool):
                        vbm_level_dict[spin].append(cbm_level_dict[spin][-1])
                    else:
                        vbm_level_dict[spin].append(band_energy[~cbm_bool][-1])
        return np.array(fermi_level_list), np.array([val for val
                                                     in vbm_level_dict.values()]), np.array([val
                                                                                             for val in
                                                                                             cbm_level_dict.values()])
@staticmethod
def get_elastic_constants(filename="OUTCAR", lines=None):
lines = _get_lines_from_file(filename=filename, lines=lines)
trigger_indices = _get_trigger(lines=lines, filename=filename, trigger="TOTAL ELASTIC MODULI (kBar)", return_lines=False)
if len(trigger_indices) != 1:
return None
else:
start_index = trigger_indices[0] + 3
end_index = start_index + 6
elastic_constants = []
for line in lines[start_index:end_index]:
elastic_constants.append(line.split()[1:])
elastic_GPa = np.array(elastic_constants, dtype=float) / 10
return elastic_GPa
@staticmethod
def _get_positions_and_forces_parser(
lines, trigger_indices, n_atoms, pos_flag=True, force_flag=True
):
"""
Parser to get the forces and or positions for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
pos_flag (bool): parse position
force_flag (bool): parse forces
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
"""
positions = []
forces = []
for j in trigger_indices:
pos = []
force = []
for line in lines[j + 2 : j + n_atoms + 2]:
line = line.strip()
line = _clean_line(line)
if pos_flag:
pos.append([float(l) for l in line.split()[0:3]])
if force_flag:
force.append([float(l) for l in line.split()[3:]])
forces.append(force)
positions.append(pos)
if pos_flag and force_flag:
return np.array(positions), np.array(forces)
elif pos_flag:
return np.array(positions)
elif force_flag:
return np.array(forces)
@staticmethod
def _get_cells_praser(lines, trigger_indices):
"""
Parser to get the cell size and shape for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
"""
cells = []
try:
for j in trigger_indices:
cell = []
for line in lines[j + 5: j + 8]:
line = line.strip()
line = _clean_line(line)
cell.append([float(l) for l in line.split()[0:3]])
cells.append(cell)
return np.array(cells)
except ValueError:
warnings.warn("Unable to parse the cells from the OUTCAR file")
return
def _clean_line(line):
return line.replace("-", " -")
def _get_trigger(trigger, filename=None, lines=None, return_lines=True):
    """
    Find the lines where a specific trigger string appears.

    Args:
        trigger (str): string pattern to search for
        filename (str/None): file to read lines from
        lines (list/None): list of lines
        return_lines (bool): also return the line list itself

    Returns:
        list or (list, list): indices of the matching lines, plus the lines
            themselves when return_lines is True
    """
    lines = _get_lines_from_file(filename=filename, lines=lines)
    matches = [i for i, line in enumerate(lines) if trigger in line.strip()]
    if return_lines:
        return matches, lines
    return matches
def _get_lines_from_file(filename, lines=None):
"""
If lines is None read the lines from the file with the filename filename.
Args:
filename (str): file to read lines from
lines (list/ None): list of lines
Returns:
list: list of lines
"""
if lines is None:
with open(filename, "r") as f:
lines = f.readlines()
return lines
| pyiron_atomistics/vasp/outcar.py | 36,471 | This module is used to parse VASP OUTCAR files.
Attributes:
parse_dict (dict): A dictionary with all the useful quantities parsed from an OUTCAR file after from_file() is
executed
Parser to get the cell size and shape for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
If lines is None read the lines from the file with the filename filename.
Args:
filename (str): file to read lines from
lines (list/ None): list of lines
Returns:
list: list of lines
Parser to get the forces and or positions for every ionic step from the OUTCAR file
Args:
lines (list): lines read from the file
trigger_indices (list): list of line indices where the trigger was found.
n_atoms (int): number of atoms
pos_flag (bool): parse position
force_flag (bool): parse forces
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
Find the lines where a specific trigger appears.
Args:
trigger (str): string pattern to search for
lines (list/None): list of lines
filename (str/None): file to read lines from
Returns:
list: indices of the lines where the trigger string was found and list of lines
Parse and store relevant quantities from the OUTCAR file into parse_dict.
Args:
filename (str): Filename of the OUTCAR file to parse
Load output from an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
Gets the energy at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of energies for every electronic step at every ionic step
Gets the Broyden mixing mesh size
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
int: Mesh size
Gets the cell size and shape for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 3x3xM array of the cell shape in $\AA$
where M is the number of time steps
Get the electric dipole moment at every electronic step
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list of dipole moments in (eA) for each electronic step
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
Getting the Fermi-level (Kohn_Sham) from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The Kohn-Sham Fermi level in eV
Gets the forces for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
Function to extract the irreducible kpoints from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
reciprocal (bool): Get either the reciprocal or the cartesian coordinates
weight (bool): Get the weight assigned to the irreducible kpoints
planewaves (bool): Get the planewaves assigned to the irreducible kpoints
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of k-points
Get the kinetic energy error
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
float: The kinetic energy error in eV
Gets the magnetization
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
list: A list with the magnetization values
Returns the number of electrons in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
float: The number of electrons in the simulation
Returns the number of ions in the simulation
Args:
filename (str): OUTCAR filename
lines (list/None): lines read from the file
Returns:
int: The number of ions in the simulation
Gets the positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
numpy.ndarray: A Nx3xM array of positions in $\AA$
where N is the number of atoms and M is the number of time steps
Gets the forces and positions for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
n_atoms (int/None): number of ions in OUTCAR
Returns:
[positions, forces] (sequence)
numpy.ndarray: A Nx3xM array of positions in $\AA$
numpy.ndarray: A Nx3xM array of forces in $eV / \AA$
where N is the number of atoms and M is the number of time steps
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: Steps during the simulation
Args:
filename (str): Input filename
lines (list/None): lines read from the file
si_unit (bool): True SI units are used
Returns:
numpy.ndarray: An array of stress values
Gets the temperature at each ionic step (applicable for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of temperatures in Kelvin
Time after each simulation step (for MD)
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: An array of time values in fs
Gets the total energy for every ionic step from the OUTCAR file
Args:
filename (str): Filename of the OUTCAR file to parse
lines (list/None): lines read from the file
Returns:
numpy.ndarray: A 1xM array of the total energies in $eV$
where M is the number of time steps
Store output in an HDF5 file
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
Store minimal output in an HDF5 file (output unique to OUTCAR)
Args:
hdf (pyiron_base.generic.hdfio.FileHDFio): HDF5 group or file
group_name (str): Name of the HDF5 group
coding: utf-8 Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department Distributed under the terms of "New BSD License", see the LICENSE file. Exclude all alphabets, and spaces. Then split based on '=' If spin channel is completely empty, setting vbm=cbm | 7,759 | en | 0.788384 |
"""
All things CloudFormation Init.
"""
from paco.models.base import Named
from paco.models import schemas
from zope.interface import implementer
from zope.schema.fieldproperty import FieldProperty
import troposphere.cloudformation
def export_attrs_as_dicts(obj, attrs):
    """Collect the named attributes of *obj* that are truthy, each as a plain dict.

    Args:
        obj: object to read attributes from
        attrs: iterable of attribute names to export

    Returns:
        dict: attribute name -> dict(value), skipping missing/empty attributes
    """
    exported = {}
    for attr_name in attrs:
        attr_value = getattr(obj, attr_name, None)
        if not attr_value:
            continue
        exported[attr_name] = dict(attr_value)
    return exported
@implementer(schemas.ICloudFormationParameters)
class CloudFormationParameters(Named, dict):
    """Named dict of CloudFormation Init parameters; no behaviour beyond the base classes."""
    pass
@implementer(schemas.ICloudFormationConfigSets)
class CloudFormationConfigSets(Named, dict):
    """Named dict of configSet name -> list of configuration names."""

    def export_as_troposphere(self):
        # plain dict of list values
        return {set_name: config_names for set_name, config_names in self.items()}
@implementer(schemas.ICloudFormationConfigurations)
class CloudFormationConfigurations(Named, dict):
    """Named dict of configuration name -> CloudFormationConfiguration."""

    def export_as_troposphere(self):
        """Wrap each configuration's exported dict in a troposphere InitConfig."""
        return {
            config_name: troposphere.cloudformation.InitConfig(
                **config.export_as_troposphere()
            )
            for config_name, config in self.items()
        }
@implementer(schemas.ICloudFormationInitVersionedPackageSet)
class CloudFormationInitVersionedPackageSet(dict):
    """Plain dict package set; used for the apt/python/rubygems/yum managers."""
    pass
@implementer(schemas.ICloudFormationInitPathOrUrlPackageSet)
class CloudFormationInitPathOrUrlPackageSet(dict):
    """Plain dict package set; used for the msi/rpm managers."""
    pass
@implementer(schemas.ICloudFormationInitPackages)
class CloudFormationInitPackages(Named):
    """Package declarations for cfn-init, one container per package manager."""
    apt = FieldProperty(schemas.ICloudFormationInitPackages['apt'])
    msi = FieldProperty(schemas.ICloudFormationInitPackages['msi'])
    python = FieldProperty(schemas.ICloudFormationInitPackages['python'])
    rpm = FieldProperty(schemas.ICloudFormationInitPackages['rpm'])
    rubygems = FieldProperty(schemas.ICloudFormationInitPackages['rubygems'])
    yum = FieldProperty(schemas.ICloudFormationInitPackages['yum'])

    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Versioned sets for managers that take version lists ...
        for manager in ('apt', 'python', 'rubygems', 'yum'):
            setattr(self, manager, CloudFormationInitVersionedPackageSet())
        # ... and path/URL sets for msi and rpm.
        for manager in ('msi', 'rpm'):
            setattr(self, manager, CloudFormationInitPathOrUrlPackageSet())

    def export_as_troposphere(self):
        """Export the non-empty package-manager sections as plain dicts."""
        return export_attrs_as_dicts(
            self,
            ('apt', 'msi', 'python', 'rpm', 'rubygems', 'yum')
        )
@implementer(schemas.ICloudFormationInitGroup)
class CloudFormationInitGroup(Named):
    """A group entry for cfn-init's groups section."""
    gid = FieldProperty(schemas.ICloudFormationInitGroup['gid'])

    def export_as_troposphere(self):
        """Export the non-None group attributes as a plain dict.

        Bugfix: `for name in ('gid')` iterated the characters 'g', 'i', 'd'
        (a plain string, not a tuple), so the gid attribute was never exported.
        """
        out = {}
        for name in ('gid',):
            value = getattr(self, name, None)
            if value is not None:
                out[name] = value
        return out
@implementer(schemas.ICloudFormationInitGroups)
class CloudFormationInitGroups(Named, dict):
    """Named dict of group name -> CloudFormationInitGroup."""

    def export_as_troposphere(self):
        """Export each contained group as a plain dict."""
        return {group_name: group.export_as_troposphere() for group_name, group in self.items()}
@implementer(schemas.ICloudFormationInitUser)
class CloudFormationInitUser(Named):
    """A user entry for cfn-init's users section."""
    groups = FieldProperty(schemas.ICloudFormationInitUser['groups'])
    uid = FieldProperty(schemas.ICloudFormationInitUser['uid'])
    home_dir = FieldProperty(schemas.ICloudFormationInitUser['home_dir'])

    def __init__(self, name, parent):
        super().__init__(name, parent)
        self.groups = []

    def export_as_troposphere(self):
        """Export as a plain dict using cfn-init's camelCase key (homeDir).

        Bugfix: the original stringified uid BEFORE the None check, so a
        missing uid was exported as the literal string 'None'.
        """
        out = {}
        for name in ('groups', 'uid', 'home_dir'):
            value = getattr(self, name, None)
            if value is None:
                continue
            if name == 'home_dir':
                name = 'homeDir'
            elif name == 'uid':
                # uid is exported as a string
                value = str(value)
            out[name] = value
        return out
@implementer(schemas.ICloudFormationInitUsers)
class CloudFormationInitUsers(Named, dict):
    """Named dict of user name -> CloudFormationInitUser."""

    def export_as_troposphere(self):
        """Export each contained user as a plain dict."""
        return {user_name: user.export_as_troposphere() for user_name, user in self.items()}
@implementer(schemas.ICloudFormationInitSources)
class CloudFormationInitSources(Named, dict):
    """Named dict of target directory -> archive source for cfn-init's sources section."""

    def export_as_troposphere(self):
        """Values are exported as-is; only a plain dict copy is needed."""
        return dict(self)
@implementer(schemas.ICloudFormationInitFiles)
class CloudFormationInitFiles(Named, dict):
    """Named dict of file path -> CloudFormationInitFile."""

    def export_as_troposphere(self):
        """Export each contained file entry as a plain dict."""
        return {file_path: file_obj.export_as_troposphere() for file_path, file_obj in self.items()}
@implementer(schemas.ICloudFormationInitFile)
class CloudFormationInitFile(Named):
    """A single file entry for cfn-init's files section."""
    content_cfn_file = FieldProperty(schemas.ICloudFormationInitFile['content_cfn_file'])
    content_file = FieldProperty(schemas.ICloudFormationInitFile['content_file'])
    source = FieldProperty(schemas.ICloudFormationInitFile['source'])
    encoding = FieldProperty(schemas.ICloudFormationInitFile['encoding'])
    group = FieldProperty(schemas.ICloudFormationInitFile['group'])
    owner = FieldProperty(schemas.ICloudFormationInitFile['owner'])
    mode = FieldProperty(schemas.ICloudFormationInitFile['mode'])
    authentication = FieldProperty(schemas.ICloudFormationInitFile['authentication'])
    context = FieldProperty(schemas.ICloudFormationInitFile['context'])
    _content = None

    @property
    def content(self):
        "Return a string or a Troposphere CFN Function object"
        # File-based content sources take precedence over directly-assigned content.
        if self.content_file:
            return self.content_file
        if self.content_cfn_file:
            return self.content_cfn_file
        return self._content

    @content.setter
    def content(self, value):
        self._content = value

    def export_as_troposphere(self):
        """Export the non-None file attributes as a plain dict."""
        exported = {}
        for attr in ('content', 'source', 'encoding', 'group', 'owner', 'mode', 'authentication', 'context'):
            attr_value = getattr(self, attr, None)
            if attr_value is not None:
                exported[attr] = attr_value
        return exported
@implementer(schemas.ICloudFormationInitCommands)
class CloudFormationInitCommands(Named, dict):
    """Named dict of command name -> CloudFormationInitCommand."""

    def export_as_troposphere(self):
        """Export each command, renaming ignore_errors to cfn-init's ignoreErrors key."""
        exported = {}
        for command_name, command_obj in self.items():
            entry = {}
            for attr in ('command', 'env', 'cwd', 'test', 'ignore_errors'):
                attr_value = getattr(command_obj, attr)
                key = 'ignoreErrors' if attr == 'ignore_errors' else attr
                if attr_value is not None:
                    entry[key] = attr_value
            exported[command_name] = entry
        return exported
@implementer(schemas.ICloudFormationInitCommand)
class CloudFormationInitCommand(Named):
    """A single command entry for cfn-init's commands section."""
    command = FieldProperty(schemas.ICloudFormationInitCommand['command'])
    env = FieldProperty(schemas.ICloudFormationInitCommand['env'])
    cwd = FieldProperty(schemas.ICloudFormationInitCommand['cwd'])
    test = FieldProperty(schemas.ICloudFormationInitCommand['test'])
    ignore_errors = FieldProperty(schemas.ICloudFormationInitCommand['ignore_errors'])
    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Environment mapping defaults to empty.
        self.env = {}
@implementer(schemas.ICloudFormationInitService)
class CloudFormationInitService(Named, dict):
    """A single service entry for cfn-init's services section."""
    ensure_running = FieldProperty(schemas.ICloudFormationInitService['ensure_running'])
    enabled = FieldProperty(schemas.ICloudFormationInitService['enabled'])
    files = FieldProperty(schemas.ICloudFormationInitService['files'])
    sources = FieldProperty(schemas.ICloudFormationInitService['sources'])
    packages = FieldProperty(schemas.ICloudFormationInitService['packages'])
    commands = FieldProperty(schemas.ICloudFormationInitService['commands'])
    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Container attributes start empty.
        self.files = []
        self.packages = {}
        self.commands = []
        self.sources = []
@implementer(schemas.ICloudFormationInitServiceCollection)
class CloudFormationInitServiceCollection(Named, dict):
    """Named dict of service name -> CloudFormationInitService."""

    def export_as_troposphere(self):
        """Export each service, renaming ensure_running to cfn-init's ensureRunning key."""
        exported = {}
        for service_name, service_obj in self.items():
            entry = {}
            for attr in ('ensure_running', 'enabled', 'files', 'sources', 'packages', 'commands'):
                attr_value = getattr(service_obj, attr)
                key = 'ensureRunning' if attr == 'ensure_running' else attr
                if attr_value is not None:
                    entry[key] = attr_value
            exported[service_name] = entry
        return exported
@implementer(schemas.ICloudFormationInitServices)
class CloudFormationInitServices(Named):
    """Service definitions, split by service manager (sysvinit vs windows)."""
    sysvinit = FieldProperty(schemas.ICloudFormationInitServices['sysvinit'])
    windows = FieldProperty(schemas.ICloudFormationInitServices['windows'])

    def __init__(self, name, parent):
        super().__init__(name, parent)
        self.sysvinit = CloudFormationInitServiceCollection('sysvinit', self)
        self.windows = CloudFormationInitServiceCollection('windows', self)

    def export_as_troposphere(self):
        """Export only the non-empty service collections."""
        out = {}
        for manager in ('sysvinit', 'windows'):
            collection = getattr(self, manager)
            if collection:
                out[manager] = collection.export_as_troposphere()
        return out
@implementer(schemas.ICloudFormationConfiguration)
class CloudFormationConfiguration(Named):
    """One cfn-init configuration: packages, groups, users, sources, files, commands, services."""
    packages = FieldProperty(schemas.ICloudFormationConfiguration['packages'])
    groups = FieldProperty(schemas.ICloudFormationConfiguration['groups'])
    users = FieldProperty(schemas.ICloudFormationConfiguration['users'])
    sources = FieldProperty(schemas.ICloudFormationConfiguration['sources'])
    files = FieldProperty(schemas.ICloudFormationConfiguration['files'])
    commands = FieldProperty(schemas.ICloudFormationConfiguration['commands'])
    services = FieldProperty(schemas.ICloudFormationConfiguration['services'])

    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Build one empty container per section, named after its attribute.
        section_classes = {
            'packages': CloudFormationInitPackages,
            'files': CloudFormationInitFiles,
            'commands': CloudFormationInitCommands,
            'services': CloudFormationInitServices,
            'sources': CloudFormationInitSources,
            'groups': CloudFormationInitGroups,
            'users': CloudFormationInitUsers,
        }
        for section_name, section_class in section_classes.items():
            setattr(self, section_name, section_class(section_name, self))

    def export_as_troposphere(self):
        """Export every populated section; empty sections are omitted."""
        exported = {}
        for section_name in ('packages', 'files', 'commands', 'services', 'sources', 'groups', 'users'):
            section = getattr(self, section_name, None)
            if section:
                exported[section_name] = section.export_as_troposphere()
        return exported
@implementer(schemas.ICloudFormationInit)
class CloudFormationInit(Named):
    """Root of a cfn-init metadata tree: config_sets, configurations and parameters."""
    config_sets = FieldProperty(schemas.ICloudFormationInit['config_sets'])
    configurations = FieldProperty(schemas.ICloudFormationInit['configurations'])
    parameters = FieldProperty(schemas.ICloudFormationInit['parameters'])
    def __init__(self, name, parent):
        super().__init__(name, parent)
        # Plain dict; populated externally.
        self.parameters = {}
    def export_as_troposphere(self):
        """Build a troposphere Init resource from the config sets and configurations."""
        init_resource = troposphere.cloudformation.Init(
            troposphere.cloudformation.InitConfigSets(
                **self.config_sets.export_as_troposphere()
            ),
            **self.configurations.export_as_troposphere()
        )
        return init_resource | src/paco/models/cfn_init.py | 11,438 | Return a string or a Troposphere CFN Function object
All things CloudFormation Init.
plain dict of list values | 112 | en | 0.216759 |
import numpy as np
import os, time
import random
import tensorflow as tf
from lookalike_model.trainer.model_new import Model
import argparse
random.seed(1234)
# adding arguments for tfrecord directory and the checkpoint directory
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, help="input data tfrecords dir location")
parser.add_argument("--check_point_dir", type=str, help="Check Point dir location")
args, unknown = parser.parse_known_args()
if unknown:
    # Bugfix: print() does not do %-interpolation (that is a logging feature),
    # so the original printed the literal "%s" followed by the list.
    print("unknown args:%s" % unknown)
# tfrecord location and the check point directory location
tfrecord_location = args.data_dir + "/tf_records_lookalike_data_08july"
output = args.check_point_dir
def __data_parser(serialized_example):
    """Decode one serialized TFRecord example into typed feature tensors.

    Returns:
        tuple: (ucdoc, keyword, keywords_list, is_click, sl, lr) tensors
    """
    feature_spec = {
        'keywords_list': tf.FixedLenSequenceFeature([], tf.int64, allow_missing=True),
        'ucdoc': tf.FixedLenFeature([], tf.int64),
        'keyword': tf.FixedLenFeature([], tf.int64),
        'is_click': tf.FixedLenFeature([], tf.float32),
        'sl': tf.FixedLenFeature([], tf.int64),
        'lr': tf.FixedLenFeature([], tf.float32),
    }
    features = tf.parse_single_example(serialized_example, features=feature_spec)
    # Downcast the int64 features to int32; float features keep their dtype.
    return (
        tf.cast(features['ucdoc'], tf.int32),
        tf.cast(features['keyword'], tf.int32),
        tf.cast(features['keywords_list'], tf.int32),
        tf.cast(features['is_click'], tf.float32),
        tf.cast(features['sl'], tf.int32),
        tf.cast(features['lr'], tf.float32),
    )
# Gather the TFRecord shard files (Spark-style "part*" names) from the input dir.
names = [
    file_name
    for file_name in os.listdir(tfrecord_location)
    if file_name.startswith("part")
]
file_paths = [os.path.join(tfrecord_location, file_name) for file_name in names]
shuffle_value = 2000
repeat_value = 10
batch_size = 1000
prefetch_buffer = 2000
# Input pipeline: decode -> repeat -> shuffle -> prefetch -> batch.
dataset = (
    tf.data.TFRecordDataset(file_paths)
    .map(__data_parser)
    .repeat(repeat_value)
    .shuffle(shuffle_value)
    .prefetch(buffer_size=prefetch_buffer)
    .batch(batch_size)
)
iterator = dataset.make_one_shot_iterator()
tf_ucdoc, tf_keyword, tf_keywords_list, tf_is_click, tf_sl, tf_lr = iterator.get_next()
unique_keywords = 811
cate_list = np.array([x for x in range(unique_keywords)])
user_count = 1349500103
item_count, cate_count = unique_keywords, unique_keywords
predict_batch_size = 5000
predict_ads_num = 30
# Bugfix: `epoch` was never defined (NameError at runtime); the configured
# epoch count is repeat_value, set where the dataset pipeline is built.
total_iterations = int((user_count * repeat_value) // batch_size)
print('total iterations = {}'.format(total_iterations))
max_epochs = 500
model = Model(user_count, item_count, cate_count, cate_list, predict_batch_size, predict_ads_num,
              tf_ucdoc, tf_keyword, tf_is_click, tf_keywords_list, tf_sl)
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    start_time = time.time()
    count_epoch = 0
    last_100_loss = []
    # Bugfix: `epoch` was undefined (NameError); the epoch count is repeat_value.
    print('shuffle = {}, epochs = {}, batch_size = {}, predict_batch_size = {}'.format(
        shuffle_value, repeat_value, batch_size, predict_batch_size))
    for i in range(max_epochs * 500):
        loss, _, sl = sess.run([model.loss, model.train_op, tf_sl])
        loss = round(loss, 2)
        # Keep a rolling window of the last 100 losses for a smoothed average.
        last_100_loss.append(loss)
        if len(last_100_loss) == 101:
            del last_100_loss[0]
        if i % 500 == 0:
            print('Epoch {} DONE Iteration: {} Cost time: {} Model Loss: {} Average Loss: {}'.format(
                count_epoch, i, time.time() - start_time, loss,
                round(sum(last_100_loss) / 100, 2)))
            # Checkpoint every 500 iterations so training can resume.
            model.save(sess, output)
            count_epoch += 1
    model.save(sess, output)
| Model/lookalike-model/lookalike_model/trainer/lookalike_trainer_tfrecords.py | 3,996 | adding arguments for tfrecord directory and the checkpoint directory tfrecord location and the check point directory location print("i: ",i," loss: ",loss) | 166 | en | 0.738779 |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import numpy
import copy
from dcase_util.ui import FancyStringifier, FancyLogger
from dcase_util.containers import ContainerMixin
from dcase_util.data import DataBuffer
def get_keras_data_sequence_class():
# Use getter method to avoid importing Keras when importing dcase_util. This allows user to decide when import
# Keras, so that user can set random seeds before Keras import.
from keras.utils import Sequence
class KerasDataSequence(Sequence, ContainerMixin):
        def __init__(self, item_list=None, batch_size=64,
                     buffer_size=None,
                     data_processing_chain=None, meta_processing_chain=None,
                     data_processing_chain_callback_on_epoch_end=None, meta_processing_chain_callback_on_epoch_end=None,
                     transformer_callbacks=None,
                     refresh_buffer_on_epoch=False,
                     data_format='channels_last',
                     target_format='single_target_per_sequence',
                     **kwargs):
            """Constructor

            Parameters
            ----------
            item_list : list or dict
                Items in the data sequence. List containing multi-level dictionary with first level key
                'data' and 'meta'. Second level should contain parameters for process method in the processing chain.
                Default value None

            batch_size : int
                Batch size (item count).
                Default value 64

            buffer_size : int
                Internal buffer size (item count). By setting this sufficiently high, data sequence generator can
                possibly fit all sequence items into internal buffer and can fetch without loading from disk.
                Set to None, if no internal buffer used.
                Default value None

            data_processing_chain : ProcessingChain
                Data processing chain.
                Default value None

            meta_processing_chain : ProcessingChain
                Meta processing chain.
                Default value None

            data_processing_chain_callback_on_epoch_end : list of dict
                Can be used to call methods with parameters for processing chain at the end of epoch. This can be
                used to control processing chain's internal status (e.g. roll the data).
                Default value None

            meta_processing_chain_callback_on_epoch_end : list of dict
                Can be used to call methods with parameters for processing chain at the end of epoch. This can be
                used to control processing chain's internal status (e.g. roll the data).
                Default value None

            transformer_callbacks : list of func
                Transformer callbacks to jointly process data and meta. This can be used for local data modification and
                data augmentation.
                Default value None

            refresh_buffer_on_epoch : bool
                In case internal data buffer is used, force data and meta refresh at the end of each epoch. Use this if
                data is modified/augmented differently for each epoch.
                In case data_processing_chain_callback_on_epoch_end or meta_processing_chain_callback_on_epoch_end is
                used, this parameter is automatically set to True.
                Default value False

            data_format : str
                Keras like data format, controls where channel should be added.
                Possible values ['channels_first', 'channels_last']
                Default value 'channels_last'

            target_format : str
                Meta data interpretation in the relation to the data items.
                Possible values ['same', 'single_target_per_sequence']
                Default value 'single_target_per_sequence'

            """
            # Run ContainerMixin init
            ContainerMixin.__init__(self, **kwargs)

            # Cached item geometry; resolved lazily on first access to
            # data_shape / data_axis.
            self._data_shape = None
            self._data_axis = None

            self.item_list = copy.copy(item_list)
            self.batch_size = batch_size
            self.buffer_size = buffer_size
            self.data_refresh_on_epoch = refresh_buffer_on_epoch

            # Fall back to default when None is explicitly passed.
            if data_format is None:
                data_format = 'channels_last'

            self.data_format = data_format
            if self.data_format not in ['channels_first', 'channels_last']:
                message = '{name}: Unknown data_format [{data_format}].'.format(
                    name=self.__class__.__name__,
                    data_format=self.data_format
                )
                self.logger.exception(message)
                raise NotImplementedError(message)

            # Fall back to default when None is explicitly passed.
            if target_format is None:
                target_format = 'single_target_per_sequence'

            self.target_format = target_format
            if self.target_format not in ['same', 'single_target_per_sequence']:
                message = '{name}: Unknown target_format [{target_format}].'.format(
                    name=self.__class__.__name__,
                    target_format=self.target_format
                )
                self.logger.exception(message)
                raise NotImplementedError(message)

            if data_processing_chain_callback_on_epoch_end is None:
                data_processing_chain_callback_on_epoch_end = []

            self.data_processing_chain_callback_on_epoch_end = data_processing_chain_callback_on_epoch_end

            # Epoch-end callbacks may mutate the chain, so buffered data must
            # be refreshed each epoch to reflect those changes.
            if self.data_processing_chain_callback_on_epoch_end:
                self.data_refresh_on_epoch = True

            if meta_processing_chain_callback_on_epoch_end is None:
                meta_processing_chain_callback_on_epoch_end = []

            self.meta_processing_chain_callback_on_epoch_end = meta_processing_chain_callback_on_epoch_end

            if transformer_callbacks is None:
                transformer_callbacks = []

            self.transformer_callbacks = transformer_callbacks

            # Processing chains
            self.data_processing_chain = data_processing_chain
            self.meta_processing_chain = meta_processing_chain

            if self.buffer_size is not None:
                # Initialize data buffer
                self.data_buffer = DataBuffer(
                    size=self.buffer_size
                )

            else:
                self.data_buffer = None
def __str__(self):
ui = FancyStringifier()
output = ''
output += ui.class_name(self.__class__.__name__) + '\n'
output += ui.data(
indent=2,
field='Batch size',
value=self.batch_size
) + '\n'
output += ui.data(
indent=2,
field='Epoch size',
value=len(self), unit='batches'
) + '\n'
shape = self.data_shape
axis = self.data_axis
output += ui.data(field='Data item shape', value=shape) + '\n'
output += ui.data(
indent=4,
field='Time',
value=shape[axis['time_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Data',
value=shape[axis['data_axis']]
) + '\n'
if 'sequence_axis' in axis:
output += ui.data(
indent=4,
field='Sequence',
value=shape[axis['sequence_axis']]
) + '\n'
output += ui.data(
indent=4,
field='Axis',
value=axis
) + '\n'
if self.buffer_size is not None:
output += ui.line(field='Buffer') + '\n'
output += ui.data(
indent=4,
field='buffer_size',
value=self.buffer_size,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=self.data_buffer.count,
unit='items'
) + '\n'
output += ui.data(
indent=4,
field='buffer usage',
value=(self.data_buffer.count / float(self.buffer_size)) * 100,
unit='%'
) + '\n'
return output
        def __getitem__(self, index):
            """Return batch `index` as (data, meta) numpy arrays for Keras.

            Items are processed (optionally via the buffer and transformer
            callbacks), stacked along the appropriate axis for 2D/3D items,
            and a channel dimension is appended per `self.data_format`.
            """
            start_index = index * self.batch_size
            stop_index = (index + 1) * self.batch_size

            batch_buffer_data = []
            batch_buffer_meta = []

            for item_index in range(start_index, stop_index):
                # The last batch may be partial; skip indices past the end.
                if item_index < len(self.item_list):
                    item = self.item_list[item_index]

                    # Load item data
                    data, meta = self.process_item(item=item)

                    if self.transformer_callbacks:
                        # Apply transformer callbacks
                        for callback in self.transformer_callbacks:
                            data, meta = callback(
                                data=data,
                                meta=meta
                            )

                    # Collect data
                    batch_buffer_data.append(data.data)

                    # Collect meta
                    if self.target_format == 'single_target_per_sequence':
                        # Collect single target per sequence
                        for i in range(0, data.shape[data.sequence_axis]):
                            batch_buffer_meta.append(meta.data[:, 0])

                    elif self.target_format == 'same':
                        # Repeat the item's target for every frame in it.
                        batch_buffer_meta.append(
                            numpy.repeat(
                                a=meta.data,
                                repeats=data.length,
                                axis=1
                            )
                        )

            # NOTE(review): `data` below is the last loop iteration's item —
            # this assumes the batch contains at least one item; confirm empty
            # batches cannot occur here.
            if len(data.shape) == 2:
                # Prepare 2D data, stack along time_axis
                if data.time_axis == 0:
                    batch_buffer_data = numpy.vstack(batch_buffer_data)

                elif data.time_axis == 1:
                    batch_buffer_data = numpy.hstack(batch_buffer_data)

            elif len(data.shape) == 3:
                # Prepare 3D data, stack along sequence_axis
                if data.sequence_axis == 0:
                    batch_buffer_data = numpy.vstack(batch_buffer_data)

                elif data.sequence_axis == 1:
                    batch_buffer_data = numpy.hstack(batch_buffer_data)

                elif data.sequence_axis == 2:
                    batch_buffer_data = numpy.dstack(batch_buffer_data)

            # Add channel dimension to the data
            if self.data_format == 'channels_first':
                batch_buffer_data = numpy.expand_dims(
                    batch_buffer_data,
                    axis=0
                )

            elif self.data_format == 'channels_last':
                batch_buffer_data = numpy.expand_dims(
                    batch_buffer_data,
                    axis=3
                )

            # Prepare meta
            if self.target_format == 'single_target_per_sequence':
                batch_buffer_meta = numpy.vstack(batch_buffer_meta)

            elif self.target_format == 'same':
                batch_buffer_meta = numpy.hstack(batch_buffer_meta).T

            return batch_buffer_data, batch_buffer_meta
def __len__(self):
num_batches = int(numpy.ceil(len(self.item_list) / float(self.batch_size)))
if num_batches > 0:
return num_batches
else:
return 1
@property
def data_shape(self):
if self._data_shape is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data,'sequence_axis'):
self._data_axis['sequence_axis']= data.sequence_axis
return self._data_shape
@property
def data_axis(self):
if self._data_axis is None:
# Load first item and get data length
data = self.process_item(
item=self.item_list[0]
)[0]
self._data_shape = data.shape
self._data_axis = {
'time_axis': data.time_axis,
'data_axis': data.data_axis
}
if hasattr(data, 'sequence_axis'):
self._data_axis['sequence_axis'] = data.sequence_axis
return self._data_axis
@property
def data_size(self):
shape = self.data_shape
axis = self.data_axis
size = {
'time': shape[axis['time_axis']],
'data': shape[axis['data_axis']],
}
if 'sequence_axis' in axis:
size['sequence'] = shape[axis['sequence_axis']]
return size
def process_item(self, item):
if self.data_buffer is not None:
# Fetch data and meta through internal buffer
if not self.data_buffer.key_exists(key=item):
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
self.data_buffer.set(
key=item,
data=data,
meta=meta
)
else:
data, meta = self.data_buffer.get(key=item)
else:
# Fetch data and meta directly.
data = self.data_processing_chain.process(**item['data'])
meta = self.meta_processing_chain.process(**item['meta'])
return data, meta
def on_epoch_end(self):
if self.data_processing_chain_callback_on_epoch_end:
for callback_parameters in self.data_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.meta_processing_chain_callback_on_epoch_end:
for callback_parameters in self.meta_processing_chain_callback_on_epoch_end:
if 'method_name' in callback_parameters:
self.data_processing_chain.call_method(
method_name=callback_parameters['method_name'],
parameters=callback_parameters.get('parameters', {})
)
if self.data_buffer is not None and self.data_refresh_on_epoch:
# Force reload of data
self.data_buffer.clear()
return KerasDataSequence
def data_collector(item_list=None,
                   data_processing_chain=None, meta_processing_chain=None,
                   target_format='single_target_per_sequence',
                   channel_dimension='channels_last',
                   verbose=True,
                   print_indent=2
                   ):
    """Data collector

    Collects data and meta into matrices while processing them through processing chains.

    Parameters
    ----------
    item_list : list or dict
        Items in the data sequence. List containing multi-level dictionary with first level key
        'data' and 'meta'. Second level should contain parameters for process method in the processing chain.
        Default value None

    data_processing_chain : ProcessingChain
        Data processing chain.
        Default value None

    meta_processing_chain : ProcessingChain
        Meta processing chain.
        Default value None

    channel_dimension : str
        Controls where channel dimension should be added. Similar to Keras data format parameter.
        If None given, no channel dimension is added.
        Possible values [None, 'channels_first', 'channels_last']
        Default value 'channels_last'

    target_format : str
        Meta data interpretation in the relation to the data items.
        Possible values ['same', 'single_target_per_sequence']
        Default value 'single_target_per_sequence'

    verbose : bool
        Print information about the data
        Default value True

    print_indent : int
        Default value 2

    Returns
    -------
    numpy.ndarray
        data

    numpy.ndarray
        meta

    dict
        data size information

    """
    # NOTE(review): when item_list is empty or None nothing is collected and
    # the function implicitly returns None — confirm callers expect this.
    if item_list:
        # Collect all data and meta
        X = []
        Y = []
        for item in item_list:
            data = data_processing_chain.process(**item['data'])
            meta = meta_processing_chain.process(**item['meta'])

            X.append(data.data)

            # Collect meta
            if target_format == 'single_target_per_sequence':
                # Collect single target per sequence
                for i in range(0, data.shape[data.sequence_axis]):
                    Y.append(meta.data[:, 0])

            elif target_format == 'same':
                # Repeat the item's target for every frame in the item.
                Y.append(
                    numpy.repeat(
                        a=meta.data,
                        repeats=data.length,
                        axis=1
                    ).T
                )

        data_size = {}

        # `data` below refers to the last processed item; all items are
        # assumed to share the same axis layout.
        if len(data.shape) == 2:
            # Stack collected data and meta correct way
            if data.time_axis == 0:
                X = numpy.vstack(X)
                Y = numpy.vstack(Y)

            else:
                X = numpy.hstack(X)
                Y = numpy.hstack(Y)

            # Get data item size
            data_size = {
                'data': X.shape[data.data_axis],
                'time': X.shape[data.time_axis],
            }

        elif len(data.shape) == 3:
            # Stack collected data and meta correct way
            if data.sequence_axis == 0:
                X = numpy.vstack(X)
                Y = numpy.vstack(Y)

            elif data.sequence_axis == 1:
                X = numpy.hstack(X)
                Y = numpy.hstack(Y)

            elif data.sequence_axis == 2:
                X = numpy.dstack(X)
                Y = numpy.dstack(Y)

            if channel_dimension:
                # Add channel dimension to the data
                if channel_dimension == 'channels_first':
                    X = numpy.expand_dims(X, axis=1)

                elif channel_dimension == 'channels_last':
                    X = numpy.expand_dims(X, axis=3)

            # Get data item size
            data_size = {
                'data': X.shape[data.data_axis],
                'time': X.shape[data.time_axis],
                'sequence': X.shape[data.sequence_axis],
            }

        if verbose:
            # Summarize item geometry for logging.
            data_shape = data.shape
            data_axis = {
                'time_axis': data.time_axis,
                'data_axis': data.data_axis
            }
            if hasattr(data, 'sequence_axis'):
                data_axis['sequence_axis'] = data.sequence_axis

            meta_shape = meta.shape
            meta_axis = {
                'time_axis': meta.time_axis,
                'data_axis': meta.data_axis
            }
            if hasattr(meta, 'sequence_axis'):
                meta_axis['sequence_axis'] = meta.sequence_axis

            logger = FancyLogger()

            # Data information
            logger.line('Data', indent=print_indent)

            # Matrix
            logger.data(
                field='Matrix shape',
                value=X.shape,
                indent=print_indent + 2
            )

            # Item
            logger.data(
                field='Item shape',
                value=data_shape,
                indent=print_indent + 2
            )
            logger.data(
                field='Time',
                value=data_shape[data_axis['time_axis']],
                indent=print_indent + 4
            )
            logger.data(
                field='Data',
                value=data_shape[data_axis['data_axis']],
                indent=print_indent + 4
            )
            if 'sequence_axis' in data_axis:
                logger.data(
                    field='Sequence',
                    value=data_shape[data_axis['sequence_axis']],
                    indent=print_indent + 4
                )

            # Meta information
            logger.line('Meta', indent=print_indent)

            # Matrix
            logger.data(
                field='Matrix shape',
                value=Y.shape,
                indent=print_indent + 2
            )

            # Item
            logger.data(
                field='Item shape',
                value=meta_shape,
                indent=print_indent + 2
            )
            logger.data(
                field='Time',
                value=meta_shape[meta_axis['time_axis']],
                indent=print_indent + 4
            )
            logger.data(
                field='Data',
                value=meta_shape[meta_axis['data_axis']],
                indent=print_indent + 4
            )
            if 'sequence_axis' in meta_axis:
                logger.data(
                    field='Sequence',
                    value=meta_shape[meta_axis['sequence_axis']],
                    indent=print_indent + 4
                )

        return X, Y, data_size
| venv/Lib/site-packages/dcase_util/keras/data.py | 22,229 | Constructor
Parameters
----------
item_list : list or dict
Items in the data sequence. List containing multi-level dictionary with first level key
'data' and 'meta'. Second level should contain parameters for process method in the processing chain.
Default value None
batch_size : int
Batch size (item count).
Default value 64
buffer_size : int
Internal buffer size (item count). By setting this sufficiently high, data sequence generator can
possibly fit all sequence items into internal buffer and can fetch without loading from disk.
Set to None, if no internal buffer used.
Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
data_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
meta_processing_chain_callback_on_epoch_end : list of dict
Can be used to call methods with parameters for processing chain at the end of epoch. This can be
used to control processing chain's internal status (e.g. roll the data).
Default value None
transformer_callbacks : list of func
Transformer callbacks to jointly process data and meta. This can be used for local data modification and
data augmentation.
Default value None
refresh_buffer_on_epoch : bool
In case internal data buffer is used, force data and meta refresh at the end of each epoch. Use this if
data is modified/augmented differently for each epoch.
In case data_processing_chain_callback_on_epoch_end or meta_processing_chain_callback_on_epoch_end is
used, this parameter is automatically set to True.
Default value False
data_format : str
Keras like data format, controls where channel should be added.
Possible values ['channels_first', 'channels_last']
Default value 'channels_last'
target_format : str
Meta data interpretation in the relation to the data items.
Default value 'single_target_per_segment'
Data collector
Collects data and meta into matrices while processing them through processing chains.
Parameters
----------
item_list : list or dict
Items in the data sequence. List containing multi-level dictionary with first level key
'data' and 'meta'. Second level should contain parameters for process method in the processing chain.
Default value None
data_processing_chain : ProcessingChain
Data processing chain.
Default value None
meta_processing_chain : ProcessingChain
Meta processing chain.
Default value None
channel_dimension : str
Controls where channel dimension should be added. Similar to Keras data format parameter.
If None given, no channel dimension is added.
Possible values [None, 'channels_first', 'channels_last']
Default value None
target_format : str
Meta data interpretation in the relation to the data items.
Default value 'single_target_per_segment'
verbose : bool
Print information about the data
Default value True
print_indent : int
Default value 2
Returns
-------
numpy.ndarray
data
numpy.ndarray
meta
dict
data size information
!/usr/bin/env python -*- coding: utf-8 -*- Use getter method to avoid importing Keras when importing dcase_util. This allows user to decide when import Keras, so that user can set random seeds before Keras import. Run ContainerMixin init Processing chains Initialize data buffer Load item data Apply transformer callbacks Collect data Collect meta Collect single target per sequence Collect single target per sequence Prepare 2D data, stack along time_axis Prepare 3D data, stack along sequence_axis Add channel dimension to the data Prepare meta Load first item and get data length Load first item and get data length Fetch data and meta through internal buffer Fetch data and meta directly. Force reload of data Collect all data and meta Collect meta Collect single target per sequence Collect single target per sequence Stack collected data and meta correct way Get data item size Stack collected data and meta correct way Add channel dimension to the data Get data item size Data information Matrix Item Meta information Matrix Item | 4,415 | en | 0.508244 |
import dataclasses
import os
from typing import List
import hydra
@dataclasses.dataclass
class ModelConfig:
    """Configuration for the model.

    Note that `block_sizes` must be specified using the `dataclasses.field`
    function, as you are not allowed to supply default values for mutable fields.
    Instead, the default value is supplied through a default factory function which
    creates a new list every time.
    """
    architecture: str = 'lenet'
    hidden_size: int = 20
    block_sizes: List[int] = dataclasses.field(default_factory=lambda: [10, 10, 10])


@dataclasses.dataclass
class TrainingConfig:
    """Top-level training configuration.

    `model` uses a default factory (not a shared `ModelConfig()` instance) so
    that every `TrainingConfig` gets its own independent model config —
    otherwise mutating one config's model would affect all others.
    """
    model: ModelConfig = dataclasses.field(default_factory=ModelConfig)
    num_epochs: int = 10
    data_path: str = 'data.npy'
@hydra.main(config_path=None, config_name='config')
def main(config: TrainingConfig) -> None:
    """Entry point: print the resolved config and demonstrate Hydra-aware paths."""
    print(f'Got configuration: {config}')
    # Note here: when loading data, should convert to absolute path
    # (Hydra changes the working directory to its per-run output folder,
    # so relative input paths would otherwise resolve incorrectly).
    data_path = hydra.utils.to_absolute_path(config.data_path)
    print(f'Loading data from {data_path}')
    # Note here: saving to relative path is set to output folder
    # (i.e. results land inside Hydra's per-run output directory).
    result_path = os.path.abspath('result.txt')
    print(f'Saving results to {result_path}')
if __name__ == '__main__':
    # Register the structured config schema so the 'config' name used by
    # @hydra.main resolves to TrainingConfig.
    from hydra.core.config_store import ConfigStore
    # NOTE(review): Hydra examples typically use ConfigStore.instance()
    # (a singleton) rather than constructing ConfigStore() — confirm this
    # registration is picked up by @hydra.main.
    cs = ConfigStore()
    cs.store('config', node=TrainingConfig)
    main()
| lecture3/bootcamp3/script.py | 1,344 | Configuration for the model.
Note that `block_sizes` must be specified using the `dataclasses.field`
function, as you are not allowed to supply default values for mutable fields.
Instead, the default value is supplied through a default factory function which
creates a new list every time.
Note here: when loading data, should convert to absolute path Note here: saving to relative path is set to output folder | 413 | en | 0.789156 |
# Report the particles with the minimum and maximum mean stress p within one
# frame of the 'penetration' time history of an MPM-CHM t-bar result file.
import numpy as np
import h5py as py
import matplotlib.pyplot as plt
import sys

hdf5_file = py.File("..\\Build\\TestsWithGL\\t2d_mpm_chm_t_bar_conference_restart.hdf5", "r")

frame_id = 0
th_grp = hdf5_file['TimeHistory']['penetration']
pcl_dset = th_grp['frame_%d' % frame_id]['ParticleData']
pcl_num = pcl_dset.attrs['pcl_num']
print(pcl_num)

# Per-particle values: s11, s22, s12 and the dataset's 'p' field.
pcl_stress = np.zeros([pcl_num, 4])
p_min_id = 0
# Bug fix: the minimum tracker must start at the LARGEST representable float
# so the first comparison always wins. The original used sys.float_info.min,
# which is the smallest *positive* normal (~2.2e-308) — any positive p would
# never update it, leaving p_min_id stuck at 0.
p_min = sys.float_info.max
p_max_id = 0
p_max = -sys.float_info.max
for pcl_id in range(pcl_num):
    pcl_data = pcl_dset[pcl_id]
    pcl_stress[pcl_id][0] = pcl_data['s11']
    pcl_stress[pcl_id][1] = pcl_data['s22']
    pcl_stress[pcl_id][2] = pcl_data['s12']
    pcl_stress[pcl_id][3] = pcl_data['p']
    # Mean stress from the three stored components (alternative: use the
    # dataset's 'p' field directly, kept below for reference).
    #p = pcl_stress[pcl_id][3]
    p = (pcl_stress[pcl_id][0] + pcl_stress[pcl_id][1] + pcl_stress[pcl_id][2]) / 3.0
    if p < p_min:
        p_min = p
        p_min_id = pcl_id
    if p > p_max:
        p_max = p
        p_max_id = pcl_id

print("p min: %f pcl %d\np max: %f pcl %d" % (p_min, p_min_id, p_max, p_max_id))
hdf5_file.close()
| PyUtilities/hdf5_stress_range.py | 1,046 | p = pcl_stress[pcl_id][3] | 25 | ro | 0.293514 |
# Script to convert json into proprietary .mat files
# Licensed under Apache v2 (see LICENSE)
import sys
import os
import glob
import json
from scipy.io import savemat
def main(json_dir, out_dir):
    """Convert every .json file in json_dir into a corresponding .mat file in out_dir.

    .mat files have the same basename as the .json files.

    This script is meant for data files that contain data from
    OpenSauce / VoiceSauce variables.

    Parameters
    ----------
    json_dir : str
        Directory searched (non-recursively) for ``*.json`` files.
    out_dir : str
        Output directory for the ``.mat`` files; created if missing.
    """
    # Find all .json files in json_dir
    json_files = glob.glob(os.path.join(json_dir, '*.json'))
    # Ensure the output directory exists once, up front, instead of checking
    # inside the loop; exist_ok avoids a race with concurrent creation.
    os.makedirs(out_dir, exist_ok=True)
    for json_file in json_files:
        with open(json_file) as f:
            json_dict = json.load(f)
        # Write json dict to mat file with the same basename
        fn = os.path.join(out_dir, os.path.splitext(os.path.basename(json_file))[0]) + '.mat'
        savemat(fn, json_dict)
        print('Wrote data in {} to {}'.format(json_file, fn))


if __name__ == '__main__':
    main(sys.argv[1], sys.argv[2])
| tools/convert_json_to_mat.py | 1,133 | Script to convert all .json files in json_dir into corresponding .mat
files in out_dir
.mat files have the same basename as the .json files
This script is meant for data files that contain data from
OpenSauce / VoiceSauce variables.
Script to convert json into proprietary .mat files Licensed under Apache v2 (see LICENSE) Find all .json files in json_dir Iterate through each .mat file Write json dict to mat Check that output directory exists, if not create it | 470 | en | 0.717836 |
"""Model module for images."""
from django.db import models
from django.contrib.auth.models import User
from imager_profile.models import ImagerProfile
# Create your models here.
class ImageBaseClass(models.Model):
    """Abstract base model shared by Photo and Album.

    Provides the common title/description text fields, modification and
    publication dates, and the publication-visibility choice field.
    """

    # Visibility codes stored in the `published` field.
    PRIVATE = 'PRVT'
    SHARED = 'SHRD'
    PUBLIC = 'PBLC'
    # Choices as (stored value, human-readable label) pairs.
    PUBLISHED = ((PRIVATE, 'private'),
                 (SHARED, 'shared'),
                 (PUBLIC, 'public'))

    title = models.CharField(max_length=180)
    description = models.TextField(max_length=500, blank=True, null=True)
    date_modified = models.DateField(auto_now=True)  # refreshed on every save
    date_published = models.DateField(blank=True, null=True)
    published = models.CharField(choices=PUBLISHED, max_length=8)

    class Meta:
        """Model options: abstract, so no database table is created for this class."""

        abstract = True
class Photo(ImageBaseClass):
    """An individual image uploaded by a user."""

    # Owning user; deleting the user cascades to their photos.
    user = models.ForeignKey(User, on_delete=models.CASCADE,
                             related_name='photo')
    image = models.ImageField(upload_to='images')
    date_uploaded = models.DateField(editable=False, auto_now_add=True)

    def __str__(self):
        """Return the photo's title."""
        return self.title
class Album(ImageBaseClass):
    """A user-owned collection of photos with a cover image."""

    # Owning user; deleting the user cascades to their albums.
    user = models.ForeignKey(User, on_delete=models.CASCADE,
                             related_name='album')
    cover = models.ImageField(upload_to='images')
    date_created = models.DateField(editable=False, auto_now_add=True)
    # Photos may belong to multiple albums; an album may be empty.
    photos = models.ManyToManyField(Photo, related_name='albums', blank=True)

    def __str__(self):
        """Return the album's title."""
        return self.title
| imagersite/imager_images/models.py | 1,659 | Album model.
Base class for Photo and Album classes.
Meta.
Photo model.
Print function displays username.
Print function displays username.
Model module for images.
Create your models here. | 191 | en | 0.593561 |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.decimal_to_precision import ROUND
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.decimal_to_precision import DECIMAL_PLACES
from ccxt.base.decimal_to_precision import SIGNIFICANT_DIGITS
class bitvavo(Exchange):
def describe(self):
return self.deep_extend(super(bitvavo, self).describe(), {
'id': 'bitvavo',
'name': 'Bitvavo',
'countries': ['NL'], # Netherlands
'rateLimit': 60, # 1000 requests per minute
'version': 'v2',
'certified': True,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createOrder': True,
'createReduceOnlyOrder': False,
'createStopLimitOrder': True,
'createStopMarketOrder': True,
'createStopOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPosition': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': False,
'fetchTradingFees': True,
'fetchTransfer': False,
'fetchTransfers': False,
'fetchWithdrawals': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'transfer': False,
'withdraw': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/169202626-bd130fc5-fcf9-41bb-8d97-6093225c73cd.jpg',
'api': {
'public': 'https://api.bitvavo.com',
'private': 'https://api.bitvavo.com',
},
'www': 'https://bitvavo.com/',
'doc': 'https://docs.bitvavo.com/',
'fees': 'https://bitvavo.com/en/fees',
'referral': 'https://bitvavo.com/?a=24F34952F7',
},
'api': {
'public': {
'get': {
'time': 1,
'markets': 1,
'assets': 1,
'{market}/book': 1,
'{market}/trades': 5,
'{market}/candles': 1,
'ticker/price': 1,
'ticker/book': 1,
'ticker/24h': {'cost': 1, 'noMarket': 25},
},
},
'private': {
'get': {
'account': 1,
'order': 1,
'orders': 5,
'ordersOpen': {'cost': 1, 'noMarket': 25},
'trades': 5,
'balance': 5,
'deposit': 1,
'depositHistory': 5,
'withdrawalHistory': 5,
},
'post': {
'order': 1,
'withdrawal': 1,
},
'put': {
'order': 1,
},
'delete': {
'order': 1,
'orders': 1,
},
},
},
'fees': {
'trading': {
'tierBased': True,
'percentage': True,
'taker': self.parse_number('0.0025'),
'maker': self.parse_number('0.002'),
'tiers': {
'taker': [
[self.parse_number('0'), self.parse_number('0.0025')],
[self.parse_number('100000'), self.parse_number('0.0020')],
[self.parse_number('250000'), self.parse_number('0.0016')],
[self.parse_number('500000'), self.parse_number('0.0012')],
[self.parse_number('1000000'), self.parse_number('0.0010')],
[self.parse_number('2500000'), self.parse_number('0.0008')],
[self.parse_number('5000000'), self.parse_number('0.0006')],
[self.parse_number('10000000'), self.parse_number('0.0005')],
[self.parse_number('25000000'), self.parse_number('0.0004')],
],
'maker': [
[self.parse_number('0'), self.parse_number('0.0015')],
[self.parse_number('100000'), self.parse_number('0.0010')],
[self.parse_number('250000'), self.parse_number('0.0008')],
[self.parse_number('500000'), self.parse_number('0.0006')],
[self.parse_number('1000000'), self.parse_number('0.0005')],
[self.parse_number('2500000'), self.parse_number('0.0004')],
[self.parse_number('5000000'), self.parse_number('0.0004')],
[self.parse_number('10000000'), self.parse_number('0.0003')],
[self.parse_number('25000000'), self.parse_number('0.0003')],
],
},
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'exceptions': {
'exact': {
'101': ExchangeError, # Unknown error. Operation may or may not have succeeded.
'102': BadRequest, # Invalid JSON.
'103': RateLimitExceeded, # You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit.
'104': RateLimitExceeded, # You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit.
'105': PermissionDenied, # Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}.
'107': ExchangeNotAvailable, # The matching engine is overloaded. Please wait 500ms and resubmit your order.
'108': ExchangeNotAvailable, # The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order.
'109': ExchangeNotAvailable, # The matching engine did not respond in time. Operation may or may not have succeeded.
'110': BadRequest, # Invalid endpoint. Please check url and HTTP method.
'200': BadRequest, # ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests.
'201': BadRequest, # ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests.
'202': BadRequest, # ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders.
'203': BadSymbol, # {"errorCode":203,"error":"symbol parameter is required."}
'204': BadRequest, # ${param} parameter is not supported.
'205': BadRequest, # ${param} parameter is invalid.
'206': BadRequest, # Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported.
'210': InvalidOrder, # Amount exceeds the maximum allowed amount(1000000000).
'211': InvalidOrder, # Price exceeds the maximum allowed amount(100000000000).
'212': InvalidOrder, # Amount is below the minimum allowed amount for self asset.
'213': InvalidOrder, # Price is below the minimum allowed amount(0.000000000000001).
'214': InvalidOrder, # Price is too detailed
'215': InvalidOrder, # Price is too detailed. A maximum of 15 digits behind the decimal point are allowed.
'216': InsufficientFunds, # {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."}
'217': InvalidOrder, # {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."}
'230': ExchangeError, # The order is rejected by the matching engine.
'231': ExchangeError, # The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused.
'232': BadRequest, # You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly.
'233': InvalidOrder, # {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."}
'234': InvalidOrder, # Market orders cannot be updated.
'235': ExchangeError, # You can only have 100 open orders on each book.
'236': BadRequest, # You can only update amount or amountRemaining, not both.
'240': OrderNotFound, # {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."}
'300': AuthenticationError, # Authentication is required for self endpoint.
'301': AuthenticationError, # {"errorCode":301,"error":"API Key must be of length 64."}
'302': AuthenticationError, # Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket.
'303': AuthenticationError, # Window must be between 100 and 60000 ms.
'304': AuthenticationError, # Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket).
# '304': AuthenticationError, # Authentication is required for self endpoint.
'305': AuthenticationError, # {"errorCode":305,"error":"No active API key found."}
'306': AuthenticationError, # No active API key found. Please ensure that you have confirmed the API key by e-mail.
'307': PermissionDenied, # This key does not allow access from self IP.
'308': AuthenticationError, # {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
'309': AuthenticationError, # {"errorCode":309,"error":"The signature is invalid."}
'310': PermissionDenied, # This key does not allow trading actions.
'311': PermissionDenied, # This key does not allow showing account information.
'312': PermissionDenied, # This key does not allow withdrawal of funds.
'315': BadRequest, # Websocket connections may not be used in a browser. Please use REST requests for self.
'317': AccountSuspended, # This account is locked. Please contact support.
'400': ExchangeError, # Unknown error. Please contact support with a copy of your request.
'401': ExchangeError, # Deposits for self asset are not available at self time.
'402': PermissionDenied, # You need to verify your identitiy before you can deposit and withdraw digital assets.
'403': PermissionDenied, # You need to verify your phone number before you can deposit and withdraw digital assets.
'404': OnMaintenance, # Could not complete self operation, because our node cannot be reached. Possibly under maintenance.
'405': ExchangeError, # You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts.
'406': BadRequest, # {"errorCode":406,"error":"Your withdrawal is too small."}
'407': ExchangeError, # Internal transfer is not possible.
'408': InsufficientFunds, # {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."}
'409': InvalidAddress, # {"errorCode":409,"error":"This is not a verified bank account."}
'410': ExchangeError, # Withdrawals for self asset are not available at self time.
'411': BadRequest, # You can not transfer assets to yourself.
'412': InvalidAddress, # {"errorCode":412,"error":"eth_address_invalid."}
'413': InvalidAddress, # This address violates the whitelist.
'414': ExchangeError, # You cannot withdraw assets within 2 minutes of logging in.
},
'broad': {
'start parameter is invalid': BadRequest, # {"errorCode":205,"error":"start parameter is invalid."}
'symbol parameter is invalid': BadSymbol, # {"errorCode":205,"error":"symbol parameter is invalid."}
'amount parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"amount parameter is invalid."}
'orderId parameter is invalid': InvalidOrder, # {"errorCode":205,"error":"orderId parameter is invalid."}
},
},
'options': {
'BITVAVO-ACCESS-WINDOW': 10000, # default 10 sec
'fetchCurrencies': {
'expires': 1000, # 1 second
},
},
'precisionMode': SIGNIFICANT_DIGITS,
'commonCurrencies': {
'MIOTA': 'IOTA', # https://github.com/ccxt/ccxt/issues/7487
},
})
def currency_to_precision(self, code, fee, networkCode=None):
return self.decimal_to_precision(fee, 0, self.currencies[code]['precision'])
    def amount_to_precision(self, symbol, amount):
        """Truncate *amount* to the market's amount precision(decimal places).

        NOTE(review): the bitfinex doc links below appear copy-pasted from the
        bitfinex implementation; the code itself simply truncates to the
        market's configured number of decimal places.
        """
        # https://docs.bitfinex.com/docs/introduction#amount-precision
        # The amount field allows up to 8 decimals.
        # Anything exceeding self will be rounded to the 8th decimal.
        return self.decimal_to_precision(amount, TRUNCATE, self.markets[symbol]['precision']['amount'], DECIMAL_PLACES)
    def price_to_precision(self, symbol, price):
        """Format *price* for this market in two steps: first round to the
        market's price precision using the exchange-wide precisionMode, then
        truncate the result to at most 8 decimal places.
        """
        price = self.decimal_to_precision(price, ROUND, self.markets[symbol]['precision']['price'], self.precisionMode)
        # NOTE(review): the bitfinex links below appear copy-pasted from the
        # bitfinex implementation; what the code enforces is the two-step
        # significant-digits rounding plus 8-decimal truncation above/below.
        # https://docs.bitfinex.com/docs/introduction#price-precision
        # The precision level of all trading prices is based on significant figures.
        # All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345).
        # Prices submit with a precision larger than 5 will be cut by the API.
        return self.decimal_to_precision(price, TRUNCATE, 8, DECIMAL_PLACES)
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.publicGetTime(params)
#
# {"time": 1590379519148}
#
return self.safe_integer(response, 'time')
    async def fetch_markets(self, params={}):
        """
        retrieves data on all markets for bitvavo
        :param dict params: extra parameters specific to the exchange api endpoint
        :returns [dict]: an array of objects representing market data
        """
        response = await self.publicGetMarkets(params)
        # the assets list is fetched as well because a market's amount
        # precision comes from the base currency's 'decimals' field
        currencies = await self.fetch_currencies_from_cache(params)
        currenciesById = self.index_by(currencies, 'symbol')
        #
        # sample entry from publicGetMarkets:
        #     {
        #         "market":"ADA-BTC",
        #         "status":"trading",  # "trading" "halted" "auction"
        #         "base":"ADA",
        #         "quote":"BTC",
        #         "pricePrecision":5,
        #         "minOrderInBaseAsset":"100",
        #         "minOrderInQuoteAsset":"0.001",
        #         "orderTypes": ["market", "limit"]
        #     }
        #
        result = []
        for i in range(0, len(response)):
            market = response[i]
            id = self.safe_string(market, 'market')
            baseId = self.safe_string(market, 'base')
            quoteId = self.safe_string(market, 'quote')
            base = self.safe_currency_code(baseId)
            quote = self.safe_currency_code(quoteId)
            status = self.safe_string(market, 'status')
            # amount precision defaults to 8 when the base asset is unknown
            baseCurrency = self.safe_value(currenciesById, baseId)
            amountPrecision = None
            if baseCurrency is not None:
                amountPrecision = self.safe_integer(baseCurrency, 'decimals', 8)
            # bitvavo is spot-only, hence all the contract fields are None/False
            result.append({
                'id': id,
                'symbol': base + '/' + quote,
                'base': base,
                'quote': quote,
                'settle': None,
                'baseId': baseId,
                'quoteId': quoteId,
                'settleId': None,
                'type': 'spot',
                'spot': True,
                'margin': False,
                'swap': False,
                'future': False,
                'option': False,
                'active': (status == 'trading'),
                'contract': False,
                'linear': None,
                'inverse': None,
                'contractSize': None,
                'expiry': None,
                'expiryDatetime': None,
                'strike': None,
                'optionType': None,
                'precision': {
                    'amount': amountPrecision,
                    'price': self.safe_integer(market, 'pricePrecision'),
                },
                'limits': {
                    'leverage': {
                        'min': None,
                        'max': None,
                    },
                    'amount': {
                        'min': self.safe_number(market, 'minOrderInBaseAsset'),
                        'max': None,
                    },
                    'price': {
                        'min': None,
                        'max': None,
                    },
                    'cost': {
                        'min': self.safe_number(market, 'minOrderInQuoteAsset'),
                        'max': None,
                    },
                },
                'info': market,
            })
        return result
async def fetch_currencies_from_cache(self, params={}):
# self method is now redundant
# currencies are now fetched before markets
options = self.safe_value(self.options, 'fetchCurrencies', {})
timestamp = self.safe_integer(options, 'timestamp')
expires = self.safe_integer(options, 'expires', 1000)
now = self.milliseconds()
if (timestamp is None) or ((now - timestamp) > expires):
response = await self.publicGetAssets(params)
self.options['fetchCurrencies'] = self.extend(options, {
'response': response,
'timestamp': now,
})
return self.safe_value(self.options['fetchCurrencies'], 'response')
    async def fetch_currencies(self, params={}):
        """
        fetches all available currencies on an exchange
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an associative dictionary of currencies
        """
        response = await self.fetch_currencies_from_cache(params)
        #
        # sample asset entry:
        #     {
        #         "symbol":"ADA",
        #         "name":"Cardano",
        #         "decimals":6,
        #         "depositFee":"0",
        #         "depositConfirmations":15,
        #         "depositStatus":"OK",  # "OK", "MAINTENANCE", "DELISTED"
        #         "withdrawalFee":"0.2",
        #         "withdrawalMinAmount":"0.2",
        #         "withdrawalStatus":"OK",  # "OK", "MAINTENANCE", "DELISTED"
        #         "networks": ["Mainnet"],  # "ETH", "NEO", "ONT", "SEPA", "VET"
        #         "message":"",
        #     }
        #
        result = {}
        for i in range(0, len(response)):
            currency = response[i]
            id = self.safe_string(currency, 'symbol')
            code = self.safe_currency_code(id)
            # a currency is only 'active' when both deposit and withdrawal are enabled
            depositStatus = self.safe_value(currency, 'depositStatus')
            deposit = (depositStatus == 'OK')
            withdrawalStatus = self.safe_value(currency, 'withdrawalStatus')
            withdrawal = (withdrawalStatus == 'OK')
            active = deposit and withdrawal
            name = self.safe_string(currency, 'name')
            # 'decimals' is the number of decimal places, defaulting to 8
            precision = self.safe_integer(currency, 'decimals', 8)
            result[code] = {
                'id': id,
                'info': currency,
                'code': code,
                'name': name,
                'active': active,
                'deposit': deposit,
                'withdraw': withdrawal,
                'fee': self.safe_number(currency, 'withdrawalFee'),
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': self.safe_number(currency, 'withdrawalMinAmount'),
                        'max': None,
                    },
                },
            }
        return result
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
}
response = await self.publicGetTicker24h(self.extend(request, params))
#
# {
# "market":"ETH-BTC",
# "open":"0.022578",
# "high":"0.023019",
# "low":"0.022573",
# "last":"0.023019",
# "volume":"25.16366324",
# "volumeQuote":"0.57333305",
# "bid":"0.023039",
# "bidSize":"0.53500578",
# "ask":"0.023041",
# "askSize":"0.47859202",
# "timestamp":1590381666900
# }
#
return self.parse_ticker(response, market)
    def parse_ticker(self, ticker, market=None):
        """Parse a raw 24h ticker payload into a unified ticker structure.

        change/percentage/average/vwap are set to None here; presumably
        safe_ticker derives what it can downstream - confirm against the base
        class implementation.
        """
        #
        # fetchTicker
        #
        #     {
        #         "market":"ETH-BTC",
        #         "open":"0.022578",
        #         "high":"0.023019",
        #         "low":"0.022573",
        #         "last":"0.023019",
        #         "volume":"25.16366324",
        #         "volumeQuote":"0.57333305",
        #         "bid":"0.023039",
        #         "bidSize":"0.53500578",
        #         "ask":"0.023041",
        #         "askSize":"0.47859202",
        #         "timestamp":1590381666900
        #     }
        #
        marketId = self.safe_string(ticker, 'market')
        # market ids use a dash delimiter, e.g. "ETH-BTC"
        symbol = self.safe_symbol(marketId, market, '-')
        timestamp = self.safe_integer(ticker, 'timestamp')
        last = self.safe_string(ticker, 'last')
        baseVolume = self.safe_string(ticker, 'volume')
        quoteVolume = self.safe_string(ticker, 'volumeQuote')
        open = self.safe_string(ticker, 'open')
        return self.safe_ticker({
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_string(ticker, 'high'),
            'low': self.safe_string(ticker, 'low'),
            'bid': self.safe_string(ticker, 'bid'),
            'bidVolume': self.safe_string(ticker, 'bidSize'),
            'ask': self.safe_string(ticker, 'ask'),
            'askVolume': self.safe_string(ticker, 'askSize'),
            'vwap': None,
            'open': open,
            'close': last,  # bitvavo reports no separate close, use 'last'
            'last': last,
            'previousClose': None,  # previous day close
            'change': None,
            'percentage': None,
            'average': None,
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }, market)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
response = await self.publicGetTicker24h(params)
#
# [
# {
# "market":"ADA-BTC",
# "open":"0.0000059595",
# "high":"0.0000059765",
# "low":"0.0000059595",
# "last":"0.0000059765",
# "volume":"2923.172",
# "volumeQuote":"0.01743483",
# "bid":"0.0000059515",
# "bidSize":"1117.630919",
# "ask":"0.0000059585",
# "askSize":"809.999739",
# "timestamp":1590382266324
# }
# ]
#
return self.parse_tickers(response, symbols)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
# 'tradeIdTo': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf',
}
if limit is not None:
request['limit'] = limit
if since is not None:
request['start'] = since
response = await self.publicGetMarketTrades(self.extend(request, params))
#
# [
# {
# "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
# "timestamp":1590382761859,
# "amount":"0.06026079",
# "price":"8095.3",
# "side":"buy"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
    def parse_trade(self, trade, market=None):
        """Parse a raw trade payload into a unified trade structure.

        Handles four payload shapes: public fetchTrades, private order fills
        (createOrder/fetchOpenOrders/fetchOrders/editOrder), fetchMyTrades,
        and the websocket 'fill' event(which uses 'fillId' instead of 'id').
        """
        #
        # fetchTrades(public)
        #
        #     {
        #         "id":"94154c98-6e8b-4e33-92a8-74e33fc05650",
        #         "timestamp":1590382761859,
        #         "amount":"0.06026079",
        #         "price":"8095.3",
        #         "side":"buy"
        #     }
        #
        # createOrder, fetchOpenOrders, fetchOrders, editOrder(private)
        #
        #     {
        #         "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
        #         "timestamp":1590505649245,
        #         "amount":"0.249825",
        #         "price":"183.49",
        #         "taker":true,
        #         "fee":"0.12038925",
        #         "feeCurrency":"EUR",
        #         "settled":true
        #     }
        #
        # fetchMyTrades(private) - same as the fill above, plus
        # "orderId", "market" and "side" fields
        #
        # watchMyTrades(private)
        #
        #     {
        #         event: 'fill',
        #         timestamp: 1590964470132,
        #         market: 'ETH-EUR',
        #         orderId: '85d082e1-eda4-4209-9580-248281a29a9a',
        #         fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211',
        #         side: 'sell',
        #         amount: '0.1',
        #         price: '211.46',
        #         taker: True,
        #         fee: '0.056',
        #         feeCurrency: 'EUR'
        #     }
        #
        priceString = self.safe_string(trade, 'price')
        amountString = self.safe_string(trade, 'amount')
        timestamp = self.safe_integer(trade, 'timestamp')
        side = self.safe_string(trade, 'side')
        # websocket fills carry 'fillId' instead of 'id'
        id = self.safe_string_2(trade, 'id', 'fillId')
        marketId = self.safe_string(trade, 'market')
        symbol = self.safe_symbol(marketId, market, '-')
        # 'taker' is a boolean; it is absent in public trades, in which
        # case takerOrMaker stays None
        taker = self.safe_value(trade, 'taker')
        takerOrMaker = None
        if taker is not None:
            takerOrMaker = 'taker' if taker else 'maker'
        feeCostString = self.safe_string(trade, 'fee')
        fee = None
        if feeCostString is not None:
            feeCurrencyId = self.safe_string(trade, 'feeCurrency')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCostString,
                'currency': feeCurrencyCode,
            }
        orderId = self.safe_string(trade, 'orderId')
        # 'cost' is None here; presumably safe_trade derives it from
        # price * amount - confirm against the base class implementation
        return self.safe_trade({
            'info': trade,
            'id': id,
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'order': orderId,
            'type': None,
            'side': side,
            'takerOrMaker': takerOrMaker,
            'price': priceString,
            'amount': amountString,
            'cost': None,
            'fee': fee,
        }, market)
async def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
await self.load_markets()
response = await self.privateGetAccount(params)
#
# {
# "fees": {
# "taker": "0.0025",
# "maker": "0.0015",
# "volume": "10000.00"
# }
# }
#
fees = self.safe_value(response, 'fees')
maker = self.safe_number(fees, 'maker')
taker = self.safe_number(fees, 'taker')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'info': response,
'symbol': symbol,
'maker': maker,
'taker': taker,
'percentage': True,
'tierBased': True,
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
await self.load_markets()
request = {
'market': self.market_id(symbol),
}
if limit is not None:
request['depth'] = limit
response = await self.publicGetMarketBook(self.extend(request, params))
#
# {
# "market":"BTC-EUR",
# "nonce":35883831,
# "bids":[
# ["8097.4","0.6229099"],
# ["8097.2","0.64151283"],
# ["8097.1","0.24966294"],
# ],
# "asks":[
# ["8097.5","1.36916911"],
# ["8098.8","0.33462248"],
# ["8099.3","1.12908646"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'nonce')
return orderbook
def parse_ohlcv(self, ohlcv, market=None):
#
# [
# 1590383700000,
# "8088.5",
# "8088.5",
# "8088.5",
# "8088.5",
# "0.04788623"
# ]
#
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
'interval': self.timeframes[timeframe],
# 'limit': 1440, # default 1440, max 1440
# 'start': since,
# 'end': self.milliseconds(),
}
if since is not None:
# https://github.com/ccxt/ccxt/issues/9227
duration = self.parse_timeframe(timeframe)
request['start'] = since
if limit is None:
limit = 1440
request['end'] = self.sum(since, limit * duration * 1000)
if limit is not None:
request['limit'] = limit # default 1440, max 1440
response = await self.publicGetMarketCandles(self.extend(request, params))
#
# [
# [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"],
# [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"],
# [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"],
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_balance(self, response):
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'symbol')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'inOrder')
result[code] = account
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
await self.load_markets()
response = await self.privateGetBalance(params)
#
# [
# {
# "symbol": "BTC",
# "available": "1.57593193",
# "inOrder": "0.74832374"
# }
# ]
#
return self.parse_balance(response)
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with self account
:param str code: unified currency code
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
}
response = await self.privateGetDeposit(self.extend(request, params))
#
# {
# "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567",
# "paymentId": "10002653"
# }
#
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'paymentId')
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'network': None,
'info': response,
}
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        create a trade order
        :param str symbol: unified symbol of the market to create an order in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float amount: how much of currency you want to trade in units of base currency
        :param float price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
        :param dict params: extra parameters specific to the bitvavo api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'market': market['id'],
            'side': side,
            'orderType': type,  # 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit'
            # other parameters the endpoint supports(pass via params):
            # 'amount': self.amount_to_precision(symbol, amount),
            # 'price': self.price_to_precision(symbol, price),
            # 'amountQuote': self.cost_to_precision(symbol, cost),
            # 'timeInForce': 'GTC',  # 'GTC', 'IOC', 'FOK'
            # 'selfTradePrevention': 'decrementAndCancel',  # 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth'
            # 'postOnly': False,
            # 'disableMarketProtection': False,  # don't cancel if the next fill price is 10% worse than the best fill price
            # 'responseRequired': True,  # False is faster
        }
        isStopLimit = (type == 'stopLossLimit') or (type == 'takeProfitLimit')
        isStopMarket = (type == 'stopLoss') or (type == 'takeProfit')
        if type == 'market':
            # market orders may be expressed as a quote-currency cost
            # ('amountQuote'): either computed as amount * price when a price
            # argument is given, or taken from params('cost'/'amountQuote');
            # otherwise a plain base amount is sent
            cost = None
            if price is not None:
                cost = amount * price
            else:
                cost = self.safe_number_2(params, 'cost', 'amountQuote')
            if cost is not None:
                precision = market['precision']['price']
                request['amountQuote'] = self.decimal_to_precision(cost, TRUNCATE, precision, self.precisionMode)
            else:
                request['amount'] = self.amount_to_precision(symbol, amount)
            params = self.omit(params, ['cost', 'amountQuote'])
        elif type == 'limit':
            request['price'] = self.price_to_precision(symbol, price)
            request['amount'] = self.amount_to_precision(symbol, amount)
        elif isStopMarket or isStopLimit:
            # stop orders need a trigger price; for stop-market orders the
            # price argument doubles as the stopPrice fallback
            stopPrice = self.safe_number_2(params, 'stopPrice', 'triggerAmount')
            if stopPrice is None:
                if isStopLimit:
                    raise ArgumentsRequired(self.id + ' createOrder() requires a stopPrice parameter for a ' + type + ' order')
                elif isStopMarket:
                    if price is None:
                        raise ArgumentsRequired(self.id + ' createOrder() requires a price argument or a stopPrice parameter for a ' + type + ' order')
                    else:
                        stopPrice = price
            if isStopLimit:
                request['price'] = self.price_to_precision(symbol, price)
            params = self.omit(params, ['stopPrice', 'triggerAmount'])
            request['triggerAmount'] = self.price_to_precision(symbol, stopPrice)
            request['triggerType'] = 'price'
            request['amount'] = self.amount_to_precision(symbol, amount)
        response = await self.privatePostOrder(self.extend(request, params))
        #
        # condensed response sample:
        #     {"orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR",
        #      "created":1590505649241, "updated":1590505649241, "status":"filled",
        #      "side":"sell", "orderType":"market", "amount":"0.249825", "amountRemaining":"0",
        #      "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825",
        #      "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR",
        #      "fills":[{"id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245,
        #                "amount":"0.249825", "price":"183.49", "taker":true,
        #                "fee":"0.12038925", "feeCurrency":"EUR", "settled":true}],
        #      "selfTradePrevention":"decrementAndCancel", "visible":false,
        #      "disableMarketProtection":false}
        #
        return self.parse_order(response, market)
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {}
amountRemaining = self.safe_number(params, 'amountRemaining')
params = self.omit(params, 'amountRemaining')
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
if amount is not None:
request['amount'] = self.amount_to_precision(symbol, amount)
if amountRemaining is not None:
request['amountRemaining'] = self.amount_to_precision(symbol, amountRemaining)
request = self.extend(request, params)
if request:
request['orderId'] = id
request['market'] = market['id']
response = await self.privatePutOrder(self.extend(request, params))
return self.parse_order(response, market)
else:
raise ArgumentsRequired(self.id + ' editOrder() requires an amount argument, or a price argument, or non-empty params')
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateDeleteOrder(self.extend(request, params))
#
# {
# "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
# }
#
return self.parse_order(response, market)
async def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
:param str|None symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateDeleteOrders(self.extend(request, params))
#
# [
# {
# "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6"
# }
# ]
#
return self.parse_orders(response, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'orderId': id,
'market': market['id'],
}
response = await self.privateGetOrder(self.extend(request, params))
#
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
#
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetOrders(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
request = {
# 'market': market['id'], # rate limit 25 without a market, 1 with market specified
}
market = None
if symbol is not None:
market = self.market(symbol)
request['market'] = market['id']
response = await self.privateGetOrdersOpen(self.extend(request, params))
#
# [
# {
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "market":"ETH-EUR",
# "created":1590505649241,
# "updated":1590505649241,
# "status":"filled",
# "side":"sell",
# "orderType":"market",
# "amount":"0.249825",
# "amountRemaining":"0",
# "onHold":"0",
# "onHoldCurrency":"ETH",
# "filledAmount":"0.249825",
# "filledAmountQuote":"45.84038925",
# "feePaid":"0.12038925",
# "feeCurrency":"EUR",
# "fills":[
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "timestamp":1590505649245,
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ],
# "selfTradePrevention":"decrementAndCancel",
# "visible":false,
# "disableMarketProtection":false
# }
# ]
#
return self.parse_orders(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'new': 'open',
'canceled': 'canceled',
'canceledAuction': 'canceled',
'canceledSelfTradePrevention': 'canceled',
'canceledIOC': 'canceled',
'canceledFOK': 'canceled',
'canceledMarketProtection': 'canceled',
'canceledPostOnly': 'canceled',
'filled': 'closed',
'partiallyFilled': 'open',
'expired': 'canceled',
'rejected': 'canceled',
'awaitingTrigger': 'open', # https://github.com/ccxt/ccxt/issues/8489
}
return self.safe_string(statuses, status, status)
    def parse_order(self, order, market=None):
        """Convert a raw bitvavo order payload into a unified ccxt order structure.

        :param dict order: raw order from the exchange(samples below)
        :param dict|None market: unified market the order belongs to, if known
        :returns dict: a unified order structure(via self.safe_order)
        """
        #
        # cancelOrder, cancelAllOrders
        #
        #     {
        #         "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61"
        #     }
        #
        # createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder
        #
        #     {
        #         "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
        #         "market":"ETH-EUR",
        #         "created":1590505649241,
        #         "updated":1590505649241,
        #         "status":"filled",
        #         "side":"sell",
        #         "orderType":"market",
        #         "amount":"0.249825",
        #         "amountRemaining":"0",
        #         "price": "183.49", # limit orders only
        #         "onHold":"0",
        #         "onHoldCurrency":"ETH",
        #         "filledAmount":"0.249825",
        #         "filledAmountQuote":"45.84038925",
        #         "feePaid":"0.12038925",
        #         "feeCurrency":"EUR",
        #         "fills":[
        #             {
        #                 "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
        #                 "timestamp":1590505649245,
        #                 "amount":"0.249825",
        #                 "price":"183.49",
        #                 "taker":true,
        #                 "fee":"0.12038925",
        #                 "feeCurrency":"EUR",
        #                 "settled":true
        #             }
        #         ],
        #         "selfTradePrevention":"decrementAndCancel",
        #         "visible":false,
        #         "disableMarketProtection":false
        #         "timeInForce": "GTC",
        #         "postOnly": True,
        #     }
        #
        id = self.safe_string(order, 'orderId')
        timestamp = self.safe_integer(order, 'created')
        marketId = self.safe_string(order, 'market')
        # resolve the market from the raw id('ETH-EUR'), falling back to the given market
        market = self.safe_market(marketId, market, '-')
        symbol = market['symbol']
        status = self.parse_order_status(self.safe_string(order, 'status'))
        side = self.safe_string(order, 'side')
        type = self.safe_string(order, 'orderType')
        price = self.safe_string(order, 'price')  # present for limit orders only
        amount = self.safe_string(order, 'amount')
        remaining = self.safe_string(order, 'amountRemaining')
        filled = self.safe_string(order, 'filledAmount')
        cost = self.safe_string(order, 'filledAmountQuote')
        fee = None
        feeCost = self.safe_number(order, 'feePaid')
        if feeCost is not None:
            feeCurrencyId = self.safe_string(order, 'feeCurrency')
            feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCost,
                'currency': feeCurrencyCode,
            }
        # raw fills are passed through; safe_order parses them into unified trades
        rawTrades = self.safe_value(order, 'fills', [])
        timeInForce = self.safe_string(order, 'timeInForce')
        postOnly = self.safe_value(order, 'postOnly')
        # https://github.com/ccxt/ccxt/issues/8489
        stopPrice = self.safe_number(order, 'triggerPrice')
        return self.safe_order({
            'info': order,
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': postOnly,
            'side': side,
            'price': price,
            'stopPrice': stopPrice,
            'amount': amount,
            'cost': cost,
            'average': None,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': fee,
            'trades': rawTrades,
        }, market)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'market': market['id'],
# 'limit': 500,
# 'start': since,
# 'end': self.milliseconds(),
# 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
# 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0',
}
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetTrades(self.extend(request, params))
#
# [
# {
# "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4",
# "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0",
# "timestamp":1590505649245,
# "market":"ETH-EUR",
# "side":"sell",
# "amount":"0.249825",
# "price":"183.49",
# "taker":true,
# "fee":"0.12038925",
# "feeCurrency":"EUR",
# "settled":true
# }
# ]
#
return self.parse_trades(response, market, since, limit)
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
request = {
'symbol': currency['id'],
'amount': self.currency_to_precision(code, amount),
'address': address, # address or IBAN
# 'internal': False, # transfer to another Bitvavo user address, no fees
# 'addWithdrawalFee': False, # True = add the fee on top, otherwise the fee is subtracted from the amount
}
if tag is not None:
request['paymentId'] = tag
response = await self.privatePostWithdrawal(self.extend(request, params))
#
# {
# "success": True,
# "symbol": "BTC",
# "amount": "1.5"
# }
#
return self.parse_transaction(response, currency)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetWithdrawalHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590531212000,
# "symbol":"ETH",
# "amount":"0.091",
# "fee":"0.009",
# "status":"awaiting_bitvavo_inspection",
# "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b",
# "paymentId": "10002653",
# "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'withdrawal'})
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the bitvavo api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {
# 'symbol': currency['id'],
# 'limit': 500, # default 500, max 1000
# 'start': since,
# 'end': self.milliseconds(),
}
currency = None
if code is not None:
currency = self.currency(code)
request['symbol'] = currency['id']
if since is not None:
request['start'] = since
if limit is not None:
request['limit'] = limit # default 500, max 1000
response = await self.privateGetDepositHistory(self.extend(request, params))
#
# [
# {
# "timestamp":1590492401000,
# "symbol":"ETH",
# "amount":"0.249825",
# "fee":"0",
# "status":"completed",
# "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit, {'type': 'deposit'})
def parse_transaction_status(self, status):
statuses = {
'awaiting_processing': 'pending',
'awaiting_email_confirmation': 'pending',
'awaiting_bitvavo_inspection': 'pending',
'approved': 'pending',
'sending': 'pending',
'in_mempool': 'pending',
'processed': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """Convert a raw bitvavo deposit/withdrawal payload into a unified transaction structure.

        :param dict transaction: raw transaction from the exchange(samples below)
        :param dict|None currency: unified currency the transaction belongs to, if known
        :returns dict: a unified transaction structure
        """
        #
        # withdraw
        #
        #     {
        #         "success": True,
        #         "symbol": "BTC",
        #         "amount": "1.5"
        #     }
        #
        # fetchWithdrawals
        #
        #     {
        #         "timestamp": 1542967486256,
        #         "symbol": "BTC",
        #         "amount": "0.99994",
        #         "address": "BitcoinAddress",
        #         "paymentId": "10002653",
        #         "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3",
        #         "fee": "0.00006",
        #         "status": "awaiting_processing"
        #     }
        #
        # fetchDeposits
        #
        #     {
        #         "timestamp":1590492401000,
        #         "symbol":"ETH",
        #         "amount":"0.249825",
        #         "fee":"0",
        #         "status":"completed",
        #         "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2"
        #     }
        #
        # bitvavo does not return a transaction id in any of the samples above
        id = None
        timestamp = self.safe_integer(transaction, 'timestamp')
        currencyId = self.safe_string(transaction, 'symbol')
        code = self.safe_currency_code(currencyId, currency)
        status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
        amount = self.safe_number(transaction, 'amount')
        address = self.safe_string(transaction, 'address')
        txid = self.safe_string(transaction, 'txId')
        fee = None
        feeCost = self.safe_number(transaction, 'fee')
        if feeCost is not None:
            fee = {
                'cost': feeCost,
                'currency': code,
            }
        type = None
        # heuristic: only the withdraw and fetchWithdrawals payloads above
        # carry a 'success' or 'address' key - deposits carry neither
        if ('success' in transaction) or ('address' in transaction):
            type = 'withdrawal'
        else:
            type = 'deposit'
        tag = self.safe_string(transaction, 'paymentId')
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'addressFrom': None,
            'address': address,
            'addressTo': address,
            'tagFrom': None,
            'tag': tag,
            'tagTo': tag,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': None,
            'fee': fee,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the url, method, body and headers for a request, signing private calls.

        Private requests are authenticated with BITVAVO-ACCESS-* headers; the
        signature is an HMAC over(timestamp + method + url + payload), so the
        signed string must match exactly what goes on the wire.
        """
        query = self.omit(params, self.extract_params(path))
        url = '/' + self.version + '/' + self.implode_params(path, params)
        getOrDelete = (method == 'GET') or (method == 'DELETE')
        if getOrDelete:
            # GET/DELETE carry parameters in the query string, not the body
            if query:
                url += '?' + self.urlencode(query)
        if api == 'private':
            self.check_required_credentials()
            payload = ''
            if not getOrDelete:
                if query:
                    body = self.json(query)
                    payload = body
            timestamp = str(self.milliseconds())
            auth = timestamp + method + url + payload
            signature = self.hmac(self.encode(auth), self.encode(self.secret))
            # access window: how long(ms) the server accepts this timestamp
            accessWindow = self.safe_string(self.options, 'BITVAVO-ACCESS-WINDOW', '10000')
            headers = {
                'BITVAVO-ACCESS-KEY': self.apiKey,
                'BITVAVO-ACCESS-SIGNATURE': signature,
                'BITVAVO-ACCESS-TIMESTAMP': timestamp,
                'BITVAVO-ACCESS-WINDOW': accessWindow,
            }
            if not getOrDelete:
                headers['Content-Type'] = 'application/json'
        url = self.urls['api'][api] + url
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
#
# {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."}
# {"errorCode":203,"error":"symbol parameter is required."}
# {"errorCode":205,"error":"symbol parameter is invalid."}
#
errorCode = self.safe_string(response, 'errorCode')
error = self.safe_string(response, 'error')
if errorCode is not None:
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], error, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
raise ExchangeError(feedback) # unknown message
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
if ('noMarket' in config) and not ('market' in params):
return config['noMarket']
return self.safe_value(config, 'cost', 1)
| python/ccxt/async_support/bitvavo.py | 74,026 | -*- coding: utf-8 -*- PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.mdhow-to-contribute-code Netherlands 1000 requests per minute Unknown error. Operation may or may not have succeeded. Invalid JSON. You have been rate limited. Please observe the Bitvavo-Ratelimit-AllowAt header to see when you can send requests again. Failure to respect self limit will result in an IP ban. The default value is 1000 weighted requests per minute. Please contact support if you wish to increase self limit. You have been rate limited by the number of new orders. The default value is 100 new orders per second or 100.000 new orders per day. Please update existing orders instead of cancelling and creating orders. Please contact support if you wish to increase self limit. Your IP or API key has been banned for not respecting the rate limit. The ban expires at ${expiryInMs}. The matching engine is overloaded. Please wait 500ms and resubmit your order. The matching engine could not process your order in time. Please consider increasing the access window or resubmit your order. The matching engine did not respond in time. Operation may or may not have succeeded. Invalid endpoint. Please check url and HTTP method. ${param} url parameter is not supported. Please note that parameters are case-sensitive and use body parameters for PUT and POST requests. ${param} body parameter is not supported. Please note that parameters are case-sensitive and use url parameters for GET and DELETE requests. ${param} order parameter is not supported. Please note that certain parameters are only allowed for market or limit orders. {"errorCode":203,"error":"symbol parameter is required."} ${param} parameter is not supported. ${param} parameter is invalid. Use either ${paramA} or ${paramB}. The usage of both parameters at the same time is not supported. 
Amount exceeds the maximum allowed amount(1000000000). Price exceeds the maximum allowed amount(100000000000). Amount is below the minimum allowed amount for self asset. Price is below the minimum allowed amount(0.000000000000001). Price is too detailed Price is too detailed. A maximum of 15 digits behind the decimal point are allowed. {"errorCode":216,"error":"You do not have sufficient balance to complete self operation."} {"errorCode":217,"error":"Minimum order size in quote currency is 5 EUR or 0.001 BTC."} The order is rejected by the matching engine. The order is rejected by the matching engine. TimeInForce must be GTC when markets are paused. You must change at least one of amount, amountRemaining, price, timeInForce, selfTradePrevention or postOnly. {"errorCode":233,"error":"Order must be active(status new or partiallyFilled) to allow updating/cancelling."} Market orders cannot be updated. You can only have 100 open orders on each book. You can only update amount or amountRemaining, not both. {"errorCode":240,"error":"No order found. Please be aware that simultaneously updating the same order may return self error."} Authentication is required for self endpoint. {"errorCode":301,"error":"API Key must be of length 64."} Timestamp is invalid. This must be a timestamp in ms. See Bitvavo-Access-Timestamp header or timestamp parameter for websocket. Window must be between 100 and 60000 ms. Request was not received within acceptable window(default 30s, or custom with Bitvavo-Access-Window header) of Bitvavo-Access-Timestamp header(or timestamp parameter for websocket). '304': AuthenticationError, Authentication is required for self endpoint. {"errorCode":305,"error":"No active API key found."} No active API key found. Please ensure that you have confirmed the API key by e-mail. This key does not allow access from self IP. 
{"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."} {"errorCode":309,"error":"The signature is invalid."} This key does not allow trading actions. This key does not allow showing account information. This key does not allow withdrawal of funds. Websocket connections may not be used in a browser. Please use REST requests for self. This account is locked. Please contact support. Unknown error. Please contact support with a copy of your request. Deposits for self asset are not available at self time. You need to verify your identitiy before you can deposit and withdraw digital assets. You need to verify your phone number before you can deposit and withdraw digital assets. Could not complete self operation, because our node cannot be reached. Possibly under maintenance. You cannot withdraw digital assets during a cooldown period. This is the result of newly added bank accounts. {"errorCode":406,"error":"Your withdrawal is too small."} Internal transfer is not possible. {"errorCode":408,"error":"You do not have sufficient balance to complete self operation."} {"errorCode":409,"error":"This is not a verified bank account."} Withdrawals for self asset are not available at self time. You can not transfer assets to yourself. {"errorCode":412,"error":"eth_address_invalid."} This address violates the whitelist. You cannot withdraw assets within 2 minutes of logging in. {"errorCode":205,"error":"start parameter is invalid."} {"errorCode":205,"error":"symbol parameter is invalid."} {"errorCode":205,"error":"amount parameter is invalid."} {"errorCode":205,"error":"orderId parameter is invalid."} default 10 sec 1 second https://github.com/ccxt/ccxt/issues/7487 https://docs.bitfinex.com/docs/introductionamount-precision The amount field allows up to 8 decimals. Anything exceeding self will be rounded to the 8th decimal. 
https://docs.bitfinex.com/docs/introductionprice-precision The precision level of all trading prices is based on significant figures. All pairs on Bitfinex use up to 5 significant digits and up to 8 decimals(e.g. 1.2345, 123.45, 1234.5, 0.00012345). Prices submit with a precision larger than 5 will be cut by the API. {"time": 1590379519148} [ { "market":"ADA-BTC", "status":"trading", "trading" "halted" "auction" "base":"ADA", "quote":"BTC", "pricePrecision":5, "minOrderInBaseAsset":"100", "minOrderInQuoteAsset":"0.001", "orderTypes": ["market", "limit"] } ] self method is now redundant currencies are now fetched before markets [ { "symbol":"ADA", "name":"Cardano", "decimals":6, "depositFee":"0", "depositConfirmations":15, "depositStatus":"OK", "OK", "MAINTENANCE", "DELISTED" "withdrawalFee":"0.2", "withdrawalMinAmount":"0.2", "withdrawalStatus":"OK", "OK", "MAINTENANCE", "DELISTED" "networks": ["Mainnet"], "ETH", "NEO", "ONT", "SEPA", "VET" "message":"", }, ] { "market":"ETH-BTC", "open":"0.022578", "high":"0.023019", "low":"0.022573", "last":"0.023019", "volume":"25.16366324", "volumeQuote":"0.57333305", "bid":"0.023039", "bidSize":"0.53500578", "ask":"0.023041", "askSize":"0.47859202", "timestamp":1590381666900 } fetchTicker { "market":"ETH-BTC", "open":"0.022578", "high":"0.023019", "low":"0.022573", "last":"0.023019", "volume":"25.16366324", "volumeQuote":"0.57333305", "bid":"0.023039", "bidSize":"0.53500578", "ask":"0.023041", "askSize":"0.47859202", "timestamp":1590381666900 } previous day close [ { "market":"ADA-BTC", "open":"0.0000059595", "high":"0.0000059765", "low":"0.0000059595", "last":"0.0000059765", "volume":"2923.172", "volumeQuote":"0.01743483", "bid":"0.0000059515", "bidSize":"1117.630919", "ask":"0.0000059585", "askSize":"809.999739", "timestamp":1590382266324 } ] 'limit': 500, default 500, max 1000 'start': since, 'end': self.milliseconds(), 'tradeIdFrom': '57b1159b-6bf5-4cde-9e2c-6bd6a5678baf', 'tradeIdTo': 
'57b1159b-6bf5-4cde-9e2c-6bd6a5678baf', [ { "id":"94154c98-6e8b-4e33-92a8-74e33fc05650", "timestamp":1590382761859, "amount":"0.06026079", "price":"8095.3", "side":"buy" } ] fetchTrades(public) { "id":"94154c98-6e8b-4e33-92a8-74e33fc05650", "timestamp":1590382761859, "amount":"0.06026079", "price":"8095.3", "side":"buy" } createOrder, fetchOpenOrders, fetchOrders, editOrder(private) { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } fetchMyTrades(private) { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "timestamp":1590505649245, "market":"ETH-EUR", "side":"sell", "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } watchMyTrades(private) { event: 'fill', timestamp: 1590964470132, market: 'ETH-EUR', orderId: '85d082e1-eda4-4209-9580-248281a29a9a', fillId: '861d2da5-aa93-475c-8d9a-dce431bd4211', side: 'sell', amount: '0.1', price: '211.46', taker: True, fee: '0.056', feeCurrency: 'EUR' } { "fees": { "taker": "0.0025", "maker": "0.0015", "volume": "10000.00" } } { "market":"BTC-EUR", "nonce":35883831, "bids":[ ["8097.4","0.6229099"], ["8097.2","0.64151283"], ["8097.1","0.24966294"], ], "asks":[ ["8097.5","1.36916911"], ["8098.8","0.33462248"], ["8099.3","1.12908646"], ] } [ 1590383700000, "8088.5", "8088.5", "8088.5", "8088.5", "0.04788623" ] 'limit': 1440, default 1440, max 1440 'start': since, 'end': self.milliseconds(), https://github.com/ccxt/ccxt/issues/9227 default 1440, max 1440 [ [1590383700000,"8088.5","8088.5","8088.5","8088.5","0.04788623"], [1590383580000,"8091.3","8091.5","8091.3","8091.5","0.04931221"], [1590383520000,"8090.3","8092.7","8090.3","8092.5","0.04001286"], ] [ { "symbol": "BTC", "available": "1.57593193", "inOrder": "0.74832374" } ] { "address": "0x449889e3234514c45d57f7c5a571feba0c7ad567", "paymentId": 
"10002653" } 'market', 'limit', 'stopLoss', 'stopLossLimit', 'takeProfit', 'takeProfitLimit' 'amount': self.amount_to_precision(symbol, amount), 'price': self.price_to_precision(symbol, price), 'amountQuote': self.cost_to_precision(symbol, cost), 'timeInForce': 'GTC', 'GTC', 'IOC', 'FOK' 'selfTradePrevention': 'decrementAndCancel', 'decrementAndCancel', 'cancelOldest', 'cancelNewest', 'cancelBoth' 'postOnly': False, 'disableMarketProtection': False, don't cancel if the next fill price is 10% worse than the best fill price 'responseRequired': True, False is faster { "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR", "created":1590505649241, "updated":1590505649241, "status":"filled", "side":"sell", "orderType":"market", "amount":"0.249825", "amountRemaining":"0", "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825", "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR", "fills":[ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ], "selfTradePrevention":"decrementAndCancel", "visible":false, "disableMarketProtection":false } { "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61" } [ { "orderId": "1be6d0df-d5dc-4b53-a250-3376f3b393e6" } ] { "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR", "created":1590505649241, "updated":1590505649241, "status":"filled", "side":"sell", "orderType":"market", "amount":"0.249825", "amountRemaining":"0", "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825", "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR", "fills":[ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ], "selfTradePrevention":"decrementAndCancel", "visible":false, "disableMarketProtection":false 
} 'limit': 500, 'start': since, 'end': self.milliseconds(), 'orderIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0', 'orderIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0', default 500, max 1000 [ { "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR", "created":1590505649241, "updated":1590505649241, "status":"filled", "side":"sell", "orderType":"market", "amount":"0.249825", "amountRemaining":"0", "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825", "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR", "fills":[ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ], "selfTradePrevention":"decrementAndCancel", "visible":false, "disableMarketProtection":false } ] 'market': market['id'], rate limit 25 without a market, 1 with market specified [ { "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR", "created":1590505649241, "updated":1590505649241, "status":"filled", "side":"sell", "orderType":"market", "amount":"0.249825", "amountRemaining":"0", "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825", "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR", "fills":[ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ], "selfTradePrevention":"decrementAndCancel", "visible":false, "disableMarketProtection":false } ] https://github.com/ccxt/ccxt/issues/8489 cancelOrder, cancelAllOrders { "orderId": "2e7ce7fc-44e2-4d80-a4a7-d079c4750b61" } createOrder, fetchOrder, fetchOpenOrders, fetchOrders, editOrder { "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "market":"ETH-EUR", "created":1590505649241, "updated":1590505649241, "status":"filled", "side":"sell", "orderType":"market", "amount":"0.249825", 
"amountRemaining":"0", "price": "183.49", limit orders only "onHold":"0", "onHoldCurrency":"ETH", "filledAmount":"0.249825", "filledAmountQuote":"45.84038925", "feePaid":"0.12038925", "feeCurrency":"EUR", "fills":[ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "timestamp":1590505649245, "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ], "selfTradePrevention":"decrementAndCancel", "visible":false, "disableMarketProtection":false "timeInForce": "GTC", "postOnly": True, } https://github.com/ccxt/ccxt/issues/8489 'limit': 500, 'start': since, 'end': self.milliseconds(), 'tradeIdFrom': 'af76d6ce-9f7c-4006-b715-bb5d430652d0', 'tradeIdTo': 'af76d6ce-9f7c-4006-b715-bb5d430652d0', default 500, max 1000 [ { "id":"b0c86aa5-6ed3-4a2d-ba3a-be9a964220f4", "orderId":"af76d6ce-9f7c-4006-b715-bb5d430652d0", "timestamp":1590505649245, "market":"ETH-EUR", "side":"sell", "amount":"0.249825", "price":"183.49", "taker":true, "fee":"0.12038925", "feeCurrency":"EUR", "settled":true } ] address or IBAN 'internal': False, transfer to another Bitvavo user address, no fees 'addWithdrawalFee': False, True = add the fee on top, otherwise the fee is subtracted from the amount { "success": True, "symbol": "BTC", "amount": "1.5" } 'symbol': currency['id'], 'limit': 500, default 500, max 1000 'start': since, 'end': self.milliseconds(), default 500, max 1000 [ { "timestamp":1590531212000, "symbol":"ETH", "amount":"0.091", "fee":"0.009", "status":"awaiting_bitvavo_inspection", "address":"0xe42b309f1eE9F0cbf7f54CcF3bc2159eBfA6735b", "paymentId": "10002653", "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3", } ] 'symbol': currency['id'], 'limit': 500, default 500, max 1000 'start': since, 'end': self.milliseconds(), default 500, max 1000 [ { "timestamp":1590492401000, "symbol":"ETH", "amount":"0.249825", "fee":"0", "status":"completed", "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2" } ] 
withdraw { "success": True, "symbol": "BTC", "amount": "1.5" } fetchWithdrawals { "timestamp": 1542967486256, "symbol": "BTC", "amount": "0.99994", "address": "BitcoinAddress", "paymentId": "10002653", "txId": "927b3ea50c5bb52c6854152d305dfa1e27fc01d10464cf10825d96d69d235eb3", "fee": "0.00006", "status": "awaiting_processing" } fetchDeposits { "timestamp":1590492401000, "symbol":"ETH", "amount":"0.249825", "fee":"0", "status":"completed", "txId":"0x5167b473fd37811f9ef22364c3d54726a859ef9d98934b3a1e11d7baa8d2c2e2" } fallback to default error handler {"errorCode":308,"error":"The signature length is invalid(HMAC-SHA256 should return a 64 length hexadecimal string)."} {"errorCode":203,"error":"symbol parameter is required."} {"errorCode":205,"error":"symbol parameter is invalid."} unknown message | 20,401 | en | 0.578707 |
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
  """Determines if the error may be from installation corruption.

  Args:
    err: Exception err.

  Returns:
    bool, True if installation error, False otherwise
  """
  if not isinstance(err, backend.CommandLoadFailure):
    return False
  # a command failing to load because of an ImportError points at a broken
  # or partially-removed installation rather than a bug in the command
  return isinstance(err.root_exception, ImportError)
def _PrintInstallationAction(err, err_string):
  """Prompts installation error action.

  Args:
    err: Exception err.
    err_string: Exception err string.
  """
  # This usually indicates installation corruption.
  # We do want to suggest `gcloud components reinstall` here (ex. as opposed
  # to the similar message in gcloud.py), because there's a good chance it'll
  # work (rather than a manual reinstall).
  # Don't suggest `gcloud feedback`, because this is probably an
  # installation problem.
  message = (
      'gcloud failed to load ({0}): {1}\n\n'
      'This usually indicates corruption in your gcloud installation or '
      'problems with your Python interpreter.\n\n'
      'Please verify that the following is the path to a working Python 2.7 '
      'executable:\n'
      '  {2}\n'
      'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
      'point to a working Python 2.7 executable.\n\n'
      'If you are still experiencing problems, please run the following '
      'command to reinstall:\n'
      '  $ gcloud components reinstall\n\n'
      'If that command fails, please reinstall the Cloud SDK using the '
      'instructions here:\n'
      '  https://cloud.google.com/sdk/')
  log.error(message.format(err.command, err_string, sys.executable))
# Service names under which reports are filed with the Error Reporting API:
CRASH_SERVICE = 'gcloud'  # internal gcloud crashes
ERROR_SERVICE = 'gcloud-user-error'  # errors caused by user input
# Project that collects the reports, and the API key that allows reports to
# be sent anonymously (no user credentials required).
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
  """Returns a client that uses an API key for Cloud SDK crash reports.

  Returns:
    An error reporting client that uses an API key for Cloud SDK crash reports.
  """
  # Build the client without user credentials; authentication happens via the
  # global API key instead.
  client = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)(
      get_credentials=False, http=http.Http())
  client.AddGlobalParam('key', CRASH_API_KEY)
  return client
def ReportError(err, is_crash):
  """Report the anonymous crash information to the Error Reporting service.

  Must be called from within an exception handler: the reported stacktrace is
  taken from the current exception state, not from err.

  Args:
    err: Exception, the error that caused the crash.
    is_crash: bool, True if this is a crash, False if it is a user error.
  """
  # Respect the user's opt-out of usage reporting.
  if properties.VALUES.core.disable_usage_reporting.GetBool():
    return
  # BUG FIX: traceback.format_exc() takes an optional traceback *limit*, not
  # an exception object. Passing err only "worked" because Python 2 allowed
  # the int-vs-object comparison inside the traceback module (which always
  # evaluated as "no limit"); on Python 3 it raises. Call with no arguments.
  stacktrace = traceback.format_exc()
  # Strip user paths and similar private data before sending anything.
  stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
      stacktrace)
  command = properties.VALUES.metrics.command_name.Get()
  cid = metrics.GetCIDIfMetricsEnabled()

  client = _GetReportingClient()
  reporter = util.ErrorReporting(client)
  try:
    method_config = client.projects_events.GetMethodConfig('Report')
    request = reporter.GenerateReportRequest(
        error_message=stacktrace,
        service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
        version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
        request_url=command, user=cid)
    http_request = client.projects_events.PrepareHttpRequest(
        method_config, request)
    # Fire-and-forget via the metrics beacon rather than a blocking API call.
    metrics.CustomBeacon(http_request.url, http_request.http_method,
                         http_request.body, http_request.headers)
  except apitools_exceptions.Error as e:
    # Best effort only: failing to report a crash must not itself crash.
    log.file_only_logger.error(
        'Unable to report crash stacktrace:\n{0}'.format(
            console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
  """Checks if installation error occurred, then proceeds with Error Reporting.

  Args:
    err: Exception err.
  """
  err_string = console_attr.EncodeForConsole(err)
  log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
  # A corrupted installation gets a reinstall hint instead of a crash report.
  if _IsInstallationCorruption(err):
    _PrintInstallationAction(err, err_string)
    return
  log.error(u'gcloud crashed ({0}): {1}'.format(
      getattr(err, 'error_name', type(err).__name__), err_string))
  ReportError(err, is_crash=True)
  log.err.Print('\nIf you would like to report this issue, please run the '
                'following command:')
  log.err.Print('  gcloud feedback')
  log.err.Print('\nTo check gcloud for common problems, please run the '
                'following command:')
  log.err.Print('  gcloud info --run-diagnostics')
| google-cloud-sdk/lib/googlecloudsdk/command_lib/crash_handling.py | 5,593 | Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
Error Reporting Handler.
Copyright 2013 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This usually indicates installation corruption. We do want to suggest `gcloud components reinstall` here (ex. as opposed to the similar message in gcloud.py), because there's a good chance it'll work (rather than a manual reinstall). Don't suggest `gcloud feedback`, because this is probably an installation problem. | 1,624 | en | 0.832251 |
import inspect
import textwrap
import pytest
from _pytest.compat import MODULE_NOT_FOUND_ERROR
from _pytest.doctest import _get_checker
from _pytest.doctest import _is_mocked
from _pytest.doctest import _patch_unwrap_mock_aware
from _pytest.doctest import DoctestItem
from _pytest.doctest import DoctestModule
from _pytest.doctest import DoctestTextfile
class TestDoctests:
    # Covers collection and execution of doctests from text files and module
    # docstrings, plus fixtures, encodings, and failure reporting.

    def test_collect_testtextfile(self, testdir):
        # A *.txt file with examples collects as one DoctestItem under a
        # DoctestTextfile parent; an empty text file collects nothing.
        w = testdir.maketxtfile(whatever="")
        checkfile = testdir.maketxtfile(
            test_something="""
                alskdjalsdk
                >>> i = 5
                >>> i-1
                4
            """
        )

        for x in (testdir.tmpdir, checkfile):
            # print "checking that %s returns custom items" % (x,)
            items, reprec = testdir.inline_genitems(x)
            assert len(items) == 1
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[0].parent, DoctestTextfile)
        # Empty file has no items.
        items, reprec = testdir.inline_genitems(w)
        assert len(items) == 0

    def test_collect_module_empty(self, testdir):
        # A module with no doctests yields no items under --doctest-modules.
        path = testdir.makepyfile(whatever="#")
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p, "--doctest-modules")
            assert len(items) == 0

    def test_collect_module_single_modulelevel_doctest(self, testdir):
        # A doctest in the module docstring collects as one item whose parent
        # is a DoctestModule collector.
        path = testdir.makepyfile(whatever='""">>> pass"""')
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p, "--doctest-modules")
            assert len(items) == 1
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)

    def test_collect_module_two_doctest_one_modulelevel(self, testdir):
        # Module-level doctest + function doctest -> two items sharing the
        # same DoctestModule parent.
        path = testdir.makepyfile(
            whatever="""
            '>>> x = None'
            def my_func():
                ">>> magic = 42 "
        """
        )
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p, "--doctest-modules")
            assert len(items) == 2
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[1], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)
            assert items[0].parent is items[1].parent

    def test_collect_module_two_doctest_no_modulelevel(self, testdir):
        # Only docstrings that actually contain examples produce items; the
        # '>>>' hidden behind a comment in unuseful() must not collect.
        path = testdir.makepyfile(
            whatever="""
            '# Empty'
            def my_func():
                ">>> magic = 42 "
            def unuseful():
                '''
                # This is a function
                # >>> # it doesn't have any doctest
                '''
            def another():
                '''
                # This is another function
                >>> import os # this one does have a doctest
                '''
        """
        )
        for p in (path, testdir.tmpdir):
            items, reprec = testdir.inline_genitems(p, "--doctest-modules")
            assert len(items) == 2
            assert isinstance(items[0], DoctestItem)
            assert isinstance(items[1], DoctestItem)
            assert isinstance(items[0].parent, DoctestModule)
            assert items[0].parent is items[1].parent

    def test_simple_doctestfile(self, testdir):
        # A failing example in a text-file doctest is reported as a failure.
        p = testdir.maketxtfile(
            test_doc="""
            >>> x = 1
            >>> x == 1
            False
        """
        )
        reprec = testdir.inline_run(p)
        reprec.assertoutcome(failed=1)

    def test_new_pattern(self, testdir):
        # --doctest-glob lets non-default file names be collected as doctests.
        p = testdir.maketxtfile(
            xdoc="""
            >>> x = 1
            >>> x == 1
            False
        """
        )
        reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
        reprec.assertoutcome(failed=1)

    def test_multiple_patterns(self, testdir):
        """Test support for multiple --doctest-glob arguments (#1255).
        """
        testdir.maketxtfile(
            xdoc="""
            >>> 1
            1
        """
        )
        testdir.makefile(
            ".foo",
            test="""
            >>> 1
            1
        """,
        )
        testdir.maketxtfile(
            test_normal="""
            >>> 1
            1
        """
        )
        expected = {"xdoc.txt", "test.foo", "test_normal.txt"}
        assert {x.basename for x in testdir.tmpdir.listdir()} == expected
        args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
        result = testdir.runpytest(*args)
        result.stdout.fnmatch_lines(["*test.foo *", "*xdoc.txt *", "*2 passed*"])
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*test_normal.txt *", "*1 passed*"])

    @pytest.mark.parametrize(
        "   test_string,    encoding",
        [("foo", "ascii"), ("öäü", "latin1"), ("öäü", "utf-8")],
    )
    def test_encoding(self, testdir, test_string, encoding):
        """Test support for doctest_encoding ini option.
        """
        testdir.makeini(
            """
            [pytest]
            doctest_encoding={}
        """.format(
                encoding
            )
        )
        doctest = """
            >>> "{}"
            {}
        """.format(
            test_string, repr(test_string)
        )
        # Write the file with the encoding under test; the doctest must round
        # trip through it unchanged.
        testdir._makefile(".txt", [doctest], {}, encoding=encoding)

        result = testdir.runpytest()

        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_doctest_unexpected_exception(self, testdir):
        # An exception raised by an example is shown with full context.
        testdir.maketxtfile(
            """
            >>> i = 0
            >>> 0 / i
            2
        """
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(
            [
                "*unexpected_exception*",
                "*>>> i = 0*",
                "*>>> 0 / i*",
                "*UNEXPECTED*ZeroDivision*",
            ]
        )

    def test_doctest_skip(self, testdir):
        # pytest.skip() invoked inside a doctest marks the item skipped.
        testdir.maketxtfile(
            """
            >>> 1
            1
            >>> import pytest
            >>> pytest.skip("")
        """
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(["*1 skipped*"])

    def test_docstring_partial_context_around_error(self, testdir):
        """Test that we show some context before the actual line of a failing
        doctest.
        """
        testdir.makepyfile(
            '''
            def foo():
                """
                text-line-1
                text-line-2
                text-line-3
                text-line-4
                text-line-5
                text-line-6
                text-line-7
                text-line-8
                text-line-9
                text-line-10
                text-line-11
                >>> 1 + 1
                3

                text-line-after
                """
            '''
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(
            [
                "*docstring_partial_context_around_error*",
                "005*text-line-3",
                "006*text-line-4",
                "013*text-line-11",
                "014*>>> 1 + 1",
                "Expected:",
                "    3",
                "Got:",
                "    2",
            ]
        )
        # lines below should be trimmed out
        result.stdout.no_fnmatch_line("*text-line-2*")
        result.stdout.no_fnmatch_line("*text-line-after*")

    def test_docstring_full_context_around_error(self, testdir):
        """Test that we show the whole context before the actual line of a failing
        doctest, provided that the context is up to 10 lines long.
        """
        testdir.makepyfile(
            '''
            def foo():
                """
                text-line-1
                text-line-2

                >>> 1 + 1
                3
                """
            '''
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(
            [
                "*docstring_full_context_around_error*",
                "003*text-line-1",
                "004*text-line-2",
                "006*>>> 1 + 1",
                "Expected:",
                "    3",
                "Got:",
                "    2",
            ]
        )

    def test_doctest_linedata_missing(self, testdir):
        # Doctests inside properties have no line info; the report must say so
        # instead of crashing.
        testdir.tmpdir.join("hello.py").write(
            textwrap.dedent(
                """\
                class Fun(object):
                    @property
                    def test(self):
                        '''
                        >>> a = 1
                        >>> 1/0
                        '''
                """
            )
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(
            [
                "*hello*",
                "*EXAMPLE LOCATION UNKNOWN, not showing all tests of that example*",
                "*1/0*",
                "*UNEXPECTED*ZeroDivision*",
                "*1 failed*",
            ]
        )

    def test_doctest_unex_importerror_only_txt(self, testdir):
        # Import failure inside a text doctest is reported as an unexpected
        # ModuleNotFoundError/ImportError for the example itself.
        testdir.maketxtfile(
            """
            >>> import asdalsdkjaslkdjasd
            >>>
        """
        )
        result = testdir.runpytest()
        # doctest is never executed because of error during hello.py collection
        result.stdout.fnmatch_lines(
            [
                "*>>> import asdals*",
                "*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR),
                "{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR),
            ]
        )

    def test_doctest_unex_importerror_with_module(self, testdir):
        # A module that fails at import time aborts collection entirely.
        testdir.tmpdir.join("hello.py").write(
            textwrap.dedent(
                """\
                import asdalsdkjaslkdjasd
            """
            )
        )
        testdir.maketxtfile(
            """
            >>> import hello
            >>>
        """
        )
        result = testdir.runpytest("--doctest-modules")
        # doctest is never executed because of error during hello.py collection
        result.stdout.fnmatch_lines(
            [
                "*ERROR collecting hello.py*",
                "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
                "*Interrupted: 1 error during collection*",
            ]
        )

    def test_doctestmodule(self, testdir):
        # A failing module-docstring doctest reports as a failure.
        p = testdir.makepyfile(
            """
            '''
                >>> x = 1
                >>> x == 1
                False

            '''
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(failed=1)

    def test_doctestmodule_external_and_issue116(self, testdir):
        # Doctests inside a package __init__ report line numbers relative to
        # the docstring (#116).
        p = testdir.mkpydir("hello")
        p.join("__init__.py").write(
            textwrap.dedent(
                """\
                def somefunc():
                    '''
                        >>> i = 0
                        >>> i + 1
                        2
                    '''
                """
            )
        )
        result = testdir.runpytest(p, "--doctest-modules")
        result.stdout.fnmatch_lines(
            [
                "003 *>>> i = 0",
                "004 *>>> i + 1",
                "*Expected:",
                "*    2",
                "*Got:",
                "*    1",
                "*:4: DocTestFailure",
            ]
        )

    def test_txtfile_failing(self, testdir):
        # Failure report for a text-file doctest includes source lines,
        # Expected/Got, and the file:line location.
        p = testdir.maketxtfile(
            """
            >>> i = 0
            >>> i + 1
            2
        """
        )
        result = testdir.runpytest(p, "-s")
        result.stdout.fnmatch_lines(
            [
                "001 >>> i = 0",
                "002 >>> i + 1",
                "Expected:",
                "    2",
                "Got:",
                "    1",
                "*test_txtfile_failing.txt:2: DocTestFailure",
            ]
        )

    def test_txtfile_with_fixtures(self, testdir):
        # getfixture() is available inside text-file doctests.
        p = testdir.maketxtfile(
            """
            >>> dir = getfixture('tmpdir')
            >>> type(dir).__name__
            'LocalPath'
        """
        )
        reprec = testdir.inline_run(p)
        reprec.assertoutcome(passed=1)

    def test_txtfile_with_usefixtures_in_ini(self, testdir):
        # usefixtures declared in the ini file applies to text doctests too.
        testdir.makeini(
            """
            [pytest]
            usefixtures = myfixture
        """
        )
        testdir.makeconftest(
            """
            import pytest
            @pytest.fixture
            def myfixture(monkeypatch):
                monkeypatch.setenv("HELLO", "WORLD")
        """
        )

        p = testdir.maketxtfile(
            """
            >>> import os
            >>> os.environ["HELLO"]
            'WORLD'
        """
        )
        reprec = testdir.inline_run(p)
        reprec.assertoutcome(passed=1)

    def test_doctestmodule_with_fixtures(self, testdir):
        # getfixture() works inside module-docstring doctests as well.
        p = testdir.makepyfile(
            """
            '''
                >>> dir = getfixture('tmpdir')
                >>> type(dir).__name__
                'LocalPath'
            '''
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_doctestmodule_three_tests(self, testdir):
        # Module docstring + two function docstrings -> three passing items;
        # functions without examples are ignored.
        p = testdir.makepyfile(
            """
            '''
            >>> dir = getfixture('tmpdir')
            >>> type(dir).__name__
            'LocalPath'
            '''
            def my_func():
                '''
                >>> magic = 42
                >>> magic - 42
                0
                '''
            def unuseful():
                pass
            def another():
                '''
                >>> import os
                >>> os is os
                True
                '''
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=3)

    def test_doctestmodule_two_tests_one_fail(self, testdir):
        # Failures and passes within the same class are counted independently.
        p = testdir.makepyfile(
            """
            class MyClass(object):
                def bad_meth(self):
                    '''
                    >>> magic = 42
                    >>> magic
                    0
                    '''
                def nice_meth(self):
                    '''
                    >>> magic = 42
                    >>> magic - 42
                    0
                    '''
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(failed=1, passed=1)

    def test_ignored_whitespace(self, testdir):
        # NORMALIZE_WHITESPACE from the ini file makes trailing spaces in the
        # expected output irrelevant.
        testdir.makeini(
            """
            [pytest]
            doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
        """
        )
        p = testdir.makepyfile(
            """
            class MyClass(object):
                '''
                >>> a = "foo    "
                >>> print(a)
                foo
                '''
                pass
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_non_ignored_whitespace(self, testdir):
        # Without NORMALIZE_WHITESPACE the same doctest fails.
        testdir.makeini(
            """
            [pytest]
            doctest_optionflags = ELLIPSIS
        """
        )
        p = testdir.makepyfile(
            """
            class MyClass(object):
                '''
                >>> a = "foo    "
                >>> print(a)
                foo
                '''
                pass
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(failed=1, passed=0)

    def test_ignored_whitespace_glob(self, testdir):
        # Same NORMALIZE_WHITESPACE behavior for glob-collected text files.
        testdir.makeini(
            """
            [pytest]
            doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE
        """
        )
        p = testdir.maketxtfile(
            xdoc="""
            >>> a = "foo    "
            >>> print(a)
            foo
        """
        )
        reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
        reprec.assertoutcome(passed=1)

    def test_non_ignored_whitespace_glob(self, testdir):
        # ...and failure without the flag, for glob-collected files.
        testdir.makeini(
            """
            [pytest]
            doctest_optionflags = ELLIPSIS
        """
        )
        p = testdir.maketxtfile(
            xdoc="""
            >>> a = "foo    "
            >>> print(a)
            foo
        """
        )
        reprec = testdir.inline_run(p, "--doctest-glob=x*.txt")
        reprec.assertoutcome(failed=1, passed=0)

    def test_contains_unicode(self, testdir):
        """Fix internal error with docstrings containing non-ascii characters.
        """
        testdir.makepyfile(
            '''\
            def foo():
                """
                >>> name = 'с' # not letter 'c' but instead Cyrillic 's'.
                'anything'
                """
            '''
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(["Got nothing", "* 1 failed in*"])

    def test_ignore_import_errors_on_doctest(self, testdir):
        # --doctest-ignore-import-errors turns the collection error into a
        # skip, but the doctest inside the module still fails (module never
        # imported successfully).
        p = testdir.makepyfile(
            """
            import asdf

            def add_one(x):
                '''
                >>> add_one(1)
                2
                '''
                return x + 1
        """
        )

        reprec = testdir.inline_run(
            p, "--doctest-modules", "--doctest-ignore-import-errors"
        )
        reprec.assertoutcome(skipped=1, failed=1, passed=0)

    def test_junit_report_for_doctest(self, testdir):
        """
        #713: Fix --junit-xml option when used with --doctest-modules.
        """
        p = testdir.makepyfile(
            """
            def foo():
                '''
                >>> 1 + 1
                3
                '''
                pass
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules", "--junit-xml=junit.xml")
        reprec.assertoutcome(failed=1)

    def test_unicode_doctest(self, testdir):
        """
        Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
        characters.
        """
        p = testdir.maketxtfile(
            test_unicode_doctest="""
            .. doctest::

                >>> print(
                ...    "Hi\\n\\nByé")
                Hi
                ...
                Byé
                >>> 1/0  # Byé
                1
        """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(
            ["*UNEXPECTED EXCEPTION: ZeroDivisionError*", "*1 failed*"]
        )

    def test_unicode_doctest_module(self, testdir):
        """
        Test case for issue 2434: DecodeError on Python 2 when doctest docstring
        contains non-ascii characters.
        """
        p = testdir.makepyfile(
            test_unicode_doctest_module="""
            def fix_bad_unicode(text):
                '''
                    >>> print(fix_bad_unicode('único'))
                    único
                '''
                return "único"
        """
        )
        result = testdir.runpytest(p, "--doctest-modules")
        result.stdout.fnmatch_lines(["* 1 passed *"])

    def test_print_unicode_value(self, testdir):
        """
        Test case for issue 3583: Printing Unicode in doctest under Python 2.7
        doesn't work
        """
        p = testdir.maketxtfile(
            test_print_unicode_value=r"""
            Here is a doctest::

                >>> print('\xE5\xE9\xEE\xF8\xFC')
                åéîøü
        """
        )
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(["* 1 passed *"])

    def test_reportinfo(self, testdir):
        """
        Test case to make sure that DoctestItem.reportinfo() returns lineno.
        """
        p = testdir.makepyfile(
            test_reportinfo="""
            def foo(x):
                '''
                    >>> foo('a')
                    'b'
                '''
                return 'c'
        """
        )
        items, reprec = testdir.inline_genitems(p, "--doctest-modules")
        reportinfo = items[0].reportinfo()
        assert reportinfo[1] == 1

    def test_valid_setup_py(self, testdir):
        """
        Test to make sure that pytest ignores valid setup.py files when ran
        with --doctest-modules
        """
        p = testdir.makepyfile(
            setup="""
            from setuptools import setup, find_packages
            setup(name='sample',
                  version='0.0',
                  description='description',
                  packages=find_packages()
            )
        """
        )
        result = testdir.runpytest(p, "--doctest-modules")
        result.stdout.fnmatch_lines(["*collected 0 items*"])

    def test_invalid_setup_py(self, testdir):
        """
        Test to make sure that pytest reads setup.py files that are not used
        for python packages when ran with --doctest-modules
        """
        p = testdir.makepyfile(
            setup="""
            def test_foo():
                return 'bar'
        """
        )
        result = testdir.runpytest(p, "--doctest-modules")
        result.stdout.fnmatch_lines(["*collected 1 item*"])
class TestLiterals:
    # Covers the ALLOW_UNICODE / ALLOW_BYTES / NUMBER doctest option flags.

    @pytest.mark.parametrize("config_mode", ["ini", "comment"])
    def test_allow_unicode(self, testdir, config_mode):
        """Test that doctests which output unicode work in all python versions
        tested by pytest when the ALLOW_UNICODE option is used (either in
        the ini file or by an inline comment).
        """
        if config_mode == "ini":
            testdir.makeini(
                """
            [pytest]
            doctest_optionflags = ALLOW_UNICODE
            """
            )
            comment = ""
        else:
            comment = "#doctest: +ALLOW_UNICODE"

        testdir.maketxtfile(
            test_doc="""
            >>> b'12'.decode('ascii') {comment}
            '12'
        """.format(
                comment=comment
            )
        )
        testdir.makepyfile(
            foo="""
            def foo():
                '''
                >>> b'12'.decode('ascii') {comment}
                '12'
                '''
        """.format(
                comment=comment
            )
        )
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    @pytest.mark.parametrize("config_mode", ["ini", "comment"])
    def test_allow_bytes(self, testdir, config_mode):
        """Test that doctests which output bytes work in all python versions
        tested by pytest when the ALLOW_BYTES option is used (either in
        the ini file or by an inline comment)(#1287).
        """
        if config_mode == "ini":
            testdir.makeini(
                """
            [pytest]
            doctest_optionflags = ALLOW_BYTES
            """
            )
            comment = ""
        else:
            comment = "#doctest: +ALLOW_BYTES"

        testdir.maketxtfile(
            test_doc="""
            >>> b'foo' {comment}
            'foo'
        """.format(
                comment=comment
            )
        )
        testdir.makepyfile(
            foo="""
            def foo():
                '''
                >>> b'foo' {comment}
                'foo'
                '''
        """.format(
                comment=comment
            )
        )
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=2)

    def test_unicode_string(self, testdir):
        """Test that doctests which output unicode fail in Python 2 when
        the ALLOW_UNICODE option is not used. The same test should pass
        in Python 3.
        """
        testdir.maketxtfile(
            test_doc="""
            >>> b'12'.decode('ascii')
            '12'
        """
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_bytes_literal(self, testdir):
        """Test that doctests which output bytes fail in Python 3 when
        the ALLOW_BYTES option is not used. (#1287).
        """
        testdir.maketxtfile(
            test_doc="""
            >>> b'foo'
            'foo'
        """
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(failed=1)

    def test_number_re(self) -> None:
        # The NUMBER checker's regex must accept float-looking literals
        # (and round-trip to the same value) but reject ints and words.
        _number_re = _get_checker()._number_re  # type: ignore
        for s in [
            "1.",
            "+1.",
            "-1.",
            ".1",
            "+.1",
            "-.1",
            "0.1",
            "+0.1",
            "-0.1",
            "1e5",
            "+1e5",
            "1e+5",
            "+1e+5",
            "1e-5",
            "+1e-5",
            "-1e-5",
            "1.2e3",
            "-1.2e-3",
        ]:
            print(s)
            m = _number_re.match(s)
            assert m is not None
            assert float(m.group()) == pytest.approx(float(s))
        for s in ["1", "abc"]:
            print(s)
            assert _number_re.match(s) is None

    @pytest.mark.parametrize("config_mode", ["ini", "comment"])
    def test_number_precision(self, testdir, config_mode):
        """Test the NUMBER option."""
        if config_mode == "ini":
            testdir.makeini(
                """
            [pytest]
            doctest_optionflags = NUMBER
            """
            )
            comment = ""
        else:
            comment = "#doctest: +NUMBER"

        testdir.maketxtfile(
            test_doc="""

            Scalars:

            >>> import math
            >>> math.pi {comment}
            3.141592653589793
            >>> math.pi {comment}
            3.1416
            >>> math.pi {comment}
            3.14
            >>> -math.pi {comment}
            -3.14
            >>> math.pi {comment}
            3.
            >>> 3. {comment}
            3.0
            >>> 3. {comment}
            3.
            >>> 3. {comment}
            3.01
            >>> 3. {comment}
            2.99
            >>> .299 {comment}
            .3
            >>> .301 {comment}
            .3
            >>> 951. {comment}
            1e3
            >>> 1049. {comment}
            1e3
            >>> -1049. {comment}
            -1e3
            >>> 1e3 {comment}
            1e3
            >>> 1e3 {comment}
            1000.

            Lists:

            >>> [3.1415, 0.097, 13.1, 7, 8.22222e5, 0.598e-2] {comment}
            [3.14, 0.1, 13., 7, 8.22e5, 6.0e-3]
            >>> [[0.333, 0.667], [0.999, 1.333]] {comment}
            [[0.33, 0.667], [0.999, 1.333]]
            >>> [[[0.101]]] {comment}
            [[[0.1]]]

            Doesn't barf on non-numbers:

            >>> 'abc' {comment}
            'abc'
            >>> None {comment}
        """.format(
                comment=comment
            )
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    @pytest.mark.parametrize(
        "expression,output",
        [
            # ints shouldn't match floats:
            ("3.0", "3"),
            ("3e0", "3"),
            ("1e3", "1000"),
            ("3", "3.0"),
            # Rounding:
            ("3.1", "3.0"),
            ("3.1", "3.2"),
            ("3.1", "4.0"),
            ("8.22e5", "810000.0"),
            # Only the actual output is rounded up, not the expected output:
            ("3.0", "2.98"),
            ("1e3", "999"),
            # The current implementation doesn't understand that numbers inside
            # strings shouldn't be treated as numbers:
            pytest.param("'3.1416'", "'3.14'", marks=pytest.mark.xfail),
        ],
    )
    def test_number_non_matches(self, testdir, expression, output):
        # Pairs above must NOT be considered equal under +NUMBER.
        testdir.maketxtfile(
            test_doc="""
            >>> {expression} #doctest: +NUMBER
            {output}
        """.format(
                expression=expression, output=output
            )
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=0, failed=1)

    def test_number_and_allow_unicode(self, testdir):
        # NUMBER composes with ALLOW_UNICODE and ALLOW_BYTES in one example.
        testdir.maketxtfile(
            test_doc="""
            >>> from collections import namedtuple
            >>> T = namedtuple('T', 'a b c')
            >>> T(a=0.2330000001, b=u'str', c=b'bytes') # doctest: +ALLOW_UNICODE, +ALLOW_BYTES, +NUMBER
            T(a=0.233, b=u'str', c='bytes')
        """
        )
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)
class TestDoctestSkips:
    """
    If all examples in a doctest are skipped due to the SKIP option, then
    the tests should be SKIPPED rather than PASSED. (#957)
    """

    @pytest.fixture(params=["text", "module"])
    def makedoctest(self, testdir, request):
        # Factory fixture: writes the given doctest either as a text file or
        # as a module docstring, so every test runs in both collection modes.
        def makeit(doctest):
            mode = request.param
            if mode == "text":
                testdir.maketxtfile(doctest)
            else:
                assert mode == "module"
                testdir.makepyfile('"""\n%s"""' % doctest)

        return makeit

    def test_one_skipped(self, testdir, makedoctest):
        # One skipped example plus one passing example -> item PASSES.
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            4
        """
        )
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=1)

    def test_one_skipped_failed(self, testdir, makedoctest):
        # A skipped example does not shield a later failing example.
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2
            200
        """
        )
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(failed=1)

    def test_all_skipped(self, testdir, makedoctest):
        # All examples skipped -> the whole item is reported as SKIPPED.
        makedoctest(
            """
            >>> 1 + 1  # doctest: +SKIP
            2
            >>> 2 + 2  # doctest: +SKIP
            200
        """
        )
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(skipped=1)

    def test_vacuous_all_skipped(self, testdir, makedoctest):
        # An empty doctest collects nothing: neither passed nor skipped.
        makedoctest("")
        reprec = testdir.inline_run("--doctest-modules")
        reprec.assertoutcome(passed=0, skipped=0)

    def test_continue_on_failure(self, testdir):
        # With --doctest-continue-on-failure all failures in one doctest are
        # reported together instead of stopping at the first one.
        testdir.maketxtfile(
            test_something="""
            >>> i = 5
            >>> def foo():
            ...     raise ValueError('error1')
            >>> foo()
            >>> i
            >>> i + 2
            7
            >>> i + 1
        """
        )
        result = testdir.runpytest("--doctest-modules", "--doctest-continue-on-failure")
        result.assert_outcomes(passed=0, failed=1)
        # The lines that contains the failure are 4, 5, and 8.  The first one
        # is a stack trace and the other two are mismatches.
        result.stdout.fnmatch_lines(
            ["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
        )
class TestDoctestAutoUseFixtures:
    # Verifies that autouse fixtures of every scope interact correctly with
    # doctest items (#768, #1057, #1100).

    SCOPES = ["module", "session", "class", "function"]

    def test_doctest_module_session_fixture(self, testdir):
        """Test that session fixtures are initialized for doctest modules (#768)
        """
        # session fixture which changes some global data, which will
        # be accessed by doctests in a module
        testdir.makeconftest(
            """
            import pytest
            import sys

            @pytest.yield_fixture(autouse=True, scope='session')
            def myfixture():
                assert not hasattr(sys, 'pytest_session_data')
                sys.pytest_session_data = 1
                yield
                del sys.pytest_session_data
        """
        )
        testdir.makepyfile(
            foo="""
            import sys

            def foo():
              '''
              >>> assert sys.pytest_session_data == 1
              '''

            def bar():
              '''
              >>> assert sys.pytest_session_data == 1
              '''
        """
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(["*2 passed*"])

    @pytest.mark.parametrize("scope", SCOPES)
    @pytest.mark.parametrize("enable_doctest", [True, False])
    def test_fixture_scopes(self, testdir, scope, enable_doctest):
        """Test that auto-use fixtures work properly with doctest modules.
        See #1057 and #1100.
        """
        testdir.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                return 99
        """.format(
                scope=scope
            )
        )
        testdir.makepyfile(
            test_1='''
            def test_foo():
                """
                >>> getfixture('auto') + 1
                100
                """
            def test_bar():
                assert 1
            '''
        )
        params = ("--doctest-modules",) if enable_doctest else ()
        passes = 3 if enable_doctest else 2
        result = testdir.runpytest(*params)
        result.stdout.fnmatch_lines(["*=== %d passed in *" % passes])

    @pytest.mark.parametrize("scope", SCOPES)
    @pytest.mark.parametrize("autouse", [True, False])
    @pytest.mark.parametrize("use_fixture_in_doctest", [True, False])
    def test_fixture_module_doctest_scopes(
        self, testdir, scope, autouse, use_fixture_in_doctest
    ):
        """Test that auto-use fixtures work properly with doctest files.
        See #1057 and #1100.
        """
        testdir.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse={autouse}, scope="{scope}")
            def auto(request):
                return 99
        """.format(
                scope=scope, autouse=autouse
            )
        )
        if use_fixture_in_doctest:
            testdir.maketxtfile(
                test_doc="""
                >>> getfixture('auto')
                99
            """
            )
        else:
            testdir.maketxtfile(
                test_doc="""
                >>> 1 + 1
                2
            """
            )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.no_fnmatch_line("*FAILURES*")
        result.stdout.fnmatch_lines(["*=== 1 passed in *"])

    @pytest.mark.parametrize("scope", SCOPES)
    def test_auto_use_request_attributes(self, testdir, scope):
        """Check that all attributes of a request in an autouse fixture
        behave as expected when requested for a doctest item.
        """
        # For a text-file doctest there is no module/class/function, so the
        # corresponding request attributes must be None at every scope.
        testdir.makeconftest(
            """
            import pytest

            @pytest.fixture(autouse=True, scope="{scope}")
            def auto(request):
                if "{scope}" == 'module':
                    assert request.module is None
                if "{scope}" == 'class':
                    assert request.cls is None
                if "{scope}" == 'function':
                    assert request.function is None
                return 99
        """.format(
                scope=scope
            )
        )
        testdir.maketxtfile(
            test_doc="""
            >>> 1 + 1
            2
        """
        )
        result = testdir.runpytest("--doctest-modules")
        str(result.stdout.no_fnmatch_line("*FAILURES*"))
        result.stdout.fnmatch_lines(["*=== 1 passed in *"])
class TestDoctestNamespaceFixture:
    # Verifies the doctest_namespace fixture injects names into both text
    # file doctests and docstring doctests, at every fixture scope.

    SCOPES = ["module", "session", "class", "function"]

    @pytest.mark.parametrize("scope", SCOPES)
    def test_namespace_doctestfile(self, testdir, scope):
        """
        Check that inserting something into the namespace works in a
        simple text file doctest
        """
        testdir.makeconftest(
            """
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
        """.format(
                scope=scope
            )
        )
        p = testdir.maketxtfile(
            """
            >>> print(cl.__name__)
            contextlib
        """
        )
        reprec = testdir.inline_run(p)
        reprec.assertoutcome(passed=1)

    @pytest.mark.parametrize("scope", SCOPES)
    def test_namespace_pyfile(self, testdir, scope):
        """
        Check that inserting something into the namespace works in a
        simple Python file docstring doctest
        """
        testdir.makeconftest(
            """
            import pytest
            import contextlib

            @pytest.fixture(autouse=True, scope="{scope}")
            def add_contextlib(doctest_namespace):
                doctest_namespace['cl'] = contextlib
        """.format(
                scope=scope
            )
        )
        p = testdir.makepyfile(
            """
            def foo():
                '''
                >>> print(cl.__name__)
                contextlib
                '''
        """
        )
        reprec = testdir.inline_run(p, "--doctest-modules")
        reprec.assertoutcome(passed=1)
class TestDoctestReportingOption:
    # Exercises every value of --doctest-report against the same failing
    # doctest (expected "1  2  4" vs actual "1  2  5").

    def _run_doctest_report(self, testdir, format):
        # Shared driver: one doctest whose printed table differs from the
        # expected output in a single cell, run with the given report format.
        testdir.makepyfile(
            """
            def foo():
                '''
                >>> foo()
                   a  b
                0  1  4
                1  2  4
                2  3  6
                '''
                print('   a  b\\n'
                      '0  1  4\\n'
                      '1  2  5\\n'
                      '2  3  6')
            """
        )
        return testdir.runpytest("--doctest-modules", "--doctest-report", format)

    @pytest.mark.parametrize("format", ["udiff", "UDIFF", "uDiFf"])
    def test_doctest_report_udiff(self, testdir, format):
        # The format name is case-insensitive.
        result = self._run_doctest_report(testdir, format)
        result.stdout.fnmatch_lines(
            ["     0  1  4", "    -1  2  4", "    +1  2  5", "     2  3  6"]
        )

    def test_doctest_report_cdiff(self, testdir):
        result = self._run_doctest_report(testdir, "cdiff")
        result.stdout.fnmatch_lines(
            [
                "         a  b",
                "      0  1  4",
                "    ! 1  2  4",
                "      2  3  6",
                "    --- 1,4 ----",
                "         a  b",
                "      0  1  4",
                "    ! 1  2  5",
                "      2  3  6",
            ]
        )

    def test_doctest_report_ndiff(self, testdir):
        result = self._run_doctest_report(testdir, "ndiff")
        result.stdout.fnmatch_lines(
            [
                "         a  b",
                "      0  1  4",
                "    - 1  2  4",
                "    ?       ^",
                "    + 1  2  5",
                "    ?       ^",
                "      2  3  6",
            ]
        )

    @pytest.mark.parametrize("format", ["none", "only_first_failure"])
    def test_doctest_report_none_or_only_first_failure(self, testdir, format):
        # Both formats fall back to a plain Expected/Got dump.
        result = self._run_doctest_report(testdir, format)
        result.stdout.fnmatch_lines(
            [
                "Expected:",
                "       a  b",
                "    0  1  4",
                "    1  2  4",
                "    2  3  6",
                "Got:",
                "       a  b",
                "    0  1  4",
                "    1  2  5",
                "    2  3  6",
            ]
        )

    def test_doctest_report_invalid(self, testdir):
        # Unknown formats are rejected at argument-parsing time.
        result = self._run_doctest_report(testdir, "obviously_invalid_format")
        result.stderr.fnmatch_lines(
            [
                "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*"
            ]
        )
@pytest.mark.parametrize("mock_module", ["mock", "unittest.mock"])
def test_doctest_mock_objects_dont_recurse_missbehaved(mock_module, testdir):
    # Importing mock's `call` object at module level must not break doctest
    # collection (mock proxies misbehave under inspect.unwrap).
    pytest.importorskip(mock_module)
    testdir.makepyfile(
        """
        from {mock_module} import call
        class Example(object):
            '''
            >>> 1 + 1
            2
            '''
        """.format(
            mock_module=mock_module
        )
    )
    result = testdir.runpytest("--doctest-modules")
    result.stdout.fnmatch_lines(["* 1 passed *"])
class Broken:
    """Test double whose attribute access always raises the *wrong*
    exception type (KeyError rather than AttributeError), used to exercise
    pytest's defensive unwrapping of misbehaving objects."""

    def __getattr__(self, name):
        raise KeyError("This should be an AttributeError")
@pytest.mark.parametrize(  # pragma: no branch (lambdas are not called)
    "stop", [None, _is_mocked, lambda f: None, lambda f: False, lambda f: True]
)
def test_warning_on_unwrap_of_broken_object(stop):
    """While _patch_unwrap_mock_aware() is active, unwrapping an object whose
    attribute access raises a non-AttributeError must emit a PytestWarning
    and re-raise, rather than crashing or looping."""
    bad_instance = Broken()
    # Sanity: the stdlib implementation is in place before patching.
    assert inspect.unwrap.__module__ == "inspect"
    with _patch_unwrap_mock_aware():
        # The patched replacement lives outside the inspect module.
        assert inspect.unwrap.__module__ != "inspect"
        with pytest.warns(
            pytest.PytestWarning, match="^Got KeyError.* when unwrapping"
        ):
            with pytest.raises(KeyError):
                inspect.unwrap(bad_instance, stop=stop)
    # The original implementation is restored on context exit.
    assert inspect.unwrap.__module__ == "inspect"
| testing/test_doctest.py | 41,317 | If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
Test that doctests which output bytes work in all python versions
tested by pytest when the ALLOW_BYTES option is used (either in
the ini file or by an inline comment)(#1287).
Test that doctests which output unicode work in all python versions
tested by pytest when the ALLOW_UNICODE option is used (either in
the ini file or by an inline comment).
Check that all attributes of a request in an autouse fixture
behave as expected when requested for a doctest item.
Test that doctests which output bytes fail in Python 3 when
the ALLOW_BYTES option is not used. (#1287).
Fix internal error with docstrings containing non-ascii characters.
Test that we show the whole context before the actual line of a failing
doctest, provided that the context is up to 10 lines long.
Test that we show some context before the actual line of a failing
doctest.
Test that session fixtures are initialized for doctest modules (#768)
Test support for doctest_encoding ini option.
Test that auto-use fixtures work properly with doctest files.
See #1057 and #1100.
Test that auto-use fixtures work properly with doctest modules.
See #1057 and #1100.
Test to make sure that pytest reads setup.py files that are not used
for Python packages when run with --doctest-modules
#713: Fix --junit-xml option when used with --doctest-modules.
Test support for multiple --doctest-glob arguments (#1255).
Check that inserting something into the namespace works in a
simple text file doctest
Check that inserting something into the namespace works in a
simple Python file docstring doctest
Test the NUMBER option.
Test case for issue 3583: Printing Unicode in doctest under Python 2.7
doesn't work
Test case to make sure that DoctestItem.reportinfo() returns lineno.
Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii
characters.
Test case for issue 2434: DecodeError on Python 2 when doctest docstring
contains non-ascii characters.
Test that doctests which output unicode fail in Python 2 when
the ALLOW_UNICODE option is not used. The same test should pass
in Python 3.
Test to make sure that pytest ignores valid setup.py files when run
with --doctest-modules
print "checking that %s returns custom items" % (x,) Empty file has no items. lines below should be trimmed out doctest is never executed because of error during hello.py collection doctest is never executed because of error during hello.py collection type: ignore ints shouldn't match floats: Rounding: Only the actual output is rounded up, not the expected output: The current implementation doesn't understand that numbers inside strings shouldn't be treated as numbers: The lines that contains the failure are 4, 5, and 8. The first one is a stack trace and the other two are mismatches. session fixture which changes some global data, which will be accessed by doctests in a module pragma: no branch (lambdas are not called) | 3,054 | en | 0.822893 |
# Universe of instrument symbols (HK, LSE IOB, NYSE and NASDAQ tickers)
# whose one-minute OHLC/volume bars are read from the CSV source below.
symbols = [ '1288.HK', '3988.HK', '0883.HK', '0939.HK', '2628.HK', '3968.HK', '0941.HK', '0688.HK', '0386.HK', '1088.HK', '0728.HK', '0762.HK', '1398.HK', '0857.HK', '2318.HK', '0700.HK', 'GAZPq.L', 'LKOHyq.L', 'NKELyq.L', 'NVTKq.L', 'RELIq.L', 'ROSNq.L', 'SNGSyq.L', 'TATNxq.L', 'BSBR.N', 'BBD.N', 'ABV.N', 'CIG.N', 'SID.N', 'GGB.N', 'HDB.N', 'IBN.N', 'ITUB.N', 'MBT.N', 'PBR.N', 'TNE.N', 'VALE.N', 'VIP.N', 'BIDU.OQ', 'INFY.OQ']
#lineProcessor = CSVReutersAdaptative('BRIC_1min.csv')
# Column formatters -- MessageFormat/SimpleDateFormat/DecimalFormat are
# presumably java.text classes, i.e. this is a Jython script (imports are
# outside this view -- verify).
textFormat = MessageFormat("{0}")
dateFormat = SimpleDateFormat('dd-MMM-yyyy')
timeFormat = SimpleDateFormat('HH:mm:ss.SSS')
doubleFormat = DecimalFormat('#.##')
# Column map: symbol, date, time, two ignored columns, then OPEN/HIGH/LOW/
# CLOSE/VOLUME plus three extended price fields; columns 1 and 2 build the
# timestamp (last argument).
lineProcessor = CSVSourceLineProcessor([textFormat,dateFormat,timeFormat,None,None,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat,doubleFormat],[None,None,None,None,None,OPEN(PRICE),HIGH(PRICE),LOW(PRICE),CLOSE(PRICE),VOLUME(PRICE),Field.EXTENDED(PRICE,"Ave. Price"),Field.EXTENDED(PRICE,"VWAP"),Field.EXTENDED(PRICE,"No. Trades")],0,[1,2])
source = SecondOrderSource('BRIC40_1min.csv', symbols, lineProcessor)
print "Ready"
class MyObserver(PricesListener):
def update(self, ss, when):
strLine = Long.toString(when.getTimeInMillis()).encode('utf-8')
strLine = strLine + when.toString().encode('utf-8')
for s in symbols:
if s in ss:
strLine = strLine + ',' \
+ str(market.getLastPrice(0,s+'-OPEN')) + ','\
+ str(market.getLastPrice(0,s+'-HIGH')) + ','\
+ str(market.getLastPrice(0,s+'-LOW')) + ','\
+ str(market.getLastPrice(0,s+'-CLOSE')) + ','\
+ str(market.getLastPrice(0,s+'-VOLUME')) + ','\
else:
strLine = strLine + ',-,-,-,-,-'
print strLine
# Market object that answers the getLastPrice() calls made by MyObserver.
market = RandomAccessMarket(0.0, 5000)
lineProcessor.addMarketListener(market)
lineProcessor.addPricesListener(MyObserver())
print "Go!"
# Emit the CSV header row: one OPEN/HIGH/LOW/CLOSE/Volume column per symbol,
# matching the row layout produced by MyObserver.update().
strLine = 'milliseconds'
for s in symbols:
    strLine = strLine + ',' + s + '-OPEN'
    strLine = strLine + ',' + s + '-HIGH'
    strLine = strLine + ',' + s + '-LOW'
    strLine = strLine + ',' + s + '-CLOSE'
    strLine = strLine + ',' + s + '-Volume'
print strLine
# Stream the whole CSV file through the registered listeners.
source.run()
| src/attic/attic-python/test/test-secondorder.py | 2,289 | lineProcessor = CSVReutersAdaptative('BRIC_1min.csv') | 53 | en | 0.267158 |
"""Example reStructuredText from Sphinx-Needs project.
From http://sphinxcontrib-needs.readthedocs.io/en/latest/
but will not work in isolation - cut down just to trigger
RST304.
**Some text**
Wohooo, we have created :need:`req_001`,
which is linked by :need_incoming:`req_001`.
"""
print("sphinx-needs defines its own reStructuredText roles.")
| tests/RST304/sphinx-roles.py | 350 | Example reStructuredText from Sphinx-Needs project.
From http://sphinxcontrib-needs.readthedocs.io/en/latest/
but will not work in isolation - cut down just to trigger
RST304.
**Some text**
Wohooo, we have created :need:`req_001`,
which is linked by :need_incoming:`req_001`. | 278 | en | 0.874147 |
# -*- coding: utf-8 -*-
from guillotina import configure
from guillotina.catalog.utils import get_index_fields
from guillotina.component import get_utilities_for
from guillotina.content import IResourceFactory
from guillotina.utils import get_dotted_name
from packaging import version
import aioelasticsearch
# Parsed version of the installed aioelasticsearch client.
ES_CLIENT_VERSION = version.parse(aioelasticsearch.__version__)
# NOTE(review): "ELASTIC6" is derived from client minor == 5 -- presumably
# the aioelasticsearch 0.5.x line is the one paired with Elasticsearch 6;
# confirm against the client's changelog.
ELASTIC6 = ES_CLIENT_VERSION.minor == 5
def default_refresh():
    """Default value for the ``elasticsearch.refresh`` setting: do not force
    an index refresh after write operations."""
    return False
# Plugin defaults merged into guillotina's global app_settings.
app_settings = {
    "elasticsearch": {
        # Number of documents sent per bulk indexing request.
        "bulk_size": 50,
        # Dotted path to the callable providing the default refresh policy.
        "refresh": "guillotina_elasticsearch.default_refresh",
        # Prefix prepended to every created index name.
        "index_name_prefix": "guillotina-",
        "connection_settings": {"hosts": [], "timeout": 2},
        "index": {},
        "security_query_builder": "guillotina_elasticsearch.queries.build_security_query", # noqa
    },
    "load_utilities": {
        # Register the Elasticsearch-backed catalog utility.
        "catalog": {
            "provides": "guillotina_elasticsearch.interfaces.IElasticSearchUtility", # noqa
            "factory": "guillotina_elasticsearch.utility.ElasticSearchUtility",
            "settings": {},
        }
    },
    # Management commands exposed through the guillotina CLI.
    "commands": {
        "es-migrate": "guillotina_elasticsearch.commands.migrate.MigrateCommand", # noqa
        "es-reindex": "guillotina_elasticsearch.commands.reindex.ReindexCommand", # noqa
        "es-vacuum": "guillotina_elasticsearch.commands.vacuum.VacuumCommand",
        "es-fields": "guillotina_elasticsearch.commands.fields.FieldsCommand",
    },
}
def includeme(root):
    """Wire up the plugin: scan its component modules and mark a fixed set
    of core guillotina index fields as stored fields."""
    for module in ("utility", "manager", "parser"):
        configure.scan("guillotina_elasticsearch.{}".format(module))
    # Core fields whose values should be stored in the index, not only
    # indexed. Only applies to guillotina's own content factories.
    stored_fields = {
        "id",
        "path",
        "uuid",
        "type_name",
        "tid",
        "creators",
        "contributors",
        "access_roles",
        "access_users",
        "parent_uuid",
        "title",
        "creation_date",
        "modification_date",
        "tags",
    }
    for name, utility in get_utilities_for(IResourceFactory):
        if not get_dotted_name(utility._callable).startswith("guillotina."):
            continue
        for field_name, catalog_info in get_index_fields(name).items():
            if field_name in stored_fields:
                catalog_info["store"] = True
| guillotina_elasticsearch/__init__.py | 2,397 | -*- coding: utf-8 -*- noqa noqa noqa noqa add store true to guillotina indexes | 78 | en | 0.273407 |
import nltk
import numpy as np
#nltk.download('punkt') #downloading a package with a pretrained tokenizer
from nltk.stem.porter import PorterStemmer
stemmer = PorterStemmer()
def tokenize(sentence):
    """Split *sentence* into meaningful units (words, punctuation) via NLTK."""
    tokens = nltk.word_tokenize(sentence)
    return tokens
def stem(word):
    """Lower-case *word* and reduce it to its Porter stem (root form)."""
    lowered = word.lower()
    return stemmer.stem(lowered)
def bag_of_words(tokenized_sentence, all_words):
    """Return a float32 bag-of-words vector for *tokenized_sentence*.

    The result has one entry per word in *all_words*: 1.0 when that word
    appears among the stems of the sentence tokens, 0.0 otherwise.
    *all_words* is assumed to already be stemmed -- TODO confirm against
    the training pipeline.
    """
    # Stem once into a set so each vocabulary lookup is O(1) instead of
    # scanning a list for every word; also avoids rebinding the parameter.
    sentence_stems = {stem(w) for w in tokenized_sentence}
    bag = np.zeros(len(all_words), dtype=np.float32)
    for idx, w in enumerate(all_words):
        if w in sentence_stems:
            bag[idx] = 1.0
    return bag
| nltk_utils.py | 692 | nltk.download('punkt') downloading a package with a pretrained tokenizersplitting a string into meaningful unitsGenerating the root form of the words | 155 | en | 0.740978 |
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import argparse
import random
import json
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
import squad_data_utils as data_utils
import modelconfig
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def _save_features_npz(features, path):
    """Serialize SQuAD features to *path* as a compressed .npz archive of
    int16 arrays (ids, segments, mask, answer start/end positions)."""
    np.savez_compressed(
        path,
        input_ids=np.array([f.input_ids for f in features], dtype=np.int16),
        segment_ids=np.array([f.segment_ids for f in features], dtype=np.int16),
        input_mask=np.array([f.input_mask for f in features], dtype=np.int16),
        start_positions=np.array([f.start_position for f in features], dtype=np.int16),
        end_positions=np.array([f.end_position for f in features], dtype=np.int16))


def gen(args):
    """Convert SQuAD train/dev json into feature archives.

    Reads ``train.json`` and ``dev.json`` from ``args.input_dir``, tokenizes
    and featurizes them with the configured BERT tokenizer, then writes
    ``data.npz`` (train) and ``dev.npz`` (validation) to ``args.output_dir``.
    """
    tokenizer = BertTokenizer.from_pretrained(modelconfig.MODEL_ARCHIVE_MAP[args.bert_model])
    train_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "train.json"), is_training=True)
    train_features = data_utils.convert_examples_to_features(
        train_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
    logger.info("***** Running training *****")
    logger.info("  Num orig examples = %d", len(train_examples))
    logger.info("  Num split examples = %d", len(train_features))
    _save_features_npz(train_features, os.path.join(args.output_dir, "data.npz"))

    # Validation split -- same featurization, stored under dev.npz.
    valid_examples = data_utils.read_squad_examples(os.path.join(args.input_dir, "dev.json"), is_training=True)
    valid_features = data_utils.convert_examples_to_features(
        valid_examples, tokenizer, args.max_seq_length, args.doc_stride, args.max_query_length, is_training=True)
    logger.info("  Num orig examples = %d", len(valid_examples))
    logger.info("  Num split examples = %d", len(valid_features))
    _save_features_npz(valid_features, os.path.join(args.output_dir, "dev.npz"))
def main():
    """Parse CLI arguments, seed all RNGs for reproducibility, and run the
    SQuAD json -> .npz feature conversion (see gen())."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--bert-model", default='bert-base', type=str)
    parser.add_argument("--input_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=320,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help="random seed for initialization")
    parser.add_argument('--doc_stride',
                        type=int,
                        default=128)
    parser.add_argument('--max_query_length',
                        type=int,
                        default=30)
    parser.add_argument('--max_answer_length',
                        type=int,
                        default=30)
    args = parser.parse_args()
    # Seed every RNG touched downstream so feature generation is reproducible.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    os.makedirs(args.output_dir, exist_ok=True)
    gen(args)
if __name__=="__main__":
main() | pytorch-pretrained-bert/src/gen_pt_squad.py | 5,852 | Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team and authors from University of Illinois at Chicago. Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.>>>>> validation<<<<< end of validation declaration Other parameters | 784 | en | 0.834002 |
from logging import getLogger
import gokart
import luigi
import swifter # noqa
from dajare_detector.utils.base_task import DajareTask
from dajare_detector.preprocessing.make_kana_pattern import MakeKanaPattern
from dajare_detector.preprocessing.make_splited_pattern import MakeSplitedPattern
from dajare_detector.preprocessing.decide_kana_pattern import DecideKanaPattern
from dajare_detector.preprocessing.normalize_kana_pattern import NormalizeKanaPattern
logger = getLogger(__name__)
class MakeDecideKanaFeature(DajareTask):
    """Binary feature: did a katakana repetition occur?

    (Original Japanese docstring: カタカナの繰り返しが発生したか.)
    Produces one 0/1 column per split window size.
    """
    # Upstream task whose output text is analysed.
    target = gokart.TaskInstanceParameter()
    # Width of the sliding window used when splitting the kana string.
    split_window_size = luigi.IntParameter()
    def requires(self):
        """Chain kana extraction -> normalization -> windowed split -> decision."""
        kana_task = NormalizeKanaPattern(target=MakeKanaPattern(
            target=self.target))
        split_task = MakeSplitedPattern(
            target=kana_task, split_window_size=self.split_window_size)
        return DecideKanaPattern(split_pattern_target=split_task,
                                 kana_pattern_target=kana_task,
                                 split_window_size=self.split_window_size)
    def run(self):
        """Collapse the per-window boolean flag list into a single 0/1 column
        and dump it alongside the record id."""
        df = self.load_data_frame().reset_index(drop=True)
        df[f'decide_kana_{self.split_window_size}'] = df[
            'decide_kana_flag_list'].swifter.apply(lambda x: 1
                                                   if any(x) else 0)
        self.dump(df[['_id', f'decide_kana_{self.split_window_size}']])
| dajare_detector/featurize/make_decide_kana_feature.py | 1,461 | カタカナの繰り返しが発生したか
noqa | 22 | ja | 0.999066 |
#!/usr/bin/python3
# System imports
import argparse
import sys
import serial
# Data processing imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import seaborn as sns
def checkparams(pwm_freq, pwm_duty, num_samples):
    """Validate the measurement parameters.

    Prints a message for every out-of-range parameter and terminates the
    process with exit status 1 if any check failed; returns None otherwise.
    """
    check_ok = True
    if pwm_freq < 20 or pwm_freq > 100:
        print("Allowed PWM freq is between in [20, 100] kHz interval.")
        check_ok = False
    if pwm_duty < 5 or pwm_duty > 80:
        print("Allowed PWM duty is between in [5, 80] percent interval.")
        check_ok = False
    if num_samples < 1 or num_samples > 20000:
        # BUGFIX: the message claimed an upper bound of 8192 while the
        # check above actually allows up to 20000 samples.
        print("Allowed samples num is between in [1, 20000] interval.")
        check_ok = False
    if not check_ok:
        sys.exit(1)
def main(baudrate, pwm_freq, pwm_duty, num_samples, delays_file):
    """Measure PWM response delays over a serial link and save them as .npy.

    Sends "pwm_freq,pwm_duty,num_samples" to the device on /dev/ttyUSB0,
    reads back the device timer frequency and then one raw delay sample per
    line, converts ticks to seconds, prints summary statistics and stores
    the samples in *delays_file* via numpy.
    """
    ser = serial.Serial(
        port='/dev/ttyUSB0',
        baudrate=baudrate,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS,
        rtscts=0
    )
    if not ser.is_open:
        print("Error opening serial port device.")
        sys.exit(1)
    checkparams(pwm_freq, pwm_duty, num_samples)
    print("Params OK!")
    delays = np.empty(num_samples)
    # Send the measurement configuration, then an empty line to start.
    ser.write(str.encode('{},{},{}\r\n'.format(
        pwm_freq,
        pwm_duty,
        num_samples)))
    timer_frequency = int(ser.readline().strip())  # device timer rate, MHz
    ser.write(str.encode('\n'))  # start measurement
    for i in range(num_samples):
        delays[i] = int(ser.readline().strip())
    ser.close()
    # Convert timer ticks to seconds.
    delays *= (1e-6 / timer_frequency)
    # Drop the first two samples -- presumably warm-up/garbage readings
    # (the original deleted index 0 twice on purpose; confirm with hardware).
    delays = np.delete(delays, 0)
    delays = np.delete(delays, 0)
    print("min: {}, avg: {}, max = {}".format(
        np.min(delays),
        np.mean(delays),
        np.max(delays)))
    print("std: ", np.std(delays))
    # BUGFIX: the original opened delays_file with open(..., 'w') into an
    # unused, never-closed handle (truncating the very file np.save was
    # about to write). np.save manages the file itself.
    np.save(delays_file, delays)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--baudrate', type=int, default=115200)
parser.add_argument('--pwm_freq', type=int, default=20)
parser.add_argument('--pwm_duty', type=int, default=50)
parser.add_argument('--num_samples', type=int, default=20000)
parser.add_argument('--delays_file', type=str, default='novo.npy')
ARGS, other = parser.parse_known_args()
main(ARGS.baudrate, ARGS.pwm_freq, ARGS.pwm_duty, ARGS.num_samples,
ARGS.delays_file);
| scripts/test.py | 3,141 | !/usr/bin/python3 System imports Data processing imports MHz start measurement mean = np.mean(delays); maxi = np.max(delays); mini = np.min(delays); sns.distplot(delays, norm_hist=True); plt.show(); delays *= 1e6; plt.plot(delays) plt.ylabel('Vrijeme kašnjenja (${\mu}s$)') plt.xlabel('Uzorci (padajući brid odziva)') plt.show() plt.figure(0) n, bins, patches = plt.hist(delays, 50, normed=True, histtype='step'); y = mlab.normpdf(bins, np.mean(delays), np.std(delays)) plt.show() plt.figure(1) plt.plot(bins, y) plt.xlabel('Vrijeme kašnjenja (${\mu}s$)') plt.ylabel('Funkcija gustoće vjerojatnosti') plt.show(); | 631 | en | 0.222599 |
# -*- coding: utf-8 -*-
# 留言板
# 1、新建目录下一定要有__init__.py文件,否则不能被其它文件引用、不能沿路径读写文件。from ... 。
# 2、urls.py中,设置第一级路由名ask。 在.../mysite/mysite/urls.py中 url(r'^ask/', include('account.ask.urls')),
# 3、admin.py中,设置数据库显示。在.../mysite/account/admin.py中 @admin.register(Technologyask) ...
# 4、templates中,增加模板文件目录/ask
import datetime
import os
import json
from django.shortcuts import render
from django.http.response import HttpResponseRedirect,HttpResponse
from . models import Guestbook,Reply
from django.contrib.auth.decorators import login_required #使用注意在settings.py中设置 LOGIN_URL = '/login/'
from django.contrib.auth.models import User
from myAPI.pageAPI import djangoPage
from django.contrib import messages
PAGE_NUM = 20 #每页显示数
# http://localhost:9000/guestbook/reply/
#@login_required
def reply(request):
    """Render the reply form (GET) or store a reply to a guestbook entry (POST).

    The entry identified by *title* is marked as answered; only the 'admin'
    account actually writes the reply content/author/date.
    """
    if request.method != 'POST':
        return render(request, 'guestbook/reply.html', context=locals())
    title = request.POST['title']
    content = request.POST['content']
    Guestbook.objects.filter(title=title).update(state=1)# mark the entry as answered
    if request.user.username == 'admin': # reply written by the admin account
        Reply.objects.filter(title=title).update(content=content )
        Reply.objects.filter(title=title).update(username = 'admin' )
        Reply.objects.filter(title=title).update(date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") )
    return HttpResponseRedirect('/guestbook/showreply/')
@login_required
def gettitle(request):
    """Open the reply form pre-filled with the ``?title=...`` query value;
    answer plain 'no' when no title was supplied."""
    title = request.GET.get('title','')
    if title == '':
        return HttpResponse('no')
    return render(request, 'guestbook/reply.html', context=locals())
# http://localhost:9000/guestbook/create/
@login_required
def create(request):
    """Render the creation form (GET) or create a guestbook entry (POST).

    Duplicate titles and empty content are rejected with a flash message;
    on success a matching (still unanswered) Reply row is also created.
    """
    if request.method != 'POST':
        return render(request, 'guestbook/create.html', context=locals())
    title = request.POST['title']
    content = request.POST['content']
    istitle = Guestbook.objects.filter(title = title)
    if istitle:
        # Flash message (Chinese): "Warning: title ... is already in use!"
        messages.info(request, '告警:标题 '+ title + '已经被使用!')
        return HttpResponseRedirect('/guestbook/show/')
    if content:
        guestbooks = Guestbook(username=request.user,title=title,content=content)
        guestbooks.save()
        guestbookname = Guestbook.objects.get(title=title).username
        replys = Reply(guestbookname=guestbookname,title=title)
        replys.save()
    else:
        # Flash message (Chinese): "Warning: the entry content is empty!"
        messages.info(request,'告警:留言内容为空!')
    return HttpResponseRedirect('/guestbook/show/')
# http://localhost:9000/guestbook/show/
@login_required
def show(request, page):
    """Paginated listing of guestbook entries.

    Superusers see every entry (showall.html); regular users only their own
    (show.html). *page* is the 1-based page number.
    """
    if request.user.is_superuser:
        guestbooks = Guestbook.objects.filter().order_by('-date','-id')
        guestbooks, pageList, paginator, page = djangoPage(guestbooks,page,PAGE_NUM) # apply pagination helper
        replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
        offset = PAGE_NUM * (page - 1)
        return render(request, 'guestbook/showall.html', context=locals())
    guestbooks = Guestbook.objects.filter(username=request.user.username).order_by('-date', '-id')
    guestbooks, pageList, paginator, page = djangoPage(guestbooks,page,PAGE_NUM) # apply pagination helper
    replys = Reply.objects.filter(guestbookname=request.user.username).order_by('-date', '-id')
    offset = PAGE_NUM * (page - 1)
    return render(request, 'guestbook/show.html', context=locals())
# http://localhost:9000/guestbook/showreply/
@login_required
def showreply(request, page):
    """Paginated listing of replies: the replies for ``?title=...`` when a
    title is given, otherwise the replies addressed to the current user."""
    title = request.GET.get('title','')
    if title != '':
        replys = Reply.objects.filter(title=title)
    else:
        replys = Reply.objects.filter(username=request.user).order_by('-date', '-id')
    replys, pageList, paginator, page = djangoPage(replys,page,PAGE_NUM) # apply pagination helper
    offset = PAGE_NUM * (page - 1)
    return render(request, 'guestbook/showreply.html', context=locals())
| mysite/guestbook/guestbook.py | 4,146 | -*- coding: utf-8 -*- 留言板 1、新建目录下一定要有__init__.py文件,否则不能被其它文件引用、不能沿路径读写文件。from ... 。 2、urls.py中,设置第一级路由名ask。 在.../mysite/mysite/urls.py中 url(r'^ask/', include('account.ask.urls')), 3、admin.py中,设置数据库显示。在.../mysite/account/admin.py中 @admin.register(Technologyask) ... 4、templates中,增加模板文件目录/ask使用注意在settings.py中设置 LOGIN_URL = '/login/'每页显示数 http://localhost:9000/guestbook/reply/@login_required更改回答状态 admin回复 http://localhost:9000/guestbook/create/ http://localhost:9000/guestbook/show/ 调用分页函数调用分页函数 http://localhost:9000/guestbook/showreply/ 调用分页函数 | 557 | zh | 0.570502 |
"""This module contains the HelpCommandHandler class."""
from telegram import Update
from telegram.ext import CommandHandler, CallbackContext
import utils.helper as helper
class HelpCommandHandler(CommandHandler):
    """Telegram command handler that answers the /help command."""

    def __init__(self):
        # Register the module-level callback for the "help" command.
        super().__init__("help", callback)
def callback(update: Update, _: CallbackContext) -> None:
    """Reply to the command message with the generated help text."""
    update.message.reply_text(helper.create_help_text())
| ongabot/handler/helpcommand.py | 503 | Handler for /help command
Print the help text for a /start or /help command
This module contains the HelpCommandHandler class. | 126 | en | 0.620114 |
#!/usr/bin/python
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Print the list of available maps according to the game."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from pysc2 import run_configs
def main(unused_argv):
  """Start the SC2 binary and print every map the game reports as available."""
  # want_rgb=False: no rendered observation is needed, only the controller.
  with run_configs.get().start(want_rgb=False) as controller:
    available_maps = controller.available_maps()
  print("\n")
  print("Local map paths:")
  for m in sorted(available_maps.local_map_paths):
    print(" ", m)
  print()
  print("Battle.net maps:")
  for m in sorted(available_maps.battlenet_map_names):
    print(" ", m)
if __name__ == "__main__":
  app.run(main)
| pysc2/bin/battle_net_maps.py | 1,271 | Print the list of available maps according to the game.
!/usr/bin/python Copyright 2019 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 644 | en | 0.857442 |
#!/usr/bin/env python3
import fnmatch
import os
import re
import ntpath
import sys
import argparse
excluded_files = []
def check_config_style(filepath):
    """Check one config file for unbalanced (), [] and {} brackets.

    The file is scanned character by character; anything inside string
    literals ('...' or "..."), line comments (//) or block comments
    (/* ... */) is ignored. Every suspected problem is printed with the
    file path and line number; the number of detected errors is returned.

    Note: the original also carried dead helper functions (pushClosing /
    popClosing) referencing undefined names, plus several never-read state
    variables -- all removed here.
    """
    bad_count_file = 0
    with open(filepath, 'r', encoding='utf-8', errors='ignore') as file:
        content = file.read()
        # Store all brackets we find in this file, so we can validate everything on the end
        brackets_list = []
        # To check if we are in a comment block
        isInCommentBlock = False
        checkIfInComment = False
        # Used in case we are in a line comment (//)
        ignoreTillEndOfLine = False
        # True after a '*' inside a comment block; if the next character is
        # a '/', the comment block ends.
        checkIfNextIsClosingBlock = False
        # We ignore everything inside a string
        isInString = False
        # Quote character that opened the current string, to match its end
        inStringType = ''
        # Extra information so we know what line we find errors at
        lineNumber = 1
        # Parse all characters in the content of this file to search for potential errors
        for c in content:
            if c == '\n':  # keep line numbers accurate for error messages
                lineNumber += 1
            if isInString:
                # While in a string we only look for its closing quote.
                if c == inStringType:
                    isInString = False
            elif not isInCommentBlock:
                # A '/' was seen previously: decide between '/*' and '//'.
                if checkIfInComment:
                    checkIfInComment = False
                    if c == '*':
                        isInCommentBlock = True
                    elif c == '/':
                        ignoreTillEndOfLine = True
                if not isInCommentBlock:
                    if ignoreTillEndOfLine:
                        # Inside a line comment: skip until end of line.
                        if c == '\n':
                            ignoreTillEndOfLine = False
                    else:  # validate brackets
                        if c == '"' or c == "'":
                            isInString = True
                            inStringType = c
                        elif c == '/':
                            checkIfInComment = True
                        elif c == '(':
                            brackets_list.append('(')
                        elif c == ')':
                            if len(brackets_list) > 0 and brackets_list[-1] in ['{', '[']:
                                print("ERROR: Possible missing round bracket ')' detected at {0} Line number: {1}".format(filepath,lineNumber))
                                bad_count_file += 1
                            brackets_list.append(')')
                        elif c == '[':
                            brackets_list.append('[')
                        elif c == ']':
                            if len(brackets_list) > 0 and brackets_list[-1] in ['{', '(']:
                                print("ERROR: Possible missing square bracket ']' detected at {0} Line number: {1}".format(filepath,lineNumber))
                                bad_count_file += 1
                            brackets_list.append(']')
                        elif c == '{':
                            brackets_list.append('{')
                        elif c == '}':
                            if len(brackets_list) > 0 and brackets_list[-1] in ['(', '[']:
                                print("ERROR: Possible missing curly brace '}}' detected at {0} Line number: {1}".format(filepath,lineNumber))
                                bad_count_file += 1
                            brackets_list.append('}')
            else:
                # Inside a block comment: look for the closing '*/'.
                if c == '*':
                    checkIfNextIsClosingBlock = True
                elif checkIfNextIsClosingBlock:
                    if c == '/':
                        isInCommentBlock = False
                    elif c != '*':
                        checkIfNextIsClosingBlock = False
        # Whole-file totals: each mismatching bracket kind counts one error.
        if brackets_list.count('[') != brackets_list.count(']'):
            print("ERROR: A possible missing square bracket [ or ] in file {0} [ = {1} ] = {2}".format(filepath,brackets_list.count('['),brackets_list.count(']')))
            bad_count_file += 1
        if brackets_list.count('(') != brackets_list.count(')'):
            print("ERROR: A possible missing round bracket ( or ) in file {0} ( = {1} ) = {2}".format(filepath,brackets_list.count('('),brackets_list.count(')')))
            bad_count_file += 1
        if brackets_list.count('{') != brackets_list.count('}'):
            print("ERROR: A possible missing curly brace {{ or }} in file {0} {{ = {1} }} = {2}".format(filepath,brackets_list.count('{'),brackets_list.count('}')))
            bad_count_file += 1
    return bad_count_file
def main():
    """Collect every .cpp/.hpp under the addon tree (minus exclusions),
    run check_config_style on each, print a summary and return the total
    error count (0 means validation passed)."""
    print("Validating Config Style")
    for test in excluded_files:
        print("Excluded File: ",test)
    sqf_list = []
    bad_count = 0
    parser = argparse.ArgumentParser()
    parser.add_argument('-m','--module', help='only search specified module addon folder', required=False, default="")
    args = parser.parse_args()
    # Allow running from root directory as well as from inside the tools directory
    # NOTE(review): rootDir already ends with '/', so the join below yields
    # "Swamp Aux//<module>" -- harmless for os.walk but worth tidying.
    rootDir = "Swamp Aux/"
    for root, dirnames, filenames in os.walk(rootDir + '/' + args.module):
        for filename in fnmatch.filter(filenames, '*.cpp'):
            sqf_list.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, '*.hpp'):
            sqf_list.append(os.path.join(root, filename))
    for filename in sqf_list:
        if (filename not in excluded_files):
            bad_count = bad_count + check_config_style(filename)
    print("------\nChecked {0} files\nErrors detected: {1}".format(len(sqf_list), bad_count))
    if (bad_count == 0):
        print("Config validation PASSED")
    else:
        print("Config validation FAILED")
    return bad_count
if __name__ == "__main__":
sys.exit(main()) | tools/config_style_checker.py | 7,213 | !/usr/bin/env python3 Store all brackets we find in this file, so we can validate everything on the end To check if we are in a comment block Used in case we are in a line comment (//) Used in case we are in a comment block (/* */). This is true if we detect a * inside a comment block. If the next character is a /, it means we end our comment block. We ignore everything inside a string Used to store the starting type of a string, so we can match that to the end of a string Extra information so we know what line we find errors at Parse all characters in the content of this file to search for potential errors Keeping track of our line numbers so we can print accurate line number information when we detect a possible error while we are in a string, we can ignore everything else, except the end of the string if we are not in a comment block, we will check if we are at the start of one or count the () {} and [] This means we have encountered a /, so we are now checking if this is an inline comment or a comment block if the next character after / is a *, we are at the start of a comment block Otherwise, will check if we are in an line comment and an line comment is a / followed by another / (//) We won't care about anything that comes after it we are in a line comment, just continue going through the characters until we find an end of line validate brackets Look for the end of our comment block Allow running from root directory as well as from inside the tools directory | 1,488 | en | 0.894674 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import django.dispatch
# Signal emitted when compiled .mo files are ready, so the server knows
# when to restart itself.  Receivers are connected elsewhere; this module
# only declares the signal object.
post_compilemessages = django.dispatch.Signal()
| django_c3po/signals.py | 219 | !/usr/bin/env python -*- coding: utf-8 -*- Signal to inform application about ready .mo files, so server will know when to restart itself. | 138 | en | 0.760384 |
# Copyright 2019 BlueCat Networks. All rights reserved.
import ipaddress
from flask import request, g, abort, jsonify
from bluecat.api_exception import PortalException, APIException
from bluecat import route, util
from main_app import app
# application config

# Module-level handle to the BAM API object; set per-request in
# addiplist_addiplist_page() (via `global api`) and read by add_tag().
api = None
#
# GET, PUT or POST
#
@route(app, '/lumeta/getnetworklist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def get_networks_get_networks_page():
    """Return the list of BAM configurations as JSON.

    Response body: a JSON array of ``{"id": <int>, "name": <str>}``
    objects, one per configuration visible to the authenticated user.
    """
    configurations_json = []
    # BUG FIX: the logger was previously dereferenced (g.user.logger) before
    # the `if g.user` guard, which would raise AttributeError for an
    # unauthenticated request; the stray debug print(c) was also removed.
    if g.user:
        g.user.logger.info('SUCCESS')
        for config in g.user.get_api().get_configurations():
            configurations_json.append(
                {"id": config.get_id(), "name": config.get_name()})
    return jsonify(configurations_json)
@route(app, '/lumeta/getiplist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def getiplist_getiplist_page():
    """Collect IP address data from every IP4Block of every configuration.

    Walks each configuration's IP4Block children, expands each block into
    its nested blocks/networks, and returns a JSON array of per-address
    dicts produced by calculate_block_stats().
    """
    g.user.logger.info('SUCCESS')
    networks = []       # network summaries (id + display text); kept for parity with the UI
    ip_addresses = []   # flat list of per-address property dicts (the response body)
    if g.user:
        for c in g.user.get_api().get_configurations():
            try:
                for nw in c.get_children_of_type('IP4Block'):
                    # Expand the block into all nested blocks and networks.
                    for n in g.user.get_api().get_by_object_types(
                            nw.get_property('CIDR'),
                            ['IP4Network', 'IP4Block', 'IP6Network', 'IP6Block']):
                        if '6' in n.get_type():
                            networks.append({'network_id': n.get_id(),
                                             'display_text': n.get_properties()['prefix']})
                        else:
                            networks.append({'network_id': n.get_id(),
                                             'display_text': n.get_properties()['CIDR']})
                        ip_addresses.extend(
                            calculate_block_stats(n, c.get_id(), c.get_name()))
            except Exception as e:
                # BUG FIX: was app.loggererror(...) (no such attribute on a
                # Flask app) and e.message (removed in Python 3).
                app.logger.error('get_subnets: ' + str(e))
    return jsonify(ip_addresses)
def calculate_network_stats(bam_network, config_id, config_name):
    """Return a list of per-address dicts for every address in *bam_network*.

    bam_network: an IP4Network or IP6Network BAM entity (distinguished via
        get_type()).
    config_id / config_name: owning configuration, copied into every dict.

    Each returned dict has keys: ip_address, properties, config_id,
    config_name, id.
    """
    # The IP4/IP6 branches were previously duplicated verbatim; they only
    # differ in the property key and the child entity type, so fold them.
    if bam_network.get_type() == 'IP4Network':
        network_address = bam_network.get_property('CIDR')
        child_type = 'IP4Address'
    else:
        network_address = bam_network.get_property('prefix')
        child_type = 'IP6Address'
    # Validates the address string (raises ValueError on malformed input);
    # the unused `network` binding and the unused
    # get_next_available_ip4_address() call from the original were dropped.
    ipaddress.ip_network(network_address)

    ip_addresses = []
    for child in bam_network.get_children_of_type(child_type):
        ip_addresses.append({
            'ip_address': child.get_address(),
            'properties': child.get_properties(),
            'config_id': config_id,
            'config_name': config_name,
            'id': child.get_id(),
        })
    return ip_addresses
def calculate_block_stats(bam_block, config_id, config_name):
    """Recursively collect per-address dicts for every network under *bam_block*.

    bam_block: an IP4Block or IP6Block BAM entity.  Descends into nested
    blocks/networks and returns the flat list of address dicts produced by
    calculate_network_stats().
    """
    if bam_block.get_type() == 'IP6Block':
        block_address = bam_block.get_property('prefix')
    else:
        block_address = bam_block.get_property('CIDR')
    # Validates the block address; raises ValueError on malformed input.
    ipaddress.ip_network(block_address)

    block_data_list = []
    if bam_block.get_type() == 'IP4Block':
        for network in bam_block.get_ip4_networks():
            block_data_list.extend(
                calculate_network_stats(network, config_id, config_name))
        for found_block in bam_block.get_ip4_blocks():
            block_data_list.extend(
                calculate_block_stats(found_block, config_id, config_name))
        # The original also queried get_next_available_ip4_address/_network
        # into a local `block_data` dict that was never returned; that dead
        # code (and its swallowed APIException) has been removed.
    elif bam_block.get_type() == 'IP6Block':
        # BUG FIX: the results of these recursive calls were previously
        # discarded, so IPv6 address data never reached the caller.
        for network in bam_block.get_ip6_networks():
            block_data_list.extend(
                calculate_network_stats(network, config_id, config_name))
        for found_block in bam_block.get_ip6_blocks():
            block_data_list.extend(
                calculate_block_stats(found_block, config_id, config_name))
    return block_data_list
# to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag)
#
# GET, PUT or POST
@route(app, '/lumeta/addiplist', methods=['GET', 'PUT', 'POST'])
@util.rest_workflow_permission_required('lumeta_workflow_page')
@util.rest_exception_catcher
def addiplist_addiplist_page():
    """Add the posted device lists to BAM and report per-config add/dup counts."""
    # are we authenticated?
    g.user.logger.info('SUCCESS')
    stats = {}
    global api
    for payload in request.get_json():
        config_name = payload["config_name"]
        add_network = payload["add_network_block"]
        device_list = payload["deviceList"]
        added_total = 0
        dup_total = 0
        # Refresh the API handle per payload so the auth key cannot expire
        # while we are midway through processing.
        api = g.user.get_api()
        print(add_network)
        print(device_list)
        config = api.get_configuration(config_name)
        for device in device_list:
            print(device["ip"])
            added, dup, ip = add_device(device, config, add_network)
            added_total += added
            dup_total += dup
            # Tag only addresses that were actually created.
            if added == 1:
                add_tag(ip)
        stats[config_name] = {"added_ips": added_total, "dup_ips": dup_total}
    return jsonify(stats)
def add_device(device, config, add_network):
    """Add one discovered device's IP address to BAM.

    device: dict with "ip", "mac" and "family" ('4' or '6') keys.
    config: BAM configuration entity to add the address into.
    add_network: when truthy, missing IPv4 block/network containers are
        created on the fly before assigning the address.

    Returns (added_ip, dup_ip, ip_obj): 0-or-1 counters plus the created
    address entity (or None when nothing was added).
    """
    # Algorithm to add ip to BAM
    # check if block exists for this ip address.
    try:
        ip = device["ip"]
        mac = ''
        mac = device["mac"]
        family = device["family"]
        blk_data = None
        dup_ip = 0
        added_ip = 0
        ip_obj = None
        # get_ip_range_by_ip raises PortalException when no container holds
        # the address -- that drives the except branch below.
        if family == '4':
            blk_data = config.get_ip_range_by_ip('IP4Block', ip)
        else:
            blk_data = config.get_ip_range_by_ip('IP6Block', ip)
        # if block exists, check for network
        network_data = None
        if family == '4':
            network_data = config.get_ip_range_by_ip('IP4Network', ip)
        else:
            network_data = config.get_ip_range_by_ip('IP6Network', ip)
        # If Block and Network exists, add ip address
        # currently, assigning ip address is throwing API exception:Server raised fault: "Duplicate of another item"
        # Need to see how we can catch it
        if blk_data is not None and network_data is not None:
            # Add ip address
            ip_obj = assign_ip(network_data, ip, mac, family)
            added_ip += 1
        # If no block exists and add_network is set to true, create Block with /32, create Network with /32 and then
        # create ip with /32
    except PortalException as e:
        # No block address containing input ip address exists. Check the flag and create one
        # NOTE(review): this recovery path is IPv4-only (add_ip4_*); IPv6
        # devices with no container are silently skipped -- confirm intent.
        if add_network:
            try:
                # Add Block, then network and finally add ip
                # Below line is returning BAMException - IPv4 Blocks cannot be in size of /31 and /32
                # So, at this point, if there is no container, do not add ip address
                # config.add_ip4_block_by_cidr(ip)
                if blk_data is None:
                    # add /30 for addressblock
                    block_network = ipaddress.ip_network(ip + '/30', strict=False)
                    config.add_ip4_block_by_cidr(block_network.exploded)
                    blk_data = config.get_ip_range_by_ip('IP4Block', ip)
                if blk_data is not None:
                    # create network in block
                    blk_data.add_ip4_network(ip + '/32')
                    # create ip under above created network
                    network_data = config.get_ip_range_by_ip('IP4Network', ip)
                    if network_data is not None:
                        # Add ip address
                        ip_obj = assign_ip(network_data, ip, mac, family)
                        added_ip += 1
            except APIException as ex:
                if "Duplicate" in ex.get_message():
                    dup_ip += 1
                # else:
                # Seeing intermittent error while adding address block, so had to stop logging error
                # app.loggererror('add_ip: ' + ex.message)
    except APIException as ex:
        # when ip address already exists, it returns BAMException with message 'Server raised fault: "Duplicate of another item"'
        # "Duplicate" in ex.get_message()
        if "Duplicate" in ex.get_message():
            dup_ip += 1
        else:
            # TODO - how to log info message and not error?
            # NOTE(review): app.loggererror looks like a typo for
            # app.logger.error -- this path would raise AttributeError.
            app.loggererror('add_ip: ' + ex.get_message())
    return (added_ip, dup_ip, ip_obj)
def assign_ip(network_data, ip, mac, family):
    """Assign *ip* inside *network_data* and return the created address object.

    With a non-empty MAC the address is created as a DHCP reservation
    ('MAKE_DHCP_RESERVED'); otherwise as a static assignment ('MAKE_STATIC').
    family selects the IPv4 ('4') or IPv6 (anything else) API call.

    BUG FIX: the original tested ``mac is not ''`` -- an identity comparison
    against a literal, which is implementation-dependent and a SyntaxWarning
    on modern CPython; use truthiness instead.
    """
    action = 'MAKE_DHCP_RESERVED' if mac else 'MAKE_STATIC'
    if family == '4':
        return network_data.assign_ip4_address(ip, mac, '', action)
    return network_data.assign_ip6_address(ip, mac, '', action)
def add_tag(ip):
    """Tag *ip* with the "Discovered Device" tag from the "Lumeta" tag group.

    Creates the tag group and/or tag on first use.  Relies on the
    module-level ``api`` handle set by addiplist_addiplist_page().
    """
    tag_group = None
    tag = None
    try:
        tag_group = api.get_tag_group_by_name("Lumeta")
        # If tag group exists, chances are that tag exists as well, but just in case if it doesn't
        tag = tag_group.get_tag_by_name("Discovered Device")
    except PortalException as e:
        # Either the group or the tag is missing; create whichever is absent.
        if tag_group is None:
            # Tag group does not exist, create one
            tag_group = api.add_tag_group("Lumeta")
        if tag is None:
            # Get tag group object. above API to add tag group is only returning object id instead of entire object
            # Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag'
            tag_group = api.get_tag_group_by_name("Lumeta")
            # Create Tag under Lumeta
            tag = tag_group.add_tag("Discovered Device")
    try:
        # assign tag to ip
        ip.link_entity(tag)
    except APIException as ex:
        # Best-effort: tagging failures are reported but do not abort the add.
        print(ex.get_message())
| Community/AssetManagement/lumeta_workflow_page.py | 12,833 | Copyright 2019 BlueCat Networks. All rights reserved. application config Define global variable to hold handle to API object GET, PUT or POST are we authenticated? are we authenticated? Return object that contains all the networks (and eventually all ip addresses) list of all properties objects If name is given, use get_configuration(name) FIXME - need code to get network list from configuration id. Is there a call to get children_of_types (['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network' use get_by_object_types(*, ['IP4Block', 'IP4Network', 'IP6Block', 'IP6Network']) - returns flat list We might want to request IP4Network, IP6Network FIXME - extract below code in a function and call it for IP4Block and IP6Block get all blocks and networks for block run below for IP4Address, IP6Address - properties will be populated as well Sometimes below list contains all ip addresses and sometimes only one for gateway address Look through n.get_properties() and add them to ip_datareturn network_data block = ipaddress.ip_network(block_address, config_id, config_name) This constructs adding network as key with all values that were returned from calculate network stats Nothing to do here since we aren't adding anything to the object to tag address, add_ip4 - get back IP4Address object. Call object.link_entity(entity id of the tag) GET, PUT or POST @util.rest_workflow_permission_required('addiplist_page') are we authenticated? Get API object up front and use it going forward. That way, auth key doesn't expire on us when we are midway in processing Add tag if ip was added Algorithm to add ip to BAM check if block exists for this ip address. 
if block exists, check for network If Block and Network exists, add ip address currently, assigning ip address is throwing API exception:Server raised fault: "Duplicate of another item" Need to see how we can catch it Add ip address If no block exists and add_network is set to true, create Block with /32, create Network with /32 and then create ip with /32 No block address containing input ip address exists. Check the flag and create one Add Block, then network and finally add ip Below line is returning BAMException - IPv4 Blocks cannot be in size of /31 and /32 So, at this point, if there is no container, do not add ip address config.add_ip4_block_by_cidr(ip) add /30 for addressblock create network in block create ip under above created network Add ip address else: Seeing intermittent error while adding address block, so had to stop logging error app.loggererror('add_ip: ' + ex.message) when ip address already exists, it returns BAMException with message 'Server raised fault: "Duplicate of another item"' "Duplicate" in ex.get_message() TODO - how to log info message and not error? If tag group exists, chances are that tag exists as well, but just in case if it doesn't Tag group does not exist, create one Get tag group object. above API to add tag group is only returning object id instead of entire object Calling add_tag on it is throwing exception 'int' object has no attribute 'add_tag' Create Tag under Lumeta assign tag to ip | 3,108 | en | 0.807744 |
# Generated by Django 2.2.1 on 2019-07-10 04:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the Section_Deduction table."""

    dependencies = [
        ('investment_bot', '0005_amount_restrictions'),
    ]

    operations = [
        migrations.CreateModel(
            name='Section_Deduction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('year', models.CharField(max_length=10)),
                ('employee_code', models.CharField(max_length=100)),
                ('section_id', models.CharField(max_length=100)),
                ('subsection_id', models.CharField(max_length=100)),
                ('amount', models.IntegerField()),
            ],
        ),
    ]
| Chatbot_investment/chatbot/investment_bot/migrations/0006_section_deduction.py | 787 | Generated by Django 2.2.1 on 2019-07-10 04:55 | 45 | en | 0.60808 |
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.conf import settings
from django.conf.urls import patterns, url, include
from django.views.generic.base import RedirectView
from rest_framework.urlpatterns import format_suffix_patterns
from Q.questionnaire.views import *
from Q.questionnaire.views.api import *
from Q.questionnaire.views.services import *
from Q.questionnaire.views.views_feed import QFeed, q_publication
# NOTE(review): django.conf.urls.patterns() and string view references were
# removed in Django 1.10; this module targets an older Django release.
api_urls = patterns('',
    # just some testing (obviously)...
    url(r'^projects_test/(?P<pk>[0-9]+)/$', QProjectTestDetail.as_view(), name="project-test-detail"),
    # just some lite serializations for populating the project page...
    url(r'^customizations_lite/$', QCustomizationLiteList.as_view(), name="customization_lite-list"),
    url(r'^realizations_lite/$', QRealizationLiteList.as_view(), name="realization_lite-list"),
    url(r'^projects_lite/$', QProjectLiteList.as_view(), name="project_lite-list"),
    url(r'^projects_lite/(?P<pk>[0-9]+)/$', QProjectLiteDetail.as_view(), name="project_lite-detail"),
    # getting project info...
    url(r'^projects/$', QProjectList.as_view(), name="project-list"),
    url(r'^projects/(?P<pk>[0-9]+)/$', QProjectDetail.as_view(), name="project-detail"),
    # getting ontology info...
    url(r'^ontologies/$', QOntologyList.as_view(), name="ontology-list"),
    # getting customization info...
    url(r'^customizations/$', QModelCustomizationList.as_view(), name="customization-list"),
    url(r'^customizations/(?P<pk>[0-9]+)/$', QModelCustomizationDetail.as_view(), name="customization-detail"),
    url(r'^customizations/cache/$', get_cached_customizations, name="customization-cache"),
    # getting realization info...
    url(r'^realizations/$', QModelRealizationList.as_view(), name="realization-list"),
    url(r'^realizations/(?P<pk>[0-9]+)/$', QModelRealizationDetail.as_view(), name="realization-detail"),
    url(r'^realizations/cache/$', get_cached_realizations, name="realization-cache"),
)

if settings.DEBUG:
    # only expose pre-defined api urls in debug mode...
    api_urls += patterns('', url(r'^$', api_root))

# automatically add support for different serialization formats (JSON is default)...
api_urls = format_suffix_patterns(api_urls)
# AJAX-only service endpoints (not part of the RESTful API).
services_urls = patterns('',
    # testing (obviously)...
    url(r'^test/$', q_services_test),
    # getting pending messages...
    url(r'^messages/$', get_django_messages),
    # routing http calls through a proxy...
    url(r'^proxy/$', q_proxy, name="proxy"),
    # logging data from the client...
    url(r'^log/$', q_log, name="log"),
    # the WORLD-FAMOUS load-on-demand paradigm...
    url(r'^load_section/(?P<section_type>[^/]+)/$', q_load_section, name="load_section"),
    # joining a project...
    url(r'^(?P<project_name>[^/]+)/project_join_request/$', q_project_join_request, name="project_join_request"),
    # managing a project...
    url(r'^(?P<project_name>[^/]+)/project_add_member/$', q_project_add_member, name="project_add_member"),
    # deleting a customization...
    url(r'^customization_delete/$', q_customization_delete, name="customization_delete"),
    # adding a relationship...
    url(r'^realization_add_relationship_value/$', q_realization_add_relationship_value, name="realization_add_relationsip_value"),
    # removing a relationship...
    url(r'^realization_remove_relationship_value/$', q_realization_remove_relationship_value, name="realization_remove_relationsip_value"),
    # publishing a realization...
    url(r'^realization_publish/$', q_realization_publish, name="realization_publish"),
)
# Top-level URL routing for the questionnaire app.
urlpatterns = patterns('',
    # RESTful API...
    url(r'^api/', include(api_urls)),
    # webservices (AJAX POST only) outside of RESTful API...
    url(r'^services/', include(services_urls)),
    # testing (obviously)...
    url(r'^test/$', q_test, name="test"),
    # help...
    url(r'^help/$', RedirectView.as_view(url=settings.Q_HELP_URL, permanent=True), name="help"),
    # customizations...
    url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_customize_new, name="customize_new"),
    url(r'^(?P<project_name>[^/]+)/customize/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<customization_name>[^/]+)/$', q_customize_existing, name="customize_existing"),
    # realizations...
    url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_edit_new, name="edit_new"),
    url(r'^(?P<project_name>[^/]+)/edit/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_edit_existing, name="edit_existing"),
    url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_view_new, name="view_new"),
    url(r'^(?P<project_name>[^/]+)/view/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<realization_pk>[^/]+)/$', q_view_existing, name="view_existing"),
    url(r'^(?P<project_name>[^/]+)/get/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', q_get_existing, name="get_existing"),
    # publications (ATOM feed)...
    url(r'^feed/$', QFeed(), name="feed"),
    url(r'^feed/(?P<project_name>[^/]+)/$', QFeed(), name="feed_project"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/$', QFeed(), name="feed_project_ontology"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/$', QFeed(), name="feed_project_ontology_proxy"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/$', q_publication, name="publication_latest"),
    url(r'^feed/(?P<project_name>[^/]+)/(?P<ontology_key>[^/]+)/(?P<document_type>[^/]+)/(?P<publication_name>[^/]+)/(?P<publication_version>[^/]+)/$', q_publication, name="publication_version"),
    # projects...
    url(r'^(?P<project_name>[^/]+)/$', q_project, name="project"),
    url(r'^(?P<project_name>[^/]+)/customize/$', q_project_customize, name="project_customize"),
    url(r'^(?P<project_name>[^/]+)/manage/$', q_project_manage, name="project_manage"),
    # index...
    # CONSISTENCY FIX: every other route references its view callable (imported
    # via `from Q.questionnaire.views import *`); the old string reference
    # 'questionnaire.views.q_index' also pointed at the wrong module path
    # (the app lives under Q.questionnaire).
    # (assumes q_index is exported by Q.questionnaire.views -- confirm)
    url(r'^$', q_index, name="index"),
)
| Q/questionnaire/q_urls.py | 6,431 | ES-DOC CIM Questionnaire Copyright (c) 2017 ES-DOC. All rights reserved. University of Colorado, Boulder http://cires.colorado.edu/ This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT]. just some testing (obviously)... just some lite serializations for populating the project page... getting project info... getting ontology info... getting customization info... getting realization info... only expose pre-defined api urls in debug mode... automatically add support for different serialization formats (JSON is default)... testing (obviously)... getting pending messages... routing http calls through a proxy... logging data from the client... the WORLD-FAMOUS load-on-demand paradigm... joining a project... managing a project... deleting a customization... adding a relationship... removing a relationship... publishing a realization... RESTful API... webservices (AJAX POST only) outside of RESTful API... testing (obviously)... help... customizations... realizations... publications (ATOM feed)... projects... index... | 1,089 | en | 0.77065 |
def pattern_sixteen(steps):
    '''Print a triangle of descending numbers, one extra value per row.

    For steps = 9:
    9
    9 8
    9 8 7
    9 8 7 6
    9 8 7 6 5
    9 8 7 6 5 4
    9 8 7 6 5 4 3
    9 8 7 6 5 4 3 2
    9 8 7 6 5 4 3 2 1
    '''
    # Numbers from `steps` down to 1, pre-converted to strings once.
    descending = [str(value) for value in range(steps, 0, -1)]
    for row_length in range(1, steps + 1):
        print(' '.join(descending[:row_length]))
if __name__ == '__main__':
    try:
        pattern_sixteen(9)
    # NOTE(review): pattern_sixteen(9) cannot raise NameError here; a bad
    # argument type would raise TypeError instead, so this handler looks
    # ineffective -- confirm the intended exception type.
    except NameError:
print('Integer was expected')
| Project Pattern/pattern_16.py | 612 | Pattern sixteen
9
9 8
9 8 7
9 8 7 6
9 8 7 6 5
9 8 7 6 5 4
9 8 7 6 5 4 3
9 8 7 6 5 4 3 2
9 8 7 6 5 4 3 2 1
Getting range of number in string and reverse it Slicing values | 172 | en | 0.326186 |
from __future__ import absolute_import, division, print_function
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.layers.normalization import BatchNormalization
from tensorflow.python.keras.models import Model
# 第一種架構: 深度前饋網路(deep feedforward network)
# 也叫做前饋神經網路(feedforward neural network)或多層感知機(multilayer perceptron, MLP)
def get_dfn(output_size, img_height, img_width, show=True):
    """Build the baseline deep feedforward network (MLP).

    Input: a flattened image of length img_height * img_width.
    Output: a linear Dense layer of size output_size.
    Set show=True to print the Keras model summary.
    """
    model_input = Input(shape=(img_height * img_width,), name='Main_input')
    # Apply the hidden stack in order; layer names match the original model.
    layer_stack = [
        Dense(256, activation='selu', name='Dense_selu_1'),
        BatchNormalization(name='BN_1'),
        Dense(256, activation='tanh', name='Dense_tanh_1'),
        BatchNormalization(name='BN_2'),
        Dense(256, activation='tanh', name='Dense_tanh_2'),
        Dense(output_size, activation='linear', name='Output_Dense_linear'),
    ]
    tensor = model_input
    for layer in layer_stack:
        tensor = layer(tensor)
    dfn = Model(inputs=model_input, outputs=tensor, name='DFN')
    if show:
        print('DFN summary:')
        dfn.summary()
        print()
    return dfn
def get_dfn_relu(output_size, img_height, img_width, show=True):
    """Build the ReLU variant of the DFN: input BatchNorm, three 256-unit
    ReLU Dense layers, then a linear output layer.

    Set show=True to print the Keras model summary.
    """
    model_input = Input(shape=(img_height * img_width,), name='Main_input')
    # Apply the layers in order; names match the original model exactly.
    pipeline = [
        BatchNormalization(name='BN_1'),
        Dense(256, activation='relu', name='Dense_relu_1'),
        Dense(256, activation='relu', name='Dense_relu_2'),
        Dense(256, activation='relu', name='Dense_relu_3'),
        Dense(output_size, activation='linear', name='Output_Dense_linear'),
    ]
    tensor = model_input
    for layer in pipeline:
        tensor = layer(tensor)
    dfn = Model(inputs=model_input, outputs=tensor, name='DFN_relu')
    if show:
        print('DFN_relu summary:')
        dfn.summary()
        print()
    return dfn
def get_dfn_selu(output_size, img_height, img_width, show=True):
    """Build the SELU variant of the DFN: input BatchNorm, three 256-unit
    SELU Dense layers, then a linear output layer.

    Set show=True to print the Keras model summary.

    CONSISTENCY FIX: the input BatchNormalization layer is now named 'BN_1'
    to match get_dfn_relu(); previously it was left unnamed, so Keras
    auto-generated a name inconsistent with the sibling model.
    """
    model_input = Input(shape=(img_height * img_width,), name='Main_input')
    x = BatchNormalization(name='BN_1')(model_input)
    x = Dense(256, activation='selu', name='Dense_selu_1')(x)
    x = Dense(256, activation='selu', name='Dense_selu_2')(x)
    x = Dense(256, activation='selu', name='Dense_selu_3')(x)
    dfn_output = Dense(output_size, activation='linear',
                       name='Output_Dense_linear')(x)
    dfn = Model(inputs=model_input, outputs=dfn_output, name='DFN_selu')
    if show:
        print('DFN_selu summary:')
        dfn.summary()
        print()
    return dfn
| erinn/python/models/DFN.py | 2,578 | 第一種架構: 深度前饋網路(deep feedforward network) 也叫做前饋神經網路(feedforward neural network)或多層感知機(multilayer perceptron, MLP) x = BatchNormalization()(x) x = BatchNormalization()(x) x = BatchNormalization()(x) x = BatchNormalization()(x) | 223 | en | 0.36728 |
from rest_framework import serializers, generics
from snippets.models import Snippet, LANGUAGE_CHOICES, STYLE_CHOICES
from users.models import UserProfile
# class SnippetSerializer(serializers.Serializer):
# id = serializers.IntegerField(read_only=True)
# title = serializers.CharField(required=False, allow_blank=True, max_length=100)
# code = serializers.CharField(style={'base_template': 'textarea.html'})
# linenos = serializers.BooleanField(required=False)
# language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python')
# style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly')
#
# def create(self, validated_data):
# """
# 给定验证过的数据创建并返回一个新的 Snippet 实例。
# """
# return Snippet.objects.create(**validated_data)
#
# def update(self, instance, validated_data):
# """
# 根据已验证的数据更新并返回已存在的 Snippet 实例。
# """
# instance.title = validated_data.get('title', instance.title)
# instance.code = validated_data.get('code', instance.code)
# instance.linenos = validated_data.get('linenos', instance.linenos)
# instance.language = validated_data.get('language', instance.language)
# instance.style = validated_data.get('style', instance.style)
# instance.save()
# return instance
class SnippetSerializer(serializers.ModelSerializer):
    """Serializer for Snippet; exposes the owner's username read-only."""
    # Populated from the related user object, never writable by clients.
    owner = serializers.ReadOnlyField(source='owner.username')

    class Meta:
        model = Snippet
        fields = ('id', 'title', 'code', 'linenos', 'language', 'style', 'owner')
class UserSerializer(serializers.ModelSerializer):
    """Serializer for UserProfile including the ids of the user's snippets."""
    snippets = serializers.PrimaryKeyRelatedField(many=True, queryset=Snippet.objects.all())

    class Meta:
        model = UserProfile
        # NOTE(review): 'password' is listed here and will therefore be
        # serialized into responses -- confirm this is intended.
        fields = ('id', 'username', 'first_name', 'last_name', 'snippets', 'password')
| apps/snippets/serializers.py | 1,951 | class SnippetSerializer(serializers.Serializer): id = serializers.IntegerField(read_only=True) title = serializers.CharField(required=False, allow_blank=True, max_length=100) code = serializers.CharField(style={'base_template': 'textarea.html'}) linenos = serializers.BooleanField(required=False) language = serializers.ChoiceField(choices=LANGUAGE_CHOICES, default='python') style = serializers.ChoiceField(choices=STYLE_CHOICES, default='friendly') def create(self, validated_data): """ 给定验证过的数据创建并返回一个新的 Snippet 实例。 """ return Snippet.objects.create(**validated_data) def update(self, instance, validated_data): """ 根据已验证的数据更新并返回已存在的 Snippet 实例。 """ instance.title = validated_data.get('title', instance.title) instance.code = validated_data.get('code', instance.code) instance.linenos = validated_data.get('linenos', instance.linenos) instance.language = validated_data.get('language', instance.language) instance.style = validated_data.get('style', instance.style) instance.save() return instance | 1,145 | en | 0.146654 |
"""Support for sending data to StatsD."""
import logging
import statsd
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_PREFIX, EVENT_STATE_CHANGED
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Component-specific configuration keys.
CONF_ATTR = "log_attributes"
CONF_RATE = "rate"
CONF_VALUE_MAP = "value_mapping"

# Defaults applied by CONFIG_SCHEMA below.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8125
DEFAULT_PREFIX = "hass"
DEFAULT_RATE = 1

DOMAIN = "statsd"
# Voluptuous schema for the `statsd:` entry in configuration.yaml.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.Schema(
            {
                vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
                vol.Optional(CONF_ATTR, default=False): cv.boolean,
                vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
                vol.Optional(CONF_PREFIX, default=DEFAULT_PREFIX): cv.string,
                # Sample rate must coerce to a positive integer.
                vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(
                    vol.Coerce(int), vol.Range(min=1)
                ),
                vol.Optional(CONF_VALUE_MAP): dict,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
    """Set up the StatsD component and subscribe it to state changes."""
    settings = config[DOMAIN]
    sample_rate = settings.get(CONF_RATE)
    value_mapping = settings.get(CONF_VALUE_MAP)
    show_attribute_flag = settings.get(CONF_ATTR)
    client = statsd.StatsClient(
        host=settings.get(CONF_HOST),
        port=settings.get(CONF_PORT),
        prefix=settings.get(CONF_PREFIX),
    )

    def statsd_event_listener(event):
        """Forward a single state-changed event to StatsD."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        try:
            if value_mapping and new_state.state in value_mapping:
                numeric_state = float(value_mapping[new_state.state])
            else:
                numeric_state = state_helper.state_as_number(new_state)
        except ValueError:
            # Non-numeric state: skip the gauge but still emit attributes/count.
            numeric_state = None
        attributes = dict(new_state.attributes)
        _LOGGER.debug("Sending %s", new_state.entity_id)
        if show_attribute_flag is True:
            if isinstance(numeric_state, (float, int)):
                client.gauge("%s.state" % new_state.entity_id, numeric_state, sample_rate)
            # Send attribute values
            for key, value in attributes.items():
                if isinstance(value, (float, int)):
                    stat = "%s.%s" % (new_state.entity_id, key.replace(" ", "_"))
                    client.gauge(stat, value, sample_rate)
        else:
            if isinstance(numeric_state, (float, int)):
                client.gauge(new_state.entity_id, numeric_state, sample_rate)
        # Increment the per-entity event counter regardless of state type.
        client.incr(new_state.entity_id, rate=sample_rate)

    hass.bus.listen(EVENT_STATE_CHANGED, statsd_event_listener)
    return True
| homeassistant/components/statsd/__init__.py | 2,957 | Set up the StatsD component.
Listen for new messages on the bus and sends them to StatsD.
Support for sending data to StatsD.
Set the state to none and continue for any numeric attributes. Send attribute values Increment the count | 232 | en | 0.707765 |
import base64
from datetime import timedelta
import logging
import time
import uuid
import warnings
import httpx
from ably.types.capability import Capability
from ably.types.tokendetails import TokenDetails
from ably.types.tokenrequest import TokenRequest
from ably.util.exceptions import AblyException, IncompatibleClientIdException
__all__ = ["Auth"]
log = logging.getLogger(__name__)
class Auth:
    class Method:
        """Identifiers for the two supported authentication mechanisms."""
        BASIC = "BASIC"
        TOKEN = "TOKEN"
    def __init__(self, ably, options):
        """Initialise authentication state for *ably* from client *options*.

        Chooses basic auth when an API key is available and token auth was
        not explicitly requested; otherwise falls back to token auth and
        validates that at least one token source (auth_callback, auth_url,
        key, token or TokenDetails) was supplied.

        Raises ValueError when the options are contradictory (token auth
        disabled without a key) or no token source is available.
        """
        self.__ably = ably
        self.__auth_options = options

        # Prefer the client id carried by an explicit TokenDetails over the
        # one in the plain options.
        if options.token_details:
            self.__client_id = options.token_details.client_id
        else:
            self.__client_id = options.client_id
        self.__client_id_validated = False

        self.__basic_credentials = None
        self.__auth_params = None
        self.__token_details = None
        self.__time_offset = None

        # use_token_auth is tri-state: True (forced), False (forbidden),
        # None (no preference) -- hence the identity comparisons.
        must_use_token_auth = options.use_token_auth is True
        must_not_use_token_auth = options.use_token_auth is False
        can_use_basic_auth = options.key_secret is not None
        if not must_use_token_auth and can_use_basic_auth:
            # We have the key, no need to authenticate the client
            # default to using basic auth
            log.debug("anonymous, using basic auth")
            self.__auth_mechanism = Auth.Method.BASIC
            basic_key = "%s:%s" % (options.key_name, options.key_secret)
            basic_key = base64.b64encode(basic_key.encode('utf-8'))
            self.__basic_credentials = basic_key.decode('ascii')
            return
        elif must_not_use_token_auth and not can_use_basic_auth:
            raise ValueError('If use_token_auth is False you must provide a key')

        # Using token auth
        self.__auth_mechanism = Auth.Method.TOKEN

        if options.token_details:
            self.__token_details = options.token_details
        elif options.auth_token:
            self.__token_details = TokenDetails(token=options.auth_token)
        else:
            self.__token_details = None

        # Log which token source will be used (priority order below).
        if options.auth_callback:
            log.debug("using token auth with auth_callback")
        elif options.auth_url:
            log.debug("using token auth with auth_url")
        elif options.key_secret:
            log.debug("using token auth with client-side signing")
        elif options.auth_token:
            log.debug("using token auth with supplied token only")
        elif options.token_details:
            log.debug("using token auth with supplied token_details")
        else:
            raise ValueError("Can't authenticate via token, must provide "
                             "auth_callback, auth_url, key, token or a TokenDetail")
async def __authorize_when_necessary(self, token_params=None, auth_options=None, force=False):
self.__auth_mechanism = Auth.Method.TOKEN
if token_params is None:
token_params = dict(self.auth_options.default_token_params)
else:
self.auth_options.default_token_params = dict(token_params)
self.auth_options.default_token_params.pop('timestamp', None)
if auth_options is not None:
self.auth_options.replace(auth_options)
auth_options = dict(self.auth_options.auth_options)
if self.client_id is not None:
token_params['client_id'] = self.client_id
token_details = self.__token_details
if not force and not self.token_details_has_expired():
log.debug("using cached token; expires = %d",
token_details.expires)
return token_details
self.__token_details = await self.request_token(token_params, **auth_options)
self._configure_client_id(self.__token_details.client_id)
return self.__token_details
def token_details_has_expired(self):
token_details = self.__token_details
if token_details is None:
return True
expires = token_details.expires
if expires is None:
return False
timestamp = self._timestamp()
if self.__time_offset:
timestamp += self.__time_offset
return expires < timestamp + token_details.TOKEN_EXPIRY_BUFFER
async def authorize(self, token_params=None, auth_options=None):
return await self.__authorize_when_necessary(token_params, auth_options, force=True)
async def authorise(self, *args, **kwargs):
warnings.warn(
"authorise is deprecated and will be removed in v2.0, please use authorize",
DeprecationWarning)
return await self.authorize(*args, **kwargs)
async def request_token(self, token_params=None,
# auth_options
key_name=None, key_secret=None, auth_callback=None,
auth_url=None, auth_method=None, auth_headers=None,
auth_params=None, query_time=None):
token_params = token_params or {}
token_params = dict(self.auth_options.default_token_params,
**token_params)
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
log.debug("Auth callback: %s" % auth_callback)
log.debug("Auth options: %s" % self.auth_options)
if query_time is None:
query_time = self.auth_options.query_time
query_time = bool(query_time)
auth_callback = auth_callback or self.auth_options.auth_callback
auth_url = auth_url or self.auth_options.auth_url
auth_params = auth_params or self.auth_options.auth_params or {}
auth_method = (auth_method or self.auth_options.auth_method).upper()
auth_headers = auth_headers or self.auth_options.auth_headers or {}
log.debug("Token Params: %s" % token_params)
if auth_callback:
log.debug("using token auth with authCallback")
token_request = await auth_callback(token_params)
elif auth_url:
log.debug("using token auth with authUrl")
token_request = await self.token_request_from_auth_url(
auth_method, auth_url, token_params, auth_headers, auth_params)
else:
token_request = await self.create_token_request(
token_params, key_name=key_name, key_secret=key_secret,
query_time=query_time)
if isinstance(token_request, TokenDetails):
return token_request
elif isinstance(token_request, dict) and 'issued' in token_request:
return TokenDetails.from_dict(token_request)
elif isinstance(token_request, dict):
token_request = TokenRequest.from_json(token_request)
elif isinstance(token_request, str):
return TokenDetails(token=token_request)
token_path = "/keys/%s/requestToken" % token_request.key_name
response = await self.ably.http.post(
token_path,
headers=auth_headers,
body=token_request.to_dict(),
skip_auth=True
)
AblyException.raise_for_response(response)
response_dict = response.to_native()
log.debug("Token: %s" % str(response_dict.get("token")))
return TokenDetails.from_dict(response_dict)
async def create_token_request(self, token_params=None,
key_name=None, key_secret=None, query_time=None):
token_params = token_params or {}
token_request = {}
key_name = key_name or self.auth_options.key_name
key_secret = key_secret or self.auth_options.key_secret
if not key_name or not key_secret:
log.debug('key_name or key_secret blank')
raise AblyException("No key specified: no means to generate a token", 401, 40101)
token_request['key_name'] = key_name
if token_params.get('timestamp'):
token_request['timestamp'] = token_params['timestamp']
else:
if query_time is None:
query_time = self.auth_options.query_time
if query_time:
if self.__time_offset is None:
server_time = await self.ably.time()
local_time = self._timestamp()
self.__time_offset = server_time - local_time
token_request['timestamp'] = server_time
else:
local_time = self._timestamp()
token_request['timestamp'] = local_time + self.__time_offset
else:
token_request['timestamp'] = self._timestamp()
token_request['timestamp'] = int(token_request['timestamp'])
ttl = token_params.get('ttl')
if ttl is not None:
if isinstance(ttl, timedelta):
ttl = ttl.total_seconds() * 1000
token_request['ttl'] = int(ttl)
capability = token_params.get('capability')
if capability is not None:
token_request['capability'] = str(Capability(capability))
token_request["client_id"] = (
token_params.get('client_id') or self.client_id)
# Note: There is no expectation that the client
# specifies the nonce; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes
token_request["nonce"] = token_params.get('nonce') or self._random_nonce()
token_request = TokenRequest(**token_request)
if token_params.get('mac') is None:
# Note: There is no expectation that the client
# specifies the mac; this is done by the library
# However, this can be overridden by the client
# simply for testing purposes.
token_request.sign_request(key_secret.encode('utf8'))
else:
token_request.mac = token_params['mac']
return token_request
@property
def ably(self):
return self.__ably
@property
def auth_mechanism(self):
return self.__auth_mechanism
@property
def auth_options(self):
return self.__auth_options
@property
def auth_params(self):
return self.__auth_params
@property
def basic_credentials(self):
return self.__basic_credentials
@property
def token_credentials(self):
if self.__token_details:
token = self.__token_details.token
token_key = base64.b64encode(token.encode('utf-8'))
return token_key.decode('ascii')
@property
def token_details(self):
return self.__token_details
@property
def client_id(self):
return self.__client_id
@property
def time_offset(self):
return self.__time_offset
def _configure_client_id(self, new_client_id):
# If new client ID from Ably is a wildcard, but preconfigured clientId is set,
# then keep the existing clientId
if self.client_id != '*' and new_client_id == '*':
self.__client_id_validated = True
return
# If client_id is defined and not a wildcard, prevent it changing, this is not supported
if self.client_id is not None and self.client_id != '*' and new_client_id != self.client_id:
raise IncompatibleClientIdException(
"Client ID is immutable once configured for a client. "
"Client ID cannot be changed to '{}'".format(new_client_id), 400, 40012)
self.__client_id_validated = True
self.__client_id = new_client_id
def can_assume_client_id(self, assumed_client_id):
if self.__client_id_validated:
return self.client_id == '*' or self.client_id == assumed_client_id
elif self.client_id is None or self.client_id == '*':
return True # client ID is unknown
else:
return self.client_id == assumed_client_id
async def _get_auth_headers(self):
if self.__auth_mechanism == Auth.Method.BASIC:
# RSA7e2
if self.client_id:
return {
'Authorization': 'Basic %s' % self.basic_credentials,
'X-Ably-ClientId': base64.b64encode(self.client_id.encode('utf-8'))
}
return {
'Authorization': 'Basic %s' % self.basic_credentials,
}
else:
await self.__authorize_when_necessary()
return {
'Authorization': 'Bearer %s' % self.token_credentials,
}
def _timestamp(self):
"""Returns the local time in milliseconds since the unix epoch"""
return int(time.time() * 1000)
def _random_nonce(self):
return uuid.uuid4().hex[:16]
async def token_request_from_auth_url(self, method, url, token_params, headers, auth_params):
body = None
params = None
if method == 'GET':
body = {}
params = dict(auth_params, **token_params)
elif method == 'POST':
params = {}
body = dict(auth_params, **token_params)
from ably.http.http import Response
async with httpx.AsyncClient(http2=True) as client:
resp = await client.request(method=method, url=url, headers=headers, params=params, data=body)
response = Response(resp)
AblyException.raise_for_response(response)
try:
token_request = response.to_native()
except ValueError:
token_request = response.text
return token_request
| ably/rest/auth.py | 13,689 | Returns the local time in milliseconds since the unix epoch
We have the key, no need to authenticate the client default to using basic auth Using token auth auth_options Note: There is no expectation that the client specifies the nonce; this is done by the library However, this can be overridden by the client simply for testing purposes Note: There is no expectation that the client specifies the mac; this is done by the library However, this can be overridden by the client simply for testing purposes. If new client ID from Ably is a wildcard, but preconfigured clientId is set, then keep the existing clientId If client_id is defined and not a wildcard, prevent it changing, this is not supported client ID is unknown RSA7e2 | 732 | en | 0.914317 |
from urllib.parse import urlparse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.shortcuts import resolve_url
from gate.views import redirect_to_gate
from gate import REDIRECT_FIELD_NAME
class GateLockMixin:
    """View mixin that only dispatches when the session's gate key passes
    lock_test_func(); otherwise it redirects to the gate page, or raises
    PermissionDenied when `raise_exception` is set.
    """

    gate_url = None  # falls back to settings.GATE_URL
    permission_denied_message = ''
    raise_exception = False  # raise PermissionDenied instead of redirecting
    redirect_field_name = REDIRECT_FIELD_NAME

    def get_gate_url(self):
        """
        Override this method to override the gate_url attribute.
        """
        gate_url = self.gate_url or settings.GATE_URL
        if not gate_url:
            raise ImproperlyConfigured(
                '{0} is missing the gate_url attribute. Define {0}.gate_url, settings.GATE_URL, or override '
                '{0}.get_gate_url().'.format(self.__class__.__name__)
            )
        return str(gate_url)

    def get_permission_denied_message(self):
        """
        Override this method to override the permission_denied_message attribute.
        """
        return self.permission_denied_message

    def get_redirect_field_name(self):
        """
        Override this method to override the redirect_field_name attribute.
        """
        return self.redirect_field_name

    def handle_no_permission(self):
        """Redirect to the gate page, or raise PermissionDenied."""
        if self.raise_exception:
            raise PermissionDenied(self.get_permission_denied_message())

        path = self.request.build_absolute_uri()
        resolved_gate_url = resolve_url(self.get_gate_url())
        # If the gate url is the same scheme and net location then use the
        # path as the "next" url.
        gate_scheme, gate_netloc = urlparse(resolved_gate_url)[:2]
        current_scheme, current_netloc = urlparse(path)[:2]
        if (
            (not gate_scheme or gate_scheme == current_scheme) and
            (not gate_netloc or gate_netloc == current_netloc)
        ):
            path = self.request.get_full_path()
        return redirect_to_gate(
            path,
            resolved_gate_url,
            self.get_redirect_field_name(),
        )

    def lock_test_func(self, key):
        """Return a truthy value when `key` unlocks the view. Must be overridden."""
        # Bug fix: the message previously told users to implement a
        # nonexistent test_func() method.
        raise NotImplementedError(
            '{} is missing the implementation of the lock_test_func() method.'.format(self.__class__.__name__)
        )

    def get_lock_test_func(self):
        """
        Override this method to use a different test_func method.
        """
        return self.lock_test_func

    def dispatch(self, request, *args, **kwargs):
        """Gate-check the session key before normal view dispatch."""
        key = request.session.get('gate_key', None)
        key_test_result = self.get_lock_test_func()(key)
        if not key_test_result:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
| gate/mixin.py | 2,723 | Override this method to override the gate_url attribute.
Override this method to use a different test_func method.
Override this method to override the permission_denied_message attribute.
Override this method to override the redirect_field_name attribute.
If the gate url is the same scheme and net location then use the path as the "next" url. | 347 | en | 0.775904 |
import random
import string
from discord import TextChannel
from discord.ext import commands
from discord.ext.tasks import loop
from discord_components import Button, ButtonStyle
from config import settings
from util.Match import Match
class Matchmaking(commands.Cog):
    """Cog that queues players via a button, pairs them on a timer, and
    records match results."""

    def __init__(self, bot):
        self.bot = bot
        # Channels are resolved in on_ready from the IDs in settings.
        self.match_create_channel: TextChannel = None
        self.ongoing_matches_channel: TextChannel = None
        self.match_results_channel: TextChannel = None
        self.match_create_message_id = None
        self.queue = []  # user ids currently waiting for a match
        self.active_matches = {}  # Match ID -> Match instance

    @commands.Cog.listener()
    async def on_ready(self):
        """Resolve channels, post the join-queue button, start pairing."""
        self.match_create_channel = self.bot.get_channel(settings.MATCH_CREATE_CHANNEL)
        self.ongoing_matches_channel = self.bot.get_channel(settings.ONGOING_MATCHES_CHANNEL)
        self.match_results_channel = self.bot.get_channel(settings.MATCH_RESULTS_CHANNEL)

        # Clear the match create channel
        await self.match_create_channel.purge()

        button = [Button(style=ButtonStyle.green, label='Enter Queue', emoji='✅', custom_id=settings.MATCHMAKING_JOIN_QUEUE_CUSTOM_ID)]

        # create the queue message
        # NOTE(review): channel.send returns a Message, not an id -- the
        # attribute name suggests an id was intended; confirm downstream use.
        self.match_create_message_id = await self.match_create_channel.send("enter queue msg", components=button)

        # Start the attempt create match loop
        self.attempt_create_match.start()

    def handle_enter_queue(self, user_id):
        """Add a user to the queue; no-op when already queued."""
        if user_id in self.queue:
            print(f"tried adding {user_id} to queue but they are already in it")
            return
        self.queue.append(user_id)
        print(f"{user_id} has joined the queue")

    async def handle_match_win(self, match, custom_id):
        """Record a win; the winner's id is the custom_id minus its prefix."""
        winner_id = None
        if custom_id:
            winner_id = custom_id.replace(settings.MATCHMAKING_ONGOING_CUSTOM_ID, '')
        if winner_id:
            # `msg` is currently unused after sending the announcement.
            msg = await self.match_results_channel.send(content=f"User {winner_id} won match {match.id}!")
            del self.active_matches[match.id]
            # NOTE(review): bot.get_message(channel, id) / bot.delete_message(msg)
            # are not standard discord.py v1+ Bot methods -- confirm these are
            # custom helpers, otherwise this raises AttributeError.
            match_msg = self.bot.get_message(self.ongoing_matches_channel, match.message_id)
            await self.bot.delete_message(match_msg)

    @loop(seconds=settings.MATCHMAKING_CREATE_MATCH_FREQUENCY)
    async def attempt_create_match(self):
        """Periodically pull two random queued players into a new match."""
        print(f"[Matchmaking] attempting to create a match with {len(self.queue)} members")
        if len(self.queue) <= 1:
            print("tried creating match with less than 2 members")
            return

        #split queues later on based on rank/elo
        matched_players = random.sample(self.queue, 2)
        u1 = matched_players[0]
        u2 = matched_players[1]
        await self.create_match(u1, u2)

    def generate_match_id(self):
        """Random 6-char [A-Z0-9] id; re-rolls on (unlikely) collision."""
        avail_chars = string.ascii_uppercase + string.digits
        id_list = []
        for _ in range(6):
            id_list.append(random.choice(avail_chars))
        generated_id = ''.join(id_list)
        if generated_id not in self.active_matches:
            return generated_id
        return self.generate_match_id()

    def get_match(self, msg_id):
        """Return the active match announced by message `msg_id`, or None."""
        for match in self.active_matches.values():
            if msg_id == match.message_id:
                return match
        return None

    async def create_match(self, u1, u2):
        """Announce a match between u1 and u2 and dequeue both players."""
        match_id = self.generate_match_id()
        buttons = [
            Button(style=ButtonStyle.grey, label=f"{u1} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u1}"),
            Button(style=ButtonStyle.grey, label=f"{u2} won", emoji='✅', custom_id=f"{settings.MATCHMAKING_ONGOING_CUSTOM_ID}{u2}")
        ]
        msg = await self.ongoing_matches_channel.send(content=f"Match between {u1}, {u2}", components=buttons)
        self.active_matches[match_id] = Match(match_id, msg.id, [u1, u2])
        # remove them from the queue
        self.queue.remove(u1)
        self.queue.remove(u2)
def setup(bot):
    # discord.py extension entry point: register the cog when loaded.
    bot.add_cog(Matchmaking(bot))
| cogs/Matchmaking.py | 4,023 | Match ID -> Match instance Clear the match create channel create the queue message Start the attempt create match loopsplit queues later on based on rank/elo remove them from the queue | 184 | en | 0.702718 |
import PySimpleGUI as sg

# Demo: reading values from multiple elements on a button press.
layout = [
    [sg.Text('text')],
    [sg.Input('input', key='input1')],
    [sg.Input('input', key='input2')],
    [sg.Button('button', key='button1')],
]

window = sg.Window('list values - list or dict', layout)

while True:
    event, values = window.Read()
    if event is None:
        # Window closed by the user.
        break
    if event == 'button1':
        print(values['input1'])
        print(values['input2'])
        # prints button key because that's current events' key
        print(event)

window.Close()
| pysimplegui/values_from_some_elements.py | 486 | prints button key because that's current events' key | 52 | en | 0.928849 |
import os.path
from data.base_dataset import BaseDataset, get_transform
from data.image_folder import make_dataset
from PIL import Image
import random
import h5py
import numpy as np
from skimage.transform import resize as skResize
from util.util import normalize, adaptive_instance_normalization
class UnalignedDataset(BaseDataset):
    """
    This dataset class can load unaligned/unpaired datasets.

    It requires two directories to host training images from domain A '/path/to/data/trainA'
    and from domain B '/path/to/data/trainB' respectively.
    You can train the model with the dataset flag '--dataroot /path/to/data'.
    Similarly, you need to prepare two directories:
    '/path/to/data/testA' and '/path/to/data/testB' during test time.
    """

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        self.dir_A = os.path.join(opt.dataroot, opt.phase + 'A')  # create a path '/path/to/data/trainA'
        self.dir_B = os.path.join(opt.dataroot_B, opt.phase + 'B')  # create a path '/path/to/data/trainB'
        self.A_paths = sorted(make_dataset(self.dir_A, opt.max_dataset_size))  # load images from '/path/to/data/trainA'
        self.B_paths = sorted(make_dataset(self.dir_B, opt.max_dataset_size))  # load images from '/path/to/data/trainB'
        self.A_size = len(self.A_paths)  # get the size of dataset A
        self.B_size = len(self.B_paths)  # get the size of dataset B
        btoA = self.opt.direction == 'BtoA'
        input_nc = self.opt.output_nc if btoA else self.opt.input_nc  # get the number of channels of input image
        output_nc = self.opt.input_nc if btoA else self.opt.output_nc  # get the number of channels of output image
        self.transform_A = get_transform(self.opt, grayscale=(input_nc == 1))
        self.transform_B = get_transform(self.opt, grayscale=(output_nc == 1))

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- a random integer for data indexing

        Returns a dictionary that contains A, B, A_paths and B_paths
            A (tensor) -- an image in the input domain
            B (tensor) -- its corresponding image in the target domain
            A_paths (str) -- image paths
            B_paths (str) -- image paths
        """
        A_path = self.A_paths[index % self.A_size]  # make sure index is within then range
        if self.opt.serial_batches:  # make sure index is within then range
            index_B = index % self.B_size
        else:  # randomize the index for domain B to avoid fixed pairs.
            index_B = random.randint(0, self.B_size - 1)
        B_path = self.B_paths[index_B]
        A_img = np.array(Image.open(A_path).convert('RGB'))
        A_img = self.stack(A_img)
        # Domain B images are HSI volumes stored in HDF5 (see hsi_loader).
        try:
            B_img = self.hsi_loader(B_path)
        except KeyError:
            # Bug fix: previously this only printed the path and fell
            # through, producing a confusing NameError on B_img below.
            # Report the offending file and re-raise.
            print(B_path)
            raise
        B = normalize(B_img, max_=4096)
        A = normalize(A_img, max_=1)
        A = adaptive_instance_normalization(A, B)
        del A_img, B_img
        return {'A': A, 'B': B, 'A_paths': A_path, 'B_paths': B_path}

    def __len__(self):
        """Return the total number of images in the dataset.

        As we have two datasets with potentially different number of images,
        we take a maximum of
        """
        return max(self.A_size, self.B_size)

    def stack(self, img, resize=True):
        """Expand an RGB image into a 31-band pseudo-HSI cube (11xB, 10xG,
        10xR) and return it channels-first (C, H, W).

        Bug fix: `resize` was previously accepted but ignored; it now
        controls the crop_size resize (default True keeps old behavior).
        """
        _R = img[:, :, 0]
        _G = img[:, :, 1]
        _B = img[:, :, 2]
        R_img = np.stack((_R,) * 10, axis=2)
        G_img = np.stack((_G,) * 10, axis=2)
        B_img = np.stack((_B,) * 11, axis=2)
        hsi_img = np.concatenate((B_img, G_img, R_img), axis=2)
        if resize:
            hsi_img = self.resize(hsi_img)
        hsi_img = np.einsum('abc->cab', hsi_img)  # HWC -> CHW
        return hsi_img

    def resize(self, img):
        """Resize to (crop_size, crop_size) spatially."""
        img = skResize(img, (self.opt.crop_size, self.opt.crop_size))
        return img

    def hsi_loader(self, path):
        """Load the 'data' dataset from an HDF5 file, resized, as (C, H, W).

        Raises KeyError when the file has no 'data' dataset.
        """
        with h5py.File(path, 'r') as f:
            d = np.array(f['data'])
        hs_data = np.einsum('abc -> cab', self.resize(d))
        return hs_data
| data/unaligned_dataset.py | 4,542 | This dataset class can load unaligned/unpaired datasets.
It requires two directories to host training images from domain A '/path/to/data/trainA'
and from domain B '/path/to/data/trainB' respectively.
You can train the model with the dataset flag '--dataroot /path/to/data'.
Similarly, you need to prepare two directories:
'/path/to/data/testA' and '/path/to/data/testB' during test time.
Return a data point and its metadata information.
Parameters:
index (int) -- a random integer for data indexing
Returns a dictionary that contains A, B, A_paths and B_paths
A (tensor) -- an image in the input domain
B (tensor) -- its corresponding image in the target domain
A_paths (str) -- image paths
B_paths (str) -- image paths
Initialize this dataset class.
Parameters:
opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
Return the total number of images in the dataset.
As we have two datasets with potentially different number of images,
we take a maximum of
create a path '/path/to/data/trainA' create a path '/path/to/data/trainB' load images from '/path/to/data/trainA' load images from '/path/to/data/trainB' get the size of dataset A get the size of dataset B get the number of channels of input image get the number of channels of output image make sure index is within then range make sure index is within then range randomize the index for domain B to avoid fixed pairs.Added a new loader for loading hsi images. Uncomment the following line for normal images.print('Inside hsi loader, {0}'.format(np.shape(hs_data))) | 1,618 | en | 0.709308 |
##
# The MIT License (MIT)
#
# Copyright (c) 2016 Stefan Wendler
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
##
import os
import subprocess
class AbstractBrowser:
    """Base class for launching a browser against `url` with a private,
    per-browser user-data directory (created on first use)."""

    _binary = None  # executable name; set by subclasses

    def __init__(self, url, user_data_dir):
        self.user_data_dir = os.path.join(user_data_dir, self._binary)
        self.url = url
        if not os.path.exists(self.user_data_dir):
            os.makedirs(self.user_data_dir)

    @staticmethod
    def _available(binary):
        """True when `binary` (with or without a PATHEXT suffix) exists on PATH."""
        extensions = os.environ.get("PATHEXT", "").split(os.pathsep)
        for directory in os.environ.get("PATH", "").split(os.pathsep):
            base = os.path.join(directory, binary)
            candidates = [base] + [base + ext for ext in extensions]
            if any(os.path.exists(candidate) for candidate in candidates):
                return True
        return False

    def _start(self, args):
        """Run the browser binary with `args`; return its exit status
        (0 on success, -1 when it could not be launched at all)."""
        print("running: " + self._binary)
        try:
            subprocess.check_output([self._binary] + args)
        except subprocess.CalledProcessError as e:
            print(e.output)
            return e.returncode
        except Exception as e:
            print(e)
            return -1
        else:
            return 0

    def start(self):
        # Subclasses supply the real launch; the base class always fails.
        return -1

    @staticmethod
    def available():
        # The abstract base is never an available browser.
        return False
class Chrome(AbstractBrowser):
    """Google Chrome: launches an app-mode window with an isolated profile."""

    _binary = "google-chrome"

    @staticmethod
    def available():
        return AbstractBrowser._available(Chrome._binary)

    def start(self):
        launch_args = [
            "--app=%s" % self.url,
            "--user-data-dir=%s" % self.user_data_dir,
        ]
        return self._start(launch_args)
class Chromium(Chrome):
    # Inherits Chrome's app-mode start(); only the binary name differs.
    # NOTE(review): "xchromium" is unusual -- distro packages normally
    # install "chromium" or "chromium-browser"; confirm this name is intended.
    _binary = "xchromium"

    @staticmethod
    def available():
        return AbstractBrowser._available(Chromium._binary)
class Firefox(AbstractBrowser):
    """Firefox: launches with a dedicated profile, detached from any
    running instance (--no-remote)."""

    _binary = "firefox"

    @staticmethod
    def available():
        return AbstractBrowser._available(Firefox._binary)

    def start(self):
        launch_args = ["--profile", self.user_data_dir, "--no-remote", self.url]
        return self._start(launch_args)
class Browser:
    """Facade that picks the first installed browser (Chrome, then
    Chromium, then Firefox) and delegates start() to it."""

    def __init__(self, url, user_data_dir=None):
        self.client = None
        for candidate in (Chrome, Chromium, Firefox):
            if candidate.available():
                self.client = candidate(url, user_data_dir)
                break
        else:
            raise Exception("No suitable client found!")

    def start(self):
        return self.client.start()
| src/edubot/client.py | 3,520 | The MIT License (MIT) Copyright (c) 2016 Stefan Wendler Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,076 | en | 0.858222 |
import logging, ast, os
from bisect import bisect_left, bisect
import louie as dispatcher
from twisted.internet import reactor
from rdflib import Literal
from light9 import showconfig
from light9.namespaces import L9, RDF, RDFS
from rdfdb.patch import Patch
log = logging.getLogger()

# todo: move to config, consolidate with ascoltami, musicPad, etc
# Pad values around songs, presumably in seconds -- TODO confirm the unit
# against the consumers of these constants.
introPad = 4
postPad = 4
class Curve(object):
    """curve does not know its name. see Curveset

    Holds an x-sorted list of (x, y) points and interpolates linearly
    between them.

    Bug fixes (Python 3): bisect probes used (value, None), which raises
    TypeError when comparing None against a y-value at an exact x match;
    single-element tuples (value,) sort the same way without comparing
    second elements. checkOverlap compared the first x against None.
    """

    def __init__(self, uri, pointsStorage='graph'):
        self.uri = uri
        self.pointsStorage = pointsStorage
        self.points = []  # x-sorted list of (x,y)
        self._muted = False

    def __repr__(self):
        return "<%s %s (%s points)>" % (self.__class__.__name__, self.uri,
                                        len(self.points))

    @property
    def muted(self):
        """Whether to currently send levels (boolean, obviously)"""
        return self._muted

    @muted.setter
    def muted(self, val):
        self._muted = val
        dispatcher.send('mute changed', sender=self)

    def toggleMute(self):
        self.muted = not self.muted

    def load(self, filename):
        """Replace all points from a whitespace-separated x/y file."""
        self.points[:] = []
        for line in open(filename):
            x, y = line.split()
            # literal_eval so marker strings survive alongside numbers.
            self.points.append((float(x), ast.literal_eval(y)))
        self.points.sort()
        dispatcher.send("points changed", sender=self)

    def set_from_string(self, pts):
        """Replace all points from 'x1 y1 x2 y2 ...' text."""
        self.points[:] = []
        vals = pts.split()
        pairs = list(zip(vals[0::2], vals[1::2]))
        for x, y in pairs:
            self.points.append((float(x), ast.literal_eval(y)))
        self.points.sort()
        dispatcher.send("points changed", sender=self)

    def points_as_string(self):
        """Inverse of set_from_string: 'x1 y1 x2 y2 ...'."""

        def outVal(x):
            if isinstance(x, str):  # markers
                return x
            return "%.4g" % x

        return ' '.join(
            "%s %s" % (outVal(p[0]), outVal(p[1])) for p in self.points)

    def save(self, filename):
        # this is just around for markers, now
        if filename.endswith('-music') or filename.endswith('_music'):
            print("not saving music track")
            return
        f = open(filename, 'w')
        for p in self.points:
            f.write("%s %r\n" % p)
        f.close()

    def eval(self, t, allow_muting=True):
        """Linearly interpolated y at time t; clamps outside the range.

        Returns 0 while muted (unless allow_muting is False).
        """
        if self.muted and allow_muting:
            return 0
        if not self.points:
            raise ValueError("curve has no points")
        # (t,) sorts before any (t, y), so this finds the last point
        # strictly left of t. (Fix: (t, None) raised TypeError on py3
        # when t matched an existing x.)
        i = bisect_left(self.points, (t,)) - 1
        if i == -1:
            return self.points[0][1]
        if self.points[i][0] > t:
            return self.points[i][1]
        if i >= len(self.points) - 1:
            return self.points[i][1]

        p1, p2 = self.points[i], self.points[i + 1]
        frac = (t - p1[0]) / (p2[0] - p1[0])
        y = p1[1] + (p2[1] - p1[1]) * frac
        return y

    __call__ = eval

    def insert_pt(self, new_pt):
        """returns index of new point"""
        # (x,) probe fixes the py3 None-comparison TypeError at equal x.
        i = bisect(self.points, (new_pt[0],))
        self.points.insert(i, new_pt)
        # missing a check that this isn't the same X as the neighbor point
        dispatcher.send("points changed", sender=self)
        return i

    def live_input_point(self, new_pt, clear_ahead_secs=.01):
        """Insert a live point, clearing any points just ahead of it."""
        x, y = new_pt
        exist = self.points_between(x, x + clear_ahead_secs)
        for pt in exist:
            self.remove_point(pt)
        self.insert_pt(new_pt)
        dispatcher.send("points changed", sender=self)
        # now simplify to the left

    def set_points(self, updates):
        """Apply (index, point) updates in place."""
        for i, pt in updates:
            self.points[i] = pt

        # this should be on, but live_input_point made it fail a
        # lot. need a new solution.
        #self.checkOverlap()
        dispatcher.send("points changed", sender=self)

    def checkOverlap(self):
        """Raise ValueError unless x values are strictly increasing."""
        x = None
        for p in self.points:
            # Fix: comparing against the initial None raised TypeError on
            # py3 for the very first point.
            if x is not None and p[0] <= x:
                raise ValueError("overlapping points")
            x = p[0]

    def pop_point(self, i):
        p = self.points.pop(i)
        dispatcher.send("points changed", sender=self)
        return p

    def remove_point(self, pt):
        self.points.remove(pt)
        dispatcher.send("points changed", sender=self)

    def indices_between(self, x1, x2, beyond=0):
        """Indices of points in [x1, x2), widened by `beyond` on each side."""
        leftidx = max(0, bisect(self.points, (x1,)) - beyond)
        rightidx = min(len(self.points),
                       bisect(self.points, (x2,)) + beyond)
        return list(range(leftidx, rightidx))

    def points_between(self, x1, x2):
        """returns (x,y) points"""
        return [self.points[i] for i in self.indices_between(x1, x2)]

    def point_before(self, x):
        """(x,y) of the point left of x, or None"""
        leftidx = self.index_before(x)
        if leftidx is None:
            return None
        return self.points[leftidx]

    def index_before(self, x):
        leftidx = bisect(self.points, (x,)) - 1
        if leftidx < 0:
            return None
        return leftidx
class CurveResource(object):
    """
    holds a Curve, deals with graphs

    Bridges a Curve instance and its RDF representation: loads points from
    the graph (or a legacy points file), and persists edits back as patches.
    """

    def __init__(self, graph, uri):
        # probably newCurve and loadCurve should be the constructors instead.
        self.graph, self.uri = graph, uri

    def curvePointsContext(self):
        # Context (named graph) where the points literal is stored.
        return self.uri

    def newCurve(self, ctx, label):
        """
        Save type/label for a new :Curve resource.
        Pass the ctx where the main curve data (not the points) will go.
        """
        if hasattr(self, 'curve'):
            raise ValueError('CurveResource already has a curve %r' %
                             self.curve)
        self.graph.patch(
            Patch(addQuads=[
                (self.uri, RDF.type, L9['Curve'], ctx),
                (self.uri, RDFS.label, label, ctx),
            ]))
        self.curve = Curve(self.uri)
        self.curve.points.extend([(0, 0)])
        self.saveCurve()
        self.watchCurvePointChanges()

    def loadCurve(self):
        """Create self.curve from the graph (or its legacy points file)."""
        if hasattr(self, 'curve'):
            raise ValueError('CurveResource already has a curve %r' %
                             self.curve)
        pointsFile = self.graph.value(self.uri, L9['pointsFile'])
        self.curve = Curve(self.uri,
                           pointsStorage='file' if pointsFile else 'graph')
        if hasattr(self.graph, 'addHandler'):
            # Live graph: re-run pointsFromGraph whenever its inputs change.
            self.graph.addHandler(self.pointsFromGraph)
        else:
            # given a currentState graph
            self.pointsFromGraph()

    def pointsFromGraph(self):
        """Populate curve points from the graph literal, falling back to a
        points file on disk; then start persisting further edits."""
        pts = self.graph.value(self.uri, L9['points'])
        if pts is not None:
            self.curve.set_from_string(pts)
        else:
            diskPts = self.graph.value(self.uri, L9['pointsFile'])
            if diskPts is not None:
                self.curve.load(os.path.join(showconfig.curvesDir(), diskPts))
            else:
                log.warn("curve %s has no points", self.uri)
        self.watchCurvePointChanges()

    def saveCurve(self):
        """Write the current points to the graph immediately."""
        self.pendingSave = None
        for p in self.getSavePatches():
            self.graph.patch(p)

    def getSavePatches(self):
        """Patches that persist the points; file-backed curves are no
        longer written and yield nothing."""
        if self.curve.pointsStorage == 'file':
            log.warn("not saving file curves anymore- skipping %s" % self.uri)
            #cur.save("%s-%s" % (basename,name))
            return []
        elif self.curve.pointsStorage == 'graph':
            return [
                self.graph.getObjectPatch(self.curvePointsContext(),
                                          subject=self.uri,
                                          predicate=L9['points'],
                                          newObject=Literal(
                                              self.curve.points_as_string()))
            ]
        else:
            raise NotImplementedError(self.curve.pointsStorage)

    def watchCurvePointChanges(self):
        """start watching and saving changes to the graph"""
        dispatcher.connect(self.onChange, 'points changed', sender=self.curve)

    def onChange(self):
        # Don't write a patch for the edited curve points until they've been
        # stable for this long. This can be very short, since it's just to
        # stop a 100-point edit from sending many updates. If it's too long,
        # you won't see output lights change while you drag a point. Todo:
        # this is just the wrong timing algorithm- it should be a max rate,
        # not a max-hold-still-time.
        HOLD_POINTS_GRAPH_COMMIT_SECS = .1

        if getattr(self, 'pendingSave', None):
            self.pendingSave.cancel()
        self.pendingSave = reactor.callLater(HOLD_POINTS_GRAPH_COMMIT_SECS,
                                             self.saveCurve)
class Markers(Curve):
    """Like a Curve, but each point's y value is a string label."""

    def eval(self):
        # markers carry labels, not numbers, so they can't be evaluated
        raise NotImplementedError()
def slope(p1, p2):
    """Slope of the line through (x, y) points p1 and p2.

    Returns 0 for a vertical segment rather than raising ZeroDivisionError.
    """
    dx = p2[0] - p1[0]
    if dx == 0:
        return 0
    return (p2[1] - p1[1]) / dx
class Curveset(object):
    """Tracks the CurveResources belonging to the session's current song."""

    def __init__(self, graph, session):
        self.graph, self.session = graph, session
        self.currentSong = None
        self.curveResources = {}  # uri : CurveResource
        self.markers = Markers(uri=None, pointsStorage='file')
        # re-runs loadCurvesForSong whenever the graph data it read changes
        graph.addHandler(self.loadCurvesForSong)

    def curveFromUri(self, uri):
        return self.curveResources[uri].curve

    def loadCurvesForSong(self):
        """
        current curves will track song's curves.

        This fires 'add_curve' dispatcher events to announce the new curves.
        """
        log.info('loadCurvesForSong')
        dispatcher.send("clear_curves")
        self.curveResources.clear()
        self.markers = Markers(uri=None, pointsStorage='file')

        self.currentSong = self.graph.value(self.session, L9['currentSong'])
        if self.currentSong is None:
            return

        for uri in sorted(self.graph.objects(self.currentSong, L9['curve'])):
            try:
                cr = self.curveResources[uri] = CurveResource(self.graph, uri)
                cr.loadCurve()
                curvename = self.graph.label(uri)
                if not curvename:
                    raise ValueError("curve %r has no label" % uri)
                dispatcher.send("add_curve",
                                sender=self,
                                uri=uri,
                                label=curvename,
                                curve=cr.curve)
            except Exception as e:
                # one broken curve shouldn't stop the rest of the song loading
                log.error("loading %s failed: %s", uri, e)

        basename = os.path.join(
            showconfig.curvesDir(),
            showconfig.songFilenameFromURI(self.currentSong))
        try:
            self.markers.load("%s.markers" % basename)
        except IOError:
            print("no marker file found")

    def save(self):
        """writes a file for each curve with a name
        like basename-curvename, or saves them to the rdf graph"""
        basename = os.path.join(
            showconfig.curvesDir(),
            showconfig.songFilenameFromURI(self.currentSong))
        patches = []
        for cr in list(self.curveResources.values()):
            patches.extend(cr.getSavePatches())

        self.markers.save("%s.markers" % basename)
        # this will cause reloads that will rebuild our curve list
        for p in patches:
            self.graph.patch(p)

    def sorter(self, name):
        # NOTE(review): self.curves is never assigned in this class (only
        # self.curveResources exists), so calling this would raise
        # AttributeError. Looks like dead/legacy code -- confirm before use.
        return self.curves[name].uri

    def curveUrisInOrder(self):
        return sorted(self.curveResources.keys())

    def currentCurves(self):
        # deprecated
        for uri, cr in sorted(self.curveResources.items()):
            with self.graph.currentState(tripleFilter=(uri, RDFS['label'],
                                                       None)) as g:
                yield uri, g.label(uri), cr.curve

    def globalsdict(self):
        raise NotImplementedError('subterm used to get a dict of name:curve')

    def get_time_range(self):
        # dispatcher.send returns [(receiver, response), ...]; the first
        # receiver's response is taken as the max time
        return 0, dispatcher.send("get max time")[0][1]

    def new_curve(self, name):
        """Create and announce a new curve on the current song, with endpoints
        spanning the song's whole time range at y=0."""
        if isinstance(name, Literal):
            name = str(name)
        uri = self.graph.sequentialUri(self.currentSong + '/curve-')
        cr = self.curveResources[uri] = CurveResource(self.graph, uri)
        cr.newCurve(ctx=self.currentSong, label=Literal(name))

        s, e = self.get_time_range()
        cr.curve.points.extend([(s, 0), (e, 0)])

        ctx = self.currentSong
        self.graph.patch(
            Patch(addQuads=[
                (self.currentSong, L9['curve'], uri, ctx),
            ]))
        cr.saveCurve()
| light9/curvecalc/curve.py | 12,715 | curve does not know its name. see Curveset
holds a Curve, deals with graphs
Marker is like a point but the y value is a string
returns index of new point
current curves will track song's curves.
This fires 'add_curve' dispatcher events to announce the new curves.
Save type/label for a new :Curve resource.
Pass the ctx where the main curve data (not the points) will go.
(x,y) of the point left of x, or None
returns (x,y) points
writes a file for each curve with a name
like basename-curvename, or saves them to the rdf graph
start watching and saving changes to the graph
todo: move to config, consolidate with ascoltami, musicPad, etc x-sorted list of (x,y) markers this is just around for markers, now missing a check that this isn't the same X as the neighbor point now simplify to the left this should be on, but live_input_point made it fail a lot. need a new solution.self.checkOverlap() probably newCurve and loadCurve should be the constructors instead. given a currentState graphcur.save("%s-%s" % (basename,name)) Don't write a patch for the edited curve points until they've been stable for this long. This can be very short, since it's just to stop a 100-point edit from sending many updates. If it's too long, you won't see output lights change while you drag a point. Todo: this is just the wrong timing algorithm- it should be a max rate, not a max-hold-still-time. uri : CurveResource this will cause reloads that will rebuild our curve list deprecated | 1,475 | en | 0.903653 |
import paddle
import paddle.nn as nn
class ContrastiveLoss(nn.Layer):
    """
    Compute contrastive loss

    Max-margin ranking loss over an image-sentence similarity matrix whose
    diagonal entries are the matching (positive) pairs.
    """

    def __init__(self, margin=0, max_violation=False):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin  # hinge margin
        self.max_violation = max_violation  # keep only hardest negative per query

    def forward(self, scores):
        # compute image-sentence score matrix
        # diagonal[i] = scores[i][i], the positive-pair score for row i
        diag_idx = [[i, i] for i in range(len(scores))]
        diagonal = paddle.gather_nd(scores, paddle.to_tensor(diag_idx)).unsqueeze(1)
        d1 = diagonal.expand_as(scores)  # positive score broadcast per row
        d2 = paddle.transpose(d1, (1,0)).expand_as(scores)  # per column
        # compare every diagonal score to scores in its column
        # caption retrieval
        cost_s = (self.margin + scores - d1).clip(min=0)
        # compare every diagonal score to scores in its row
        # image retrieval
        cost_im = (self.margin + scores - d2).clip(min=0)
        # clear diagonals (mask is True off-diagonal, False on it)
        mask = paddle.eye(scores.shape[0]) < .5
        cost_s = cost_s * mask
        cost_im = cost_im * mask
        # keep the maximum violating negative for each query
        if self.max_violation:
            cost_s = cost_s.max(1)
            cost_im = cost_im.max(0)
        return cost_s.sum() + cost_im.sum() | paddlemm/models/retrieval/layers/contrastive.py | 1,251 | Compute contrastive loss
compute image-sentence score matrix compare every diagonal score to scores in its column caption retrieval compare every diagonal score to scores in its row image retrieval clear diagonals keep the maximum violating negative for each query | 266 | en | 0.896856 |
# Enter your code for "Degree Distribution" here.
import csv


def _load_rows(filename):
    """Return every row of a CSV file as a dict, closing the file when done."""
    # The original left both file handles open; 'with' guarantees closure.
    with open(filename) as f:
        return list(csv.DictReader(f))


degrees = _load_rows("degrees.csv")
students = _load_rows("students.csv")

# Order students from highest to lowest score. A single descending sort
# replaces the original sorted()+reverse() pair (and keeps equal scores in
# their original order instead of reversing them).
students.sort(key=lambda row: float(row["score"]), reverse=True)
print(students)
| Degree Distribution.py | 328 | Enter your code for "Degree Distribution" here. | 47 | en | 0.906068 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['TagArgs', 'Tag']
@pulumi.input_type
class TagArgs:
    # Generated input type: all three fields are required at construction.
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 resource_arn: pulumi.Input[str],
                 value: pulumi.Input[str]):
        """
        The set of arguments for constructing a Tag resource.
        :param pulumi.Input[str] key: Tag name.
        :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
        :param pulumi.Input[str] value: Tag value.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "resource_arn", resource_arn)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        Tag name.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="resourceArn")
    def resource_arn(self) -> pulumi.Input[str]:
        """
        Amazon Resource Name (ARN) of the ECS resource to tag.
        """
        return pulumi.get(self, "resource_arn")

    @resource_arn.setter
    def resource_arn(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_arn", value)

    @property
    @pulumi.getter
    def value(self) -> pulumi.Input[str]:
        """
        Tag value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: pulumi.Input[str]):
        pulumi.set(self, "value", value)
@pulumi.input_type
class _TagState:
    # Generated state type: every field is optional, since any subset may be
    # known when looking up an existing resource.
    def __init__(__self__, *,
                 key: Optional[pulumi.Input[str]] = None,
                 resource_arn: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Tag resources.
        :param pulumi.Input[str] key: Tag name.
        :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
        :param pulumi.Input[str] value: Tag value.
        """
        if key is not None:
            pulumi.set(__self__, "key", key)
        if resource_arn is not None:
            pulumi.set(__self__, "resource_arn", resource_arn)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[pulumi.Input[str]]:
        """
        Tag name.
        """
        return pulumi.get(self, "key")

    @key.setter
    def key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter(name="resourceArn")
    def resource_arn(self) -> Optional[pulumi.Input[str]]:
        """
        Amazon Resource Name (ARN) of the ECS resource to tag.
        """
        return pulumi.get(self, "resource_arn")

    @resource_arn.setter
    def resource_arn(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_arn", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        Tag value.
        """
        return pulumi.get(self, "value")

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)
class Tag(pulumi.CustomResource):
    """Manages an individual tag on an ECS resource (`aws:ecs/tag:Tag`)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 resource_arn: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## Import

        `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.

        ```sh
        $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key: Tag name.
        :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
        :param pulumi.Input[str] value: Tag value.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TagArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## Import

        `aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.

        ```sh
        $ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
        ```

        :param str resource_name: The name of the resource.
        :param TagArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two typed overloads above.
        resource_args, opts = _utilities.get_resource_args_opts(TagArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 key: Optional[pulumi.Input[str]] = None,
                 resource_arn: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (not a lookup): all three props required.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TagArgs.__new__(TagArgs)

            if key is None and not opts.urn:
                raise TypeError("Missing required property 'key'")
            __props__.__dict__["key"] = key
            if resource_arn is None and not opts.urn:
                raise TypeError("Missing required property 'resource_arn'")
            __props__.__dict__["resource_arn"] = resource_arn
            if value is None and not opts.urn:
                raise TypeError("Missing required property 'value'")
            __props__.__dict__["value"] = value
        super(Tag, __self__).__init__(
            'aws:ecs/tag:Tag',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            key: Optional[pulumi.Input[str]] = None,
            resource_arn: Optional[pulumi.Input[str]] = None,
            value: Optional[pulumi.Input[str]] = None) -> 'Tag':
        """
        Get an existing Tag resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] key: Tag name.
        :param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
        :param pulumi.Input[str] value: Tag value.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _TagState.__new__(_TagState)

        __props__.__dict__["key"] = key
        __props__.__dict__["resource_arn"] = resource_arn
        __props__.__dict__["value"] = value
        return Tag(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Output[str]:
        """
        Tag name.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter(name="resourceArn")
    def resource_arn(self) -> pulumi.Output[str]:
        """
        Amazon Resource Name (ARN) of the ECS resource to tag.
        """
        return pulumi.get(self, "resource_arn")

    @property
    @pulumi.getter
    def value(self) -> pulumi.Output[str]:
        """
        Tag value.
        """
        return pulumi.get(self, "value")
| sdk/python/pulumi_aws/ecs/tag.py | 9,126 | The set of arguments for constructing a Tag resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
Input properties used for looking up and filtering Tag resources.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
## Import
`aws_ecs_tag` can be imported by using the ECS resource identifier and key, separated by a comma (`,`), e.g.
```sh
$ pulumi import aws:ecs/tag:Tag example arn:aws:ecs:us-east-1:123456789012:cluster/example,Name
```
:param str resource_name: The name of the resource.
:param TagArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
Get an existing Tag resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] key: Tag name.
:param pulumi.Input[str] resource_arn: Amazon Resource Name (ARN) of the ECS resource to tag.
:param pulumi.Input[str] value: Tag value.
Tag name.
Tag name.
Tag name.
Amazon Resource Name (ARN) of the ECS resource to tag.
Amazon Resource Name (ARN) of the ECS resource to tag.
Amazon Resource Name (ARN) of the ECS resource to tag.
Tag value.
Tag value.
Tag value.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 2,334 | en | 0.478124 |
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import io
import os
import platform
import sys
import time
import unittest
import common
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, 'tools', 'variations'))
import fieldtrial_util
# Fully-qualified test ids (module.Class.method) that must not run here.
test_blacklist = [
    # These tests set their own field trials and should be ignored.
    'quic.Quic.testCheckPageWithQuicProxy',
    'quic.Quic.testCheckPageWithQuicProxyTransaction',
    'smoke.Smoke.testCheckPageWithHoldback',
]
def GetExperimentArgs():
  """Returns a list of arguments with all tested field trials.

  This function is a simple wrapper around the variation team's fieldtrial_util
  script that generates command line arguments to test Chromium field trials.

  Returns:
    an array of command line arguments to pass to chrome
  """
  config_path = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
      os.pardir, 'testing', 'variations', 'fieldtrial_testing_config.json')
  if common.ParseFlags().android:
    my_platform = 'android'
  else:
    # Map platform.system() to the config's platform names.
    my_platform = {
        'linux': 'linux',
        'windows': 'windows',
        'darwin': 'mac',
    }.get(platform.system().lower(), '')
    if not my_platform:
      raise Exception('unknown platform!')
  return fieldtrial_util.GenerateArgs(config_path, my_platform)
def GenerateTestSuites():
  """A generator function that yields non-blacklisted tests to run.

  This function yields test suites each with a single test case whose id is not
  blacklisted in the array at the top of this file.

  Yields:
    non-blacklisted test suites to run
  """
  loader = unittest.TestLoader()
  for suite in loader.discover(os.path.dirname(__file__), pattern='*.py'):
    for case in suite:
      for method in case:
        if method.id() in test_blacklist:
          continue
        # One suite per test so each result can be reported individually.
        yield (unittest.TestSuite([method]), method.id())
def ParseFlagsWithExtraBrowserArgs(extra_args):
  """Generates a function to override common.ParseFlags.

  The returned function will honor everything in the original ParseFlags(), but
  adds on additional browser_args.

  Args:
    extra_args: The extra browser arguments to add.
  Returns:
    A function to override common.ParseFlags with additional browser_args.
  """
  original_flags = common.ParseFlags()
  def AddExtraBrowserArgs():
    # Note: mutates the captured flags object, appending on every call.
    existing = original_flags.browser_args or ''
    original_flags.browser_args = existing + ' ' + extra_args
    return original_flags
  return AddExtraBrowserArgs
def main():
  """Runs all non-blacklisted tests against Chromium field trials.

  This script runs all chrome proxy integration tests that haven't been
  blacklisted against the field trial testing configuration used by Chromium
  perf bots.
  """
  flags = common.ParseFlags()
  experiment_args = ' '.join(GetExperimentArgs())
  common.ParseFlags = ParseFlagsWithExtraBrowserArgs(experiment_args)
  # Each test is wrapped in its own test suite so results can be evaluated
  # individually.
  for test_suite, test_id in GenerateTestSuites():
    # NOTE(review): TextTestRunner writes text to its stream; io.BytesIO
    # rejects str on Python 3, so this buffer looks Python-2-only -- confirm
    # before porting (io.StringIO would be the py3 equivalent).
    buf = io.BytesIO()
    sys.stdout.write('%s... ' % test_id)
    sys.stdout.flush()
    testRunner = unittest.runner.TextTestRunner(stream=buf, verbosity=2,
      buffer=(not flags.disable_buffer))
    result = testRunner.run(test_suite)
    if result.wasSuccessful():
      print('ok')
    else:
      print('failed')
      print(buf.getvalue())
      print('To repeat this test, run: ')
      print("%s %s %s --test_filter=%s --browser_args='%s'" % (
        sys.executable,
        os.path.join(os.path.dirname(__file__), 'run_all_tests.py'), ' '.join(
          sys.argv[1:]), '.'.join(test_id.split('.')[1:]), experiment_args))
    if flags.failfast:
      return
# Script entry point when run directly.
if __name__ == '__main__':
  main()
| tools/chrome_proxy/webdriver/variations_combinations.py | 4,059 | A generator function that yields non-blacklisted tests to run.
This function yields test suites each with a single test case whose id is not
blacklisted in the array at the top of this file.
Yields:
non-blacklisted test suites to run
Returns a list of arguments with all tested field trials.
This function is a simple wrapper around the variation team's fieldtrail_util
script that generates command line arguments to test Chromium field trials.
Returns:
an array of command line arguments to pass to chrome
Generates a function to override common.ParseFlags.
The returned function will honor everything in the original ParseFlags(), but
adds on additional browser_args.
Args:
extra_args: The extra browser agruments to add.
Returns:
A function to override common.ParseFlags with additional browser_args.
Runs all non-blacklisted tests against Chromium field trials.
This script run all chrome proxy integration tests that haven't been
blacklisted against the field trial testing configuration used by Chromium
perf bots.
Copyright 2017 The Chromium Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. These tests set their own field trials and should be ignored. Each test is wrapped in its own test suite so results can be evaluated individually. | 1,341 | en | 0.821383 |
# Link for the problem : https://leetcode.com/problems/next-permutation/
class Solution(object):
    """In-place next lexicographic permutation (LeetCode 31)."""

    def nextPermutation(self, nums):
        """Rearrange nums into its next permutation (in place).

        Returns nums as well, for convenience. If nums is already the largest
        permutation, it wraps around to the smallest (ascending) one.
        """
        # Find the rightmost "pivot" index i with nums[i] < nums[i+1].
        i = len(nums) - 2
        while i >= 0 and nums[i] >= nums[i + 1]:
            i -= 1
        if i < 0:
            # Entire array is non-increasing: reversing yields the ascending
            # order (same result as the original's sort(), but O(n)).
            nums.reverse()
        else:
            # Swap the pivot with the smallest suffix element greater than it,
            # then reverse the (still non-increasing) suffix.
            m = self.findMaxIndex(i + 1, nums, nums[i])
            nums[i], nums[m] = nums[m], nums[i]
            nums[i + 1:] = nums[i + 1:][::-1]
        return nums

    def findMaxIndex(self, index, a, curr):
        """Return the index (>= index) of the smallest element of a greater
        than curr, or -1 if none exists.

        Fixes the original, which reset index to 0 (shadowing the parameter),
        tracked ans = curr instead of a[i], and moved its result index on
        every element > curr rather than only on a new minimum.
        """
        best = -1
        for j in range(index, len(a)):
            if a[j] > curr and (best == -1 or a[j] < a[best]):
                best = j
        return best
ob1 = Solution()  # NOTE(review): module-level instance, unused in this file -- confirm it can be removed
| DSA 450 GFG/next_permutation.py | 818 | Link for the problem : https://leetcode.com/problems/next-permutation/ | 70 | en | 0.58739 |
#
# blood_graph.py
#
# vanilla_core execution visualizer.
#
# input: vanilla_operation_trace.csv
# vanilla_stats.csv (for timing)
#   output: blood graph file (blood_abstract/detailed.png)
# blood graph key (key_abstract/detailed.png)
#
# @author Tommy, Borna
#
# How to use:
# python blood_graph.py --trace {vanilla_operation_trace.csv}
# --stats {vanilla_stats.csv}
# --abstract {optional}
# --generate-key {optional}
# --cycle {start_cycle@end_cycle}
#
# ex) python blood_graph.py --trace vanilla_operation_trace.csv
# --stats vanilla_stats.csv
# --abstract --generate-key
# --cycle 10000@20000
#
#
# {stats} used for extracting the timing window for blood graph
#   {abstract} used for abstract simplified bloodgraph
# {generate-key} also generates a color key for the blood graph
# {cycle} used for user-specified custom timing window
#
#
# Note: You can use the "Digital Color Meter" in MacOS in order to compare
# the values from the color key to the values in the bloodgraph, if you are
# having trouble distinguishing a color.
import sys
import csv
import argparse
import warnings
import os.path
from PIL import Image, ImageDraw, ImageFont
from itertools import chain
from . import common
class BloodGraph:
# for generating the key
_KEY_WIDTH = 512
_KEY_HEIGHT = 512
# List of types of stalls incurred by the core
_STALLS_LIST = ["stall_depend_dram_load",
"stall_depend_group_load",
"stall_depend_global_load",
"stall_depend_idiv",
"stall_depend_fdiv",
"stall_depend_local_load",
"stall_depend_imul",
"stall_amo_aq",
"stall_amo_rl",
"stall_bypass",
"stall_lr_aq",
"stall_fence",
"stall_remote_req",
"stall_remote_credit",
"stall_fdiv_busy",
"stall_idiv_busy",
"stall_fcsr",
"stall_remote_ld",
"stall_ifetch_wait",
"stall_remote_flw_wb",
"stall_remote_ld_wb",
"bubble_branch_miss",
"bubble_jalr_miss",
"bubble_icache_miss"]
# List of types of integer instructions executed by the core
_INSTRS_LIST = [
"local_ld",
"local_st",
"remote_ld_dram",
"remote_ld_global",
"remote_ld_group",
"remote_st_dram",
"remote_st_global",
"remote_st_group",
"local_flw",
"local_fsw",
"remote_flw_dram",
"remote_flw_global",
"remote_flw_group",
"remote_fsw_dram",
"remote_fsw_global",
"remote_fsw_group",
# icache_miss is no longer treated as an instruction
# but treated the same as stall_ifetch_wait
# "icache_miss",
"lr",
"lr_aq",
"amoswap",
"amoor",
"amoadd",
"beq",
"bne",
"blt",
"bge",
"bltu",
"bgeu",
"jal",
"jalr",
"beq_miss",
"bne_miss",
"blt_miss",
"bge_miss",
"bltu_miss",
"bgeu_miss",
"jalr_miss",
"sll",
"slli",
"srl",
"srli",
"sra",
"srai",
"add",
"addi",
"sub",
"lui",
"auipc",
"xor",
"xori",
"or",
"ori",
"and",
"andi",
"slt",
"slti",
"sltu",
"sltiu",
"div",
"divu",
"rem",
"remu",
"mul",
"fence",
"csrrw",
"csrrs",
"csrrc",
"csrrwi",
"csrrsi",
"csrrci",
"barsend",
"barrecv",
"unknown"]
# List of types of floating point instructions executed by the core
_FP_INSTRS_LIST = ["fadd",
"fsub",
"fmul",
"fsgnj",
"fsgnjn",
"fsgnjx",
"fmin",
"fmax",
"fcvt_s_w",
"fcvt_s_wu",
"fmv_w_x",
"fmadd",
"fmsub",
"fnmsub",
"fnmadd",
"feq",
"flt",
"fle",
"fcvt_w_s",
"fcvt_wu_s",
"fclass",
"fmv_x_w",
"fdiv",
"fsqrt"]
# Coloring scheme for different types of operations
# For detailed mode
# i_cache miss is treated the same is stall_ifetch_wait
_DETAILED_STALL_BUBBLE_COLOR = {
"stall_depend_dram_load" : (0xff, 0x00, 0x00), ## red
"stall_depend_group_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_global_load" : (0x00, 0x55, 0x00), ## dark green
"stall_depend_local_load" : (0x00, 0xff, 0xff), ## cyan
"stall_depend_idiv" : (0xff, 0xf0, 0xa0), ## light orange
"stall_depend_fdiv" : (0xff, 0xf0, 0xa0), ## light orange
"stall_depend_imul" : (0xff, 0xf0, 0xa0), ## light orange
"stall_fdiv_busy" : (0x00, 0xaa, 0xff), ## dark cyan
"stall_idiv_busy" : (0x00, 0xaa, 0xff), ## dark cyan
"stall_amo_aq" : (0x8b, 0x45, 0x13), ## brown
"stall_amo_rl" : (0x8b, 0x45, 0x13), ## brown
"stall_bypass" : (0xff, 0x00, 0xff), ## pink
"stall_lr_aq" : (0x40, 0x40, 0x40), ## dark gray
"stall_fence" : (0x00, 0x00, 0x80), ## navy blue
"stall_remote_req" : (0xff, 0xff, 0x00), ## yellow
"stall_barrier" : (0x00, 0x11, 0xff), ## blue
"stall_remote_credit" : (0x80, 0x00, 0x00), ## maroon
"stall_fcsr" : (0x00, 0x55, 0xff), ## dark blue
"stall_remote_ld" : (0xaa, 0x00, 0x00), ## dark red
"stall_remote_flw_wb" : (0xff, 0xff, 0x80), ## light yellow
"stall_remote_ld_wb" : (0xff, 0xff, 0x40), ## light-ish yellow
"bubble_branch_miss" : (0x80, 0x00, 0x80), ## purple
"bubble_jalr_miss" : (0xff, 0xa5, 0x00), ## orange
"icache_miss" : (0x00, 0x00, 0xff), ## blue
"bubble_icache_miss" : (0x00, 0x00, 0xff), ## blue
"stall_ifetch_wait" : (0x00, 0x00, 0xff), ## blue
}
_DETAILED_UNIFIED_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
_DETAILED_UNIFIED_FP_INSTR_COLOR = (0xff, 0xaa, 0xff) ## light pink
# Coloring scheme for different types of operations
# For abstract mode
# i_cache miss is treated the same is stall_ifetch_wait
_ABSTRACT_STALL_BUBBLE_COLOR = {
"stall_depend_dram_load" : (0xff, 0x00, 0x00), ## red
"stall_depend_group_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_global_load" : (0x00, 0xff, 0x00), ## green
"stall_depend_local_load" : (0x00, 0xff, 0xff), ## cyan
"stall_depend_idiv" : (0xff, 0xff, 0xff), ## white
"stall_depend_fdiv" : (0xff, 0xff, 0xff), ## white
"stall_depend_imul" : (0xff, 0xff, 0xff), ## white
"stall_fdiv_busy" : (0xff, 0xff, 0xff), ## white
"stall_idiv_busy" : (0xff, 0xff, 0xff), ## white
"stall_amo_aq" : (0x00, 0x00, 0x00), ## black
"stall_amo_rl" : (0x00, 0x00, 0x00), ## black
"stall_bypass" : (0x00, 0x00, 0x00), ## black
"stall_lr_aq" : (0x40, 0x40, 0x40), ## dark gray
"stall_fence" : (0x00, 0x00, 0x00), ## black
"stall_remote_req" : (0x00, 0x00, 0x00), ## black
"stall_barrier" : (0x00, 0x11, 0xff), ## blue
"stall_remote_credit" : (0x00, 0x00, 0x00), ## black
"stall_fcsr" : (0x00, 0x00, 0x00), ## black
"stall_remote_ld" : (0x00, 0x00, 0x00), ## black
"stall_remote_flw_wb" : (0x00, 0x00, 0x00), ## black
"stall_remote_ld_wb" : (0x00, 0x00, 0x00), ## black
"bubble_branch_miss" : (0x00, 0x00, 0x00), ## black
"bubble_jalr_miss" : (0x00, 0x00, 0x00), ## black
"icache_miss" : (0x00, 0x00, 0xff), ## blue
"bubble_icache_miss" : (0x00, 0x00, 0xff), ## blue
"stall_ifetch_wait" : (0x00, 0x00, 0xff), ## blue
}
_ABSTRACT_UNIFIED_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
_ABSTRACT_UNIFIED_FP_INSTR_COLOR = (0xff, 0xff, 0xff) ## white
# default constructor
def __init__(self, trace_file, stats_file, cycle, abstract):
self.abstract = abstract
# Determine coloring rules based on mode {abstract / detailed}
if (self.abstract):
self.stall_bubble_color = self._ABSTRACT_STALL_BUBBLE_COLOR
self.unified_instr_color = self._ABSTRACT_UNIFIED_INSTR_COLOR
self.unified_fp_instr_color = self._ABSTRACT_UNIFIED_INSTR_COLOR
else:
self.stall_bubble_color = self._DETAILED_STALL_BUBBLE_COLOR
self.unified_instr_color = self._DETAILED_UNIFIED_INSTR_COLOR
self.unified_fp_instr_color = self._DETAILED_UNIFIED_INSTR_COLOR
# Parse vanilla operation trace file to generate traces
self.traces = self.__parse_traces(trace_file)
# Parse vanilla stats file to generate timing stats
self.stats = self.__parse_stats(stats_file)
# get tile group diemsnions
self.__get_tile_group_dim(self.traces)
# get the timing window (start and end cycle) for blood graph
self.start_cycle, self.end_cycle = self.__get_timing_window(self.traces, self.stats, cycle)
# parses vanilla_operation_trace.csv to generate operation traces
def __parse_traces(self, trace_file):
traces = []
with open(trace_file) as f:
csv_reader = csv.DictReader(f, delimiter=",")
for row in csv_reader:
trace = {}
trace["x"] = int(row["x"])
trace["y"] = int(row["y"])
trace["operation"] = row["operation"]
trace["cycle"] = int(row["cycle"])
traces.append(trace)
return traces
# Parses vanilla_stats.csv to generate timing stats
# to gather start and end cycle of entire graph
def __parse_stats(self, stats_file):
stats = []
if(stats_file):
if (os.path.isfile(stats_file)):
with open(stats_file) as f:
csv_reader = csv.DictReader(f, delimiter=",")
for row in csv_reader:
stat = {}
stat["global_ctr"] = int(row["global_ctr"])
stat["time"] = int(row["time"])
stats.append(stat)
else:
warnings.warn("Stats file not found, overriding blood graph's start/end cycle with traces.")
return stats
# look through the input file to get the tile group dimension (x,y)
def __get_tile_group_dim(self, traces):
xs = [t["x"] for t in traces]
ys = [t["y"] for t in traces]
self.xmin = min(xs)
self.xmax = max(xs)
self.ymin = min(ys)
self.ymax = max(ys)
self.xdim = self.xmax-self.xmin+1
self.ydim = self.ymax-self.ymin+1
return
# Determine the timing window (start and end) cycle of graph
# The timing window will be calculated using:
# Custom input: if custom start cycle is given by using the --cycle argument
# Vanilla stats file: otherwise if vanilla stats file is given as input
# Traces: otherwise the entire course of simulation
def __get_timing_window(self, traces, stats, cycle):
custom_start, custom_end = cycle.split('@')
if (custom_start):
start = int(custom_start)
elif (stats):
start = stats[0]["global_ctr"]
else:
start = traces[0]["cycle"]
if (custom_end):
end = int(custom_end)
elif (stats):
end = stats[-1]["global_ctr"]
else:
end = traces[-1]["cycle"]
return start, end
# main public method
def generate(self):
# init image
self.__init_image()
# create image
for trace in self.traces:
self.__mark_trace(trace)
#self.img.show()
mode = "abstract" if self.abstract else "detailed"
self.img.save(("blood_" + mode + ".png"))
return
# public method to generate key for bloodgraph
# called if --generate-key argument is true
def generate_key(self, key_image_fname = "key"):
    """Render a color legend for the blood graph and save <fname>_<mode>.png."""
    img = Image.new("RGB", (self._KEY_WIDTH, self._KEY_HEIGHT), "black")
    draw = ImageDraw.Draw(img)
    font = ImageFont.load_default()
    # All stall/bubble colors first, then the two unified instruction colors.
    legend_entries = chain(
        [(stall, self.stall_bubble_color[stall]) for stall in self._STALLS_LIST],
        [("unified_instr", self.unified_instr_color),
         ("unified_fp_instr", self.unified_fp_instr_color)])
    row_top = 0  # y coordinate of the current legend row
    for (operation, color) in legend_entries:
        # font.getsize() returns (width, height); the row advances by the
        # text height. (The original bound these to swapped names but used
        # the height value, as we do here.)
        (_text_width, text_height) = font.getsize(operation)
        row_bottom = row_top + text_height
        # Color swatch on the left: [x0, y0, x1, y1].
        draw.rectangle([0, row_top, 64, row_bottom], color)
        # Operation label in white, to the right of the swatch.
        draw.text((68, row_top), operation, (255, 255, 255))
        row_top += text_height
    suffix = "abstract" if self.abstract else "detailed"
    img.save("{}.png".format(key_image_fname + "_" + suffix))
    return
# initialize image
def __init_image(self):
    """Allocate the output canvas sized to hold every cycle in the window."""
    self.img_width = 2048  # cycles (pixels) per horizontal band
    # One band per img_width cycles; each band holds xdim*ydim tile rows
    # plus 2 separator rows.
    cycle_span = self.end_cycle - self.start_cycle
    band_count = (cycle_span + self.img_width) // self.img_width
    self.img_height = band_count * (2 + (self.xdim * self.ydim))
    self.img = Image.new("RGB", (self.img_width, self.img_height), "black")
    self.pixel = self.img.load()
    return
# mark the trace on output image
def __mark_trace(self, trace):
    """Paint a single trace entry as one pixel of the blood graph."""
    # Ignore anything outside the requested timing window.
    if trace["cycle"] < self.start_cycle or trace["cycle"] >= self.end_cycle:
        return
    # Map (cycle, tile) to a (col, row) pixel coordinate.
    rel_cycle = trace["cycle"] - self.start_cycle
    col = rel_cycle % self.img_width
    band = rel_cycle // self.img_width
    tile_x = trace["x"] - self.xmin
    tile_y = trace["y"] - self.ymin
    row = band * (2 + (self.xdim * self.ydim)) + (tile_x + (tile_y * self.xdim))
    # Pick the color for this operation class.
    operation = trace["operation"]
    if operation in self.stall_bubble_color:
        self.pixel[col, row] = self.stall_bubble_color[operation]
    elif operation in self._INSTRS_LIST:
        self.pixel[col, row] = self.unified_instr_color
    elif operation in self._FP_INSTRS_LIST:
        self.pixel[col, row] = self.unified_fp_instr_color
    else:
        raise Exception('Invalid operation in vanilla operation trace log {}'.format(operation))
    return
# Parse input arguments and options
def add_args(parser):
    """Register the blood-graph specific command line options on *parser*."""
    parser.add_argument("--no-blood-graph", default=False, action='store_true',
                        help="Skip blood graph generation")
def main(args):
    """Build the BloodGraph from parsed args and emit the requested outputs."""
    graph = BloodGraph(args.trace, args.stats, args.cycle, args.abstract)
    if not args.no_blood_graph:
        graph.generate()
    if args.generate_key:
        graph.generate_key()
# main()
# Script entry point: build the CLI (shared vanilla_parser options from
# `common` plus the blood-graph specific ones), parse argv, and run main().
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Argument parser for blood_graph.py")
    common.add_args(parser)  # options shared across the vanilla_parser tools
    add_args(parser)
    args = parser.parse_args()
    main(args)
| software/py/vanilla_parser/blood_graph.py | 20,140 | blood_graph.py vanilla_core execution visualizer. input: vanilla_operation_trace.csv vanilla_stats.csv (for timing) output: blood graph file (blood_abstrat/detailed.png) blood graph key (key_abstract/detailed.png) @author Tommy, Borna How to use: python blood_graph.py --trace {vanilla_operation_trace.csv} --stats {vanilla_stats.csv} --abstract {optional} --generate-key {optional} --cycle {start_cycle@end_cycle} ex) python blood_graph.py --trace vanilla_operation_trace.csv --stats vanilla_stats.csv --abstract --generate-key --cycle 10000@20000 {stats} used for extracting the timing window for blood graph {abstract} used for abstract simplifed bloodgraph {generate-key} also generates a color key for the blood graph {cycle} used for user-specified custom timing window Note: You can use the "Digital Color Meter" in MacOS in order to compare the values from the color key to the values in the bloodgraph, if you are having trouble distinguishing a color. 
for generating the key List of types of stalls incurred by the core List of types of integer instructions executed by the core icache_miss is no longer treated as an instruction but treated the same as stall_ifetch_wait "icache_miss", List of types of floating point instructions executed by the core Coloring scheme for different types of operations For detailed mode i_cache miss is treated the same is stall_ifetch_wait red green dark green cyan light orange light orange light orange dark cyan dark cyan brown brown pink dark gray navy blue yellow blue maroon dark blue dark red light yellow light-ish yellow purple orange blue blue blue white light pink Coloring scheme for different types of operations For abstract mode i_cache miss is treated the same is stall_ifetch_wait red green green cyan white white white white white black black black dark gray black black blue black black black black black black black blue blue blue white white default constructor Determine coloring rules based on mode {abstract / detailed} Parse vanilla operation trace file to generate traces Parse vanilla stats file to generate timing stats get tile group diemsnions get the timing window (start and end cycle) for blood graph parses vanilla_operation_trace.csv to generate operation traces Parses vanilla_stats.csv to generate timing stats to gather start and end cycle of entire graph look through the input file to get the tile group dimension (x,y) Determine the timing window (start and end) cycle of graph The timing window will be calculated using: Custom input: if custom start cycle is given by using the --cycle argument Vanilla stats file: otherwise if vanilla stats file is given as input Traces: otherwise the entire course of simulation main public method init image create imageself.img.show() public method to generate key for bloodgraph called if --generate-key argument is true the current row position of our key for each color in stalls... 
get the font size draw a rectangle with color fill [0, yt, 64, yb] is [top left x, top left y, bottom right x, bottom left y] write the label for this color in white (68, yt) = (top left x, top left y) (255, 255, 255) = white create the new row's y-coord save the key initialize image default mark the trace on output image ignore trace outside the cycle range determine pixel location determine color Parse input arguments and options main() | 3,602 | en | 0.65771 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Submits files or a URL to Cuckoo"""
from builtins import input
from argparse import ArgumentParser
from distutils.util import strtobool
from io import BytesIO
from time import sleep
from glob import glob
from zipfile import ZipFile
from os.path import basename
from cuckooutils import Cuckoo, get_file_hash
__version__ = "1.0.0"
__license = """Copyright 2016 Sean Whalen
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
cuckoo = Cuckoo("https://cuckoo.example.net", "username", "password")

parser = ArgumentParser(description=__doc__)
# BUGFIX: ArgumentParser(version=...) is Python 2 only and raises TypeError
# on Python 3; expose the version through the standard --version action.
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("sample", nargs="+", help="One or more filenames or globs, or a single URL")
parser.add_argument("--tags",
                    help="Comma separated tags for selecting an analysis VM",
                    default=None)
parser.add_argument("--options",
                    help="Comma separated option=value pairs",
                    default=None)
parser.add_argument("--tor", action="store_true",
                    help="Enable Tor during analysis")
parser.add_argument("--procmemdump", action="store_true",
                    help="Dump and analyze process memory")
args = parser.parse_args()

# Build the Cuckoo option string: "key=value,key=value,...".
options = {}
if args.tor:
    options['tor'] = 'yes'
if args.procmemdump:
    options['procmemdump'] = 'yes'
options = ",".join("{0}={1}".format(key, value) for key, value in options.items())
if args.options:
    if len(options) > 0:
        options += ","
    options += args.options

# A single argument starting with "http" is treated as a URL submission.
url = len(args.sample) == 1 and args.sample[0].lower().startswith("http")
if url:
    url = args.sample[0]
    results = cuckoo.submit_url(url, tags=args.tags, options=options)
else:
    # Expand globs; multiple matching files are bundled into one zip archive.
    filenames = []
    for filename in args.sample:
        filenames += glob(filename)
    if len(filenames) == 0:
        raise ValueError("No matching files found")
    multi_file = len(filenames) > 1
    if multi_file:
        temp_file = BytesIO()
        temp_filename = "bulk.zip"
        with ZipFile(temp_file, 'a') as temp_zip:
            # BUGFIX: setpassword() requires bytes on Python 3.
            # NOTE(review): setpassword() only applies when *extracting*;
            # ZipFile cannot create encrypted archives, so the bundle is
            # NOT actually password protected on write.
            temp_zip.setpassword(b"infected")
            for filename in filenames:
                temp_zip.write(filename)
    else:
        temp_filename = basename(filenames[0])
        with open(temp_filename, 'rb') as sample_file:
            temp_file = BytesIO(sample_file.read())
    # Offer to skip resubmission when the sample was analyzed before.
    file_hash = get_file_hash(temp_file)
    existing_tasks = cuckoo.find_tasks(file_hash)
    if len(existing_tasks) > 0:
        print("The following analysis reports already exist for this sample:")
        for task_id in existing_tasks:
            print("{0}/analysis/{1}".format(cuckoo.root, task_id))
        try:
            # BUGFIX: prompt read "(/y/N)"; the stray slash was a typo.
            resubmit = strtobool(input("Would you like to resubmit it? (y/N)").lower())
        except ValueError:
            exit()
        if not resubmit:
            exit()
    results = cuckoo.submit_file(temp_filename, temp_file.getvalue(), tags=args.tags, options=options)

# Poll every submitted task until it is reported or failed, printing each
# state change and the report URL once available.
tasks = {}
task_ids = results['task_ids']
for task_id in task_ids:
    tasks[task_id] = dict(previous_state=None, current_state=None)
while len(tasks) > 0:
    # BUGFIX: iterate over a snapshot of the keys; deleting from a dict
    # while iterating over it raises RuntimeError on Python 3.
    for task_id in list(tasks.keys()):
        tasks[task_id]['previous_state'] = tasks[task_id]['current_state']
        tasks[task_id]['current_state'] = cuckoo.get_task_status(task_id)
        if tasks[task_id]['current_state'] != tasks[task_id]['previous_state']:
            print("Task {0} is {1}".format(task_id, tasks[task_id]['current_state']))
            if tasks[task_id]['current_state'] == "reported":
                print("{0}/analysis/{1}".format(cuckoo.root, task_id))
        if tasks[task_id]['current_state'] == "reported" or tasks[task_id]['current_state'].startswith("failed"):
            del tasks[task_id]
    sleep(1)
!/usr/bin/env python -*- coding: utf-8 -*- | 76 | en | 0.623217 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A module for visualizing device coupling maps"""
import math
import numpy as np
from qiskit.exceptions import QiskitError
from .matplotlib import HAS_MATPLOTLIB
from .exceptions import VisualizationError
class _GraphDist():
    """Scale a marker size so circles stay circular on non-square axes."""

    def __init__(self, size, ax, x=True):
        self.size = size
        self.ax = ax  # pylint: disable=invalid-name
        self.x = x

    @property
    def dist_real(self):
        """Axes extent along the chosen dimension, in display (pixel) units."""
        lower_left = self.ax.transAxes.transform((0, 0))
        upper_right = self.ax.transAxes.transform((1, 1))
        if self.x:
            return upper_right[0] - lower_left[0]
        return upper_right[1] - lower_left[1]

    @property
    def dist_abs(self):
        """Axes extent along the chosen dimension, in data units."""
        bounds = self.ax.get_xlim() if self.x else self.ax.get_ylim()
        return bounds[0] - bounds[1]

    @property
    def value(self):
        """Marker size converted from display units into data units."""
        return (self.size / self.dist_real) * self.dist_abs

    def __mul__(self, obj):
        return self.value * obj
def plot_gate_map(backend, figsize=None,
                  plot_directed=False,
                  label_qubits=True,
                  qubit_size=24,
                  line_width=4,
                  font_size=12,
                  qubit_color=None,
                  qubit_labels=None,
                  line_color=None,
                  font_color='w',
                  ax=None):
    """Plots the gate map of a device.

    Args:
        backend (BaseBackend): A backend instance,
        figsize (tuple): Output figure size (wxh) in inches.
        plot_directed (bool): Plot directed coupling map.
        label_qubits (bool): Label the qubits.
        qubit_size (float): Size of qubit marker.
        line_width (float): Width of lines.
        font_size (int): Font size of qubit labels.
        qubit_color (list): A list of colors for the qubits
        qubit_labels (list): A list of qubit labels
        line_color (list): A list of colors for each line from coupling_map.
        font_color (str): The font color for the qubit labels.
        ax (Axes): A Matplotlib axes instance.

    Returns:
        Figure: A Matplotlib figure instance, or None when the caller
        supplied an Axes instance (drawing then happens on that Axes).

    Raises:
        QiskitError: if tried to pass a simulator.
        ImportError: if matplotlib not installed.

    Example:
        .. jupyter-execute::
            :hide-code:
            :hide-output:

            from qiskit.test.ibmq_mock import mock_get_backend
            mock_get_backend('FakeVigo')

        .. jupyter-execute::

            from qiskit import QuantumCircuit, execute, IBMQ
            from qiskit.visualization import plot_gate_map
            %matplotlib inline
            provider = IBMQ.load_account()
            accountProvider = IBMQ.get_provider(hub='ibm-q')
            backend = accountProvider.get_backend('ibmq_vigo')
            plot_gate_map(backend)
    """
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed. To install, '
                          'run "pip install matplotlib".')
    from matplotlib import get_backend
    import matplotlib.pyplot as plt  # pylint: disable=import-error
    import matplotlib.patches as mpatches

    if backend.configuration().simulator:
        raise QiskitError('Requires a device backend, not simulator.')

    input_axes = False
    if ax:
        input_axes = True

    # Hand-tuned [row, col] coordinates for the qubits of known device sizes.
    mpl_data = {}

    mpl_data[1] = [[0, 0]]

    mpl_data[5] = [[1, 0], [0, 1], [1, 1], [1, 2], [2, 1]]

    mpl_data[7] = [[0, 0], [0, 1], [0, 2],
                   [1, 1],
                   [2, 0], [2, 1], [2, 2]]

    mpl_data[20] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
                    [1, 0], [1, 1], [1, 2], [1, 3], [1, 4],
                    [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
                    [3, 0], [3, 1], [3, 2], [3, 3], [3, 4]]

    mpl_data[15] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
                    [0, 5], [0, 6], [1, 7], [1, 6], [1, 5],
                    [1, 4], [1, 3], [1, 2], [1, 1], [1, 0]]

    mpl_data[16] = [[1, 0], [0, 0], [0, 1], [0, 2], [0, 3],
                    [0, 4], [0, 5], [0, 6], [0, 7], [1, 7],
                    [1, 6], [1, 5], [1, 4], [1, 3], [1, 2], [1, 1]]

    mpl_data[27] = [[1, 0], [1, 1], [2, 1], [3, 1], [1, 2],
                    [3, 2], [0, 3], [1, 3], [3, 3], [4, 3],
                    [1, 4], [3, 4], [1, 5], [2, 5], [3, 5],
                    [1, 6], [3, 6], [0, 7], [1, 7], [3, 7],
                    [4, 7], [1, 8], [3, 8], [1, 9], [2, 9],
                    [3, 9], [3, 10]]

    mpl_data[28] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
                    [1, 2], [1, 6],
                    [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
                    [2, 5], [2, 6], [2, 7], [2, 8],
                    [3, 0], [3, 4], [3, 8],
                    [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
                    [4, 5], [4, 6], [4, 7], [4, 8]]

    mpl_data[53] = [[0, 2], [0, 3], [0, 4], [0, 5], [0, 6],
                    [1, 2], [1, 6],
                    [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
                    [2, 5], [2, 6], [2, 7], [2, 8],
                    [3, 0], [3, 4], [3, 8],
                    [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
                    [4, 5], [4, 6], [4, 7], [4, 8],
                    [5, 2], [5, 6],
                    [6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
                    [6, 5], [6, 6], [6, 7], [6, 8],
                    [7, 0], [7, 4], [7, 8],
                    [8, 0], [8, 1], [8, 2], [8, 3], [8, 4],
                    [8, 5], [8, 6], [8, 7], [8, 8],
                    [9, 2], [9, 6]]

    mpl_data[65] = [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
                    [0, 5], [0, 6], [0, 7], [0, 8], [0, 9],
                    [1, 0], [1, 4], [1, 8],
                    [2, 0], [2, 1], [2, 2], [2, 3], [2, 4],
                    [2, 5], [2, 6], [2, 7], [2, 8], [2, 9], [2, 10],
                    [3, 2], [3, 6], [3, 10],
                    [4, 0], [4, 1], [4, 2], [4, 3], [4, 4],
                    [4, 5], [4, 6], [4, 7], [4, 8], [4, 9], [4, 10],
                    [5, 0], [5, 4], [5, 8],
                    [6, 0], [6, 1], [6, 2], [6, 3], [6, 4],
                    [6, 5], [6, 6], [6, 7], [6, 8], [6, 9], [6, 10],
                    [7, 2], [7, 6], [7, 10],
                    [8, 1], [8, 2], [8, 3], [8, 4],
                    [8, 5], [8, 6], [8, 7], [8, 8], [8, 9], [8, 10]]

    config = backend.configuration()
    num_qubits = config.n_qubits
    cmap = config.coupling_map

    if qubit_labels is None:
        qubit_labels = list(range(num_qubits))
    else:
        if len(qubit_labels) != num_qubits:
            raise QiskitError('Length of qubit labels '
                              'does not equal number '
                              'of qubits.')

    if num_qubits in mpl_data.keys():
        grid_data = mpl_data[num_qubits]
    else:
        if not input_axes:
            fig, ax = plt.subplots(figsize=(5, 5))  # pylint: disable=invalid-name
            ax.axis('off')
            return fig
        # BUGFIX: no layout exists for this qubit count and the caller passed
        # an Axes; the code previously fell through and raised NameError on
        # the undefined `grid_data`. Nothing can be drawn, so return None.
        return None

    x_max = max([d[1] for d in grid_data])
    y_max = max([d[0] for d in grid_data])
    max_dim = max(x_max, y_max)

    if figsize is None:
        # Near-square layouts (and single qubits) get a square figure.
        if num_qubits == 1 or (x_max / max_dim > 0.33 and y_max / max_dim > 0.33):
            figsize = (5, 5)
        else:
            figsize = (9, 3)

    if ax is None:
        fig, ax = plt.subplots(figsize=figsize)  # pylint: disable=invalid-name
        ax.axis('off')

    # set coloring
    if qubit_color is None:
        qubit_color = ['#648fff'] * config.n_qubits
    if line_color is None:
        line_color = ['#648fff'] * len(cmap) if cmap else []

    # Add lines for couplings
    if num_qubits != 1:
        for ind, edge in enumerate(cmap):
            is_symmetric = False
            if edge[::-1] in cmap:
                is_symmetric = True
            y_start = grid_data[edge[0]][0]
            x_start = grid_data[edge[0]][1]
            y_end = grid_data[edge[1]][0]
            x_end = grid_data[edge[1]][1]

            # Symmetric couplings are drawn only to the halfway point so the
            # two directions meet in the middle.
            if is_symmetric:
                if y_start == y_end:
                    x_end = (x_end - x_start) / 2 + x_start
                elif x_start == x_end:
                    y_end = (y_end - y_start) / 2 + y_start
                else:
                    x_end = (x_end - x_start) / 2 + x_start
                    y_end = (y_end - y_start) / 2 + y_start
            ax.add_artist(plt.Line2D([x_start, x_end], [-y_start, -y_end],
                                     color=line_color[ind], linewidth=line_width,
                                     zorder=0))
            if plot_directed:
                dx = x_end - x_start  # pylint: disable=invalid-name
                dy = y_end - y_start  # pylint: disable=invalid-name
                if is_symmetric:
                    x_arrow = x_start + dx * 0.95
                    y_arrow = -y_start - dy * 0.95
                    dx_arrow = dx * 0.01
                    dy_arrow = -dy * 0.01
                    head_width = 0.15
                else:
                    x_arrow = x_start + dx * 0.5
                    y_arrow = -y_start - dy * 0.5
                    dx_arrow = dx * 0.2
                    dy_arrow = -dy * 0.2
                    head_width = 0.2
                ax.add_patch(mpatches.FancyArrow(x_arrow,
                                                 y_arrow,
                                                 dx_arrow,
                                                 dy_arrow,
                                                 head_width=head_width,
                                                 length_includes_head=True,
                                                 edgecolor=None,
                                                 linewidth=0,
                                                 facecolor=line_color[ind],
                                                 zorder=1))

    # Add circles for qubits
    for var, idx in enumerate(grid_data):
        _idx = [idx[1], -idx[0]]
        # _GraphDist keeps the marker circular even on non-square axes.
        width = _GraphDist(qubit_size, ax, True)
        height = _GraphDist(qubit_size, ax, False)
        ax.add_artist(mpatches.Ellipse(
            _idx, width, height, color=qubit_color[var], zorder=1))
        if label_qubits:
            ax.text(*_idx, s=qubit_labels[var],
                    horizontalalignment='center',
                    verticalalignment='center',
                    color=font_color, size=font_size, weight='bold')
    ax.set_xlim([-1, x_max + 1])
    ax.set_ylim([-(y_max + 1), 1])
    if not input_axes:
        # Avoid a duplicate figure display in inline/notebook backends.
        if get_backend() in ['module://ipykernel.pylab.backend_inline',
                             'nbAgg']:
            plt.close(fig)
        return fig
    return None
def plot_circuit_layout(circuit, backend, view='virtual'):
    """Plot the layout of a circuit transpiled for a given
    target backend.

    Args:
        circuit (QuantumCircuit): Input quantum circuit.
        backend (BaseBackend): Target backend.
        view (str): Layout view: either 'virtual' or 'physical'.

    Returns:
        Figure: A matplotlib figure showing layout.

    Raises:
        QiskitError: Invalid view type given.
        VisualizationError: Circuit has no layout attribute.

    Example:
        .. jupyter-execute::
            :hide-code:
            :hide-output:

            from qiskit.test.ibmq_mock import mock_get_backend
            mock_get_backend('FakeVigo')

        .. jupyter-execute::

            import numpy as np
            from qiskit import QuantumCircuit, IBMQ, transpile
            from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
            from qiskit.tools.monitor import job_monitor
            import matplotlib.pyplot as plt
            %matplotlib inline

            IBMQ.load_account()
            ghz = QuantumCircuit(3, 3)
            ghz.h(0)
            for idx in range(1,3):
                ghz.cx(0,idx)
            ghz.measure(range(3), range(3))
            provider = IBMQ.get_provider(hub='ibm-q')
            backend = provider.get_backend('ibmq_vigo')
            new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)
            plot_circuit_layout(new_circ_lv3, backend)
    """
    # NOTE(review): relies on the private QuantumCircuit._layout attribute,
    # which is populated only by the transpiler.
    if circuit._layout is None:
        raise QiskitError('Circuit has no layout. '
                          'Perhaps it has not been transpiled.')

    num_qubits = backend.configuration().n_qubits

    qubits = []
    qubit_labels = [None] * num_qubits

    if view == 'virtual':
        # Label each used physical qubit with the index of the virtual qubit
        # mapped onto it (ancillas are skipped and stay unlabeled).
        for key, val in circuit._layout.get_virtual_bits().items():
            if key.register.name != 'ancilla':
                qubits.append(val)
                qubit_labels[val] = key.index

    elif view == 'physical':
        # Label each used physical qubit with its own (physical) index.
        for key, val in circuit._layout.get_physical_bits().items():
            if val.register.name != 'ancilla':
                qubits.append(key)
                qubit_labels[key] = key

    else:
        raise VisualizationError("Layout view must be 'virtual' or 'physical'.")

    # Used qubits, and couplings whose both endpoints are used, are drawn
    # in black; everything else keeps the default blue.
    qcolors = ['#648fff'] * num_qubits
    for k in qubits:
        qcolors[k] = 'k'

    cmap = backend.configuration().coupling_map

    lcolors = ['#648fff'] * len(cmap)

    for idx, edge in enumerate(cmap):
        if edge[0] in qubits and edge[1] in qubits:
            lcolors[idx] = 'k'

    fig = plot_gate_map(backend,
                        qubit_color=qcolors,
                        qubit_labels=qubit_labels,
                        line_color=lcolors)
    return fig
def plot_error_map(backend, figsize=(12, 9), show_title=True):
    """Plots the error map of a given backend.

    Args:
        backend (IBMQBackend): Given backend.
        figsize (tuple): Figure size in inches.
        show_title (bool): Show the title or not.

    Returns:
        Figure: A matplotlib figure showing error map.

    Raises:
        VisualizationError: Input is not IBMQ backend.
        ImportError: If seaborn is not installed

    Example:
        .. jupyter-execute::
            :hide-code:
            :hide-output:

            from qiskit.test.ibmq_mock import mock_get_backend
            mock_get_backend('FakeVigo')

        .. jupyter-execute::

            from qiskit import QuantumCircuit, execute, IBMQ
            from qiskit.visualization import plot_error_map
            %matplotlib inline

            IBMQ.load_account()
            provider = IBMQ.get_provider(hub='ibm-q')
            backend = provider.get_backend('ibmq_vigo')
            plot_error_map(backend)
    """
    try:
        import seaborn as sns
    except ImportError:
        raise ImportError('Must have seaborn installed to use plot_error_map. '
                          'To install, run "pip install seaborn".')
    if not HAS_MATPLOTLIB:
        raise ImportError('Must have Matplotlib installed. To install, '
                          'run "pip install matplotlib".')
    import matplotlib
    from matplotlib import get_backend
    import matplotlib.pyplot as plt  # pylint: disable=import-error
    import matplotlib.gridspec as gridspec
    from matplotlib import ticker

    color_map = sns.cubehelix_palette(reverse=True, as_cmap=True)

    props = backend.properties().to_dict()
    config = backend.configuration().to_dict()

    num_qubits = config['n_qubits']

    # U2 error rates
    single_gate_errors = [0] * num_qubits
    for gate in props['gates']:
        if gate['gate'] == 'u2':
            _qubit = gate['qubits'][0]
            single_gate_errors[_qubit] = gate['parameters'][0]['value']

    # Convert to percent
    single_gate_errors = 100 * np.asarray(single_gate_errors)
    avg_1q_err = np.mean(single_gate_errors)

    single_norm = matplotlib.colors.Normalize(
        vmin=min(single_gate_errors), vmax=max(single_gate_errors))
    q_colors = [color_map(single_norm(err)) for err in single_gate_errors]

    cmap = config['coupling_map']

    directed = False
    line_colors = []
    if cmap:
        # A map is "directed" when some edge lacks its reverse (only checked
        # for small devices, where drawing arrows is still readable).
        directed = False
        if num_qubits < 20:
            for edge in cmap:
                if not [edge[1], edge[0]] in cmap:
                    directed = True
                    break

        cx_errors = []
        for line in cmap:
            for item in props['gates']:
                if item['qubits'] == line:
                    cx_errors.append(item['parameters'][0]['value'])
                    break
            else:
                continue

        # Convert to percent
        cx_errors = 100 * np.asarray(cx_errors)
        avg_cx_err = np.mean(cx_errors)

        cx_norm = matplotlib.colors.Normalize(
            vmin=min(cx_errors), vmax=max(cx_errors))
        line_colors = [color_map(cx_norm(err)) for err in cx_errors]

    # Measurement errors
    read_err = []

    for qubit in range(num_qubits):
        for item in props['qubits'][qubit]:
            if item['name'] == 'readout_error':
                read_err.append(item['value'])

    read_err = 100 * np.asarray(read_err)
    avg_read_err = np.mean(read_err)
    max_read_err = np.max(read_err)

    fig = plt.figure(figsize=figsize)
    gridspec.GridSpec(nrows=2, ncols=3)

    grid_spec = gridspec.GridSpec(12, 12, height_ratios=[1] * 11 + [0.5],
                                  width_ratios=[2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2])

    left_ax = plt.subplot(grid_spec[2:10, :1])
    main_ax = plt.subplot(grid_spec[:11, 1:11])
    right_ax = plt.subplot(grid_spec[2:10, 11:])
    bleft_ax = plt.subplot(grid_spec[-1, :5])
    if cmap:
        bright_ax = plt.subplot(grid_spec[-1, 7:])

    plot_gate_map(backend, qubit_color=q_colors,
                  line_color=line_colors,
                  qubit_size=28,
                  line_width=5,
                  plot_directed=directed,
                  ax=main_ax)
    main_ax.axis('off')
    main_ax.set_aspect(1)
    if cmap:
        single_cb = matplotlib.colorbar.ColorbarBase(bleft_ax, cmap=color_map,
                                                     norm=single_norm,
                                                     orientation='horizontal')
        tick_locator = ticker.MaxNLocator(nbins=5)
        single_cb.locator = tick_locator
        # BUGFIX: update_ticks() was previously called twice in a row here;
        # the duplicate call was redundant and has been removed.
        single_cb.update_ticks()
        bleft_ax.set_title('H error rate (%) [Avg. = {}]'.format(round(avg_1q_err, 3)))

    if cmap is None:
        bleft_ax.axis('off')
        bleft_ax.set_title('H error rate (%) = {}'.format(round(avg_1q_err, 3)))

    if cmap:
        cx_cb = matplotlib.colorbar.ColorbarBase(bright_ax, cmap=color_map,
                                                 norm=cx_norm,
                                                 orientation='horizontal')
        tick_locator = ticker.MaxNLocator(nbins=5)
        cx_cb.locator = tick_locator
        cx_cb.update_ticks()
        bright_ax.set_title('CNOT error rate (%) [Avg. = {}]'.format(round(avg_cx_err, 3)))

    # Readout-error bar charts: all on the left axis for small devices,
    # split across the two side axes otherwise.
    if num_qubits < 10:
        num_left = num_qubits
        num_right = 0
    else:
        num_left = math.ceil(num_qubits / 2)
        num_right = num_qubits - num_left

    left_ax.barh(range(num_left), read_err[:num_left], align='center', color='#DDBBBA')
    left_ax.axvline(avg_read_err, linestyle='--', color='#212121')
    left_ax.set_yticks(range(num_left))
    left_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
    left_ax.set_yticklabels([str(kk) for kk in range(num_left)], fontsize=12)
    left_ax.invert_yaxis()
    left_ax.set_title('Readout Error (%)', fontsize=12)

    for spine in left_ax.spines.values():
        spine.set_visible(False)

    if num_right:
        right_ax.barh(range(num_left, num_qubits), read_err[num_left:],
                      align='center', color='#DDBBBA')
        right_ax.axvline(avg_read_err, linestyle='--', color='#212121')
        right_ax.set_yticks(range(num_left, num_qubits))
        right_ax.set_xticks([0, round(avg_read_err, 2), round(max_read_err, 2)])
        right_ax.set_yticklabels([str(kk) for kk in range(num_left, num_qubits)],
                                 fontsize=12)
        right_ax.invert_yaxis()
        right_ax.invert_xaxis()
        right_ax.yaxis.set_label_position("right")
        right_ax.yaxis.tick_right()
        right_ax.set_title('Readout Error (%)', fontsize=12)
    else:
        right_ax.axis('off')

    for spine in right_ax.spines.values():
        spine.set_visible(False)

    if show_title:
        fig.suptitle('{name} Error Map'.format(name=backend.name()),
                     fontsize=24, y=0.9)
    # Avoid a duplicate figure display in inline/notebook backends.
    if get_backend() in ['module://ipykernel.pylab.backend_inline',
                         'nbAgg']:
        plt.close(fig)
    return fig
| qiskit/visualization/gate_map.py | 21,274 | Transform the circles properly for non-square axes.
Distance abs
Compute distance.
Plot the layout of a circuit transpiled for a given
target backend.
Args:
circuit (QuantumCircuit): Input quantum circuit.
backend (BaseBackend): Target backend.
view (str): Layout view: either 'virtual' or 'physical'.
Returns:
Figure: A matplotlib figure showing layout.
Raises:
QiskitError: Invalid view type given.
VisualizationError: Circuit has no layout attribute.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit, IBMQ, transpile
from qiskit.visualization import plot_histogram, plot_gate_map, plot_circuit_layout
from qiskit.tools.monitor import job_monitor
import matplotlib.pyplot as plt
%matplotlib inline
IBMQ.load_account()
ghz = QuantumCircuit(3, 3)
ghz.h(0)
for idx in range(1,3):
ghz.cx(0,idx)
ghz.measure(range(3), range(3))
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
new_circ_lv3 = transpile(ghz, backend=backend, optimization_level=3)
plot_circuit_layout(new_circ_lv3, backend)
Plots the error map of a given backend.
Args:
backend (IBMQBackend): Given backend.
figsize (tuple): Figure size in inches.
show_title (bool): Show the title or not.
Returns:
Figure: A matplotlib figure showing error map.
Raises:
VisualizationError: Input is not IBMQ backend.
ImportError: If seaborn is not installed
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_error_map
%matplotlib inline
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
backend = provider.get_backend('ibmq_vigo')
plot_error_map(backend)
Plots the gate map of a device.
Args:
backend (BaseBackend): A backend instance,
figsize (tuple): Output figure size (wxh) in inches.
plot_directed (bool): Plot directed coupling map.
label_qubits (bool): Label the qubits.
qubit_size (float): Size of qubit marker.
line_width (float): Width of lines.
font_size (int): Font size of qubit labels.
qubit_color (list): A list of colors for the qubits
qubit_labels (list): A list of qubit labels
line_color (list): A list of colors for each line from coupling_map.
font_color (str): The font color for the qubit labels.
ax (Axes): A Matplotlib axes instance.
Returns:
Figure: A Matplotlib figure instance.
Raises:
QiskitError: if tried to pass a simulator.
ImportError: if matplotlib not installed.
Example:
.. jupyter-execute::
:hide-code:
:hide-output:
from qiskit.test.ibmq_mock import mock_get_backend
mock_get_backend('FakeVigo')
.. jupyter-execute::
from qiskit import QuantumCircuit, execute, IBMQ
from qiskit.visualization import plot_gate_map
%matplotlib inline
provider = IBMQ.load_account()
accountProvider = IBMQ.get_provider(hub='ibm-q')
backend = accountProvider.get_backend('ibmq_vigo')
plot_gate_map(backend)
Return value.
A module for visualizing device coupling maps
This code is part of Qiskit. (C) Copyright IBM 2017, 2018. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. pylint: disable=invalid-name pylint: disable=invalid-name pylint: disable=invalid-name pylint: disable=import-error pylint: disable=invalid-name pylint: disable=invalid-name set coloring Add lines for couplings pylint: disable=invalid-name pylint: disable=invalid-name Add circles for qubits pylint: disable=import-error U2 error rates Convert to percent Convert to percent Measurement errors | 4,513 | en | 0.538534 |
"""Generate benchmark Makeflows, each creating 100 files of one fixed size."""


def write_makeflow(path, disk_mb, file_size, jobs=100):
    """Write a Makeflow whose rules generate `jobs` files of `file_size` bytes.

    Produces byte-identical output to the original hand-unrolled script:
    a resource header followed by one generate rule per output file.
    """
    with open(path, "w+") as f:
        f.write("CORES=1\nMEMORY=1000\nDISK=%i\n\n" % disk_mb)
        for x in range(jobs):
            f.write("out%i.txt:generate\n\t./generate out%i.txt %i\n\n" % (x, x, file_size))


KIB = 1024

# (file-name suffix, DISK resource in MB, per-file size in bytes)
_CONFIGS = [
    ("1KiB", 1, 1 * KIB),
    ("10KiB", 1, 10 * KIB),
    ("100KiB", 1, 100 * KIB),
    ("1MiB", 2, 1 * KIB ** 2),
    ("10MiB", 20, 10 * KIB ** 2),
    ("100MiB", 200, 100 * KIB ** 2),
    ("1GiB", 2000, 1 * KIB ** 3),
    ("10GiB", 10738, 10 * KIB ** 3),
]

for _suffix, _disk, _size in _CONFIGS:
    write_makeflow("Makeflow" + _suffix, _disk, _size)
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
from bpy.types import (
Panel,
)
from bl_ui.utils import PresetPanel
from bl_ui.properties_physics_common import (
point_cache_ui,
effector_weights_ui,
)
def cloth_panel_enabled(md):
    """Return True when the cloth panels should be editable.

    Editing is disabled while the modifier's point cache holds a baked
    simulation, so a bake cannot be invalidated accidentally.

    :param md: cloth modifier datablock (exposes ``point_cache.is_baked``).
    :return: bool
    """
    # ``is_baked`` is a boolean RNA property, so plain negation is the
    # idiomatic equivalent of the previous ``is_baked is False`` test.
    return not md.point_cache.is_baked
class CLOTH_PT_presets(PresetPanel, Panel):
    """Preset popover for saving and applying cloth simulation settings."""
    bl_label = "Cloth Presets"
    preset_subdir = "cloth"                     # presets stored under .../presets/cloth
    preset_operator = "script.execute_preset"   # operator run when a preset is chosen
    preset_add_operator = "cloth.preset_add"    # operator that saves a new preset
class PhysicButtonsPanel:
    """Mix-in placing a panel in the Properties editor's Physics tab."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "physics"
    @classmethod
    def poll(cls, context):
        # Visible only for mesh objects that have a cloth modifier, under
        # a render engine listed in the subclass's COMPAT_ENGINES.
        ob = context.object
        return (ob and ob.type == 'MESH') and (context.engine in cls.COMPAT_ENGINES) and (context.cloth)
class PHYSICS_PT_cloth(PhysicButtonsPanel, Panel):
    """Top-level cloth panel: solver quality and simulation speed."""
    bl_label = "Cloth"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header_preset(self, _context):
        # Preset popover button in the panel header.
        CLOTH_PT_presets.draw_panel_header(self.layout)
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        cloth = md.settings
        # Grey out while the point cache holds a baked simulation.
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "quality", text="Quality Steps")
        col = flow.column()
        col.prop(cloth, "time_scale", text="Speed Multiplier")
class PHYSICS_PT_cloth_physical_properties(PhysicButtonsPanel, Panel):
    """Sub-panel for mass, air drag and the bending model selector."""
    bl_label = "Physical Properties"
    bl_parent_id = 'PHYSICS_PT_cloth'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        cloth = md.settings
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "mass", text="Vertex Mass")
        col = flow.column()
        col.prop(cloth, "air_damping", text="Air Viscosity")
        col = flow.column()
        col.prop(cloth, "bending_model")
class PHYSICS_PT_cloth_stiffness(PhysicButtonsPanel, Panel):
    """Spring stiffness sub-panel; labels depend on the bending model."""
    bl_label = "Stiffness"
    bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        cloth = md.settings
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        if cloth.bending_model == 'ANGULAR':
            # Angular model exposes tension and compression separately.
            col.prop(cloth, "tension_stiffness", text="Tension")
            col = flow.column()
            col.prop(cloth, "compression_stiffness", text="Compression")
        else:
            # Linear model reuses tension_stiffness as "Structural".
            col.prop(cloth, "tension_stiffness", text="Structural")
        col = flow.column()
        col.prop(cloth, "shear_stiffness", text="Shear")
        col = flow.column()
        col.prop(cloth, "bending_stiffness", text="Bending")
class PHYSICS_PT_cloth_damping(PhysicButtonsPanel, Panel):
    """Spring damping sub-panel; mirrors the stiffness panel's layout."""
    bl_label = "Damping"
    bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        cloth = md.settings
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        if cloth.bending_model == 'ANGULAR':
            # Angular model: separate tension/compression damping.
            col.prop(cloth, "tension_damping", text="Tension")
            col = flow.column()
            col.prop(cloth, "compression_damping", text="Compression")
        else:
            # Linear model reuses tension_damping as "Structural".
            col.prop(cloth, "tension_damping", text="Structural")
        col = flow.column()
        col.prop(cloth, "shear_damping", text="Shear")
        col = flow.column()
        col.prop(cloth, "bending_damping", text="Bending")
class PHYSICS_PT_cloth_internal_springs(PhysicButtonsPanel, Panel):
    """Internal volume springs sub-panel, toggled from its header."""
    bl_label = "Internal Springs"
    bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header(self, context):
        # Checkbox in the header enables/disables the whole feature.
        cloth = context.cloth.settings
        self.layout.active = cloth_panel_enabled(context.cloth)
        self.layout.prop(cloth, "use_internal_springs", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        cloth = context.cloth.settings
        md = context.cloth
        ob = context.object
        # Body is greyed out unless the header checkbox is on.
        layout.active = cloth.use_internal_springs and cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "internal_spring_max_length", text="Max Spring Creation Length")
        col = flow.column()
        col.prop(cloth, "internal_spring_max_diversion", text="Max Creation Diversion")
        col = flow.column()
        col.prop(cloth, "internal_spring_normal_check", text="Check Surface Normals")
        col = flow.column()
        col.prop(cloth, "internal_tension_stiffness", text="Tension")
        col = flow.column()
        col.prop(cloth, "internal_compression_stiffness", text="Compression")
        col = flow.column()
        col.prop_search(cloth, "vertex_group_intern", ob, "vertex_groups", text="Vertex Group")
        col = flow.column()
        col.prop(cloth, "internal_tension_stiffness_max", text="Max Tension")
        col = flow.column()
        col.prop(cloth, "internal_compression_stiffness_max", text="Max Compression")
class PHYSICS_PT_cloth_pressure(PhysicButtonsPanel, Panel):
    """Cloth pressure sub-panel, toggled from its header."""
    bl_label = "Pressure"
    bl_parent_id = 'PHYSICS_PT_cloth_physical_properties'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header(self, context):
        # Checkbox in the header enables/disables pressure simulation.
        cloth = context.cloth.settings
        self.layout.active = cloth_panel_enabled(context.cloth)
        self.layout.prop(cloth, "use_pressure", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        cloth = context.cloth.settings
        md = context.cloth
        ob = context.object
        layout.active = cloth.use_pressure and cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "uniform_pressure_force")
        col = flow.column()
        col.prop(cloth, "use_pressure_volume", text="Custom Volume")
        col = flow.column()
        # Target volume only makes sense with a custom volume enabled.
        col.active = cloth.use_pressure_volume
        col.prop(cloth, "target_volume")
        col = flow.column()
        col.prop(cloth, "pressure_factor")
        col = flow.column()
        col.prop(cloth, "fluid_density")
        col = flow.column()
        col.prop_search(cloth, "vertex_group_pressure", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_cache(PhysicButtonsPanel, Panel):
    """Point-cache sub-panel; delegates to the shared point_cache_ui."""
    bl_label = "Cache"
    bl_parent_id = 'PHYSICS_PT_cloth'
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        md = context.cloth
        # Shared cache UI used by all point-cache based physics systems.
        point_cache_ui(self, md.point_cache, cloth_panel_enabled(md), 'CLOTH')
class PHYSICS_PT_cloth_shape(PhysicButtonsPanel, Panel):
    """Shape sub-panel: pinning, sewing, shrinking and rest shape key."""
    bl_label = "Shape"
    bl_parent_id = 'PHYSICS_PT_cloth'
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        ob = context.object
        cloth = md.settings
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column(align=True)
        col.prop_search(cloth, "vertex_group_mass", ob, "vertex_groups", text="Pin Group")
        sub = col.column(align=True)
        # Pin stiffness is meaningless without a pin vertex group.
        sub.active = cloth.vertex_group_mass != ""
        sub.prop(cloth, "pin_stiffness", text="Stiffness")
        col.separator()
        col = flow.column(align=True)
        col.prop(cloth, "use_sewing_springs", text="Sewing")
        sub = col.column(align=True)
        sub.active = cloth.use_sewing_springs
        sub.prop(cloth, "sewing_force_max", text="Max Sewing Force")
        col.separator()
        col = flow.column()
        col.prop(cloth, "shrink_min", text="Shrinking Factor")
        col = flow.column()
        col.prop(cloth, "use_dynamic_mesh", text="Dynamic Mesh")
        key = ob.data.shape_keys
        if key:
            col = flow.column()
            # Rest shape key and dynamic mesh are mutually exclusive.
            col.active = not cloth.use_dynamic_mesh
            col.prop_search(cloth, "rest_shape_key", key, "key_blocks", text="Rest Shape Key")
class PHYSICS_PT_cloth_collision(PhysicButtonsPanel, Panel):
    """Parent collision sub-panel holding the shared quality setting."""
    bl_label = "Collisions"
    bl_parent_id = 'PHYSICS_PT_cloth'
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        cloth = context.cloth.collision_settings
        md = context.cloth
        # Active when either object or self collisions are enabled.
        layout.active = (cloth.use_collision or cloth.use_self_collision) and cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "collision_quality", text="Quality")
class PHYSICS_PT_cloth_object_collision(PhysicButtonsPanel, Panel):
    """Cloth-vs-object collision sub-panel, toggled from its header."""
    bl_label = "Object Collisions"
    bl_parent_id = 'PHYSICS_PT_cloth_collision'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header(self, context):
        cloth = context.cloth.collision_settings
        self.layout.active = cloth_panel_enabled(context.cloth)
        self.layout.prop(cloth, "use_collision", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        cloth = context.cloth.collision_settings
        md = context.cloth
        layout.active = cloth.use_collision and cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "distance_min", slider=True, text="Distance")
        col = flow.column()
        col.prop(cloth, "impulse_clamp")
        col = flow.column()
        # Optional collection restricting which objects collide.
        col.prop(cloth, "collection")
class PHYSICS_PT_cloth_self_collision(PhysicButtonsPanel, Panel):
    """Cloth self-collision sub-panel, toggled from its header."""
    bl_label = "Self Collisions"
    bl_parent_id = 'PHYSICS_PT_cloth_collision'
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw_header(self, context):
        cloth = context.cloth.collision_settings
        self.layout.active = cloth_panel_enabled(context.cloth)
        self.layout.prop(cloth, "use_self_collision", text="")
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        cloth = context.cloth.collision_settings
        md = context.cloth
        ob = context.object
        layout.active = cloth.use_self_collision and cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=False, columns=0, even_columns=True, even_rows=False, align=True)
        col = flow.column()
        col.prop(cloth, "self_friction", text="Friction")
        col = flow.column()
        col.prop(cloth, "self_distance_min", slider=True, text="Distance")
        col = flow.column()
        col.prop(cloth, "self_impulse_clamp")
        col = flow.column()
        col.prop_search(cloth, "vertex_group_self_collisions", ob, "vertex_groups", text="Vertex Group")
class PHYSICS_PT_cloth_property_weights(PhysicButtonsPanel, Panel):
    """Vertex-group weight painting sub-panel for per-vertex maxima."""
    bl_label = "Property Weights"
    bl_parent_id = 'PHYSICS_PT_cloth'
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        md = context.cloth
        ob = context.object
        cloth = context.cloth.settings
        layout.active = cloth_panel_enabled(md)
        flow = layout.grid_flow(row_major=True, columns=0, even_columns=True, even_rows=False, align=True)
        # Each column pairs a vertex group with its max property value(s).
        col = flow.column()
        col.prop_search(
            cloth, "vertex_group_structural_stiffness", ob, "vertex_groups",
            text="Structural Group",
        )
        col.prop(cloth, "tension_stiffness_max", text="Max Tension")
        col.prop(cloth, "compression_stiffness_max", text="Max Compression")
        col.separator()
        col = flow.column()
        col.prop_search(
            cloth, "vertex_group_shear_stiffness", ob, "vertex_groups",
            text="Shear Group",
        )
        col.prop(cloth, "shear_stiffness_max", text="Max Shearing")
        col.separator()
        col = flow.column()
        col.prop_search(
            cloth, "vertex_group_bending", ob, "vertex_groups",
            text="Bending Group"
        )
        col.prop(cloth, "bending_stiffness_max", text="Max Bending")
        col.separator()
        col = flow.column()
        col.prop_search(
            cloth, "vertex_group_shrink", ob, "vertex_groups",
            text="Shrinking Group"
        )
        col.prop(cloth, "shrink_max", text="Max Shrinking")
class PHYSICS_PT_cloth_field_weights(PhysicButtonsPanel, Panel):
    """Force-field influence sub-panel; delegates to the shared weights UI."""
    bl_label = "Field Weights"
    bl_parent_id = 'PHYSICS_PT_cloth'
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE', 'BLENDER_WORKBENCH'}
    def draw(self, context):
        cloth = context.cloth.settings
        effector_weights_ui(self, cloth.effector_weights, 'CLOTH')
# All panel classes of this module, in registration order (parents
# before their bl_parent_id children).
classes = (
    CLOTH_PT_presets,
    PHYSICS_PT_cloth,
    PHYSICS_PT_cloth_physical_properties,
    PHYSICS_PT_cloth_stiffness,
    PHYSICS_PT_cloth_damping,
    PHYSICS_PT_cloth_internal_springs,
    PHYSICS_PT_cloth_pressure,
    PHYSICS_PT_cloth_cache,
    PHYSICS_PT_cloth_shape,
    PHYSICS_PT_cloth_collision,
    PHYSICS_PT_cloth_object_collision,
    PHYSICS_PT_cloth_self_collision,
    PHYSICS_PT_cloth_property_weights,
    PHYSICS_PT_cloth_field_weights,
)
if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| Blender 2.91/2.91/scripts/startup/bl_ui/properties_physics_cloth.py | 15,864 | BEGIN GPL LICENSE BLOCK This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. END GPL LICENSE BLOCK <pep8 compliant> only for live edit. | 775 | en | 0.894343 |
from django.template import Library
from django.utils import timezone
import datetime
register = Library()
@register.filter
def utcoffset(value):
    """Render the current timezone's UTC offset, e.g. ``(UTC+05:30)``.

    The ``value`` argument is ignored (Django filters must accept one);
    the offset is taken from the thread's current timezone instead.
    Returns an empty string when the offset is zero (UTC).
    """
    tz = timezone.get_current_timezone()
    utc_offset = datetime.datetime.now(tz).utcoffset()
    # total_seconds() avoids the old float division and handles negative
    # offsets in one step.
    total_minutes = int(utc_offset.total_seconds()) // 60
    if total_minutes == 0:
        return ''
    # Split sign from magnitude before divmod: the previous
    # divmod(minutes, 60) mis-rendered negative fractional-hour offsets
    # (e.g. -270 minutes became "(UTC-05:30)" instead of "(UTC-04:30)").
    sign = '+' if total_minutes > 0 else '-'
    hours, minutes = divmod(abs(total_minutes), 60)
    return '(UTC%s%02i:%02i)' % (sign, hours, minutes)
| src/web/drapo/templatetags/timezones.py | 519 | Yeap, it's strange, but tags are so ugly.. So I defined not use value, but get current timezone from utils | 106 | en | 0.77773 |
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:tornado
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
from thrift.transport import TTransport
| examples/service/fastweb_thrift_async/HelloService/ttypes.py | 353 | Autogenerated by Thrift Compiler (0.10.0) DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING options string: py:tornado | 134 | en | 0.918888 |
"""
Tests for pagination template tags and filters.
"""
from mock import Mock
from django import template
from tests import case
class PaginateTest(case.DBTestCase):
    """Tests for paginate template tag."""
    def test_paginate(self):
        """Places Pager object in context with size/num from request."""
        from moztrap.model.tags.models import Tag
        tpl = template.Template(
            "{% load pagination %}{% paginate queryset as pager %}"
            "{% for obj in pager.objects %}{{ obj }} {% endfor %}")
        request = Mock()
        # pagesize/pagenumber come from the querystring on a real request.
        request.GET = {"pagesize": 3, "pagenumber": 2}
        for i in range(1, 7):
            self.F.TagFactory.create(name=str(i))
        qs = Tag.objects.all()
        output = tpl.render(
            template.Context({"request": request, "queryset": qs}))
        # Six tags "1".."6"; page 2 at size 3 yields tags 4-6.
        self.assertEqual(output, "4 5 6 ")
class FilterTest(case.TestCase):
    """Tests for template filters."""
    def test_pagenumber_url(self):
        """``pagenumber_url`` filter updates pagenumber in URL."""
        from moztrap.view.lists.templatetags.pagination import pagenumber_url
        request = Mock()
        request.get_full_path.return_value = (
            "http://localhost/?pagenumber=2&pagesize=10")
        self.assertEqual(
            pagenumber_url(request, 1),
            "http://localhost/?pagenumber=1&pagesize=10")
    def test_pagesize_url(self):
        """``pagesize_url`` updates pagesize in URL (and jumps to page 1)."""
        from moztrap.view.lists.templatetags.pagination import pagesize_url
        request = Mock()
        request.get_full_path.return_value = (
            "http://localhost/?pagenumber=2&pagesize=10")
        # Changing the page size also resets pagenumber to 1.
        self.assertEqual(
            pagesize_url(request, 20),
            "http://localhost/?pagenumber=1&pagesize=20")
    def test_pagenumber(self):
        """``pagenumber`` gets the pagenumber from the request."""
        from moztrap.view.lists.templatetags.pagination import pagenumber
        request = Mock()
        request.GET = {"pagenumber": 2, "pagesize": 10}
        self.assertEqual(pagenumber(request), 2)
    def test_pagesize(self):
        """``pagesize`` gets the pagesize from the request."""
        from moztrap.view.lists.templatetags.pagination import pagesize
        request = Mock()
        request.GET = {"pagenumber": 2, "pagesize": 10}
        self.assertEqual(pagesize(request), 10)
| tests/view/lists/templatetags/test_pagination.py | 2,424 | Tests for template filters.
Tests for paginate template tag.
``pagenumber`` gets the pagenumber from the request.
``pagenumber_url`` filter updates pagenumber in URL.
``pagenumber`` gets the pagenumber from the request.
``pagesize_url`` updates pagesize in URL (and jumps to page 1).
Places Pager object in context with size/num from request.
Tests for pagination template tags and filters. | 390 | en | 0.610467 |
import copy
import numpy as np
from math import cos, sin, pi, atan2
import warnings
import matplotlib.patches as mpatches
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.transforms import Affine2D, Bbox, IdentityTransform
from matplotlib.text import Annotation
def rotated_polygon(xy, ox, oy, angle):
    """Rotate the polygon *xy* by *angle* degrees about the point (ox, oy).

    :param xy: (N, 2) array-like of vertex coordinates
    :param ox: x coordinate of the rotation center
    :param oy: y coordinate of the rotation center
    :param angle: rotation angle in degrees (counter-clockwise)
    :return: new (N, 2) float array of rotated vertices
    """
    theta = angle / 180. * pi
    ct, st = cos(theta), sin(theta)
    pts = np.asarray(xy, dtype="d")
    # Translate to the pivot, apply the 2D rotation matrix, translate back.
    dx = pts[:, 0] - ox
    dy = pts[:, 1] - oy
    xr = ct * dx - st * dy + ox
    yr = st * dx + ct * dy + oy
    return np.column_stack((xr, yr))
# Mapping from ds9 point symbol names to matplotlib marker codes.
_point_type_dict = dict(circle="o",
                        box="s",
                        diamond="D",
                        x="x",
                        cross="+",
                        arrow="^",
                        boxcircle="*")
# ds9 color names that matplotlib spells differently.
_ds9_to_mpl_colormap = dict(green="lime",
                            )
def properties_func_default(shape, saved_attrs):
    """Translate a region shape's ds9 attributes into matplotlib keywords.

    :param shape: parsed region shape (exposes ``name``, ``attr`` and
        ``exclude``); ``attr`` is an (attr_list, attr_dict) pair
    :param saved_attrs: (attr_list, attr_dict) carried over from a
        preceding "continued"/composite region; merged over the shape's
        own attributes
    :return: dict of matplotlib keyword arguments for the artist that
        will represent *shape* (text, marker, line or patch keywords
        depending on ``shape.name``)
    """
    attr_list = copy.copy(shape.attr[0])
    attr_dict = copy.copy(shape.attr[1])
    attr_list.extend(saved_attrs[0])
    attr_dict.update(saved_attrs[1])
    color = attr_dict.get("color", None)
    # Translate ds9 color names matplotlib spells differently (green -> lime).
    color = _ds9_to_mpl_colormap.get(color, color)
    if shape.name == "text":
        kwargs = dict(color=color,
                      rotation=attr_dict.get("textangle", 0),
                      )
        font = attr_dict.get("font")
        if font:
            # ds9 font attr is "family size weight ..."; take the size.
            a = font.split()
            if len(a) >= 3:
                fontsize = float(a[1])
                kwargs["fontsize"] = fontsize
    elif shape.name == "point":
        # ds9 point attr is "<type> [<size>]".
        point_attrs = attr_dict.get("point", "boxcircle").split()
        if len(point_attrs) == 1:
            point_type = point_attrs[0]
            point_size = 11
        elif len(point_attrs) > 1:
            point_type = point_attrs[0]
            point_size = int(point_attrs[1])
        marker = _point_type_dict.get(point_type, "o")
        kwargs = dict(markeredgecolor=color,
                      markerfacecolor="none",
                      marker=marker,
                      markeredgewidth=int(attr_dict.get("width", 1)),
                      markersize=point_size
                      )
    elif shape.name in ["line", "vector"]:
        fontsize = 10 # default font size
        font = attr_dict.get("font")
        if font:
            a = font.split()
            if len(a) >= 3:
                fontsize = float(a[1])
        kwargs = dict(color=color,
                      linewidth=int(attr_dict.get("width", 1)),
                      mutation_scale=fontsize,
                      )
        if int(attr_dict.get("dash", "0")):
            kwargs["linestyle"] = "dashed"
    else:
        # Generic patch (circle, box, ellipse, ...): outline only.
        kwargs = dict(edgecolor=color,
                      linewidth=int(attr_dict.get("width", 1)),
                      facecolor="none"
                      )
        if "background" in attr_list:
            kwargs["linestyle"] = "dashed"
        if int(attr_dict.get("dash", "0")):
            kwargs["linestyle"] = "dashed"
        # Excluded regions are drawn with hatching.
        if shape.exclude:
            kwargs["hatch"] = "/"
    return kwargs
def _get_text(txt, x, y, dx, dy, ha="center", va="center", **kwargs):
    """Build an Annotation for *txt* anchored at data position (x, y),
    shifted by (dx, dy) points.

    The text color is taken from ``kwargs["color"]`` when present (and
    removed from kwargs), else from ``kwargs["markeredgecolor"]``, else
    from matplotlib's default text color.
    """
    if "color" in kwargs:
        textcolor = kwargs.pop("color")
    elif "markeredgecolor" in kwargs:
        textcolor = kwargs["markeredgecolor"]
    else:
        import matplotlib as mpl
        textcolor = mpl.rcParams['text.color']
    annotation = Annotation(txt, (x, y), xytext=(dx, dy),
                            xycoords='data',
                            textcoords="offset points",
                            color=textcolor,
                            ha=ha, va=va,
                            **kwargs)
    # Positions are already in final coordinates; bypass axes transforms.
    annotation.set_transform(IdentityTransform())
    return annotation
def as_mpl_artists(shape_list,
                   properties_func=None,
                   text_offset=5.0, origin=1):
    """
    Converts a region list to a list of patches and a list of artists.

    :param shape_list: iterable of parsed region shapes
    :param properties_func: callable (shape, saved_attrs) -> matplotlib
        keyword dict; defaults to ``properties_func_default``
    :param text_offset: If there is text associated with the regions, add
        some vertical offset (in pixels) to the text so that it doesn't
        overlap with the regions.
    :param origin: Often, the regions files implicitly assume the
        lower-left corner of the image as a coordinate (1,1). However,
        the python convetion is that the array index starts from 0. By
        default (origin = 1), coordinates of the returned mpl artists
        have coordinate shifted by (1, 1). If you do not want this
        shift, set origin=0.
    :return: (patch_list, artist_list) tuple of matplotlib patches and
        non-patch artists (text and point markers)
    """
    patch_list = []
    artist_list = []
    if properties_func is None:
        properties_func = properties_func_default
    # properties for continued(? multiline?) regions
    saved_attrs = None
    for shape in shape_list:
        patches = []
        if saved_attrs is None:
            _attrs = [], {}
        else:
            _attrs = copy.copy(saved_attrs[0]), copy.copy(saved_attrs[1])
        kwargs = properties_func(shape, _attrs)
        if shape.name == "composite":
            # Composite shapes only contribute attributes to what follows.
            saved_attrs = shape.attr
            continue
        if saved_attrs is None and shape.continued:
            saved_attrs = shape.attr
        # elif (shape.name in shape.attr[1]):
        #     if (shape.attr[1][shape.name] != "ignore"):
        #         saved_attrs = shape.attr
        if not shape.continued:
            saved_attrs = None
        # text associated with the shape
        txt = shape.attr[1].get("text")
        if shape.name == "polygon":
            xy = np.array(shape.coord_list)
            xy.shape = -1, 2
            # -1 for change origin to 0,0
            patches = [mpatches.Polygon(xy - origin, closed=True, **kwargs)]
        elif shape.name == "rotbox" or shape.name == "box":
            xc, yc, w, h, rot = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            _box = np.array([[-w / 2., -h / 2.],
                             [-w / 2., h / 2.],
                             [w / 2., h / 2.],
                             [w / 2., -h / 2.]])
            box = _box + [xc, yc]
            rotbox = rotated_polygon(box, xc, yc, rot)
            patches = [mpatches.Polygon(rotbox, closed=True, **kwargs)]
        elif shape.name == "ellipse":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            angle = shape.coord_list[-1]
            # Alternating semi-major/semi-minor pairs (annular ellipses).
            maj_list, min_list = shape.coord_list[2:-1:2], shape.coord_list[3:-1:2]
            patches = [mpatches.Ellipse((xc, yc), 2 * maj, 2 * min,
                                        angle=angle, **kwargs)
                       for maj, min in zip(maj_list, min_list)]
        elif shape.name == "annulus":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            r_list = shape.coord_list[2:]
            patches = [mpatches.Ellipse((xc, yc), 2 * r, 2 * r, **kwargs) for r in r_list]
        elif shape.name == "circle":
            xc, yc, major = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            patches = [mpatches.Ellipse((xc, yc), 2 * major, 2 * major, angle=0, **kwargs)]
        elif shape.name == "panda":
            xc, yc, a1, a2, an, r1, r2, rn = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
                                    theta1=a1, theta2=a2, **kwargs)
                       for rr in np.linspace(r1, r2, rn + 1)]
            for aa in np.linspace(a1, a2, an + 1):
                xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
                yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
                p = Path(np.transpose([xx, yy]))
                patches.append(mpatches.PathPatch(p, **kwargs))
        elif shape.name == "pie":
            xc, yc, r1, r2, a1, a2 = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            patches = [mpatches.Arc((xc, yc), rr * 2, rr * 2, angle=0,
                                    theta1=a1, theta2=a2, **kwargs)
                       for rr in [r1, r2]]
            for aa in [a1, a2]:
                xx = np.array([r1, r2]) * np.cos(aa / 180. * np.pi) + xc
                yy = np.array([r1, r2]) * np.sin(aa / 180. * np.pi) + yc
                p = Path(np.transpose([xx, yy]))
                patches.append(mpatches.PathPatch(p, **kwargs))
        elif shape.name == "epanda":
            xc, yc, a1, a2, an, r11, r12, r21, r22, rn, angle = shape.coord_list
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            # mpl takes angle a1, a2 as angle as in circle before
            # transformation to ellipse.
            x1, y1 = cos(a1 / 180. * pi), sin(a1 / 180. * pi) * r11 / r12
            x2, y2 = cos(a2 / 180. * pi), sin(a2 / 180. * pi) * r11 / r12
            a1, a2 = atan2(y1, x1) / pi * 180., atan2(y2, x2) / pi * 180.
            patches = [mpatches.Arc((xc, yc), rr1 * 2, rr2 * 2,
                                    angle=angle, theta1=a1, theta2=a2,
                                    **kwargs)
                       for rr1, rr2 in zip(np.linspace(r11, r21, rn + 1),
                                           np.linspace(r12, r22, rn + 1))]
            for aa in np.linspace(a1, a2, an + 1):
                xx = np.array([r11, r21]) * np.cos(aa / 180. * np.pi)
                yy = np.array([r11, r21]) * np.sin(aa / 180. * np.pi)
                p = Path(np.transpose([xx, yy]))
                tr = Affine2D().scale(1, r12 / r11).rotate_deg(angle).translate(xc, yc)
                p2 = tr.transform_path(p)
                patches.append(mpatches.PathPatch(p2, **kwargs))
        elif shape.name == "text":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            if txt:
                _t = _get_text(txt, xc, yc, 0, 0, **kwargs)
                artist_list.append(_t)
        elif shape.name == "point":
            xc, yc = shape.coord_list[:2]
            # -1 for change origin to 0,0
            xc, yc = xc - origin, yc - origin
            artist_list.append(Line2D([xc], [yc],
                                      **kwargs))
            if txt:
                textshape = copy.copy(shape)
                textshape.name = "text"
                textkwargs = properties_func(textshape, _attrs)
                _t = _get_text(txt, xc, yc, 0, text_offset,
                               va="bottom",
                               **textkwargs)
                artist_list.append(_t)
        elif shape.name in ["line", "vector"]:
            if shape.name == "line":
                x1, y1, x2, y2 = shape.coord_list[:4]
                # -1 for change origin to 0,0
                x1, y1, x2, y2 = x1 - origin, y1 - origin, x2 - origin, y2 - origin
                # ds9 "line" attr flags arrowheads at each end.
                a1, a2 = shape.attr[1].get("line", "0 0").strip().split()[:2]
                arrowstyle = "-"
                if int(a1):
                    arrowstyle = "<" + arrowstyle
                if int(a2):
                    arrowstyle = arrowstyle + ">"
            else:  # shape.name == "vector"
                x1, y1, l, a = shape.coord_list[:4]
                # -1 for change origin to 0,0
                x1, y1 = x1 - origin, y1 - origin
                x2, y2 = x1 + l * np.cos(a / 180. * np.pi), y1 + l * np.sin(a / 180. * np.pi)
                v1 = int(shape.attr[1].get("vector", "0").strip())
                if v1:
                    arrowstyle = "->"
                else:
                    arrowstyle = "-"
            patches = [mpatches.FancyArrowPatch(posA=(x1, y1),
                                                posB=(x2, y2),
                                                arrowstyle=arrowstyle,
                                                arrow_transmuter=None,
                                                connectionstyle="arc3",
                                                patchA=None, patchB=None,
                                                shrinkA=0, shrinkB=0,
                                                connector=None,
                                                **kwargs)]
        else:
            warnings.warn("'as_mpl_artists' does not know how to convert {0} "
                          "to mpl artist".format(shape.name))
        patch_list.extend(patches)
        if txt and patches:
            # the text associated with a shape uses different
            # matplotlib keywords than the shape itself for, e.g.,
            # color
            textshape = copy.copy(shape)
            textshape.name = "text"
            textkwargs = properties_func(textshape, _attrs)
            # calculate the text position
            _bb = [p.get_window_extent() for p in patches]
            # this is to work around backward-incompatible change made
            # in matplotlib 1.2. This change is later reverted so only
            # some versions are affected. With affected version of
            # matplotlib, get_window_extent method calls get_transform
            # method which sets the _transformSet to True, which is
            # not desired.
            for p in patches:
                p._transformSet = False
            _bbox = Bbox.union(_bb)
            x0, y0, x1, y1 = _bbox.extents
            xc = .5 * (x0 + x1)
            _t = _get_text(txt, xc, y1, 0, text_offset,
                           va="bottom",
                           **textkwargs)
            artist_list.append(_t)
    return patch_list, artist_list
| pyregion/mpl_helper.py | 14,031 | Converts a region list to a list of patches and a list of artists.
Optional Keywords:
[ text_offset ] - If there is text associated with the regions, add
some vertical offset (in pixels) to the text so that it doesn't overlap
with the regions.
Often, the regions files implicitly assume the lower-left corner
of the image as a coordinate (1,1). However, the python convetion
is that the array index starts from 0. By default (origin = 1),
coordinates of the returned mpl artists have coordinate shifted by
(1, 1). If you do not want this shift, set origin=0.
angle in degree sss3 = [s1[0] for s1 in sss2 if isinstance(s1[0], parser_ds9.Shape)] default font size properties for continued(? multiline?) regions elif (shape.name in shape.attr[1]): if (shape.attr[1][shape.name] != "ignore"): saved_attrs = shape.attr text associated with the shape -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 mpl takes angle a1, a2 as angle as in circle before transformation to ellipse. -1 for change origin to 0,0 -1 for change origin to 0,0 -1 for change origin to 0,0 shape.name == "vector" -1 for change origin to 0,0 the text associated with a shape uses different matplotlib keywords than the shape itself for, e.g., color calculate the text position this is to work around backward-incompatible change made in matplotlib 1.2. This change is later reverted so only some versions are affected. With affected version of matplotlib, get_window_extent method calls get_transform method which sets the _transformSet to True, which is not desired. | 1,749 | en | 0.888413 |
# SPDX-FileCopyrightText: 2021 Arthur Breitman
# SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman
import math
from collections import defaultdict
from pycfmm.data import AutoRepr
# Sentinel for an unbounded tick index / price: effectively +infinity,
# kept as an int so integer comparisons and dict keys work.
infinity = 10 ** 100
class Tick(AutoRepr):
    """
    An initialized tick, marking the beginning or end of a position
    """
    def __init__(self, i_prev, i_next, feeGrowthOutside):
        """
        :param i_prev: index of the previous initialized tick (linked list)
        :type i_prev: int
        :param i_next: index of the next initialized tick (linked list)
        :type i_next: int
        :param feeGrowthOutside: XY fee growth recorded "outside" this tick
        """
        self.i_prev = i_prev
        self.i_next = i_next
        # Net liquidity added when the price crosses this tick upward
        # (removed when crossing downward).
        self.Delta_L = 0
        self.feeGrowthOutside = feeGrowthOutside
        # Count of positions bounded by this tick; used for GC decisions.
        self.n_positions = 0
class Position(AutoRepr):
    """
    A LP's position
    """
    def __init__(self, L=0):
        # Liquidity provided by this position.
        self.L = L
        # Snapshot of in-range fee growth at the last fee collection.
        self.feeGrowthInsideLast = XY()
class XY(AutoRepr):
    """
    A pair of balances in asset X and Y.

    Supports element-wise addition/subtraction, negation, scaling by a
    scalar, and equality against other XY instances.
    """
    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
    def __add__(self, other):
        return XY(self.x + other.x, self.y + other.y)
    def __sub__(self, other):
        return XY(self.x - other.x, self.y - other.y)
    def __neg__(self):
        return XY(-self.x, -self.y)
    def __mul__(self, scalar):
        # Scalar multiplication only; XY * XY is not defined.
        return XY(scalar * self.x, scalar * self.y)
    def __eq__(self, other):
        return isinstance(other, XY) and (self.x, self.y) == (other.x, other.y)
class Contract(AutoRepr):
"""
A contract in the fashion of Uniswap v3
"""
@staticmethod
def tick(srp):
"""
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the closest tick below a certain price
"""
if srp == infinity:
return infinity
else:
return math.floor(math.log(srp) / math.log(math.sqrt(1.0001)))
@staticmethod
def srp(tick):
"""
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
"""
if tick == infinity:
return infinity
return math.pow(math.sqrt(1.0001), tick)
def __init__(self, X, Y, fee=0.3 / 100):
self.balance = XY(X, Y)
self.srP = math.sqrt(Y / X)
self.i_a = self.tick(self.srP)
self.L = math.floor(math.sqrt(X * Y))
self.fee = fee
self.i_l = -infinity
self.ticks = {-infinity: Tick(-infinity, infinity, XY()), infinity: Tick(-infinity, infinity, XY())}
self.positions = defaultdict(Position)
self.feeGrowth = XY()
def initialize_tick(self, i, i_l):
"""
Initialize a new tick at index i, provide the index of an initialized tick lower
than i to find it easily in the linked list. Assumes that i is *not* already initialized.
:param i:
:param i_l:
"""
assert (i not in self.ticks)
assert (i_l < i)
i_next = self.ticks[i_l].i_next
if i_next > i:
self.ticks[i_l].i_next = i
# find an instance where i_a = i and we set XY(0, 0) and that's wrong
self.ticks[i] = Tick(i_l, i_next, self.feeGrowth if self.i_a >= i else XY())
self.ticks[i_next].i_prev = i
else:
self.initialize_tick(i, i_next)
def collect_fees(self, user, i_l, i_u):
key = (user, i_l, i_u)
position = self.positions[key]
f_a = self.feeGrowth - self.ticks[i_u].feeGrowthOutside if self.i_a >= i_u else self.ticks[i_u].feeGrowthOutside
f_b = self.ticks[i_l].feeGrowthOutside if self.i_a >= i_l else self.feeGrowth - self.ticks[i_l].feeGrowthOutside
feeGrowthInside = self.feeGrowth - f_a - f_b
fees = (feeGrowthInside - position.feeGrowthInsideLast) * position.L
position.feeGrowthInsideLast = feeGrowthInside
return fees
def set_position(self, user, i_l, i_l_l, i_u, i_u_l, Delta_L):
    """
    Add Delta_L liquidity (negative to remove) to `user`'s position over
    the tick range [i_l, i_u), initializing the boundary ticks if needed.

    :param user: owner of the position
    :param i_l: lower tick of the position
    :param i_l_l: an initialized tick index at or below i_l (search hint)
    :param i_u: upper tick of the position
    :param i_u_l: an initialized tick index at or below i_u (search hint)
    :param Delta_L: amount of liquidity to add (may be negative)
    :return: XY amounts credited to the user (collected fees minus the
             assets that must be deposited into the pool)
    """
    assert (i_l_l <= i_l)
    if i_l not in self.ticks:
        self.initialize_tick(i_l, i_l_l)
    assert (i_u_l <= i_u)
    if i_u not in self.ticks:
        self.initialize_tick(i_u, i_u_l)
    position_key = (user, i_l, i_u)
    # Settle outstanding fees before the position's liquidity changes.
    fees = self.collect_fees(user, i_l, i_u)
    self.positions[position_key].L += Delta_L
    assert (self.positions[position_key].L >= 0)
    # todo, garbage collect if we are unwinding the position completely?
    Delta = XY()
    # Add or remove liquidity above the current tick
    if self.i_a < i_l:
        Delta.x = Delta_L * (1 / self.srp(i_l) - 1 / self.srp(i_u))
        Delta.y = 0
    # Add or remove liquidity around the current tick
    elif i_l <= self.i_a < i_u:
        # update interval we are in if need be
        if i_l > self.i_l:
            self.i_l = i_l
        Delta.x = Delta_L * (1 / self.srP - 1 / self.srp(i_u))
        Delta.y = Delta_L * (self.srP - self.srp(i_l))
        # Only liquidity around the current price is active.
        self.L += Delta_L
    else:  # i_a >= i_u
        Delta.x = 0
        Delta.y = Delta_L * (self.srp(i_u) - self.srp(i_l))
    Delta -= fees
    # make a note of how much liquidity is gained or lost when
    # entering this interval
    self.ticks[i_l].Delta_L += Delta_L
    self.ticks[i_u].Delta_L -= Delta_L
    self.balance += Delta
    return -Delta
def X_to_Y(self, dX, fee=None):
    """
    Trade dX units of asset X into the pool for asset Y, crossing tick
    boundaries (recursively) as needed.

    :param dX: amount of X sold to the pool, must be >= 0
    :param fee: fee rate to apply; defaults to the pool's fee
    :return: XY delta from the user's point of view
    """
    # dX must be positive
    assert (dX >= 0)
    if fee is None:
        fee = self.fee
    # If there is no liquidity, stop the trade at this point
    if self.L == 0:
        self.i_a = self.tick(
            self.srP)  # we may need to update i_a if we went through several ticks to reach this point
        return XY()
    # Assume the trade will fit in a tick, what would the fees be like?
    fees = XY(dX * fee, 0)
    srp_new = 1.0 / (1.0 / self.srP + (dX - fees.x) / self.L)
    i_l = self.i_l
    tick_new = self.tick(srp_new)
    if tick_new >= i_l:  # we didn't push past the interval
        dY = - (dX - fees.x) * self.srP * srp_new
        self.srP = srp_new
        self.i_a = tick_new
        user = XY(-dX, -dY)
        self.balance -= user
        # Update fee growth with the fees we just collected
        self.feeGrowth += fees * (1.0 / self.L)
        return user
    else:
        # compute what we got up til i_u and how much it cost
        # well, what delta_X would have taken me there?
        self.i_l = self.ticks[self.i_l].i_prev
        srP_l = self.srp(i_l)
        dY = self.L * (srP_l - self.srP)
        dX_ = - dY / (self.srP * srP_l)
        # Gross up by the fee so the net amount lands exactly on the boundary.
        tmp = dX_ / (1.0 - fee)
        dX_, fees = tmp, XY(tmp - dX_, 0)
        # update fee growth
        self.feeGrowth += fees * (1.0 / self.L)
        # remove the liquidity we used to have
        self.L -= self.ticks[i_l].Delta_L
        # flip feeGrowth
        self.ticks[i_l].feeGrowthOutside = self.feeGrowth - self.ticks[i_l].feeGrowthOutside
        self.srP = self.srp(i_l) - 1e-16  # todo can we do better than this crutch?
        user = XY(-dX_, -dY)
        self.balance -= user
        # Recurse to trade the remainder in the next interval down.
        return user + self.X_to_Y(dX - dX_, fee)
def Y_to_X(self, dY, fee=None):
    """
    Trade dY units of asset Y into the pool for asset X, crossing tick
    boundaries (recursively) as needed.

    :param dY: amount of Y sold to the pool, must be >= 0
    :param fee: fee rate to apply; defaults to the pool's fee
    :return: XY delta from the user's point of view
    """
    # dY must be positive
    assert (dY >= 0)
    if fee is None:
        fee = self.fee
    # If there is no liquidity, stop the trade at this point
    if self.L == 0:
        self.i_a = self.tick(
            self.srP)  # we may need to update i_a if we went through several ticks to reach this point
        return XY()
    # Assume the trade will fit in a tick, what would the fees be like?
    fees = XY(0, dY * fee)
    srp_new = self.srP + (dY - fees.y) / self.L
    i_u = self.ticks[self.i_l].i_next
    tick_new = self.tick(srp_new)
    if tick_new < i_u:  # we did not push past the interval
        dX = - (dY - fees.y) / (self.srP * srp_new)
        self.srP = srp_new
        self.i_a = tick_new
        user = XY(-dX, -dY)
        self.balance -= user
        # Update fee growth with the fees we just collected
        self.feeGrowth += fees * (1.0 / self.L)
        return user
    else:
        # Move up into the next interval, consuming liquidity up to i_u.
        self.i_l = i_u
        srP_u = self.srp(i_u)
        dY_ = self.L * (srP_u - self.srP)
        dX = - dY_ / (self.srP * srP_u)
        # Gross up by the fee so the net amount lands exactly on the boundary.
        tmp = dY_ / (1.0 - fee)
        dY_, fees = tmp, XY(0, tmp - dY_)
        # update fee growth
        self.feeGrowth += fees * (1.0 / self.L)
        self.L += self.ticks[i_u].Delta_L
        # Flip the fee-growth-outside accumulator when crossing the tick.
        self.ticks[i_u].feeGrowthOutside = self.feeGrowth - self.ticks[i_u].feeGrowthOutside
        self.srP = srP_u
        user = XY(-dX, -dY_)
        self.balance -= user
        # Recurse to trade the remainder in the next interval up.
        return user + self.Y_to_X(dY - dY_, fee)
| python/scfmm/__init__.py | 9,012 | A contract in the fashion of Uniswap v3
A LP's position
An initialized tick, marking the beginning or end of a position
A pair of balances in asset X and Y
:type i_prev: int
:type i_next: int
Initialize a new tick at index i, provide the index of an initialized tick lower
than i to find it easily in the linked list. Assumes that i is *not* already initialized.
:param i:
:param i_l:
Computes the square root of the price corresponding to a given tick
:param tick: the index of a tick
:return: the corresponding square root price
Computes the closest tick index below a certain price, given its square root
:param srp: square root of a price
:return: the closest tick below a certain price
SPDX-FileCopyrightText: 2021 Arthur Breitman SPDX-License-Identifier: LicenseRef-MIT-Arthur-Breitman find an instance where i_a = i and we set XY(0, 0) and that's wrong todo, garbage collect if we are unwinding the position completely? Add or remove liquidity above the current tick Add or remove liquidity around the current tick update interval we are in if need be i_a >= i_u make a note of how much liquidity is gained or lost when entering this interval dX must be positive If there is no liquidity, stop the trade at this point we may need to update i_a if we went through several ticks to reach this point Assume the trade will fit in a tick, what would the fees be like? we didn't pushed past the interval Update fee growth with the fees we just collected compute what we got up til i_u and how much it cost well, what delta_X would have taken me there? update fee growth remove the liquidity we used to have flip feeGrowth todo can we do better than this crutch? dY must be positive If there is no liquidity, stop the trade at this point we may need to update i_a if we went through several ticks to reach this point Assume the trade will fit in a tick, what would the fees be like? we did not push past the interval Update fee growth with the fees we just collected update fee growth | 1,986 | en | 0.873074 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from functools import reduce
import numpy, scipy
from pyscf import lib
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
from pyscf import fci
# Shared fixtures: an N2 molecule at bond length b with converged RHF/UHF
# references and CASSCF(4,4)/UCASSCF(4,4) wavefunctions used by all tests.
b = 1.4
mol = gto.Mole()
mol.build(
    verbose = 7,
    output = '/dev/null',
    atom = [
        ['N',( 0.000000, 0.000000, -b/2)],
        ['N',( 0.000000, 0.000000, b/2)], ],
    basis = {'N': 'ccpvdz', },
    symmetry = 1
)
mfr = scf.RHF(mol)
mfr.scf()
mcr = mcscf.CASSCF(mfr, 4, 4)
mcr.conv_tol_grad = 1e-6
mcr.mc1step()[0]

mfu = scf.UHF(mol)
mfu.scf()
mcu = mcscf.UCASSCF(mfu, 4, 4)
mcu.conv_tol_grad = 1e-6
mcu.mc1step()[0]

# Deliberately unconverged (max_cycle=1) calculations at a stretched
# geometry (b + 0.1), used as projection targets by the
# test_project_init_guess_* tests.
mol_prg = gto.M(
    verbose = 0,
    atom = [
        ['N',( 0.000000, 0.000000, -(b+0.1)/2)],
        ['N',( 0.000000, 0.000000, (b+0.1)/2)], ],
    basis = 'ccpvdz',
    symmetry=1)
mfr_prg = scf.RHF(mol_prg).set (max_cycle=1).run()
mcr_prg = mcscf.CASSCF(mfr_prg, 4, 4).set (max_cycle_macro=1).run()
mfu_prg = scf.UHF(mol_prg).set (max_cycle=1).run()
mcu_prg = mcscf.UCASSCF(mfu_prg, 4, 4).set (max_cycle_macro=1).run()

# Same molecule in a larger basis, for basis-set projection tests.
mol_prb = mol.copy ()
mol_prb.basis = {'N': 'aug-cc-pvdz' }
mol_prb.build ()
mfr_prb = scf.RHF(mol_prb).set (max_cycle=1).run()
mcr_prb = mcscf.CASSCF(mfr_prb, 4, 4).set (max_cycle_macro=1).run()
def tearDownModule():
    """Release the module-level fixtures after the test run."""
    global mol, mfr, mcr, mfu, mcu
    # Close the /dev/null output stream opened by mol.build().
    mol.stdout.close()
    del mol, mfr, mcr, mfu, mcu
class KnownValues(unittest.TestCase):
    """Regression tests for mcscf.addons, pinned to known reference values."""

    def test_spin_square(self):
        ss = mcscf.addons.spin_square(mcr)[0]
        self.assertAlmostEqual(ss, 0, 7)

    def test_ucasscf_spin_square(self):
        ss = mcscf.addons.spin_square(mcu)[0]
        self.assertAlmostEqual(ss, 0, 7)

    def test_rcas_natorb(self):
        mo1, ci1, mocc1 = mcscf.addons.cas_natorb(mcr)
        self.assertAlmostEqual(numpy.linalg.norm(mo1) , 9.9260608594977491, 6)
        self.assertAlmostEqual(numpy.linalg.norm(mocc1), 5.1687145190800079, 6)

    #TODO: def test_ucas_natorb(self):
    #TODO:     mo2, ci2, mocc2 = mcscf.addons.cas_natorb(mcu)
    #TODO:     self.assertAlmostEqual(numpy.linalg.norm(mo2) , 11.4470460817871*numpy.sqrt(2), 7)
    #TODO:     self.assertAlmostEqual(numpy.linalg.norm(mocc2), 2.59144951056707/numpy.sqrt(2), 7)

    def test_get_fock(self):
        f1 = mcscf.addons.get_fock(mcr)
        self.assertTrue(numpy.allclose(f1, f1.T))
        self.assertAlmostEqual(numpy.linalg.norm(f1), 25.482177562349467, 6)
        #TODO: f1 = mcscf.addons.get_fock(mcu)
        #TODO: self.assertTrue(numpy.allclose(f1[0], f1[0].T))
        #TODO: self.assertTrue(numpy.allclose(f1[1], f1[1].T))
        #TODO: self.assertAlmostEqual(numpy.linalg.norm(f1), 23.597476504476919*numpy.sqrt(2), 6)

    def test_canonicalize1(self):
        # Canonicalize with a random (unitary-rotated) MO set.
        numpy.random.seed(1)
        f1 = numpy.random.random(mcr.mo_coeff.shape)
        u1 = numpy.linalg.svd(f1)[0]
        mo1 = numpy.dot(mcr.mo_coeff, u1)
        mo1 = lib.tag_array(mo1, orbsym=mcr.mo_coeff.orbsym)
        mo, ci, mo_e = mcr.canonicalize(mo1)
        e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
        self.assertAlmostEqual(e1, 44.2658681077, 7)
        self.assertAlmostEqual(lib.fp(mo_e), 5.1364166175063097, 7)

        mo, ci, mo_e = mcr.canonicalize(mo1, eris=mcr.ao2mo(mcr.mo_coeff))
        e1 = numpy.einsum('ji,jk,ki', mo, f1, mo)
        self.assertAlmostEqual(e1, 44.2658681077, 7)
        self.assertAlmostEqual(lib.fp(mo_e), 4.1206025804989173, 7)

        # Frozen orbitals may be given as a count or as explicit index lists.
        mcr1 = copy.copy(mcr)
        mcr1.frozen = 2
        mo, ci, mo_e = mcr1.canonicalize(mo1)
        self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)

        mcr1.frozen = [0,1]
        mo, ci, mo_e = mcr1.canonicalize(mo1)
        self.assertAlmostEqual(lib.fp(mo_e), 6.6030999409178577, 7)

        mcr1.frozen = [1,12]
        mo, ci, mo_e = mcr1.canonicalize(mo1)
        self.assertAlmostEqual(lib.fp(mo_e), 5.2182584355788162, 7)

    def test_canonicalize(self):
        mo, ci, mo_e = mcr.canonicalize()
        self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)
        mo, ci, mo_e = mcr.canonicalize(eris=mcr.ao2mo(mcr.mo_coeff))
        self.assertAlmostEqual(numpy.linalg.norm(mo), 9.9260608594977242, 7)

    def test_make_rdm12(self):
        dmr = mcscf.addons.make_rdm1(mcr)
        dm1, dm2 = mcscf.addons.make_rdm12(mcr)
        self.assertTrue(numpy.allclose(dmr, dm1))
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 3.8205551262007567, 6)
        self.assertAlmostEqual(numpy.linalg.norm(dm2), 14.987267883423314, 5)

    def test_make_rdm1s(self):
        dm1 = mcscf.addons.make_rdm1s(mcr)
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)
        dm1 = mcscf.addons.make_rdm1s(mcu)
        self.assertAlmostEqual(numpy.linalg.norm(dm1), 2.7015404376335805, 5)

    def test_sort_mo(self):
        mo1 = numpy.arange(mfr.mo_energy.size).reshape(1,-1)
        ref = [[0, 1, 2, 3, 7, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
        mo2 = mcscf.addons.sort_mo(mcr, mo1, [5,6,7,9])
        self.assertTrue(numpy.allclose(mo2, ref))
        mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [5,6,7,9])
        self.assertTrue(numpy.allclose(mo2, (ref,ref)))
        # Different CAS index lists per spin channel.
        mo2 = mcscf.addons.sort_mo(mcu, (mo1,mo1), [[5,6,7,9],[5,6,8,9]])
        ref1 = [[0, 1, 2, 3, 6, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
                 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]]
        self.assertTrue(numpy.allclose(mo2, (ref,ref1)))

    def test_sort_mo_by_irrep(self):
        # Irrep labels, irrep IDs and per-irrep counts must be equivalent.
        mc1 = mcscf.CASSCF(mfr, 8, 4)
        mo0 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 'E1gx':2, 'E1gy':2})
        mo1 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {2:2, 3:2, 6:2, 7:2}, {2:0, 3:0, 6:0, 7:0})
        mo2 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, (0,0,2,2,0,0,2,2))
        mo3 = mcscf.sort_mo_by_irrep(mc1, mfr.mo_coeff, {'E1ux':2, 'E1uy':2, 2:2, 3:2})
        self.assertTrue(numpy.allclose(mo0, mo1))
        self.assertTrue(numpy.allclose(mo0, mo2))
        self.assertTrue(numpy.allclose(mo0, mo3))

    def test_sort_mo_by_irrep1(self):
        mol = gto.M(atom='N 0 0 -.45; N 0 0 .45', basis='ccpvdz',
                    symmetry=True, verbose=0)
        mf = scf.RHF(mol).run()
        mc1 = mcscf.CASSCF(mf, 6, 6)
        caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
                {'A1g': 1, 'A1u': 1, 'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
                {'A1g': 2, 'A1u': 2})
        self.assertEqual(list(caslst), [4,5,7,8,9,10])
        caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
                {'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
                {'A1g': 2, 'A1u': 2})
        self.assertEqual(list(caslst), [4,5,7,8,9,10])
        caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
                {'E1uy': 1, 'E1ux': 1, 'E1gy': 1, 'E1gx': 1},
                {'A1u': 2})
        self.assertEqual(list(caslst), [4,5,7,8,9,10])
        caslst = mcscf.addons.caslst_by_irrep(mc1, mf.mo_coeff,
                {'A1g': 1, 'A1u': 1}, {'E1uy': 1, 'E1ux': 1})
        self.assertEqual(list(caslst), [3,6,8,9,12,13])

        # Inconsistent irrep specifications must be rejected.
        self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
                          {'A1g': 1, 'A1u': 1}, {'E1uy': 3, 'E1ux': 3})
        self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
                          {'A1g': 3, 'A1u': 4}, {'E1uy': 1, 'E1ux': 1})
        self.assertRaises(ValueError, mcscf.addons.caslst_by_irrep, mc1, mf.mo_coeff,
                          {'E2ux': 2, 'E2uy': 2}, {'E1uy': 1, 'E1ux': 1})

    def test_state_average(self):
        mc = mcscf.CASSCF(mfr, 4, 4)
        mc.fcisolver = fci.solver(mol, singlet=False)
        mc.state_average_((.64,.36))
        e = mc.kernel()
        e = mc.e_states
        self.assertAlmostEqual(mc.e_tot, -108.83342083775061, 7)
        self.assertAlmostEqual(mc.e_average, -108.83342083775061, 7)
        self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
        dm1 = mc.analyze()
        self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434, 4)

        # A second state_average_ call on the same object raises TypeError.
        self.assertRaises(TypeError, mc.state_average_, (.64,.36))

    def test_state_average_fci_dmrg(self):
        fcisolver1 = fci.direct_spin1_symm.FCISolver(mol)
        # Mimic a DMRG-style solver by hiding FCI-only attributes.
        class FCI_as_DMRG(fci.direct_spin1_symm.FCISolver):
            def __getattribute__(self, attr):
                """Prevent 'private' attribute access"""
                if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
                            'absorb_h1e'):
                    raise AttributeError
                else:
                    return object.__getattribute__(self, attr)
            def kernel(self, *args, **kwargs):
                return fcisolver1.kernel(*args, **kwargs)
            def approx_kernel(self, *args, **kwargs):
                return fcisolver1.kernel(*args, **kwargs)
            @property
            def orbsym(self):
                return fcisolver1.orbsym
            @orbsym.setter
            def orbsym(self, x):
                fcisolver1.orbsym = x
            spin_square = None
            large_ci = None
            transform_ci_for_orbital_rotation = None

        mc = mcscf.CASSCF(mfr, 4, 4)
        mc.fcisolver = FCI_as_DMRG(mol)
        mc.fcisolver.nroots = fcisolver1.nroots = 2
        mc.state_average_((.64,.36))
        mc.kernel()
        e = mc.e_states
        self.assertAlmostEqual(e[0]*.64+e[1]*.36, -108.83342083775061, 7)
        dm1 = mc.analyze()
        self.assertAlmostEqual(lib.fp(dm1[0]), 0.52396929381500434*2, 4)

    def test_state_average_mix(self):
        # Average over mixed solvers: two singlet roots plus one triplet.
        solver1 = fci.FCI(mol)
        solver1.spin = 0
        solver1.nroots = 2
        solver2 = fci.FCI(mol, singlet=False)
        solver2.spin = 2
        mc = mcscf.CASSCF(mfr, 4, 4)
        mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
                                             (0.25,0.25,0.5))
        mc.kernel()
        e = mc.e_states
        self.assertAlmostEqual(mc.e_tot, -108.80340952016508, 7)
        self.assertAlmostEqual(mc.e_average, -108.80340952016508, 7)
        self.assertAlmostEqual(numpy.dot(e,[.25,.25,.5]), -108.80340952016508, 7)
        dm1 = mc.analyze()
        self.assertAlmostEqual(lib.fp(dm1[0]), 0.52172669549357464, 4)
        self.assertAlmostEqual(lib.fp(dm1[1]), 0.53366776017869022, 4)
        self.assertAlmostEqual(lib.fp(dm1[0]+dm1[1]), 1.0553944556722636, 4)

        mc.cas_natorb()

    def test_state_average_mix_fci_dmrg(self):
        fcisolver1 = fci.direct_spin0_symm.FCISolver(mol)
        # Mimic a DMRG-style solver by hiding FCI-only attributes.
        class FCI_as_DMRG(fci.direct_spin0_symm.FCISolver):
            def __getattribute__(self, attr):
                """Prevent 'private' attribute access"""
                if attr in ('make_rdm1s', 'spin_square', 'contract_2e',
                            'absorb_h1e'):
                    raise AttributeError
                else:
                    return object.__getattribute__(self, attr)
            def kernel(self, *args, **kwargs):
                return fcisolver1.kernel(*args, **kwargs)
            def approx_kernel(self, *args, **kwargs):
                return fcisolver1.kernel(*args, **kwargs)
            @property
            def orbsym(self):
                return fcisolver1.orbsym
            @orbsym.setter
            def orbsym(self, x):
                fcisolver1.orbsym = x
            spin_square = None
            large_ci = None
            transform_ci_for_orbital_rotation = None

        solver1 = FCI_as_DMRG(mol)
        solver1.spin = fcisolver1.spin = 0
        solver1.nroots = fcisolver1.nroots = 2
        solver2 = fci.FCI(mol, singlet=False)
        solver2.spin = 2
        mc = mcscf.CASSCF(mfr, 4, 4)
        mc = mcscf.addons.state_average_mix_(mc, [solver1, solver2],
                                             (0.25,0.25,0.5))
        mc.kernel()
        e = mc.e_states
        self.assertAlmostEqual(numpy.dot(e, [.25,.25,.5]), -108.80340952016508, 7)
        dm1 = mc.analyze()
        self.assertAlmostEqual(lib.fp(dm1[0]), 1.0553944556722636, 4)
        self.assertEqual(dm1[1], None)

        mc.cas_natorb()

    def test_state_specific(self):
        # Optimize the first excited state only.
        mc = mcscf.CASSCF(mfr, 4, 4)
        mc.fcisolver = fci.solver(mol, singlet=False)
        mc.state_specific_(state=1)
        e = mc.kernel()[0]
        self.assertAlmostEqual(e, -108.70065770892457, 7)
        dm1 = mc.analyze()
        self.assertAlmostEqual(lib.fp(dm1[0]), 0.54605283139098515, 4)

        # state=0 must reproduce the plain ground-state CASSCF result.
        mc = mcscf.CASSCF(mfr, 4, 4)
        mc.state_specific_(state=0)
        e = mc.kernel()[0]
        self.assertAlmostEqual(mc.e_tot, mcr.e_tot, 7)
        dm1 = mc.analyze()
        dmref = mcr.analyze()
        self.assertAlmostEqual(float(abs(dm1[0]-dmref[0]).max()), 0, 4)

    def test_project_init_guess_geom (self):
        # NOTE(review): mfr_mo_norm is computed but not used in this test.
        mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
                                    mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
        mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
        mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff)
        s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)

    def test_project_init_guess_basis (self):
        mo1 = mcscf.addons.project_init_guess (mcr_prb, mfr.mo_coeff, prev_mol=mfr.mol)
        s1 = reduce(numpy.dot, (mo1.T, mfr_prb.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 6.782329983125268, 9)

    def test_project_init_guess_uhf (self):
        mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff)
        for mo1 in mo1_u:
            s1 = reduce(numpy.dot, (mo1.T, mfu_prg.get_ovlp(), mo1))
            self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                             s1.shape[0])
            self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)

    def test_project_init_guess_activefirst (self):
        with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
            mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='active')
        s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
        mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
                                    mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
        mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
        s2 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
              for i in (1,3)]  # core, active (same irrep)
        self.assertAlmostEqual (s2[1], 1.0, 9)
        self.assertFalse (s2[0] > s2[1])

    def test_project_init_guess_corefirst (self):
        with lib.temporary_env (mcr_prg, ncas=6, ncore=3):
            mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority='core')
        s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
        mfr_mo_norm = numpy.einsum ('ip,ip->p', mfr.mo_coeff.conj (),
                                    mfr_prg.get_ovlp ().dot (mfr.mo_coeff))
        mfr_mo_norm = mfr.mo_coeff / numpy.sqrt (mfr_mo_norm)[None,:]
        s1 = [reduce (numpy.dot, (mfr_prg.get_ovlp (), mo1[:,i], mfr_mo_norm[:,i]))
              for i in (1,3)]  # core, active (same irrep)
        self.assertAlmostEqual (s1[0], 1.0, 9)
        self.assertTrue (s1[0] > s1[1])

    def test_project_init_guess_gramschmidt (self):
        # NOTE(review): dtype=numpy.integer (an abstract type) is deprecated
        # in NumPy >= 1.20; dtype=int would be the portable spelling.
        gram_schmidt_idx = numpy.arange (27, dtype=numpy.integer)[:,None].tolist ()
        mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, priority=gram_schmidt_idx)
        s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
        mf2moi = reduce (numpy.dot, (mfr_prg.mo_coeff.conj ().T, mfr_prg.get_ovlp (), mfr.mo_coeff))
        Q, R = scipy.linalg.qr (mf2moi)  # Arbitrary sign, so abs below
        mo2 = numpy.dot (mfr_prg.mo_coeff, Q)
        s2 = numpy.abs (reduce (numpy.dot, (mo1.conj ().T, mfr_prg.get_ovlp (), mo2)))
        self.assertAlmostEqual(numpy.linalg.norm(s2), 5.2915026221291841, 9)

    def test_project_init_guess_prioritylists (self):
        pr = [[[27],[5,3],[6,12]],[[5],[17],[13,10,8,6]]]
        mo1_u = mcscf.addons.project_init_guess (mcu_prg, mfu.mo_coeff, priority=pr)
        s0 = mfu_prg.get_ovlp ()
        for ix, mo1 in enumerate (mo1_u):
            s1 = reduce(numpy.dot, (mo1.T, s0, mo1))
            self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                             s1.shape[0])
            self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
            mfu_mo = mfu.mo_coeff[ix]
            mfu_mo_norm = numpy.einsum ('ip,ip->p', mfu_mo.conj (), s0.dot (mfu_mo))
            mfu_mo_norm = mfu.mo_coeff[ix] / numpy.sqrt (mfu_mo_norm)[None,:]
            p = pr[ix][0][0]
            s2 = reduce (numpy.dot, (mfu_prg.get_ovlp (), mo1[:,p], mfu_mo_norm[:,p]))
            self.assertAlmostEqual (s2, 1.0, 9)

    def test_project_init_guess_usehfcore (self):
        mo1 = mcscf.addons.project_init_guess (mcr_prg, mfr.mo_coeff, use_hf_core=True)
        s1 = reduce(numpy.dot, (mo1.T, mfr_prg.get_ovlp(), mo1))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s1)[0]>1e-10),
                         s1.shape[0])
        self.assertAlmostEqual(numpy.linalg.norm(s1), 5.2915026221291841, 9)
        s2 = reduce (numpy.dot, (mo1[:,:5].T, mfr_prg.get_ovlp (), mfr_prg.mo_coeff[:,:5]))
        self.assertEqual(numpy.count_nonzero(numpy.linalg.eigh(s2)[0]>1e-10),
                         s2.shape[0])
        self.assertAlmostEqual (numpy.linalg.norm (s2), 2.23606797749979, 9)

    def test_state_average_bad_init_guess(self):
        mc = mcscf.CASCI(mfr, 4, 4)
        mc.run()
        mc.state_average_([.8, .2])
        mscan = mc.as_scanner()
        e = mscan(mol)
        self.assertAlmostEqual(e, -108.84390277715984, 9)
if __name__ == "__main__":
    # Run the full suite when this module is executed directly.
    print("Full Tests for mcscf.addons")
    unittest.main()
| pyscf/mcscf/test/test_addons.py | 19,318 | Prevent 'private' attribute access
Prevent 'private' attribute access
!/usr/bin/env python Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.TODO: def test_ucas_natorb(self):TODO: mo2, ci2, mocc2 = mcscf.addons.cas_natorb(mcu)TODO: self.assertAlmostEqual(numpy.linalg.norm(mo2) , 11.4470460817871*numpy.sqrt(2), 7)TODO: self.assertAlmostEqual(numpy.linalg.norm(mocc2), 2.59144951056707/numpy.sqrt(2), 7)TODO: f1 = mcscf.addons.get_fock(mcu)TODO: self.assertTrue(numpy.allclose(f1[0], f1[0].T))TODO: self.assertTrue(numpy.allclose(f1[1], f1[1].T))TODO: self.assertAlmostEqual(numpy.linalg.norm(f1), 23.597476504476919*numpy.sqrt(2), 6) core, active (same irrep) core, active (same irrep) Arbitrary sign, so abs below | 1,303 | en | 0.488475 |
# Copyright (c) 2016-2017, Jani Nikula <jani@nikula.org>
# Licensed under the terms of BSD 2-Clause, see LICENSE for details.
"""
Hawkmoth
========
Sphinx C Domain autodoc directive extension.
"""
import glob
import os
import re
import stat
import subprocess
import sys
from docutils import nodes, statemachine
from docutils.parsers.rst import directives, Directive
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import AutodocReporter
from sphinx.util.nodes import nested_parse_with_titles
from sphinx.util.docutils import switch_source_input
from hawkmoth.parser import parse
# Read the package version string from the bundled VERSION file.
with open(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                       'VERSION')) as version_file:
    __version__ = version_file.read().strip()
class CAutoDocDirective(Directive):
    """Extract all documentation comments from the specified file(s).

    The directive takes a single argument: a whitespace-separated list of
    glob patterns, resolved relative to the ``cautodoc_root`` config value.
    """
    # BUG FIX: docutils reads 'required_arguments' (plural); the previous
    # 'required_argument' attribute was silently ignored, leaving the
    # effective requirement at 0, so a missing argument surfaced as an
    # IndexError in run() instead of a proper directive error. With
    # final_argument_whitespace below, all patterns arrive as one argument,
    # so no optional arguments are needed.
    required_arguments = 1
    optional_arguments = 0
    # Allow passing a variable number of file patterns as arguments
    final_argument_whitespace = True
    option_spec = {
        'compat': directives.unchanged_required,
        'clang': directives.unchanged_required,
    }
    has_content = False

    def __parse(self, viewlist, filename):
        """Parse one source file and append its comments as reST lines."""
        env = self.state.document.settings.env
        # Directive options override the corresponding config values.
        compat = self.options.get('compat', env.config.cautodoc_compat)
        clang = self.options.get('clang', env.config.cautodoc_clang)
        comments = parse(filename, compat=compat, clang=clang)
        for (comment, meta) in comments:
            # Track the source line so Sphinx error messages point back at
            # the original C file.
            lineoffset = meta['line'] - 1
            lines = statemachine.string2lines(comment, 8,
                                              convert_whitespace=True)
            for line in lines:
                viewlist.append(line, filename, lineoffset)
                lineoffset += 1

    def run(self):
        """Expand the directive into document nodes."""
        env = self.state.document.settings.env
        result = ViewList()
        for pattern in self.arguments[0].split():
            filenames = glob.glob(env.config.cautodoc_root + '/' + pattern)
            if len(filenames) == 0:
                fmt = 'Pattern "{pat}" does not match any files.'
                env.app.warn(fmt.format(pat=pattern),
                             location=(env.docname, self.lineno))
                continue
            for filename in filenames:
                mode = os.stat(filename).st_mode
                if stat.S_ISDIR(mode):
                    fmt = 'Path "{name}" matching pattern "{pat}" is a directory.'
                    env.app.warn(fmt.format(name=filename, pat=pattern),
                                 location=(env.docname, self.lineno))
                    continue
                # Tell Sphinx about the dependency and parse the file
                env.note_dependency(os.path.abspath(filename))
                self.__parse(result, filename)
        # Parse the extracted reST
        with switch_source_input(self.state, result):
            node = nodes.section()
            nested_parse_with_titles(self.state, result, node)
        return node.children
def setup(app):
    """Sphinx extension entry point: register config values and directive."""
    app.require_sphinx('1.8')
    for name, default in (('cautodoc_root', app.confdir),
                          ('cautodoc_compat', None),
                          ('cautodoc_clang', None)):
        app.add_config_value(name, default, 'env')
    app.add_directive_to_domain('c', 'autodoc', CAutoDocDirective)

    return {
        'version': __version__,
        'parallel_read_safe': True,
        'parallel_write_safe': True,
    }
| hawkmoth/__init__.py | 3,466 | Extract all documentation comments from the specified file
Hawkmoth
========
Sphinx C Domain autodoc directive extension.
Copyright (c) 2016-2017, Jani Nikula <jani@nikula.org> Licensed under the terms of BSD 2-Clause, see LICENSE for details. Allow passing a variable number of file patterns as arguments Tell Sphinx about the dependency and parse the file Parse the extracted reST | 385 | en | 0.765612 |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from .source import SourceOrb
__all__ = ["SourceOrb"]
| airbyte-integrations/connectors/source-orb/source_orb/__init__.py | 118 | Copyright (c) 2021 Airbyte, Inc., all rights reserved. | 54 | en | 0.809036 |
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Autogenerated by tools/codegen/core/gen_stats_data.py
import massage_qps_stats_helpers
def massage_qps_stats(scenario_result):
    """Flatten gRPC core stats into top-level ``core_*`` keys.

    For every stats dict in ``serverStats`` + ``clientStats`` the nested
    ``coreStats`` entry is removed and replaced by flat keys: one
    ``core_<counter>`` value per counter, and for every histogram the bucket
    counts (``core_<name>``), the bucket boundaries (``core_<name>_bkts``)
    and the 50th/95th/99th percentiles (``core_<name>_NNp``).

    The name lists below mirror the counters/histograms exported by grpc
    core; they replace the generated per-name boilerplate (this file comes
    from tools/codegen/core/gen_stats_data.py) with data-driven loops that
    produce exactly the same keys in the same order.
    """
    counters = [
        "client_calls_created", "server_calls_created", "cqs_created",
        "client_channels_created", "client_subchannels_created",
        "server_channels_created", "syscall_poll", "syscall_wait",
        "pollset_kick", "pollset_kicked_without_poller",
        "pollset_kicked_again", "pollset_kick_wakeup_fd",
        "pollset_kick_wakeup_cv", "pollset_kick_own_thread",
        "histogram_slow_lookups", "syscall_write", "syscall_read",
        "tcp_backup_pollers_created", "tcp_backup_poller_polls",
        "http2_op_batches", "http2_op_cancel",
        "http2_op_send_initial_metadata", "http2_op_send_message",
        "http2_op_send_trailing_metadata", "http2_op_recv_initial_metadata",
        "http2_op_recv_message", "http2_op_recv_trailing_metadata",
        "http2_settings_writes", "http2_pings_sent", "http2_writes_begun",
        "http2_writes_offloaded", "http2_writes_continued",
        "http2_partial_writes",
        "http2_initiate_write_due_to_initial_write",
        "http2_initiate_write_due_to_start_new_stream",
        "http2_initiate_write_due_to_send_message",
        "http2_initiate_write_due_to_send_initial_metadata",
        "http2_initiate_write_due_to_send_trailing_metadata",
        "http2_initiate_write_due_to_retry_send_ping",
        "http2_initiate_write_due_to_continue_pings",
        "http2_initiate_write_due_to_goaway_sent",
        "http2_initiate_write_due_to_rst_stream",
        "http2_initiate_write_due_to_close_from_api",
        "http2_initiate_write_due_to_stream_flow_control",
        "http2_initiate_write_due_to_transport_flow_control",
        "http2_initiate_write_due_to_send_settings",
        "http2_initiate_write_due_to_bdp_estimator_ping",
        "http2_initiate_write_due_to_flow_control_unstalled_by_setting",
        "http2_initiate_write_due_to_flow_control_unstalled_by_update",
        "http2_initiate_write_due_to_application_ping",
        "http2_initiate_write_due_to_keepalive_ping",
        "http2_initiate_write_due_to_transport_flow_control_unstalled",
        "http2_initiate_write_due_to_ping_response",
        "http2_initiate_write_due_to_force_rst_stream",
        "http2_spurious_writes_begun", "hpack_recv_indexed",
        "hpack_recv_lithdr_incidx", "hpack_recv_lithdr_incidx_v",
        "hpack_recv_lithdr_notidx", "hpack_recv_lithdr_notidx_v",
        "hpack_recv_lithdr_nvridx", "hpack_recv_lithdr_nvridx_v",
        "hpack_recv_uncompressed", "hpack_recv_huffman",
        "hpack_recv_binary", "hpack_recv_binary_base64",
        "hpack_send_indexed", "hpack_send_lithdr_incidx",
        "hpack_send_lithdr_incidx_v", "hpack_send_lithdr_notidx",
        "hpack_send_lithdr_notidx_v", "hpack_send_lithdr_nvridx",
        "hpack_send_lithdr_nvridx_v", "hpack_send_uncompressed",
        "hpack_send_huffman", "hpack_send_binary",
        "hpack_send_binary_base64", "combiner_locks_initiated",
        "combiner_locks_scheduled_items",
        "combiner_locks_scheduled_final_items", "combiner_locks_offloaded",
        "call_combiner_locks_initiated",
        "call_combiner_locks_scheduled_items",
        "call_combiner_set_notify_on_cancel", "call_combiner_cancelled",
        "executor_scheduled_short_items", "executor_scheduled_long_items",
        "executor_scheduled_to_self", "executor_wakeup_initiated",
        "executor_queue_drained", "executor_push_retries",
        "server_requested_calls", "server_slowpath_requests_queued",
        "cq_ev_queue_trylock_failures", "cq_ev_queue_trylock_successes",
        "cq_ev_queue_transient_pop_failures",
    ]
    histograms = [
        "call_initial_size", "poll_events_returned", "tcp_write_size",
        "tcp_write_iov_size", "tcp_read_size", "tcp_read_offer",
        "tcp_read_offer_iov_size", "http2_send_message_size",
        "http2_send_initial_metadata_per_write",
        "http2_send_message_per_write",
        "http2_send_trailing_metadata_per_write",
        "http2_send_flowctl_per_write", "server_cqs_checked",
    ]
    for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:
        # NOTE: `return` (not `continue`) preserves the original behavior of
        # aborting all remaining entries as soon as one lacks coreStats.
        if "coreStats" not in stats:
            return
        core_stats = stats["coreStats"]
        del stats["coreStats"]
        for name in counters:
            stats["core_" + name] = massage_qps_stats_helpers.counter(
                core_stats, name)
        for name in histograms:
            h = massage_qps_stats_helpers.histogram(core_stats, name)
            stats["core_" + name] = ",".join("%f" % x for x in h.buckets)
            stats["core_" + name + "_bkts"] = ",".join(
                "%f" % x for x in h.boundaries)
            for pct in (50, 95, 99):
                stats["core_%s_%dp" %
                      (name, pct)] = massage_qps_stats_helpers.percentile(
                          h.buckets, pct, h.boundaries)
| tools/run_tests/performance/massage_qps_stats.py | 25,739 | Copyright 2017 gRPC authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Autogenerated by tools/codegen/core/gen_stats_data.py | 604 | en | 0.8341 |
from http import cookies
from io import StringIO
import pytest
def log_entry(entry):
    # Wrap a raw access-log line in a file-like object, as log parsers
    # typically consume readable streams rather than plain strings.
    return StringIO(entry)
@pytest.fixture
def cookie_zip_code():
    """A SimpleCookie carrying a single ``zip=98101`` pair."""
    jar = cookies.SimpleCookie()
    jar.load(rawdata='zip=98101')
    return jar
@pytest.fixture
def cookie_empty():
    """A SimpleCookie loaded from an empty string (no morsels)."""
    jar = cookies.SimpleCookie()
    jar.load(rawdata='')
    return jar
@pytest.fixture
def cloudfront_entry():
    """A CloudFront-style access-log line: GET with cookie zip=98101, RefreshHit."""
    return log_entry('''2014-05-23 01:13:11 FRA2 182 192.0.2.10 GET d111111abcdef8.cloudfront.net /view/my/file.html 200 www.displaymyfiles.com Mozilla/4.0%20(compatible;%20MSIE%205.0b1;%20Mac_PowerPC) - zip=98101 RefreshHit MRVMF7KydIvxMWfJIglgwHQwZsbG2IhRJ07sn9AkKUFSHS9EXAMPLE== d111111abcdef8.cloudfront.net http - 0.001 - - - RefreshHit HTTP/1.1''') # noqa: E501
@pytest.fixture
def cloudfront_entry_broken_cookie():
    """Like cloudfront_entry but the cookie field is malformed (`zip 98101`, no `=`)."""
    return log_entry('''2014-05-23 01:13:11 FRA2 182 192.0.2.10 GET d111111abcdef8.cloudfront.net /view/my/file.html 200 www.displaymyfiles.com Mozilla/4.0%20(compatible;%20MSIE%205.0b1;%20Mac_PowerPC) - zip 98101 RefreshHit MRVMF7KydIvxMWfJIglgwHQwZsbG2IhRJ07sn9AkKUFSHS9EXAMPLE== d111111abcdef8.cloudfront.net http - 0.001 - - - RefreshHit HTTP/1.1''') # noqa: E501
@pytest.fixture
def cloudfront_entry2():
    """A second CloudFront-style line: 304 Hit with a query string (a=b&c=d)."""
    return log_entry('''2014-05-23 01:13:12 LAX1 2390282 192.0.2.202 GET d111111abcdef8.cloudfront.net /soundtrack/happy.mp3 304 www.unknownsingers.com Mozilla/4.0%20(compatible;%20MSIE%207.0;%20Windows%20NT%205.1) a=b&c=d zip=98101 Hit xGN7KWpVEmB9Dp7ctcVFQC4E-nrcOcEKS3QyAez--06dV7TEXAMPLE== d111111abcdef8.cloudfront.net http - 0.002 - - - Hit HTTP/1.1''') # noqa: E501
@pytest.fixture
def loadbalancer_http_entry():
    """An ALB-style `http` log line with query parameters in the request URL."""
    return log_entry('''http 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 10.0.0.1:80 0.000 0.001 0.000 200 200 34 366 "GET http://www.example.com:80/?a=b&c=d&zip=98101 HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337262-36d228ad5d99923122bbe354" "-" "-" 0 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_https_entry():
    """An ALB-style `https` line with TLS cipher/protocol and "authenticate,forward" actions."""
    return log_entry('''https 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 10.0.0.1:80 0.086 0.048 0.037 200 200 0 57 "GET https://www.example.com:443/ HTTP/1.1" "curl/7.46.0" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337281-1d84f3d73c47ec4e58577259" "www.example.com" "arn:aws:acm:us-east-2:123456789012:certificate/12345678-1234-1234-1234-123456789012" 1 2018-07-02T22:22:48.364000Z "authenticate,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_http2_entry():
    """An ALB-style `h2` (HTTP/2) line whose action is a redirect."""
    return log_entry('''h2 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.1.252:48160 10.0.0.66:9000 0.000 0.002 0.000 200 200 5 257 "GET https://10.0.2.105:773/ HTTP/2.0" "curl/7.46.0" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337327-72bd00b0343d75b906739c42" "-" "-" 1 2018-07-02T22:22:48.364000Z "redirect" "https://example.com:80/" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_websockets_entry():
    """An ALB-style `ws` (websocket) line: HTTP 101 upgrade, no user agent."""
    return log_entry('''ws 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.0.140:40914 10.0.1.192:8010 0.001 0.003 0.000 101 101 218 587 "GET http://10.0.0.30:80/ HTTP/1.1" "-" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 1 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_secured_websockets_entry():
    """An ALB-style `wss` (TLS websocket) line: HTTP 101 with TLS cipher/protocol."""
    return log_entry('''wss 2018-07-02T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 10.0.0.140:44244 10.0.0.171:8010 0.000 0.001 0.000 101 101 218 786 "GET https://10.0.0.30:443/ HTTP/1.1" "-" ECDHE-RSA-AES128-GCM-SHA256 TLSv1.2 arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 1 2018-07-02T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_lambda_entry():
    """An ALB-style `http` line with `-` in the target ip:port field (Lambda-backed target)."""
    return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 200 200 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_lambda_failed_entry():
    """A failed Lambda-backed request: 502, `-` target status, reason LambdaInvalidResponse."""
    return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "forward" "-" "LambdaInvalidResponse"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward():
    """A 502 line whose actions-executed field is "waf,forward" with no target."""
    return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "-" "-" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward_refused():
    """Like loadbalancer_cloudfront_forward, but with domain api.example.com and "session-reused" in the certificate field."""
    return log_entry('''http 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "api.example.com" "session-reused" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
@pytest.fixture
def loadbalancer_cloudfront_forward_h2():
    """The "waf,forward" 502 case again, but tagged as `h2` with domain api.example.com."""
    return log_entry('''h2 2018-11-30T22:23:00.186641Z app/my-loadbalancer/50dc6c495c0c9188 192.168.131.39:2817 - 0.000 0.001 0.000 502 - 34 366 "GET http://www.example.com:80/ HTTP/1.1" "curl/7.46.0" - - arn:aws:elasticloadbalancing:us-east-2:123456789012:targetgroup/my-targets/73e2d6bc24d8a067 "Root=1-58337364-23a8c76965a2ef7629b185e3" "api.example.com" "-" 0 2018-11-30T22:22:48.364000Z "waf,forward" "-" "-"''') # noqa: E501
| test/conftest.py | 6,683 | noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 noqa: E501 | 142 | uz | 0.345295 |
#!/home/observer/miniconda2/bin/python
import numpy as N
import sys, os
import logging as L
import subprocess as S
from collections import namedtuple
from sigpyproc.Readers import FilReader as F
sys.path.append("/home/vgupta/Codes/Fake_FRBs/")
from Furby_reader import Furby_reader
class FileNotFound(Exception):
    """Raised when a required observation file is missing from disk."""
class Observation():
    def __init__(self, utc, cfg_file = "/home/vgupta/resources/observations.cfg"):
        """Gather everything known about the observation started at `utc`.

        utc: observation start-time string; also its directory name on disk.
        cfg_file: path to the observations config listing the data trees.
        """
        self.utc = utc
        self.cfg_file = cfg_file
        self.read_conf()
        self.get_results_dir()
        self.get_archives_dir()
        self.is_failed = self.if_failed()   # obs.failed marker present?
        self.read_info()
        # NOTE: this call rebinds the attribute `processed_offline` over the
        # method of the same name (see its definition).
        self.processed_offline()
        self.annotation = self.read_annotation()
    def __str__(self):
        # The UTC start time doubles as the observation's human-readable name.
        return self.utc
    def __repr__(self):
        # Same as __str__: identify the observation by its UTC string.
        return self.utc
def read_annotation(self):
afile = os.path.join(self.results_dir, "obs.txt")
if not os.path.exists(afile):
return None
with open(afile, 'r') as f:
return f.read()
def read_conf(self):
if not os.path.exists(self.cfg_file):
raise Exception("Cannot find observation configuration file - {0}".format(self.cfg_file))
#raise FileNotFound("Cannot find observation configuration file - {0}".format(self.cfg_file))
conf_tmp = {}
with open(self.cfg_file) as c:
lines = c.readlines()
for line in lines:
if (line.startswith("#") or line == "" or line == "\n"):
continue
key = line.strip().split()[0].strip()
val = line.strip().split()[1].strip()
val = self.check_type(val)
conf_tmp[key] = val
tmp = namedtuple("CONF", conf_tmp.keys())
self.conf = tmp(*conf_tmp.values())
def get_results_dir(self):
path1 = os.path.join(self.conf.results_dir, self.utc)
path2 = os.path.join(self.conf.old_results_dir, self.utc)
if os.path.isdir(path1):
self.results_dir = self.conf.results_dir
elif os.path.isdir(path2):
self.results_dir = self.conf.old_results_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old results. Neither {1} nor {2} exists".format(self.utc, path1, path2))
def get_archives_dir(self):
path1 = os.path.join(self.conf.archives_dir, self.utc)
path2 = os.path.join(self.conf.old_archives_dir, self.utc)
if os.path.isdir(path1):
self.archives_dir = self.conf.archives_dir
elif os.path.isdir(path2):
self.archives_dir = self.conf.old_archives_dir
else:
raise IOError("Directory for UTC: {0} does not exist in any of the new or old archives".format(self.utc))
    def processed_offline(self):
        # Path where the offline pipeline writes its candidate file.
        self.offline_cand_file = os.path.join(self.archives_dir, self.utc, self.conf.offline_output_dir, self.conf.offline_output_file)
        # NOTE(review): this rebinds the instance attribute `processed_offline`
        # over the method of the same name, so it can only run once per
        # instance (it is called from __init__) — worth renaming eventually.
        self.processed_offline = os.path.exists(self.offline_cand_file) and not self.is_failed
def read_header(self):
if self.is_failed:
self.header = None
return
self.header_file = os.path.join(self.results_dir, self.utc, "FB", self.conf.header_file)
if not os.path.exists(self.header_file):
raise Exception("Header file({0}) does not exist".format(self.header_file))
with open(self.header_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
key = line.split()[0].strip()
val = line.split()[1].strip()
cval = self.check_type(val)
if key.startswith("FURBY"):
cval = str(val)
hdr_tmp[key] = cval
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("HEADER", keys)
self.header = tmp(*values)
self.tres = self.header.TSAMP * 1e-6
return self.header
def read_info(self):
self.obs_info_file = os.path.join(self.results_dir, self.utc, "obs.info")
if not os.path.exists(self.obs_info_file):
raise Exception("obs.info file({0}) does not exist".format(self.obs_info_file))
with open(self.obs_info_file) as h:
lines = h.readlines()
hdr_tmp = {}
for line in lines:
if line.startswith("#") or line == "" or line == "\n":
continue
key = line.split()[0].strip()
val = line.split()[1].strip()
val = self.check_type(val)
hdr_tmp[key] = val
if key=="INT" and self.is_failed:
val = 0
keys = hdr_tmp.keys()
values = hdr_tmp.values()
tmp = namedtuple("INFO", keys)
self.info = tmp(*values)
#Getting Tobs-----------------
filterbank_name = self.utc + ".fil"
filterbank_file = os.path.join(self.archives_dir, self.utc, "FB/BEAM_001/", filterbank_name)
if os.path.exists(filterbank_file):
filt_header = F(filterbank_file).header
self.tobs = filt_header.tobs
if self.info.INT > self.tobs:
self.tobs = self.info.INT
else:
self.tobs = self.info.INT
#-----------------------------
return self.info
def check_type(self, val):
try:
ans=int(val)
return ans
except ValueError:
try:
ans=float(val)
return ans
except ValueError:
if val.lower()=="false":
return False
elif val.lower()=="true":
return True
else:
return val
def if_processing(self):
processing_file = os.path.join(self.results_dir, self.utc, "obs.processing")
return os.path.exists(processing_file)
def if_failed(self):
obs_failed_file = os.path.join(self.results_dir, self.utc, "obs.failed")
return os.path.exists(obs_failed_file)
    def read_furby_params(self):
        """Determine how many furbies were injected and cache their raw params.

        Sets ``self.inj_furbys`` to -1 when the observation failed or was a
        multibeam/correlation observation (no injection possible), 0 when the
        header lacks INJECTED_FURBYS, otherwise to the header count.  When
        positive, also caches the comma-separated id/beam/timestamp strings.
        """
        if self.is_failed:
            # Failed observation: injection status unknowable, flag with -1.
            self.inj_furbys = -1
            return
        if (self.info.MB_ENABLED or self.info.CORR_ENABLED):
            # Injection does not happen in MB/CORR modes.
            self.inj_furbys = -1
        else:
            self.read_header()
            try:
                self.inj_furbys = self.header.INJECTED_FURBYS
            except AttributeError as e:
                # Header without INJECTED_FURBYS means no injection happened.
                #log.warn("Could not find INJECTED_FURBYS in the header file for UTC: {0}".format(self.utc))
                #log.warn("Assuming no furby injection happened in this observation ({0})".format(self.utc))
                self.inj_furbys = 0
            else:
                if self.inj_furbys > 0:
                    # Strip trailing commas from the comma-joined param lists.
                    self.furby_beams = self.header.FURBY_BEAMS.strip(",")
                    self.furby_ids = self.header.FURBY_IDS.strip(",")
                    self.furby_tstamps = self.header.FURBY_TSTAMPS.strip(",")
        #log.debug("Found: injected_furbys: {0}, furby_ids: {1}, furby_beams: {2}, furby_tstamps: {3}".format(self.inj_furbys, self.furby_ids, self.furby_beams, self.furby_tstamps))
    def split_and_filter_furby_params(self):
        """Split cached furby param strings and partition furbies by validity.

        Populates ``self.furbies`` (accepted) and ``self.dropped_furbies``
        (rejected by :meth:`check_if_dropped`).  NOTE: furbies are checked in
        order, and check_if_dropped compares each candidate against the
        furbies accepted so far — do not reorder this loop.

        :raises ValueError: if no furbies were injected, or the id/beam/tstamp
            lists do not all have ``self.inj_furbys`` entries.
        """
        if self.inj_furbys < 1:
            raise ValueError("No furbies to split")
        f_ids = N.array(self.furby_ids.split(","))
        f_beams = N.array(self.furby_beams.split(","))
        f_tstamps = N.array(self.furby_tstamps.split(","))
        # Drop empty tokens produced by stray commas.
        f_ids = f_ids[N.where(f_ids!='')]
        f_beams = f_beams[N.where(f_beams!='')]
        f_tstamps = f_tstamps[N.where(f_tstamps!='')]
        # All three lists must have exactly inj_furbys entries.
        test = N.array([len(f_ids), len(f_beams), len(f_tstamps)])
        if N.any(test-self.inj_furbys):
            raise ValueError("Incorrect number of furby params, observation should have failed")
        self.furbies = []
        self.dropped_furbies = []
        for i in range(self.inj_furbys):
            furby = Furby(f_ids[i], db = os.path.join(self.archives_dir, self.utc, "Furbys"))
            furby.i_beam = int(f_beams[i])
            furby.i_tstamp = float(f_tstamps[i])
            furby.calc_times()
            if (self.check_if_dropped(furby)):
                self.dropped_furbies.append(furby)
            else:
                self.furbies.append(furby)
def check_if_dropped(self, furby):
if not hasattr(furby, 'header'):
furby.read_fheader()
if not hasattr(furby, 'length'):
furby.calc_times()
if furby.i_tstamp < furby.length/2:
return True
if (furby.i_tstamp - furby.length/2) > self.tobs:
return True
all_furby_tstamps = N.array([float(i.i_tstamp) for i in self.furbies])
diff = furby.i_tstamp - all_furby_tstamps
if N.any((diff < (furby.length + 512*self.tres)) & (diff > 0)):
return True
return False
#----------------------------------------------------------------------------------------#
class Furby(Furby_reader):
    """A single injected fake FRB ("furby") plus its injection metadata."""

    def __init__(self, ID, db = "/home/dada/furby_database"):
        """
        :param ID: furby identifier; the on-disk file is ``furby_<ID>``.
        :param db: directory containing the furby archive files.
        """
        self.ID = ID
        self.name = "furby_"+ID
        self.DB = db
        self.file = os.path.join(self.DB, self.name)
        # Injection parameters, filled in later by the caller.
        self.i_beam = None
        self.i_tstamp = None
        self.i_snr = None

    def __repr__(self):
        return str(self.ID)

    def read_fheader(self):
        """Read the furby file header (sets ``self.header`` via Furby_reader)."""
        #self.header = self.read_header(self.file)
        self.read_header(self.file)

    def calc_times(self):
        """Compute start/end/centre times (s) and total length of the furby.

        Accounts for the intra-band dispersion delay relative to the band
        centre channel.
        """
        # Removed an unused local (`log = L.getLogger("furby_manager")`) that
        # was never referenced in this method.
        if not hasattr(self, 'header'):
            self.read_fheader()
        chw = (self.header.FTOP - self.header.FBOTTOM) / self.header.NCHAN
        f_chtop = self.header.FTOP - chw/2
        f_chmid = f_chtop - (self.header.NCHAN/2 * chw)
        f_chbottom = self.header.FBOTTOM + chw/2
        # Dispersion delays (s) of band top/bottom relative to band centre.
        delay_to_top = 4.14881 * 1e6 * self.header.DM * ( f_chtop**(-2) - f_chmid**(-2) ) *1e-3 #in s
        delay_to_bottom = 4.14881 * 1e6 * self.header.DM * ( f_chbottom**(-2) - f_chmid**(-2) ) *1e-3 #in s
        self.s_time = self.i_tstamp + delay_to_top
        self.e_time = self.i_tstamp + delay_to_bottom
        self.c_time = self.i_tstamp
        # TSAMP is in microseconds.
        self.length = self.header.NSAMPS * self.header.TSAMP * 1e-6
#---------------------------------------------------------------------------------------#
def list_UTCs_from(start_utc):
    """Return all UTC directory names from ``start_utc`` onwards (inclusive).

    Note to someone editing this in future: Keep in mind that other scripts
    depend upon the fact that this function returns the list of UTCs in
    correctly sorted order. Do not change that, even if that costs speed.

    :raises Exception: if ``start_utc`` is not found in the results directory.
    """
    start = Observation(start_utc)
    cmd = "ls -1d "+start.results_dir+"/202* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
    # NOTE(review): under Python 3, communicate() returns bytes and would need
    # a .decode() here — confirm the target interpreter version.
    output = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip()
    # BUG FIX: "".split("\n") == [""], so the previous `len(utcs) == 0` check
    # could never detect a failed lookup; filter out empty tokens first.
    utcs = [u for u in output.split("\n") if u]
    #VG: 02/05/2020 -- disabling the section below -- It doesn't work, and I don't have a quick fix either.
    '''
    if start.results_dir == start.conf.old_results_dir:
        #Also append utcs from the new results directory
        cmd = "ls -1d "+conf.results_dir+"/20* | grep -A 999999 "+start_utc+" | awk -F/ '{print $5}'"
        utcs.extend(S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip().split("\n"))
    '''
    if len(utcs) == 0:
        raise Exception("Given start UTC ({}) not found in {}".format(start_utc, start.results_dir))
    return utcs
def list_UTCs_until(utc):
    """Return all UTCs from the first recorded UTC up to ``utc`` (inclusive).

    :raises ValueError: if ``utc`` is not present in the UTC listing.
    """
    check = Observation(utc)  # validates that the UTC exists / is well formed
    start_utc = get_first_UTC()
    UTCs_from_start = list_UTCs_from(start_utc)
    #Assume that list_UTCs_from() returns UTCs sorted in correct order, which it should.
    # BUG FIX: the previous N.where(UTCs_from_start == end_utc) compared the
    # whole Python list to a string (a scalar False), never element-wise,
    # yielding an empty index array. Use list.index() instead.
    index = UTCs_from_start.index(utc)
    UTCs_until = UTCs_from_start[:index + 1]
    return UTCs_until
def list_UTCs_after(utc):
    """Return all UTCs strictly after ``utc`` (excludes ``utc`` itself)."""
    # BUG FIX: was `list_UTCS_from` (capital S), which raised NameError.
    inclusive_utcs = list_UTCs_from(utc)
    return inclusive_utcs[1:]
def get_latest_UTC():
    """Return the name of the most recently created UTC directory."""
    cmd = "ls -1d -rt "+conf.results_dir+"/20* | tail -1 | awk -F/ '{print $5}'"
    # BUG FIX: was `.communcate()` (typo), which raised AttributeError.
    utc = S.Popen(cmd, shell=True, stdout=S.PIPE).communicate()[0].strip()
    return utc
def get_first_UTC():
    """Return the first UTC recorded by Molonglo after the disk crash in
    October 2017."""
    first_recorded = "2017-10-31-08:49:32"
    return first_recorded
| helpers.py | 11,178 | Returns the first UTC recorded by Molonglo after the disk crash in October 2017
!/home/observer/miniconda2/bin/pythonraise FileNotFound("Cannot find observation configuration file - {0}".format(self.cfg_file))Getting Tobs----------------------------------------------log.warn("Could not find INJECTED_FURBYS in the header file for UTC: {0}".format(self.utc))log.warn("Assuming no furby injection happened in this observation ({0})".format(self.utc))log.debug("Found: injected_furbys: {0}, furby_ids: {1}, furby_beams: {2}, furby_tstamps: {3}".format(self.inj_furbys, self.furby_ids, self.furby_beams, self.furby_tstamps))----------------------------------------------------------------------------------------self.header = self.read_header(self.file)in sin s---------------------------------------------------------------------------------------Note to someone editing this in future: Keep in mind that other scripts depend upon that fact that this function returns the list of UTCs in correctly sorted order. Do not change that, even if that costs speed. Or make sure that the scripts using this can be edited accordingly.VG: 02/05/2020 -- disabling the section below -- It doesn't work, and I don't have a quick fix either. Assume that list_UTCs_from() returns UTCs sorted in correct order, which it should. | 1,310 | en | 0.662181 |
# -*- coding: utf-8 -*-
import scrapy
class scrapyshkmbab39Spider(scrapy.Spider):
    """Scrape the fasl (section) texts of Kashf-ol-Mahjoob, bab 39, from ganjoor.net."""
    name = "scrapyshkmbab39"
    allowed_domains = ["ganjoor.net"]
    # The generated code wrapped these in constant conditions (`if 1 == 1`,
    # `if 0 == 1` ... `elif 0 == 4`); the statically dead branches have been
    # removed without changing behaviour.
    start_urls = ["https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh"]
    order = 1

    def parse(self, response):
        """Parse one section page into a dict with title and numbered text parts."""
        index = 0
        sh = dict()
        sh["type"] = "fasl"
        sh["text"] = dict()
        for i, poem in enumerate(response.css("div.poem>article>*")):
            if index == 0:
                # Title comes from the page heading link.
                sh["title"] = ''.join(response.css("div.poem>article>h2>a::text").extract_first()).strip()
            # Skip bookmarks, spacers and styling-only nodes.
            if poem.css("p::text").extract_first() is None or 'rel="bookmark"' in poem.css('*').extract_first() or 'class="spacer"' in poem.css('*').extract_first() or '<div style=' in poem.css('*').extract_first():
                continue
            if len(poem.css("div.m1>p")) == 1:
                if poem.css("div.b"):
                    # Two-hemistich verse (m1/m2 columns).
                    if '٭٭٭' not in poem.css("div.m1>p::text").extract_first() and ''.join(poem.css("div.m1>p::text").extract()).strip() != '':
                        sh["text"][index] = dict([
                            ("m1", ''.join(poem.css("div.m1>p::text").extract()).strip()),
                            ("m2", ''.join(poem.css("div.m2>p::text").extract()).strip()),
                        ])
                else:
                    if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
                        sh["text"][index] = dict([
                            ("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
                            ("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
                        ])
            else:
                if poem.css("div.b2"):
                    if '٭٭٭' not in poem.css("p:first-child::text").extract_first() and ''.join(poem.css("p:first-child::text").extract()).strip() != '':
                        sh["text"][index] = dict([
                            ("t1", ''.join(poem.css("p:first-child::text").extract()).strip()),
                            ("t2", ''.join(poem.css("p:last-child::text").extract()).strip()),
                        ])
                else:
                    # Plain prose paragraph.
                    if '٭٭٭' not in poem.css('p::text').extract_first() and ''.join(poem.css('p::text').extract()).strip() != '':
                        sh['text'][index] = dict([
                            ('p', ''.join(poem.css('p::text').extract()).strip())
                        ])
            index = index + 1
        sh["order"] = self.order
        self.order = self.order + 1
        yield sh
        # next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first()
        if self.order < (1 + 1):
            next_page = response.urljoin("https://ganjoor.net/hojviri/kashfol-mahjoob/kmbab39/sh" + str(self.order))
            yield scrapy.Request(next_page, callback=self.parse)
| ganjoor/spiders/hojviri/kashfol-mahjoob/scrapyshkmbab39.py | 3,971 | -*- coding: utf-8 -*- next_page = response.css("div.navigation>div.navleft>a::attr(href)").extract_first() | 106 | en | 0.284506 |
import psycopg2
import psycopg2.extras
class DBHandler:
    """
    Handles I/O concerning the database to hide its implementation from client services.
    """
    def __init__(self,
                 postgres_username=None,
                 postgres_password=None,
                 db_username='dbpedia_app',
                 db_password='dummy_password'):
        # ordinarily you would get these from some secret store
        # e.g. heroku has a specific url that you parse to get both
        # or os.environ storage (like those used for API keys and the like)
        user_name = db_username
        password = db_password
        # check to see if the db exists locally, create it if necessary
        if postgres_password is not None and postgres_username is not None:
            try:
                connection = psycopg2.connect("dbname='postgres' user='%s' "
                                              "host='localhost' password='%s'"
                                              % (postgres_username, postgres_password))
                connection.autocommit = True
                cursor = connection.cursor()
                # queries the postgres catalog to see if 'dbpedia' exists
                # if not, creates it
                cursor.execute("SELECT COUNT(*) = 0 FROM pg_catalog.pg_database WHERE datname = 'dbpedia'")
                not_exists_row = cursor.fetchone()
                not_exists = not_exists_row[0]
                if not_exists:
                    cursor.execute("CREATE USER %s PASSWORD '%s'" % (user_name, password))
                    cursor.execute('CREATE DATABASE dbpedia OWNER %s' % (user_name,))
                connection.close()
            except:
                # Presume if credentials are passed the user wants to perform this check/DB construction
                # fail via error propagation
                raise
        try:
            self.connection = psycopg2.connect("dbname='dbpedia' user='%s' host='localhost' password='%s'"
                                               % (user_name, password))
        except:
            raise AssertionError('Failed to connect to dbpedia database. Has the local dbpedia been created?')

    def __del__(self):
        self.connection.close()

    def commit(self):
        """Commit the current transaction on the shared connection."""
        self.connection.commit()

    def schema_exists(self):
        """
        Checks the estimated number of tuples in the subjects table to determine if data exists
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('select reltuples FROM pg_class where relname = %s', ('subjects',))
            result = cursor.fetchone()[0]
            return result > 0

    def build_table_schema(self, schema_name, schema_file_path):
        """
        Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is
        dropped (deleted) and recreated.
        :param schema_name:
        :param schema_file_path:
        :return:
        """
        # do not call with user input given the manual query construction here
        with self.connection.cursor() as cursor:
            cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % schema_name)
            # BUG FIX: the file was previously opened in 'rU' mode (removed in
            # Python 3.11) and the handle was never closed; use a context
            # manager with the default text mode instead.
            with open(schema_file_path) as schema_fh:
                schema_file = schema_fh.read()
            cursor.execute(schema_file)

    def build_indices(self):
        """
        Builds the following indices:
        Index on name for subjects
        Index on predicate for predicate_object
        Index on subject_id for predicate object
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_subject_id_idx')
            cursor.execute('DROP INDEX IF EXISTS dbpedia.subject_idx')
            cursor.execute('DROP INDEX IF EXISTS dbpedia.pv_predicate_idx')
            cursor.execute('create index subject_idx on dbpedia.subjects (name)')
            cursor.execute('create index pv_subject_id_idx on dbpedia.predicate_object (subject_id)')
            cursor.execute('create index pv_predicate_idx on dbpedia.predicate_object (predicate);')

    def insert_spo_tuple(self, spo_tuple):
        """
        Handles the insertion of spo tuples into the db. Workflow:
        Attempt to find the subject table entry corresponding to your subject. If found, use that ID for
        inserting your po values. Otherwise, insert your subject into the subject table and use that ID
        instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.
        :param spo_tuple:
        :return:
        """
        (subject, predicate, db_object) = spo_tuple
        with self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
            cursor.execute('select subject_id from dbpedia.subjects '
                           'where name = %s', (subject,))
            results = cursor.fetchone()
            if results is None or len(results) == 0:
                cursor.execute('INSERT INTO dbpedia.subjects (name) VALUES (%s) '
                               'returning subject_id', (subject,))
                results = cursor.fetchone()
            id = results['subject_id']
            # now we have the correct id in either case, insert the values into the db
            cursor.execute('INSERT INTO dbpedia.predicate_object (subject_id, predicate, object) '
                           'VALUES (%s, %s, %s)', (id, predicate, db_object))

    def get_person_metadata(self, person_name, use_exact_match=False):
        """
        Returns all metadata associated with the provided person_name. However, does not actually check
        to see if the identifier corresponds to a person or not; the class of the identifier will
        be included in the returned metadata though. DBPedia People only contains people predicate
        types as well.
        Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided
        to query against the subject table (WHERE = identifier). If False, uses the LIKE operator
        to attempt to find similar IDs that are not exactly the same. Results will still be a superset
        of the use_exact_match = True case.
        :param person_name:
        :param use_exact_match:
        :return:
        """
        # wikipedia replaces all spaces with under scores
        # upper case to make case sensitive
        person_name = person_name.replace(' ', '_').upper()
        with self.connection.cursor() as cursor:
            # get id associated with this person
            # get all similar IDs
            if not use_exact_match:
                cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) '
                               'LIKE %s',
                               ('%%' + person_name + '%%',))
            else:
                cursor.execute('SELECT subject_id, name FROM dbpedia.subjects WHERE upper(name) = %s',
                               (person_name,))
            results = cursor.fetchall()
            # no person matches the input name
            # return empty list
            if results is None:
                return []
            subject_id_list = [x[0] for x in results]
            # get all metadata associated with the subject_ids
            cursor.execute('select dbpedia.subjects.name, predicate, object '
                           'FROM dbpedia.predicate_object '
                           'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
                           'WHERE dbpedia.predicate_object.subject_id = ANY(%s)', (subject_id_list,))
            # this should never be none
            # Sort results by name and return
            return sorted(cursor.fetchall(), key=lambda x: x[0])

    def get_tuples_by_predicate(self, predicate_of_interest):
        """
        Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since
        you are querying such a large fraction of the po table at once (unless your predicate does not exist).
        Predicates:
        Name
        Type
        Gender
        Description
        Birthdate
        GivenName
        Surname
        BirthPlace
        DeathDate
        DeathPlace
        :param predicate_of_interest:
        :return:
        """
        with self.connection.cursor() as cursor:
            cursor.execute('select dbpedia.subjects.name, '
                           'predicate, '
                           'object '
                           'FROM dbpedia.predicate_object '
                           'INNER JOIN dbpedia.subjects on (dbpedia.subjects.subject_id = dbpedia.predicate_object.subject_id) '
                           'WHERE upper(dbpedia.predicate_object.predicate) = upper(%s)', (predicate_of_interest,))
            results = cursor.fetchall()
            if results is None:
                return []
            else:
                return results
| database_query_handler.py | 9,247 | Handles I/O concerning the database to hide its implementation from client services.
Builds the following indices:
Index on name for subjects
Index on predicate for predicate_object
Index on subject_id for predicate object
:return:
Loads the dbpedia schema used for supporting downstream analysis. If the schema already exists, it is
dropped (deleted) and recreated.
:param schema_name:
:param schema_file_path:
:return:
Returns all metadata associated with the provided person_name. However, does not actually check
to see if the identifier corresponds to a person or not; the class of the identifier will
be included in the returned metadata though. DBPedia People only contains people predicate
types as well.
Use_exact_match toggles between two behaviors: if True, then uses the exact identifier provided
to query against the subject table (WHERE = identifier). If False, uses the LIKE operator
to attempt to find similar IDs that are not exactly the same. Results will still be a superset
of the use_exact_match = True case.
:param person_name:
:param use_exact_match:
:return:
Extracts SPO tuples based on the predicate value passed to the function. This query will be slow since
you are querying such a large fraction of the po table at once (unless your predicate does not exist).
Predicates:
Name
Type
Gender
Description
Birthdate
GivenName
Surname
BirthPlace
DeathDate
DeathPlace
:param predicate_of_interest:
:return:
Handles the insertion of spo tuples into the db. Workflow:
Attempt to find the subject table entry corresponding to your subject. If found, use that ID for
inserting your po values. Otherwise, insert your subject into the subject table and use that ID
instead. The resulting id, predicate, object tuple is then inserted into the predicate_object table.
:param spo_tuple:
:return:
Checks the estimated number of tuples in the subjects table to determine if data exists
:return:
ordinarily you would get these from some secret store e.g. heroku has a specific url that you parse to get both or os.environ storage (like those used for API keys and the like) check to see if the db exists locally, create it if necessary queries the postgres catalog to see if 'dbpedia' exists if not, creates it Presume if credentials are passed the user wants to perform this check/DB construction fail via error propagation do not call with user input given the manual query construction here now we have the correct id in either case, insert the values into the db wikipedia replaces all spaces with under scores upper case to make case sensitive get id associated with this person get all similar IDs no person matches the input name return empty list get all metadata associated with the subject_ids this should never be none Sort results by name and return | 2,795 | en | 0.823897 |
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import unittest
from unittest.mock import ANY
from databuilder.models.graph_serializable import (
RELATION_END_KEY, RELATION_END_LABEL, RELATION_REVERSE_TYPE, RELATION_START_KEY, RELATION_START_LABEL,
RELATION_TYPE,
)
from databuilder.models.user import User
from databuilder.serializers import neo4_serializer, neptune_serializer
from databuilder.serializers.neptune_serializer import (
NEPTUNE_CREATION_TYPE_JOB, NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT, NEPTUNE_HEADER_ID,
NEPTUNE_HEADER_LABEL, NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT,
NEPTUNE_RELATIONSHIP_HEADER_FROM, NEPTUNE_RELATIONSHIP_HEADER_TO,
)
class TestUser(unittest.TestCase):
    """Unit tests for the User model's node/relation serialization."""

    def setUp(self) -> None:
        super(TestUser, self).setUp()
        self.user = User(first_name='test_first',
                         last_name='test_last',
                         name='test_first test_last',
                         email='test@email.com',
                         github_username='github_test',
                         team_name='test_team',
                         employee_type='FTE',
                         manager_email='test_manager@email.com',
                         slack_id='slack',
                         is_active=True,
                         updated_at=1,
                         role_name='swe')

    def test_get_user_model_key(self) -> None:
        user_email = User.get_user_model_key(email=self.user.email)
        self.assertEqual(user_email, 'test@email.com')

    def test_create_nodes(self) -> None:
        nodes = self.user.create_nodes()
        self.assertEqual(len(nodes), 1)

    def test_create_node_additional_attr(self) -> None:
        test_user = User(first_name='test_first',
                         last_name='test_last',
                         name='test_first test_last',
                         email='test@email.com',
                         github_username='github_test',
                         team_name='test_team',
                         employee_type='FTE',
                         manager_email='test_manager@email.com',
                         slack_id='slack',
                         is_active=True,
                         updated_at=1,
                         role_name='swe',
                         enable_notify=True)
        nodes = test_user.create_nodes()
        serialized_node = neo4_serializer.serialize_node(nodes[0])
        self.assertEqual(serialized_node['email'], 'test@email.com')
        self.assertEqual(serialized_node['role_name'], 'swe')
        self.assertTrue(serialized_node['enable_notify:UNQUOTED'])

    def test_create_node_additional_attr_neptune(self) -> None:
        test_user = User(first_name='test_first',
                         last_name='test_last',
                         name='test_first test_last',
                         email='test@email.com',
                         github_username='github_test',
                         team_name='test_team',
                         employee_type='FTE',
                         manager_email='test_manager@email.com',
                         slack_id='slack',
                         is_active=True,
                         updated_at=1,
                         role_name='swe',
                         enable_notify=True)
        nodes = test_user.create_nodes()
        serialized_node = neptune_serializer.convert_node(nodes[0])
        self.assertEqual(serialized_node['email:String(single)'], 'test@email.com')
        self.assertEqual(serialized_node['role_name:String(single)'], 'swe')
        self.assertTrue(serialized_node['enable_notify:Bool(single)'])

    def test_create_relation(self) -> None:
        relations = self.user.create_relation()
        self.assertEqual(len(relations), 1)

        start_key = 'test@email.com'
        end_key = 'test_manager@email.com'

        expected_relation = {
            RELATION_START_KEY: start_key,
            RELATION_START_LABEL: User.USER_NODE_LABEL,
            RELATION_END_KEY: end_key,
            RELATION_END_LABEL: User.USER_NODE_LABEL,
            RELATION_TYPE: User.USER_MANAGER_RELATION_TYPE,
            RELATION_REVERSE_TYPE: User.MANAGER_USER_RELATION_TYPE
        }
        # BUG FIX: this was `assertTrue(expected_relation, serialized)`, which
        # treats the serialized relation as the failure *message* and asserts
        # nothing (a non-empty dict is always truthy). Compare the dicts.
        self.assertEqual(expected_relation, neo4_serializer.serialize_relationship(relations[0]))

    def test_create_relation_neptune(self) -> None:
        relations = self.user.create_relation()
        serialized = neptune_serializer.convert_relationship(relations[0])
        start_key = '{email}'.format(email='test@email.com')
        end_key = '{email}'.format(email='test_manager@email.com')
        expected = [
            {
                NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
                    from_vertex_id=start_key,
                    to_vertex_id=end_key,
                    label=User.USER_MANAGER_RELATION_TYPE
                ),
                NEPTUNE_RELATIONSHIP_HEADER_FROM: start_key,
                NEPTUNE_RELATIONSHIP_HEADER_TO: end_key,
                NEPTUNE_HEADER_LABEL: User.USER_MANAGER_RELATION_TYPE,
                NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
                NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
            },
            {
                NEPTUNE_HEADER_ID: "{from_vertex_id}_{to_vertex_id}_{label}".format(
                    from_vertex_id=end_key,
                    to_vertex_id=start_key,
                    label=User.MANAGER_USER_RELATION_TYPE
                ),
                NEPTUNE_RELATIONSHIP_HEADER_FROM: end_key,
                NEPTUNE_RELATIONSHIP_HEADER_TO: start_key,
                NEPTUNE_HEADER_LABEL: User.MANAGER_USER_RELATION_TYPE,
                NEPTUNE_LAST_EXTRACTED_AT_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: ANY,
                NEPTUNE_CREATION_TYPE_RELATIONSHIP_PROPERTY_NAME_BULK_LOADER_FORMAT: NEPTUNE_CREATION_TYPE_JOB
            }
        ]
        self.assertListEqual(serialized, expected)

    def test_not_including_empty_attribute(self) -> None:
        test_user = User(email='test@email.com',
                         foo='bar')

        self.assertDictEqual(neo4_serializer.serialize_node(test_user.create_next_node()),
                             {'KEY': 'test@email.com', 'LABEL': 'User', 'email': 'test@email.com',
                              'is_active:UNQUOTED': True, 'first_name': '', 'last_name': '', 'full_name': '',
                              'github_username': '', 'team_name': '', 'employee_type': '', 'slack_id': '',
                              'role_name': '', 'updated_at:UNQUOTED': 0, 'foo': 'bar'})

        test_user2 = User(email='test@email.com',
                          foo='bar',
                          is_active=False,
                          do_not_update_empty_attribute=True)

        self.assertDictEqual(neo4_serializer.serialize_node(test_user2.create_next_node()),
                             {'KEY': 'test@email.com', 'LABEL': 'User', 'email': 'test@email.com', 'foo': 'bar'})
| tests/unit/models/test_user.py | 7,228 | Copyright Contributors to the Amundsen project. SPDX-License-Identifier: Apache-2.0 | 83 | en | 0.433107 |
from django.contrib import admin
from .models import *
from django.contrib.auth.models import User
class ImageAdmin(admin.ModelAdmin):
    # Admin form layout for Image; profile, post_date and user are displayed
    # but not editable.
    fields = ( 'image','name','caption','profile','post_date', 'user', )
    readonly_fields = ('profile', 'post_date', 'user',)
#registering the models
# NOTE(review): Image is registered with the default ModelAdmin below, so
# ImageAdmin above is currently unused — confirm whether that is intended.
# admin.site.register(Image, ImageAdmin)
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Like)
admin.site.register(Comment)
| insta/admin.py | 438 | registering the models admin.site.register(Image, ImageAdmin) | 61 | en | 0.289867 |
"""Registry for the TF Encrypted Converter."""
import array
import logging
import os
from typing import Any, List
from collections import OrderedDict
import yaml
import numpy as np
import tensorflow as tf
from ..layers import Conv2D, Relu, Sigmoid, Dense, AveragePooling2D, MaxPooling2D
from ..protocol.pond import PondPrivateTensor, PondMaskedTensor
def registry():
    """Map reserved names and scopes to their conversion functions."""
    # Pairs are listed (and inserted) in the same order as before so dict
    # iteration order is unchanged.
    ops = [
        ('Placeholder', _placeholder),
        ('Const', _constant),
        ('Conv2D', _conv2d),
        ('Relu', _relu),
        ('Sigmoid', _sigmoid),
        ('MatMul', _matmul),
        ('Shape', _shape),
        ('StridedSlice', _strided_slice),
        ('Add', _add),
        ('Sub', _sub),
        ('Transpose', _transpose),
        ('Reshape', _reshape),
        ('Pack', _pack),
        ('Rsqrt', _rsqrt),
        ('Mul', _mul),
        ('ExpandDims', _expand_dims),
        ('AvgPool', _avgpool),
        ('Squeeze', _squeeze),
        ('ConcatV2', _concat),
        ('BiasAdd', _bias_add),
        ('MaxPool', _maxpool),
        ('Pad', _pad),
        ('BatchToSpaceND', _batch_to_space_nd),
        ('SpaceToBatchND', _space_to_batch_nd),
        ('ArgMax', _argmax),
        ('required_space_to_batch_paddings', _required_space_to_batch_paddings),
        ('flatten', _flatten),
        ('conv2d', _keras_conv2d),
        ('Slice', _slice),
        ('Neg', _negative),
        ('Split', _split),
        ('Identity', _identity),
        ('GatherV2', _gather),
        ('dense', _keras_dense),
    ]
    return dict(ops)
# Locate and load the "special ops" specification that ships alongside this
# module; this runs once at import time.
convert_dir = os.path.dirname(os.path.abspath(__file__))
specops_path = os.path.join(convert_dir, "specops.yaml")
with open(specops_path, "r") as stream:
    loaded_yaml = yaml.load(stream, Loader=yaml.SafeLoader)
    # Sort entries by op name so iteration order is deterministic.
    sorted_yaml = sorted(loaded_yaml.items(), key=lambda kv: kv[0])
    REGISTERED_SPECOPS = OrderedDict(sorted_yaml)
# pylint: disable=unused-argument
# pylint: disable=missing-docstring
def _placeholder(converter, node: Any, inputs: List[str]) -> Any:
    """Create a tf.placeholder mirroring the node's recorded dtype and shape."""
    dtype = node.attr["dtype"].type
    shape = node.attr["shape"].shape
    return tf.placeholder(dtype, shape=shape)
def _constant(converter, node: Any, inputs: List[str]) -> Any:
    """Pass the NodeDef through untouched so downstream ops can reach its weights."""
    return node
def _identity(converter, node: Any, inputs: List[str]) -> Any:
    """Forward the output of the single input node unchanged."""
    source = inputs[0]
    return converter.outputs[source]
def _matmul(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a MatMul node into a private Dense layer.

    The left operand is the activation tensor; the right operand is expected
    to be a constant NodeDef whose serialized weights are fed in as the model
    provider's private input.
    """
    a = converter.outputs[inputs[0]]
    b = converter.outputs[inputs[1]]

    tensor = b.attr["value"].tensor
    b_shape = [i.size for i in tensor.tensor_shape.dim]

    transpose_a = node.attr["transpose_a"].b
    transpose_b = node.attr["transpose_b"].b

    layer = Dense(a.shape.as_list(),
                  b_shape[1],
                  transpose_input=transpose_a,
                  transpose_weight=transpose_b)

    # Decode the raw weight buffer according to its recorded dtype.
    dtype = tensor.dtype
    if dtype == tf.float32:
        nums = array.array('f', tensor.tensor_content)
    elif dtype == tf.float64:
        nums = array.array('d', tensor.tensor_content)
    else:
        raise TypeError("Unsupported dtype for weights")

    def inputter_fn():
        # Reshape the flat weight values back to the constant's shape.
        return tf.constant(np.array(nums).reshape(b_shape))

    # Weights enter the protocol privately via the model provider.
    w = converter.protocol.define_private_input(converter.model_provider,
                                                inputter_fn)
    layer.initialize(initial_weights=w)
    return layer.forward(a)
def _conv2d(converter, node, inputs):
    """Convert a Conv2D node into a private Conv2D layer.

    The kernel may arrive either as a constant NodeDef (converted to a
    private pond tensor) or as an already-converted tensor.
    """
    x_in = converter.outputs[inputs[0]]
    kernel = converter.outputs[inputs[1]]

    if isinstance(kernel, tf.NodeDef):
        shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
        w = _nodef_to_private_pond(converter, kernel)
    else:
        shape = kernel.shape.as_list()
        w = kernel

    fmt = node.attr["data_format"].s.decode('ascii')

    layer = Conv2D(x_in.shape.as_list(),
                   shape,
                   strides=int(max(node.attr["strides"].list.i)),
                   padding=node.attr["padding"].s.decode('ascii'),
                   channels_first=fmt == "NCHW")
    layer.initialize(initial_weights=w)
    out = layer.forward(x_in)
    return out
def _keras_conv2d(converter, interiors, inputs):
    """Convert a Keras conv2d scope (Conv2D op + kernel, optional bias)."""
    x_in = converter.outputs[inputs[0]]
    conv_op = interiors["Conv2D"]
    kernel = interiors["kernel"]
    k = _nodef_to_private_pond(converter, kernel)
    try:
        bias = interiors["bias"]
        b = _nodef_to_private_pond(converter, bias)
        # Expand the (C,) bias to (1, C, 1, 1) for channels-first broadcasting.
        for ax in [0, -1, -1]:
            b = b.expand_dims(axis=ax)
    except KeyError:
        # No bias recorded in this scope.
        b = None
    input_shape = x_in.shape.as_list()
    shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
    fmt = conv_op.attr["data_format"].s.decode('ascii')
    strides = int(max(conv_op.attr["strides"].list.i))
    padding = conv_op.attr["padding"].s.decode('ascii')
    layer = Conv2D(
        input_shape, shape,
        strides=strides,
        padding=padding,
        channels_first=fmt == "NCHW"
    )
    layer.initialize(initial_weights=k, initial_bias=b)
    out = layer.forward(x_in)
    return out
def _keras_dense(converter, interiors, inputs):
    """Convert a Keras dense scope (kernel, optional bias) to a Dense layer."""
    x_in = converter.outputs[inputs[0]]
    kernel = interiors["kernel"]
    k = _nodef_to_private_pond(converter, kernel)
    try:
        bias = interiors["bias"]
        b = _nodef_to_private_pond(converter, bias)
    except KeyError:
        # No bias recorded in this scope.
        b = None
    input_shape = x_in.shape.as_list()
    # Kernel shape is (in_features, out_features); Dense needs out_features.
    shape = [i.size for i in kernel.attr["value"].tensor.tensor_shape.dim]
    layer = Dense(input_shape,
                  out_features=shape[1])
    layer.initialize(initial_weights=k, initial_bias=b)
    out = layer.forward(x_in)
    return out
def _relu(converter, node: Any, inputs: List[str]) -> Any:
    """Apply a ReLU layer to the single input tensor."""
    tensor = converter.outputs[inputs[0]]
    layer = Relu(tensor.shape.as_list())
    return layer.forward(tensor)
def _sigmoid(converter, node: Any, inputs: List[str]) -> Any:
    """Apply a Sigmoid layer to the single input tensor."""
    tensor = converter.outputs[inputs[0]]
    layer = Sigmoid(tensor.shape.as_list())
    return layer.forward(tensor)
def _strided_slice(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a StridedSlice node; begin/end/strides come from constant nodes."""
    x_in = converter.outputs[inputs[0]]

    if isinstance(x_in, tf.NodeDef):
        input_out = _nodef_to_private_pond(converter, x_in)
    else:
        input_out = x_in

    begin = converter.outputs[inputs[1]]
    end = converter.outputs[inputs[2]]
    strides = converter.outputs[inputs[3]]

    # Bit-mask attributes controlling slice semantics (same meaning as
    # tf.strided_slice).
    begin_mask = node.attr["begin_mask"].i
    end_mask = node.attr["end_mask"].i
    ellipsis_mask = node.attr["ellipsis_mask"].i
    new_axis_mask = node.attr["new_axis_mask"].i
    shrink_axis_mask = node.attr["shrink_axis_mask"].i

    # Materialize the constant NodeDefs as tf constants.
    begin = tf.constant(begin.attr["value"].tensor)
    end = tf.constant(end.attr["value"].tensor)
    strides = tf.constant(strides.attr["value"].tensor)

    return converter.protocol.strided_slice(input_out, begin, end,
                                            strides=strides,
                                            begin_mask=begin_mask,
                                            end_mask=end_mask,
                                            ellipsis_mask=ellipsis_mask,
                                            new_axis_mask=new_axis_mask,
                                            shrink_axis_mask=shrink_axis_mask)
def _pack(converter, node: Any, inputs: List[str]) -> Any:
    """Stack all input tensors along the axis recorded on the node."""
    def resolve(name):
        # Constant NodeDefs must first be converted to private pond tensors.
        candidate = converter.outputs[name]
        if isinstance(candidate, tf.NodeDef):
            return _nodef_to_private_pond(converter, candidate)
        return candidate

    tensors = [resolve(name) for name in inputs]
    return converter.protocol.stack(tensors, axis=node.attr["axis"].i)
def _bias_add(converter, node: Any, inputs: List[str]) -> Any:
    """Add the bias operand to the input operand via the protocol's add."""
    def resolve(name):
        # Constant NodeDefs must first be converted to private pond tensors.
        candidate = converter.outputs[name]
        if isinstance(candidate, tf.NodeDef):
            return _nodef_to_private_pond(converter, candidate)
        return candidate

    left = resolve(inputs[0])
    right = resolve(inputs[1])
    return converter.protocol.add(left, right)
def _maxpool(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a MaxPool node into a tfe MaxPooling2D forward pass."""
    tensor = converter.outputs[inputs[0]]
    attrs = node.attr
    kernel = attrs["ksize"].list.i
    stride = attrs["strides"].list.i
    padding = attrs["padding"].s.decode('ascii')
    # Only the spatial (H, W) components of ksize/strides are used.
    nchw = attrs["data_format"].s.decode('ascii') == "NCHW"
    in_shape = [int(dim) for dim in tensor.shape]
    pooler = MaxPooling2D(in_shape,
                          [kernel[1], kernel[2]],
                          [stride[1], stride[2]],
                          padding,
                          nchw)
    return pooler.forward(tensor)
def _shape(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Shape node: return the input tensor's static shape."""
    return converter.outputs[inputs[0]].shape
def _reshape(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Reshape node.

    The target shape is a graph constant; its raw tensor_content bytes are
    decoded according to the declared dtype and passed to protocol.reshape.

    Raises:
        TypeError: if the shape constant is neither int32 nor int64.
    """
    x_in = converter.outputs[inputs[0]]
    shape = converter.outputs[inputs[1]]
    tensor = shape.attr["value"].tensor
    dtype = shape.attr["dtype"].type
    if dtype == tf.int32:
        nums = array.array('i', tensor.tensor_content)
    elif dtype == tf.int64:
        # Use 'q' (always 8 bytes) rather than 'l', which is only 4 bytes
        # on LLP64 platforms (e.g. 64-bit Windows) and would misparse the
        # int64 buffer there.
        nums = array.array('q', tensor.tensor_content)
    else:
        raise TypeError("Unsupported dtype for reshape shape")
    return converter.protocol.reshape(x_in, list(nums))
def _transpose(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Transpose node.

    The permutation is a graph constant; its raw bytes are decoded per the
    declared dtype, reshaped to the constant's shape, and passed to
    protocol.transpose.

    Raises:
        TypeError: if the permutation constant is neither int32 nor int64.
    """
    x_in = converter.outputs[inputs[0]]
    perm = converter.outputs[inputs[1]]
    tensor = perm.attr["value"].tensor
    shape = [i.size for i in tensor.tensor_shape.dim]
    dtype = perm.attr["dtype"].type
    if dtype == tf.int32:
        nums = array.array('i', tensor.tensor_content)
    elif dtype == tf.int64:
        # 'q' is guaranteed 8 bytes; 'l' is platform-dependent (4 bytes on
        # LLP64 systems such as 64-bit Windows) and would misparse int64.
        nums = array.array('q', tensor.tensor_content)
    else:
        raise TypeError("Unsupported dtype for transpose perm")
    return converter.protocol.transpose(x_in, np.array(nums).reshape(shape))
def _expand_dims(converter, node: Any, inputs: List[str]) -> Any:
    """Convert an ExpandDims node: insert a length-1 axis at the given axis."""
    tensor = converter.outputs[inputs[0]]
    if isinstance(tensor, tf.NodeDef):
        tensor = _nodef_to_private_pond(converter, tensor)
    axis_node = converter.outputs[inputs[1]]
    # The axis is stored as a scalar int32 constant in the graph.
    axis_val = array.array('i', axis_node.attr["value"].tensor.int_val)[0]
    return converter.protocol.expand_dims(tensor, axis_val)
def _negative(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Neg node: element-wise negation."""
    tensor = converter.outputs[inputs[0]]
    if isinstance(tensor, tf.NodeDef):
        tensor = _nodef_to_private_pond(converter, tensor)
    return converter.protocol.negative(tensor)
def _gather(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a GatherV2 node: select indices along an axis."""
    tensor = converter.outputs[inputs[0]]
    indices_node = converter.outputs[inputs[1]]
    axis_node = converter.outputs[inputs[2]]
    if isinstance(tensor, tf.NodeDef):
        tensor = _nodef_to_private_pond(converter, tensor)
    gather_indices = list(_nodef_to_numpy_array(indices_node))
    gather_axis = axis_node.attr["value"].tensor.int_val[0]
    return converter.protocol.gather(tensor, gather_indices, gather_axis)
def _squeeze(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Squeeze node: drop the length-1 axes listed on the node."""
    tensor = converter.outputs[inputs[0]]
    dims = node.attr["squeeze_dims"].list.i
    return converter.protocol.squeeze(tensor, list(dims))
def _split(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Split node.

    NOTE: only the first of the resulting chunks is returned, mirroring
    the original converter's behavior.
    """
    axis_node = converter.outputs[inputs[0]]
    tensor = converter.outputs[inputs[1]]
    if isinstance(tensor, tf.NodeDef):
        tensor = _nodef_to_private_pond(converter, tensor)
    pieces = node.attr["num_split"].i
    split_axis = axis_node.attr["value"].tensor.int_val[0]
    return converter.protocol.split(tensor, pieces, split_axis)[0]
def _pad(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Pad node: apply per-axis (before, after) paddings."""
    tensor = converter.outputs[inputs[0]]
    paddings_node = converter.outputs[inputs[1]]
    raw = list(array.array('I', paddings_node.attr["value"].tensor.tensor_content))
    # Regroup the flat buffer into [before, after] pairs, one per axis.
    pairs = [raw[idx:idx + 2] for idx in range(0, len(raw), 2)]
    return converter.protocol.pad(tensor, pairs)
def _rsqrt(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Rsqrt node by evaluating 1/sqrt in plaintext.

    The result is fed back into the protocol as a *public* input: either
    computed with numpy from a constant NodeDef, or with tf.rsqrt on the
    decoded (public) value of an already-converted tensor.
    """
    x_in = converter.outputs[inputs[0]]
    if isinstance(x_in, tf.NodeDef):
        # Constant input: decode the raw tensor bytes and precompute
        # 1/sqrt with numpy.
        tensor = x_in.attr["value"].tensor
        shape = [i.size for i in tensor.tensor_shape.dim]
        dtype = x_in.attr["dtype"].type
        if dtype == tf.float32:
            nums = array.array('f', tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array('d', tensor.tensor_content)
        else:
            raise TypeError("Unsupported dtype for rsqrt")
        # Closure captures nums/shape from this branch.
        def inputter_fn():
            return tf.constant(1 / np.sqrt(np.array(nums).reshape(shape)))
    else:
        # XXX this is a little weird but the input into rsqrt is public and
        # being used only for batchnorm at the moment
        decoded = converter.protocol._decode(x_in.value_on_0, True) # pylint: disable=protected-access
        def inputter_fn():
            return tf.rsqrt(decoded)
    # Re-introduce the plaintext result as a public protocol input.
    x = converter.protocol.define_public_input(
        converter.model_provider, inputter_fn)
    return x
def _add(converter, node: Any, inputs: List[str]) -> Any:
    """Convert an Add node: element-wise addition.

    Constant NodeDef operands are promoted to *public* pond tensors.
    """
    lhs = converter.outputs[inputs[0]]
    rhs = converter.outputs[inputs[1]]
    if isinstance(lhs, tf.NodeDef):
        lhs = _nodef_to_public_pond(converter, lhs)
    if isinstance(rhs, tf.NodeDef):
        rhs = _nodef_to_public_pond(converter, rhs)
    return converter.protocol.add(lhs, rhs)
def _sub(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Sub node: element-wise subtraction.

    Constant NodeDef operands are promoted to *public* pond tensors.
    """
    lhs = converter.outputs[inputs[0]]
    rhs = converter.outputs[inputs[1]]
    if isinstance(lhs, tf.NodeDef):
        lhs = _nodef_to_public_pond(converter, lhs)
    if isinstance(rhs, tf.NodeDef):
        rhs = _nodef_to_public_pond(converter, rhs)
    return converter.protocol.sub(lhs, rhs)
def _mul(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a Mul node: element-wise multiplication.

    Constant NodeDef operands are promoted to *public* pond tensors.
    """
    lhs = converter.outputs[inputs[0]]
    rhs = converter.outputs[inputs[1]]
    if isinstance(lhs, tf.NodeDef):
        lhs = _nodef_to_public_pond(converter, lhs)
    if isinstance(rhs, tf.NodeDef):
        rhs = _nodef_to_public_pond(converter, rhs)
    return converter.protocol.mul(lhs, rhs)
def _avgpool(converter, node: Any, inputs: List[str]) -> Any:
    """Convert an AvgPool node into a tfe AveragePooling2D forward pass."""
    tensor = converter.outputs[inputs[0]]
    attrs = node.attr
    kernel = attrs["ksize"].list.i
    stride = attrs["strides"].list.i
    padding = attrs["padding"].s.decode('ascii')
    # Only the spatial (H, W) components of ksize/strides are used.
    nchw = attrs["data_format"].s.decode('ascii') == "NCHW"
    in_shape = [int(dim) for dim in tensor.shape]
    pooler = AveragePooling2D(in_shape,
                              [kernel[1], kernel[2]],
                              [stride[1], stride[2]],
                              padding,
                              nchw)
    return pooler.forward(tensor)
def _concat(converter, node: Any, inputs: List[str]) -> Any:
    """Convert a ConcatV2 node: join two tensors along an axis."""
    tensors = [converter.outputs[inputs[0]], converter.outputs[inputs[1]]]
    axis_node = converter.outputs[inputs[2]]
    axis_val = axis_node.attr["value"].tensor.int_val[0]
    return converter.protocol.concat(tensors, axis_val)
def _batch_to_space_nd(converter, node, inputs):
    """Convert a BatchToSpaceND node; block_shape and crops are constants."""
    tensor = converter.outputs[inputs[0]]
    block = converter.outputs[inputs[1]].attr["value"].tensor
    crop = converter.outputs[inputs[2]].attr["value"].tensor
    return converter.protocol.batch_to_space_nd(tensor, block, crop)
def _space_to_batch_nd(converter, node, inputs):
    """Convert a SpaceToBatchND node; block_shape and paddings are constants."""
    tensor = converter.outputs[inputs[0]]
    block = converter.outputs[inputs[1]].attr["value"].tensor
    pads = converter.outputs[inputs[2]].attr["value"].tensor
    return converter.protocol.space_to_batch_nd(tensor, block, pads)
def _flatten(converter, node, inputs):
    """Convert a Flatten node: reshape to [batch, product-of-other-dims]."""
    tensor = converter.outputs[inputs[0]]
    flat_size = 1
    # Multiply out every dimension except the leading batch dimension.
    for dim in tensor.shape.as_list()[1:]:
        flat_size *= dim
    return converter.protocol.reshape(tensor, [-1, flat_size])
def _required_space_to_batch_paddings(converter, node, inputs: List[str]):
    """Compute paddings/crops via plaintext tf.required_space_to_batch_paddings.

    Any private/masked inputs are revealed (with a warning) since this op
    assumes public values. Returns a (paddings, crops) pair of public
    protocol inputs.
    """
    inputs_node = [converter.outputs[inputs[i]] for i in range(len(inputs))]
    inputs_int32 = []
    for x_in in inputs_node:
        pvt_check = isinstance(x_in, PondPrivateTensor)
        msk_check = isinstance(x_in, PondMaskedTensor)
        if pvt_check or msk_check:
            logging.warning(("Revealing private input: "
                             "required_space_to_batch_paddings assumes public "
                             "input."))
            inputs_int32.append(tf.cast(x_in.reveal().decode(), tf.int32))
        elif isinstance(x_in, tf.NodeDef):
            inputs_int32.append(_nodef_to_numpy_array(x_in))
        else:
            raise TypeError("Unexpected input of type {}.".format(type(x_in)))
    if len(inputs_int32) == 2:
        # Two operands: (input_shape, block_shape) -- no base paddings.
        input_shape, block_shape = inputs_int32
        def inputter_pad():
            pads, _ = tf.required_space_to_batch_paddings(input_shape, block_shape)
            return tf.cast(pads, tf.float64)
        def inputter_crop():
            _, crops = tf.required_space_to_batch_paddings(input_shape, block_shape)
            return tf.cast(crops, tf.float64)
    else:
        # Three operands: (base_paddings, input_shape, block_shape).
        base_paddings, input_shape, block_shape = inputs_int32
        def inputter_pad():
            pads, _ = tf.required_space_to_batch_paddings(
                input_shape,
                block_shape,
                base_paddings=base_paddings,
            )
            return tf.cast(pads, tf.float64)
        def inputter_crop():
            _, crops = tf.required_space_to_batch_paddings(
                input_shape,
                block_shape,
                base_paddings=base_paddings,
            )
            return tf.cast(crops, tf.float64)
    # Feed the plaintext results back in as public protocol inputs.
    pad_private = converter.protocol.define_public_input(
        converter.model_provider, inputter_pad)
    crop_private = converter.protocol.define_public_input(
        converter.model_provider, inputter_crop)
    return (pad_private, crop_private)
def _argmax(converter, node, inputs):
    """Convert an ArgMax node; the axis operand is a scalar constant."""
    tensor = converter.outputs[inputs[0]]
    axis_val = converter.outputs[inputs[1]].attr["value"].tensor.int_val[0]
    return converter.protocol.argmax(tensor, axis=axis_val)
def _slice(converter, node, inputs):
    """Convert a Slice node by lowering it onto protocol.strided_slice."""
    x_in = converter.outputs[inputs[0]]
    begin = _nodef_to_numpy_array(converter.outputs[inputs[1]])
    size = _nodef_to_numpy_array(converter.outputs[inputs[2]])
    if isinstance(x_in, tf.NodeDef):
        input_out = _nodef_to_private_pond(converter, x_in)
    else:
        input_out = x_in
    # Slice is a special case of strided_slice. Slice takes size (the number of
    # elements we want to slice) as an input. However strided_slice takes end
    # (integer until which the slicing takes place) as input.
    # We can infer the end parameter with: end[i] = begin[i] + size[i].
    # If size is negative, the stepping go towards smaller indices.
    # In this case we can infer the end parameter with: end[i] = input_shape[i] - size[i] + 1
    # NOTE(review): for TF's sentinel size == -1 ("all remaining elements")
    # this formula yields input_shape[i] + 2, not input_shape[i]; it
    # presumably relies on strided_slice clamping end to the dimension
    # size -- confirm against protocol.strided_slice behavior.
    end = np.zeros(len(begin))
    input_shape = x_in.shape.as_list()
    # if size is negative take the input dimension
    for i in range(len(end)):  # pylint: disable=consider-using-enumerate
        if size[i] < 0:
            end[i] = input_shape[i] - size[i] + 1
        else:
            end[i] = begin[i] + size[i]
    return converter.protocol.strided_slice(input_out, begin, end)
# pylint: enable=unused-argument
# pylint: enable=missing-docstring
def _nodef_to_public_pond(converter, x):
    """Map a NodeDef x to a PublicPondTensor.

    Scalars are read from the typed *_val fields and reshaped to (1, 1);
    non-scalars are decoded from the raw tensor_content bytes.

    Raises:
        TypeError: for dtypes other than float32/float64/int32.
    """
    dtype = x.attr["dtype"].type
    x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
    if not x_shape:
        if dtype == tf.float32:
            nums = x.attr["value"].tensor.float_val
        elif dtype == tf.float64:
            # DT_DOUBLE scalars are stored in double_val, not float_val.
            nums = x.attr["value"].tensor.double_val
        elif dtype == tf.int32:
            nums = x.attr["value"].tensor.int_val
        else:
            raise TypeError("Unsupported dtype")
        def inputter_fn():
            return tf.constant(np.array(nums).reshape(1, 1))
    else:
        if dtype == tf.float32:
            nums = array.array('f', x.attr["value"].tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array('d', x.attr["value"].tensor.tensor_content)
        elif dtype == tf.int32:
            nums = array.array('i', x.attr["value"].tensor.tensor_content)
        else:
            raise TypeError("Unsupported dtype")
        def inputter_fn():
            return tf.constant(np.array(nums).reshape(x_shape))
    x_public = converter.protocol.define_public_input(
        converter.model_provider, inputter_fn)
    return x_public
def _nodef_to_private_pond(converter, x):
    """Map a NodeDef x to a PrivatePondTensor.

    Scalars are read from the typed *_val fields and reshaped to (1, 1);
    non-scalars are decoded from the raw tensor_content bytes. int32
    constants are accepted with a warning since they are unexpected here.

    Raises:
        TypeError: for dtypes other than float32/float64/int32.
    """
    dtype = x.attr["dtype"].type
    warn_msg = "Unexpected dtype {} found at node {}"
    err_msg = "Unsupported dtype {} found at node {}"
    x_shape = [i.size for i in x.attr["value"].tensor.tensor_shape.dim]
    if not x_shape:
        if dtype == tf.float32:
            nums = x.attr["value"].tensor.float_val
        elif dtype == tf.float64:
            # DT_DOUBLE scalars are stored in double_val, not float_val.
            nums = x.attr["value"].tensor.double_val
        elif dtype == tf.int32:
            logging.warning(warn_msg, dtype, x.name)
            nums = x.attr["value"].tensor.int_val
        else:
            raise TypeError(err_msg.format(dtype, x.name))
        def inputter_fn():
            return tf.constant(np.array(nums).reshape(1, 1))
    else:
        if dtype == tf.float32:
            nums = array.array('f', x.attr["value"].tensor.tensor_content)
        elif dtype == tf.float64:
            nums = array.array('d', x.attr["value"].tensor.tensor_content)
        elif dtype == tf.int32:
            logging.warning(warn_msg, dtype, x.name)
            nums = array.array('i', x.attr["value"].tensor.tensor_content)
        else:
            raise TypeError(err_msg.format(dtype, x.name))
        def inputter_fn():
            return tf.constant(np.array(nums).reshape(x_shape))
    x_private = converter.protocol.define_private_input(
        converter.model_provider, inputter_fn)
    return x_private
def _nodef_to_numpy_array(x):
    """Map a NodeDef x to a np.array."""
    dtype = x.attr["dtype"].type
    tensor = x.attr["value"].tensor
    x_shape = [dim.size for dim in tensor.tensor_shape.dim]
    # array typecodes matching each supported TensorFlow dtype.
    if dtype == tf.float32:
        typecode = 'f'
    elif dtype == tf.float64:
        typecode = 'd'
    elif dtype == tf.int32:
        typecode = 'i'
    else:
        raise TypeError("Unsupported dtype")
    nums = array.array(typecode, tensor.tensor_content)
    return np.array(nums).reshape(x_shape)
| tf_encrypted/convert/register.py | 21,596 | Map a NodeDef x to a np.array.
Map a NodeDef x to a PrivatePondTensor.
Map a NodeDef x to a PublicPondTensor.
Map reserved names and scopes to their conversion functions.
Registry for the TF Encrypted Converter.
pylint: disable=unused-argument pylint: disable=missing-docstring need to able to access the underlying weights return the node need to able to access the underlying weights return the node XXX this is a little weird but the input into rsqrt is public and being used only for batchnorm at the moment pylint: disable=protected-access Slice is a special case of strided_slice. Slice takes size (the number of elements we want to slice) as an input. However strided_slice takes end (integer until which the slicing takes place) as input. We can infere the end parameter with : end[i] = begin[i] + size[i]. If size is negative, the stepping go towards smaller indices. In this case we can infer the end parameter with: end[i] = input_shape[i] - size[i] + 1 if size is negative take the input dimension pylint: disable=consider-using-enumerate pylint: enable=unused-argument pylint: enable=missing-docstring | 1,116 | en | 0.767674 |
from lldbsuite.test.lldbtest import *
import os
import vscode
class VSCodeTestCaseBase(TestBase):
    '''Base class for lldb-vscode tests: drives a vscode.DebugAdaptor and
       provides helpers for launching/attaching, breakpoints, stepping and
       inspecting program state.
    '''

    NO_DEBUG_INFO_TESTCASE = True

    def create_debug_adaptor(self):
        '''Create the Visual Studio Code debug adaptor'''
        self.assertTrue(os.path.exists(self.lldbVSCodeExec),
                        'lldb-vscode must exist')
        log_file_path = self.getBuildArtifact('vscode.txt')
        self.vscode = vscode.DebugAdaptor(
            executable=self.lldbVSCodeExec, init_commands=self.setUpCommands(),
            log_file=log_file_path)

    def build_and_create_debug_adaptor(self):
        '''Build the default Makefile target, then create the debug adaptor.'''
        self.build()
        self.create_debug_adaptor()

    def _breakpoint_ids(self, response):
        '''Extract the breakpoint ID strings ("1", "2", ...) from a
           setBreakpoints/setFunctionBreakpoints response, or return []
           when the request failed.
        '''
        if response is None:
            return []
        # "bp" rather than "breakpoint" to avoid shadowing the builtin.
        return ['%i' % (bp['id']) for bp in response['body']['breakpoints']]

    def set_source_breakpoints(self, source_path, lines, condition=None,
                               hitCondition=None):
        '''Sets source breakpoints and returns an array of strings containing
           the breakpoint IDs ("1", "2") for each breakpoint that was set.
        '''
        response = self.vscode.request_setBreakpoints(
            source_path, lines, condition=condition, hitCondition=hitCondition)
        return self._breakpoint_ids(response)

    def set_function_breakpoints(self, functions, condition=None,
                                 hitCondition=None):
        '''Sets breakpoints by function name given an array of function names
           and returns an array of strings containing the breakpoint IDs
           ("1", "2") for each breakpoint that was set.
        '''
        response = self.vscode.request_setFunctionBreakpoints(
            functions, condition=condition, hitCondition=hitCondition)
        return self._breakpoint_ids(response)

    def verify_breakpoint_hit(self, breakpoint_ids):
        '''Wait for the process we are debugging to stop, and verify we hit
           any breakpoint location in the "breakpoint_ids" array.
           "breakpoint_ids" should be a list of breakpoint ID strings
           (["1", "2"]). The return value from self.set_source_breakpoints()
           or self.set_function_breakpoints() can be passed to this function'''
        stopped_events = self.vscode.wait_for_stopped()
        for stopped_event in stopped_events:
            if 'body' in stopped_event:
                body = stopped_event['body']
                if 'reason' not in body:
                    continue
                if body['reason'] != 'breakpoint':
                    continue
                if 'description' not in body:
                    continue
                # Descriptions for breakpoints will be in the form
                # "breakpoint 1.1", so look for any description that matches
                # ("breakpoint 1.") in the description field as verification
                # that one of the breakpoint locations was hit. VSCode doesn't
                # allow breakpoints to have multiple locations, but LLDB does.
                # So when looking at the description we just want to make sure
                # the right breakpoint matches and not worry about the actual
                # location.
                description = body['description']
                print("description: %s" % (description))
                for breakpoint_id in breakpoint_ids:
                    match_desc = 'breakpoint %s.' % (breakpoint_id)
                    if match_desc in description:
                        return
        self.assertTrue(False, "breakpoint not hit")

    def verify_exception_breakpoint_hit(self, filter_label):
        '''Wait for the process we are debugging to stop, and verify the stop
           reason is 'exception' and that the description matches
           'filter_label'
        '''
        stopped_events = self.vscode.wait_for_stopped()
        for stopped_event in stopped_events:
            if 'body' in stopped_event:
                body = stopped_event['body']
                if 'reason' not in body:
                    continue
                if body['reason'] != 'exception':
                    continue
                if 'description' not in body:
                    continue
                description = body['description']
                if filter_label == description:
                    return True
        return False

    def verify_commands(self, flavor, output, commands):
        '''Verify that each command appears on a "(lldb) "-prefixed line of
           the console output.'''
        self.assertTrue(output and len(output) > 0, "expect console output")
        lines = output.splitlines()
        prefix = '(lldb) '
        for cmd in commands:
            found = False
            for line in lines:
                if line.startswith(prefix) and cmd in line:
                    found = True
                    break
            self.assertTrue(found,
                            "verify '%s' found in console output for '%s'" % (
                                cmd, flavor))

    def get_dict_value(self, d, key_path):
        '''Verify each key in the key_path array is in contained in each
           dictionary within "d". Assert if any key isn't in the
           corresponding dictionary. This is handy for grabbing values from VS
           Code response dictionary like getting
           response['body']['stackFrames']
        '''
        value = d
        for key in key_path:
            if key in value:
                value = value[key]
            else:
                self.assertTrue(key in value,
                                'key "%s" from key_path "%s" not in "%s"' % (
                                    key, key_path, d))
        return value

    def get_stackFrames_and_totalFramesCount(self, threadId=None,
                                             startFrame=None, levels=None,
                                             dump=False):
        '''Request a stack trace and return (stackFrames, totalFrames), or
           (None, 0) if the request failed.'''
        response = self.vscode.request_stackTrace(threadId=threadId,
                                                  startFrame=startFrame,
                                                  levels=levels,
                                                  dump=dump)
        if response:
            stackFrames = self.get_dict_value(response, ['body', 'stackFrames'])
            totalFrames = self.get_dict_value(response, ['body', 'totalFrames'])
            self.assertTrue(totalFrames > 0,
                            'verify totalFrames count is provided by extension that supports '
                            'async frames loading')
            return (stackFrames, totalFrames)
        return (None, 0)

    def get_stackFrames(self, threadId=None, startFrame=None, levels=None,
                        dump=False):
        '''Request a stack trace and return just the stackFrames array.'''
        (stackFrames, totalFrames) = self.get_stackFrames_and_totalFramesCount(
            threadId=threadId,
            startFrame=startFrame,
            levels=levels,
            dump=dump)
        return stackFrames

    def get_source_and_line(self, threadId=None, frameIndex=0):
        '''Return (source_path, line) for the given frame, or ('', 0) when
           unavailable.'''
        stackFrames = self.get_stackFrames(threadId=threadId,
                                           startFrame=frameIndex,
                                           levels=1)
        if stackFrames is not None:
            stackFrame = stackFrames[0]
            if 'source' in stackFrame:
                source = stackFrame['source']
                if 'path' in source:
                    if 'line' in stackFrame:
                        return (source['path'], stackFrame['line'])
        return ('', 0)

    def get_stdout(self, timeout=0.0):
        '''Return accumulated stdout output from the debuggee.'''
        return self.vscode.get_output('stdout', timeout=timeout)

    def get_console(self, timeout=0.0):
        '''Return accumulated debug console output.'''
        return self.vscode.get_output('console', timeout=timeout)

    def get_local_as_int(self, name, threadId=None):
        '''Fetch a local variable's value and parse it as an integer,
           honoring hex ("0x...") and octal ("0...") prefixes.'''
        value = self.vscode.get_local_variable_value(name, threadId=threadId)
        if value.startswith('0x'):
            return int(value, 16)
        elif value.startswith('0'):
            return int(value, 8)
        else:
            return int(value)

    def set_local(self, name, value, id=None):
        '''Set a top level local variable only.'''
        return self.vscode.request_setVariable(1, name, str(value), id=id)

    def set_global(self, name, value, id=None):
        '''Set a top level global variable only.'''
        return self.vscode.request_setVariable(2, name, str(value), id=id)

    def stepIn(self, threadId=None, waitForStop=True):
        '''Step into; optionally wait for and return the stop events.'''
        self.vscode.request_stepIn(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def stepOver(self, threadId=None, waitForStop=True):
        '''Step over; optionally wait for and return the stop events.'''
        self.vscode.request_next(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def stepOut(self, threadId=None, waitForStop=True):
        '''Step out; optionally wait for and return the stop events.'''
        self.vscode.request_stepOut(threadId=threadId)
        if waitForStop:
            return self.vscode.wait_for_stopped()
        return None

    def continue_to_next_stop(self):
        '''Continue and wait for the next stop; return the stop events.'''
        self.vscode.request_continue()
        return self.vscode.wait_for_stopped()

    def continue_to_breakpoints(self, breakpoint_ids):
        '''Continue and verify one of the given breakpoints was hit.'''
        self.vscode.request_continue()
        self.verify_breakpoint_hit(breakpoint_ids)

    def continue_to_exception_breakpoint(self, filter_label):
        '''Continue and verify the given exception breakpoint was hit.'''
        self.vscode.request_continue()
        self.assertTrue(self.verify_exception_breakpoint_hit(filter_label),
                        'verify we got "%s"' % (filter_label))

    def continue_to_exit(self, exitCode=0):
        '''Continue and verify the process exits with the given exit code.'''
        self.vscode.request_continue()
        stopped_events = self.vscode.wait_for_stopped()
        self.assertEqual(len(stopped_events), 1,
                         "stopped_events = {}".format(stopped_events))
        self.assertEqual(stopped_events[0]['event'], 'exited',
                         'make sure program ran to completion')
        self.assertEqual(stopped_events[0]['body']['exitCode'], exitCode,
                         'exitCode == %i' % (exitCode))

    def attach(self, program=None, pid=None, waitFor=None, trace=None,
               initCommands=None, preRunCommands=None, stopCommands=None,
               exitCommands=None, attachCommands=None, coreFile=None):
        '''Build the default Makefile target, create the VSCode debug adaptor,
           and attach to the process.
        '''
        # Make sure we disconnect and terminate the VSCode debug adaptor even
        # if we throw an exception during the test case.
        def cleanup():
            self.vscode.request_disconnect(terminateDebuggee=True)
            self.vscode.terminate()
        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)
        # Initialize and launch the program
        self.vscode.request_initialize()
        response = self.vscode.request_attach(
            program=program, pid=pid, waitFor=waitFor, trace=trace,
            initCommands=initCommands, preRunCommands=preRunCommands,
            stopCommands=stopCommands, exitCommands=exitCommands,
            attachCommands=attachCommands, coreFile=coreFile)
        if not (response and response['success']):
            self.assertTrue(response['success'],
                            'attach failed (%s)' % (response['message']))

    def launch(self, program=None, args=None, cwd=None, env=None,
               stopOnEntry=False, disableASLR=True,
               disableSTDIO=False, shellExpandArguments=False,
               trace=False, initCommands=None, preRunCommands=None,
               stopCommands=None, exitCommands=None, sourcePath=None,
               debuggerRoot=None, launchCommands=None, sourceMap=None):
        '''Sending launch request to vscode
        '''
        # Make sure we disconnect and terminate the VSCode debug adapter,
        # if we throw an exception during the test case
        def cleanup():
            self.vscode.request_disconnect(terminateDebuggee=True)
            self.vscode.terminate()
        # Execute the cleanup function during test case tear down.
        self.addTearDownHook(cleanup)
        # Initialize and launch the program
        self.vscode.request_initialize()
        response = self.vscode.request_launch(
            program,
            args=args,
            cwd=cwd,
            env=env,
            stopOnEntry=stopOnEntry,
            disableASLR=disableASLR,
            disableSTDIO=disableSTDIO,
            shellExpandArguments=shellExpandArguments,
            trace=trace,
            initCommands=initCommands,
            preRunCommands=preRunCommands,
            stopCommands=stopCommands,
            exitCommands=exitCommands,
            sourcePath=sourcePath,
            debuggerRoot=debuggerRoot,
            launchCommands=launchCommands,
            sourceMap=sourceMap)
        if not (response and response['success']):
            self.assertTrue(response['success'],
                            'launch failed (%s)' % (response['message']))

    def build_and_launch(self, program, args=None, cwd=None, env=None,
                         stopOnEntry=False, disableASLR=True,
                         disableSTDIO=False, shellExpandArguments=False,
                         trace=False, initCommands=None, preRunCommands=None,
                         stopCommands=None, exitCommands=None,
                         sourcePath=None, debuggerRoot=None):
        '''Build the default Makefile target, create the VSCode debug adaptor,
           and launch the process.
        '''
        self.build_and_create_debug_adaptor()
        self.assertTrue(os.path.exists(program), 'executable must exist')
        self.launch(program, args, cwd, env, stopOnEntry, disableASLR,
                    disableSTDIO, shellExpandArguments, trace,
                    initCommands, preRunCommands, stopCommands, exitCommands,
                    sourcePath, debuggerRoot)
| lldb/packages/Python/lldbsuite/test/tools/lldb-vscode/lldbvscode_testcase.py | 14,257 | Build the default Makefile target, create the VSCode debug adaptor,
and attach to the process.
Build the default Makefile target, create the VSCode debug adaptor,
and launch the process.
Create the Visual Studio Code debug adaptor
Verify each key in the key_path array is in contained in each
dictionary within "d". Assert if any key isn't in the
corresponding dictionary. This is handy for grabbing values from VS
Code response dictionary like getting
response['body']['stackFrames']
Sending launch request to vscode
Sets breakpoints by function name given an array of function names
and returns an array of strings containing the breakpoint IDs
("1", "2") for each breakpoint that was set.
Set a top level global variable only.
Set a top level local variable only.
Sets source breakpoints and returns an array of strings containing
the breakpoint IDs ("1", "2") for each breakpoint that was set.
Wait for the process we are debugging to stop, and verify we hit
any breakpoint location in the "breakpoint_ids" array.
"breakpoint_ids" should be a list of breakpoint ID strings
(["1", "2"]). The return value from self.set_source_breakpoints()
or self.set_function_breakpoints() can be passed to this function
Wait for the process we are debugging to stop, and verify the stop
reason is 'exception' and that the description matches
'filter_label'
Descriptions for breakpoints will be in the form "breakpoint 1.1", so look for any description that matches ("breakpoint 1.") in the description field as verification that one of the breakpoint locations was hit. VSCode doesn't allow breakpoints to have multiple locations, but LLDB does. So when looking at the description we just want to make sure the right breakpoint matches and not worry about the actual location. Make sure we disconnect and terminate the VSCode debug adaptor even if we throw an exception during the test case. Execute the cleanup function during test case tear down. Initialize and launch the program Make sure we disconnect and terminate the VSCode debug adapter, if we throw an exception during the test case Execute the cleanup function during test case tear down. Initialize and launch the program | 2,183 | en | 0.878214 |
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import threading
import warnings
import multiprocessing.pool
from functools import partial
from .. import backend as K
from ..utils.data_utils import Sequence
try:
from PIL import ImageEnhance
from PIL import Image as pil_image
except ImportError:
pil_image = None
# Map interpolation-method names to PIL resampling filters. Newer filters
# are added only when the installed PIL/Pillow version provides them.
if pil_image is not None:
    _PIL_INTERPOLATION_METHODS = {
        'nearest': pil_image.NEAREST,
        'bilinear': pil_image.BILINEAR,
        'bicubic': pil_image.BICUBIC,
    }
    # These methods were only introduced in version 3.4.0 (2016).
    if hasattr(pil_image, 'HAMMING'):
        _PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
    if hasattr(pil_image, 'BOX'):
        _PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
    # This method is new in version 1.1.3 (2013).
    if hasattr(pil_image, 'LANCZOS'):
        _PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x, rg, row_axis=1, col_axis=2, channel_axis=0,
                    fill_mode='nearest', cval=0.):
    """Performs a random rotation of a Numpy image tensor.

    # Arguments
        x: Input tensor. Must be 3D.
        rg: Rotation range, in degrees.
        row_axis: Index of axis for rows in the input tensor.
        col_axis: Index of axis for columns in the input tensor.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.

    # Returns
        Rotated Numpy image tensor.
    """
    angle = np.deg2rad(np.random.uniform(-rg, rg))
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rotation_matrix = np.array([[cos_a, -sin_a, 0],
                                [sin_a, cos_a, 0],
                                [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    # Rotate about the image center, not the top-left corner.
    matrix = transform_matrix_offset_center(rotation_matrix, height, width)
    return apply_transform(x, matrix, channel_axis, fill_mode, cval)
def random_shift(x, wrg, hrg, row_axis=1, col_axis=2, channel_axis=0,
                 fill_mode='nearest', cval=0.):
    """Performs a random spatial shift of a Numpy image tensor.

    # Arguments
        x: Input tensor. Must be 3D.
        wrg: Width shift range, as a float fraction of the width.
        hrg: Height shift range, as a float fraction of the height.
        row_axis: Index of axis for rows in the input tensor.
        col_axis: Index of axis for columns in the input tensor.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.

    # Returns
        Shifted Numpy image tensor.
    """
    height, width = x.shape[row_axis], x.shape[col_axis]
    shift_rows = np.random.uniform(-hrg, hrg) * height
    shift_cols = np.random.uniform(-wrg, wrg) * width
    translation_matrix = np.array([[1, 0, shift_rows],
                                   [0, 1, shift_cols],
                                   [0, 0, 1]])
    # A pure translation needs no centering offset.
    return apply_transform(x, translation_matrix, channel_axis,
                           fill_mode, cval)
def random_shear(x, intensity, row_axis=1, col_axis=2, channel_axis=0,
                 fill_mode='nearest', cval=0.):
    """Performs a random spatial shear of a Numpy image tensor.

    # Arguments
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity in degrees.
        row_axis: Index of axis for rows in the input tensor.
        col_axis: Index of axis for columns in the input tensor.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.

    # Returns
        Sheared Numpy image tensor.
    """
    shear_angle = np.deg2rad(np.random.uniform(-intensity, intensity))
    shear_matrix = np.array([[1, -np.sin(shear_angle), 0],
                             [0, np.cos(shear_angle), 0],
                             [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    # Shear about the image center, not the top-left corner.
    matrix = transform_matrix_offset_center(shear_matrix, height, width)
    return apply_transform(x, matrix, channel_axis, fill_mode, cval)
def random_zoom(x, zoom_range, row_axis=1, col_axis=2, channel_axis=0,
                fill_mode='nearest', cval=0.):
    """Performs a random spatial zoom of a Numpy image tensor.

    # Arguments
        x: Input tensor. Must be 3D.
        zoom_range: Tuple of floats; zoom range for width and height.
        row_axis: Index of axis for rows in the input tensor.
        col_axis: Index of axis for columns in the input tensor.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.

    # Returns
        Zoomed Numpy image tensor.

    # Raises
        ValueError: if `zoom_range` isn't a tuple.
    """
    if len(zoom_range) != 2:
        raise ValueError('`zoom_range` should be a tuple or list of two'
                         ' floats. Received: ', zoom_range)
    if zoom_range[0] == 1 and zoom_range[1] == 1:
        # Degenerate range: identity zoom, skip the RNG draw.
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
    zoom_matrix = np.array([[zx, 0, 0],
                            [0, zy, 0],
                            [0, 0, 1]])
    height, width = x.shape[row_axis], x.shape[col_axis]
    # Zoom about the image center, not the top-left corner.
    matrix = transform_matrix_offset_center(zoom_matrix, height, width)
    return apply_transform(x, matrix, channel_axis, fill_mode, cval)
def random_channel_shift(x, intensity, channel_axis=0):
    """Performs a random channel shift.
    # Arguments
        x: Input tensor. Must be 3D.
        intensity: Transformation intensity.
        channel_axis: Index of axis for channels in the input tensor.
    # Returns
        Numpy image tensor.
    """
    channels_first = np.rollaxis(x, channel_axis, 0)
    # Clip back into the original value range of the whole tensor.
    lo, hi = np.min(channels_first), np.max(channels_first)
    shifted = []
    for channel in channels_first:
        # Each channel gets its own independent random offset.
        offset = np.random.uniform(-intensity, intensity)
        shifted.append(np.clip(channel + offset, lo, hi))
    stacked = np.stack(shifted, axis=0)
    return np.rollaxis(stacked, 0, channel_axis + 1)
def random_brightness(x, brightness_range):
    """Performs a random brightness shift.
    # Arguments
        x: Input tensor. Must be 3D.
        brightness_range: Tuple of two floats; range to pick
            a brightness enhancement factor from.
    # Returns
        Numpy image tensor.
    # Raises
        ValueError if `brightness_range` isn't a tuple.
    """
    if len(brightness_range) != 2:
        raise ValueError(
            '`brightness_range` should be tuple or list of two floats. '
            'Received: %s' % brightness_range)
    img = array_to_img(x)
    enhancer = ImageEnhance.Brightness(img)
    # Single brightness factor drawn uniformly from the given range;
    # a factor of 1.0 leaves the image unchanged.
    factor = np.random.uniform(brightness_range[0], brightness_range[1])
    img = enhancer.enhance(factor)
    return img_to_array(img)
def transform_matrix_offset_center(matrix, x, y):
    """Re-centers a 3x3 homogeneous transform about the image center.
    # Arguments
        matrix: 3x3 Numpy transform matrix.
        x: Size along the row dimension.
        y: Size along the column dimension.
    # Returns
        The centered 3x3 transform matrix.
    """
    center_x = float(x) / 2 + 0.5
    center_y = float(y) / 2 + 0.5
    # Translate to the center, apply the transform, translate back.
    to_center = np.array([[1, 0, center_x],
                          [0, 1, center_y],
                          [0, 0, 1]])
    from_center = np.array([[1, 0, -center_x],
                            [0, 1, -center_y],
                            [0, 0, 1]])
    return to_center.dot(matrix).dot(from_center)
def apply_transform(x,
                    transform_matrix,
                    channel_axis=0,
                    fill_mode='nearest',
                    cval=0.):
    """Applies the image transformation specified by a matrix.
    # Arguments
        x: 2D numpy array, single image.
        transform_matrix: Numpy array specifying the geometric transformation.
        channel_axis: Index of axis for channels in the input tensor.
        fill_mode: Points outside the boundaries of the input
            are filled according to the given mode
            (one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
        cval: Value used for points outside the boundaries
            of the input if `mode='constant'`.
    # Returns
        The transformed version of the input.
    """
    x = np.rollaxis(x, channel_axis, 0)
    final_affine_matrix = transform_matrix[:2, :2]
    final_offset = transform_matrix[:2, 2]
    # Use `ndi.affine_transform` directly: the `ndi.interpolation` alias
    # namespace was deprecated in SciPy 1.2 and removed in SciPy 1.10;
    # the function itself is identical.
    channel_images = [ndi.affine_transform(
        x_channel,
        final_affine_matrix,
        final_offset,
        order=1,
        mode=fill_mode,
        cval=cval) for x_channel in x]
    x = np.stack(channel_images, axis=0)
    x = np.rollaxis(x, 0, channel_axis + 1)
    return x
def flip_axis(x, axis):
    """Reverses the entries of `x` along the given axis."""
    arr = np.asarray(x)
    # Build a full-slice index and reverse only the requested axis.
    index = [slice(None)] * arr.ndim
    index[axis] = slice(None, None, -1)
    return arr[tuple(index)]
def array_to_img(x, data_format=None, scale=True):
    """Converts a 3D Numpy array to a PIL Image instance.
    # Arguments
        x: Input Numpy array.
        data_format: Image data format.
            either "channels_first" or "channels_last".
        scale: Whether to rescale image values
            to be within `[0, 255]`.
    # Returns
        A PIL Image instance.
    # Raises
        ImportError: if PIL is not available.
        ValueError: if invalid `x` or `data_format` is passed.
    """
    if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `array_to_img` requires PIL.')
    arr = np.asarray(x, dtype=K.floatx())
    if arr.ndim != 3:
        raise ValueError('Expected image array to have rank 3 (single image). '
                         'Got array with shape:', arr.shape)
    if data_format is None:
        data_format = K.image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Invalid data_format:', data_format)
    # PIL wants channels last; move them if the array is channels-first.
    if data_format == 'channels_first':
        arr = arr.transpose(1, 2, 0)
    if scale:
        # Shift up so the minimum is >= 0, then normalize to [0, 255].
        arr = arr + max(-np.min(arr), 0)
        peak = np.max(arr)
        if peak != 0:
            arr /= peak
        arr *= 255
    n_channels = arr.shape[2]
    if n_channels == 3:
        # RGB
        return pil_image.fromarray(arr.astype('uint8'), 'RGB')
    if n_channels == 1:
        # grayscale
        return pil_image.fromarray(arr[:, :, 0].astype('uint8'), 'L')
    raise ValueError('Unsupported channel number: ', n_channels)
def img_to_array(img, data_format=None):
    """Converts a PIL Image instance to a Numpy array.
    # Arguments
        img: PIL Image instance.
        data_format: Image data format,
            either "channels_first" or "channels_last".
    # Returns
        A 3D Numpy array.
    # Raises
        ValueError: if invalid `img` or `data_format` is passed.
    """
    if data_format is None:
        data_format = K.image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format: ', data_format)
    # The array comes back as (height, width[, channel]); a grayscale image
    # needs an explicit channel axis, and channels may need to be moved first.
    arr = np.asarray(img, dtype=K.floatx())
    if arr.ndim == 3:
        if data_format == 'channels_first':
            arr = arr.transpose(2, 0, 1)
    elif arr.ndim == 2:
        if data_format == 'channels_first':
            arr = arr.reshape((1, arr.shape[0], arr.shape[1]))
        else:
            arr = arr.reshape((arr.shape[0], arr.shape[1], 1))
    else:
        raise ValueError('Unsupported image shape: ', arr.shape)
    return arr
def save_img(path,
             x,
             data_format=None,
             file_format=None,
             scale=True, **kwargs):
    """Saves an image stored as a Numpy array to a path or file object.
    # Arguments
        path: Path or file object.
        x: Numpy array.
        data_format: Image data format,
            either "channels_first" or "channels_last".
        file_format: Optional file format override. If omitted, the
            format to use is determined from the filename extension.
            If a file object was used instead of a filename, this
            parameter should always be used.
        scale: Whether to rescale image values to be within `[0, 255]`.
        **kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
    """
    pil_img = array_to_img(x, data_format=data_format, scale=scale)
    pil_img.save(path, format=file_format, **kwargs)
def load_img(path, grayscale=False, target_size=None,
             interpolation='nearest'):
    """Loads an image into PIL format.
    # Arguments
        path: Path to image file.
        grayscale: Boolean, whether to load the image as grayscale.
        target_size: Either `None` (default to original size)
            or tuple of ints `(img_height, img_width)`.
        interpolation: Interpolation method used to resample the image if the
            target size is different from that of the loaded image.
            Supported methods are "nearest", "bilinear", and "bicubic".
            If PIL version 1.1.3 or newer is installed, "lanczos" is also
            supported. If PIL version 3.4.0 or newer is installed, "box" and
            "hamming" are also supported. By default, "nearest" is used.
    # Returns
        A PIL Image instance.
    # Raises
        ImportError: if PIL is not available.
        ValueError: if interpolation method is not supported.
    """
    if pil_image is None:
        raise ImportError('Could not import PIL.Image. '
                          'The use of `array_to_img` requires PIL.')
    img = pil_image.open(path)
    # Normalize the mode: 'L' for grayscale, 'RGB' otherwise.
    wanted_mode = 'L' if grayscale else 'RGB'
    if img.mode != wanted_mode:
        img = img.convert(wanted_mode)
    if target_size is not None:
        # PIL sizes are (width, height), target_size is (height, width).
        desired_size = (target_size[1], target_size[0])
        if img.size != desired_size:
            if interpolation not in _PIL_INTERPOLATION_METHODS:
                raise ValueError(
                    'Invalid interpolation method {} specified. Supported '
                    'methods are {}'.format(
                        interpolation,
                        ", ".join(_PIL_INTERPOLATION_METHODS.keys())))
            resample = _PIL_INTERPOLATION_METHODS[interpolation]
            img = img.resize(desired_size, resample)
    return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
    """Lists all picture files under `directory`, recursively.
    # Arguments
        directory: Directory to walk.
        ext: `'|'`-separated extensions to accept.
    # Returns
        List of paths of files whose name ends with one of `ext`.
    """
    # Anchor the pattern at the end of the filename: the previous unanchored
    # `re.match` also accepted names such as 'photo.jpg.txt'. Compile once
    # instead of re-matching the raw pattern per file.
    pattern = re.compile(r'([\w]+\.(?:' + ext + r'))$')
    return [os.path.join(root, f)
            for root, _, files in os.walk(directory) for f in files
            if pattern.match(f)]
class ImageDataGenerator(object):
    """Generate batches of tensor image data with real-time data augmentation.
    The data will be looped over (in batches).
    # Arguments
        featurewise_center: Boolean.
            Set input mean to 0 over the dataset, feature-wise.
        samplewise_center: Boolean. Set each sample mean to 0.
        featurewise_std_normalization: Boolean.
            Divide inputs by std of the dataset, feature-wise.
        samplewise_std_normalization: Boolean. Divide each input by its std.
        zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
        zca_whitening: Boolean. Apply ZCA whitening.
        rotation_range: Int. Degree range for random rotations.
        width_shift_range: Float, 1-D array-like or int
            - float: fraction of total width, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
                `(-width_shift_range, +width_shift_range)`
            - With `width_shift_range=2` possible values
                are integers `[-1, 0, +1]`,
                same as with `width_shift_range=[-1, 0, +1]`,
                while with `width_shift_range=1.0` possible values are floats in
                the interval [-1.0, +1.0).
        height_shift_range: Float, 1-D array-like or int
            - float: fraction of total height, if < 1, or pixels if >= 1.
            - 1-D array-like: random elements from the array.
            - int: integer number of pixels from interval
                `(-height_shift_range, +height_shift_range)`
            - With `height_shift_range=2` possible values
                are integers `[-1, 0, +1]`,
                same as with `height_shift_range=[-1, 0, +1]`,
                while with `height_shift_range=1.0` possible values are floats in
                the interval [-1.0, +1.0).
        shear_range: Float. Shear Intensity
            (Shear angle in counter-clockwise direction in degrees)
        zoom_range: Float or [lower, upper]. Range for random zoom.
            If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
        channel_shift_range: Float. Range for random channel shifts.
        fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
            Default is 'nearest'.
            Points outside the boundaries of the input are filled
            according to the given mode:
            - 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
            - 'nearest':  aaaaaaaa|abcd|dddddddd
            - 'reflect':  abcddcba|abcd|dcbaabcd
            - 'wrap':  abcdabcd|abcd|abcdabcd
        cval: Float or Int.
            Value used for points outside the boundaries
            when `fill_mode = "constant"`.
        horizontal_flip: Boolean. Randomly flip inputs horizontally.
        vertical_flip: Boolean. Randomly flip inputs vertically.
        rescale: rescaling factor. Defaults to None.
            If None or 0, no rescaling is applied,
            otherwise we multiply the data by the value provided
            (before applying any other transformation).
        preprocessing_function: function that will be implied on each input.
            The function will run after the image is resized and augmented.
            The function should take one argument:
            one image (Numpy tensor with rank 3),
            and should output a Numpy tensor with the same shape.
        data_format: Image data format,
            either "channels_first" or "channels_last".
            "channels_last" mode means that the images should have shape
            `(samples, height, width, channels)`,
            "channels_first" mode means that the images should have shape
            `(samples, channels, height, width)`.
            It defaults to the `image_data_format` value found in your
            Keras config file at `~/.keras/keras.json`.
            If you never set it, then it will be "channels_last".
        validation_split: Float. Fraction of images reserved for validation
            (strictly between 0 and 1).
    # Examples
    Example of using `.flow(x, y)`:
    ```python
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    y_train = np_utils.to_categorical(y_train, num_classes)
    y_test = np_utils.to_categorical(y_test, num_classes)
    datagen = ImageDataGenerator(
        featurewise_center=True,
        featurewise_std_normalization=True,
        rotation_range=20,
        width_shift_range=0.2,
        height_shift_range=0.2,
        horizontal_flip=True)
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
    datagen.fit(x_train)
    # fits the model on batches with real-time data augmentation:
    model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
                        steps_per_epoch=len(x_train) / 32, epochs=epochs)
    # here's a more "manual" example
    for e in range(epochs):
        print('Epoch', e)
        batches = 0
        for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
            model.fit(x_batch, y_batch)
            batches += 1
            if batches >= len(x_train) / 32:
                # we need to break the loop by hand because
                # the generator loops indefinitely
                break
    ```
    Example of using `.flow_from_directory(directory)`:
    ```python
    train_datagen = ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)
    test_datagen = ImageDataGenerator(rescale=1./255)
    train_generator = train_datagen.flow_from_directory(
        'data/train',
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')
    validation_generator = test_datagen.flow_from_directory(
        'data/validation',
        target_size=(150, 150),
        batch_size=32,
        class_mode='binary')
    model.fit_generator(
        train_generator,
        steps_per_epoch=2000,
        epochs=50,
        validation_data=validation_generator,
        validation_steps=800)
    ```
    Example of transforming images and masks together.
    ```python
    # we create two instances with the same arguments
    data_gen_args = dict(featurewise_center=True,
                         featurewise_std_normalization=True,
                         rotation_range=90.,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.2)
    image_datagen = ImageDataGenerator(**data_gen_args)
    mask_datagen = ImageDataGenerator(**data_gen_args)
    # Provide the same seed and keyword arguments to the fit and flow methods
    seed = 1
    image_datagen.fit(images, augment=True, seed=seed)
    mask_datagen.fit(masks, augment=True, seed=seed)
    image_generator = image_datagen.flow_from_directory(
        'data/images',
        class_mode=None,
        seed=seed)
    mask_generator = mask_datagen.flow_from_directory(
        'data/masks',
        class_mode=None,
        seed=seed)
    # combine generators into one which yields image and masks
    train_generator = zip(image_generator, mask_generator)
    model.fit_generator(
        train_generator,
        steps_per_epoch=2000,
        epochs=50)
    ```
    """
    def __init__(self,
                 featurewise_center=False,
                 samplewise_center=False,
                 featurewise_std_normalization=False,
                 samplewise_std_normalization=False,
                 zca_whitening=False,
                 zca_epsilon=1e-6,
                 rotation_range=0.,
                 width_shift_range=0.,
                 height_shift_range=0.,
                 brightness_range=None,
                 shear_range=0.,
                 zoom_range=0.,
                 channel_shift_range=0.,
                 fill_mode='nearest',
                 cval=0.,
                 horizontal_flip=False,
                 vertical_flip=False,
                 rescale=None,
                 preprocessing_function=None,
                 data_format=None,
                 validation_split=0.0):
        # Fall back to the backend's configured data format when unspecified.
        if data_format is None:
            data_format = K.image_data_format()
        self.featurewise_center = featurewise_center
        self.samplewise_center = samplewise_center
        self.featurewise_std_normalization = featurewise_std_normalization
        self.samplewise_std_normalization = samplewise_std_normalization
        self.zca_whitening = zca_whitening
        self.zca_epsilon = zca_epsilon
        self.rotation_range = rotation_range
        self.width_shift_range = width_shift_range
        self.height_shift_range = height_shift_range
        self.brightness_range = brightness_range
        self.shear_range = shear_range
        self.zoom_range = zoom_range
        self.channel_shift_range = channel_shift_range
        self.fill_mode = fill_mode
        self.cval = cval
        self.horizontal_flip = horizontal_flip
        self.vertical_flip = vertical_flip
        self.rescale = rescale
        self.preprocessing_function = preprocessing_function
        if data_format not in {'channels_last', 'channels_first'}:
            raise ValueError(
                '`data_format` should be `"channels_last"` '
                '(channel after row and column) or '
                '`"channels_first"` (channel before row and column). '
                'Received: %s' % data_format)
        self.data_format = data_format
        # Axis indices are for rank-4 batches (sample axis is 0).
        if data_format == 'channels_first':
            self.channel_axis = 1
            self.row_axis = 2
            self.col_axis = 3
        if data_format == 'channels_last':
            self.channel_axis = 3
            self.row_axis = 1
            self.col_axis = 2
        if validation_split and not 0 < validation_split < 1:
            raise ValueError(
                '`validation_split` must be strictly between 0 and 1. '
                ' Received: %s' % validation_split)
        self._validation_split = validation_split
        # Featurewise statistics; populated by `fit()`.
        self.mean = None
        self.std = None
        self.principal_components = None
        # A scalar zoom_range z is interpreted as the interval [1-z, 1+z].
        if np.isscalar(zoom_range):
            self.zoom_range = [1 - zoom_range, 1 + zoom_range]
        elif len(zoom_range) == 2:
            self.zoom_range = [zoom_range[0], zoom_range[1]]
        else:
            raise ValueError('`zoom_range` should be a float or '
                             'a tuple or list of two floats. '
                             'Received: %s' % zoom_range)
        # ZCA whitening implies featurewise centering and supersedes
        # featurewise std normalization; warn when it overrides user flags.
        if zca_whitening:
            if not featurewise_center:
                self.featurewise_center = True
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, which overrides '
                              'setting of `featurewise_center`.')
            if featurewise_std_normalization:
                self.featurewise_std_normalization = False
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening` '
                              'which overrides setting of'
                              '`featurewise_std_normalization`.')
        if featurewise_std_normalization:
            if not featurewise_center:
                self.featurewise_center = True
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, '
                              'which overrides setting of '
                              '`featurewise_center`.')
        if samplewise_std_normalization:
            if not samplewise_center:
                self.samplewise_center = True
                warnings.warn('This ImageDataGenerator specifies '
                              '`samplewise_std_normalization`, '
                              'which overrides setting of '
                              '`samplewise_center`.')
    def flow(self, x, y=None, batch_size=32, shuffle=True, sample_weight=None, seed=None,
             save_to_dir=None, save_prefix='', save_format='png', subset=None):
        """Takes numpy data & label arrays, and generates batches of augmented data.
        # Arguments
            x: Input data. Numpy array of rank 4 or a tuple.
                If tuple, the first element
                should contain the images and the second element
                another numpy array or a list of numpy arrays
                that gets passed to the output
                without any modifications.
                Can be used to feed the model miscellaneous data
                along with the images.
                In case of grayscale data, the channels axis of the image array
                should have value 1, and in case
                of RGB data, it should have value 3.
            y: Labels.
            batch_size: Int (default: 32).
            shuffle: Boolean (default: True).
            sample_weight: Sample weights.
            seed: Int (default: None).
            save_to_dir: None or str (default: None).
                This allows you to optionally specify a directory
                to which to save the augmented pictures being generated
                (useful for visualizing what you are doing).
            save_prefix: Str (default: `''`).
                Prefix to use for filenames of saved pictures
                (only relevant if `save_to_dir` is set).
            save_format: one of "png", "jpeg"
                (only relevant if `save_to_dir` is set). Default: "png".
            subset: Subset of data (`"training"` or `"validation"`) if
                `validation_split` is set in `ImageDataGenerator`.
        # Returns
            An `Iterator` yielding tuples of `(x, y)`
                where `x` is a numpy array of image data
                (in the case of a single image input) or a list
                of numpy arrays (in the case with
                additional inputs) and `y` is a numpy array
                of corresponding labels. If 'sample_weight' is not None,
                the yielded tuples are of the form `(x, y, sample_weight)`.
                If `y` is None, only the numpy array `x` is returned.
        """
        # Thin wrapper: all batching/augmentation logic lives in the iterator.
        return NumpyArrayIterator(
            x, y, self,
            batch_size=batch_size,
            shuffle=shuffle,
            sample_weight=sample_weight,
            seed=seed,
            data_format=self.data_format,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            subset=subset)
    def flow_from_directory(self, directory,
                            target_size=(256, 256), color_mode='rgb',
                            classes=None, class_mode='categorical',
                            batch_size=32, shuffle=True, seed=None,
                            save_to_dir=None,
                            save_prefix='',
                            save_format='png',
                            follow_links=False,
                            subset=None,
                            interpolation='nearest'):
        """Takes the path to a directory & generates batches of augmented data.
        # Arguments
            directory: Path to the target directory.
                It should contain one subdirectory per class.
                Any PNG, JPG, BMP, PPM or TIF images
                inside each of the subdirectories directory tree
                will be included in the generator.
                See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
                for more details.
            target_size: Tuple of integers `(height, width)`,
                default: `(256, 256)`.
                The dimensions to which all images found will be resized.
            color_mode: One of "grayscale", "rbg". Default: "rgb".
                Whether the images will be converted to
                have 1 or 3 color channels.
            classes: Optional list of class subdirectories
                (e.g. `['dogs', 'cats']`). Default: None.
                If not provided, the list of classes will be automatically
                inferred from the subdirectory names/structure
                under `directory`, where each subdirectory will
                be treated as a different class
                (and the order of the classes, which will map to the label
                indices, will be alphanumeric).
                The dictionary containing the mapping from class names to class
                indices can be obtained via the attribute `class_indices`.
            class_mode: One of "categorical", "binary", "sparse",
                "input", or None. Default: "categorical".
                Determines the type of label arrays that are returned:
                - "categorical" will be 2D one-hot encoded labels,
                - "binary" will be 1D binary labels,
                    "sparse" will be 1D integer labels,
                - "input" will be images identical
                    to input images (mainly used to work with autoencoders).
                - If None, no labels are returned
                  (the generator will only yield batches of image data,
                  which is useful to use with `model.predict_generator()`,
                  `model.evaluate_generator()`, etc.).
                  Please note that in case of class_mode None,
                  the data still needs to reside in a subdirectory
                  of `directory` for it to work correctly.
            batch_size: Size of the batches of data (default: 32).
            shuffle: Whether to shuffle the data (default: True)
            seed: Optional random seed for shuffling and transformations.
            save_to_dir: None or str (default: None).
                This allows you to optionally specify
                a directory to which to save
                the augmented pictures being generated
                (useful for visualizing what you are doing).
            save_prefix: Str. Prefix to use for filenames of saved pictures
                (only relevant if `save_to_dir` is set).
            save_format: One of "png", "jpeg"
                (only relevant if `save_to_dir` is set). Default: "png".
            follow_links: Whether to follow symlinks inside
                class subdirectories (default: False).
            subset: Subset of data (`"training"` or `"validation"`) if
                `validation_split` is set in `ImageDataGenerator`.
            interpolation: Interpolation method used to
                resample the image if the
                target size is different from that of the loaded image.
                Supported methods are `"nearest"`, `"bilinear"`,
                and `"bicubic"`.
                If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
                supported. If PIL version 3.4.0 or newer is installed,
                `"box"` and `"hamming"` are also supported.
                By default, `"nearest"` is used.
        # Returns
            A `DirectoryIterator` yielding tuples of `(x, y)`
                where `x` is a numpy array containing a batch
                of images with shape `(batch_size, *target_size, channels)`
                and `y` is a numpy array of corresponding labels.
        """
        # Thin wrapper: directory walking and label inference live in the iterator.
        return DirectoryIterator(
            directory, self,
            target_size=target_size, color_mode=color_mode,
            classes=classes, class_mode=class_mode,
            data_format=self.data_format,
            batch_size=batch_size, shuffle=shuffle, seed=seed,
            save_to_dir=save_to_dir,
            save_prefix=save_prefix,
            save_format=save_format,
            follow_links=follow_links,
            subset=subset,
            interpolation=interpolation)
    def standardize(self, x):
        """Applies the normalization configuration to a batch of inputs.
        # Arguments
            x: Batch of inputs to be normalized.
        # Returns
            The inputs, normalized.
        """
        # Order matters: user preprocessing, rescale, samplewise stats,
        # then featurewise stats (which require a prior `fit()`).
        if self.preprocessing_function:
            x = self.preprocessing_function(x)
        if self.rescale:
            x *= self.rescale
        if self.samplewise_center:
            x -= np.mean(x, keepdims=True)
        if self.samplewise_std_normalization:
            x /= (np.std(x, keepdims=True) + K.epsilon())
        if self.featurewise_center:
            if self.mean is not None:
                x -= self.mean
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_center`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.featurewise_std_normalization:
            if self.std is not None:
                x /= (self.std + K.epsilon())
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`featurewise_std_normalization`, '
                              'but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        if self.zca_whitening:
            if self.principal_components is not None:
                # Flatten each sample, project onto the whitening matrix,
                # and restore the original shape.
                flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
                whitex = np.dot(flatx, self.principal_components)
                x = np.reshape(whitex, x.shape)
            else:
                warnings.warn('This ImageDataGenerator specifies '
                              '`zca_whitening`, but it hasn\'t '
                              'been fit on any training data. Fit it '
                              'first by calling `.fit(numpy_data)`.')
        return x
    def random_transform(self, x, seed=None):
        """Randomly augments a single image tensor.
        # Arguments
            x: 3D tensor, single image.
            seed: Random seed.
        # Returns
            A randomly transformed version of the input (same shape).
        """
        # x is a single image, so it doesn't have image number at index 0
        img_row_axis = self.row_axis - 1
        img_col_axis = self.col_axis - 1
        img_channel_axis = self.channel_axis - 1
        if seed is not None:
            np.random.seed(seed)
        # Use composition of homographies
        # to generate final transform that needs to be applied
        if self.rotation_range:
            theta = np.deg2rad(np.random.uniform(
                -self.rotation_range,
                self.rotation_range))
        else:
            theta = 0
        # tx shifts along rows (height); ty shifts along columns (width).
        if self.height_shift_range:
            try:  # 1-D array-like or int
                tx = np.random.choice(self.height_shift_range)
                tx *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                tx = np.random.uniform(-self.height_shift_range,
                                       self.height_shift_range)
            # Values < 1 are interpreted as a fraction of the image size.
            if np.max(self.height_shift_range) < 1:
                tx *= x.shape[img_row_axis]
        else:
            tx = 0
        if self.width_shift_range:
            try:  # 1-D array-like or int
                ty = np.random.choice(self.width_shift_range)
                ty *= np.random.choice([-1, 1])
            except ValueError:  # floating point
                ty = np.random.uniform(-self.width_shift_range,
                                       self.width_shift_range)
            if np.max(self.width_shift_range) < 1:
                ty *= x.shape[img_col_axis]
        else:
            ty = 0
        if self.shear_range:
            shear = np.deg2rad(np.random.uniform(
                -self.shear_range,
                self.shear_range))
        else:
            shear = 0
        if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
            zx, zy = 1, 1
        else:
            zx, zy = np.random.uniform(
                self.zoom_range[0],
                self.zoom_range[1],
                2)
        # Compose rotation, shift, shear and zoom into a single 3x3 matrix
        # so only one interpolation pass is applied to the image.
        transform_matrix = None
        if theta != 0:
            rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                        [np.sin(theta), np.cos(theta), 0],
                                        [0, 0, 1]])
            transform_matrix = rotation_matrix
        if tx != 0 or ty != 0:
            shift_matrix = np.array([[1, 0, tx],
                                     [0, 1, ty],
                                     [0, 0, 1]])
            transform_matrix = shift_matrix if transform_matrix is None else np.dot(transform_matrix, shift_matrix)
        if shear != 0:
            shear_matrix = np.array([[1, -np.sin(shear), 0],
                                     [0, np.cos(shear), 0],
                                     [0, 0, 1]])
            transform_matrix = shear_matrix if transform_matrix is None else np.dot(transform_matrix, shear_matrix)
        if zx != 1 or zy != 1:
            zoom_matrix = np.array([[zx, 0, 0],
                                    [0, zy, 0],
                                    [0, 0, 1]])
            transform_matrix = zoom_matrix if transform_matrix is None else np.dot(transform_matrix, zoom_matrix)
        if transform_matrix is not None:
            h, w = x.shape[img_row_axis], x.shape[img_col_axis]
            transform_matrix = transform_matrix_offset_center(
                transform_matrix, h, w)
            x = apply_transform(x, transform_matrix, img_channel_axis,
                                fill_mode=self.fill_mode, cval=self.cval)
        if self.channel_shift_range != 0:
            x = random_channel_shift(x,
                                     self.channel_shift_range,
                                     img_channel_axis)
        if self.horizontal_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_col_axis)
        if self.vertical_flip:
            if np.random.random() < 0.5:
                x = flip_axis(x, img_row_axis)
        if self.brightness_range is not None:
            x = random_brightness(x, self.brightness_range)
        return x
    def fit(self, x,
            augment=False,
            rounds=1,
            seed=None):
        """Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
        Only required if `featurewise_center` or
        `featurewise_std_normalization` or `zca_whitening` are set to True.
        # Arguments
            x: Sample data. Should have rank 4.
                In case of grayscale data,
                the channels axis should have value 1, and in case
                of RGB data, it should have value 3.
            augment: Boolean (default: False).
                Whether to fit on randomly augmented samples.
            rounds: Int (default: 1).
                If using data augmentation (`augment=True`),
                this is how many augmentation passes over the data to use.
            seed: Int (default: None). Random seed.
        """
        x = np.asarray(x, dtype=K.floatx())
        if x.ndim != 4:
            raise ValueError('Input to `.fit()` should have rank 4. '
                             'Got array with shape: ' + str(x.shape))
        if x.shape[self.channel_axis] not in {1, 3, 4}:
            warnings.warn(
                'Expected input to be images (as Numpy array) '
                'following the data format convention "' +
                self.data_format + '" (channels on axis ' +
                str(self.channel_axis) + '), i.e. expected '
                'either 1, 3 or 4 channels on axis ' +
                str(self.channel_axis) + '. '
                'However, it was passed an array with shape ' +
                str(x.shape) + ' (' + str(x.shape[self.channel_axis]) +
                ' channels).')
        if seed is not None:
            np.random.seed(seed)
        x = np.copy(x)
        if augment:
            # Fit the statistics on `rounds` augmented copies of the data.
            ax = np.zeros(
                tuple([rounds * x.shape[0]] + list(x.shape)[1:]),
                dtype=K.floatx())
            for r in range(rounds):
                for i in range(x.shape[0]):
                    ax[i + r * x.shape[0]] = self.random_transform(x[i])
            x = ax
        if self.featurewise_center:
            # Per-channel mean, reshaped so it broadcasts over a single image.
            self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.mean = np.reshape(self.mean, broadcast_shape)
            x -= self.mean
        if self.featurewise_std_normalization:
            self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
            broadcast_shape = [1, 1, 1]
            broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
            self.std = np.reshape(self.std, broadcast_shape)
            x /= (self.std + K.epsilon())
        if self.zca_whitening:
            # ZCA whitening matrix: U * diag(1/sqrt(S + eps)) * U^T from the
            # SVD of the flattened data's covariance.
            flat_x = np.reshape(
                x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
            sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
            u, s, _ = linalg.svd(sigma)
            s_inv = 1. / np.sqrt(s[np.newaxis] + self.zca_epsilon)
            self.principal_components = (u * s_inv).dot(u.T)
class Iterator(Sequence):
    """Base class for image data iterators.
    Every `Iterator` must implement the `_get_batches_of_transformed_samples`
    method.
    # Arguments
        n: Integer, total number of samples in the dataset to loop over.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seeding for data shuffling.
    """
    def __init__(self, n, batch_size, shuffle, seed):
        self.n = n
        self.batch_size = batch_size
        self.seed = seed
        self.shuffle = shuffle
        # Position within the current epoch (used by `_flow_index`).
        self.batch_index = 0
        # Monotonic counter; combined with `seed` so each batch gets a
        # distinct but reproducible RNG state.
        self.total_batches_seen = 0
        # Protects the shared generator when the iterator is used from
        # multiple threads.
        self.lock = threading.Lock()
        self.index_array = None
        self.index_generator = self._flow_index()
    def _set_index_array(self):
        self.index_array = np.arange(self.n)
        if self.shuffle:
            self.index_array = np.random.permutation(self.n)
    def __getitem__(self, idx):
        if idx >= len(self):
            raise ValueError('Asked to retrieve element {idx}, '
                             'but the Sequence '
                             'has length {length}'.format(idx=idx,
                                                          length=len(self)))
        if self.seed is not None:
            np.random.seed(self.seed + self.total_batches_seen)
        self.total_batches_seen += 1
        if self.index_array is None:
            self._set_index_array()
        index_array = self.index_array[self.batch_size * idx:
                                       self.batch_size * (idx + 1)]
        return self._get_batches_of_transformed_samples(index_array)
    def __len__(self):
        return (self.n + self.batch_size - 1) // self.batch_size  # round up
    def on_epoch_end(self):
        self._set_index_array()
    def reset(self):
        self.batch_index = 0
    def _flow_index(self):
        # Ensure self.batch_index is 0.
        self.reset()
        while 1:
            if self.seed is not None:
                np.random.seed(self.seed + self.total_batches_seen)
            if self.batch_index == 0:
                # New epoch: regenerate (and possibly reshuffle) the indices.
                self._set_index_array()
            current_index = (self.batch_index * self.batch_size) % self.n
            # Advance within the epoch, or wrap to a new epoch when the
            # remaining samples fit in one (possibly short) final batch.
            if self.n > current_index + self.batch_size:
                self.batch_index += 1
            else:
                self.batch_index = 0
            self.total_batches_seen += 1
            yield self.index_array[current_index:
                                   current_index + self.batch_size]
    def __iter__(self):
        # Needed if we want to do something like:
        # for x, y in data_gen.flow(...):
        return self
    def __next__(self, *args, **kwargs):
        # Delegates to `next`, which subclasses are expected to provide
        # (not defined on this base class) — see e.g. NumpyArrayIterator.
        return self.next(*args, **kwargs)
    def _get_batches_of_transformed_samples(self, index_array):
        """Gets a batch of transformed samples.
        # Arguments
            index_array: Array of sample indices to include in batch.
        # Returns
            A batch of transformed samples.
        """
        raise NotImplementedError
class NumpyArrayIterator(Iterator):
    """Iterator yielding data from a Numpy array.

    # Arguments
        x: Numpy array of input data or tuple.
            If tuple, the second element is either
            another numpy array or a list of numpy arrays,
            each of which gets passed
            through as an output without any modifications.
        y: Numpy array of targets data.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        sample_weight: Numpy array of sample weights.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
        subset: Subset of data (`"training"` or `"validation"`) if
            validation_split is set in ImageDataGenerator.
    """

    def __init__(self, x, y, image_data_generator,
                 batch_size=32, shuffle=False, sample_weight=None,
                 seed=None, data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png',
                 subset=None):
        # `x` may be `(images, misc)` where `misc` is an extra array (or
        # list of arrays) passed through to the output unmodified.
        if isinstance(x, (tuple, list)):
            if not isinstance(x[1], list):
                x_misc = [np.asarray(x[1])]
            else:
                x_misc = [np.asarray(xx) for xx in x[1]]
            x = x[0]
            for xx in x_misc:
                if len(x) != len(xx):
                    raise ValueError(
                        'All of the arrays in `x` '
                        'should have the same length. '
                        'Found a pair with: len(x[0]) = %s, len(x[?]) = %s' %
                        (len(x), len(xx)))
        else:
            x_misc = []

        if y is not None and len(x) != len(y):
            raise ValueError('`x` (images tensor) and `y` (labels) '
                             'should have the same length. '
                             'Found: x.shape = %s, y.shape = %s' %
                             (np.asarray(x).shape, np.asarray(y).shape))
        if sample_weight is not None and len(x) != len(sample_weight):
            raise ValueError('`x` (images tensor) and `sample_weight` '
                             'should have the same length. '
                             'Found: x.shape = %s, sample_weight.shape = %s' %
                             (np.asarray(x).shape,
                              np.asarray(sample_weight).shape))

        if subset is not None:
            if subset not in {'training', 'validation'}:
                raise ValueError('Invalid subset name:', subset,
                                 '; expected "training" or "validation".')
            # The leading `validation_split` fraction of samples forms the
            # validation subset; the remainder forms the training subset.
            split_idx = int(len(x) * image_data_generator._validation_split)
            if subset == 'validation':
                x = x[:split_idx]
                x_misc = [np.asarray(xx[:split_idx]) for xx in x_misc]
                if y is not None:
                    y = y[:split_idx]
            else:
                x = x[split_idx:]
                x_misc = [np.asarray(xx[split_idx:]) for xx in x_misc]
                if y is not None:
                    y = y[split_idx:]

        if data_format is None:
            data_format = K.image_data_format()
        self.x = np.asarray(x, dtype=K.floatx())
        self.x_misc = x_misc
        if self.x.ndim != 4:
            raise ValueError('Input data in `NumpyArrayIterator` '
                             'should have rank 4. You passed an array '
                             'with shape', self.x.shape)
        channels_axis = 3 if data_format == 'channels_last' else 1
        if self.x.shape[channels_axis] not in {1, 3, 4}:
            # Not a hard error: unusual channel counts may be intentional.
            warnings.warn('NumpyArrayIterator is set to use the '
                          'data format convention "' + data_format + '" '
                          '(channels on axis ' + str(channels_axis) +
                          '), i.e. expected either 1, 3 or 4 '
                          'channels on axis ' + str(channels_axis) + '. '
                          'However, it was passed an array with shape ' +
                          str(self.x.shape) + ' (' +
                          str(self.x.shape[channels_axis]) + ' channels).')
        self.y = np.asarray(y) if y is not None else None
        self.sample_weight = (np.asarray(sample_weight)
                              if sample_weight is not None else None)
        self.image_data_generator = image_data_generator
        self.data_format = data_format
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        super(NumpyArrayIterator, self).__init__(x.shape[0],
                                                 batch_size,
                                                 shuffle,
                                                 seed)

    def _get_batches_of_transformed_samples(self, index_array):
        """Build one batch: randomly transform and standardize each sample.

        # Arguments
            index_array: Array of sample indices to include in batch.

        # Returns
            Depending on configuration: `x`, `(x, y)` or
            `(x, y, sample_weight)`, where `x` is a single array or a list
            `[images] + misc` when pass-through arrays were provided.
        """
        batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
                           dtype=K.floatx())
        for i, j in enumerate(index_array):
            x = self.x[j]
            x = self.image_data_generator.random_transform(
                x.astype(K.floatx()))
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    # Random suffix so repeated epochs don't overwrite
                    # previously saved images. Integer literal: newer
                    # NumPy rejects float bounds such as 1e4.
                    hash=np.random.randint(10000),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        batch_x_miscs = [xx[index_array] for xx in self.x_misc]
        output = (batch_x if not batch_x_miscs
                  else [batch_x] + batch_x_miscs,)
        if self.y is None:
            return output[0]
        output += (self.y[index_array],)
        if self.sample_weight is not None:
            output += (self.sample_weight[index_array],)
        return output

    def next(self):
        """For python 2.x.

        # Returns
            The next batch.
        """
        # Keeps under lock only the mechanism which advances
        # the indexing of each batch.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
def _iter_valid_files(directory, white_list_formats, follow_links):
    """Iterates on files with extension in `white_list_formats` contained in `directory`.

    # Arguments
        directory: Absolute path to the directory
            containing files to be counted.
        white_list_formats: Set of strings containing allowed extensions for
            the files to be counted.
        follow_links: Boolean, whether to follow symlinks while walking.

    # Yields
        Tuple of (root, filename) with extension in `white_list_formats`.
    """
    def _recursive_list(subpath):
        # Sort by directory path so traversal order is deterministic
        # across filesystems.
        return sorted(os.walk(subpath, followlinks=follow_links),
                      key=lambda x: x[0])

    # Precompute the suffixes once; `str.endswith` accepts a tuple.
    suffixes = tuple('.' + extension for extension in white_list_formats)
    for root, _, files in _recursive_list(directory):
        for fname in sorted(files):
            lowered = fname.lower()
            if lowered.endswith('.tiff'):
                # Warn once per file. The original emitted this warning
                # inside the per-extension loop, repeating it for every
                # entry in `white_list_formats`.
                warnings.warn('Using \'.tiff\' files with multiple bands '
                              'will cause distortion. '
                              'Please verify your output.')
            # Yield each matching file exactly once, even if several
            # whitelisted extensions happen to overlap.
            if lowered.endswith(suffixes):
                yield root, fname
def _count_valid_files_in_directory(directory,
                                    white_list_formats,
                                    split,
                                    follow_links):
    """Count files in `directory` whose extension is in `white_list_formats`.

    # Arguments
        directory: absolute path to the directory
            containing files to be counted.
        white_list_formats: set of strings containing allowed extensions for
            the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last
            40 percent of images in each directory.
        follow_links: boolean, whether to follow symlinks while walking.

    # Returns
        the count of valid files falling inside the `split` window
        (the whole directory when `split` is falsy).
    """
    # Count lazily instead of materializing the full file list.
    num_files = sum(1 for _ in _iter_valid_files(
        directory, white_list_formats, follow_links))
    if not split:
        return num_files
    # The window is [floor(split[0] * n), floor(split[1] * n)), matching
    # the slicing arithmetic used when listing the files.
    start = int(split[0] * num_files)
    stop = int(split[1] * num_files)
    return stop - start
def _list_valid_filenames_in_directory(directory, white_list_formats, split,
                                       class_indices, follow_links):
    """Lists paths of files in `subdir` with extensions in `white_list_formats`.

    # Arguments
        directory: absolute path to a directory containing the files to list.
            The directory name is used as class label
            and must be a key of `class_indices`.
        white_list_formats: set of strings containing allowed extensions for
            the files to be counted.
        split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
            account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for the last
            40 percent of images in each directory.
        class_indices: dictionary mapping a class name to its index.
        follow_links: boolean, whether to follow symlinks while walking.

    # Returns
        classes: a list of class indices
        filenames: the path of valid files in `directory`, relative from
            `directory`'s parent (e.g., if `directory` is "dataset/class1",
            the filenames will be
            `["class1/file1.jpg", "class1/file2.jpg", ...]`).
    """
    dirname = os.path.basename(directory)
    if split:
        # Walk the directory once and slice the result. The original
        # walked it twice (once to count, once to list), doubling the
        # filesystem work for every class subdirectory.
        all_files = list(
            _iter_valid_files(directory, white_list_formats, follow_links))
        num_files = len(all_files)
        start, stop = int(split[0] * num_files), int(split[1] * num_files)
        valid_files = all_files[start: stop]
    else:
        # No split: iterate lazily, no need to materialize.
        valid_files = _iter_valid_files(
            directory, white_list_formats, follow_links)
    classes = []
    filenames = []
    for root, fname in valid_files:
        classes.append(class_indices[dirname])
        absolute_path = os.path.join(root, fname)
        # Keep paths relative to the parent of `directory` so they carry
        # the class-subdirectory prefix (e.g. "class1/file1.jpg").
        relative_path = os.path.join(
            dirname, os.path.relpath(absolute_path, directory))
        filenames.append(relative_path)
    return classes, filenames
class DirectoryIterator(Iterator):
    """Iterator capable of reading images from a directory on disk.
    # Arguments
        directory: Path to the directory to read images from.
            Each subdirectory in this directory will be
            considered to contain images from one class,
            or alternatively you could specify class subdirectories
            via the `classes` argument.
        image_data_generator: Instance of `ImageDataGenerator`
            to use for random transformations and normalization.
        target_size: tuple of integers, dimensions to resize input images to.
        color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
        classes: Optional list of strings, names of subdirectories
            containing images from each class (e.g. `["dogs", "cats"]`).
            It will be computed automatically if not set.
        class_mode: Mode for yielding the targets:
            `"binary"`: binary targets (if there are only two classes),
            `"categorical"`: categorical targets,
            `"sparse"`: integer targets,
            `"input"`: targets are images identical to input images (mainly
                used to work with autoencoders),
            `None`: no targets get yielded (only input images are yielded).
        batch_size: Integer, size of a batch.
        shuffle: Boolean, whether to shuffle the data between epochs.
        seed: Random seed for data shuffling.
        data_format: String, one of `channels_first`, `channels_last`.
        save_to_dir: Optional directory where to save the pictures
            being yielded, in a viewable format. This is useful
            for visualizing the random transformations being
            applied, for debugging purposes.
        save_prefix: String prefix to use for saving sample
            images (if `save_to_dir` is set).
        save_format: Format to use for saving sample images
            (if `save_to_dir` is set).
        subset: Subset of data (`"training"` or `"validation"`) if
            validation_split is set in ImageDataGenerator.
        interpolation: Interpolation method used to resample the image if the
            target size is different from that of the loaded image.
            Supported methods are "nearest", "bilinear", and "bicubic".
            If PIL version 1.1.3 or newer is installed, "lanczos" is also
            supported. If PIL version 3.4.0 or newer is installed, "box" and
            "hamming" are also supported. By default, "nearest" is used.
    """
    def __init__(self, directory, image_data_generator,
                 target_size=(256, 256), color_mode='rgb',
                 classes=None, class_mode='categorical',
                 batch_size=32, shuffle=True, seed=None,
                 data_format=None,
                 save_to_dir=None, save_prefix='', save_format='png',
                 follow_links=False,
                 subset=None,
                 interpolation='nearest'):
        if data_format is None:
            data_format = K.image_data_format()
        self.directory = directory
        self.image_data_generator = image_data_generator
        self.target_size = tuple(target_size)
        if color_mode not in {'rgb', 'grayscale'}:
            raise ValueError('Invalid color mode:', color_mode,
                             '; expected "rgb" or "grayscale".')
        self.color_mode = color_mode
        self.data_format = data_format
        # Per-sample shape depends on the channel count (3 for rgb, 1 for
        # grayscale) and on whether channels come first or last.
        if self.color_mode == 'rgb':
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (3,)
            else:
                self.image_shape = (3,) + self.target_size
        else:
            if self.data_format == 'channels_last':
                self.image_shape = self.target_size + (1,)
            else:
                self.image_shape = (1,) + self.target_size
        self.classes = classes
        if class_mode not in {'categorical', 'binary', 'sparse',
                              'input', None}:
            raise ValueError('Invalid class_mode:', class_mode,
                             '; expected one of "categorical", '
                             '"binary", "sparse", "input"'
                             ' or None.')
        self.class_mode = class_mode
        self.save_to_dir = save_to_dir
        self.save_prefix = save_prefix
        self.save_format = save_format
        self.interpolation = interpolation
        if subset is not None:
            validation_split = self.image_data_generator._validation_split
            # `split` is a (start, stop) fraction window over each class's
            # sorted file list: validation takes the leading fraction,
            # training takes the remainder.
            if subset == 'validation':
                split = (0, validation_split)
            elif subset == 'training':
                split = (validation_split, 1)
            else:
                raise ValueError('Invalid subset name: ', subset,
                                 '; expected "training" or "validation"')
        else:
            split = None
        self.subset = subset
        white_list_formats = {'png', 'jpg', 'jpeg', 'bmp',
                              'ppm', 'tif', 'tiff'}
        # First, count the number of samples and classes.
        self.samples = 0
        if not classes:
            # Infer class names from subdirectory names, in sorted order;
            # this ordering determines the label indices.
            classes = []
            for subdir in sorted(os.listdir(directory)):
                if os.path.isdir(os.path.join(directory, subdir)):
                    classes.append(subdir)
        self.num_classes = len(classes)
        self.class_indices = dict(zip(classes, range(len(classes))))
        # Counting and listing files is I/O-bound, so a thread pool is
        # used to scan the class subdirectories in parallel.
        pool = multiprocessing.pool.ThreadPool()
        function_partial = partial(_count_valid_files_in_directory,
                                   white_list_formats=white_list_formats,
                                   follow_links=follow_links,
                                   split=split)
        self.samples = sum(pool.map(function_partial,
                                    (os.path.join(directory, subdir)
                                     for subdir in classes)))
        print('Found %d images belonging to %d classes.' %
              (self.samples, self.num_classes))
        # Second, build an index of the images
        # in the different class subfolders.
        results = []
        self.filenames = []
        self.classes = np.zeros((self.samples,), dtype='int32')
        i = 0
        for dirpath in (os.path.join(directory, subdir) for subdir in classes):
            results.append(
                pool.apply_async(_list_valid_filenames_in_directory,
                                 (dirpath, white_list_formats, split,
                                  self.class_indices, follow_links)))
        # `results` preserves class order, so consuming it sequentially
        # fills `self.classes` in contiguous per-class runs at offset `i`.
        for res in results:
            classes, filenames = res.get()
            self.classes[i:i + len(classes)] = classes
            self.filenames += filenames
            i += len(classes)
        pool.close()
        pool.join()
        super(DirectoryIterator, self).__init__(self.samples,
                                                batch_size,
                                                shuffle,
                                                seed)
    def _get_batches_of_transformed_samples(self, index_array):
        # Load, augment and standardize one image per index in
        # `index_array`, then assemble the matching label batch.
        batch_x = np.zeros(
            (len(index_array),) + self.image_shape,
            dtype=K.floatx())
        grayscale = self.color_mode == 'grayscale'
        # build batch of image data
        for i, j in enumerate(index_array):
            fname = self.filenames[j]
            img = load_img(os.path.join(self.directory, fname),
                           grayscale=grayscale,
                           target_size=self.target_size,
                           interpolation=self.interpolation)
            x = img_to_array(img, data_format=self.data_format)
            x = self.image_data_generator.random_transform(x)
            x = self.image_data_generator.standardize(x)
            batch_x[i] = x
        # optionally save augmented images to disk for debugging purposes
        if self.save_to_dir:
            for i, j in enumerate(index_array):
                img = array_to_img(batch_x[i], self.data_format, scale=True)
                # Random hash suffix so repeated epochs do not overwrite
                # previously saved files.
                fname = '{prefix}_{index}_{hash}.{format}'.format(
                    prefix=self.save_prefix,
                    index=j,
                    hash=np.random.randint(1e7),
                    format=self.save_format)
                img.save(os.path.join(self.save_to_dir, fname))
        # build batch of labels
        if self.class_mode == 'input':
            # Autoencoder-style targets: the inputs themselves.
            batch_y = batch_x.copy()
        elif self.class_mode == 'sparse':
            batch_y = self.classes[index_array]
        elif self.class_mode == 'binary':
            batch_y = self.classes[index_array].astype(K.floatx())
        elif self.class_mode == 'categorical':
            # One-hot encode the integer class labels.
            batch_y = np.zeros(
                (len(batch_x), self.num_classes),
                dtype=K.floatx())
            for i, label in enumerate(self.classes[index_array]):
                batch_y[i, label] = 1.
        else:
            # class_mode is None: yield only the image data.
            return batch_x
        return batch_x, batch_y
    def next(self):
        """For python 2.x.
        # Returns
            The next batch.
        """
        # Only index advancement is guarded by the lock; the expensive
        # image loading/augmentation can then run in parallel threads.
        with self.lock:
            index_array = next(self.index_generator)
        # The transformation of images is not under thread lock
        # so it can be done in parallel
        return self._get_batches_of_transformed_samples(index_array)
| keras/preprocessing/image.py | 69,394 | Iterator capable of reading images from a directory on disk.
# Arguments
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
Generate batches of tensor image data with real-time data augmentation.
The data will be looped over (in batches).
# Arguments
featurewise_center: Boolean.
Set input mean to 0 over the dataset, feature-wise.
samplewise_center: Boolean. Set each sample mean to 0.
featurewise_std_normalization: Boolean.
Divide inputs by std of the dataset, feature-wise.
samplewise_std_normalization: Boolean. Divide each input by its std.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
zca_whitening: Boolean. Apply ZCA whitening.
rotation_range: Int. Degree range for random rotations.
width_shift_range: Float, 1-D array-like or int
- float: fraction of total width, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-width_shift_range, +width_shift_range)`
- With `width_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `width_shift_range=[-1, 0, +1]`,
while with `width_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
height_shift_range: Float, 1-D array-like or int
- float: fraction of total height, if < 1, or pixels if >= 1.
- 1-D array-like: random elements from the array.
- int: integer number of pixels from interval
`(-height_shift_range, +height_shift_range)`
- With `height_shift_range=2` possible values
are integers `[-1, 0, +1]`,
same as with `height_shift_range=[-1, 0, +1]`,
while with `height_shift_range=1.0` possible values are floats in
the interval [-1.0, +1.0).
shear_range: Float. Shear Intensity
(Shear angle in counter-clockwise direction in degrees)
zoom_range: Float or [lower, upper]. Range for random zoom.
If a float, `[lower, upper] = [1-zoom_range, 1+zoom_range]`.
channel_shift_range: Float. Range for random channel shifts.
fill_mode: One of {"constant", "nearest", "reflect" or "wrap"}.
Default is 'nearest'.
Points outside the boundaries of the input are filled
according to the given mode:
- 'constant': kkkkkkkk|abcd|kkkkkkkk (cval=k)
- 'nearest': aaaaaaaa|abcd|dddddddd
- 'reflect': abcddcba|abcd|dcbaabcd
- 'wrap': abcdabcd|abcd|abcdabcd
cval: Float or Int.
Value used for points outside the boundaries
when `fill_mode = "constant"`.
horizontal_flip: Boolean. Randomly flip inputs horizontally.
vertical_flip: Boolean. Randomly flip inputs vertically.
rescale: rescaling factor. Defaults to None.
If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided
(before applying any other transformation).
        preprocessing_function: function that will be applied on each input.
The function will run after the image is resized and augmented.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: Image data format,
either "channels_first" or "channels_last".
"channels_last" mode means that the images should have shape
`(samples, height, width, channels)`,
"channels_first" mode means that the images should have shape
`(samples, channels, height, width)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
validation_split: Float. Fraction of images reserved for validation
(strictly between 0 and 1).
# Examples
Example of using `.flow(x, y)`:
```python
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(x_train)
# fits the model on batches with real-time data augmentation:
model.fit_generator(datagen.flow(x_train, y_train, batch_size=32),
steps_per_epoch=len(x_train) / 32, epochs=epochs)
# here's a more "manual" example
for e in range(epochs):
print('Epoch', e)
batches = 0
for x_batch, y_batch in datagen.flow(x_train, y_train, batch_size=32):
model.fit(x_batch, y_batch)
batches += 1
if batches >= len(x_train) / 32:
# we need to break the loop by hand because
# the generator loops indefinitely
break
```
Example of using `.flow_from_directory(directory)`:
```python
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
'data/train',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
'data/validation',
target_size=(150, 150),
batch_size=32,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50,
validation_data=validation_generator,
validation_steps=800)
```
Example of transforming images and masks together.
```python
# we create two instances with the same arguments
data_gen_args = dict(featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=90.,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)
# Provide the same seed and keyword arguments to the fit and flow methods
seed = 1
image_datagen.fit(images, augment=True, seed=seed)
mask_datagen.fit(masks, augment=True, seed=seed)
image_generator = image_datagen.flow_from_directory(
'data/images',
class_mode=None,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
'data/masks',
class_mode=None,
seed=seed)
# combine generators into one which yields image and masks
train_generator = zip(image_generator, mask_generator)
model.fit_generator(
train_generator,
steps_per_epoch=2000,
epochs=50)
```
Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
# Arguments
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
Iterator yielding data from a Numpy array.
# Arguments
x: Numpy array of input data or tuple.
            If tuple, the second element is either
another numpy array or a list of numpy arrays,
each of which gets passed
through as an output without any modifications.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
sample_weight: Numpy array of sample weights.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
subset: Subset of data (`"training"` or `"validation"`) if
validation_split is set in ImageDataGenerator.
Counts files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: absolute path to the directory
containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
follow_links: boolean.
# Returns
the count of files with extension in `white_list_formats` contained in
the directory.
Gets a batch of transformed samples.
# Arguments
index_array: Array of sample indices to include in batch.
# Returns
A batch of transformed samples.
Iterates on files with extension in `white_list_formats` contained in `directory`.
# Arguments
directory: Absolute path to the directory
containing files to be counted
white_list_formats: Set of strings containing allowed extensions for
the files to be counted.
follow_links: Boolean.
# Yields
Tuple of (root, filename) with extension in `white_list_formats`.
Lists paths of files in `subdir` with extensions in `white_list_formats`.
# Arguments
directory: absolute path to a directory containing the files to list.
The directory name is used as class label
and must be a key of `class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
split: tuple of floats (e.g. `(0.2, 0.6)`) to only take into
account a certain fraction of files in each directory.
            E.g.: `split=(0.6, 1.0)` would only account for last 40 percent
of images in each directory.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
# Returns
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be
`["class1/file1.jpg", "class1/file2.jpg", ...]`).
Applies the image transformation specified by a matrix.
# Arguments
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
The transformed version of the input.
Converts a 3D Numpy array to a PIL Image instance.
# Arguments
x: Input Numpy array.
data_format: Image data format.
either "channels_first" or "channels_last".
scale: Whether to rescale image values
to be within `[0, 255]`.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
Computes the internal data stats related to the data-dependent transformations, based on an array of sample data.
Only required if `featurewise_center` or
`featurewise_std_normalization` or `zca_whitening` are set to True.
# Arguments
x: Sample data. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Boolean (default: False).
Whether to fit on randomly augmented samples.
rounds: Int (default: 1).
If using data augmentation (`augment=True`),
this is how many augmentation passes over the data to use.
seed: Int (default: None). Random seed.
Takes numpy data & label arrays, and generates batches of augmented data.
# Arguments
x: Input data. Numpy array of rank 4 or a tuple.
If tuple, the first element
should contain the images and the second element
another numpy array or a list of numpy arrays
that gets passed to the output
without any modifications.
Can be used to feed the model miscellaneous data
along with the images.
In case of grayscale data, the channels axis of the image array
should have value 1, and in case
of RGB data, it should have value 3.
y: Labels.
batch_size: Int (default: 32).
shuffle: Boolean (default: True).
sample_weight: Sample weights.
seed: Int (default: None).
save_to_dir: None or str (default: None).
This allows you to optionally specify a directory
to which to save the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str (default: `''`).
Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: one of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
# Returns
An `Iterator` yielding tuples of `(x, y)`
where `x` is a numpy array of image data
(in the case of a single image input) or a list
of numpy arrays (in the case with
additional inputs) and `y` is a numpy array
of corresponding labels. If 'sample_weight' is not None,
the yielded tuples are of the form `(x, y, sample_weight)`.
If `y` is None, only the numpy array `x` is returned.
Takes the path to a directory & generates batches of augmented data.
# Arguments
directory: Path to the target directory.
It should contain one subdirectory per class.
Any PNG, JPG, BMP, PPM or TIF images
inside each of the subdirectories directory tree
will be included in the generator.
See [this script](https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d)
for more details.
target_size: Tuple of integers `(height, width)`,
default: `(256, 256)`.
The dimensions to which all images found will be resized.
        color_mode: One of "grayscale", "rgb". Default: "rgb".
Whether the images will be converted to
have 1 or 3 color channels.
classes: Optional list of class subdirectories
(e.g. `['dogs', 'cats']`). Default: None.
If not provided, the list of classes will be automatically
inferred from the subdirectory names/structure
under `directory`, where each subdirectory will
be treated as a different class
(and the order of the classes, which will map to the label
indices, will be alphanumeric).
The dictionary containing the mapping from class names to class
indices can be obtained via the attribute `class_indices`.
class_mode: One of "categorical", "binary", "sparse",
"input", or None. Default: "categorical".
Determines the type of label arrays that are returned:
- "categorical" will be 2D one-hot encoded labels,
- "binary" will be 1D binary labels,
            - "sparse" will be 1D integer labels,
- "input" will be images identical
to input images (mainly used to work with autoencoders).
- If None, no labels are returned
(the generator will only yield batches of image data,
which is useful to use with `model.predict_generator()`,
`model.evaluate_generator()`, etc.).
Please note that in case of class_mode None,
the data still needs to reside in a subdirectory
of `directory` for it to work correctly.
batch_size: Size of the batches of data (default: 32).
shuffle: Whether to shuffle the data (default: True)
seed: Optional random seed for shuffling and transformations.
save_to_dir: None or str (default: None).
This allows you to optionally specify
a directory to which to save
the augmented pictures being generated
(useful for visualizing what you are doing).
save_prefix: Str. Prefix to use for filenames of saved pictures
(only relevant if `save_to_dir` is set).
save_format: One of "png", "jpeg"
(only relevant if `save_to_dir` is set). Default: "png".
follow_links: Whether to follow symlinks inside
class subdirectories (default: False).
subset: Subset of data (`"training"` or `"validation"`) if
`validation_split` is set in `ImageDataGenerator`.
interpolation: Interpolation method used to
resample the image if the
target size is different from that of the loaded image.
Supported methods are `"nearest"`, `"bilinear"`,
and `"bicubic"`.
If PIL version 1.1.3 or newer is installed, `"lanczos"` is also
supported. If PIL version 3.4.0 or newer is installed,
`"box"` and `"hamming"` are also supported.
By default, `"nearest"` is used.
# Returns
A `DirectoryIterator` yielding tuples of `(x, y)`
where `x` is a numpy array containing a batch
of images with shape `(batch_size, *target_size, channels)`
and `y` is a numpy array of corresponding labels.
Converts a PIL Image instance to a Numpy array.
# Arguments
img: PIL Image instance.
data_format: Image data format,
either "channels_first" or "channels_last".
# Returns
A 3D Numpy array.
# Raises
ValueError: if invalid `img` or `data_format` is passed.
Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
For python 2.x.
# Returns
The next batch.
For python 2.x.
# Returns
The next batch.
Performs a random brightness shift.
# Arguments
x: Input tensor. Must be 3D.
brightness_range: Tuple of floats; brightness range.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
# Raises
        ValueError: if `brightness_range` isn't a tuple.
Performs a random channel shift.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
channel_axis: Index of axis for channels in the input tensor.
# Returns
Numpy image tensor.
Performs a random rotation of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Rotated Numpy image tensor.
Performs a random spatial shear of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
intensity: Transformation intensity in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Sheared Numpy image tensor.
Performs a random spatial shift of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Shifted Numpy image tensor.
Randomly augments a single image tensor.
# Arguments
x: 3D tensor, single image.
seed: Random seed.
# Returns
A randomly transformed version of the input (same shape).
Performs a random spatial zoom of a Numpy image tensor.
# Arguments
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
# Returns
Zoomed Numpy image tensor.
# Raises
ValueError: if `zoom_range` isn't a tuple.
Saves an image stored as a Numpy array to a path or file object.
# Arguments
path: Path or file object.
x: Numpy array.
data_format: Image data format,
either "channels_first" or "channels_last".
file_format: Optional file format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
scale: Whether to rescale image values to be within `[0, 255]`.
**kwargs: Additional keyword arguments passed to `PIL.Image.save()`.
Applies the normalization configuration to a batch of inputs.
# Arguments
x: Batch of inputs to be normalized.
# Returns
The inputs, normalized.
Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
These methods were only introduced in version 3.4.0 (2016). This method is new in version 1.1.3 (2013). no need to do offset Original Numpy array x has format (height, width, channel) or (channel, height, width) but target PIL image has format (width, height, channel) RGB grayscale Numpy array x has format (height, width, channel) or (channel, height, width) but original PIL image has format (width, height, channel) x is a single image, so it doesn't have image number at index 0 Use composition of homographies to generate final transform that needs to be applied 1-D array-like or int floating point 1-D array-like or int floating point round up Ensure self.batch_index is 0. Needed if we want to do something like: for x, y in data_gen.flow(...): Keeps under lock only the mechanism which advances the indexing of each batch. The transformation of images is not under thread lock so it can be done in parallel First, count the number of samples and classes. Second, build an index of the images in the different class subfolders. build batch of image data optionally save augmented images to disk for debugging purposes build batch of labels The transformation of images is not under thread lock so it can be done in parallel | 26,783 | en | 0.68613 |
import re
from streamlink.compat import urlparse, parse_qsl
from streamlink.plugin import Plugin, PluginError
from streamlink.plugin.api import http, validate
from streamlink.plugin.api.utils import parse_query
from streamlink.stream import HTTPStream, HLSStream
from streamlink.compat import parse_qsl
from streamlink.stream.ffmpegmux import MuxedStream
# Public YouTube Data API v3 key used for channel -> live-video lookups.
API_KEY = "AIzaSyBDBi-4roGzWJN4du9TuDMLd_jVTcVkKz4"
API_BASE = "https://www.googleapis.com/youtube/v3"
API_SEARCH_URL = API_BASE + "/search"
# Unofficial endpoint returning the player configuration for a video id.
API_VIDEO_INFO = "http://youtube.com/get_video_info"
# Sent with video-info and HLS requests; some endpoints reject clients
# without a browser-like User-Agent.
HLS_HEADERS = {
    "User-Agent": "Mozilla/5.0"
}
def parse_stream_map(stream_map):
    """Split a comma-separated stream map into parsed query dicts.

    Returns an empty list for a missing/empty map.
    """
    if not stream_map:
        return []

    entries = stream_map.split(",")
    return [parse_query(entry) for entry in entries]
def parse_fmt_list(formatsmap):
    """Parse YouTube's ``fmt_list`` parameter into ``{itag: quality}``.

    Each entry looks like ``"22/1280x720/9/0/115"``: the first field is the
    itag and the second is ``WIDTHxHEIGHT``; the quality name is derived
    from the height (e.g. ``"720p"``).

    # Arguments
        formatsmap: Comma-separated fmt_list string, possibly empty/None.

    # Returns
        Dict mapping integer itag to quality-name string.
    """
    formats = {}
    if not formatsmap:
        return formats

    # Fixed: the loop variable previously shadowed the builtin `format`,
    # and the unused width was bound to a throwaway name.
    for entry in formatsmap.split(","):
        fields = entry.split("/")
        _width, height = fields[1].split("x")
        formats[int(fields[0])] = "{0}p".format(height)

    return formats
_config_schema = validate.Schema(
{
validate.optional("fmt_list"): validate.all(
validate.text,
validate.transform(parse_fmt_list)
),
validate.optional("url_encoded_fmt_stream_map"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
"itag": validate.all(
validate.text,
validate.transform(int)
),
"quality": validate.text,
"url": validate.url(scheme="http"),
validate.optional("s"): validate.text,
validate.optional("stereo3d"): validate.all(
validate.text,
validate.transform(int),
validate.transform(bool)
),
}]
),
validate.optional("adaptive_fmts"): validate.all(
validate.text,
validate.transform(parse_stream_map),
[{
validate.optional("s"): validate.text,
"type": validate.all(
validate.text,
validate.transform(lambda t: t.split(";")[0].split("/")),
[validate.text, validate.text]
),
"url": validate.all(
validate.url(scheme="http")
)
}]
),
validate.optional("hlsvp"): validate.text,
validate.optional("live_playback"): validate.transform(bool),
"status": validate.text
}
)
_search_schema = validate.Schema(
{
"items": [{
"id": {
"videoId": validate.text
}
}]
},
validate.get("items")
)
_channelid_re = re.compile(r'meta itemprop="channelId" content="([^"]+)"')
_livechannelid_re = re.compile(r'meta property="og:video:url" content="([^"]+)')
_url_re = re.compile(r"""
http(s)?://(\w+\.)?youtube.com
(?:
(?:
/(watch.+v=|embed/|v/)
(?P<video_id>[0-9A-z_-]{11})
)
|
(?:
/(user|channel)/(?P<user>[^/?]+)
)
|
(?:
/c/(?P<liveChannel>[^/?]+)/live
)
)
""", re.VERBOSE)
class YouTube(Plugin):
    """Streamlink plugin for YouTube videos and live streams."""

    # itag -> quality name for video-only adaptive (DASH) formats that are
    # not present in the regular stream map.
    adp_video = {
        137: "1080p",
        303: "1080p60",  # HFR
        299: "1080p60",  # HFR
        264: "1440p",
        308: "1440p60",  # HFR
        266: "2160p",
        315: "2160p60",  # HFR
        138: "2160p",
        302: "720p60",  # HFR
    }
    # itag -> audio bitrate (kbit/s) for audio-only adaptive formats.
    adp_audio = {
        140: 128,
        141: 256,
        171: 128,
        249: 48,
        250: 64,
        251: 160,
    }

    @classmethod
    def can_handle_url(cls, url):
        # Fixed: classmethod first parameter is conventionally `cls`,
        # not `self` (behavior unchanged).
        return _url_re.match(url)

    @classmethod
    def stream_weight(cls, stream):
        """Weight 3D streams just below and HFR streams just above their
        base quality so sorting picks sensible defaults."""
        match_3d = re.match(r"(\w+)_3d", stream)
        match_hfr = re.match(r"(\d+p)(\d+)", stream)
        if match_3d:
            weight, group = Plugin.stream_weight(match_3d.group(1))
            weight -= 1
            group = "youtube_3d"
        elif match_hfr:
            weight, group = Plugin.stream_weight(match_hfr.group(1))
            weight += 1
            group = "high_frame_rate"
        else:
            weight, group = Plugin.stream_weight(stream)
        return weight, group

    def _find_channel_video(self):
        """Scrape the channel id from the page, then look up its live video."""
        res = http.get(self.url)
        match = _channelid_re.search(res.text)
        if not match:
            return
        return self._get_channel_video(match.group(1))

    def _get_channel_video(self, channel_id):
        """Return the id of the channel's current live video via the Data API."""
        query = {
            "channelId": channel_id,
            "type": "video",
            "eventType": "live",
            "part": "id",
            "key": API_KEY
        }
        res = http.get(API_SEARCH_URL, params=query)
        videos = http.json(res, schema=_search_schema)
        for video in videos:
            video_id = video["id"]["videoId"]
            return video_id

    def _find_canonical_stream_info(self):
        """Resolve a /c/<name>/live URL to its canonical video URL and recurse."""
        res = http.get(self.url)
        match = _livechannelid_re.search(res.text)
        if not match:
            return
        return self._get_stream_info(match.group(1))

    def _get_stream_info(self, url):
        """Fetch and validate the player config for whatever `url` points at.

        Returns None when no video id could be determined.
        """
        match = _url_re.match(url)
        user = match.group("user")
        live_channel = match.group("liveChannel")
        if user:
            video_id = self._find_channel_video()
        elif live_channel:
            return self._find_canonical_stream_info()
        else:
            video_id = match.group("video_id")
            if video_id == "live_stream":
                # Embedded live_stream URLs carry the channel in the query.
                query_info = dict(parse_qsl(urlparse(url).query))
                if "channel" in query_info:
                    video_id = self._get_channel_video(query_info["channel"])
        if not video_id:
            return
        params = {
            "video_id": video_id,
            "el": "player_embedded"
        }
        res = http.get(API_VIDEO_INFO, params=params, headers=HLS_HEADERS)
        return parse_query(res.text, name="config", schema=_config_schema)

    def _get_streams(self):
        info = self._get_stream_info(self.url)
        if not info:
            return

        formats = info.get("fmt_list")
        streams = {}
        protected = False
        for stream_info in info.get("url_encoded_fmt_stream_map", []):
            # Streams with an "s" field use encrypted signatures we
            # cannot decode; remember so we can report it.
            if stream_info.get("s"):
                protected = True
                continue
            stream = HTTPStream(self.session, stream_info["url"])
            name = formats.get(stream_info["itag"]) or stream_info["quality"]
            if stream_info.get("stereo3d"):
                name += "_3d"
            streams[name] = stream

        adaptive_streams = {}
        best_audio_itag = None
        # Extract audio streams from the DASH format list
        for stream_info in info.get("adaptive_fmts", []):
            if stream_info.get("s"):
                protected = True
                continue
            stream_params = dict(parse_qsl(stream_info["url"]))
            if "itag" not in stream_params:
                continue
            itag = int(stream_params["itag"])
            # extract any high quality streams only available in adaptive formats
            adaptive_streams[itag] = stream_info["url"]
            stream_type, stream_format = stream_info["type"]
            if stream_type == "audio":
                stream = HTTPStream(self.session, stream_info["url"])
                name = "audio_{0}".format(stream_format)
                streams[name] = stream
                # find the best quality audio stream m4a, opus or vorbis
                if best_audio_itag is None or self.adp_audio[itag] > self.adp_audio[best_audio_itag]:
                    best_audio_itag = itag

        if best_audio_itag and adaptive_streams and MuxedStream.is_usable(self.session):
            # Mux video-only adaptive streams with the best audio stream.
            aurl = adaptive_streams[best_audio_itag]
            for itag, name in self.adp_video.items():
                if itag in adaptive_streams:
                    vurl = adaptive_streams[itag]
                    streams[name] = MuxedStream(self.session,
                                                HTTPStream(self.session, vurl),
                                                HTTPStream(self.session, aurl))

        hls_playlist = info.get("hlsvp")
        if hls_playlist:
            try:
                hls_streams = HLSStream.parse_variant_playlist(
                    self.session, hls_playlist, headers=HLS_HEADERS, namekey="pixels"
                )
                streams.update(hls_streams)
            except IOError as err:
                self.logger.warning("Failed to extract HLS streams: {0}", err)

        if not streams and protected:
            raise PluginError("This plugin does not support protected videos, "
                              "try youtube-dl instead")

        return streams
__plugin__ = YouTube
| src/streamlink/plugins/youtube.py | 8,945 | HFR HFR HFR HFR HFR Extract audio streams from the DASH format list extract any high quality streams only available in adaptive formats find the best quality audio stream m4a, opus or vorbis | 190 | en | 0.67676 |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, that gives you a reference to the
original proto.
"""
from typing import FrozenSet, Optional, Sequence
from struct2tensor import calculate_options
from struct2tensor import expression
from struct2tensor import expression_add
from struct2tensor import path
from struct2tensor import prensor
import tensorflow as tf
def reroot(root: expression.Expression,
           source_path: path.Path) -> expression.Expression:
  """Reroot to a new path, maintaining an input proto index.

  Similar to root.get_descendant_or_error(source_path): however, this
  method retains the ability to get a map to the original index.

  Args:
    root: the original root.
    source_path: the path to the new root.

  Returns:
    the new root.
  """
  # Wrap one _RerootExpression per path step, innermost first.
  result = root
  for field_name in source_path.field_list:
    result = _RerootExpression(result, field_name)
  return result
def create_proto_index_field(root: expression.Expression,
                             new_field_name: path.Step
                             ) -> expression.Expression:
  """Add a leaf under `root` mapping each row to its original proto index."""
  index_expression = _InputProtoIndexExpression(root)
  return expression_add.add_paths(
      root, {path.Path([new_field_name]): index_expression})
class _RerootRootNodeTensor(prensor.RootNodeTensor):
  """The reroot root node.

  This contains a map from a current index to the original index of a proto.
  """

  def __init__(self, size: tf.Tensor, input_proto_index: tf.Tensor):
    super().__init__(size)
    self._input_proto_index = input_proto_index

  @property
  def input_proto_index(self):
    # Tensor mapping each row of this root to its original proto index.
    return self._input_proto_index
def _get_proto_index_parent_index(node: prensor.RootNodeTensor):
  # One index per root row: [0, node.size) identifies each proto.
  return tf.range(node.size)
def _get_input_proto_index(node: prensor.RootNodeTensor):
  # A rerooted node carries an explicit map back to the original proto
  # index; a plain root maps 1:1 by position.
  if isinstance(node, _RerootRootNodeTensor):
    return node.input_proto_index
  return _get_proto_index_parent_index(node)
class _RerootExpression(expression.Expression):
  """Reroot to a new path, maintaining a input proto index."""

  def __init__(self, original_root: expression.Expression,
               field_name: path.Step):
    # is_repeated=True, type=None: the new root is a repeated message node.
    super().__init__(True, None)
    self._field_name = field_name
    self._original_root = original_root
    self._new_root = original_root.get_child_or_error(field_name)
    if self._new_root.type is not None:
      raise ValueError("New root must be a message type: {}".format(
          str(self._field_name)))
    # TODO(martinz): Check that the "original root source expression" has a type
    # in (_RerootExpression, prensor._ProtoRootExpression)
    # To do this, we need a general technique similar to
    # expression_add._is_true_source_expression: however, this should also cover
    # intermediate operations like "project".
    # Since this check is not present, if it should have fired, there will be
    # an error when calculate(...) is called.

  def get_source_expressions(self) -> Sequence[expression.Expression]:
    # Order matters: calculate() unpacks [old_root, new_root].
    return [self._original_root, self._new_root]

  def calculate(
      self,
      sources: Sequence[prensor.NodeTensor],
      destinations: Sequence[expression.Expression],
      options: calculate_options.Options,
      side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
    """Build the rerooted root node, carrying the input proto index along."""
    [old_root_value, new_root_value] = sources
    if isinstance(old_root_value, prensor.RootNodeTensor) and isinstance(
        new_root_value, prensor.ChildNodeTensor):
      old_input_proto_index = _get_input_proto_index(old_root_value)
      # Notice that the "gather" operation is similar to promote.
      return _RerootRootNodeTensor(
          tf.size(new_root_value.parent_index, out_type=tf.int64),
          tf.gather(old_input_proto_index, new_root_value.parent_index))
    raise ValueError("Source types incorrect")

  def calculation_is_identity(self) -> bool:
    return False

  def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression; type equality is sufficient.
    return isinstance(expr, _RerootExpression)

  def _get_child_impl(self,
                      field_name: path.Step) -> Optional[expression.Expression]:
    # Children are delegated to the new root.
    return self._new_root.get_child(field_name)

  def known_field_names(self) -> FrozenSet[path.Step]:
    return self._new_root.known_field_names()
class _InputProtoIndexExpression(expression.Leaf):
  """A proto index expression."""

  def __init__(self, root: expression.Expression):
    """Constructor for proto index expression.

    Args:
      root: an expression that must return a RootNodeTensor.
    """
    # Optional (non-repeated) int64 leaf: one index per root row.
    super().__init__(is_repeated=False, my_type=tf.int64)
    self._root = root

  def get_source_expressions(self) -> Sequence[expression.Expression]:
    return [self._root]

  def calculate(
      self,
      sources: Sequence[prensor.NodeTensor],
      destinations: Sequence[expression.Expression],
      options: calculate_options.Options,
      side_info: Optional[prensor.Prensor] = None) -> prensor.NodeTensor:
    """Materialize the input-proto-index leaf for the given root node."""
    [root_node] = sources
    # The following check ensures not just that we can calculate the value,
    # but that no "improper" reroots were done.
    if isinstance(root_node, prensor.RootNodeTensor):
      return prensor.LeafNodeTensor(
          _get_proto_index_parent_index(root_node),
          _get_input_proto_index(root_node),
          is_repeated=False)
    raise ValueError(
        "Illegal operation: expected a true root node: got {}".format(
            str(root_node)))

  def calculation_is_identity(self) -> bool:
    return False

  def calculation_equal(self, expr: expression.Expression) -> bool:
    # Although path can vary, it is not used in the calculation, just to
    # identify the expression; type equality is sufficient.
    return isinstance(expr, _InputProtoIndexExpression)
| struct2tensor/expression_impl/reroot.py | 6,382 | A proto index expression.
Reroot to a new path, maintaining a input proto index.
The reroot root node.
This contains a map from a current index to the original index of a proto.
Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
Reroot to a subtree, maintaining an input proto index.
reroot is similar to get_descendant_or_error. However, this method allows
you to call create_proto_index(...) later on, that gives you a reference to the
original proto.
Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TODO(martinz): Check that the "original root source expression" has a type in (_RerootExpression, prensor._ProtoRootExpression) To do this, we need a general technique similar to expression_add._is_true_source_expression: however, this should also cover intermediate operations like "project". Since this check is not present, if it should have fired, there will be an error when calculate(...) is called. Notice that the "gather" operation is similar to promote. Although path can vary, it is not used in the calculation, just to The following check ensures not just that we can calculate the value, but that no "improper" reroots were done. Although path can vary, it is not used in the calculation, just to | 2,057 | en | 0.85579 |
class GEDAColor:
    """ Enumeration of gEDA colors """
    # Color indices defined by the gEDA/gaf file format; the integer
    # values are written verbatim into .sch/.sym files.
    BACKGROUND_COLOR = 0
    PIN_COLOR = 1
    NET_ENDPOINT_COLOR = 2
    GRAPHIC_COLOR = 3
    NET_COLOR = 4
    ATTRIBUTE_COLOR = 5
    LOGIC_BUBBLE_COLOR = 6
    DOTS_GRID_COLOR = 7
    DETACHED_ATTRIBUTE_COLOR = 8
    TEXT_COLOR = 9
    BUS_COLOR = 10
    SELECT_COLOR = 11
    BOUNDINGBOX_COLOR = 12
    ZOOM_BOX_COLOR = 13
    STROKE_COLOR = 14
    LOCK_COLOR = 15
class GEDAParameter(object):
    """A single named parameter of a gEDA command.

    `datatype` is the Python type used to parse the raw field and
    `default` is the value used when the parameter is not supplied.
    """
    TYPE = ''

    def __init__(self, name, datatype=int, default=None):
        self._name = name
        self.datatype = datatype
        self.default = default

    @property
    def name(self):
        """Parameter name, prefixed with the class TYPE when one is set."""
        if not self.TYPE:
            return self._name
        return "_".join((self.TYPE, self._name))
class GEDAStyleParameter(GEDAParameter):
    """ Style parameter """
    # Prefixes the parameter name with "style_".
    TYPE = 'style'
class GEDAExtraParameter(GEDAParameter):
    """ Extra parameter """
    # Prefixes the parameter name with "extra_".
    TYPE = 'extra'
class GEDACommand(object):
    """Base class for gEDA file-format commands.

    Subclasses set TYPE (the command letter) and declare their
    PARAMETERS / EXTRA_PARAMETERS tuples.
    """
    TYPE = None
    PARAMETERS = ()
    EXTRA_PARAMETERS = ()

    def parameters(self):
        """Return all parameters: regular followed by extra."""
        return self.PARAMETERS + self.EXTRA_PARAMETERS

    def get_style_keywords(self):
        """Return the names of all style parameters of this command."""
        prefix = GEDAStyleParameter.TYPE
        return [param.name for param in self.PARAMETERS
                if param.name.startswith(prefix)]

    def update_default_kwargs(self, **kwargs):
        """Merge the given kwargs over the per-parameter defaults."""
        merged = dict((param.name, param.default)
                      for param in self.parameters())
        merged.update(kwargs)
        return merged

    def generate_command(self, **kwargs):
        """Render the command line with defaults filled in."""
        kwargs = self.update_default_kwargs(**kwargs)
        fields = [self.TYPE]
        fields.extend("%%(%s)s" % param.name for param in self.PARAMETERS)
        return [" ".join(fields) % kwargs]
class GEDALineCommand(GEDACommand):
    """ Line command """
    # 'L': straight line from (x1, y1) to (x2, y2) with stroke style.
    TYPE = 'L'
    PARAMETERS = (
        GEDAParameter('x1'),
        GEDAParameter('y1'),
        GEDAParameter('x2'),
        GEDAParameter('y2'),
        GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
        GEDAStyleParameter('width', default=10),
        GEDAStyleParameter('capstyle', default=0),
        GEDAStyleParameter('dashstyle', default=0),
        GEDAStyleParameter('dashlength', default=-1),
        GEDAStyleParameter('dashspace', default=-1),
    )
class GEDABoxCommand(GEDACommand):
    """ Box command """
    # 'B': rectangle at lower-left (x, y) with width/height, stroke and fill.
    TYPE = "B"
    PARAMETERS = (
        GEDAParameter('x'),
        GEDAParameter('y'),
        GEDAParameter('width'),
        GEDAParameter('height'),
        GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
        GEDAStyleParameter('width', default=10),
        GEDAStyleParameter('capstyle', default=0),
        GEDAStyleParameter('dashstyle', default=0),
        GEDAStyleParameter('dashlength', default=-1),
        GEDAStyleParameter('dashspace', default=-1),
        GEDAStyleParameter('filltype', default=0),
        GEDAStyleParameter('fillwidth', default=-1),
        GEDAStyleParameter('angle1', default=-1),
        GEDAStyleParameter('pitch1', default=-1),
        GEDAStyleParameter('angle2', default=-1),
        GEDAStyleParameter('pitch2', default=-1),
    )
class GEDACircleCommand(GEDACommand):
    """ Circle command """
    # 'V': circle centered at (x, y) with radius, stroke and fill style.
    TYPE = 'V'
    PARAMETERS = (
        GEDAParameter('x'),
        GEDAParameter('y'),
        GEDAParameter('radius'),
        GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
        GEDAStyleParameter('width', default=10),
        GEDAStyleParameter('capstyle', default=0),
        GEDAStyleParameter('dashstyle', default=0),
        GEDAStyleParameter('dashlength', default=-1),
        GEDAStyleParameter('dashspace', default=-1),
        GEDAStyleParameter('filltype', default=0),
        GEDAStyleParameter('fillwidth', default=-1),
        GEDAStyleParameter('angle1', default=-1),
        GEDAStyleParameter('pitch1', default=-1),
        GEDAStyleParameter('angle2', default=-1),
        GEDAStyleParameter('pitch2', default=-1),
    )
class GEDAArcCommand(GEDACommand):
    """ Arc command """
    # 'A': arc centered at (x, y) from startangle sweeping sweepangle degrees.
    TYPE = 'A'
    PARAMETERS = (
        GEDAParameter('x'),
        GEDAParameter('y'),
        GEDAParameter('radius'),
        GEDAParameter('startangle'),
        GEDAParameter('sweepangle'),
        GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
        GEDAStyleParameter('width', default=10),
        GEDAStyleParameter('capstyle', default=0),
        GEDAStyleParameter('dashstyle', default=0),
        GEDAStyleParameter('dashlength', default=-1),
        GEDAStyleParameter('dashspace', default=-1),
    )
class GEDATextCommand(GEDACommand):
    """ Text command """
    # 'T': text/attribute at (x, y); size has no default and must be given.
    TYPE = 'T'
    PARAMETERS = (
        GEDAParameter('x'),
        GEDAParameter('y'),
        GEDAStyleParameter('color', default=GEDAColor.TEXT_COLOR),
        # GEDAStyleParameter('size', default=10),
        GEDAParameter('size'),
        GEDAParameter('visibility', default=1),
        GEDAParameter('show_name_value', default=1),
        GEDAParameter('angle', default=0),
        GEDAParameter('alignment', default=0),
        GEDAParameter('num_lines', default=1),
    )
class GEDASegmentCommand(GEDACommand):
    """ Segment command """
    # 'N': net segment from (x1, y1) to (x2, y2).
    TYPE = 'N'
    PARAMETERS = (
        GEDAParameter('x1'),
        GEDAParameter('y1'),
        GEDAParameter('x2'),
        GEDAParameter('y2'),
        GEDAStyleParameter('color', default=GEDAColor.NET_COLOR),
    )
class GEDAPinCommand(GEDACommand):
    """ Pin command """
    # 'P': component pin from (x1, y1) to (x2, y2).
    TYPE = 'P'
    PARAMETERS = (
        GEDAParameter('x1'),
        GEDAParameter('y1'),
        GEDAParameter('x2'),
        GEDAParameter('y2'),
        GEDAStyleParameter('color', default=GEDAColor.PIN_COLOR),
        # pin type is always 0
        GEDAStyleParameter('pintype', default=0),
        # first point is active/connected pin
        GEDAParameter('whichend', default=0),
    )
class GEDAComponentCommand(GEDACommand):
    """ Component command """
    # 'C': component placement referencing a symbol by basename.
    TYPE = 'C'
    PARAMETERS = (
        GEDAParameter('x'),
        GEDAParameter('y'),
        # GEDAParameter('selectable', default=0),
        GEDAParameter('selectable', default=1),
        GEDAParameter('angle'),
        GEDAParameter('mirror'),
        GEDAParameter('basename', datatype=str),
    )
class GEDAPathCommand(GEDACommand):
    """ Path command """
    # 'H': free-form path; num_lines counts the path data lines that follow.
    TYPE = "H"
    PARAMETERS = (
        GEDAStyleParameter('color', default=GEDAColor.GRAPHIC_COLOR),
        GEDAStyleParameter('width', default=10),
        GEDAStyleParameter('capstyle', default=0),
        GEDAStyleParameter('dashstyle', default=0),
        GEDAStyleParameter('dashlength', default=-1),
        GEDAStyleParameter('dashspace', default=-1),
        GEDAStyleParameter('filltype', default=0),
        GEDAStyleParameter('fillwidth', default=-1),
        GEDAStyleParameter('angle1', default=-1),
        GEDAStyleParameter('pitch1', default=-1),
        GEDAStyleParameter('angle2', default=-1),
        GEDAStyleParameter('pitch2', default=-1),
        GEDAParameter('num_lines'),
    )
    # Fixed: this attribute was misspelled "EXTRA_PARAMERTERS", so
    # GEDACommand.parameters() never included the extra 'id' parameter.
    # The misspelled name is kept as an alias for backward compatibility.
    EXTRA_PARAMETERS = (
        GEDAExtraParameter('id'),
    )
    EXTRA_PARAMERTERS = EXTRA_PARAMETERS
class GEDAVersionCommand(GEDACommand):
    """ Version command """
    # 'v': file header recording the gEDA release and file-format version.
    TYPE = 'v'
    PARAMETERS = (
        GEDAParameter('version'),
        GEDAParameter('fileformat_version'),
    )
class GEDABusCommand(GEDACommand):
    """ Bus command """
    # 'U': bus segment from (x1, y1) to (x2, y2).
    TYPE = 'U'
    PARAMETERS = (
        GEDAParameter('x1'),
        GEDAParameter('y1'),
        GEDAParameter('x2'),
        GEDAParameter('y2'),
        GEDAStyleParameter('color', default=GEDAColor.BUS_COLOR),
        GEDAParameter('ripperdir', default=0),
    )
class GEDAPictureCommand(GEDACommand):
    """ Picture command """
    # 'G': embedded picture; parameters are parsed elsewhere.
    TYPE = 'G'
    PARAMETERS = ()
class GEDAEmbeddedEnvironmentCommand(GEDACommand):
    """ Embeded command """
    # '[': opens an embedded-symbol environment block.
    TYPE = '['
    PARAMETERS = ()
class GEDAAttributeEnvironmentCommand(GEDACommand):
    """ Attribute environment command """
    # '{': opens an attribute environment block.
    TYPE = '{'
    PARAMETERS = ()
class GEDACommand(GEDACommand):
    """ Command """
    # NOTE(review): this redefinition shadows the GEDACommand base class for
    # the rest of the module and duplicates GEDABusCommand's TYPE 'U' --
    # presumably it was meant to have a distinct name; confirm before fixing,
    # since renaming would change the module's public interface.
    TYPE = 'U'
    PARAMETERS = ()
| upconvert/parser/geda_commands.py | 8,127 | Arc command
Attribute environment command
Box command
Bus command
Circle command
Enumeration of gEDA colors
Command
Command
Component command
Embeded command
Extra parameter
Line command
Path command
Picture command
Pin command
Segment command
Style parameter
Text command
Version command
GEDAStyleParameter('size', default=10), pin type is always 0 first point is active/connected pin GEDAParameter('selectable', default=0), | 460 | en | 0.470069 |
"""
Django settings for doiainn project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'fbrywz7o3a1=vf-+4luwn5h)!kt-xzghqtm#^3(epwcwcp^jws'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'doiainn.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'doiainn.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| code/doiainn/doiainn/settings.py | 2,641 | Django settings for doiainn project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/1.8/ref/settings/databases Internationalization https://docs.djangoproject.com/en/1.8/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/1.8/howto/static-files/ | 892 | en | 0.655326 |
# -*- coding: utf-8 -*-
"""
User database models
--------------------
"""
import enum
import logging
import uuid
from flask import current_app
from sqlalchemy_utils import types as column_types
from flask_login import current_user # NOQA
from app.extensions import db, FeatherModel
from app.extensions.auth import security
from app.extensions.edm import EDMObjectMixin
from app.extensions.api.parameters import _get_is_static_role_property
import app.extensions.logging as AuditLog
log = logging.getLogger(__name__)
class UserEDMMixin(EDMObjectMixin):
    """Mixin that wires the User model into EDM synchronization.

    Declares which EDM attributes map onto local columns (or onto handler
    methods) and provides `ensure_edm_obj` to materialize a local stub
    record for an EDM guid.
    """

    # fmt: off
    # Name of the module, used for knowing what to sync i.e user.list, user.data
    EDM_NAME = 'user'

    # The EDM attribute for the version, if reported
    EDM_VERSION_ATTRIBUTE = 'version'

    # EDM attributes whose values are worth logging during sync
    EDM_LOG_ATTRIBUTES = [
        'emailAddress',
    ]

    # Maps EDM attribute -> local column name, None (ignored), or the name
    # of an instance method that processes the value.
    EDM_ATTRIBUTE_MAPPING = {
        # Ignored
        'id'                    : None,
        'lastLogin'             : None,
        'username'              : None,

        # Attributes
        'acceptedUserAgreement' : 'accepted_user_agreement',
        'affiliation'           : 'affiliation',
        'emailAddress'          : 'email',
        'fullName'              : 'full_name',
        'receiveEmails'         : 'receive_notification_emails',
        'sharing'               : 'shares_data',
        'userURL'               : 'website',
        'version'               : 'version',

        # Functions
        'organizations'         : '_process_edm_user_organization',
        'profileImageUrl'       : '_process_edm_user_profile_url',
    }
    # fmt: on

    @classmethod
    def ensure_edm_obj(cls, guid):
        """Return (user, is_new) for the given EDM guid.

        Creates a stub User (placeholder localhost email, random password)
        when no local record exists yet.
        """
        user = User.query.filter(User.guid == guid).first()
        is_new = user is None

        if is_new:
            email = '%s@localhost' % (guid,)
            password = User.initial_random_password()
            user = User(
                guid=guid,
                email=email,
                password=password,
                version=None,
                is_active=True,
                in_alpha=True,
            )
            with db.session.begin():
                db.session.add(user)
            db.session.refresh(user)

        return user, is_new

    def _process_edm_user_profile_url(self, url):
        # TODO is this actually needed
        # Fixed: log message previously named a non-existent method
        # ('_process_edm_profile_url') — now matches this method's name.
        log.warning('User._process_edm_user_profile_url() not implemented yet')

    def _process_edm_user_organization(self, org):
        # TODO is this actually needed
        log.warning('User._process_edm_user_organization() not implemented yet')
class User(db.Model, FeatherModel, UserEDMMixin):
    """
    User database model.

    TODO:
    * Upgrade to HoustonModel after full transition for Users out of EDM is
      complete
    """

    def __init__(self, *args, **kwargs):
        # A password is mandatory at construction time; callers that need an
        # "unset" password use initial_random_password() instead.
        if 'password' not in kwargs:
            raise ValueError('User must have a password')
        super().__init__(*args, **kwargs)

    guid = db.Column(
        db.GUID, default=uuid.uuid4, primary_key=True
    )  # pylint: disable=invalid-name

    # EDM object version as last reported by a sync; None for local-only users
    version = db.Column(db.BigInteger, default=None, nullable=True)

    email = db.Column(
        db.String(length=120), index=True, unique=True, default='', nullable=False
    )

    password = db.Column(
        column_types.PasswordType(max_length=128, schemes=('bcrypt',)), nullable=False
    )  # can be migrated from EDM field "password"

    full_name = db.Column(
        db.String(length=120), default='', nullable=False
    )  # can be migrated from EDM field "fullName"

    website = db.Column(
        db.String(length=120), nullable=True
    )  # can be migrated from EDM field "userURL"

    location = db.Column(db.String(length=120), default='', nullable=True)

    affiliation = db.Column(
        db.String(length=120), default='', nullable=True
    )  # can be migrated from BE field "affiliation"

    forum_id = db.Column(db.String(length=120), default='', nullable=True)
    locale = db.Column(db.String(length=20), default='EN', nullable=True)

    accepted_user_agreement = db.Column(
        db.Boolean, default=False, nullable=False
    )  # can be migrated from EDM field "acceptedUserAgreement"

    use_usa_date_format = db.Column(db.Boolean, default=True, nullable=False)
    show_email_in_profile = db.Column(db.Boolean, default=False, nullable=False)

    receive_notification_emails = db.Column(
        db.Boolean, default=True, nullable=False
    )  # can be migrated from BE field "receiveEmails"

    receive_newsletter_emails = db.Column(db.Boolean, default=False, nullable=False)

    shares_data = db.Column(
        db.Boolean, default=True, nullable=False
    )  # can be migrated from BE field "sharing"

    default_identification_catalogue = db.Column(
        db.GUID, nullable=True
    )  # this may just be a string, however EDM wants to do ID catalogues

    profile_fileupload_guid = db.Column(
        db.GUID, db.ForeignKey('file_upload.guid'), nullable=True
    )
    # 'FileUpload' failed to locate a name (class not yet loaded)
    # so explicitly import FileUpload here
    from app.modules.fileuploads.models import FileUpload

    profile_fileupload = db.relationship(FileUpload)

    organization_membership_enrollments = db.relationship(
        'OrganizationUserMembershipEnrollment', back_populates='user'
    )

    organization_moderator_enrollments = db.relationship(
        'OrganizationUserModeratorEnrollment', back_populates='user'
    )

    project_membership_enrollments = db.relationship(
        'ProjectUserMembershipEnrollment', back_populates='user'
    )

    user_collaboration_associations = db.relationship(
        'CollaborationUserAssociations', back_populates='user'
    )

    asset_groups = db.relationship(
        'AssetGroup',
        back_populates='owner',
        primaryjoin='User.guid == AssetGroup.owner_guid',
        order_by='AssetGroup.guid',
    )

    submitted_asset_groups = db.relationship(
        'AssetGroup',
        back_populates='submitter',
        primaryjoin='User.guid == AssetGroup.submitter_guid',
        order_by='AssetGroup.guid',
    )

    owned_encounters = db.relationship(
        'Encounter',
        back_populates='owner',
        primaryjoin='User.guid == Encounter.owner_guid',
        order_by='Encounter.guid',
    )

    submitted_encounters = db.relationship(
        'Encounter',
        back_populates='submitter',
        primaryjoin='User.guid == Encounter.submitter_guid',
        order_by='Encounter.guid',
    )

    owned_organizations = db.relationship(
        'Organization',
        back_populates='owner',
        primaryjoin='User.guid == Organization.owner_guid',
        order_by='Organization.guid',
    )

    owned_projects = db.relationship(
        'Project',
        back_populates='owner',
        primaryjoin='User.guid == Project.owner_guid',
        order_by='Project.guid',
    )

    # User may have many notifications
    notifications = db.relationship(
        'Notification',
        back_populates='recipient',
        primaryjoin='User.guid == Notification.recipient_guid',
        order_by='Notification.guid',
    )

    # All User specific Notification Preferences will be held in one instance
    notification_preferences = db.relationship(
        'UserNotificationPreferences',
        back_populates='user',
        primaryjoin='User.guid == UserNotificationPreferences.user_guid',
        order_by='UserNotificationPreferences.guid',
    )

    PUBLIC_USER_EMAIL = 'public@localhost'

    class StaticRoles(enum.Enum):
        # pylint: disable=missing-docstring,unsubscriptable-object
        # Each member is (bitmask, human-readable title, shorthand, property
        # name); the property names correspond to the is_*/in_* attributes
        # defined on User below.
        DATA_MANAGER = (0x100000, 'DataManager', 'DataManager', 'is_data_manager')
        USER_MANAGER = (0x80000, 'UserManager', 'UserManager', 'is_user_manager')
        CONTRIBUTOR = (0x40000, 'Contributor', 'Contributor', 'is_contributor')
        RESEARCHER = (0x20000, 'Researcher', 'Researcher', 'is_researcher')
        EXPORTER = (0x10000, 'Exporter', 'Exporter', 'is_exporter')
        INTERNAL = (0x08000, 'Internal', 'Internal', 'is_internal')
        ADMIN = (0x04000, 'Site Administrator', 'Admin', 'is_admin')
        STAFF = (0x02000, 'Staff Member', 'Staff', 'is_staff')
        ACTIVE = (0x01000, 'Active Account', 'Active', 'is_active')

        SETUP = (0x00800, 'Account in Setup', 'Setup', 'in_setup')
        RESET = (0x00400, 'Account in Password Reset', 'Reset', 'in_reset')
        ALPHA = (0x00200, 'Enrolled in Alpha', 'Alpha', 'in_alpha')
        BETA = (0x00100, 'Enrolled in Beta', 'Beta', 'in_beta')

        @property
        def mask(self):
            # Bit within User.static_roles representing this role
            return self.value[0]

        @property
        def title(self):
            return self.value[1]

        @property
        def shorthand(self):
            return self.value[2]

    # Bitfield of StaticRoles masks; read/written via the properties below
    static_roles = db.Column(db.Integer, default=0, nullable=False)

    is_contributor = _get_is_static_role_property(
        'is_contributor', StaticRoles.CONTRIBUTOR
    )
    is_user_manager = _get_is_static_role_property(
        'is_user_manager', StaticRoles.USER_MANAGER
    )
    is_data_manager = _get_is_static_role_property(
        'is_data_manager', StaticRoles.DATA_MANAGER
    )
    is_researcher = _get_is_static_role_property('is_researcher', StaticRoles.RESEARCHER)
    is_exporter = _get_is_static_role_property('is_exporter', StaticRoles.EXPORTER)
    is_internal = _get_is_static_role_property('is_internal', StaticRoles.INTERNAL)
    is_admin = _get_is_static_role_property('is_admin', StaticRoles.ADMIN)
    is_staff = _get_is_static_role_property('is_staff', StaticRoles.STAFF)
    is_active = _get_is_static_role_property('is_active', StaticRoles.ACTIVE)
    in_beta = _get_is_static_role_property('in_beta', StaticRoles.BETA)
    in_alpha = _get_is_static_role_property('in_alpha', StaticRoles.ALPHA)
    in_reset = _get_is_static_role_property('in_reset', StaticRoles.RESET)
    in_setup = _get_is_static_role_property('in_setup', StaticRoles.SETUP)

    @property
    def is_privileged(self):
        """Staff and internal accounts are treated as privileged."""
        return self.is_staff or self.is_internal

    def get_state(self):
        """Return shorthand names for all account states currently set."""
        state = []
        state += [self.StaticRoles.ACTIVE.shorthand] if self.is_active else []
        state += [self.StaticRoles.SETUP.shorthand] if self.in_setup else []
        state += [self.StaticRoles.RESET.shorthand] if self.in_reset else []
        state += [self.StaticRoles.ALPHA.shorthand] if self.in_alpha else []
        state += [self.StaticRoles.BETA.shorthand] if self.in_beta else []
        return state

    def get_roles(self):
        """Return shorthand names for all roles currently set."""
        roles = []
        roles += [self.StaticRoles.DATA_MANAGER.shorthand] if self.is_data_manager else []
        roles += [self.StaticRoles.USER_MANAGER.shorthand] if self.is_user_manager else []
        roles += [self.StaticRoles.INTERNAL.shorthand] if self.is_internal else []
        roles += [self.StaticRoles.ADMIN.shorthand] if self.is_admin else []
        roles += [self.StaticRoles.STAFF.shorthand] if self.is_staff else []
        roles += [self.StaticRoles.CONTRIBUTOR.shorthand] if self.is_contributor else []
        roles += [self.StaticRoles.RESEARCHER.shorthand] if self.is_researcher else []
        roles += [self.StaticRoles.EXPORTER.shorthand] if self.is_exporter else []
        return roles

    def __repr__(self):
        state = ', '.join(self.get_state())
        roles = ', '.join(self.get_roles())
        return (
            '<{class_name}('
            'guid={self.guid}, '
            'email="{self.email}", '
            'name="{self.full_name}", '
            'state={state}, '
            'roles={roles}'
            ')>'.format(
                class_name=self.__class__.__name__, self=self, state=state, roles=roles
            )
        )

    @classmethod
    def get_admins(cls):
        """Return admin users, excluding placeholder '@localhost' accounts."""
        # used for first run admin creation
        users = cls.query.all()  # NOQA

        admin_users = []
        for user in users:
            # TODO: Remove the check below at a later point after default admin create is removed
            if user.email.endswith('@localhost'):
                continue
            if user.is_admin:
                admin_users.append(user)

        return admin_users

    @classmethod
    def admin_user_initialized(cls):
        # used for first run admin creation
        return len(cls.get_admins()) > 0

    @classmethod
    def ensure_user(
        cls,
        email,
        password,
        is_internal=False,
        is_admin=False,
        is_staff=False,
        is_researcher=False,
        is_contributor=True,
        is_user_manager=False,
        is_exporter=False,
        is_active=True,
        in_beta=False,
        in_alpha=False,
        update=False,
        **kwargs,
    ):
        """
        Create a new user.

        If a user with this email already exists, it is returned unchanged
        unless ``update=True``, in which case its password and role flags
        are overwritten with the given values.
        """
        from app.extensions import db

        user = User.find(email=email)

        if user is None:
            user = User(
                password=password,
                email=email,
                is_internal=is_internal,
                is_admin=is_admin,
                is_staff=is_staff,
                is_active=is_active,
                is_researcher=is_researcher,
                is_contributor=is_contributor,
                is_user_manager=is_user_manager,
                is_exporter=is_exporter,
                in_beta=in_beta,
                in_alpha=in_alpha,
                **kwargs,
            )
            with db.session.begin():
                db.session.add(user)
            log.info('New user created: %r' % (user,))
        elif update:
            user.password = password
            user.is_internal = is_internal
            user.is_admin = is_admin
            user.is_staff = is_staff
            user.is_researcher = is_researcher
            user.is_contributor = is_contributor
            user.is_user_manager = is_user_manager
            user.is_exporter = is_exporter
            user.is_active = is_active
            user.in_beta = in_beta
            user.in_alpha = in_alpha
            with db.session.begin():
                db.session.merge(user)
            log.info('Updated user: %r' % (user,))

        db.session.refresh(user)
        return user

    @classmethod
    def find(cls, email=None, password=None, edm_login_fallback=True):
        """Find a user by email, optionally verifying a password.

        Tries the email as given and with an '@localhost' suffix. When a
        password is given and the local check fails, falls back to EDM
        authentication (and may sync/update the local record).
        """
        # Look-up via email
        if email is None:
            return None

        email_candidates = [
            email,
            '%s@localhost' % (email,),
        ]
        for email_candidate in email_candidates:
            user = cls.query.filter(User.email == email_candidate).first()

            if password is None:
                # If no password was provided to check, return any user account we find
                if user is not None:
                    return user
            else:
                # Check local Houston password first
                if user is not None:
                    # We found the user, check their provided password
                    if user.password == password:
                        return user

                # As a fallback, check all EDMs if the user can login
                if edm_login_fallback:
                    # We want to check the EDM even if we don't have a local user record
                    if current_app.edm.check_user_login(email_candidate, password):
                        log.info('User authenticated via EDM: %r' % (email_candidate,))

                        if user is not None:
                            # We authenticated a local user against an EDM (but the local password failed)
                            if user.password != password:
                                # The user passed the login with an EDM, update local password
                                log.warning(
                                    "Updating user's local password: %r" % (user,)
                                )
                                user = user.set_password(password)
                            return user
                        else:
                            log.critical(
                                'The user authenticated via EDM but has no local user record'
                            )
                            # Try syncing all users from EDM
                            cls.edm_sync_all()
                            # If the user was just synced, go grab it (recursively) and return
                            user = cls.find(email=email, edm_login_fallback=False)
                            return user

        # If we have gotten here, one of these things happened:
        #   1) the user wasn't found
        #   2) the user's password was provided and was incorrect
        #   3) the user authenticated against the EDM but has no local user record
        return None

    @classmethod
    def query_search(cls, search=None):
        """Build a query matching users by whitespace-separated search terms.

        Each term matches against email, affiliation, forum_id, full_name,
        or an unexpired check-in code; terms are AND-ed together.
        """
        from sqlalchemy import or_, and_
        from app.modules.auth.models import Code, CodeTypes

        if search is not None:
            search = search.strip().split(' ')
            search = [term.strip() for term in search]
            search = [term for term in search if len(term) > 0]

            or_terms = []
            for term in search:
                codes = (
                    Code.query.filter_by(code_type=CodeTypes.checkin)
                    .filter(
                        Code.accept_code.contains(term),
                    )
                    .all()
                )
                code_users = set([])
                for code in codes:
                    if not code.is_expired:
                        code_users.add(code.user.guid)

                or_term = or_(
                    cls.guid.in_(code_users),
                    cls.email.contains(term),
                    cls.affiliation.contains(term),
                    cls.forum_id.contains(term),
                    cls.full_name.contains(term),
                )
                or_terms.append(or_term)
            users = cls.query.filter(and_(*or_terms))
        else:
            users = cls.query

        return users

    @property
    def is_authenticated(self):
        # NOTE(review): presumably part of the Flask-Login user interface
        # (together with is_anonymous/get_id) — confirm against login manager
        return True

    @property
    def is_anonymous(self):
        return False

    @property
    def is_email_confirmed(self):
        """True when the most recent email-confirmation code was resolved."""
        from app.modules.auth.models import Code, CodeTypes

        # Get any codes that fit this request
        code = (
            Code.query.filter_by(user=self, code_type=CodeTypes.email)
            .order_by(Code.created.desc())
            .first()
        )
        if code is None:
            return False
        return code.is_resolved

    def get_org_memberships(self):
        """Organizations this user is a member of."""
        return [
            enrollment.organization
            for enrollment in self.organization_membership_enrollments
        ]

    def get_org_moderatorships(self):
        """Organizations this user moderates."""
        return [
            enrollment.organization
            for enrollment in self.organization_moderator_enrollments
        ]

    def get_projects(self):
        """Projects this user is enrolled in."""
        return [enrollment.project for enrollment in self.project_membership_enrollments]

    def get_collaborations_as_json(self):
        """Serialize all of this user's collaborations via the detailed schema."""
        from app.modules.collaborations.schemas import DetailedCollaborationSchema

        json_resp = []
        for collab_assoc in self.user_collaboration_associations:
            json_resp.append(
                DetailedCollaborationSchema().dump(collab_assoc.collaboration).data
            )
        return json_resp

    def get_notification_preferences(self):
        from app.modules.notifications.models import UserNotificationPreferences

        # User preferences are the system ones plus the ones stored in this class
        # Return the combination to the REST API
        preferences = UserNotificationPreferences.get_user_preferences(self)
        return preferences

    def unprocessed_asset_groups(self):
        """Guids of this user's asset groups that are not yet processed."""
        return [
            asset_group.guid
            for asset_group in self.asset_groups
            if not asset_group.is_processed()
        ]

    def unprocessed_sightings(self):
        """Guids of this user's sightings that have not reached the processed stage."""
        from app.modules.sightings.models import SightingStage

        return [
            sighting.guid
            for sighting in self.get_sightings()
            if not sighting.stage == SightingStage.processed
        ]

    def get_id(self):
        return self.guid

    def has_static_role(self, role):
        """True when this role's bit is set in the static_roles bitfield."""
        return (self.static_roles & role.mask) != 0

    def set_static_role(self, role):
        if self.has_static_role(role):
            return
        self.static_roles |= role.mask

    def unset_static_role(self, role):
        if not self.has_static_role(role):
            return
        # XOR is safe here: the guard above ensures the bit is currently set
        self.static_roles ^= role.mask

    def check_owner(self, user):
        return self == user

    def check_supervisor(self, user):
        # Supervision of a User record is equivalent to ownership
        return self.check_owner(user)

    def get_codes(self, code_type, **kwargs):
        # This import for Code needs to be local
        from app.modules.auth.models import Code

        code = Code.get(self, code_type, **kwargs)
        return code

    def get_invite_code(self):
        # This import for Code needs to be local
        from app.modules.auth.models import CodeTypes

        return self.get_codes(CodeTypes.invite, replace=True)

    def get_email_confirmation_code(self):
        # This import for Code needs to be local
        from app.modules.auth.models import CodeTypes

        return self.get_codes(CodeTypes.email, replace=True)

    def get_account_recovery_code(self):
        # This import for Code needs to be local
        from app.modules.auth.models import CodeTypes

        return self.get_codes(CodeTypes.recover, replace=True, replace_ttl=None)

    def set_password(self, password):
        """Persist a new password and return self."""
        if password is None:
            # This function "sets" the password, it's the responsibility of the caller to ensure it's valid
            raise ValueError('Empty password not allowed')

        self.password = password
        with db.session.begin():
            db.session.merge(self)
        db.session.refresh(self)

        return self

    def lockout(self):
        """Strip privileges from this account and revoke all auth artifacts."""
        from app.modules.auth.models import OAuth2Client, OAuth2Grant, OAuth2Token, Code

        # Disable permissions
        self.is_staff = False
        self.is_admin = False
        self.is_active = False
        self.in_reset = False
        self.in_setup = False
        with db.session.begin():
            db.session.merge(self)
        db.session.refresh(self)

        # Logout of sessions and API keys
        auth_list = []
        auth_list += OAuth2Token.query.filter_by(user_guid=self.guid).all()
        auth_list += OAuth2Grant.query.filter_by(user_guid=self.guid).all()
        auth_list += OAuth2Client.query.filter_by(user_guid=self.guid).all()
        auth_list += Code.query.filter_by(user_guid=self.guid).all()
        for auth_ in auth_list:
            auth_.delete()

        return self

    def owns_object(self, obj):
        """True when this user is considered the owner of ``obj``.

        Ownership semantics vary per type: direct owner field, owning the
        containing asset group, or owning any encounter of an individual.
        """
        from app.modules.assets.models import Asset
        from app.modules.asset_groups.models import AssetGroup
        from app.modules.encounters.models import Encounter
        from app.modules.sightings.models import Sighting
        from app.modules.projects.models import Project
        from app.modules.individuals.models import Individual
        from app.modules.notifications.models import Notification

        ret_val = False

        if isinstance(obj, User):
            ret_val = obj == self

        # AssetGroup, Encounters and Projects all have an owner field, check that
        elif isinstance(obj, (AssetGroup, Encounter, Project, Notification)):
            ret_val = obj.owner == self
        elif isinstance(obj, Asset):
            # assets are not owned directly by the user but the asset_group they're in is.
            # TODO: need to understand once assets become part of an encounter, do they still have a asset_group
            if obj.asset_group is not None:
                ret_val = obj.asset_group.owner is self
        elif isinstance(obj, Sighting):
            # decided (2021-03-12) that "owner" of a Sighting is not applicable therefore always False
            # permissions must be handled in ways not dependent on ownership
            ret_val = False
        elif isinstance(obj, Individual):
            for encounter in obj.get_encounters():
                if encounter.get_owner() is self:
                    ret_val = True
                    break

        return ret_val

    def get_my_annotations(self):
        """Annotations of all encounters owned by this user."""
        annotations = []
        for encounter in self.owned_encounters:
            annotations.extend(encounter.annotations)
        return annotations

    def get_all_encounters(self):
        annotations = self.get_my_annotations()
        # TODO add collaboration annotations
        return annotations

    def delete(self):
        """Delete this user and its owned asset groups (audit-logged)."""
        with db.session.begin():
            # TODO: Ensure proper cleanup
            for asset_group in self.asset_groups:
                asset_group.delete()
            AuditLog.delete_object(log, self)
            db.session.delete(self)

    @classmethod
    def initial_random_password(cls):
        """A 128-character random password for stub/placeholder accounts."""
        return security.generate_random(128)

    @classmethod
    def get_public_user(cls):
        """Return (creating if needed) the shared anonymous 'public' account."""
        return User.ensure_user(
            email=User.PUBLIC_USER_EMAIL,
            password=User.initial_random_password(),
            full_name='Public User',
            is_internal=True,
        )

    def get_sightings(self):
        """Distinct sightings across all encounters owned by this user."""
        sightings = []
        for encounter in self.owned_encounters:
            sighting = encounter.get_sighting()
            if sighting:
                sightings.append(encounter.get_sighting())
        sighting_set = set(sightings)
        return list(sighting_set)
# Account states that are transient and therefore not treated as roles
_TRANSIENT_STATES = ('in_setup', 'in_reset')

# Property names (the last element of each StaticRoles value tuple) for every
# role/state flag except the transient account states above.
USER_ROLES = [
    member.value[-1]
    for member in User.StaticRoles.__members__.values()
    if member.value[-1] not in _TRANSIENT_STATES
]
| app/modules/users/models.py | 25,852 | User database model.
TODO:
* Upgrade to HoustonModel after full transition for Users out of EDM is
complete
Create a new user.
User database models
--------------------
-*- coding: utf-8 -*- NOQA fmt: off Name of the module, used for knowing what to sync i.e user.list, user.data The EDM attribute for the version, if reported Ignored Attributes Functions fmt: on TODO is this actually needed TODO is this actually needed pylint: disable=invalid-name can me migrated from EDM field "password" can be migrated from EDM field "fullName" can be migrated from EDM field "userURL" can be migrated from BE field "affiliation" can be migrated from EDM field "acceptedUserAgreement" can be migrated from BE field "receiveEmails" can be migrated from BE field "sharing" this may just be a string, however EDM wants to do ID catalogues 'FileUpload' failed to locate a name (class not yet loaded) so explicitly import FileUpload here User may have many notifications All User specific Notification Preferences will be held in one instance pylint: disable=missing-docstring,unsubscriptable-object used for first run admin creation NOQA TODO: Remove the check below at a later point after default admin create is removed used for first run admin creation Look-up via email If no password was provided to check, return any user account we find Check local Houston password first We found the user, check their provided password As a fallback, check all EDMs if the user can login We want to check the EDM even if we don't have a local user record We authenticated a local user against an EDM (but the local password failed) The user passed the login with an EDM, update local password Try syncing all users from EDM If the user was just synced, go grab it (recursively) and return If we have gotten here, one of these things happened: 1) the user wasn't found 2) the user's password was provided and was incorrect 3) the user authenticated against the EDM but has no local user record Get any codes that fit this request User preferences are the system ones plus the ones stored in this class Return the combination to the REST API This import for Code needs to be local This import 
for Code needs to be local This import for Code needs to be local This import for Code needs to be local This function "sets" the password, it's the responsibility of the caller to ensure it's valid Disable permissions Logout of sessions and API keys AssetGroup, Encounters and Projects all have an owner field, check that assets are not owned directly by the user but the asset_group they're in is. TODO: need to understand once assets become part of an encounter, do they still have a asset_group decided (2021-03-12) that "owner" of a Sighting is not applicable therefore always False permissions must be handled in ways not dependent on ownership TODO add collaboration annotations TODO: Ensure proper cleanup | 2,898 | en | 0.898781 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""
Test PcapDataset
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import tensorflow_io.pcap as pcap_io # pylint: disable=wrong-import-position
# TF 1.x is not eager by default; enable eager execution so the dataset can
# be iterated directly below. TF 2.x (detected via tf.version) needs nothing.
if not (hasattr(tf, "version") and tf.version.VERSION.startswith("2.")):
    tf.compat.v1.enable_eager_execution()
def test_pcap_input():
    """Read the bundled http.pcap capture and verify its contents.

    Checks the timestamp and payload length of the first packet, and the
    total number of packets, against known values for the test file.
    """
    print("Testing PcapDataset")
    test_dir = os.path.dirname(os.path.abspath(__file__))
    pcap_url = "file://" + os.path.join(test_dir, "test_pcap", "http.pcap")

    dataset = pcap_io.PcapDataset([pcap_url], batch=1)

    packet_count = 0
    for packet_timestamp, packet_data in dataset:
        if packet_count == 0:
            # known values for the first packet of the test capture
            assert packet_timestamp.numpy()[0] == 1084443427.311224
            assert len(packet_data.numpy()[0]) == 62
        packet_count += 1
    # known total number of packets in the test capture
    assert packet_count == 43
if __name__ == "__main__":
    # Fixed: `test.main()` raised NameError (`test` was never imported);
    # use the test runner from the already-imported tensorflow module.
    tf.test.main()
| tests/test_pcap_eager.py | 1,883 | test_pcap_input
Test PcapDataset
Copyright 2019 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=wrong-import-position we know this is the correct value in the test pcap file we know this is the correct packet data buffer length in the test pcap file we know this is the correct number of packets in the test pcap file | 938 | en | 0.829182 |
import fnmatch
import functools
import io
import logging
from uuid import UUID

import requests
from requests.exceptions import ConnectionError, HTTPError

from .cromwell_metadata import CromwellMetadata
logger = logging.getLogger(__name__)
def requests_error_handler(func):
    """Decorator to re-raise ConnectionError with a help message.

    Continues (returns None) on HTTP 404 error (server is on but workflow
    doesn't exist). Otherwise, re-raises from None to hide nested tracebacks.
    """

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the original wrapper clobbered.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if err.response.status_code == 404:
                logger.error("Workflow doesn't seem to exist.")
                return

            message = (
                '{err}\n\n'
                'Cromwell server is on but got an HTTP error other than 404. '
            ).format(err=err)

            raise HTTPError(message) from None
        except ConnectionError as err:
            message = (
                '{err}\n\n'
                'Failed to connect to Cromwell server. '
                'Check if Caper server is running. '
                'Also check if hostname and port are correct. '
                'method={method}, '
                'url={url}'.format(
                    err=err, method=err.request.method, url=err.request.url
                )
            )
            raise ConnectionError(message) from None

    return wrapper
def is_valid_uuid(workflow_id, version=4):
    """To validate Cromwell's UUID (lowercase only).

    This does not allow uppercase UUIDs. Note that ``UUID(..., version=...)``
    normalizes (rather than validates) the version field, so only the hex
    format is checked.

    Args:
        workflow_id: Candidate workflow ID (expected str).
        version: UUID version passed through to the UUID constructor.

    Returns:
        True if workflow_id is a well-formed, all-lowercase UUID string.
    """
    if not isinstance(workflow_id, str):
        return False
    # Fixed: str.islower() is False for strings without any cased characters,
    # which wrongly rejected well-formed all-digit UUIDs. Comparing against
    # lower() accepts those while still rejecting any uppercase hex digits.
    if workflow_id != workflow_id.lower():
        return False
    try:
        UUID(workflow_id, version=version)
    except ValueError:
        return False
    return True
def has_wildcard(workflow_id_or_label):
    """Tell whether a value contains a glob wildcard (? or *).

    Args:
        workflow_id_or_label:
            Workflow ID (str) or label (str), or a list/tuple of them
            (checked recursively). None counts as no wildcard.

    Returns:
        True if any string contains '?' or '*', otherwise False.
    """
    if workflow_id_or_label is None:
        return False
    if isinstance(workflow_id_or_label, (list, tuple)):
        return any(has_wildcard(element) for element in workflow_id_or_label)
    return '?' in workflow_id_or_label or '*' in workflow_id_or_label
class CromwellRestAPI:
QUERY_URL = 'http://{hostname}:{port}'
ENDPOINT_BACKEND = '/api/workflows/v1/backends'
ENDPOINT_WORKFLOWS = '/api/workflows/v1/query'
ENDPOINT_METADATA = '/api/workflows/v1/{wf_id}/metadata'
ENDPOINT_LABELS = '/api/workflows/v1/{wf_id}/labels'
ENDPOINT_SUBMIT = '/api/workflows/v1'
ENDPOINT_ABORT = '/api/workflows/v1/{wf_id}/abort'
ENDPOINT_RELEASE_HOLD = '/api/workflows/v1/{wf_id}/releaseHold'
DEFAULT_HOSTNAME = 'localhost'
DEFAULT_PORT = 8000
def __init__(
self, hostname=DEFAULT_HOSTNAME, port=DEFAULT_PORT, user=None, password=None
):
self._hostname = hostname
self._port = port
self._user = user
self._password = password
self.__init_auth()
def submit(
self,
source,
dependencies=None,
inputs=None,
options=None,
labels=None,
on_hold=False,
):
"""Submit a workflow.
Returns:
JSON Response from POST request submit a workflow
"""
manifest = {}
with open(source) as fp:
manifest['workflowSource'] = io.StringIO(fp.read())
if dependencies:
with open(dependencies, 'rb') as fp:
manifest['workflowDependencies'] = io.BytesIO(fp.read())
if inputs:
with open(inputs) as fp:
manifest['workflowInputs'] = io.StringIO(fp.read())
else:
manifest['workflowInputs'] = io.StringIO('{}')
if options:
with open(options) as fp:
manifest['workflowOptions'] = io.StringIO(fp.read())
if labels:
with open(labels) as fp:
manifest['labels'] = io.StringIO(fp.read())
if on_hold:
manifest['workflowOnHold'] = True
r = self.__request_post(CromwellRestAPI.ENDPOINT_SUBMIT, manifest)
logger.debug('submit: {r}'.format(r=r))
return r
def abort(self, workflow_ids=None, labels=None):
"""Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_ABORT.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('abort: {r}'.format(r=result))
return result
def release_hold(self, workflow_ids=None, labels=None):
"""Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
r = self.__request_post(
CromwellRestAPI.ENDPOINT_RELEASE_HOLD.format(wf_id=workflow_id)
)
result.append(r)
logger.debug('release_hold: {r}'.format(r=result))
return result
def get_default_backend(self):
"""Retrieve default backend name
Returns:
Default backend name
"""
return self.get_backends()['defaultBackend']
def get_backends(self):
"""Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
"""
return self.__request_get(CromwellRestAPI.ENDPOINT_BACKEND)
def find_valid_workflow_ids(
self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
"""Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
"""
if not labels and workflow_ids and all(is_valid_uuid(i) for i in workflow_ids):
return workflow_ids
else:
workflows = self.find(
workflow_ids=workflow_ids,
labels=labels,
exclude_subworkflow=exclude_subworkflow,
)
if not workflows:
return
return [w['id'] for w in workflows]
def get_metadata(self, workflow_ids=None, labels=None, embed_subworkflow=False):
"""Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
"""
valid_workflow_ids = self.find_valid_workflow_ids(
workflow_ids=workflow_ids, labels=labels
)
if valid_workflow_ids is None:
return
result = []
for workflow_id in valid_workflow_ids:
params = {}
if embed_subworkflow:
params['expandSubWorkflows'] = True
m = self.__request_get(
CromwellRestAPI.ENDPOINT_METADATA.format(wf_id=workflow_id),
params=params,
)
if m:
cm = CromwellMetadata(m)
result.append(cm.metadata)
return result
def get_labels(self, workflow_id):
    """Fetch the labels JSON attached to one workflow.

    Returns:
        The labels dict for the workflow, or None for a missing/invalid
        workflow ID or an empty server response.
    """
    if workflow_id is None or not is_valid_uuid(workflow_id):
        return
    resp = self.__request_get(
        CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id)
    )
    if resp is not None:
        return resp['labels']
def get_label(self, workflow_id, key):
    """Look up one label value on a workflow.

    Returns:
        The value stored under `key` in the workflow's labels, or None
        when the workflow has no labels or the key is absent.
    """
    labels = self.get_labels(workflow_id)
    if labels is not None:
        return labels.get(key)
def update_labels(self, workflow_id, labels):
    """Attach/overwrite labels on a workflow via a PATCH request.

    Args:
        workflow_id: target workflow UUID.
        labels: payload of (key, val) label data; None makes this a no-op.
    Returns:
        JSON response of the PATCH request, or None when an argument is missing.
    """
    if workflow_id is None or labels is None:
        return
    resp = self.__request_patch(
        CromwellRestAPI.ENDPOINT_LABELS.format(wf_id=workflow_id), labels
    )
    logger.debug('update_labels: {r}'.format(r=resp))
    return resp
def find_with_wildcard(
    self, workflow_ids=None, labels=None, exclude_subworkflow=True
):
    """Retrieves all workflows from Cromwell server.
    And then find matching workflows by ID or labels.
    Wildcards (? and *) are allowed for both parameters.

    Each workflow is appended at most once: an ID match short-circuits
    the label check (`continue`), and the label loop breaks on its
    first hit.
    """
    result = []
    if not workflow_ids and not labels:
        return result
    # Pull the full workflow list (can be large); pattern matching is
    # done client-side with fnmatch.
    resp = self.__request_get(
        CromwellRestAPI.ENDPOINT_WORKFLOWS,
        params={
            'additionalQueryResultFields': 'labels',
            'includeSubworkflows': not exclude_subworkflow,
        },
    )
    if resp and resp['results']:
        for workflow in resp['results']:
            matched = False
            if 'id' not in workflow:
                continue
            if workflow_ids:
                for wf_id in workflow_ids:
                    if fnmatch.fnmatchcase(workflow['id'], wf_id):
                        result.append(workflow)
                        matched = True
                        break
            if matched:
                continue
            if labels and 'labels' in workflow:
                for k, v in labels:
                    v_ = workflow['labels'].get(k)
                    # Falsy stored values (None, '') never match.
                    if not v_:
                        continue
                    if isinstance(v_, str) and isinstance(v, str):
                        # matching with wildcards for str values only
                        if fnmatch.fnmatchcase(v_, v):
                            result.append(workflow)
                            break
                    elif v_ == v:
                        result.append(workflow)
                        break
    logger.debug(
        'find_with_wildcard: workflow_ids={workflow_ids}, '
        'labels={labels}, result={result}'.format(
            workflow_ids=workflow_ids, labels=labels, result=result
        )
    )
    return result
def find_by_workflow_ids(self, workflow_ids=None, exclude_subworkflow=True):
    """Finds workflows by exactly matching workflow IDs (UUIDs).
    Does OR search for a list of workflow IDs.
    Invalid UUID in `workflow_ids` will be ignored without warning.
    Wildcards (? and *) are not allowed.
    Args:
        workflow_ids:
            List of workflow ID (UUID) strings.
            Lower-case only (Cromwell uses lower-case UUIDs).
    Returns:
        List of matched workflow JSONs.
    Raises:
        ValueError: if any ID contains a wildcard character.
    """
    if has_wildcard(workflow_ids):
        raise ValueError(
            'Wildcards are not allowed in workflow_ids. '
            'ids={ids}'.format(ids=workflow_ids)
        )
    result = []
    if workflow_ids:
        # exclude invalid workflow UUIDs.
        workflow_ids = [wf_id for wf_id in workflow_ids if is_valid_uuid(wf_id)]

        # Server-side OR match via the repeated `id` query parameter.
        resp = self.__request_get(
            CromwellRestAPI.ENDPOINT_WORKFLOWS,
            params={
                'additionalQueryResultFields': 'labels',
                'includeSubworkflows': not exclude_subworkflow,
                'id': workflow_ids,
            },
        )
        if resp and resp['results']:
            result.extend(resp['results'])
    logger.debug(
        'find_by_workflow_ids: workflow_ids={workflow_ids}, '
        'result={result}'.format(workflow_ids=workflow_ids, result=result)
    )
    return result
def find_by_labels(self, labels=None, exclude_subworkflow=True):
    """Finds workflows by exactly matching labels (key, value) tuples.
    Does OR search for a list of label key/value pairs.
    Wildcards (? and *) are not allowed.
    Args:
        labels:
            List of labels (key/value pairs).
    Returns:
        List of matched workflow JSONs.
    Raises:
        ValueError: if any label contains a wildcard character.
    """
    if has_wildcard(labels):
        raise ValueError(
            'Wildcards are not allowed in labels. '
            'labels={labels}'.format(labels=labels)
        )
    result = []
    if labels:
        # reformat labels with `:` notation. exclude pairs with empty value.
        labels = [
            '{key}:{val}'.format(key=key, val=val) for key, val in labels if val
        ]

        # Server-side OR match via the `labelor` query parameter.
        resp = self.__request_get(
            CromwellRestAPI.ENDPOINT_WORKFLOWS,
            params={
                'additionalQueryResultFields': 'labels',
                'includeSubworkflows': not exclude_subworkflow,
                'labelor': labels,
            },
        )
        if resp and resp['results']:
            result.extend(resp['results'])
    logger.debug(
        'find_by_labels: labels={labels}, result={result}'.format(
            labels=labels, result=result
        )
    )
    return result
def find(self, workflow_ids=None, labels=None, exclude_subworkflow=True):
    """Wrapper for the following three find functions.
    - find_with_wildcard
    - find_by_workflow_ids
    - find_by_labels

    Find workflows by matching workflow IDs or label (key, value) tuples.
    Does OR search for both parameters.
    Wildcards (? and *) in both parameters are allowed but Caper will
    retrieve a list of all workflows, which can lead to HTTP 503 of
    Cromwell server if there are many subworkflows and not `exclude_subworkflow`.
    Args:
        workflow_ids:
            List of workflow ID (UUID) strings.
            Lower-case only.
        labels:
            List of labels (key/value pairs).
        exclude_subworkflow:
            Exclude subworkflows.
    Returns:
        List of matched workflow JSONs.
    """
    wildcard_found_in_workflow_ids = has_wildcard(workflow_ids)
    wildcard_found_in_labels = has_wildcard(
        [val for key, val in labels] if labels else None
    )
    if wildcard_found_in_workflow_ids or wildcard_found_in_labels:
        # Wildcard path: client-side matching over the full workflow list.
        return self.find_with_wildcard(
            workflow_ids=workflow_ids,
            labels=labels,
            exclude_subworkflow=exclude_subworkflow,
        )

    result = []

    result_by_labels = self.find_by_labels(
        labels=labels, exclude_subworkflow=exclude_subworkflow
    )
    result.extend(result_by_labels)

    # De-duplicate: skip ID matches already collected via labels.
    workflow_ids_found_by_labels = [workflow['id'] for workflow in result_by_labels]
    result.extend(
        [
            workflow
            for workflow in self.find_by_workflow_ids(
                workflow_ids=workflow_ids, exclude_subworkflow=exclude_subworkflow
            )
            if workflow['id'] not in workflow_ids_found_by_labels
        ]
    )
    return result
def __init_auth(self):
    """Build the HTTP basic-auth pair, or None when credentials are absent."""
    have_credentials = self._user is not None and self._password is not None
    self._auth = (self._user, self._password) if have_credentials else None
@requests_error_handler
def __request_get(self, endpoint, params=None):
    """GET request
    Args:
        endpoint: path appended to the server's base query URL.
        params: optional dict of query parameters.
    Returns:
        JSON response
    """
    url = (
        CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
        + endpoint
    )
    resp = requests.get(
        url, auth=self._auth, params=params, headers={'accept': 'application/json'}
    )
    # Non-2xx raises; the decorator above handles/re-raises requests errors.
    resp.raise_for_status()
    return resp.json()
@requests_error_handler
def __request_post(self, endpoint, manifest=None):
    """POST request
    Args:
        endpoint: path appended to the server's base query URL.
        manifest: multipart form files payload (passed as requests' `files`).
    Returns:
        JSON response
    """
    url = (
        CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
        + endpoint
    )
    resp = requests.post(
        url, files=manifest, auth=self._auth, headers={'accept': 'application/json'}
    )
    # Non-2xx raises; the decorator above handles/re-raises requests errors.
    resp.raise_for_status()
    return resp.json()
@requests_error_handler
def __request_patch(self, endpoint, data):
    """PATCH request (docstring previously mislabeled this as POST)
    Args:
        endpoint: path appended to the server's base query URL.
        data: request body, sent with a JSON content-type header.
    Returns:
        JSON response
    """
    url = (
        CromwellRestAPI.QUERY_URL.format(hostname=self._hostname, port=self._port)
        + endpoint
    )
    resp = requests.patch(
        url,
        data=data,
        auth=self._auth,
        headers={'accept': 'application/json', 'content-type': 'application/json'},
    )
    # Non-2xx raises; the decorator above handles/re-raises requests errors.
    resp.raise_for_status()
    return resp.json()
| caper/cromwell_rest_api.py | 18,534 | Init auth object
GET request
Returns:
JSON response
POST request
Returns:
JSON response
POST request
Returns:
JSON response
Abort workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for aborting workflows
Wrapper for the following three find functions.
- find_with_wildcard
- find_by_workflow_ids
- find_by_labels
Find workflows by matching workflow IDs or label (key, value) tuples.
Does OR search for both parameters.
Wildcards (? and *) in both parameters are allowed but Caper will
retrieve a list of all workflows, which can lead to HTTP 503 of
Cromwell server if there are many subworkflows and not `exclude_subworkflow`.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only.
labels:
List of labels (key/value pairs).
exclude_subworkflow:
Exclude subworkflows.
Returns:
List of matched workflow JSONs.
Finds workflows by exactly matching labels (key, value) tuples.
Does OR search for a list of label key/value pairs.
Wildcards (? and *) are not allowed.
Args:
labels:
List of labels (key/value pairs).
Returns:
List of matched workflow JSONs.
Finds workflows by exactly matching workflow IDs (UUIDs).
Does OR search for a list of workflow IDs.
Invalid UUID in `workflow_ids` will be ignored without warning.
Wildcards (? and *) are not allowed.
Args:
workflow_ids:
List of workflow ID (UUID) strings.
Lower-case only (Cromwell uses lower-case UUIDs).
Returns:
List of matched workflow JSONs.
Checks if workflow ID in `workflow_ids` are already valid UUIDs (without wildcards).
If so then we don't have to send the server a query to get matching workflow IDs.
Retrieves all workflows from Cromwell server.
And then find matching workflows by ID or labels.
Wildcards (? and *) are allowed for both parameters.
Retrieve available backend names and default backend name
Returns:
JSON response with keys "defaultBackend" and "supportedBackends"
Example: {"defaultBackend":"Local","supportedBackends":
["Local","aws","gcp","pbs","sge","slurm"]}
Retrieve default backend name
Returns:
Default backend name
Get a label for a key in a specified workflow
Returns:
Value for a specified key in labels JSON for a workflow
Get labels JSON for a specified workflow
Returns:
Labels JSON for a workflow
Retrieve metadata for workflows matching workflow IDs or labels
Args:
workflow_ids:
List of workflows IDs to find workflows matched.
labels:
List of Caper's string labels to find workflows matched.
embed_subworkflow:
Recursively embed subworkflow's metadata in main
workflow's metadata.
This flag is to mimic behavior of Cromwell run mode with -m.
Metadata JSON generated with Cromwell run mode
includes all subworkflows embedded in main workflow's JSON file.
Check if string or any element in list/tuple has
a wildcard (? or *).
Args:
workflow_id_or_label:
Workflow ID (str) or label (str).
Or array (list, tuple) of them.
To validate Cromwell's UUID (lowercase only).
This does not allow uppercase UUIDs.
Release hold of workflows matching workflow IDs or labels
Returns:
List of JSON responses from POST request
for releasing hold of workflows
Re-raise ConnectionError with help message.
Continue on HTTP 404 error (server is on but workflow doesn't exist).
Otherwise, re-raise from None to hide nested tracebacks.
Submit a workflow.
Returns:
JSON Response from POST request submit a workflow
Update labels for a specified workflow with
a list of (key, val) tuples
matching with wildcards for str values only exclude invalid workflow UUIDs. reformat labels with `:` notation. exclude pairs with empty value. | 3,833 | en | 0.761565 |
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from data import lab_gamut
import numpy as np
class GUIGamut(QWidget):
    """Qt widget that shows the ab color-gamut slice for a fixed lightness L
    and lets the user pick an (a, b) chroma point with the mouse.

    Emits the old-style PyQt4 signal 'update_color' with the picked RGB color.
    """

    def __init__(self, gamut_size=110):
        QWidget.__init__(self)
        self.gamut_size = gamut_size
        self.win_size = gamut_size * 2  # divided by 4
        self.setFixedSize(self.win_size, self.win_size)
        self.ab_grid = lab_gamut.abGrid(gamut_size=gamut_size, D=1)
        self.reset()

    def set_gamut(self, l_in=50):
        # Recompute the ab map and validity mask for a new lightness value.
        self.l_in = l_in
        self.ab_map, self.mask = self.ab_grid.update_gamut(l_in=l_in)
        self.update()

    def set_ab(self, color):
        # Move the crosshair to the (a, b) position of the given RGB color.
        self.color = color
        self.lab = lab_gamut.rgb2lab_1d(self.color)
        x, y = self.ab_grid.ab2xy(self.lab[1], self.lab[2])
        self.pos = QPointF(x, y)
        self.update()

    def is_valid_point(self, pos):
        # True when pos is inside the widget and lies on an in-gamut pixel.
        if pos is None:
            return False
        else:
            x = pos.x()
            y = pos.y()
            if x >= 0 and y >= 0 and x < self.win_size and y < self.win_size:
                return self.mask[y, x]
            else:
                return False

    def update_ui(self, pos):
        # Convert the clicked pixel back to (a, b), build the Lab color,
        # and notify listeners with the resulting RGB value.
        self.pos = pos
        a, b = self.ab_grid.xy2ab(pos.x(), pos.y())
        # get color we need L
        L = self.l_in
        lab = np.array([L, a, b])
        color = lab_gamut.lab2rgb_1d(lab, clip=True, dtype='uint8')
        self.emit(SIGNAL('update_color'), color)
        self.update()

    def paintEvent(self, event):
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.fillRect(event.rect(), Qt.white)
        if self.ab_map is not None:
            ab_map = cv2.resize(self.ab_map, (self.win_size, self.win_size))
            qImg = QImage(ab_map.tostring(), self.win_size, self.win_size, QImage.Format_RGB888)
            painter.drawImage(0, 0, qImg)

        # Dotted gray crosshair marking the gamut-map center axes.
        # NOTE(review): win_size/2 is float division under Python 3; this
        # PyQt4 code presumably targets Python 2 -- confirm before porting.
        painter.setPen(QPen(Qt.gray, 3, Qt.DotLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
        painter.drawLine(self.win_size/2, 0, self.win_size/2, self.win_size)
        painter.drawLine(0, self.win_size/2, self.win_size, self.win_size/2)
        if self.pos is not None:
            # Small black cross at the currently selected point.
            painter.setPen(QPen(Qt.black, 2, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin))
            w = 5
            x = self.pos.x()
            y = self.pos.y()
            painter.drawLine(x - w, y, x + w, y)
            painter.drawLine(x, y - w, x, y + w)
        painter.end()

    def mousePressEvent(self, event):
        pos = event.pos()
        if event.button() == Qt.LeftButton and self.is_valid_point(pos):  # click the point
            self.update_ui(pos)
            self.mouseClicked = True

    def mouseMoveEvent(self, event):
        # Drag-to-pick: only update while the left button is held down.
        pos = event.pos()
        if self.is_valid_point(pos):
            if self.mouseClicked:
                self.update_ui(pos)

    def mouseReleaseEvent(self, event):
        self.mouseClicked = False

    def sizeHint(self):
        return QSize(self.win_size, self.win_size)

    def reset(self):
        # Clear all selection state; called from __init__ and by users of the widget.
        self.ab_map = None
        self.mask = None
        self.color = None
        self.lab = None
        self.pos = None
        self.mouseClicked = False
        self.update()
| interactive-deep-colorization/ui/gui_gamut.py | 3,230 | divided by 4 get color we need L click the point | 48 | en | 0.91134 |
# coding: utf-8
__author__ = "Jerry He"
import dash_bootstrap_components as dbc
from dash import dcc, no_update
from dash_extensions.enrich import Dash, Output, Input, State, html
import flask
from flask import jsonify
from flask_cors import CORS
from dash import dash_table
import dash_ace
server = flask.Flask(__name__)
CORS(server)
from dash_extensions.enrich import DashProxy,ServersideOutput, TriggerTransform, MultiplexerTransform, ServersideOutputTransform, NoOutputTransform
# DashProxy (dash-extensions) wraps Dash so callbacks may use ServersideOutput,
# which caches large values (DataFrames) on the server instead of the browser.
app = DashProxy(__name__,
    server=server,
    transforms=[
        ServersideOutputTransform(), # enable use of ServersideOutput objects
    ],
    external_stylesheets=[dbc.themes.BOOTSTRAP]
)
# Re-export the underlying Flask server (used by WSGI runners).
server = app.server
import pandas as pd
def row_tf(row):
    """Project a result row down to title/userid and derive `name`
    (the part of the title before the first dash, stripped)."""
    projected = {field: row[field] for field in ('title', 'userid')}
    projected['name'] = projected['title'].split("-")[0].strip()
    return projected
def df_transform(df):
    """Apply row_tf to each row and rebuild a DataFrame from the results."""
    transformed_rows = [row_tf(record) for _, record in df.iterrows()]
    return pd.DataFrame(transformed_rows)
# Page layout: three dcc.Store components hold client state (current SQL text,
# latest result, and the full query history); the Interval polls the Ace
# editor; the card holds the SQL editor tab and the History tab.
app.layout = html.Div(
    [
        dcc.Store(id="querystr"),
        dcc.Store(id="store"),
        dcc.Store(id="all-df"),
        # Ticks every 1.8s to sync the editor text into the querystr store.
        dcc.Interval(interval=1800, id="query_sto"),
        dbc.Card([
            dbc.CardImg(src="assets/brick_header.jpg"),
            dbc.CardBody([
                dbc.Tabs(
                    [
                        dbc.Tab([
                            html.Hr(),
                            dash_ace.DashAceEditor(
                                id='query-input',
                                value=r"SELECT * FROM my_music_collection WHERE artist like '%Jr%' LIMIT 8",
                                theme='github',
                                mode='sql',
                                tabSize=2,
                                height="35px",
                                enableBasicAutocompletion=True,
                                enableLiveAutocompletion=True,
                                autocompleter='/autocompleter?prefix=',
                                placeholder='SQL code ...'
                            ),
                            dbc.Button("Query", color="secondary", className="me-1",
                                id='query-button'),
                            html.Hr(),
                            html.Div(id="query-output")
                        ],label="SQL", tab_id="tab-1"),
                        dbc.Tab(label="History", tab_id="tab-2"),
                    ],
                    id="tabs",
                    active_tab="tab-1",
                ),
                html.Div(id="tab-content"),
            ])
        ])
    ]
)
import json
app.clientside_callback("""
function(n_intervals, data) {
var existing_data;
if(data) {
existing_data = JSON.parse(data)
}
var editor = ace.edit("query-input")
if(!existing_data || existing_data['querystr'] != editor.getValue().trim()) {
return JSON.stringify({
'querystr':editor.getValue().trim(),
'time':(new Date()).toISOString()
})
}
}
""".strip(),
Output("querystr", "data"), Input("query_sto",'n_intervals'), State("querystr", "data"))
from sqlalchemy import create_engine
engine = create_engine('postgresql://localhost:5432/jerry') # change this to your SQL endpoint/auth
import logging
import dateutil.parser
@app.callback(ServersideOutput("store", "data"), Input('query-button', 'n_clicks'),State("querystr", "data"), memoize=True)
def query(n_clicks, query_data):
    """Run the editor's SQL against the database and cache the DataFrame
    server-side.

    Fixes:
    - Narrowed the bare `except:` (which also swallowed SystemExit /
      KeyboardInterrupt) to `except Exception`.
    - On failure, return dash's no_update instead of implicitly returning
      None; previously a failed query wrote None into the store and the
      downstream table callback crashed on df_transform(None).
    """
    if query_data is None:
        return no_update
    qdata = json.loads(query_data)
    try:
        # Escape literal % so the DBAPI paramstyle doesn't interpret them.
        dat = pd.read_sql(qdata["querystr"].replace("%", "%%"), con=engine)
        return dat
    except Exception:
        logging.exception("SQL query failed\n")
        return no_update
@app.callback(Output("query-output", "children"), ServersideOutput("all-df", "data"), Input("store", "data"), State("all-df", "data"))
def render_query_res_table(data, all_df):
    """Render the latest query result as a DataTable and append a
    timestamped snapshot of it to the server-side history list.

    NOTE(review): assumes `data` is a non-None DataFrame containing the
    columns row_tf() reads ('title', 'userid') -- confirm the upstream
    query callback guarantees this.
    """
    df = df_transform(data)
    # Deterministic column order for display.
    df = df[sorted(df.columns.tolist())]
    if all_df is None:
        all_df = [{'df':df, 'time':datetime.now()}]
    else:
        all_df.append({'df':df, 'time':datetime.now()})
    return [dash_table.DataTable(
        id='table',
        columns=[{"name": i, "id": i} for i in df.columns],
        data=df.to_dict('records'),
        style_header={
            'backgroundColor': 'grey',
            'fontWeight': 'bold'
        },
    )],all_df
@app.callback(Output("tab-content", "children"), [Input("tabs", "active_tab"), State("all-df", "data")])
def switch_tab(at, all_df):
    """Render the body of the currently active tab.

    tab-1 (SQL) renders nothing extra (its content is static in the layout);
    tab-2 (History) renders one accordion item per stored query snapshot.

    Fix: selecting History before any query had run iterated over
    `all_df is None` and raised TypeError; now it shows a message instead.
    """
    if at == "tab-1":
        return []
    elif at == "tab-2":
        if not all_df:
            return html.P("No queries have been run yet.")
        return dbc.Accordion(
            [
                dbc.AccordionItem([
                    dash_table.DataTable(
                        id='table',
                        columns=[{"name": i, "id": i} for i in query_hist['df'].columns],
                        data=query_hist['df'].to_dict('records'),
                        style_header={
                            'backgroundColor': 'grey',
                            'fontWeight': 'bold'
                        },
                    )
                ], title = query_hist['time'].strftime("%H:%M:%S")) for query_hist in all_df
            ])
    return html.P("This shouldn't ever be displayed...")
@server.route('/autocompleter', methods=['GET'])
def autocompleter():
    """Serve the static completion list consumed by the Ace editor."""
    completions = [{"name": "Completed", "value": "Completed", "score": 100, "meta": "test"}]
    return jsonify(completions)
app.run_server(host="127.0.0.1", debug=True, port=8080) | dash_sql_client_ui.py | 5,052 | coding: utf-8 enable use of ServersideOutput objects change this to your SQL endpoint/auth | 90 | en | 0.677763 |
import numpy as np
import matplotlib.pyplot as plt
import time
from copy import copy
import os
from single_pitch import single_pitch
from channel import channel
from pseudo_speech import Pseudospeech_Synthetizer_class
from encryption import Encryption_class
from speech_analyzer import Speech_Analyzer_class
from speech_synthesizer import Speech_Synthesizer_class
################################################################
# End-to-end demo pipeline: analyze speech -> encrypt -> synthesize
# pseudospeech -> pass through a voice codec -> analyze -> decrypt ->
# resynthesize speech.
my_analyzer = Speech_Analyzer_class("speech_model.npz","spherical_code.npz") # model parameters generated by speech_model.py and spherical_code.py
my_encryptor = Encryption_class("spherical_code.npz") # model parameters generated by spherical_code.py
my_ps_sp_synthetizer = Pseudospeech_Synthetizer_class("pseudospeech_model.npz","spherical_code.npz") # model parameters generated by pseudo_speech_model.py and spherical_code.py
my_sp_synthesizer = Speech_Synthesizer_class("speech_model.npz") # model parameters generated by speech_model.py

# pseudo random data used for enciphering/deciphering
keybits = np.random.randint(2, size = (160, 10000))

print("step 1")
speech_samples = np.fromfile("temp/hts1a.raw", dtype='int16')
# print(speech_samples.shape)

##### SPEECH ENCODING ######
print("step 2")
pitch_indices, energy_indices, timbre_indices = my_analyzer.analyze_speech(speech_samples)

###### ENCRYPTION ######
print("step 3")
pitch_indices_enc, energy_indices_enc, timbre_indices_enc = my_encryptor.speech_encryption(pitch_indices, energy_indices, timbre_indices, keybits)

###### PSEUDOSPEECH SYNTHESIS ######
print("step 4")
# Bug fix: was `my_synthetizer`, which is never defined (NameError at runtime);
# the pseudospeech object created above is `my_ps_sp_synthetizer`.
signal = my_ps_sp_synthetizer.synthesize_pseudospeech(pitch_indices_enc, energy_indices_enc, timbre_indices_enc)

###### CHANNEL DISTORTION ######
print("step 5")
signal_rec = channel(signal, "SILK", 16000, 48000) # data samples, codec type, sampling frequency (Hz), compression rate (b/s)

###### PSEUDOSPEECH ANALYSIS ######
print("step 6")
# Bug fix: was `my_synthetizer` (undefined).
pitch_indices_rec, energy_indices_rec, timbre_indices_rec = my_ps_sp_synthetizer.analyze_pseudospeech(signal_rec)

# ###### DECRYPTION ######
print("step 7")
pitch_indices_dec, energy_indices_dec, timbre_indices_dec = my_encryptor.speech_decryption(pitch_indices_rec, energy_indices_rec, timbre_indices_rec, keybits)

# ###### SPEECH SYNTHESIS ######
print("step 8")
# Bug fix: was `my_speech_synthesizer` (undefined); the object created above
# is `my_sp_synthesizer`.
my_sp_synthesizer.synthesize_speech(pitch_indices_dec, energy_indices_dec, timbre_indices_dec) # save to file / input of the narrowband (8kHz) LPCNet

print("Finished")

################
# plt.figure()
# plt.plot(energy_indices)
# plt.figure()
# plt.plot(pitch_indices)
# plt.figure()
# plt.plot(np.transpose(timbre_indices))
################
# plt.figure()
# plt.plot(energy_indices_enc)
# plt.figure()
# plt.plot(pitch_indices_enc)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_enc))
################
# plt.figure()
# plt.plot(energy_indices_rec)
# plt.figure()
# plt.plot(pitch_indices_rec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_rec))
################
# plt.figure()
# plt.plot(energy_indices_dec)
# plt.figure()
# plt.plot(pitch_indices_dec)
# plt.figure()
# plt.plot(np.transpose(timbre_indices_dec))
################
plt.show()
| a_full_model.py | 3,165 | model parameters generated by speech_model.py and spherical_code.py model parameters generated by spherical_code.py model parameters generated by pseudo_speech_model.py and spherical_code.py model parameters generated by speech_model.py pseudo random data used for enciphering/deciphering print(speech_samples.shape) SPEECH ENCODING ENCRYPTION PSEUDOSPEECH SYNTHESIS CHANNEL DISTORTION data samples, codec type, sampling frequency (Hz), compression rate (b/s) PSEUDOSPEECH ANALYSIS DECRYPTION SPEECH SYNTHESIS save to file / input of the narrowband (8kHz) LPCNet plt.figure() plt.plot(energy_indices) plt.figure() plt.plot(pitch_indices) plt.figure() plt.plot(np.transpose(timbre_indices)) plt.figure() plt.plot(energy_indices_enc) plt.figure() plt.plot(pitch_indices_enc) plt.figure() plt.plot(np.transpose(timbre_indices_enc)) plt.figure() plt.plot(energy_indices_rec) plt.figure() plt.plot(pitch_indices_rec) plt.figure() plt.plot(np.transpose(timbre_indices_rec)) plt.figure() plt.plot(energy_indices_dec) plt.figure() plt.plot(pitch_indices_dec) plt.figure() plt.plot(np.transpose(timbre_indices_dec)) | 1,115 | en | 0.31441 |
#!/usr/bin/env python -u
"""
All commands that can be run in this project are available through this unified interface.
This should be run with the ./plaster.sh helper to get into the correct context.
"""
import tempfile
import numpy as np
import time
import os
import sys
import pandas as pd
import json
from pathlib import Path
from munch import Munch
from plumbum import colors
from plumbum import FG, TF, cli, local
from plaster.tools.zlog.zlog import important
from plaster.run.sigproc_v2 import synth
from plaster.tools.zlog.profile import prof, profile_from_file, profile_dump
from plaster.tools.utils.tmp import tmp_file
from plaster.tools.assets import assets
from plaster.tools.test_tools.test_tools import run_p
from plaster.run.run import RunResult
from plaster.tools.zlog import zlog
from plaster.tools.zlog.zlog import tell, h_line, spy
from plaster.tools.utils import tmp
from plaster.tools.utils import utils
import logging
log = logging.getLogger(__name__)
class CommandError(Exception):
    """Raised by commands to signal failure; optionally carries an exit code."""

    def __init__(self, retcode=None):
        # retcode is the process exit status the caller should propagate.
        self.retcode = retcode
def assert_env():
    """Verify that the required environment variables are set.

    Prints one message per missing variable and raises CommandError when
    any are absent.

    Fix: previously the error message string was passed as CommandError's
    `retcode` argument (CommandError(retcode=None) has no message slot),
    silently misusing the exit-code field; now raise with no retcode.
    """
    must_exist = ("ERISYON_ROOT", "JOBS_FOLDER")
    missing = [e for e in must_exist if e not in local.env]
    for e in missing:
        print(f'Environment variable "{e}" not found.')
    if missing:
        raise CommandError()
class DoFuncs:
    """Shared helper mix-in for the CLI command classes below."""

    def is_dev(self):
        """True when running in an Erisyon development environment."""
        return local.env.get("ERISYON_DEV") == "1"

    def folder_user(self):
        return local.env["FOLDER_USER"]

    def run_user(self):
        return local.env["RUN_USER"]

    def clear(self):
        # Clear the terminal (runs the `clear` binary in the foreground).
        local["clear"] & FG

    def _print_job_folders(self, file_list, show_plaster_json=True):
        """
        Print a per-folder size/count summary table plus a grand total.

        file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]
        """
        if len(file_list) == 0:
            print("No files found")
            return

        folders = {
            file.folder: Munch(folder=file.folder, size_gb=0, file_count=0,)
            for file in file_list
        }

        gb = 1024 ** 3
        total_gb = 0
        for file in file_list:
            folder = file.folder
            total_gb += file.size / gb
            folders[folder].size_gb += file.size / gb
            folders[folder].file_count += 1

        df = pd.DataFrame.from_dict(folders, orient="index")
        formatters = dict(
            size_gb="{:10.2f}".format,
            folder="{:<40.40s}".format,
            file_count="{:.0f}".format,
        )
        columns = ["folder", "size_gb", "file_count"]
        df = df.append(dict(folder="TOTAL", size_gb=total_gb), ignore_index=True)
        print(df.to_string(columns=columns, formatters=formatters))

    def print_local_job_folders(self):
        important("Local job folders:")
        root = local.path("./jobs_folder")
        self._print_job_folders(
            [
                Munch(
                    folder=(p - root)[0],
                    name=p.name,
                    size=int(p.stat().st_size),
                    mtime=int(p.stat().st_mtime),
                )
                for p in root.walk()
            ]
        )

    def validate_job_folder(self, job_folder, allow_run_folders=False):
        return assets.validate_job_folder(
            job_folder, allow_run_folders=allow_run_folders
        )

    def run_zests_v2(self, cli_args, debug_mode):
        """Run the zest v2 test runner with the given CLI args."""
        tell(f"Running zests v2...")

        # as os.environ is evaluated when it is first imported
        # we can't use any of the more graceful ways to set the environment
        with local.env(RUN_ENV="test", ZAP_DEBUG_MODE=debug_mode):
            zest_version = None
            try:
                from zest.version import __version__ as zest_version
            except ImportError:
                pass

            assert zlog.config_dict is not None
            # Fix: guard against a failed import; previously a missing zest
            # package raised AttributeError (None.startswith) instead of a
            # clear assertion failure.
            assert zest_version is not None and zest_version.startswith("1.1.")

            with tmp.tmp_file() as tmp_path:
                with open(tmp_path, "w") as f:
                    f.write(json.dumps(zlog.config_dict))

                # cli_args += ["--logger_config_json", tmp_path]
                local["python"]["-u", "-m", "zest.zest_cli"].bound_command(
                    *cli_args
                ) & FG(retcode=None)

    def run_nbstripout(self):
        """Strip all notebooks of output to save space in commits"""
        important("Stripping Notebooks...")
        result = (
            local["find"][
                ".",
                "-type",
                "f",
                "-not",
                "-path",
                "*/\.*",
                "-name",
                "*.ipynb",
                "-print",
            ]
            | local["xargs"]["nbstripout"]
        ) & TF(FG=True)

        if not result:
            raise CommandError

    def run_docker_build(self, docker_tag, quiet=False):
        important(f"Building docker tag {docker_tag}")
        with local.env(LANG="en_US.UTF-8"):
            args = [
                "build",
                "-t",
                f"erisyon:{docker_tag}",
                "-f",
                "./scripts/main_env.docker",
            ]
            if quiet:
                args += ["--quiet"]
            # Fix: was `args += "."`, which only worked because iterating the
            # one-char string "." happens to extend the list with ["."].
            args += ["."]
            local["docker"][args] & FG
class DoCommand(cli.Application, DoFuncs):
    # Root CLI application: does nothing itself; plumbum dispatches to the
    # subcommands registered below via @DoCommand.subcommand(...).
    def main(self):
        return
@DoCommand.subcommand("run_notebook")
class RunNotebookCommand(cli.Application, DoFuncs):
"""
Run a notebook rendered to HTML
"""
def main(self, notebook_path, output_path: Path = None):
args = [
"nbconvert",
"--to",
"html",
"--execute",
notebook_path,
"--ExecutePreprocessor.timeout=1800",
]
if output_path is not None:
args += ["--output", output_path]
local["jupyter"].bound_command(*args) & FG
@DoCommand.subcommand("profile")
class ProfileCommand(cli.Application, DoFuncs):
gb = 1024 ** 3
skip_hardware = cli.Flag("--skip_hardware", help="Do not include hardware profile")
skip_sigproc = cli.Flag("--skip_sigproc", help="Do not include sigproc profile")
def fileio_test(self, jobs_folder):
job_name = f"_profile/_{int(time.time()):08x}"
large_random = np.random.uniform(
size=1024 ** 3 // 8
) # 8 because floats are 8 bytes
def write_to(write_path):
# import shutil
# total, used, free = shutil.disk_usage(write_path.dirname)
# print(f"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)")
write_path.dirname.mkdir()
with open(write_path, "wb") as f:
f.write(large_random)
# PROFILE write to jobs_folder
job_folder_write_path = jobs_folder / job_name
try:
with prof(
"fileio_to_jobs_folder", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(job_folder_write_path)
finally:
job_folder_write_path.delete()
# PROFILE write to plaster_tmp
with tmp_file() as plaster_tmp_folder_write_path:
with prof(
"fileio_to_plaster_tmp", gbs=large_random.nbytes / self.gb, _tell=True,
):
write_to(plaster_tmp_folder_write_path)
# PROFILE write to /tmp
tmp_folder_write_path = local.path(tempfile.mkstemp())
try:
with prof("fileio_to_tmp", gbs=large_random.nbytes / self.gb, _tell=True):
write_to(tmp_folder_write_path)
finally:
tmp_folder_write_path.delete()
def cpu_test(self):
mat = np.random.uniform(size=(5000, 5000))
with prof(
"cpu_tests_matrix_invert",
mega_elems=(mat.shape[0] * mat.shape[1]) / 1e6,
_tell=True,
):
np.linalg.inv(mat)
def mem_test(self):
gb = 1024 ** 3
rnd = np.random.uniform(size=(1_000, 500_000))
with prof("mem_tests_copy", gbs=rnd.nbytes / gb, _tell=True):
rnd.copy()
def sigproc_test(self, jobs_folder):
"""
This is adapted from zest_sigproc_v2_integration
"""
profile_folder = jobs_folder / "_profile"
profile_folder.delete()
job_folder = profile_folder / "sigproc_test"
source_folder = profile_folder / "_synth_field"
job_folder.mkdir()
source_folder.mkdir()
# GENERATE some fake data
dim = (1024, 1024)
n_channels = 1
n_cycles = 10
n_peaks = 500
psf_width = 1.5
bg_mean = 100.0
bg_std = 30.0
gain = 5000.0
def _synth_field(fl_i):
with synth.Synth(n_channels=n_channels, n_cycles=n_cycles, dim=dim) as s:
peaks = (
synth.PeaksModelGaussianCircular(n_peaks=n_peaks)
.locs_randomize()
.widths_uniform(psf_width)
.amps_constant(gain)
)
synth.CameraModel(bg_mean=bg_mean, bg_std=bg_std)
synth.HaloModel()
synth.IlluminationQuadraticFalloffModel()
chcy_ims = s.render_chcy(0)
for ch_i in range(chcy_ims.shape[0]):
for cy_i in range(chcy_ims.shape[1]):
np.save(
str(
source_folder
/ f"area_{fl_i:03d}_cell_000_{ch_i:03d}nm_{cy_i:03d}.npy"
),
chcy_ims[ch_i, cy_i],
)
n_fields = 2
for fl_i in range(n_fields):
_synth_field(fl_i)
run_p(
[
f"gen",
f"sigproc_v2",
f"--job={job_folder}",
f"--sigproc_source={source_folder}",
f"--force",
f"--self_calib",
]
)
log_file = local.path(local.env["PLASTER_ROOT"]) / "plaster.log"
log_file.delete()
run_p(["run", job_folder, "--no_progress", "--skip_reports"])
profile_lines = profile_from_file(log_file)
with colors.fg.DeepSkyBlue3:
print()
print(h_line("--"))
print("PROFILE RESULTS")
print(h_line("--"))
profile_dump(profile_lines)
def main(self, jobs_folder):
assert_env()
jobs_folder = local.path(jobs_folder)
if not self.skip_hardware:
tell(colors.cyan | "Profiling file_io")
self.fileio_test(jobs_folder)
tell(colors.cyan | "Profiling cpu")
self.cpu_test()
tell(colors.cyan | "Profiling mem")
self.mem_test()
if not self.skip_sigproc:
tell(colors.cyan | "Profiling sigproc")
self.sigproc_test(jobs_folder)
@DoCommand.subcommand("profile_dump")
class ProfileDumpCommand(cli.Application, DoFuncs):
def main(self, log_path):
assert_env()
log_file = local.path(log_path)
profile_lines = profile_from_file(log_file)
profile_dump(profile_lines)
@DoCommand.subcommand("test")
class TestCommand(cli.Application, DoFuncs):
"""
Run tests
"""
no_clear = cli.Flag("--no_clear", help="Do not clear screen")
integration = cli.Flag("--integration", help="Run integration tests")
debug_mode = cli.Flag("--debug_mode", help="Put zap into debug_mode")
cli_mode = cli.Flag("--cli_mode", help="Run without ui")
def main(self, *args):
if not self.no_clear:
self.clear()
cli_args = list(args)
root = local.env["PLASTER_ROOT"]
cli_args += [f"--root={root}"]
folders = (
"./plaster",
"./plaster/scripts",
)
include_dirs = ":".join(folders)
cli_args += [f"--include_dirs={include_dirs}"]
with local.cwd(root):
cli_args += [f"--hook_start=./scripts/testing_start.py:test_setup_logs"]
if not self.debug_mode:
if not self.cli_mode:
cli_args += [f"--ui"]
cli_args += [f"--n_workers", "8"]
if self.integration:
cli_args += [f"--groups=integration"]
else:
cli_args += [f"--exclude_groups=integration"]
return self.run_zests_v2(cli_args, self.debug_mode)
@DoCommand.subcommand("jupyter")
class JupyterCommand(cli.Application, DoFuncs):
ip = cli.SwitchAttr("--ip", str, default="0.0.0.0", help="ip to bind to")
port = cli.SwitchAttr("--port", int, default="8080", help="port to bind to")
def main(self, *args):
assert_env()
os.execlp(
"jupyter",
"jupyter",
"notebook",
f"--ip={self.ip}",
f"--port={self.port}",
"--allow-root",
*args,
)
@DoCommand.subcommand("pluck")
class PluckCommand(cli.Application, DoFuncs):
"""
Pluck a field from a result pickle
"""
save_npy = cli.SwitchAttr("--save_npy", str, default=None, help="save as npy file")
save_csv = cli.SwitchAttr(
"--save_csv", str, default=None, help="save as csv file (dataframe only)"
)
save_pkl = cli.SwitchAttr(
"--save_pkl", str, default=None, help="save as pkl file (dataframe only)"
)
def main(self, run_path, symbol):
"""
run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig"
"""
run = RunResult(run_path)
parts = symbol.split(".")
result = run[parts[0]]
sym = getattr(result, parts[1])
if callable(sym):
val = sym()
else:
val = sym
if self.save_npy is not None:
assert isinstance(val, np.ndarray)
np.save(self.save_npy, val)
if self.save_csv is not None:
assert isinstance(val, pd.DataFrame)
val.to_csv(self.save_csv)
if self.save_pkl is not None:
assert isinstance(val, pd.DataFrame)
val.to_pickle(self.save_pkl)
@DoCommand.subcommand("export_sigproc_v2")
class ExportSigprocV2Command(cli.Application, DoFuncs):
"""
Export sigproc_v2 and raw data in easy to use formats.
"""
def main(self, run_path):
"""
run_path: path to the run folder (don't forget this is a subfolder of job)
"""
run = RunResult(run_path)
name = run.run_folder.parent.name
prefix = f"{name}__"
tell(f"Prefixing saved files with {prefix}")
tell("Saving sig.npy")
np.save(f"{prefix}sig.npy", run.sigproc_v2.sig())
tell("Saving noi.npy")
np.save(f"{prefix}noi.npy", run.sigproc_v2.noi())
tell("Saving df.csv")
run.sigproc_v2.fields__n_peaks__peaks__radmat().to_csv(f"{prefix}df.csv")
ims = []
for fl_i in range(run.sigproc_v2.n_fields):
tell(f"Loading align field {fl_i} of {run.sigproc_v2.n_fields}")
ims += [run.sigproc_v2.aln_unfilt_chcy_ims(fl_i)]
tell("Saving aln_ims.npy")
np.save(f"{prefix}aln_ims.npy", np.stack(ims))
tell("Saving example.py")
utils.save(
f"{prefix}example.py",
f"import numpy as np\n"
+ f"import pandas as pd\n\n"
+ f'prefix = "{prefix}"'
+ utils.smart_wrap(
"""
sig = np.load(f"{prefix}sig.npy")
noi = np.load(f"{prefix}noi.npy")
df = pd.read_csv(f"{prefix}df.csv")
ims = np.load(f"{prefix}aln_ims.npy", mmap_mode="r")
n_peaks = sig.shape[0]
n_fields, n_channels, n_cycles, im_mea, _ = ims.shape
# Examine some peak
peak_i = 123 # 0 <= peak_i < n_peaks
ch_i = 0 # 0 <= ch_i < n_channels
cy_i = 0 # 0 <= cy_i < n_cycles
y, x, fl_i = df[df.peak_i == peak_i][["aln_y", "aln_x", "field_i"]].drop_duplicates().values.flatten().astype(int)
peak_radius = 10
peak_im = ims[fl_i, ch_i, cy_i, y-peak_radius:y+peak_radius, x-peak_radius:x+peak_radius]
# Now peak_im is a centered sub-image of that peak with shape=(peak_radius, peak_radius)
""",
width=200,
assert_if_exceeds_width=True,
),
)
tell("\n\nThe following commands may be useful:")
# tell(f" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv")
# tell(f" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy")
# tell("")
# tell(f" aws s3 cp {prefix}data.tar.gz s3://erisyon-public")
# tell(f" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public")
tell(f" aws s3 cp {prefix}sig.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}noi.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}df.csv s3://erisyon-public")
tell(f" aws s3 cp {prefix}aln_ims.npy s3://erisyon-public")
tell(f" aws s3 cp {prefix}example.py s3://erisyon-public")
if __name__ == "__main__":
try:
DoCommand.subcommand("gen", "plaster.gen.gen_main.GenApp")
DoCommand.subcommand("run", "plaster.run.run_main.RunApp")
DoCommand.run()
except (KeyboardInterrupt):
print() # Add an extra line because various thing terminate with \r
sys.exit(1)
except Exception as e:
log.exception(e)
sys.exit(1)
| plaster/main.py | 17,715 | Export sigproc_v2 and raw data in easy to use formats.
Pluck a field from a result pickle
Run a notebook rendered to HTML
Run tests
file_list is a list of munches [Munch(folder="folder", name="foo.txt", size=123, mtime=123456789)]
run_path: path to the run folder
symbol: Eg: "sigproc_v2.sig"
run_path: path to the run folder (don't forget this is a subfolder of job)
Strip all notebooks of output to save space in commits
This is adapted from zest_sigproc_v2_integration
All commands that can be run in this project are available through this unified interface.
This should be run with the ./plaster.sh helper to get into the correct context.
!/usr/bin/env python -u as os.environ is evaluated when it is first imported we can't use any of the more graceful ways to set the environment cli_args += ["--logger_config_json", tmp_path] 8 because floats are 8 bytes import shutil total, used, free = shutil.disk_usage(write_path.dirname) print(f"Free disk at {write_path}: {free / gb:2.2f}GB ({free / total:2.1f}%)") PROFILE write to jobs_folder PROFILE write to plaster_tmp PROFILE write to /tmp GENERATE some fake data tell(f" tar czf {prefix}data.tar.gz {prefix}sig.npy {prefix}noi.npy {prefix}df.csv") tell(f" tar czf {prefix}ims.tar.gz {prefix}aln_ims.npy") tell("") tell(f" aws s3 cp {prefix}data.tar.gz s3://erisyon-public") tell(f" aws s3 cp {prefix}ims.tar.gz s3://erisyon-public") Add an extra line because various thing terminate with \r | 1,450 | en | 0.726624 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.hooks.base_hook import BaseHook
class GenericTransfer(BaseOperator):
"""
Moves data from a connection to another, assuming that they both
provide the required methods in their respective hooks. The source hook
needs to expose a `get_records` method, and the destination a
`insert_rows` method.
This is mean to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database
:type sql: str
:param destination_table: target table
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
:param destination_conn_id: source connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data
:type preoperator: str or list of str
"""
template_fields = ('sql', 'destination_table', 'preoperator')
template_ext = ('.sql', '.hql',)
ui_color = '#b0f07c'
@apply_defaults
def __init__(
self,
sql,
destination_table,
source_conn_id,
destination_conn_id,
preoperator=None,
*args, **kwargs):
super(GenericTransfer, self).__init__(*args, **kwargs)
self.sql = sql
self.destination_table = destination_table
self.source_conn_id = source_conn_id
self.destination_conn_id = destination_conn_id
self.preoperator = preoperator
def execute(self, context):
source_hook = BaseHook.get_hook(self.source_conn_id)
self.logger.info("Extracting data from %s", self.source_conn_id)
self.logger.info("Executing: \n %s", self.sql)
results = source_hook.get_records(self.sql)
destination_hook = BaseHook.get_hook(self.destination_conn_id)
if self.preoperator:
self.logger.info("Running preoperator")
self.logger.info(self.preoperator)
destination_hook.run(self.preoperator)
self.logger.info("Inserting rows into %s", self.destination_conn_id)
destination_hook.insert_rows(table=self.destination_table, rows=results)
| airflow/operators/generic_transfer.py | 2,854 | Moves data from a connection to another, assuming that they both
provide the required methods in their respective hooks. The source hook
needs to expose a `get_records` method, and the destination a
`insert_rows` method.
This is mean to be used on small-ish datasets that fit in memory.
:param sql: SQL query to execute against the source database
:type sql: str
:param destination_table: target table
:type destination_table: str
:param source_conn_id: source connection
:type source_conn_id: str
:param destination_conn_id: source connection
:type destination_conn_id: str
:param preoperator: sql statement or list of statements to be
executed prior to loading the data
:type preoperator: str or list of str
-*- coding: utf-8 -*- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 1,257 | en | 0.818679 |
#For the whatsapp statuses url given below
#COOL
import requests
from bs4 import BeautifulSoup
url_to_scrape = 'https://www.appstatustxt.com/cool-whatsapp-status/'
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text,"html5lib")
status_object=[]
statuses=[]
title=soup.title.string
print(title)
status_object=soup.find_all('span',style="color: #008000;")
fo = open("cool.txt", "a")
#Adding basic stuff for json syntax
#fo.write("{\n")
i=1;
for status in status_object:
if len(status.string)<=135:
statuses.append(status.string+'\n')
print(status.string)
# actual_status=status.string.encode('utf-8')
fo.write(status.string.encode('utf-8')+'\n')
# fo.write('"'+str(i)+'":"'+actual_status+'",\n')
i=i+1 | cool.py | 745 | For the whatsapp statuses url given belowCOOLAdding basic stuff for json syntaxfo.write("{\n") actual_status=status.string.encode('utf-8') fo.write('"'+str(i)+'":"'+actual_status+'",\n') | 186 | en | 0.330167 |
"""
Author: Nathan Clack
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
"""
from .tests import plot_whiskers
from ui.whiskerdata.trace import Whisker_Seg
from numpy import *
import pdb
from functools import reduce
def load():
from ui.whiskerdata import load_whiskers, load_trajectories
from ui.genetiff import Reader
movie = Reader('data/seq/whisker_data_0140.seq',adjuststipple=1)
w,wid = load_whiskers('seq.whiskers')
#movie = Reader('../../data/W0.tif',adjuststipple=1)
#w,wid = load_whiskers('w0-grid.whiskers')
#w,wid = load_whiskers('whisk-vc/whisk-vc/seq.whiskers')
#movie = Reader('data/JF8410_041808_001.tif',adjuststipple=1)
#w,wid = load_whiskers('test.whiskers')
#movie = Reader('data/lorenz/090519-19a_0035.seq',adjuststipple=1)
#w,wid = load_whiskers('lorenz.whiskers')
#w,wid = load_whiskers('results/seq-hand.whiskers')
#t,tid = load_trajectories('results/seq-hand.trajectories')
return w,movie
def check_bounds(wvd,shape):
for fid, wv in wvd.items():
for i,w in wv.items():
for x,y,t,s in w:
if x<0 or x>=shape[1] or y<0 or y>=shape[0]:
print("out of bounds")
pdb.set_trace()
if not ( w.x.flags.contiguous and w.y.flags.contiguous ):
print("not contiguous")
pdb.set_trace()
def fix(wvd,movie,scale=2, signal_per_pixel = 0, max_dist = 60, max_angle = 20.*pi/180.):
shape = movie[0].shape
for fid,wv in list(wvd.items()):
print(fid)
table = CollisionTable( wv, shape, scale )
r = set( resolution( table, wv ) )
for j,l in choose_gaps(movie[fid],r,signal_per_pixel,max_dist,max_angle):
e = reduce( Whisker_Seg.join, j )
r.discard( j[0] )
r.discard( j[-1] )
r.add(e)
wvd[fid] = dict( [ p for p in enumerate(r) ] )
return wvd
def compute_join_length( px, py, tlow = 0.0, thigh = 1.0 ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
xp2 = polymul( xp, xp )
yp2 = polymul( yp, yp )
p = polyadd( xp2, yp2 )
integrand = lambda t: sqrt( polyval( p, t ) )
return quad(integrand, tlow, thigh) [0]
def compute_join_curvature( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
xpp = polyder( px, 2 )
yp = polyder( py, 1 )
ypp = polyder( py, 2 )
pn = polyadd( polymul( xp, ypp ), polymul( yp, xpp )) #numerator
pd = polyadd( polymul( xp, xp ) , polymul( yp, yp ) ) #denominator
integrand = lambda t: fabs(polyval( pn, t )/( polyval( pd, t )**(1.5)) )
return quad(integrand, 0, 1) [0]
def compute_join_angle( px, py ):
from scipy.integrate import quad
xp = polyder( px, 1 )
yp = polyder( py, 1 )
integrand = lambda t: arctan2(polyval(yp, t), polyval(xp, t))
return quad(integrand, 0, 1) [0]
def _compute_intensity( im, x, y ):
if ( x<0 ).any() or \
( x>=im.shape[1] ).any() or \
( y<0 ).any() or \
( y>=im.shape[0] ).any():
return inf
p = set( p for p in zip(x,y) )
score = 0
for j,i in p:
score += im[i,j]
return score/len(p)
def compute_join_intensity( im, px, py ):
tt = linspace(0,1,50)
x = array( [round(polyval(px,t)) for t in tt] )
y = array( [round(polyval(px,t)) for t in tt] )
return _compute_intensity(im,x,y)
def compute_join_score( im, px, py, thick = 2 ):
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
a = _compute_intensity(im, ux, uy )
b = _compute_intensity(im, ux + thick*dy/dL , uy - thick*dx/dL )
c = _compute_intensity(im, ux - thick*dy/dL , uy + thick*dx/dL )
return (2*a - b - c)/4.0
def solve_polynomial_join( left, right, reverse = 0):
"""
Solves for a parametric cubic polynomial curve joining the right side of left
to the left side of right. The curve matches slope and position at it's
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
Here a = 0 and b = 1 so m and it's inverse is always the same.
"""
minv = matrix( [[ 2., -2., 1., 1.],
[-3., 3., -2., -1.],
[ 0., 0., 1., 0.],
[ 1., 0., 0., 0.]])
#take care of cases joining very short segements
lr = len(right)
ll = len(left)
#L = length( right.x, right.y ) + length( left.x, left.y )
#dd = hypot( left.x[0] - right.x[-1], left.y[0] - right.y[-1] )
nl = ll/4
nr = lr/4
slope = lambda v: v[ 0] - v[-1] # want the total change over the length
#slope = lambda v: diff(v).mean()
length = lambda x,y: hypot(diff(x),diff(y)).sum() # euclidian distance in pixels
#
# Compute slope at boundary.
# Uses a number of points near the boundary to compute slope.
# Need to account for edge cases where one or both sides
# consist of very few points.
#
if nr < 2 and nl < 2:
lnorm = length( left.x , left.y )
rnorm = length( right.x , right.y )
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nl = 0
nr = lr - 1
elif nr < 2: # use the derivative on the other side
lnorm = length( left.x[:nl], left.y[:nl] )
rnorm = length( right.x , right.y )
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
dry = diff(right.y ).mean() / rnorm
drx = diff(right.x ).mean() / rnorm
nr = lr - 1
#print dly,dlx,dry,drx
elif nl < 2: # use the derivative on the other side
rnorm = length( right.x[:nr], right.y[:nr] )
lnorm = length( left.x , left.y )
dry = -slope(right.y[:nr] ) / rnorm
drx = -slope(right.x[:nr] ) / rnorm
dly = diff( left.y ).mean() / lnorm
dlx = diff( left.x ).mean() / lnorm
nl = 0
else: # the "normal" case
rnorm = length( right.x[:nr], right.y[:nr] ) # Compute path length of right border region
lnorm = length( left.x[(-nl):], left.y[(-nl):] ) # Compute path length of left border region
dry = -slope(right.y[:nr] ) / rnorm # Compute dy/dl for right side
drx = -slope(right.x[:nr] ) / rnorm # etc...
dly = -slope( left.y[(-nl):] ) / lnorm
dlx = -slope( left.x[(-nl):] ) / lnorm
rnorm = hypot( left.x[0] - right.x[0], left.y[0] - right.y[0] )
lnorm = hypot( left.x[-1]- right.x[0], left.y[-1]- right.y[0] )
if not isfinite(dlx): dlx =(left.x[0] - right.x[0])/lnorm
if not isfinite(dly): dly =(left.y[0] - right.y[0])/lnorm
if not isfinite(drx): drx =(left.x[-1] - right.x[0])/rnorm
if not isfinite(dry): dry =(left.y[-1] - right.y[0])/rnorm
if reverse:
dlx = -dlx
dly = -dly
drx = -drx
dry = -dry
ry = right.y[ 0] ## right.y[nr]
ly = left.y[-1 ] ## left.y[-nl]
rx = right.x[ 0] ## right.x[nr]
lx = left.x[-1 ] ## left.x[-nl]
L = hypot( rx-lx, ry-ly ) # Approximate dl/dt
print("L:%g"%L)
yv = matrix( [[ ly ],
[ ry ],
[ dly * L ], # dy/dt = dy/dl * dl/dt
[ dry * L ]])
xv = matrix( [[ lx ],
[ rx ],
[ dlx * L ],
[ drx * L ]])
cx = minv*xv
cy = minv*yv
if not (isfinite(cx).any() and isfinite(cy).any()):
pdb.set_trace()
return [array(t).squeeze() for t in (cx,cy)]
def plot_join(px,py,*args,**kwargs):
from pylab import plot, polyval
tt = linspace(0,1,50)
plot( polyval(px,tt), polyval(py,tt), *args, **kwargs )
def plot_test(px,py,thick=2):
from pylab import plot
tt = linspace(0,1,50)
dpx = polyder(px)
dpy = polyder(py)
dL2 = polymul(dpx,dpx) + polymul(dpy,dpy)
ux = polyval( px,tt )
uy = polyval( py,tt )
dx = diff(ux) #polyval( px,tt )
dy = diff(uy) #polyval( py,tt )
dx = r_[dx[0],dx]
dy = r_[dy[0],dy]
dL = sqrt( dx**2 + dy**2 )
plot( ux, uy , '.-')
plot( ux + thick*dy/dL , uy - thick*dx/dL ,'-')
plot( ux - thick*dy/dL , uy + thick*dx/dL ,'-' )
def filter_ends( wv, min_score, shape, border = 10 ):
"""
Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side).
"""
maxy, maxx = [x - border for x in shape]
minx, miny = border, border
test_point = lambda x,y: x>minx and x<maxx and y > miny and y < maxy
bordertest = lambda e,side: test_point( e.x[side], e.y[side] )
scoretest = lambda e,side: e.scores[side] > min_score
sides = [0,-1]
for e in wv:
for s in sides:
if bordertest(e,s) and scoretest(e,s):
yield e,s
def plot_candidate_ends(im, wv, min_score, border = 10):
from pylab import plot, imshow, cm, ion,ioff, show, text
left,right = group_ends( list(filter_ends(wv,min_score,im.shape, border)) )
ioff()
#imshow(im,cmap=cm.gray,hold=0)
m = {0:'ro',-1:'gs'}
for i,e in enumerate(left):
s = 0
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
for i,e in enumerate(right):
s = -1
text(e.x[s],e.y[s],str(i),color=m[s][0])
plot([e.x[s]],[e.y[s]],m[s])
show()
ion()
def group_ends( ends ):
return [e for e,s in ends if s == 0], [e for e,s in ends if s == -1]
def end_direction(w, side, n=16):
a = 0
b = min( n, len(w) )
if side != 0:
a = -b
b = -1
dx = diff( w.x[a:b] ).mean()
dy = diff( w.y[a:b] ).mean()
return dx,dy
def make_joining_whisker(px,py,dist,lthick,lscore,rthick,rscore):
w = Whisker_Seg()
tt = linspace(0,1,round(dist))
w.x = polyval(px,tt).astype(float32)
w.y = polyval(py,tt).astype(float32)
w.thick = polyval( [rthick-lthick,lthick], tt ).astype(float32)
w.scores = polyval( [rscore-lscore,lscore], tt ).astype(float32)
return w
def choose_gaps(im,wv, signal_per_pixel = 0.0, max_dist=60, max_angle = pi/4.):
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
theta = lambda w,side: reduce(arctan2, reversed( end_direction(w,side) ) )
dtheta = lambda left,right: fabs(theta(left,0) - theta(right,-1))
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[ 0]-b.x[-1]
dy = a.y[ 0]-b.y[-1]
d = hypot(dx,dy)
dth = dtheta(a,b)
v = end_direction(a,0)
norm = hypot(*v)
proj = dot( v/norm, (dx,dy) )
# jth: angle change from a to direct line joining a,b
jth = fabs(arctan2( hypot(*( dx-proj*v[0]/norm, dy-proj*v[1]/norm )) , proj ))
#print i,j,
#print "\tD: %g Proj: %g Theta: %g"%(d,proj,jth*180/pi)
l=0;
if d < max_dist and jth < max_angle and proj > 0:
px,py = solve_polynomial_join( b, a )
l = compute_join_score(im,px,py)
if l < -signal_per_pixel:
#plot_test(px,py)
print("\tScore: %g Theta: %g"%(l,jth*180/pi))
e = make_joining_whisker(px,py,d,b.thick[-1],b.scores[-1],a.thick[ 0],a.scores[ 0])
yield (b,e,a),l
def gap_measures(im,wv):
pmetric = lambda p: sqrt(dot(p[:-1],p[:-1]))
left,right = group_ends( list(filter_ends(wv,100,im.shape)) )
shape = (len(left),len(right) )
d = zeros( shape )
l = zeros( shape )
c = zeros( shape )
cx = zeros( shape )
cy = zeros( shape )
for i,a in enumerate(left):
for j,b in enumerate(right):
dx = a.x[0 ]-b.x[-1]
dy = a.y[0 ]-b.y[-1]
d[i,j] = hypot(dx,dy)
px,py = solve_polynomial_join( b, a )
lpx,lpy = solve_polynomial_join( a, a, reverse = 1 )
rpx,rpy = solve_polynomial_join( b, b, reverse = 1 )
cx[i,j] = max( pmetric( px - lpx ) , pmetric( px - rpx ) )
cy[i,j] = max( pmetric( px - lpx ) , pmetric( py - rpy ) )
#l[i,j] = compute_join_length(px,py)
l[i,j] = compute_join_score(im,px,py)
plot_test(px,py)
#c[i,j] = compute_join_curvature(px,py)
#if sqrt( px[0]**2 + py[0]**2 ) < 50.0:
# plot_join(px,py)
return d,l,cx,cy
def trace_overlap(xxx_todo_changeme, xxx_todo_changeme1, thresh = 2.0 ):
# DONE: does not assume that indexes run along same direction
(wa,i) = xxx_todo_changeme
(wb,j) = xxx_todo_changeme1
def dist(ia,ib):
a,b = wa[ia], wb[ib]
return hypot( a[0] - b[0], a[1] - b[1] )
# determine relative direction of indexing
ia,ib = i,j
if ia == len(wa)-1 or ib == len(wb)-1:
if ia != 0 and ib != 0:
dax = wa.x[ia-1] - wa.x[ia]
day = wa.y[ia-1] - wa.y[ia]
dbx = wb.x[ib-1] - wb.x[ib]
dby = wb.y[ib-1] - wb.y[ib]
elif ia == 0:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = - wb.x[ib-1] + wb.x[ib]
dby = - wb.y[ib-1] + wb.y[ib]
elif ib == 0:
dax = - wa.x[ia-1] + wa.x[ia]
day = - wa.y[ia-1] + wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
else:
dax = wa.x[ia+1] - wa.x[ia]
day = wa.y[ia+1] - wa.y[ia]
dbx = wb.x[ib+1] - wb.x[ib]
dby = wb.y[ib+1] - wb.y[ib]
stepa = -1; #only need to keep track of one direction
enda = 0;
notend = lambda i,n: i>n
if( abs(dax) > abs(day) ): #determine by x change
if( dax*dbx < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
else: #determine by y change
if( day*dby < 0 ): #have different signs
stepa = 1
enda = len(wa)
notend = lambda i,n: i<n-1
bnda = [i,i]
bndb = [j,j]
ms = 0
while ms < thresh and notend(ia,enda) and ib > 0:
moves = ( ( ia + stepa, ib - 1 ),
( ia + stepa, ib ),
( ia , ib - 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate( scores ): #choose best move
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == 0:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib - 1 )
while s < last and ib > 1:
ib -= 1
last = s
s = dist( ia, ib - 1 )
elif ib == 0:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[0] = ia
bndb[0] = ib
#flip direction
if stepa == -1:
stepa = 1
enda = len(wa)
notend = lambda i,n:i<n-1
else:
stepa = -1
enda = 0
notend = lambda i,n: i>n
ia,ib = i,j
ms = 0
while ms < thresh and notend(ia,enda) and ib < len(wb)-1:
moves = ( ( ia + stepa, ib + 1 ),
( ia + stepa, ib ),
( ia , ib + 1 ) )
scores = [dist( iam, ibm ) for iam, ibm in moves]
ms = min(scores)
for idx,s in enumerate(scores):
if s == ms:
ia,ib = moves[idx]
break
#relax at boundary, move downhill
if not notend(ia,enda) and ib == len(wb)-1:
pass
elif not notend(ia,enda):
last = ms
s = dist( ia, ib + 1 )
while s < last and ib < len(wb)-2:
ib += 1
last = s
s = dist( ia, ib + 1 )
elif ib == len(wb)-1:
last = ms
s = dist( ia + stepa, ib )
while s < last and notend(ia,enda-stepa):
ia += stepa
last = s
s = dist( ia + stepa, ib )
bnda[1] = ia
bndb[1] = ib
bnda.sort()
return bnda, bndb
def resolution(table, wvd):
rest = set(wvd.values())
match = next(table)
while match:
keep,discard = merge(match)
if discard:
for a in discard:
table.remove( a )
for a in keep:
yield a
for a,i in match:
rest.discard(a)
match = next(table)
for a in rest:
yield a
def pairwise_merge( match ):
overhang = 8
wa = match[0][0]
wb = match[1][0]
bnda, bndb = trace_overlap(*match)
iscomplete = lambda bnd,w: bnd[0] < overhang and bnd[1] >= len(w)-overhang
if iscomplete(bnda,wa) or iscomplete(bndb,wb):
sa = wa.scores.sum()
sb = wb.scores.sum()
if sa > sb:
return wa,None
else:
return None,wb
return None,None
def merge( match ):
dep = dict( [ (e[0],0) for e in match ] )
#iterate through all pairs and mark those who are contained in another whisker
# The pairwise merge should impose a strict ordering
match = list(match)
for i,ma in enumerate(match):
for j,mb in enumerate(match[ (i+1): ]):
ra,rb = pairwise_merge( (ma,mb) )
if ra or rb:
if not ra:
dep[ma[0]] = 1
if not rb:
dep[mb[0]] = 1
# partition into two sets. Those to keep and those to discard.
# Those to keep depend on none of the others.
return [ k for k,v in dep.items() if v==0 ], \
[ k for k,v in dep.items() if v!=0 ]
class CollisionTable(object):
def __init__(self, wvd, shape, scale):
""" `wvd` may be either a dict or list of whiskers """
object.__init__(self)
self._map = {}
self._shape = shape
self._scale = scale
self._stride = stride = shape[1]/scale
self.topx = lambda p: int(p[0]/scale) + stride * int(p[1]/scale)
self._build_inverse_table( wvd )
def _build_inverse_table(self, wvd ):
g = enumerate(wvd)
if isinstance(wvd, dict):
g = iter(wvd.items())
for i,w in g:
self.add(w)
def update( self, changes ):
""" Changes is a dict mapping old whisker segments to new segments """
last = None
for w,p in changes.items():
self.remove(w)
if p:
self.add(p[0]) # add back ends
self.add(p[-1])
last = p[1]
if last:
self.add(last) # add back last middle
def add(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
self._map.setdefault(px,set()).add( (w,i) )
for i,px in hash(w): # scan back through and remove repeat hits on a pixel
for x in [e for e in self._map[px] if e[0] == w][1:]:
self._map[px].remove(x)
def remove(self, w):
if not w: return
hash = lambda e: enumerate( map(self.topx,list(zip(e.x,e.y))) )
for i,px in hash(w):
s = self._map.get(px)
if s:
s.discard( (w,i) )
def __iter__(self):
m = next(self)
while m:
yield m
m = next(self)
def __next__(self):
""" This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
"""
todelete = []
retval = None
for px,s in self._map.items():
todelete.append(px) # get rid of references to visited pixels
if len(s) > 1:
retval = s
break
for k in todelete:
del self._map[k]
return retval
def counts( self ):
tosc = lambda e: e/self._scale
im = zeros(list(map(tosc, self._shape)))
imr = im.ravel()
for px,s in self._map.items():
imr[px] = len(s) #len(set( [e for e,i in s] ))
return im
| whisk/test_merge3.py | 20,038 | `wvd` may be either a dict or list of whiskers
This changes the inverse table by removing hits.
Returns a (Whisker_Seg, index),(Whisker_Seg, index)... tuple
or None, if done.
Return candidate ends for joining.
Returns an iterator yielding (Whisker_Seg, side).
Solves for a parametric cubic polynomial curve joining the right side of left
to the left side of right. The curve matches slope and position at it's
boundaries and is parameterized from 0 to 1; 0 being the left boundary and 1
being the right.
method: parametric cubic matching position and slope of endpoints.
This ends up being cheap to compute, since the matrix is
known (interval of parameter is always 0 to 1) and so the
inverse can be precomputed.
minv is inverse of m, where:
m = array( [ [ a**3, a**2, a, 1 ],
[ b**3, b**2, b, 1 ],
[ 3*a**2, 2*a , 1, 0 ],
[ 3*b**2, 2*b , 1, 0 ] ] )
is the matrix for the linear system:
m * coeff = v,
with v = [ x(0) x(1) dx/dt(0) dx/dt(1) ].
Here a = 0 and b = 1 so m and it's inverse is always the same.
Changes is a dict mapping old whisker segments to new segments
Author: Nathan Clack
Date : 2009
Copyright (c) 2009 HHMI. Free downloads and distribution are allowed for any
non-profit research and educational purposes as long as proper credit is given
to the author. All other rights reserved.
movie = Reader('../../data/W0.tif',adjuststipple=1)w,wid = load_whiskers('w0-grid.whiskers')w,wid = load_whiskers('whisk-vc/whisk-vc/seq.whiskers')movie = Reader('data/JF8410_041808_001.tif',adjuststipple=1)w,wid = load_whiskers('test.whiskers')movie = Reader('data/lorenz/090519-19a_0035.seq',adjuststipple=1)w,wid = load_whiskers('lorenz.whiskers')w,wid = load_whiskers('results/seq-hand.whiskers')t,tid = load_trajectories('results/seq-hand.trajectories')numeratordenominatorpolyval( px,tt )polyval( py,tt )take care of cases joining very short segementsL = length( right.x, right.y ) + length( left.x, left.y )dd = hypot( left.x[0] - right.x[-1], left.y[0] - right.y[-1] ) want the total change over the lengthslope = lambda v: diff(v).mean() euclidian distance in pixels Compute slope at boundary. Uses a number of points near the boundary to compute slope. Need to account for edge cases where one or both sides consist of very few points. use the derivative on the other sideprint dly,dlx,dry,drx use the derivative on the other side the "normal" case Compute path length of right border region Compute path length of left border region Compute dy/dl for right side etc... 
right.y[nr] left.y[-nl] right.x[nr] left.x[-nl] Approximate dl/dt dy/dt = dy/dl * dl/dtpolyval( px,tt )polyval( py,tt )imshow(im,cmap=cm.gray,hold=0) jth: angle change from a to direct line joining a,bprint i,j,print "\tD: %g Proj: %g Theta: %g"%(d,proj,jth*180/pi)plot_test(px,py)l[i,j] = compute_join_length(px,py)c[i,j] = compute_join_curvature(px,py)if sqrt( px[0]**2 + py[0]**2 ) < 50.0: plot_join(px,py) DONE: does not assume that indexes run along same direction determine relative direction of indexingonly need to keep track of one directiondetermine by x changehave different signsdetermine by y changehave different signschoose best moverelax at boundary, move downhillflip directionrelax at boundary, move downhilliterate through all pairs and mark those who are contained in another whisker The pairwise merge should impose a strict ordering partition into two sets. Those to keep and those to discard. Those to keep depend on none of the others. add back ends add back last middle scan back through and remove repeat hits on a pixel get rid of references to visited pixelslen(set( [e for e,i in s] )) | 3,784 | en | 0.764888 |
from unittest import TestCase
from unittest.mock import patch
from xmlschema import XMLSchemaException
from xml.dom.minidom import Element, Document, parse
class TestXmlParserInstructionspath(TestCase):
    """Tests for the XmlParser.instructionspath property (getter and setter)."""

    # NOTE: stacked @patch decorators hand their mocks to the test method
    # bottom-up (see unittest.mock docs). The original parameter names were
    # shuffled relative to that order — only `isfile_mock` happened to line up
    # with the mock it names, which is why the tests still worked. The names
    # below are corrected to match the mock each parameter actually receives.
    @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
    @patch('instructionparsers.xmlparser.XmlParser._init_instructions')
    @patch('instructionparsers.xmlparser.path.isfile')
    @patch('instructionparsers.xmlparser.XmlParser._validate_schema')
    @patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
    def test_instructionspath(self, initmetadata_mock, schema_mock, isfile_mock,
                              initinstructions_mock, placeholder_mock):
        """
        Will return the instructions file path set in __init__
        """
        from instructionparsers.xmlparser import XmlParser

        expected_file = 'test_instructions.xml'
        # Pretend the instructions file exists so __init__ accepts the path.
        isfile_mock.return_value = True
        xml_parser = XmlParser(instructionspath=expected_file, protocol=None)

        actual_file = xml_parser.instructionspath

        self.assertEqual(expected_file, actual_file)

    @patch('businesslogic.placeholders.Placeholder._initialize_global_placeholders')
    @patch('instructionparsers.xmlparser.XmlParser._init_instructions')
    @patch('instructionparsers.xmlparser.path.isfile')
    @patch('instructionparsers.xmlparser.XmlParser._validate_schema')
    @patch('instructionparsers.xmlparser.XmlParser._initializemetadata')
    def test_instructionspath_instruction_file_not_there(self, initmetadata_mock,
                                                         schema_mock, isfile_mock,
                                                         initinstructions_mock,
                                                         placeholder_mock):
        """
        Will raise FileNotFound exeption.
        """
        from instructionparsers.xmlparser import XmlParser

        expected_file = 'test_instructions.xml'
        # First let construction succeed ...
        isfile_mock.return_value = True
        xml_parser = XmlParser(instructionspath=expected_file, protocol=None)

        # ... then make the file "disappear" so the setter must reject the path.
        isfile_mock.return_value = False
        with self.assertRaises(FileNotFoundError):
            xml_parser.instructionspath = expected_file
class TestXmlParserValidate_schema(TestCase):
    """Tests for XmlParser._validate_schema."""

    SCHEMA_PATH = '../instructionparsers/xmlparser.xsd'

    def _check_schema_accepts(self, instructionsfile):
        """Fail the running test if _validate_schema rejects *instructionsfile*."""
        from instructionparsers.xmlparser import XmlParser

        try:
            XmlParser.XMLSCHEMA_PATH = self.SCHEMA_PATH
            XmlParser._validate_schema(xmlfilepath=instructionsfile)
        except XMLSchemaException:
            self.fail("_validate_schema should not raise exception with valid xml instructions.")

    def test__validate_schema_valid_instructions(self):
        """
        Should do nothing for a schema-conformant instructions file.
        """
        self._check_schema_accepts('./instructions/valid_instructions.xml')

    def test__validate_schema_invalid_instructions(self):
        """
        Should raise XMLSchemaException for a non-conformant instructions file.
        """
        from instructionparsers.xmlparser import XmlParser

        XmlParser.XMLSCHEMA_PATH = self.SCHEMA_PATH
        with self.assertRaises(XMLSchemaException):
            XmlParser._validate_schema('./instructions/invalid_instructions.xml')

    def test__validate_schema_minimal_valid_instructions(self):
        """
        Should do nothing for a minimal schema-conformant instructions file.
        """
        self._check_schema_accepts('./instructions/minimal_valid_instructions.xml')
class TestXmlParserInitializemetadata(TestCase):
    """Tests for XmlParser._initializemetadata."""

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__initializemetadata_valid_instructions(self, path_mock):
        """
        Should initialize member 'metadata' with all elements which have the attribute "title".
        """
        from instructionparsers.xmlparser import XmlParser

        expected_titles = ('Examiner', 'Assignment', 'Client',
                           'Description of Artefact', 'Task Description')
        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        # Bypass the (patched) property and point the parser at the fixture file.
        xml_parser._instructionspath = instructions

        xml_parser._initializemetadata()

        for title in expected_titles:
            with self.subTest(title):
                self.assertIsNotNone(xml_parser.metadata[title])
class TestXmlParserInitInstructions(TestCase):
    """Tests for XmlParser._init_instructions."""

    # Expected collector names below the second 'LocalHost' child, in document
    # order (indexes 0..14 of instructionchildren[1].instructionchildren).
    EXPECTED_CHILD_NAMES = (
        'OSName', 'OSVersion', 'OSTimezone', 'AllUsernames', 'CurrentUser',
        'SudoVersion', 'FileExistence', 'FileExistence', 'FileExistence',
        'FileExistence', 'FileExistence', 'ShellHistoryOfAllUsers',
        'NVRAMCollector', 'TimeFromNTPServer', 'LocalTime')

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__init_instructions_valid_instructions(self, path_mock):
        """
        Should initialize collectors for all XML elements which have the attribute "module".
        """
        from instructionparsers.xmlparser import XmlParser
        from instructionparsers.wrapper import InstructionWrapper

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        instructionstree = xml_parser._init_instructions()

        self.assertIsInstance(instructionstree, InstructionWrapper)

    # BUG FIX: this test originally had the same name as the one above, so it
    # silently shadowed it and only one of the two ever ran.
    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__init_instructions_returns_expected_tree(self, path_mock):
        """
        Should return the instruction tree starting with "Root" node.
        """
        from instructionparsers.xmlparser import XmlParser

        instructions = './instructions/valid_instructions.xml'
        xml_parser = XmlParser(instructionspath=instructions, protocol=None)
        xml_parser._instructionspath = instructions

        instructionstree = xml_parser._init_instructions()

        self.assertEqual(instructionstree.instructionname, 'Root')
        self.assertEqual(instructionstree.instructionchildren[0].instructionname,
                         'LocalHost')
        self.assertEqual(
            instructionstree.instructionchildren[0].instructionchildren[0].instructionname,
            'MachineName')
        self.assertEqual(instructionstree.instructionchildren[1].instructionname, 'LocalHost')
        # Data-driven check instead of fifteen hand-written assertEqual calls.
        children = instructionstree.instructionchildren[1].instructionchildren
        for index, expected_name in enumerate(self.EXPECTED_CHILD_NAMES):
            with self.subTest(index=index, expected=expected_name):
                self.assertEqual(children[index].instructionname, expected_name)
class TestXmlParserGetFirstInstructionElement(TestCase):
    """Tests for XmlParser._get_first_instruction_element."""

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_first_instruction_element(self, path_mock):
        """
        Should return the xml element with the title "Root".
        """
        from instructionparsers.xmlparser import XmlParser

        instructions = './instructions/valid_instructions.xml'
        parser = XmlParser(instructionspath=instructions, protocol=None)
        parser._instructionspath = instructions

        root_element = parser._get_first_instruction_element()

        self.assertIsInstance(root_element, Element)
        self.assertEqual(root_element.localName, 'Root')
class TestXmlParser(TestCase):
    """Tests for XmlParser._get_placeholder_name."""

    def test__get_placeholder_name(self):
        """
        If XmlElement contains attribute "placeholder" method should return value of this attribute.
        """
        from instructionparsers.xmlparser import XmlParser

        element = Document().createElement('Demo')
        element.setAttribute(XmlParser.PLACEHOLDERNAME_ATTRIBUTE, "test")

        self.assertEqual(XmlParser._get_placeholder_name(element), 'test')

    def test__get_placeholder_name_no_placeholder(self):
        """
        If XmlElement does not contain attribute "placeholder" method should return an empty string.
        """
        from instructionparsers.xmlparser import XmlParser

        element = Element('Demo')

        self.assertEqual(XmlParser._get_placeholder_name(element), '')
class TestXmlParserGetParameterAttributes(TestCase):
    """Tests for XmlParser._get_parameter_attributes."""

    STUB_INSTRUCTIONS = "./instructions/instructions_stub.xml"

    def _get_stub_attributes(self):
        """Parse the stub instructions file and return the attributes of the element under test."""
        # All three tests exercised the same parse expression; hoisted here.
        elem = parse(self.STUB_INSTRUCTIONS).documentElement.childNodes[1]
        return elem.attributes

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_return_userdict(self, path_mock):
        """
        Should return UserDict.
        """
        from instructionparsers.xmlparser import XmlParser
        from collections import UserDict

        actual = XmlParser._get_parameter_attributes(attributes=self._get_stub_attributes())

        self.assertIsInstance(actual, UserDict)

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_return_userdict_with_2_entries(self, path_mock):
        """
        Should return dict with two entries.
        """
        from instructionparsers.xmlparser import XmlParser

        actual = XmlParser._get_parameter_attributes(attributes=self._get_stub_attributes())

        self.assertEqual(len(actual), 2)

    @patch('instructionparsers.xmlparser.XmlParser.instructionspath')
    def test__get_parameter_attributes_should_return_none_special_attributes(self, path_mock):
        """
        Should return dictionary with "users_with_homedir" key and with "properties" key.
        """
        from instructionparsers.xmlparser import XmlParser

        actual = XmlParser._get_parameter_attributes(attributes=self._get_stub_attributes())

        self.assertIsNotNone(actual.get("properties"))
        self.assertIsNotNone(actual.get("users_with_homedir"))
| tests/test_xmlparser.py | 11,713 | Should return the xml element with the title "Root".
Should return UserDict
Should return dict with two entries
Should return dicitionry with "users_with_homedir" key and with "properties" key.
If XmlElement contains attribute "placeholder" method should return value of this attribute.
If XmlElement does not contain attribute "placeholder" method should return an empty string.
Should initialize collectors for all XML elements which have the attribute "module".
Should return the instruction tree starting with "Root" node.
Should initialize member 'metadata' with all elements which have the attribute "title".
Should raise exception.
Should do nothing.
Should do nothing.
Will return the instructions file path set in __init__
Will raise FileNotFound exeption.
from xml.dom.minidom import Element | 802 | en | 0.765648 |
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-ShellCommon-StartLayoutPopulation
GUID : 97ca8142-10b1-4baa-9fbb-70a7d11231c3
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
# Auto-generated ETW event templates for the provider
# Microsoft-Windows-ShellCommon-StartLayoutPopulation
# (guid 97ca8142-10b1-4baa-9fbb-70a7d11231c3).
#
# Each class binds one (guid, event_id, version) triple to a `construct`
# Struct describing that event's payload layout.  The field names and types
# come from the provider manifest; their runtime semantics are not visible
# here, so no per-field documentation is asserted.  Do not hand-edit the
# patterns: they must match the binary payload layout exactly.
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1_0(Etw):
    pattern = Struct(
        "collectionName" / WString,
        "initializationReason" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=3, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_3_0(Etw):
    pattern = Struct(
        "layoutSelectionSerializedString" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=5, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_5_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=7, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_7_0(Etw):
    pattern = Struct(
        "layoutProviderName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=8, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_8_0(Etw):
    pattern = Struct(
        "layoutProviderName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=11, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_11_0(Etw):
    pattern = Struct(
        "layoutProviderName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=12, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_12_0(Etw):
    pattern = Struct(
        "layoutProviderName" / WString,
        "HResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=15, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_15_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=16, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_16_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=17, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_17_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString,
        "failureDetails" / CString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=18, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_18_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=19, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_19_0(Etw):
    pattern = Struct(
        "tileData" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=21, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_21_0(Etw):
    pattern = Struct(
        "failureDetails" / CString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=22, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_22_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=23, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_23_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=28, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_28_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=29, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_29_0(Etw):
    pattern = Struct(
        "tileAumid" / WString,
        "appSize" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=30, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_30_0(Etw):
    pattern = Struct(
        "tileAumid" / WString,
        "appSize" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=31, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_31_0(Etw):
    pattern = Struct(
        "appSize" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=32, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_32_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=33, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_33_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=35, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_35_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=38, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_38_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=39, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_39_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=41, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_41_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=42, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_42_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=45, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_45_0(Etw):
    pattern = Struct(
        "containerName" / WString,
        "containerXPosition" / Int32ul,
        "containerYPosition" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=46, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_46_0(Etw):
    pattern = Struct(
        "containerName" / WString,
        "containerXPosition" / Int32ul,
        "containerYPosition" / Int32ul,
        "failureDetails" / CString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=49, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_49_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=52, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_52_0(Etw):
    pattern = Struct(
        "tileData" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=53, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_53_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=54, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_54_0(Etw):
    pattern = Struct(
        "groupData" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=55, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_55_0(Etw):
    pattern = Struct(
        "groupData" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=56, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_56_0(Etw):
    pattern = Struct(
        "containerName" / WString,
        "containerXPosition" / Int32ul,
        "containerYPosition" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=57, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_57_0(Etw):
    pattern = Struct(
        "containerName" / WString,
        "containerXPosition" / Int32ul,
        "containerYPosition" / Int32ul,
        "failureDetails" / CString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=58, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_58_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=60, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_60_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=62, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_62_0(Etw):
    pattern = Struct(
        "TaskHResultValue" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=63, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_63_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=64, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_64_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=65, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_65_0(Etw):
    pattern = Struct(
        "value1" / WString,
        "value2" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1002, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1002_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1004, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1004_0(Etw):
    pattern = Struct(
        "itemId" / WString,
        "itemName" / WString,
        "groupCount" / Int32ul,
        "tileCount" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1005, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1005_0(Etw):
    pattern = Struct(
        "itemId" / WString,
        "itemName" / WString,
        "groupCount" / Int32ul,
        "tileCount" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1100, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1100_0(Etw):
    pattern = Struct(
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1101, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1101_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1102, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1102_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1103, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1103_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1104, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1104_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1105, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1105_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1106, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1106_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1107, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1107_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1200, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1200_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1202, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1202_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1203, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1203_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1204, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1204_0(Etw):
    pattern = Struct(
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1205, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1205_0(Etw):
    pattern = Struct(
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1206, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1206_0(Etw):
    pattern = Struct(
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1207, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1207_0(Etw):
    pattern = Struct(
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1208, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1208_0(Etw):
    pattern = Struct(
        "itemId" / Guid,
        "containerId" / Guid,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1209, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1209_0(Etw):
    pattern = Struct(
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1250, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1250_0(Etw):
    pattern = Struct(
        "savedVersion" / Int64ul,
        "itemId" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1252, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1252_0(Etw):
    pattern = Struct(
        "savedVersion" / Int64ul,
        "itemId" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1253, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1253_0(Etw):
    pattern = Struct(
        "savedVersion" / Int64ul,
        "itemId" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1300, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1300_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1301, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1301_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1303, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1303_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1400, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1400_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1401, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1401_0(Etw):
    pattern = Struct(
        "tileIdentifier" / WString,
        "collectionName" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1404, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1404_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1405, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1405_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1900, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1900_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1902, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1902_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1903, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1903_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1904, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1904_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1905, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1905_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=1906, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_1906_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2101, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2101_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2102, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2102_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid,
        "savedVersion" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2103, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2103_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2110, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2110_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "size" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2111, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2111_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "size" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2112, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2112_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "size" / Int64ul,
        "savedVersion" / Int64ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2150, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2150_0(Etw):
    pattern = Struct(
        "itemName" / WString,
        "itemId" / Guid
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2151, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2151_0(Etw):
    pattern = Struct(
        "value" / WString
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2152, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2152_0(Etw):
    pattern = Struct(
        "packageFamilyName" / WString,
        "InstallState" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2153, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2153_0(Etw):
    pattern = Struct(
        "packageFamilyName" / WString,
        "InstallState" / Int32ul
    )
@declare(guid=guid("97ca8142-10b1-4baa-9fbb-70a7d11231c3"), event_id=2154, version=0)
class Microsoft_Windows_ShellCommon_StartLayoutPopulation_2154_0(Etw):
    pattern = Struct(
        "value" / WString
    )
| etl/parsers/etw/Microsoft_Windows_ShellCommon_StartLayoutPopulation.py | 21,846 | Microsoft-Windows-ShellCommon-StartLayoutPopulation
GUID : 97ca8142-10b1-4baa-9fbb-70a7d11231c3
-*- coding: utf-8 -*- | 119 | en | 0.51754 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Parse version number from pyglet/__init__.py:
# Parse version number from pyglet/__init__.py so it is defined in exactly
# one place.  Only the line starting with 'version' is executed.
info = {}
with open('pyglet/__init__.py', encoding='utf-8') as f:
    for line in f:
        if line.startswith('version'):
            exec(line, info)
            break

# Fail fast with a clear message instead of a KeyError below.
if 'version' not in info:
    raise RuntimeError("No 'version' line found in pyglet/__init__.py")

# Read the long description with an explicit encoding and a context manager
# (the original open('README.md').read() leaked the file handle and used the
# platform-default encoding, which breaks on non-UTF-8 locales).
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup_info = dict(
    name='pyglet',
    version=info['version'],
    author='Alex Holkner',
    author_email='Alex.Holkner@gmail.com',
    url='http://pyglet.readthedocs.org/en/latest/',
    download_url='http://pypi.python.org/pypi/pyglet',
    project_urls={
        'Documentation': 'https://pyglet.readthedocs.io/en/latest',
        'Source': 'https://github.com/pyglet/pyglet',
        'Tracker': 'https://github.com/pyglet/pyglet/issues',
    },
    description='Cross-platform windowing and multimedia library',
    long_description=long_description,
    long_description_content_type='text/markdown',
    license='BSD',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: MacOS X',
        'Environment :: Win32 (MS Windows)',
        'Environment :: X11 Applications',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Games/Entertainment',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],

    # Package info
    packages=['pyglet'] + ['pyglet.' + pkg for pkg in find_packages('pyglet')],

    # Add _ prefix to the names of temporary build dirs
    options={'build': {'build_base': '_build'}, },
    zip_safe=True,
)

setup(**setup_info)
| setup.py | 1,959 | !/usr/bin/env python Parse version number from pyglet/__init__.py: Package info Add _ prefix to the names of temporary build dirs | 129 | en | 0.277351 |
# SPDX-License-Identifier: Apache-2.0
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class LessOptions(object):
    """Generated read accessor for the tflite `LessOptions` FlatBuffers table.

    The table declares no fields; the class exists so the schema round-trips.
    Generated code — do not hand-edit.
    """
    __slots__ = ['_tab']
    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Read the root table offset stored at `offset` and position the
        # accessor on the table it points to.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = LessOptions()
        x.Init(buf, n + offset)
        return x
    @classmethod
    def GetRootAsLessOptions(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)
    @classmethod
    def LessOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x54\x46\x4C\x33" is the ASCII file identifier "TFL3".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
    # LessOptions
    def Init(self, buf, pos):
        # Wrap the raw buffer in a flatbuffers Table positioned at `pos`.
        self._tab = flatbuffers.table.Table(buf, pos)
# Builder helpers: LessOptions has no fields, so Start/End just open and
# close an empty table on the given flatbuffers Builder.
def Start(builder): builder.StartObject(0)
def LessOptionsStart(builder):
    """This method is deprecated. Please switch to Start."""
    return Start(builder)
def End(builder): return builder.EndObject()
def LessOptionsEnd(builder):
    """This method is deprecated. Please switch to End."""
    return End(builder)
This method is deprecated. Please switch to End.
This method is deprecated. Please switch to Start.
SPDX-License-Identifier: Apache-2.0 automatically generated by the FlatBuffers compiler, do not modify namespace: tflite LessOptions | 289 | en | 0.685683 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.