hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
957390234204d2cc7e8b745122d1f4cd14ea465e | 335 | py | Python | bin/wait_for_postgres.py | interactivemap/interactive-map-backend | 6f5ecf4e186f972bedb6757cc04ac14c0491b65b | [
"MIT"
] | 1 | 2018-03-05T06:41:08.000Z | 2018-03-05T06:41:08.000Z | bin/wait_for_postgres.py | interactivemap/interactive-map-backend | 6f5ecf4e186f972bedb6757cc04ac14c0491b65b | [
"MIT"
] | null | null | null | bin/wait_for_postgres.py | interactivemap/interactive-map-backend | 6f5ecf4e186f972bedb6757cc04ac14c0491b65b | [
"MIT"
] | null | null | null | import time
import sys

import psycopg2

# Usage: wait_for_postgres.py <dbname> <host> <port> <user> <password>
# Blocks until the given PostgreSQL server accepts connections, polling
# once per second.  Intended for container/service start-up ordering.
dbname = sys.argv[1]
host = sys.argv[2]
port = int(sys.argv[3])
user = sys.argv[4]
password = sys.argv[5]

while True:
    try:
        conn = psycopg2.connect(host=host, port=port, dbname=dbname,
                                user=user, password=password)
        conn.close()
        break
    except psycopg2.OperationalError:
        # Server is not accepting connections yet -- wait and retry.
        # Catching only OperationalError (instead of the original bare
        # ``except:``) lets KeyboardInterrupt/SystemExit stop the loop.
        time.sleep(1)
0bea63419a06ecb9ee2bcf81941c43e643e1c3e4 | 7,004 | py | Python | setup/configure_hostapd.py | Mihaylov93/tbng | 0758b02577a2a11d18590f16298fe02ee7279e89 | [
"Unlicense"
] | 34 | 2017-10-10T14:06:24.000Z | 2021-11-15T16:31:15.000Z | setup/configure_hostapd.py | Mihaylov93/tbng | 0758b02577a2a11d18590f16298fe02ee7279e89 | [
"Unlicense"
] | 6 | 2018-03-05T19:05:28.000Z | 2019-12-14T15:26:52.000Z | setup/configure_hostapd.py | Mihaylov93/tbng | 0758b02577a2a11d18590f16298fe02ee7279e89 | [
"Unlicense"
] | 6 | 2017-11-14T17:41:34.000Z | 2022-01-16T12:34:31.000Z | #!/usr/bin/env python3
#
# import modules used here -- sys is a very standard one
import argparse,json
import urllib.request
import tempfile
import gzip
import random
import netifaces as ni
from libtbngsetup import *
# Word lists for random access-point name/password generation: an adjective
# and a noun are combined into a CamelCase name (see generate_name()).
Adjectives=[
'perfect','soft','hurt','zonked',
'spiritual','nondescript','ragged',
'unadvised','overconfident','ambitious','idiotic',
'bashful','guarded','elite','waggish','typical','bouncy',
'labored','placid','drab','moldy','highfalutin','maddening','imported','selfish',
'mighty','lackadaisical','redundant','parsimonious','fallacious','simplistic',
'brawny','dysfunctional','complex','stupendous','responsible','disgusting','solid',
'chemical','painful','many',
'curious','elfin','godly','rustic','classy','tidy','nappy','furtive',
'bumpy','depressed','spiky','somber','satisfying','halting','befitting',
'aloof','hateful','hot','snotty','freezing','blue-eyed','ceaseless','early'
]
# Nouns used as the second half of a generated access-point name.
Nouns=[
'statement','group','bite','school','walk','shop','toe','limit',
'gold','sort','back','lunchroom','care','view','dust','question','queen','start',
'art','ship','pencil','sugar','feeling','fruit','twig','desire','knee','elbow',
'month','jewel','toes','pocket','glass','night','cattle','place','cap','fish',
'trucks','fact','sack','rake','jail','bed','sweater','teeth','uncle','representative','sister',
'note','quill','force','board','jam',
'quarter','mouth','hate','boat','waste','coat','hat','event','chance','soup'
]
def generate_name():
    """Return a random CamelCase access-point name (Adjective + Noun).

    Uses ``random.SystemRandom`` (OS entropy) rather than the seedable
    default PRNG.
    """
    logging.debug("Generating random name")
    # A single SystemRandom instance is enough; the original re-created it
    # before every draw, which is pure overhead.
    secure_random = random.SystemRandom()
    adjective = secure_random.choice(Adjectives)
    noun = secure_random.choice(Nouns)
    return adjective.title() + noun.title()
def generate_password(name):
    """Return *name* suffixed with a random six-digit number (OS entropy)."""
    logging.debug("Generating random password")
    rng = random.SystemRandom()
    suffix = rng.randint(100000, 999999)
    return "{0}{1}".format(name, suffix)
# Gather our code in a main() function
def main(args, loglevel):
    """Install a static hostapd build and generate its config/systemd unit.

    Steps: validate arguments, download a prebuilt hostapd for ``args.arch``,
    unpack it into the project's bin directory, render the hostapd config and
    systemd unit from templates, then register/enable the service.

    Args:
        args: parsed argparse namespace (arch, interface, apname,
            appassword, driver).
        loglevel: logging level for this run.

    Raises:
        Exception: if the password is shorter than 8 symbols or the
            interface has no address.
    """
    logging.basicConfig(format="%(levelname)s: %(message)s", level=loglevel)
    logging.debug("Arguments passed: {0}".format(args))

    # Random fallbacks, used only when -n / -p are not supplied.
    stubName = generate_name()
    stubPassword = generate_password(stubName)

    parameters = {}
    parameters['project'] = project_dir
    parameters['interface'] = args.interface
    if args.apname is not None:
        parameters['apname'] = args.apname
    else:
        parameters['apname'] = stubName
    if args.appassword is not None:
        parameters['appassword'] = args.appassword
    else:
        parameters['appassword'] = stubPassword
    parameters['driver'] = args.driver

    logging.info("Checking arguments")
    if len(parameters['appassword']) < 8:
        raise Exception("Access point password must be 8 symbols or more")

    logging.info("Checking {0} is configured manually".format(parameters['interface']))
    logging.info("Trying to get address of interface {0}".format(parameters['interface']))
    ip_address = check_interface(parameters['interface'])
    if not ip_address:
        raise Exception("Cannot determine interface {0} address. Run ifup {0} and restart the script".format(args.interface))

    filename = "{0}_hostapd.gz".format(tempfile.mktemp())
    url = "http://static-bins.herokuapp.com/files/{0}/hostapd/hostapd.gz".format(args.arch)
    logging.info("""Downloading from {0}
to {1}
""".format(url, filename))
    urllib.request.urlretrieve(url, filename)

    logging.info("Extracting archive")
    with gzip.open(filename, "rb") as compressed_file:
        with open("{0}/bin/hostapd-tbng".format(project_dir), "wb") as uncompressed_file:
            uncompressed_file.write(compressed_file.read())
    logging.debug(utility.run_shell_command("chmod a+x {0}/bin/hostapd-tbng".format(project_dir)).decode("utf-8"))

    logging.info("Generating hostapd config file")
    # ``with`` closes the template files (the originals leaked the handles),
    # and substitute() is now called once per template (it used to be called
    # twice, with the first result discarded).
    with open("{0}/setup/templates/hostapd-tbng.conf".format(project_dir)) as filein:
        src = Template(filein.read())
    with open("{0}/config/hostapd-tbng.conf".format(project_dir), "w") as text_file:
        text_file.write(src.substitute(parameters))

    logging.info("Generating systemd file")
    systemd_folder = "/lib/systemd/system"
    with open("{0}/setup/templates/hostapd-tbng.service".format(project_dir)) as filein:
        src = Template(filein.read())
    with open("{0}/hostapd-tbng.service".format(systemd_folder), "w") as text_file:
        text_file.write(src.substitute(parameters))
    logging.info("File {0}/hostapd-tbng.service created".format(systemd_folder))

    logging.debug(utility.run_shell_command("systemctl daemon-reload").decode("utf-8"))
    logging.debug(utility.run_shell_command("systemctl enable hostapd-tbng").decode("utf-8"))
    logging.debug(utility.run_shell_command("sync").decode("utf-8"))

    result = """Static version of hostapd binary installed to {0}/bin/hostapd-tbng.
Configuration located at {0}/config/hostapd-tbng.conf.
Your AP name: {3}
Your password: {4}
SystemD service hostapd-tbng is registered and enabled by default.
Don't forget to configure dhcp service for {1} or use static IPs. Current IP of {1} is {2}""".format(
        project_dir,
        parameters['interface'],
        ip_address,
        parameters['apname'],
        parameters['appassword']
    )
    logging.info(result)
# Standard boilerplate to call the main() function to begin
# the program.
# NOTE: these two checks run at *import* time as well as when executed
# directly, so importing this module requires Python 3 and root.
if sys.version_info[0] < 3:
    raise Exception("Python 3.x is required.")
if not os.geteuid() == 0:
    raise Exception("sudo or root is required.")

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Script to configure static version of hostapd. Use with caution."
    )
    parser.add_argument('-a',
                        '--arch',
                        type=str, help='Architecture (armh,aarch64,x86,x86_64,...)', required=True)
    parser.add_argument('-i',
                        '--interface',
                        type=str, help="Interface name (wlan0,wlan1,...)", required=True)
    parser.add_argument('-n',
                        '--apname',
                        type=str, help="Access point name - will be chosen randomly, if omitted", required=False)
    parser.add_argument('-p',
                        '--appassword',
                        type=str, help="Access point password (must be 8+ symbols) - will be chosen randomly if omitted", required=False)
    parser.add_argument('-d',
                        '--driver',
                        type=str, default="nl80211", help="Driver for hostapd. Default is nl80211, also possible rtl871xdrv for Realtek cards")
    parser.add_argument(
        "-v",
        "--verbose",
        help="increase output verbosity",
        action="store_true")
    args = parser.parse_args()

    # Setup logging
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO

    # The Python-3 and root checks already ran at module level above; the
    # duplicated version check and the stale, commented-out root check that
    # used to live here were removed.
    main(args, loglevel)
| 35.917949 | 141 | 0.679897 |
88010a675e55c170caf560427b77fdeb7ff18b6a | 4,013 | py | Python | py_src/yolov4/tflite/__init__.py | lbcsept/tensorflow-yolov4 | 7ec8b2463a45a579b27b0d4439268560440b190e | [
"MIT"
] | 1 | 2021-05-28T16:49:17.000Z | 2021-05-28T16:49:17.000Z | py_src/yolov4/tflite/__init__.py | waltpeng/tensorflow-yolov4 | fd45eea11d2de86664978184e6b798daf2546cdc | [
"MIT"
] | null | null | null | py_src/yolov4/tflite/__init__.py | waltpeng/tensorflow-yolov4 | fd45eea11d2de86664978184e6b798daf2546cdc | [
"MIT"
] | null | null | null | """
MIT License
Copyright (c) 2020 Hyeonki Hong <hhk7734@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import time
from typing import Union
import numpy as np
try:
import tensorflow.lite as tflite
except ModuleNotFoundError:
import tflite_runtime.interpreter as tflite
from ..common import media, predict
from ..common.base_class import BaseClass
class YOLOv4(BaseClass):
    """YOLOv4 object detector running on a TensorFlow Lite interpreter.

    Workflow: construct, call ``load_tflite()`` with a converted model, then
    call ``predict()`` per frame.  Pre/post-processing helpers
    (``resize_image``, ``candidates_to_pred_bboxes``,
    ``fit_pred_bboxes_to_original``) are inherited from ``BaseClass``.
    """

    def __init__(self, tiny: bool = False, tpu: bool = False):
        """
        Default configuration

        @param tiny: select the tiny model variant (handled by BaseClass)
        @param tpu:  run on an Edge TPU delegate instead of the CPU
        """
        super(YOLOv4, self).__init__(tiny=tiny, tpu=tpu)
        self.grid_coord = []
        self.input_index = None   # tensor index of the model input
        self.interpreter = None   # tflite.Interpreter, set by load_tflite()
        self.output_index = None  # tensor indices of the model outputs
        self.output_size = None

    def load_tflite(self, tflite_path):
        """Create the TFLite interpreter and cache input/output tensor info.

        @param tflite_path: path to the .tflite model file
        """
        if self.tpu:
            # Edge TPU models need the libedgetpu delegate loaded explicitly.
            self.interpreter = tflite.Interpreter(
                model_path=tflite_path,
                experimental_delegates=[
                    tflite.load_delegate("libedgetpu.so.1")
                ],
            )
        else:
            self.interpreter = tflite.Interpreter(model_path=tflite_path)
        self.interpreter.allocate_tensors()

        input_details = self.interpreter.get_input_details()[0]
        # width, height -- indices 2 and 1 of the input shape (assumes an
        # NHWC input layout; TODO confirm for quantized/NCHW exports)
        self.input_size = (input_details["shape"][2], input_details["shape"][1])
        self.input_index = input_details["index"]

        output_details = self.interpreter.get_output_details()
        self.output_index = [details["index"] for details in output_details]

    #############
    # Inference #
    #############

    def predict(
        self,
        frame: np.ndarray,
        iou_threshold: float = 0.3,
        score_threshold: float = 0.25,
    ):
        """
        Predict one frame

        @param frame: Dim(height, width, channels)
        @param iou_threshold: IoU threshold passed to NMS/bbox decoding
        @param score_threshold: minimum confidence passed to bbox decoding

        @return pred_bboxes == Dim(-1, (x, y, w, h, class_id, probability))
        """
        # image_data == Dim(1, input_size[1], input_size[0], channels)
        # Normalize pixels to [0, 1] floats and add a batch dimension.
        image_data = self.resize_image(frame)
        image_data = image_data / 255
        image_data = image_data[np.newaxis, ...].astype(np.float32)

        # s_pred, m_pred, l_pred
        # x_pred == Dim(1, g_height, g_width, anchors, (bbox))
        self.interpreter.set_tensor(self.input_index, image_data)
        self.interpreter.invoke()
        candidates = [
            self.interpreter.get_tensor(index) for index in self.output_index
        ]

        # Flatten each scale's grid into a single anchor axis; the literal 3
        # presumably corresponds to three anchor boxes per grid cell --
        # TODO confirm against the model's anchor config.
        _candidates = []
        for candidate in candidates:
            grid_size = candidate.shape[1:3]
            _candidates.append(
                np.reshape(
                    candidate[0], (1, grid_size[0] * grid_size[1] * 3, -1)
                )
            )
        candidates = np.concatenate(_candidates, axis=1)

        # Decode candidates to boxes, then map them back to the original
        # (un-resized) frame's coordinate space.
        pred_bboxes = self.candidates_to_pred_bboxes(
            candidates[0],
            iou_threshold=iou_threshold,
            score_threshold=score_threshold,
        )
        pred_bboxes = self.fit_pred_bboxes_to_original(pred_bboxes, frame.shape)
        return pred_bboxes
| 34.895652 | 80 | 0.651134 |
9db53d821d86d16c3405034753625c3f44caaa8c | 12,188 | py | Python | torch_collections/models/_retinanet.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | 3 | 2018-08-14T19:40:58.000Z | 2018-10-22T15:41:39.000Z | torch_collections/models/_retinanet.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | 2 | 2018-08-14T19:40:41.000Z | 2018-10-29T14:46:40.000Z | torch_collections/models/_retinanet.py | mingruimingrui/torch-collections | f7c20b28b63de76c763983338aa4c825904ef4cd | [
"MIT"
] | null | null | null | """ RetinaNet submodules """
import math
import torch
from ..modules import Anchors, ConvBlock2d, DenseBlock2d
from ..losses import DetectionFocalLoss, DetectionSmoothL1Loss
from ..utils import anchors as utils_anchors
def compute_targets(
    batch_annotations,
    image_shape,
    anchors,
    num_classes,
    regression_mean=0.0,
    regression_std=0.2
):
    """ Function to compute the classification and regression targets given a set of annotations and anchors
    Args
        batch_annotations : List of annotations where each annotation is a (num_detection, 5) shaped torch.Tensor
        image_shape       : Shape of the input images; used to mask out anchors outside the image
        anchors           : torch.Tensor containing all anchors generated on the image_batch
            should be (batch_size, num_anchors, 4) shaped
        num_classes       : Number of classes model classifies
        regression_mean   : The regression mean shift for (x1, y1, x2, y2)
        regression_std    : The regression scale for (x1, y1, x2, y2)
    Returns
        (regression_batch, labels_batch, states_batch), each stacked along a
        new batch dimension (dim 0):
            regression_batch : per-anchor bbox regression targets from bbox_transform
            labels_batch     : per-anchor classification targets from anchor_targets_bbox
            states_batch     : per-anchor states from anchor_targets_bbox
    """
    # Create blobs to store anchor informations
    regression_batch = []
    labels_batch = []
    states_batch = []

    # Compute per-image targets, then stack them into batch tensors.
    for annotations, anchor in zip(batch_annotations, anchors):
        labels, annotations, anchor_states = utils_anchors.anchor_targets_bbox(
            anchor,
            annotations,
            num_classes=num_classes,
            mask_shape=image_shape
        )
        regression = utils_anchors.bbox_transform(
            anchor,
            annotations,
            mean=regression_mean,
            std=regression_std
        )
        regression_batch.append(regression)
        labels_batch.append(labels)
        states_batch.append(anchor_states)

    regression_batch = torch.stack(regression_batch, dim=0)
    labels_batch = torch.stack(labels_batch , dim=0)
    states_batch = torch.stack(states_batch , dim=0)

    return regression_batch, labels_batch, states_batch
def _init_zero(t):
torch.nn.init.constant_(t, 0.0)
def _init_uniform(t):
torch.nn.init.normal_(t, 0.0, 0.01)
def _make_dynamic_block(
    block_type='fc',
    num_layers=4,
    input_size=256,
    internal_size=256,
    growth_rate=64
):
    """Build a 2d feature-extraction block and report its output channels.

    Args
        block_type    : 'fc' for a plain conv stack, 'dense' for a dense block
        num_layers    : number of layers in the block
        input_size    : input channel count
        internal_size : internal channel count (used only by 'fc' blocks)
        growth_rate   : channel growth per layer (used only by 'dense' blocks)

    Returns
        (block, output_size) where ``block`` maps a [N, input_size, H, W]
        tensor to [N, output_size, H, W].
    """
    if block_type == 'fc':
        # Default head from the https://arxiv.org/abs/1708.02002 paper;
        # its fully connected conv layers carry biases.
        feature_block = ConvBlock2d(
            input_feature_size=input_size,
            output_feature_size=internal_size,
            internal_feature_size=internal_size,
            num_layers=num_layers,
            batch_norm=False,
            dropout=None,
            bias=True,
            bias_initializer=_init_zero,
            weight_initializer=_init_uniform
        )
    elif block_type == 'dense':
        # Dense blocks (densenet paper) use bias-less conv layers.
        feature_block = DenseBlock2d(
            input_feature_size=input_size,
            num_layers=num_layers,
            growth_rate=growth_rate,
            batch_norm=False,
            transition=False,
            dropout=None,
            bias=False,
            weight_initializer=_init_uniform
        )
    else:
        raise ValueError('block_type must be either fc or dense, cannot be {}'.format(block_type))

    # Probe with a 1x1 dummy tensor to discover the output channel count.
    probe_output = feature_block(torch.Tensor(1, input_size, 1, 1))
    return feature_block, probe_output.shape[1]
class FeaturePyramidSubmodel(torch.nn.Module):
    """Feature Pyramid Network head (https://arxiv.org/abs/1612.03144).

    Builds pyramid levels P3-P7 from backbone feature maps C3-C5; P6/P7 are
    produced with stride-2 convolutions as in RetinaNet
    (https://arxiv.org/abs/1708.02002).
    """

    def __init__(self, backbone_channel_sizes, feature_size=256):
        """
        Args
            backbone_channel_sizes : channel counts of the backbone stages;
                the last three entries are taken as C3, C4, C5.
            feature_size : channel count of every output pyramid level.
        """
        super(FeaturePyramidSubmodel, self).__init__()
        C3_size, C4_size, C5_size = backbone_channel_sizes[-3:]

        self.relu = torch.nn.ReLU(inplace=False)

        self.conv_C5_reduce = torch.nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.conv_P5 = torch.nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.conv_C4_reduce = torch.nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.conv_P4 = torch.nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.conv_C3_reduce = torch.nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.conv_P3 = torch.nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.conv_P6 = torch.nn.Conv2d(C5_size, feature_size, kernel_size=3, stride=2, padding=1)
        self.conv_P7 = torch.nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=2, padding=1)

    @staticmethod
    def _upsample_to(source, reference):
        """Bilinearly resize ``source`` to ``reference``'s spatial size.

        ``interpolate`` is used whenever available; ``upsample`` is its
        deprecated alias, kept only for very old torch versions.  (The
        original duplicated this version check inline, twice, and routed
        modern torch to the deprecated call.)
        """
        if hasattr(torch.nn.functional, 'interpolate'):
            return torch.nn.functional.interpolate(
                source, size=reference.shape[-2:], mode='bilinear', align_corners=False)
        return torch.nn.functional.upsample(
            source, size=reference.shape[-2:], mode='bilinear', align_corners=False)

    def forward(self, C3, C4, C5):
        """Return pyramid feature maps (P3, P4, P5, P6, P7)."""
        # upsample C5 to get P5 from the FPN paper
        P5 = self.conv_C5_reduce(C5)
        P5_upsampled = self._upsample_to(P5, C4)
        P5 = self.conv_P5(P5)

        # add P5 elementwise to C4
        P4 = self.conv_C4_reduce(C4)
        P4 = P5_upsampled + P4
        P4_upsampled = self._upsample_to(P4, C3)
        P4 = self.conv_P4(P4)

        # add P4 elementwise to C3
        P3 = self.conv_C3_reduce(C3)
        P3 = P4_upsampled + P3
        P3 = self.conv_P3(P3)

        # "P6 is obtained via a 3x3 stride-2 conv on C5"
        P6 = self.conv_P6(C5)

        # "P7 is computed by applying ReLU followed by a 3x3 stride-2 conv on P6"
        P7 = self.relu(P6)
        P7 = self.conv_P7(P7)

        return P3, P4, P5, P6, P7
class DynamicRegressionModel(torch.nn.Module):
    """Box-regression head: predicts 4 offsets per anchor per location."""

    def __init__(
        self,
        num_anchors,
        pyramid_feature_size=256,
        regression_feature_size=256,
        growth_rate=64,
        num_layers=4,
        block_type='fc'
    ):
        """
        Args
            num_anchors             : anchors per spatial location
            pyramid_feature_size    : channels of the incoming FPN feature map
            regression_feature_size : internal channel size ('fc' blocks only)
            growth_rate             : channel growth rate ('dense' blocks only)
            num_layers              : layers in the feature block
            block_type              : 'fc' or 'dense' (see _make_dynamic_block)
        """
        super(DynamicRegressionModel, self).__init__()

        # Make all layers
        self.block, block_output_size = _make_dynamic_block(
            block_type=block_type,
            num_layers=num_layers,
            input_size=pyramid_feature_size,
            internal_size=regression_feature_size,
            growth_rate=growth_rate
        )
        # NOTE(review): self.relu is registered but never used in forward()
        # -- presumably a leftover; confirm before removing.
        self.relu = torch.nn.ReLU(inplace=False)
        self.conv_final = torch.nn.Conv2d(
            block_output_size,
            num_anchors * 4,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False
        )

        # Initialize regression output to be small
        _init_uniform(self.conv_final.weight)

    def forward(self, x):
        """Return regression offsets, shape (batch, num_total_anchors, 4)."""
        x = self.block(x)
        x = self.conv_final(x)
        # (N, A*4, H, W) -> (N, H, W, A*4) -> (N, H*W*A, 4)
        return x.permute(0, 2, 3, 1).reshape(x.shape[0], -1, 4)
class DynamicClassificationModel(torch.nn.Module):
    """Classification head: predicts per-anchor class probabilities.

    Args
        num_classes                 : number of object classes per anchor
        num_anchors                 : anchors per spatial location
        pyramid_feature_size        : channels of the incoming FPN feature map
        classification_feature_size : internal channel size ('fc' blocks only)
        growth_rate                 : channel growth rate ('dense' blocks only)
        num_layers                  : layers in the feature block
        block_type                  : 'fc' or 'dense' (see _make_dynamic_block)
        prior_probability           : probability every anchor should emit
            after the sigmoid at initialization (RetinaNet paper, sec. 4.1)
    """

    def __init__(
        self,
        num_classes,
        num_anchors,
        pyramid_feature_size=256,
        classification_feature_size=256,
        growth_rate=64,
        num_layers=4,
        block_type='fc',
        prior_probability=0.01
    ):
        super(DynamicClassificationModel, self).__init__()
        self.num_classes = num_classes

        # Make all layers
        self.block, block_output_size = _make_dynamic_block(
            block_type=block_type,
            num_layers=num_layers,
            input_size=pyramid_feature_size,
            internal_size=classification_feature_size,
            growth_rate=growth_rate
        )
        self.sigmoid = torch.nn.Sigmoid()
        self.conv_final = torch.nn.Conv2d(
            block_output_size,
            num_anchors * num_classes,
            kernel_size=3,
            stride=1,
            padding=1
        )

        # Initialize the classification output so every anchor starts out
        # predicting ``prior_probability`` after the sigmoid:
        #   kernel ~ 0.0
        #   bias   ~ -log((1 - p) / p)
        # BUGFIX: the original hard-coded p = 0.01 here and silently ignored
        # the ``prior_probability`` argument; the default (0.01) behaves
        # exactly as before.
        kernel = self.conv_final.weight
        bias = self.conv_final.bias
        kernel.data.fill_(0.0)
        bias.data.fill_(-math.log((1 - prior_probability) / prior_probability))

    def forward(self, x):
        """Return class probabilities, shape (batch, num_total_anchors, num_classes)."""
        x = self.block(x)
        x = self.conv_final(x)
        x = self.sigmoid(x)
        # (N, A*C, H, W) -> (N, H, W, A*C) -> (N, H*W*A, C)
        return x.permute(0, 2, 3, 1).reshape(x.shape[0], -1, self.num_classes)
class ComputeAnchors(torch.nn.Module):
    """Generates anchors for pyramid levels P3-P7 and concatenates them."""

    def __init__(self, sizes, strides, ratios, scales):
        super(ComputeAnchors, self).__init__()
        assert len(sizes) == 5
        assert len(strides) == 5
        self.levels = [3, 4, 5, 6, 7]
        # One Anchors submodule per pyramid level, registered as anchor_P3..P7.
        for level, size, stride in zip(self.levels, sizes, strides):
            generator = Anchors(
                size=size,
                stride=stride,
                ratios=ratios,
                scales=scales
            )
            setattr(self, 'anchor_P{}'.format(level), generator)

    def forward(self, batch_size, feature_shapes):
        """Return all anchors for the given per-level feature shapes,
        concatenated along the anchor dimension (dim 1)."""
        per_level_anchors = []
        for level, feature_shape in zip(self.levels, feature_shapes):
            generator = getattr(self, 'anchor_P{}'.format(level))
            per_level_anchors.append(generator(batch_size, feature_shape[-2:]))
        return torch.cat(per_level_anchors, dim=1)
class RetinaNetLoss(torch.nn.Module):
    """Combined RetinaNet loss: focal classification + smooth-L1 regression.

    ``forward`` returns the summed loss, or ``None`` when back-propagation
    should be skipped for the batch (see comments below).
    """

    def __init__(
        self,
        num_classes,
        focal_alpha=0.25,
        focal_gamma=2.0,
        huber_sigma=3.0,
        regression_mean=0.0,
        regression_std=0.2
    ):
        super(RetinaNetLoss, self).__init__()
        self.num_classes = num_classes
        # Mean/std used to normalize regression targets in compute_targets.
        self.regression_mean = regression_mean
        self.regression_std = regression_std

        self.focal_loss_fn = DetectionFocalLoss(alpha=focal_alpha, gamma=focal_gamma)
        self.huber_loss_fn = DetectionSmoothL1Loss(sigma=huber_sigma)

    def forward(self, output_regression, output_classification, batch_annotations, image_shape, anchors):
        """Return total loss for the batch, or ``None`` to skip backprop."""
        # Compute targets
        target_regression, target_classification, anchor_states = compute_targets(
            batch_annotations, image_shape, anchors,
            num_classes=self.num_classes,
            regression_mean=self.regression_mean,
            regression_std=self.regression_std
        )

        # Calculate losses
        classification_loss = self.focal_loss_fn(
            output_classification,
            target_classification,
            anchor_states
        )
        regression_loss = self.huber_loss_fn(
            output_regression,
            target_regression,
            anchor_states
        )

        # Return None if all anchors are to be ignored.
        # Provides an easy way for the caller to skip back prop.
        if classification_loss is None:
            return None

        if regression_loss is None:
            # TODO: Identify which is the better way to train model
            # Regression loss defaults to 0 in the event that there are no positive anchors
            # Basically ensures that backprob happens only for negative classification
            # regression_loss = 0.0

            # Return None if no positive anchors
            # Regression loss tends to be inflated when there are no positive anchors
            # Due to large number of negative anchors already, negative mining seems
            # rather overkill
            return None

        return classification_loss + regression_loss
| 34.922636 | 120 | 0.624138 |
484a7d4efb683b31095d1a0e61a514ae1279ff11 | 575 | py | Python | server/apps/utils/rest/exceptions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/rest/exceptions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | server/apps/utils/rest/exceptions.py | iotile/iotile_cloud | 9dc65ac86d3a730bba42108ed7d9bbb963d22ba6 | [
"MIT"
] | null | null | null | from rest_framework.exceptions import APIException
class ApiIllegalFilterOrTargetException(APIException):
    """HTTP 400 raised when a request's filter/target string is illegal."""
    status_code = 400
    default_detail = 'Bad Request: Illegal filter/target string'
    default_code = 'bad_request'
class ApiIllegalSlugException(APIException):
    """HTTP 400 raised when a Project, Device or Stream slug is malformed."""
    status_code = 400
    default_detail = 'Bad Request: Illegal Project, Device or Stream slug'
    default_code = 'bad_request'
class ApiIllegalPkException(APIException):
    """HTTP 400 raised when a non-integer database primary key is supplied."""
    status_code = 400
    default_detail = 'Bad Request: Expected an integer database ID'
    default_code = 'bad_request'
| 26.136364 | 74 | 0.766957 |
20273378ee7c76fd73d74298d8ec3936faf2c7de | 6,735 | py | Python | tools/ci/python_packages/ttfw_idf/CIScanTests.py | git4Chub/esp-idf | 39cbf2f7ed4810913031055065da08227020e8b4 | [
"Apache-2.0"
] | 1 | 2021-06-16T04:18:02.000Z | 2021-06-16T04:18:02.000Z | tools/ci/python_packages/ttfw_idf/CIScanTests.py | git4Chub/esp-idf | 39cbf2f7ed4810913031055065da08227020e8b4 | [
"Apache-2.0"
] | null | null | null | tools/ci/python_packages/ttfw_idf/CIScanTests.py | git4Chub/esp-idf | 39cbf2f7ed4810913031055065da08227020e8b4 | [
"Apache-2.0"
] | null | null | null | import argparse
import errno
import json
import logging
import os
from collections import defaultdict
from copy import deepcopy
from find_apps import find_apps
from find_build_apps import BUILD_SYSTEMS, BUILD_SYSTEM_CMAKE
from ttfw_idf.IDFAssignTest import ExampleAssignTest, TestAppsAssignTest
from idf_py_actions.constants import SUPPORTED_TARGETS, PREVIEW_TARGETS
# Maps a scan test type to the CI bot environment label(s) that select it;
# values may be a single label or a list of labels (see _judge_build_or_not).
TEST_LABELS = {
    'example_test': 'BOT_LABEL_EXAMPLE_TEST',
    'test_apps': 'BOT_LABEL_CUSTOM_TEST',
    'component_ut': ['BOT_LABEL_UNIT_TEST',
                     'BOT_LABEL_UNIT_TEST_32',
                     'BOT_LABEL_UNIT_TEST_S2'],
}

# Any of these labels forces building every app, not just test-related ones.
BUILD_ALL_LABELS = [
    'BOT_LABEL_BUILD',
    'BOT_LABEL_BUILD_ALL_APPS',
    'BOT_LABEL_REGULAR_TEST',
    'BOT_LABEL_WEEKEND_TEST',
]
def _has_build_all_label():
    """Return True if any build-all bot label is set in the environment."""
    return any(os.getenv(label) for label in BUILD_ALL_LABELS)
def _judge_build_or_not(action, build_all):  # type: (str, bool) -> (bool, bool)
    """
    Decide what to build based on CI bot labels in the environment.

    :param action: test type, a key of TEST_LABELS ('example_test', ...)
    :param build_all: force building everything regardless of labels
    :return: (build_or_not_for_test_related_apps, build_or_not_for_non_related_apps)
    """
    # Build everything when explicitly requested, when a build-all label is
    # set, or when this pipeline was not triggered by labels at all.
    if build_all or _has_build_all_label() or (not os.getenv('BOT_TRIGGER_WITH_LABEL')):
        logging.info('Build all apps')
        return True, True

    # TEST_LABELS values may be a single label or a list of labels.
    labels = TEST_LABELS[action]
    if not isinstance(labels, list):
        labels = [labels]

    for label in labels:
        if os.getenv(label):
            logging.info('Build only test cases apps')
            return True, False

    logging.info('Skip all')
    return False, False
def output_json(apps_dict_list, target, build_system, output_dir):
    """Write one JSON object per line to scan_<target>_<build_system>.json."""
    destination = os.path.join(
        output_dir, 'scan_{}_{}.json'.format(target.lower(), build_system))
    with open(destination, 'w') as handle:
        for app in apps_dict_list:
            handle.write(json.dumps(app) + '\n')
def main():
    """Scan app paths and emit per-target scan_<target>_<build_system>.json
    files describing which apps CI should build."""
    parser = argparse.ArgumentParser(description='Scan the required build tests')
    parser.add_argument('test_type',
                        choices=TEST_LABELS.keys(),
                        help='Scan test type')
    parser.add_argument('paths', nargs='+',
                        help='One or more app paths')
    parser.add_argument('-b', '--build-system',
                        choices=BUILD_SYSTEMS.keys(),
                        default=BUILD_SYSTEM_CMAKE)
    parser.add_argument('-c', '--ci-config-file',
                        required=True,
                        help="gitlab ci config target-test file")
    parser.add_argument('-o', '--output-path',
                        required=True,
                        help="output path of the scan result")
    parser.add_argument("--exclude", nargs="*",
                        help='Ignore specified directory. Can be used multiple times.')
    parser.add_argument('--preserve', action="store_true",
                        help='add this flag to preserve artifacts for all apps')
    parser.add_argument('--build-all', action="store_true",
                        help='add this flag to build all apps')

    args = parser.parse_args()
    build_test_case_apps, build_standalone_apps = _judge_build_or_not(args.test_type, args.build_all)

    # exist_ok=True replaces the original errno.EEXIST dance (race-safe).
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path, exist_ok=True)

    # NOTE: mutates the imported SUPPORTED_TARGETS list in place.
    SUPPORTED_TARGETS.extend(PREVIEW_TARGETS)

    if (not build_standalone_apps) and (not build_test_case_apps):
        # Nothing to build: still emit empty scan files so downstream jobs
        # find them, then exit successfully.
        for target in SUPPORTED_TARGETS:
            output_json([], target, args.build_system, args.output_path)
        # BUGFIX: the original created ``SystemExit(0)`` without raising it,
        # so the scan continued (harmlessly, but wastefully) past this point.
        raise SystemExit(0)

    paths = {os.path.join(os.getenv('IDF_PATH'), path) if not os.path.isabs(path) else path
             for path in args.paths}

    test_cases = []
    for path in paths:
        if args.test_type == 'example_test':
            assign = ExampleAssignTest(path, args.ci_config_file)
        elif args.test_type in ['test_apps', 'component_ut']:
            assign = TestAppsAssignTest(path, args.ci_config_file)
        else:
            raise SystemExit(1)  # which is impossible, argparse validates test_type
        test_cases.extend(assign.search_cases())

    # scan_info_dict layout:
    # {
    #     <target>: {
    #         'test_case_apps': [<app_dir>],  # which is used in target tests
    #         'standalone_apps': [<app_dir>], # which is not
    #     },
    #     ...
    # }
    scan_info_dict = defaultdict(dict)
    # store the test cases dir, exclude these folders when scan for standalone apps
    default_exclude = args.exclude if args.exclude else []
    exclude_apps = deepcopy(default_exclude)

    build_system = args.build_system.lower()
    build_system_class = BUILD_SYSTEMS[build_system]

    if build_test_case_apps:
        for target in SUPPORTED_TARGETS:
            target_dict = scan_info_dict[target]
            test_case_apps = target_dict['test_case_apps'] = set()
            for case in test_cases:
                app_dir = case.case_info['app_dir']
                app_target = case.case_info['target']
                if app_target.lower() != target.lower():
                    continue
                test_case_apps.update(find_apps(build_system_class, app_dir, True, default_exclude, target.lower()))
                exclude_apps.append(app_dir)
    else:
        for target in SUPPORTED_TARGETS:
            scan_info_dict[target]['test_case_apps'] = set()

    if build_standalone_apps:
        for target in SUPPORTED_TARGETS:
            target_dict = scan_info_dict[target]
            standalone_apps = target_dict['standalone_apps'] = set()
            for path in paths:
                standalone_apps.update(find_apps(build_system_class, path, True, exclude_apps, target.lower()))
    else:
        for target in SUPPORTED_TARGETS:
            scan_info_dict[target]['standalone_apps'] = set()

    # CMake builds preserve test-case app artifacts by default.
    test_case_apps_preserve_default = build_system == 'cmake'
    for target in SUPPORTED_TARGETS:
        apps = []
        for app_dir in scan_info_dict[target]['test_case_apps']:
            apps.append({
                'app_dir': app_dir,
                'build_system': args.build_system,
                'target': target,
                'preserve': args.preserve or test_case_apps_preserve_default
            })
        for app_dir in scan_info_dict[target]['standalone_apps']:
            apps.append({
                'app_dir': app_dir,
                'build_system': args.build_system,
                'target': target,
                'preserve': args.preserve
            })
        # Reuse the module-level helper instead of duplicating its body.
        output_json(apps, target, build_system, args.output_path)


if __name__ == '__main__':
    main()
| 37.005495 | 120 | 0.624796 |
67a13907a3594cc419cc74fcfbb781679f3ddf9b | 7,980 | py | Python | instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py | toumorokoshi/opentelemetry-python-contrib | 7159372e3b381119715c99a37603b3d2d6b9ea46 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py | toumorokoshi/opentelemetry-python-contrib | 7159372e3b381119715c99a37603b3d2d6b9ea46 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | instrumentation/opentelemetry-instrumentation-flask/src/opentelemetry/instrumentation/flask/__init__.py | toumorokoshi/opentelemetry-python-contrib | 7159372e3b381119715c99a37603b3d2d6b9ea46 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: This package is not named "flask" because of
# https://github.com/PyCQA/pylint/issues/2648
"""
This library builds on the OpenTelemetry WSGI middleware to track web requests
in Flask applications. In addition to opentelemetry-util-http, it
supports Flask-specific features such as:
* The Flask url rule pattern is used as the Span name.
* The ``http.route`` Span attribute is set so that one can see which URL rule
matched a request.
Usage
-----
.. code-block:: python
from flask import Flask
from opentelemetry.instrumentation.flask import FlaskInstrumentor
app = Flask(__name__)
FlaskInstrumentor().instrument_app(app)
@app.route("/")
def hello():
return "Hello!"
if __name__ == "__main__":
app.run(debug=True)
API
---
"""
from logging import getLogger
import flask
import opentelemetry.instrumentation.wsgi as otel_wsgi
from opentelemetry import context, trace
from opentelemetry.instrumentation.flask.version import __version__
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
from opentelemetry.propagate import extract
from opentelemetry.util.http import get_excluded_urls
from opentelemetry.util.providers import time_ns
_logger = getLogger(__name__)

# WSGI-environ keys used to pass per-request state (start time, span,
# context-manager activation, and context token) between the wrapped WSGI
# app, the before_request hook, and the teardown_request hook.
_ENVIRON_STARTTIME_KEY = "opentelemetry-flask.starttime_key"
_ENVIRON_SPAN_KEY = "opentelemetry-flask.span_key"
_ENVIRON_ACTIVATION_KEY = "opentelemetry-flask.activation_key"
_ENVIRON_TOKEN = "opentelemetry-flask.token"

# URL patterns for which instrumentation is disabled (see get_excluded_urls).
_excluded_urls = get_excluded_urls("FLASK")
def get_default_span_name():
    """Return the span name for the current Flask request.

    Prefers the matched URL rule pattern; when no rule matched (e.g. a 404
    from an unknown route, where ``url_rule`` is absent), falls back to the
    generic WSGI default name for the request.
    """
    try:
        return flask.request.url_rule.rule
    except AttributeError:
        return otel_wsgi.get_default_span_name(flask.request.environ)
def _rewrapped_app(wsgi_app):
    """Wrap a WSGI callable so each request records its arrival time and,
    when the response starts, sets HTTP response attributes on its span."""

    def _wrapped_app(environ, start_response):
        # Record the arrival time before any route matching happens; the
        # span itself is only started later (in before_request) because
        # renaming an already-started span is discouraged by the API.
        environ[_ENVIRON_STARTTIME_KEY] = time_ns()

        def _start_response(status, response_headers, *args, **kwargs):
            if not _excluded_urls.url_disabled(flask.request.url):
                span = flask.request.environ.get(_ENVIRON_SPAN_KEY)
                if not span:
                    _logger.warning(
                        "Flask environ's OpenTelemetry span "
                        "missing at _start_response(%s)",
                        status,
                    )
                else:
                    otel_wsgi.add_response_attributes(
                        span, status, response_headers
                    )
            return start_response(status, response_headers, *args, **kwargs)

        return wsgi_app(environ, _start_response)

    return _wrapped_app
def _wrapped_before_request(name_callback):
    """Build a Flask ``before_request`` hook that starts a SERVER span.

    ``name_callback`` is called with no arguments and must return the span
    name (see ``get_default_span_name``).
    """

    def _before_request():
        if _excluded_urls.url_disabled(flask.request.url):
            return
        flask_request_environ = flask.request.environ
        span_name = name_callback()
        # Extract any remote propagation context from the request carrier and
        # attach it, keeping the token so _teardown_request can detach later.
        token = context.attach(
            extract(otel_wsgi.carrier_getter, flask_request_environ)
        )
        tracer = trace.get_tracer(__name__, __version__)

        # Backdate the span to when the WSGI layer first saw the request
        # (the timestamp stored by _rewrapped_app).
        span = tracer.start_span(
            span_name,
            kind=trace.SpanKind.SERVER,
            start_time=flask_request_environ.get(_ENVIRON_STARTTIME_KEY),
        )
        if span.is_recording():
            attributes = otel_wsgi.collect_request_attributes(
                flask_request_environ
            )
            if flask.request.url_rule:
                # For 404 that result from no route found, etc, we
                # don't have a url_rule.
                attributes["http.route"] = flask.request.url_rule.rule
            for key, value in attributes.items():
                span.set_attribute(key, value)

        activation = tracer.use_span(span, end_on_exit=True)
        activation.__enter__()
        # Stash activation, span and token in the environ so the response
        # hook and teardown hook can retrieve them for this request.
        flask_request_environ[_ENVIRON_ACTIVATION_KEY] = activation
        flask_request_environ[_ENVIRON_SPAN_KEY] = span
        flask_request_environ[_ENVIRON_TOKEN] = token

    return _before_request
def _teardown_request(exc):
    """Flask ``teardown_request`` hook: end the request's span.

    ``exc`` is the unhandled exception that aborted the request, or None.
    """
    if _excluded_urls.url_disabled(flask.request.url):
        return
    activation = flask.request.environ.get(_ENVIRON_ACTIVATION_KEY)
    if not activation:
        # This request didn't start a span, maybe because it was created in a
        # way that doesn't run `before_request`, like when it is created with
        # `app.test_request_context`.
        return

    # Exit the span activation (ends the span, since it was entered with
    # end_on_exit=True), passing exception info through when there was one.
    if exc is None:
        activation.__exit__(None, None, None)
    else:
        activation.__exit__(
            type(exc), exc, getattr(exc, "__traceback__", None)
        )
    # Detach the context token that before_request attached.
    context.detach(flask.request.environ.get(_ENVIRON_TOKEN))
class _InstrumentedFlask(flask.Flask):
    """Flask subclass whose instances are instrumented on construction.

    Installed in place of ``flask.Flask`` by ``FlaskInstrumentor._instrument``.
    """

    # Callable returning the span name; may be replaced via the
    # ``name_callback`` kwarg handled in ``FlaskInstrumentor._instrument``.
    name_callback = get_default_span_name

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Keep a reference to the unwrapped WSGI app before wrapping it.
        self._original_wsgi_ = self.wsgi_app
        self.wsgi_app = _rewrapped_app(self.wsgi_app)

        _before_request = _wrapped_before_request(
            _InstrumentedFlask.name_callback
        )
        self._before_request = _before_request
        self.before_request(_before_request)
        self.teardown_request(_teardown_request)
class FlaskInstrumentor(BaseInstrumentor):
    # pylint: disable=protected-access,attribute-defined-outside-init
    """An instrumentor for flask.Flask

    See `BaseInstrumentor`
    """

    def _instrument(self, **kwargs):
        """Globally replace ``flask.Flask`` with the instrumented subclass.

        Accepts an optional ``name_callback`` kwarg used to name spans.
        """
        self._original_flask = flask.Flask
        name_callback = kwargs.get("name_callback")
        if callable(name_callback):
            _InstrumentedFlask.name_callback = name_callback
        flask.Flask = _InstrumentedFlask

    def instrument_app(
        self, app, name_callback=get_default_span_name
    ):  # pylint: disable=no-self-use
        """Instrument one existing Flask ``app`` (no-op if already done)."""
        if not hasattr(app, "_is_instrumented"):
            app._is_instrumented = False

        if not app._is_instrumented:
            # Same wiring as _InstrumentedFlask.__init__, applied in place.
            app._original_wsgi_app = app.wsgi_app
            app.wsgi_app = _rewrapped_app(app.wsgi_app)

            _before_request = _wrapped_before_request(name_callback)
            app._before_request = _before_request
            app.before_request(_before_request)
            app.teardown_request(_teardown_request)
            app._is_instrumented = True
        else:
            _logger.warning(
                "Attempting to instrument Flask app while already instrumented"
            )

    def _uninstrument(self, **kwargs):
        """Restore the original ``flask.Flask`` class."""
        flask.Flask = self._original_flask

    def uninstrument_app(self, app):  # pylint: disable=no-self-use
        """Undo ``instrument_app`` on ``app`` (no-op if not instrumented)."""
        if not hasattr(app, "_is_instrumented"):
            app._is_instrumented = False

        if app._is_instrumented:
            app.wsgi_app = app._original_wsgi_app

            # FIXME add support for other Flask blueprints that are not None
            app.before_request_funcs[None].remove(app._before_request)
            app.teardown_request_funcs[None].remove(_teardown_request)
            del app._original_wsgi_app
            app._is_instrumented = False
        else:
            _logger.warning(
                "Attempting to uninstrument Flask "
                "app while already uninstrumented"
            )
| 32.839506 | 79 | 0.674436 |
a442c40c058cc7b8d4761158650b82804d3cf1ec | 1,719 | py | Python | desist/eventhandler/eventhandler.py | UvaCsl/desist | 932616447329682f2276d1d4619058fd18b73807 | [
"BSD-3-Clause"
] | null | null | null | desist/eventhandler/eventhandler.py | UvaCsl/desist | 932616447329682f2276d1d4619058fd18b73807 | [
"BSD-3-Clause"
] | null | null | null | desist/eventhandler/eventhandler.py | UvaCsl/desist | 932616447329682f2276d1d4619058fd18b73807 | [
"BSD-3-Clause"
] | null | null | null | """Command-line utility for the eventhandling.
This provides a basic ``click``-based command-line utility that uses a
concrete API of the abstract ``desist.eventhander.api.API`` implementation.
The utility attaches the ``event``, ``example``, and ``test`` commands.
"""
import click
from .api import API
@click.command()
@click.pass_context
def event(ctx):
    """Invokes the ``API.event`` call to dispatch the event evaluation."""
    # ``ctx.obj`` is the API instance created by the group callback in
    # ``event_handler``.
    ctx.obj.event()
@click.command()
@click.pass_context
def example(ctx):
    """Invokes the ``API.example`` call to dispatch the example evaluation."""
    # ``ctx.obj`` is the API instance created by the group callback in
    # ``event_handler``.
    ctx.obj.example()
@click.command()
@click.pass_context
def test(ctx):
    """Invokes the ``API.test`` call to dispatch the test evaluation."""
    # ``ctx.obj`` is the API instance created by the group callback in
    # ``event_handler``.
    ctx.obj.test()
def event_handler(api_class=API, **kwargs):
    """Initialise the click-based event handler command group.

    ``api_class`` may be any callable that accepts a patient path and an
    event id (plus any ``**kwargs``, which are forwarded on every call) and
    returns an initialised API instance.

    The returned group exposes the ``event``, ``example`` and ``test``
    subcommands, so invocations look like, with ``$id`` the desired event ID
    and ``$cmd`` one of the subcommand names:

    >>> python3 API.py /patient/patient.yml $id $cmd
    """

    @click.group()
    @click.argument('patient', type=click.Path(exists=True))
    @click.argument('event', type=int)
    @click.pass_context
    def cli(ctx, patient, event):
        # Build the API instance shared by all subcommands via ctx.obj.
        ctx.obj = api_class(patient, event, **kwargs)

    # Attach the default subcommands.
    cli.add_command(event)
    cli.add_command(example)
    cli.add_command(test)

    return cli
| 28.180328 | 78 | 0.686446 |
79a277910c3f61ce22c0a990b22b93f0b6596264 | 4,240 | py | Python | dort-core/protocols/full_node_protocol.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | [
"Apache-2.0"
] | 1 | 2021-09-05T18:21:09.000Z | 2021-09-05T18:21:09.000Z | dort-core/protocols/full_node_protocol.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | [
"Apache-2.0"
] | 1 | 2021-07-11T03:04:25.000Z | 2021-07-11T03:04:25.000Z | dort-core/protocols/full_node_protocol.py | Dortchain/dort-blockchian | 14f16e321a60f9d70f849f58e4e9964fa337a084 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import List, Optional
from Dort.types.blockchain_format.sized_bytes import bytes32
from Dort.types.blockchain_format.vdf import VDFInfo, VDFProof
from Dort.types.end_of_slot_bundle import EndOfSubSlotBundle
from Dort.types.full_block import FullBlock
from Dort.types.peer_info import TimestampedPeerInfo
from Dort.types.spend_bundle import SpendBundle
from Dort.types.unfinished_block import UnfinishedBlock
from Dort.types.weight_proof import WeightProof
from Dort.util.ints import uint8, uint32, uint64, uint128
from Dort.util.streamable import Streamable, streamable
"""
Protocol between full nodes.
Note: When changing this file, also change protocol_message_types.py, and the protocol version in shared_protocol.py
"""
@dataclass(frozen=True)
@streamable
class NewPeak(Streamable):
    """Announce a new peak: its header hash, height, weight, the fork point
    with the previous peak, and the unfinished reward block hash."""
    header_hash: bytes32
    height: uint32
    weight: uint128
    fork_point_with_previous_peak: uint32
    unfinished_reward_block_hash: bytes32
@dataclass(frozen=True)
@streamable
class NewTransaction(Streamable):
    """Announce a new transaction by id, with its cost and fees."""
    transaction_id: bytes32
    cost: uint64
    fees: uint64
@dataclass(frozen=True)
@streamable
class RequestTransaction(Streamable):
    """Request the full transaction with the given id."""
    transaction_id: bytes32
@dataclass(frozen=True)
@streamable
class RespondTransaction(Streamable):
    """Deliver a full transaction (a SpendBundle)."""
    transaction: SpendBundle
@dataclass(frozen=True)
@streamable
class RequestProofOfWeight(Streamable):
    """Request a weight proof covering the given number of blocks up to tip."""
    total_number_of_blocks: uint32
    tip: bytes32
@dataclass(frozen=True)
@streamable
class RespondProofOfWeight(Streamable):
    """Deliver a weight proof for the given tip."""
    wp: WeightProof
    tip: bytes32
@dataclass(frozen=True)
@streamable
class RequestBlock(Streamable):
    """Request the block at a height, optionally with transaction data."""
    height: uint32
    include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RejectBlock(Streamable):
    """Refuse a RequestBlock for the given height."""
    height: uint32
@dataclass(frozen=True)
@streamable
class RequestBlocks(Streamable):
    """Request the blocks in the height range [start_height, end_height]."""
    start_height: uint32
    end_height: uint32
    include_transaction_block: bool
@dataclass(frozen=True)
@streamable
class RespondBlocks(Streamable):
    """Deliver the full blocks for the height range [start_height, end_height]."""
    start_height: uint32
    end_height: uint32
    blocks: List[FullBlock]
@dataclass(frozen=True)
@streamable
class RejectBlocks(Streamable):
    """Refuse a RequestBlocks for the given height range."""
    start_height: uint32
    end_height: uint32
@dataclass(frozen=True)
@streamable
class RespondBlock(Streamable):
    """Deliver a single full block."""
    block: FullBlock
@dataclass(frozen=True)
@streamable
class NewUnfinishedBlock(Streamable):
    """Announce an unfinished block by its reward block hash."""
    unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RequestUnfinishedBlock(Streamable):
    """Request the unfinished block with the given reward block hash."""
    unfinished_reward_hash: bytes32
@dataclass(frozen=True)
@streamable
class RespondUnfinishedBlock(Streamable):
    """Deliver an unfinished block."""
    unfinished_block: UnfinishedBlock
@dataclass(frozen=True)
@streamable
class NewSignagePointOrEndOfSubSlot(Streamable):
    """Announce a new signage point or end-of-sub-slot for a challenge."""
    prev_challenge_hash: Optional[bytes32]
    challenge_hash: bytes32
    index_from_challenge: uint8
    last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RequestSignagePointOrEndOfSubSlot(Streamable):
    """Request a signage point or end-of-sub-slot for a challenge."""
    challenge_hash: bytes32
    index_from_challenge: uint8
    last_rc_infusion: bytes32
@dataclass(frozen=True)
@streamable
class RespondSignagePoint(Streamable):
    """Deliver a signage point: challenge-chain and reward-chain VDF infos
    with their proofs."""
    index_from_challenge: uint8
    challenge_chain_vdf: VDFInfo
    challenge_chain_proof: VDFProof
    reward_chain_vdf: VDFInfo
    reward_chain_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RespondEndOfSubSlot(Streamable):
    """Deliver an end-of-sub-slot bundle."""
    end_of_slot_bundle: EndOfSubSlotBundle
@dataclass(frozen=True)
@streamable
class RequestMempoolTransactions(Streamable):
    """Request mempool transactions matching the given filter bytes."""
    # Field name shadows the ``filter`` builtin, but only in class scope.
    filter: bytes
@dataclass(frozen=True)
@streamable
class NewCompactVDF(Streamable):
    """Announce a compact VDF for a field of the block at (height, header_hash)."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RequestCompactVDF(Streamable):
    """Request the compact VDF proof described by vdf_info for a block field."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo
@dataclass(frozen=True)
@streamable
class RespondCompactVDF(Streamable):
    """Deliver a compact VDF proof for a block field."""
    height: uint32
    header_hash: bytes32
    field_vdf: uint8
    vdf_info: VDFInfo
    vdf_proof: VDFProof
@dataclass(frozen=True)
@streamable
class RequestPeers(Streamable):
    """
    Ask a peer for its full list of known peers (no payload).
    """
@dataclass(frozen=True)
@streamable
class RespondPeers(Streamable):
    """Deliver a list of known peers with timestamps."""
    peer_list: List[TimestampedPeerInfo]
| 20.784314 | 116 | 0.788679 |
dd6a637c6721e52ba37a0ab12d09289531306a0c | 183 | py | Python | api_server/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | api_server/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | api_server/__init__.py | CjwRiver/apiAutoTest | 35f1c2475e76dd34089e2cee33b351a1ca97c168 | [
"MIT"
] | null | null | null | #!/usr/bin/env/python3
# -*- coding:utf-8 -*-
"""
@project: apiAutoTest
@author: cjw
@file: __init__.py
@ide: PyCharm
@time: 2020/11/20
@desc: Provides some API endpoints for testing; for the endpoint code, please see the official FastAPI site to learn more
"""
| 16.636364 | 42 | 0.688525 |
a3379b1d283c8baac90cab57284b3f9ade4b5cc3 | 1,688 | py | Python | tensorflow_io/audio/python/ops/audio_ops.py | HubBucket-Team/io | de05464e53672389119a6215fea9ceacf7f77203 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:11:23.000Z | 2019-10-10T06:11:23.000Z | tensorflow_io/audio/python/ops/audio_ops.py | VonRosenchild/io | de05464e53672389119a6215fea9ceacf7f77203 | [
"Apache-2.0"
] | null | null | null | tensorflow_io/audio/python/ops/audio_ops.py | VonRosenchild/io | de05464e53672389119a6215fea9ceacf7f77203 | [
"Apache-2.0"
] | 1 | 2019-10-10T06:11:24.000Z | 2019-10-10T06:11:24.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Audio Dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
import tensorflow as tf
from tensorflow_io.core.python.ops import data_ops
# Emit a deprecation warning at import time: this module is superseded by
# tfio.IOTensor.from_audio.
warnings.warn(
    "The tensorflow_io.audio.WAVDataset is "
    "deprecated. Please look for tfio.IOTensor.from_audio "
    "for reading WAV files into tensorflow.",
    DeprecationWarning)
class AudioDataset(data_ops.Dataset):
  """An Audio File Dataset that reads the audio file."""

  def __init__(self, filename, batch=None):
    """Create a `AudioDataset`.

    Args:
      filename: A `tf.string` tensor containing one or more filenames.
      batch: Optional batch size; None/0 yields 1-D sample tensors,
        otherwise elements are 2-D (a batch of sample vectors).
    """
    batch = 0 if batch is None else batch
    dtypes = [tf.int16]
    shapes = [
        tf.TensorShape([None])] if batch == 0 else [
            tf.TensorShape([None, None])]
    # NOTE(review): `ffmpeg_ops` is not defined anywhere in this module --
    # only `data_ops` is imported above -- so constructing an AudioDataset
    # would raise NameError at runtime. Confirm the intended ops module.
    super(AudioDataset, self).__init__(
        ffmpeg_ops.audio_dataset,
        ffmpeg_ops.audio_input(filename),
        batch, dtypes, shapes)
c6872aca2ade777b820febc8633a3fb1da68f492 | 771 | py | Python | main3.py | alexandreib/Stock-Trading-Environment | 229a38812fe8d3527fff2b21bb2af3866c316d42 | [
"MIT"
] | null | null | null | main3.py | alexandreib/Stock-Trading-Environment | 229a38812fe8d3527fff2b21bb2af3866c316d42 | [
"MIT"
] | null | null | null | main3.py | alexandreib/Stock-Trading-Environment | 229a38812fe8d3527fff2b21bb2af3866c316d42 | [
"MIT"
] | null | null | null | import gym
import gym_anytrading
from gym_anytrading.envs import TradingEnv, ForexEnv, StocksEnv, Actions, Positions
from gym_anytrading.datasets import FOREX_EURUSD_1H_ASK, STOCKS_GOOGL
import matplotlib.pyplot as plt
from stable_baselines3.common.vec_env import DummyVecEnv
from stable_baselines3 import PPO
# Build the forex trading environment over bars 50-100 with a 10-bar window.
env = gym.make('forex-v0', frame_bound=(50, 100), window_size=10)

# Train a PPO agent with an MLP policy on the environment.
model = PPO("MlpPolicy", env, verbose=1)
model.learn(total_timesteps=20000)

# Roll out one episode with the trained policy, rendering each step.
observation = env.reset()
while True:
    # action = env.action_space.sample()  # take random action instead
    action, _states = model.predict(observation)
    observation, reward, done, info = env.step(action)
    env.render()
    if done:
        print("info:", info)
        break

# Plot the whole episode.
plt.cla()
env.render_all()
plt.show()
7ee0ca54c9a950547f152af02751f743b6cb7ee8 | 2,228 | py | Python | scripts/rnnlm/get_special_symbol_opts.py | LanceaKing/kaldi | eb205a83f08fb8056ba1deb03c505ec8b722d4d9 | [
"Apache-2.0"
] | 116 | 2016-10-25T06:04:49.000Z | 2022-03-13T02:30:52.000Z | scripts/rnnlm/get_special_symbol_opts.py | LanceaKing/kaldi | eb205a83f08fb8056ba1deb03c505ec8b722d4d9 | [
"Apache-2.0"
] | 8 | 2017-09-06T00:12:00.000Z | 2019-03-22T08:03:19.000Z | scripts/rnnlm/get_special_symbol_opts.py | LanceaKing/kaldi | eb205a83f08fb8056ba1deb03c505ec8b722d4d9 | [
"Apache-2.0"
] | 52 | 2016-04-21T13:38:21.000Z | 2022-02-16T08:33:13.000Z | #!/usr/bin/env python3
# Copyright 2017 Jian Wang
# License: Apache 2.0.
import io
import os
import argparse
import sys
import re
# This script takes no positional arguments; argparse is used only so that
# --help prints the usage/description below.
parser = argparse.ArgumentParser(description="This script checks whether the special symbols "
                                 "appear in words.txt with expected values, if not, it will "
                                 "print out the options with correct value to stdout, which may look like "
                                 "'--bos-symbol=14312 --eos-symbol=14313 --brk-symbol=14320'.",
                                 epilog="E.g. " + sys.argv[0] + " < exp/rnnlm/config/words.txt > exp/rnnlm/special_symbol_opts.txt",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()

# this dict stores the special_symbols and their corresponding (expected_id, option_name)
special_symbols = {'<s>': (1, '--bos-symbol'),
                   '</s>': (2, '--eos-symbol'),
                   '<brk>': (3, '--brk-symbol')}
upper_special_symbols = [key.upper() for key in special_symbols]

# Actual ids found in the input, keyed by the lowercase symbol, split by
# whether the symbol appeared in lowercase or uppercase form.
lower_ids = {}
upper_ids = {}
# Read stdin as UTF-8 regardless of the environment's default encoding.
input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
for line in input_stream:
    fields = line.split()
    assert len(fields) == 2, "Expected '<word> <id>' but got: " + line
    sym = fields[0]
    if sym in special_symbols:
        assert sym not in lower_ids, "Duplicate symbol: " + sym
        lower_ids[sym] = int(fields[1])
    elif sym in upper_special_symbols:
        assert sym.lower() not in upper_ids, "Duplicate symbol: " + sym
        upper_ids[sym.lower()] = int(fields[1])

# Print an option for every symbol whose actual id differs from the expected
# one; when both cases are present, the lowercase id wins and a warning is
# printed to stderr.
printed = False
for sym in special_symbols:
    if sym in lower_ids:
        if special_symbols[sym][0] != lower_ids[sym]:
            print('{0}={1} '.format(special_symbols[sym][1], lower_ids[sym]), end='')
            printed = True
        if sym in upper_ids:
            print(sys.argv[0] + ": both uppercase and lowercase are present for " + sym,
                  file=sys.stderr)
    elif sym in upper_ids:
        if special_symbols[sym][0] != upper_ids[sym]:
            print('{0}={1} '.format(special_symbols[sym][1], upper_ids[sym]), end='')
            printed = True
    else:
        # The symbol did not appear in the input at all. (The unreachable
        # sys.exit(1) that used to follow this raise has been removed.)
        raise ValueError("Special symbol did not appear: " + sym)

if printed:
    print('')
| 36.52459 | 132 | 0.596499 |
5c95da400786a046bc44ec320932bc5e58b85a06 | 509 | py | Python | scripts/droptables.py | imranariffin/liveswot-api | a2acc05fd2c51adc30e8e1785b857a94af81677d | [
"MIT"
] | null | null | null | scripts/droptables.py | imranariffin/liveswot-api | a2acc05fd2c51adc30e8e1785b857a94af81677d | [
"MIT"
] | 25 | 2018-03-25T05:25:22.000Z | 2021-06-10T19:51:12.000Z | scripts/droptables.py | imranariffin/liveswot-api | a2acc05fd2c51adc30e8e1785b857a94af81677d | [
"MIT"
] | 2 | 2018-07-02T02:59:24.000Z | 2018-08-21T02:58:21.000Z | from authenticationjwt.models import User
from swot.models import Swot
from swot_item.models import SwotItem
from swot_item_vote.models import Vote
from swot_members.models import SwotMember, Invite
print 'deleting all rows in all tables ...'
Invite.objects.all().delete()
SwotMember.objects.all().delete()
Vote.objects.all().delete()
SwotItem.objects.all().delete()
Swot.objects.all().delete()
User.objects.all().delete()
print 'Done deleting rows in tables Invite, SwotMember, Vote, SwotItem, Swot, User'
| 31.8125 | 83 | 0.785855 |
85d3347d7e6ead270294123e513589d58aa32418 | 728 | py | Python | source/_static/code/optgrowth/solve_model.py | tuttugu-ryo/lecture-source-py | 9ce84044c2cc421775ea63a004556d7ae3b4e504 | [
"BSD-3-Clause"
] | null | null | null | source/_static/code/optgrowth/solve_model.py | tuttugu-ryo/lecture-source-py | 9ce84044c2cc421775ea63a004556d7ae3b4e504 | [
"BSD-3-Clause"
] | null | null | null | source/_static/code/optgrowth/solve_model.py | tuttugu-ryo/lecture-source-py | 9ce84044c2cc421775ea63a004556d7ae3b4e504 | [
"BSD-3-Clause"
] | null | null | null | def solve_model(og,
use_parallel=True,
tol=1e-4,
max_iter=1000,
verbose=True,
print_skip=25):
T, _ = operator_factory(og, parallel_flag=use_parallel)
# Set up loop
v = np.log(og.grid) # Initial condition
i = 0
error = tol + 1
while i < max_iter and error > tol:
v_new = T(v)
error = np.max(np.abs(v - v_new))
i += 1
if verbose and i % print_skip == 0:
print(f"Error at iteration {i} is {error}.")
v = v_new
if i == max_iter:
print("Failed to converge!")
if verbose and i < max_iter:
print(f"\nConverged in {i} iterations.")
return v_new
| 24.266667 | 59 | 0.513736 |
e90349e6313912f64dd6db492520cc98f26a0c2c | 1,790 | py | Python | tranny/notification/email.py | bobbintb/tranny | 9d46c56ff2d878f262e0a24e3dbe6b0926a805aa | [
"BSD-3-Clause"
] | null | null | null | tranny/notification/email.py | bobbintb/tranny | 9d46c56ff2d878f262e0a24e3dbe6b0926a805aa | [
"BSD-3-Clause"
] | null | null | null | tranny/notification/email.py | bobbintb/tranny | 9d46c56ff2d878f262e0a24e3dbe6b0926a805aa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from contextlib import contextmanager
import smtplib
from email.mime.text import MIMEText
from tranny.app import config
from tranny import plugin
from tranny.events import EventHandler, EVENT_NOTIFICATION
_config_key = 'notification_email'
def send_event(event, payload=None):
if payload is None:
payload = dict()
def send_message(body_plain, subject, addr_from, addr_to):
msg = MIMEText(body_plain)
msg['Subject'] = subject
msg['From'] = addr_from
msg['To'] = addr_to
with smtp_client() as smtp:
smtp.sendmail(addr_from, [addr_to], msg.as_string())
return msg
@contextmanager
def smtp_client():
""" Create a configured SMTP instance ready to send
:return:
:rtype:
"""
srv = None
try:
use_ssl = config.get_default_boolean(_config_key, 'ssl', False)
username = config.get_default(_config_key, 'username', "")
password = config.get_default(_config_key, 'password', "")
args = {
'host': config.get_default(_config_key, 'host', 'localhost'),
'port': config.get_default(_config_key, 'port', 25, int)
}
if use_ssl:
srv = smtplib.SMTP_SSL(**args)
else:
srv = smtplib.SMTP(**args)
if config.get_default_boolean(_config_key, 'starttls', False):
srv.starttls()
if username and password:
srv.login(username, password)
yield srv
finally:
if srv and hasattr(srv, 'quit'):
srv.quit()
class NotificationEmail(plugin.BasePlugin):
def get_handlers(self):
return [
EventHandler(EVENT_NOTIFICATION, self.handle_event_notification)
]
def handle_event_notification(self, payload):
pass
| 26.716418 | 76 | 0.637989 |
36094dbab5bcbbc8059f8602420aa640b73cf352 | 3,219 | py | Python | targets/minispartan6/crg.py | skiphansen/litex-buildenv | a6ea4aa281a79f4ea2a03e89dcfd8bf08819617b | [
"BSD-2-Clause"
] | 198 | 2018-01-17T05:39:54.000Z | 2022-03-15T08:59:16.000Z | targets/minispartan6/crg.py | skiphansen/litex-buildenv | a6ea4aa281a79f4ea2a03e89dcfd8bf08819617b | [
"BSD-2-Clause"
] | 610 | 2017-12-31T01:32:32.000Z | 2022-03-19T22:07:28.000Z | targets/minispartan6/crg.py | skiphansen/litex-buildenv | a6ea4aa281a79f4ea2a03e89dcfd8bf08819617b | [
"BSD-2-Clause"
] | 85 | 2018-01-13T05:51:38.000Z | 2022-02-11T18:54:14.000Z | # Support for the MiniSpartan6+ - https://www.scarabhardware.com/minispartan6/
from fractions import Fraction
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
class _CRG(Module):
def __init__(self, platform, clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain()
f0 = 32*1000000
clk32 = platform.request("clk32")
clk32a = Signal()
self.specials += Instance("IBUFG", i_I=clk32, o_O=clk32a)
clk32b = Signal()
self.specials += Instance("BUFIO2", p_DIVIDE=1,
p_DIVIDE_BYPASS="TRUE", p_I_INVERT="FALSE",
i_I=clk32a, o_DIVCLK=clk32b)
f = Fraction(int(clk_freq), int(f0))
n, m, p = f.denominator, f.numerator, 8
assert f0/n*m == clk_freq
pll_lckd = Signal()
pll_fb = Signal()
pll = Signal(6)
self.specials.pll = Instance("PLL_ADV", p_SIM_DEVICE="SPARTAN6",
p_BANDWIDTH="OPTIMIZED", p_COMPENSATION="INTERNAL",
p_REF_JITTER=.01, p_CLK_FEEDBACK="CLKFBOUT",
i_DADDR=0, i_DCLK=0, i_DEN=0, i_DI=0, i_DWE=0, i_RST=0, i_REL=0,
p_DIVCLK_DIVIDE=1, p_CLKFBOUT_MULT=m*p//n, p_CLKFBOUT_PHASE=0.,
i_CLKIN1=clk32b, i_CLKIN2=0, i_CLKINSEL=1,
p_CLKIN1_PERIOD=1000000000/f0, p_CLKIN2_PERIOD=0.,
i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb, o_LOCKED=pll_lckd,
o_CLKOUT0=pll[0], p_CLKOUT0_DUTY_CYCLE=.5,
o_CLKOUT1=pll[1], p_CLKOUT1_DUTY_CYCLE=.5,
o_CLKOUT2=pll[2], p_CLKOUT2_DUTY_CYCLE=.5,
o_CLKOUT3=pll[3], p_CLKOUT3_DUTY_CYCLE=.5,
o_CLKOUT4=pll[4], p_CLKOUT4_DUTY_CYCLE=.5,
o_CLKOUT5=pll[5], p_CLKOUT5_DUTY_CYCLE=.5,
p_CLKOUT0_PHASE=0., p_CLKOUT0_DIVIDE=p//1,
p_CLKOUT1_PHASE=0., p_CLKOUT1_DIVIDE=p//1,
p_CLKOUT2_PHASE=0., p_CLKOUT2_DIVIDE=p//1,
p_CLKOUT3_PHASE=0., p_CLKOUT3_DIVIDE=p//1,
p_CLKOUT4_PHASE=0., p_CLKOUT4_DIVIDE=p//1, # sys
p_CLKOUT5_PHASE=270., p_CLKOUT5_DIVIDE=p//1, # sys_ps
)
self.specials += Instance("BUFG", i_I=pll[4], o_O=self.cd_sys.clk)
self.specials += Instance("BUFG", i_I=pll[5], o_O=self.cd_sys_ps.clk)
self.specials += AsyncResetSynchronizer(self.cd_sys, ~pll_lckd)
self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
p_INIT=0, p_SRTYPE="SYNC",
i_D0=0, i_D1=1, i_S=0, i_R=0, i_CE=1,
i_C0=self.cd_sys.clk, i_C1=~self.cd_sys.clk,
o_Q=platform.request("sdram_clock"))
| 55.5 | 101 | 0.508232 |
394305a65e82214ca2b97adcaebb56bb5bddb9a3 | 8,091 | py | Python | index.py | upenderrreddy/Downloder-App | bb33811ede6c9f6b21d3a2d2e031fe37621b1ea9 | [
"MIT"
] | 1 | 2020-07-16T03:58:31.000Z | 2020-07-16T03:58:31.000Z | index.py | upenderrreddy/Downloder-App | bb33811ede6c9f6b21d3a2d2e031fe37621b1ea9 | [
"MIT"
] | null | null | null | index.py | upenderrreddy/Downloder-App | bb33811ede6c9f6b21d3a2d2e031fe37621b1ea9 | [
"MIT"
] | null | null | null | import sys
from urllib import request
import humanize
import pafy
import os
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox
from PyQt5.uic import loadUiType
ui, _ = loadUiType('main.ui')
class MainApp(QMainWindow, ui):
def __init__(self, parent=None):
super(MainApp, self).__init__(parent)
QMainWindow.__init__(self)
self.setupUi(self)
self.init_ui()
self.handle_buttons()
def init_ui(self):
# contains all ui changes in loading
# hiding tab bar
self.tabWidget.tabBar().setVisible(False)
def handle_buttons(self):
# handle all buttons in the App
# whenever download button is clicked download() method will be called
self.pushButton.clicked.connect(self.download)
# whenever browse button is clicked handle_browse() method will be called
self.pushButton_19.clicked.connect(self.handle_browse)
# whenever search button is clicked get_video_data() method will be called
self.pushButton_6.clicked.connect(self.get_video_data)
# whenever download button is clicked download_video() method will be called
self.pushButton_17.clicked.connect(self.download_video)
# browse for save location of video
self.pushButton_20.clicked.connect(self.save_browse)
#
self.pushButton_4.clicked.connect(self.playlist_download)
# playlist save browse
self.pushButton_16.clicked.connect(self.playlist_save_browse)
# connecting UI buttons
self.pushButton_2.clicked.connect(self.open_home)
self.pushButton_7.clicked.connect(self.open_download)
self.pushButton_5.clicked.connect(self.open_youtube)
self.pushButton_3.clicked.connect(self.open_dettings)
def handle_progress(self, block_no, block_size, total_size):
# calculate the progress
read_data = block_no * block_size
if total_size > 0:
download_percentage = int(read_data / total_size * 100)
self.progressBar.setValue(download_percentage)
QApplication.processEvents()
def handle_browse(self):
# enable browsing files to pick a save location
save_location = QFileDialog.getSaveFileName(self, caption="Save as", directory=".", filter="All Files(*.*)")
# print(save_location) #
self.lineEdit_2.setText(save_location[0])
def download(self):
# downloading file
# print('Starting download')
# get download url and save location from gui
download_url = self.lineEdit.text()
save_location = self.lineEdit_2.text()
# validating user input
if download_url == '' or save_location == '':
QMessageBox.warning(self, "Data Error", "URL or Save location is invalid")
else:
try:
# urlretrieve() takes downlaod_url, save_location and here we are sending info obtained by this method to handle_progress()
request.urlretrieve(download_url, save_location, self.handle_progress)
except Exception:
QMessageBox.warning(self, "Download Error", "Invalid URL or Save location")
return
QMessageBox.information(self, "Download Completed", "Download completed Successfully")
# After download is completed make url and save location text fields empty
self.lineEdit.setText('')
self.lineEdit_2.setText('')
self.progressBar.setValue(0)
##################################################################
# Download single YouTube video #
def save_browse(self):
# save location in the line edit
save_location = QFileDialog.getSaveFileName(self, caption="Save as", directory=".", filter="All Files(*.*)")
# print(save_location) #
self.lineEdit_4.setText(save_location[0])
def get_video_data(self):
video_url = self.lineEdit_3.text()
print(video_url)
if video_url == '':
QMessageBox.warning(self, "Data Error", "Video URL is invalid")
else:
# creating pafy object
video = pafy.new(video_url)
# print(video.title)
# print(video.duration)
# print(video.length)
# print(video.likes)
# print(video.dislikes)
# print(video.viewcount)
video_streams = video.streams
for stream in video_streams:
size = humanize.naturalsize(stream.get_filesize())
data = "{} {} {} {}".format(stream.mediatype, stream.extension, stream.quality, size)
self.comboBox.addItem(data)
def download_video(self):
video_url = self.lineEdit_3.text()
save_location = self.lineEdit_4.text()
if video_url == '' or save_location == '':
QMessageBox.warning(self, "Data Error", "URL or Save location is invalid")
else:
video = pafy.new(video_url)
video_stream = video.streams
video_quality = self.comboBox.currentIndex()
download = video_stream[video_quality].download(filepath=save_location, callback=self.video_progress)
def video_progress(self, total, received, ratio, rate, time):
read_data = received
# print(read_data)
if total > 0:
download_percentage = read_data * 100 / total
self.progressBar_2.setValue(int(download_percentage))
remaining_time = round(time / 60, 2)
self.label_4.setText(str('{} minutes remaining'.format(remaining_time)))
QApplication.processEvents()
################################################
# Youtube Playlist Download #
def playlist_download(self):
playlist_url = self.lineEdit_7.text()
save_location = self.lineEdit_8.text()
if playlist_url == '' or save_location == '':
QMessageBox.warning(self, "Data Error", "Provide a valid Playlist URL or save location")
else:
playlist = pafy.get_playlist(playlist_url)
playlist_videos = playlist['items']
self.lcdNumber_2.display(len(playlist_videos))
os.chdir(save_location)
if os.path.exists(str(playlist['title'])):
os.chdir(str(playlist['title']))
else:
os.mkdir(str(playlist['title']))
os.chdir(str(playlist['title']))
current_video_in_download = 1
quality = self.comboBox_2.currentIndex()
QApplication.processEvents()
for video in playlist_videos:
current_video = video['pafy']
current_video_stream = current_video.streams
self.lcdNumber.display(current_video_in_download)
download = current_video_stream[quality].download(callback=self.playlist_progress)
QApplication.processEvents()
current_video_in_download += 1
def playlist_progress(self, total, received, ratio, rate, time):
read_data = received
if total > 0:
download_percentage = read_data * 100 // total
self.progressBar_4.setValue(download_percentage)
remaining_time = round(time / 60, 2)
self.label_6.setText(str('{} minutes remaining'.format(remaining_time)))
QApplication.processEvents()
def playlist_save_browse(self):
# save location in the line edit
save_location = QFileDialog.getExistingDirectory(self, "Select Download Directory")
# print(save_location) #
self.lineEdit_8.setText(save_location)
# UI Changes methods #
def open_home(self):
self.tabWidget.setCurrentIndex(0)
def open_download(self):
self.tabWidget.setCurrentIndex(1)
def open_youtube(self):
self.tabWidget.setCurrentIndex(2)
def open_dettings(self):
self.tabWidget.setCurrentIndex(3)
def main():
app = QApplication(sys.argv)
window = MainApp()
window.show()
app.exec_()
if __name__ == '__main__':
main()
| 35.800885 | 139 | 0.636263 |
ee252ce24d9df688c26985206965a9e6322174a0 | 1,538 | py | Python | docs/doc_check/test/test_same-as-file.py | gperrotta/onnx-mlir | 75930ffbcf14cfbaccd8417c47c3598f56342926 | [
"Apache-2.0"
] | 25 | 2019-12-24T09:22:09.000Z | 2021-09-09T17:10:09.000Z | docs/doc_check/test/test_same-as-file.py | gperrotta/onnx-mlir | 75930ffbcf14cfbaccd8417c47c3598f56342926 | [
"Apache-2.0"
] | 69 | 2019-12-24T06:24:38.000Z | 2020-03-16T14:42:53.000Z | docs/doc_check/test/test_same-as-file.py | gperrotta/onnx-mlir | 75930ffbcf14cfbaccd8417c47c3598f56342926 | [
"Apache-2.0"
] | 10 | 2019-12-24T01:41:42.000Z | 2021-09-09T17:11:31.000Z | # ===------- test-same-as-file.py - Test for same-as-file directive -------===//
#
# Copyright 2019-2020 The IBM Research Authors.
#
# =============================================================================
#
# ===----------------------------------------------------------------------===//
import unittest
import os
import sys
# Make common utilities visible by adding them to system paths.
test_dir = os.path.dirname(os.path.realpath(__file__))
doc_check_base_dir = os.path.abspath(os.path.join(test_dir, os.pardir))
print(doc_check_base_dir)
sys.path.append(doc_check_base_dir)
import check
class TestStringMethods(unittest.TestCase):
def test_basic(self):
check.main('./same-as-file/simple/', [])
def test_different(self):
with self.assertRaises(ValueError) as context:
check.main("./same-as-file/error-doc-different-from-ref/", [])
self.assertTrue('Check failed because doc file content is not the same as that of reference file.' in str(
context.exception))
def test_doc_shorter_than_ref(self):
# check.main('./same-as-file/error-doc-shorter-than-ref/', [])
with self.assertRaises(ValueError) as context:
check.main('./same-as-file/error-doc-shorter-than-ref/', [])
self.assertTrue('Check failed because doc file is shorter than reference file.' in str(
context.exception))
def test_skip_doc_ref(self):
check.main('./same-as-file/skip-doc-ref/', [])
if __name__ == '__main__':
unittest.main()
| 34.177778 | 114 | 0.605982 |
cf9d2a631c6573a152f3cb592f0128ae78c7ce49 | 20,973 | py | Python | venv/lib/python3.6/site-packages/cssutils/tests/test_util.py | Amirh24/monKey-s | 10afccb07dc47c9239de07c5bbcdb95008f5f0e5 | [
"MIT"
] | 53 | 2018-10-02T05:58:54.000Z | 2020-09-15T08:58:26.000Z | venv/Lib/site-packages/cssutils/tests/test_util.py | DoesArt-Studios/RamBrowse | a81da53e04d265d17e76855e7affc11130ee6120 | [
"MIT"
] | 52 | 2018-09-26T05:16:09.000Z | 2022-03-11T23:51:14.000Z | venv/Lib/site-packages/cssutils/tests/test_util.py | DoesArt-Studios/RamBrowse | a81da53e04d265d17e76855e7affc11130ee6120 | [
"MIT"
] | 10 | 2019-03-11T16:35:14.000Z | 2019-10-23T08:03:54.000Z | # -*- coding: utf-8 -*-
"""Testcases for cssutils.util"""
import cgi
from email import message_from_string, message_from_file
import io
import re
import sys
import urllib.request, urllib.error, urllib.parse
import xml.dom
try:
import mock
except ImportError:
mock = None
print("install mock library to run all tests")
from . import basetest
import encutils
from cssutils.util import Base, ListSeq, _readUrl, _defaultFetcher, LazyRegex
class ListSeqTestCase(basetest.BaseTestCase):
def test_all(self):
"util.ListSeq"
ls = ListSeq()
self.assertEqual(0, len(ls))
# append()
self.assertRaises(NotImplementedError, ls.append, 1)
# set
self.assertRaises(NotImplementedError, ls.__setitem__, 0, 1)
# hack:
ls.seq.append(1)
ls.seq.append(2)
# len
self.assertEqual(2, len(ls))
# __contains__
self.assertEqual(True, 1 in ls)
# get
self.assertEqual(1, ls[0])
self.assertEqual(2, ls[1])
# del
del ls[0]
self.assertEqual(1, len(ls))
self.assertEqual(False, 1 in ls)
# for in
for x in ls:
self.assertEqual(2, x)
class BaseTestCase(basetest.BaseTestCase):
def test_normalize(self):
"Base._normalize()"
b = Base()
tests = {'abcdefg ABCDEFG äöü߀ AÖÜ': 'abcdefg abcdefg äöü߀ aöü',
r'\ga\Ga\\\ ': r'gaga\ ',
r'0123456789': '0123456789',
# unicode escape seqs should have been done by
# the tokenizer...
}
for test, exp in list(tests.items()):
self.assertEqual(b._normalize(test), exp)
# static too
self.assertEqual(Base._normalize(test), exp)
def test_tokenupto(self):
"Base._tokensupto2()"
# tests nested blocks of {} [] or ()
b = Base()
tests = [
('default', 'a[{1}]({2}) { } NOT', 'a[{1}]({2}) { }', False),
('default', 'a[{1}]({2}) { } NOT', 'a[{1}]func({2}) { }', True),
('blockstartonly', 'a[{1}]({2}) { NOT', 'a[{1}]({2}) {', False),
('blockstartonly', 'a[{1}]({2}) { NOT', 'a[{1}]func({2}) {', True),
('propertynameendonly', 'a[(2)1] { }2 : a;', 'a[(2)1] { }2 :', False),
('propertynameendonly', 'a[(2)1] { }2 : a;', 'a[func(2)1] { }2 :', True),
('propertyvalueendonly', 'a{;{;}[;](;)}[;{;}[;](;)](;{;}[;](;)) 1; NOT',
'a{;{;}[;](;)}[;{;}[;](;)](;{;}[;](;)) 1;', False),
('propertyvalueendonly', 'a{;{;}[;](;)}[;{;}[;](;)](;{;}[;](;)) 1; NOT',
'a{;{;}[;]func(;)}[;{;}[;]func(;)]func(;{;}[;]func(;)) 1;', True),
('funcendonly', 'a{[1]}([3])[{[1]}[2]([3])]) NOT',
'a{[1]}([3])[{[1]}[2]([3])])', False),
('funcendonly', 'a{[1]}([3])[{[1]}[2]([3])]) NOT',
'a{[1]}func([3])[{[1]}[2]func([3])])', True),
('selectorattendonly', '[a[()]{()}([()]{()}())] NOT',
'[a[()]{()}([()]{()}())]', False),
('selectorattendonly', '[a[()]{()}([()]{()}())] NOT',
'[a[func()]{func()}func([func()]{func()}func())]', True),
# issue 50
('withstarttoken [', 'a];x', '[a];', False)
]
for typ, values, exp, paransasfunc in tests:
def maketokens(valuelist):
# returns list of tuples
return [('TYPE', v, 0, 0) for v in valuelist]
tokens = maketokens(list(values))
if paransasfunc:
for i, t in enumerate(tokens):
if '(' == t[1]:
tokens[i] = ('FUNCTION', 'func(', t[2], t[3])
if 'default' == typ:
restokens = b._tokensupto2(tokens)
elif 'blockstartonly' == typ:
restokens = b._tokensupto2(
tokens, blockstartonly=True)
elif 'propertynameendonly' == typ:
restokens = b._tokensupto2(
tokens, propertynameendonly=True)
elif 'propertyvalueendonly' == typ:
restokens = b._tokensupto2(
tokens, propertyvalueendonly=True)
elif 'funcendonly' == typ:
restokens = b._tokensupto2(
tokens, funcendonly=True)
elif 'selectorattendonly' == typ:
restokens = b._tokensupto2(
tokens, selectorattendonly=True)
elif 'withstarttoken [' == typ:
restokens = b._tokensupto2(tokens, ('CHAR', '[', 0, 0))
res = ''.join([t[1] for t in restokens])
self.assertEqual(exp, res)
class _readUrl_TestCase(basetest.BaseTestCase):
"""needs mock"""
def test_readUrl(self):
"""util._readUrl()"""
# for additional tests see test_parse.py
url = 'http://example.com/test.css'
def make_fetcher(r):
# normally r == encoding, content
def fetcher(url):
return r
return fetcher
tests = {
# defaultFetcher returns: readUrl returns
None: (None, None, None),
(None, ''): ('utf-8', 5, ''),
(None, '€'.encode('utf-8')): ('utf-8', 5, '€'),
('utf-8', '€'.encode('utf-8')): ('utf-8', 1, '€'),
('ISO-8859-1', 'ä'.encode('iso-8859-1')): ('ISO-8859-1', 1, 'ä'),
('ASCII', 'a'.encode('ascii')): ('ASCII', 1, 'a')
}
for r, exp in list(tests.items()):
self.assertEqual(_readUrl(url, fetcher=make_fetcher(r)), exp)
tests = {
# (overrideEncoding, parentEncoding, (httpencoding, content)):
# readUrl returns
# ===== 0. OVERRIDE WINS =====
# override + parent + http
('latin1', 'ascii', ('utf-16', ''.encode())): ('latin1', 0, ''),
('latin1', 'ascii', ('utf-16', '123'.encode())): ('latin1', 0, '123'),
('latin1', 'ascii', ('utf-16', 'ä'.encode('iso-8859-1'))):
('latin1', 0, 'ä'),
('latin1', 'ascii', ('utf-16', 'a'.encode('ascii'))):
('latin1',0, 'a'),
# + @charset
('latin1', 'ascii', ('utf-16', '@charset "ascii";'.encode())):
('latin1', 0, '@charset "latin1";'),
('latin1', 'ascii', ('utf-16', '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 0, '@charset "latin1";ä'),
('latin1', 'ascii', ('utf-16', '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 0, '@charset "latin1";\xc3\xa4'), # read as latin1!
# override only
('latin1', None, None): (None, None, None),
('latin1', None, (None, ''.encode())): ('latin1', 0, ''),
('latin1', None, (None, '123'.encode())): ('latin1', 0, '123'),
('latin1', None, (None, 'ä'.encode('iso-8859-1'))):
('latin1', 0, 'ä'),
('latin1', None, (None, 'a'.encode('ascii'))):
('latin1', 0, 'a'),
# + @charset
('latin1', None, (None, '@charset "ascii";'.encode())):
('latin1', 0, '@charset "latin1";'),
('latin1', None, (None, '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 0, '@charset "latin1";ä'),
('latin1', None, (None, '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 0, '@charset "latin1";\xc3\xa4'), # read as latin1!
# override + parent
('latin1', 'ascii', None): (None, None, None),
('latin1', 'ascii', (None, ''.encode())): ('latin1', 0, ''),
('latin1', 'ascii', (None, '123'.encode())): ('latin1', 0, '123'),
('latin1', 'ascii', (None, 'ä'.encode('iso-8859-1'))):
('latin1', 0, 'ä'),
('latin1', 'ascii', (None, 'a'.encode('ascii'))):
('latin1', 0, 'a'),
# + @charset
('latin1', 'ascii', (None, '@charset "ascii";'.encode())):
('latin1', 0, '@charset "latin1";'),
('latin1', 'ascii', (None, '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 0, '@charset "latin1";ä'),
('latin1', 'ascii', (None, '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 0, '@charset "latin1";\xc3\xa4'), # read as latin1!
# override + http
('latin1', None, ('utf-16', ''.encode())): ('latin1', 0, ''),
('latin1', None, ('utf-16', '123'.encode())): ('latin1', 0, '123'),
('latin1', None, ('utf-16', 'ä'.encode('iso-8859-1'))):
('latin1', 0, 'ä'),
('latin1', None, ('utf-16', 'a'.encode('ascii'))):
('latin1', 0, 'a'),
# + @charset
('latin1', None, ('utf-16', '@charset "ascii";'.encode())):
('latin1', 0, '@charset "latin1";'),
('latin1', None, ('utf-16', '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 0, '@charset "latin1";ä'),
('latin1', None, ('utf-16', '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 0, '@charset "latin1";\xc3\xa4'), # read as latin1!
# override ü @charset
('latin1', None, (None, '@charset "ascii";'.encode())):
('latin1', 0, '@charset "latin1";'),
('latin1', None, (None, '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 0, '@charset "latin1";ä'),
('latin1', None, (None, '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 0, '@charset "latin1";\xc3\xa4'), # read as latin1!
# ===== 1. HTTP WINS =====
(None, 'ascii', ('latin1', ''.encode())): ('latin1', 1, ''),
(None, 'ascii', ('latin1', '123'.encode())): ('latin1', 1, '123'),
(None, 'ascii', ('latin1', 'ä'.encode('iso-8859-1'))):
('latin1', 1, 'ä'),
(None, 'ascii', ('latin1', 'a'.encode('ascii'))):
('latin1', 1, 'a'),
# + @charset
(None, 'ascii', ('latin1', '@charset "ascii";'.encode())):
('latin1', 1, '@charset "latin1";'),
(None, 'ascii', ('latin1', '@charset "utf-8";ä'.encode('latin1'))):
('latin1', 1, '@charset "latin1";ä'),
(None, 'ascii', ('latin1', '@charset "utf-8";ä'.encode('utf-8'))):
('latin1', 1, '@charset "latin1";\xc3\xa4'), # read as latin1!
# ===== 2. @charset WINS =====
(None, 'ascii', (None, '@charset "latin1";'.encode())):
('latin1', 2, '@charset "latin1";'),
(None, 'ascii', (None, '@charset "latin1";ä'.encode('latin1'))):
('latin1', 2, '@charset "latin1";ä'),
(None, 'ascii', (None, '@charset "latin1";ä'.encode('utf-8'))):
('latin1', 2, '@charset "latin1";\xc3\xa4'), # read as latin1!
# ===== 2. BOM WINS =====
(None, 'ascii', (None, 'ä'.encode('utf-8-sig'))):
('utf-8-sig', 2, '\xe4'), # read as latin1!
(None, 'ascii', (None, '@charset "utf-8";ä'.encode('utf-8-sig'))):
('utf-8-sig', 2, '@charset "utf-8";\xe4'), # read as latin1!
(None, 'ascii', (None, '@charset "latin1";ä'.encode('utf-8-sig'))):
('utf-8-sig', 2, '@charset "utf-8";\xe4'), # read as latin1!
# ===== 4. parentEncoding WINS =====
(None, 'latin1', (None, ''.encode())): ('latin1', 4, ''),
(None, 'latin1', (None, '123'.encode())): ('latin1', 4, '123'),
(None, 'latin1', (None, 'ä'.encode('iso-8859-1'))):
('latin1', 4, 'ä'),
(None, 'latin1', (None, 'a'.encode('ascii'))):
('latin1', 4, 'a'),
(None, 'latin1', (None, 'ä'.encode('utf-8'))):
('latin1', 4, '\xc3\xa4'), # read as latin1!
# ===== 5. default WINS which in this case is None! =====
(None, None, (None, ''.encode())): ('utf-8', 5, ''),
(None, None, (None, '123'.encode())): ('utf-8', 5, '123'),
(None, None, (None, 'a'.encode('ascii'))):
('utf-8', 5, 'a'),
(None, None, (None, 'ä'.encode('utf-8'))):
('utf-8', 5, 'ä'), # read as utf-8
(None, None, (None, 'ä'.encode('iso-8859-1'))): # trigger UnicodeDecodeError!
('utf-8', 5, None),
}
for (override, parent, r), exp in list(tests.items()):
self.assertEqual(_readUrl(url,
overrideEncoding=override,
parentEncoding=parent,
fetcher=make_fetcher(r)),
exp)
def test_defaultFetcher(self):
"""util._defaultFetcher"""
if mock:
class Response(object):
"""urllib2.Reponse mock"""
def __init__(self, url,
contenttype, content,
exception=None, args=None):
self.url = url
mt, params = cgi.parse_header(contenttype)
self.mimetype = mt
self.charset = params.get('charset', None)
self.text = content
self.exception = exception
self.args = args
def geturl(self):
return self.url
def info(self):
mimetype, charset = self.mimetype, self.charset
class Info(object):
# py2x
def gettype(self):
return mimetype
def getparam(self, name=None):
return charset
# py 3x
get_content_type = gettype
get_content_charset = getparam # here always charset!
return Info()
def read(self):
# returns fake text or raises fake exception
if not self.exception:
return self.text
else:
raise self.exception(*self.args)
def urlopen(url,
contenttype=None, content=None,
exception=None, args=None):
# return an mock which returns parameterized Response
def x(*ignored):
if exception:
raise exception(*args)
else:
return Response(url,
contenttype, content,
exception=exception, args=args)
return x
urlopenpatch = 'urllib2.urlopen' if basetest.PY2x else 'urllib.request.urlopen'
# positive tests
tests = {
# content-type, contentstr: encoding, contentstr
('text/css', '€'.encode('utf-8')):
(None, '€'.encode('utf-8')),
('text/css;charset=utf-8', '€'.encode('utf-8')):
('utf-8', '€'.encode('utf-8')),
('text/css;charset=ascii', 'a'):
('ascii', 'a')
}
url = 'http://example.com/test.css'
for (contenttype, content), exp in list(tests.items()):
@mock.patch(urlopenpatch, new=urlopen(url, contenttype, content))
def do(url):
return _defaultFetcher(url)
self.assertEqual(exp, do(url))
# wrong mimetype
@mock.patch(urlopenpatch, new=urlopen(url, 'text/html', 'a'))
def do(url):
return _defaultFetcher(url)
self.assertRaises(ValueError, do, url)
# calling url results in fake exception
# py2 ~= py3 raises error earlier than urlopen!
tests = {
'1': (ValueError, ['invalid value for url']),
#_readUrl('mailto:a.css')
'mailto:e4': (urllib.error.URLError, ['urlerror']),
# cannot resolve x, IOError
'http://x': (urllib.error.URLError, ['ioerror']),
}
for url, (exception, args) in list(tests.items()):
@mock.patch(urlopenpatch, new=urlopen(url, exception=exception, args=args))
def do(url):
return _defaultFetcher(url)
self.assertRaises(exception, do, url)
# py2 != py3 raises error earlier than urlopen!
urlrequestpatch = 'urllib2.urlopen' if basetest.PY2x else 'urllib.request.Request'
tests = {
#_readUrl('http://cthedot.de/__UNKNOWN__.css')
'e2': (urllib.error.HTTPError, ['u', 500, 'server error', {}, None]),
'e3': (urllib.error.HTTPError, ['u', 404, 'not found', {}, None]),
}
for url, (exception, args) in list(tests.items()):
@mock.patch(urlrequestpatch, new=urlopen(url, exception=exception, args=args))
def do(url):
return _defaultFetcher(url)
self.assertRaises(exception, do, url)
else:
self.assertEqual(False, 'Mock needed for this test')
class TestLazyRegex(basetest.BaseTestCase):
"""Tests for cssutils.util.LazyRegex."""
def setUp(self):
self.lazyre = LazyRegex('f.o')
def test_public_interface(self):
methods = ['search', 'match', 'split', 'sub', 'subn', 'findall',
'finditer', 'pattern', 'flags', 'groups', 'groupindex',]
for method in methods:
self.assertTrue(hasattr(self.lazyre, method),
'expected %r public attribute' % method)
def test_ensure(self):
self.assertIsNone(self.lazyre.matcher)
self.lazyre.ensure()
self.assertIsNotNone(self.lazyre.matcher)
def test_calling(self):
self.assertIsNone(self.lazyre('bar'))
match = self.lazyre('foobar')
self.assertEqual(match.group(), 'foo')
def test_matching(self):
self.assertIsNone(self.lazyre.match('bar'))
match = self.lazyre.match('foobar')
self.assertEqual(match.group(), 'foo')
def test_matching_with_position_parameters(self):
self.assertIsNone(self.lazyre.match('foo', 1))
self.assertIsNone(self.lazyre.match('foo', 0, 2))
def test_searching(self):
self.assertIsNone(self.lazyre.search('rafuubar'))
match = self.lazyre.search('rafoobar')
self.assertEqual(match.group(), 'foo')
def test_searching_with_position_parameters(self):
self.assertIsNone(self.lazyre.search('rafoobar', 3))
self.assertIsNone(self.lazyre.search('rafoobar', 0, 4))
match = self.lazyre.search('rafoofuobar', 4)
self.assertEqual(match.group(), 'fuo')
def test_split(self):
self.assertEqual(self.lazyre.split('rafoobarfoobaz'),
['ra', 'bar', 'baz'])
self.assertEqual(self.lazyre.split('rafoobarfoobaz', 1),
['ra', 'barfoobaz'])
def test_findall(self):
self.assertEqual(self.lazyre.findall('rafoobarfuobaz'),
['foo', 'fuo'])
def test_finditer(self):
result = self.lazyre.finditer('rafoobarfuobaz')
self.assertEqual([m.group() for m in result], ['foo', 'fuo'])
def test_sub(self):
self.assertEqual(self.lazyre.sub('bar', 'foofoo'), 'barbar')
self.assertEqual(self.lazyre.sub(lambda x: 'baz', 'foofoo'), 'bazbaz')
def test_subn(self):
subbed = self.lazyre.subn('bar', 'foofoo')
self.assertEqual(subbed, ('barbar', 2))
subbed = self.lazyre.subn(lambda x: 'baz', 'foofoo')
self.assertEqual(subbed, ('bazbaz', 2))
def test_groups(self):
lazyre = LazyRegex('(.)(.)')
self.assertIsNone(lazyre.groups)
lazyre.ensure()
self.assertEqual(lazyre.groups, 2)
def test_groupindex(self):
lazyre = LazyRegex('(?P<foo>.)')
self.assertIsNone(lazyre.groupindex)
lazyre.ensure()
self.assertEqual(lazyre.groupindex, {'foo': 1})
def test_flags(self):
self.lazyre.ensure()
self.assertEqual(self.lazyre.flags, re.compile('.').flags)
def test_pattern(self):
self.assertEqual(self.lazyre.pattern, 'f.o')
if __name__ == '__main__':
import unittest
unittest.main()
| 40.962891 | 95 | 0.466886 |
80d1c6b01db44a23fb8b740ed320007517fc1828 | 46 | py | Python | src/models/menu.py | pepell/min_web2py_app | 2a906545f06a1e4152f989b1e12f15af01672a46 | [
"MIT"
] | null | null | null | src/models/menu.py | pepell/min_web2py_app | 2a906545f06a1e4152f989b1e12f15af01672a46 | [
"MIT"
] | null | null | null | src/models/menu.py | pepell/min_web2py_app | 2a906545f06a1e4152f989b1e12f15af01672a46 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
response.title = ''
| 9.2 | 23 | 0.5 |
53abc4dadf613a48680facb22cc27e2f540729b9 | 860 | py | Python | setup.py | trilleplay/fortnite-replay-reader | 55919ae9519737944e4fe197f1759d65d9fd0f51 | [
"MIT"
] | 30 | 2018-11-08T16:33:50.000Z | 2022-03-06T20:52:54.000Z | setup.py | trilleplay/fortnite-replay-reader | 55919ae9519737944e4fe197f1759d65d9fd0f51 | [
"MIT"
] | 20 | 2018-11-22T20:42:32.000Z | 2021-08-07T00:09:39.000Z | setup.py | trilleplay/fortnite-replay-reader | 55919ae9519737944e4fe197f1759d65d9fd0f51 | [
"MIT"
] | 20 | 2018-11-12T22:31:46.000Z | 2022-03-06T20:52:45.000Z | from setuptools import setup
import ray
def readme():
with open('README.md') as f:
return f.read()
setup(name='fortnite-replay-reader',
version=ray.__version__,
description='Parse fortnite .replay files',
long_description=readme(),
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
keywords='fortnite replay reader ray',
url='http://github.com/Shiqan/fortnite-replay-reader',
author='Shiqan',
license='MIT',
packages=['ray'],
install_requires=[
'bitstring',
],
tests_require=['pytest'],
include_package_data=True,
zip_safe=False)
| 26.060606 | 60 | 0.60814 |
dbf54102f431d24d62a40a2df52953c36165e909 | 751 | py | Python | tests/plugins/trackers/tapochek/tapochektracker_helper.py | DmitryRibalka/monitorrent | f329d4bca151360d29e93d5369a1d21268d8998f | [
"WTFPL"
] | 465 | 2015-08-31T09:16:41.000Z | 2022-03-12T10:33:04.000Z | tests/plugins/trackers/tapochek/tapochektracker_helper.py | DmitryRibalka/monitorrent | f329d4bca151360d29e93d5369a1d21268d8998f | [
"WTFPL"
] | 340 | 2015-07-18T17:31:54.000Z | 2022-03-30T15:16:25.000Z | tests/plugins/trackers/tapochek/tapochektracker_helper.py | DmitryRibalka/monitorrent | f329d4bca151360d29e93d5369a1d21268d8998f | [
"WTFPL"
] | 87 | 2015-07-18T10:52:24.000Z | 2022-03-27T09:52:35.000Z | from builtins import object
# coding=utf-8
class TapochekHelper(object):
# real values
real_login = None
real_password = None
real_uid = None
real_bb_data = None
# fake values
fake_login = 'fakelogin'
fake_password = 'p@$$w0rd'
fake_uid = '407039'
fake_bb_data = u'a%3A3%3A%7Bs%3A2%3A%22uk%22%3BN%3Bs%3A3%3A%22uid%22%' \
u'3Bi%3A407039%3Bs%3A3%3A%22sid%22%3Bs%3A20%3A%22bbGF6KdstmL1onQgnZ0u%22%3B%7D'
def __init__(self, login=None, password=None, uid=None, bb_data=None):
self.real_login = login or self.fake_login
self.real_password = password or self.fake_password
self.real_uid = uid or self.fake_uid
self.real_bb_data = bb_data or self.fake_bb_data
| 32.652174 | 98 | 0.6751 |
c32caa675ef4cffe8a71d833eb88b1d8d70769de | 1,506 | py | Python | applications/adversarial_rl/parse_adv_rl.py | sebascuri/rhucrl | 27663e1302f3bbc636dff28495c6f2667bb7c1da | [
"MIT"
] | 1 | 2021-11-19T11:46:48.000Z | 2021-11-19T11:46:48.000Z | applications/adversarial_rl/parse_adv_rl.py | sebascuri/rhucrl | 27663e1302f3bbc636dff28495c6f2667bb7c1da | [
"MIT"
] | 1 | 2021-11-22T07:48:03.000Z | 2021-11-22T07:48:03.000Z | applications/adversarial_rl/parse_adv_rl.py | sebascuri/rhucrl | 27663e1302f3bbc636dff28495c6f2667bb7c1da | [
"MIT"
] | 1 | 2022-03-26T10:18:01.000Z | 2022-03-26T10:18:01.000Z | """Parse Adversarial RL experiments."""
import os
import pandas as pd
class Experiment(object):
"""Experiment class to parse."""
def __init__(self, file_name):
splits = file_name[:-5].split("_")
self.environment = splits[0][2:-3]
self.alpha = float(splits[1])
self.agent = splits[2]
if len(splits) == 6:
self.config = splits[3] + "_" + splits[4]
else:
self.config = splits[3]
self.seed = int(splits[-1])
df = pd.read_json(file_name)
self.train_returns = df.train_return
robust_file_name = file_name[:-5] + "_robust.json"
df = pd.read_json(robust_file_name)
self.robust_returns = df.train_return
def get_df(self):
"""Get the experiment as a data frame."""
return pd.DataFrame(vars(self))
def parse_dir(path=None):
"""Parse all experiments in directory."""
if path is None:
path = os.getcwd()
df = pd.DataFrame()
for file_name in filter(
lambda x: x.endswith(".json") and not x.endswith("robust.json"),
os.listdir(path),
):
try:
experiment = Experiment(file_name)
df = df.append(experiment.get_df())
except ValueError:
pass
return df
if __name__ == "__main__":
import socket
df = parse_dir(path=os.path.dirname(os.path.realpath(__file__)))
df.reset_index(inplace=True)
df.to_json(f"adversarial_robust_{socket.gethostname()}.json")
| 26.421053 | 72 | 0.598274 |
c0b7bbfe9f083059bca8cfd7f82390cfe017fb85 | 2,453 | py | Python | Conputional_Genonics/Assignment/assignment1/src/indexer.py | infinityglow/Unimelb-CS-Subjects | 07bdb49fd4c50035b7f2e80ca218ac2b620098e4 | [
"MIT"
] | 1 | 2022-02-14T16:31:07.000Z | 2022-02-14T16:31:07.000Z | Conputional_Genonics/Assignment/assignment1/src/indexer.py | hidara2000/Unimelb-CS-Subjects | 07bdb49fd4c50035b7f2e80ca218ac2b620098e4 | [
"MIT"
] | null | null | null | Conputional_Genonics/Assignment/assignment1/src/indexer.py | hidara2000/Unimelb-CS-Subjects | 07bdb49fd4c50035b7f2e80ca218ac2b620098e4 | [
"MIT"
] | 1 | 2021-06-14T11:59:13.000Z | 2021-06-14T11:59:13.000Z | #!usr/lib/python2.7
# Auther : Haonan Li <haonanl5@student.unimelb.edu.au>
# Porpuse : Build a k-mer index for a FASTA reference file
import os
import sys
import getopt
# Read the FASTA reference, build a k-mer dictionary with positions
def get_kmer_dict(ref_file, k):
# Read reference information
ref_file = open(ref_file, "r")
ref_name = ref_file.readline()
ref_name = ref_name[1:].strip()
ref = ">"
for line in ref_file.readlines():
line = line.strip()
ref += line
ref = ref.upper()
# Build the dictionary, (key: Value) is k-mer and position list separately.
ref_dict = {}
for i in range (1,len(ref)-k+1):
k_mer = ref[i:i+k]
if k_mer in ref_dict:
ref_dict[k_mer].append(i)
else:
ref_dict[k_mer] = [i]
return (ref_name, ref_dict)
# Sort the dictionary and output
def build_index(out_file, ref_name, ref_dict, k):
out_file = open(out_file, "w")
out_file.write("INDEX:" + ref_name + ' ' + str(k) + '\n')
ref_tuple = sorted(ref_dict.items(), key = lambda e:e[0], reverse = False)
for tup in ref_tuple:
out_file.write(tup[0])
for pos in tup[1]:
out_file.write(' ' + str(pos))
out_file.write('\n')
# Usage of the tool
def usage():
print ("usage:python indexer.py [options] ... [-f reffile | -k value] ...")
print ("Options and arguments:")
print ("-h :Help")
print ("-f :Reference file.")
print ("-k :Value of k for k-mers.")
print ("-o :Output index file.")
def main(argv):
try:
opts, args = getopt.getopt(argv[1:], "hf:k:o:", \
["reference=", "k=", "output="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
elif opt in ('-f', '--reference='):
ref_file = arg
elif opt in ('-k', '--k='):
k = int(arg)
elif opt in ('-o','--output='):
out_file = arg
# Make sure the parameters were defined
if not('ref_file' in locals().keys()) or \
not('k' in locals().keys()) or \
not('out_file' in locals().keys()):
usage()
sys.exit()
# main process
ref_name, ref_dict = get_kmer_dict(ref_file, k)
build_index(out_file, ref_name, ref_dict, k)
if __name__ == "__main__":
main(sys.argv)
| 28.523256 | 79 | 0.561761 |
5cdb28aa68bc5ced7fd1f1339e8128214f2738c4 | 469 | py | Python | ex077.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex077.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | ex077.py | pepev123/PythonEx | 8f39751bf87a9099d7b733aa829988595dab2344 | [
"MIT"
] | null | null | null | a = e = i = o = u = ""
lista = ('pao', 'bacon', 'cenoura', 'mel', 'uva')
for c in range(0, len(lista)):
palavra = lista[c]
if palavra.count('a') > 0:
a = "a"
if palavra.count('e') > 0:
e = "e"
if palavra.count('i') > 0:
i = "i"
if palavra.count('o') > 0:
o = "o"
if palavra.count('u') > 0:
u = "u"
print(f'Na palavra {palavra} tem as seguintes vogais: {a} {e} {i} {o} {u}')
a = e = i = o = u = ""
| 27.588235 | 79 | 0.439232 |
55a85439b9416300e9af8bce36822e9494b0a96c | 5,983 | py | Python | tools/remoteserver/robotremoteserverB.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tools/remoteserver/robotremoteserverB.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tools/remoteserver/robotremoteserverB.py | gdw2/robot-framework | f25068edf1502e76ba8664d4b5ed1aebe0ee2434 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2010 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pdb
import sys
import inspect
import traceback
import sha
from StringIO import StringIO
from SimpleXMLRPCServer import SimpleXMLRPCServer
try:
import signal
except ImportError:
signal = None
class RobotRemoteServer(SimpleXMLRPCServer):
allow_reuse_address = True
def __init__(self, host='localhost', port=8270):
SimpleXMLRPCServer.__init__(self, (host, int(port)), logRequests=False)
self._libraries = []
self.register_function(self.get_keyword_names)
self.register_function(self.run_keyword)
self.register_function(self.get_keyword_arguments)
self.register_function(self.get_keyword_documentation)
self.register_function(self.stop_remote_server)
self.register_function(self.remote_import)
callback = lambda signum, frame: self.stop_remote_server()
if hasattr(signal, 'SIGHUP'):
signal.signal(signal.SIGHUP, callback)
if hasattr(signal, 'SIGINT'):
signal.signal(signal.SIGINT, callback)
print 'Robot Framework remote library started at %s:%s' % (host, port)
self.serve_forever()
def serve_forever(self):
self._shutdown = False
while not self._shutdown:
self.handle_request()
def stop_remote_server(self):
self._shutdown = True
return True
def remote_import(self, lib_name, code):
filename = sha.new(code).hexdigest() # create a temporary filename
open(filename+'.py','w').write(code)
exec('from %s import %s as m' % (filename, lib_name))
self._libraries.append(m)
return True
def get_keyword_names(self):
names = []
for library in self._libraries:
get_kw_names = getattr(library, 'get_keyword_names', None) or \
getattr(library, 'getKeywordNames', None)
if inspect.isroutine(get_kw_names):
names += get_kw_names()
else:
names += [ attr for attr in dir(library) if attr[0] != '_'
and inspect.isroutine(getattr(library, attr)) ]
return names + ['stop_remote_server']
def run_keyword(self, name, args):
result = {'status': 'PASS', 'return': '', 'output': '',
'error': '', 'traceback': ''}
self._intercept_stdout()
try:
return_value = self._get_keyword(name)(*args)
except:
result['status'] = 'FAIL'
result['error'], result['traceback'] = self._get_error_details()
else:
result['return'] = self._handle_return_value(return_value)
result['output'] = self._restore_stdout()
return result
def get_keyword_arguments(self, name):
kw = self._get_keyword(name)
args, varargs, _, defaults = inspect.getargspec(kw)
if inspect.ismethod(kw):
args = args[1:] # drop 'self'
if defaults:
args, names = args[:-len(defaults)], args[-len(defaults):]
args += [ '%s=%s' % (name, value)
for name, value in zip(names, defaults) ]
if varargs:
args.append('*%s' % varargs)
return args
def get_keyword_documentation(self, name):
return inspect.getdoc(self._get_keyword(name)) or ''
def _get_keyword(self, name):
if name == 'stop_remote_server':
return self.stop_remote_server
for library in self._libraries:
try:
return getattr(library(), name)
except:
pass #TODO Do something here
def _get_error_details(self):
exc_type, exc_value, exc_tb = sys.exc_info()
if exc_type in (SystemExit, KeyboardInterrupt):
self._restore_stdout()
raise
return (self._get_error_message(exc_type, exc_value),
self._get_error_traceback(exc_tb))
def _get_error_message(self, exc_type, exc_value):
name = exc_type.__name__
message = str(exc_value)
if not message:
return name
if name in ('AssertionError', 'RuntimeError', 'Exception'):
return message
return '%s: %s' % (name, message)
    def _get_error_traceback(self, exc_tb):
        """Format *exc_tb* like a standard traceback, minus this class's own frame."""
        # The first (oldest) entry is the run_keyword frame in this class,
        # so it is dropped before formatting.
        entries = traceback.extract_tb(exc_tb)[1:]
        trace = ''.join(traceback.format_list(entries))
        return 'Traceback (most recent call last):\n' + trace
    def _handle_return_value(self, ret):
        """Convert *ret* into XML-RPC transportable types.

        Scalars pass through unchanged, tuples/lists and dicts are converted
        recursively (dict keys are always stringified), and anything else is
        stringified. Python 2 code: relies on basestring/long.
        """
        if isinstance(ret, (basestring, int, long, float)):
            return ret
        if isinstance(ret, (tuple, list)):
            return [ self._handle_return_value(item) for item in ret ]
        if isinstance(ret, dict):
            return dict([ (self._str(key), self._handle_return_value(value))
                          for key, value in ret.items() ])
        return self._str(ret)
def _str(self, item):
if item is None:
return ''
return str(item)
    def _intercept_stdout(self):
        """Swap sys.stdout for a StringIO buffer so keyword output can be captured."""
        # TODO: What about stderr?
        sys.stdout = StringIO()
    def _restore_stdout(self):
        """Restore the real stdout and return everything captured since interception."""
        output = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = sys.__stdout__
        return output
# Command-line entry point: forwards all CLI arguments to the server.
# NOTE(review): RobotRemoteServer is presumably defined earlier in this file;
# the expected argument list is not visible here -- confirm against __init__.
if __name__ == '__main__':
    RobotRemoteServer(*sys.argv[1:])
| 35.613095 | 79 | 0.620257 |
f3173cc05bb85762ec2a8f6cb55d8a4b0025dfcb | 1,528 | py | Python | tnmlearn/datasets/dog_cat_dataset.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | tnmlearn/datasets/dog_cat_dataset.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | tnmlearn/datasets/dog_cat_dataset.py | t2wain/machine-learning | 4b5e1a24fab7c4ab42f646f7785191ff3d3283ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import zipfile
import tarfile
import os
import re
from tnmlearn.other import paths
def extract_file(filePath, to_directory):
  """Extract a .zip, .tar.gz/.tgz, or .tar.bz2/.tbz archive into *to_directory*.

  The target directory is created if needed. Files with any other extension
  are silently ignored (the function returns None without side effects).
  """
  # Pick the opener and mode from the archive extension.
  handlers = (
      (('.zip',), zipfile.ZipFile, 'r'),
      (('.tar.gz', '.tgz'), tarfile.open, 'r:gz'),
      (('.tar.bz2', '.tbz'), tarfile.open, 'r:bz2'),
  )
  for suffixes, opener, mode in handlers:
    if filePath.endswith(suffixes):
      break
  else:
    return
  os.makedirs(to_directory, exist_ok=True)
  archive = opener(filePath, mode)
  try:
    archive.extractall(to_directory)
  finally:
    archive.close()
def split_dog_cat_image_files(traindir):
  """Sort a flat directory of dog/cat images into 'dog' and 'cat' subfolders.

  Files whose basename starts with 'dog' are moved to <traindir>/dog; every
  other image goes to <traindir>/cat. Images are discovered with
  paths.list_images.
  """
  catdir = os.path.join(traindir, 'cat')
  dogdir = os.path.join(traindir, 'dog')
  for subdir in (catdir, dogdir):
    os.makedirs(subdir, exist_ok=True)
  # Materialize the listing first so renames do not disturb the scan.
  for srcpath in list(paths.list_images(traindir)):
    basename = os.path.basename(srcpath)
    destdir = dogdir if basename.startswith('dog') else catdir
    os.rename(srcpath, os.path.join(destdir, basename))
def split_17flowers(traindir):
  """Partition the 17-flowers images into per-class folders dir_0 .. dir_16.

  The class is derived from a 4-digit number in the filename: images are
  numbered consecutively with 80 images per class, so image i belongs to
  class (i-1)//80. Files without a 4-digit number in their name are skipped.
  """
  for dir_id in range(17):
    os.makedirs(os.path.join(traindir, 'dir_'+str(dir_id)), exist_ok=True)
  imagepaths = [(f, os.path.basename(f)) for f in paths.list_images(traindir)]
  # NOTE(review): if a name contains more than one 4-digit run, findall
  # produces one rename target per match -- assumed not to happen here.
  imagepaths = [(f, os.path.join(traindir, 'dir_'+str((int(i)-1)//80), n))
                for (f, n) in imagepaths
                for i in re.findall('(\d{4})', n)]
  for (f, fn) in imagepaths:
    os.rename(f, fn)
| 29.384615 | 80 | 0.639398 |
8353a6a0442ab4863b198da50889478a69bf8c77 | 471 | py | Python | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_mandalorian_helmet.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_mandalorian_helmet.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_mandalorian_helmet.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the intangible draft-schematic template for the Mandalorian helmet.

	Invoked by the swgpy template loader; *kernel* is unused in this template.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/clothing/shared_clothing_armor_mandalorian_helmet.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
ed5de3615ac11bcd32477b8711511b6158f658a5 | 6,280 | py | Python | python/led.py | Shyamranny/audio-reactive-led-strip | 8e0cc0ee9737563ea3ad09897b7017da9aa75a16 | [
"MIT"
] | null | null | null | python/led.py | Shyamranny/audio-reactive-led-strip | 8e0cc0ee9737563ea3ad09897b7017da9aa75a16 | [
"MIT"
] | null | null | null | python/led.py | Shyamranny/audio-reactive-led-strip | 8e0cc0ee9737563ea3ad09897b7017da9aa75a16 | [
"MIT"
] | null | null | null | from __future__ import print_function
from __future__ import division
import platform
import numpy as np
import config
# ESP8266 uses WiFi communication
if config.DEVICE == 'esp8266':
    import socket
    _sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Raspberry Pi controls the LED strip directly
elif config.DEVICE == 'pi':
    from rpi_ws281x import *
    # LED strip configuration:
    # NOTE(review): LED_COUNT is hard-coded to 16 while the update functions
    # below iterate over config.N_PIXELS -- confirm these agree.
    LED_COUNT = 16 # Number of LED pixels.
    LED_PIN = 18 # GPIO pin connected to the pixels (18 uses PWM!).
    #LED_PIN = 10 # GPIO pin connected to the pixels (10 uses SPI /dev/spidev0.0).
    LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)
    LED_DMA = 10 # DMA channel to use for generating signal (try 10)
    LED_BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
    LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)
    LED_CHANNEL = 0 # set to '1' for GPIOs 13, 19, 41, 45 or 53
    strip = Adafruit_NeoPixel(LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT, LED_BRIGHTNESS, LED_CHANNEL)
    strip.begin()
elif config.DEVICE == 'blinkstick':
    from blinkstick import blinkstick
    import signal
    import sys
    # Will turn all leds off when invoked (installed for SIGTERM/SIGINT below).
    def signal_handler(signal, frame):
        all_off = [0]*(config.N_PIXELS*3)
        stick.set_led_data(0, all_off)
        sys.exit(0)
    stick = blinkstick.find_first()
    # Create a listener that turns the leds off when the program terminates
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
_gamma = np.load(config.GAMMA_TABLE_PATH)
"""Gamma lookup table used for nonlinear brightness correction"""
# Initialized to 253 so the first update() call sees every pixel as "changed".
_prev_pixels = np.tile(253, (3, config.N_PIXELS))
"""Pixel values that were most recently displayed on the LED strip"""
pixels = np.tile(1, (3, config.N_PIXELS))
"""Pixel values for the LED strip"""
# Python 2 builds the ESP8266 packet as a str; Python 3 as a bytes object.
_is_python_2 = int(platform.python_version_tuple()[0]) == 2
def _update_esp8266():
    """Sends UDP packets to ESP8266 to update LED strip values
    The ESP8266 will receive and decode the packets to determine what values
    to display on the LED strip. The communication protocol supports LED strips
    with a maximum of 256 LEDs.
    The packet encoding scheme is:
        |i|r|g|b|
    where
        i (0 to 255): Index of LED to change (zero-based)
        r (0 to 255): Red value of LED
        g (0 to 255): Green value of LED
        b (0 to 255): Blue value of LED
    """
    global pixels, _prev_pixels
    # Truncate values and cast to integer
    pixels = np.clip(pixels, 0, 255).astype(int)
    # Optionally apply gamma correction
    p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
    # 126 pixels * 4 bytes = 504 bytes per datagram, keeping packets small.
    MAX_PIXELS_PER_PACKET = 126
    # Pixel indices
    idx = range(pixels.shape[1])
    # Only send pixels whose value changed since the last update.
    idx = [i for i in idx if not np.array_equal(p[:, i], _prev_pixels[:, i])]
    n_packets = len(idx) // MAX_PIXELS_PER_PACKET + 1
    idx = np.array_split(idx, n_packets)
    for packet_indices in idx:
        m = '' if _is_python_2 else []
        for i in packet_indices:
            if _is_python_2:
                m += chr(i) + chr(p[0][i]) + chr(p[1][i]) + chr(p[2][i])
            else:
                m.append(i) # Index of pixel to change
                m.append(p[0][i]) # Pixel red value
                m.append(p[1][i]) # Pixel green value
                m.append(p[2][i]) # Pixel blue value
        m = m if _is_python_2 else bytes(m)
        _sock.sendto(m, (config.UDP_IP, config.UDP_PORT))
    _prev_pixels = np.copy(p)
def _update_pi():
    """Writes new LED values to the Raspberry Pi's LED strip
    Raspberry Pi uses the rpi_ws281x to control the LED strip directly.
    This function updates the LED strip with new values.
    """
    global pixels, _prev_pixels
    # Truncate values and cast to integer
    pixels = np.clip(pixels, 0, 255).astype(int)
    # Optional gamma correction
    p = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
    # Encode 24-bit LED values in 32 bit integers
    # NOTE(review): green occupies bits 16-23 and red bits 8-15, i.e. GRB
    # word order -- presumably matching the WS281x strip's channel order.
    r = np.left_shift(p[0][:].astype(int), 8)
    g = np.left_shift(p[1][:].astype(int), 16)
    b = p[2][:].astype(int)
    rgb = np.bitwise_or(np.bitwise_or(r, g), b)
    # Update the pixels
    for i in range(config.N_PIXELS):
        # Ignore pixels if they haven't changed (saves bandwidth)
        if np.array_equal(p[:, i], _prev_pixels[:, i]):
            continue
        # Writes into the library's private buffer; int() avoids passing a
        # numpy scalar to the underlying C API.
        #strip._led_data[i] = rgb[i]
        strip._led_data[i] = int(rgb[i])
    _prev_pixels = np.copy(p)
    strip.show()
def _update_blinkstick():
    """Push the current pixel buffer to the BlinkStick.

    The device expects one flat byte list in GRB channel order:
    [g0, r0, b0, g1, r1, b1, ...].
    """
    global pixels
    # Clamp to the valid byte range and drop any fractional part.
    pixels = np.clip(pixels, 0, 255).astype(int)
    # Apply gamma correction when enabled in the config.
    corrected = _gamma[pixels] if config.SOFTWARE_GAMMA_CORRECTION else np.copy(pixels)
    red = corrected[0][:].astype(int)
    green = corrected[1][:].astype(int)
    blue = corrected[2][:].astype(int)
    # Interleave the channels into the flat GRB frame the device wants.
    frame = []
    for i in range(config.N_PIXELS):
        frame.extend((green[i], red[i], blue[i]))
    frame = frame
    stick.set_led_data(0, frame)
def update():
    """Dispatch to the transmit routine for the configured LED device."""
    dispatch = {
        'esp8266': _update_esp8266,
        'pi': _update_pi,
        'blinkstick': _update_blinkstick,
    }
    handler = dispatch.get(config.DEVICE)
    if handler is None:
        raise ValueError('Invalid device selected')
    handler()
# Execute this file to run a LED strand test
# If everything is working, you should see a red, green, and blue pixel scroll
# across the LED strip continuously
if __name__ == '__main__':
    import time
    # Turn all pixels off
    pixels *= 0
    pixels[0, 0] = 255  # Set 1st pixel red
    pixels[1, 1] = 255  # Set 2nd pixel green
    pixels[2, 2] = 255  # Set 3rd pixel blue
    print('Starting LED strand test')
    # Shift the three lit pixels one position per frame, ~10 fps.
    while True:
        pixels = np.roll(pixels, 1, axis=1)
        update()
        time.sleep(.1)
| 35.885714 | 112 | 0.645382 |
fca4787d63d5c744297f12e8eaf44573826eecbb | 1,990 | py | Python | controllers/notes/NewNote.py | heminsatya/free_notes | 88272a34c48e60d1a82e28b0b2d56883fa724bb3 | [
"MIT"
] | null | null | null | controllers/notes/NewNote.py | heminsatya/free_notes | 88272a34c48e60d1a82e28b0b2d56883fa724bb3 | [
"MIT"
] | null | null | null | controllers/notes/NewNote.py | heminsatya/free_notes | 88272a34c48e60d1a82e28b0b2d56883fa724bb3 | [
"MIT"
] | null | null | null | # Dependencies
from aurora import Controller, View, Forms
from models import Users, Notes
from aurora.security import login_required, get_session
from flask import request
from datetime import datetime
# The controller class
class NewNote(Controller):
    """Controller for creating a new note for the logged-in user.

    GET renders the note-creation page; POST validates the submitted form
    and inserts the note. Both endpoints require a 'users' session.
    """
    # POST Method
    @login_required(app='users')
    def post(self):
        """Create a note from the submitted form; returns (JSON dict, status)."""
        # The required models
        user = Users().read(where={'username':get_session('user')}).first()
        notes = Notes()
        # Form data
        data = request.form
        form = Forms(data)
        # Valid form data
        if form.validate():
            # Collect form inputs
            title = data.get('title')
            content = data.get('content')
            # Required fields
            if not title or not content:
                return {
                    'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
                }, 400
            # Everything is fine
            # Insert new note into the database
            data = {
                'user_id': user['id'],
                'title': title,
                'content': content,
                # 'date': datetime.now().strftime("%m-%d-%Y")
            }
            notes.create(data=data)
            # Return the result
            return {
                'success': '<i class="fas fa-check-circle mr-1"></i> The new note created successfully!',
            }, 200
        # Invalid form data
        else:
            # Return the result
            return {
                'error': '<i class="fas fa-exclamation-triangle mr-1"></i> Form data is invalid!',
            }, 400
    # GET Method
    @login_required(app='users')
    def get(self):
        """Render the note-creation view for the current user."""
        # The required models
        user = Users().read(where={'username':get_session('user')}).first()
        # NOTE(review): 'notes' is fetched but never passed to the view below
        # -- confirm whether the template needs it or the read can be dropped.
        notes = Notes().read(where={'user_id':user['id']}, order_by={'id':'DESC'}).all()
        form = Forms()
        return View('create', user=user, form=form)
| 29.264706 | 105 | 0.529648 |
847463f3dfa7a0310a42f9aa0f1b7e033b2cbd6b | 3,387 | bzl | Python | project/workspace_defs.bzl | googleinterns/dagger-query | 3c5fa44e1cb66f2369929e64920fc765af59fb11 | [
"Apache-2.0"
] | 4 | 2020-07-22T13:03:39.000Z | 2021-09-14T11:38:41.000Z | project/workspace_defs.bzl | googleinterns/dagger-query | 3c5fa44e1cb66f2369929e64920fc765af59fb11 | [
"Apache-2.0"
] | 17 | 2020-07-30T13:15:38.000Z | 2020-09-25T12:45:30.000Z | project/workspace_defs.bzl | googleinterns/dagger-query | 3c5fa44e1cb66f2369929e64920fc765af59fb11 | [
"Apache-2.0"
] | 1 | 2020-07-14T10:00:32.000Z | 2020-07-14T10:00:32.000Z | # Copyright (C) 2018 The Google Bazel Common Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A WORKSPACE macro for Google open-source libraries to use"""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
# Mirror base URLs tried in order when fetching a Maven jar.
_MAVEN_MIRRORS = [
    "https://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/",
    "https://repo1.maven.org/maven2/",
]
def _maven_import(artifact, sha256, licenses, **kwargs):
    """Declare a java_import_external repo for one 'group:artifact:version' coordinate.

    The repository name is derived from the group and artifact ids with '.'
    and '-' mapped to '_', and the jar URL is built per the standard Maven
    repository layout against each mirror in _MAVEN_MIRRORS.
    """
    parts = artifact.split(":")
    group_id = parts[0]
    artifact_id = parts[1]
    version = parts[2]
    name = ("%s_%s" % (group_id, artifact_id)).replace(".", "_").replace("-", "_")
    url_suffix = "{0}/{1}/{2}/{1}-{2}.jar".format(group_id.replace(".", "/"), artifact_id, version)
    java_import_external(
        name = name,
        jar_urls = [base + url_suffix for base in _MAVEN_MIRRORS],
        jar_sha256 = sha256,
        licenses = licenses,
        rule_load = """load("@rules_java//java:defs.bzl", "java_import")""",
        # Lets tooling recover the original Maven coordinate.
        tags = ["maven_coordinates=" + artifact],
        **kwargs
    )
def google_common_workspace_rules():
    """Defines WORKSPACE rules for Google open-source libraries.
    Call this once at the top of your WORKSPACE file to load all of the repositories. Note that you
    should not refer to these repositories directly and instead prefer to use the targets defined in
    //third_party.
    """
    # Import AutoService
    _maven_import(
        artifact = "com.google.auto:auto-common:0.10",
        licenses = ["notice"],
        sha256 = "b876b5fddaceeba7d359667f6c4fb8c6f8658da1ab902ffb79ec9a415deede5f",
    )
    _maven_import(
        artifact = "com.google.auto.service:auto-service:1.0-rc4",
        licenses = ["notice"],
        sha256 = "e422d49c312fd2031222e7306e8108c1b4118eb9c049f1b51eca280bed87e924",
    )
    # Import Protocol Buffers
    # NOTE(review): the Java jar is pinned at 3.12.0 while the source archives
    # below fetch 3.12.4 -- confirm the version skew is intentional.
    _maven_import(
        artifact = "com.google.protobuf:protobuf-java:3.12.0",
        licenses = ["notice"],
        sha256 = "a98ed5a0272cdda6bde98fe15e794e3d99ec04554dba0ba8e0a49ff5cecc5e9e",
    )
    # Both repo names point at the same protobuf source archive.
    for protobuf_repo in ("com_google_protobuf", "com_google_protobuf_java"):
        http_archive(
            name = protobuf_repo,
            sha256 = "c5dc4cacbb303d5d0aa20c5cbb5cb88ef82ac61641c951cdf6b8e054184c5e22",
            strip_prefix = "protobuf-3.12.4",
            urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.12.4.zip"],
        )
    # Guava
    _maven_import(
        artifact = "com.google.guava:guava:27.1-jre",
        licenses = ["notice"],
        sha256 = "4a5aa70cc968a4d137e599ad37553e5cfeed2265e8c193476d7119036c536fe7",
    )
    # JUnit 4
    _maven_import(
        artifact = "junit:junit:4.13",
        licenses = ["notice"],
        sha256 = "4b8532f63bdc0e0661507f947eb324a954d1dbac631ad19c8aa9a00feed1d863",
    )
| 35.28125 | 100 | 0.678477 |
b294393fd551ddde417f5681caa78fb5ec91cd71 | 312 | py | Python | snippets - machine learning sklearn/evaluating-classification-metrics.py | bjfisica/MachineLearning | 20349301ae7f82cd5048410b0cf1f7a5f7d7e5a2 | [
"MIT"
] | 52 | 2019-02-15T16:37:13.000Z | 2022-02-17T18:34:30.000Z | snippets - machine learning sklearn/evaluating-classification-metrics.py | RodeoBlues/Complete-Data-Science-Toolkits | c5e83889e24af825ec3baed6e8198debb135f1ff | [
"MIT"
] | null | null | null | snippets - machine learning sklearn/evaluating-classification-metrics.py | RodeoBlues/Complete-Data-Science-Toolkits | c5e83889e24af825ec3baed6e8198debb135f1ff | [
"MIT"
] | 22 | 2019-02-25T23:52:09.000Z | 2021-09-21T03:09:35.000Z | #accuracy score
# NOTE(review): this snippet assumes `y_test` (true labels) and `y_pred`
# (predicted labels) are already defined in the surrounding scope.
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# classification report: per-class precision, recall, F1 and support
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# confusion matrix (sklearn convention: rows = true, columns = predicted)
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))
a105b88723330ddc75ba26c37c6a449d4c4a6599 | 358 | bzl | Python | bazel/data_view_cc.bzl | chokobole/data_view | db482b10dce44e4a775d45835ed689c77bc440ea | [
"BSD-3-Clause"
] | 2 | 2019-12-28T05:49:07.000Z | 2021-05-09T10:05:12.000Z | bazel/data_view_cc.bzl | chokobole/data_view | db482b10dce44e4a775d45835ed689c77bc440ea | [
"BSD-3-Clause"
] | null | null | null | bazel/data_view_cc.bzl | chokobole/data_view | db482b10dce44e4a775d45835ed689c77bc440ea | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2019 The DataView Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def data_view_copts():
    """Return per-platform compiler flags selecting C++14.

    MSVC spells the flag /std:c++14 on Windows; every other toolchain gets
    the GCC/Clang form -std=c++14.
    """
    return select({
        "@data_view//:windows": [
            "/std:c++14",
        ],
        "//conditions:default": [
            "-std=c++14",
        ],
    })
da04cab5bb973f2d8f5fe582c00bcb80b1d9be65 | 293 | py | Python | main.py | YunzheZJU/CardinalSpline | bb54251a8c4558baec0b467b36313247d463071f | [
"MIT"
] | null | null | null | main.py | YunzheZJU/CardinalSpline | bb54251a8c4558baec0b467b36313247d463071f | [
"MIT"
] | null | null | null | main.py | YunzheZJU/CardinalSpline | bb54251a8c4558baec0b467b36313247d463071f | [
"MIT"
] | 1 | 2021-02-21T00:46:05.000Z | 2021-02-21T00:46:05.000Z | # -*- coding: utf-8 -*-
from flask import Flask, request, session, g, redirect, url_for, abort, render_template
app = Flask(__name__)
# Single GET route serving the spline demo template.
@app.route('/', methods=['GET'])
def demo():
    """Render the Cardinal-spline demo page (templates/Spline.html)."""
    return render_template('Spline.html')
if __name__ == '__main__':
    # Development server only; binds to localhost on port 8080.
    app.run(host='127.0.0.1', port=8080)
| 22.538462 | 87 | 0.662116 |
a8672fff0ecdf0b91231b34ba6ebfcc3ec4448b1 | 6,761 | py | Python | graphzoo/manifolds/hyperboloid.py | oom-debugger/GraphZoo-1 | 7ef1184c0016090597e56b8706a87539a3f62fd6 | [
"MIT"
] | 2 | 2022-03-30T01:11:39.000Z | 2022-03-30T11:08:12.000Z | graphzoo/manifolds/hyperboloid.py | oom-debugger/GraphZoo-1 | 7ef1184c0016090597e56b8706a87539a3f62fd6 | [
"MIT"
] | null | null | null | graphzoo/manifolds/hyperboloid.py | oom-debugger/GraphZoo-1 | 7ef1184c0016090597e56b8706a87539a3f62fd6 | [
"MIT"
] | 2 | 2022-01-27T21:03:40.000Z | 2022-03-15T20:20:12.000Z | """Hyperboloid manifold"""
import torch
from graphzoo.manifolds.base import Manifold
from graphzoo.utils.math_utils import arcosh, cosh, sinh
from graphzoo.utils.train_utils import broadcast_shapes
from graphzoo.manifolds.poincare import PoincareBall
class Hyperboloid(Manifold):
    """
    Hyperboloid manifold class
    We use the following convention: -x0^2 + x1^2 + ... + xd^2 = -K
    c = 1 / K is the hyperbolic curvature
    """
    def __init__(self):
        super(Hyperboloid, self).__init__()
        self.name = 'Hyperboloid'
        # Per-dtype epsilon keeping sqrt/arcosh arguments inside their domain.
        self.eps = {torch.float32: 1e-7, torch.float64: 1e-15}
        self.min_norm = 1e-15
        self.max_norm = 1e6
    def minkowski_dot(self, x, y, keepdim=True):
        """Minkowski inner product <x, y> = -x0*y0 + sum_i xi*yi (over last dim)."""
        # Euclidean dot minus 2*x0*y0 flips the sign of the time-like term.
        res = torch.sum(x * y, dim=-1) - 2 * x[..., 0] * y[..., 0]
        if keepdim:
            res = res.view(res.shape + (1,))
        return res
    def minkowski_norm(self, u, keepdim=True):
        """sqrt of the (clamped) Minkowski self-inner-product of *u*."""
        dot = self.minkowski_dot(u, u, keepdim=keepdim)
        return torch.sqrt(torch.clamp(dot, min=self.eps[u.dtype]))
    def sqdist(self, x, y, c):
        """Squared geodesic distance between points x, y on the K=1/c hyperboloid."""
        K = 1. / c
        prod = self.minkowski_dot(x, y)
        # -<x,y>/K >= 1 for on-manifold points; clamp guards arcosh's domain.
        theta = torch.clamp(-prod / K, min=1.0 + self.eps[x.dtype])
        sqdist = K * arcosh(theta) ** 2
        # clamp distance to avoid nans in Fermi-Dirac decoder
        return torch.clamp(sqdist, max=50.0)
    def proj(self, x, c):
        """Project *x* onto the hyperboloid by recomputing its time component x0."""
        # NOTE(review): norms use dim=1, so this assumes 2-D (batch, dim) input.
        K = 1. / c
        d = x.size(-1) - 1
        y = x.narrow(-1, 1, d)
        y_sqnorm = torch.norm(y, p=2, dim=1, keepdim=True) ** 2
        mask = torch.ones_like(x)
        mask[:, 0] = 0
        vals = torch.zeros_like(x)
        # x0 = sqrt(K + |y|^2) restores -x0^2 + |y|^2 = -K.
        vals[:, 0:1] = torch.sqrt(torch.clamp(K + y_sqnorm, min=self.eps[x.dtype]))
        return vals + mask * x
    def proj_tan(self, u, x, c):
        """Project *u* onto the tangent space of the hyperboloid at *x*."""
        K = 1. / c
        d = x.size(1) - 1
        ux = torch.sum(x.narrow(-1, 1, d) * u.narrow(-1, 1, d), dim=1, keepdim=True)
        mask = torch.ones_like(u)
        mask[:, 0] = 0
        vals = torch.zeros_like(u)
        # Choose u0 so that <x, u>_Minkowski = 0.
        vals[:, 0:1] = ux / torch.clamp(x[:, 0:1], min=self.eps[x.dtype])
        return vals + mask * u
    def proj_tan0(self, u, c):
        """Project *u* onto the tangent space at the origin (zero the time component)."""
        narrowed = u.narrow(-1, 0, 1)
        vals = torch.zeros_like(u)
        vals[:, 0:1] = narrowed
        return u - vals
    def expmap(self, u, x, c):
        """Exponential map: move from *x* along tangent vector *u*."""
        K = 1. / c
        sqrtK = K ** 0.5
        normu = self.minkowski_norm(u)
        normu = torch.clamp(normu, max=self.max_norm)
        theta = normu / sqrtK
        theta = torch.clamp(theta, min=self.min_norm)
        result = cosh(theta) * x + sinh(theta) * u / theta
        # Re-project to correct numerical drift off the manifold.
        return self.proj(result, c)
    def logmap(self, x, y, c):
        """Logarithmic map: tangent vector at *x* pointing toward *y*."""
        K = 1. / c
        xy = torch.clamp(self.minkowski_dot(x, y) + K, max=-self.eps[x.dtype]) - K
        u = y + xy * x * c
        normu = self.minkowski_norm(u)
        normu = torch.clamp(normu, min=self.min_norm)
        dist = self.sqdist(x, y, c) ** 0.5
        # Scale the direction u to geodesic length dist.
        result = dist * u / normu
        return self.proj_tan(result, x, c)
    def expmap0(self, u, c):
        """Exponential map from the hyperboloid origin (0-th basis point)."""
        K = 1. / c
        sqrtK = K ** 0.5
        d = u.size(-1) - 1
        x = u.narrow(-1, 1, d).view(-1, d)
        x_norm = torch.norm(x, p=2, dim=1, keepdim=True)
        x_norm = torch.clamp(x_norm, min=self.min_norm)
        theta = x_norm / sqrtK
        res = torch.ones_like(u)
        res[:, 0:1] = sqrtK * cosh(theta)
        res[:, 1:] = sqrtK * sinh(theta) * x / x_norm
        return self.proj(res, c)
    def logmap0(self, x, c):
        """Logarithmic map of *x* at the hyperboloid origin."""
        K = 1. / c
        sqrtK = K ** 0.5
        d = x.size(-1) - 1
        y = x.narrow(-1, 1, d).view(-1, d)
        y_norm = torch.norm(y, p=2, dim=1, keepdim=True)
        y_norm = torch.clamp(y_norm, min=self.min_norm)
        res = torch.zeros_like(x)
        theta = torch.clamp(x[:, 0:1] / sqrtK, min=1.0 + self.eps[x.dtype])
        res[:, 1:] = sqrtK * arcosh(theta) * y / y_norm
        return res
    def mobius_add(self, x, y, c):
        """Hyperbolic 'addition': transport y's origin-log to x, then exp at x."""
        u = self.logmap0(y, c)
        v = self.ptransp0(x, u, c)
        return self.expmap(v, x, c)
    def mobius_matvec(self, m, x, c):
        """Hyperbolic matrix-vector product: apply *m* in the tangent space at the origin."""
        u = self.logmap0(x, c)
        mu = u @ m.transpose(-1, -2)
        return self.expmap0(mu, c)
    def ptransp(self, x, y, u, c):
        """Parallel-transport tangent vector *u* from *x* to *y*."""
        logxy = self.logmap(x, y, c)
        logyx = self.logmap(y, x, c)
        sqdist = torch.clamp(self.sqdist(x, y, c), min=self.min_norm)
        alpha = self.minkowski_dot(logxy, u) / sqdist
        res = u - alpha * (logxy + logyx)
        return self.proj_tan(res, y, c)
    def ptransp0(self, x, u, c):
        """Parallel-transport tangent vector *u* from the origin to *x* (closed form)."""
        K = 1. / c
        sqrtK = K ** 0.5
        x0 = x.narrow(-1, 0, 1)
        d = x.size(-1) - 1
        y = x.narrow(-1, 1, d)
        y_norm = torch.clamp(torch.norm(y, p=2, dim=1, keepdim=True), min=self.min_norm)
        y_normalized = y / y_norm
        v = torch.ones_like(x)
        v[:, 0:1] = - y_norm
        v[:, 1:] = (sqrtK - x0) * y_normalized
        alpha = torch.sum(y_normalized * u[:, 1:], dim=1, keepdim=True) / sqrtK
        res = u - alpha * v
        return self.proj_tan(res, x, c)
    def to_poincare(self, x, c):
        """Map hyperboloid point *x* to the Poincare ball (stereographic projection)."""
        K = 1. / c
        sqrtK = K ** 0.5
        d = x.size(-1) - 1
        return sqrtK * x.narrow(-1, 1, d) / (x[:, 0:1] + sqrtK)
    def retr(self, x: torch.Tensor, u: torch.Tensor) -> torch.Tensor:
        """First-order retraction: simple Euclidean step (no re-projection here)."""
        return x + u
    def transp(self, x: torch.Tensor, y: torch.Tensor, v: torch.Tensor) -> torch.Tensor:
        """Vector transport by plain broadcasting (identity transport)."""
        target_shape = broadcast_shapes(x.shape, y.shape, v.shape)
        return v.expand(target_shape)
    def concat(self, v, c):
        """
        Note that the output dimension is (input_dim-1) * n + 1
        """
        # Concatenate via the Poincare ball, then map back to the hyperboloid.
        p = PoincareBall().from_hyperboloid(v, c)
        p = PoincareBall().concat(p)
        return Hyperboloid().from_poincare(p, c)
    def from_poincare(self, x, c=1, ideal=False):
        """Convert from Poincare ball model to hyperboloid model.
        Note: converting a point from poincare ball to hyperbolic is
        reversible, i.e. p == to_poincare(from_poincare(p)).
        Args:
            x: torch.tensor of shape (..., dim)
            ideal: boolean. Should be True if the input vectors are ideal points, False otherwise
        Returns:
            torch.tensor of shape (..., dim+1)
        To do:
            Add some capping to make things numerically stable. This is only needed in the case ideal == False
        """
        if ideal:
            # Ideal (boundary) points get a unit time component prepended.
            t = torch.ones(x.shape[:-1], device=x.device).unsqueeze(-1)
            return torch.cat((t, x), dim=-1)
        else:
            K = 1./ c
            sqrtK = K ** 0.5
            eucl_squared_norm = (x * x).sum(dim=-1, keepdim=True)
            return sqrtK * torch.cat((K + eucl_squared_norm, 2 * sqrtK * x), dim=-1) / (K - eucl_squared_norm).clamp_min(self.min_norm)
25f96612a7c806c1083eef1b2122623c706f4abe | 419 | py | Python | ex7.py | cohadar/learn-python-the-hard-way | 10d88fe59a8abc5303661cfe91c6db9fa71bdd56 | [
"MIT"
] | null | null | null | ex7.py | cohadar/learn-python-the-hard-way | 10d88fe59a8abc5303661cfe91c6db9fa71bdd56 | [
"MIT"
] | null | null | null | ex7.py | cohadar/learn-python-the-hard-way | 10d88fe59a8abc5303661cfe91c6db9fa71bdd56 | [
"MIT"
] | null | null | null | print "Oh I disagree, I shall skip some"
print "But not all"
print "XY" * 10 # seems nice but might be used once in 5 years
end1 = "C"
end2 = "h"
end3 = "e"
end4 = "e"
end5 = "s"
end6 = "e"
end7 = "B"
end8 = "u"
end9 = "r"
endA = "g"
endB = "e"
endC = "r"
# comma at the end, seriously how many obscure syntax do we need
print end1 + end2 + end3 + end4 + end5 + end6,
print end7 + end8 + end9 + endA + endB + endC
| 17.458333 | 64 | 0.610979 |
001dbcb1c37db3dea65d883cc149943ca9543c10 | 657 | py | Python | tests/core/dataset/test_history.py | UCSFMemoryAndAging/macpie | 524ffe47c2270ef52299a893c6d28a9c21d96903 | [
"BSD-2-Clause"
] | null | null | null | tests/core/dataset/test_history.py | UCSFMemoryAndAging/macpie | 524ffe47c2270ef52299a893c6d28a9c21d96903 | [
"BSD-2-Clause"
] | null | null | null | tests/core/dataset/test_history.py | UCSFMemoryAndAging/macpie | 524ffe47c2270ef52299a893c6d28a9c21d96903 | [
"BSD-2-Clause"
] | null | null | null | from pathlib import Path
from macpie.core.dataset import LavaDataset
current_dir = Path(__file__).parent.absolute()  # fixture .xlsx files live beside this test
def test_history():
    """Each dataset operation should append exactly one history record, in order."""
    primary = LavaDataset.from_file(current_dir / "primary.xlsx")
    secondary = LavaDataset.from_file(current_dir / "secondary.xlsx")
    # Link secondary rows to the closest primary row (earlier or later, 90 days).
    secondary.date_proximity(
        primary,
        get='closest',
        when='earlier_or_later',
        days=90
    )
    secondary.group_by_keep_one()
    # Two operations executed -> two history entries, in execution order.
    assert len(secondary.history) == 2
    record1 = secondary.history[0]
    assert record1['func_name'] == 'date_proximity'
    record2 = secondary.history[1]
    assert record2['func_name'] == 'group_by_keep_one'
| 22.655172 | 69 | 0.69102 |
ad6b0a1bf1d60bf263f1275c90f1260def6bac6d | 1,352 | py | Python | gunicorn_conf.py | albert-lii/myzone | 401acb84320b717e59860d95ad4aa4742b762158 | [
"MIT"
] | null | null | null | gunicorn_conf.py | albert-lii/myzone | 401acb84320b717e59860d95ad4aa4742b762158 | [
"MIT"
] | 1 | 2021-03-31T19:51:33.000Z | 2021-03-31T19:51:33.000Z | gunicorn_conf.py | albert-lii/myzone | 401acb84320b717e59860d95ad4aa4742b762158 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
:author: Albert Li
:copyright: © 2019 Albert Li
:time: 2019/10/26 10:13
"""
import os
import multiprocessing
path_of_current_file = os.path.abspath(__file__)  # absolute path of this config file
path_of_current_dir = os.path.split(path_of_current_file)[0]
chdir = path_of_current_dir  # project root; gunicorn chdirs here before loading the app
bind = "127.0.0.1:5000"  # IP address and port to bind
backlog = 512  # max pending client connections; clients beyond this error out (typical range 64-2048)
workers = multiprocessing.cpu_count() * 2 + 1  # worker process count; spreads across CPUs for simple parallelism
worker_class = "gevent"  # worker type (async gevent workers)
worker_connections = 500  # max simultaneous clients (default 1000)
# threads = 2  # threads per worker; only meaningful with the 'gthread' worker class
timeout = 60  # request timeout (seconds); a worker silent longer than this is killed and respawned
spew = False  # trace every statement the server executes; all-or-nothing switch, default False
reload = True  # restart workers whenever code changes; intended for development, default False
daemon = True  # run detached as a daemon, default False
debug = True  # NOTE(review): 'debug' is not a standard gunicorn setting in recent versions -- confirm
pidfile = "%s/gunicorn.pid" % path_of_current_dir  # pid file name; no pid file is written if unset
accesslog = (
    "%s/logs/gunicorn_access.log" % path_of_current_dir
)  # access log path; the logs/ directory must already exist or gunicorn fails to start
accesslog_format = (
    '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'  # access log line format
)
loglevel = "info"  # error-log verbosity (the access log level is not configurable)
errorlog = "%s/logs/gunicorn_error.log" % path_of_current_dir  # error log path; may match the access log
| 36.540541 | 83 | 0.724852 |
61a55261d7ae187a4926773ca0b7ffd5df1ba5cb | 4,115 | py | Python | arcesetc/tests/test_utils.py | bmorris3/arcesetc | 514504715ba7a1dad59cc54cf89f649d8ff10718 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2019-03-27T20:00:42.000Z | 2021-12-17T20:59:33.000Z | arcesetc/tests/test_utils.py | bmorris3/arcesetc | 514504715ba7a1dad59cc54cf89f649d8ff10718 | [
"MIT",
"BSD-3-Clause"
] | 10 | 2018-12-21T19:18:40.000Z | 2020-10-29T19:52:08.000Z | arcesetc/tests/test_utils.py | bmorris3/arcesetc | 514504715ba7a1dad59cc54cf89f649d8ff10718 | [
"MIT",
"BSD-3-Clause"
] | 2 | 2019-01-31T20:08:14.000Z | 2019-01-31T20:22:09.000Z | import os
import astropy.units as u
import numpy as np
import pytest
from astropy.io import fits
from .legacy_specutils import readspec
from ..util import (reconstruct_order, closest_sptype, archive, scale_flux,
signal_to_noise_to_exp_time)
path = os.path.dirname(__file__)
# NOTE(review): the trailing comma in "order," is harmless to pytest but
# looks accidental -- consider "order" for consistency with the tests below.
@pytest.mark.parametrize("order,", [30, 35, 41, 60, 65, 70, 75, 80, 90])
def test_reconstruct_order_B3V(order):
    """
    End-to-end functional test on several well-behaved orders of an early-type
    star.
    """
    fits_path = os.path.join(path, os.pardir, 'data',
                             'HR5191.0002.wfrmcpc.fits')
    b3v = readspec.read_fits_spectrum1d(fits_path)
    header = fits.getheader(fits_path)
    # Reconstruct the order for a star with the same V mag as the template
    wave, flux, sp_type, exp_time = reconstruct_order('B3V',
                                                      b3v[order].wavelength.mean(),
                                                      1.86,
                                                      exp_time=header['EXPTIME']*u.s)
    # Interpolate the reconstruction onto the template's wavelength grid
    # before comparing; loose tolerances absorb reconstruction noise.
    interp_flux = np.interp(b3v[order].wavelength, wave, flux)
    np.testing.assert_allclose(b3v[order].flux.value, interp_flux, atol=500, rtol=1e-1)
    assert sp_type == 'B3V'
@pytest.mark.parametrize("order", [30, 35, 41, 60, 65, 70, 75, 80, 90])
def test_reconstruct_order_white_dwarf(order):
    """
    End-to-end functional test on several well-behaved orders of a white dwarf
    """
    fits_path = os.path.join(path, os.pardir, 'data',
                             'BD28_4211.0026.wfrmcpc.fits')
    wd = readspec.read_fits_spectrum1d(fits_path)
    header = fits.getheader(fits_path)
    # Reconstruct the order for a star with the same V mag as the template
    wave, flux, sp_type, exp_time = reconstruct_order('sdO2VIIIHe5',
                                                      wd[order].wavelength.mean(),
                                                      10.58,
                                                      exp_time=header['EXPTIME']*u.s)
    # Compare on the template grid; tighter rtol than the B3V test above.
    interp_flux = np.interp(wd[order].wavelength, wave, flux)
    np.testing.assert_allclose(wd[order].flux.value, interp_flux, atol=500, rtol=0.05)
    assert sp_type == 'sdO2VIIIHe5'
# Same check against a second white-dwarf observation; order 70 and 90 are
# omitted from the parameter list here (presumably poorly behaved for this
# target -- confirm).
@pytest.mark.parametrize("order", [30, 35, 41, 60, 65, 75, 80])
def test_reconstruct_order_white_dwarf_2(order):
    """
    End-to-end functional test on several well-behaved orders of a white dwarf
    """
    fits_path = os.path.join(path, os.pardir, 'data',
                             'HIP107864.0003.wfrmcpc.fits')
    wd = readspec.read_fits_spectrum1d(fits_path)
    header = fits.getheader(fits_path)
    # Reconstruct the order for a star with the same V mag as the template
    wave, flux, sp_type, exp_time = reconstruct_order('sdO2VIIIHe5',
                                                      wd[order].wavelength.mean(),
                                                      10.58,
                                                      exp_time=header['EXPTIME']*u.s)
    interp_flux = np.interp(wd[order].wavelength, wave, flux)
    np.testing.assert_allclose(wd[order].flux.value, interp_flux, atol=500, rtol=0.2)
    assert sp_type == 'sdO2VIIIHe5'
def test_closest_sptype():
    """Test the function that finds closest available sptype"""
    # Requested subtypes absent from the archive snap to the nearest present one.
    assert closest_sptype('G4V') == 'G5V'
    assert closest_sptype('B4V') == 'B3V'
def test_scale_flux():
    """
    Check that the flux scaling is working appropriately by inputting the actual
    magnitude of a particular star, show that it returns flux scaling == 1
    """
    # Using each template's own catalogued V magnitude must yield a unit scale.
    assert np.abs(scale_flux(archive['HR 3454'], V=4.3) - 1) < 1e-6
    assert np.abs(scale_flux(archive['HR5191'], V=1.86) - 1) < 1e-6
def test_sn_to_exptime():
    """
    Check that the plot-less function works appropriately.
    """
    # An M0V star at V=12: S/N of 30 near H-alpha (6562 Angstrom).
    sptype = 'M0V'
    wavelength = 6562 * u.Angstrom
    signal_to_noise = 30
    V = 12
    exp_time = signal_to_noise_to_exp_time(sptype, wavelength, V,
                                           signal_to_noise)
    # Regression value; guards against changes in the underlying archive/fit.
    assert np.abs(exp_time.to(u.s).value - 642.11444) < 1e-2
| 36.415929 | 87 | 0.595383 |
995bab68ecd55cef5b28aa2efd3fc2c8737ec48d | 1,014 | py | Python | openwater/tests/test_split.py | flowmatters/openwater | 8c48fc1694f54c2735a7ac451fcce56df498e520 | [
"MIT"
] | 1 | 2020-02-12T11:17:02.000Z | 2020-02-12T11:17:02.000Z | openwater/tests/test_split.py | flowmatters/openwater | 8c48fc1694f54c2735a7ac451fcce56df498e520 | [
"MIT"
] | null | null | null | openwater/tests/test_split.py | flowmatters/openwater | 8c48fc1694f54c2735a7ac451fcce56df498e520 | [
"MIT"
] | 1 | 2020-02-27T13:58:14.000Z | 2020-02-27T13:58:14.000Z |
from openwater.split import split_time_series
import numpy as np
def test_create_split_windows():
    """split_time_series honours explicit breakpoints and even splitting."""
    grp = {
        'DummyModel': {
            'inputs': np.zeros((1, 1, 10000))
        }
    }
    expected_windows = [(0, 100), (100, 1000), (1000, 5000), (5000, 10000)]
    # With or without the implicit 0/10000 endpoints, the same four
    # windows should come back.
    for breaks in ([100, 1000, 5000],
                   [0, 100, 1000, 5000],
                   [100, 1000, 5000, 10000],
                   [0, 100, 1000, 5000, 10000]):
        windows = split_time_series(grp, 10, breaks)
        assert len(windows) == 4
        for actual, expected in zip(windows, expected_windows):
            assert actual == expected
    # Degenerate break lists collapse to a single full-range window.
    for breaks in ([], [0], [10000], [0, 10000]):
        windows = split_time_series(grp, 11, breaks)
        assert len(windows) == 1
        assert windows[0] == (0, 10000)
    # No breaks supplied: split evenly into the requested number of windows.
    windows = split_time_series(grp, 11, None)
    assert len(windows) == 11
    assert windows[0] == (0, 909)
    assert windows[1] == (909, 1818)
    assert windows[10] == (9090, 10000)
bd59c0e033868b8a13a407ea787bbb9a65ce1431 | 15,570 | py | Python | oscar_ecomenv/Scripts/pildriver.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | oscar_ecomenv/Scripts/pildriver.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | oscar_ecomenv/Scripts/pildriver.py | PamilerinId/Ecommerce-Boiler | 1d706f88c8c828e86309793cb33ea102f385bf2f | [
"Apache-2.0"
] | null | null | null | #!c:\users\pi\documents\batcave\web\ecommerce\oscar_ecomenv\scripts\python.exe
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):

    verbose = 0

    def do_verbose(self):
        """usage: verbose <int:num>

        Set verbosity flag from top of stack.
        """
        self.verbose = int(self.do_pop())

    # The evaluation stack (internal only)
    # NOTE(review): this is a class attribute, so all PILDriver instances
    # share one stack.  The script only ever creates a single driver, so the
    # behavior is kept as-is.
    stack = []  # Stack of pending operations

    def push(self, item):
        "Push an argument onto the evaluation stack."
        self.stack.insert(0, item)

    def top(self):
        "Return the top-of-stack element."
        return self.stack[0]

    # Stack manipulation (callable)

    def do_clear(self):
        """usage: clear

        Clear the stack.
        """
        self.stack = []

    def do_pop(self):
        """usage: pop

        Discard the top element on the stack.
        """
        return self.stack.pop(0)

    def do_dup(self):
        """usage: dup

        Duplicate the top-of-stack item.
        """
        # Fixed: the original probed hasattr(self, 'format'), i.e. the driver
        # itself, so images were never really copied.  Probe the stack top:
        # images carry a `format` attribute and get a true copy so later
        # in-place edits don't alias; other items are shared.
        if hasattr(self.top(), 'format'):  # If it's an image, do a real copy
            dup = self.stack[0].copy()
        else:
            dup = self.stack[0]
        self.push(dup)

    def do_swap(self):
        """usage: swap

        Swap the top-of-stack item with the next one down.
        """
        self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]

    # Image module functions (callable)

    def do_new(self):
        """usage: new <int:xsize> <int:ysize> <int:color>:

        Create and push a greyscale image of given size and color.
        """
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        color = int(self.do_pop())
        self.push(Image.new("L", (xsize, ysize), color))

    def do_open(self):
        """usage: open <string:filename>

        Open the indicated image, read it, push the image on the stack.
        """
        self.push(Image.open(self.do_pop()))

    def do_blend(self):
        """usage: blend <image:pic1> <image:pic2> <float:alpha>

        Replace two images and an alpha with the blended image.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        alpha = float(self.do_pop())
        self.push(Image.blend(image1, image2, alpha))

    def do_composite(self):
        """usage: composite <image:pic1> <image:pic2> <image:mask>

        Replace two images and a mask with their composite.
        """
        image1 = self.do_pop()
        image2 = self.do_pop()
        mask = self.do_pop()
        self.push(Image.composite(image1, image2, mask))

    def do_merge(self):
        """usage: merge <string:mode> <image:pic1>
                  [<image:pic2> [<image:pic3> [<image:pic4>]]]

        Merge top-of stack images in a way described by the mode.
        """
        mode = self.do_pop()
        bandlist = []
        # Pop one band image per character of the mode string ("RGB" -> 3).
        for band in mode:
            bandlist.append(self.do_pop())
        self.push(Image.merge(mode, bandlist))

    # Image class methods

    def do_convert(self):
        """usage: convert <string:mode> <image:pic1>

        Convert the top image to the given mode.
        """
        mode = self.do_pop()
        image = self.do_pop()
        self.push(image.convert(mode))

    def do_copy(self):
        """usage: copy <image:pic1>

        Make and push a true copy of the top image.
        """
        # Fixed: the original called self.dup(), which doesn't exist
        # (the method is do_dup), raising AttributeError.
        self.do_dup()

    def do_crop(self):
        """usage: crop <int:left> <int:upper> <int:right> <int:lower>
                  <image:pic1>

        Crop and push a rectangular region from the current image.
        """
        left = int(self.do_pop())
        upper = int(self.do_pop())
        right = int(self.do_pop())
        lower = int(self.do_pop())
        image = self.do_pop()
        self.push(image.crop((left, upper, right, lower)))

    def do_draft(self):
        """usage: draft <string:mode> <int:xsize> <int:ysize> <image:pic1>

        Configure the loader for a given mode and size.
        """
        mode = self.do_pop()
        xsize = int(self.do_pop())
        ysize = int(self.do_pop())
        # Fixed: draft() is an Image method; the original called it on the
        # driver itself (self.draft), which doesn't exist.
        image = self.do_pop()
        image.draft(mode, (xsize, ysize))
        self.push(image)

    def do_filter(self):
        """usage: filter <string:filtername> <image:pic1>

        Process the top image with the given filter.
        """
        from PIL import ImageFilter
        imageFilter = getattr(ImageFilter, self.do_pop().upper())
        image = self.do_pop()
        self.push(image.filter(imageFilter))

    def do_getbbox(self):
        """usage: getbbox

        Push left, upper, right, and lower pixel coordinates of the top image.
        """
        # Pushed in reverse so they pop off in left, upper, right, lower order.
        bounding_box = self.do_pop().getbbox()
        self.push(bounding_box[3])
        self.push(bounding_box[2])
        self.push(bounding_box[1])
        self.push(bounding_box[0])

    def do_getextrema(self):
        """usage: getextrema

        Push minimum and maximum pixel values of the top image.
        """
        # Fixed: the Image method is getextrema(), not extrema().
        extrema = self.do_pop().getextrema()
        self.push(extrema[1])
        self.push(extrema[0])

    def do_offset(self):
        """usage: offset <int:xoffset> <int:yoffset> <image:pic1>

        Offset the pixels in the top image.
        """
        from PIL import ImageChops
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        image = self.do_pop()
        # Fixed: Image.offset() was removed from Pillow; ImageChops.offset
        # is the documented replacement.
        self.push(ImageChops.offset(image, xoff, yoff))

    def do_paste(self):
        """usage: paste <image:figure> <int:xoffset> <int:yoffset>
                  <image:ground>

        Paste figure image into ground with upper left at given offsets.
        """
        figure = self.do_pop()
        xoff = int(self.do_pop())
        yoff = int(self.do_pop())
        ground = self.do_pop()
        # RGBA figures are used as their own transparency mask.
        if figure.mode == "RGBA":
            ground.paste(figure, (xoff, yoff), figure)
        else:
            ground.paste(figure, (xoff, yoff))
        self.push(ground)

    def do_resize(self):
        """usage: resize <int:xsize> <int:ysize> <image:pic1>

        Resize the top image.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        image = self.do_pop()
        self.push(image.resize((xsize, ysize)))

    def do_rotate(self):
        """usage: rotate <int:angle> <image:pic1>

        Rotate image through a given angle
        """
        angle = int(self.do_pop())
        image = self.do_pop()
        self.push(image.rotate(angle))

    def do_save(self):
        """usage: save <string:filename> <image:pic1>

        Save image with default options.
        """
        filename = self.do_pop()
        image = self.do_pop()
        image.save(filename)

    def do_save2(self):
        """usage: save2 <string:filename> <string:options> <image:pic1>

        Save image with specified options.
        """
        filename = self.do_pop()
        options = self.do_pop()
        image = self.do_pop()
        image.save(filename, None, options)

    def do_show(self):
        """usage: show <image:pic1>

        Display and pop the top image.
        """
        self.do_pop().show()

    def do_thumbnail(self):
        """usage: thumbnail <int:xsize> <int:ysize> <image:pic1>

        Modify the top image in the stack to contain a thumbnail of itself.
        """
        ysize = int(self.do_pop())
        xsize = int(self.do_pop())
        self.top().thumbnail((xsize, ysize))

    def do_transpose(self):
        """usage: transpose <string:operator> <image:pic1>

        Transpose the top image.
        """
        transpose = self.do_pop().upper()
        image = self.do_pop()
        # Fixed: Image.transpose takes a module constant (e.g.
        # Image.FLIP_LEFT_RIGHT), not the operator's name as a string.
        self.push(image.transpose(getattr(Image, transpose)))

    # Image attributes

    def do_format(self):
        """usage: format <image:pic1>

        Push the format of the top image onto the stack.
        """
        self.push(self.do_pop().format)

    def do_mode(self):
        """usage: mode <image:pic1>

        Push the mode of the top image onto the stack.
        """
        self.push(self.do_pop().mode)

    def do_size(self):
        """usage: size <image:pic1>

        Push the image size on the stack as (y, x).
        """
        # Width is pushed first, so height ends up on top (pops as y, x).
        size = self.do_pop().size
        self.push(size[0])
        self.push(size[1])

    # ImageChops operations

    def do_invert(self):
        """usage: invert <image:pic1>

        Invert the top image.
        """
        from PIL import ImageChops
        self.push(ImageChops.invert(self.do_pop()))

    def do_lighter(self):
        """usage: lighter <image:pic1> <image:pic2>

        Pop the two top images, push an image of the lighter pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.lighter(image1, image2))

    def do_darker(self):
        """usage: darker <image:pic1> <image:pic2>

        Pop the two top images, push an image of the darker pixels of both.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.darker(image1, image2))

    def do_difference(self):
        """usage: difference <image:pic1> <image:pic2>

        Pop the two top images, push the difference image
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.difference(image1, image2))

    def do_multiply(self):
        """usage: multiply <image:pic1> <image:pic2>

        Pop the two top images, push the multiplication image.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        self.push(ImageChops.multiply(image1, image2))

    def do_screen(self):
        """usage: screen <image:pic1> <image:pic2>

        Pop the two top images, superimpose their inverted versions.
        """
        from PIL import ImageChops
        image2 = self.do_pop()
        image1 = self.do_pop()
        self.push(ImageChops.screen(image1, image2))

    def do_add(self):
        """usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled sum with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.add(image1, image2, scale, offset))

    def do_subtract(self):
        """usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>

        Pop the two top images, produce the scaled difference with offset.
        """
        from PIL import ImageChops
        image1 = self.do_pop()
        image2 = self.do_pop()
        scale = float(self.do_pop())
        offset = int(self.do_pop())
        self.push(ImageChops.subtract(image1, image2, scale, offset))

    # ImageEnhance classes

    def do_color(self):
        """usage: color <image:pic1>

        Enhance color in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Color(image)
        self.push(enhancer.enhance(factor))

    def do_contrast(self):
        """usage: contrast <image:pic1>

        Enhance contrast in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Contrast(image)
        self.push(enhancer.enhance(factor))

    def do_brightness(self):
        """usage: brightness <image:pic1>

        Enhance brightness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Brightness(image)
        self.push(enhancer.enhance(factor))

    def do_sharpness(self):
        """usage: sharpness <image:pic1>

        Enhance sharpness in the top image.
        """
        from PIL import ImageEnhance
        factor = float(self.do_pop())
        image = self.do_pop()
        enhancer = ImageEnhance.Sharpness(image)
        self.push(enhancer.enhance(factor))

    # The interpreter loop

    def execute(self, list):
        "Interpret a list of PILDriver commands."
        list.reverse()
        while len(list) > 0:
            self.push(list[0])
            list = list[1:]
            if self.verbose:
                print("Stack: " + repr(self.stack))
            top = self.top()
            # Only string tokens can name operators; anything else is data.
            if not isinstance(top, str):
                continue
            funcname = "do_" + top
            if not hasattr(self, funcname):
                # Unknown token: leave it on the stack as an argument.
                continue
            else:
                # Consume the operator token, then dispatch to its method.
                self.do_pop()
                func = getattr(self, funcname)
                func()
if __name__ == '__main__':
    import sys
    # If we see command-line arguments, interpret them as a stack state
    # and execute. Otherwise go interactive.
    driver = PILDriver()
    if len(sys.argv[1:]) > 0:
        driver.execute(sys.argv[1:])
    else:
        # Interactive REPL: each line is whitespace-split into tokens and fed
        # to the interpreter; the remaining stack is echoed after each line.
        print("PILDriver says hello.")
        while True:
            try:
                # raw_input only exists on Python 2; input is the 3.x spelling.
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                # Ctrl-D / end of input ends the session cleanly.
                print("\nPILDriver says goodbye.")
                break
            driver.execute(line.split())
            print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| 29.544592 | 79 | 0.5921 |
f271ecd28a210a5c9907ba23fd82a1d3179c1279 | 9,852 | py | Python | farm/modeling/tokenization.py | Yakonick/KONVENS2019_and_LREC2020 | c0c5afa00d37a6e90bc5674378b908afd6aa286d | [
"Apache-2.0"
] | 4 | 2020-07-22T02:22:52.000Z | 2021-12-27T22:26:37.000Z | farm/modeling/tokenization.py | cgsee1/FARM | afdb91a4a182faa110e7bb82fd93745ea0621d10 | [
"Apache-2.0"
] | null | null | null | farm/modeling/tokenization.py | cgsee1/FARM | afdb91a4a182faa110e7bb82fd93745ea0621d10 | [
"Apache-2.0"
] | 1 | 2021-04-07T19:30:02.000Z | 2021-04-07T19:30:02.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
from io import open
import os
import unicodedata
from pytorch_transformers.tokenization_bert import BertTokenizer, WordpieceTokenizer, BasicTokenizer, load_vocab
logger = logging.getLogger(__name__)
# Subclass of pytorch_transformers' BasicTokenizer that additionally supports
# `never_split_chars`: punctuation characters that must NOT trigger a split.
class BasicTokenizer(BasicTokenizer):
    def __init__(self, do_lower_case=True, never_split=None, never_split_chars=None, tokenize_chinese_chars=True):
        """ Constructs a BasicTokenizer.
        Args:
            **do_lower_case**: Whether to lower case the input.
            **never_split**: (`optional`) list of str
                Kept for backward compatibility purposes.
                Now implemented directly at the base class level (see :func:`PreTrainedTokenizer.tokenize`)
                List of token not to split.
            **never_split_chars**: (`optional`) list of str
                Punctuation characters that are never treated as split points
                (passed through to :func:`_is_punctuation`).
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be desactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        # NOTE: attributes are assigned directly instead of calling
        # super().__init__(), so only the fields used below are initialized.
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = never_split
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.never_split_chars = never_split_chars
    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        # Whole tokens on the never_split list pass through untouched.
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
        output = []
        # Build runs of non-punctuation characters; every punctuation char
        # (unless excluded via never_split_chars) becomes its own piece.
        while i < len(chars):
            char = chars[i]
            if _is_punctuation(char, excluded_chars=self.never_split_chars):
                output.append([char])
                start_new_word = True
            else:
                if start_new_word:
                    output.append([])
                    start_new_word = False
                output[-1].append(char)
            i += 1
        return ["".join(x) for x in output]
# Subclass of pytorch_transformers' BertTokenizer that supports
# `never_split_chars` and merging a custom vocabulary into the [unusedN] slots.
class BertTokenizer(BertTokenizer):
    def __init__(self, vocab_file, do_lower_case=True, do_basic_tokenize=True, never_split=None, never_split_chars=None,
                 unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]",
                 mask_token="[MASK]", tokenize_chinese_chars=True, **kwargs):
        """Constructs a BertTokenizer.
        Args:
            **vocab_file**: Path to a one-wordpiece-per-line vocabulary file
            **do_lower_case**: (`optional`) boolean (default True)
                Whether to lower case the input
                Only has an effect when do_basic_tokenize=True
            **do_basic_tokenize**: (`optional`) boolean (default True)
                Whether to do basic tokenization before wordpiece.
            **never_split**: (`optional`) list of string
                List of tokens which will never be split during tokenization.
                Only has an effect when do_basic_tokenize=True
            **never_split_chars**: (`optional`) list of string
                Punctuation characters that never act as split points.
            **tokenize_chinese_chars**: (`optional`) boolean (default True)
                Whether to tokenize Chinese characters.
                This should likely be desactivated for Japanese:
                see: https://github.com/huggingface/pytorch-pretrained-BERT/issues/328
        """
        # Fixed: the base-class call used to hard-code the default values
        # (do_lower_case=True, unk_token="[UNK]", ...), silently discarding
        # whatever the caller passed in.  Forward the actual arguments.
        # never_split_chars is a feature of this subclass only and is
        # deliberately not forwarded.
        super(BertTokenizer, self).__init__(vocab_file, do_lower_case=do_lower_case,
                                            do_basic_tokenize=do_basic_tokenize,
                                            never_split=never_split,
                                            unk_token=unk_token, sep_token=sep_token,
                                            pad_token=pad_token, cls_token=cls_token,
                                            mask_token=mask_token,
                                            tokenize_chinese_chars=tokenize_chinese_chars,
                                            **kwargs)
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
            # Use the local BasicTokenizer subclass so never_split_chars works.
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                  never_split=never_split,
                                                  never_split_chars=never_split_chars,
                                                  tokenize_chinese_chars=tokenize_chinese_chars)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)
        assert len(self.vocab) > 0
        assert self.wordpiece_tokenizer is not None

    def add_custom_vocab(self, custom_vocab_file):
        """Merge tokens from `custom_vocab_file` into this tokenizer's vocab
        (one token per line), then rebuild the derived lookup structures."""
        self.vocab = self._load_custom_vocab(custom_vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)

    def _load_custom_vocab(self, custom_vocab_file):
        """Read custom tokens and splice them into the vocab's [unusedN] slots.

        Duplicate tokens and tokens already present in the original vocab are
        dropped (logged).  Reading stops at the first empty line.
        Returns the updated vocab (also assigned to self.vocab).
        """
        custom_vocab = {}
        unique_custom_tokens = set()
        idx = 0
        with open(custom_vocab_file, "r", encoding="utf-8") as reader:
            while True:
                token = reader.readline().strip()
                if not token:
                    break
                if token not in unique_custom_tokens:
                    if token not in self.vocab.keys():
                        # Map this custom token onto the next [unusedN] slot.
                        key = "[unused{}]".format(idx)
                        custom_vocab[key] = token
                        idx += 1
                        unique_custom_tokens.add(token)
                    else:
                        logger.info("Dropped custom token (already in original vocab): {}".format(token))
                else:
                    logger.info("Dropped custom token (duplicate): {}".format(token))
        # merge vocabs: replace each reserved [unusedN] entry with its custom
        # token, keeping the original integer ids.
        update_count = 0
        updated_vocab = []
        for k, v in self.vocab.items():
            if k in custom_vocab.keys():
                updated_vocab.append((custom_vocab[k], v))
                update_count += 1
            else:
                updated_vocab.append((k, v))
        self.vocab = collections.OrderedDict(updated_vocab)
        if update_count < len(custom_vocab):
            logger.warning("Updated vocabulary only with {} out of {} tokens from supplied custom vocabulary. The original vocab might not have enough unused tokens.".format(update_count, len(custom_vocab)))
        else:
            logger.info("Updated vocabulary with {} out of {} tokens from custom vocabulary.".format(update_count, len(custom_vocab)))
        return self.vocab
def tokenize_with_metadata(text, tokenizer, max_seq_len):
    """Tokenize `text` and keep character offsets and word-start markers.

    Returns a dict with keys "tokens", "offsets" and "start_of_word".
    """
    # Split text into "words" (here: simple whitespace tokenizer) and record
    # the character offset at which each word begins.
    words = text.split(" ")
    word_offsets = []
    cursor = 0
    for word in words:
        word_offsets.append(cursor)
        cursor += len(word) + 1  # +1 for the single separating space
    # Break each word into subword tokens, carrying the offsets along.
    tokens, offsets, start_of_word = _words_to_tokens(
        words, word_offsets, tokenizer, max_seq_len
    )
    return {"tokens": tokens, "offsets": offsets, "start_of_word": start_of_word}
def _words_to_tokens(words, word_offsets, tokenizer, max_seq_len):
tokens = []
token_offsets = []
start_of_word = []
for w, w_off in zip(words, word_offsets):
# Get tokens of single word
tokens_word = tokenizer.tokenize(w)
# Sometimes the tokenizer returns no tokens
if len(tokens_word) == 0:
continue
tokens += tokens_word
# get gloabl offset for each token in word + save marker for first tokens of a word
first_tok = True
for tok in tokens_word:
token_offsets.append(w_off)
w_off += len(tok.replace("##", ""))
if first_tok:
start_of_word.append(True)
first_tok = False
else:
start_of_word.append(False)
# Clip at max_seq_length. The "-2" is for CLS and SEP token
tokens = tokens[: max_seq_len - 2]
token_offsets = token_offsets[: max_seq_len - 2]
start_of_word = start_of_word[: max_seq_len - 2]
assert len(tokens) == len(token_offsets) == len(start_of_word)
return tokens, token_offsets, start_of_word
def _is_punctuation(char, excluded_chars=None):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if excluded_chars:
if char in excluded_chars:
return False
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | 42.649351 | 207 | 0.624137 |
f6f852adfcdcbd6b2ff30fffcd2b981e549c1f5b | 7,976 | py | Python | jishaku/features/root_command.py | Dorukyum/jishaku | 8d62bfe6bbd8945ce96cb80613c979413e7c9ad7 | [
"MIT"
] | null | null | null | jishaku/features/root_command.py | Dorukyum/jishaku | 8d62bfe6bbd8945ce96cb80613c979413e7c9ad7 | [
"MIT"
] | null | null | null | jishaku/features/root_command.py | Dorukyum/jishaku | 8d62bfe6bbd8945ce96cb80613c979413e7c9ad7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
jishaku.features.root_command
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The jishaku root command.
:copyright: (c) 2021 Devon (Gorialis) R
:license: MIT, see LICENSE for more details.
"""
import math
import sys
import typing
import discord
from discord.ext import commands
from jishaku.features.baseclass import Feature
from jishaku.flags import Flags
from jishaku.modules import package_version
from jishaku.paginators import PaginatorInterface
try:
import psutil
except ImportError:
psutil = None
def natural_size(size_in_bytes: int):
    """
    Converts a number of bytes to an appropriately-scaled unit
    E.g.:
        1024 -> 1.00 KiB
        12345678 -> 11.77 MiB
        0 -> 0.00 B
    """
    units = ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB')

    # math.log is undefined at 0, and negative sizes can't be scaled either;
    # report such values in plain bytes. (The original crashed on 0.)
    if size_in_bytes < 1:
        return f"{size_in_bytes:.2f} {units[0]}"

    # Clamp so an absurdly large value can't index past the last unit.
    power = min(int(math.log(size_in_bytes, 1024)), len(units) - 1)

    return f"{size_in_bytes / (1024 ** power):.2f} {units[power]}"
class RootCommand(Feature):
    """
    Feature containing the root jsk command
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Honour the HIDE flag at load time so `jsk` can start hidden.
        self.jsk.hidden = Flags.HIDE
    @Feature.Command(name="jishaku", aliases=["jsk"],
                     invoke_without_command=True, ignore_extra=False)
    async def jsk(self, ctx: commands.Context):  # pylint: disable=too-many-branches
        """
        The Jishaku debug and diagnostic commands.
        This command on its own gives a status brief.
        All other functionality is within its subcommands.
        """
        # Lines are accumulated here and sent as one message at the end.
        summary = [
            f"Jishaku v{package_version('jishaku')}, Pycord v`{package_version('py-cord')}`, "
            f"`Python {sys.version}` on `{sys.platform}`".replace("\n", ""),
            f"Module was loaded <t:{self.load_time.timestamp():.0f}:R>, "
            f"cog was loaded <t:{self.start_time.timestamp():.0f}:R>.",
            ""
        ]
        # detect if [procinfo] feature is installed
        if psutil:
            try:
                proc = psutil.Process()
                # oneshot() caches process info so the queries below are cheap.
                with proc.oneshot():
                    # Each query is wrapped separately: partial access rights
                    # should still yield whatever information is available.
                    try:
                        mem = proc.memory_full_info()
                        summary.append(f"Using {natural_size(mem.rss)} physical memory and "
                                       f"{natural_size(mem.vms)} virtual memory, "
                                       f"{natural_size(mem.uss)} of which unique to this process.")
                    except psutil.AccessDenied:
                        pass
                    try:
                        name = proc.name()
                        pid = proc.pid
                        thread_count = proc.num_threads()
                        summary.append(f"Running on PID {pid} (`{name}`) with {thread_count} thread(s).")
                    except psutil.AccessDenied:
                        pass
                    summary.append("")  # blank line
            except psutil.AccessDenied:
                summary.append(
                    "psutil is installed, but this process does not have high enough access rights "
                    "to query process information."
                )
                summary.append("")  # blank line
        cache_summary = f"{len(self.bot.guilds)} guild(s) and {len(self.bot.users)} user(s)"
        # Show shard settings to summary
        if isinstance(self.bot, discord.AutoShardedClient):
            # With many shards, listing every id would be noisy: show a count.
            if len(self.bot.shards) > 20:
                summary.append(
                    f"This bot is automatically sharded ({len(self.bot.shards)} shards of {self.bot.shard_count})"
                    f" and can see {cache_summary}."
                )
            else:
                shard_ids = ', '.join(str(i) for i in self.bot.shards.keys())
                summary.append(
                    f"This bot is automatically sharded (Shards {shard_ids} of {self.bot.shard_count})"
                    f" and can see {cache_summary}."
                )
        elif self.bot.shard_count:
            summary.append(
                f"This bot is manually sharded (Shard {self.bot.shard_id} of {self.bot.shard_count})"
                f" and can see {cache_summary}."
            )
        else:
            summary.append(f"This bot is not sharded and can see {cache_summary}.")
        # pylint: disable=protected-access
        if self.bot._connection.max_messages:
            message_cache = f"Message cache capped at {self.bot._connection.max_messages}"
        else:
            message_cache = "Message cache is disabled"
        # Intents only exist from discord.py 1.5.0; older versions report
        # the guild_subscriptions flag instead.
        if discord.version_info >= (1, 5, 0):
            presence_intent = f"presence intent is {'enabled' if self.bot.intents.presences else 'disabled'}"
            members_intent = f"members intent is {'enabled' if self.bot.intents.members else 'disabled'}"
            summary.append(f"{message_cache}, {presence_intent} and {members_intent}.")
        else:
            guild_subscriptions = f"guild subscriptions are {'enabled' if self.bot._connection.guild_subscriptions else 'disabled'}"
            summary.append(f"{message_cache} and {guild_subscriptions}.")
        # pylint: enable=protected-access
        # Show websocket latency in milliseconds
        summary.append(f"Average websocket latency: {round(self.bot.latency * 1000, 2)}ms")
        await ctx.send("\n".join(summary))
    # pylint: disable=no-member
    @Feature.Command(parent="jsk", name="hide")
    async def jsk_hide(self, ctx: commands.Context):
        """
        Hides Jishaku from the help command.
        """
        if self.jsk.hidden:
            return await ctx.send("Jishaku is already hidden.")
        self.jsk.hidden = True
        await ctx.send("Jishaku is now hidden.")
    @Feature.Command(parent="jsk", name="show")
    async def jsk_show(self, ctx: commands.Context):
        """
        Shows Jishaku in the help command.
        """
        if not self.jsk.hidden:
            return await ctx.send("Jishaku is already visible.")
        self.jsk.hidden = False
        await ctx.send("Jishaku is now visible.")
    # pylint: enable=no-member
    @Feature.Command(parent="jsk", name="tasks")
    async def jsk_tasks(self, ctx: commands.Context):
        """
        Shows the currently running jishaku tasks.
        """
        if not self.tasks:
            return await ctx.send("No currently running tasks.")
        paginator = commands.Paginator(max_size=1985)
        for task in self.tasks:
            paginator.add_line(f"{task.index}: `{task.ctx.command.qualified_name}`, invoked at "
                               f"{task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")
        interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
        return await interface.send_to(ctx)
    @Feature.Command(parent="jsk", name="cancel")
    async def jsk_cancel(self, ctx: commands.Context, *, index: typing.Union[int, str]):
        """
        Cancels a task with the given index.
        If the index passed is -1, will cancel the last task instead.
        """
        if not self.tasks:
            return await ctx.send("No tasks to cancel.")
        # "~" is the only accepted string literal: cancel everything.
        if index == "~":
            task_count = len(self.tasks)
            for task in self.tasks:
                task.task.cancel()
            self.tasks.clear()
            return await ctx.send(f"Cancelled {task_count} tasks.")
        if isinstance(index, str):
            raise commands.BadArgument('Literal for "index" not recognized.')
        # -1 cancels the most recently started task; otherwise look up by index.
        if index == -1:
            task = self.tasks.pop()
        else:
            task = discord.utils.get(self.tasks, index=index)
        if task:
            self.tasks.remove(task)
        else:
            return await ctx.send("Unknown task.")
        task.task.cancel()
        return await ctx.send(f"Cancelled task {task.index}: `{task.ctx.command.qualified_name}`,"
                              f" invoked at {task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")
| 34.678261 | 132 | 0.568079 |
21397b518ac2a288c7f6d09c8bf80755c105536e | 88,744 | py | Python | tests/tensor/test_blas.py | ionuttamas/Theano-PyMC | b666bdbb35aec21ac83936bfc91c573ef5ccf741 | [
"BSD-3-Clause"
] | null | null | null | tests/tensor/test_blas.py | ionuttamas/Theano-PyMC | b666bdbb35aec21ac83936bfc91c573ef5ccf741 | [
"BSD-3-Clause"
] | null | null | null | tests/tensor/test_blas.py | ionuttamas/Theano-PyMC | b666bdbb35aec21ac83936bfc91c573ef5ccf741 | [
"BSD-3-Clause"
] | null | null | null | from copy import copy
from itertools import product
import numpy as np
import pytest
from numpy import (
arange,
array,
common_type,
complex64,
complex128,
float32,
float64,
newaxis,
shape,
transpose,
zeros,
)
from numpy.testing import assert_array_almost_equal
import theano
import theano.tensor as tt
import theano.tensor.blas_scipy
from tests import unittest_tools
from tests.tensor.utils import inplace_func
from theano import In, config, shared
from theano.gof.fg import FunctionGraph
from theano.tensor import as_tensor_variable, inplace
from theano.tensor.blas import (
Dot22,
Dot22Scalar,
Gemm,
Gemv,
Ger,
InconsistencyError,
_as_scalar,
_dot22,
_dot22scalar,
_factor_canonicalized,
_gemm_canonicalize,
_is_real_matrix,
gemm,
gemm_inplace,
gemm_no_inplace,
gemv,
gemv_inplace,
gemv_no_inplace,
ger,
ger_destructive,
local_dot22_to_dot22scalar,
local_gemm_to_ger,
res_is_a,
)
from theano.tensor.nnet import sigmoid
from theano.tensor.opt import in2out
# Substitute FAST_RUN when the configured mode is FAST_COMPILE; the tests
# below rely on optimizations that FAST_COMPILE does not apply.
if config.mode == "FAST_COMPILE":
    mode_not_fast_compile = "FAST_RUN"
else:
    mode_not_fast_compile = config.mode
# Compilation mode with the BLAS-related optimizations enabled, but the
# C-level BLAS implementation excluded.
mode_blas_opt = theano.compile.get_default_mode().including(
    "BlasOpt", "specialize", "InplaceBlasOpt"
)
mode_blas_opt = mode_blas_opt.excluding("c_blas")
def test_dot_eq():
    """Two separately constructed Dot op instances compare equal."""
    first = tt.Dot()
    second = tt.Dot()
    assert first == second
def sharedX(x, name):
    """Create a named theano shared variable with the configured float dtype."""
    value = np.asarray(x, config.floatX)
    return theano.shared(value, name=name)
class TestGemm:
    """
    This test suite is supposed to establish that gemm works as it is supposed to.
    """
    def setup_method(self):
        # Seed the RNG for reproducibility and reset the Gemm debug flag in
        # case a previous test enabled it.
        unittest_tools.seed_rng()
        Gemm.debug = False
    @staticmethod
    def _gemm(z, a, x, y, b):
        # Reference NumPy implementation of GEMM: b*z + a*(x @ y).
        # `a` and `b` must be 0-d arrays (scalars).
        assert a.shape == ()
        assert b.shape == ()
        return b * z + a * np.dot(x, y)
    @staticmethod
    def rand(*args):
        # Uniform [0, 1) array with the requested shape.
        return np.random.rand(*args)
    def cmp(self, z_, a_, x_, y_, b_):
        """Check gemm_inplace against the NumPy reference for every dtype
        and every available linker, asserting that z was updated in place."""
        for dtype in ["float32", "float64", "complex64", "complex128"]:
            z = np.asarray(z_, dtype=dtype)
            a = np.asarray(a_, dtype=dtype)
            x = np.asarray(x_, dtype=dtype)
            y = np.asarray(y_, dtype=dtype)
            b = np.asarray(b_, dtype=dtype)
            def cmp_linker(z, a, x, y, b, l):
                # Run gemm_inplace through linker `l` (optimizer disabled)
                # and compare the mutated z against the reference _gemm.
                z, a, x, y, b = [np.asarray(p) for p in (z, a, x, y, b)]
                z_orig = z.copy()
                tz, ta, tx, ty, tb = [
                    as_tensor_variable(p).type() for p in (z, a, x, y, b)
                ]
                f = inplace_func(
                    [tz, ta, tx, ty, tb],
                    gemm_inplace(tz, ta, tx, ty, tb),
                    mode=theano.compile.Mode(optimizer=None, linker=l),
                )
                f(z, a, x, y, b)
                z_after = self._gemm(z_orig, a, x, y, b)
                # print z_orig, z_after, z, type(z_orig), type(z_after), type(z)
                unittest_tools.assert_allclose(z_after, z)
                # With a == 0 and b == 1, gemm leaves z unchanged, so the
                # in-place mutation check below would be meaningless.
                if a == 0.0 and b == 1.0:
                    return
                elif z_orig.size == 0:
                    assert z.size == 0
                else:
                    # z must actually have been modified in place.
                    assert np.any(z_orig != z)
            cmp_linker(copy(z), a, x, y, b, "c|py")
            cmp_linker(copy(z), a, x, y, b, "py")
            # The C linker has no complex gemm and requires a C++ compiler.
            if not dtype.startswith("complex") and config.cxx:
                # If config.blas__ldflags is empty, Theano will use
                # a NumPy C implementation of [sd]gemm_.
                cmp_linker(copy(z), a, x, y, b, "c")
def test_basic(self):
Gemm.debug = True
with pytest.raises(TypeError, match=Gemm.E_rank):
gemm_no_inplace([1.0], 1.0, [1.0], [1.0], 1.0)
def test_basic_1(self):
with pytest.raises(TypeError, match=Gemm.E_rank):
self.cmp(1.0, 0.0, 1.0, 1.0, 1.0)
def test_basic_2(self):
with pytest.raises(TypeError, match=Gemm.E_rank):
self.cmp(2.0, 1.0, [3, 2, 1.0], [[1], [2], [3.0]], 1.0)
def test_basic_4(self):
self.cmp(self.rand(3, 4), 1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test_basic_5(self):
self.cmp(self.rand(3, 4), 1.0, self.rand(3, 5), self.rand(5, 4), 1.0)
def test_basic_6(self):
self.cmp(self.rand(3, 4), 1.0, self.rand(3, 5), self.rand(5, 4), -1.0)
def test_basic_7(self):
self.cmp(self.rand(3, 4), 0.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test_basic_8(self):
self.cmp(self.rand(3, 4), 0.0, self.rand(3, 5), self.rand(5, 4), 0.6)
def test_basic_9(self):
self.cmp(self.rand(3, 4), 0.0, self.rand(3, 5), self.rand(5, 4), -1.0)
def test_basic_10(self):
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 5), self.rand(5, 4), 0.0)
def test_basic_11(self):
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 5), self.rand(5, 4), 1.0)
def test_basic_12(self):
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 5), self.rand(5, 4), -1.0)
def test_shape_0(self):
self.cmp(self.rand(0, 4), -1.0, self.rand(0, 5), self.rand(5, 4), -1.0)
self.cmp(self.rand(3, 0), -1.0, self.rand(3, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(3, 4), -1.0, self.rand(3, 0), self.rand(0, 4), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 5), self.rand(5, 0), -1.0)
self.cmp(self.rand(0, 0), -1.0, self.rand(0, 0), self.rand(0, 0), -1.0)
def test_factorised_scalar(self):
a = tt.matrix()
b = tt.matrix()
s = theano.shared(np.zeros((5, 5)).astype(config.floatX))
lr1 = tt.constant(0.01).astype(config.floatX)
lr2 = tt.constant(2).astype(config.floatX)
l2_reg = tt.constant(0.0001).astype(config.floatX)
# test constant merge with gemm
f = theano.function(
[a, b],
updates=[(s, lr1 * tt.dot(a, b) + l2_reg * lr2 * s)],
mode=mode_not_fast_compile,
).maker.fgraph.toposort()
# [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge
f = theano.function(
[a, b],
updates=[(s, lr1 * (tt.dot(a, b) - l2_reg * s))],
mode=mode_not_fast_compile,
).maker.fgraph.toposort()
# [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# -2e-06)]
assert len(f) == 1
assert f[0].op == gemm_inplace
# test factored scalar with merge and neg
f = theano.function(
[a, b],
updates=[(s, s - lr1 * (s * 0.0002 + tt.dot(a, b)))],
mode=mode_not_fast_compile,
).maker.fgraph.toposort()
# [Gemm{inplace}(<TensorType(float64, matrix)>, -0.01,
# <TensorType(float64, matrix)>, <TensorType(float64, matrix)>,
# 0.999998)]
assert len(f) == 1
assert f[0].op == gemm_inplace
def test_destroy_map0(self):
# test that only first input can be overwritten.
Z = as_tensor_variable(self.rand(2, 2))
with pytest.raises(InconsistencyError, match=Gemm.E_z_uniq):
gemm_inplace(Z, 1.0, Z, Z, 1.0)
def test_destroy_map1(self):
# test that only first input can be overwritten.
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
with pytest.raises(InconsistencyError, match=Gemm.E_z_uniq):
gemm_inplace(Z, 1.0, A, inplace.transpose_inplace(Z), 1.0)
def test_destroy_map2(self):
# test that only first input can be overwritten.
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
with pytest.raises(InconsistencyError, match=Gemm.E_z_uniq):
gemm_inplace(Z, 1.0, inplace.transpose_inplace(Z), A, 1.0)
def test_destroy_map3(self):
# test that only first input can be overwritten
Z = as_tensor_variable(self.rand(2, 2))
A = as_tensor_variable(self.rand(2, 2))
with pytest.raises(InconsistencyError, match=Gemm.E_z_uniq):
gemm_inplace(Z, 1.0, Z, A, 1.0)
def test_destroy_map4(self):
# test that dot args can be aliased
Z = shared(self.rand(2, 2), name="Z")
A = shared(self.rand(2, 2), name="A")
one = tt.constant(1.0).astype(Z.dtype)
f = inplace_func([], gemm_inplace(Z, one, A, A, one))
f()
f = inplace_func([], gemm_inplace(Z, one, A, A.T, one))
f()
def test_transposes(self):
# three square matrices which are not contiguous
A = self.rand(4, 5)[:, :4]
B = self.rand(4, 5)[:, :4]
C = self.rand(4, 5)[:, :4]
def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
z, a, x, y, b = [theano._asarray(p, dtype=dt) for p in (z, a, x, y, b)]
# z_orig = z.copy()
z_after = self._gemm(z, a, x, y, b)
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
# f = inplace_func([tz,ta,tx,ty,tb], gemm_inplace(tz,ta,tx,ty,tb),
# mode = theano.compile.Mode(optimizer = None, linker=l))
# f(z, a, x, y, b)
f = inplace_func(
[],
gemm_inplace(tz, ta, tx, ty, tb),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
f()
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True))
# tz.value *= 0 # clear z's value
y_T = ty.get_value(borrow=True).T
ty.set_value(tx.get_value(borrow=True).T, borrow=True)
tx.set_value(y_T, borrow=True)
f()
# test that the transposed version of multiplication gives
# same answer
unittest_tools.assert_allclose(z_after, tz.get_value(borrow=True).T)
t(C, A, B)
t(C.T, A, B)
t(C, A.T, B, dt="float32")
t(C, A, B.T)
t(C.T, A.T, B)
t(C, A.T, B.T, dt="float32")
t(C.T, A, B.T)
t(C.T, A.T, B.T, dt="float32")
t(C, A[:, :2], B[:2, :])
t(C.T, A[:, :2], B[:2, :], dt="float32")
t(C, A[:2, :].T, B[:2, :])
t(C.T, A[:2, :].T, B[:2, :], dt="float32")
t(C, A[:2, :].T, B[:, :2].T)
t(C.T, A[:2, :].T, B[:, :2].T)
with pytest.raises(ValueError, match=r".*aligned.*"):
t(C.T, A[:2, :], B[:, :2].T)
def test_non_contiguous(self):
# Like test_transposes but with matrices without any
# continuous dimension
A = self.rand(4, 4, 3)
B = self.rand(4, 4, 3)
C = self.rand(4, 4, 3)
def t(z, x, y, a=1.0, b=0.0, l="c|py", dt="float64"):
z, a, x, y, b = [theano._asarray(p, dtype=dt) for p in (z, a, x, y, b)]
z_orig = z.copy()
z_after = np.zeros_like(z_orig)
for i in range(3):
z_after[:, :, i] = self._gemm(z[:, :, i], a, x[:, :, i], y[:, :, i], b)
tz, ta, tx, ty, tb = [shared(p) for p in (z, a, x, y, b)]
for i in range(3):
f_i = inplace_func(
[],
gemm_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb),
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
# tz will not _always_ be overwritten,
# and adding update={...} in the call to function()
# will create cycles, so we update by hand.
z_i = f_i()
z = tz.get_value(borrow=True, return_internal_type=True)
z[:, :, i] = z_i
unittest_tools.assert_allclose(
z_after[:, :, i], tz.get_value(borrow=True)[:, :, i]
)
tz_i = gemm_no_inplace(tz[:, :, i], ta, tx[:, :, i], ty[:, :, i], tb)
g_i = theano.function(
[],
tz_i,
updates=[(tz, tt.set_subtensor(tz[:, :, i], tz_i))],
mode=theano.compile.Mode(optimizer=None, linker=l),
)
for j in range(3):
g_i()
unittest_tools.assert_allclose(
z_after[:, :, i], tz.get_value(borrow=True)[:, :, i]
)
t(C, A, B)
t(C.transpose((1, 0, 2)), A, B)
t(C, A.transpose((1, 0, 2)), B, dt="float32")
t(C, A, B.transpose((1, 0, 2)))
t(C.transpose((1, 0, 2)), A.transpose((1, 0, 2)), B)
t(C, A.transpose((1, 0, 2)), B.transpose((1, 0, 2)), dt="float32")
t(C.transpose((1, 0, 2)), A, B.transpose((1, 0, 2)))
t(
C.transpose((1, 0, 2)),
A.transpose((1, 0, 2)),
B.transpose((1, 0, 2)),
dt="float32",
)
class TestGemmNoFlags:
    """Exercise gemm compiled with empty `blas__ldflags` (see the
    `config.change_flags` decorator on `run_gemm`), sweeping transposed
    and strided-slice variants of every operand."""
    # Op under test and the base matrix dimensions.
    gemm = gemm_no_inplace
    M = 4
    N = 5
    K = 6
    # Stride used when a slice variant is requested.
    slice_step = 3
    def setup_method(self):
        unittest_tools.seed_rng()
    def get_variable(self, V, to_transpose, to_slice):
        # Symbolic counterpart of get_value: optionally transpose and/or
        # take every slice_step-th row of V.
        if to_transpose:
            V = V.T
        if to_slice:
            V = V[:: self.slice_step]
        return V
    def get_function(
        self,
        dtype,
        transpose_A=False,
        transpose_B=False,
        transpose_C=False,
        slice_A=False,
        slice_B=False,
        slice_C=False,
    ):
        # Compile gemm(C1, alpha, A1, B1, beta) where each operand is the
        # requested transposed/sliced view of a fresh input variable.
        alpha = tt.scalar(dtype=dtype, name="alpha")
        beta = tt.scalar(dtype=dtype, name="beta")
        A = tt.matrix(dtype=dtype, name="A")
        B = tt.matrix(dtype=dtype, name="B")
        C = tt.matrix(dtype=dtype, name="C")
        A1 = self.get_variable(A, transpose_A, slice_A)
        B1 = self.get_variable(B, transpose_B, slice_B)
        C1 = self.get_variable(C, transpose_C, slice_C)
        return theano.function(
            [alpha, A, B, beta, C], self.gemm(C1, alpha, A1, B1, beta)
        )
    def generate_value(self, dtype, width, height, to_transpose, to_slice):
        # Build a random value whose transposed/sliced view has shape
        # (width, height): over-allocate rows when slicing so the strided
        # view comes out to the right size.
        if to_slice:
            if to_transpose:
                shape = (height, width * self.slice_step)
            else:
                shape = (width * self.slice_step, height)
        else:
            if to_transpose:
                shape = (height, width)
            else:
                shape = (width, height)
        return np.random.random(shape).astype(dtype)
    def get_data(
        self,
        dtype,
        alpha,
        beta,
        transpose_A=False,
        transpose_B=False,
        transpose_C=False,
        slice_A=False,
        slice_B=False,
        slice_C=False,
    ):
        # Concrete argument tuple matching get_function's input order.
        A = self.generate_value(dtype, self.M, self.N, transpose_A, slice_A)
        B = self.generate_value(dtype, self.N, self.K, transpose_B, slice_B)
        C = self.generate_value(dtype, self.M, self.K, transpose_C, slice_C)
        return (alpha, A, B, beta, C)
    def get_value(self, V, to_transpose, to_slice):
        # NumPy counterpart of get_variable.
        if to_transpose:
            V = V.T
        if to_slice:
            V = V[:: self.slice_step]
        return V
    def compute_ref(
        self,
        alpha,
        A,
        B,
        beta,
        C,
        transpose_A,
        transpose_B,
        transpose_C,
        slice_A,
        slice_B,
        slice_C,
    ):
        # Reference result: alpha * dot(A', B') + beta * C' on the
        # transposed/sliced views.
        A = self.get_value(A, transpose_A, slice_A)
        B = self.get_value(B, transpose_B, slice_B)
        C = self.get_value(C, transpose_C, slice_C)
        return alpha * np.dot(A, B) + beta * C
    @config.change_flags({"blas__ldflags": ""})
    def run_gemm(
        self,
        dtype,
        ALPHA,
        BETA,
        transpose_A,
        transpose_B,
        transpose_C,
        slice_A,
        slice_B,
        slice_C,
    ):
        # Compile (with no BLAS flags), check a Gemm node is present, run,
        # and compare dtype/shape/values against compute_ref.
        f = self.get_function(
            dtype, transpose_A, transpose_B, transpose_C, slice_A, slice_B, slice_C
        )
        values = self.get_data(
            dtype,
            ALPHA,
            BETA,
            transpose_A,
            transpose_B,
            transpose_C,
            slice_A,
            slice_B,
            slice_C,
        )
        assert any(isinstance(node.op, Gemm) for node in f.maker.fgraph.apply_nodes)
        z_val = f(*values)
        assert z_val.dtype == dtype
        assert tuple(z_val.shape) == (self.M, self.K)
        ref_val = self.compute_ref(
            *(
                values
                + (transpose_A, transpose_B, transpose_C, slice_A, slice_B, slice_C)
            )
        )
        unittest_tools.assert_allclose(ref_val, z_val)
    def test_gemm(self):
        # Exhaustive sweep over dtype, coefficients, and all
        # transpose/slice combinations of the three operands.
        dtypes = ("float32", "float64")
        scalars = (0, 1, -2)
        booleans = (False, True)
        # dtype, alpha, beta, transA, transB, transC, sliceA, sliceB, sliceC
        iterables = [dtypes] + ([scalars] * 2) + ([booleans] * 6)
        for dtype, alpha, beta, tA, tB, tC, sA, sB, sC in product(*iterables):
            self.run_gemm(dtype, alpha, beta, tA, tB, tC, sA, sB, sC)
def test_res_is_a():
    X, Y, Z, a, b = XYZab()
    # res_is_a recognizes whether a variable was produced by a given op.
    assert not res_is_a(None, a, tt.sqrt)
    assert not res_is_a(None, a + a, tt.sqrt)
    assert res_is_a(None, tt.sqrt(a + a), tt.sqrt)
    # NOTE(review): the trailing integer argument appears to bound the
    # number of clients the variable may have in `fg` — confirm against
    # res_is_a's definition.
    sqrt_term = tt.sqrt(a + a)
    fg = FunctionGraph([a], [2 * sqrt_term], clone=False)
    assert res_is_a(fg, sqrt_term, tt.sqrt, 2)
    assert not res_is_a(fg, sqrt_term, tt.sqrt, 0)
class TestAsScalar:
    """Tests for `_as_scalar`, which extracts the underlying scalar
    variable from 0-d (or broadcastable-to-0-d) expressions."""
    def test_basic(self):
        # Test that it works on scalar constants
        a = tt.constant(2.5)
        b = tt.constant(np.asarray([[[0.5]]]))
        b2 = b.dimshuffle()
        assert b2.ndim == 0
        d_a = tt.DimShuffle([], [])(a)
        d_b = tt.DimShuffle([True, True, True], [0, 2, 1])(b)
        d_a2 = tt.DimShuffle([], ["x", "x", "x"])(a)
        # For constants, _as_scalar returns a (new) scalar variable, so
        # only the already-scalar constant compares equal to itself.
        assert _as_scalar(a) == a
        assert _as_scalar(b) != b
        assert _as_scalar(d_a) != d_a
        assert _as_scalar(d_b) != d_b
        assert _as_scalar(d_a2) != d_a2
    def test_basic_1(self):
        # Test that it fails on nonscalar constants
        a = tt.constant(np.ones(5))
        assert _as_scalar(a) is None
        assert _as_scalar(tt.DimShuffle([False], [0, "x"])(a)) is None
    def test_basic_2(self):
        # Test that it works on scalar variables
        a = tt.dscalar()
        d_a = tt.DimShuffle([], [])(a)
        d_a2 = tt.DimShuffle([], ["x", "x"])(a)
        # For variables it must return the original variable itself, even
        # through broadcasting dimshuffles.
        assert _as_scalar(a) is a
        assert _as_scalar(d_a) is a
        assert _as_scalar(d_a2) is a
    def test_basic_3(self):
        # Test that it fails on nonscalar variables
        a = tt.matrix()
        assert _as_scalar(a) is None
        assert _as_scalar(tt.DimShuffle([False, False], [0, "x", 1])(a)) is None
class TestRealMatrix:
    """Tests for `_is_real_matrix` (2-d, non-complex check)."""
    def test_basic(self):
        # A transposed matrix is still a real matrix; a vector broadcast
        # up to 2-d via ('x', 0) is not.
        assert _is_real_matrix(tt.DimShuffle([False, False], [1, 0])(tt.matrix()))
        assert not _is_real_matrix(tt.DimShuffle([False], ["x", 0])(tt.dvector()))
"""
This test suite ensures that Gemm is inserted where it belongs, and
that the resulting functions compute the same things as the originals.
"""
def XYZab():
    """Return three fresh matrix variables and two fresh scalars."""
    X = tt.matrix()
    Y = tt.matrix()
    Z = tt.matrix()
    a = tt.scalar()
    b = tt.scalar()
    return X, Y, Z, a, b
def just_gemm(i, o, ishapes=None, max_graphlen=0, expected_nb_gemm=1):
    """Compile outputs `o` from inputs `i` with full optimization, check
    that every dot was folded into exactly `expected_nb_gemm`
    `gemm_inplace` nodes, and verify the optimized function computes the
    same values as an unoptimized compilation of the same graph.

    Parameters
    ----------
    i : list of input Variables
    o : list of output Variables
    ishapes : list of shapes used to generate random input values
        (defaults to the shapes used by most callers).
    max_graphlen : int
        If non-zero, an upper bound on the optimized graph's length.
    expected_nb_gemm : int
        Exact number of gemm_inplace nodes expected after optimization.
    """
    if ishapes is None:
        ishapes = [(4, 3), (3, 5), (4, 5), (), ()]
    f = inplace_func(
        [In(ii, mutable=True, allow_downcast=True) for ii in i],
        o,
        mode="FAST_RUN",
        on_unused_input="ignore",
    )
    # The optimized graph must contain no raw Dot/_dot22 nodes and exactly
    # the expected number of gemm_inplace nodes.
    nb_gemm = 0
    for node in f.maker.fgraph.apply_nodes:
        assert not isinstance(
            node.op, tt.Dot
        ), "_dot22 not changed to gemm_inplace in graph"
        assert node.op != _dot22
        if node.op == gemm_inplace:
            nb_gemm += 1
    assert nb_gemm == expected_nb_gemm, (nb_gemm, expected_nb_gemm)
    g = inplace_func(
        i,
        o,
        mode=theano.compile.Mode(linker="py", optimizer=None),
        allow_input_downcast=True,
        on_unused_input="ignore",
    )
    for node in g.maker.fgraph.apply_nodes:
        assert node.op != gemm_inplace, "gemm_inplace in original graph"
    graphlen = len(f.maker.fgraph.toposort())
    # BUG FIX: the condition previously read `graphlen <= max_graphlen`,
    # which made the assertion fail exactly when the graph was small
    # enough — the opposite of the "graphlen=%i>%i" message. The intent
    # (per that message and the callers passing max_graphlen as an upper
    # bound) is to fail only when graphlen *exceeds* max_graphlen.
    assert not (max_graphlen and (graphlen > max_graphlen)), "graphlen=%i>%i" % (
        graphlen,
        max_graphlen,
    )
    # Run optimized and unoptimized versions on identical random inputs
    # (the RNG is re-seeded so both see the same values).
    rng = np.random.RandomState(unittest_tools.fetch_seed(234))
    r0 = f(*[np.asarray(rng.randn(*sh), config.floatX) for sh in ishapes])
    rng = np.random.RandomState(unittest_tools.fetch_seed(234))
    r1 = g(*[np.asarray(rng.randn(*sh), config.floatX) for sh in ishapes])
    max_abs_err = np.max(np.abs(r0[0] - r1[0]))
    eps = 1.0e-8
    if config.floatX == "float32":
        eps = 1.0e-6
    # BUG FIX: the message used to end with "max_rel_err =" and never
    # showed the value; include it so failures are diagnosable.
    assert max_abs_err <= eps, (
        "GEMM is computing the wrong output. max_abs_err = %s" % max_abs_err
    )
@unittest_tools.assertFailure_fast
def test_gemm_opt0():
    # Many subgraphs whose dots can be eliminated
    X, Y, Z, a, b = XYZab()
    # Basic a*dot(X,Y) +/- b*Z forms in every operand order.
    just_gemm([X, Y, Z, a, b], [tt.dot(X, Y) * a + Z * b])
    just_gemm([X, Y, Z, a, b], [a * tt.dot(X, Y) + b * Z])
    just_gemm([X, Y, Z, a, b], [b * Z + a * tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [tt.dot(X, Y) * a - Z * b])
    just_gemm([X, Y, Z, a, b], [a * tt.dot(X, Y) - b * Z])
    just_gemm([X, Y, Z, a, b], [b * Z - a * tt.dot(X, Y)])
    # with transposes (transposes should be pushed through dot in canonicalize)
    just_gemm([X, Y, Z, a, b], [b * Z.T - a * tt.dot(Y.T, X.T)])
    just_gemm([X, Y, Z, a, b], [b * Z.T + a * b * tt.dot(X, Y).T])
    just_gemm(
        [X, Y, Z, a, b],
        [b * Z + a * tt.dot(X, Y).T],
        ishapes=[(5, 3), (3, 4), (4, 5), (), ()],
    )
    # with N multiplications instead of just one
    just_gemm([X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * tt.dot(X, Y) * b])
    just_gemm([X, Y, Z, a, b], [Z + tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z * b + tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z + a * b * a * tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [(b * b) * Z * a - (a * a) * tt.dot(X, Y) * b])
    just_gemm([X, Y, Z, a, b], [Z - tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z * b - tt.dot(X, Y)])
    just_gemm([X, Y, Z, a, b], [Z - a * b * a * tt.dot(X, Y)])
@unittest_tools.assertFailure_fast
def test_gemm_opt_double_gemm():
    """The autoencoder pattern: one expression containing two separate
    dots, both of which must be turned into gemms, with results matching
    an unoptimized compilation."""
    # This is the pattern that shows up in the autoencoder
    X, Y, Z, a, b = tt.matrix(), tt.matrix(), tt.matrix(), tt.scalar(), tt.scalar()
    R, S, c = tt.matrix(), tt.matrix(), tt.scalar()
    just_gemm(
        [X, Y, Z, a, b, R, S, c],
        [Z * c + a * tt.dot(X, Y) + b * tt.dot(R, S).T],
        ishapes=[(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()],
        expected_nb_gemm=2,
    )
    # Same check but with one gemm_inplace already present in the graph.
    ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5, 9), (9, 4), ()]
    i = [X, Y, Z, a, b, R, S, c]
    o = [
        (
            a * tt.dot(X, Y)
            + gemm_inplace(Z, b, S.T, R.T, tt.constant(1.0).astype(config.floatX))
        )
    ]
    f = inplace_func(
        [In(ii, mutable=True) for ii in i],
        o,
        mode="FAST_RUN",
        on_unused_input="ignore",
    )
    for node in f.maker.fgraph.apply_nodes:
        assert not isinstance(node.op, tt.Dot)
        assert node.op != _dot22
    g = inplace_func(
        i,
        o,
        mode=theano.compile.Mode(linker="py", optimizer=None),
        on_unused_input="ignore",
    )
    # Run optimized and unoptimized functions on identical random inputs.
    rng = np.random.RandomState(unittest_tools.fetch_seed(234))
    r0 = f(*[np.asarray(rng.randn(*sh), config.floatX) for sh in ishapes])
    rng = np.random.RandomState(unittest_tools.fetch_seed(234))
    r1 = g(*[np.asarray(rng.randn(*sh), config.floatX) for sh in ishapes])
    max_abs_err = np.max(np.abs(r0[0] - r1[0]))
    eps = 1.0e-8
    if config.floatX == "float32":
        eps = 1.0e-6
    # BUG FIX: the message used to end with "max_rel_err =" and never
    # showed the value; include it so failures are diagnosable.
    assert max_abs_err <= eps, (
        "GEMM is computing the wrong output. max_abs_err = %s" % max_abs_err
    )
def test_gemm_canonicalize():
    """`_gemm_canonicalize` flattens a sum expression into a list of
    (coefficient, term) pairs, collected into `can`."""
    X, Y, Z, a, b = (
        tt.matrix("X"),
        tt.matrix("Y"),
        tt.matrix("Z"),
        tt.scalar("a"),
        tt.scalar("b"),
    )
    c, d = tt.scalar("c"), tt.scalar("d")
    u = tt.row("u")
    v = tt.vector("v")
    w = tt.col("w")
    # Plain sum of matrices: each term gets coefficient 1.0.
    can = []
    fg = FunctionGraph([X, Y, Z], [X + Y + Z], clone=False)
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, Z)]
    fg.disown()
    # Row operand passes through unchanged.
    can = []
    fg = FunctionGraph([X, Y, u], [X + Y + u], clone=False)
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, u)], can
    fg.disown()
    # Vector operand gets broadcast to 2-d via a DimShuffle.
    can = []
    fg = FunctionGraph([X, Y, v], [X + Y + v], clone=False)
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    # [(1.0, X), (1.0, Y), (1.0, InplaceDimShuffle{x,0}(v))]
    assert can[:2] == [(1.0, X), (1.0, Y)]
    assert isinstance(can[2], tuple)
    assert len(can[2]) == 2
    assert can[2][0] == 1.0
    assert can[2][1].owner
    assert isinstance(can[2][1].owner.op, tt.DimShuffle)
    assert can[2][1].owner.inputs == [v]
    fg.disown()
    # Column operand passes through unchanged.
    can = []
    fg = FunctionGraph([X, Y, w], [X + Y + w], clone=False)
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    assert can == [(1.0, X), (1.0, Y), (1.0, w)], can
    fg.disown()
    # Subtraction folds the negation into the coefficient: -b*Z*c becomes
    # coefficient mul(neg(c), b) on term Z.
    can = []
    fg = FunctionGraph([a, X, Y, b, Z, c], [a * X + Y - b * Z * c], clone=False)
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    assert can[0] == (a, X)
    assert can[1] == (1.0, Y)
    assert can[2][0].owner.op == tt.mul
    assert can[2][0].owner.inputs[0].owner.op == tt.neg
    assert can[2][0].owner.inputs[0].owner.inputs[0] == c
    assert can[2][0].owner.inputs[1] == b
    fg.disown()
    # Negating a whole parenthesized sum negates every coefficient inside.
    can = []
    fg = FunctionGraph(
        [a, X, Y, b, Z, c, d], [(-d) * X - (a * X + Y - b * Z * c)], clone=False
    )
    _gemm_canonicalize(fg, fg.outputs[0], 1.0, can, 0)
    assert can[0][0].owner.op == tt.neg
    assert can[0][0].owner.inputs[0] == d
    assert can[0][1] == X
    assert can[1][0].owner.op == tt.neg
    assert can[1][0].owner.inputs[0] == a
    assert can[2] == (-1.0, Y)
    assert can[3][0].owner.op == tt.mul
    assert can[3][0].owner.inputs == [c, b]
    fg.disown()
def test_gemm_factor():
    """Distinct terms are left untouched, while duplicated terms have
    their coefficients summed by `_factor_canonicalized`."""
    X = tt.matrix("X")
    Y = tt.matrix("Y")
    assert _factor_canonicalized([(1.0, X), (1.0, Y)]) == [(1.0, X), (1.0, Y)]
    assert _factor_canonicalized([(1.0, X), (1.0, X)]) == [(2.0, X)]
def test_upcasting_scalar_nogemm():
    # Test that the optimization does not crash when the scale has an incorrect
    # dtype, and forces upcasting of the result
    v = tt.fmatrix("v")
    w = tt.fmatrix("w")
    t = tt.fmatrix("t")
    alpha = tt.dscalar("a")
    # float32 matrices scaled by a float64 alpha: no Gemm may be inserted.
    rval = tt.dot(w, v) * alpha + t
    f = theano.function([w, v, t, alpha], rval)
    t = f.maker.fgraph.toposort()
    assert np.sum([isinstance(n.op, Gemm) for n in t]) == 0
    # theano.printing.debugprint(f, print_type=True)
    v = tt.fmatrix("v")
    w = tt.fmatrix("w")
    t = tt.fmatrix("t")
    alpha = tt.cscalar("a")
    # Same with a complex alpha; on_opt_error="raise" ensures any crash in
    # the optimizer surfaces instead of being swallowed.
    with config.change_flags(on_opt_error="raise"):
        rval = tt.dot(w, v) * alpha + t
        f = theano.function([w, v, t, alpha], rval)
    t = f.maker.fgraph.toposort()
    assert np.sum([isinstance(n.op, Gemm) for n in t]) == 0
    # theano.printing.debugprint(f, print_type=True)
def test_gemm_nested():
    """Nested scalar/matrix expressions must collapse into small graphs
    (bounded by max_graphlen) after the gemm optimizations."""
    X, Y, Z, a, b = (
        tt.matrix("X"),
        tt.matrix("Y"),
        tt.matrix("Z"),
        tt.scalar("a"),
        tt.scalar("b"),
    )
    R, S, U, c, d = (
        tt.matrix("R"),
        tt.matrix("S"),
        tt.matrix("U"),
        tt.scalar("c"),
        tt.scalar("d"),
    )
    just_gemm(
        [X, Y, Z, R, S, U, a, b, c, d],
        [a * Z - b * (c * tt.dot(X, Y) + d * Z)],
        ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
        max_graphlen=1,
    )
    # print "---------------------"
    just_gemm(
        [X, Y, Z, R, S, U, a, b, c, d],
        [a * Z - b * (c * tt.dot(X, Y) + d * Z + c * Z)],
        ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
        max_graphlen=1,
    )
    # print "---------------------"
    # A second distinct matrix (U) in the sum cannot be fully folded, so a
    # slightly longer graph is allowed.
    just_gemm(
        [X, Y, Z, R, S, U, a, b, c, d],
        [a * Z - b * (c * tt.dot(X, Y) + d * Z + c * U)],
        ishapes=[(2, 3), (3, 4), (2, 4), (2, 3), (3, 4), (2, 4), (), (), (), ()],
        max_graphlen=3,
    )
def test_gemm_opt_wishlist():
    X, Y, Z, a, b = tt.matrix(), tt.matrix(), tt.matrix(), tt.scalar(), tt.scalar()
    # with >2 additions of the same T.dot(X, Y) term
    just_gemm(
        [X, Y, Z, a, b], [(b * b) * Z * a + (a * a) * tt.dot(X, Y) + b * tt.dot(X, Y)]
    )
    just_gemm([X, Y, Z, a, b], [Z + tt.dot(X, Y) + tt.dot(X, Y)])
def test_gemm_with_vector():
    # Many subgraphs whose dots can be eliminated. This adds a
    # vector two the previous test, which triggers the long-sought GEMM
    # bug.
    X, Y, Z, a, b = XYZab()
    v = tt.vector()
    def my_just_gemm(o):
        # Same as just_gemm but with the extra vector input and its shape.
        i = [X, Y, Z, a, b, v]
        ishapes = [(4, 3), (3, 5), (4, 5), (), (), (5,)]
        just_gemm(i, o, ishapes=ishapes)
    my_just_gemm([v + tt.dot(X, Y) * a + Z * b])
    my_just_gemm([v + a * tt.dot(X, Y) + b * Z])
    my_just_gemm([v + b * Z + a * tt.dot(X, Y)])
    my_just_gemm([v + tt.dot(X, Y) * a - Z * b])
    my_just_gemm([v + a * tt.dot(X, Y) - b * Z])
    my_just_gemm([v + b * Z - a * tt.dot(X, Y)])
    # with N multiplications instead of just one
    my_just_gemm([v + (b * b) * Z * a + (a * a) * tt.dot(X, Y) * b])
    my_just_gemm([v + Z + tt.dot(X, Y)])
    my_just_gemm([v + Z * b + tt.dot(X, Y)])
    my_just_gemm([v + Z + a * b * a * tt.dot(X, Y)])
    my_just_gemm([v + (b * b) * Z * a - (a * a) * tt.dot(X, Y) * b])
    my_just_gemm([Z - tt.dot(X, Y) + v])
    my_just_gemm([Z * b - tt.dot(X, Y) + v])
    my_just_gemm([Z - a * b * a * tt.dot(X, Y) + v])
def test_gemm_opt_vector_stuff():
    """Graphs whose additive terms involve vectors must not be rewritten
    into `gemm_inplace`."""
    X = tt.matrix()
    Y = tt.matrix()
    a = tt.scalar()
    u = tt.vector()
    v = tt.vector()
    # scalar + vector-vector dot: no gemm_inplace expected
    f = inplace_func([a, u, v], a + tt.dot(u, v), mode="FAST_RUN")
    ops = [n.op for n in f.maker.fgraph.apply_nodes]
    assert gemm_inplace not in ops
    # vector-scaled term + matrix-matrix dot: still no gemm_inplace
    f = inplace_func([a, u, X, Y], a * u + tt.dot(X, Y), mode="FAST_RUN")
    ops = [n.op for n in f.maker.fgraph.apply_nodes]
    assert gemm_inplace not in ops
def test_gemm_unrolled():
    """Check that the gemm optimizer removes the dot22 nodes from an
    unrolled RBM-style update graph. Otherwise a gemm would be added
    while the dot22 is still computed, duplicating work.

    Each unrolling round adds 3 dots, one of which (dot(G, W.T)) is the
    same every round, so the optimized graph should contain exactly
    1 + 2 * num_rounds dot-variant ops.
    """
    batch_size = 100
    rep_size = 40
    rng = np.random.RandomState([1, 2, 3])
    for num_rounds in range(1, 10):
        W = sharedX(rng.randn(rep_size, rep_size), name="W")
        V = sharedX(np.zeros((batch_size, rep_size)), name="V")
        H = sharedX(np.zeros((batch_size, rep_size)), name="H")
        G = sharedX(np.zeros((batch_size, rep_size)), name="G")
        cur_V = V
        cur_H = H
        def update_V(cur_H):
            # Visible units from hidden units.
            return sigmoid(tt.dot(cur_H, W.T))
        def update_H(cur_V):
            # Hidden units from visible units plus a constant G term.
            return sigmoid(tt.dot(cur_V, W) + tt.dot(G, W.T))
        for i in range(num_rounds):
            cur_V = update_V(cur_H)
            cur_H = update_H(cur_V)
        unrolled_theano = theano.function(
            [], updates=[(V, cur_V), (H, cur_H)], name="unrolled_theano"
        )
        # IDIOM: count matching nodes directly by summing booleans instead
        # of the previous `sum([1 for ... if ...])` list-comprehension.
        nb_dot = sum(
            isinstance(node.op, (tt.Dot, Dot22, Gemm))
            for node in unrolled_theano.maker.fgraph.toposort()
        )
        # Each num_rounds add 3 dot, but one of them is always the same.
        # So the final graph should have 1 + 2* num_rounds dot variant op.
        assert nb_dot == num_rounds * 2 + 1, nb_dot
        unrolled_theano()
def test_inplace0():
    # should fail to insert gemm_inplace because gemm_inplace would
    # create cycles
    X, Y, Z, a, b = (
        tt.matrix("X"),
        tt.matrix("Y"),
        tt.matrix("Z"),
        tt.scalar("a"),
        tt.scalar("b"),
    )
    R, S, c = tt.matrix("R"), tt.matrix("S"), tt.scalar("c")
    # Z is also consumed by the outer multiplication, so overwriting it
    # in place is illegal: only gemm_no_inplace may appear.
    f = inplace_func([Z, b, R, S], [Z * (Z + b * tt.dot(R, S).T)], mode="FAST_RUN")
    assert gemm_inplace not in [n.op for n in f.maker.fgraph.apply_nodes]
    assert gemm_no_inplace in [n.op for n in f.maker.fgraph.apply_nodes]
    # gemm_inplace should be inserted here, to work in-place on Z*c
    f = inplace_func(
        [X, Y, Z, a, b, R, S, c],
        [Z * (c * Z + a * tt.dot(X, Y) + b * tt.dot(R, S).T)],
        mode="FAST_RUN",
    )
    assert gemm_inplace in [n.op for n in f.maker.fgraph.apply_nodes]
def test_inplace1():
    X, Y, Z, a, b = XYZab()
    # with > 2 terms in the overall addition
    f = inplace_func([X, Y, Z], [Z + Z + tt.dot(X, Y)], mode="FAST_RUN")
    # theano.printing.debugprint(f)
    # it doesn't work inplace because we didn't mark Z as mutable input
    assert [n.op for n in f.maker.fgraph.apply_nodes] == [gemm_no_inplace]
def test_dot22():
    """`dot` on two matrices of the same dtype must be rewritten to
    `_dot22`; mixed dtypes must keep the generic Dot op. The compiled
    function is then run on several shapes, including empty ones."""
    for dtype1 in ["float32", "float64", "complex64", "complex128"]:
        a = tt.matrix(dtype=dtype1)
        for dtype2 in ["float32", "float64", "complex64", "complex128"]:
            b = tt.matrix(dtype=dtype2)
            f = theano.function([a, b], tt.dot(a, b), mode=mode_blas_opt)
            topo = f.maker.fgraph.toposort()
            if dtype1 == dtype2:
                assert _dot22 in [x.op for x in topo], (dtype1, dtype2)
            else:
                check = [isinstance(x.op, tt.Dot) for x in topo]
                assert any(check), (dtype1, dtype2)
            rng = np.random.RandomState(unittest_tools.fetch_seed())
            def cmp(a_shp, b_shp):
                # Smoke-test the compiled function on concrete values.
                av = rng.uniform(size=a_shp).astype(dtype1)
                bv = rng.uniform(size=b_shp).astype(dtype2)
                f(av, bv)
            cmp((3, 4), (4, 5))
            cmp((0, 4), (4, 5))
            cmp((3, 0), (0, 5))
            cmp((3, 4), (4, 0))
            cmp((0, 4), (4, 0))
            cmp((0, 0), (0, 0))
@pytest.mark.slow
def test_dot22scalar():
    """Sweep complex dtype combinations and check that scalar-times-dot
    expressions are rewritten to `_dot22scalar` (or fall back to `_dot22`
    / generic Dot) as appropriate for the dtypes involved."""
    # including does not seem to work for 'local_dot_to_dot22' and
    # 'local_dot22_to_dot22scalar'
    # TODO: exclude other optimizations in BlasOpt?
    # m = theano.compile.get_default_mode().including('local_dot_to_dot22',
    #                           'local_dot22_to_dot22scalar','specialize')
    # m = theano.compile.get_default_mode().including('BlasOpt', 'specialize')
    rng = np.random.RandomState(unittest_tools.fetch_seed())
    for dtype1 in ["complex64", "complex128"]:
        a = tt.matrix("a", dtype=dtype1)
        for dtype2 in ["complex64", "complex128"]:
            b = tt.matrix("b", dtype=dtype2)
            for dtype3 in ["complex64", "complex128"]:
                c = tt.matrix("c", dtype=dtype3)
                for dtype4 in ["complex64", "complex128"]:
                    cst = tt.constant(0.2, dtype=dtype4)
                    cst2 = tt.constant(0.1, dtype=dtype4)
                    def check_dot22scalar(func, len_topo_scalar=-1):
                        # Verify which op the optimizer produced, given
                        # the dtype relationships between the operands.
                        topo = func.maker.fgraph.toposort()
                        ops = [x.op for x in topo]
                        dtype4_upcast = theano.scalar.upcast(dtype4, dtype1, dtype2)
                        if dtype1 == dtype2 == dtype3 == dtype4_upcast:
                            if len_topo_scalar > 0:
                                assert len(topo) == len_topo_scalar
                            assert _dot22scalar in ops, (dtype1, dtype2, dtype3, dtype4)
                        elif dtype1 == dtype2 == dtype4_upcast:
                            # NOTE(review): this condition looks inverted
                            # relative to the branch above (`if not
                            # (len_topo_scalar > 0)` vs `if
                            # len_topo_scalar > 0`) — confirm the intent.
                            if not (len_topo_scalar > 0):
                                assert len(topo) == len_topo_scalar
                                assert _dot22scalar in ops, (
                                    dtype1,
                                    dtype2,
                                    dtype3,
                                    dtype4,
                                )
                            else:
                                # Currently there is a problem of
                                # optimization order The constant get
                                # upcasted to float64 before we try to
                                # merge it with the dot22 of
                                # float32. So this prevent the merge.
                                assert _dot22scalar in ops or _dot22 in ops, (
                                    dtype1,
                                    dtype2,
                                    dtype3,
                                    dtype4,
                                )
                        elif dtype1 == dtype2:
                            assert _dot22 in ops, (dtype1, dtype2, dtype3, dtype4)
                        else:
                            check = [isinstance(o, tt.Dot) for o in ops]
                            assert any(check), (dtype1, dtype2, dtype3, dtype4)
                    def cmp(a_shp, b_shp, c_shp, sqr_shp=(5, 5)):
                        # Compile several scalar-times-dot variants, check
                        # the chosen op, and run on concrete values.
                        av = rng.uniform(size=a_shp).astype(dtype1)
                        bv = rng.uniform(size=b_shp).astype(dtype2)
                        cv = rng.uniform(size=c_shp).astype(dtype3)
                        sv = rng.uniform(size=sqr_shp).astype(dtype1)
                        # NOTE(review): intentionally disabled case kept
                        # for reference (constant * dot alone).
                        if False:
                            f = theano.function(
                                [a, b], cst * tt.dot(a, b), mode=mode_blas_opt
                            )
                            f.maker.fgraph.toposort()
                            check_dot22scalar(f, 1)
                            f(av, bv)
                        if True:
                            f = theano.function(
                                [a, b, c], cst * c * tt.dot(a, b), mode=mode_blas_opt
                            )
                            f.maker.fgraph.toposort()
                            check_dot22scalar(f, 2)
                            f(av, bv, cv)
                            f = theano.function(
                                [a, b, c], c * cst * tt.dot(a, b), mode=mode_blas_opt
                            )
                            f.maker.fgraph.toposort()
                            check_dot22scalar(f, 2)
                            f(av, bv, cv)
                            # Here, canonicalize also seems needed
                            # TODO: add only the optimizations needed?
                            m2 = mode_blas_opt.including("canonicalize")
                            f = theano.function(
                                [a, b, c], cst2 * c * cst * tt.dot(a, b), mode=m2
                            )
                            f.maker.fgraph.toposort()
                            check_dot22scalar(f, 2)
                            f(av, bv, cv)
                            if dtype1 == dtype2 == dtype3:
                                f = theano.function(
                                    [a, b, c], c * cst * a * tt.dot(a, b), mode=m2
                                )
                                f.maker.fgraph.toposort()
                                check_dot22scalar(f, 2)
                                f(sv, sv, sv)
                                f = theano.function(
                                    [a, b, c],
                                    cst * c * a * tt.dot(a, b),
                                    mode=mode_blas_opt,
                                )
                                f.maker.fgraph.toposort()
                                # currently the canonizer don't always
                                # merge all Mul together... dot22scalar
                                # optimizer does not do a recursive search
                                # therefore, it doesn't find potential
                                # matches of the scalar. TODO: combine
                                # with the 'canonicalization' that is part
                                # of the Gemm optimizer.
                                #
                                # assert _dot22scalar in [x.op for x in topo]
                                # assert len(topo)==2
                                f(sv, sv, sv)
                                f = theano.function(
                                    [a, b, c], c * a * cst * tt.dot(a, b), mode=m2
                                )
                                f.maker.fgraph.toposort()
                                check_dot22scalar(f, 2)
                                f(sv, sv, sv)
                    cmp((3, 4), (4, 5), (3, 5))
                    cmp((0, 4), (4, 5), (0, 5))
                    cmp((3, 0), (0, 5), (3, 5))
                    cmp((3, 4), (4, 0), (3, 0), (0, 0))
                    cmp((0, 4), (4, 0), (0, 0))
                    cmp((0, 0), (0, 0), (0, 0))
def test_dot22scalar_cast():
    # Test that in `dot22_to_dot22scalar` we properly cast integers to floats.
    # Note that this test was failing before d5ff6904.
    A = tt.dmatrix()
    # Every integer dtype upcasts safely into float64, so the scalar form
    # is always usable.
    for scalar_int_type in tt.int_dtypes:
        y = tt.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], tt.dot(A, A) * y, mode=mode_blas_opt)
        assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
    A = tt.fmatrix()
    # For float32 matrices, int32/int64 scalars would upcast the result to
    # float64, so only the plain _dot22 may be used in those cases.
    for scalar_int_type in tt.int_dtypes:
        y = tt.scalar(dtype=scalar_int_type)
        f = theano.function([A, y], tt.dot(A, A) * y, mode=mode_blas_opt)
        if scalar_int_type in ["int32", "int64"]:
            assert _dot22 in [x.op for x in f.maker.fgraph.toposort()]
        else:
            assert _dot22scalar in [x.op for x in f.maker.fgraph.toposort()]
def test_local_dot22_to_dot22scalar():
    """Regression test for gh-1507: `local_dot22_to_dot22scalar` must
    propose a rewrite for each of these mul-of-_dot22 graphs without
    raising, and the rewritten function must still run."""
    A = tt.dmatrix()
    mode = theano.compile.mode.get_default_mode()
    opt = in2out(local_dot22_to_dot22scalar)
    mode = mode.__class__(optimizer=opt)
    x = tt.dscalar()
    y = tt.dscalar()
    z = tt.dscalar()
    # Make sure not to introduce dimshuffles: those cases are not optimized.
    m = tt.dmatrix()
    r = tt.drow()
    # IDIOM: dropped `enumerate` — the index was never used.
    for node in [
        # Old working cases
        tt.mul(_dot22(A, A), x),
        tt.mul(_dot22(A, A), x, y),
        tt.mul(_dot22(A, A), x, r),
        tt.mul(_dot22(A, A), m, x),
        tt.mul(_dot22(A, A), x, m),
        tt.mul(_dot22(A, A), x, (m * y)),
        tt.mul(_dot22(A, A), (m * y), x),
        tt.mul(_dot22(A, A), x, (r * y)),
        tt.mul(_dot22(A, A), (r * y), x),
        tt.mul(_dot22(A, A), (x * y), (m * x)),
        tt.mul(_dot22(A, A), (r * y), (y * x)),
        # Case that was raising an assert that is fixed in gh-1507
        tt.mul(_dot22(A, A), (m * y), m),
        tt.mul(_dot22(A, A), m, (m * y)),
        tt.mul(_dot22(A, A), (r * y), (m * x)),
        # assert fixed in gh-1507 and opt case added in gh-1515
        tt.mul(_dot22(A, A), (m * y * z), m),
        tt.mul(_dot22(A, A), m, (m * y * z)),
        # Opt case added in gh-1515
        tt.mul(_dot22(A, A), tt.mul(m, y, z), m),
        tt.mul(_dot22(A, A), m, tt.mul(m, y, z)),
        # Case that opt later in gh-1515
        tt.mul(_dot22(A, A), (r * m), (m * x)),
    ]:
        # The optimizer must propose a replacement for every case...
        node2 = local_dot22_to_dot22scalar.transform(None, node.owner)
        assert node2
        # ...and the compiled graph must still evaluate cleanly.
        f = theano.function(
            [x, y, z, m, r, A], node, mode=mode, on_unused_input="ignore"
        )
        f(0.1, 0.2, 0.3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])
def test_dot_w_self():
    # This can trigger problems in the optimization because what would
    # normally be a gemm must not be because the output is aliased to
    # one of the inputs.
    A = shared(value=np.ones((2, 2)))
    B = tt.matrix()
    p = tt.dot(A, A) * B
    # Updating A from the gradient of an expression that also reads A
    # aliases input and output.
    grad = tt.grad(tt.mean(p), A)
    f = theano.function([B], p, updates=[(A, A - grad)])
    # tests correctness in debugmode
    f(np.asarray([[0, 1], [2, 3]], dtype=config.floatX))
###############################################################################
# Tests for Gemv
###############################################################################
class TestGemv(unittest_tools.OptimizationTestMixin):
    def test_dot_vv(self):
        # Currently we generate a gemv for that case
        rng = np.random.RandomState(unittest_tools.fetch_seed())
        v = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
        w = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
        f = theano.function([], theano.tensor.dot(v, w), mode=mode_blas_opt)
        # Assert that the dot was optimized somehow
        self.assertFunctionContains0(f, tt.dot)
        self.assertFunctionContains1(f, Gemv(True))
        # Assert they produce the same output
        assert np.allclose(f(), np.dot(v.get_value(), w.get_value()))
def test_dot_vm(self):
# Test vector dot matrix
rng = np.random.RandomState(unittest_tools.fetch_seed())
v = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
m = theano.shared(np.array(rng.uniform(size=(2, 3)), dtype="float32"))
f = theano.function([], theano.tensor.dot(v, m), mode=mode_blas_opt)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, tt.dot)
self.assertFunctionContains1(f, Gemv(True))
# Assert they produce the same output
assert np.allclose(f(), np.dot(v.get_value(), m.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(m.get_value(borrow=True)[::-1, ::-1], borrow=True)
assert np.allclose(f(), np.dot(v.get_value(), m.get_value()))
def test_dot_mv(self):
# Test matrix dot vector
rng = np.random.RandomState(unittest_tools.fetch_seed())
v = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
m = theano.shared(np.array(rng.uniform(size=(3, 2)), dtype="float32"))
f = theano.function([], theano.tensor.dot(m, v), mode=mode_blas_opt)
# Assert that the dot was optimized somehow
self.assertFunctionContains0(f, tt.dot)
self.assertFunctionContains1(f, Gemv(True))
# Assert they produce the same output
assert np.allclose(f(), np.dot(m.get_value(), v.get_value()))
# Assert it works when m has no contiguous dimension
m.set_value(m.get_value(borrow=True)[::-1, ::-1], borrow=True)
assert np.allclose(f(), np.dot(m.get_value(), v.get_value()))
@staticmethod
def t_gemv1(m_shp):
# test vector2+dot(matrix,vector1)
rng = np.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(np.array(rng.uniform(size=(m_shp[1],)), dtype="float32"))
v2_orig = np.array(rng.uniform(size=(m_shp[0],)), dtype="float32")
v2 = theano.shared(v2_orig)
m = theano.shared(np.array(rng.uniform(size=m_shp), dtype="float32"))
f = theano.function([], v2 + theano.tensor.dot(m, v1), mode=mode_blas_opt)
# Assert they produce the same output
assert np.allclose(f(), np.dot(m.get_value(), v1.get_value()) + v2_orig)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Gemv)
assert topo[0].op.inplace is False
# test the inplace version
g = theano.function(
[], [], updates=[(v2, v2 + theano.tensor.dot(m, v1))], mode=mode_blas_opt
)
# Assert they produce the same output
g()
assert np.allclose(
v2.get_value(), np.dot(m.get_value(), v1.get_value()) + v2_orig
)
topo = g.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, Gemv)
if config.mode != "FAST_COMPILE":
assert topo[0].op.inplace is True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(m.get_value(borrow=True)[::-1, ::-1], borrow=True)
v2.set_value(v2_orig)
assert np.allclose(f(), np.dot(m.get_value(), v1.get_value()) + v2_orig)
g()
assert np.allclose(
v2.get_value(), np.dot(m.get_value(), v1.get_value()) + v2_orig
)
@pytest.mark.slow
def test_gemv1(self):
self.t_gemv1((3, 2))
self.t_gemv1((0, 2))
self.t_gemv1((3, 0))
self.t_gemv1((0, 0))
def test_gemv2(self):
# test vector2+dot(vector1,matrix)
rng = np.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
v2_orig = np.array(rng.uniform(size=(3,)), dtype="float32")
v2 = theano.shared(v2_orig)
m = theano.shared(np.array(rng.uniform(size=(2, 3)), dtype="float32"))
f = theano.function([], v2 + theano.tensor.dot(v1, m), mode=mode_blas_opt)
# Assert they produce the same output
assert np.allclose(f(), np.dot(v1.get_value(), m.get_value()) + v2.get_value())
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
assert topo[-1].op.inplace is False
# test the inplace version
g = theano.function(
[], [], updates=[(v2, v2 + theano.tensor.dot(v1, m))], mode=mode_blas_opt
)
# Assert they produce the same output
g()
assert np.allclose(
v2.get_value(), np.dot(v1.get_value(), m.get_value()) + v2_orig
)
topo = g.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
if config.mode != "FAST_COMPILE":
assert topo[-1].op.inplace is True
# Do the same tests with a matrix with strides in both dimensions
m.set_value(m.get_value(borrow=True)[::-1, ::-1], borrow=True)
v2.set_value(v2_orig)
assert np.allclose(f(), np.dot(v1.get_value(), m.get_value()) + v2.get_value())
g()
assert np.allclose(
v2.get_value(), np.dot(v1.get_value(), m.get_value()) + v2_orig
)
def test_gemv_broadcast(self):
# test gemv with some broadcasted input
rng = np.random.RandomState(unittest_tools.fetch_seed())
v1 = theano.shared(np.array(rng.uniform(size=(2,)), dtype="float32"))
v2_orig = np.array(rng.uniform(size=(1,)), dtype="float32")
v2 = theano.shared(v2_orig)
m = theano.shared(
np.array(rng.uniform(size=(1, 2)), dtype="float32"),
broadcastable=(True, False),
)
o = theano.tensor.dot(m, v1)
f = theano.function([], o + v2, mode=mode_blas_opt)
# Assert they produce the same output
assert np.allclose(f(), np.dot(m.get_value(), v1.get_value()) + v2.get_value())
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
# call gemv directly for mixed broadcast pattern.
o = gemv_no_inplace(v2, 0.5, m, v1, 0.25)
f = theano.function([], o, mode=mode_blas_opt)
assert np.allclose(
f(), 0.5 * np.dot(m.get_value(), v1.get_value()) + 0.25 * v2.get_value()
)
topo = f.maker.fgraph.toposort()
assert sum(isinstance(node.op, Gemv) for node in topo) == 1
def test_gemv_dimensions(self):
A = tt.matrix("A")
x, y = tt.vectors("x", "y")
alpha = theano.shared(theano._asarray(1.0, dtype=config.floatX), name="alpha")
beta = theano.shared(theano._asarray(1.0, dtype=config.floatX), name="beta")
z = beta * y + alpha * tt.dot(A, x)
f = theano.function([A, x, y], z)
# Matrix value
A_val = np.ones((5, 3), dtype=config.floatX)
# Different vector length
ones_3 = np.ones(3, dtype=config.floatX)
ones_4 = np.ones(4, dtype=config.floatX)
ones_5 = np.ones(5, dtype=config.floatX)
ones_6 = np.ones(6, dtype=config.floatX)
f(A_val, ones_3, ones_5)
f(A_val[::-1, ::-1], ones_3, ones_5)
with pytest.raises(ValueError):
f(A_val, ones_4, ones_5)
with pytest.raises(ValueError):
f(A_val, ones_3, ones_6)
with pytest.raises(ValueError):
f(A_val, ones_4, ones_6)
# The following gemv tests were added in March 2011 by Ian Goodfellow
# and are based on the gemv tests from scipy
# http://projects.scipy.org/scipy/browser/trunk/scipy/linalg/tests/test_fblas.py?rev=6803
# NOTE: At the time these tests were written, theano did not have a
# conjugate function. If such a thing is ever added, the tests involving
# conjugate should be ported over as well.
def matrixmultiply(a, b):
    """Naive reference matrix multiply used to validate BLAS results.

    Accepts a 2d array ``a`` and a 1d or 2d array ``b``; a 1d ``b`` is
    treated as a column and the result is returned as a 1d array, matching
    ``np.dot``'s shape behavior.
    """
    b_is_vector = len(b.shape) == 1
    if b_is_vector:
        # Promote the vector to a single-column matrix for the main loops.
        b = b[:, newaxis]
    assert a.shape[1] == b.shape[0]
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for row in range(a.shape[0]):
        for col in range(b.shape[1]):
            acc = 0
            for k in range(a.shape[1]):
                acc += a[row, k] * b[k, col]
            c[row, col] = acc
    if b_is_vector:
        # Collapse back to a 1d result.
        c = c.reshape((a.shape[0],))
    return c
class BaseGemv:
    """Shared Gemv test cases, ported from scipy's fblas gemv tests.

    Subclasses must provide ``dtype``, ``gemv`` and ``gemv_inplace``
    attributes (see ``TestSgemv`` / ``TestDgemv``); they may also override
    ``mode`` and ``shared``.
    """

    mode = mode_blas_opt  # can be overridden with self.mode
    shared = staticmethod(theano.shared)

    def get_data(self, x_stride=1, y_stride=1):
        """Return (alpha, beta, a, x, y) test values for ``self.dtype``.

        ``x``/``y`` are sized ``x_stride``/``y_stride`` times larger than
        needed so callers can slice them with that step.
        """
        rng = np.random.RandomState(unittest_tools.fetch_seed())
        mult = array(1, dtype=self.dtype)
        # For complex dtypes, make the data genuinely complex.
        if self.dtype in [complex64, complex128]:
            mult = array(1 + 1j, dtype=self.dtype)
        alpha = array(1.0, dtype=self.dtype) * mult
        beta = array(1.0, dtype=self.dtype) * mult
        a = rng.randn(3, 3).astype(self.dtype) * mult
        x = arange(shape(a)[0] * x_stride, dtype=self.dtype) * mult
        y = arange(shape(a)[1] * y_stride, dtype=self.dtype) * mult
        return alpha, beta, a, x, y

    def test_simple(self):
        """alpha * dot(a, x) + beta * y becomes a single gemv."""
        alpha, beta, a, x, y = [self.shared(value) for value in self.get_data()]
        desired_oy = (
            alpha.get_value() * matrixmultiply(a.get_value(), x.get_value())
            + beta.get_value() * y.get_value()
        )
        oy = alpha * tt.dot(a, x) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        oy_func.maker.fgraph.toposort()
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_val = oy_func()
        assert_array_almost_equal(desired_oy, oy_val)

    def test_default_beta_y(self):
        """Bare dot(a, x) uses the inplace gemv (fresh output buffer)."""
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        a = self.shared(a_v)
        x = self.shared(x_v)
        desired_oy = matrixmultiply(a_v, x_v)
        oy = tt.dot(a, x)
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv_inplace)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_simple_transpose(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(transpose(a_v), x_v) + beta_v * y_v
        oy = alpha * tt.dot(a.T, x) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_x_stride(self):
        vs = self.get_data(x_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(a_v, x_v[::2]) + beta_v * y_v
        oy = alpha * tt.dot(a, x[::2]) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_x_stride_transpose(self):
        vs = self.get_data(x_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(transpose(a_v), x_v[::2]) + beta_v * y_v
        oy = alpha * tt.dot(a.T, x[::2]) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_y_stride(self):
        vs = self.get_data(y_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v[::2]
        oy = alpha * tt.dot(a, x) + beta * y[::2]
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_y_stride_transpose(self):
        vs = self.get_data(y_stride=2)
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        desired_oy = alpha_v * matrixmultiply(transpose(a_v), x_v) + beta_v * y_v[::2]
        oy = alpha * tt.dot(a.T, x) + beta * y[::2]
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_a_strides(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        # Give the matrix negative strides in both dimensions.
        a_v = a_v[::-1, ::-1]
        a.set_value(
            a.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
        )
        desired_oy = alpha_v * matrixmultiply(a_v, x_v) + beta_v * y_v
        oy = alpha * tt.dot(a, x) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_a_strides_transpose(self):
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha, beta, a, x, y = [self.shared(v) for v in vs]
        # Give the matrix negative strides in both dimensions.
        a_v = a_v[::-1, ::-1]
        a.set_value(
            a.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
        )
        desired_oy = alpha_v * matrixmultiply(transpose(a_v), x_v) + beta_v * y_v
        oy = alpha * tt.dot(a.T, x) + beta * y
        oy_func = theano.function([], oy, mode=self.mode)
        self.assertFunctionContains1(oy_func, self.gemv)
        oy_v = oy_func()
        assert_array_almost_equal(desired_oy, oy_v)

    def test_upcasting_scalar_nogemv(self):
        # Test that the optimization does not crash when the scale has
        # an incorrect dtype, and forces upcasting of the result
        # We put this test in this class to test it on the gpu too.
        vs = self.get_data()
        alpha_v, beta_v, a_v, x_v, y_v = vs
        alpha_v = alpha_v.astype("float64")
        a_v = a_v.astype("float32")
        x_v = x_v.astype("float32")
        y_v = y_v.astype("float32")
        alpha = tt.dscalar("alpha")
        a = self.shared(a_v)
        x = self.shared(x_v)
        y = self.shared(y_v)
        rval = tt.dot(a, x) * alpha + y
        f = theano.function([alpha], rval, mode=self.mode)
        # this function is currently optimized so that the gemv is
        # done inplace on a temporarily allocated buffer, which is
        # then scaled by alpha and combined with y by a fused elemwise.
        n_gemvs = 0
        # theano.printing.debugprint(f, print_type=True)
        for node in f.maker.fgraph.toposort():
            if node.op == self.gemv_inplace:
                n_gemvs += 1
                # The gemv itself must stay in float32; only the final
                # elemwise produces the upcast float64 result.
                assert node.outputs[0].dtype == "float32"
        assert n_gemvs == 1, n_gemvs
        self.assertFunctionContains1(f, self.gemv_inplace)
        f(alpha_v)
class TestSgemv(BaseGemv, unittest_tools.OptimizationTestMixin):
    # Run the BaseGemv suite in single precision.
    dtype = float32
    gemv = gemv_no_inplace
    gemv_inplace = gemv_inplace
class TestDgemv(BaseGemv, unittest_tools.OptimizationTestMixin):
    # Run the BaseGemv suite in double precision.
    dtype = float64
    gemv = gemv_no_inplace
    gemv_inplace = gemv_inplace
# The optimization to put Gemv don't work for complex type for now.
# See ticket 653.
# class TestCgemv(BaseGemv):
# dtype = complex64
# class TestZgemv(BaseGemv):
# dtype = complex128
###############################################################################
# Tests for Ger
###############################################################################
class TestGerMakeNode:
    """Unit tests for ``Ger.make_node``'s type checking and type inference."""

    def setup_method(self):
        # Vectors for each dtype; the *1 variants are broadcastable
        # (length-1) vectors, the *_2 variants are independent second copies.
        self.iv = tt.tensor(dtype="int32", broadcastable=(False,))
        self.fv = tt.tensor(dtype="float32", broadcastable=(False,))
        self.fv1 = tt.tensor(dtype="float32", broadcastable=(True,))
        self.dv = tt.tensor(dtype="float64", broadcastable=(False,))
        self.dv1 = tt.tensor(dtype="float64", broadcastable=(True,))
        self.cv = tt.tensor(dtype="complex64", broadcastable=(False,))
        self.zv = tt.tensor(dtype="complex128", broadcastable=(False,))
        self.fv_2 = tt.tensor(dtype="float32", broadcastable=(False,))
        self.fv1_2 = tt.tensor(dtype="float32", broadcastable=(True,))
        self.dv_2 = tt.tensor(dtype="float64", broadcastable=(False,))
        self.dv1_2 = tt.tensor(dtype="float64", broadcastable=(True,))
        self.cv_2 = tt.tensor(dtype="complex64", broadcastable=(False,))
        self.zv_2 = tt.tensor(dtype="complex128", broadcastable=(False,))
        # Matrices and scalars for each dtype.
        self.fm = tt.fmatrix()
        self.dm = tt.dmatrix()
        self.cm = tt.cmatrix()
        self.zm = tt.zmatrix()
        self.fa = tt.fscalar()
        self.da = tt.dscalar()
        self.ca = tt.cscalar()
        self.za = tt.zscalar()

    def test_works_on_all_valid_dtypes(self):
        # BUG FIX: this previously asserted the float32 case four times
        # (copy/paste); now each supported dtype is checked once, using
        # the fixtures that setup_method creates for exactly this purpose.
        assert self.fm.type == ger(self.fm, self.fa, self.fv, self.fv_2).type
        assert self.dm.type == ger(self.dm, self.da, self.dv, self.dv_2).type
        assert self.cm.type == ger(self.cm, self.ca, self.cv, self.cv_2).type
        assert self.zm.type == ger(self.zm, self.za, self.zv, self.zv_2).type

    def test_fails_on_invalid_dtypes(self):
        # Integer arguments are not a valid BLAS dtype.
        with pytest.raises(TypeError):
            ger(tt.imatrix(), tt.iscalar(), tt.ivector(), tt.ivector())

    def test_fails_for_nonscalar_alpha(self):
        with pytest.raises(TypeError):
            ger(self.fm, self.fm, self.fv, self.fv_2)
        # boundary case - fv1 has the right dtype and could be dimshuffled to a
        # scalar, but that's not make_node's job.
        with pytest.raises(TypeError):
            ger(self.fm, self.fv1, self.fv, self.fv_2)
        # actually doing the aforementioned dimshuffle makes it work
        assert (
            self.fm.type == ger(self.fm, self.fv1.dimshuffle(), self.fv, self.fv_2).type
        )

    def test_fails_for_nonmatrix_A(self):
        with pytest.raises(TypeError):
            ger(self.fv, self.fa, self.fv, self.fv_2)

    def test_fails_for_nonvector_x_or_y(self):
        # Row matrices (dimshuffled vectors) must be rejected for x and y.
        with pytest.raises(TypeError):
            ger(self.fm, self.fa, self.fv.dimshuffle("x", 0), self.fv_2)
        with pytest.raises(TypeError):
            ger(self.fm, self.fa, self.fv, self.fv_2.dimshuffle("x", 0))

    def test_fails_for_mixed_dtypes(self):
        # All four arguments must share a single dtype.
        with pytest.raises(TypeError):
            ger(self.dm, self.fa, self.fv, self.fv_2)
        with pytest.raises(TypeError):
            ger(self.fm, self.da, self.fv, self.fv_2)
        with pytest.raises(TypeError):
            ger(self.fm, self.fa, self.dv, self.fv_2)
        with pytest.raises(TypeError):
            ger(self.fm, self.fa, self.fv, self.dv_2)
        with pytest.raises(TypeError):
            ger(self.cm, self.fa, self.fv, self.dv_2)
        with pytest.raises(TypeError):
            ger(self.cm, self.fa, self.fv, self.zv_2)
class TestGerOpContract(unittest_tools.OpContractTestMixin):
    """Check that Ger ops satisfy the generic Op contract (eq/hash/str)."""

    def setup_method(self):
        self.ops = [ger, ger_destructive]

    def clone(self, op):
        # Rebuild an equivalent Ger carrying the same destructive flag.
        return Ger(op.destructive)
class TestGer(unittest_tools.OptimizationTestMixin):
    """Tests for the local_gemm_to_ger optimization and Ger execution.

    C and scipy BLAS implementations are excluded from the mode so the
    Python implementation of Ger is what actually runs.
    """

    shared = staticmethod(theano.shared)

    def setup_method(self):
        self.mode = theano.compile.get_default_mode().including("fast_run")
        self.mode = self.mode.excluding("c_blas", "scipy_blas")
        dtype = self.dtype = "float64"  # optimization isn't dtype-dependent
        self.A = tt.tensor(dtype=dtype, broadcastable=(False, False))
        self.a = tt.tensor(dtype=dtype, broadcastable=())
        self.x = tt.tensor(dtype=dtype, broadcastable=(False,))
        self.y = tt.tensor(dtype=dtype, broadcastable=(False,))
        self.ger = ger
        self.ger_destructive = ger_destructive
        self.gemm = gemm_no_inplace

    def function(self, inputs, outputs, updates=None):
        # Small wrapper so every compiled function uses self.mode.
        if updates is None:
            updates = []
        return theano.function(inputs, outputs, self.mode, updates=updates)

    def b(self, bval):
        # Constant beta with this suite's dtype.
        return tt.as_tensor_variable(np.asarray(bval, dtype=self.dtype))

    def test_b_0_triggers_ger(self):
        # test local_gemm_to_ger opt
        assert local_gemm_to_ger.transform(
            None,
            gemm_no_inplace(
                self.A,
                self.a,
                self.x.dimshuffle(0, "x"),
                self.y.dimshuffle("x", 0),
                self.b(0),
            ).owner,
        )

    def test_b_1_triggers_ger(self):
        # test local_gemm_to_ger opt
        assert local_gemm_to_ger.transform(
            None,
            gemm_no_inplace(
                self.A,
                self.a,
                self.x.dimshuffle(0, "x"),
                self.y.dimshuffle("x", 0),
                self.b(1),
            ).owner,
        )

    def test_b_other_does_not_triggers_ger(self):
        # test local_gemm_to_ger opt
        assert not local_gemm_to_ger.transform(
            None,
            gemm_no_inplace(
                self.A,
                self.a,
                self.x.dimshuffle(0, "x"),
                self.y.dimshuffle("x", 0),
                self.b(1.5),
            ).owner,
        )

    def test_b_nonconst_does_not_triggers_ger(self):
        # test local_gemm_to_ger opt
        assert not local_gemm_to_ger.transform(
            None,
            gemm_no_inplace(
                self.A,
                self.a,
                self.x.dimshuffle(0, "x"),
                self.y.dimshuffle("x", 0),
                self.a,
            ).owner,
        )

    def test_outer(self):
        f = self.function([self.x, self.y], tt.outer(self.x, self.y))
        self.assertFunctionContains(f, self.ger_destructive)
        f(np.random.rand(5).astype(self.dtype), np.random.rand(4).astype(self.dtype))

    def test_A_plus_outer(self):
        f = self.function([self.A, self.x, self.y], self.A + tt.outer(self.x, self.y))
        self.assertFunctionContains(f, self.ger)
        f(
            np.random.rand(5, 4).astype(self.dtype),
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )
        # Also run with a non-contiguous (negatively strided) A.
        f(
            np.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )

    def test_A_plus_scaled_outer(self):
        f = self.function(
            [self.A, self.x, self.y], self.A + 0.1 * tt.outer(self.x, self.y)
        )
        self.assertFunctionContains(f, self.ger)
        f(
            np.random.rand(5, 4).astype(self.dtype),
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )
        f(
            np.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )

    def test_scaled_A_plus_scaled_outer(self):
        f = self.function(
            [self.A, self.x, self.y],
            np.asarray(0.2, self.dtype) * self.A
            + np.asarray(0.1, self.dtype) * tt.outer(self.x, self.y),
        )
        # Why gemm? This make the graph simpler did we test that it
        # make it faster?
        self.assertFunctionContains(f, self.gemm)
        f(
            np.random.rand(5, 4).astype(self.dtype),
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )
        f(
            np.random.rand(5, 4).astype(self.dtype)[::-1, ::-1],
            np.random.rand(5).astype(self.dtype),
            np.random.rand(4).astype(self.dtype),
        )

    def given_dtype(self, dtype, M, N):
        # test corner case shape and dtype
        # NOTE(review): the `dtype` argument is currently ignored -- every
        # astype below uses `self.dtype` (float64), so the f32/c64/c128
        # variants all actually run in float64.  "Fixing" this would feed
        # complex data into float64 symbolic variables, so confirm intent
        # before changing it.
        f = self.function(
            [self.A, self.x, self.y], self.A + 0.1 * tt.outer(self.x, self.y)
        )
        self.assertFunctionContains(f, self.ger)
        f(
            np.random.rand(M, N).astype(self.dtype),
            np.random.rand(M).astype(self.dtype),
            np.random.rand(N).astype(self.dtype),
        )
        f(
            np.random.rand(M, N).astype(self.dtype)[::-1, ::-1],
            np.random.rand(M).astype(self.dtype),
            np.random.rand(N).astype(self.dtype),
        )

    def test_f32_0_0(self):
        return self.given_dtype("float32", 0, 0)

    def test_f32_1_0(self):
        return self.given_dtype("float32", 1, 0)

    def test_f32_0_1(self):
        return self.given_dtype("float32", 0, 1)

    def test_f32_1_1(self):
        return self.given_dtype("float32", 1, 1)

    def test_f32_4_4(self):
        return self.given_dtype("float32", 4, 4)

    def test_f32_7_1(self):
        return self.given_dtype("float32", 7, 1)

    def test_f32_1_2(self):
        return self.given_dtype("float32", 1, 2)

    def test_f64_4_5(self):
        return self.given_dtype("float64", 4, 5)

    def test_c64_7_1(self):
        return self.given_dtype("complex64", 7, 1)

    def test_c128_1_9(self):
        return self.given_dtype("complex128", 1, 9)

    def test_inplace(self):
        """The update form must use the destructive (inplace) Ger."""
        A = self.shared(np.random.rand(4, 5).astype(self.dtype))
        f = self.function(
            [self.x, self.y],
            [],
            updates=[
                (A, A + tt.constant(0.1, dtype=self.dtype) * tt.outer(self.x, self.y))
            ],
        )
        self.assertFunctionContains(f, self.ger_destructive)
        f(np.random.rand(4).astype(self.dtype), np.random.rand(5).astype(self.dtype))
        # Repeat with a non-contiguous A buffer.
        A.set_value(
            A.get_value(borrow=True, return_internal_type=True)[::-1, ::-1], borrow=True
        )
        f(np.random.rand(4).astype(self.dtype), np.random.rand(5).astype(self.dtype))
class TestBlasStrides:
dtype = "float64"
shared = staticmethod(tt._shared)
mode = theano.compile.get_default_mode()
mode = mode.including("fast_run").excluding("gpu", "c_blas", "scipy_blas")
rng = np.random.RandomState(seed=unittest_tools.fetch_seed())
def rand(self, *shape):
return theano._asarray(self.rng.rand(*shape), dtype=self.dtype)
def cmp_dot22(self, b_shp, c_shp):
av = np.zeros((0, 0), dtype=self.dtype)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
a = self.shared(av, "a")
b = self.shared(bv, "b")
c = self.shared(cv, "c")
b_t = self.shared(bv.T, "b.T")
c_t = self.shared(cv.T, "c.T")
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nn = theano.function([], [], updates=[(a, tt.dot(b, c))], mode=self.mode)
# print 'class name:', self.__class__.__name__
# theano.printing.debugprint(f_nn)
f_nt = theano.function([], [], updates=[(a, tt.dot(b, c_t.T))], mode=self.mode)
f_tn = theano.function([], [], updates=[(a, tt.dot(b_t.T, c))], mode=self.mode)
f_tt = theano.function(
[], [], updates=[(a, tt.dot(b_t.T, c_t.T))], mode=self.mode
)
# Try with all stride patterns, and all transposed pattern
for step_signs in product((-1, 1), repeat=4):
for step in (1, 2):
b_step1, b_step2, c_step1, c_step2 = (s * step for s in step_signs)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy result
a_n = np.dot(bv[::b_step1, ::b_step2], cv[::c_step1, ::c_step2])
f_nn()
assert np.allclose(a.get_value(), a_n)
f_nt()
assert np.allclose(a.get_value(), a_n)
f_tn()
assert np.allclose(a.get_value(), a_n)
f_tt()
assert np.allclose(a.get_value(), a_n)
def test_dot22(self):
self.cmp_dot22((3, 4), (4, 5))
self.cmp_dot22((1, 4), (4, 5))
self.cmp_dot22((3, 4), (4, 1))
self.cmp_dot22((3, 1), (1, 1))
self.cmp_dot22((1, 4), (4, 1))
self.cmp_dot22((3, 1), (1, 5))
self.cmp_dot22((0, 4), (4, 5))
self.cmp_dot22((0, 4), (4, 1))
self.cmp_dot22((0, 1), (1, 5))
self.cmp_dot22((3, 4), (4, 0))
self.cmp_dot22((3, 0), (0, 5))
self.cmp_dot22((0, 4), (4, 0))
self.cmp_dot22((0, 0), (0, 0))
def cmp_dot22scalar(self, b_shp, c_shp):
av = np.zeros((0, 0), dtype=self.dtype)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
l = np.float32(0.2)
a = self.shared(av, "a")
b = self.shared(bv, "b")
c = self.shared(cv, "c")
b_t = self.shared(bv.T, "b.T")
c_t = self.shared(cv.T, "c.T")
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nn = theano.function([], [], updates=[(a, l * tt.dot(b, c))], mode=self.mode)
f_nt = theano.function(
[], [], updates=[(a, l * tt.dot(b, c_t.T))], mode=self.mode
)
f_tn = theano.function(
[], [], updates=[(a, l * tt.dot(b_t.T, c))], mode=self.mode
)
f_tt = theano.function(
[], [], updates=[(a, l * tt.dot(b_t.T, c_t.T))], mode=self.mode
)
# Try with all stride patterns, and all transposed pattern
for step_signs in product((-1, 1), repeat=4):
for step in (1, 2):
b_step1, b_step2, c_step1, c_step2 = (s * step for s in step_signs)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy result
a_n = l * np.dot(bv[::b_step1, ::b_step2], cv[::c_step1, ::c_step2])
f_nn()
assert np.allclose(a.get_value(), a_n)
f_nt()
assert np.allclose(a.get_value(), a_n)
f_tn()
assert np.allclose(a.get_value(), a_n)
f_tt()
assert np.allclose(a.get_value(), a_n)
def test_dot22scalar(self):
self.cmp_dot22scalar((3, 4), (4, 5))
self.cmp_dot22scalar((1, 4), (4, 5))
self.cmp_dot22scalar((3, 4), (4, 1))
self.cmp_dot22scalar((3, 1), (1, 1))
self.cmp_dot22scalar((1, 4), (4, 1))
self.cmp_dot22scalar((3, 1), (1, 5))
self.cmp_dot22scalar((0, 4), (4, 5))
self.cmp_dot22scalar((0, 4), (4, 1))
self.cmp_dot22scalar((0, 1), (1, 5))
self.cmp_dot22scalar((3, 4), (4, 0))
self.cmp_dot22scalar((3, 0), (0, 5))
self.cmp_dot22scalar((0, 4), (4, 0))
self.cmp_dot22scalar((0, 0), (0, 0))
def cmp_gemm(self, a_shp, b_shp, c_shp):
av = self.rand(*a_shp)
bv = self.rand(*b_shp)
cv = self.rand(*c_shp)
l = np.float32(0.2)
a = self.shared(av, "a")
b = self.shared(bv, "b")
c = self.shared(cv, "c")
a_t = self.shared(av.T, "a.T")
b_t = self.shared(bv.T, "b.T")
c_t = self.shared(cv.T, "c.T")
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
bt_dev = b_t.get_value(borrow=False, return_internal_type=True)
ct_dev = c_t.get_value(borrow=False, return_internal_type=True)
f_nnn = theano.function(
[], [], updates=[(a, (l * a + tt.dot(b, c)))], mode=self.mode
)
f_nnt = theano.function(
[], [], updates=[(a, (l * a + tt.dot(b, c_t.T)))], mode=self.mode
)
f_ntn = theano.function(
[], [], updates=[(a, (l * a + tt.dot(b_t.T, c)))], mode=self.mode
)
f_ntt = theano.function(
[], [], updates=[(a, (l * a + tt.dot(b_t.T, c_t.T)))], mode=self.mode
)
f_tnn = theano.function(
[], [], updates=[(a_t, (l * a_t + tt.dot(b, c).T))], mode=self.mode
)
f_tnt = theano.function(
[], [], updates=[(a_t, (l * a_t + tt.dot(b, c_t.T).T))], mode=self.mode
)
f_ttn = theano.function(
[], [], updates=[(a_t, (l * a_t + tt.dot(b_t.T, c).T))], mode=self.mode
)
f_ttt = theano.function(
[],
[],
updates=[(a_t, (l * a_t + tt.dot(b_t.T, c_t.T).T))],
mode=self.mode,
)
# Try with all stride patterns, and all transposed pattern
for step_signs in product((-1, 1), repeat=6):
for step in (1, 2):
a_step1, a_step2, b_step1, b_step2, c_step1, c_step2 = (
s * step for s in step_signs
)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
c.set_value(c_dev.copy()[::c_step1, ::c_step2], borrow=True)
b_t.set_value(bt_dev.copy()[::b_step2, ::b_step1], borrow=True)
c_t.set_value(ct_dev.copy()[::c_step2, ::c_step1], borrow=True)
# Numpy results
a_n = l * av[::a_step1, ::a_step2] + np.dot(
bv[::b_step1, ::b_step2], cv[::c_step1, ::c_step2]
)
at_n = (
l * av[::a_step1, ::a_step2].T
+ np.dot(bv[::b_step1, ::b_step2], cv[::c_step1, ::c_step2]).T
)
# a's value is updated, so we need to reinitialize it each time
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_nnn()
assert np.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_nnt()
assert np.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_ntn()
assert np.allclose(a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
f_ntt()
assert np.allclose(a.get_value(), a_n)
a_t.set_value(
transpose(a_dev.copy())[::a_step2, ::a_step1], borrow=True
)
f_tnn()
assert np.allclose(a_t.get_value(), at_n)
a_t.set_value(
transpose(a_dev.copy())[::a_step2, ::a_step1], borrow=True
)
f_tnt()
assert np.allclose(a_t.get_value(), at_n)
a_t.set_value(
transpose(a_dev.copy())[::a_step2, ::a_step1], borrow=True
)
f_ttn()
assert np.allclose(a_t.get_value(), at_n)
a_t.set_value(
transpose(a_dev.copy())[::a_step2, ::a_step1], borrow=True
)
f_ttt()
assert np.allclose(a_t.get_value(), at_n)
def test_gemm(self):
self.cmp_gemm((3, 5), (3, 4), (4, 5))
self.cmp_gemm((1, 5), (1, 4), (4, 5))
self.cmp_gemm((3, 1), (3, 4), (4, 1))
self.cmp_gemm((3, 1), (3, 1), (1, 1))
self.cmp_gemm((1, 1), (1, 4), (4, 1))
self.cmp_gemm((3, 5), (3, 1), (1, 5))
self.cmp_gemm((0, 5), (0, 4), (4, 5))
self.cmp_gemm((0, 1), (0, 4), (4, 1))
self.cmp_gemm((0, 5), (0, 1), (1, 5))
self.cmp_gemm((3, 0), (3, 4), (4, 0))
self.cmp_gemm((3, 5), (3, 0), (0, 5))
self.cmp_gemm((0, 0), (0, 4), (4, 0))
self.cmp_gemm((0, 0), (0, 0), (0, 0))
def cmp_gemv(self, a_shp, b_shp, c_shp):
av = self.rand(a_shp)
bv = self.rand(*b_shp)
cv = self.rand(c_shp)
l = np.float32(0.2)
a = self.shared(av, "a")
b = self.shared(bv, "b")
c = self.shared(cv, "c")
b_t = self.shared(bv.T, "b.T")
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
f_n = theano.function(
[], [], updates=[(a, (a + l * tt.dot(b, c)))], mode=self.mode
)
f_t = theano.function(
[], [], updates=[(a, (a + l * tt.dot(b_t.T, c)))], mode=self.mode
)
# Try with all stride patterns, and all transposed pattern
for step_signs in product((1, -1), repeat=4):
for step in (1, 2):
a_step, b_step1, b_step2, c_step = (s * step for s in step_signs)
a.set_value(a_dev.copy()[::a_step], borrow=True)
b.set_value(b_dev.copy()[::b_step1, ::b_step2], borrow=True)
b_t.set_value(
transpose(b_dev.copy())[::b_step2, ::b_step1], borrow=True
)
c.set_value(c_dev.copy()[::c_step], borrow=True)
a_n = av[::a_step] + l * np.dot(bv[::b_step1, ::b_step2], cv[::c_step])
f_n()
assert np.allclose(a.get_value(), a_n), (a.get_value(), a_n)
a.set_value(a_dev.copy()[::a_step], borrow=True)
f_t()
assert np.allclose(a.get_value(), a_n), (a.get_value(), a_n)
def test_gemv(self):
self.cmp_gemv(3, (3, 5), 5)
self.cmp_gemv(1, (1, 5), 5)
self.cmp_gemv(3, (3, 1), 1)
self.cmp_gemv(0, (0, 5), 5)
self.cmp_gemv(3, (3, 0), 0)
self.cmp_gemv(0, (0, 1), 1)
self.cmp_gemv(1, (1, 0), 0)
self.cmp_gemv(0, (0, 0), 0)
def cmp_ger(self, a_shp, b_shp, c_shp):
av = self.rand(*a_shp)
bv = self.rand(b_shp)
cv = self.rand(c_shp)
l = np.float32(0.2)
a = self.shared(av, "a")
b = self.shared(bv, "b")
c = self.shared(cv, "c")
a_t = self.shared(av.T, "a.T")
a_dev = a.get_value(borrow=False, return_internal_type=True)
b_dev = b.get_value(borrow=False, return_internal_type=True)
c_dev = c.get_value(borrow=False, return_internal_type=True)
f_n = theano.function(
[], [], updates=[(a, (a + l * tt.outer(b, c)))], mode=self.mode
)
f_t = theano.function(
[], [], updates=[(a_t, (a_t + l * tt.outer(b, c).T))], mode=self.mode
)
# Try with all stride patterns, and all transposed patterns
for step_signs in product((1, -1), repeat=4):
for step in (1, 2):
a_step1, a_step2, b_step, c_step = (s * step for s in step_signs)
a.set_value(a_dev.copy()[::a_step1, ::a_step2], borrow=True)
a_t.set_value(
transpose(a_dev.copy())[::a_step1, ::a_step2], borrow=True
)
b.set_value(b_dev.copy()[::b_step], borrow=True)
c.set_value(c_dev.copy()[::c_step], borrow=True)
f_n()
n_n = av[::a_step1, ::a_step2] + l * np.outer(
bv[::b_step], cv[::c_step]
)
assert np.allclose(a.get_value(), n_n), (a.get_value(), n_n)
f_t()
n_t = (
av.T[::a_step1, ::a_step2]
+ l * np.outer(bv[::b_step], cv[::c_step]).T
)
assert np.allclose(a_t.get_value(), n_t), (a_t.get_value(), n_t)
def test_ger_strides(self):
self.cmp_ger((3, 5), 3, 5)
self.cmp_ger((1, 5), 1, 5)
self.cmp_ger((3, 1), 3, 1)
self.cmp_ger((0, 5), 0, 5)
self.cmp_ger((3, 0), 3, 0)
self.cmp_ger((0, 1), 0, 1)
self.cmp_ger((1, 0), 1, 0)
self.cmp_ger((0, 0), 0, 0)
    def test_gemm_non_contiguous(self):
        # test_gemm_non_contiguous: Test if GEMM works well with non-contiguous matrices.
        aval = np.ones((6, 2))
        bval = np.ones((2, 7))
        cval = np.arange(7) + np.arange(0, 0.6, 0.1)[:, np.newaxis]
        # Slicing the arrays makes the shared values non-contiguous views.
        a = theano.shared(aval[:3], borrow=True)
        b = theano.shared(bval[:, :5], borrow=True)
        c = theano.shared(cval[:3, :5], borrow=True)
        s = tt.scalar()
        upd_c = s * c + tt.dot(a, b)
        f = theano.function([s], [], updates={c: upd_c})
        # With s == 0 the update reduces to dot(a, b), which is all 2s
        # since both operands are all-ones with inner dimension 2.
        f(0)
        ref_output = np.ones((3, 5)) * 2
        unittest_tools.assert_allclose(c.get_value(), ref_output)
class TestInferShape(unittest_tools.InferShapeTester):
    """Shape-inference checks for the BLAS ops (_dot22, _dot22scalar,
    gemm, gemv, ger) via the InferShapeTester helper."""
    def test_dot22(self):
        """Shape inference for matrix-matrix _dot22."""
        x, y = tt.matrices("xy")
        self._compile_and_check(
            [x, y],
            [_dot22(x, y)],
            [
                np.random.random((2, 3)).astype(config.floatX),
                np.random.random((3, 4)).astype(config.floatX),
            ],
            Dot22,
        )
    def test_dot22scalar(self):
        """Shape inference for scalar-scaled matrix product."""
        x, y = tt.matrices("xy")
        a = tt.scalar("a")
        self._compile_and_check(
            [x, y, a],
            [_dot22scalar(x, y, a)],
            [
                np.random.random((2, 3)).astype(config.floatX),
                np.random.random((3, 4)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
            ],
            Dot22Scalar,
        )
    def test_gemm(self):
        """Shape inference for gemm(z, a, x, y, b) = b*z + a*dot(x, y)."""
        x, y, z = tt.matrices("xyz")
        a = tt.scalar("a")
        b = tt.scalar("b")
        self._compile_and_check(
            [x, y, a, z, b],
            [gemm(z, a, x, y, b)],
            [
                np.random.random((2, 3)).astype(config.floatX),
                np.random.random((3, 4)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
                np.random.random((2, 4)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
            ],
            Gemm,
        )
    def test_gemv(self):
        """Shape inference for gemv(y, a, A, x, b) = b*y + a*dot(A, x)."""
        A = tt.matrix("A")
        x, y = tt.vectors("xy")
        a = tt.scalar("a")
        b = tt.scalar("b")
        self._compile_and_check(
            [y, a, A, x, b],
            [gemv(y, a, A, x, b)],
            [
                np.random.random((2,)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
                np.random.random((2, 3)).astype(config.floatX),
                np.random.random((3,)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
            ],
            Gemv,
        )
    def test_ger(self):
        """Shape inference for ger(A, a, x, y) = A + a*outer(x, y)."""
        A = tt.matrix("A")
        x, y = tt.vectors("xy")
        a = tt.scalar("a")
        self._compile_and_check(
            [A, a, x, y],
            [ger(A, a, x, y)],
            [
                np.random.random((2, 3)).astype(config.floatX),
                np.asarray(0.5, dtype=config.floatX),
                np.random.random((2,)).astype(config.floatX),
                np.random.random((3,)).astype(config.floatX),
            ],
            Ger,
        )
| 35.943297 | 89 | 0.525365 |
7f943af1f38b47d5e26b9cffcd23ec803a3fa083 | 11,325 | py | Python | daal4py/sklearn/cluster/k_means.py | Surfndez/daal4py | f0dfd8684e45bf298a61ca9efb0cea8314952b3f | [
"Apache-2.0"
] | 1 | 2020-06-07T23:23:25.000Z | 2020-06-07T23:23:25.000Z | daal4py/sklearn/cluster/k_means.py | masdevas/daal4py | 8530377aac4c629102f9cad62a569f18d33e4458 | [
"Apache-2.0"
] | 1 | 2021-01-21T12:13:36.000Z | 2021-01-21T12:13:36.000Z | daal4py/sklearn/cluster/k_means.py | masdevas/daal4py | 8530377aac4c629102f9cad62a569f18d33e4458 | [
"Apache-2.0"
] | null | null | null | #
#*******************************************************************************
# Copyright 2014-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#******************************************************************************/
import numpy as np
from scipy import sparse as sp
from sklearn.utils import (check_random_state, check_array)
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.utils.validation import (check_is_fitted, _num_samples)
from sklearn.cluster.k_means_ import (k_means, _labels_inertia, _k_init, _validate_center_shape)
import sys
if sys.version_info[0] == 2:
from sklearn.externals.six import string_types
else:
string_types = str
from sklearn.utils.extmath import row_norms
from sklearn.cluster import KMeans as KMeans_original
import daal4py
from ..utils import getFPType
def _daal_mean_var(X):
    """Return the mean of the per-feature variances of dense X.

    Uses DAAL's low-order-moments twice: once over X to get the centered
    sums of squares per feature, then once over those sums to total them.
    Falls back to numpy when this daal4py build lacks low_order_moments.
    """
    fpt = getFPType(X)
    try:
        alg = daal4py.low_order_moments(fptype=fpt, method='defaultDense', estimatesToCompute='estimatesAll')
    except AttributeError:
        # daal4py build without low_order_moments: plain numpy fallback.
        return np.var(X, axis=0).mean()
    ssc = alg.compute(X).sumSquaresCentered
    ssc = ssc.reshape((-1,1))
    alg = daal4py.low_order_moments(fptype=fpt, method='defaultDense', estimatesToCompute='estimatesAll')
    ssc_total_res = alg.compute(ssc)
    # Total centered sum of squares over all entries divided by count.
    mean_var = ssc_total_res.sum / X.size
    return mean_var[0, 0]
def _tolerance(X, rtol):
    """Compute absolute tolerance from the relative tolerance"""
    if rtol == 0.0:
        return rtol
    if sp.issparse(X):
        # Sparse path: sklearn helper gives per-feature variances.
        variances = mean_variance_axis(X, axis=0)[1]
        mean_var = np.mean(variances)
    else:
        # Dense path: DAAL-accelerated mean of per-feature variances.
        mean_var = _daal_mean_var(X)
    return mean_var * rtol
def _daal4py_compute_starting_centroids(X, X_fptype, nClusters, cluster_centers_0, random_state):
    """Compute initial centroids for DAAL k-means.

    `cluster_centers_0` mirrors sklearn's `init`: 'k-means++', 'random',
    'deterministic', an array of centers, or a callable
    (X, n_clusters, random_state) -> centers.
    Returns an (nClusters, n_features) centroid array.
    """
    def is_string(s, target_str):
        return isinstance(s, string_types) and s == target_str
    if is_string(cluster_centers_0, 'k-means++'):
        _seed = random_state.randint(np.iinfo('i').max)
        daal_engine = daal4py.engines_mt19937(fptype=X_fptype, method='defaultDense', seed=_seed)
        # Same number of local trials sklearn's _k_init uses.
        _n_local_trials = 2 + int(np.log(nClusters))
        kmeans_init = daal4py.kmeans_init(nClusters, fptype=X_fptype,
                                          nTrials=_n_local_trials, method='plusPlusDense', engine=daal_engine)
        kmeans_init_res = kmeans_init.compute(X)
        centroids_ = kmeans_init_res.centroids
    elif is_string(cluster_centers_0, 'random'):
        _seed = random_state.randint(np.iinfo('i').max)
        daal_engine = daal4py.engines_mt19937(seed=_seed, fptype=X_fptype, method='defaultDense')
        kmeans_init = daal4py.kmeans_init(nClusters, fptype=X_fptype, method='randomDense', engine=daal_engine)
        kmeans_init_res = kmeans_init.compute(X)
        centroids_ = kmeans_init_res.centroids
    elif hasattr(cluster_centers_0, '__array__'):
        # Explicit array of starting centers supplied by the caller.
        cc_arr = np.ascontiguousarray(cluster_centers_0, dtype=X.dtype)
        _validate_center_shape(X, nClusters, cc_arr)
        centroids_ = cc_arr
    elif callable(cluster_centers_0):
        cc_arr = cluster_centers_0(X, nClusters, random_state)
        cc_arr = np.ascontiguousarray(cc_arr, dtype=X.dtype)
        _validate_center_shape(X, nClusters, cc_arr)
        centroids_ = cc_arr
    elif is_string(cluster_centers_0, 'deterministic'):
        # DAAL's defaultDense init: first nClusters observations.
        kmeans_init = daal4py.kmeans_init(nClusters, fptype=X_fptype, method='defaultDense')
        kmeans_init_res = kmeans_init.compute(X)
        centroids_ = kmeans_init_res.centroids
    else:
        raise ValueError("Cluster centers should either be 'k-means++', 'random', 'deterministic' or an array")
    return centroids_
def _daal4py_k_means_dense(X, nClusters, numIterations, tol, cluster_centers_0, n_init, random_state):
    """Run DAAL's dense k-means, keeping the best of `n_init` restarts.

    Returns (centroids, labels, inertia, n_iter). With numIterations == 0
    only labels are assigned (used by predict); centroids and inertia stay
    None in that case.
    """
    if numIterations < 0:
        raise ValueError("Wrong iterations number")
    if hasattr(X, '__array__'):
        X_fptype = getFPType(X)
    else:
        raise NotImplementedError("""Unsupported input type {} encountered in DAAL-based optimization of KMeans.
You can disable DAAL-based optimizations of scikit-learn with sklearn.daal4sklearn.dispatcher.disable()""".format(type(X)))
    abs_tol = _tolerance(X, tol) # tol is relative tolerance
    best_labels, best_inertia, best_cluster_centers = None, None, None
    best_n_iter = -1
    if numIterations == 0:
        # Assignment-only mode: restarts would all give the same labels.
        n_init = 1
    kmeans_algo = daal4py.kmeans(
        nClusters = nClusters,
        maxIterations = numIterations,
        assignFlag = True,
        accuracyThreshold = abs_tol,
        fptype = X_fptype,
        # gamma = 1.0, # only relevant for categorical features of which we should have none
        method = 'defaultDense') #,
        # distanceType = 'euclidean')
    for k in range(n_init):
        starting_centroids_ = _daal4py_compute_starting_centroids(
            X, X_fptype, nClusters, cluster_centers_0, random_state)
        res = kmeans_algo.compute(X, starting_centroids_)
        # Per documentation, with numIterations == 0, centroids and goalFunction are not updated
        if numIterations == 0:
            best_labels = res.assignments[:,0]
            best_n_iter = int(res.nIterations[0,0])
            break
        else:
            inertia = res.goalFunction[0,0]
            if best_inertia is None or inertia < best_inertia:
                best_labels = res.assignments.ravel()
                best_cluster_centers = res.centroids
                if n_init > 1:
                    # Copy so the next restart cannot overwrite the winner.
                    best_labels = best_labels.copy()
                    best_cluster_centers = best_cluster_centers.copy()
                best_inertia = inertia
                best_n_iter = int(res.nIterations[0,0])
    return best_cluster_centers, best_labels, best_inertia, best_n_iter
def fit(self, X, y=None, sample_weight=None):
    """Compute k-means clustering.
    Parameters
    ----------
    X : array-like or sparse matrix, shape=(n_samples, n_features)
        Training instances to cluster. It must be noted that the data
        will be converted to C ordering, which will cause a memory
        copy if the given data is not C-contiguous.
    y : Ignored
        not used, present here for API consistency by convention.
    sample_weight : array-like, shape (n_samples,), optional
        The weights for each observation in X. If None, all observations
        are assigned equal weight (default: None)
    """
    if self.n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % self.n_init)
    random_state = check_random_state(self.random_state)
    if self.max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % self.max_iter)
    if self.precompute_distances == 'auto':
        precompute_distances = False
    elif isinstance(self.precompute_distances, bool):
        precompute_distances = self.precompute_distances
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         self.precompute_distances)
    # avoid forcing order when copy_x=False
    order = "C" if self.copy_x else None
    X = check_array(X, accept_sparse='csr', dtype=[np.float64, np.float32],
                    order=order, copy=self.copy_x)
    # The DAAL path handles only dense data without precomputed distances,
    # with n_clusters <= n_samples and uniform (or absent) sample weights;
    # everything else falls back to stock scikit-learn k_means.
    daal_ready = not sp.issparse(X) and not precompute_distances
    daal_ready = daal_ready and hasattr(X, '__array__')
    if daal_ready:
        X_len = _num_samples(X)
        daal_ready = (self.n_clusters <= X_len)
        if daal_ready and sample_weight is not None:
            sample_weight = np.asarray(sample_weight)
            daal_ready = (sample_weight.shape[0] == X_len) and (
                np.allclose(sample_weight, np.ones_like(sample_weight)))
    if not daal_ready:
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, sample_weight=sample_weight, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose,
                precompute_distances=precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs, algorithm=self.algorithm,
                return_n_iter=True)
    else:
        X = check_array(X, dtype=[np.float64, np.float32])
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            _daal4py_k_means_dense(
                X, self.n_clusters, self.max_iter, self.tol, self.init, self.n_init,
                random_state)
    return self
def predict(self, X, sample_weight=None):
    """Predict the closest cluster each sample in X belongs to.
    In the vector quantization literature, `cluster_centers_` is called
    the code book and each value returned by `predict` is the index of
    the closest code in the code book.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]
        New data to predict.
    sample_weight : array-like, shape (n_samples,), optional
        The weights for each observation in X. If None, all observations
        are assigned equal weight (default: None)
    Returns
    -------
    labels : array, shape [n_samples,]
        Index of the cluster each sample belongs to.
    """
    check_is_fitted(self, 'cluster_centers_')
    X = self._check_test_data(X)
    daal_ready = sample_weight is None and hasattr(X, '__array__')  # or sp.isspmatrix_csr(X)
    if daal_ready:
        # maxIterations=0 makes DAAL assign labels against the fitted
        # centers without updating them; [1] selects the labels.
        return _daal4py_k_means_dense(X, self.n_clusters, 0, 0.0, self.cluster_centers_, 1, None)[1]
    else:
        # Stock scikit-learn assignment path.
        x_squared_norms = row_norms(X, squared=True)
        return _labels_inertia(X, sample_weight, x_squared_norms,
                               self.cluster_centers_)[0]
_fit_copy = fit
_predict_copy = predict
class KMeans(KMeans_original):
    __doc__ = KMeans_original.__doc__
    # Drop-in replacement for sklearn's KMeans whose fit/predict delegate to
    # the module-level DAAL-accelerated implementations (_fit_copy /
    # _predict_copy) while keeping the stock constructor signature.
    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True,
                 n_jobs=None, algorithm='auto'):
        super(KMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            tol=tol, precompute_distances=precompute_distances,
            n_init=n_init, verbose=verbose, random_state=random_state,
            copy_x=copy_x, n_jobs=n_jobs, algorithm=algorithm)
    def fit(self, X, y=None, sample_weight=None):
        # Delegates to the DAAL-aware module-level fit.
        return _fit_copy(self, X, y=y, sample_weight=sample_weight)
    def predict(self, X, sample_weight=None):
        # Delegates to the DAAL-aware module-level predict.
        return _predict_copy(self, X, sample_weight=sample_weight)
| 40.446429 | 131 | 0.664371 |
56c93daf82647235685fde6d9502de3c44bedb5b | 451 | py | Python | ads_maker/admin.py | abi83/kromm.info | cbdc91627ba4afb6151109668d8b05fea93f3dc3 | [
"MIT"
] | null | null | null | ads_maker/admin.py | abi83/kromm.info | cbdc91627ba4afb6151109668d8b05fea93f3dc3 | [
"MIT"
] | null | null | null | ads_maker/admin.py | abi83/kromm.info | cbdc91627ba4afb6151109668d8b05fea93f3dc3 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import WSite, SiteMap
class SiteAdmin(admin.ModelAdmin):
    """Django admin configuration for WSite records."""
    list_display = ('pk', 'url', 'created_at',)
    search_fields = ('url',)
    list_filter = ('created_at',)
class SiteMapAdmin(admin.ModelAdmin):
    """Django admin configuration for SiteMap records."""
    list_display = ('pk', 'url', 'created_at', 'site',)
    search_fields = ('url',)
    list_filter = ("site",)
admin.site.register(WSite, SiteAdmin)
admin.site.register(SiteMap, SiteMapAdmin) | 25.055556 | 55 | 0.687361 |
f294e7077213c17678b46cb42431baa45305c6c5 | 9,192 | py | Python | tensorflow_quantum/python/differentiators/linear_combination_test.py | dlyongemallo/tensorflow-quantum | 22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61 | [
"Apache-2.0"
] | 1 | 2020-03-10T04:12:46.000Z | 2020-03-10T04:12:46.000Z | tensorflow_quantum/python/differentiators/linear_combination_test.py | dlyongemallo/tensorflow-quantum | 22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61 | [
"Apache-2.0"
] | null | null | null | tensorflow_quantum/python/differentiators/linear_combination_test.py | dlyongemallo/tensorflow-quantum | 22cf3058ae5bfb4f0ca7ed70cb691bd7be650e61 | [
"Apache-2.0"
] | 1 | 2020-03-12T07:19:12.000Z | 2020-03-12T07:19:12.000Z | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic tests for the LinearCombinationDifferentiator"""
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import sympy
import cirq
from tensorflow_quantum.core.ops import circuit_execution_ops
from tensorflow_quantum.python import util
from tensorflow_quantum.python.differentiators import linear_combination
def _simple_op_inputs():
    """Build a one-qubit Y-rotation test case for the expectation ops.

    <X> for |psi> = Y**alpha |0> is sin(pi*alpha), whose derivative w.r.t.
    alpha is pi*cos(pi*alpha); both are returned as reference tensors.
    """
    qubit = cirq.GridQubit(0, 0)
    symbol = 'alpha'
    circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol(symbol))
    op = cirq.X(qubit)
    value = 0.3
    n_samples = 2000
    # Return inputs prepped for expectation ops.
    # circuit, symbol_names, values, ops, n_samples
    # along with expected feedforward expectation
    # and expected gradient.
    return (util.convert_to_tensor([circuit]), tf.convert_to_tensor([symbol]),
            tf.convert_to_tensor([[value]]), util.convert_to_tensor([[op]]),
            tf.convert_to_tensor([[n_samples]]),
            tf.convert_to_tensor([[np.sin(np.pi * value)]]),
            tf.convert_to_tensor([[np.pi * np.cos(np.pi * value)]]))
class LinearCombinationTest(tf.test.TestCase, parameterized.TestCase):
    """Test the LinearCombination based Differentiators."""
    def test_linear_combination_instantiate(self):
        """Test LinearCombinationDifferentiator type checking."""
        linear_combination.LinearCombination([1, 1], [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weights must be"):
            linear_combination.LinearCombination("junk", [1, 0])
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="perturbations must be"):
            linear_combination.LinearCombination([1, 1], "junk")
        with self.assertRaisesRegex(TypeError,
                                    expected_regex="weight in weights"):
            linear_combination.LinearCombination([1, "junk"], [1, 0])
        with self.assertRaisesRegex(
                TypeError, expected_regex="perturbation in perturbations"):
            linear_combination.LinearCombination([1, 1], [1, "junk"])
        with self.assertRaisesRegex(ValueError, expected_regex="length"):
            linear_combination.LinearCombination([1, 1, 1], [1, 0])
        with self.assertRaisesRegex(ValueError, expected_regex="unique"):
            linear_combination.LinearCombination([1, 1], [1, 1])
    def test_forward_instantiate(self):
        """Test ForwardDifference type checking."""
        linear_combination.ForwardDifference()
        linear_combination.ForwardDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive integer"):
            linear_combination.ForwardDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.ForwardDifference(1, 1j)
    # Each case is (order, expected stencil coefficients, expected grid
    # offsets) for the standard forward-difference formulas.
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(1, (-1, 1), (
                        0, 1)), (2, (-3 / 2, 2, -1 / 2), (0, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_forward_coeffecients(self, order_coef_perturbs, grid_spacing):
        """Test that ForwardDifference produces the right coeffecients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        forward = linear_combination.ForwardDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, forward.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            forward.perturbations)
    def test_central_instantiate(self):
        """Test CentralDifference type checking."""
        linear_combination.CentralDifference()
        linear_combination.CentralDifference(2, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0.1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(-1, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(0, 0.1)
        with self.assertRaisesRegex(ValueError,
                                    expected_regex="positive, even"):
            linear_combination.CentralDifference(1, 0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, -0.1)
        with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"):
            linear_combination.CentralDifference(2, 1j)
    @parameterized.parameters(
        list(
            util.kwargs_cartesian_product(
                **{
                    'order_coef_perturbs': [(2, (-1 / 2, 1 / 2), (-1, 1)),
                                            (4, (1 / 12, -8 / 12, 8 / 12,
                                                 -1 / 12), (-2, -1, 1, 2))],
                    'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05]
                })))
    def test_central_coefficients(self, order_coef_perturbs, grid_spacing):
        """Test that CentralDifference produces the right coefficients for
        common first and second order cases."""
        order = order_coef_perturbs[0]
        expected_std_coeffs = order_coef_perturbs[1]
        expected_perturbations = order_coef_perturbs[2]
        forward = linear_combination.CentralDifference(order, grid_spacing)
        self.assertAllClose(
            np.array(expected_std_coeffs) / grid_spacing, forward.weights)
        self.assertAllClose(
            np.array(expected_perturbations) * grid_spacing,
            forward.perturbations)
    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_analytic_functional(self, diff):
        """Test that the differentiate_analytic function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            analytic_op=circuit_execution_ops.get_expectation_op())
        circuit, names, values, ops, _, true_f, true_g = _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops)
        # Just check that it computes without failing.
        self.assertAllClose(true_f, res, atol=1e-2, rtol=1e-2)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-2,
                            rtol=1e-2)
    @parameterized.parameters([{
        'diff': linear_combination.ForwardDifference()
    }, {
        'diff': linear_combination.CentralDifference()
    }])
    def test_sampled_functional(self, diff):
        """Test that the differentiate_sampled function WORKS."""
        differentiable_op = diff.generate_differentiable_op(
            sampled_op=circuit_execution_ops.get_sampled_expectation_op())
        circuit, names, values, ops, n_samples, true_f, true_g = \
            _simple_op_inputs()
        with tf.GradientTape() as g:
            g.watch(values)
            res = differentiable_op(circuit, names, values, ops, n_samples)
        # Just check that it computes without failing.
        # Looser tolerances than the analytic case due to sampling noise.
        self.assertAllClose(true_f, res, atol=1e-1, rtol=1e-1)
        self.assertAllClose(true_g,
                            g.gradient(res, values),
                            atol=1e-1,
                            rtol=1e-1)
# Run the TensorFlow test harness when executed directly.
if __name__ == "__main__":
    tf.test.main()
| 46.659898 | 80 | 0.625109 |
a3c76ab901a039027611b0843f414a5a3c72f748 | 1,334 | py | Python | jdcloud_sdk/services/waf/models/ListRiskCommonReq.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/waf/models/ListRiskCommonReq.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/waf/models/ListRiskCommonReq.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class ListRiskCommonReq(object):
    """Request parameters for listing risk entries of a WAF-protected domain."""

    def __init__(self, wafInstanceId, domain, id=None, rulesFilter=None, pageIndex=None, pageSize=None):
        """
        :param wafInstanceId: WAF instance id
        :param domain: domain name
        :param id: (Optional) request id; 0 requests everything, 1 fetches the
            UsrList for the given id
        :param rulesFilter: (Optional) filter keyword applied to UsrList
            entries when id > 0
        :param pageIndex: (Optional) page number in [1-100], default 1
        :param pageSize: (Optional) page size in [1-100], default 10
        """
        # Required identifiers.
        self.wafInstanceId = wafInstanceId
        self.domain = domain
        # Optional query refinements.
        self.id = id
        self.rulesFilter = rulesFilter
        # Optional pagination controls.
        self.pageIndex = pageIndex
        self.pageSize = pageSize
| 35.105263 | 104 | 0.697151 |
b0ab08f7e799fd2fff48cd2e303a8153d5b63d0e | 2,899 | py | Python | images_to_video.py | ZekeMedley/joy-divisions-cover-generation | a69ba13a737f11dc83aef9e3bad97d8f5c776725 | [
"MIT"
] | 3 | 2018-07-28T22:11:20.000Z | 2019-09-10T17:50:44.000Z | images_to_video.py | ZekeMedley/joy-divisions-cover-generation | a69ba13a737f11dc83aef9e3bad97d8f5c776725 | [
"MIT"
] | null | null | null | images_to_video.py | ZekeMedley/joy-divisions-cover-generation | a69ba13a737f11dc83aef9e3bad97d8f5c776725 | [
"MIT"
] | null | null | null | # take filenames and output filename
def images_to_video(filenames, output, fps):
    '''
    builds a video from a list of images
    IN: list of filenames for images, where to output, frames per second
    OUT: filename for output video
    Raises ValueError if `filenames` is empty or an image cannot be read,
    instead of the confusing AttributeError the old code produced.
    '''
    from cv2 import VideoWriter, VideoWriter_fourcc, imread, destroyAllWindows
    if not filenames:
        # Previously vid stayed None and vid.release() crashed.
        raise ValueError("images_to_video: no input images given")
    print("CREATING VIDEO")
    fourcc = VideoWriter_fourcc(*'mp4v')
    vid = None
    for file in filenames:
        img = imread(file)
        if img is None:
            # cv2.imread signals failure by returning None, not by raising.
            raise ValueError("images_to_video: could not read image {}".format(file))
        # now that we have an image we can setup vid
        if vid is None:
            # VideoWriter wants (width, height); numpy shape is (height, width, ...).
            size = img.shape[1], img.shape[0]
            print("SIZE:\t{}".format(size))
            vid = VideoWriter(output, fourcc, float(fps), size)
        vid.write(img)
    vid.release()
    destroyAllWindows()
    return output
def add_audio(video, audio):
    '''
    combines audio and visual channels into new .mp4
    IN: video filename, audio filename
    OUT: saves a video to audio_+video_filename
    Requires ffmpeg on PATH. Arguments are passed as a list (shell=False),
    so filenames with spaces or shell metacharacters are handled safely.
    '''
    import subprocess
    print("ADDING AUDIO")
    # Argument list instead of an interpolated shell string: no quoting
    # bugs and no shell-injection risk from odd filenames.
    cmd = ["ffmpeg", "-i", video, "-i", audio,
           "-vcodec", "copy", "audio_{}".format(video)]
    print("CALLING\t{}".format(" ".join(cmd)))
    subprocess.call(cmd)
# duration specifies the number of seconds between frames
# lowest supported value is 0.1 I think
def create_gif(filenames, output, duration):
    '''
    IN: filenames to build gif from, where to save gif, time between frames
    OUT: saves a .gif to the requested output
    '''
    import imageio
    print("BUILDING GIF")
    # Load every frame, then write them out as an animated gif.
    frames = [imageio.imread(name) for name in filenames]
    imageio.mimsave(output, frames, duration=duration)
def video_from_folder(folder, output, fps):
    '''
    builds a video from all the .pngs in a folder
    IN: folder, output location, frames per second
    OUT: save location
    Raises ValueError if the folder has no readable .png files. Frames are
    written in sorted filename order (os.listdir gives no ordering
    guarantee, so the old code produced videos with arbitrary frame order).
    '''
    import os
    from cv2 import VideoWriter, VideoWriter_fourcc, imread, destroyAllWindows
    print("GETTING .PNG FROM:\t{}".format(folder))
    # Sorted for a deterministic frame order; comprehension replaces the
    # remove-while-iterating-a-copy idiom.
    filenames = sorted(f for f in os.listdir(folder) if f.endswith(".png"))
    if not filenames:
        # Previously vid stayed None and vid.release() crashed.
        raise ValueError("video_from_folder: no .png files in {}".format(folder))
    print("CREATING VIDEO")
    fourcc = VideoWriter_fourcc(*'mp4v')
    vid = None
    for file in filenames:
        img = imread("{}/{}".format(folder, file))
        if img is None:
            # cv2.imread signals failure by returning None, not by raising.
            raise ValueError("video_from_folder: could not read {}".format(file))
        # now that we have an image we can setup vid
        if vid is None:
            # VideoWriter wants (width, height); numpy shape is (height, width, ...).
            size = img.shape[1], img.shape[0]
            print("SIZE:\t{}".format(size))
            vid = VideoWriter(output, fourcc, float(fps), size)
        vid.write(img)
    vid.release()
    destroyAllWindows()
    return output
# output = video_from_folder('covers', 'ultralight.mp4', 30)
# add_audio("ultralight.mp4", "ultralight.wav")
| 30.840426 | 80 | 0.64229 |
2f0e7553d0d72552b3a6210e5e6f31bc44ceb14c | 1,299 | py | Python | SummaryRanges.py | pravindra01/DS_And_AlgorithmsPractice | 1f6a1c7d206676d0e1ffcd77b64c4732ec7763d8 | [
"MIT"
] | null | null | null | SummaryRanges.py | pravindra01/DS_And_AlgorithmsPractice | 1f6a1c7d206676d0e1ffcd77b64c4732ec7763d8 | [
"MIT"
] | null | null | null | SummaryRanges.py | pravindra01/DS_And_AlgorithmsPractice | 1f6a1c7d206676d0e1ffcd77b64c4732ec7763d8 | [
"MIT"
] | null | null | null | class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
if (len(nums) == 0):
return []
incrementCounter = nums[0]
retList = []
add = True
lindex = 0
for i in range(0, len(nums)):
if incrementCounter == nums[i]:
if lindex != len(retList) -1:
retList.append(str(nums[i]))
add = False
else:
add = True
incrementCounter += 1
else:
myVal = retList[lindex]
if(myVal != str(nums[i-1])):
del retList[lindex]
retList.append("%s->%s"%(myVal,nums[i-1]))
add = False
incrementCounter = nums[i] + 1
lindex += 1
retList.append(str(nums[i]))
if add:
myVal = retList[lindex]
if(myVal != str(nums[-1])):
del retList[lindex]
retList.append("%s->%s"%(myVal,nums[-1]))
return retList
if __name__ == "__main__":
    # Avoid shadowing the builtin `input`; single-argument print() works
    # under both Python 2 and 3.
    nums = [0, 2, 3, 4, 6, 8, 9]
    # Expected output: ["0", "2->4", "6", "8->9"]
    # (the old comment claimed ["0->2","4->5","7"], which belongs to a
    # different input list)
    test = Solution()
    print(test.summaryRanges(nums))
7db4a009b5b82182e1cddae25b3e1302482ae9a5 | 1,385 | py | Python | nlabot/stickers.py | maremun/nlabot | f28e93ff6b3a16a22f28aad2054b2b7364e39b60 | [
"MIT"
] | 7 | 2017-11-02T21:33:23.000Z | 2018-10-26T11:37:54.000Z | nlabot/stickers.py | maremun/nlabot | f28e93ff6b3a16a22f28aad2054b2b7364e39b60 | [
"MIT"
] | 3 | 2017-11-12T10:58:36.000Z | 2017-11-26T10:47:05.000Z | nlabot/stickers.py | maremun/nlabot | f28e93ff6b3a16a22f28aad2054b2b7364e39b60 | [
"MIT"
] | null | null | null | # encoding: utf-8
# stickers.py
from random import randint
# TODO: use getStickers to get sticker pack with ids
SUCCESS = ['CAADBAAD0gIAAlI5kwbFm0b9e6ezGQI',
'CAADAgAD4QADNuwbBW5uM6aRdOCbAg',
'CAADBAADuAMAAlI5kwbThupzSIc0CQI',
'CAADAgADdAcAAlOx9wP2aGQDAAEeKfQC',
'CAADBQAD-gADR5XbAWtSgBKOL_fFAg']
SORRY = ['CAADAgADBAADijc4AAFx0NNqDnJm4QI', 'CAADAgADBgADijc4AAH50MoMENn2lQI',
'CAADAgADDgADijc4AAGOGq6J30OGfwI', 'CAADAgADEgADijc4AAF00GirhpifXQI',
'CAADAgADFAADijc4AAGtl5dISqHmiAI', 'CAADAgADFgADijc4AAErJ-ihzzsO7wI',
'CAADAgADGwADijc4AAEdwByBSe9kgQI', 'CAADAgADHQADijc4AAEw0RBgpCTPAAEC',
'CAADAgADHwADijc4AAFXWsuIC4i6fAI']
TRY = ['CAADAgADSwAD4FP5CycQs-qvf8GBAg', 'CAADBAAD2AIAAlI5kwa4IYnU6rFSuAI',
'CAADBAADzAIAAlI5kwZs9nlnbC5cTgI', 'CAADBAADlwMAAlI5kwayXLLrd21tpAI',
'CAADBAAD1gIAAlI5kwbUol2GEfKhHQI', 'CAADBAAD2gIAAlI5kwZfjbEodl4riQI',
'CAADAgADTgAD4FP5C_W-1YHvi0cYAg', 'CAADAgADFAAD4FP5C75navrL1cHAAg',
'CAADAgADFwAD4FP5C0q5UW3A8qxPAg', 'CAADAgADFgAD4FP5C-_9iCNax_siAg',
'CAADAgADGAAD4FP5C0gLHKTxveR4Ag', 'CAADAgADKwAD4FP5CzoKTHHnVGQoAg',
'CAADAgADRwAD4FP5Cw8GVjPle2rxAg'] + SORRY
FAIL = ['CAADAgADYwEAAjbsGwXkTe2zgRvwWAI',
'CAADAgADJQEAAjbsGwX1CuOrgYRKAAEC']
def get_random_sticker(group):
    """Return a sticker file_id chosen uniformly at random from `group`."""
    return group[randint(0, len(group) - 1)]
| 44.677419 | 79 | 0.760289 |
5be48407e02bac96946526e1bf9bcc05e9788447 | 3,379 | py | Python | tests/test_background_poke.py | standanley/fault | 7418103744e3ec5b5d142e0a152f08b7feef6a1e | [
"BSD-3-Clause"
] | null | null | null | tests/test_background_poke.py | standanley/fault | 7418103744e3ec5b5d142e0a152f08b7feef6a1e | [
"BSD-3-Clause"
] | null | null | null | tests/test_background_poke.py | standanley/fault | 7418103744e3ec5b5d142e0a152f08b7feef6a1e | [
"BSD-3-Clause"
] | null | null | null | import magma as m
import fault
import tempfile
import pytest
from pathlib import Path
from .common import pytest_sim_params, TestBasicCircuit
def plot(xs, ys):
    """Scatter-plot xs against ys with a grid (blocking; debugging helper)."""
    import matplotlib.pyplot as plt
    plt.plot(xs, ys, '*')
    plt.grid()
    plt.show()
def pytest_generate_tests(metafunc):
    """Parametrize the tests with (target, simulator) pairs.

    Only the system-verilog target is enabled; the verilator and spice
    variants are intentionally commented out.
    """
    #pytest_sim_params(metafunc, 'verilator', 'system-verilog')
    #pytest_sim_params(metafunc, 'spice')
    pytest_sim_params(metafunc, 'system-verilog')
#@pytest.mark.skip(reason='Not yet implemented')
def test_clock_verilog(target, simulator):
    """Check background-poke clock behavior on the basic circuit.

    NOTE(review): the fixture-provided `simulator` is overwritten with
    'ncsim' below (per the TODO), so the parametrization is currently
    ignored — confirm before relying on multi-simulator coverage.
    """
    print('target, sim', target, simulator)
    # TODO delete the next line; my iverilog is just broken so I can't test it
    simulator = 'ncsim'
    circ = TestBasicCircuit
    tester = fault.Tester(circ)
    tester.zero_inputs()
    tester.poke(circ.I, 1)
    #tester.eval()
    tester.expect(circ.O, 1)
    # register clock
    # Background poke: I toggles on its own with the given frequency and
    # duty cycle after this call.
    tester.poke(circ.I, 0, delay={
        'freq': 0.125,
        'duty_cycle': 0.625,
        # take default initial_value of 0
    })
    tester.expect(circ.O, 1) # should fail
    tester.expect(circ.O, 0) # should fail
    tester.expect(circ.O, 0, save_for_later=True)
    tester.print("%08x", circ.O)
    #with tempfile.TemporaryDirectory(dir=".") as _dir:
    #with open('build/') as _dir:
    # NOTE(review): builds into ./build instead of a temp dir (see the
    # commented-out alternatives above).
    if True:
        _dir = 'build'
        if target == "verilator":
            tester.compile_and_run(target, directory=_dir, flags=["-Wno-fatal"])
        else:
            tester.compile_and_run(target, directory=_dir, simulator=simulator)
    print('JUST FINISHED COMPILENANDRUN')
@pytest.mark.skip(reason='Turn this back on later')
def test_sin_spice(vsup=1.5, vil_rel=0.4, vih_rel=0.6,
                   vol_rel=0.1, voh_rel=0.9):
    """Drive an inverter input with a background sine poke via ngspice and
    plot the sampled waveform.

    NOTE(review): vil_rel/vih_rel/vol_rel/voh_rel are currently unused in
    the body — presumably intended for threshold checks once the test is
    re-enabled.
    """
    # TODO make pytest choose target/simulator
    target = 'spice'
    simulator = 'ngspice'
    # declare circuit
    myinv = m.DeclareCircuit(
        'myinv',
        'in_', fault.RealIn,
        'out', fault.RealOut,
        'vdd', fault.RealIn,
        'vss', fault.RealIn
    )
    # wrap if needed
    if target == 'verilog-ams':
        dut = fault.VAMSWrap(myinv)
    else:
        dut = myinv
    # define the test
    tester = fault.Tester(dut)
    tester.poke(dut.vdd, vsup)
    tester.poke(dut.vss, 0)
    freq = 1e3
    # Background poke: in_ follows a 1 kHz sine after this call.
    tester.poke(dut.in_, 0, delay = {
        'type': 'sin',
        'freq': freq,
        'amplitude': 0.4,
        'offset': 0.6,
        'phase_degrees': 90
    })
    num_reads = 100
    xs = []
    # 50 samples per sine period.
    dt = 1/(freq * 50)
    for k in range(num_reads):
        # expect(..., save_for_later=True) records the value rather than
        # checking it, so the waveform can be retrieved after the run.
        tester.expect(dut.in_, 0, save_for_later=True)
        tester.delay(dt)
        xs.append(k*dt)
    #for k in [.4, .5, .6]:
    #    in_ = k * vsup
    #    tester.poke(dut.in_, in_)
    #    # We might not know the expected value now but will want to check later
    #    tester.expect(dut.out, 0, save_for_later=True)
    # set options
    kwargs = dict(
        target=target,
        simulator=simulator,
        model_paths=[Path('tests/spice/myinv.sp').resolve()],
        vsup=vsup,
        tmp_dir=True,
        clock_step_delay = 0
    )
    if target == 'verilog-ams':
        kwargs['use_spice'] = ['myinv']
    # run the simulation
    tester.compile_and_run(**kwargs)
    # Collect the saved samples and plot the captured sine.
    ys = []
    for k in range(num_reads):
        value = tester.targets[target].saved_for_later[k]
        ys.append(value)
        print('%2d\t'%k, value)
    plot(xs, ys)
| 25.216418 | 80 | 0.603137 |
276463d984ce35042c314d5d665d37a55639fe39 | 3,414 | py | Python | xos/tosca/tests/servicetest.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | null | null | null | xos/tosca/tests/servicetest.py | xmaruto/mcord | 3678a3d10c3703c2b73f396c293faebf0c82a4f4 | [
"Apache-2.0"
] | 5 | 2020-06-05T17:47:15.000Z | 2021-09-23T23:21:27.000Z | xos/tosca/tests/servicetest.py | pan2za/xos | c2a4da2ccaa12360b2718be303b247866aefdfe6 | [
"Apache-2.0"
] | null | null | null | from basetest import BaseToscaTest
from core.models import Service
class ServiceTest(BaseToscaTest):
    """Exercises the ``tosca.nodes.Service`` TOSCA node type.

    Each test builds a node template for a Service named ``test_svc``,
    feeds it through the TOSCA engine (``self.execute``/``self.destroy``)
    and asserts on the resulting ``core.models.Service`` row.
    """

    # Test method names executed by the BaseToscaTest harness.
    tests = ["create_service_minimal",
             "create_service_notpublished",
             "create_service_notenabled",
             "create_service_public_key",
             "update_service_notpublished",
             "create_service_maximal",
             "destroy_service"]

    def cleanup(self):
        """Remove any leftover test Service between test runs."""
        self.try_to_delete(Service, name="test_svc")

    def create_service_minimal(self):
        """A Service with no properties gets the documented defaults."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service"))
        self.assert_obj(Service, "test_svc", kind="generic", published=True, enabled=True)

    def create_service_notpublished(self):
        """``published: False`` in the template is honored."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service", {"published": False}))
        self.assert_obj(Service, "test_svc", kind="generic", published=False, enabled=True)

    def create_service_notenabled(self):
        """``enabled: False`` in the template is honored."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service", {"enabled": False}))
        self.assert_obj(Service, "test_svc", kind="generic", published=True, enabled=False)

    def create_service_public_key(self):
        """``public_key`` property is stored on the model."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service", {"public_key": "foobar"}))
        self.assert_obj(Service, "test_svc", kind="generic", published=True, enabled=True, public_key="foobar")

    def update_service_notpublished(self):
        """Re-executing a template updates the existing row (same pk)."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service"))
        original_obj = self.assert_obj(Service, "test_svc", kind="generic", published=True, enabled=True)
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service", {"published": False}))
        updated_obj = self.assert_obj(Service, "test_svc", kind="generic", published=False, enabled=True)
        # Same database row must be updated in place, not re-created.
        assert(original_obj.id == updated_obj.id)

    def create_service_maximal(self):
        """All supported Service properties round-trip to the model."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service",
                                            {"kind": "testkind",
                                             "published": False,
                                             "enabled": False,
                                             "view_url": "http://foo/",
                                             "icon_url": "http://bar/",
                                             "public_key": "foobar",
                                             "versionNumber": "1.2"} ))
        self.assert_obj(Service, "test_svc",
                        kind="testkind",
                        published=False,
                        enabled=False,
                        view_url="http://foo/",
                        icon_url="http://bar/",
                        public_key="foobar",
                        versionNumber="1.2")

    def destroy_service(self):
        """Destroying a template deletes the backing Service row."""
        self.assert_noobj(Service, "test_svc")
        self.execute(self.make_nodetemplate("test_svc", "tosca.nodes.Service"))
        self.assert_obj(Service, "test_svc", kind="generic", published=True, enabled=True)
        self.destroy(self.make_nodetemplate("test_svc", "tosca.nodes.Service"))
        self.assert_noobj(Service, "test_svc")
ServiceTest()
| 45.52 | 111 | 0.618922 |
44016960a958fd779052b1d67b817962f3609f1c | 7,520 | py | Python | main.py | Techget/gail-tf-sc2 | b10ad198b6f3b91b1281a2a35ed0635a56c6cf8d | [
"MIT"
] | 10 | 2018-04-14T14:16:32.000Z | 2022-02-21T05:46:56.000Z | main.py | Techget/gail-tf-sc2 | b10ad198b6f3b91b1281a2a35ed0635a56c6cf8d | [
"MIT"
] | null | null | null | main.py | Techget/gail-tf-sc2 | b10ad198b6f3b91b1281a2a35ed0635a56c6cf8d | [
"MIT"
] | 1 | 2019-08-20T14:34:04.000Z | 2019-08-20T14:34:04.000Z | import argparse
from gailtf.baselines.common import set_global_seeds, tf_util as U
import gym, logging, sys
from gailtf.baselines import bench
import os.path as osp
from gailtf.baselines import logger
from gailtf.dataset.mujoco import Mujoco_Dset
from gailtf.dataset.sc2_dataset import SC2Dataset
import numpy as np
import ipdb
from pysc2.env import sc2_env
from absl import flags
import sys
FLAGS = flags.FLAGS
# Parse the command line eagerly at import time — presumably required so that
# absl flags are defined before pysc2's sc2_env is constructed (TODO confirm).
FLAGS(sys.argv)
def argsparser():
parser = argparse.ArgumentParser("Tensorflow Implementation of GAIL")
parser.add_argument('--env_id', help='environment ID', default='sc2')
parser.add_argument('--seed', help='RNG seed', type=int, default=0)
parser.add_argument('--num_cpu', help='number of cpu to used', type=int, default=1)
parser.add_argument('--expert_path', type=str, default='/home/xuan/pysc2-replay/map_race_data/Ascension to Aiur LE_Terran_Terran/')
parser.add_argument('--checkpoint_dir', help='the directory to save model', default='checkpoint')
parser.add_argument('--log_dir', help='the directory to save log file', default='log')
parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)
# Task
parser.add_argument('--task', type=str, choices=['train', 'evaluate'], default='train')
# for evaluatation
parser.add_argument('--stochastic_policy', type=bool, default=False)
# Mujoco Dataset Configuration
parser.add_argument('--ret_threshold', help='the return threshold for the expert trajectories', type=int, default=0)
parser.add_argument('--traj_limitation', type=int, default=np.inf)
# Optimization Configuration
parser.add_argument('--g_step', help='number of steps to train policy in each epoch', type=int, default=4)
parser.add_argument('--d_step', help='number of steps to train discriminator in each epoch', type=int, default=1)
# Network Configuration (Using MLP Policy)
parser.add_argument('--policy_hidden_size', type=int, default=100)
parser.add_argument('--adversary_hidden_size', type=int, default=100)
# Algorithms Configuration
parser.add_argument('--algo', type=str, choices=['bc', 'trpo', 'ppo'], default='trpo')
parser.add_argument('--max_kl', type=float, default=0.02)
parser.add_argument('--policy_entcoeff', help='entropy coefficiency of policy', type=float, default=1e-2)
parser.add_argument('--adversary_entcoeff', help='entropy coefficiency of discriminator', type=float, default=1e-3)
# Traing Configuration
parser.add_argument('--save_per_iter', help='save model every xx iterations', type=int, default=100)
parser.add_argument('--num_timesteps', help='number of timesteps per episode', type=int, default=1e9)
# Behavior Cloning
parser.add_argument('--pretrained', help='Use BC to pretrain', type=bool, default=False)
parser.add_argument('--BC_max_iter', help='Max iteration for training BC', type=int, default=1e4)
return parser.parse_args()
def get_task_name(args):
    """Derive a descriptive task/run name from the parsed arguments."""
    env_name = args.env_id.split("-")[0]

    # Behavior-cloning runs use a short, timestamp-free name.
    if args.algo == 'bc':
        parts = ['behavior_cloning.']
        if args.traj_limitation != np.inf:
            parts.append("traj_limitation_%d." % args.traj_limitation)
        parts.append(env_name)
        return ''.join(parts)

    # Adversarial runs embed a filesystem-safe timestamp plus hyperparameters.
    import time
    stamp = time.strftime("%c").replace(' ', '_').replace(':', '_')
    name = args.algo + "_ppo_modify_available_action_gail." + stamp
    if args.pretrained:
        name += "with_pretrained."
    if args.traj_limitation != np.inf:
        name += "traj_limitation_%d." % args.traj_limitation
    name += env_name
    if args.ret_threshold > 0:
        name += ".return_threshold_%d" % args.ret_threshold
    suffix = ".g_step_%s.d_step_%s.policy_entcoeff_%s.adversary_entcoeff_%s" % (
        args.g_step, args.d_step, args.policy_entcoeff, args.adversary_entcoeff)
    return name + suffix
def main(args):
    """Set up the SC2 environment, expert dataset and discriminator, then
    train (TRPO+GAIL) or evaluate per ``args.task``.

    Args:
        args: argparse.Namespace produced by argsparser().
    """
    from gailtf.baselines.ppo1 import mlp_policy
    U.make_session(num_cpu=args.num_cpu).__enter__()
    # set_global_seeds(args.seed)
    # env = gym.make(args.env_id)

    # NOTE(review): MAP_USED/RACE_USED appear unused — the map and races are
    # hard-coded again in the SC2Env constructor below; confirm before removing.
    MAP_USED = "'Ascension to Aiur LE'"
    RACE_USED = "Terran"

    env = sc2_env.SC2Env(
        map_name= 'AscensiontoAiur',
        agent_race="T", #Terran
        bot_race="T",
        difficulty=9, # 1
        step_mul=8,
        screen_size_px=(64,64), # will change to (64,64)
        minimap_size_px=(64,64),
        visualize=False)

    def policy_fn(name, ob_space, ac_space, reuse=False):
        # Policy network: 2 hidden layers of 600 units (600 > 524, the SC2
        # action-space size per the original comment below).
        return mlp_policy.MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                                    reuse=reuse, hid_size=600, num_hid_layers=2) # 600 > 524

    # env = bench.Monitor(env, logger.get_dir() and
    #     osp.join(logger.get_dir(), "monitor.json"))
    # env.seed(args.seed)
    # gym.logger.setLevel(logging.WARN)

    # Derive per-run checkpoint/log directories from the task name.
    task_name = get_task_name(args)
    args.checkpoint_dir = osp.join(args.checkpoint_dir, task_name)
    args.log_dir = osp.join(args.log_dir, task_name)

    # dataset = Mujoco_Dset(expert_path=args.expert_path, ret_threshold=args.ret_threshold, traj_limitation=args.traj_limitation)
    dataset = SC2Dataset(expert_path=args.expert_path)

    pretrained_weight = None
    # if (args.pretrained and args.task == 'train') or args.algo == 'bc':
    #     # Pretrain with behavior cloning
    #     from gailtf.algo import behavior_clone
    #     if args.algo == 'bc' and args.task == 'evaluate':
    #         behavior_clone.evaluate(env, policy_fn, args.load_model_path, stochastic_policy=args.stochastic_policy)
    #         sys.exit()
    #     pretrained_weight = behavior_clone.learn(env, policy_fn, dataset,
    #         max_iters=args.BC_max_iter, pretrained=args.pretrained,
    #         ckpt_dir=args.checkpoint_dir, log_dir=args.log_dir, task_name=task_name)
    #     if args.algo == 'bc':
    #         sys.exit()

    from gailtf.network.adversary import TransitionClassifier
    # discriminator
    discriminator = TransitionClassifier(args.adversary_hidden_size, entcoeff=args.adversary_entcoeff)

    if args.algo == 'trpo':
        # Set up for MPI seed
        from mpi4py import MPI
        rank = MPI.COMM_WORLD.Get_rank()
        if rank != 0:
            logger.set_level(logger.DISABLED)
        workerseed = args.seed + 10000 * MPI.COMM_WORLD.Get_rank()
        set_global_seeds(workerseed)
        # env.seed(workerseed)
        from gailtf.algo import trpo_mpi
        if args.task == 'train':
            trpo_mpi.learn(env, policy_fn, discriminator, dataset,
                           pretrained=args.pretrained, pretrained_weight=pretrained_weight,
                           g_step=args.g_step, d_step=args.d_step,
                           timesteps_per_batch=16,
                           max_kl=args.max_kl, cg_iters=10, cg_damping=0.1,
                           max_timesteps=args.num_timesteps,
                           entcoeff=args.policy_entcoeff, gamma=0.99, lam=0.94,
                           vf_iters=3, vf_stepsize=5e-4,
                           ckpt_dir=args.checkpoint_dir, log_dir=args.log_dir,
                           save_per_iter=args.save_per_iter, load_model_path=args.load_model_path,
                           task_name=task_name)
        elif args.task == 'evaluate':
            trpo_mpi.evaluate(env, policy_fn, args.checkpoint_dir, timesteps_per_batch=1024,
                              number_trajs=10, stochastic_policy=args.stochastic_policy)
        else: raise NotImplementedError
    else: raise NotImplementedError

    env.close()
if __name__ == '__main__':
    # Script entry point: parse CLI args and launch training/evaluation.
    args = argsparser()
    main(args)
| 49.801325 | 135 | 0.685106 |
6b0dd46d6e0ac8d9a46f1a883d0a90e3f20d43fc | 1,207 | py | Python | QRSMS/faculty_portal/models.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 4 | 2020-06-16T09:42:20.000Z | 2021-11-24T08:18:16.000Z | QRSMS/faculty_portal/models.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2021-04-08T21:57:34.000Z | 2022-02-27T06:41:15.000Z | QRSMS/faculty_portal/models.py | Srishti-Ahuja/QRSMS-V1 | 1f2fa82e8ddaeb62e633fcd6a136696355317bba | [
"Apache-2.0"
] | 7 | 2020-11-29T09:45:44.000Z | 2022-03-30T15:27:33.000Z | from django.db import models
from django.core.validators import RegexValidator, ValidationError
from django.urls import reverse
from actor.models import User
from django.contrib.auth.models import Group
# Create your models here.
class Faculty(models.Model):
    """Profile model linking a Django auth ``actor.User`` to the faculty role."""

    # One faculty profile per user account; deleting the user deletes this row.
    user = models.OneToOneField('actor.User', on_delete=models.CASCADE)

    @classmethod
    def create(cls, user=None, **kwargs):
        """Create and save a Faculty profile, creating the backing User if needed.

        Args:
            user: an existing ``actor.User``, or None to create one from
                ``kwargs['username']`` / ``kwargs['password']``.
            **kwargs: credentials; only read when *user* is None.

        Returns:
            The saved Faculty instance.

        Raises:
            KeyError: if *user* is None and username/password are missing.
        """
        if user is None:
            # The original code set throwaway username/password locals that
            # were never used; the kwargs are passed to User.create directly.
            user = User.create(username=kwargs['username'],
                               password=kwargs['password'],
                               is_faculty=True, is_employee=True)
        t = cls(user=user)
        t.save()
        # t.groups.add(Group.objects.get(name='faculty_group'))
        return t

    class Meta:
        ordering = ('-pk',)
        verbose_name_plural = "Faculty Supervisors"

    def __str__(self):
        return self.user.username

    def get_absolute_url(self):
        return reverse('initial_faculty_detail', args=(self.pk,))

    def get_update_url(self):
        return reverse('initial_faculty_update', args=(self.pk,))
cfe262b1f22fefb84be53804a52523c6e115fe25 | 1,533 | py | Python | structlog_wrapper/django/celery/steps.py | ankitkr/structlog-wrapper | bc085937656ef6aec5f485d95c9d761f4a3c0299 | [
"MIT"
] | null | null | null | structlog_wrapper/django/celery/steps.py | ankitkr/structlog-wrapper | bc085937656ef6aec5f485d95c9d761f4a3c0299 | [
"MIT"
] | null | null | null | structlog_wrapper/django/celery/steps.py | ankitkr/structlog-wrapper | bc085937656ef6aec5f485d95c9d761f4a3c0299 | [
"MIT"
] | null | null | null | from celery import bootsteps
from . import receivers
class DjangoStructLogInitStep(bootsteps.Step):
    """``celery`` worker boot step to initialize ``django_structlog``.

    >>> from celery import Celery
    >>> from structlog_wrapper.django.celery.steps import DjangoStructLogInitStep
    >>>
    >>> app = Celery("demo_project")
    >>> app.steps['worker'].add(DjangoStructLogInitStep)
    """

    def __init__(self, parent, **kwargs):
        super().__init__(parent, **kwargs)
        import celery
        from celery import signals

        # Wire every task lifecycle signal to its structlog receiver.
        wiring = (
            (signals.before_task_publish, receivers.receiver_before_task_publish),
            (signals.after_task_publish, receivers.receiver_after_task_publish),
            (signals.task_received, receivers.receiver_task_received),
            (signals.task_prerun, receivers.receiver_task_pre_run),
            (signals.task_retry, receivers.receiver_task_retry),
            (signals.task_success, receivers.receiver_task_success),
            (signals.task_failure, receivers.receiver_task_failure),
            (signals.task_revoked, receivers.receiver_task_revoked),
        )
        for signal, receiver in wiring:
            signal.connect(receiver)

        # task_unknown / task_rejected only exist on celery versions > 4.
        if celery.VERSION > (4,):
            signals.task_unknown.connect(receivers.receiver_task_unknown)
            signals.task_rejected.connect(receivers.receiver_task_rejected)
fc60ffa7ad9e05c303a7b2aa731d0ab35ac594c1 | 5,740 | py | Python | src/diamond/handler/mqtt.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 1,795 | 2015-01-05T11:14:55.000Z | 2022-03-25T12:07:15.000Z | src/diamond/handler/mqtt.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 671 | 2015-01-02T05:57:27.000Z | 2022-03-29T22:39:05.000Z | src/diamond/handler/mqtt.py | hermdog/Diamond | 0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47 | [
"MIT"
] | 793 | 2015-01-03T01:39:02.000Z | 2022-02-18T05:12:27.000Z | # coding=utf-8
"""
Send metrics to an MQTT broker.
### Dependencies
* [mosquitto](http://mosquitto.org/documentation/python/)
* Python `ssl` module (and Python >= 2.7)
In order for this to do something useful, you'll need an
MQTT broker (e.g. [mosquitto](http://mosquitto.org) and
a `diamond.conf` containing something along these lines:
[server]
handlers = diamond.handler.mqtt.MQTTHandler
...
[handlers]
[[MQTTHandler]]
host = address-of-mqtt-broker (default: localhost)
port = 1883 (default: 1883; with tls, default: 8883)
qos = 0 (default: 0)
# If False, do not include timestamp in the MQTT payload
# i.e. just the metric number
timestamp = True
# Optional topic-prefix to prepend to metrics en-route to
# MQTT broker
prefix = some/pre/fix (default: "")
# If you want to connect to your MQTT broker with TLS, you'll have
# to set the following four parameters
tls = True (default: False)
cafile = /path/to/ca/cert.pem
certfile = /path/to/certificate.pem
keyfile = /path/to/key.pem
Test by launching an MQTT subscribe, e.g.:
mosquitto_sub -v -t 'servers/#'
or
mosquitto_sub -v -t 'some/pre/fix/#'
### To Graphite
You may be interested in
[mqtt2graphite](https://github.com/jpmens/mqtt2graphite)
which subscribes to an MQTT broker and sends metrics off to Graphite.
### Notes
* This handler sets a last will and testament, so that the broker
publishes its death at a topic called clients/diamond/<hostname>
* Support for reconnecting to a broker is implemented and ought to
work.
"""
from Handler import Handler
from diamond.collector import get_hostname
import os
HAVE_SSL = True
try:
import ssl
except ImportError:
HAVE_SSL = False
try:
import mosquitto
except ImportError:
mosquitto = None
__author__ = 'Jan-Piet Mens'
__email__ = 'jpmens@gmail.com'
class MQTTHandler(Handler):
"""
"""
def __init__(self, config=None):
"""
Create a new instance of the MQTTHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Data
self.mqttc = None
self.hostname = get_hostname(self.config)
self.client_id = "%s_%s" % (self.hostname, os.getpid())
# Initialize Options
self.host = self.config.get('host', 'localhost')
self.port = 0
self.qos = int(self.config.get('qos', 0))
self.prefix = self.config.get('prefix', "")
self.tls = self.config.get('tls', False)
self.timestamp = 0
try:
self.timestamp = self.config['timestamp']
if not self.timestamp:
self.timestamp = 1
else:
self.timestamp = 0
except:
self.timestamp = 1
if not mosquitto:
self.log.error('mosquitto import failed. Handler disabled')
self.enabled = False
return
# Initialize
self.mqttc = mosquitto.Mosquitto(self.client_id, clean_session=True)
if not self.tls:
self.port = int(self.config.get('port', 1883))
else:
# Set up TLS if requested
self.port = int(self.config.get('port', 8883))
self.cafile = self.config.get('cafile', None)
self.certfile = self.config.get('certfile', None)
self.keyfile = self.config.get('keyfile', None)
if None in [self.cafile, self.certfile, self.keyfile]:
self.log.error("MQTTHandler: TLS configuration missing.")
return
try:
self.mqttc.tls_set(
self.cafile,
certfile=self.certfile,
keyfile=self.keyfile,
cert_reqs=ssl.CERT_REQUIRED,
tls_version=3,
ciphers=None)
except:
self.log.error("MQTTHandler: Cannot set up TLS " +
"configuration. Files missing?")
self.mqttc.will_set("clients/diamond/%s" % (self.hostname),
payload="Adios!", qos=0, retain=False)
self.mqttc.connect(self.host, self.port, 60)
self.mqttc.on_disconnect = self._disconnect
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MQTTHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MQTTHandler, self).get_default_config()
config.update({
})
return config
def process(self, metric):
"""
Process a metric by converting metric name to MQTT topic name;
the payload is metric and timestamp.
"""
if not mosquitto:
return
line = str(metric)
topic, value, timestamp = line.split()
if len(self.prefix):
topic = "%s/%s" % (self.prefix, topic)
topic = topic.replace('.', '/')
topic = topic.replace('#', '&') # Topic must not contain wildcards
if self.timestamp == 0:
self.mqttc.publish(topic, "%s" % (value), self.qos)
else:
self.mqttc.publish(topic, "%s %s" % (value, timestamp), self.qos)
def _disconnect(self, mosq, obj, rc):
self.log.debug("MQTTHandler: reconnecting to broker...")
mosq.reconnect()
| 28.7 | 78 | 0.577003 |
f19eac5d24f039746bc864fcd0bb9bc35dc8c683 | 23,690 | py | Python | apps/oozie/src/oozie/old_migrations/0006_auto__chg_field_java_files__chg_field_java_archives__chg_field_sqoop_f.py | kokosing/hue | 2307f5379a35aae9be871e836432e6f45138b3d9 | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | apps/oozie/src/oozie/old_migrations/0006_auto__chg_field_java_files__chg_field_java_archives__chg_field_sqoop_f.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | apps/oozie/src/oozie/old_migrations/0006_auto__chg_field_java_files__chg_field_java_archives__chg_field_sqoop_f.py | zks888/hue | 93a8c370713e70b216c428caa2f75185ef809deb | [
"Apache-2.0"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Widen the ``files``/``archives`` columns on every Oozie action model
    from ``CharField(max_length=512)`` to ``TextField`` (forwards), and
    revert them (backwards).

    The original hand-listed fourteen identical ``db.alter_column`` calls per
    direction; they are driven from one table list here.  The ``models``
    freeze dict below is auto-generated by South and left untouched.
    """

    # Tables whose `files` and `archives` columns are altered, in the same
    # order the original migration applied them.
    _ACTION_TABLES = (
        'oozie_java',
        'oozie_sqoop',
        'oozie_pig',
        'oozie_mapreduce',
        'oozie_streaming',
        'oozie_shell',
        'oozie_hive',
    )

    def forwards(self, orm):
        # Widen files/archives from CharField(512) to TextField.
        for table in self._ACTION_TABLES:
            for column in ('files', 'archives'):
                db.alter_column(table, column, self.gf('django.db.models.fields.TextField')())

    def backwards(self, orm):
        # Revert files/archives back to CharField(max_length=512).
        for table in self._ACTION_TABLES:
            for column in ('files', 'archives'):
                db.alter_column(table, column, self.gf('django.db.models.fields.CharField')(max_length=512))

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'oozie.coordinator': {
            'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
            'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 23, 14, 58, 35, 963525)'}),
            'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 20, 14, 58, 35, 963495)'}),
            'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
        },
        'oozie.datainput': {
            'Meta': {'object_name': 'DataInput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataoutput': {
            'Meta': {'object_name': 'DataOutput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataset': {
            'Meta': {'object_name': 'Dataset'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 9, 20, 14, 58, 35, 964734)'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
        },
        'oozie.end': {
            'Meta': {'object_name': 'End'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.fork': {
            'Meta': {'object_name': 'Fork'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.history': {
            'Meta': {'object_name': 'History'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
            'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'properties': ('django.db.models.fields.TextField', [], {}),
            'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'oozie.hive': {
            'Meta': {'object_name': 'Hive'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"},{"name":"oozie.hive.defaults","value":"${hive.default.xml}"}]\''}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.java': {
            'Meta': {'object_name': 'Java'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.job': {
            'Meta': {'object_name': 'Job'},
            'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'parameters': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'oozie.join': {
            'Meta': {'object_name': 'Join'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.kill': {
            'Meta': {'object_name': 'Kill'},
            'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.link': {
            'Meta': {'object_name': 'Link'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
        },
        'oozie.mapreduce': {
            'Meta': {'object_name': 'Mapreduce'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.node': {
            'Meta': {'object_name': 'Node'},
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
        },
        'oozie.pig': {
            'Meta': {'object_name': 'Pig'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.shell': {
            'Meta': {'object_name': 'Shell'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.sqoop': {
            'Meta': {'object_name': 'Sqoop'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'})
        },
        'oozie.ssh': {
            'Meta': {'object_name': 'Ssh'},
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'oozie.start': {
            'Meta': {'object_name': 'Start'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
        },
        'oozie.streaming': {
            'Meta': {'object_name': 'Streaming'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
            'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'oozie.workflow': {
            'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
            'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': "orm['oozie.End']"}),
            'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': "orm['oozie.Start']"})
        }
    }

    complete_apps = ['oozie']
| 70.928144 | 206 | 0.56336 |
0991fd30d8045d6a0ce16e6ff1eede5019e3cc0a | 4,388 | py | Python | library/views.py | xpokers/AiOps | 40eb6d8ef93d35a79df23dabb88fc034e1ce975c | [
"Apache-2.0"
] | null | null | null | library/views.py | xpokers/AiOps | 40eb6d8ef93d35a79df23dabb88fc034e1ce975c | [
"Apache-2.0"
] | 4 | 2021-04-07T23:18:56.000Z | 2021-09-23T23:22:47.000Z | library/views.py | xpokers/CMDB | 6c402dcf30bfd74453a5efaa8d0de69d938632b9 | [
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render, redirect, HttpResponse, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from library.models import librarys
from .form import LibrarysForm
import json
from django.contrib.auth.models import User, Group
from guardian.shortcuts import assign_perm, get_perms
from guardian.core import ObjectPermissionChecker
from guardian.decorators import permission_required_or_403
from tasks.views import ssh
from guardian.shortcuts import get_objects_for_user, get_objects_for_group
from guardian.models import UserObjectPermission, GroupObjectPermission
from django.views.generic import TemplateView, ListView, View, CreateView, UpdateView, DeleteView, DetailView
from django.urls import reverse_lazy
class LibraryListAll(TemplateView):
    """Render the full list of library records."""
    template_name = 'library/library.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Every HTTP method requires an authenticated session.
        return super(LibraryListAll, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        # Expose all records plus the nav-highlight flags the template expects.
        kwargs.update({
            "library_active": "active",
            "library_list_active": "active",
            'library_list': librarys.objects.all(),
        })
        return super(LibraryListAll, self).get_context_data(**kwargs)
class LibraryAdd(CreateView):
    """Create a new library record.

    The original declared ``CreateView, View`` as bases; ``View`` is already
    an ancestor of ``CreateView``, so the redundant base is dropped.
    """
    model = librarys
    form_class = LibrarysForm
    template_name = 'library/library-add.html'
    success_url = reverse_lazy('library:library_list')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require login for all HTTP methods.
        return super(LibraryAdd, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # CreateView.form_valid() saves the form itself; the original also
        # called form.save() here, writing the row twice per request.
        response = super(LibraryAdd, self).form_valid(form)
        self.lib_save = self.object  # keep the attribute the original exposed
        return response

    def get_context_data(self, **kwargs):
        kwargs.update({
            "library_active": "active",
            "library_list_active": "active",
        })
        return super(LibraryAdd, self).get_context_data(**kwargs)
class LibraryUpdate(UpdateView):
    """Edit an existing library record."""
    model = librarys
    form_class = LibrarysForm
    template_name = 'library/library-update.html'
    success_url = reverse_lazy('library:library_list')

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require login for all HTTP methods.
        return super(LibraryUpdate, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        # UpdateView.form_valid() saves the form itself; the original also
        # called form.save() here, writing the row twice per request.
        response = super(LibraryUpdate, self).form_valid(form)
        self.lib_save = self.object  # keep the attribute the original exposed
        return response

    def form_invalid(self, form):
        # TODO(review): swap this debug print for proper logging; kept so the
        # observable output of a failed submission is unchanged.
        print(form.errors)
        return super(LibraryUpdate, self).form_invalid(form)

    def get_context_data(self, **kwargs):
        kwargs.update({
            "library_active": "active",
            "library_list_active": "active",
        })
        return super(LibraryUpdate, self).get_context_data(**kwargs)
    # NOTE: the original also overrode get_success_url() only to call super();
    # that no-op override has been removed (inherited behavior is identical).
class LibraryDetail(DetailView):
    """Show a single library record."""
    model = librarys
    template_name = 'library/library-detail.html'

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require login for all HTTP methods.
        return super(LibraryDetail, self).dispatch(*args, **kwargs)

    def get_context_data(self, **kwargs):
        # DetailView has already fetched the object into self.object; the
        # original re-queried librarys.objects.get(id=pk), issuing a second
        # query and raising DoesNotExist (500) instead of 404 on a bad pk.
        kwargs.update({
            "library_active": "active",
            "library_list_active": "active",
            "librarys": self.object,
            "nid": self.object.pk,
        })
        return super(LibraryDetail, self).get_context_data(**kwargs)
class LibraryDel(View):
    """Delete a library record; POST with 'nid' = primary key.

    Returns a JSON body ``{'status': bool, 'error': str or None}``.
    """
    model = librarys
    form_class = LibrarysForm

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        # Require login for all HTTP methods.
        return super(LibraryDel, self).dispatch(*args, **kwargs)

    def post(self, request):
        ret = {'status': True, 'error': None}
        try:
            nid = request.POST.get('nid', None)  # don't shadow builtin id()
            librarys.objects.get(id=nid).delete()
        except Exception as e:
            # The original error branch used the key "static", so callers
            # checking 'status' never saw the failure; fixed to 'status'.
            ret = {
                "status": False,
                "error": '删除请求错误,{}'.format(e)
            }
        return HttpResponse(json.dumps(ret))
| 29.648649 | 109 | 0.660665 |
a9e2461f464b19702bfad38fdab6994629be3ad9 | 2,124 | py | Python | graph-algorithms/seminary/parse.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | 6 | 2015-06-22T19:43:13.000Z | 2019-07-15T18:08:41.000Z | graph-algorithms/seminary/parse.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | null | null | null | graph-algorithms/seminary/parse.py | vampy/university | 9496cb63594dcf1cc2cec8650b8eee603f85fdab | [
"MIT"
] | 1 | 2015-09-26T09:01:54.000Z | 2015-09-26T09:01:54.000Z | import graph
def dfsIter(g, s, prev, tree):
tree[s] = []
q = [s]
visited = set()
visited.add(s)
while len(q) > 0:
x = q[-1]
q = q[: -1]
# print x
for y in g.parseNout(x):
if y not in visited:
visited.add(y)
q.append(y)
prev[y] = x
tree[x].append(y)
tree[y] = []
def bfs(g, s, prev, tree):
    """Breadth-first traversal of graph ``g`` starting at ``s``.

    Fills ``prev`` (vertex -> discovery parent) and ``tree`` (vertex ->
    ordered list of tree children, root included) in place.
    """
    from collections import deque  # O(1) popleft; q = q[1:] copied the list each step
    tree[s] = []
    queue = deque([s])
    visited = {s}
    while queue:
        x = queue.popleft()
        for y in g.parseNout(x):
            if y not in visited:
                visited.add(y)
                queue.append(y)
                prev[y] = x
                tree[x].append(y)
                tree[y] = []
def printTree(tree, root):
    """Pretty-print the traversal tree rooted at `root` to stdout, one vertex per line."""
    printTreeAux(tree, root, "")
def printTreeAux(tree, root, indent):
    """Print `root` prefixed with `indent`, then recurse into its children
    one indentation level deeper.

    The original body had a bare ``print`` followed by the no-op expression
    statement ``indent + str(root)`` (a Python-2 ``print`` statement split
    apart), so on Python 3 it printed only a blank line.
    """
    print(indent + str(root))
    children = tree[root]
    newindent = indent + " "
    for child in children:
        printTreeAux(tree, child, newindent)
def dfsAux(g, x, visited, prev, tree):
    """Recursive DFS step: record x's tree children and descend into each
    neighbour that has not been seen yet."""
    tree[x] = []
    for neighbour in g.parseNout(x):
        if neighbour in visited:
            continue
        prev[neighbour] = x
        tree[x].append(neighbour)
        visited.add(neighbour)
        dfsAux(g, neighbour, visited, prev, tree)
def dfs(g, s, prev, tree):
    """Recursive depth-first traversal entry point: seed the visited set
    with the start vertex and delegate to dfsAux."""
    seen = {s}
    dfsAux(g, s, seen, prev, tree)
def getPath(s, t, prev):
    """Reconstruct the path from s to t by walking the `prev` parent links
    backwards from t, then reversing."""
    backwards = []
    while t != s:
        backwards.append(t)
        t = prev[t]
    backwards.append(s)
    backwards.reverse()
    return backwards
# --- demo driver -------------------------------------------------------
# Build the second sample graph from the project `graph` module and print
# the DFS tree rooted at vertex 0.  `t` is only used by the commented-out
# getPath demo below.
g = graph.initSecondGraph(graph.DoubleDictGraph)
s = 0
t = 4
# Alternative demo: the wolf/goat/cabbage puzzle modelled as a graph.
# g = graph.GoatGraph()
# s = graph.GoatStatus(0)
# t = graph.GoatStatus(15)
prev = {}
tree = {}
# bfs(g, s, prev , tree)
dfsIter(g, s, prev, tree)
printTree(tree, s)
# Path reconstruction demo (requires a traversal that reached t):
# sol = getPath(s, t, prev)
# for v in sol:
#     print v
# Other scratch experiments kept for reference:
# g = graph.initRandomGraph(DoubleDictGraph, 100000, 300000)
# g = graph.GoatGraph()
# for x in g.parseX():
#     print ("%s:" % x)
#     for y in g.parseNout(x):
#         print ("%s -> %s; cost=%s" % (x, y, g.cost(x,y)))
| 19.486239 | 60 | 0.484934 |
4063390b6be5e45ec60a374b850904ab435daec3 | 1,047 | py | Python | osmnx/globals.py | tanvim/test | 3fc7deab27a33183e49e7868aac361a650727ff3 | [
"MIT"
] | null | null | null | osmnx/globals.py | tanvim/test | 3fc7deab27a33183e49e7868aac361a650727ff3 | [
"MIT"
] | null | null | null | osmnx/globals.py | tanvim/test | 3fc7deab27a33183e49e7868aac361a650727ff3 | [
"MIT"
] | null | null | null | ###################################################################################################
# Module: globals.py
# Description: Global defaults, can be configured by user by passing values to utils.config()
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/gboeing/osmnx
###################################################################################################
import logging as lg
# default locations to save data, logs, images, and cache
# (relative to the working directory; overridable via utils.config())
data_folder = 'data'
logs_folder = 'logs'
imgs_folder = 'images'
cache_folder = 'cache'
# cache server responses to disk (off by default)
use_cache = False
# write log to file and/or to console (both off by default)
log_file = False
log_console = False
log_level = lg.INFO
log_name = 'osmnx'
log_filename = 'osmnx'
# useful osm tags - note that load_graphml expects a consistent set of tag names for parsing
useful_tags_node = ['ref', 'highway']
useful_tags_path = ['bridge', 'tunnel', 'oneway', 'lanes', 'ref', 'name', 'highway', 'maxspeed', 'service', 'access', 'area', 'landuse', 'width', 'est_width']
| 33.774194 | 158 | 0.586437 |
307b2543c4a49ebc1f8ed2471787af9d2e4a9052 | 5,878 | py | Python | fn_ldap_utilities/tests/test_ldap_search.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2020-08-25T03:43:07.000Z | 2020-08-25T03:43:07.000Z | fn_ldap_utilities/tests/test_ldap_search.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | 1 | 2019-07-08T16:57:48.000Z | 2019-07-08T16:57:48.000Z | fn_ldap_utilities/tests/test_ldap_search.py | rudimeyer/resilient-community-apps | 7a46841ba41fa7a1c421d4b392b0a3ca9e36bd00 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# (c) Copyright IBM Corp. 2018. All Rights Reserved.
"""Tests using pytest_resilient_circuits"""
from __future__ import print_function
import functools
import pytest
from helper import TestingHelper, get_mock_config_data
from mock import patch
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from ldap3 import Server, Connection, ALL, ALL_ATTRIBUTES, MOCK_SYNC
import os
import logging
LOG = logging.getLogger(__name__)
# Package / function under test and the directory holding the mock LDAP
# server definition files used to build the fake server below.
PACKAGE_NAME = "fn_ldap_utilities"
FUNCTION_NAME = "ldap_utilities_search"
MOCK_DATA_PATH = os.getcwd() + "/tests/mock_data/search_specific/"
# Read the default configuration-data section from the package
config_data = get_mock_config_data()
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
# Create a fake LDAP server from the info and schema json files
fake_server = Server.from_definition('my_fake_server', MOCK_DATA_PATH + 'mock_server_info.json', MOCK_DATA_PATH + 'mock_server_schema.json')
def mocked_server():
    """Build a mocked ldap3 server factory.

    :return: ``Mock`` callable that returns the module-level fake ``Server``
    """
    # The original referenced Mock without importing it (only `patch` is
    # imported at module level), raising NameError when called.
    try:
        from mock import Mock
    except ImportError:  # fall back to the stdlib implementation
        from unittest.mock import Mock
    server = Mock(return_value=fake_server)
    return server
def mocked_connection():
    """Build a mocked ldap3 connection factory.

    :return: ``Mock`` callable that returns a MOCK_SYNC ``Connection`` to the
        fake server, pre-populated with the mock directory entries
    """
    # The original referenced Mock without importing it (only `patch` is
    # imported at module level), raising NameError when called.
    try:
        from mock import Mock
    except ImportError:  # fall back to the stdlib implementation
        from unittest.mock import Mock
    # Create a MockSyncStrategy connection to the fake server.  Renamed from
    # `mocked_connection`, which confusingly shadowed this function's name.
    mock_conn = Connection(fake_server, user='cn=my_user,ou=test,o=lab', password='my_password',
                           client_strategy=MOCK_SYNC)
    # Populate the DIT of the fake server with mock entries
    mock_conn.strategy.entries_from_json(MOCK_DATA_PATH + 'mock_server_entries.json')
    return Mock(return_value=mock_conn)
def call_ldap_utilities_search_function(circuits, function_params, timeout=10):
    """Fire an ldap_utilities_search message into the circuits framework,
    wait for its result event, and return the function's result value."""
    fn_event = SubmitTestFunction("ldap_utilities_search", function_params)
    circuits.manager.fire(fn_event)
    result_event = circuits.watcher.wait("ldap_utilities_search_result", parent=fn_event, timeout=timeout)
    assert result_event
    assert isinstance(result_event.kwargs["result"], FunctionResult)
    pytest.wait_for(result_event, "complete", True)
    return result_event.kwargs["result"].value
class TestLdapUtilitiesSearch:
    """ Tests for the ldap_utilities_search function"""
    # Shared helper whose mocked_server()/mocked_connection() factories are
    # patched into the function's module for every parametrized test below.
    helper = TestingHelper(isSearch=True)
    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None
    @patch('fn_ldap_utilities.util.helper.Connection', helper.mocked_connection())
    @patch('fn_ldap_utilities.util.helper.Server', helper.mocked_server())
    @pytest.mark.parametrize("login_search_base, login_search_filter, login_search_attributes, login_param, login_expected_result", [
        # An empty uid filter matches nothing: success False, no entries.
        ("dc=example,dc=com", {"type": "text", "content": "(uid=)"}, "cn", "", {'success': False, 'entries': []})
    ])
    def test_ldap_basic_connection(self, circuits_app, login_search_base, login_search_filter, login_search_attributes, login_param,
                                   login_expected_result):
        """ Test LDAP connection
        Test LDAP connection with simple search options.
        Positive tests.
        """
        function_params = {
            "ldap_search_base": login_search_base,
            "ldap_search_filter": login_search_filter,
            "ldap_search_attributes": login_search_attributes,
            "ldap_search_param": login_param
        }
        result = call_ldap_utilities_search_function(circuits_app, function_params)
        assert (login_expected_result == result)
    @patch('fn_ldap_utilities.util.helper.Connection', helper.mocked_connection())
    @patch('fn_ldap_utilities.util.helper.Server', helper.mocked_server())
    @pytest.mark.parametrize("success_search_base, success_search_filter, success_search_attributes, success_param, success_expected_result",
                             [
                                 # Literal filter, no %ldap_param% substitution.
                                 (
                                     "dc=example,dc=com",
                                     {"type": "text", "content": "(&(objectClass=person)(uid=einstein))"},
                                     "uid,cn",
                                     "",
                                     {'success': True, 'entries': [{'cn': ['Albert Einstein'], 'dn': 'uid=einstein,dc=example,dc=com', 'uid': ['einstein']}]}
                                 ),
                                 # %ldap_param% placeholder substituted with the search param.
                                 (
                                     "dc=example,dc=com",
                                     {"type": "text", "content": "(&(objectClass=person)(uid=%ldap_param%))"},
                                     "uid,cn",
                                     "einstein",
                                     {'success': True, 'entries': [{'cn': ['Albert Einstein'], 'dn': 'uid=einstein,dc=example,dc=com', 'uid': ['einstein']}]}
                                 ),
                                 # Disjunction mixing a literal uid and a substituted one.
                                 (
                                     "dc=example,dc=com",
                                     {"type": "text", "content": "(&(objectClass=person)(|(uid=newton)(uid=%ldap_param%)))"},
                                     "uid,cn",
                                     "einstein",
                                     {'success': True, 'entries': [{'cn': ['Isaac Newton'], 'dn': 'uid=newton,dc=example,dc=com', 'uid': ['newton']}, {'cn': ['Albert Einstein'], 'dn': 'uid=einstein,dc=example,dc=com', 'uid': ['einstein']}]}
                                 )
                             ])
    def test_utilities_ldap_search(self, circuits_app, success_search_base, success_search_filter, success_search_attributes, success_param, success_expected_result):
        """ Test LDAP searches
        Test LDAP search with various base, filter and attribute options.
        All positive tests.
        """
        function_params = {
            "ldap_search_base": success_search_base,
            "ldap_search_filter": success_search_filter,
            "ldap_search_attributes": success_search_attributes,
            "ldap_search_param": success_param
        }
        result = call_ldap_utilities_search_function(circuits_app, function_params)
        assert(success_expected_result == result)
ea27d515298484c83dd39e8af7b9341ae0074dcf | 936 | py | Python | setup.py | derlih/async-fsm | 1a9b25a7fe2373e221a38c1c78d2ea2707c9206a | [
"MIT"
] | 1 | 2021-02-04T10:50:18.000Z | 2021-02-04T10:50:18.000Z | setup.py | derlih/async-fsm | 1a9b25a7fe2373e221a38c1c78d2ea2707c9206a | [
"MIT"
] | null | null | null | setup.py | derlih/async-fsm | 1a9b25a7fe2373e221a38c1c78d2ea2707c9206a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from distutils.core import setup
# Package metadata for async-fsm: a Python FSM library with asyncio support.
# Classifiers advertise the asyncio framework, MIT license, and Python 3.6.
setup(name='async-fsm',
      version='0.1',
      description='Python FSM implementation that supports asyncio',
      author='Dmitry Erlikh',
      author_email='derlih@gmail.com',
      url='https://github.com/derlih/async-fsm',
      packages=['async_fsm'],
      classifiers=[
          'Development Status :: 1 - Planning',
          'Intended Audience :: Developers',
          'Operating System :: POSIX',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: MacOS :: MacOS X',
          'Topic :: Software Development :: Libraries',
          'Framework :: AsyncIO',
          'License :: OSI Approved :: MIT License',
          'Programming Language :: Python :: 3.6'
      ],
      # Test runner integration: `python setup.py test` runs pytest with these extras.
      setup_requires=['pytest-runner'],
      tests_require=['pytest', 'pytest-asyncio',
                     'pytest-cov', 'pytest-helpers-namespace']
      )
| 34.666667 | 68 | 0.584402 |
07fa4ae26074bad4578ccdcf7900c822d207ef13 | 2,645 | py | Python | src/mysite/urls.py | shubhampcvn/django_blog_app | c6885504abe53dd19c7bd386daa1f26e37685351 | [
"MIT"
] | null | null | null | src/mysite/urls.py | shubhampcvn/django_blog_app | c6885504abe53dd19c7bd386daa1f26e37685351 | [
"MIT"
] | null | null | null | src/mysite/urls.py | shubhampcvn/django_blog_app | c6885504abe53dd19c7bd386daa1f26e37685351 | [
"MIT"
] | null | null | null | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.conf.urls.static import static
from django.urls import path, include
from django.conf import settings
from django.contrib.auth import views as auth_views
from personal.views import (
home_screen_view,
)
from account.views import (
registration_view,
logout_view,
login_view,
account_view,
must_authenticate_view
)
urlpatterns = [
    path('admin/', admin.site.urls),
    path('blog/', include('blog.urls', 'blog')),
    path('', home_screen_view, name='home'),
    # Custom account views (registration, login/logout, account page).
    path('register/', registration_view, name='register'),
    path('must_authenticate/', must_authenticate_view, name='must_authenticate'),
    path('logout/', logout_view, name='logout'),
    path('login/', login_view, name='login'),
    path('account/', account_view, name='account'),
    # Password reset links (ref: https://github.com/django/django/blob/master/django/contrib/auth/views.py)
    path('password_change/done/',
         auth_views.PasswordChangeDoneView.as_view(template_name='registration/password_change_done.html'),
         name='password_change_done'),
    path('password_change/', auth_views.PasswordChangeView.as_view(template_name='registration/password_change.html'),
         name='password_change'),
    # NOTE(review): this route uses PasswordResetCompleteView; Django's convention
    # maps 'password_reset/done/' to PasswordResetDoneView ("email sent" page) —
    # confirm this is intentional.
    path('password_reset/done/',
         auth_views.PasswordResetCompleteView.as_view(template_name='registration/password_reset_done.html'),
         name='password_reset_done'),
    path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(), name='password_reset_confirm'),
    path('password_reset/', auth_views.PasswordResetView.as_view(), name='password_reset'),
    path('reset/done/',
         auth_views.PasswordResetCompleteView.as_view(template_name='registration/password_reset_complete.html'),
         name='password_reset_complete'),
]

# Serve static and media files directly from Django in development only.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
d734f5a849e22e6f35699c81977cfbdab1246dd1 | 490 | py | Python | dealscore/urls.py | lh15/dealscore_django | 0f17cc5c399d28d89ea1db9b4f978515d8f29334 | [
"MIT"
] | null | null | null | dealscore/urls.py | lh15/dealscore_django | 0f17cc5c399d28d89ea1db9b4f978515d8f29334 | [
"MIT"
] | 3 | 2021-03-19T11:32:01.000Z | 2022-02-10T11:43:56.000Z | dealscore/urls.py | lh15/dealscore_django | 0f17cc5c399d28d89ea1db9b4f978515d8f29334 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib import admin
from django.urls import path, include
# Project URL routes: admin, auth (django + allauth), public pages, and the deals engine.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('users/', include('django.contrib.auth.urls')),
    path('accounts/', include('allauth.urls')),
    path('', include('pages.urls')),
    path('engine/', include('dealsengine.urls')),
]

# django-debug-toolbar routes are mounted in development only.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
| 27.222222 | 56 | 0.677551 |
917aa7a412823a22cbd703f51d5b109f0987e5c8 | 4,344 | py | Python | benchmark/startQiskit_noisy2852.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2852.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startQiskit_noisy2852.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=4
# total number=42
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position-wise; return the result reversed.

    Each character is parsed as an integer bit, so non-binary characters raise
    ValueError and a shorter ``t`` raises IndexError, just like the original
    indexed loop.  The reversal means the output is least-significant-bit first.
    """
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Return the mod-2 dot product of two bit strings as "0" or "1".

    Equivalently: the parity of the positions where both strings carry a 1.
    Indexing follows ``s``, so a shorter ``t`` raises IndexError, matching
    the original loop.
    """
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase/bit-flip oracle circuit O_f for boolean function f.

    Iterates over all 2**n basis states; for each state where f returns "1",
    X gates temporarily flip the 0-valued controls so the multi-controlled
    Toffoli fires exactly on that basis state, then the X layer is uncomputed.
    Cost is exponential in n, which is fine for the small n used here.
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")

    for i in range(2 ** n):
        # n-bit binary representation of the basis state index.
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # Flip 0-controls so the MCT below triggers on state `rep`.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # Uncompute the X layer to restore the controls.
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build the benchmark circuit: a fixed gate sequence plus the oracle for f.

    The numbered comments suggest this gate list is auto-generated (gates are
    appended/mutated by a tool); the exact order of operations is significant,
    so do not reorder.  Measures all n qubits into the classical register.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3]) # number=19
    prog.cz(input_qubit[0],input_qubit[3]) # number=20
    prog.h(input_qubit[3]) # number=21
    prog.h(input_qubit[3]) # number=24
    prog.cz(input_qubit[0],input_qubit[3]) # number=25
    prog.h(input_qubit[3]) # number=26
    prog.cx(input_qubit[0],input_qubit[3]) # number=31
    prog.x(input_qubit[3]) # number=32
    prog.h(input_qubit[3]) # number=39
    prog.cz(input_qubit[0],input_qubit[3]) # number=40
    prog.h(input_qubit[3]) # number=41
    prog.h(input_qubit[3]) # number=36
    prog.cz(input_qubit[0],input_qubit[3]) # number=37
    prog.h(input_qubit[3]) # number=38
    prog.cx(input_qubit[0],input_qubit[3]) # number=15
    prog.h(input_qubit[1])  # number=2
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[3])  # number=12
    prog.h(input_qubit[0])  # number=5

    # Append the oracle over the first n-1 qubits with qubit n-1 as target.
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.y(input_qubit[1]) # number=29
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[1]) # number=30
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9

    prog.y(input_qubit[2]) # number=10
    prog.y(input_qubit[2]) # number=11
    prog.swap(input_qubit[3],input_qubit[0]) # number=22
    prog.swap(input_qubit[3],input_qubit[0]) # number=23
    prog.swap(input_qubit[1],input_qubit[0]) # number=27
    prog.swap(input_qubit[1],input_qubit[0]) # number=28
    prog.swap(input_qubit[3],input_qubit[0]) # number=34
    prog.swap(input_qubit[3],input_qubit[0]) # number=35
    # circuit end

    # Measure every qubit into the matching classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])

    return prog
if __name__ == '__main__':
    # f(rep) = (a . rep) XOR b — a Bernstein-Vazirani-style boolean function.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the FakeVigo noisy-device simulator.
    backend = FakeVigo()
    sample_shot =8000

    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)

    # Dump measurement counts, transpiled depth, and the transpiled circuit text.
    writefile = open("../data/startQiskit_noisy2852.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| 35.032258 | 140 | 0.655387 |
f9e82998a166cbf4ae8b90555296c1c29873aaf0 | 1,336 | py | Python | NodeCore/Sockets/OutSocket.py | FaderKing/NodeEditor | 37e5d193ff08c834ba5e901af20a48fa2adfe133 | [
"BSD-2-Clause"
] | 4 | 2020-03-15T08:13:46.000Z | 2021-06-19T02:33:35.000Z | NodeCore/Sockets/OutSocket.py | bloomv/NodeEditor | e1bfb3d77cc5fbb409dca0ee14a5779255377c33 | [
"BSD-2-Clause"
] | null | null | null | NodeCore/Sockets/OutSocket.py | bloomv/NodeEditor | e1bfb3d77cc5fbb409dca0ee14a5779255377c33 | [
"BSD-2-Clause"
] | 2 | 2020-05-21T22:55:59.000Z | 2021-10-06T03:45:07.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file was created using the DirectGUI Designer
from .SocketBase import SocketBase, OUTSOCKET
from direct.gui.DirectFrame import DirectFrame
from direct.gui.DirectLabel import DirectLabel
from panda3d.core import TextNode
class OutSocket(SocketBase):
    """Output socket widget: a dark frame with a plug and a right-aligned label.

    Built from DirectGUI elements and parented to the owning node's frame.
    """
    def __init__(self, node, name):
        SocketBase.__init__(self, node, name)
        self.type = OUTSOCKET

        # Background frame for the socket row; width 1 unit, resized below.
        self.frame = DirectFrame(
            frameColor=(0.25, 0.25, 0.25, 1),
            frameSize=(-1, 0, -self.height, 0),
            parent=node.frame,
        )

        # Connection plug provided by the base class, attached to this frame.
        SocketBase.createPlug(self, self.frame)

        # Right-aligned socket name label on a transparent background.
        self.text = DirectLabel(
            frameColor=(0, 0, 0, 0),
            frameSize=(-1, 0, -self.height, 0),
            scale=(1, 1, 1),
            text=self.name,
            text_align=TextNode.A_right,
            text_scale=(0.1, 0.1),
            text_pos=(-0.1, -0.02),
            text_fg=(1, 1, 1, 1),
            text_bg=(0, 0, 0, 0),
            parent=self.frame,
        )

        # Normalize initial sizing through resize().
        # NOTE(review): resize() uses a vertical extent of (-height/2, height/2)
        # while the initial frameSize above uses (-height, 0) — confirm which
        # extent is intended.
        self.resize(1)

    def show(self, z, right):
        # Position the socket row at vertical offset z, anchored to the node's right edge.
        self.frame.setZ(z)
        self.frame.setX(right)

    def resize(self, newWidth):
        # Stretch both the frame and the label to the new width, keeping height.
        self.frame["frameSize"] = (-newWidth, 0, -self.height/2, self.height/2)
        self.text["frameSize"] = (-newWidth, 0, -self.height/2, self.height/2)
23805252c3ee61dd977e334816fc3a61840a15a9 | 4,900 | py | Python | grr/lib/authorization/client_approval_auth_test.py | ethicalhackeragnidhra/Grr | 9ff9178396d9d16575e42dded33627cb09ac3af1 | [
"Apache-2.0"
] | 1 | 2020-12-18T00:47:19.000Z | 2020-12-18T00:47:19.000Z | grr/lib/authorization/client_approval_auth_test.py | ethicalhackeragnidhra/Grr | 9ff9178396d9d16575e42dded33627cb09ac3af1 | [
"Apache-2.0"
] | null | null | null | grr/lib/authorization/client_approval_auth_test.py | ethicalhackeragnidhra/Grr | 9ff9178396d9d16575e42dded33627cb09ac3af1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Tests for grr.lib.authorization.client_approval_auth."""
from grr.lib import access_control
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.authorization import client_approval_auth
from grr.lib.rdfvalues import client as rdf_client
from grr.lib.rdfvalues import test_base
class ClientApprovalAuthorizationTest(test_base.RDFValueTestCase):
  """RDFValue round-trip tests plus validation checks for the auth object.

  Note: this is Python 2 code (bare `print` statement below).
  """
  rdfvalue_class = client_approval_auth.ClientApprovalAuthorization

  def setUp(self):
    super(ClientApprovalAuthorizationTest, self).setUp()
    self.urn = rdf_client.ClientURN("C.0000000000000000")

  def GenerateSample(self, number=0):
    # Required by RDFValueTestCase: produce a distinct valid sample per number.
    return client_approval_auth.ClientApprovalAuthorization(
        label="label%d" % number, users=["test", "test2"])

  def testApprovalValidation(self):
    # String instead of list of users
    with self.assertRaises(
        client_approval_auth.ErrorInvalidClientApprovalAuthorization):
      client_approval_auth.ClientApprovalAuthorization(
          label="label", users="test")

    # Missing label
    acl = client_approval_auth.ClientApprovalAuthorization(users=["test"])
    with self.assertRaises(
        client_approval_auth.ErrorInvalidClientApprovalAuthorization):
      # Accessing .label triggers lazy validation and raises.
      print acl.label

    # Bad label
    with self.assertRaises(
        client_approval_auth.ErrorInvalidClientApprovalAuthorization):
      acl.label = None
class ClientApprovalAuthorizationManager(test_lib.GRRBaseTest):
  """Tests for CheckApproversForLabel against three approval policies.

  Each _CreateAuth* helper loads a different YAML policy into the manager:
  single-label, requester-must-be-authorized, and multi-approver.
  """

  def setUp(self):
    super(ClientApprovalAuthorizationManager, self).setUp()
    self.mgr = client_approval_auth.ClientApprovalAuthorizationManager()
    self.urn = rdf_client.ClientURN("C.0000000000000000")

  def _CreateAuthSingleLabel(self):
    # Any one of `one`/`two` may approve; requester need not be authorized.
    self.mgr.LoadApprovals(yaml_data="""label: "label1"
users:
- one
- two
""")

  def _CreateAuthCheckRequester(self):
    # The requester themselves must also appear in the authorized users list.
    self.mgr.LoadApprovals(yaml_data="""label: "label1"
requester_must_be_authorized: True
users:
- one
- two
""")

  def _CreateAuthMultiApproval(self):
    # Two authorized approvers are required, in addition to an authorized requester.
    self.mgr.LoadApprovals(yaml_data="""label: "label1"
requester_must_be_authorized: True
num_approvers_required: 2
users:
- one
- two
- three
- four
""")

  def testRaisesOnNoApprovals(self):
    self._CreateAuthSingleLabel()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "requester_user",
                                      [], "label1")

  def testRaisesOnSelfApproval(self):
    self._CreateAuthSingleLabel()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "requester_user",
                                      ["requester_user"], "label1")

  def testRaisesOnAuthorizedSelfApproval(self):
    # Even an authorized user may not approve their own request.
    self._CreateAuthSingleLabel()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "one", ["one"],
                                      "label1")

  def testRaisesOnApprovalFromUnauthorized(self):
    self._CreateAuthSingleLabel()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "requester_user",
                                      ["approver1"], "label1")

  def testPassesWithApprovalFromApprovedUser(self):
    # One authorized approver ("two") among the approvers is sufficient.
    self._CreateAuthSingleLabel()
    self.mgr.CheckApproversForLabel(self.token, self.urn, "requester_user",
                                    ["approver1", "two"], "label1")

  def testRaisesWhenRequesterNotAuthorized(self):
    self._CreateAuthCheckRequester()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "requester_user",
                                      ["one"], "label1")

  def testRaisesOnSelfApprovalByAuthorizedRequester(self):
    self._CreateAuthCheckRequester()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "one", ["one"],
                                      "label1")

  def testPassesWhenApproverAndRequesterAuthorized(self):
    self._CreateAuthCheckRequester()
    self.mgr.CheckApproversForLabel(self.token, self.urn, "one", ["one", "two"],
                                    "label1")

  def testRaisesWhenOnlyOneAuthorizedApprover(self):
    # Multi-approval policy needs two approvers besides the requester.
    self._CreateAuthMultiApproval()
    with self.assertRaises(access_control.UnauthorizedAccess):
      self.mgr.CheckApproversForLabel(self.token, self.urn, "one",
                                      ["one", "two"], "label1")

  def testPassesWithTwoAuthorizedApprovers(self):
    self._CreateAuthMultiApproval()
    self.mgr.CheckApproversForLabel(self.token, self.urn, "one",
                                    ["two", "four"], "label1")
def main(argv):
  """Entry point: run this module's tests via the GRR test runner."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)


if __name__ == "__main__":
  flags.StartMain(main)
| 35 | 80 | 0.701633 |
8f2ba62d07dbfd69784228702015160e81964348 | 406 | py | Python | tests/test_examples.py | cscorley/triage | b7a2de8c5955418acf2b9bd93953a83327052257 | [
"MIT"
] | 3 | 2019-01-13T13:03:53.000Z | 2020-05-03T21:34:43.000Z | tests/test_examples.py | cscorley/triage | b7a2de8c5955418acf2b9bd93953a83327052257 | [
"MIT"
] | null | null | null | tests/test_examples.py | cscorley/triage | b7a2de8c5955418acf2b9bd93953a83327052257 | [
"MIT"
] | 1 | 2018-09-30T13:42:10.000Z | 2018-09-30T13:42:10.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Run this file's tests under nose when executed directly.
# NOTE(review): this guard sits above the module imports — works, but unusual
# ordering; confirm it is intentional.
if __name__ == '__main__':
    import nose
    nose.main()
from nose.tools import *
import unittest
import src
class TestExample(unittest.TestCase):
    """Minimal example test case demonstrating setUp and nose's @raises."""

    def setUp(self):
        # Flag checked by both tests below; set fresh before every test.
        self.is_setup = True

    def test_truth(self):
        assert self.is_setup

    # @raises is a nose decorator: the test passes only if the body raises
    # the given exception (here the failing assert).
    @raises(AssertionError)
    def test_passes_by_failing(self):
        assert not self.is_setup
19106bc74941bc660bd54a5270fea0e02fdf2a76 | 771 | py | Python | fastestimator/dataset/__init__.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | fastestimator/dataset/__init__.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | fastestimator/dataset/__init__.py | AriChow/fastestimator | d381d9acc1d42c6cf88a4424e083375cf98140bf | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from fastestimator.dataset import cub200, horse2zebra, mnist, montgomery, svhn
| 48.1875 | 80 | 0.693904 |
e99fff3cd40abb1d632585fb55560368993c51ac | 15,849 | py | Python | posthog/api/action.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | 1 | 2021-07-28T19:44:48.000Z | 2021-07-28T19:44:48.000Z | posthog/api/action.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | null | null | null | posthog/api/action.py | brave-care/posthog | 8edd14a16ad936fb241dcf856925e9f2ea87cba4 | [
"MIT"
] | null | null | null | import json
from typing import Any, Dict, List, Union, cast
import posthoganalytics
from django.core.cache import cache
from django.db.models import Count, Exists, OuterRef, Prefetch, QuerySet
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.timezone import now
from rest_framework import authentication, request, serializers, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework_csv import renderers as csvrenderers
from rest_hooks.signals import raw_hook_event
from posthog.api.routing import StructuredViewSetMixin
from posthog.api.shared import UserBasicSerializer
from posthog.api.utils import get_target_entity
from posthog.auth import PersonalAPIKeyAuthentication, TemporaryTokenAuthentication
from posthog.celery import update_cache_item_task
from posthog.constants import INSIGHT_STICKINESS, TREND_FILTER_TYPE_ACTIONS, TREND_FILTER_TYPE_EVENTS, TRENDS_STICKINESS
from posthog.decorators import CacheType, cached_function
from posthog.models import (
Action,
ActionStep,
CohortPeople,
DashboardItem,
Entity,
Event,
Filter,
Person,
RetentionFilter,
)
from posthog.models.event import EventManager
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.team import Team
from posthog.permissions import ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission
from posthog.queries import base, retention, stickiness, trends
from posthog.tasks.calculate_action import calculate_action
from posthog.utils import generate_cache_key, get_safe_cache, should_refresh
from .person import PersonSerializer, paginated_result
class ActionStepSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for a single matching step of an Action."""

    # Exposed as a plain (optional) string so the client may send existing step
    # ids back on update; new steps simply omit it.
    id = serializers.CharField(read_only=False, required=False)

    class Meta:
        model = ActionStep
        fields = [
            "id",
            "event",
            "tag_name",
            "text",
            "href",
            "selector",
            "url",
            "name",
            "url_matching",
            "properties",
        ]
        # Whitespace is significant when matching captured element text/attributes,
        # so DRF's default trimming is disabled for these fields.
        extra_kwargs = {
            "event": {"trim_whitespace": False},
            "tag_name": {"trim_whitespace": False},
            "text": {"trim_whitespace": False},
            "href": {"trim_whitespace": False},
            "name": {"trim_whitespace": False},
        }
class ActionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Actions, including their nested steps.

    create/update both sync nested steps, schedule an async recalculation via
    calculate_action, and report an analytics event.
    """

    steps = ActionStepSerializer(many=True, required=False)
    created_by = UserBasicSerializer(read_only=True)

    class Meta:
        model = Action
        fields = [
            "id",
            "name",
            "post_to_slack",
            "slack_message_format",
            "steps",
            "created_at",
            "deleted",
            "is_calculating",
            "last_calculated_at",
            "created_by",
            "team_id",
        ]
        extra_kwargs = {"team_id": {"read_only": True}}

    def validate(self, attrs):
        """Reject an action whose name collides with another action in the same team.

        Soft-deleted actions are excluded from the collision check; on update
        the instance itself is excluded so renaming to its own name is allowed.
        """
        instance = cast(Action, self.instance)
        exclude_args = {}
        if instance:
            include_args = {"team": instance.team}
            exclude_args = {"id": instance.pk}
        else:
            # On create, the team comes from the view's routing context.
            attrs["team_id"] = self.context["view"].team_id
            include_args = {"team_id": attrs["team_id"]}

        colliding_action_ids = list(
            Action.objects.filter(name=attrs["name"], deleted=False, **include_args)
            .exclude(**exclude_args)[:1]
            .values_list("id", flat=True)
        )
        if colliding_action_ids:
            raise serializers.ValidationError(
                {"name": f"This project already has an action with this name, ID {colliding_action_ids[0]}"},
                code="unique",
            )

        return attrs

    def create(self, validated_data: Any) -> Any:
        """Create the action plus its steps, then kick off recalculation."""
        steps = validated_data.pop("steps", [])
        validated_data["created_by"] = self.context["request"].user
        instance = super().create(validated_data)

        for step in steps:
            # "isNew"/"selection" are frontend-only bookkeeping keys; drop them.
            ActionStep.objects.create(
                action=instance, **{key: value for key, value in step.items() if key not in ("isNew", "selection")},
            )

        calculate_action.delay(action_id=instance.pk)
        posthoganalytics.capture(
            validated_data["created_by"].distinct_id, "action created", instance.get_analytics_metadata()
        )

        return instance

    def update(self, instance: Any, validated_data: Dict[str, Any]) -> Any:
        """Update the action, syncing steps to exactly what the payload contains."""
        steps = validated_data.pop("steps", None)
        # If there's no steps property at all we just ignore it
        # If there is a step property but it's an empty array [], we'll delete all the steps
        if steps is not None:
            # remove steps not in the request
            step_ids = [step["id"] for step in steps if step.get("id")]
            instance.steps.exclude(pk__in=step_ids).delete()

            for step in steps:
                if step.get("id"):
                    # Existing step: update it in place via the step serializer.
                    step_instance = ActionStep.objects.get(pk=step["id"])
                    step_serializer = ActionStepSerializer(instance=step_instance)
                    step_serializer.update(step_instance, step)
                else:
                    # New step: create it, dropping frontend-only keys.
                    ActionStep.objects.create(
                        action=instance,
                        **{key: value for key, value in step.items() if key not in ("isNew", "selection")},
                    )

        instance = super().update(instance, validated_data)
        calculate_action.delay(action_id=instance.pk)
        instance.refresh_from_db()
        posthoganalytics.capture(
            self.context["request"].user.distinct_id,
            "action updated",
            {
                **instance.get_analytics_metadata(),
                "updated_by_creator": self.context["request"].user == instance.created_by,
            },
        )
        return instance
def get_actions(queryset: QuerySet, params: dict, team_id: int) -> QuerySet:
    """Scope an Action queryset to a team, newest first, with event counts and steps.

    Annotates each action with the number of matched events ("count") and
    prefetches its steps in id order.
    NOTE(review): ``params`` is not used in this body — kept for call-signature
    compatibility; confirm whether it is still needed.
    """
    queryset = queryset.annotate(count=Count(TREND_FILTER_TYPE_EVENTS))
    queryset = queryset.prefetch_related(Prefetch("steps", queryset=ActionStep.objects.order_by("id")))
    return queryset.filter(team_id=team_id).order_by("-id")
class ActionViewSet(StructuredViewSetMixin, viewsets.ModelViewSet):
    """CRUD for Actions plus analytics endpoints (trends/retention/funnel/people).

    Supports CSV rendering for the people endpoint and several auth schemes
    (temporary token, personal API key, session, basic).
    """

    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (csvrenderers.PaginatedCSVRenderer,)
    queryset = Action.objects.all()
    serializer_class = ActionSerializer
    authentication_classes = [
        TemporaryTokenAuthentication,
        PersonalAPIKeyAuthentication,
        authentication.SessionAuthentication,
        authentication.BasicAuthentication,
    ]
    permission_classes = [IsAuthenticated, ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission]

    def get_queryset(self):
        # Soft-deleted actions are hidden from list views only (still fetchable by id).
        queryset = super().get_queryset()
        if self.action == "list":
            queryset = queryset.filter(deleted=False)
        return get_actions(queryset, self.request.GET.dict(), self.team_id)

    def list(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        actions = self.get_queryset()
        actions_list: List[Dict[Any, Any]] = self.serializer_class(actions, many=True, context={"request": request}).data  # type: ignore
        # ?include_count=1 sorts by the annotated event count, highest first.
        if request.GET.get("include_count", False):
            actions_list.sort(key=lambda action: action.get("count", action["id"]), reverse=True)
        return Response({"results": actions_list})

    @action(methods=["GET"], detail=False)
    def trends(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        result = self._calculate_trends(request)
        return Response(result)

    @cached_function
    def _calculate_trends(self, request: request.Request) -> List[Dict[str, Any]]:
        """Compute trends (or stickiness, when requested) for the current team."""
        team = self.team
        filter = Filter(request=request, team=self.team)
        if filter.insight == INSIGHT_STICKINESS or filter.shown_as == TRENDS_STICKINESS:
            earliest_timestamp_func = lambda team_id: Event.objects.earliest_timestamp(team_id)
            stickiness_filter = StickinessFilter(
                request=request, team=team, get_earliest_timestamp=earliest_timestamp_func
            )
            result = stickiness.Stickiness().run(stickiness_filter, team)
        else:
            result = trends.Trends().run(filter, team)

        # Record the refresh time on the originating dashboard item, if any.
        dashboard_id = request.GET.get("from_dashboard", None)
        if dashboard_id:
            DashboardItem.objects.filter(pk=dashboard_id).update(last_refresh=now())

        return result

    @action(methods=["GET"], detail=False)
    def retention(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Run a retention query over a fixed -11d window."""
        team = self.team
        properties = request.GET.get("properties", "{}")

        try:
            properties = json.loads(properties)
        except json.decoder.JSONDecodeError:
            raise ValidationError("Properties are unparsable!")

        data: Dict[str, Any] = {"properties": properties}
        start_entity_data = request.GET.get("start_entity", None)
        if start_entity_data:
            entity_data = json.loads(start_entity_data)
            # NOTE(review): "entites" looks like a typo (presumably "entities"),
            # in which case RetentionFilter silently ignores this key and
            # start_entity has no effect — confirm against RetentionFilter.
            data.update({"entites": [Entity({"id": entity_data["id"], "type": entity_data["type"]})]})

        data.update({"date_from": "-11d"})
        filter = RetentionFilter(data=data, team=self.team)

        result = retention.Retention().run(filter, team)
        return Response({"data": result})

    @action(methods=["GET"], detail=False)
    def funnel(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Funnel results are computed asynchronously and served from cache.

        Returns {"loading": True} while the celery task is still running;
        ?refresh busts the cache and re-queues the computation.
        """
        team = self.team
        refresh = should_refresh(request)
        dashboard_id = request.GET.get("from_dashboard", None)

        filter = Filter(request=request, team=self.team)
        cache_key = generate_cache_key("{}_{}".format(filter.toJSON(), team.pk))
        result = {"loading": True}

        if refresh:
            cache.delete(cache_key)
        else:
            cached_result = get_safe_cache(cache_key)
            if cached_result:
                task_id = cached_result.get("task_id", None)
                if not task_id:
                    # Finished result is cached — return it directly.
                    return Response(cached_result["result"])
                else:
                    # Computation already queued by a previous request.
                    return Response(result)

        payload = {"filter": filter.toJSON(), "team_id": team.pk}
        task = update_cache_item_task.delay(cache_key, CacheType.FUNNEL, payload)
        if not task.ready():
            task_id = task.id
            cache.set(cache_key, {"task_id": task_id}, 180)  # task will be live for 3 minutes

        if dashboard_id:
            DashboardItem.objects.filter(pk=dashboard_id).update(last_refresh=now())

        return Response(result)

    @action(methods=["GET"], detail=False)
    def people(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        result = self.get_people(request)
        return Response(result)

    def get_people(self, request: request.Request) -> Union[Dict[str, Any], List]:
        """List the people matching the requested entity/filter.

        Returns a flat row list when the client asked for CSV, otherwise a
        paginated JSON envelope.
        """
        team = self.team
        filter = Filter(request=request, team=self.team)
        entity = get_target_entity(request)

        events = filter_by_type(entity=entity, team=team, filter=filter)
        people = calculate_people(team=team, events=events, filter=filter, request=request)
        serialized_people = PersonSerializer(people, context={"request": request}, many=True).data

        current_url = request.get_full_path()
        next_url = paginated_result(serialized_people, request, filter.offset)

        if request.accepted_renderer.format == "csv":
            csvrenderers.CSVRenderer.header = ["Distinct ID", "Internal ID", "Email", "Name", "Properties"]
            content = [
                {
                    "Name": person.get("properties", {}).get("name"),
                    "Distinct ID": person.get("distinct_ids", [""])[0],
                    "Internal ID": person["uuid"],
                    "Email": person.get("properties", {}).get("email"),
                    "Properties": person.get("properties", {}),
                }
                for person in serialized_people
            ]
            return content

        return {
            "results": [{"people": serialized_people, "count": len(serialized_people)}],
            "next": next_url,
            "previous": current_url[1:],
        }

    @action(methods=["GET"], detail=True)
    def count(self, request: request.Request, **kwargs) -> Response:
        # NOTE(review): get_queryset() here is the team's full action list, not
        # the single routed object; .first() returns the newest action and is
        # None when the list is empty (AttributeError). Confirm intended behavior.
        count = self.get_queryset().first().count
        return Response({"count": count})
def filter_by_type(entity: Entity, team: Team, filter: Filter) -> QuerySet:
    """Build the Event queryset for the requested entity (action or event).

    Session filters bypass the entity and match the team's events directly.
    For an action entity that does not exist (or is soft-deleted), an empty
    queryset is returned rather than raising.
    """
    events: Union[EventManager, QuerySet] = Event.objects.none()
    if filter.session:
        events = Event.objects.filter(team=team).filter(base.filter_events(team.pk, filter)).add_person_id(team.pk)
    else:
        if entity.type == TREND_FILTER_TYPE_ACTIONS:
            actions = Action.objects.filter(deleted=False)
            try:
                # Existence check only; the action itself is resolved downstream.
                actions.get(pk=entity.id)
            except Action.DoesNotExist:
                return events
        events = base.process_entity_for_events(entity, team_id=team.pk, order_by=None).filter(
            base.filter_events(team.pk, filter, entity)
        )
    return events
def _filter_cohort_breakdown(events: QuerySet, filter: Filter) -> QuerySet:
if filter.breakdown_type == "cohort" and filter.breakdown_value != "all":
events = events.filter(
Exists(
CohortPeople.objects.filter(
cohort_id=int(cast(str, filter.breakdown_value)), person_id=OuterRef("person_id"),
).only("id")
)
)
return events
def _filter_person_prop_breakdown(events: QuerySet, filter: Filter) -> QuerySet:
if filter.breakdown_type == "person":
events = events.filter(
Exists(
Person.objects.filter(
**{"id": OuterRef("person_id"), "properties__{}".format(filter.breakdown): filter.breakdown_value,}
).only("id")
)
)
return events
def _filter_event_prop_breakdown(events: QuerySet, filter: Filter) -> QuerySet:
if filter.breakdown_type == "event":
events = events.filter(**{"properties__{}".format(filter.breakdown): filter.breakdown_value,})
return events
def calculate_people(
    team: Team, events: QuerySet, filter: Filter, request: request.Request, use_offset: bool = True
) -> QuerySet:
    """Resolve an event queryset into the distinct people behind those events.

    Applies any cohort/person/event breakdown narrowing, then pages 100 people
    per request via filter.offset (unless use_offset is False, which loads
    everyone).  Distinct ids are prefetched into distinct_ids_cache for the
    serializer.
    """
    events = events.values("person_id").distinct()
    events = _filter_cohort_breakdown(events, filter)
    events = _filter_person_prop_breakdown(events, filter)
    events = _filter_event_prop_breakdown(events, filter)
    people = Person.objects.filter(
        team=team,
        id__in=[p["person_id"] for p in (events[filter.offset : filter.offset + 100] if use_offset else events)],
    )
    people = base.filter_persons(team.id, request, people)  # type: ignore
    people = people.prefetch_related(Prefetch("persondistinctid_set", to_attr="distinct_ids_cache"))
    return people
@receiver(post_save, sender=Action, dispatch_uid="hook-action-defined")
def action_defined(sender, instance, created, raw, using, **kwargs):
    """Trigger action_defined hooks on Action creation."""
    # Fires only on creation, never on subsequent saves of an existing action.
    if created:
        raw_hook_event.send(
            sender=None,
            event_name="action_defined",
            instance=instance,
            payload=ActionSerializer(instance).data,
            # rest_hooks scopes subscriptions by "user"; here that is the team.
            user=instance.team,
        )
class LegacyActionViewSet(ActionViewSet):
    """ActionViewSet variant with legacy team compatibility enabled."""
    # NOTE(review): presumably served on older, non project-scoped API
    # routes — confirm against the URL configuration.
    legacy_team_compatibility = True
| 39.523691 | 137 | 0.641302 |
b62836f99b8108cc1787fe43725534a12bea39f8 | 601 | py | Python | app/db/repositories/dividend_records.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | app/db/repositories/dividend_records.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | app/db/repositories/dividend_records.py | Chaoyingz/paper_trading | cd3af81c932e8f4b1586f2b9bf86b5b252bec896 | [
"MIT"
] | null | null | null | from app import settings
from app.db.repositories.base import BaseRepository
from app.models.domain.dividend_records import DividendRecordsInDB
class DividendRecordsRepository(BaseRepository):
    """Persistence layer for dividend records."""
    COLLECTION_NAME = settings.db.collections.dividend_records
    async def create_dividend_records(
        self, dividend_records: DividendRecordsInDB
    ) -> DividendRecordsInDB:
        """Insert a dividend record and return it with its database-assigned id."""
        # Exclude "id" so the database generates one on insert.
        insert_result = await self.collection.insert_one(
            dividend_records.dict(exclude={"id"})
        )
        dividend_records.id = insert_result.inserted_id
        return dividend_records
| 33.388889 | 66 | 0.765391 |
1ece9cb5b492d3c23013154bba4db3c35ecce1e2 | 1,834 | py | Python | robot/BCI.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 8 | 2021-02-01T06:33:49.000Z | 2022-02-02T11:06:58.000Z | robot/BCI.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 1 | 2020-06-10T10:59:02.000Z | 2020-06-10T10:59:02.000Z | robot/BCI.py | mluyuchen/wukong-robot | 67f5cdb06db9e5e256017925a5efe6721cb2bd1d | [
"MIT"
] | 6 | 2021-01-20T03:22:19.000Z | 2022-03-21T14:19:32.000Z | import importlib
import multiprocessing
from robot import config, logging
from datetime import datetime, timedelta
logger = logging.getLogger(__name__)
class MuseBCI(object):
    """Wake-up trigger driven by a Muse headband over OSC.

    Listens for blink and jaw-clench messages; when both occur within one
    second of each other, the shared wake-up event is set.
    """
    def __init__(self, event):
        # Event object set to signal a wake-up.
        # NOTE(review): _start_osc runs in a child process (see start()),
        # so this presumably must be a multiprocessing-safe Event — confirm.
        self._wakeup_event = event
        # Initialize both timestamps far in the past so a single gesture
        # right after startup cannot trigger a wake-up by itself.
        self.last_blink = datetime.now() - timedelta(days=1.5)
        self.last_jaw = datetime.now() - timedelta(days=1.5)
    def start(self):
        # Run the blocking OSC server in a separate process (non-blocking here).
        osc_process = multiprocessing.Process(target=self._start_osc)
        osc_process.start()
    def blink_handler(self, unused_addr, args, blink):
        # OSC callback for /muse/elements/blink messages.
        if blink:
            logger.info("blink detected")
            self.last_blink = datetime.now()
            # Wake up only if a jaw clench happened within the last second.
            if (self.last_blink - self.last_jaw) <= timedelta(seconds=1):
                self._wakeup_event.set()
    def jaw_clench_handler(self, unused_addr, args, jaw):
        # OSC callback for /muse/elements/jaw_clench messages.
        if jaw:
            logger.info("Jaw_Clench detected")
            self.last_jaw = datetime.now()
            # Wake up only if a blink happened within the last second.
            if (self.last_jaw - self.last_blink) <= timedelta(seconds=1):
                self._wakeup_event.set()
    def _start_osc(self):
        # pythonosc is an optional dependency; bail out with a hint if missing.
        if not importlib.util.find_spec('pythonosc'):
            # Log message translates to: "Error: please install python-osc first!"
            logger.critical('错误:请先安装 python-osc !')
            return
        # Imported lazily so the module loads even without pythonosc installed.
        from pythonosc import dispatcher as dsp
        from pythonosc import osc_server
        dispatcher = dsp.Dispatcher()
        dispatcher.map("/muse/elements/blink", self.blink_handler, "EEG")
        dispatcher.map("/muse/elements/jaw_clench", self.jaw_clench_handler, "EEG")
        try:
            # Blocking UDP server; host/port come from the robot config with
            # localhost:5001 as the default.
            server = osc_server.ThreadingOSCUDPServer(
                (config.get('/muse/ip', '127.0.0.1'), int(config.get('/muse/port', '5001'))), dispatcher)
            logger.info("Muse serving on {}".format(server.server_address))
            server.serve_forever()
        except Exception as e:
            logger.error(e)
| 33.962963 | 105 | 0.626499 |
ecd713934fccf98b8272fb5533c44478927e85f6 | 4,540 | py | Python | tests/test_efro/test_dataclasses.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | tests/test_efro/test_dataclasses.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | tests/test_efro/test_dataclasses.py | ritiek/ballistica | 5f909d0b91bfbed3e96c21dbf342616a2d2e7b41 | [
"MIT"
] | null | null | null | # Released under the MIT License. See LICENSE for details.
#
"""Testing dataclasses functionality."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
import pytest
from efro.dataclasses import dataclass_assign, dataclass_validate
if TYPE_CHECKING:
from typing import Optional, List
def test_assign() -> None:
    """Testing various assignments."""
    # pylint: disable=too-many-statements
    @dataclass
    class _TestClass:
        ival: int = 0
        sval: str = ''
        bval: bool = True
        fval: float = 1.0
        oival: Optional[int] = None
        osval: Optional[str] = None
        obval: Optional[bool] = None
        ofval: Optional[float] = None
        lsval: List[str] = field(default_factory=list)
        lival: List[int] = field(default_factory=list)
        lbval: List[bool] = field(default_factory=list)
        lfval: List[float] = field(default_factory=list)
    tclass = _TestClass()
    class _TestClass2:
        pass
    tclass2 = _TestClass2()
    # Arg types: target must be a dataclass instance and values a dict.
    with pytest.raises(TypeError):
        dataclass_assign(tclass2, {})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, [])  # type: ignore
    # Invalid attrs.
    with pytest.raises(AttributeError):
        dataclass_assign(tclass, {'nonexistent': 'foo'})
    # Correct types.
    dataclass_assign(
        tclass, {
            'ival': 1,
            'sval': 'foo',
            'bval': True,
            'fval': 2.0,
            'lsval': ['foo'],
            'lival': [10],
            'lbval': [False],
            'lfval': [1.0]
        })
    # Optionals accept None; list fields accept empty lists.
    dataclass_assign(
        tclass, {
            'oival': None,
            'osval': None,
            'obval': None,
            'ofval': None,
            'lsval': [],
            'lival': [],
            'lbval': [],
            'lfval': []
        })
    # Optionals also accept concrete values; lists of the declared element type.
    dataclass_assign(
        tclass, {
            'oival': 1,
            'osval': 'foo',
            'obval': True,
            'ofval': 2.0,
            'lsval': ['foo', 'bar', 'eep'],
            'lival': [10, 11, 12],
            'lbval': [False, True],
            'lfval': [1.0, 2.0, 3.0]
        })
    # Type mismatches.
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'ival': 'foo'})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'sval': 1})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'bval': 2})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'oival': 'foo'})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'osval': 1})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'obval': 2})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'ofval': 'blah'})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lsval': 'blah'})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lsval': [1]})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lbval': [None]})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lival': ['foo']})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lfval': [True]})
    # More subtle ones (we currently require EXACT type matches)
    # e.g. bool is not accepted for int, nor int for float.
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'ival': True})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'fval': 2})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'bval': 1})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'ofval': 1})
    with pytest.raises(TypeError):
        dataclass_assign(tclass, {'lfval': [1]})
# noinspection PyTypeHints
def test_validate() -> None:
    """Testing validation."""
    @dataclass
    class _TestClass:
        ival: int = 0
        sval: str = ''
        bval: bool = True
        fval: float = 1.0
        oival: Optional[int] = None
        osval: Optional[str] = None
        obval: Optional[bool] = None
        ofval: Optional[float] = None
    # Should pass by default.
    tclass = _TestClass()
    dataclass_validate(tclass)
    # No longer valid.
    # (an int assigned to a float field must fail: exact type matches required)
    tclass.fval = 1
    with pytest.raises(TypeError):
        dataclass_validate(tclass)
    # Should pass by default.
    tclass = _TestClass()
    dataclass_validate(tclass)
    # No longer valid.
    # (None is not allowed for a non-Optional int field)
    tclass.ival = None  # type: ignore
    with pytest.raises(TypeError):
        dataclass_validate(tclass)
| 25.795455 | 65 | 0.578855 |
02c067c8d2f2b56794f9af7cc2db0dc186afe018 | 916 | py | Python | dciclient/v1/api/identity.py | redhat-cip/python-dciclient | 0169e3872a64e614c2655626b04361b165f8513b | [
"Apache-2.0"
] | 6 | 2015-12-18T14:03:02.000Z | 2017-08-03T14:09:05.000Z | dciclient/v1/api/identity.py | redhat-cip/python-dciclient | 0169e3872a64e614c2655626b04361b165f8513b | [
"Apache-2.0"
] | 13 | 2015-12-23T22:27:23.000Z | 2018-05-14T20:51:42.000Z | dciclient/v1/api/identity.py | redhat-cip/python-dciclient | 0169e3872a64e614c2655626b04361b165f8513b | [
"Apache-2.0"
] | 2 | 2016-01-26T23:30:24.000Z | 2020-06-18T16:33:19.000Z | # -*- encoding: utf-8 -*-
#
# Copyright 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
RESOURCE = "identity"


def get(context):
    """Fetch the identity endpoint for the authenticated resource."""
    endpoint = "{}/{}".format(context.dci_cs_api, RESOURCE)
    return context.session.get(endpoint)
def my_team_id(context):
    """Asks the control-server for the team_id of the currently
    authenticated resource.
    """
    identity = get(context).json()["identity"]
    return identity["team_id"]
| 30.533333 | 75 | 0.718341 |
a57ba20608552dbfba71da128a022ca442bd0a3b | 966 | py | Python | mypyc/crash.py | ooprathamm/mypy | 1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac | [
"PSF-2.0"
] | null | null | null | mypyc/crash.py | ooprathamm/mypy | 1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac | [
"PSF-2.0"
] | null | null | null | mypyc/crash.py | ooprathamm/mypy | 1ac9c77bb0b5b95a9b3ee8936ac74a52e6e641ac | [
"PSF-2.0"
] | null | null | null | from typing import Iterator
from typing_extensions import NoReturn
import sys
import traceback
from contextlib import contextmanager
@contextmanager
def catch_errors(module_path: str, line: int) -> Iterator[None]:
    """Context manager converting any exception in its body into a crash
    report (which exits the process via SystemExit, see crash_report).
    """
    try:
        yield
    except Exception:
        crash_report(module_path, line)
def crash_report(module_path: str, line: int) -> 'NoReturn':
    """Print a synthesized traceback for the active exception and exit(2).

    Adapted from report_internal_error in mypy. Must be called from inside
    an ``except`` block so ``sys.exc_info()`` carries the exception.
    """
    exc_value = sys.exc_info()[1]
    # Drop the last four frames: the reporting machinery itself.
    stack_frames = traceback.extract_stack()[:-4]
    # Excise all the traceback from the test runner
    for index, frame in enumerate(stack_frames):
        if frame.name == 'pytest_runtest_call':
            stack_frames = stack_frames[index + 1:]
            break
    # Skip the first frame of the exception traceback (the caller's frame).
    exc_frames = traceback.extract_tb(sys.exc_info()[2])[1:]
    print('Traceback (most recent call last):')
    for formatted in traceback.format_list(stack_frames + exc_frames):
        print(formatted.rstrip('\n'))
    print('{}:{}: {}: {}'.format(module_path, line, type(exc_value).__name__, exc_value))
    raise SystemExit(2)
| 30.1875 | 78 | 0.636646 |
a89efe5a93ff0944d56b9089d106b6844092491f | 393 | py | Python | momments/asgi.py | Ahmed-moringa/Moments | 90fe23e58274039d1dc8fa848129018d2369d06d | [
"MIT"
] | null | null | null | momments/asgi.py | Ahmed-moringa/Moments | 90fe23e58274039d1dc8fa848129018d2369d06d | [
"MIT"
] | null | null | null | momments/asgi.py | Ahmed-moringa/Moments | 90fe23e58274039d1dc8fa848129018d2369d06d | [
"MIT"
] | null | null | null | """
ASGI config for momments project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Must be set before get_asgi_application() loads Django settings.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'momments.settings')
# Module-level ASGI callable imported by servers such as uvicorn/daphne.
application = get_asgi_application()
| 23.117647 | 78 | 0.78626 |
2b7e7e5d9eaae166c12a74329098b4dcbdc04137 | 35,710 | py | Python | run_siamese.py | houpanpan/BERT-flow | b1ed085f0bae111fd5625b51320ace60cbb1908c | [
"Apache-2.0"
] | 482 | 2020-11-10T15:04:35.000Z | 2022-03-24T13:23:11.000Z | run_siamese.py | houpanpan/BERT-flow | b1ed085f0bae111fd5625b51320ace60cbb1908c | [
"Apache-2.0"
] | 16 | 2020-11-13T03:33:33.000Z | 2021-08-28T09:27:51.000Z | run_siamese.py | houpanpan/BERT-flow | b1ed085f0bae111fd5625b51320ace60cbb1908c | [
"Apache-2.0"
] | 57 | 2020-11-23T03:01:32.000Z | 2022-03-03T01:51:58.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner for regression tasks
A large portion of the code is adapted from
https://github.com/zihangdai/xlnet/blob/master/run_classifier.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
import collections
import csv
import modeling
import optimization
import tokenization
import tensorflow as tf
import random
import numpy as np
from flow.glow_1x1 import AttrDict, Glow
from flow.glow_init_hook import GlowInitHook
import optimization_bert_flow
import json
from siamese_utils import StsbProcessor, SickRProcessor, MnliProcessor, QqpProcessor, \
SnliTrainProcessor, SnliDevTestProcessor, \
Sts_12_16_Processor, MrpcRegressionProcessor, QnliRegressionProcessor, \
file_based_convert_examples_to_features, file_based_input_fn_builder, \
get_input_mask_segment
# Command-line flags for the siamese BERT(-flow) finetuning runner.
flags = tf.flags
FLAGS = flags.FLAGS
# model
flags.DEFINE_string("bert_config_file", None,
                    "The config json file corresponding to the pre-trained BERT model. "
                    "This specifies the model architecture.")
flags.DEFINE_integer("max_seq_length", 128,
                     "The maximum total input sequence length after WordPiece tokenization. "
                     "Sequences longer than this will be truncated, and sequences shorter "
                     "than this will be padded.")
flags.DEFINE_string("init_checkpoint", None,
                    "Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_bool("do_lower_case", True,
                  "Whether to lower case the input text. Should be True for uncased "
                  "models and False for cased models.")
# task and data
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string("data_dir", None,
                    "The input data dir. Should contain the .tsv files (or other data files) "
                    "for the task.")
flags.DEFINE_float("label_min", 0., None)
flags.DEFINE_float("label_max", 5., None)
# exp
flags.DEFINE_string("output_parent_dir", None, None)
flags.DEFINE_string("exp_name", None, None)
flags.DEFINE_string("exp_name_prefix", None, None)
flags.DEFINE_integer("log_every_step", 10, None)
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_bool("use_xla", False, None)
flags.DEFINE_integer("seed", 1234, None)
flags.DEFINE_string("cached_dir", None,
                    "Path to cached training and dev tfrecord file. "
                    "The file will be generated if not exist.")
# training
flags.DEFINE_bool("do_train", False, None)
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float("warmup_proportion", 0.1,
                   "Proportion of training to perform linear learning rate warmup for. "
                   "E.g., 0.1 = 10% of training.")
flags.DEFINE_bool("early_stopping", False, None)
flags.DEFINE_integer("start_delay_secs", 120, "for tf.estimator.EvalSpec")
flags.DEFINE_integer("throttle_secs", 600, "for tf.estimator.EvalSpec")
# eval
flags.DEFINE_bool("do_eval", False, None)
flags.DEFINE_bool("do_predict", False, None)
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_bool("predict_pool", False, None)
flags.DEFINE_bool("do_predict_on_dev", False, None)
flags.DEFINE_bool("do_predict_on_full", False, None)
flags.DEFINE_string("eval_checkpoint_name", None, "filename of a finetuned checkpoint")
flags.DEFINE_bool("auc", False, None)
# sentence embedding related parameters
flags.DEFINE_string("sentence_embedding_type", "avg", "avg, cls, ...")
# flow parameters
flags.DEFINE_integer("flow", 0, "use flow or not")
flags.DEFINE_integer("flow_loss", 0, "use flow loss or not")
flags.DEFINE_float("flow_learning_rate", 1e-3, "The initial learning rate for Adam.")
flags.DEFINE_string("flow_model_config", "config_l3_d3_w32", None)
# unsupervised or semi-supervised related parameters
flags.DEFINE_integer("num_examples", -1, "# of labeled training examples")
flags.DEFINE_integer("use_full_for_training", 0, None)
flags.DEFINE_integer("dupe_factor", 1, "Number of times to duplicate the input data (with different masks).")
# nli related parameters
# flags.DEFINE_integer("use_snli_full", 0, "augment MNLI training data with SNLI")
flags.DEFINE_float("l2_penalty", -1, "penalize l2 norm of sentence embeddings")
# dimension reduction related parameters
flags.DEFINE_integer("low_dim", -1, "avg pooling over the embedding")
# senteval
flags.DEFINE_bool("do_senteval", False, None)
flags.DEFINE_string("senteval_tasks", "", None)
def get_embedding(bert_config, is_training,
                  input_ids, input_mask, segment_ids, scope=None):
    """Encode one sentence with BERT and pool it into a single embedding.

    Pooling strategy is selected by FLAGS.sentence_embedding_type; when
    FLAGS.flow is set, the pooled vector is additionally mapped through a
    Glow flow model. Returns (embedding, flow_loss_batch, flow_loss_example),
    with the flow losses None when FLAGS.flow is off.
    """
    model = modeling.BertModel(
        config=bert_config,
        is_training=is_training,
        input_ids=input_ids,
        input_mask=input_mask,
        token_type_ids=segment_ids,
        scope=scope)
    if FLAGS.sentence_embedding_type == "avg":
        # Mask-weighted mean over the last encoder layer.
        sequence = model.get_sequence_output()  # [batch_size, seq_length, hidden_size]
        input_mask_ = tf.cast(tf.expand_dims(input_mask, axis=-1), dtype=tf.float32)
        pooled = tf.reduce_sum(sequence * input_mask_, axis=1) / tf.reduce_sum(input_mask_, axis=1)
    elif FLAGS.sentence_embedding_type == "cls":
        pooled = model.get_pooled_output()
    elif FLAGS.sentence_embedding_type.startswith("avg-last-last-"):
        # Mask-weighted mean over the single n-th-from-last encoder layer.
        pooled = 0
        n_last = int(FLAGS.sentence_embedding_type[-1])
        input_mask_ = tf.cast(tf.expand_dims(input_mask, axis=-1), dtype=tf.float32)
        sequence = model.all_encoder_layers[-n_last]  # [batch_size, seq_length, hidden_size]
        pooled += tf.reduce_sum(sequence * input_mask_, axis=1) / tf.reduce_sum(input_mask_, axis=1)
    elif FLAGS.sentence_embedding_type.startswith("avg-last-"):
        # NOTE(review): with i starting at 0, all_encoder_layers[-i] selects
        # the FIRST layer on the first iteration (-0 == 0), so "avg-last-2"
        # averages the first and last layers. That matches first+last-layer
        # averaging used by BERT-flow, so presumably intentional — confirm.
        pooled = 0
        n_last = int(FLAGS.sentence_embedding_type[-1])
        input_mask_ = tf.cast(tf.expand_dims(input_mask, axis=-1), dtype=tf.float32)
        for i in range(n_last):
            sequence = model.all_encoder_layers[-i]  # [batch_size, seq_length, hidden_size]
            pooled += tf.reduce_sum(sequence * input_mask_, axis=1) / tf.reduce_sum(input_mask_, axis=1)
        pooled /= float(n_last)
    elif FLAGS.sentence_embedding_type.startswith("avg-last-concat-"):
        # Same layer selection as above, but concatenated instead of averaged.
        pooled = []
        n_last = int(FLAGS.sentence_embedding_type[-1])
        input_mask_ = tf.cast(tf.expand_dims(input_mask, axis=-1), dtype=tf.float32)
        for i in range(n_last):
            sequence = model.all_encoder_layers[-i]  # [batch_size, seq_length, hidden_size]
            pooled += [tf.reduce_sum(sequence * input_mask_, axis=1) / tf.reduce_sum(input_mask_, axis=1)]
        pooled = tf.concat(pooled, axis=-1)
    else:
        raise NotImplementedError
    # flow
    embedding = None
    flow_loss_batch, flow_loss_example = None, None
    if FLAGS.flow:
        # load model and train config
        with open(os.path.join("./flow/config", FLAGS.flow_model_config + ".json"), 'r') as jp:
            flow_model_config = AttrDict(json.load(jp))
        flow_model_config.is_training = is_training
        flow_model = Glow(flow_model_config)
        flow_loss_example = flow_model.body(pooled, is_training)  # no l2 normalization here any more
        flow_loss_batch = tf.math.reduce_mean(flow_loss_example)
        embedding = tf.identity(tf.squeeze(flow_model.z, [1, 2]))  # no l2 normalization here any more
    else:
        embedding = pooled
    if FLAGS.low_dim > 0:
        # Dimensionality reduction by average-pooling groups of hidden units.
        bsz, org_dim = modeling.get_shape_list(embedding)
        embedding = tf.reduce_mean(
            tf.reshape(embedding, [bsz, FLAGS.low_dim, org_dim // FLAGS.low_dim]), axis=-1)
    return embedding, flow_loss_batch, flow_loss_example
def create_model(bert_config, is_regression,
                 is_training,
                 input_ids_a, input_mask_a, segment_ids_a,
                 input_ids_b, input_mask_b, segment_ids_b,
                 labels, num_labels):
    """Creates a siamese classification/regression model.

    Encodes both sentences with a shared BERT tower, then computes either a
    cosine-similarity regression loss or a softmax classification loss over
    [a; b; |a-b|]. Returns a dict of tensors (loss, per-example loss,
    cosine similarity, both embeddings, logits/predictions, flow losses).
    """
    # Both towers share weights: the second scope reuses the first.
    with tf.variable_scope("bert") as scope:
        embedding_a, flow_loss_batch_a, flow_loss_example_a = \
            get_embedding(bert_config, is_training,
                          input_ids_a, input_mask_a, segment_ids_a, scope)
    with tf.variable_scope("bert", reuse=tf.AUTO_REUSE) as scope:
        embedding_b, flow_loss_batch_b, flow_loss_example_b = \
            get_embedding(bert_config, is_training,
                          input_ids_b, input_mask_b, segment_ids_b, scope)
    with tf.variable_scope("loss"):
        cos_similarity = tf.reduce_sum(tf.multiply(
            tf.nn.l2_normalize(embedding_a, axis=-1),
            tf.nn.l2_normalize(embedding_b, axis=-1)), axis=-1)
        if is_regression:
            # changing cos_similarity into (cos_similarity + 1)/2.0
            # leads to large performance decrease in practice
            per_example_loss = tf.square(cos_similarity - labels)
            loss = tf.reduce_mean(per_example_loss)
            logits, predictions = None, None
        else:
            # SBERT-style classification head over [a; b; |a-b|].
            output_layer = tf.concat([
                embedding_a, embedding_b, tf.math.abs(embedding_a - embedding_b)
            ], axis=-1)
            output_size = output_layer.shape[-1].value
            output_weights = tf.get_variable(
                "output_weights", [num_labels, output_size],
                initializer=tf.truncated_normal_initializer(stddev=0.02))
            logits = tf.matmul(output_layer, output_weights, transpose_b=True)
            probabilities = tf.nn.softmax(logits, axis=-1)
            predictions = tf.argmax(probabilities, axis=-1, output_type=tf.int32)
            log_probs = tf.nn.log_softmax(logits, axis=-1)
            one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
            per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
            loss = tf.reduce_mean(per_example_loss)
            # Semi-supervised modes: 0 labeled examples -> zero out the
            # supervised loss; >0 -> mask out examples labeled -1 (unlabeled).
            if FLAGS.num_examples == 0:
                per_example_loss = tf.zeros_like(per_example_loss)
                loss = tf.zeros_like(loss)
            elif FLAGS.num_examples > 0:
                per_example_loss = per_example_loss * tf.cast(labels > -1, dtype=tf.float32)
                loss = tf.reduce_mean(per_example_loss)
        if FLAGS.l2_penalty > 0:
            # Optional penalty on the l2 norms of both sentence embeddings.
            l2_penalty_loss = tf.norm(embedding_a, axis=-1, keepdims=False)
            l2_penalty_loss += tf.norm(embedding_b, axis=-1, keepdims=False)
            l2_penalty_loss *= FLAGS.l2_penalty
            per_example_loss += l2_penalty_loss
            loss += tf.reduce_mean(l2_penalty_loss)
    model_output = {
        "loss": loss,
        "per_example_loss": per_example_loss,
        "cos_similarity": cos_similarity,
        "embedding_a": embedding_a,
        "embedding_b": embedding_b,
        "logits": logits,
        "predictions": predictions,
    }
    if FLAGS.flow:
        model_output["flow_example_loss"] = flow_loss_example_a + flow_loss_example_b
        model_output["flow_loss"] = flow_loss_batch_a + flow_loss_batch_b
    return model_output
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, is_regression):
    """Returns `model_fn` closure for Estimator."""
    def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
        """The `model_fn` for Estimator."""
        tf.logging.info("*** Features ***")
        for name in sorted(features.keys()):
            tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
        input_ids_a = features["input_ids_a"]
        input_mask_a = features["input_mask_a"]
        segment_ids_a = features["segment_ids_a"]
        input_ids_b = features["input_ids_b"]
        input_mask_b = features["input_mask_b"]
        segment_ids_b = features["segment_ids_b"]
        label_ids = features["label_ids"]
        # Padding examples (is_real_example == 0) are excluded from metrics.
        is_real_example = None
        if "is_real_example" in features:
            is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
        else:
            is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
        #### Training or Evaluation
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        #### Get loss from inputs
        model_output = create_model(
            bert_config, is_regression,
            is_training,
            input_ids_a, input_mask_a, segment_ids_a,
            input_ids_b, input_mask_b, segment_ids_b,
            label_ids,
            num_labels)
        # Warm-start all matching variables from the pretrained checkpoint.
        tvars = tf.trainable_variables()
        initialized_variable_names = {}
        if init_checkpoint:
            (assignment_map, initialized_variable_names
            ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
            tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
        tf.logging.info("**** Trainable Variables ****")
        for var in tvars:
            init_string = ""
            if var.name in initialized_variable_names:
                init_string = ", *INIT_FROM_CKPT*"
            tf.logging.info("  name = %s, shape = %s%s", var.name, var.shape,
                            init_string)
            # if "flow" in var.name:
            #     input()
        output_spec = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            if FLAGS.flow_loss:
                # Joint optimization: task loss and flow loss use separate
                # learning rates (see optimization_bert_flow).
                train_op = optimization_bert_flow.create_optimizer(
                    model_output["loss"], model_output["flow_loss"],
                    learning_rate, FLAGS.flow_learning_rate,
                    num_train_steps, num_warmup_steps, use_tpu=False)
                tf.summary.scalar("loss", model_output["loss"])
                tf.summary.scalar("flow_loss", model_output["flow_loss"])
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=model_output["loss"] + model_output["flow_loss"],
                    train_op=train_op)
            else:
                train_op = optimization.create_optimizer(
                    model_output["loss"], learning_rate,
                    num_train_steps, num_warmup_steps, use_tpu=False)
                output_spec = tf.estimator.EstimatorSpec(
                    mode=mode,
                    loss=model_output["loss"],
                    train_op=train_op)
        elif mode == tf.estimator.ModeKeys.EVAL:
            # Classification metrics (accuracy + mean loss).
            def metric_fn(model_output, label_ids, is_real_example):
                predictions = tf.argmax(model_output["logits"], axis=-1, output_type=tf.int32)
                accuracy = tf.metrics.accuracy(
                    labels=label_ids, predictions=model_output["predictions"],
                    weights=is_real_example)
                loss = tf.metrics.mean(
                    values=model_output["per_example_loss"], weights=is_real_example)
                metric_output = {
                    "eval_accuracy": accuracy,
                    "eval_loss": loss,
                }
                if "flow_loss" in model_output:
                    metric_output["eval_loss_flow"] = \
                        tf.metrics.mean(values=model_output["flow_example_loss"], weights=is_real_example)
                    metric_output["eval_loss_total"] = \
                        tf.metrics.mean(
                            values=model_output["per_example_loss"] + model_output["flow_example_loss"],
                            weights=is_real_example)
                return metric_output
            # Regression metrics (mean loss + Pearson correlation).
            def regression_metric_fn(model_output, label_ids, is_real_example):
                metric_output = {
                    "eval_loss": tf.metrics.mean(
                        values=model_output["per_example_loss"], weights=is_real_example),
                    "eval_pearsonr": tf.contrib.metrics.streaming_pearson_correlation(
                        model_output["cos_similarity"], label_ids, weights=is_real_example)
                }
                # metric_output["auc"] = tf.compat.v1.metrics.auc(
                #     label_ids, tf.math.maximum(model_output["cos_similarity"], 0), weights=is_real_example, curve='ROC')
                if "flow_loss" in model_output:
                    metric_output["eval_loss_flow"] = \
                        tf.metrics.mean(values=model_output["flow_example_loss"], weights=is_real_example)
                    metric_output["eval_loss_total"] = \
                        tf.metrics.mean(
                            values=model_output["per_example_loss"] + model_output["flow_example_loss"],
                            weights=is_real_example)
                return metric_output
            if is_regression:
                metric_fn = regression_metric_fn
            eval_metrics = metric_fn(model_output, label_ids, is_real_example)
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                loss=model_output["loss"],
                eval_metric_ops=eval_metrics)
        else:
            # PREDICT mode: emit raw embeddings or cosine similarities.
            output_spec = tf.estimator.EstimatorSpec(
                mode=mode,
                predictions= {"embedding_a": model_output["embedding_a"],
                              "embedding_b": model_output["embedding_b"]} if FLAGS.predict_pool else \
                    {"cos_similarity": model_output["cos_similarity"]})
        return output_spec
    return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
# random seed
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
tf.compat.v1.set_random_seed(FLAGS.seed)
print("FLAGS.seed", FLAGS.seed)
# input()
# prevent double printing of the tf logs
logger = tf.get_logger()
logger.propagate = False
# get tokenizer
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
# get bert config
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
# GPU config
run_config = tf.compat.v1.ConfigProto()
if FLAGS.use_xla:
run_config.graph_options.optimizer_options.global_jit_level = \
tf.OptimizerOptions.ON_1
run_config.gpu_options.allow_growth = True
if FLAGS.do_senteval:
# Set up logger
import logging
tf.logging.set_verbosity(0)
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
# load senteval
import sys
PATH_TO_SENTEVAL, PATH_TO_DATA = '../SentEval', '../SentEval/data'
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# model
tf.logging.info("***** Running SentEval *****")
with tf.Graph().as_default():
with tf.variable_scope("bert") as scope:
input_ids = tf.placeholder(shape=[None, None], dtype=tf.int32, name="input_ids")
input_mask = tf.placeholder(shape=[None, None], dtype=tf.int32, name="input_mask")
segment_ids = tf.placeholder(shape=[None, None], dtype=tf.int32, name="segment_ids")
embedding, flow_loss_batch, flow_loss_example = \
get_embedding(bert_config, False,
input_ids, input_mask, segment_ids, scope=scope)
embedding = tf.nn.l2_normalize(embedding, axis=-1)
tvars = tf.trainable_variables()
initialized_variable_names = {}
if FLAGS.init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, FLAGS.init_checkpoint)
tf.train.init_from_checkpoint(FLAGS.init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
with tf.train.MonitoredSession(
session_creator=tf.compat.v1.train.ChiefSessionCreator(config=run_config)) as session:
# SentEval prepare and batcher
def prepare(params, samples):
return
def batcher(params, batch):
batch_input_ids, batch_input_mask, batch_segment_ids = [], [], []
for sent in batch:
if type(sent[0]) == bytes:
sent = [_.decode() for _ in sent]
text = ' '.join(sent) if sent != [] else '.'
# print(text)
_input_ids, _input_mask, _segment_ids, _tokens = \
get_input_mask_segment(text, FLAGS.max_seq_length, tokenizer)
batch_input_ids.append(_input_ids)
batch_input_mask.append(_input_mask)
batch_segment_ids.append(_segment_ids)
batch_input_ids = np.asarray(batch_input_ids)
batch_input_mask = np.asarray(batch_input_mask)
batch_segment_ids = np.asarray(batch_segment_ids)
print(".", end="")
return session.run(embedding,
{input_ids: batch_input_ids,
input_mask: batch_input_mask,
segment_ids: batch_segment_ids})
# Set params for SentEval
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': True, 'kfold': 5}
params_senteval['classifier'] = {'nhid': 0, 'optim': 'rmsprop', 'batch_size': 128,
'tenacity': 3, 'epoch_size': 2}
# main
se = senteval.engine.SE(params_senteval, batcher, prepare)
# transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16',
# 'MR', 'CR', 'MPQA', 'SUBJ', 'SST2', 'SST5', 'TREC', 'MRPC',
# 'SICKEntailment', 'SICKRelatedness', 'STSBenchmark',
# 'Length', 'WordContent', 'Depth', 'TopConstituents',
# 'BigramShift', 'Tense', 'SubjNumber', 'ObjNumber',
# 'OddManOut', 'CoordinationInversion']
#transfer_tasks = ['STS12', 'STS13', 'STS14', 'STS15', 'STS16', 'STSBenchmark', 'SICKRelatedness']
#transfer_tasks = ['MR', 'CR', 'SUBJ', 'MPQA', 'SST2', 'TREC', 'MRPC']
transfer_tasks = FLAGS.senteval_tasks.split(",")
results = se.eval(transfer_tasks)
from collections import OrderedDict
results = OrderedDict(results)
for key in sorted(results):
value = results[key]
if key.startswith("STS"):
print("'" + key + "':", value["all"])
else:
print(key, value)
return
processors = {
'sts-b': StsbProcessor,
'sick-r': SickRProcessor,
'mnli': MnliProcessor,
'allnli': MnliProcessor,
'qqp': QqpProcessor,
'sts-12-16': Sts_12_16_Processor,
'sts-12': Sts_12_16_Processor,
'sts-13': Sts_12_16_Processor,
'sts-14': Sts_12_16_Processor,
'sts-15': Sts_12_16_Processor,
'sts-16': Sts_12_16_Processor,
'mrpc-regression': MrpcRegressionProcessor,
'qnli-regression': QnliRegressionProcessor,
}
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
if task_name == 'sick-r' or task_name.startswith("sts"):
is_regression = True
label_min, label_max = 0., 5.
elif task_name in ['qqp', 'mrpc-regression', 'qnli-regression']:
is_regression = True
label_min, label_max = 0., 1.
else:
is_regression = False
label_min, label_max = 0., 1.
dupe_factor = FLAGS.dupe_factor
processor = processors[task_name]()
label_list = processor.get_labels()
# this block is moved here for calculating the epoch_step for save_checkpoints_steps
train_examples = None
num_train_steps = None
num_warmup_steps = None
if task_name == "allnli":
FLAGS.data_dir = os.path.join(os.path.dirname(FLAGS.data_dir), "MNLI")
if FLAGS.do_train and FLAGS.num_train_epochs > 1e-6:
train_examples = processor.get_train_examples(FLAGS.data_dir)
if task_name == "allnli":
snli_data_dir = os.path.join(os.path.dirname(FLAGS.data_dir), "SNLI")
train_examples.extend(SnliTrainProcessor().get_train_examples(snli_data_dir))
train_examples.extend(SnliDevTestProcessor().get_dev_examples(snli_data_dir))
train_examples.extend(SnliDevTestProcessor().get_test_examples(snli_data_dir))
if FLAGS.use_full_for_training:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
predict_examples = processor.get_test_examples(FLAGS.data_dir)
train_examples.extend(eval_examples + predict_examples)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
epoch_step = int(len(train_examples) / FLAGS.train_batch_size)
if FLAGS.num_examples > 0:
random.shuffle(train_examples)
for i in range(FLAGS.num_examples, len(train_examples)):
train_examples[i].label = -10
random.shuffle(train_examples)
# ==== #
if FLAGS.early_stopping:
save_checkpoints_steps = epoch_step
else:
save_checkpoints_steps = FLAGS.save_checkpoints_steps
keep_checkpoint_max = 3
save_summary_steps = log_every_step = FLAGS.log_every_step
tf.logging.info("save_checkpoints_steps: %d" % save_checkpoints_steps)
# make exp dir
if FLAGS.exp_name:
output_dir = os.path.join(FLAGS.output_parent_dir, FLAGS.exp_name)
elif FLAGS.exp_name_prefix:
output_dir = os.path.join(FLAGS.output_parent_dir, FLAGS.exp_name_prefix)
output_dir += "_t_%s" % (FLAGS.task_name)
output_dir += "_ep_%.2f" % (FLAGS.num_train_epochs)
output_dir += "_lr_%.2e" % (FLAGS.learning_rate)
if FLAGS.train_batch_size != 32:
output_dir += "_bsz_%d" % (FLAGS.train_batch_size)
if FLAGS.sentence_embedding_type != "avg":
output_dir += "_e_%s" % (FLAGS.sentence_embedding_type)
if FLAGS.flow > 0:
output_dir += "_f_%d%d" % (FLAGS.flow, FLAGS.flow_loss)
if FLAGS.flow_loss > 0:
output_dir += "_%.2e" % (FLAGS.flow_learning_rate)
if FLAGS.use_full_for_training > 0:
output_dir += "_allsplits"
if FLAGS.flow_model_config != "config_l3_d3_w32":
output_dir += "_%s" % (FLAGS.flow_model_config)
if FLAGS.num_examples > 0:
output_dir += "_n_%d" % (FLAGS.num_examples)
if FLAGS.low_dim > -1:
output_dir += "_ld_%d" % (FLAGS.low_dim)
if FLAGS.l2_penalty > 0:
output_dir += "_l2_%.2e" % (FLAGS.l2_penalty)
else:
raise NotImplementedError
if tf.gfile.Exists(output_dir) and FLAGS.do_train:
tf.io.gfile.rmtree(output_dir)
tf.gfile.MakeDirs(output_dir)
# set up estimator
run_config = tf.estimator.RunConfig(
model_dir=output_dir,
save_summary_steps=save_summary_steps,
save_checkpoints_steps=save_checkpoints_steps,
keep_checkpoint_max=keep_checkpoint_max,
log_step_count_steps=log_every_step,
session_config=run_config)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
is_regression=is_regression)
estimator = tf.estimator.Estimator(
model_fn=model_fn,
config=run_config,
params={
'train_batch_size': FLAGS.train_batch_size,
'eval_batch_size': FLAGS.eval_batch_size,
'predict_batch_size': FLAGS.predict_batch_size})
  def get_train_input_fn():
    """Build (and cache) the training TFRecord file and return its input_fn.

    Closes over ``train_examples``, ``label_list``, ``tokenizer``, etc. from
    the enclosing function.  The record file name encodes the subsampling
    (``num_examples``/``seed``) or full-split configuration so different
    settings do not collide in the cache directory.
    """
    cached_dir = FLAGS.cached_dir
    if not cached_dir:
      cached_dir = output_dir
    data_name = task_name
    if FLAGS.num_examples > 0:
      # Subsampled training set: name depends on sample size and seed.
      train_file = os.path.join(cached_dir,
          data_name + "_n_%d" % (FLAGS.num_examples) \
          + "_seed_%d" % (FLAGS.seed) + "_train.tf_record")
    elif FLAGS.use_full_for_training > 0:
      train_file = os.path.join(cached_dir, data_name + "_allsplits.tf_record")
    else:
      train_file = os.path.join(cached_dir, data_name + "_train.tf_record")
    # Only serialize features if the cached record file is absent.
    if not tf.gfile.Exists(train_file):
      file_based_convert_examples_to_features(
          train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file,
          dupe_factor, label_min, label_max,
          is_training=True)
    tf.logging.info("***** Running training *****")
    tf.logging.info("  Num examples = %d", len(train_examples))
    tf.logging.info("  Batch size = %d", FLAGS.train_batch_size)
    tf.logging.info("  Num steps = %d", num_train_steps)
    train_input_fn = file_based_input_fn_builder(
        input_file=train_file,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True,
        is_regression=is_regression)
    return train_input_fn
  def get_eval_input_fn():
    """Build (and cache) the evaluation TFRecord file and return its input_fn.

    Uses the dev split of the task.  Closes over ``processor``, ``FLAGS``,
    ``output_dir``, ``tokenizer`` etc. from the enclosing function.
    """
    eval_examples = processor.get_dev_examples(FLAGS.data_dir)
    num_actual_eval_examples = len(eval_examples)
    cached_dir = FLAGS.cached_dir
    if not cached_dir:
      cached_dir = output_dir
    eval_file = os.path.join(cached_dir, task_name + "_eval.tf_record")
    # Only serialize features if the cached record file is absent.
    if not tf.gfile.Exists(eval_file):
      file_based_convert_examples_to_features(
          eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file,
          dupe_factor, label_min, label_max)
    tf.logging.info("***** Running evaluation *****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(eval_examples), num_actual_eval_examples,
                    len(eval_examples) - num_actual_eval_examples)
    tf.logging.info("  Batch size = %d", FLAGS.eval_batch_size)
    # This tells the estimator to run through the entire set.
    eval_drop_remainder = False
    eval_input_fn = file_based_input_fn_builder(
        input_file=eval_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=eval_drop_remainder,
        is_regression=is_regression)
    return eval_input_fn
  def get_predict_input_fn():
    """Build the prediction TFRecord file and return (input_fn, num_examples).

    The example source depends on flags: dev split, all splits
    (test + dev + train, in that order), or the test split (default).
    Unlike the train/eval builders, the record file is always rewritten.
    """
    predict_examples = None
    if FLAGS.do_predict_on_dev:
      predict_examples = processor.get_dev_examples(FLAGS.data_dir)
    elif FLAGS.do_predict_on_full:
      train_examples = processor.get_train_examples(FLAGS.data_dir)
      eval_examples = processor.get_dev_examples(FLAGS.data_dir)
      predict_examples = processor.get_test_examples(FLAGS.data_dir)
      # Order is test, then dev, then train.
      predict_examples.extend(eval_examples + train_examples)
    else:
      predict_examples = processor.get_test_examples(FLAGS.data_dir)
    num_actual_predict_examples = len(predict_examples)
    cached_dir = FLAGS.cached_dir
    if not cached_dir:
      cached_dir = output_dir
    predict_file = os.path.join(cached_dir, task_name + "_predict.tf_record")
    # NOTE(review): no Exists() check here — the predict record file is
    # regenerated on every call, unlike the train/eval builders.
    file_based_convert_examples_to_features(
        predict_examples, label_list, FLAGS.max_seq_length, tokenizer, predict_file,
        dupe_factor, label_min, label_max)
    tf.logging.info("***** Running prediction*****")
    tf.logging.info("  Num examples = %d (%d actual, %d padding)",
                    len(predict_examples), num_actual_predict_examples,
                    len(predict_examples) - num_actual_predict_examples)
    tf.logging.info("  Batch size = %d", FLAGS.predict_batch_size)
    predict_drop_remainder = False
    predict_input_fn = file_based_input_fn_builder(
        input_file=predict_file,
        seq_length=FLAGS.max_seq_length,
        is_training=False,
        drop_remainder=predict_drop_remainder,
        is_regression=is_regression)
    return predict_input_fn, num_actual_predict_examples
eval_steps = None
if FLAGS.do_train and FLAGS.num_train_epochs > 1e-6:
train_input_fn = get_train_input_fn()
if FLAGS.early_stopping:
eval_input_fn = get_eval_input_fn()
early_stopping_hook = tf.estimator.experimental.stop_if_no_decrease_hook(
estimator, metric_name="eval_pearsonr",
max_steps_without_decrease=epoch_step//2, run_every_steps=epoch_step, run_every_secs=None)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=num_train_steps,
hooks=[early_stopping_hook])
start_delay_secs = FLAGS.start_delay_secs
throttle_secs = FLAGS.throttle_secs
tf.logging.info("start_delay_secs: %d; throttle_secs: %d" % (start_delay_secs, throttle_secs))
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn, steps=eval_steps,
start_delay_secs=start_delay_secs, throttle_secs=throttle_secs)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
else:
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_input_fn = get_eval_input_fn()
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_input_fn, num_actual_predict_examples = get_predict_input_fn()
checkpoint_path = None
if FLAGS.eval_checkpoint_name:
checkpoint_path = os.path.join(output_dir, FLAGS.eval_checkpoint_name)
result = estimator.predict(input_fn=predict_input_fn,
checkpoint_path=checkpoint_path)
    def round_float_list(values):
      # Flatten a numpy array and round each element to 6 decimal places,
      # returning a plain Python list (keeps the JSON output compact).
      values = [round(float(x), 6) for x in values.flat]
      return values
fname = ""
if FLAGS.do_predict_on_full:
fname += "full"
elif FLAGS.do_predict_on_dev:
fname += "dev"
else:
fname += "test"
if FLAGS.predict_pool:
fname += "_pooled.tsv"
else:
fname += "_results.tsv"
if FLAGS.eval_checkpoint_name:
fname = FLAGS.eval_checkpoint_name + "." + fname
output_predict_file = os.path.join(output_dir, fname)
with tf.gfile.GFile(output_predict_file, "w") as writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, prediction) in enumerate(result):
if is_regression:
if FLAGS.predict_pool:
embedding_a = prediction["embedding_a"]
embedding_b = prediction["embedding_b"]
output_json = collections.OrderedDict()
output_json["embedding_a"] = round_float_list(embedding_a)
output_json["embedding_b"] = round_float_list(embedding_b)
output_line = json.dumps(output_json) + "\n"
else:
cos_similarity = prediction["cos_similarity"]
if i >= num_actual_predict_examples:
break
output_line = str(cos_similarity) + "\n"
else:
raise NotImplementedError
writer.write(output_line)
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
tf.logging.info("*** output_dir ***")
tf.logging.info(output_dir)
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
tf.app.run()
| 38.773073 | 112 | 0.683338 |
267e75fc185a254126deac2705ef8741f636e8ab | 5,747 | py | Python | arkouda/sorting.py | ak47mrj/arkouda | a9167e674aff57e02e1bed49fbb0c3cf1b2f2707 | [
"MIT"
] | 75 | 2019-10-21T17:20:41.000Z | 2021-05-10T22:01:19.000Z | arkouda/sorting.py | ak47mrj/arkouda | a9167e674aff57e02e1bed49fbb0c3cf1b2f2707 | [
"MIT"
] | 424 | 2019-10-21T16:48:45.000Z | 2021-05-12T11:49:18.000Z | arkouda/sorting.py | ak47mrj/arkouda | a9167e674aff57e02e1bed49fbb0c3cf1b2f2707 | [
"MIT"
] | 36 | 2019-10-23T17:45:44.000Z | 2021-04-17T01:15:03.000Z | from __future__ import annotations
from typing import cast, Sequence, Union
from typeguard import typechecked, check_type
from arkouda.client import generic_msg
from arkouda.pdarrayclass import pdarray, create_pdarray
from arkouda.pdarraycreation import zeros
from arkouda.strings import Strings
from arkouda.dtypes import int64, float64
numeric_dtypes = {float64,int64}
__all__ = ["argsort", "coargsort", "sort"]
def argsort(pda : Union[pdarray,Strings,'Categorical']) -> pdarray: # type: ignore
    """
    Return the permutation of indices that sorts the array.

    Parameters
    ----------
    pda : pdarray or Strings or Categorical
        The array to sort (int64 or float64)

    Returns
    -------
    pdarray, int64
        Indices ``i`` such that ``pda[i]`` is in sorted order

    Raises
    ------
    TypeError
        Raised if the parameter is other than a pdarray or Strings

    See Also
    --------
    coargsort

    Notes
    -----
    Implemented server-side with a least-significant-digit radix sort,
    which is stable and resilient to non-uniform data but communication
    intensive.

    Examples
    --------
    >>> a = ak.randint(0, 10, 10)
    >>> perm = ak.argsort(a)
    >>> a[perm]
    array([0, 1, 1, 3, 4, 5, 7, 8, 8, 9])
    """
    from arkouda.categorical import Categorical
    check_type(argname='argsort', value=pda,
               expected_type=Union[pdarray, Strings, Categorical])
    # Categorical knows how to argsort itself (over its codes).
    if hasattr(pda, "argsort"):
        return cast(Categorical, pda).argsort()
    # An empty array yields an empty permutation.
    if pda.size == 0:
        return zeros(0, dtype=int64)
    # Strings are addressed server-side by their two component arrays.
    if isinstance(pda, Strings):
        server_name = '+'.join((pda.offsets.name, pda.bytes.name))
    else:
        server_name = pda.name
    reply = generic_msg(cmd="argsort", args="{} {}".format(pda.objtype, server_name))
    return create_pdarray(cast(str, reply))
def coargsort(arrays: Sequence[Union[Strings, pdarray, 'Categorical']]) -> pdarray: # type: ignore
    """
    Return the permutation that groups the rows (left-to-right), if the
    input arrays are treated as columns. The permutation sorts numeric
    columns, but not strings/Categoricals -- strings/Categoricals are grouped, but not ordered.

    Parameters
    ----------
    arrays : Sequence[Union[Strings, pdarray, Categorical]]
        The columns (int64, float64, Strings, or Categorical) to sort by row

    Returns
    -------
    pdarray, int64
        The indices that permute the rows to grouped order

    Raises
    ------
    ValueError
        Raised if the pdarrays are not of the same size or if the parameter
        is not an Iterable containing pdarrays, Strings, or Categoricals

    See Also
    --------
    argsort

    Notes
    -----
    Uses a least-significant-digit radix sort, which is stable and resilient
    to non-uniformity in data but communication intensive. Starts with the
    last array and moves forward. This sort operates directly on numeric types,
    but for Strings, it operates on a hash. Thus, while grouping of equivalent
    strings is guaranteed, lexicographic ordering of the groups is not. For Categoricals,
    coargsort sorts based on Categorical.codes which guarantees grouping of equivalent categories
    but not lexicographic ordering of those groups.

    Examples
    --------
    >>> a = ak.array([0, 1, 0, 1])
    >>> b = ak.array([1, 1, 0, 0])
    >>> perm = ak.coargsort([a, b])
    >>> perm
    array([2, 0, 3, 1])
    >>> a[perm]
    array([0, 0, 1, 1])
    >>> b[perm]
    array([0, 1, 0, 1])
    """
    from arkouda.categorical import Categorical
    check_type(argname='coargsort', value=arrays, expected_type=Sequence[Union[pdarray, Strings, Categorical]])
    # Collect the server-side name and object type of each column; all
    # columns must agree in length (tracked via `size`).
    size = -1
    anames = []
    atypes = []
    for a in arrays:
        if isinstance(a, (pdarray, Strings)):
            # Strings contribute both component arrays, joined with '+'.
            anames.append('+'.join(a._list_component_names()))
            atypes.append(a.objtype)
        elif isinstance(a, Categorical):
            # Categoricals are sorted by their integer codes array.
            anames.append(a.codes.name)
            atypes.append(a.objtype)
        else:
            raise ValueError("Argument must be an iterable of pdarrays, Strings, or Categoricals")
        if size == -1:
            size = a.size
        elif size != a.size:
            raise ValueError("All pdarrays, Strings, or Categoricals must be of the same size")
    # Empty columns sort to an empty permutation without a server round-trip.
    if size == 0:
        return zeros(0, dtype=int64)
    repMsg = generic_msg(cmd="coargsort", args="{:n} {} {}".format(len(arrays),
                                                 ' '.join(anames), ' '.join(atypes)))
    return create_pdarray(cast(str, repMsg))
@typechecked
def sort(pda : pdarray) -> pdarray:
    """
    Return a sorted copy of the array. Only numeric arrays can be
    sorted; for Strings, use argsort instead.

    Parameters
    ----------
    pda : pdarray
        The array to sort (int64 or float64)

    Returns
    -------
    pdarray, int64 or float64
        A sorted copy of pda

    Raises
    ------
    TypeError
        Raised if the parameter is not a pdarray
    ValueError
        Raised if sort is attempted on a pdarray with an unsupported
        dtype such as bool

    See Also
    --------
    argsort

    Notes
    -----
    Implemented server-side with a least-significant-digit radix sort,
    which is stable and resilient to non-uniform data but communication
    intensive.

    Examples
    --------
    >>> a = ak.randint(0, 10, 10)
    >>> sorted_a = ak.sort(a)
    >>> sorted_a
    array([0, 1, 1, 3, 4, 5, 7, 8, 8, 9])
    """
    # An empty array is trivially sorted.
    if pda.size == 0:
        return zeros(0, dtype=int64)
    # The server-side sort only handles numeric dtypes.
    if pda.dtype not in numeric_dtypes:
        raise ValueError("ak.sort supports float64 or int64, not {}".format(pda.dtype))
    reply = generic_msg(cmd="sort", args=pda.name)
    return create_pdarray(cast(str, reply))
| 31.576923 | 111 | 0.62102 |
10bf9fd981a4617a5a75b25950bf046ee77236c9 | 1,063 | py | Python | share/lib/python/neuron/gui.py | adamjhn/rxdtests | 86504f8cc363baef583166c8033694eb9b4b78e3 | [
"BSD-3-Clause"
] | null | null | null | share/lib/python/neuron/gui.py | adamjhn/rxdtests | 86504f8cc363baef583166c8033694eb9b4b78e3 | [
"BSD-3-Clause"
] | null | null | null | share/lib/python/neuron/gui.py | adamjhn/rxdtests | 86504f8cc363baef583166c8033694eb9b4b78e3 | [
"BSD-3-Clause"
] | null | null | null | """
Import this module if you would like to use the NEURON GUI.
It loads nrngui.hoc, and starts a thread to periodically process
the NEURON GUI event loop.
"""
from neuron import h
import threading
import time
def process_events() :
    """Run one pass of the NEURON GUI event loop (h.doNotify)."""
    try:
        h.doNotify()
    # Catch Exception rather than a bare `except:` so KeyboardInterrupt and
    # SystemExit still propagate, and report what actually went wrong instead
    # of a detail-free notice.
    except Exception as exc:
        print ("Exception in gui thread: %s" % exc)
class LoopTimer(threading.Thread) :
    """
    A daemon timer thread that calls ``fun`` every ``interval`` seconds.
    """
    def __init__(self, interval, fun) :
        """
        @param interval: time in seconds between calls to fun()
        @param fun: the function to call on each timer tick
        """
        self.started = False
        self.interval = interval
        self.fun = fun
        threading.Thread.__init__(self)
        # Daemon thread so the timer does not keep the interpreter alive on
        # exit. Assign the attribute directly: setDaemon() has been
        # deprecated since Python 3.10.
        self.daemon = True

    def run(self) :
        # Bind this thread's id to NEURON so GUI callbacks run on it.
        h.nrniv_bind_thread(threading.current_thread().ident)
        self.started = True
        while True:
            self.fun()
            time.sleep(self.interval)
# Start the event-loop helper thread only on configurations identified by the
# NEURON version strings (nrnversion(9) == '2' or a 'mingw' build) —
# presumably builds where NEURON does not pump GUI events itself; confirm
# against NEURON's nrnversion() documentation.
if h.nrnversion(9) == '2' or h.nrnversion(8).find('mingw') > 0:
    timer = LoopTimer(0.1, process_events)  # poll the GUI every 0.1 s
    timer.start()
    # Wait until the timer thread has bound itself to NEURON before loading
    # the GUI, so events are dispatched on the correct thread.
    while not timer.started:
        time.sleep(0.001)
h.load_file("nrngui.hoc")
| 21.693878 | 64 | 0.673565 |
16f849fd86eeb3312308e4c89e105a601d88416b | 22,767 | py | Python | src/coreclr/scripts/genLttngProvider.py | pyracanda/runtime | 72bee25ab532a4d0636118ec2ed3eabf3fd55245 | [
"MIT"
] | 9,402 | 2019-11-25T23:26:24.000Z | 2022-03-31T23:19:41.000Z | src/coreclr/scripts/genLttngProvider.py | pyracanda/runtime | 72bee25ab532a4d0636118ec2ed3eabf3fd55245 | [
"MIT"
] | 37,522 | 2019-11-25T23:30:32.000Z | 2022-03-31T23:58:30.000Z | src/coreclr/scripts/genLttngProvider.py | pyracanda/runtime | 72bee25ab532a4d0636118ec2ed3eabf3fd55245 | [
"MIT"
] | 3,629 | 2019-11-25T23:29:16.000Z | 2022-03-31T21:52:28.000Z | ##
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
##
## Sample LTTng Instrumentation code that is generated:
##
## HEADER:
## #define GCFinalizersEnd_TRACEPOINT_ARGS \
##TP_ARGS(\
## const unsigned int ,Count\
##)
##TRACEPOINT_EVENT_CLASS(
## DotNETRuntime,
## GCFinalizersEnd,
## GCFinalizersEnd_TRACEPOINT_ARGS,
## TP_FIELDS(
## ctf_integer(unsigned int, Count, Count)
## )
##)
##
##CPP :
##
##extern "C" BOOL EventXplatEnabledGCFinalizersEnd(){ return TRUE;}
##extern "C" ULONG FireEtXplatGCFinalizersEnd(
## const unsigned int Count
##)
##{
## ULONG Error = ERROR_WRITE_FAULT;
## if (!EventXplatEnabledGCFinalizersEnd()){ return ERROR_SUCCESS;}
##
##
## tracepoint(
## DotNETRuntime,
## GCFinalizersEnd,
## Count
## );
## Error = ERROR_SUCCESS;
##
##return Error;
##}
##
###define GCFinalizersEndT_TRACEPOINT_INSTANCE(name) \
##TRACEPOINT_EVENT_INSTANCE(\
## DotNETRuntime,\
## GCFinalizersEnd,\
## name ,\
## GCFinalizersEnd_TRACEPOINT_ARGS \
##)
#
import os
from genEventing import *
from utilities import open_for_update
stdprolog="""
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genLttngProvider.py
******************************************************************/
"""
specialCaseSizes = { "BulkType" : { "Values" : "Values_ElementSize" }, "GCBulkRootCCW" : { "Values" : "Values_ElementSize" }, "GCBulkRCW" : { "Values" : "Values_ElementSize" }, "GCBulkRootStaticVar" : { "Values" : "Values_ElementSize" } }
lttngDataTypeMapping ={
#constructed types
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const ULONG",
"win:count" :"*",
"win:Struct" :"const BYTE *",
#actual spec
"win:GUID" :"const int",
"win:AnsiString" :"const char*",
"win:UnicodeString" :"const char*",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:Boolean" :"const BOOL",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Pointer" :"const size_t",
"win:Binary" :"const BYTE"
}
ctfDataTypeMapping ={
#constructed types
"win:Int64" :"ctf_integer",
"win:ULong" :"ctf_integer",
"win:count" :"ctf_sequence",
"win:Struct" :"ctf_sequence",
#actual spec
"win:GUID" :"ctf_sequence",
"win:AnsiString" :"ctf_string",
"win:UnicodeString" :"ctf_string",
"win:Double" :"ctf_float",
"win:Int32" :"ctf_integer",
"win:Boolean" :"ctf_integer",
"win:UInt64" :"ctf_integer",
"win:UInt32" :"ctf_integer",
"win:UInt16" :"ctf_integer",
"win:UInt8" :"ctf_integer", #actually a character
"win:Pointer" :"ctf_integer",
"win:Binary" :"ctf_sequence"
}
MAX_LTTNG_ARGS = 9
def shouldPackTemplate(template):
    """Return True when the event payload must be pre-serialized into a
    single byte buffer: either it has more parameters than a tracepoint can
    take directly, or it carries variable-size struct/array fields."""
    too_many_args = template.num_params > MAX_LTTNG_ARGS
    has_variable_fields = bool(template.structs) or bool(template.arrays)
    return too_many_args or has_variable_fields
def generateArgList(template):
    """Build the TP_ARGS(...) macro body for an event template.

    Packed templates (too many parameters, or variable-size fields) collapse
    to a (length, byte-buffer) pair that the caller pre-serializes; otherwise
    one ``type, name`` pair is emitted per event parameter.
    """
    header = "TP_ARGS( \\\n"
    footer = ")\n"
    # (Removed a leftover debug stub: `if "MethodILToNative" in
    # template.name: pass` — it had no effect.)
    if shouldPackTemplate(template):
        # Payload arrives as an opaque, pre-serialized blob.
        args =  "    const unsigned int, length, \\\n"
        args += "    const char *, __data__ \\\n"
    else:
        fnSig = template.signature
        args = []
        for params in fnSig.paramlist:
            fnparam = fnSig.getParam(params)
            wintypeName = fnparam.winType
            typewName = lttngDataTypeMapping[wintypeName]
            winCount = fnparam.count
            countw = lttngDataTypeMapping[winCount]

            arg = "    " + typewName
            if countw != " ":
                # Counted parameters become pointers ('*').
                arg += countw
            arg += ", " + fnparam.name
            args.append(arg)
        args = ", \\\n".join(args) + " \\\n"

    return header + args + footer
def generateFieldList(template):
    """Build the TP_FIELDS(...) macro body for an event template.

    Packed templates serialize to a (length, byte sequence) pair; otherwise
    each parameter is mapped to the ctf_* field macro matching its manifest
    type (explicit structs are laid out as ctf_sequence).
    """
    header = "    " + " TP_FIELDS(\n"
    footer = "\n    )\n)\n"
    if shouldPackTemplate(template):
        # Whole payload is an opaque byte blob prefixed by its length.
        field_list = "        ctf_integer(ULONG, length, length)\n"
        field_list += "        ctf_sequence(char, __data__, __data__, ULONG, length)"
    else:
        fnSig = template.signature
        field_list = []
        for params in fnSig.paramlist:
            fnparam = fnSig.getParam(params)
            wintypeName = fnparam.winType
            winCount = fnparam.count
            countw = lttngDataTypeMapping[winCount]
            # ctf field macros take the bare C type, without 'const'.
            typewName = lttngDataTypeMapping[wintypeName].replace("const ","")

            field_body = None
            ctf_type = None
            varname = fnparam.name
            if fnparam.prop:
                #this is an explicit struct treat as a sequence
                ctf_type = "ctf_sequence"
                sizeofseq = fnparam.prop
                field_body = ", ".join((typewName, varname, varname, "size_t", sizeofseq))
            else:
                ctf_type = ctfDataTypeMapping[wintypeName]
                if ctf_type == "ctf_string":
                    field_body = ", ".join((varname, varname))
                elif ctf_type == "ctf_integer" or ctf_type == "ctf_float":
                    field_body = ", ".join((typewName, varname, varname))
                elif ctf_type == "ctf_sequence":
                    # Sequences without an explicit size property cannot be
                    # emitted — their memory layout is unknown here.
                    raise Exception("ctf_sequence needs to have its memory expilicitly laid out")
                else:
                    raise Exception("no such ctf intrinsic called: " + ctf_type)

            field_list.append("        %s(%s)" % (ctf_type, field_body))
        field_list = "\n".join(field_list)
    return header + field_list + footer
def generateLttngHeader(providerName, allTemplates, eventNodes):
    """Generate the text of the LTTng tracepoint header for one provider.

    Emits, per template: a *_TRACEPOINT_ARGS macro, a
    TRACEPOINT_EVENT_CLASS, and a *T_TRACEPOINT_INSTANCE(name) helper
    macro; then an emptyTemplate class for template-less events, one
    instance per event, and the closing include-guard / LTTng include.
    """
    lTTngHdr = []
    for templateName in allTemplates:
        template = allTemplates[templateName]
        fnSig = allTemplates[templateName].signature

        lTTngHdr.append("\n#define " + templateName + "_TRACEPOINT_ARGS \\\n")
        #TP_ARGS
        tp_args = generateArgList(template)
        lTTngHdr.append(tp_args)
        #TP_EVENT_CLASS
        lTTngHdr.append("TRACEPOINT_EVENT_CLASS(\n")
        lTTngHdr.append("    " + providerName + ",\n")
        lTTngHdr.append("    " + templateName + ",\n")
        lTTngHdr.append("    " + templateName + "_TRACEPOINT_ARGS,\n")
        #TP_FIELDS
        tp_fields = generateFieldList(template)
        lTTngHdr.append(tp_fields)
        # Macro for defining event instance
        lTTngHdr.append("\n#define " + templateName)
        lTTngHdr.append("""T_TRACEPOINT_INSTANCE(name) \\
TRACEPOINT_EVENT_INSTANCE(\\
""")
        lTTngHdr.append("    "+providerName + ",\\\n")
        lTTngHdr.append("    " + templateName + ",\\\n")
        lTTngHdr.append("    name ,\\\n")
        lTTngHdr.append("    " + templateName + "_TRACEPOINT_ARGS \\\n)")
    #add an empty template node to just specify the event name in the event stream
    lTTngHdr.append("\n\nTRACEPOINT_EVENT_CLASS(\n")
    lTTngHdr.append("    " + providerName + ",\n")
    lTTngHdr.append("    emptyTemplate ,\n")
    lTTngHdr.append("""    TP_ARGS(),
    TP_FIELDS()
)
#define T_TRACEPOINT_INSTANCE(name) \\
TRACEPOINT_EVENT_INSTANCE(\\
""")
    lTTngHdr.append("    " + providerName + ",\\\n")
    lTTngHdr.append("    emptyTemplate,\\\n")
    lTTngHdr.append("""    name ,\\
    TP_ARGS()\\
)""")
    #end of empty template
    # create the event instance in headers
    lTTngHdr.append("\n")
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol');
        templateName = eventNode.getAttribute('template');
        if not eventName :
            raise Exception(eventNode + " event does not have a symbol")
        if not templateName:
            # Template-less events use the emptyTemplate instance macro.
            lTTngHdr.append("T_TRACEPOINT_INSTANCE(")
            lTTngHdr.append(eventName +")\n")
            continue
        # NOTE(review): `subevent` is always '' — s.replace(s, '') empties
        # the whole string; this looks like leftover sub-event support.
        subevent = templateName.replace(templateName,'')
        lTTngHdr.append(templateName)
        lTTngHdr.append("T_TRACEPOINT_INSTANCE(")
        lTTngHdr.append(eventName + subevent + ")\n")
    # Close the include guard opened by the caller and pull in the LTTng
    # event expansion machinery.
    lTTngHdr.append("\n#endif /* LTTNG_CORECLR_H")
    lTTngHdr.append(providerName + " */\n")
    lTTngHdr.append("#include <lttng/tracepoint-event.h>")

    return ''.join(lTTngHdr)
def generateMethodBody(template, providerName, eventName):
    """Generate the C++ body of a FireEtXplat* function.

    For a template-less event this is a bare do_tracepoint call. For an
    unpacked template, each parameter is passed directly (UnicodeString
    params are first converted to ANSI). For a packed template, all
    parameters are serialized via WriteToBuffer into one byte buffer that
    is handed to the tracepoint as (offset, buffer).
    """
    #emit code to init variables convert unicode to ansi string
    result = []
    if template is None:
        return "\n    do_tracepoint(%s, %s);\n" % (providerName, eventName)
    fnSig = template.signature
    # Declare conversion buffers for every UnicodeString parameter up front.
    for paramName in fnSig.paramlist:
        fnparam = fnSig.getParam(paramName)
        paramname = fnparam.name
        if fnparam.winType == "win:UnicodeString":
            result.append("    INT " + paramname + "_path_size = -1;\n")
            result.append("    PathCharString " + paramname + "_PS;\n")
            result.append("    INT " + paramname + "_full_name_path_size")
            result.append(" = (wcslen(" + paramname + ") + 1)*sizeof(WCHAR);\n")
            result.append("    CHAR* " + paramname + "_full_name = ")
            result.append(paramname + "_PS.OpenStringBuffer(" + paramname + "_full_name_path_size );\n")
            result.append("    if (" + paramname + "_full_name == NULL )")
            result.append("  { return ERROR_WRITE_FAULT; }\n")
            result.append("\n")
    #emit tracepoints
    fnSig = template.signature
    if not shouldPackTemplate(template):
        # Direct tracepoint: one argument per event parameter.
        linefnbody = ["    do_tracepoint(%s,\n        %s" % (providerName, eventName)]
        for params in fnSig.paramlist:
            fnparam = fnSig.getParam(params)
            wintypeName = fnparam.winType
            winCount = fnparam.count
            paramname = fnparam.name
            ctf_type = ctfDataTypeMapping.get(winCount)
            line = "        "
            if not ctf_type:
                ctf_type = ctfDataTypeMapping[wintypeName]
            if ctf_type == "ctf_string" and wintypeName == "win:UnicodeString":
                #emit code to convert unicode to ansi string
                result.append("    " + paramname+ "_path_size = WideCharToMultiByte( CP_ACP, 0, ")
                result.append(paramname + ", -1, ")
                result.append(paramname + "_full_name, ")
                result.append(paramname + "_full_name_path_size, NULL, NULL );\n")
                result.append("    _ASSERTE(" +paramname+ "_path_size < " )
                result.append(paramname + "_full_name_path_size );\n ")
                result.append(paramname + "_PS.CloseBuffer(" + paramname + "_path_size );\n")
                result.append("    if( " + paramname + "_path_size == 0 ){ return ERROR_INVALID_PARAMETER; }\n")
                line += paramname + "_full_name"
                linefnbody.append(line)
                continue
            elif ctf_type == "ctf_sequence" or wintypeName == "win:Pointer":
                # Cast to the exact tracepoint argument type.
                line += "(" + lttngDataTypeMapping[wintypeName]
                if not lttngDataTypeMapping[winCount] == " ":
                    line += lttngDataTypeMapping[winCount]
                line += ") "
                linefnbody.append(line + paramname)
            else:
                linefnbody.append(line + paramname)
        linefnbody = ",\n".join(linefnbody) + ");\n"
        result.append(linefnbody)
        return ''.join(result)
    else:
        # Packed payload: serialize every parameter into one buffer.
        pack_list = []
        # NOTE(review): emittedWriteToBuffer is set True on every branch, so
        # it is effectively `len(fnSig.paramlist) > 0`.
        emittedWriteToBuffer = False
        for paramName in fnSig.paramlist:
            parameter = fnSig.getParam(paramName)
            if paramName in template.structs:
                size = "(int)%s_ElementSize * (int)%s" % (paramName, parameter.prop)
                # Some events override the computed size expression.
                if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
                    size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
                pack_list.append("    success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" % (paramName, size))
                emittedWriteToBuffer = True
            elif paramName in template.arrays:
                size = "sizeof(%s) * (int)%s" % (lttngDataTypeMapping[parameter.winType], parameter.prop)
                if template.name in specialCaseSizes and paramName in specialCaseSizes[template.name]:
                    size = "(int)(%s)" % specialCaseSizes[template.name][paramName]
                pack_list.append("    success &= WriteToBuffer((const BYTE *)%s, %s, buffer, offset, size, fixedBuffer);" % (paramName, size))
                emittedWriteToBuffer = True
            elif parameter.winType == "win:GUID":
                # GUIDs are passed by pointer; dereference before writing.
                pack_list.append("    success &= WriteToBuffer(*%s, buffer, offset, size, fixedBuffer);" % (parameter.name,))
                emittedWriteToBuffer = True
            else:
                pack_list.append("    success &= WriteToBuffer(%s, buffer, offset, size, fixedBuffer);" % (parameter.name,))
                emittedWriteToBuffer = True
        # Stack buffer sized from the template's estimate; WriteToBuffer may
        # switch to a heap buffer (fixedBuffer=false) when it overflows.
        header = """
    size_t size = {0:d};
    char stackBuffer[{0:d}];
    char *buffer = stackBuffer;
    size_t offset = 0;
""".format(template.estimated_size)
        code = "\n".join(pack_list) + "\n\n"
        tracepoint = ""
        footer = ""
        if emittedWriteToBuffer:
            header += """
    bool fixedBuffer = true;
    bool success = true;
"""
            tracepoint = """    if (!success)
    {
        if (!fixedBuffer)
            delete[] buffer;
        return ERROR_WRITE_FAULT;
    }

    do_tracepoint(%s, %s, offset, buffer);\n""" % (providerName, eventName)
            footer = """
    if (!fixedBuffer)
        delete[] buffer;
"""
        return header + code + tracepoint + footer
def generateLttngTpProvider(providerName, eventNodes, allTemplates, runtimeFlavor):
    """Generate the C++ provider source: for each event, an
    EventXplatEnabled* predicate and a FireEtXplat* firing function whose
    body comes from generateMethodBody."""
    lTTngImpl = []
    for eventNode in eventNodes:
        eventName = eventNode.getAttribute('symbol')
        templateName = eventNode.getAttribute('template')
        #generate EventXplatEnabled
        lTTngImpl.append("extern \"C\" BOOL EventXplatEnabled%s(){ return tracepoint_enabled(%s, %s); }\n\n" % (eventName, providerName, eventName))
        #generate FireEtw functions
        fnptype = []
        linefnptype = []
        fnptype.append("extern \"C\" ULONG  FireEtXplat")
        fnptype.append(eventName)
        fnptype.append("(\n")

        if templateName:
            template = allTemplates[templateName]
        else:
            template = None
        if template:
            fnSig = template.signature
            # Build the C parameter list from the template signature.
            for paramName in fnSig.paramlist:
                fnparam = fnSig.getParam(paramName)
                wintypeName = fnparam.winType
                typewName = getPalDataTypeMapping(runtimeFlavor)[wintypeName]
                winCount = fnparam.count
                countw = getPalDataTypeMapping(runtimeFlavor)[winCount]

                if paramName in template.structs:
                    # Struct params carry an extra per-element size argument.
                    # `lindent` is presumably provided by the genEventing
                    # star-import — TODO confirm.
                    linefnptype.append("%sint %s_ElementSize,\n" % (lindent, paramName))

                linefnptype.append(lindent)
                linefnptype.append(typewName)
                if countw != " ":
                    linefnptype.append(countw)
                linefnptype.append(" ")
                linefnptype.append(fnparam.name)
                linefnptype.append(",\n")

            # Drop the trailing ",\n" after the last parameter.
            if len(linefnptype) > 0 :
                del linefnptype[-1]
            fnptype.extend(linefnptype)
        fnptype.append(")\n{\n")
        lTTngImpl.extend(fnptype)
        #start of fn body
        # Early out when the tracepoint is not enabled.
        lTTngImpl.append("    if (!EventXplatEnabled%s())\n" % (eventName,))
        lTTngImpl.append("        return ERROR_SUCCESS;\n")

        result = generateMethodBody(template, providerName, eventName)

        lTTngImpl.append(result)
        lTTngImpl.append("\n    return ERROR_SUCCESS;\n}\n\n")

    return ''.join(lTTngImpl)
def generateLttngFiles(etwmanifest, eventprovider_directory, runtimeFlavor, dryRun):
    """Generate per-provider LTTng instrumentation files from an ETW manifest.

    For each ``<provider>`` in ``etwmanifest`` this writes three files under
    ``eventprovider_directory``: a tracepoint header (tp<provider>.h), the
    event-provider implementation (.cpp), and the tracepoint-provider
    translation unit (.cpp).  When ``dryRun`` is true, only the file names
    are printed and nothing is written.
    """
    eventprovider_directory = eventprovider_directory + "/"
    tree = DOM.parse(etwmanifest)

    #keep these relative
    tracepointprovider_directory = "tracepointprovider"
    lttng_directory = "lttng"

    lttngevntprovPre = lttng_directory + "/eventprov"
    lttngevntprovTpPre = lttng_directory + "/traceptprov"

    if not os.path.exists(eventprovider_directory):
        os.makedirs(eventprovider_directory)
    if not os.path.exists(eventprovider_directory + lttng_directory):
        os.makedirs(eventprovider_directory + lttng_directory)
    if not os.path.exists(eventprovider_directory + tracepointprovider_directory):
        os.makedirs(eventprovider_directory + tracepointprovider_directory)

    # Generate Lttng specific instrumentation
    for providerNode in tree.getElementsByTagName('provider'):
        # Strip vendor prefixes; file names drop dashes, C identifiers use
        # underscores instead of dashes.
        providerName = providerNode.getAttribute('name')
        providerName = providerName.replace("Windows-", '')
        providerName = providerName.replace("Microsoft-", '')

        providerName_File = providerName.replace('-', '')
        providerName_File = providerName_File.lower()
        providerName = providerName.replace('-', '_')

        lttngevntheadershortname = "tp" + providerName_File + ".h"
        lttngevntheader = eventprovider_directory + "lttng/" + lttngevntheadershortname
        lttngevntprov = eventprovider_directory + lttngevntprovPre + providerName_File + ".cpp"
        lttngevntprovTp = eventprovider_directory + lttngevntprovTpPre + providerName_File + ".cpp"

        templateNodes = providerNode.getElementsByTagName('template')
        eventNodes = providerNode.getElementsByTagName('event')
        allTemplates = parseTemplateNodes(templateNodes)

        if dryRun:
            print(lttngevntheader)
            print(lttngevntprov)
            print(lttngevntprovTp)
        else:
            # Tracepoint header: LTTng boilerplate plus the per-event
            # TRACEPOINT_EVENT definitions from generateLttngHeader.
            with open_for_update(lttngevntheader) as lttnghdr_file:
                lttnghdr_file.write(stdprolog + "\n")
                lttnghdr_file.write("""
#include "palrt.h"
#include "pal.h"
#undef TRACEPOINT_PROVIDER
""")
                lttnghdr_file.write("#define TRACEPOINT_PROVIDER " + providerName + "\n")
                lttnghdr_file.write("""
#undef TRACEPOINT_INCLUDE
""")
                lttnghdr_file.write("#define TRACEPOINT_INCLUDE \"./" + lttngevntheadershortname + "\"\n\n")
                lttnghdr_file.write("#if !defined(LTTNG_CORECLR_H" + providerName + ") || defined(TRACEPOINT_HEADER_MULTI_READ)\n\n")
                lttnghdr_file.write("#define LTTNG_CORECLR_H" + providerName + "\n")
                lttnghdr_file.write("\n#include <lttng/tracepoint.h>\n\n")
                lttnghdr_file.write(generateLttngHeader(providerName, allTemplates, eventNodes) + "\n")

            # Event-provider implementation: helper declarations, the
            # WriteToBuffer template, and the FireEtXplat* wrappers.
            with open_for_update(lttngevntprov) as lttngimpl_file:
                lttngimpl_file.write(stdprolog + "\n")
                lttngimpl_file.write("""
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "stdlib.h"
#include "pal_mstypes.h"
#include "pal_error.h"
#include "pal.h"
#define PAL_free free
#define PAL_realloc realloc
#include "pal/stackstring.hpp"
""")
                lttngimpl_file.write("#include \"" + lttngevntheadershortname + "\"\n\n")
                lttngimpl_file.write("""#ifndef tracepoint_enabled
extern "C" bool XplatEventLoggerIsEnabled();
#define tracepoint_enabled(provider, name) XplatEventLoggerIsEnabled()
#define do_tracepoint tracepoint
#endif
#define wcslen PAL_wcslen
bool ResizeBuffer(char *&buffer, size_t& size, size_t currLen, size_t newSize, bool &fixedBuffer);
bool WriteToBuffer(PCWSTR str, char *&buffer, size_t& offset, size_t& size, bool &fixedBuffer);
bool WriteToBuffer(const char *str, char *&buffer, size_t& offset, size_t& size, bool &fixedBuffer);
bool WriteToBuffer(const BYTE *src, size_t len, char *&buffer, size_t& offset, size_t& size, bool &fixedBuffer);
template <typename T>
bool WriteToBuffer(const T &value, char *&buffer, size_t& offset, size_t& size, bool &fixedBuffer)
{
if (sizeof(T) + offset > size)
{
if (!ResizeBuffer(buffer, size, offset, size + sizeof(T), fixedBuffer))
return false;
}
memcpy(buffer + offset, (char *)&value, sizeof(T));
offset += sizeof(T);
return true;
}
""")
                lttngimpl_file.write(generateLttngTpProvider(providerName, eventNodes, allTemplates, runtimeFlavor) + "\n")

            # Tracepoint-provider translation unit: instantiates the probes.
            with open_for_update(lttngevntprovTp) as tpimpl_file:
                tpimpl_file.write(stdprolog + "\n")
                tpimpl_file.write("\n#define TRACEPOINT_CREATE_PROBES\n")
                tpimpl_file.write("#include \"./" + lttngevntheadershortname + "\"\n")
import argparse
import sys
def main(argv):
    """Command-line entry point for the LTTng instrumentation generator.

    Parses ``argv``, then drives :func:`generateLttngFiles` with the manifest
    path, the intermediate output directory, the runtime flavor, and the
    dry-run flag.

    Returns 1 when unrecognized arguments are supplied, 0 on success.
    """
    # parse the command line
    # (fixed user-facing typos: "LTTtng" -> "LTTng", "containig" -> "containing")
    parser = argparse.ArgumentParser(
        description="Generates the code required to instrument the LTTng logging mechanism"
    )
    required = parser.add_argument_group('required arguments')
    required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containing the description of events')
    required.add_argument('--intermediate', type=str, required=True,
                          help='full path to eventprovider intermediate directory')
    required.add_argument('--runtimeflavor', type=str, default="CoreCLR",
                          help='runtime flavor')
    required.add_argument('--dry-run', action='store_true',
                          help='if specified, will output the names of the generated files instead of generating the files')
    args, unknown = parser.parse_known_args(argv)
    if unknown:
        print('Unknown argument(s): ', ', '.join(unknown))
        return 1

    sClrEtwAllMan = args.man
    intermediate = args.intermediate
    runtimeFlavor = RuntimeFlavor(args.runtimeflavor)
    dryRun = args.dry_run

    generateLttngFiles(sClrEtwAllMan, intermediate, runtimeFlavor, dryRun)
    # Explicit success code; previously fell off the end returning None
    # (sys.exit(None) and sys.exit(0) behave the same, but 0 is clearer).
    return 0
if __name__ == '__main__':
    # Run the generator and propagate its status code to the shell.
    sys.exit(main(sys.argv[1:]))
| 36.661836 | 238 | 0.599069 |
7e8d7c999c89cb4bf81647b10dc9f9cb24cb845d | 413 | py | Python | CouchdbApplication/wsgi.py | dendgau/access-couchdb-application | c5e1e541d904800f77530f208bb5ed0c0bf93440 | [
"MIT"
] | null | null | null | CouchdbApplication/wsgi.py | dendgau/access-couchdb-application | c5e1e541d904800f77530f208bb5ed0c0bf93440 | [
"MIT"
] | null | null | null | CouchdbApplication/wsgi.py | dendgau/access-couchdb-application | c5e1e541d904800f77530f208bb5ed0c0bf93440 | [
"MIT"
] | null | null | null | """
WSGI config for CouchdbApplication project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before the application object
# is built (setdefault leaves any externally provided value untouched).
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CouchdbApplication.settings")

# Module-level WSGI callable discovered and invoked by application servers.
application = get_wsgi_application()
| 24.294118 | 78 | 0.79661 |
dd1cd34765ed862b3e23c936f2c546f063d1e9b9 | 1,859 | py | Python | transport.py | KingCprey/ncit.py | 25ffa9c5317bbcdcc8ef367bb7900bff910d792c | [
"MIT"
] | null | null | null | transport.py | KingCprey/ncit.py | 25ffa9c5317bbcdcc8ef367bb7900bff910d792c | [
"MIT"
] | null | null | null | transport.py | KingCprey/ncit.py | 25ffa9c5317bbcdcc8ef367bb7900bff910d792c | [
"MIT"
] | null | null | null | import socket,os,tarfile
from utils import *
def establish_connection(recv_host, recv_port, local_host=None, local_port=0,
                         retry=True, retry_count=3, timeout=5, silent=False):
    """Open a TCP connection to ``recv_host:recv_port``.

    On timeout the attempt is retried up to ``retry_count`` times when
    ``retry`` is true.  Returns the connected socket, or ``None`` if every
    attempt timed out.  Any non-timeout error is logged and re-raised.
    """
    attempts = 0
    while True:
        try:
            if not silent:
                log(Log.LOG_CONNECTION, "Attempting TCP connection to {0}:{1}", recv_host, recv_port)
            # Only bind a source address when one was requested;
            # socket.bind() rejects a None host.
            source = (local_host, local_port) if local_host is not None else None
            conn = socket.create_connection(
                (recv_host, recv_port), timeout=timeout, source_address=source
            )
            if not silent:
                log(Log.LOG_CONNECTION, "Successfully connected to {0}:{1}", recv_host, recv_port)
            return conn
        except socket.timeout:
            # The original passed a "{0}:{1}.{2}" format with no arguments,
            # which would blow up the logger; report the endpoint instead.
            if not silent:
                log(Log.LOG_ERROR, "Connection timed out to {0}:{1}", recv_host, recv_port)
        except Exception as e:
            if not silent:
                log(Log.LOG_ERROR, "Connection failed. Unknown error occured")
            raise e
        # BUG FIX: the retry counter was never incremented, so retry=True
        # looped forever on repeated timeouts.
        attempts += 1
        if attempts >= retry_count or not retry:
            break
    return None  # all attempts timed out; callers must handle None
def send_file(file_path, connection):
    """Stream the regular file at ``file_path`` over ``connection``.

    Logs transfer progress as the copy proceeds.

    Raises
    ------
    FileNotFoundError
        If ``file_path`` does not exist.
    ValueError
        If ``file_path`` exists but is not a regular file.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(file_path)
    if not os.path.isfile(file_path):
        raise ValueError("\"%s\" is not a file" % file_path)

    file_size = os.path.getsize(file_path)
    with open(file_path, 'rb') as inp:
        log(Log.LOG_INFO, "Starting file transfer")
        # In future, implement own copying so can present transfer progress.
        # NOTE(review): copyfileobj here is the project's utils version, which
        # appears to yield (complete, amount_written) tuples — confirm.
        # BUG FIX: the loop previously passed the undefined name `conn`
        # instead of the `connection` parameter (NameError at runtime).
        for complete, amount_written in copyfileobj(inp, connection):
            # Guard against ZeroDivisionError for empty files.
            percentage = int((amount_written / file_size) * 100.0) if file_size else 100
            # BUG FIX: the original format string mixed "%s" with a stray
            # backslash-escaped "%%" and supplied three args for two
            # placeholders, raising TypeError on the first progress report.
            log(Log.LOG_INFO, " [Transfer] %d of %d bytes (%d%% complete)"
                % (amount_written, file_size, percentage))
        log(Log.LOG_INFO, "Transfer Complete")
| 51.638889 | 123 | 0.691232 |
0c6c8b46ba5013d91ff867ba0d69ad080950cade | 124,239 | py | Python | distributed/worker.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | distributed/worker.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | distributed/worker.py | abduhbm/distributed | d99752e030e2534c1f064865e2241289fedfe5a9 | [
"BSD-3-Clause"
] | null | null | null | import asyncio
import bisect
from collections import defaultdict, deque, namedtuple
from collections.abc import MutableMapping
from datetime import timedelta
from functools import partial
import heapq
from inspect import isawaitable
import logging
import os
from pickle import PicklingError
import random
import threading
import sys
import uuid
import warnings
import weakref
import dask
from dask.core import istask
from dask.compatibility import apply
from dask.utils import format_bytes, funcname
from dask.system import CPU_COUNT
from tlz import pluck, merge, first, keymap
from tornado import gen
from tornado.ioloop import IOLoop
from . import profile, comm, system
from .batched import BatchedSend
from .comm import get_address_host, connect
from .comm.addressing import address_from_user_args
from .core import error_message, CommClosedError, send_recv, pingpong, coerce_to_address
from .diskutils import WorkSpace
from .metrics import time
from .node import ServerNode
from . import preloading
from .proctitle import setproctitle
from .protocol import pickle, to_serialize, deserialize_bytes, serialize_bytelist
from .pubsub import PubSubWorkerExtension
from .security import Security
from .sizeof import safe_sizeof as sizeof
from .threadpoolexecutor import ThreadPoolExecutor, secede as tpe_secede
from .utils import (
get_ip,
typename,
has_arg,
_maybe_complex,
log_errors,
ignoring,
import_file,
silence_logging,
thread_state,
json_load_robust,
key_split,
offload,
PeriodicCallback,
parse_bytes,
parse_timedelta,
iscoroutinefunction,
warn_on_duration,
LRU,
TimeoutError,
)
from .utils_comm import pack_data, gather_from_workers, retry_operation
from .utils_perf import ThrottledGC, enable_gc_diagnosis, disable_gc_diagnosis
from .versions import get_versions
logger = logging.getLogger(__name__)

# Drop into pdb on unhandled errors when the admin config flag is set.
LOG_PDB = dask.config.get("distributed.admin.pdb-on-err")

# Sentinel distinguishing "no value supplied" from a genuine None value.
no_value = "--no-value-sentinel--"

# Groupings of task/dependency states used by the worker state machine below.
IN_PLAY = ("waiting", "ready", "executing", "long-running")
PENDING = ("waiting", "ready", "constrained")
PROCESSING = ("waiting", "ready", "constrained", "executing", "long-running")
READY = ("ready", "constrained")

# Extensions instantiated on every worker unless overridden via `extensions=`.
DEFAULT_EXTENSIONS = [PubSubWorkerExtension]

DEFAULT_METRICS = {}

DEFAULT_STARTUP_INFORMATION = {}

# Compact container for a serialized task specification.
SerializedTask = namedtuple("SerializedTask", ["function", "args", "kwargs", "task"])
class Worker(ServerNode):
""" Worker node in a Dask distributed cluster
Workers perform two functions:
1. **Serve data** from a local dictionary
2. **Perform computation** on that data and on data from peers
Workers keep the scheduler informed of their data and use that scheduler to
gather data from other workers when necessary to perform a computation.
You can start a worker with the ``dask-worker`` command line application::
$ dask-worker scheduler-ip:port
Use the ``--help`` flag to see more options::
$ dask-worker --help
    The rest of this docstring is about the internal state that the worker uses
to manage and track internal computations.
**State**
**Informational State**
These attributes don't change significantly during execution.
* **nthreads:** ``int``:
Number of nthreads used by this worker process
* **executor:** ``concurrent.futures.ThreadPoolExecutor``:
Executor used to perform computation
* **local_directory:** ``path``:
Path on local machine to store temporary files
* **scheduler:** ``rpc``:
Location of scheduler. See ``.ip/.port`` attributes.
* **name:** ``string``:
Alias
* **services:** ``{str: Server}``:
Auxiliary web servers running on this worker
* **service_ports:** ``{str: port}``:
* **total_out_connections**: ``int``
The maximum number of concurrent outgoing requests for data
* **total_in_connections**: ``int``
The maximum number of concurrent incoming requests for data
* **total_comm_nbytes**: ``int``
* **batched_stream**: ``BatchedSend``
A batched stream along which we communicate to the scheduler
* **log**: ``[(message)]``
A structured and queryable log. See ``Worker.story``
**Volatile State**
This attributes track the progress of tasks that this worker is trying to
complete. In the descriptions below a ``key`` is the name of a task that
we want to compute and ``dep`` is the name of a piece of dependent data
that we want to collect from others.
* **data:** ``{key: object}``:
Prefer using the **host** attribute instead of this, unless
memory_limit and at least one of memory_target_fraction or
memory_spill_fraction values are defined, in that case, this attribute
is a zict.Buffer, from which information on LRU cache can be queried.
* **data.memory:** ``{key: object}``:
Dictionary mapping keys to actual values stored in memory. Only
available if condition for **data** being a zict.Buffer is met.
* **data.disk:** ``{key: object}``:
Dictionary mapping keys to actual values stored on disk. Only
available if condition for **data** being a zict.Buffer is met.
* **task_state**: ``{key: string}``:
The state of all tasks that the scheduler has asked us to compute.
Valid states include waiting, constrained, executing, memory, erred
* **tasks**: ``{key: dict}``
The function, args, kwargs of a task. We run this when appropriate
* **dependencies**: ``{key: {deps}}``
The data needed by this key to run
* **dependents**: ``{dep: {keys}}``
The keys that use this dependency
* **data_needed**: deque(keys)
The keys whose data we still lack, arranged in a deque
    * **waiting_for_data**: ``{key: {deps}}``
        A dynamic version of dependencies.  All dependencies that we still don't
        have for a particular key.
* **ready**: [keys]
Keys that are ready to run. Stored in a LIFO stack
* **constrained**: [keys]
Keys for which we have the data to run, but are waiting on abstract
resources like GPUs. Stored in a FIFO deque
* **executing**: {keys}
Keys that are currently executing
* **executed_count**: int
A number of tasks that this worker has run in its lifetime
* **long_running**: {keys}
A set of keys of tasks that are running and have started their own
long-running clients.
* **dep_state**: ``{dep: string}``:
The state of all dependencies required by our tasks
Valid states include waiting, flight, and memory
* **who_has**: ``{dep: {worker}}``
Workers that we believe have this data
* **has_what**: ``{worker: {deps}}``
The data that we care about that we think a worker has
* **pending_data_per_worker**: ``{worker: [dep]}``
The data on each worker that we still want, prioritized as a deque
* **in_flight_tasks**: ``{task: worker}``
All dependencies that are coming to us in current peer-to-peer
connections and the workers from which they are coming.
* **in_flight_workers**: ``{worker: {task}}``
The workers from which we are currently gathering data and the
dependencies we expect from those connections
* **comm_bytes**: ``int``
The total number of bytes in flight
* **suspicious_deps**: ``{dep: int}``
The number of times a dependency has not been where we expected it
* **nbytes**: ``{key: int}``
The size of a particular piece of data
* **types**: ``{key: type}``
The type of a particular piece of data
* **threads**: ``{key: int}``
The ID of the thread on which the task ran
* **active_threads**: ``{int: key}``
The keys currently running on active threads
* **exceptions**: ``{key: exception}``
The exception caused by running a task if it erred
* **tracebacks**: ``{key: traceback}``
The exception caused by running a task if it erred
* **startstops**: ``{key: [{startstop}]}``
Log of transfer, load, and compute times for a task
* **priorities**: ``{key: tuple}``
The priority of a key given by the scheduler. Determines run order.
* **durations**: ``{key: float}``
Expected duration of a task
* **resource_restrictions**: ``{key: {str: number}}``
Abstract resources required to run a task
Parameters
----------
scheduler_ip: str
scheduler_port: int
ip: str, optional
data: MutableMapping, type, None
The object to use for storage, builds a disk-backed LRU dict by default
nthreads: int, optional
loop: tornado.ioloop.IOLoop
local_directory: str, optional
Directory where we place local resources
name: str, optional
memory_limit: int, float, string
Number of bytes of memory that this worker should use.
Set to zero for no limit. Set to 'auto' to calculate
as system.MEMORY_LIMIT * min(1, nthreads / total_cores)
Use strings or numbers like 5GB or 5e9
memory_target_fraction: float
Fraction of memory to try to stay beneath
memory_spill_fraction: float
Fraction of memory at which we start spilling to disk
memory_pause_fraction: float
Fraction of memory at which we stop running new tasks
executor: concurrent.futures.Executor
resources: dict
Resources that this worker has like ``{'GPU': 2}``
nanny: str
Address on which to contact nanny, if it exists
lifetime: str
Amount of time like "1 hour" after which we gracefully shut down the worker.
This defaults to None, meaning no explicit shutdown time.
lifetime_stagger: str
Amount of time like "5 minutes" to stagger the lifetime value
The actual lifetime will be selected uniformly at random between
lifetime +/- lifetime_stagger
lifetime_restart: bool
Whether or not to restart a worker after it has reached its lifetime
Default False
Examples
--------
Use the command line to start a worker::
$ dask-scheduler
Start scheduler at 127.0.0.1:8786
$ dask-worker 127.0.0.1:8786
Start worker at: 127.0.0.1:1234
Registered with scheduler at: 127.0.0.1:8786
See Also
--------
distributed.scheduler.Scheduler
distributed.nanny.Nanny
"""
_instances = weakref.WeakSet()
    def __init__(
        self,
        scheduler_ip=None,
        scheduler_port=None,
        scheduler_file=None,
        ncores=None,
        nthreads=None,
        loop=None,
        local_dir=None,
        local_directory=None,
        services=None,
        service_ports=None,
        service_kwargs=None,
        name=None,
        reconnect=True,
        memory_limit="auto",
        executor=None,
        resources=None,
        silence_logs=None,
        death_timeout=None,
        preload=None,
        preload_argv=None,
        security=None,
        contact_address=None,
        memory_monitor_interval="200ms",
        extensions=None,
        metrics=DEFAULT_METRICS,
        startup_information=DEFAULT_STARTUP_INFORMATION,
        data=None,
        interface=None,
        host=None,
        port=None,
        protocol=None,
        dashboard_address=None,
        nanny=None,
        plugins=(),
        low_level_profiler=dask.config.get("distributed.worker.profile.low-level"),
        validate=None,
        profile_cycle_interval=None,
        lifetime=None,
        lifetime_stagger=None,
        lifetime_restart=None,
        **kwargs
    ):
        # ------------------------------------------------------------------
        # Task-tracking state (see the class docstring for the meaning of
        # each mapping).
        # ------------------------------------------------------------------
        self.tasks = dict()
        self.task_state = dict()
        self.dep_state = dict()
        self.dependencies = dict()
        self.dependents = dict()
        self.waiting_for_data = dict()
        self.who_has = dict()
        self.has_what = defaultdict(set)
        self.pending_data_per_worker = defaultdict(deque)
        self.nanny = nanny
        self._lock = threading.Lock()

        self.data_needed = deque()  # TODO: replace with heap?

        # ------------------------------------------------------------------
        # Peer-to-peer data-gathering state and connection limits.
        # ------------------------------------------------------------------
        self.in_flight_tasks = dict()
        self.in_flight_workers = dict()
        self.total_out_connections = dask.config.get(
            "distributed.worker.connections.outgoing"
        )
        self.total_in_connections = dask.config.get(
            "distributed.worker.connections.incoming"
        )
        self.total_comm_nbytes = 10e6
        self.comm_nbytes = 0
        self.suspicious_deps = defaultdict(lambda: 0)
        self._missing_dep_flight = set()

        # Per-key metadata: sizes, types, thread IDs, errors.
        self.nbytes = dict()
        self.types = dict()
        self.threads = dict()
        self.exceptions = dict()
        self.tracebacks = dict()

        # Profiling state; histories keep up to an hour of samples.
        self.active_threads_lock = threading.Lock()
        self.active_threads = dict()
        self.profile_keys = defaultdict(profile.create)
        self.profile_keys_history = deque(maxlen=3600)
        self.profile_recent = profile.create()
        self.profile_history = deque(maxlen=3600)

        # Scheduling metadata supplied by the scheduler.
        self.priorities = dict()
        self.generation = 0
        self.durations = dict()
        self.startstops = defaultdict(list)
        self.resource_restrictions = dict()

        # Execution queues: LIFO ready stack, FIFO constrained deque.
        self.ready = list()
        self.constrained = deque()
        self.executing = set()
        self.executed_count = 0
        self.long_running = set()

        self.recent_messages_log = deque(
            maxlen=dask.config.get("distributed.comm.recent-messages-log-length")
        )
        self.target_message_size = 50e6  # 50 MB

        self.log = deque(maxlen=100000)
        if validate is None:
            validate = dask.config.get("distributed.scheduler.validate")
        self.validate = validate

        # State-machine transition tables: (from, to) -> handler.
        self._transitions = {
            ("waiting", "ready"): self.transition_waiting_ready,
            ("waiting", "memory"): self.transition_waiting_done,
            ("waiting", "error"): self.transition_waiting_done,
            ("ready", "executing"): self.transition_ready_executing,
            ("ready", "memory"): self.transition_ready_memory,
            ("constrained", "executing"): self.transition_constrained_executing,
            ("executing", "memory"): self.transition_executing_done,
            ("executing", "error"): self.transition_executing_done,
            ("executing", "rescheduled"): self.transition_executing_done,
            ("executing", "long-running"): self.transition_executing_long_running,
            ("long-running", "error"): self.transition_executing_done,
            ("long-running", "memory"): self.transition_executing_done,
            ("long-running", "rescheduled"): self.transition_executing_done,
        }

        self._dep_transitions = {
            ("waiting", "flight"): self.transition_dep_waiting_flight,
            ("waiting", "memory"): self.transition_dep_waiting_memory,
            ("flight", "waiting"): self.transition_dep_flight_waiting,
            ("flight", "memory"): self.transition_dep_flight_memory,
        }

        # Transfer bookkeeping and bandwidth estimates.
        self.incoming_transfer_log = deque(maxlen=100000)
        self.incoming_count = 0
        self.outgoing_transfer_log = deque(maxlen=100000)
        self.outgoing_count = 0
        self.outgoing_current_count = 0
        self.repetitively_busy = 0
        self.bandwidth = parse_bytes(dask.config.get("distributed.scheduler.bandwidth"))
        self.bandwidth_workers = defaultdict(
            lambda: (0, 0)
        )  # bw/count recent transfers
        self.bandwidth_types = defaultdict(lambda: (0, 0))  # bw/count recent transfers
        self.latency = 0.001
        self._client = None

        if profile_cycle_interval is None:
            profile_cycle_interval = dask.config.get("distributed.worker.profile.cycle")
        profile_cycle_interval = parse_timedelta(profile_cycle_interval, default="ms")

        self._setup_logging(logger)

        # ------------------------------------------------------------------
        # Resolve the scheduler address from (in precedence order) a
        # scheduler file, explicit ip/port arguments, or dask config.
        # ------------------------------------------------------------------
        if scheduler_file:
            cfg = json_load_robust(scheduler_file)
            scheduler_addr = cfg["address"]
        elif scheduler_ip is None and dask.config.get("scheduler-address", None):
            scheduler_addr = dask.config.get("scheduler-address")
        elif scheduler_port is None:
            scheduler_addr = coerce_to_address(scheduler_ip)
        else:
            scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
        self.contact_address = contact_address

        # Infer the protocol from the scheduler address when not given.
        if protocol is None:
            protocol_address = scheduler_addr.split("://")
            if len(protocol_address) == 2:
                protocol = protocol_address[0]

        # Target interface on which we contact the scheduler by default
        # TODO: it is unfortunate that we special-case inproc here
        if not host and not interface and not scheduler_addr.startswith("inproc://"):
            host = get_ip(get_address_host(scheduler_addr))

        self._start_address = address_from_user_args(
            host=host,
            port=port,
            interface=interface,
            protocol=protocol,
            security=security,
        )

        # Deprecated alias: ncores= -> nthreads=.
        if ncores is not None:
            warnings.warn("the ncores= parameter has moved to nthreads=")
            nthreads = ncores

        self.nthreads = nthreads or CPU_COUNT
        self.total_resources = resources or {}
        self.available_resources = (resources or {}).copy()
        self.death_timeout = parse_timedelta(death_timeout)

        self.memory_monitor_interval = parse_timedelta(
            memory_monitor_interval, default="ms"
        )
        self.extensions = dict()
        if silence_logs:
            silence_logging(level=silence_logs)

        # Deprecated alias: local_dir= -> local_directory=.
        if local_dir is not None:
            warnings.warn("The local_dir keyword has moved to local_directory")
            local_directory = local_dir

        if local_directory is None:
            local_directory = dask.config.get("temporary-directory") or os.getcwd()
            if not os.path.exists(local_directory):
                os.makedirs(local_directory)
            local_directory = os.path.join(local_directory, "dask-worker-space")

        with warn_on_duration(
            "1s",
            "Creating scratch directories is taking a surprisingly long time. "
            "This is often due to running workers on a network file system. "
            "Consider specifying a local-directory to point workers to write "
            "scratch data to a local disk.",
        ):
            self._workspace = WorkSpace(os.path.abspath(local_directory))
            self._workdir = self._workspace.new_work_dir(prefix="worker-")
            self.local_directory = self._workdir.dir_path

        # Preload scripts/modules run at worker start.
        self.preload = preload
        if self.preload is None:
            self.preload = dask.config.get("distributed.worker.preload")
        self.preload_argv = preload_argv
        if self.preload_argv is None:
            self.preload_argv = dask.config.get("distributed.worker.preload-argv")
        self._preload_modules = preloading.on_creation(
            self.preload, file_dir=self.local_directory
        )

        self.security = security or Security()
        assert isinstance(self.security, Security)
        self.connection_args = self.security.get_connection_args("worker")

        self.memory_limit = parse_memory_limit(memory_limit, self.nthreads)

        self.paused = False

        # Memory-management thresholds; kwargs override dask config.
        if "memory_target_fraction" in kwargs:
            self.memory_target_fraction = kwargs.pop("memory_target_fraction")
        else:
            self.memory_target_fraction = dask.config.get(
                "distributed.worker.memory.target"
            )
        if "memory_spill_fraction" in kwargs:
            self.memory_spill_fraction = kwargs.pop("memory_spill_fraction")
        else:
            self.memory_spill_fraction = dask.config.get(
                "distributed.worker.memory.spill"
            )
        if "memory_pause_fraction" in kwargs:
            self.memory_pause_fraction = kwargs.pop("memory_pause_fraction")
        else:
            self.memory_pause_fraction = dask.config.get(
                "distributed.worker.memory.pause"
            )

        # ------------------------------------------------------------------
        # Choose the data store: a user-provided mapping/factory, a
        # zict spill-to-disk Buffer, or a plain dict.
        # ------------------------------------------------------------------
        if isinstance(data, MutableMapping):
            self.data = data
        elif callable(data):
            self.data = data()
        elif isinstance(data, tuple):
            self.data = data[0](**data[1])
        elif self.memory_limit and (
            self.memory_target_fraction or self.memory_spill_fraction
        ):
            try:
                from zict import Buffer, File, Func
            except ImportError:
                raise ImportError(
                    "Please `python -m pip install zict` for spill-to-disk workers"
                )
            path = os.path.join(self.local_directory, "storage")
            storage = Func(
                partial(serialize_bytelist, on_error="raise"),
                deserialize_bytes,
                File(path),
            )
            target = int(float(self.memory_limit) * self.memory_target_fraction)
            # NOTE(review): ``weight`` is presumably a module-level sizing
            # function defined elsewhere in this file — confirm.
            self.data = Buffer({}, storage, target, weight)
            # Expose in-memory/on-disk views (documented in class docstring).
            self.data.memory = self.data.fast
            self.data.disk = self.data.slow
        else:
            self.data = dict()

        self.actors = {}
        self.loop = loop or IOLoop.current()
        self.status = None
        self.reconnect = reconnect
        self.executor = executor or ThreadPoolExecutor(
            self.nthreads, thread_name_prefix="Dask-Worker-Threads'"
        )
        self.actor_executor = ThreadPoolExecutor(
            1, thread_name_prefix="Dask-Actor-Threads"
        )
        self.batched_stream = BatchedSend(interval="2ms", loop=self.loop)
        self.name = name
        self.scheduler_delay = 0
        self.stream_comms = dict()
        self.heartbeat_active = False
        self._ipython_kernel = None

        # Make uploaded files importable.
        if self.local_directory not in sys.path:
            sys.path.insert(0, self.local_directory)

        self.services = {}
        self.service_specs = services or {}

        # Optional Bokeh dashboard service.
        if dashboard_address is not None:
            try:
                from distributed.dashboard import BokehWorker
            except ImportError:
                logger.debug("To start diagnostics web server please install Bokeh")
            else:
                self.service_specs[("dashboard", dashboard_address)] = (
                    BokehWorker,
                    (service_kwargs or {}).get("dashboard", {}),
                )

        self.metrics = dict(metrics) if metrics else {}
        self.startup_information = (
            dict(startup_information) if startup_information else {}
        )

        self.low_level_profiler = low_level_profiler

        # RPC handlers served to other workers/clients.
        handlers = {
            "gather": self.gather,
            "run": self.run,
            "run_coroutine": self.run_coroutine,
            "get_data": self.get_data,
            "update_data": self.update_data,
            "delete_data": self.delete_data,
            "terminate": self.close,
            "ping": pingpong,
            "upload_file": self.upload_file,
            "start_ipython": self.start_ipython,
            "call_stack": self.get_call_stack,
            "profile": self.get_profile,
            "profile_metadata": self.get_profile_metadata,
            "get_logs": self.get_logs,
            "keys": self.keys,
            "versions": self.versions,
            "actor_execute": self.actor_execute,
            "actor_attribute": self.actor_attribute,
            "plugin-add": self.plugin_add,
        }

        # Handlers for messages arriving on the scheduler batched stream.
        stream_handlers = {
            "close": self.close,
            "compute-task": self.add_task,
            "release-task": partial(self.release_key, report=False),
            "delete-data": self.delete_data,
            "steal-request": self.steal_request,
        }

        super(Worker, self).__init__(
            handlers=handlers,
            stream_handlers=stream_handlers,
            io_loop=self.loop,
            connection_args=self.connection_args,
            **kwargs
        )

        self.scheduler = self.rpc(scheduler_addr)
        self.execution_state = {
            "scheduler": self.scheduler.address,
            "ioloop": self.loop,
            "worker": self,
        }

        # Periodic callbacks: heartbeat, keep-alive, memory monitor, profiling.
        pc = PeriodicCallback(self.heartbeat, 1000, io_loop=self.io_loop)
        self.periodic_callbacks["heartbeat"] = pc
        pc = PeriodicCallback(
            lambda: self.batched_stream.send({"op": "keep-alive"}),
            60000,
            io_loop=self.io_loop,
        )
        self.periodic_callbacks["keep-alive"] = pc

        self._address = contact_address

        if self.memory_limit:
            self._memory_monitoring = False
            pc = PeriodicCallback(
                self.memory_monitor,
                self.memory_monitor_interval * 1000,
                io_loop=self.io_loop,
            )
            self.periodic_callbacks["memory"] = pc

        if extensions is None:
            extensions = DEFAULT_EXTENSIONS
        for ext in extensions:
            ext(self)

        self._throttled_gc = ThrottledGC(logger=logger)

        setproctitle("dask-worker [not started]")

        pc = PeriodicCallback(
            self.trigger_profile,
            parse_timedelta(
                dask.config.get("distributed.worker.profile.interval"), default="ms"
            )
            * 1000,
            io_loop=self.io_loop,
        )
        self.periodic_callbacks["profile"] = pc

        pc = PeriodicCallback(
            self.cycle_profile, profile_cycle_interval * 1000, io_loop=self.io_loop
        )
        self.periodic_callbacks["profile-cycle"] = pc

        self.plugins = {}
        self._pending_plugins = plugins

        # Optional bounded lifetime, staggered randomly to avoid every
        # worker in a cluster shutting down at the same instant.
        self.lifetime = lifetime or dask.config.get(
            "distributed.worker.lifetime.duration"
        )
        lifetime_stagger = lifetime_stagger or dask.config.get(
            "distributed.worker.lifetime.stagger"
        )
        self.lifetime_restart = lifetime_restart or dask.config.get(
            "distributed.worker.lifetime.restart"
        )
        if isinstance(self.lifetime, str):
            self.lifetime = parse_timedelta(self.lifetime)
        if isinstance(lifetime_stagger, str):
            lifetime_stagger = parse_timedelta(lifetime_stagger)
        if self.lifetime:
            self.lifetime += (random.random() * 2 - 1) * lifetime_stagger
            self.io_loop.call_later(self.lifetime, self.close_gracefully)

        Worker._instances.add(self)
##################
# Administrative #
##################
def __repr__(self):
return (
"<%s: %r, %s, %s, stored: %d, running: %d/%d, ready: %d, comm: %d, waiting: %d>"
% (
self.__class__.__name__,
self.address,
self.name,
self.status,
len(self.data),
len(self.executing),
self.nthreads,
len(self.ready),
len(self.in_flight_tasks),
len(self.waiting_for_data),
)
)
    @property
    def logs(self):
        # Recent log records captured by this worker's deque-based handler.
        # NOTE(review): ``_deque_handler`` appears to be installed by
        # ``self._setup_logging`` during __init__ — confirm.
        return self._deque_handler.deque
    @property
    def worker_address(self):
        """ For API compatibility with Nanny """
        # Nanny exposes ``worker_address``; mirroring it here lets callers
        # treat Worker and Nanny objects uniformly.
        return self.address
    @property
    def local_dir(self):
        """ For API compatibility with Nanny """
        # Deprecated alias: warn the caller, then forward to the new name.
        warnings.warn(
            "The local_dir attribute has moved to local_directory", stacklevel=2
        )
        return self.local_directory
async def get_metrics(self):
core = dict(
executing=len(self.executing),
in_memory=len(self.data),
ready=len(self.ready),
in_flight=len(self.in_flight_tasks),
bandwidth={
"total": self.bandwidth,
"workers": dict(self.bandwidth_workers),
"types": keymap(typename, self.bandwidth_types),
},
)
custom = {}
for k, metric in self.metrics.items():
try:
result = metric(self)
if isawaitable(result):
result = await result
custom[k] = result
except Exception: # TODO: log error once
pass
return merge(custom, self.monitor.recent(), core)
async def get_startup_information(self):
result = {}
for k, f in self.startup_information.items():
try:
v = f(self)
if isawaitable(v):
v = await v
result[k] = v
except Exception: # TODO: log error once
pass
return result
def identity(self, comm=None):
return {
"type": type(self).__name__,
"id": self.id,
"scheduler": self.scheduler.address,
"nthreads": self.nthreads,
"ncores": self.nthreads, # backwards compatibility
"memory_limit": self.memory_limit,
}
#####################
# External Services #
#####################
    async def _register_with_scheduler(self):
        """Connect to the scheduler and register this worker.

        Retries forever on connection errors and timeouts.  On success the
        batched stream to the scheduler is started and the keep-alive and
        heartbeat periodic callbacks are (re)enabled.
        """
        # Pause periodic traffic while (re)registering
        self.periodic_callbacks["keep-alive"].stop()
        self.periodic_callbacks["heartbeat"].stop()
        start = time()
        if self.contact_address is None:
            self.contact_address = self.address
        logger.info("-" * 49)
        while True:
            try:
                _start = time()
                comm = await connect(self.scheduler.address, **self.connection_args)
                comm.name = "Worker->Scheduler"
                comm._server = weakref.ref(self)
                await comm.write(
                    dict(
                        op="register-worker",
                        reply=False,
                        address=self.contact_address,
                        keys=list(self.data),
                        nthreads=self.nthreads,
                        name=self.name,
                        nbytes=self.nbytes,
                        types={k: typename(v) for k, v in self.data.items()},
                        now=time(),
                        resources=self.total_resources,
                        memory_limit=self.memory_limit,
                        local_directory=self.local_directory,
                        services=self.service_ports,
                        nanny=self.nanny,
                        pid=os.getpid(),
                        versions=get_versions(),
                        metrics=await self.get_metrics(),
                        extra=await self.get_startup_information(),
                    ),
                    serializers=["msgpack"],
                )
                future = comm.read(deserializers=["msgpack"])
                response = await future
                if response.get("warning"):
                    logger.warning(response["warning"])

                # Estimate clock offset from the request round-trip midpoint
                _end = time()
                middle = (_start + _end) / 2
                self._update_latency(_end - start)
                self.scheduler_delay = response["time"] - middle
                self.status = "running"
                break
            except EnvironmentError:
                logger.info("Waiting to connect to: %26s", self.scheduler.address)
                await asyncio.sleep(0.1)
            except TimeoutError:
                logger.info("Timed out when connecting to scheduler")
        # NOTE: ``response`` is always bound here — the loop exits only via break
        if response["status"] != "OK":
            raise ValueError("Unexpected response from register: %r" % (response,))
        else:
            await asyncio.gather(
                *[
                    self.plugin_add(plugin=plugin)
                    for plugin in response["worker-plugins"]
                ]
            )

        logger.info("        Registered to: %26s", self.scheduler.address)
        logger.info("-" * 49)

        self.batched_stream.start(comm)
        self.periodic_callbacks["keep-alive"].start()
        self.periodic_callbacks["heartbeat"].start()
        self.loop.add_callback(self.handle_scheduler, comm)
def _update_latency(self, latency):
self.latency = latency * 0.05 + self.latency * 0.95
if self.digests is not None:
self.digests["latency"].add(latency)
    async def heartbeat(self):
        """Send one heartbeat to the scheduler and update latency estimates.

        Skipped entirely when a previous heartbeat is still in flight.  If
        the scheduler reports this worker as missing, wait briefly for an
        in-progress shutdown, otherwise re-register from scratch.
        """
        if not self.heartbeat_active:
            self.heartbeat_active = True
            logger.debug("Heartbeat: %s" % self.address)
            try:
                start = time()
                response = await retry_operation(
                    self.scheduler.heartbeat_worker,
                    address=self.contact_address,
                    now=time(),
                    metrics=await self.get_metrics(),
                )
                end = time()
                middle = (start + end) / 2

                self._update_latency(end - start)

                if response["status"] == "missing":
                    # Scheduler forgot us.  Give a shutdown-in-progress a
                    # moment to flip our status; otherwise re-register.
                    for i in range(10):
                        if self.status != "running":
                            break
                        else:
                            await asyncio.sleep(0.05)
                    else:
                        await self._register_with_scheduler()
                    return
                self.scheduler_delay = response["time"] - middle
                self.periodic_callbacks["heartbeat"].callback_time = (
                    response["heartbeat-interval"] * 1000
                )
                # Reset per-interval bandwidth accumulators
                self.bandwidth_workers.clear()
                self.bandwidth_types.clear()
            except CommClosedError:
                logger.warning("Heartbeat to scheduler failed")
                if not self.reconnect:
                    await self.close(report=False)
            except IOError as e:
                # Scheduler is gone.  Respect distributed.comm.timeouts.connect
                if "Timed out trying to connect" in str(e):
                    await self.close(report=False)
                else:
                    raise e
            finally:
                self.heartbeat_active = False
        else:
            logger.debug("Heartbeat skipped: channel busy")
    async def handle_scheduler(self, comm):
        """Process the scheduler's message stream until it disconnects.

        On disconnect, reconnect via a fresh heartbeat when ``reconnect``
        is set and the worker is still running; otherwise shut down.
        """
        try:
            await self.handle_stream(
                comm, every_cycle=[self.ensure_communicating, self.ensure_computing]
            )
        except Exception as e:
            logger.exception(e)
            raise
        finally:
            if self.reconnect and self.status == "running":
                logger.info("Connection to scheduler broken.  Reconnecting...")
                self.loop.add_callback(self.heartbeat)
            else:
                await self.close(report=False)
    def start_ipython(self, comm):
        """Start an IPython kernel

        Returns Jupyter connection info dictionary.
        """
        from ._ipython_utils import start_ipython

        if self._ipython_kernel is None:
            # Lazily start a single kernel, exposing this worker in its
            # namespace as ``worker``
            self._ipython_kernel = start_ipython(
                ip=self.ip, ns={"worker": self}, log=logger
            )
        return self._ipython_kernel.get_connection_info()
    async def upload_file(self, comm, filename=None, data=None, load=True):
        """Write ``data`` to ``filename`` inside the worker's local directory.

        Large payloads are written in a separate thread so the event loop
        is not blocked.  When ``load`` is True the file is also imported as
        Python code.  Returns a status dict (errors are returned, not raised).
        """
        out_filename = os.path.join(self.local_directory, filename)

        def func(data):
            # Accept both text and bytes payloads
            if isinstance(data, str):
                data = data.encode()
            with open(out_filename, "wb") as f:
                f.write(data)
                f.flush()
            return data

        if len(data) < 10000:
            # Small enough to write synchronously on the event loop
            data = func(data)
        else:
            # Offload the blocking file write to a thread
            data = await offload(func, data)

        if load:
            try:
                import_file(out_filename)
            except Exception as e:
                logger.exception(e)
                return {"status": "error", "exception": to_serialize(e)}

        return {"status": "OK", "nbytes": len(data)}
def keys(self, comm=None):
return list(self.data)
    async def gather(self, comm=None, who_has=None):
        """Pull missing keys from peer workers into local memory.

        ``who_has`` maps key -> candidate worker addresses; keys already
        held locally are skipped.  Missing keys are reported in the return
        value rather than raised.
        """
        who_has = {
            k: [coerce_to_address(addr) for addr in v]
            for k, v in who_has.items()
            if k not in self.data
        }
        result, missing_keys, missing_workers = await gather_from_workers(
            who_has, rpc=self.rpc, who=self.address
        )
        if missing_keys:
            logger.warning(
                "Could not find data: %s on workers: %s (who_has: %s)",
                missing_keys,
                missing_workers,
                who_has,
            )
            return {"status": "missing-data", "keys": missing_keys}
        else:
            # Store gathered values without re-reporting them to the scheduler
            self.update_data(data=result, report=False)
            return {"status": "OK"}
#############
# Lifecycle #
#############
    async def start(self):
        """Bring the worker online: listen, start services, register.

        Returns immediately if the worker is already closing/closed; must
        not be called twice while running.
        """
        if self.status and self.status.startswith("clos"):
            return
        assert self.status is None, self.status
        await super().start()

        enable_gc_diagnosis()
        thread_state.on_event_loop_thread = True

        await self.listen(
            self._start_address, **self.security.get_listen_args("worker")
        )
        self.ip = get_address_host(self.address)

        if self.name is None:
            self.name = self.address

        await preloading.on_start(
            self._preload_modules, self, argv=self.preload_argv,
        )

        # Services listen on all addresses
        # Note Nanny is not a "real" service, just some metadata
        # passed in service_ports...
        self.start_services(self.ip)

        try:
            listening_address = "%s%s:%d" % (self.listener.prefix, self.ip, self.port)
        except Exception:
            # Listener without a numeric port (e.g. inproc)
            listening_address = "%s%s" % (self.listener.prefix, self.ip)

        logger.info("      Start worker at: %26s", self.address)
        logger.info("         Listening to: %26s", listening_address)
        for k, v in self.service_ports.items():
            logger.info("  %16s at: %26s" % (k, self.ip + ":" + str(v)))
        logger.info("Waiting to connect to: %26s", self.scheduler.address)
        logger.info("-" * 49)
        logger.info("              Threads: %26d", self.nthreads)
        if self.memory_limit:
            logger.info("               Memory: %26s", format_bytes(self.memory_limit))
        logger.info("      Local Directory: %26s", self.local_directory)

        setproctitle("dask-worker [%s]" % self.address)

        await asyncio.gather(
            *[self.plugin_add(plugin=plugin) for plugin in self._pending_plugins]
        )
        self._pending_plugins = ()

        await self._register_with_scheduler()

        self.start_periodic_callbacks()
        return self
def _close(self, *args, **kwargs):
warnings.warn("Worker._close has moved to Worker.close", stacklevel=2)
return self.close(*args, **kwargs)
    async def close(
        self, report=True, timeout=10, nanny=True, executor_wait=True, safe=False
    ):
        """Shut the worker down.

        ``report`` notifies the scheduler, ``nanny`` also closes the
        supervising nanny, ``executor_wait`` waits for running tasks, and
        ``safe`` tells the scheduler the departure is expected.  Safe to
        call repeatedly; later calls wait for the first to finish.
        """
        with log_errors():
            if self.status in ("closed", "closing"):
                await self.finished()
                return

            self.reconnect = False
            disable_gc_diagnosis()

            try:
                logger.info("Stopping worker at %s", self.address)
            except ValueError:  # address not available if already closed
                logger.info("Stopping worker")
            if self.status not in ("running", "closing-gracefully"):
                logger.info("Closed worker has not yet started: %s", self.status)
            self.status = "closing"

            await preloading.on_teardown(self._preload_modules, self)

            if nanny and self.nanny:
                with self.rpc(self.nanny) as r:
                    await r.close_gracefully()

            setproctitle("dask-worker [closing]")

            # Run plugin teardown hooks (sync or async) before stopping comms
            teardowns = [
                plugin.teardown(self)
                for plugin in self.plugins.values()
                if hasattr(plugin, "teardown")
            ]

            await asyncio.gather(*[td for td in teardowns if isawaitable(td)])

            for pc in self.periodic_callbacks.values():
                pc.stop()
            with ignoring(EnvironmentError, TimeoutError):
                if report and self.contact_address is not None:
                    await asyncio.wait_for(
                        self.scheduler.unregister(
                            address=self.contact_address, safe=safe
                        ),
                        timeout,
                    )
            await self.scheduler.close_rpc()
            self._workdir.release()

            for k, v in self.services.items():
                v.stop()

            if (
                self.batched_stream
                and self.batched_stream.comm
                and not self.batched_stream.comm.closed()
            ):
                self.batched_stream.send({"op": "close-stream"})

            if self.batched_stream:
                with ignoring(TimeoutError):
                    await self.batched_stream.close(timedelta(seconds=timeout))

            # Drop queued-but-unstarted work before shutting executors down
            self.actor_executor._work_queue.queue.clear()
            if isinstance(self.executor, ThreadPoolExecutor):
                self.executor._work_queue.queue.clear()
                self.executor.shutdown(wait=executor_wait, timeout=timeout)
            else:
                self.executor.shutdown(wait=False)
            self.actor_executor.shutdown(wait=executor_wait, timeout=timeout)

            self.stop()
            await self.rpc.close()

            self.status = "closed"
            await ServerNode.close(self)

            setproctitle("dask-worker [closed]")
        return "OK"
    async def close_gracefully(self):
        """ Gracefully shut down a worker

        This first informs the scheduler that we're shutting down, and asks it
        to move our data elsewhere.  Afterwards, we close as normal
        """
        if self.status.startswith("closing"):
            await self.finished()

        if self.status == "closed":
            return

        logger.info("Closing worker gracefully: %s", self.address)
        self.status = "closing-gracefully"

        # Ask the scheduler to replicate our data onto peers before leaving
        await self.scheduler.retire_workers(workers=[self.address], remove=False)
        await self.close(safe=True, nanny=not self.lifetime_restart)
async def terminate(self, comm=None, report=True, **kwargs):
await self.close(report=report, **kwargs)
return "OK"
async def wait_until_closed(self):
warnings.warn("wait_until_closed has moved to finished()")
await self.finished()
assert self.status == "closed"
################
# Worker Peers #
################
    def send_to_worker(self, address, msg):
        """Send *msg* to the peer worker at *address* over a batched stream.

        Lazily opens (in the background) one batched connection per peer;
        messages queue on the BatchedSend until the connection is live.
        """
        if address not in self.stream_comms:
            bcomm = BatchedSend(interval="1ms", loop=self.loop)
            self.stream_comms[address] = bcomm

            async def batched_send_connect():
                comm = await connect(
                    address, **self.connection_args  # TODO, serialization
                )
                comm.name = "Worker->Worker"
                await comm.write({"op": "connection_stream"})

                bcomm.start(comm)

            self.loop.add_callback(batched_send_connect)

        self.stream_comms[address].send(msg)
    async def get_data(
        self, comm, keys=None, who=None, serializers=None, max_connections=None
    ):
        """Serve locally stored ``keys`` to a requesting peer over *comm*.

        Refuses with ``{"status": "busy"}`` when too many outgoing
        transfers are already running.  Records transfer statistics and
        returns "dont-reply" because the data is written on *comm* directly.
        """
        start = time()

        if max_connections is None:
            max_connections = self.total_in_connections

        # Allow same-host connections more liberally
        if (
            max_connections
            and comm
            and get_address_host(comm.peer_address) == get_address_host(self.address)
        ):
            max_connections = max_connections * 2

        if self.paused:
            max_connections = 1
            throttle_msg = " Throttling outgoing connections because worker is paused."
        else:
            throttle_msg = ""

        if (
            max_connections is not False
            and self.outgoing_current_count >= max_connections
        ):
            logger.debug(
                "Worker %s has too many open connections to respond to data request from %s (%d/%d).%s",
                self.address,
                who,
                self.outgoing_current_count,
                max_connections,
                throttle_msg,
            )
            return {"status": "busy"}

        self.outgoing_current_count += 1
        data = {k: self.data[k] for k in keys if k in self.data}

        if len(data) < len(keys):
            # Remaining keys may be actors, which are served by reference
            for k in set(keys) - set(data):
                if k in self.actors:
                    from .actor import Actor

                    data[k] = Actor(type(self.actors[k]), self.address, k)

        msg = {"status": "OK", "data": {k: to_serialize(v) for k, v in data.items()}}
        nbytes = {k: self.nbytes.get(k) for k in data}
        stop = time()
        if self.digests is not None:
            self.digests["get-data-load-duration"].add(stop - start)

        start = time()

        try:
            compressed = await comm.write(msg, serializers=serializers)
            # Wait for the receiver's acknowledgement before releasing the slot
            response = await comm.read(deserializers=serializers)
            assert response == "OK", response
        except EnvironmentError:
            logger.exception(
                "failed during get data with %s -> %s", self.address, who, exc_info=True
            )
            comm.abort()
            raise
        finally:
            self.outgoing_current_count -= 1
        stop = time()
        if self.digests is not None:
            self.digests["get-data-send-duration"].add(stop - start)

        total_bytes = sum(filter(None, nbytes.values()))

        self.outgoing_count += 1
        duration = (stop - start) or 0.5  # windows
        self.outgoing_transfer_log.append(
            {
                "start": start + self.scheduler_delay,
                "stop": stop + self.scheduler_delay,
                "middle": (start + stop) / 2,
                "duration": duration,
                "who": who,
                "keys": nbytes,
                "total": total_bytes,
                "compressed": compressed,
                "bandwidth": total_bytes / duration,
            }
        )

        return "dont-reply"
###################
# Local Execution #
###################
    def update_data(self, comm=None, data=None, report=True, serializers=None):
        """Receive scattered data and store it directly in memory.

        Keys already tracked as tasks transition to "memory"; unknown keys
        are registered with placeholder task metadata.  Optionally reports
        the new keys to the scheduler.  Returns per-key byte counts.
        """
        for key, value in data.items():
            if key in self.task_state:
                self.transition(key, "memory", value=value)
            else:
                # Unknown key: store it and fabricate minimal task records
                self.put_key_in_memory(key, value)
                self.task_state[key] = "memory"
                self.tasks[key] = None
                self.priorities[key] = None
                self.durations[key] = None
                self.dependencies[key] = set()

            if key in self.dep_state:
                self.transition_dep(key, "memory", value=value)

            self.log.append((key, "receive-from-scatter"))

        if report:
            self.batched_stream.send({"op": "add-keys", "keys": list(data)})
        info = {"nbytes": {k: sizeof(v) for k, v in data.items()}, "status": "OK"}
        return info
    async def delete_data(self, comm=None, keys=None, report=True):
        """Release the given keys (tasks and dependencies) from local state."""
        if keys:
            for key in list(keys):
                self.log.append((key, "delete"))
                if key in self.task_state:
                    self.release_key(key)

                if key in self.dep_state:
                    self.release_dep(key)

            logger.debug("Deleted %d keys", len(keys))
            if report:
                logger.debug("Reporting loss of keys to scheduler")
                # TODO: this route seems to not exist?
                await self.scheduler.remove_keys(
                    address=self.contact_address, keys=list(keys)
                )
        return "OK"
    async def set_resources(self, **resources):
        """Update this worker's abstract resource totals and tell the scheduler.

        ``available_resources`` is adjusted by the delta from the old total
        so resources held by currently running tasks stay accounted for.
        """
        for r, quantity in resources.items():
            if r in self.total_resources:
                self.available_resources[r] += quantity - self.total_resources[r]
            else:
                self.available_resources[r] = quantity
            self.total_resources[r] = quantity

        await retry_operation(
            self.scheduler.set_resources,
            resources=self.total_resources,
            worker=self.contact_address,
        )
###################
# Task Management #
###################
    def add_task(
        self,
        key,
        function=None,
        args=None,
        kwargs=None,
        task=no_value,
        who_has=None,
        nbytes=None,
        priority=None,
        duration=None,
        resource_restrictions=None,
        actor=False,
        **kwargs2
    ):
        """Register a new task from the scheduler and set up its dependencies.

        Handles re-submission of known keys, records dependency/location
        bookkeeping, and either queues the task for data gathering or
        transitions it straight to "ready".
        """
        try:
            if key in self.tasks:
                state = self.task_state[key]
                if state == "memory":
                    assert key in self.data or key in self.actors
                    logger.debug(
                        "Asked to compute pre-existing result: %s: %s", key, state
                    )
                    self.send_task_state_to_scheduler(key)
                    return
                if state in IN_PLAY:
                    return
                if state == "erred":
                    # Re-submission of a failed task: forget the old error
                    del self.exceptions[key]
                    del self.tracebacks[key]

            if priority is not None:
                # Break ties by submission order (generation decreases)
                priority = tuple(priority) + (self.generation,)
                self.generation -= 1

            if self.dep_state.get(key) == "memory":
                # The value already arrived as someone else's dependency
                self.task_state[key] = "memory"
                self.send_task_state_to_scheduler(key)
                self.tasks[key] = None
                self.log.append((key, "new-task-already-in-memory"))
                self.priorities[key] = priority
                self.durations[key] = duration
                return

            self.log.append((key, "new"))
            self.tasks[key] = SerializedTask(function, args, kwargs, task)
            if actor:
                self.actors[key] = None

            self.priorities[key] = priority
            self.durations[key] = duration
            if resource_restrictions:
                self.resource_restrictions[key] = resource_restrictions
            self.task_state[key] = "waiting"

            if nbytes is not None:
                self.nbytes.update(nbytes)

            who_has = who_has or {}
            self.dependencies[key] = set(who_has)
            self.waiting_for_data[key] = set()

            for dep in who_has:
                if dep not in self.dependents:
                    self.dependents[dep] = set()
                self.dependents[dep].add(key)

                if dep not in self.dep_state:
                    if self.task_state.get(dep) == "memory":
                        state = "memory"
                    else:
                        state = "waiting"
                    self.dep_state[dep] = state
                    self.log.append((dep, "new-dep", state))

                if self.dep_state[dep] != "memory":
                    self.waiting_for_data[key].add(dep)

            # Record candidate locations for every dependency
            for dep, workers in who_has.items():
                assert workers
                if dep not in self.who_has:
                    self.who_has[dep] = set(workers)
                self.who_has[dep].update(workers)

                for worker in workers:
                    self.has_what[worker].add(dep)
                    if self.dep_state[dep] != "memory":
                        self.pending_data_per_worker[worker].append(dep)

            if self.waiting_for_data[key]:
                self.data_needed.append(key)
            else:
                self.transition(key, "ready")
            if self.validate:
                if who_has:
                    assert all(dep in self.dep_state for dep in who_has)
                    assert all(dep in self.nbytes for dep in who_has)
                    for dep in who_has:
                        self.validate_dep(dep)
                    self.validate_key(key)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_dep(self, dep, finish, **kwargs):
        """Move dependency *dep* to state *finish* via the transition table.

        Unknown dependencies and no-op transitions are ignored.
        """
        try:
            start = self.dep_state[dep]
        except KeyError:
            return
        if start == finish:
            return
        func = self._dep_transitions[start, finish]
        state = func(dep, **kwargs)
        self.log.append(("dep", dep, start, state or finish))
        if dep in self.dep_state:
            # The transition function may have released the dep entirely
            self.dep_state[dep] = state or finish
            if self.validate:
                self.validate_dep(dep)
    def transition_dep_waiting_flight(self, dep, worker=None):
        """Gather started for *dep*: remember which worker we fetch from."""
        try:
            if self.validate:
                assert dep not in self.in_flight_tasks
                assert self.dependents[dep]

            self.in_flight_tasks[dep] = worker
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_dep_flight_waiting(self, dep, worker=None, remove=True):
        """A gather attempt for *dep* ended without data; return to waiting.

        With ``remove=True`` the failed worker is forgotten as a holder and
        dependent tasks retry immediately; with ``remove=False`` (busy peer)
        they are queued to retry later.
        """
        try:
            if self.validate:
                assert dep in self.in_flight_tasks

            del self.in_flight_tasks[dep]
            if remove:
                try:
                    self.who_has[dep].remove(worker)
                except KeyError:
                    pass
                try:
                    self.has_what[worker].remove(dep)
                except KeyError:
                    pass

            if not self.who_has.get(dep):
                # No known holder left; escalate to the missing-dep handler
                if dep not in self._missing_dep_flight:
                    self._missing_dep_flight.add(dep)
                    self.loop.add_callback(self.handle_missing_dep, dep)
            for key in self.dependents.get(dep, ()):
                if self.task_state[key] == "waiting":
                    if remove:  # try a new worker immediately
                        self.data_needed.appendleft(key)
                    else:  # worker was probably busy, wait a while
                        self.data_needed.append(key)

            if not self.dependents[dep]:
                self.release_dep(dep)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_dep_flight_memory(self, dep, value=None):
        """A gather for *dep* succeeded; store the value if still wanted."""
        try:
            if self.validate:
                assert dep in self.in_flight_tasks

            del self.in_flight_tasks[dep]
            if self.dependents[dep]:
                self.dep_state[dep] = "memory"
                self.put_key_in_memory(dep, value)
                self.batched_stream.send({"op": "add-keys", "keys": [dep]})
            else:
                # Nobody needs it anymore; drop instead of storing
                self.release_dep(dep)

        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_dep_waiting_memory(self, dep, value=None):
        """Dep became available locally (e.g. computed here) while waiting."""
        try:
            if self.validate:
                assert dep in self.data
                assert dep in self.nbytes
                assert dep in self.types
                assert self.task_state[dep] == "memory"
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
        # Store an explicitly provided value if it is not already present
        if value is not no_value and dep not in self.data:
            self.put_key_in_memory(dep, value, transition=False)
    def transition(self, key, finish, **kwargs):
        """Move task *key* to state *finish* via the transition table."""
        start = self.task_state[key]
        if start == finish:
            return
        func = self._transitions[start, finish]
        state = func(key, **kwargs)
        self.log.append((key, start, state or finish))
        # A transition function may redirect to a different final state
        self.task_state[key] = state or finish
        if self.validate:
            self.validate_key(key)
        self._notify_transition(key, start, finish, **kwargs)
    def transition_waiting_ready(self, key):
        """All dependencies are local: queue *key* for execution.

        Resource-restricted tasks join the ``constrained`` queue (state
        redirected to "constrained"); others go onto the priority heap.
        """
        try:
            if self.validate:
                assert self.task_state[key] == "waiting"
                assert key in self.waiting_for_data
                assert not self.waiting_for_data[key]
                assert all(
                    dep in self.data or dep in self.actors
                    for dep in self.dependencies[key]
                )
                assert key not in self.executing
                assert key not in self.ready

            self.waiting_for_data.pop(key, None)

            if key in self.resource_restrictions:
                self.constrained.append(key)
                return "constrained"
            else:
                heapq.heappush(self.ready, (self.priorities[key], key))
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_waiting_done(self, key, value=None):
        """Task finished (or erred) while still nominally waiting."""
        try:
            if self.validate:
                assert self.task_state[key] == "waiting"
                assert key in self.waiting_for_data
                assert key not in self.executing
                assert key not in self.ready

            del self.waiting_for_data[key]
            self.send_task_state_to_scheduler(key)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_ready_executing(self, key):
        """Start executing *key* on the worker's executor."""
        try:
            if self.validate:
                assert key not in self.waiting_for_data
                # assert key not in self.data
                assert self.task_state[key] in READY
                assert key not in self.ready
                assert all(
                    dep in self.data or dep in self.actors
                    for dep in self.dependencies[key]
                )

            self.executing.add(key)
            self.loop.add_callback(self.execute, key)
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_ready_memory(self, key, value=None):
        """A ready task's result is already in memory; just tell the scheduler."""
        self.send_task_state_to_scheduler(key)
    def transition_constrained_executing(self, key):
        """Start a resource-restricted task and claim its resources."""
        self.transition_ready_executing(key)
        for resource, quantity in self.resource_restrictions[key].items():
            self.available_resources[resource] -= quantity

        if self.validate:
            assert all(v >= 0 for v in self.available_resources.values())
    def transition_executing_done(self, key, value=no_value, report=True):
        """Record the result (or error) of an executed task.

        Returns the redirected state name ("error") when storing the value
        failed, otherwise None.  Also releases any claimed resources and
        reports the outcome to the scheduler.
        """
        try:
            if self.validate:
                assert key in self.executing or key in self.long_running
                assert key not in self.waiting_for_data
                assert key not in self.ready

            out = None
            if key in self.resource_restrictions:
                for resource, quantity in self.resource_restrictions[key].items():
                    self.available_resources[resource] += quantity

            if self.task_state[key] == "executing":
                self.executing.remove(key)
                self.executed_count += 1
            elif self.task_state[key] == "long-running":
                self.long_running.remove(key)

            if value is not no_value:
                try:
                    self.task_state[key] = "memory"
                    self.put_key_in_memory(key, value, transition=False)
                except Exception as e:
                    # Storing may fail (e.g. serialization on spill to disk)
                    logger.info("Failed to put key in memory", exc_info=True)
                    msg = error_message(e)
                    self.exceptions[key] = msg["exception"]
                    self.tracebacks[key] = msg["traceback"]
                    self.task_state[key] = "error"
                    out = "error"

                if key in self.dep_state:
                    self.transition_dep(key, "memory")

            if report and self.batched_stream and self.status == "running":
                self.send_task_state_to_scheduler(key)
            else:
                raise CommClosedError

            return out

        except EnvironmentError:
            logger.info("Comm closed")
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def transition_executing_long_running(self, key, compute_duration=None):
        """Reclassify a running task as long-running (it called secede())."""
        try:
            if self.validate:
                assert key in self.executing

            self.executing.remove(key)
            self.long_running.add(key)
            self.batched_stream.send(
                {"op": "long-running", "key": key, "compute_duration": compute_duration}
            )

            # The freed executor slot can run another task immediately
            self.ensure_computing()
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
def maybe_transition_long_running(self, key, compute_duration=None):
if self.task_state.get(key) == "executing":
self.transition(key, "long-running", compute_duration=compute_duration)
def stateof(self, key):
return {
"executing": key in self.executing,
"waiting_for_data": key in self.waiting_for_data,
"heap": key in pluck(1, self.ready),
"data": key in self.data,
}
def story(self, *keys):
return [
msg
for msg in self.log
if any(key in msg for key in keys)
or any(
key in c
for key in keys
for c in msg
if isinstance(c, (tuple, list, set))
)
]
    def ensure_communicating(self):
        """Fill open communication slots with pending dependency gathers.

        Walks ``data_needed`` and, for each still-waiting task, schedules
        ``gather_dep`` calls to peers holding its dependencies, respecting
        the connection-count and in-flight-byte limits.
        """
        changed = True
        try:
            while (
                changed
                and self.data_needed
                and len(self.in_flight_workers) < self.total_out_connections
            ):
                changed = False
                logger.debug(
                    "Ensure communicating.  Pending: %d.  Connections: %d/%d",
                    len(self.data_needed),
                    len(self.in_flight_workers),
                    self.total_out_connections,
                )

                key = self.data_needed[0]

                if key not in self.tasks:
                    # Task released in the meantime; discard and continue
                    self.data_needed.popleft()
                    changed = True
                    continue

                if self.task_state.get(key) != "waiting":
                    self.log.append((key, "communication pass"))
                    self.data_needed.popleft()
                    changed = True
                    continue

                deps = self.dependencies[key]
                if self.validate:
                    assert all(dep in self.dep_state for dep in deps)

                deps = [dep for dep in deps if self.dep_state[dep] == "waiting"]

                missing_deps = {dep for dep in deps if not self.who_has.get(dep)}
                if missing_deps:
                    logger.info("Can't find dependencies for key %s", key)
                    missing_deps2 = {
                        dep
                        for dep in missing_deps
                        if dep not in self._missing_dep_flight
                    }
                    for dep in missing_deps2:
                        self._missing_dep_flight.add(dep)
                    self.loop.add_callback(self.handle_missing_dep, *missing_deps2)

                    deps = [dep for dep in deps if dep not in missing_deps]

                self.log.append(("gather-dependencies", key, deps))

                in_flight = False

                while deps and (
                    len(self.in_flight_workers) < self.total_out_connections
                    or self.comm_nbytes < self.total_comm_nbytes
                ):
                    dep = deps.pop()
                    if self.dep_state[dep] != "waiting":
                        continue
                    if dep not in self.who_has:
                        continue
                    workers = [
                        w for w in self.who_has[dep] if w not in self.in_flight_workers
                    ]
                    if not workers:
                        # All holders busy; revisit this key later
                        in_flight = True
                        continue
                    # Prefer same-host peers for cheaper transfers
                    host = get_address_host(self.address)
                    local = [w for w in workers if get_address_host(w) == host]
                    if local:
                        worker = random.choice(local)
                    else:
                        worker = random.choice(list(workers))
                    to_gather, total_nbytes = self.select_keys_for_gather(worker, dep)
                    self.comm_nbytes += total_nbytes
                    self.in_flight_workers[worker] = to_gather
                    for d in to_gather:
                        self.transition_dep(d, "flight", worker=worker)
                    self.loop.add_callback(
                        self.gather_dep, worker, dep, to_gather, total_nbytes, cause=key
                    )
                    changed = True

                if not deps and not in_flight:
                    self.data_needed.popleft()
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def send_task_state_to_scheduler(self, key):
        """Report a finished or erred task's outcome to the scheduler."""
        if key in self.data or self.actors.get(key):
            nbytes = self.nbytes.get(key)
            typ = self.types.get(key)
            if nbytes is None or typ is None:
                # Lazily measure size/type for values we have not profiled
                try:
                    value = self.data[key]
                except KeyError:
                    value = self.actors[key]
                nbytes = self.nbytes[key] = sizeof(value)
                typ = self.types[key] = type(value)
                del value
            try:
                typ_serialized = dumps_function(typ)
            except PicklingError:
                # Some types fail pickling (example: _thread.lock objects),
                # send their name as a best effort.
                typ_serialized = pickle.dumps(typ.__name__)
            d = {
                "op": "task-finished",
                "status": "OK",
                "key": key,
                "nbytes": nbytes,
                "thread": self.threads.get(key),
                "type": typ_serialized,
                "typename": typename(typ),
            }
        elif key in self.exceptions:
            d = {
                "op": "task-erred",
                "status": "error",
                "key": key,
                "thread": self.threads.get(key),
                "exception": self.exceptions[key],
                "traceback": self.tracebacks[key],
            }
        else:
            logger.error(
                "Key not ready to send to worker, %s: %s", key, self.task_state[key]
            )
            return

        if key in self.startstops:
            d["startstops"] = self.startstops[key]
        self.batched_stream.send(d)
    def put_key_in_memory(self, key, value, transition=True):
        """Store *value* under *key* and unblock tasks waiting on it."""
        if key in self.data:
            # Already stored; keep the existing value
            return

        if key in self.actors:
            self.actors[key] = value
        else:
            start = time()
            self.data[key] = value
            stop = time()
            if stop - start > 0.020:
                # self.data may spill to disk; record slow writes
                self.startstops[key].append(
                    {"action": "disk-write", "start": start, "stop": stop}
                )

        if key not in self.nbytes:
            self.nbytes[key] = sizeof(value)

        self.types[key] = type(value)

        # Any dependent waiting only on this key can now become ready
        for dep in self.dependents.get(key, ()):
            if dep in self.waiting_for_data:
                if key in self.waiting_for_data[dep]:
                    self.waiting_for_data[dep].remove(key)
                if not self.waiting_for_data[dep]:
                    self.transition(dep, "ready")

        if transition and key in self.task_state:
            self.transition(key, "memory")

        self.log.append((key, "put-in-memory"))
def select_keys_for_gather(self, worker, dep):
deps = {dep}
total_bytes = self.nbytes[dep]
L = self.pending_data_per_worker[worker]
while L:
d = L.popleft()
if self.dep_state.get(d) != "waiting":
continue
if total_bytes + self.nbytes[d] > self.target_message_size:
break
deps.add(d)
total_bytes += self.nbytes[d]
return deps, total_bytes
    async def gather_dep(self, worker, dep, deps, total_nbytes, cause=None):
        """Fetch a batch of dependencies from one peer worker.

        ``dep`` is the primary dependency that triggered the request,
        ``deps`` the full batch, ``total_nbytes`` the reserved comm-byte
        budget, and ``cause`` the task key charged with the transfer time.
        A busy peer triggers exponential backoff and a who-has refresh.
        """
        if self.status != "running":
            return
        with log_errors():
            response = {}
            try:
                if self.validate:
                    self.validate_state()

                # dep states may have changed before gather_dep runs
                # if a dep is no longer in-flight then don't fetch it
                deps = tuple(dep for dep in deps if self.dep_state.get(dep) == "flight")

                self.log.append(("request-dep", dep, worker, deps))
                logger.debug("Request %d keys", len(deps))

                start = time()
                response = await get_data_from_worker(
                    self.rpc, deps, worker, who=self.address
                )
                stop = time()

                if response["status"] == "busy":
                    self.log.append(("busy-gather", worker, deps))
                    for dep in deps:
                        if self.dep_state.get(dep, None) == "flight":
                            self.transition_dep(dep, "waiting")
                    return

                if cause:
                    self.startstops[cause].append(
                        {
                            "action": "transfer",
                            "start": start + self.scheduler_delay,
                            "stop": stop + self.scheduler_delay,
                            "source": worker,
                        }
                    )

                total_bytes = sum(self.nbytes.get(dep, 0) for dep in response["data"])
                duration = (stop - start) or 0.010
                bandwidth = total_bytes / duration
                self.incoming_transfer_log.append(
                    {
                        "start": start + self.scheduler_delay,
                        "stop": stop + self.scheduler_delay,
                        "middle": (start + stop) / 2.0 + self.scheduler_delay,
                        "duration": duration,
                        "keys": {
                            dep: self.nbytes.get(dep, None) for dep in response["data"]
                        },
                        "total": total_bytes,
                        "bandwidth": bandwidth,
                        "who": worker,
                    }
                )
                if total_bytes > 1000000:
                    # Only sizeable transfers inform the bandwidth estimates
                    self.bandwidth = self.bandwidth * 0.95 + bandwidth * 0.05
                    bw, cnt = self.bandwidth_workers[worker]
                    self.bandwidth_workers[worker] = (bw + bandwidth, cnt + 1)

                    types = set(map(type, response["data"].values()))
                    if len(types) == 1:
                        [typ] = types
                        bw, cnt = self.bandwidth_types[typ]
                        self.bandwidth_types[typ] = (bw + bandwidth, cnt + 1)

                if self.digests is not None:
                    self.digests["transfer-bandwidth"].add(total_bytes / duration)
                    self.digests["transfer-duration"].add(duration)
                self.counters["transfer-count"].add(len(response["data"]))
                self.incoming_count += 1

                self.log.append(("receive-dep", worker, list(response["data"])))
            except EnvironmentError as e:
                logger.exception("Worker stream died during communication: %s", worker)
                self.log.append(("receive-dep-failed", worker))
                # Forget everything this peer was known to hold
                for d in self.has_what.pop(worker):
                    self.who_has[d].remove(worker)
                    if not self.who_has[d]:
                        del self.who_has[d]

            except Exception as e:
                logger.exception(e)
                if self.batched_stream and LOG_PDB:
                    import pdb

                    pdb.set_trace()
                raise
            finally:
                self.comm_nbytes -= total_nbytes
                busy = response.get("status", "") == "busy"
                data = response.get("data", {})

                for d in self.in_flight_workers.pop(worker):
                    if not busy and d in data:
                        self.transition_dep(d, "memory", value=data[d])
                    elif self.dep_state.get(d) != "memory":
                        self.transition_dep(
                            d, "waiting", worker=worker, remove=not busy
                        )

                    if not busy and d not in data and d in self.dependents:
                        self.log.append(("missing-dep", d))
                        self.batched_stream.send(
                            {"op": "missing-data", "errant_worker": worker, "key": d}
                        )

                if self.validate:
                    self.validate_state()

                self.ensure_computing()

                if not busy:
                    self.repetitively_busy = 0
                    self.ensure_communicating()
                else:
                    # Exponential backoff to avoid hammering scheduler/worker
                    self.repetitively_busy += 1
                    await asyncio.sleep(0.100 * 1.5 ** self.repetitively_busy)

                    # See if anyone new has the data
                    await self.query_who_has(dep)
                    self.ensure_communicating()
def bad_dep(self, dep):
exc = ValueError("Could not find dependent %s. Check worker logs" % str(dep))
for key in self.dependents[dep]:
msg = error_message(exc)
self.exceptions[key] = msg["exception"]
self.tracebacks[key] = msg["traceback"]
self.transition(key, "error")
self.release_dep(dep)
async def handle_missing_dep(self, *deps, **kwargs):
original_deps = list(deps)
self.log.append(("handle-missing", deps))
try:
deps = {dep for dep in deps if dep in self.dependents}
if not deps:
return
for dep in list(deps):
suspicious = self.suspicious_deps[dep]
if suspicious > 5:
deps.remove(dep)
self.bad_dep(dep)
if not deps:
return
for dep in deps:
logger.info(
"Dependent not found: %s %s . Asking scheduler",
dep,
self.suspicious_deps[dep],
)
who_has = await retry_operation(self.scheduler.who_has, keys=list(deps))
who_has = {k: v for k, v in who_has.items() if v}
self.update_who_has(who_has)
for dep in deps:
self.suspicious_deps[dep] += 1
if not who_has.get(dep):
self.log.append((dep, "no workers found", self.dependents.get(dep)))
self.release_dep(dep)
else:
self.log.append((dep, "new workers found"))
for key in self.dependents.get(dep, ()):
if key in self.waiting_for_data:
self.data_needed.append(key)
except Exception:
logger.error("Handle missing dep failed, retrying", exc_info=True)
retries = kwargs.get("retries", 5)
self.log.append(("handle-missing-failed", retries, deps))
if retries > 0:
await self.handle_missing_dep(self, *deps, retries=retries - 1)
else:
raise
finally:
try:
for dep in original_deps:
self._missing_dep_flight.remove(dep)
except KeyError:
pass
self.ensure_communicating()
async def query_who_has(self, *deps):
with log_errors():
response = await retry_operation(self.scheduler.who_has, keys=deps)
self.update_who_has(response)
return response
def update_who_has(self, who_has):
try:
for dep, workers in who_has.items():
if not workers:
continue
if dep in self.who_has:
self.who_has[dep].update(workers)
else:
self.who_has[dep] = set(workers)
for worker in workers:
self.has_what[worker].add(dep)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def steal_request(self, key):
state = self.task_state.get(key, None)
response = {"op": "steal-response", "key": key, "state": state}
self.batched_stream.send(response)
if state in ("ready", "waiting", "constrained"):
self.release_key(key)
def release_key(self, key, cause=None, reason=None, report=True):
try:
if key not in self.task_state:
return
state = self.task_state.pop(key)
if cause:
self.log.append((key, "release-key", {"cause": cause}))
else:
self.log.append((key, "release-key"))
del self.tasks[key]
if key in self.data and key not in self.dep_state:
try:
del self.data[key]
except FileNotFoundError:
logger.error("Tried to delete %s but no file found", exc_info=True)
del self.nbytes[key]
del self.types[key]
if key in self.actors and key not in self.dep_state:
del self.actors[key]
del self.nbytes[key]
del self.types[key]
if key in self.waiting_for_data:
del self.waiting_for_data[key]
for dep in self.dependencies.pop(key, ()):
if dep in self.dependents:
self.dependents[dep].discard(key)
if not self.dependents[dep] and self.dep_state[dep] in (
"waiting",
"flight",
):
self.release_dep(dep)
if key in self.threads:
del self.threads[key]
del self.priorities[key]
del self.durations[key]
if key in self.exceptions:
del self.exceptions[key]
if key in self.tracebacks:
del self.tracebacks[key]
if key in self.startstops:
del self.startstops[key]
if key in self.executing:
self.executing.remove(key)
if key in self.resource_restrictions:
if state == "executing":
for resource, quantity in self.resource_restrictions[key].items():
self.available_resources[resource] += quantity
del self.resource_restrictions[key]
if report and state in PROCESSING: # not finished
self.batched_stream.send({"op": "release", "key": key, "cause": cause})
except CommClosedError:
pass
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
    def release_dep(self, dep, report=False):
        """Forget dependency *dep*, cascading to dependents not yet in memory.

        Parameters
        ----------
        dep : dependency key to release
        report : if True and the data was in memory, tell the scheduler that
            this worker no longer holds it
        """
        try:
            if dep not in self.dep_state:
                return
            self.log.append((dep, "release-dep"))
            state = self.dep_state.pop(dep)

            if dep in self.suspicious_deps:
                del self.suspicious_deps[dep]

            # Drop both directions of the who-has / has-what index
            if dep in self.who_has:
                for worker in self.who_has.pop(dep):
                    self.has_what[worker].remove(dep)

            if dep not in self.task_state:
                # Not also a local task: drop the payload and its metadata
                if dep in self.data:
                    del self.data[dep]
                    del self.types[dep]
                if dep in self.actors:
                    del self.actors[dep]
                    del self.types[dep]
                del self.nbytes[dep]

            if dep in self.in_flight_tasks:
                # Stop tracking the pending transfer from the peer worker
                worker = self.in_flight_tasks.pop(dep)

                self.in_flight_workers[worker].remove(dep)

            for key in self.dependents.pop(dep, ()):
                # Dependents that never reached memory can no longer complete
                if self.task_state[key] != "memory":
                    self.release_key(key, cause=dep)

            if report and state == "memory":
                self.batched_stream.send({"op": "release-worker-data", "keys": [dep]})
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    def rescind_key(self, key):
        """Withdraw a not-yet-started task at the scheduler's request.

        Only acts on tasks still in a PENDING state; running or finished
        tasks are left alone.
        """
        try:
            if self.task_state.get(key) not in PENDING:
                return
            del self.task_state[key]
            del self.tasks[key]
            if key in self.waiting_for_data:
                del self.waiting_for_data[key]

            # Detach from the dependency graph
            for dep in self.dependencies.pop(key, ()):
                self.dependents[dep].remove(key)
                if not self.dependents[dep]:
                    del self.dependents[dep]

            if key not in self.dependents:
                # if key in self.nbytes:
                #     del self.nbytes[key]
                if key in self.priorities:
                    del self.priorities[key]
                if key in self.durations:
                    del self.durations[key]
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
################
# Execute Task #
################
    @gen.coroutine
    def executor_submit(self, key, function, args=(), kwargs=None, executor=None):
        """ Safely run function in thread pool executor

        We've run into issues running concurrent.future futures within
        tornado.  Apparently it's advantageous to use timeouts and periodic
        callbacks to ensure things run smoothly.  This can get tricky, so we
        pull it off into an separate method.

        Parameters
        ----------
        key : task key, used only for logging
        function, args, kwargs : the work to run on the executor
        executor : thread pool to use; defaults to ``self.executor``
        """
        executor = executor or self.executor
        job_counter[0] += 1
        # logger.info("%s:%d Starts job %d, %s", self.ip, self.port, i, key)
        kwargs = kwargs or {}
        future = executor.submit(function, *args, **kwargs)
        # Periodically log the future's internal state while it runs, as a
        # debugging aid for stuck executor jobs (fires every 1000 ms)
        pc = PeriodicCallback(
            lambda: logger.debug("future state: %s - %s", key, future._state), 1000
        )
        pc.start()
        try:
            yield future
        finally:
            pc.stop()

        result = future.result()

        # logger.info("Finish job %d, %s", i, key)
        raise gen.Return(result)
def run(self, comm, function, args=(), wait=True, kwargs=None):
kwargs = kwargs or {}
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
def run_coroutine(self, comm, function, args=(), kwargs=None, wait=True):
return run(self, comm, function=function, args=args, kwargs=kwargs, wait=wait)
    async def plugin_add(self, comm=None, plugin=None, name=None):
        """Register a worker plugin and run its ``setup`` hook.

        *plugin* may arrive pickled.  If *name* is not given it is taken from
        the plugin's ``name`` attribute or generated.  Returns an RPC-style
        status dict; setup errors are returned as error messages, not raised.
        """
        with log_errors(pdb=False):
            if isinstance(plugin, bytes):
                plugin = pickle.loads(plugin)
            if not name:
                if hasattr(plugin, "name"):
                    name = plugin.name
                else:
                    # Fall back to a unique generated name
                    name = funcname(plugin) + "-" + str(uuid.uuid4())

            assert name

            if name in self.plugins:
                return {"status": "repeat"}
            else:
                self.plugins[name] = plugin
                logger.info("Starting Worker plugin %s" % name)

                if hasattr(plugin, "setup"):
                    try:
                        result = plugin.setup(worker=self)
                        if isawaitable(result):
                            result = await result
                    except Exception as e:
                        msg = error_message(e)
                        return msg

                return {"status": "OK"}
async def actor_execute(
self, comm=None, actor=None, function=None, args=(), kwargs={}
):
separate_thread = kwargs.pop("separate_thread", True)
key = actor
actor = self.actors[key]
func = getattr(actor, function)
name = key_split(key) + "." + function
if iscoroutinefunction(func):
result = await func(*args, **kwargs)
elif separate_thread:
result = await self.executor_submit(
name,
apply_function_actor,
args=(
func,
args,
kwargs,
self.execution_state,
name,
self.active_threads,
self.active_threads_lock,
),
executor=self.actor_executor,
)
else:
result = func(*args, **kwargs)
return {"status": "OK", "result": to_serialize(result)}
def actor_attribute(self, comm=None, actor=None, attribute=None):
value = getattr(self.actors[actor], attribute)
return {"status": "OK", "result": to_serialize(value)}
def meets_resource_constraints(self, key):
if key not in self.resource_restrictions:
return True
for resource, needed in self.resource_restrictions[key].items():
if self.available_resources[resource] < needed:
return False
return True
    def _maybe_deserialize_task(self, key):
        """Return ``(function, args, kwargs)`` for *key*, deserializing if needed.

        If the stored task is not a ``SerializedTask`` it is returned as-is.
        Deserialization errors are reported to the scheduler, logged, and
        re-raised.
        """
        if not isinstance(self.tasks[key], SerializedTask):
            return self.tasks[key]
        try:
            start = time()
            function, args, kwargs = _deserialize(*self.tasks[key])
            stop = time()

            # Record noticeably slow deserializations (>10ms) for diagnostics
            if stop - start > 0.010:
                self.startstops[key].append(
                    {"action": "deserialize", "start": start, "stop": stop}
                )
            return function, args, kwargs
        except Exception as e:
            logger.warning("Could not deserialize task", exc_info=True)
            emsg = error_message(e)
            emsg["key"] = key
            emsg["op"] = "task-erred"
            self.batched_stream.send(emsg)
            self.log.append((key, "deserialize-error"))
            raise
    def ensure_computing(self):
        """Move ready/constrained tasks into execution while threads are free.

        No-op while the worker is paused (e.g. due to memory pressure).
        """
        if self.paused:
            return
        try:
            # First serve resource-constrained tasks in FIFO order
            while self.constrained and len(self.executing) < self.nthreads:
                key = self.constrained[0]
                if self.task_state.get(key) != "constrained":
                    # Stale entry; task moved on in the meantime
                    self.constrained.popleft()
                    continue
                if self.meets_resource_constraints(key):
                    self.constrained.popleft()
                    try:
                        # Ensure task is deserialized prior to execution
                        self.tasks[key] = self._maybe_deserialize_task(key)
                    except Exception:
                        # _maybe_deserialize_task already reported the error
                        continue
                    self.transition(key, "executing")
                else:
                    break
            # Then serve unconstrained tasks in priority (heap) order
            while self.ready and len(self.executing) < self.nthreads:
                _, key = heapq.heappop(self.ready)
                if self.task_state.get(key) in READY:
                    try:
                        # Ensure task is deserialized prior to execution
                        self.tasks[key] = self._maybe_deserialize_task(key)
                    except Exception:
                        continue
                    self.transition(key, "executing")
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
    async def execute(self, key, report=False):
        """Run task *key* on the thread pool and transition it by outcome.

        Collects dependency data (falling back to actor proxies), submits the
        function via ``executor_submit``, then transitions the task to
        ``memory``, ``rescheduled`` or ``error`` depending on the result.
        """
        executor_error = None
        if self.status in ("closing", "closed", "closing-gracefully"):
            return
        try:
            if key not in self.executing or key not in self.task_state:
                # Task was cancelled or released before we got here
                return
            if self.validate:
                assert key not in self.waiting_for_data
                assert self.task_state[key] == "executing"

            function, args, kwargs = self.tasks[key]

            start = time()
            data = {}
            for k in self.dependencies[key]:
                try:
                    data[k] = self.data[k]
                except KeyError:
                    from .actor import Actor  # TODO: create local actor

                    # Dependency is an actor; hand the task a proxy instead
                    data[k] = Actor(type(self.actors[k]), self.address, k, self)
            args2 = pack_data(args, data, key_types=(bytes, str))
            kwargs2 = pack_data(kwargs, data, key_types=(bytes, str))
            stop = time()
            if stop - start > 0.005:
                # Gathering dependencies (possibly from disk) was slow; record it
                self.startstops[key].append(
                    {"action": "disk-read", "start": start, "stop": stop}
                )
                if self.digests is not None:
                    self.digests["disk-load-duration"].add(stop - start)

            logger.debug(
                "Execute key: %s worker: %s", key, self.address
            )  # TODO: comment out?
            try:
                result = await self.executor_submit(
                    key,
                    apply_function,
                    args=(
                        function,
                        args2,
                        kwargs2,
                        self.execution_state,
                        key,
                        self.active_threads,
                        self.active_threads_lock,
                        self.scheduler_delay,
                    ),
                )
            except RuntimeError as e:
                executor_error = e
                raise

            # The task may have been cancelled or stolen while computing
            if self.task_state.get(key) not in ("executing", "long-running"):
                return

            result["key"] = key
            value = result.pop("result", None)
            self.startstops[key].append(
                {"action": "compute", "start": result["start"], "stop": result["stop"]}
            )
            self.threads[key] = result["thread"]

            if result["op"] == "task-finished":
                self.nbytes[key] = result["nbytes"]
                self.types[key] = result["type"]
                self.transition(key, "memory", value=value)
                if self.digests is not None:
                    self.digests["task-duration"].add(result["stop"] - result["start"])
            else:
                if isinstance(result.pop("actual-exception"), Reschedule):
                    # Task raised Reschedule: hand it back to the scheduler
                    self.batched_stream.send({"op": "reschedule", "key": key})
                    self.transition(key, "rescheduled", report=False)
                    self.release_key(key, report=False)
                else:
                    self.exceptions[key] = result["exception"]
                    self.tracebacks[key] = result["traceback"]
                    logger.warning(
                        " Compute Failed\n"
                        "Function: %s\n"
                        "args: %s\n"
                        "kwargs: %s\n"
                        "Exception: %s\n",
                        str(funcname(function))[:1000],
                        convert_args_to_str(args2, max_len=1000),
                        convert_kwargs_to_str(kwargs2, max_len=1000),
                        repr(result["exception"].data),
                    )
                    self.transition(key, "error")

            logger.debug("Send compute response to scheduler: %s, %s", key, result)

            if self.validate:
                assert key not in self.executing
                assert key not in self.waiting_for_data

            self.ensure_computing()
            self.ensure_communicating()
        except Exception as e:
            if executor_error is e:
                logger.error("Thread Pool Executor error: %s", e)
            else:
                logger.exception(e)
                if LOG_PDB:
                    import pdb

                    pdb.set_trace()
                raise
        finally:
            if key in self.executing:
                self.executing.remove(key)
##################
# Administrative #
##################
    async def memory_monitor(self):
        """ Track this process's memory usage and act accordingly

        If we rise above the spill fraction of memory use, start dumping
        data to disk.  If we rise above the pause fraction, stop execution
        of new tasks.  Returns the total number of bytes spilled.
        """
        if self._memory_monitoring:
            # A previous invocation is still running; avoid re-entrancy
            return
        self._memory_monitoring = True
        total = 0

        proc = self.monitor.proc
        memory = proc.memory_info().rss
        frac = memory / self.memory_limit

        def check_pause(memory):
            # Toggle self.paused based on the pause threshold
            frac = memory / self.memory_limit
            # Pause worker threads if above 80% memory use
            if self.memory_pause_fraction and frac > self.memory_pause_fraction:
                # Try to free some memory while in paused state
                self._throttled_gc.collect()
                if not self.paused:
                    logger.warning(
                        "Worker is at %d%% memory usage. Pausing worker. "
                        "Process memory: %s -- Worker memory limit: %s",
                        int(frac * 100),
                        format_bytes(memory),
                        format_bytes(self.memory_limit)
                        if self.memory_limit is not None
                        else "None",
                    )
                    self.paused = True
            elif self.paused:
                logger.warning(
                    "Worker is at %d%% memory usage. Resuming worker. "
                    "Process memory: %s -- Worker memory limit: %s",
                    int(frac * 100),
                    format_bytes(memory),
                    format_bytes(self.memory_limit)
                    if self.memory_limit is not None
                    else "None",
                )
                self.paused = False
                self.ensure_computing()

        check_pause(memory)
        # Dump data to disk if above 70%
        if self.memory_spill_fraction and frac > self.memory_spill_fraction:
            logger.debug(
                "Worker is at %d%% memory usage. Start spilling data to disk.",
                int(frac * 100),
            )
            start = time()
            target = self.memory_limit * self.memory_target_fraction
            count = 0
            need = memory - target
            while memory > target:
                if not self.data.fast:
                    logger.warning(
                        "Memory use is high but worker has no data "
                        "to store to disk. Perhaps some other process "
                        "is leaking memory? Process memory: %s -- "
                        "Worker memory limit: %s",
                        format_bytes(memory),
                        format_bytes(self.memory_limit)
                        if self.memory_limit is not None
                        else "None",
                    )
                    break
                # NOTE: local name shadows the module-level weight() function
                k, v, weight = self.data.fast.evict()
                del k, v
                total += weight
                count += 1
                # If the current buffer is filled with a lot of small values,
                # evicting one at a time is very slow and the worker might
                # generate new data faster than it is able to evict. Therefore,
                # only pass on control if we spent at least 0.5s evicting
                if time() - start > 0.5:
                    await asyncio.sleep(0)
                    start = time()
                memory = proc.memory_info().rss
                if total > need and memory > target:
                    # Issue a GC to ensure that the evicted data is actually
                    # freed from memory and taken into account by the monitor
                    # before trying to evict even more data.
                    self._throttled_gc.collect()
                    memory = proc.memory_info().rss
            check_pause(memory)
            if count:
                logger.debug(
                    "Moved %d pieces of data data and %s to disk",
                    count,
                    format_bytes(total),
                )

        self._memory_monitoring = False
        return total
def cycle_profile(self):
now = time() + self.scheduler_delay
prof, self.profile_recent = self.profile_recent, profile.create()
self.profile_history.append((now, prof))
self.profile_keys_history.append((now, dict(self.profile_keys)))
self.profile_keys.clear()
    def trigger_profile(self):
        """
        Get a frame from all actively computing threads

        Merge these frames into existing profile counts
        """
        if not self.active_threads:  # hope that this is thread-atomic?
            return
        start = time()
        with self.active_threads_lock:
            active_threads = self.active_threads.copy()
        frames = sys._current_frames()
        # Restrict to threads that are running a task right now
        frames = {ident: frames[ident] for ident in active_threads}
        llframes = {}
        if self.low_level_profiler:
            llframes = {ident: profile.ll_get_stack(ident) for ident in active_threads}
        for ident, frame in frames.items():
            if frame is not None:
                key = key_split(active_threads[ident])
                llframe = llframes.get(ident)

                state = profile.process(
                    frame, True, self.profile_recent, stop="distributed/worker.py"
                )
                profile.llprocess(llframe, None, state)
                # Also accumulate into the per-key profile
                profile.process(
                    frame, True, self.profile_keys[key], stop="distributed/worker.py"
                )

        stop = time()
        if self.digests is not None:
            self.digests["profile-duration"].add(stop - start)
    async def get_profile(
        self, comm=None, start=None, stop=None, key=None, server=False
    ):
        """Return merged profile data for the window [*start*, *stop*].

        Parameters
        ----------
        start, stop : optional timestamps bounding the history window
        key : restrict to the profile of a single task-name key
        server : if True, profile the event loop instead of worker threads
        """
        now = time() + self.scheduler_delay
        if server:
            history = self.io_loop.profile
        elif key is None:
            history = self.profile_history
        else:
            history = [(t, d[key]) for t, d in self.profile_keys_history if key in d]

        # Translate the time bounds into index bounds on the history deque
        if start is None:
            istart = 0
        else:
            istart = bisect.bisect_left(history, (start,))

        if stop is None:
            istop = None
        else:
            istop = bisect.bisect_right(history, (stop,)) + 1
            if istop >= len(history):
                istop = None  # include end

        if istart == 0 and istop is None:
            history = list(history)
        else:
            iistop = len(history) if istop is None else istop
            history = [history[i] for i in range(istart, iistop)]
        prof = profile.merge(*pluck(1, history))

        if not history:
            return profile.create()

        if istop is None and (start is None or start < now):
            # Also fold in the not-yet-rotated "recent" interval
            if key is None:
                recent = self.profile_recent
            else:
                recent = self.profile_keys[key]
            prof = profile.merge(prof, recent)

        return prof
async def get_profile_metadata(self, comm=None, start=0, stop=None):
if stop is None:
add_recent = True
now = time() + self.scheduler_delay
stop = stop or now
start = start or 0
result = {
"counts": [
(t, d["count"]) for t, d in self.profile_history if start < t < stop
],
"keys": [
(t, {k: d["count"] for k, d in v.items()})
for t, v in self.profile_keys_history
if start < t < stop
],
}
if add_recent:
result["counts"].append((now, self.profile_recent["count"]))
result["keys"].append(
(now, {k: v["count"] for k, v in self.profile_keys.items()})
)
return result
def get_call_stack(self, comm=None, keys=None):
with self.active_threads_lock:
frames = sys._current_frames()
active_threads = self.active_threads.copy()
frames = {k: frames[ident] for ident, k in active_threads.items()}
if keys is not None:
frames = {k: frame for k, frame in frames.items() if k in keys}
result = {k: profile.call_stack(frame) for k, frame in frames.items()}
return result
def _notify_transition(self, key, start, finish, **kwargs):
for name, plugin in self.plugins.items():
if hasattr(plugin, "transition"):
try:
plugin.transition(key, start, finish, **kwargs)
except Exception:
logger.info(
"Plugin '%s' failed with exception" % name, exc_info=True
)
##############
# Validation #
##############
def validate_key_memory(self, key):
assert key in self.data or key in self.actors
assert key in self.nbytes
assert key not in self.waiting_for_data
assert key not in self.executing
assert key not in self.ready
if key in self.dep_state:
assert self.dep_state[key] == "memory"
def validate_key_executing(self, key):
assert key in self.executing
assert key not in self.data
assert key not in self.waiting_for_data
assert all(
dep in self.data or dep in self.actors for dep in self.dependencies[key]
)
def validate_key_ready(self, key):
assert key in pluck(1, self.ready)
assert key not in self.data
assert key not in self.executing
assert key not in self.waiting_for_data
assert all(
dep in self.data or dep in self.actors for dep in self.dependencies[key]
)
def validate_key_waiting(self, key):
assert key not in self.data
assert not all(dep in self.data for dep in self.dependencies[key])
def validate_key(self, key):
try:
state = self.task_state[key]
if state == "memory":
self.validate_key_memory(key)
elif state == "waiting":
self.validate_key_waiting(key)
elif state == "ready":
self.validate_key_ready(key)
elif state == "executing":
self.validate_key_executing(key)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
def validate_dep_waiting(self, dep):
assert dep not in self.data
assert dep in self.nbytes
assert self.dependents[dep]
assert not any(key in self.ready for key in self.dependents[dep])
def validate_dep_flight(self, dep):
assert dep not in self.data
assert dep in self.nbytes
assert not any(key in self.ready for key in self.dependents[dep])
peer = self.in_flight_tasks[dep]
assert dep in self.in_flight_workers[peer]
def validate_dep_memory(self, dep):
assert dep in self.data or dep in self.actors
assert dep in self.nbytes
assert dep in self.types
if dep in self.task_state:
assert self.task_state[dep] == "memory"
def validate_dep(self, dep):
try:
state = self.dep_state[dep]
if state == "waiting":
self.validate_dep_waiting(dep)
elif state == "flight":
self.validate_dep_flight(dep)
elif state == "memory":
self.validate_dep_memory(dep)
else:
raise ValueError("Unknown dependent state", state)
except Exception as e:
logger.exception(e)
if LOG_PDB:
import pdb
pdb.set_trace()
raise
    def validate_state(self):
        """Check cross-structure invariants of the whole worker state.

        Only runs while the worker is ``running``; any violation raises
        (and optionally drops into pdb).
        """
        if self.status != "running":
            return
        try:
            # who_has and has_what must be mirror images of each other
            for key, workers in self.who_has.items():
                for w in workers:
                    assert key in self.has_what[w]

            for worker, keys in self.has_what.items():
                for k in keys:
                    assert worker in self.who_has[k]

            for key in self.task_state:
                self.validate_key(key)

            for dep in self.dep_state:
                self.validate_dep(dep)

            for key, deps in self.waiting_for_data.items():
                if key not in self.data_needed:
                    for dep in deps:
                        # Every awaited dep must be actively being resolved
                        assert (
                            dep in self.in_flight_tasks
                            or dep in self._missing_dep_flight
                            or self.who_has[dep].issubset(self.in_flight_workers)
                        )

            for key in self.tasks:
                if self.task_state[key] == "memory":
                    assert isinstance(self.nbytes[key], int)
                    assert key not in self.waiting_for_data
                    assert key in self.data or key in self.actors
        except Exception as e:
            logger.exception(e)
            if LOG_PDB:
                import pdb

                pdb.set_trace()
            raise
#######################################
# Worker Clients (advanced workloads) #
#######################################
    @property
    def client(self):
        """The local ``Client`` bound to this worker, creating one on first use."""
        with self._lock:
            if self._client:
                return self._client
            else:
                return self._get_client()
    def _get_client(self, timeout=3):
        """ Get local client attached to this worker

        If no such client exists, create one

        Parameters
        ----------
        timeout : connection timeout in seconds when a new client is created

        See Also
        --------
        get_client
        """
        try:
            from .client import default_client

            client = default_client()
        except ValueError:  # no clients found, need to make a new one
            pass
        else:
            # Reuse the default client only if it points at our scheduler
            if (
                client.scheduler
                and client.scheduler.address == self.scheduler.address
                or client._start_arg == self.scheduler.address
            ):
                self._client = client

        if not self._client:
            from .client import Client

            # Run the client on this loop; asynchronous when already on it
            asynchronous = self.loop is IOLoop.current()
            self._client = Client(
                self.scheduler,
                loop=self.loop,
                security=self.security,
                set_as_default=True,
                asynchronous=asynchronous,
                direct_to_workers=True,
                name="worker",
                timeout=timeout,
            )
            if not asynchronous:
                assert self._client.status == "running"

        return self._client
    def get_current_task(self):
        """ Get the key of the task we are currently running

        This only makes sense to run within a task

        Examples
        --------
        >>> from dask.distributed import get_worker
        >>> def f():
        ...     return get_worker().get_current_task()

        >>> future = client.submit(f)  # doctest: +SKIP
        >>> future.result()  # doctest: +SKIP
        'f-1234'

        See Also
        --------
        get_worker
        """
        # The executing thread registered itself under its thread ident
        return self.active_threads[threading.get_ident()]
def get_worker():
    """ Get the worker currently running this task

    Examples
    --------
    >>> def f():
    ...     worker = get_worker()  # The worker on which this task is running
    ...     return worker.address

    >>> future = client.submit(f)  # doctest: +SKIP
    >>> future.result()  # doctest: +SKIP
    'tcp://127.0.0.1:47373'

    Raises
    ------
    ValueError
        If no running worker can be found in this process.

    See Also
    --------
    get_client
    worker_client
    """
    try:
        # Fast path: inside a task, the worker stored itself in the
        # thread-local execution state
        return thread_state.execution_state["worker"]
    except AttributeError:
        # Not inside a task: fall back to any running Worker in this process
        try:
            return first(w for w in Worker._instances if w.status == "running")
        except StopIteration:
            raise ValueError("No workers found")
def get_client(address=None, timeout=3, resolve_address=True):
    """Get a client while within a task.

    This client connects to the same scheduler to which the worker is connected

    Parameters
    ----------
    address : str, optional
        The address of the scheduler to connect to. Defaults to the scheduler
        the worker is connected to.
    timeout : int, default 3
        Timeout (in seconds) for getting the Client
    resolve_address : bool, default True
        Whether to resolve `address` to its canonical form.

    Returns
    -------
    Client

    Examples
    --------
    >>> def f():
    ...     client = get_client()
    ...     futures = client.map(lambda x: x + 1, range(10))  # spawn many tasks
    ...     results = client.gather(futures)
    ...     return sum(results)

    >>> future = client.submit(f)  # doctest: +SKIP
    >>> future.result()  # doctest: +SKIP
    55

    See Also
    --------
    get_worker
    worker_client
    secede
    """
    if address and resolve_address:
        address = comm.resolve_address(address)
    try:
        worker = get_worker()
    except ValueError:  # could not find worker
        pass
    else:
        # Prefer the worker's own client when it matches the requested address
        if not address or worker.scheduler.address == address:
            return worker._get_client(timeout=timeout)

    from .client import _get_global_client

    client = _get_global_client()  # TODO: assumes the same scheduler
    if client and (not address or client.scheduler.address == address):
        return client
    elif address:
        from .client import Client

        # Last resort: open a brand new client to the requested scheduler
        return Client(address, timeout=timeout)
    else:
        raise ValueError("No global client found and no address provided")
def secede():
    """
    Have this task secede from the worker's thread pool

    This opens up a new scheduling slot and a new thread for a new task. This
    enables the client to schedule tasks on this node, which is
    especially useful while waiting for other jobs to finish (e.g., with
    ``client.gather``).

    Examples
    --------
    >>> def mytask(x):
    ...     # do some work
    ...     client = get_client()
    ...     futures = client.map(...)  # do some remote work
    ...     secede()  # while that work happens, remove ourself from the pool
    ...     return client.gather(futures)  # return gathered results

    See Also
    --------
    get_client
    get_worker
    """
    worker = get_worker()
    tpe_secede()  # have this thread secede from the thread pool
    duration = time() - thread_state.start_time
    # Notify the worker on its event loop so it can mark the task
    # long-running and open up a new scheduling slot
    worker.loop.add_callback(
        worker.maybe_transition_long_running,
        thread_state.key,
        compute_duration=duration,
    )
class Reschedule(Exception):
    """ Reschedule this task

    Raising this exception will stop the current execution of the task and ask
    the scheduler to reschedule this task, possibly on a different machine.

    This does not guarantee that the task will move onto a different machine.
    The scheduler will proceed through its normal heuristics to determine the
    optimal machine to accept this task.  The machine will likely change if
    the load across the cluster has significantly changed since first
    scheduling the task.

    Raise this from within task code to trigger the behavior.
    """

    pass
def parse_memory_limit(memory_limit, nthreads, total_cores=CPU_COUNT):
    """Normalize a user-supplied memory limit to an integer number of bytes.

    Accepts ``None`` (no limit), the string ``"auto"`` (a per-thread share of
    system memory), a fraction in (0, 1], a byte string such as ``"4GB"``, or
    a plain number of bytes.  The result is capped at the system limit.
    """
    if memory_limit is None:
        return None

    limit = memory_limit
    if limit == "auto":
        # Give this worker a share of memory proportional to its threads
        limit = int(system.MEMORY_LIMIT * min(1, nthreads / total_cores))
    with ignoring(ValueError, TypeError):
        limit = float(limit)

    if isinstance(limit, float) and limit <= 1:
        # Interpret fractions as a proportion of total system memory
        limit = int(limit * system.MEMORY_LIMIT)

    if isinstance(limit, str):
        limit = parse_bytes(limit)
    else:
        limit = int(limit)

    return min(limit, system.MEMORY_LIMIT)
async def get_data_from_worker(
    rpc,
    keys,
    worker,
    who=None,
    max_connections=None,
    serializers=None,
    deserializers=None,
):
    """ Get keys from worker

    The worker has a two step handshake to acknowledge when data has been fully
    delivered.  This function implements that handshake.

    Parameters
    ----------
    rpc : connection pool used to reach *worker*
    keys : keys to fetch
    worker : address of the worker holding the data
    who : optional address of the requesting worker
    max_connections : forwarded to the remote ``get_data`` handler

    See Also
    --------
    Worker.get_data
    Worker.gather_deps
    utils_comm.gather_data_from_workers
    """
    if serializers is None:
        serializers = rpc.serializers
    if deserializers is None:
        deserializers = rpc.deserializers

    async def _get_data():
        comm = await rpc.connect(worker)
        comm.name = "Ephemeral Worker->Worker for gather"
        try:
            response = await send_recv(
                comm,
                serializers=serializers,
                deserializers=deserializers,
                op="get_data",
                keys=keys,
                who=who,
                max_connections=max_connections,
            )
            try:
                status = response["status"]
            except KeyError:
                raise ValueError("Unexpected response", response)
            else:
                if status == "OK":
                    # Second step of the handshake: confirm receipt
                    await comm.write("OK")
            return response
        finally:
            rpc.reuse(worker, comm)

    return await retry_operation(_get_data, operation="get_data_from_worker")
# Mutable counter of jobs submitted to the executor (a list so that
# executor_submit can increment it in place)
job_counter = [0]

# Cache mapping pickled function bytes -> deserialized function
cache_loads = LRU(maxsize=100)
def loads_function(bytes_object):
    """Deserialize a pickled function, caching small payloads by their bytes."""
    if len(bytes_object) >= 100000:
        # Too large to be worth caching
        return pickle.loads(bytes_object)
    try:
        return cache_loads[bytes_object]
    except KeyError:
        function = pickle.loads(bytes_object)
        cache_loads[bytes_object] = function
        return function
def _deserialize(function=None, args=None, kwargs=None, task=no_value):
    """ Deserialize task inputs and regularize to func, args, kwargs

    Either ``function`` (with optional pickled args/kwargs) or a whole
    ``task`` tuple is provided, never both.  Returns a normalized
    ``(function, args, kwargs)`` triple.
    """
    if function is not None:
        function = loads_function(function)
    if args:
        args = pickle.loads(args)
    if kwargs:
        kwargs = pickle.loads(kwargs)

    if task is not no_value:
        # A raw task tuple: evaluate it via execute_task
        assert not function and not args and not kwargs
        function = execute_task
        args = (task,)

    return function, args or (), kwargs or {}
def execute_task(task):
    """Recursively evaluate a (possibly nested) dask task.

    >>> inc = lambda x: x + 1
    >>> execute_task((inc, 1))
    2
    >>> execute_task((sum, [1, 2, (inc, 3)]))
    7
    """
    if istask(task):
        func = task[0]
        evaluated = [execute_task(arg) for arg in task[1:]]
        return func(*evaluated)
    if isinstance(task, list):
        return [execute_task(item) for item in task]
    return task
# Cache mapping function -> pickled bytes; guarded by _cache_lock because
# dumps_function may be called from multiple threads
cache_dumps = LRU(maxsize=100)

_cache_lock = threading.Lock()
def dumps_function(func):
    """ Dump a function to bytes, cache functions

    Small pickled payloads (< 100 kB) are cached under ``_cache_lock`` so
    repeatedly submitted functions are serialized only once.  Unhashable
    functions bypass the cache entirely.
    """
    try:
        with _cache_lock:
            result = cache_dumps[func]
    except KeyError:
        result = pickle.dumps(func)
        if len(result) < 100000:
            with _cache_lock:
                cache_dumps[func] = result
    except TypeError:  # Unhashable function
        result = pickle.dumps(func)
    return result
def dumps_task(task):
    """ Serialize a dask task

    Returns a dict of bytestrings that can each be loaded with ``loads``

    Examples
    --------
    Either returns a task as a function, args, kwargs dict

    >>> from operator import add
    >>> dumps_task((add, 1))  # doctest: +SKIP
    {'function': b'\x80\x04\x95\x00\x8c\t_operator\x94\x8c\x03add\x94\x93\x94.'
     'args': b'\x80\x04\x95\x07\x00\x00\x00K\x01K\x02\x86\x94.'}

    Or as a single task blob if it can't easily decompose the result. This
    happens either if the task is highly nested, or if it isn't a task at
    all

    >>> dumps_task(1)  # doctest: +SKIP
    {'task': b'\x80\x04\x95\x03\x00\x00\x00\x00\x00\x00\x00K\x01.'}
    """
    if istask(task):
        # apply-style task: (apply, func, args[, kwargs]) with simple args
        if task[0] is apply and not any(map(_maybe_complex, task[2:])):
            d = {"function": dumps_function(task[1]), "args": warn_dumps(task[2])}
            if len(task) == 4:
                d["kwargs"] = warn_dumps(task[3])
            return d
        # plain call whose arguments contain no nested tasks
        elif not any(map(_maybe_complex, task[1:])):
            return {"function": dumps_function(task[0]), "args": warn_dumps(task[1:])}
    # Fallback: ship the whole task for remote evaluation
    return to_serialize(task)
# One-shot flag so the large-object warning in warn_dumps fires only once
_warn_dumps_warned = [False]
def warn_dumps(obj, dumps=pickle.dumps, limit=1e6):
    """Serialize *obj*; warn (once per process) if the payload exceeds *limit* bytes."""
    payload = dumps(obj)
    if _warn_dumps_warned[0] or len(payload) <= limit:
        return payload

    _warn_dumps_warned[0] = True
    preview = str(obj)
    if len(preview) > 70:
        preview = preview[:50] + " ... " + preview[-15:]
    warnings.warn(
        "Large object of size %s detected in task graph: \n"
        " %s\n"
        "Consider scattering large objects ahead of time\n"
        "with client.scatter to reduce scheduler burden and \n"
        "keep data on workers\n\n"
        " future = client.submit(func, big_data) # bad\n\n"
        " big_future = client.scatter(big_data) # good\n"
        " future = client.submit(func, big_future) # good"
        % (format_bytes(len(payload)), preview)
    )
    return payload
def apply_function(
    function,
    args,
    kwargs,
    execution_state,
    key,
    active_threads,
    active_threads_lock,
    time_delay,
):
    """ Run a function, collect information

    Registers the current thread as working on *key* for the duration of the
    call, and stamps start/stop times corrected by *time_delay* (the measured
    clock offset to the scheduler).

    Returns
    -------
    msg: dictionary with status, result/error, timings, etc..
    """
    ident = threading.get_ident()
    with active_threads_lock:
        active_threads[ident] = key
    # Expose task context to code running in this thread (e.g. get_worker)
    thread_state.start_time = time()
    thread_state.execution_state = execution_state
    thread_state.key = key
    start = time()
    try:
        result = function(*args, **kwargs)
    except Exception as e:
        msg = error_message(e)
        msg["op"] = "task-erred"
        msg["actual-exception"] = e
    else:
        msg = {
            "op": "task-finished",
            "status": "OK",
            "result": result,
            "nbytes": sizeof(result),
            "type": type(result) if result is not None else None,
        }
    finally:
        end = time()
    msg["start"] = start + time_delay
    msg["stop"] = end + time_delay
    msg["thread"] = ident
    with active_threads_lock:
        del active_threads[ident]
    return msg
def apply_function_actor(
    function, args, kwargs, execution_state, key, active_threads, active_threads_lock
):
    """ Run an actor method in a worker thread, collect information

    Registers the current thread as working on *key* while the call runs.

    Returns
    -------
    The raw result of ``function(*args, **kwargs)``; exceptions propagate.
    """
    ident = threading.get_ident()

    with active_threads_lock:
        active_threads[ident] = key

    # Expose task context to code running in this thread (e.g. get_worker)
    thread_state.execution_state = execution_state
    thread_state.key = key

    try:
        result = function(*args, **kwargs)
    finally:
        # FIX: always deregister the thread, even when the actor method
        # raises, so failed calls don't leave stale active_threads entries.
        with active_threads_lock:
            del active_threads[ident]

    return result
def get_msg_safe_str(msg):
    """Return a shallow copy of *msg* whose args/kwargs stringify lazily.

    The replacement wrapper objects only run the (potentially raising)
    conversion when ``repr()`` is actually taken on them.
    """

    class _LazyRepr:
        def __init__(self, convert, value):
            self._convert = convert
            self._value = value

        def __repr__(self):
            return self._convert(self._value)

    safe = msg.copy()
    if "args" in safe:
        safe["args"] = _LazyRepr(convert_args_to_str, safe["args"])
    if "kwargs" in safe:
        safe["kwargs"] = _LazyRepr(convert_kwargs_to_str, safe["kwargs"])
    return safe
def convert_args_to_str(args, max_len=None):
    """Render *args* like a call tuple, tolerating broken ``__repr__``s.

    Arguments whose ``repr()`` raises are shown as a placeholder.  If
    *max_len* is given the output is truncated to at most that many
    characters.
    """
    rendered = []
    total = 0
    for arg in args:
        try:
            text = repr(arg)
        except Exception:
            text = "< could not convert arg to str >"
        rendered.append(text)
        total += len(text) + 2
        if max_len is not None and total > max_len:
            return "({}".format(", ".join(rendered))[:max_len]
    return "({})".format(", ".join(rendered))
def convert_kwargs_to_str(kwargs, max_len=None):
    """Format keyword arguments as a dict-like string.

    Values whose repr() raises are replaced by a placeholder.  When the
    accumulated length exceeds ``max_len``, the (unterminated) string is
    clipped to ``max_len`` characters and returned immediately.
    """
    pieces = []
    running_len = 0
    for name, value in kwargs.items():
        try:
            value_str = repr(value)
        except Exception:
            value_str = "< could not convert arg to str >"
        piece = repr(name) + ": " + value_str
        pieces.append(piece)
        # +2 accounts for the ", " separator budget per element.
        running_len += len(piece) + 2
        if max_len is not None and running_len > max_len:
            return "{{{}".format(", ".join(pieces))[:max_len]
    return "{{{}}}".format(", ".join(pieces))
def weight(k, v):
    # Weight of a (key, value) entry in the worker's data buffer: the
    # estimated byte size of the value (the key does not contribute).
    return sizeof(v)
async def run(server, comm, function, args=(), kwargs={}, is_coro=None, wait=True):
    """Deserialize and run an arbitrary "out of band" function on this server.

    Parameters
    ----------
    server: the Worker/Scheduler instance; injected into the call as
        ``dask_worker`` / ``dask_scheduler`` when the function accepts
        a parameter of that name
    comm: communication channel (unused here; kept for handler symmetry)
    function, args, kwargs: pickled payloads
    is_coro: deprecated -- coroutine functions are now auto-detected
    wait: if False (coroutine functions only), schedule the call on the
        event loop and return immediately with ``result=None``

    Returns
    -------
    response: dict with ``status`` and either a serialized ``result`` or
        error information produced by ``error_message``
    """
    function = pickle.loads(function)
    if is_coro is None:
        is_coro = iscoroutinefunction(function)
    else:
        warnings.warn(
            "The is_coro= parameter is deprecated. "
            "We now automatically detect coroutines/async functions"
        )
    assert wait or is_coro, "Combination not supported"
    args = pickle.loads(args) if args else ()
    # Always rebind `kwargs` to a fresh dict.  Previously the mutable
    # default (`kwargs={}`) was mutated below when injecting dask_worker /
    # dask_scheduler, leaking those references into every later call that
    # omitted kwargs.
    kwargs = pickle.loads(kwargs) if kwargs else {}
    if has_arg(function, "dask_worker"):
        kwargs["dask_worker"] = server
    if has_arg(function, "dask_scheduler"):
        kwargs["dask_scheduler"] = server
    logger.info("Run out-of-band function %r", funcname(function))
    try:
        if not is_coro:
            result = function(*args, **kwargs)
        else:
            if wait:
                result = await function(*args, **kwargs)
            else:
                # Fire-and-forget: schedule on the event loop, don't await.
                server.loop.add_callback(function, *args, **kwargs)
                result = None
    except Exception as e:
        logger.warning(
            "Run Failed\nFunction: %s\nargs: %s\nkwargs: %s\n",
            str(funcname(function))[:1000],
            convert_args_to_str(args, max_len=1000),
            convert_kwargs_to_str(kwargs, max_len=1000),
            exc_info=True,
        )
        response = error_message(e)
    else:
        response = {"status": "OK", "result": to_serialize(result)}
    return response
# Backwards-compatible alias for the collection of all live Worker instances.
_global_workers = Worker._instances
try:
    from .diagnostics import nvml
except Exception:
    # GPU monitoring is optional: silently skip registering GPU metrics when
    # the nvml diagnostics module cannot be imported for any reason.
    pass
else:
    @gen.coroutine
    def gpu_metric(worker):
        # Collect real-time GPU statistics off the event-loop thread.
        result = yield offload(nvml.real_time)
        return result
    DEFAULT_METRICS["gpu"] = gpu_metric
    def gpu_startup(worker):
        # One-shot static GPU information gathered at worker startup.
        return nvml.one_time()
    DEFAULT_STARTUP_INFORMATION["gpu"] = gpu_startup
| 34.918212 | 104 | 0.543469 |
553b30d54060e4ef460932e0345413f2c29aaef2 | 11,408 | py | Python | perception/evaluation/model_evaluation.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 3 | 2021-06-19T10:49:26.000Z | 2022-03-26T11:31:28.000Z | perception/evaluation/model_evaluation.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | 1 | 2021-10-12T15:40:55.000Z | 2021-10-12T15:40:55.000Z | perception/evaluation/model_evaluation.py | jostl/masters-thesis | 211e1f12a07428d37507e2bddc808f6da1149efb | [
"MIT"
] | null | null | null | import functools
from collections import defaultdict
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from perception.custom_datasets import ComparisonDataset
from perception.utils.visualization import plot_segmentation, plot_image, display_images_horizontally
from perception.utils.segmentation_labels import DEFAULT_CLASSES
def mean_jaccard_index(target, predictions):
    """Mean IoU (Jaccard index) for batched one-hot segmentation masks.

    Both inputs are (batch, classes, H, W) boolean/one-hot tensors.  Classes
    absent from both target and prediction of an image are excluded from
    that image's mean.  Returns the batch-mean IoU and the per-image,
    per-class IoU matrix, both converted to numpy.
    """
    overlap = torch.sum(torch.logical_and(target, predictions), dim=(-2, -1))
    combined = torch.sum(torch.logical_or(target, predictions), dim=(-2, -1))
    # A zero union means the class is absent from the image; the epsilon
    # keeps the division defined and yields an IoU of 0 for such classes.
    per_class_iou = overlap / (combined + 0.00000000001)
    present_per_image = (combined != 0).sum(dim=1)
    per_image_mean = per_class_iou.sum(dim=1) / present_per_image
    return torch.mean(per_image_mean).numpy(), per_class_iou.numpy()
def weighted_jaccard_index(target, predictions):
    """Class-frequency-weighted IoU for batched one-hot segmentation masks.

    Per image, each class's IoU is weighted by the fraction of target pixels
    belonging to that class; the weighted scores are then averaged over the
    batch.  Returns a numpy scalar.
    """
    # Fraction of each image's pixels belonging to each class.
    counts = torch.sum(target, dim=(-2, -1), dtype=torch.int32)
    total_pixels = torch.sum(counts, dim=(-1))[0]
    weights = counts / total_pixels
    overlap = torch.sum(torch.logical_and(target, predictions), dim=(-2, -1))
    combined = torch.sum(torch.logical_or(target, predictions), dim=(-2, -1))
    # Epsilon keeps the division defined for classes absent from an image.
    per_class_iou = overlap / (combined + 0.00000000001)
    weighted_per_image = torch.sum(per_class_iou * weights, dim=(-1))
    return torch.mean(weighted_per_image).numpy()
def rmse(targets, predictions):
    """Root mean squared error, averaged over every element (all pixels of
    all images in a batch, or of a single image)."""
    return np.sqrt(np.mean(np.square(targets - predictions)))
def accuracy_within_threshold(targets, predictions, threshold=1.25):
    """Depth-estimation accuracy: fraction of pixels whose ratio delta
    ``max(target/pred, pred/target)`` falls strictly below ``threshold``.

    Works on single images and on batches.
    """
    deltas = np.maximum(targets / predictions, predictions / targets)
    within = deltas < threshold
    uniques, counts = np.unique(within, return_counts=True)
    if len(counts) > 1:
        # np.unique returns sorted uniques, so counts = (n_false, n_true).
        accuracy = counts[1] / (counts[0] + counts[1])
    elif True in uniques:
        # Every pixel is within the threshold.
        accuracy = 1.
    else:
        accuracy = 0.
        print("Accuracy within threshold warning: Accuracy is 0. uniques:", uniques)
    return accuracy
def compare_models(data_folder, segmentation_models, depth_models, batch_size=1, max_n_instances=None,
                   n_classes=len(DEFAULT_CLASSES)+1):
    """Evaluate segmentation and depth model predictions against targets.

    Iterates over the comparison dataset once, accumulating per-model
    metrics batch by batch, then prints the batch-averaged results.

    Args:
        data_folder: folder with ground-truth data (training/test1/test2)
        segmentation_models: list of (name, predictions_location) tuples
        depth_models: list of (name, predictions_location, invert) tuples
        batch_size: batch size used for metric accumulation
        max_n_instances: optional cap on the number of dataset instances
        n_classes: number of segmentation classes (incl. background)
    """
    targets = ComparisonDataset(data_folder, segmentation_models, depth_models, max_n_instances=max_n_instances)
    dataloader = DataLoader(targets, batch_size=batch_size, shuffle=False, num_workers=0,
                            pin_memory=True)
    # semantic segmentation metrics (accumulated over batches, keyed by model name)
    mean_intersection_over_union_accumulated = defaultdict(int)
    weighted_mean_intersection_over_union_accumulated = defaultdict(int)
    # depth estimation metrics (accumulated over batches, keyed by model name)
    accuracy_with_threshold_accumulated = defaultdict(int)
    accuracy_with_threshold2_accumulated = defaultdict(int)
    accuracy_with_threshold3_accumulated = defaultdict(int)
    rmse_accumulated = defaultdict(int)
    classwise_iou_accumulated = defaultdict(functools.partial(np.zeros, n_classes))
    classwise_iou_class_counts = defaultdict(functools.partial(np.zeros, n_classes))
    for rgb_targets, segmentation_targets, depth_targets, segmentation_preds, depth_preds in tqdm(dataloader):
        #print("SEMANTIC SEGMENTATION:")
        #pepe = depth_targets[0].numpy().transpose(1, 2, 0)
        #plot_image(depth_targets[0].numpy().transpose(1, 2, 0), title="ground truth")
        #plot_image(depth_targets[0].numpy().transpose(1, 2, 0), title="ground truth gray", cmap="gray")
        for model in segmentation_preds:
            mean_iou, batch_classwise_iou = mean_jaccard_index(segmentation_targets, segmentation_preds[model])
            mean_intersection_over_union_accumulated[model] += mean_iou
            for img_classwise_iou in batch_classwise_iou:
                classwise_iou_accumulated[model] += img_classwise_iou
                # count if class actually is in img, to get correct averages
                for i_class in range(len(img_classwise_iou)):
                    if img_classwise_iou[i_class] > 0:
                        classwise_iou_class_counts[model][i_class] += 1
            weighted_mean_intersection_over_union_accumulated[model] \
                += weighted_jaccard_index(segmentation_targets, segmentation_preds[model])
            #img = segmentation_preds[model].numpy()[0].transpose(1, 2, 0)
            #plot_segmentation(img, title=model)
        #print("\nDEPTH ESTIMATION")
        for model in depth_preds:
            # Standard depth metrics: delta < 1.25, 1.25^2, 1.25^3 and RMSE.
            accuracy_with_threshold_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
                                                                                    threshold=1.25)
            accuracy_with_threshold2_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
                                                                                     threshold=1.25**2)
            accuracy_with_threshold3_accumulated[model] += accuracy_within_threshold(depth_targets, depth_preds[model],
                                                                                     threshold=1.25**3)
            rmse_accumulated[model] += rmse(depth_targets, depth_preds[model])
            #img = depth_preds[model].numpy()[0].transpose(1, 2, 0)
            #plot_image(img, title=model, cmap="gray")
    n_batches = np.ceil(len(targets) / batch_size)
    # calculate average over batches, semantic segmentation
    mean_intersection_over_union_avg = {}
    weighted_mean_intersection_over_union_avg = {}
    class_intersection_over_union_avg = defaultdict(functools.partial(np.zeros, n_classes))
    for model in segmentation_models:
        model_name = model[0]
        mean_intersection_over_union_avg[model_name] = mean_intersection_over_union_accumulated[model_name] / n_batches
        weighted_mean_intersection_over_union_avg[model_name] = weighted_mean_intersection_over_union_accumulated[model_name] / n_batches
        # Per-class averages divide by how often the class was present
        # (epsilon guards the never-present case).
        for i_class in range(len(classwise_iou_accumulated[model_name])):
            class_intersection_over_union_avg[model_name][i_class] = classwise_iou_accumulated[model_name][i_class] / (classwise_iou_class_counts[model_name][i_class]+0.0000000001)
        print("---")
        print("Model:", model_name, "has mean jaccard index avg:", mean_intersection_over_union_avg[model_name])
        print("Model:", model_name, "has weighted jaccard index avg:", weighted_mean_intersection_over_union_avg[model_name])
        print("Model:", model_name, "has classwise iou's:", [i for i in class_intersection_over_union_avg[model_name]])
        print("---")
    # calculate average over batches, depth estimation
    accuracy_within_threshold_avg = {}
    accuracy_within_threshold2_avg = {}
    accuracy_within_threshold3_avg = {}
    rmse_avg = {}
    for model in depth_models:
        model_name = model[0]
        accuracy_within_threshold_avg[model_name] = accuracy_with_threshold_accumulated[model_name] / n_batches
        accuracy_within_threshold2_avg[model_name] = accuracy_with_threshold2_accumulated[model_name] / n_batches
        accuracy_within_threshold3_avg[model_name] = accuracy_with_threshold3_accumulated[model_name] / n_batches
        rmse_avg[model_name] = rmse_accumulated[model_name] / n_batches
        print("---")
        print("Model:", model_name, "has accuracy within threshold avg:", accuracy_within_threshold_avg[model_name])
        print("Model:", model_name, "has accuracy within threshold2 avg:", accuracy_within_threshold2_avg[model_name])
        print("Model:", model_name, "has accuracy within threshold3 avg:", accuracy_within_threshold3_avg[model_name])
        print("Model:", model_name, "has rmse avg:", rmse_avg[model_name])
        print("---")
if __name__ == "__main__":
    # Which evaluation split to compare on.
    test = "test2"
    # location of where to find training, test1, test2
    data_folder = Path("data/perception") / test
    predictions_folder = Path("data/perception/predictions")
    # stored in the format (name, location)
    segmentation_models = [("unet_resnet50", predictions_folder / "semseg/unet_resnet50" / test),
                           ("unet_resnet50_weighted_2.5", predictions_folder / "semseg/unet_resnet50_weighted_2.5" / test),
                           ("unet_resnet50_weighted_5", predictions_folder / "semseg/unet_resnet50_weighted_5" / test),
                           ("fcn_resnet101", predictions_folder / "semseg/fcn_resnet101" / test),
                           ("deeplabv3-mobilenet", predictions_folder / "semseg/deeplabv3_mobilenet" / test),
                           ("deeplabv3-resnet50", predictions_folder / "semseg/deeplabv3_resnet50" / test),
                           ("deeplabv3-resnet101", predictions_folder / "semseg/deeplabv3_resnet101" / test),
                           ]
    #("semantic-test1 (ground truf)", predictions_folder / "semantic_test1"),
    #("semantic-test2 (ground truf)", predictions_folder / "semantic_test2")]
    # stored in the format (name, location, invert_pixels_in_loading)
    # ("test1-depth", data_folder / "depth", False)
    depth_models = [("midas-small", predictions_folder / "depth/midas_small" / test, True),
                    ("midas-large", predictions_folder / "depth/midas_large" / test, True),
                    ("UNet", predictions_folder / "depth/unet" / test, False),
                    ("UNet-resnet34", predictions_folder / "depth/unet_resnet34" / test, False)
                    ]
    #("depth-test1", predictions_folder / "depth_test1", False),
    #("depth-test2", predictions_folder / "depth_test2", False)
    #]
    compare_models(data_folder, segmentation_models, depth_models, batch_size=20, max_n_instances=None)
0f1e7989cec68aa304b0a7ebc239511f88493830 | 2,694 | py | Python | tests/terraform/runner/test_runner.py | fossabot/checkov | d09938cffe3588dd1f472bcf2382e07fa7f9010b | [
"Apache-2.0"
] | null | null | null | tests/terraform/runner/test_runner.py | fossabot/checkov | d09938cffe3588dd1f472bcf2382e07fa7f9010b | [
"Apache-2.0"
] | null | null | null | tests/terraform/runner/test_runner.py | fossabot/checkov | d09938cffe3588dd1f472bcf2382e07fa7f9010b | [
"Apache-2.0"
] | null | null | null | import os
import unittest
from checkov.terraform.runner import Runner
from checkov.terraform.context_parsers.registry import parser_registry
class TestRunnerValid(unittest.TestCase):
    """Integration tests running the Terraform checker on bundled resources."""

    def _assert_report_well_formed(self, report):
        """Shared structural checks on a scan report; returns its summary dict."""
        report_json = report.get_json()
        self.assertTrue(isinstance(report_json, str))
        self.assertIsNotNone(report_json)
        self.assertIsNotNone(report.get_test_suites())
        return report.get_summary()

    def test_runner_valid_tf(self):
        # Directory containing passing checks, failing checks and a parse error.
        current_dir = os.path.dirname(os.path.realpath(__file__))
        valid_dir_path = current_dir + "/resources/example"
        runner = Runner()
        report = runner.run(root_folder=valid_dir_path, external_checks_dir=None)
        summary = self._assert_report_well_formed(report)
        self.assertEqual(report.get_exit_code(), 1)
        self.assertGreaterEqual(summary['passed'], 1)
        self.assertGreaterEqual(summary['failed'], 1)
        self.assertEqual(summary["parsing_errors"], 1)
        # Smoke-test every report output format.
        report.print_json()
        report.print_console()
        report.print_junit_xml()

    def test_runner_passing_valid_tf(self):
        # Directory whose checks all pass: no failures, no parse errors.
        current_dir = os.path.dirname(os.path.realpath(__file__))
        passing_tf_dir_path = current_dir + "/resources/valid_tf_only_passed_checks"
        print("testing dir" + passing_tf_dir_path)
        runner = Runner()
        report = runner.run(root_folder=passing_tf_dir_path, external_checks_dir=None)
        summary = self._assert_report_well_formed(report)
        # self.assertEqual(report.get_exit_code(), 0)
        self.assertGreaterEqual(summary['passed'], 1)
        self.assertEqual(summary['failed'], 0)
        self.assertEqual(summary["parsing_errors"], 0)

    def test_runner_specific_file(self):
        # Scanning a single file (no root folder) should behave the same.
        current_dir = os.path.dirname(os.path.realpath(__file__))
        passing_tf_file_path = current_dir + "/resources/valid_tf_only_passed_checks/example.tf"
        runner = Runner()
        report = runner.run(root_folder=None, external_checks_dir=None, files=[passing_tf_file_path])
        summary = self._assert_report_well_formed(report)
        # self.assertEqual(report.get_exit_code(), 0)
        self.assertGreaterEqual(summary['passed'], 1)
        self.assertEqual(summary['failed'], 0)
        self.assertEqual(summary["parsing_errors"], 0)

    def tearDown(self):
        # Reset shared parser state so context does not leak between tests.
        parser_registry.definitions_context = {}
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| 39.043478 | 101 | 0.697105 |
368d0fc024a84a21b4703d6949b60e4e1ed84f04 | 1,732 | py | Python | server/kraken/migrations/versions/8200318b9e18_added_several_stats_fields_to_run.py | fossabot/kraken-3 | 7ac472de8ff6f44aac4dbd231f896f00e6f3b278 | [
"Apache-2.0"
] | 66 | 2020-08-14T12:52:39.000Z | 2022-03-31T13:56:25.000Z | server/kraken/migrations/versions/8200318b9e18_added_several_stats_fields_to_run.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | 110 | 2020-07-23T07:12:09.000Z | 2022-03-26T05:54:18.000Z | server/kraken/migrations/versions/8200318b9e18_added_several_stats_fields_to_run.py | kinsanras/kraken | 3938ee4e65ba8f67ec5ee0e912b43fad84548f2c | [
"Apache-2.0"
] | 4 | 2021-03-10T05:25:03.000Z | 2022-01-24T10:12:33.000Z | """added several stats fields to Run
Revision ID: 8200318b9e18
Revises: b908e219f07f
Create Date: 2020-02-08 11:50:24.255977
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8200318b9e18'
down_revision = 'b908e219f07f'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the obsolete preferences table and add run statistics columns."""
    op.drop_table('preferences')
    # All new statistics columns share the same type and nullability.
    for column_name in ('issues_new', 'issues_total', 'jobs_error', 'jobs_total',
                        'tests_not_run', 'tests_passed', 'tests_total'):
        op.add_column('runs', sa.Column(column_name, sa.Integer(), nullable=True))
def downgrade():
    """Remove the run statistics columns and recreate the preferences table."""
    for column_name in ('tests_total', 'tests_passed', 'tests_not_run', 'jobs_total',
                        'jobs_error', 'issues_total', 'issues_new'):
        op.drop_column('runs', column_name)
    op.create_table(
        'preferences',
        sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
        sa.Column('name', sa.VARCHAR(length=50), autoincrement=False, nullable=True),
        sa.Column('value', sa.TEXT(), autoincrement=False, nullable=True),
        sa.Column('val_type', sa.VARCHAR(length=8), autoincrement=False, nullable=True),
        sa.PrimaryKeyConstraint('id', name='preferences_pkey'))
e2e3d64709e71213834b4ba808443d21fcf847d4 | 1,234 | py | Python | process_by_group.py | greenmoon55/yangpu8k | 2d306d0a06aa1c29a4b76136786a46cb6875733d | [
"MIT"
] | null | null | null | process_by_group.py | greenmoon55/yangpu8k | 2d306d0a06aa1c29a4b76136786a46cb6875733d | [
"MIT"
] | null | null | null | process_by_group.py | greenmoon55/yangpu8k | 2d306d0a06aa1c29a4b76136786a46cb6875733d | [
"MIT"
] | null | null | null | import csv
from collections import defaultdict
import datetime
import pygal
def hms_to_seconds(t):
    """Convert an 'H:MM:SS' time string into a total number of seconds."""
    h, m, s = (int(part) for part in t.split(':'))
    return (h * 60 + m) * 60 + s
def get_tuples():
    """Read result.csv and return a list of (uid, seconds) tuples.

    Rows whose time column holds the literal string 'None' are skipped.
    """
    tuples = []
    # NOTE(review): binary mode ('rb') is the Python 2 csv idiom; under
    # Python 3 this would need mode 'r' with newline='' -- the rest of the
    # script (print statements) is Python 2 only anyway.
    with open('result.csv', 'rb') as csvfile:
        r = csv.reader(csvfile, delimiter=',', quotechar='|')
        for row in r:
            uid = row[0]
            t = row[1]
            if t == 'None':
                # No timestamp was recorded for this row; skip it.
                continue
            t = hms_to_seconds(t)
            tuples.append((uid, t))
    return tuples
def draw(dictlist):
    """Render one line per group (labelled "1".."n") over 90 x positions
    and write the chart to chart.svg.

    Parameters
    ----------
    dictlist: sequence of per-group value lists, one value per x position
    """
    # The `from datetime import datetime, timedelta` that used to live here
    # was never referenced and has been removed.
    date_chart = pygal.Line(x_label_rotation=20, x_labels_major_every=5, show_minor_x_labels=False)
    date_chart.x_labels = range(90)
    for i, d in enumerate(dictlist):
        date_chart.add(str(i+1), d)
    date_chart.render_to_file('chart.svg')
if __name__ == "__main__":
    # Python 2 script: uses `print` statements and integer division below.
    results = get_tuples()
    # One histogram per group of 500 uids (5 groups total).
    dictlist = [defaultdict(int) for i in range(5)]
    for row in results:
        time = row[1]
        # Bucket the duration into 2-minute bins (integer division on Py2).
        minute = time / 60 / 2
        group = int(row[0]) / 500
        dictlist[group][minute] += 1
    values = []
    for d in dictlist:
        # d.get(k) yields None for empty bins -- presumably rendered as gaps
        # by pygal; confirm before changing this to d.get(k, 0).
        values.append([d.get(k) for k in range(90)])
    print values
    draw(values)
| 26.255319 | 99 | 0.5859 |
16682c4663abe0aae7379f74ca3ee01ce07b13a4 | 1,626 | py | Python | autogluon/searcher/grid_searcher.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | null | null | null | autogluon/searcher/grid_searcher.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | null | null | null | autogluon/searcher/grid_searcher.py | jhutchings1/autogluon | 9a0eb8a8f7c88cd09b081adf5d4c6c281d113d75 | [
"Apache-2.0"
] | 2 | 2020-12-13T16:40:04.000Z | 2021-03-08T09:14:16.000Z | __all__ = ['GridSearcher']
from .searcher import BaseSearcher
from sklearn.model_selection import ParameterGrid
class GridSearcher(BaseSearcher):
    """Exhaustive grid searcher over purely categorical search spaces.

    Every hyperparameter in ``configspace`` must be of type
    :class:`autogluon.space.Categorical`; the cartesian product of all
    choices is enumerated up front and handed out one configuration at
    a time.

    Examples
    --------
    >>> import autogluon as ag
    >>> @ag.args(
    ...     x=ag.space.Categorical(0, 1, 2),
    ...     y=ag.space.Categorical('a', 'b', 'c'))
    >>> def train_fn(args, reporter):
    ...     pass
    >>> searcher = ag.searcher.GridSearcher(train_fn.cs)
    >>> searcher.get_config()
    Number of configurations for grid search is 9
    {'x.choice': 2, 'y.choice': 2}
    """
    def __init__(self, configspace, **kwargs):
        super().__init__(
            configspace, reward_attribute=kwargs.get('reward_attribute'))
        grid = {}
        for name in configspace.get_hyperparameter_names():
            hp = configspace.get_hyperparameter(name)
            kind = str(type(hp)).lower()
            # Grid enumeration is only defined for discrete choice spaces.
            assert 'categorical' in kind, \
                'Only Categorical is supported, but {} is {}'.format(name, kind)
            grid[name] = hp.choices
        self._configs = list(ParameterGrid(grid))
        print('Number of configurations for grid search is {}'.format(len(self._configs)))

    def __len__(self):
        """Number of configurations not yet handed out."""
        return len(self._configs)

    def get_config(self):
        """Pop and return the next unexplored hyperparameter configuration."""
        return self._configs.pop()
| 35.347826 | 109 | 0.634071 |
0da451172ea694551e075692451622bb538c1dad | 2,932 | py | Python | apps/data_taking_scripts/2016-06-jpl-hex-271/heterodyne_scan_with_blackbody.py | danielflanigan/kid_readout | 07202090d468669200cab78297122880c1c03e87 | [
"BSD-2-Clause"
] | null | null | null | apps/data_taking_scripts/2016-06-jpl-hex-271/heterodyne_scan_with_blackbody.py | danielflanigan/kid_readout | 07202090d468669200cab78297122880c1c03e87 | [
"BSD-2-Clause"
] | null | null | null | apps/data_taking_scripts/2016-06-jpl-hex-271/heterodyne_scan_with_blackbody.py | danielflanigan/kid_readout | 07202090d468669200cab78297122880c1c03e87 | [
"BSD-2-Clause"
] | null | null | null | import time
import numpy as np
from kid_readout.interactive import *
from kid_readout.measurement import acquire
from kid_readout.roach import r2heterodyne, attenuator, hardware_tools
from equipment.custom import mmwave_source
from equipment.hittite import signal_generator
from equipment.srs import lockin
from kid_readout.equipment.agilent_33220 import FunctionGenerator
logger.setLevel(logging.DEBUG)
# --- Instrument setup (Python 2 script: note the bare print statements) ---
fg = FunctionGenerator()
fg.set_load_ohms(1e6)
hittite = signal_generator.Hittite(ipaddr='192.168.0.200')
hittite.set_power(0)
hittite.on()
# 148 GHz output after the x12 multiplier chain.
hittite.set_freq(148e9/12.)
lockin = lockin.Lockin(LOCKIN_SERIAL_PORT)
tic = time.time()
print lockin.identification
print time.time()-tic
tic = time.time()
print lockin.fast_state
print time.time()-tic
source = mmwave_source.MMWaveSource()
source.set_attenuator_turns(3.0,3.0)
source.multiplier_input = 'hittite'
source.waveguide_twist_angle = 45
source.ttl_modulation_source = 'roach'
setup = hardware.Hardware(hittite, source,lockin)
ri = hardware_tools.r2_with_mk1(1000.)
ri.iq_delay=-1
ri.set_dac_atten(0)
ri.set_fft_gain(6)
# --- Sweep tone layout: offsets around each LO in units of FFT bins ---
nsamp = 2**15
step = 1
nstep = 32
#f0binned = np.round(f0s * nsamp / 512.0) * 512.0 / nsamp
offset_bins = np.arange(-(nstep), (nstep)) * step
offsets = offset_bins * 512.0 / nsamp
# Measure the reference ZBD voltage with modulation enabled.
ri.set_modulation_output(7)
time.sleep(1)
lockin.auto_gain(wait_until_done=True)
time.sleep(3)
rms_voltage, signal_phase = lockin.snap(3, 4)
logger.info("zbd voltage: %f" % rms_voltage)
ri.set_modulation_output('low')
ri.set_lo(1250.)
#legacy.load_heterodyne_sweep_tones(ri,(np.arange(1,129)[None,:]*7/4.+ri.lo_frequency + offsets[:,None]),
#                                  num_tone_samples=nsamp)
# Experiment state recorded alongside every sweep.
state = dict(field_canceling_magnet=False,magnetic_shield=True,cryostat='starcryo',initial_zbd_rms_voltage=rms_voltage)
state.update(**setup.state())
# --- Main acquisition loop: step blackbody heater, sweep four LO bands ---
for heater_voltage in np.arange(0,4,.2):
    logger.info("Measuring at %.1f volts" % heater_voltage)
    fg.set_dc_voltage(heater_voltage)
    fg.enable_output(True)
    if heater_voltage > 0:
        logger.info("Waiting 10 minutes for load to stabilize")
        time.sleep(10*60)
    tic = time.time()
    for lo in 830.+190*np.arange(0,4):
        logger.info("Measuring at LO %.1f" % lo)
        ri.set_lo(lo)
        df = acquire.new_nc_file(suffix='scan_lo_%.1f_MHz' % lo)
        state.update(**setup.state())
        state['heater_voltage'] = heater_voltage
        swa = acquire.run_sweep(ri, (np.arange(1, 257)[None, :] * 7 / 8. + ri.lo_frequency + offsets[:, None]),
                                num_tone_samples=nsamp, length_seconds=0.1, state=state, verbose=True)
        df.write(swa)
        df.close()
    print "elapsed:", (time.time()-tic)/60.0,'minutes'
    #time.sleep(60.)
    # while time.time() - tic < 5*60:
    #     print "waiting... %.1f min remaining" % ((5*60 - (time.time() - tic))/60)
    #     time.sleep(60)
logger.info("Turning off heater")
fg.enable_output(False)
b15065cd4e50700f8d98873d6212b5adb7b68956 | 1,968 | py | Python | gnuradio-3.7.13.4/gr-audio/examples/python/noise.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | 1 | 2021-03-09T07:32:37.000Z | 2021-03-09T07:32:37.000Z | gnuradio-3.7.13.4/gr-audio/examples/python/noise.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | gnuradio-3.7.13.4/gr-audio/examples/python/noise.py | v1259397/cosmic-gnuradio | 64c149520ac6a7d44179c3f4a38f38add45dd5dc | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import audio
from gnuradio import digital
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class my_top_block(gr.top_block):
    # GNU Radio flow graph: pseudorandom noise -> bipolar symbols -> audio out.
    # NOTE: Python 2 code (`raise SystemExit, 1` below).
    def __init__(self):
        gr.top_block.__init__(self)
        parser = OptionParser(option_class=eng_option)
        parser.add_option("-O", "--audio-output", type="string", default="",
                          help="pcm output device name.  E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args ()
        if len(args) != 0:
            # No positional arguments are accepted.
            parser.print_help()
            raise SystemExit, 1
        sample_rate = int(options.sample_rate)
        # Output amplitude kept well below full scale.
        ampl = 0.1
        src = digital.glfsr_source_b(32)        # Pseudorandom noise source
        # Map bits {0,1} to symbols {+ampl, -ampl}.
        b2f = digital.chunks_to_symbols_bf([ampl, -ampl], 1)
        dst = audio.sink(sample_rate, options.audio_output)
        self.connect(src, b2f, dst)
if __name__ == '__main__':
    try:
        my_top_block().run()
    except KeyboardInterrupt:
        # Ctrl-C stops the flow graph cleanly without a traceback.
        pass
| 34.526316 | 83 | 0.675813 |
f72b045654dc44f3155f6d877133a3202b759449 | 5,054 | py | Python | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | null | null | null | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | 2 | 2021-09-29T15:08:25.000Z | 2022-01-13T11:20:58.000Z | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | 1 | 2021-09-10T12:25:08.000Z | 2021-09-10T12:25:08.000Z | # -*- coding: utf-8 -*-
import numpy as np
from graphviz import Source
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from dku_error_analysis_mpp.dku_error_analyzer import DkuErrorAnalyzer
from mealy import _BaseErrorVisualizer, ErrorAnalyzerConstants
from dku_error_analysis_utils import safe_str, format_float
import logging
logger = logging.getLogger(__name__)
# Prefix all plugin log lines and show INFO and above.
logging.basicConfig(level=logging.INFO, format='Error Analysis Plugin | %(levelname)s - %(message)s')
# Global matplotlib styling applied to every plot produced by this module.
plt.rc('font', family="sans-serif")
SMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE = 8, 10, 12
plt.rc('axes', titlesize=BIGGER_SIZE, labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
# Thick white hatch lines distinguish overlaid histogram bars.
plt.rc("hatch", color="white", linewidth=4)
class DkuErrorVisualizer(_BaseErrorVisualizer):
    """
    Visual utilities for analyzing the error classifier built by a
    DkuErrorAnalyzer: rendering its decision tree and plotting per-leaf
    feature distributions against the global baseline.
    """
    def __init__(self, error_analyzer):
        # Only DkuErrorAnalyzer exposes the `tree` attribute used below.
        if not isinstance(error_analyzer, DkuErrorAnalyzer):
            raise TypeError('You need to input a DkuErrorAnalyzer object.')
        super(DkuErrorVisualizer, self).__init__(error_analyzer)
        self._tree = error_analyzer.tree
    def plot_error_tree(self, size=(50, 50)):
        """ Plot the graph of the decision tree
        Args:
            size (tuple): Size of the output plot as (width, length), in inches.
        """
        return Source(self._tree.to_dot_string(size))
    def plot_feature_distributions_on_leaves(self, leaf_selector=None, top_k_features=ErrorAnalyzerConstants.TOP_K_FEATURES,
                                             show_global=True, show_class=False, rank_leaves_by="total_error_fraction", nr_bins=10, figsize=(15, 10)):
        """ Return plot of error node feature distribution and compare to global baseline

        Args:
            leaf_selector: which leaves to plot (None = ranked selection)
            top_k_features (int): number of top-ranked features to plot per leaf
            show_global (bool): overlay the root-node (global) distribution
            show_class (bool): split histograms per target class
            rank_leaves_by (str): metric used to rank/select leaves
            nr_bins (int): number of histogram bins
            figsize (tuple): size of each generated figure, in inches
        """
        leaf_nodes = self._get_ranked_leaf_ids(leaf_selector, rank_leaves_by)
        ranked_features = self._tree.ranked_features[:top_k_features]
        nr_leaves, nr_features = len(leaf_nodes), len(ranked_features)
        logger.info("{} lea{} selected: {}".format(nr_leaves,
                                                   "f" if nr_leaves == 1 else "ves",
                                                   leaf_nodes))
        logger.info("{} feature distribution{} plotted: {}".format(nr_features,
                                                                   "" if nr_features == 1 else "s",
                                                                   [f["name"] for f in ranked_features]))
        for leaf_id in leaf_nodes:
            leaf = self._tree.get_node(leaf_id)
            # Figure title carries the leaf's two class probabilities.
            suptitle = 'Leaf {} ({}: {}'.format(leaf.id, leaf.probabilities[0][0], format_float(leaf.probabilities[0][1], 3))
            suptitle += ', {}: {})'.format(leaf.probabilities[1][0], format_float(leaf.probabilities[1][1], 3))
            for feature in ranked_features:
                feature_name = feature["name"]
                leaf_stats = self._tree.get_stats(leaf.id, feature_name, nr_bins)
                feature_is_numerical = feature["numerical"]
                bins = leaf_stats["bin_edge"] if feature_is_numerical else leaf_stats["bin_value"]
                if show_global:
                    # Node 0 is the root: its histogram is the global baseline.
                    root_samples = self._tree.get_node(0).samples[0]
                    root_stats = self._tree.get_stats(0, feature_name, nr_bins, bins)  # TODO: optimize
                    if show_class:
                        root_hist_data = {}
                        for class_value, bar_heights in root_stats["target_distrib"].items():
                            root_hist_data[class_value] = np.array(bar_heights)/root_samples
                    else:
                        root_hist_data, root_prediction = {}, self._tree.get_node(0).prediction
                        root_hist_data[root_prediction] = np.array(root_stats["count"])/root_samples
                else:
                    root_hist_data = None
                if bins:
                    # Normalize bar heights by the leaf's sample count.
                    leaf_hist_data = {}
                    if show_class:
                        for class_value, bar_heights in leaf_stats["target_distrib"].items():
                            leaf_hist_data[class_value] = np.array(bar_heights)/leaf.samples[0]
                    else:
                        leaf_hist_data = {leaf.prediction: np.array(leaf_stats["count"])/leaf.samples[0]}
                else:
                    leaf_hist_data = None
                    logger.info("No values for the feature {} at the leaf {}".format(feature_name, leaf.id))
                if show_global:
                    # Fall back to the root's binning when the leaf had no values.
                    bins = root_stats["bin_edge"] if feature_is_numerical else root_stats["bin_value"]
                x_ticks = range(len(bins))
                _BaseErrorVisualizer._add_new_plot(figsize, bins, x_ticks, feature_name, suptitle)
                _BaseErrorVisualizer._plot_feature_distribution(x_ticks, feature_is_numerical, leaf_hist_data, root_hist_data)
        plt.show()
| 49.54902 | 149 | 0.609616 |
4ca448669f486e9b4a60650e476f18ebb46f3575 | 7,326 | py | Python | Chapter04/bitcoin_address_lookup.py | raminfp/Learning-Python-for-Forensics-Second-Edition | 6643d6a809c2b6d391e49babd34f2b84fa9f5e89 | [
"MIT"
] | 30 | 2019-02-08T09:43:52.000Z | 2021-12-19T09:35:11.000Z | Chapter04/bitcoin_address_lookup.py | raminfp/Learning-Python-for-Forensics-Second-Edition | 6643d6a809c2b6d391e49babd34f2b84fa9f5e89 | [
"MIT"
] | 11 | 2021-02-22T12:33:03.000Z | 2022-03-11T23:39:34.000Z | Chapter04/bitcoin_address_lookup.py | cheahengsoon/Learning-Python-for-Forensics-Second-Edition | 97612919073599c8cb107ea3c6a9f27a3a938fad | [
"MIT"
] | 22 | 2019-02-01T18:11:17.000Z | 2022-03-09T06:49:03.000Z | """Final iteration of the Bitcoin JSON transaction parser."""
from __future__ import print_function
import argparse
import csv
import json
import logging
import sys
import os
if sys.version_info[0] == 2:
from urllib2 import urlopen
from urllib2 import URLError
elif sys.version_info[0] == 3:
from urllib.request import urlopen
from urllib.error import URLError
else:
print("Unsupported Python version. Exiting..")
sys.exit(1)
import unix_converter as unix
"""
MIT License
Copyright (c) 2018 Chapin Bryce, Preston Miller
Please share comments and questions at:
https://github.com/PythonForensics/Learning-Python-for-Forensics
or email pyforcookbook@gmail.com
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
# Script metadata; __author__ and __date__ feed the argparse epilog below.
__author__ = 'Preston Miller & Chapin Bryce'
__date__ = '20180729'
# NOTE(review): "This scripts" is a typo, but __description__ is a runtime
# string literal, so it is left byte-identical here.
__description__ = """This scripts downloads address transactions
using blockchain.info public APIs"""
def main(address, output_dir):
    """
    Coordinate the whole lookup: fetch the account JSON, print a
    summary header and export the transactions to CSV.

    :param address: The Bitcoin Address to lookup
    :param output_dir: The output file to write the CSV results
    :return: Nothing
    """
    logging.info('Initiated program for {} address'.format(address))
    logging.info('Obtaining JSON structured data from blockchain.info')
    response = get_address(address)
    account_data = json.loads(response.read())
    print_header(account_data)
    parse_transactions(account_data, output_dir)
def get_address(address):
    """
    Use the blockchain.info Data API to pull down account
    information and transactions for the address of interest.

    :param address: The Bitcoin Address to lookup
    :return: The open HTTP response for the address query
    """
    url = 'https://blockchain.info/address/{}?format=json'
    formatted_url = url.format(address)
    try:
        return urlopen(formatted_url)
    except URLError as e:
        logging.error('URL Error for {}'.format(formatted_url))
        # HTTPError (a URLError subclass) additionally carries the HTTP
        # status code and response headers -- log them when present.
        if hasattr(e, 'code') and hasattr(e, 'headers'):
            logging.debug('{}: {}'.format(e.code, e.reason))
            logging.debug('{}'.format(e.headers))
        print('Received URL Error for {}'.format(formatted_url))
        logging.info('Program exiting...')
        sys.exit(2)
def parse_transactions(account, output_dir):
    """
    Flatten each transaction of the account into a CSV-ready row and
    hand the rows to csv_writer.

    :param account: The JSON decoded account and transaction data
    :param output_dir: The output directory to write the CSV
    results
    :return: Nothing
    """
    msg = 'Parsing transactions...'
    logging.info(msg)
    print(msg)
    rows = []
    for index, tx in enumerate(account['txs']):
        # Map each output address to its value in BTC (satoshi * 1e-8).
        out_values = {entry['addr']: entry['value'] * 10**-8
                      for entry in tx['out']}
        rows.append([
            index,
            unix.unix_converter(tx['time']),
            tx['hash'],
            get_inputs(tx),
            '\n'.join(out_values.keys()),
            '\n'.join(str(v) for v in out_values.values()),
            '{:.8f}'.format(sum(out_values.values())),
        ])
    csv_writer(rows, output_dir)
def print_header(account):
    """
    Print the overall header block with the basic address
    information (balance, totals, transaction count).

    :param account: The JSON decoded account and transaction data
    :return: Nothing
    """
    satoshi = 10**-8  # blockchain.info reports amounts in satoshis
    print('Address:', account['address'])
    print('Current Balance: {:.8f} BTC'.format(
        account['final_balance'] * satoshi))
    print('Total Sent: {:.8f} BTC'.format(
        account['total_sent'] * satoshi))
    print('Total Received: {:.8f} BTC'.format(
        account['total_received'] * satoshi))
    print('Number of Transactions:', account['n_tx'])
    print('{:=^22}\n'.format(''))
def get_inputs(tx):
    """
    Return the input addresses of a transaction, newline-separated.

    Coinbase (newly generated coin) transactions have an input that
    carries no 'prev_out' address in the blockchain.info API; such
    inputs are skipped instead of raising KeyError.

    :param tx: A single instance of a Bitcoin transaction
    :return: newline-separated input addresses ('' if there are none)
    """
    inputs = []
    for input_addr in tx['inputs']:
        addr = input_addr.get('prev_out', {}).get('addr')
        if addr is not None:
            inputs.append(addr)
    # '\n'.join handles the 0-, 1- and n-element cases uniformly, so the
    # original len()-based branching was unnecessary.
    return '\n'.join(inputs)
def csv_writer(data, output_dir):
    """
    Write the parsed transaction rows to a CSV file, then exit.

    :param data: The parsed transaction data in nested list
    :param output_dir: The output file to write the CSV
    results
    :return: Never returns; sys.exit(0) on success, sys.exit(1) on
        an I/O error
    """
    logging.info('Writing output to {}'.format(output_dir))
    print('Writing output.')
    headers = ['Index', 'Date', 'Transaction Hash',
               'Inputs', 'Outputs', 'Values', 'Total']
    try:
        if sys.version_info[0] == 2:
            csvfile = open(output_dir, 'wb')
        else:
            # newline='' prevents blank lines between rows on Windows.
            csvfile = open(output_dir, 'w', newline='')
        with csvfile:
            writer = csv.writer(csvfile)
            writer.writerow(headers)
            writer.writerows(data)
            # The 'with' block flushes and closes the file on exit; the
            # explicit flush()/close() calls of the original were redundant.
    except IOError as e:
        logging.error("""Error writing output to {}.
        \nGenerated message: {}.""".format(e.filename,
        e.strerror))
        print("""Error writing to CSV file.
        Please check output argument {}""".format(e.filename))
        logging.info('Program exiting.')
        sys.exit(1)
    logging.info('Program exiting.')
    print('Program exiting.')
    sys.exit(0)
if __name__ == '__main__':
    # Command-line entry point.
    arg_parser = argparse.ArgumentParser(
        description='BTC Address Lookup',
        epilog='Developed by ' + __author__ + ' on ' + __date__)
    arg_parser.add_argument('ADDR', help='Bitcoin Address')
    arg_parser.add_argument('OUTPUT', help='Output CSV file')
    arg_parser.add_argument('-l', help="""Specify log directory.
    Defaults to current working directory.""")
    cli_args = arg_parser.parse_args()

    # Resolve the log file location, creating the directory if needed.
    if cli_args.l:
        if not os.path.exists(cli_args.l):
            os.makedirs(cli_args.l)  # create log directory path
        log_file = os.path.join(cli_args.l, 'btc_addr_lookup.log')
    else:
        log_file = 'btc_addr_lookup.log'
    logging.basicConfig(
        filename=log_file, level=logging.DEBUG,
        format='%(asctime)s | %(levelname)s | %(message)s',
        filemode='w')
    logging.info('Starting Bitcoin Address Lookup')
    logging.debug('System ' + sys.platform)
    logging.debug('Version ' + sys.version)

    # Print the script banner.
    print('{:=^22}'.format(''))
    print('{}'.format('Bitcoin Address Lookup'))
    print('{:=^22} \n'.format(''))

    # Run main program
    main(cli_args.ADDR, cli_args.OUTPUT)
92e0db5f7e996c35a559678b91cc5268fe8e1bba | 49,022 | py | Python | .history/src/_fighter_20190423010657.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:25:30.000Z | 2019-12-25T10:25:30.000Z | .history/src/_fighter_20190423010657.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:27:15.000Z | 2019-12-25T10:27:15.000Z | .history/src/_fighter_20190423010657.py | vidalmatheus/MK-Project | 6646020c59367ba0424d73a5861e13bbc0daac1f | [
"MIT"
] | 1 | 2019-12-25T10:50:05.000Z | 2019-12-25T10:50:05.000Z |
from pygame_functions import *
import fightScene
import engine
import menu
import LifeBars
import projectile
class Fighter:
    """One playable fighter: sprite sheets, key bindings, animation
    frame limits and the index constants used to address spriteList."""
    # Index into these lists is the fighter id (0 = Sub-Zero, 1 = Scorpion).
    fighterNames = ["Sub-Zero", "Scorpion"]
    # Movement keys per player: [up/jump, down/crouch, left, right].
    fightMoves = [["w", "s", "a", "d"], ["up", "down", "left", "right"]]
    # Attack keys per player: [jab, strong punch, kick, strong kick,
    # special, block, fatality].
    combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]]
    # Frame counts of each sprite sheet.
    danceLimit = 7
    walkLimit = 9
    jumpLimit = 3
    crouchLimit = 3
    punchLimit = [3, 11, 3, 5, 3]
    kickLimit = [7, 9, 7, 6, 3]
    hitLimit = [3, 3, 6, 2, 3, 14, 11, 10]
    blockLimit = 3
    # Per-fighter-id frame counts for the special move and its hit reaction.
    specialLimit = [12,7]
    hitSpecialLimit = [3,1]
    # Per-fighter-id sound effect names played for the special move.
    specialSound = [["iceSound","Hit10"],["ComeHere"]]
    victoryLimit = 3
    fatalityLimit = 20
    dizzyLimit = 7
    deadLimit = 6
    # Indices into self.spriteList (built in __init__ in this order).
    # moves
    dance = 0
    walk = 1
    jump = 2
    crouch = 3
    # punches
    Apunch = 4 # weak punch
    Bpunch = 5 # strong punch
    Cpunch = 6 # weak crouching punch
    Dpunch = 7 # strong crouching punch: uppercut
    # kicks
    Akick = 8 # weak kick
    Bkick = 9 # strong kick
    Ckick = 10 # weak crouching kick
    Dkick = 11 # strong crouching kick: sweep
    # hits (reactions)
    Ahit = 12 # weak punch
    Bhit = 13 # weak kick
    Chit = 14 # strong punch
    Dhit = 15 # weak crouching kick
    Ehit = 16 # weak crouching punch
    Fhit = 17 # strong kick and strong crouching punch (uppercut)
    Ghit = 18 # strong crouching kick: sweep
    Hhit = 19 # specialMove
    # block
    Ablock = 20
    Bblock = 21
    # special move
    special = 22
    # dizzy
    dizzy = 23
    # dead
    # NOTE(review): 18 duplicates Ghit and breaks the 20..26 sequence --
    # presumably this should be 24; confirm against fight()'s usage.
    dead = 18
    # fatality
    fatality = 25
    fatalityHit = 26 # fatality hit
def __init__(self, id, scenario):
self.fighterId = id
self.name = self.fighterNames[id]
self.move = self.fightMoves[id]
self.combat = self.combatMoves[id]
self.lostOnce = False
self.waitingFatality = False
self.waitTime = [48,240]# '0' é pos derrota e '1' espera do fatality
self.wait = 0
self.isDead = False
if id == 0:
self.life = LifeBars.Player1LifeBar("Subzero")
self.life.setLifePosition([200-self.life.getLifeImage().get_width()/2,10])
self.life.addDamage(99)
else:
self.life = LifeBars.Player2LifeBar("Scorpion")
self.life.setLifePosition([600-self.life.getLifeImage().get_width()/2,10])
self.life.addDamage(99)
# Position
self.x = 150+id*500
if scenario == 1:
self.y = 350
elif scenario == 2:
self.y = 370
elif scenario == 3:
self.y = 400
elif scenario == 4:
self.y = 370
elif scenario == 5:
self.y = 380
elif scenario == 6:
self.y = 380
elif scenario == 7:
self.y = 360
elif scenario == 8:
self.y = 395
# Loading sprites
self.spriteList = []
# moves
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dance.png', self.danceLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/walk.png', self.walkLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/jump.png', self.jumpLimit))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/crouch.png', self.crouchLimit))
# Punch sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Apunch.png', self.punchLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bpunch.png', self.punchLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Cpunch.png', self.punchLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dpunch.png', self.punchLimit[3]))
# Kick sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Akick.png', self.kickLimit[0]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bkick.png', self.kickLimit[1]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ckick.png', self.kickLimit[2]))
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dkick.png', self.kickLimit[3]))
# Hit sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ahit.png', self.hitLimit[0])) # soco fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bhit.png', self.hitLimit[1])) # chute fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Chit.png', self.hitLimit[2])) # soco forte
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Dhit.png', self.hitLimit[3])) # chute agrachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ehit.png', self.hitLimit[4])) # soco agachado fraco
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Fhit.png', self.hitLimit[5])) # chute forte e soco forte agachado (gancho)
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ghit.png', self.hitLimit[6])) # chute agachado forte: banda
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/hitSpecial.png', self.hitSpecialLimit[self.fighterId])) # specialMove
# blocking sprites
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Ablock.png', self.blockLimit)) # defesa em pé
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Bblock.png', self.blockLimit)) # defesa agachado
# special sprite ----------------------------------
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/Special.png', self.specialLimit[self.fighterId])) # Especial
# dizzy sprite ----------------------------------
self.spriteList.append(makeSprite('../res/Char/'+str(self.name)+'/dizzy.png', self.dizzyLimit)) # Dizzy
# finish him sprite ----------------------------------
self.spriteFinish = makeSprite('../res/finishhim.png', 1) # Dizzy
# wins sprite ----------------------------------
if self.fighterId == 0:
self.spriteWins = makeSprite('../res/'+str("Scorpion")+'wins.png', 1) # wins
else:
self.spriteWins = makeSprite('../res/'+str("Sub-Zero")+'wins.png', 1) # wins
self.act()
def getLife(self):
return self.life
def act(self):
# projétil
self.projectileFighter = projectile.Projectile([self.getX(),self.getY()],self.fighterId)
self.projectileFighter.moveProjectile()
# Combat control
# Control reflection var
reflection = False
# Dance vars
self.dancing = True
self.frame_dance = 0
self.dance_step = 1
# Walk vars
self.frame_walk = 0
self.walking = False # Variável de status
# Jump vars
self.jumpHeight = 10 # Altura do pulo
self.jumpCounter = 1 # Contador correspodente à subida e descida do pulo
self.jumping = False # Variável de status
self.frame_jumping = 0
self.jump_step = 1
self.end_jump = True
# Crouch vars
self.crouching = False # Variável de status
self.frame_crouching = 0
self.crouch_step = 1
# Punch vars
self.Apunching = False
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
self.Bpunching = False
self.frame_Bpunching = 0
self.Bpunch_step = 1
self.end_Bpunch = True
self.Cpunching = False
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
self.Dpunching = False
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# Kick vars
self.Akicking = False
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
self.Bkicking = False
self.frame_Bkicking = 0
self.Bkick_step = 1
self.end_Bkick = True
self.Ckicking = False
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
self.Dkicking = False
self.frame_Dkicking = 0
self.Dkick_step = 1
self.end_Dkick = True
# Blocking vars
self.Ablocking = False
self.frame_Ablocking = 0
self.Ablock_step = 1
self.Bblocking = False
self.frame_Bblocking = 0
self.Bblock_step = 1
# Special vars
self.specialMove = False
self.end_special = True
self.frame_special = 0
self.special_step = 1
# Hit vars
self.hit = False
self.downHit = False
self.hitName = ""
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.hitSpecial = False
self.frame_Ahit = 0
self.frame_Bhit = 0
self.frame_Chit = 0
self.frame_Dhit = 0
self.frame_Ehit = 0
self.frame_Fhit = 0
self.frame_Ghit = 0
self.frame_Hhit = 0
self.hit_step = 1
# dizzy vars
self.dizzing = False
self.frame_dizzy = 0
self.dizzy_counter = 1
# dead vars
self.deading = False
self.frame_dead = 0
self.posFighter()
def fight(self, time, nextFrame):
frame_step = 60
"""if self.isDead:
if self.wait > 0:
self.wait = self.wait - 1
self.curr_sprite = self.spriteList[self.Ckick]
self.Ckicking = self.setState()
self.crouching = True
self.end_Ckick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ckick])
changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
if (self.frame_Ckicking == self.kickLimit[2]-1):
self.Ckick_step = -1
if (self.frame_Ckicking == self.kickLimit[2]):
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
elif not self.lostOnce:
self.isDead = False
self.lostOnce = True
self.life.returnLife()
else:
"""
# Animação dos projéteis (iceshot e snake)
if not self.projectileFighter.isProjectileEnded() and self.fighterId == 0:
self.projectileFighter.drawProjectile(time,nextFrame)
elif not self.projectileFighter.isProjectileEnded() and self.fighterId == 1:
if not self.end_special and self.projectileFighter.isProjectileEnded():
self.frame_special = 0
self.special_step = 1
self.end_special = True
self.projectileFighter.endProjectile()
else:
print("SpecialMove")
print("self.end_special: " + str(self.end_special))
self.curr_sprite = self.spriteList[self.special]
self.projectileFighter.startProjectile()
self.projectileFighter.setPos([self.getX(),self.getY()])
self.specialMove = self.setState()
self.setEndState()
self.end_special = False
if time > nextFrame:
moveSprite(self.spriteList[self.special], self.x, self.y, True)
self.setSprite(self.spriteList[self.special])
changeSpriteImage(self.spriteList[self.special], self.frame_special)
self.projectileFighter.drawProjectile(clock(),nextFrame)
self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
if (self.frame_special == self.specialLimit[self.fighterId]-1):
self.special_step = -1
if (self.frame_special == self.specialLimit[self.fighterId]):
self.frame_special = 0
self.special_step = 1
self.end_special = True
self.projectileFighter.endProjectile()
nextFrame += 1*frame_step
return nextFrame
if not self.jumping:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if keyPressed(self.move[0]) and not self.hit:
self.jumping = True
self.end_jump = False
self.curr_sprite = self.spriteList[self.jump]
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> right
elif keyPressed(self.move[3]) and not self.hit:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x += 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
# so the modulus 9 allows it to loop
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> left
elif keyPressed(self.move[2]) and not self.hit:# SEGUNDA MUDANÇA and not self.jumping:
self.curr_sprite = self.spriteList[self.walk]
self.walking = self.setState()
self.setEndState()
self.x -= 6
moveSprite(self.spriteList[self.walk], self.x, self.y, True)
self.setSprite(self.spriteList[self.walk])
changeSpriteImage(self.spriteList[self.walk], self.walkLimit-1-self.frame_walk)
if time > nextFrame:
# There are 9 frames of animation in each direction
self.frame_walk = (self.frame_walk+1) % self.walkLimit
nextFrame += frame_step
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> crouch
elif (keyPressed(self.move[1]) and not self.hit) or self.downHit:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit and not self.Bblocking:
self.curr_sprite = self.spriteList[self.crouch]
self.crouching = self.setState()
self.setEndState()
if time > nextFrame:
if self.end_Cpunch and self.end_Dpunch and self.end_Ckick and self.end_Dkick and not self.hit and not self.downHit and not self.Bblocking:
moveSprite(self.spriteList[self.crouch], self.x, self.y, True)
self.setSprite(self.spriteList[self.crouch])
changeSpriteImage(self.spriteList[self.crouch], self.frame_crouching)
self.frame_crouching = (self.frame_crouching+self.crouch_step) % self.crouchLimit
if self.frame_crouching == self.crouchLimit - 2:
self.crouch_step = 0
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and jab
if ( (keyPressed(self.combat[0]) and self.end_Cpunch) or (not self.end_Cpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Cpunch]
self.Cpunching = self.setState()
self.crouching = True
self.setEndState()
self.end_Cpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Cpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Cpunch])
changeSpriteImage(self.spriteList[self.Cpunch], self.frame_Cpunching)
self.frame_Cpunching = (self.frame_Cpunching+self.Cpunch_step) % (self.punchLimit[2]+1)
if (self.frame_Cpunching == self.punchLimit[2]-1):
self.Cpunch_step = -1
if (self.frame_Cpunching == self.punchLimit[2]):
self.frame_Cpunching = 0
self.Cpunch_step = 1
self.end_Cpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Dpunch) or ( not self.end_Dpunch) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dpunch]
self.Dpunching = self.setState()
self.crouching = True
self.setEndState()
self.end_Dpunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Dpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dpunch])
changeSpriteImage(self.spriteList[self.Dpunch], self.frame_Dpunching)
self.frame_Dpunching = (self.frame_Dpunching+self.Dpunch_step) % (self.punchLimit[3]+1)
if (self.frame_Dpunching == self.punchLimit[3]-1):
self.Dpunch_step = -1
if (self.frame_Dpunching == self.punchLimit[3]):
self.frame_Dpunching = 0
self.Dpunch_step = 1
self.end_Dpunch = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> crouch and kick
elif ( (keyPressed(self.combat[2]) and self.end_Ckick) or ( not self.end_Ckick) ) and (not self.hit) and not self.downHit:
print("Crouch_Kick!")
self.curr_sprite = self.spriteList[self.Ckick]
self.Ckicking = self.setState()
self.crouching = True
self.end_Ckick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ckick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ckick])
changeSpriteImage(self.spriteList[self.Ckick], self.frame_Ckicking)
self.frame_Ckicking = (self.frame_Ckicking+self.Ckick_step) % (self.kickLimit[2]+1)
if (self.frame_Ckicking == self.kickLimit[2]-1):
self.Ckick_step = -1
if (self.frame_Ckicking == self.kickLimit[2]):
self.frame_Ckicking = 0
self.Ckick_step = 1
self.end_Ckick = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> Crouch and strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Dkick) or ( not self.end_Dkick) ) and (not self.hit) and not self.downHit:
self.curr_sprite = self.spriteList[self.Dkick]
self.Dkicking = self.setState()
self.crouching = True
self.end_Dkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Dkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dkick])
changeSpriteImage(self.spriteList[self.Dkick], self.frame_Dkicking)
self.frame_Dkicking = (self.frame_Dkicking+self.Dkick_step) % self.kickLimit[3]
if (self.frame_Dkicking == 0):
self.end_Dkick = True
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> defesa agachado
elif keyPressed(self.combat[5]) and not self.hit and not self.downHit:
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
self.crouching = True
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.Bblock_step) % self.blockLimit
if self.frame_Bblocking == self.blockLimit - 2:
self.Bblock_step = 0
#--------------Hits em agachado--------------------
#Ehit = 16 # chute ou soco agachado fraco
elif self.downHit and self.hitName == "Ehit":
self.curr_sprite = self.spriteList[self.Ehit]
self.Ehitting = self.setState()
self.crouching = True
moveSprite(self.spriteList[self.Ehit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ehit])
changeSpriteImage(self.spriteList[self.Ehit], self.frame_Ehit)
if time > nextFrame:
self.frame_Ehit = (self.frame_Ehit+self.hit_step) % self.hitLimit[4]
if (self.frame_Ehit == self.hitLimit[4] - 1):
self.hit_step = -1
if (self.frame_Ehit == 0):
self.hit_step = 1
self.downHit = False
#BblockHit = 21 hit agachado
elif (self.downHit or self.hit) and self.hitName == "Bblocking":
self.curr_sprite = self.spriteList[self.Bblock]
self.Bblocking = self.setState()
self.crouching = True
if time > nextFrame:
moveSprite(self.spriteList[self.Bblock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bblock])
changeSpriteImage(self.spriteList[self.Bblock], self.frame_Bblocking)
self.frame_Bblocking = (self.frame_Bblocking+self.hit_step) % self.blockLimit
if self.frame_Bblocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Bblocking == 1:
self.hit_step = 1
self.hit = False
self.downHit = False
elif not self.downHit:
self.frame_Bblocking = 0
self.Bblock_step = 1
self.Bblocking = False
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> jab
elif ((keyPressed(self.combat[0]) and self.end_Apunch) or ( not self.end_Apunch) ) and (not self.hit) :
self.curr_sprite = self.spriteList[self.Apunch]
self.Apunching = self.setState()
self.setEndState()
self.end_Apunch = False
if time > nextFrame:
moveSprite(self.spriteList[self.Apunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Apunch])
changeSpriteImage(self.spriteList[self.Apunch], self.frame_Apunching)
self.frame_Apunching = (self.frame_Apunching+self.Apunch_step) % (self.punchLimit[0]+1)
if (self.frame_Apunching == self.punchLimit[0]-1):
self.Apunch_step = -1
if (self.frame_Apunching == self.punchLimit[0]):
self.frame_Apunching = 0
self.Apunch_step = 1
self.end_Apunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong punch
elif ( (keyPressed(self.combat[1]) and self.end_Bpunch) or ( not self.end_Bpunch) ) and (not self.hit) :
self.curr_sprite = self.spriteList[self.Bpunch]
self.Bpunching = self.setState()
self.end_Bpunch = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bpunch], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bpunch])
changeSpriteImage(self.spriteList[self.Bpunch], self.frame_Bpunching)
self.frame_Bpunching = (self.frame_Bpunching+self.Bpunch_step) % self.punchLimit[1]
if (self.frame_Bpunching == 0):
self.end_Bpunch = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> kick
elif ( (keyPressed(self.combat[2]) and self.end_Akick) or ( not self.end_Akick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Akick]
self.Akicking = self.setState()
self.end_Akick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Akick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Akick])
changeSpriteImage(self.spriteList[self.Akick], self.frame_Akicking)
self.frame_Akicking = (self.frame_Akicking+self.Akick_step) % (self.kickLimit[0]+1)
if (self.frame_Akicking == self.kickLimit[0]-1):
self.Akick_step = -1
if (self.frame_Akicking == self.kickLimit[0]):
self.frame_Akicking = 0
self.Akick_step = 1
self.end_Akick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> strong kick
elif ( (keyPressed(self.combat[3]) and self.end_Bkick) or ( not self.end_Bkick) ) and (not self.hit):
self.curr_sprite = self.spriteList[self.Bkick]
self.Bkicking = self.setState()
self.end_Bkick = self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Bkick], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bkick])
changeSpriteImage(self.spriteList[self.Bkick], self.frame_Bkicking)
self.frame_Bkicking = (self.frame_Bkicking+self.Bkick_step) % self.kickLimit[1]
if (self.frame_Bkicking == 0):
self.end_Bkick = True
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> defesa em pé
elif keyPressed(self.combat[5]) and not self.hit:
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
self.setEndState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.Ablock_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 2:
self.Ablock_step = 0
nextFrame += 1*frame_step
# combatMoves = [["j","n","k","m","l","u","f"],["1","4","2","5","3","0","6"]] -> special move
elif ((keyPressed(self.combat[4]) and self.end_special) or ( not self.end_special) ) and (not self.hit):
if not self.end_special and self.projectileFighter.isProjectileEnded():
self.frame_special = 0
self.special_step = 1
self.end_special = True
self.projectileFighter.endProjectile()
else:
print("SpecialMove")
print("self.end_special: " + str(self.end_special))
if (self.frame_special == 0): engine.Sound(self.specialSound[self.fighterId][0]).play()
self.curr_sprite = self.spriteList[self.special]
self.projectileFighter.startProjectile()
self.projectileFighter.setPos([self.getX(),self.getY()])
self.specialMove = self.setState()
self.setEndState()
if self.end_special and self.fighterId == 1:
self.frame_special = 0
self.special_step = 1
self.end_special = False
if time > nextFrame:
moveSprite(self.spriteList[self.special], self.x, self.y, True)
self.setSprite(self.spriteList[self.special])
changeSpriteImage(self.spriteList[self.special], self.frame_special)
self.projectileFighter.drawProjectile(clock(),nextFrame)
self.frame_special = (self.frame_special+self.special_step) % (self.specialLimit[self.fighterId]+1)
if (self.frame_special == self.specialLimit[self.fighterId]-1):
self.special_step = -1
if (self.frame_special == self.specialLimit[self.fighterId]):
self.frame_special = 0
self.special_step = 1
self.end_special = True
self.projectileFighter.endProjectile()
nextFrame += 1*frame_step
# just dance :)
elif not self.hit :
# reset block (hold type)
self.frame_Ablocking = 0
self.Ablock_step = 1
self.frame_Bblocking = 0
self.Bblock_step = 1
# reset down (hold type)
self.frame_crouching = 0
self.crouch_step = 1
# reset other movement
self.frame_walk = self.frame_jumping = 0
# reset combat frames
self.frame_Apunching = self.frame_Bpunching = self.frame_Cpunching = self.frame_Dpunching = self.frame_Akicking = self.frame_Bkicking = self.frame_Ckicking = self.frame_Dkicking = 0
self.setEndState()
# start to dance
self.curr_sprite = self.spriteList[self.dance]
self.dancing = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.dance], self.x, self.y, True)
self.setSprite(self.spriteList[self.dance])
changeSpriteImage(self.spriteList[self.dance], self.frame_dance)
self.frame_dance = (self.frame_dance+self.dance_step) % self.danceLimit
if (self.frame_dance == self.danceLimit-1):
self.dance_step = -1
if (self.frame_dance == 0):
self.dance_step = 1
nextFrame += frame_step
#--------------Hit em pé--------------------
# Ouch! Punch on a face (Ahit = 12 # soco fraco)
elif self.hit and self.hitName == "Apunching":
self.curr_sprite = self.spriteList[self.Ahit]
self.Ahitting = self.setState()
moveSprite(self.spriteList[self.Ahit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ahit])
changeSpriteImage(self.spriteList[self.Ahit], self.frame_Ahit)
if time > nextFrame:
self.frame_Ahit = (self.frame_Ahit+self.hit_step) % self.hitLimit[0]
if (self.frame_Ahit == self.hitLimit[0] - 1):
self.hit_step = -1
if (self.frame_Ahit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
# Ouch! kick on a face (Bhit = 13 # chute fraco)
elif self.hit and self.hitName == "Akicking":
self.curr_sprite = self.spriteList[self.Bhit]
self.Bhitting = self.setState()
if self.fighterId == 0:
self.x -=0.8
else: self.x +=0.8
moveSprite(self.spriteList[self.Bhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Bhit])
changeSpriteImage(self.spriteList[self.Bhit], self.frame_Bhit)
if time > nextFrame:
# There are 8 frames of animation in each direction
self.frame_Bhit = (self.frame_Bhit+self.hit_step) % self.hitLimit[1]
if (self.frame_Bhit == self.hitLimit[1] - 1):
self.hit_step = -1
if (self.frame_Bhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
# Ouch! combo punch (Chit = 14 # soco forte)
elif self.hit and self.hitName == "Bpunching":
self.curr_sprite = self.spriteList[self.Chit]
self.Chitting = self.setState()
if self.fighterId == 0:
self.x -=2
else: self.x +=2
moveSprite(self.spriteList[self.Chit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Chit])
changeSpriteImage(self.spriteList[self.Chit], self.frame_Chit)
if time > nextFrame:
self.frame_Chit = (self.frame_Chit+self.hit_step) % self.hitLimit[2]
if (self.frame_Chit == self.hitLimit[2] - 1):
self.hit_step = -1
if (self.frame_Chit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
#Dhit = 15 # soco agrachado fraco
elif self.hit and self.hitName == "Cpunching":
self.curr_sprite = self.spriteList[self.Dhit]
self.Dhitting = self.setState()
moveSprite(self.spriteList[self.Dhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Dhit])
changeSpriteImage(self.spriteList[self.Dhit], self.frame_Dhit)
if time > nextFrame:
self.frame_Dhit = (self.frame_Dhit+self.hit_step) % self.hitLimit[3]
if (self.frame_Dhit == self.hitLimit[3] - 1):
self.hit_step = -1
if (self.frame_Dhit == 0):
self.hit_step = 1
self.hit = False
nextFrame += 1.2*frame_step
#Fhit = 17 # chute forte e soco forte agachado (gancho)
elif self.hit and self.hitName == "Bkicking":
self.curr_sprite = self.spriteList[self.Fhit]
self.Fhitting = self.setState()
if self.frame_Fhit <= 6:
if self.fighterId == 0:
self.x -=5
else: self.x +=5
moveSprite(self.spriteList[self.Fhit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Fhit])
changeSpriteImage(self.spriteList[self.Fhit], self.frame_Fhit)
if time > nextFrame:
self.frame_Fhit = (self.frame_Fhit+self.hit_step) % self.hitLimit[5]
if (self.frame_Fhit == self.hitLimit[5] - 1):
self.hit = False
nextFrame += 1.2*frame_step
#Ghit = 18 # chute agachado forte: banda
elif self.hit and self.hitName == "Dkicking":
self.curr_sprite = self.spriteList[self.Ghit]
self.Ghitting = self.setState()
moveSprite(self.spriteList[self.Ghit], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ghit])
changeSpriteImage(self.spriteList[self.Ghit], self.frame_Ghit)
if time > nextFrame:
self.frame_Ghit = (self.frame_Ghit+self.hit_step) % self.hitLimit[6]
if (self.frame_Ghit == self.hitLimit[6] - 1):
self.hit = False
nextFrame += 1.2*frame_step
#Hhit = 19 # specialHit
elif self.hit and self.hitName == "special":
if (self.frame_Hhit == 0 and self.fighterId == 0): engine.Sound(self.specialSound[self.fighterId][1]).play()
self.curr_sprite = self.spriteList[self.Hhit]
self.hitSpecial = self.setState()
moveSprite(self.spriteList[self.Hhit], self.x, self.y, True)
if self.fighterId == 0: # subzero
self.x += 20
self.setSprite(self.spriteList[self.Hhit])
changeSpriteImage(self.spriteList[self.Hhit], self.frame_Hhit)
if time > nextFrame:
self.frame_Hhit = (self.frame_Hhit+self.hit_step) % self.hitSpecialLimit[self.fighterId]
if (self.frame_Hhit == self.hitSpecialLimit[self.fighterId] - 1):
self.hit = False
nextFrame += 1.2*frame_step
#blockHit! Defesa em pé.
elif self.hit and self.hitName == "Ablocking":
self.curr_sprite = self.spriteList[self.Ablock]
self.Ablocking = self.setState()
if time > nextFrame:
moveSprite(self.spriteList[self.Ablock], self.x, self.y, True)
self.setSprite(self.spriteList[self.Ablock])
changeSpriteImage(self.spriteList[self.Ablock], self.frame_Ablocking)
self.frame_Ablocking = (self.frame_Ablocking+self.hit_step) % self.blockLimit
if self.frame_Ablocking == self.blockLimit - 1:
self.hit_step = -1
if self.frame_Ablocking == 1:
self.hit_step = 1
self.hit = False
nextFrame += 1*frame_step
# dizzy
elif self.hit and self.hitName == "dizzy":
self.curr_sprite = self.spriteList[self.dizzy]
self.dizzing = self.setState()
moveSprite(self.spriteList[self.dizzy], self.x, self.y, True)
self.setSprite(self.spriteList[self.dizzy])
changeSpriteImage(self.spriteList[self.dizzy], self.frame_dizzy)
moveSprite(self.spriteFinish, 400, 80, True)
showSprite(self.spriteFinish)
if time > nextFrame:
self.frame_dizzy = (self.frame_dizzy+self.hit_step) % self.dizzyLimit
nextFrame += 1.8*frame_step
# Dead
elif self.hit and self.hitName == "dead":
self.curr_sprite = self.spriteList[self.dead]
self.deading = self.setState()
moveSprite(self.spriteList[self.dead], self.x, self.y, True)
self.setSprite(self.spriteList[self.dead])
changeSpriteImage(self.spriteList[self.dead], self.frame_dead)
if time > nextFrame:
self.frame_dead = (self.frame_dead+self.hit_step) % self.deadLimit
if (self.frame_dead == self.deadLimit - 1):
self.hit_step = 0
if self.fighterId == 0:
if not pygame.mixer.get_busy() and not self.lostOnce:
engine.Sound("ScorpionWins").play()
self.lostOnce = True
hideSprite(self.spriteFinish)
moveSprite(self.spriteWins, 400, 80, True)
showSprite(self.spriteWins)
else:
print("Scorpion Wins")
if not pygame.mixer.get_busy() and not self.lostOnce:
self.lostOnce = True
engine.Sound("SubZeroWins").play()
hideSprite(self.spriteFinish)
moveSprite(self.spriteWins, 400, 80, True)
showSprite(self.spriteWins)
nextFrame += 1.2*frame_step
else:
# fightMoves = [ ["w", "s", "a", "d"], ["up", "down", "left", "right"] ] -> jump
if time > nextFrame:
if keyPressed(self.move[2]):
self.x -= 15
if keyPressed(self.move[3]):
self.x += 15
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.setSprite(self.spriteList[self.jump])
self.y -= (self.jumpHeight-self.jumpCounter)*7
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
if (self.jumpCounter < self.jumpHeight -1 or self.jumpCounter > self.jumpHeight +1): # subindo ou descendo
self.frame_jumping = 1
if (self.jumpHeight - 1 <= self.jumpCounter <= self.jumpHeight + 1): # quase parado
self.frame_jumping = 2
if (self.jumpCounter == 2*self.jumpHeight-1):
self.frame_jumping = 0
self.jumpCounter = -1
if clock() > nextFrame:
self.setSprite(self.spriteList[self.jump])
changeSpriteImage(self.spriteList[self.jump], self.frame_jumping)
moveSprite(self.spriteList[self.jump], self.x, self.y, True)
self.end_jump = self.setState()# MUDANÇA
self.jumping = self.setEndState() #MUDANÇA
self.jumpCounter += 2
nextFrame += 1*frame_step
return nextFrame
    def getProjectile(self):
        """Return this fighter's projectile object (``projectileFighter``)."""
        return self.projectileFighter
    def getX(self):
        """Return the fighter's current horizontal screen position."""
        return self.x
    def getY(self):
        """Return the fighter's current vertical screen position."""
        return self.y
    def setX(self,X):
        """Set the horizontal position and move the current sprite there."""
        self.x = X
        moveSprite(self.curr_sprite,self.x,self.y,True)
    def setY(self,Y):
        """Set the vertical position and move the current sprite there."""
        self.y = Y
        moveSprite(self.curr_sprite,self.x,self.y,True)
    def isWalking(self):
        """Return the walking state flag."""
        return self.walking
    def isJumping(self):
        """Return the jumping state flag."""
        return self.jumping
    def isCrouching(self):
        """Return the crouching state flag."""
        return self.crouching
    def isDancing(self):
        """Return the idle/"dancing" stance flag."""
        return self.dancing
    def isApunching(self):
        """Return the A-punch state flag."""
        return self.Apunching
    def isBpunching(self):
        """Return the B-punch state flag."""
        return self.Bpunching
    def isCpunching(self):
        """Return the C-punch state flag."""
        return self.Cpunching
    def isDpunching(self):
        """Return the D-punch state flag."""
        return self.Dpunching
    def isAkicking(self):
        """Return the A-kick state flag."""
        return self.Akicking
    def isBkicking(self):
        """Return the B-kick state flag."""
        return self.Bkicking
    def isCkicking(self):
        """Return the C-kick state flag."""
        return self.Ckicking
    def isDkicking(self):
        """Return the D-kick state flag."""
        return self.Dkicking
    def isSpecialMove(self):
        """Return the special-move state flag."""
        return self.specialMove
    def isAblocking(self):
        """Return the standing-block (A) state flag."""
        return self.Ablocking
    def isBblocking(self):
        """Return the B-block state flag."""
        return self.Bblocking
    def isHit(self):
        """Return True while the fighter is reacting to a hit."""
        return self.hit
    def ishitSpecial(self):
        """Return True while the fighter is reacting to a special-move hit."""
        return self.hitSpecial
    def isAlive(self):
        """Return True while the fighter has not been marked dead."""
        return not self.isDead
def killPlayer(self):
for i in range(0,len(self.spriteList)):
killSprite(self.spriteList[i])
    def currentSprite(self):
        """Return the sprite currently shown for this fighter."""
        return self.curr_sprite
def takeHit(self,by):
self.hit = True
self.hitName = by
dicionario = {"Apunching":5,"Bpunching":8,"Akicking":3,"Ablocking":0,"Bkicking":10,"Cpunching":6,"Dkicking":9,"special":5}
if by in dicionario:
self.life.addDamage(dicionario[by])
if self.life.isDead():
pygame.mixer.music.stop()
engine.Sound("FinishHim").play()
self.isDead = True
    def takeDownHit(self,by):
        """Flag a knock-down hit caused by the attack named *by*."""
        self.downHit = True
        self.hitName = by
    def setHitName(self,by):
        """Record the name of the attack currently hitting this fighter."""
        self.hitName = by
def stopHit(self,by = ""):
self.hit = False
self.hitName = by
def setState(self):
# moves
self.walking = False
self.dancing = False
self.jumping = False
self.crouching = False
# punches
self.Apunching = False
self.Bpunching = False
self.Cpunching = False
self.Dpunching = False
# kicks
self.Akicking = False
self.Bkicking = False
self.Ckicking = False
self.Dkicking = False
# punch hits
self.Ahitting = False
self.Bhitting = False
self.Chitting = False
self.Dhitting = False
self.Ehitting = False
self.Fhitting = False
self.Ghitting = False
self.hitSpecial = False
# blocks
self.Ablocking = False
self.Bblocking = False
# special move
self.specialMove = False
# dizzy
self.dizzing = False
# fatality
self.fatality = False
# actual states
return True
def setEndState(self):
self.end_jump = True
self.end_Apunch = True
self.end_Bpunch = True
self.end_Cpunch = True
self.end_Dpunch = True
self.end_Akick = True
self.end_Bkick = True
self.end_Ckick = True
self.end_Dkick = True
self.end_special = True
return False
def setSprite(self,sprite):
for i in range(0,len(self.spriteList)):
if (not sprite == self.spriteList[i]):
hideSprite(self.spriteList[i])
showSprite(sprite)
def posFighter(self):
for i in range(0,len(self.spriteList)):
moveSprite(self.spriteList[i], self.x, self.y, True) | 47.455954 | 197 | 0.512178 |
a453bc165c45aacde7e3235a89db59af5f4e2e82 | 64,290 | py | Python | test/ext/declarative/test_inheritance.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | 1 | 2018-11-15T16:02:17.000Z | 2018-11-15T16:02:17.000Z | test/ext/declarative/test_inheritance.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | null | null | null | test/ext/declarative/test_inheritance.py | gujun4990/sqlalchemy | 057bae2295feb86529a04f09cd2f3d4c2c6d88a8 | [
"MIT"
] | null | null | null |
from sqlalchemy.testing import eq_, le_, assert_raises, \
assert_raises_message, is_, is_true, is_false
from sqlalchemy.ext import declarative as decl
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import relationship, create_session, class_mapper, \
configure_mappers, clear_mappers, \
polymorphic_union, deferred, Session, mapper
from sqlalchemy.ext.declarative import declared_attr, AbstractConcreteBase, \
ConcreteBase, has_inherited_table
from sqlalchemy.testing import fixtures, mock
from test.orm.test_events import _RemoveListeners
# Module-level declarative base; rebuilt for every test in
# DeclarativeTestBase.setup() so each test starts with empty metadata.
Base = None
class DeclarativeTestBase(fixtures.TestBase, testing.AssertsExecutionResults):
    """Shared harness: fresh declarative Base per test, full cleanup after."""
    def setup(self):
        global Base
        # Rebuild the base so no mappings/tables leak between tests.
        Base = decl.declarative_base(testing.db)
    def teardown(self):
        Session.close_all()
        clear_mappers()
        Base.metadata.drop_all()
class DeclarativeInheritanceTest(DeclarativeTestBase):
    def test_we_must_copy_mapper_args(self):
        """A single-table subclass inherits polymorphic_on but must not
        inherit the base's polymorphic_identity or gain 'inherits'."""
        class Person(Base):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator,
                               'polymorphic_identity': 'person'}
        class Engineer(Person):
            primary_language = Column(String(50))
        assert 'inherits' not in Person.__mapper_args__
        assert class_mapper(Engineer).polymorphic_identity is None
        assert class_mapper(Engineer).polymorphic_on is Person.__table__.c.type
    def test_we_must_only_copy_column_mapper_args(self):
        """String-valued mapper args (version_id_col, include_properties)
        pass through declarative unchanged on the mapped class."""
        class Person(Base):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True)
            a = Column(Integer)
            b = Column(Integer)
            c = Column(Integer)
            d = Column(Integer)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator,
                               'polymorphic_identity': 'person',
                               'version_id_col': 'a',
                               'column_prefix': 'bar',
                               'include_properties': ['id', 'a', 'b'],
                               }
        assert class_mapper(Person).version_id_col == 'a'
        assert class_mapper(Person).include_properties == set(['id', 'a', 'b'])
    def test_custom_join_condition(self):
        """An explicit inherit_condition lets a subclass join to the base
        without a ForeignKey on the shared column name."""
        class Foo(Base):
            __tablename__ = 'foo'
            id = Column('id', Integer, primary_key=True)
        class Bar(Foo):
            __tablename__ = 'bar'
            bar_id = Column('id', Integer, primary_key=True)
            foo_id = Column('foo_id', Integer)
            __mapper_args__ = {'inherit_condition': foo_id == Foo.id}
        # compile succeeds because inherit_condition is honored
        configure_mappers()
    def test_joined(self):
        """Round trip joined-table inheritance: of_type() filtering and
        subclass pk column ordering (local 'id' before base 'id')."""
        class Company(Base, fixtures.ComparableEntity):
            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column('company_id', Integer,
                                ForeignKey('companies.id'))
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            primary_language = Column('primary_language', String(50))
        class Manager(Person):
            __tablename__ = 'managers'
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            golf_swing = Column('golf_swing', String(50))
        Base.metadata.create_all()
        sess = create_session()
        c1 = Company(
            name='MegaCorp, Inc.',
            employees=[
                Engineer(name='dilbert', primary_language='java'),
                Engineer(name='wally', primary_language='c++'),
                Manager(name='dogbert', golf_swing='fore!')])
        c2 = Company(name='Elbonia, Inc.',
                     employees=[Engineer(name='vlad',
                                         primary_language='cobol')])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
        # ensure that the Manager mapper was compiled with the Manager id
        # column as higher priority. this ensures that "Manager.id"
        # is appropriately treated as the "id" column in the "manager"
        # table (reversed from 0.6's behavior.)
        eq_(
            Manager.id.property.columns,
            [Manager.__table__.c.id, Person.__table__.c.id]
        )
        # assert that the "id" column is available without a second
        # load. as of 0.7, the ColumnProperty tests all columns
        # in its list to see which is present in the row.
        sess.expunge_all()
        def go():
            assert sess.query(Manager).filter(Manager.name == 'dogbert'
                                              ).one().id
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        def go():
            assert sess.query(Person).filter(Manager.name == 'dogbert'
                                             ).one().id
        self.assert_sql_count(testing.db, go, 1)
    def test_add_subcol_after_the_fact(self):
        """A column attached to a mapped subclass after class creation is
        picked up and usable in a round trip."""
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
        Engineer.primary_language = Column('primary_language',
                                           String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(primary_language='java', name='dilbert')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Engineer(primary_language='java', name='dilbert'))
    def test_add_parentcol_after_the_fact(self):
        """A column attached to the mapped *base* class after the subclass
        is declared is still inherited and usable."""
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
        Person.name = Column('name', String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(primary_language='java', name='dilbert')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Engineer(primary_language='java', name='dilbert'))
    def test_add_sub_parentcol_after_the_fact(self):
        """A base-class column added late propagates through a two-level
        joined inheritance chain (Person -> Engineer -> Admin)."""
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
        class Admin(Engineer):
            __tablename__ = 'admins'
            __mapper_args__ = {'polymorphic_identity': 'admin'}
            workstation = Column(String(50))
            id = Column('id', Integer, ForeignKey('engineers.id'),
                        primary_key=True)
        Person.name = Column('name', String(50))
        Base.metadata.create_all()
        sess = create_session()
        e1 = Admin(primary_language='java', name='dilbert',
                   workstation='foo')
        sess.add(e1)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).first(),
            Admin(primary_language='java', name='dilbert', workstation='foo'))
    def test_subclass_mixin(self):
        """A plain (unmapped) mixin in the subclass bases does not disturb
        the Engineer -> Person inheritance relationship."""
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True)
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class MyMixin(object):
            pass
        class Engineer(MyMixin, Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column('id', Integer, ForeignKey('people.id'),
                        primary_key=True)
            primary_language = Column('primary_language', String(50))
        assert class_mapper(Engineer).inherits is class_mapper(Person)
    def test_intermediate_abstract_class_on_classical(self):
        """A declarative class may inherit from a classically mapped class
        through an intermediate __abstract__ class."""
        class Person(object):
            pass
        person_table = Table('people', Base.metadata,
                             Column('id', Integer, primary_key=True),
                             Column('kind', String(50)))
        mapper(Person, person_table,
               polymorphic_on='kind', polymorphic_identity='person')
        class SpecialPerson(Person):
            __abstract__ = True
        class Manager(SpecialPerson, Base):
            __tablename__ = 'managers'
            id = Column(Integer, ForeignKey(Person.id), primary_key=True)
            __mapper_args__ = {
                'polymorphic_identity': 'manager'
            }
        from sqlalchemy import inspect
        assert inspect(Manager).inherits is inspect(Person)
        eq_(set(class_mapper(Person).class_manager), {'id', 'kind'})
        eq_(set(class_mapper(Manager).class_manager), {'id', 'kind'})
    def test_intermediate_unmapped_class_on_classical(self):
        """Same as the abstract case, but the intermediate class is simply
        unmapped; inheritance still resolves to the classical mapper."""
        class Person(object):
            pass
        person_table = Table('people', Base.metadata,
                             Column('id', Integer, primary_key=True),
                             Column('kind', String(50)))
        mapper(Person, person_table,
               polymorphic_on='kind', polymorphic_identity='person')
        class SpecialPerson(Person):
            pass
        class Manager(SpecialPerson, Base):
            __tablename__ = 'managers'
            id = Column(Integer, ForeignKey(Person.id), primary_key=True)
            __mapper_args__ = {
                'polymorphic_identity': 'manager'
            }
        from sqlalchemy import inspect
        assert inspect(Manager).inherits is inspect(Person)
        eq_(set(class_mapper(Person).class_manager), {'id', 'kind'})
        eq_(set(class_mapper(Manager).class_manager), {'id', 'kind'})
    def test_class_w_invalid_multiple_bases(self):
        """Deriving from two distinct mapped bases (one classical, one
        declarative) raises InvalidRequestError."""
        class Person(object):
            pass
        person_table = Table('people', Base.metadata,
                             Column('id', Integer, primary_key=True),
                             Column('kind', String(50)))
        mapper(Person, person_table,
               polymorphic_on='kind', polymorphic_identity='person')
        class DeclPerson(Base):
            __tablename__ = 'decl_people'
            id = Column(Integer, primary_key=True)
            kind = Column(String(50))
        class SpecialPerson(Person):
            pass
        def go():
            class Manager(SpecialPerson, DeclPerson):
                __tablename__ = 'managers'
                id = Column(Integer,
                            ForeignKey(DeclPerson.id), primary_key=True)
                __mapper_args__ = {
                    'polymorphic_identity': 'manager'
                }
        assert_raises_message(
            sa.exc.InvalidRequestError,
            r"Class .*Manager.* has multiple mapped "
            r"bases: \[.*Person.*DeclPerson.*\]",
            go
        )
    def test_with_undefined_foreignkey(self):
        """A ForeignKey naming a table that is declared later resolves
        lazily; mapper configuration succeeds."""
        class Parent(Base):
            __tablename__ = 'parent'
            id = Column('id', Integer, primary_key=True)
            tp = Column('type', String(50))
            __mapper_args__ = dict(polymorphic_on=tp)
        class Child1(Parent):
            __tablename__ = 'child1'
            id = Column('id', Integer, ForeignKey('parent.id'),
                        primary_key=True)
            related_child2 = Column('c2', Integer,
                                    ForeignKey('child2.id'))
            __mapper_args__ = dict(polymorphic_identity='child1')
        # no exception is raised by the ForeignKey to "child2" even
        # though child2 doesn't exist yet
        class Child2(Parent):
            __tablename__ = 'child2'
            id = Column('id', Integer, ForeignKey('parent.id'),
                        primary_key=True)
            related_child1 = Column('c1', Integer)
            __mapper_args__ = dict(polymorphic_identity='child2')
        sa.orm.configure_mappers()  # no exceptions here
    def test_foreign_keys_with_col(self):
        """Test that foreign keys that reference a literal 'id' subclass
        'id' attribute behave intuitively.

        See [ticket:1892].

        """

        class Booking(Base):
            __tablename__ = 'booking'
            id = Column(Integer, primary_key=True)

        class PlanBooking(Booking):
            __tablename__ = 'plan_booking'
            id = Column(Integer, ForeignKey(Booking.id),
                        primary_key=True)

        # referencing PlanBooking.id gives us the column
        # on plan_booking, not booking
        class FeatureBooking(Booking):
            __tablename__ = 'feature_booking'
            id = Column(Integer, ForeignKey(Booking.id),
                        primary_key=True)
            plan_booking_id = Column(Integer,
                                     ForeignKey(PlanBooking.id))

            plan_booking = relationship(PlanBooking,
                                        backref='feature_bookings')

        # the FK points at plan_booking.id, while both subclass pks
        # still point at booking.id.
        assert FeatureBooking.__table__.c.plan_booking_id.\
            references(PlanBooking.__table__.c.id)

        assert FeatureBooking.__table__.c.id.\
            references(Booking.__table__.c.id)
    def test_single_colsonbase(self):
        """test single inheritance where all the columns are on the base
        class.  Round-trips a polymorphic query and of_type() filtering."""
        class Company(Base, fixtures.ComparableEntity):
            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column('company_id', Integer,
                                ForeignKey('companies.id'))
            name = Column('name', String(50))
            discriminator = Column('type', String(50))
            primary_language = Column('primary_language', String(50))
            golf_swing = Column('golf_swing', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
        Base.metadata.create_all()
        sess = create_session()
        c1 = Company(
            name='MegaCorp, Inc.',
            employees=[
                Engineer(name='dilbert', primary_language='java'),
                Engineer(name='wally', primary_language='c++'),
                Manager(name='dogbert', golf_swing='fore!')])
        c2 = Company(name='Elbonia, Inc.',
                     employees=[Engineer(name='vlad',
                                         primary_language='cobol')])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).filter(Engineer.primary_language
                                      == 'cobol').first(),
            Engineer(name='vlad'))
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
    def test_single_colsonsub(self):
        """test single inheritance where the columns are local to their
        class.

        this is a newer usage.

        """

        class Company(Base, fixtures.ComparableEntity):
            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')

        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column(Integer, ForeignKey('companies.id'))
            name = Column(String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))

        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            golf_swing = Column(String(50))

        # we have here a situation that is somewhat unique. the Person
        # class is mapped to the "people" table, but it was mapped when
        # the table did not include the "primary_language" or
        # "golf_swing" columns. declarative will also manipulate the
        # exclude_properties collection so that sibling classes don't
        # cross-pollinate.

        assert Person.__table__.c.company_id is not None
        assert Person.__table__.c.golf_swing is not None
        assert Person.__table__.c.primary_language is not None
        assert Engineer.primary_language is not None
        assert Manager.golf_swing is not None
        assert not hasattr(Person, 'primary_language')
        assert not hasattr(Person, 'golf_swing')
        assert not hasattr(Engineer, 'golf_swing')
        assert not hasattr(Manager, 'primary_language')
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++')
        m1 = Manager(name='dogbert', golf_swing='fore!')
        c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
        e3 = Engineer(name='vlad', primary_language='cobol')
        c2 = Company(name='Elbonia, Inc.', employees=[e3])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).filter(Engineer.primary_language
                                      == 'cobol').first(),
            Engineer(name='vlad'))
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
        eq_(sess.query(Engineer).filter_by(primary_language='cobol'
                                           ).one(),
            Engineer(name='vlad', primary_language='cobol'))
    def test_single_cols_on_sub_base_of_joined(self):
        """test [ticket:3895]: a column local to a single-table subclass
        must not leak to the base or to joined-table siblings."""
        class Person(Base):
            __tablename__ = "person"

            id = Column(Integer, primary_key=True)
            type = Column(String)

            __mapper_args__ = {
                "polymorphic_on": type,
            }

        class Contractor(Person):
            contractor_field = Column(String)

            __mapper_args__ = {
                "polymorphic_identity": "contractor",
            }

        class Employee(Person):
            __tablename__ = "employee"

            id = Column(Integer, ForeignKey(Person.id), primary_key=True)

        class Engineer(Employee):
            __mapper_args__ = {
                "polymorphic_identity": "engineer",
            }

        configure_mappers()

        # only Contractor sees its own single-table column
        is_false(hasattr(Person, 'contractor_field'))
        is_true(hasattr(Contractor, 'contractor_field'))
        is_false(hasattr(Employee, 'contractor_field'))
        is_false(hasattr(Engineer, 'contractor_field'))
    def test_single_cols_on_sub_to_joined(self):
        """test [ticket:3797]: single-table subclass columns land on the
        joined parent's table and don't break relationship joins."""
        class BaseUser(Base):
            __tablename__ = 'root'

            id = Column(Integer, primary_key=True)
            row_type = Column(String)

            __mapper_args__ = {
                'polymorphic_on': row_type,
                'polymorphic_identity': 'baseuser'
            }

        class User(BaseUser):
            __tablename__ = 'user'

            __mapper_args__ = {
                'polymorphic_identity': 'user'
            }

            baseuser_id = Column(
                Integer, ForeignKey('root.id'), primary_key=True)

        class Bat(Base):
            __tablename__ = 'bat'
            id = Column(Integer, primary_key=True)

        class Thing(Base):
            __tablename__ = 'thing'

            id = Column(Integer, primary_key=True)

            owner_id = Column(Integer, ForeignKey('user.baseuser_id'))
            owner = relationship('User')

        class SubUser(User):
            __mapper_args__ = {
                'polymorphic_identity': 'subuser'
            }

            sub_user_custom_thing = Column(Integer, ForeignKey('bat.id'))

        # the 'user' table carries both its own FK and SubUser's
        eq_(
            User.__table__.foreign_keys,
            User.baseuser_id.foreign_keys.union(
                SubUser.sub_user_custom_thing.foreign_keys))
        is_true(Thing.owner.property.primaryjoin.compare(
            Thing.owner_id == User.baseuser_id))
    def test_single_constraint_on_sub(self):
        """test the somewhat unusual case of [ticket:3341]: constraints
        declared on a single-table subclass attach to the base table."""
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column(String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}

        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column(String(50))

            __hack_args_one__ = sa.UniqueConstraint(
                Person.name, primary_language)
            __hack_args_two__ = sa.CheckConstraint(
                Person.name != primary_language)

        uq = [c for c in Person.__table__.constraints
              if isinstance(c, sa.UniqueConstraint)][0]
        ck = [c for c in Person.__table__.constraints
              if isinstance(c, sa.CheckConstraint)][0]
        # both constraints reference columns on the base table
        eq_(
            list(uq.columns),
            [Person.__table__.c.name, Person.__table__.c.primary_language]
        )
        eq_(
            list(ck.columns),
            [Person.__table__.c.name, Person.__table__.c.primary_language]
        )
    @testing.skip_if(lambda: testing.against('oracle'),
                     "Test has an empty insert in it at the moment")
    def test_columns_single_inheritance_conflict_resolution(self):
        """Test that a declared_attr can return the existing column and it will
        be ignored.  this allows conditional columns to be added.

        See [ticket:2472].

        """
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

        class Engineer(Person):

            """single table inheritance"""

            @declared_attr
            def target_id(cls):
                # return the already-created column when present, so the
                # sibling's declaration doesn't conflict
                return cls.__table__.c.get(
                    'target_id',
                    Column(Integer, ForeignKey('other.id')))

            @declared_attr
            def target(cls):
                return relationship("Other")

        class Manager(Person):

            """single table inheritance"""

            @declared_attr
            def target_id(cls):
                return cls.__table__.c.get(
                    'target_id',
                    Column(Integer, ForeignKey('other.id')))

            @declared_attr
            def target(cls):
                return relationship("Other")

        class Other(Base):
            __tablename__ = 'other'
            id = Column(Integer, primary_key=True)

        is_(
            Engineer.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
        is_(
            Manager.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
        # do a brief round trip on this
        Base.metadata.create_all()
        session = Session()
        o1, o2 = Other(), Other()
        session.add_all([
            Engineer(target=o1),
            Manager(target=o2),
            Manager(target=o1)
        ])
        session.commit()
        eq_(session.query(Engineer).first().target, o1)
    def test_columns_single_inheritance_conflict_resolution_pk(self):
        """Test #2472 in terms of a primary key column.  This is
        #4352.

        """
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

            target_id = Column(Integer, primary_key=True)

        class Engineer(Person):

            """single table inheritance"""

            @declared_attr
            def target_id(cls):
                # reuse the base's pk column rather than redeclaring it
                return cls.__table__.c.get(
                    'target_id',
                    Column(Integer, primary_key=True))

        class Manager(Person):

            """single table inheritance"""

            @declared_attr
            def target_id(cls):
                return cls.__table__.c.get(
                    'target_id',
                    Column(Integer, primary_key=True))

        is_(
            Engineer.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
        is_(
            Manager.target_id.property.columns[0],
            Person.__table__.c.target_id
        )
    def test_columns_single_inheritance_cascading_resolution_pk(self):
        """An additional test for #4352 in terms of the requested use case.

        """
        class TestBase(Base):
            __abstract__ = True

            @declared_attr.cascading
            def id(cls):
                col_val = None
                # subclasses reuse the column already placed on the
                # single-inheritance base table
                if TestBase not in cls.__bases__:
                    col_val = cls.__table__.c.get('id')
                if col_val is None:
                    col_val = Column(Integer, primary_key=True)
                return col_val

        class Person(TestBase):
            """single table base class"""
            __tablename__ = 'person'

        class Engineer(Person):
            """ single table inheritance, no extra cols """

        class Manager(Person):
            """ single table inheritance, no extra cols """

        is_(Engineer.id.property.columns[0], Person.__table__.c.id)
        is_(Manager.id.property.columns[0], Person.__table__.c.id)
    def test_joined_from_single(self):
        """A joined-table subclass (Engineer) can descend from a base that
        also has a single-table subclass (Manager); each class's columns
        stay on the correct table."""
        class Company(Base, fixtures.ComparableEntity):
            __tablename__ = 'companies'
            id = Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True)
            name = Column('name', String(50))
            employees = relationship('Person')
        class Person(Base, fixtures.ComparableEntity):
            __tablename__ = 'people'
            id = Column(Integer, primary_key=True,
                        test_needs_autoincrement=True)
            company_id = Column(Integer, ForeignKey('companies.id'))
            name = Column(String(50))
            discriminator = Column('type', String(50))
            __mapper_args__ = {'polymorphic_on': discriminator}
        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            golf_swing = Column(String(50))
        class Engineer(Person):
            __tablename__ = 'engineers'
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            id = Column(Integer, ForeignKey('people.id'),
                        primary_key=True)
            primary_language = Column(String(50))
        assert Person.__table__.c.golf_swing is not None
        assert 'primary_language' not in Person.__table__.c
        assert Engineer.__table__.c.primary_language is not None
        assert Engineer.primary_language is not None
        assert Manager.golf_swing is not None
        assert not hasattr(Person, 'primary_language')
        assert not hasattr(Person, 'golf_swing')
        assert not hasattr(Engineer, 'golf_swing')
        assert not hasattr(Manager, 'primary_language')
        Base.metadata.create_all()
        sess = create_session()
        e1 = Engineer(name='dilbert', primary_language='java')
        e2 = Engineer(name='wally', primary_language='c++')
        m1 = Manager(name='dogbert', golf_swing='fore!')
        c1 = Company(name='MegaCorp, Inc.', employees=[e1, e2, m1])
        e3 = Engineer(name='vlad', primary_language='cobol')
        c2 = Company(name='Elbonia, Inc.', employees=[e3])
        sess.add(c1)
        sess.add(c2)
        sess.flush()
        sess.expunge_all()
        eq_(sess.query(Person).with_polymorphic(Engineer).
            filter(Engineer.primary_language
                   == 'cobol').first(), Engineer(name='vlad'))
        eq_(sess.query(Company).filter(Company.employees.of_type(Engineer).
                                       any(Engineer.primary_language
                                           == 'cobol')).first(), c2)
        eq_(sess.query(Engineer).filter_by(primary_language='cobol'
                                           ).one(),
            Engineer(name='vlad', primary_language='cobol'))
def test_single_from_joined_colsonsub(self):
    """Single-inheritance subclass (Boss) of a joined-table subclass
    (Manager): Boss's new column must be placed on Manager's table."""
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Manager(Person):
        __tablename__ = 'manager'
        __mapper_args__ = {'polymorphic_identity': 'manager'}
        id = Column(Integer, ForeignKey('people.id'), primary_key=True)
        golf_swing = Column(String(50))

    class Boss(Manager):
        # no __tablename__: single inheritance against Manager's table
        boss_name = Column(String(50))

    # the attribute maps to the column created on the 'manager' table
    is_(
        Boss.__mapper__.column_attrs['boss_name'].columns[0],
        Manager.__table__.c.boss_name
    )
def test_polymorphic_on_converted_from_inst(self):
    """A ``polymorphic_on`` passed as an instrumented attribute via
    ``declared_attr`` is converted to the underlying Column, and is
    shared by subclasses."""
    class A(Base):
        __tablename__ = 'A'
        id = Column(Integer, primary_key=True)
        discriminator = Column(String)

        @declared_attr
        def __mapper_args__(cls):
            # evaluated per-class; cls.discriminator is the
            # InstrumentedAttribute at this point
            return {
                'polymorphic_identity': cls.__name__,
                'polymorphic_on': cls.discriminator
            }

    class B(A):
        pass
    is_(B.__mapper__.polymorphic_on, A.__table__.c.discriminator)
def test_add_deferred(self):
    """A deferred() column attached to the class *after* declaration is
    mapped, persisted, and stays unloaded until accessed."""
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True,
                    test_needs_autoincrement=True)

    # post-hoc attachment of a deferred column attribute
    Person.name = deferred(Column(String(10)))
    Base.metadata.create_all()
    sess = create_session()
    p = Person(name='ratbert')
    sess.add(p)
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(Person).all(), [Person(name='ratbert')])
    sess.expunge_all()
    person = sess.query(Person).filter(Person.name == 'ratbert').one()
    # deferred: not loaded into instance state by the query above
    assert 'name' not in person.__dict__
def test_single_fksonsub(self):
    """test single inheritance with a foreign key-holding column on
    a subclass.

    The FK column lands on the shared table but the attribute is only
    visible on the Engineer subclass; relationship traversal works for
    filter(), has() and join().
    """
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language_id = Column(Integer,
                                     ForeignKey('languages.id'))
        primary_language = relationship('Language')

    class Language(Base, fixtures.ComparableEntity):
        __tablename__ = 'languages'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))

    # subclass-local column is not exposed on the base class
    assert not hasattr(Person, 'primary_language_id')
    Base.metadata.create_all()
    sess = create_session()
    java, cpp, cobol = Language(name='java'), Language(name='cpp'), \
        Language(name='cobol')
    e1 = Engineer(name='dilbert', primary_language=java)
    e2 = Engineer(name='wally', primary_language=cpp)
    e3 = Engineer(name='vlad', primary_language=cobol)
    sess.add_all([e1, e2, e3])
    sess.flush()
    sess.expunge_all()
    eq_(sess.query(Person).filter(Engineer.primary_language.has(
        Language.name == 'cobol')).first(),
        Engineer(name='vlad', primary_language=Language(name='cobol')))
    eq_(sess.query(Engineer).filter(Engineer.primary_language.has(
        Language.name == 'cobol')).one(),
        Engineer(name='vlad', primary_language=Language(name='cobol')))
    eq_(sess.query(Person).join(Engineer.primary_language).order_by(
        Language.name).all(),
        [Engineer(name='vlad',
                  primary_language=Language(name='cobol')),
         Engineer(name='wally', primary_language=Language(name='cpp')),
         Engineer(name='dilbert', primary_language=Language(name='java'))])
def test_single_three_levels(self):
    """Three-level single-table inheritance: attributes accumulate down
    a branch while sibling branches stay isolated from each other."""
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

    class JuniorEngineer(Engineer):
        __mapper_args__ = \
            {'polymorphic_identity': 'junior_engineer'}
        nerf_gun = Column(String(50))

    class Manager(Person):
        __mapper_args__ = {'polymorphic_identity': 'manager'}
        golf_swing = Column(String(50))

    # each class sees its own columns plus inherited ones...
    assert JuniorEngineer.nerf_gun
    assert JuniorEngineer.primary_language
    assert JuniorEngineer.name
    assert Manager.golf_swing
    assert Engineer.primary_language
    # ...but never a sibling's or a subclass's columns
    assert not hasattr(Engineer, 'golf_swing')
    assert not hasattr(Engineer, 'nerf_gun')
    assert not hasattr(Manager, 'nerf_gun')
    assert not hasattr(Manager, 'primary_language')
def test_single_detects_conflict(self):
    """A single-table subclass may not declare a column whose name
    collides with a sibling's column or with a parent column."""
    class Person(Base):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    class Engineer(Person):
        __mapper_args__ = {'polymorphic_identity': 'engineer'}
        primary_language = Column(String(50))

    # test sibling col conflict
    def go():
        class Manager(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            golf_swing = Column(String(50))
            primary_language = Column(String(50))
    assert_raises(sa.exc.ArgumentError, go)

    # test parent col conflict
    def go():
        class Salesman(Person):
            __mapper_args__ = {'polymorphic_identity': 'manager'}
            name = Column(String(50))
    assert_raises(sa.exc.ArgumentError, go)
def test_single_no_special_cols(self):
    """A single-table subclass cannot introduce a primary-key column;
    declarative raises ArgumentError mentioning 'place primary key'."""
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True)
        name = Column('name', String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    def go():
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column('primary_language',
                                      String(50))
            foo_bar = Column(Integer, primary_key=True)
    assert_raises_message(sa.exc.ArgumentError,
                          'place primary key', go)
def test_single_no_table_args(self):
    """A single-table subclass cannot declare __table_args__; there is
    no table of its own for the arguments to apply to."""
    class Person(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column('id', Integer, primary_key=True)
        name = Column('name', String(50))
        discriminator = Column('type', String(50))
        __mapper_args__ = {'polymorphic_on': discriminator}

    def go():
        class Engineer(Person):
            __mapper_args__ = {'polymorphic_identity': 'engineer'}
            primary_language = Column('primary_language',
                                      String(50))

            # this should be on the Person class, as this is single
            # table inheritance, which is why we test that this
            # throws an exception!
            __table_args__ = {'mysql_engine': 'InnoDB'}
    assert_raises_message(sa.exc.ArgumentError,
                          'place __table_args__', go)
@testing.emits_warning("This declarative")
def test_dupe_name_in_hierarchy(self):
    """Two declarative classes sharing the name ``A``: the second (with
    its own table and FK) still inherits from the first's mapper; a
    "This declarative" warning is emitted for the duplicate name."""
    class A(Base):
        __tablename__ = "a"
        id = Column(Integer, primary_key=True)
    a_1 = A

    class A(a_1):
        __tablename__ = 'b'
        id = Column(Integer(), ForeignKey(a_1.id), primary_key=True)
    assert A.__mapper__.inherits is a_1.__mapper__
class OverlapColPrecedenceTest(DeclarativeTestBase):

    """test #1892 cases when declarative does column precedence."""

    def _run_test(self, Engineer, e_id, p_id):
        # the joined subclass attribute maps the engineer-table column
        # first, then the person-table column
        p_table = Base.metadata.tables['person']
        e_table = Base.metadata.tables['engineer']
        assert Engineer.id.property.columns[0] is e_table.c[e_id]
        assert Engineer.id.property.columns[1] is p_table.c[p_id]

    def test_basic(self):
        # same attribute name and column name on both tables
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column(Integer, ForeignKey('person.id'), primary_key=True)
        self._run_test(Engineer, "id", "id")

    def test_alt_name_base(self):
        # base column has an explicit alternate name 'pid'
        class Person(Base):
            __tablename__ = 'person'
            id = Column("pid", Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column(Integer, ForeignKey('person.pid'), primary_key=True)
        self._run_test(Engineer, "id", "pid")

    def test_alt_name_sub(self):
        # subclass column has an explicit alternate name 'eid'
        class Person(Base):
            __tablename__ = 'person'
            id = Column(Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column("eid", Integer, ForeignKey('person.id'),
                        primary_key=True)
        self._run_test(Engineer, "eid", "id")

    def test_alt_name_both(self):
        # alternate names on both base and subclass columns
        class Person(Base):
            __tablename__ = 'person'
            id = Column("pid", Integer, primary_key=True)

        class Engineer(Person):
            __tablename__ = 'engineer'
            id = Column("eid", Integer, ForeignKey('person.pid'),
                        primary_key=True)
        self._run_test(Engineer, "eid", "pid")
class ConcreteInhTest(_RemoveListeners, DeclarativeTestBase):
def _roundtrip(self, Employee, Manager, Engineer, Boss,
               polymorphic=True, explicit_type=False):
    """Persist one of each employee type and verify per-class queries.

    With ``polymorphic`` the shared ``type`` discriminator is checked:
    readable only when ``explicit_type`` is set (i.e. the class defined
    its own ``type`` descriptor), otherwise attribute access raises at
    the instance level.  Non-polymorphic hierarchies are queried class
    by class instead.
    """
    Base.metadata.create_all()
    sess = create_session()
    e1 = Engineer(name='dilbert', primary_language='java')
    e2 = Engineer(name='wally', primary_language='c++')
    m1 = Manager(name='dogbert', golf_swing='fore!')
    e3 = Engineer(name='vlad', primary_language='cobol')
    b1 = Boss(name="pointy haired")

    if polymorphic:
        for obj in [e1, e2, m1, e3, b1]:
            if explicit_type:
                eq_(obj.type, obj.__mapper__.polymorphic_identity)
            else:
                # 'type' exists only in the polymorphic union, not on
                # instances of the concrete classes
                assert_raises_message(
                    AttributeError,
                    "does not implement attribute .?'type' "
                    "at the instance level.",
                    getattr, obj, "type"
                )
    else:
        assert "type" not in Engineer.__dict__
        assert "type" not in Manager.__dict__
        assert "type" not in Boss.__dict__

    sess.add_all([e1, e2, m1, e3, b1])
    sess.flush()
    sess.expunge_all()
    if polymorphic:
        eq_(sess.query(Employee).order_by(Employee.name).all(),
            [Engineer(name='dilbert'), Manager(name='dogbert'),
             Boss(name='pointy haired'),
             Engineer(name='vlad'), Engineer(name='wally')])
    else:
        eq_(sess.query(Engineer).order_by(Engineer.name).all(),
            [Engineer(name='dilbert'), Engineer(name='vlad'),
             Engineer(name='wally')])
        eq_(sess.query(Manager).all(), [Manager(name='dogbert')])
        eq_(sess.query(Boss).all(), [Boss(name='pointy haired')])

    # expire/refresh round trip on a loaded instance
    e1 = sess.query(Engineer).order_by(Engineer.name).first()
    sess.expire(e1)
    eq_(e1.name, 'dilbert')
def test_explicit(self):
    """Hand-built ``polymorphic_union`` mapped explicitly onto a
    concrete-inheritance hierarchy of three tables."""
    engineers = Table(
        'engineers', Base.metadata,
        Column('id',
               Integer, primary_key=True, test_needs_autoincrement=True),
        Column('name', String(50)),
        Column('primary_language', String(50)))
    managers = Table('managers', Base.metadata,
                     Column('id', Integer, primary_key=True,
                            test_needs_autoincrement=True),
                     Column('name', String(50)),
                     Column('golf_swing', String(50))
                     )
    boss = Table('boss', Base.metadata,
                 Column('id', Integer, primary_key=True,
                        test_needs_autoincrement=True),
                 Column('name', String(50)),
                 Column('golf_swing', String(50))
                 )
    # UNION ALL selectable adding a 'type' discriminator column
    punion = polymorphic_union({
        'engineer': engineers,
        'manager': managers,
        'boss': boss}, 'type', 'punion')

    class Employee(Base, fixtures.ComparableEntity):
        __table__ = punion
        __mapper_args__ = {'polymorphic_on': punion.c.type}

    class Engineer(Employee):
        __table__ = engineers
        __mapper_args__ = {'polymorphic_identity': 'engineer',
                           'concrete': True}

    class Manager(Employee):
        __table__ = managers
        __mapper_args__ = {'polymorphic_identity': 'manager',
                           'concrete': True}

    class Boss(Manager):
        __table__ = boss
        __mapper_args__ = {'polymorphic_identity': 'boss',
                           'concrete': True}
    self._roundtrip(Employee, Manager, Engineer, Boss)
def test_concrete_inline_non_polymorphic(self):
    """test the example from the declarative docs.

    Plain concrete inheritance: each class carries a full copy of the
    shared columns and there is no polymorphic union.
    """
    class Employee(Base, fixtures.ComparableEntity):
        __tablename__ = 'people'
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        name = Column(String(50))

    class Engineer(Employee):
        __tablename__ = 'engineers'
        __mapper_args__ = {'concrete': True}
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        primary_language = Column(String(50))
        name = Column(String(50))

    class Manager(Employee):
        __tablename__ = 'manager'
        __mapper_args__ = {'concrete': True}
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        golf_swing = Column(String(50))
        name = Column(String(50))

    class Boss(Manager):
        __tablename__ = 'boss'
        __mapper_args__ = {'concrete': True}
        id = Column(Integer, primary_key=True,
                    test_needs_autoincrement=True)
        golf_swing = Column(String(50))
        name = Column(String(50))
    self._roundtrip(Employee, Manager, Engineer, Boss, polymorphic=False)
def test_abstract_concrete_extension(self):
    """AbstractConcreteBase: the abstract base has no table; the
    polymorphic union is assembled automatically from the subclasses."""
    class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
        pass

    class Manager(Employee):
        __tablename__ = 'manager'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            'concrete': True}

    class Boss(Manager):
        __tablename__ = 'boss'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))
        __mapper_args__ = {
            'polymorphic_identity': 'boss',
            'concrete': True}

    class Engineer(Employee):
        __tablename__ = 'engineer'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        primary_language = Column(String(40))
        __mapper_args__ = {'polymorphic_identity': 'engineer',
                           'concrete': True}
    self._roundtrip(Employee, Manager, Engineer, Boss)
def test_abstract_concrete_extension_descriptor_refresh(self):
    """A plain descriptor on a concrete subclass shadows the inherited
    declared_attr column and survives expire/refresh."""
    class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
        @declared_attr
        def name(cls):
            return Column(String(50))

    class Manager(Employee):
        __tablename__ = 'manager'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        paperwork = Column(String(10))
        __mapper_args__ = {
            'polymorphic_identity': 'manager', 'concrete': True}

    class Engineer(Employee):
        __tablename__ = 'engineer'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)

        @property
        def paperwork(self):
            return "p"
        __mapper_args__ = {
            'polymorphic_identity': 'engineer', 'concrete': True}
    Base.metadata.create_all()
    sess = Session()
    sess.add(Engineer(name='d'))
    sess.commit()

    # paperwork is excluded because there's a descriptor; so it is
    # not in the Engineer's mapped properties at all, though it is inside
    # the class manager.  Maybe it shouldn't be in the class manager either.
    assert 'paperwork' in Engineer.__mapper__.class_manager
    assert 'paperwork' not in Engineer.__mapper__.attrs.keys()

    # type currently does get mapped, as a
    # ConcreteInheritedProperty, which means, "ignore this thing inherited
    # from the concrete base". if we didn't specify concrete=True, then
    # this one gets stuck in the error condition also.
    assert 'type' in Engineer.__mapper__.class_manager
    assert 'type' in Engineer.__mapper__.attrs.keys()

    e1 = sess.query(Engineer).first()
    eq_(e1.name, 'd')
    sess.expire(e1)
    eq_(e1.name, 'd')
def test_concrete_extension(self):
    """ConcreteBase: the base itself is mapped to a real table and
    participates in the automatically-generated polymorphic union."""
    class Employee(ConcreteBase, Base, fixtures.ComparableEntity):
        __tablename__ = 'employee'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        __mapper_args__ = {
            'polymorphic_identity': 'employee',
            'concrete': True}

    class Manager(Employee):
        __tablename__ = 'manager'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            'concrete': True}

    class Boss(Manager):
        __tablename__ = 'boss'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))
        __mapper_args__ = {
            'polymorphic_identity': 'boss',
            'concrete': True}

    class Engineer(Employee):
        __tablename__ = 'engineer'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        primary_language = Column(String(40))
        __mapper_args__ = {'polymorphic_identity': 'engineer',
                           'concrete': True}
    self._roundtrip(Employee, Manager, Engineer, Boss)
def test_has_inherited_table_doesnt_consider_base(self):
    """has_inherited_table() is False for a root class and True only
    once a mapped superclass with a table exists."""
    class A(Base):
        __tablename__ = 'a'
        id = Column(Integer, primary_key=True)
    assert not has_inherited_table(A)

    class B(A):
        __tablename__ = 'b'
        id = Column(Integer, ForeignKey('a.id'), primary_key=True)
    assert has_inherited_table(B)
def test_has_inherited_table_in_mapper_args(self):
    """has_inherited_table() is usable inside a declared_attr
    __mapper_args__ to configure base vs. subclass differently."""
    class Test(Base):
        __tablename__ = 'test'
        id = Column(Integer, primary_key=True)
        type = Column(String(20))

        @declared_attr
        def __mapper_args__(cls):
            if not has_inherited_table(cls):
                # root of the hierarchy: establish the discriminator
                ret = {
                    'polymorphic_identity': 'default',
                    'polymorphic_on': cls.type,
                }
            else:
                ret = {'polymorphic_identity': cls.__name__}
            return ret

    class PolyTest(Test):
        __tablename__ = 'poly_test'
        id = Column(Integer, ForeignKey(Test.id), primary_key=True)
    configure_mappers()
    assert Test.__mapper__.polymorphic_on is Test.__table__.c.type
    assert PolyTest.__mapper__.polymorphic_on is Test.__table__.c.type
def test_ok_to_override_type_from_abstract(self):
    """Concrete subclasses may override the union's ``type`` attribute
    with their own plain property; _roundtrip reads it explicitly."""
    class Employee(AbstractConcreteBase, Base, fixtures.ComparableEntity):
        pass

    class Manager(Employee):
        __tablename__ = 'manager'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))

        @property
        def type(self):
            return "manager"
        __mapper_args__ = {
            'polymorphic_identity': "manager",
            'concrete': True}

    class Boss(Manager):
        __tablename__ = 'boss'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        golf_swing = Column(String(40))

        @property
        def type(self):
            return "boss"
        __mapper_args__ = {
            'polymorphic_identity': "boss",
            'concrete': True}

    class Engineer(Employee):
        __tablename__ = 'engineer'
        employee_id = Column(Integer, primary_key=True,
                             test_needs_autoincrement=True)
        name = Column(String(50))
        primary_language = Column(String(40))

        @property
        def type(self):
            return "engineer"
        __mapper_args__ = {'polymorphic_identity': "engineer",
                           'concrete': True}
    self._roundtrip(Employee, Manager, Engineer, Boss, explicit_type=True)
class ConcreteExtensionConfigTest(
_RemoveListeners, testing.AssertsCompiledSQL, DeclarativeTestBase):
__dialect__ = 'default'
def test_classreg_setup(self):
    """AbstractConcreteBase subclasses resolved via a string-based
    relationship(); round trip the data, then verify the generated
    pjoin (UNION ALL with NULL-padded columns) SQL for a join."""
    class A(Base, fixtures.ComparableEntity):
        __tablename__ = 'a'
        id = Column(Integer,
                    primary_key=True, test_needs_autoincrement=True)
        data = Column(String(50))
        # 'BC' is the abstract base; resolved by name at configure time
        collection = relationship("BC", primaryjoin="BC.a_id == A.id",
                                  collection_class=set)

    class BC(AbstractConcreteBase, Base, fixtures.ComparableEntity):
        pass

    class B(BC):
        __tablename__ = 'b'
        id = Column(Integer,
                    primary_key=True, test_needs_autoincrement=True)
        a_id = Column(Integer, ForeignKey('a.id'))
        data = Column(String(50))
        b_data = Column(String(50))
        __mapper_args__ = {
            "polymorphic_identity": "b",
            "concrete": True
        }

    class C(BC):
        __tablename__ = 'c'
        id = Column(Integer,
                    primary_key=True, test_needs_autoincrement=True)
        a_id = Column(Integer, ForeignKey('a.id'))
        data = Column(String(50))
        c_data = Column(String(50))
        __mapper_args__ = {
            "polymorphic_identity": "c",
            "concrete": True
        }
    Base.metadata.create_all()
    sess = Session()
    sess.add_all([
        A(data='a1', collection=set([
            B(data='a1b1', b_data='a1b1'),
            C(data='a1b2', c_data='a1c1'),
            B(data='a1b2', b_data='a1b2'),
            C(data='a1c2', c_data='a1c2'),
        ])),
        A(data='a2', collection=set([
            B(data='a2b1', b_data='a2b1'),
            C(data='a2c1', c_data='a2c1'),
            B(data='a2b2', b_data='a2b2'),
            C(data='a2c2', c_data='a2c2'),
        ]))
    ])
    sess.commit()
    sess.expunge_all()

    eq_(
        sess.query(A).filter_by(data='a2').all(),
        [
            A(data='a2', collection=set([
                B(data='a2b1', b_data='a2b1'),
                B(data='a2b2', b_data='a2b2'),
                C(data='a2c1', c_data='a2c1'),
                C(data='a2c2', c_data='a2c2'),
            ]))
        ]
    )

    self.assert_compile(
        sess.query(A).join(A.collection),
        "SELECT a.id AS a_id, a.data AS a_data FROM a JOIN "
        "(SELECT c.id AS id, c.a_id AS a_id, c.data AS data, "
        "c.c_data AS c_data, CAST(NULL AS VARCHAR(50)) AS b_data, "
        "'c' AS type FROM c UNION ALL SELECT b.id AS id, b.a_id AS a_id, "
        "b.data AS data, CAST(NULL AS VARCHAR(50)) AS c_data, "
        "b.b_data AS b_data, 'b' AS type FROM b) AS pjoin "
        "ON pjoin.a_id = a.id"
    )
def test_prop_on_base(self):
    """test [ticket:2670]

    declared_attr relationships on an AbstractConcreteBase must work on
    the concrete subclass both before and after the abstract base
    itself gets mapped.
    """
    counter = mock.Mock()

    class Something(Base):
        __tablename__ = 'something'
        id = Column(Integer, primary_key=True)

    class AbstractConcreteAbstraction(AbstractConcreteBase, Base):
        id = Column(Integer, primary_key=True)
        x = Column(Integer)
        y = Column(Integer)

        @declared_attr
        def something_id(cls):
            return Column(ForeignKey(Something.id))

        @declared_attr
        def something(cls):
            counter(cls, "something")
            return relationship("Something")

        @declared_attr
        def something_else(cls):
            counter(cls, "something_else")
            return relationship("Something")

    class ConcreteConcreteAbstraction(AbstractConcreteAbstraction):
        __tablename__ = 'cca'
        __mapper_args__ = {
            'polymorphic_identity': 'ccb',
            'concrete': True}

    # concrete is mapped, the abstract base is not (yet)
    assert ConcreteConcreteAbstraction.__mapper__
    assert not hasattr(AbstractConcreteAbstraction, '__mapper__')

    session = Session()
    self.assert_compile(
        session.query(ConcreteConcreteAbstraction).filter(
            ConcreteConcreteAbstraction.something.has(id=1)),
        "SELECT cca.id AS cca_id, cca.x AS cca_x, cca.y AS cca_y, "
        "cca.something_id AS cca_something_id FROM cca WHERE EXISTS "
        "(SELECT 1 FROM something WHERE something.id = cca.something_id "
        "AND something.id = :id_1)"
    )

    # now it is
    assert AbstractConcreteAbstraction.__mapper__

    self.assert_compile(
        session.query(ConcreteConcreteAbstraction).filter(
            ConcreteConcreteAbstraction.something_else.has(id=1)),
        "SELECT cca.id AS cca_id, cca.x AS cca_x, cca.y AS cca_y, "
        "cca.something_id AS cca_something_id FROM cca WHERE EXISTS "
        "(SELECT 1 FROM something WHERE something.id = cca.something_id "
        "AND something.id = :id_1)"
    )

    self.assert_compile(
        session.query(AbstractConcreteAbstraction).filter(
            AbstractConcreteAbstraction.something.has(id=1)),
        "SELECT pjoin.id AS pjoin_id, pjoin.x AS pjoin_x, "
        "pjoin.y AS pjoin_y, pjoin.something_id AS pjoin_something_id, "
        "pjoin.type AS pjoin_type FROM "
        "(SELECT cca.id AS id, cca.x AS x, cca.y AS y, "
        "cca.something_id AS something_id, 'ccb' AS type FROM cca) "
        "AS pjoin WHERE EXISTS (SELECT 1 FROM something "
        "WHERE something.id = pjoin.something_id AND something.id = :id_1)"
    )

    self.assert_compile(
        session.query(AbstractConcreteAbstraction).filter(
            AbstractConcreteAbstraction.something_else.has(id=1)),
        "SELECT pjoin.id AS pjoin_id, pjoin.x AS pjoin_x, "
        "pjoin.y AS pjoin_y, pjoin.something_id AS pjoin_something_id, "
        "pjoin.type AS pjoin_type FROM "
        "(SELECT cca.id AS id, cca.x AS x, cca.y AS y, "
        "cca.something_id AS something_id, 'ccb' AS type FROM cca) "
        "AS pjoin WHERE EXISTS (SELECT 1 FROM something "
        "WHERE something.id = pjoin.something_id AND something.id = :id_1)"
    )
def test_abstract_in_hierarchy(self):
    """An ``__abstract__`` intermediate class contributes its columns
    to the concrete leaf and to the generated pjoin."""
    class Document(Base, AbstractConcreteBase):
        doctype = Column(String)

    class ContactDocument(Document):
        __abstract__ = True
        send_method = Column(String)

    class ActualDocument(ContactDocument):
        __tablename__ = 'actual_documents'
        __mapper_args__ = {
            'concrete': True,
            'polymorphic_identity': 'actual'}
        id = Column(Integer, primary_key=True)
    configure_mappers()
    session = Session()
    self.assert_compile(
        session.query(Document),
        "SELECT pjoin.doctype AS pjoin_doctype, "
        "pjoin.send_method AS pjoin_send_method, "
        "pjoin.id AS pjoin_id, pjoin.type AS pjoin_type "
        "FROM (SELECT actual_documents.doctype AS doctype, "
        "actual_documents.send_method AS send_method, "
        "actual_documents.id AS id, 'actual' AS type "
        "FROM actual_documents) AS pjoin"
    )
def test_column_attr_names(self):
    """test #3480

    Attribute name ('documentType') differing from the column name
    ('documenttype') is honored in the generated pjoin SQL.
    """
    class Document(Base, AbstractConcreteBase):
        documentType = Column('documenttype', String)

    class Offer(Document):
        __tablename__ = 'offers'
        id = Column(Integer, primary_key=True)
        __mapper_args__ = {
            'polymorphic_identity': 'offer'
        }
    configure_mappers()
    session = Session()
    self.assert_compile(
        session.query(Document),
        "SELECT pjoin.documenttype AS pjoin_documenttype, "
        "pjoin.id AS pjoin_id, pjoin.type AS pjoin_type FROM "
        "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
        "'offer' AS type FROM offers) AS pjoin"
    )
    self.assert_compile(
        session.query(Document.documentType),
        "SELECT pjoin.documenttype AS pjoin_documenttype FROM "
        "(SELECT offers.documenttype AS documenttype, offers.id AS id, "
        "'offer' AS type FROM offers) AS pjoin"
    )
| 36.077441 | 79 | 0.563602 |
b0f27810008b82d91a2006adfc44ca52b30aca74 | 622 | py | Python | runs/par-bro-iter00900.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/par-bro-iter00900.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null | runs/par-bro-iter00900.cfg.py | janpawellek/broeval | 57e31aa6e354d0bba88103b44910483e8d982d00 | [
"MIT"
] | null | null | null |
# Evaluation run configuration (broeval): parallel mode, Bro enabled on
# both source and target, 100 repetitions of 900 iterations each.

# Write results to this file
OUTFILE = 'runs/par-bro-iter00900.result.csv'

# Source computers for the requests
SOURCE = ['10.0.0.1']

# Should Bro be enabled on the source machines?
SOURCE_BRO = [True]

# Target machines for the requests (aka server)
TARGET = ['10.0.0.2']

# Should Bro be enabled on the target machines?
TARGET_BRO = [True]

# Connection mode (par = parallel, seq = sequential)
MODE = 'par'

# Number of evaluation repetitions to run
EPOCHS = 100

# Number of iterations to be run in each evaluation repetition
ITER = 900

# Size of the file to be downloaded from target (in Bytes * 10^SIZE)
SIZE = 5
| 21.448276 | 68 | 0.720257 |
c1672d851f5eb2f58a859a7aa9e3ce9e1a6c270a | 1,592 | py | Python | marco/portal/data_catalog/views.py | MidAtlanticPortal/marco-portal2 | b47e7bfa171e98a6cf499b2d411fc743caae91c2 | [
"0BSD"
] | 4 | 2016-09-24T00:57:45.000Z | 2019-07-28T23:35:15.000Z | marco/portal/data_catalog/views.py | MidAtlanticPortal/marco-portal2 | b47e7bfa171e98a6cf499b2d411fc743caae91c2 | [
"0BSD"
] | 146 | 2016-09-27T23:16:52.000Z | 2022-03-09T16:55:32.000Z | marco/portal/data_catalog/views.py | MidAtlanticPortal/marco-portal2 | b47e7bfa171e98a6cf499b2d411fc743caae91c2 | [
"0BSD"
] | 1 | 2019-07-03T23:42:05.000Z | 2019-07-03T23:42:05.000Z | from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext
from data_manager.models import *
from portal.base.models import PortalImage
# hack for POR-224, until POR-206
def wagtail_feature_image(self):
    """Return the first PortalImage tagged both "theme" and with this
    theme's name, or None when no such image exists."""
    theme_images = PortalImage.objects.filter(tags__name__in=["theme"])
    image = theme_images.filter(tags__name__in=[self.name]).first()
    return image or None

# Attach as a method on the Theme model until the proper fix lands.
Theme.wagtail_feature_image = wagtail_feature_image
def theme_query():
    """Queryset of visible, non-"companion" themes ordered by ``order``.

    Each theme is annotated with ``layer_count`` via a raw correlated
    subquery counting its layers whose type is not 'placeholder'.
    NOTE(review): ``.extra()`` with raw SQL is deprecated in newer
    Django versions — confirm before upgrading.
    """
    return Theme.objects.filter(visible=True).exclude(name='companion').extra(
        select={
            'layer_count': "SELECT COUNT(*) FROM data_manager_layer_themes as mm LEFT JOIN data_manager_layer as l ON mm.layer_id = l.id WHERE mm.theme_id = data_manager_theme.id AND l.layer_type != 'placeholder'"
        }
    ).order_by('order')
def theme(request, theme_slug):
    """Render the data-catalog page for the theme named *theme_slug*.

    404s when no visible theme matches; layers are serialized through
    ``shortDict`` for the current site.
    """
    from django.contrib.sites.shortcuts import get_current_site
    site = get_current_site(request)
    current_theme = get_object_or_404(theme_query(), name=theme_slug)
    visible_layers = (current_theme.layer_set.all()
                      .exclude(layer_type='placeholder')
                      .exclude(is_sublayer=True)
                      .order_by('name'))
    layer_dicts = [layer.shortDict(site.pk) for layer in visible_layers]
    return render_to_response(
        'data_catalog/theme.html',
        {
            'theme': current_theme,
            'layers': layer_dicts,
        },
        context_instance=RequestContext(request))
| 41.894737 | 214 | 0.702889 |
ddeade03ba9e76b62326181f3af9f929ee2ada59 | 9,132 | py | Python | aiokafka/conn.py | vineet-rh/aiokafka | 485351d7f9e583debd628de72062b4fd4f7c2d65 | [
"Apache-2.0"
] | null | null | null | aiokafka/conn.py | vineet-rh/aiokafka | 485351d7f9e583debd628de72062b4fd4f7c2d65 | [
"Apache-2.0"
] | 1 | 2018-06-12T18:44:26.000Z | 2018-06-12T18:44:26.000Z | aiokafka/conn.py | vineet-rh/aiokafka | 485351d7f9e583debd628de72062b4fd4f7c2d65 | [
"Apache-2.0"
] | null | null | null | import asyncio
import struct
import logging
from kafka.protocol.api import RequestHeader
from kafka.protocol.commit import (
GroupCoordinatorResponse_v0 as GroupCoordinatorResponse)
import aiokafka.errors as Errors
from aiokafka import ensure_future
__all__ = ['AIOKafkaConnection', 'create_conn']
READER_LIMIT = 2 ** 16
class CloseReason:
    """Why a connection was closed; passed to the ``on_close`` callback."""
    CONNECTION_BROKEN = 0    # transport error / EOF while reading or writing
    CONNECTION_TIMEOUT = 1   # not set in this module chunk; presumably a
                             # connect/request timeout — confirm at call sites
    OUT_OF_SYNC = 2          # correlation id mismatch with the broker
    IDLE_DROP = 3            # exceeded max_idle_ms without any traffic
    SHUTDOWN = 4             # not set in this module chunk; presumably an
                             # explicit client shutdown — confirm at call sites
@asyncio.coroutine
def create_conn(host, port, *, loop=None, client_id='aiokafka',
                request_timeout_ms=40000, api_version=(0, 8, 2),
                ssl_context=None, security_protocol="PLAINTEXT",
                max_idle_ms=None, on_close=None):
    """Create an ``AIOKafkaConnection`` and connect it to ``host:port``.

    All keyword arguments are forwarded to ``AIOKafkaConnection``;
    ``loop`` defaults to the current event loop.  Returns the connected
    instance.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    conn = AIOKafkaConnection(
        host, port, loop=loop, client_id=client_id,
        request_timeout_ms=request_timeout_ms,
        api_version=api_version,
        ssl_context=ssl_context, security_protocol=security_protocol,
        max_idle_ms=max_idle_ms, on_close=on_close)
    yield from conn.connect()
    return conn
class AIOKafkaProtocol(asyncio.StreamReaderProtocol):
    """StreamReaderProtocol that resolves *closed_fut* when the
    transport is lost, so the owner can await full closure."""

    def __init__(self, closed_fut, *args, loop, **kw):
        self._closed_fut = closed_fut
        super().__init__(*args, loop=loop, **kw)

    def connection_lost(self, exc):
        """Event-loop callback fired once the transport is gone."""
        super().connection_lost(exc)
        # Guard with done() rather than cancelled(): done() also covers a
        # cancelled future, and additionally prevents InvalidStateError if
        # the future was already resolved (e.g. a repeated connection_lost
        # delivery).
        if not self._closed_fut.done():
            self._closed_fut.set_result(None)
class AIOKafkaConnection:
"""Class for manage connection to Kafka node"""
HEADER = struct.Struct('>i')
log = logging.getLogger(__name__)
def __init__(self, host, port, *, loop, client_id='aiokafka',
             request_timeout_ms=40000, api_version=(0, 8, 2),
             ssl_context=None, security_protocol="PLAINTEXT",
             max_idle_ms=None, on_close=None):
    """Store connection settings; no I/O happens until ``connect()``.

    Args:
        host, port: broker address.
        loop: asyncio event loop driving this connection (keyword-only).
        client_id: client id string sent in every request header.
        request_timeout_ms: per-request timeout, milliseconds.
        api_version: broker protocol version tuple (affects 0.8.2 quirks).
        ssl_context: required when ``security_protocol == "SSL"``.
        security_protocol: "PLAINTEXT" or "SSL".
        max_idle_ms: close the connection after this long with no
            traffic; None disables the idle checker.
        on_close: callback ``(conn, reason)`` invoked once when the
            connection is closed.
    """
    self._loop = loop
    self._host = host
    self._port = port
    self._request_timeout = request_timeout_ms / 1000
    self._api_version = api_version
    self._client_id = client_id
    self._ssl_context = ssl_context
    # NOTE: attribute name is historically misspelled ("secutity");
    # kept as-is because other methods reference it.
    self._secutity_protocol = security_protocol
    self._reader = self._writer = self._protocol = None
    # FIFO of (correlation_id, response_type, future) for in-flight requests
    self._requests = []
    self._read_task = None
    self._correlation_id = 0
    self._closed_fut = None
    self._max_idle_ms = max_idle_ms
    self._last_action = loop.time()
    self._idle_handle = None
    self._on_close_cb = on_close
@asyncio.coroutine
def connect(self):
    """Open the transport (plain or TLS), start the background reader
    task and, if configured, the idle checker.

    Returns ``(reader, writer)``.  Connection establishment is bounded
    by the request timeout.
    """
    loop = self._loop
    self._closed_fut = asyncio.Future(loop=loop)
    if self._secutity_protocol == "PLAINTEXT":
        ssl = None
    else:
        assert self._secutity_protocol == "SSL"
        assert self._ssl_context is not None
        ssl = self._ssl_context
    # Create streams same as `open_connection`, but using custom protocol
    reader = asyncio.StreamReader(limit=READER_LIMIT, loop=loop)
    protocol = AIOKafkaProtocol(self._closed_fut, reader, loop=loop)
    transport, _ = yield from asyncio.wait_for(
        loop.create_connection(
            lambda: protocol, self.host, self.port, ssl=ssl),
        loop=loop, timeout=self._request_timeout)
    writer = asyncio.StreamWriter(transport, protocol, reader, loop)
    self._reader, self._writer, self._protocol = reader, writer, protocol
    # Start reader task.
    self._read_task = ensure_future(self._read(), loop=loop)
    # Start idle checker
    if self._max_idle_ms is not None:
        self._idle_handle = self._loop.call_soon(self._idle_check)
    return reader, writer
def _idle_check(self):
    """Periodic callback: drop the connection once it has been idle for
    ``max_idle_ms``; otherwise reschedule itself."""
    max_idle = self._max_idle_ms / 1000
    idle_for = self._loop.time() - self._last_action
    # Any in-flight request means we are not idle; it is up to
    # `request_timeout_ms` to break a stalled request.
    if idle_for >= max_idle and not self._requests:
        self.close(CloseReason.IDLE_DROP)
        return
    if self._requests:
        # With pending requests we must wait at least a full idle
        # interval anyway; the setting is usually large, so this keeps
        # wakeups cheap.
        next_check_in = max_idle
    else:
        next_check_in = max_idle - idle_for
    self._idle_handle = self._loop.call_later(
        next_check_in, self._idle_check)
def __repr__(self):
    """Debug representation showing the broker endpoint."""
    return "<AIOKafkaConnection host={} port={}>".format(
        self.host, self.port)
@property
def host(self):
    # Broker hostname or IP this connection targets.
    return self._host
@property
def port(self):
    # Broker TCP port this connection targets.
    return self._port
def send(self, request, expect_response=True):
    """Serialize *request* and write it to the socket.

    Returns an awaitable: the writer's ``drain()`` when
    ``expect_response`` is False, otherwise a future resolved with the
    decoded response (bounded by ``request_timeout_ms``).

    Raises:
        Errors.ConnectionError: if not connected, or if the write fails
            (in which case the connection is also closed).
    """
    if self._writer is None:
        raise Errors.ConnectionError(
            "No connection to broker at {0}:{1}"
            .format(self._host, self._port))
    correlation_id = self._next_correlation_id()
    header = RequestHeader(request,
                           correlation_id=correlation_id,
                           client_id=self._client_id)
    # Kafka wire format: 4-byte big-endian length prefix, then payload.
    message = header.encode() + request.encode()
    size = self.HEADER.pack(len(message))
    try:
        self._writer.write(size + message)
    except OSError as err:
        self.close(reason=CloseReason.CONNECTION_BROKEN)
        raise Errors.ConnectionError(
            "Connection at {0}:{1} broken: {2}".format(
                self._host, self._port, err))
    if not expect_response:
        return self._writer.drain()
    # The reader task resolves this future when the matching response
    # (same correlation id, FIFO order) arrives.
    fut = asyncio.Future(loop=self._loop)
    self._requests.append((correlation_id, request.RESPONSE_TYPE, fut))
    return asyncio.wait_for(fut, self._request_timeout, loop=self._loop)
def connected(self):
    """True while the reader stream exists and has not reached EOF."""
    if self._reader is None:
        return False
    return not self._reader.at_eof()
def close(self, reason=None):
    """Tear down the connection and fail all in-flight requests.

    Safe to call multiple times: the reader/writer teardown and the
    on-close callback run only on the first call, but the closing
    future is always returned.  *reason* is a ``CloseReason`` value
    forwarded to the on-close callback.
    """
    if self._reader is not None:
        self._writer.close()
        self._writer = self._reader = None
        # Stop the background reader loop before failing its futures.
        self._read_task.cancel()
        self._read_task = None
        error = Errors.ConnectionError(
            "Connection at {0}:{1} closed".format(
                self._host, self._port))
        # Fail every request still waiting for a response so callers
        # are not left hanging on their futures.
        for _, _, fut in self._requests:
            if not fut.done():
                fut.set_exception(error)
        self._requests = []
        # Fire the callback exactly once, then drop the reference.
        if self._on_close_cb is not None:
            self._on_close_cb(self, reason)
            self._on_close_cb = None
    if self._idle_handle is not None:
        self._idle_handle.cancel()
    # transport.close() will close socket, but not right ahead. Return
    # a future in case we need to wait on it.
    return self._closed_fut
@asyncio.coroutine
def _read(self):
    """Background task: read length-prefixed responses off the socket
    and resolve the matching request futures in FIFO order.

    Runs until the connection breaks (every pending future is failed
    and the connection is closed) or the task is cancelled by
    ``close()``.
    """
    try:
        while True:
            # Frame format: 4-byte big-endian length, then a payload
            # whose first 4 bytes are the correlation id.
            resp = yield from self._reader.readexactly(4)
            size, = self.HEADER.unpack(resp)
            resp = yield from self._reader.readexactly(size)
            recv_correlation_id, = self.HEADER.unpack(resp[:4])

            # Responses arrive in request order, so pair with the
            # oldest pending request.
            correlation_id, resp_type, fut = self._requests.pop(0)
            if (self._api_version == (0, 8, 2) and
                    resp_type is GroupCoordinatorResponse and
                    correlation_id != 0 and recv_correlation_id == 0):
                self.log.warning(
                    'Kafka 0.8.2 quirk -- GroupCoordinatorResponse'
                    ' correlation id does not match request. This'
                    ' should go away once at least one topic has been'
                    ' initialized on the broker')
            elif correlation_id != recv_correlation_id:
                error = Errors.CorrelationIdError(
                    'Correlation ids do not match: sent {}, recv {}'
                    .format(correlation_id, recv_correlation_id))
                if not fut.done():
                    fut.set_exception(error)
                self.close(reason=CloseReason.OUT_OF_SYNC)
                break

            if not fut.done():
                response = resp_type.decode(resp[4:])
                self.log.debug('%s Response %d: %s',
                               self, correlation_id, response)
                fut.set_result(response)

            # Update idle timer.
            self._last_action = self._loop.time()
    except (OSError, EOFError, ConnectionError) as exc:
        conn_exc = Errors.ConnectionError(
            "Connection at {0}:{1} broken".format(self._host, self._port))
        conn_exc.__cause__ = exc
        conn_exc.__context__ = exc
        for _, _, fut in self._requests:
            # Guard against futures that are already resolved or were
            # cancelled (e.g. by the asyncio.wait_for request timeout):
            # set_exception() on a done future raises InvalidStateError.
            if not fut.done():
                fut.set_exception(conn_exc)
        self.close(reason=CloseReason.CONNECTION_BROKEN)
    except asyncio.CancelledError:
        pass
def _next_correlation_id(self):
    """Advance and return the wrapping request correlation counter."""
    # Kafka correlation ids are signed 32-bit, so wrap at 2**31.
    next_id = (self._correlation_id + 1) % 2 ** 31
    self._correlation_id = next_id
    return next_id
| 37.273469 | 79 | 0.597459 |
e4755b3d1f8697f9766a2ec817d280ae0ba07d6f | 41,871 | py | Python | yt_dlp/extractor/extractors.py | ITZNEON12/yt-dlp | d76d15a6699dc41eea26a96d054a1b7bcb12c69b | [
"Unlicense"
] | 2 | 2021-08-24T16:53:31.000Z | 2021-12-17T16:55:04.000Z | yt_dlp/extractor/extractors.py | ITZNEON12/yt-dlp | d76d15a6699dc41eea26a96d054a1b7bcb12c69b | [
"Unlicense"
] | null | null | null | yt_dlp/extractor/extractors.py | ITZNEON12/yt-dlp | d76d15a6699dc41eea26a96d054a1b7bcb12c69b | [
"Unlicense"
] | 1 | 2021-07-20T14:58:10.000Z | 2021-07-20T14:58:10.000Z | # flake8: noqa
from __future__ import unicode_literals
from .abc import (
ABCIE,
ABCIViewIE,
)
from .abcnews import (
AbcNewsIE,
AbcNewsVideoIE,
)
from .abcotvs import (
ABCOTVSIE,
ABCOTVSClipsIE,
)
from .academicearth import AcademicEarthCourseIE
from .acast import (
ACastIE,
ACastChannelIE,
)
from .adn import ADNIE
from .adobeconnect import AdobeConnectIE
from .adobetv import (
AdobeTVEmbedIE,
AdobeTVIE,
AdobeTVShowIE,
AdobeTVChannelIE,
AdobeTVVideoIE,
)
from .adultswim import AdultSwimIE
from .aenetworks import (
AENetworksIE,
AENetworksCollectionIE,
AENetworksShowIE,
HistoryTopicIE,
HistoryPlayerIE,
BiographyIE,
)
from .afreecatv import AfreecaTVIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .amara import AmaraIE
from .alura import (
AluraIE,
AluraCourseIE
)
from .amcnetworks import AMCNetworksIE
from .animelab import (
AnimeLabIE,
AnimeLabShowsIE,
)
from .amazon import AmazonStoreIE
from .americastestkitchen import (
AmericasTestKitchenIE,
AmericasTestKitchenSeasonIE,
)
from .animeondemand import AnimeOnDemandIE
from .anvato import AnvatoIE
from .aol import AolIE
from .allocine import AllocineIE
from .aliexpress import AliExpressLiveIE
from .apa import APAIE
from .aparat import AparatIE
from .appleconnect import AppleConnectIE
from .appletrailers import (
AppleTrailersIE,
AppleTrailersSectionIE,
)
from .applepodcasts import ApplePodcastsIE
from .archiveorg import (
ArchiveOrgIE,
YoutubeWebArchiveIE,
)
from .arcpublishing import ArcPublishingIE
from .arkena import ArkenaIE
from .ard import (
ARDBetaMediathekIE,
ARDIE,
ARDMediathekIE,
)
from .arte import (
ArteTVIE,
ArteTVEmbedIE,
ArteTVPlaylistIE,
)
from .arnes import ArnesIE
from .asiancrush import (
AsianCrushIE,
AsianCrushPlaylistIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .atvat import ATVAtIE
from .audimedia import AudiMediaIE
from .audioboom import AudioBoomIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .audius import (
AudiusIE,
AudiusTrackIE,
AudiusPlaylistIE,
AudiusProfileIE,
)
from .awaan import (
AWAANIE,
AWAANVideoIE,
AWAANLiveIE,
AWAANSeasonIE,
)
from .azmedien import AZMedienIE
from .baidu import BaiduVideoIE
from .bandaichannel import BandaiChannelIE
from .bandcamp import (
BandcampIE,
BandcampAlbumIE,
BandcampWeeklyIE,
BandcampMusicIE,
)
from .bannedvideo import BannedVideoIE
from .bbc import (
BBCCoUkIE,
BBCCoUkArticleIE,
BBCCoUkIPlayerEpisodesIE,
BBCCoUkIPlayerGroupIE,
BBCCoUkPlaylistIE,
BBCIE,
)
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .bellmedia import BellMediaIE
from .beatport import BeatportIE
from .bet import BetIE
from .bfi import BFIPlayerIE
from .bfmtv import (
BFMTVIE,
BFMTVLiveIE,
BFMTVArticleIE,
)
from .bibeltv import BibelTVIE
from .bigflix import BigflixIE
from .bild import BildIE
from .bilibili import (
BiliBiliIE,
BiliBiliSearchIE,
BilibiliCategoryIE,
BiliBiliBangumiIE,
BilibiliAudioIE,
BilibiliAudioAlbumIE,
BiliBiliPlayerIE,
BilibiliChannelIE,
BiliIntlIE,
BiliIntlSeriesIE,
)
from .biobiochiletv import BioBioChileTVIE
from .bitchute import (
BitChuteIE,
BitChuteChannelIE,
)
from .bitwave import (
BitwaveReplayIE,
BitwaveStreamIE,
)
from .biqle import BIQLEIE
from .blackboardcollaborate import BlackboardCollaborateIE
from .bleacherreport import (
BleacherReportIE,
BleacherReportCMSIE,
)
from .blogger import BloggerIE
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bongacams import BongaCamsIE
from .bostonglobe import BostonGlobeIE
from .box import BoxIE
from .bpb import BpbIE
from .br import (
BRIE,
BRMediathekIE,
)
from .bravotv import BravoTVIE
from .breakcom import BreakIE
from .breitbart import BreitBartIE
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .businessinsider import BusinessInsiderIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .cableav import CableAVIE
from .cam4 import CAM4IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .cammodels import CamModelsIE
from .camwithher import CamWithHerIE
from .canalalpha import CanalAlphaIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .canvas import (
CanvasIE,
CanvasEenIE,
VrtNUIE,
DagelijkseKostIE,
)
from .carambatv import (
CarambaTVIE,
CarambaTVPageIE,
)
from .cartoonnetwork import CartoonNetworkIE
from .cbc import (
CBCIE,
CBCPlayerIE,
CBCGemIE,
CBCGemPlaylistIE,
CBCGemLiveIE,
)
from .cbs import CBSIE
from .cbslocal import (
CBSLocalIE,
CBSLocalArticleIE,
)
from .cbsinteractive import CBSInteractiveIE
from .cbsnews import (
CBSNewsEmbedIE,
CBSNewsIE,
CBSNewsLiveVideoIE,
)
from .cbssports import (
CBSSportsEmbedIE,
CBSSportsIE,
TwentyFourSevenSportsIE,
)
from .ccc import (
CCCIE,
CCCPlaylistIE,
)
from .ccma import CCMAIE
from .cctv import CCTVIE
from .cda import CDAIE
from .ceskatelevize import CeskaTelevizeIE
from .cgtn import CGTNIE
from .channel9 import Channel9IE
from .charlierose import CharlieRoseIE
from .chaturbate import ChaturbateIE
from .chilloutzone import ChilloutzoneIE
from .chingari import (
ChingariIE,
ChingariUserIE,
)
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .cinemax import CinemaxIE
from .ciscolive import (
CiscoLiveSessionIE,
CiscoLiveSearchIE,
)
from .ciscowebex import CiscoWebexIE
from .cjsw import CJSWIE
from .cliphunter import CliphunterIE
from .clippit import ClippitIE
from .cliprs import ClipRsIE
from .clipsyndicate import ClipsyndicateIE
from .closertotruth import CloserToTruthIE
from .cloudflarestream import CloudflareStreamIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .clyp import ClypIE
from .cmt import CMTIE
from .cnbc import (
CNBCIE,
CNBCVideoIE,
)
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .coub import CoubIE
from .comedycentral import (
ComedyCentralIE,
ComedyCentralTVIE,
)
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .commonprotocols import (
MmsIE,
RtmpIE,
ViewSourceIE,
)
from .condenast import CondeNastIE
from .contv import CONtvIE
from .corus import CorusIE
from .cozytv import CozyTVIE
from .cracked import CrackedIE
from .crackle import CrackleIE
from .crooksandliars import CrooksAndLiarsIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE,
CrunchyrollBetaIE,
CrunchyrollBetaShowIE,
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .ctv import CTVIE
from .ctvnews import CTVNewsIE
from .cultureunplugged import CultureUnpluggedIE
from .curiositystream import (
CuriosityStreamIE,
CuriosityStreamCollectionsIE,
CuriosityStreamSeriesIE,
)
from .cwtv import CWTVIE
from .dailymail import DailyMailIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .damtomo import (
DamtomoRecordIE,
DamtomoVideoIE,
)
from .daum import (
DaumIE,
DaumClipIE,
DaumPlaylistIE,
DaumUserIE,
)
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import (
DeezerPlaylistIE,
DeezerAlbumIE,
)
from .democracynow import DemocracynowIE
from .dfb import DFBIE
from .dhm import DHMIE
from .digg import DiggIE
from .dotsub import DotsubIE
from .douyutv import (
DouyuShowIE,
DouyuTVIE,
)
from .dplay import (
DPlayIE,
DiscoveryPlusIE,
HGTVDeIE,
ScienceChannelIE,
DIYNetworkIE,
AnimalPlanetIE,
DiscoveryPlusIndiaIE,
DiscoveryNetworksDeIE,
DiscoveryPlusItalyShowIE,
DiscoveryPlusIndiaShowIE,
)
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import (
DRTVIE,
DRTVLiveIE,
)
from .dtube import DTubeIE
from .dvtv import DVTVIE
from .duboku import (
DubokuIE,
DubokuPlaylistIE
)
from .dumpert import DumpertIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .discoverygo import (
DiscoveryGoIE,
DiscoveryGoPlaylistIE,
)
from .discoveryvr import DiscoveryVRIE
from .disney import DisneyIE
from .dispeak import DigitallySpeakingIE
from .doodstream import DoodStreamIE
from .dropbox import DropboxIE
from .dw import (
DWIE,
DWArticleIE,
)
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .egghead import (
EggheadCourseIE,
EggheadLessonIE,
)
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentube import (
EllenTubeIE,
EllenTubeVideoIE,
EllenTubePlaylistIE,
)
from .elonet import ElonetIE
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .engadget import EngadgetIE
from .epicon import (
EpiconIE,
EpiconSeriesIE,
)
from .eporner import EpornerIE
from .eroprofile import (
EroProfileIE,
EroProfileAlbumIE,
)
from .escapist import EscapistIE
from .espn import (
ESPNIE,
ESPNArticleIE,
FiveThirtyEightIE,
ESPNCricInfoIE,
)
from .esri import EsriVideoIE
from .europa import EuropaIE
from .euscreen import EUScreenIE
from .expotv import ExpoTVIE
from .expressen import ExpressenIE
from .extremetube import ExtremeTubeIE
from .eyedotv import EyedoTVIE
from .facebook import (
FacebookIE,
FacebookPluginsVideoIE,
)
from .fancode import (
FancodeVodIE,
FancodeLiveIE
)
from .faz import FazIE
from .fc2 import (
FC2IE,
FC2EmbedIE,
)
from .fczenit import FczenitIE
from .filmmodu import FilmmoduIE
from .filmon import (
FilmOnIE,
FilmOnChannelIE,
)
from .filmweb import FilmwebIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fivetv import FiveTVIE
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .formula1 import Formula1IE
from .fourtube import (
FourTubeIE,
PornTubeIE,
PornerBrosIE,
FuxIE,
)
from .fox import FOXIE
from .fox9 import (
FOX9IE,
FOX9NewsIE,
)
from .foxgay import FoxgayIE
from .foxnews import (
FoxNewsIE,
FoxNewsArticleIE,
)
from .foxsports import FoxSportsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
FranceTVIE,
FranceTVSiteIE,
FranceTVInfoIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freshlive import FreshLiveIE
from .frontendmasters import (
FrontendMastersIE,
FrontendMastersLessonIE,
FrontendMastersCourseIE
)
from .fujitv import FujiTVFODPlus7IE
from .funimation import (
FunimationIE,
FunimationPageIE,
FunimationShowIE,
)
from .funk import FunkIE
from .fusion import FusionIE
from .gab import (
GabTVIE,
GabIE,
)
from .gaia import GaiaIE
from .gameinformer import GameInformerIE
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gaskrank import GaskrankIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .gedidigital import GediDigitalIE
from .generic import GenericIE
from .gettr import GettrIE
from .gfycat import GfycatIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import (
GloboIE,
GloboArticleIE,
)
from .go import GoIE
from .godtube import GodTubeIE
from .gofile import GofileIE
from .golem import GolemIE
from .googledrive import GoogleDriveIE
from .googlepodcasts import (
GooglePodcastsIE,
GooglePodcastsFeedIE,
)
from .googlesearch import GoogleSearchIE
from .gopro import GoProIE
from .goshgay import GoshgayIE
from .gotostage import GoToStageIE
from .gputechconf import GPUTechConfIE
from .gronkh import GronkhIE
from .groupon import GrouponIE
from .hbo import HBOIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .hgtv import HGTVComShowIE
from .hketv import HKETVIE
from .hidive import HiDiveIE
from .historicfilms import HistoricFilmsIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hitrecord import HitRecordIE
from .hornbunny import HornBunnyIE
from .hotnewhiphop import HotNewHipHopIE
from .hotstar import (
HotStarIE,
HotStarPlaylistIE,
HotStarSeriesIE,
)
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .hrfensehen import HRFernsehenIE
from .hrti import (
HRTiIE,
HRTiPlaylistIE,
)
from .huajiao import HuajiaoIE
from .huffpost import HuffPostIE
from .hungama import (
HungamaIE,
HungamaSongIE,
HungamaAlbumPlaylistIE,
)
from .hypem import HypemIE
from .ichinanalive import (
IchinanaLiveIE,
IchinanaLiveClipIE,
)
from .ign import (
IGNIE,
IGNVideoIE,
IGNArticleIE,
)
from .iheart import (
IHeartRadioIE,
IHeartRadioPodcastIE,
)
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import (
ImgurIE,
ImgurAlbumIE,
ImgurGalleryIE,
)
from .ina import InaIE
from .inc import IncIE
from .indavideo import IndavideoEmbedIE
from .infoq import InfoQIE
from .instagram import (
InstagramIE,
InstagramIOSIE,
InstagramUserIE,
InstagramTagIE,
)
from .internazionale import InternazionaleIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import (
IPrimaIE,
IPrimaCNNIE
)
from .iqiyi import IqiyiIE
from .ir90tv import Ir90TvIE
from .itv import (
ITVIE,
ITVBTCCIE,
)
from .ivi import (
IviIE,
IviCompilationIE
)
from .ivideon import IvideonIE
from .iwara import IwaraIE
from .izlesene import IzleseneIE
from .jamendo import (
JamendoIE,
JamendoAlbumIE,
)
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .joj import JojIE
from .jwplatform import JWPlatformIE
from .kakao import KakaoIE
from .kaltura import KalturaIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .karrierevideos import KarriereVideosIE
from .keezmovies import KeezMoviesIE
from .ketnet import KetnetIE
from .khanacademy import (
KhanAcademyIE,
KhanAcademyUnitIE,
)
from .kickstarter import KickStarterIE
from .kinja import KinjaEmbedIE
from .kinopoisk import KinoPoiskIE
from .konserthusetplay import KonserthusetPlayIE
from .koo import KooIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .kusi import KUSIIE
from .kuwo import (
KuwoIE,
KuwoAlbumIE,
KuwoChartIE,
KuwoSingerIE,
KuwoCategoryIE,
KuwoMvIE,
)
from .la7 import (
LA7IE,
LA7PodcastEpisodeIE,
LA7PodcastIE,
)
from .laola1tv import (
Laola1TvEmbedIE,
Laola1TvIE,
EHFTVIE,
ITTFIE,
)
from .lbry import (
LBRYIE,
LBRYChannelIE,
)
from .lci import LCIIE
from .lcp import (
LcpPlayIE,
LcpIE,
)
from .lecture2go import Lecture2GoIE
from .lecturio import (
LecturioIE,
LecturioCourseIE,
LecturioDeCourseIE,
)
from .leeco import (
LeIE,
LePlaylistIE,
LetvCloudIE,
)
from .lego import LEGOIE
from .lemonde import LemondeIE
from .lenta import LentaIE
from .libraryofcongress import LibraryOfCongressIE
from .libsyn import LibsynIE
from .lifenews import (
LifeNewsIE,
LifeEmbedIE,
)
from .limelight import (
LimelightMediaIE,
LimelightChannelIE,
LimelightChannelListIE,
)
from .line import (
LineTVIE,
LineLiveIE,
LineLiveChannelIE,
)
from .linkedin import (
LinkedInIE,
LinkedInLearningIE,
LinkedInLearningCourseIE,
)
from .linuxacademy import LinuxAcademyIE
from .litv import LiTVIE
from .livejournal import LiveJournalIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .localnews8 import LocalNews8IE
from .lovehomeporn import LoveHomePornIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .magentamusik360 import MagentaMusik360IE
from .mailru import (
MailRuIE,
MailRuMusicIE,
MailRuMusicSearchIE,
)
from .malltv import MallTVIE
from .mangomolo import (
MangomoloVideoIE,
MangomoloLiveIE,
)
from .manoto import (
ManotoTVIE,
ManotoTVShowIE,
ManotoTVLiveIE,
)
from .manyvids import ManyVidsIE
from .maoritv import MaoriTVIE
from .markiza import (
MarkizaIE,
MarkizaPageIE,
)
from .massengeschmacktv import MassengeschmackTVIE
from .matchtv import MatchTVIE
from .mdr import MDRIE
from .medaltv import MedalTVIE
from .mediaite import MediaiteIE
from .mediaklikk import MediaKlikkIE
from .mediaset import (
MediasetIE,
MediasetShowIE,
)
from .mediasite import (
MediasiteIE,
MediasiteCatalogIE,
MediasiteNamedCatalogIE,
)
from .medici import MediciIE
from .megaphone import MegaphoneIE
from .meipai import MeipaiIE
from .melonvod import MelonVODIE
from .meta import METAIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .mgtv import MGTVIE
from .miaopai import MiaoPaiIE
from .microsoftstream import MicrosoftStreamIE
from .microsoftvirtualacademy import (
MicrosoftVirtualAcademyIE,
MicrosoftVirtualAcademyCourseIE,
)
from .mildom import (
MildomIE,
MildomVodIE,
MildomUserVodIE,
)
from .minds import (
MindsIE,
MindsChannelIE,
MindsGroupIE,
)
from .ministrygrid import MinistryGridIE
from .minoto import MinotoIE
from .miomio import MioMioIE
from .mirrativ import (
MirrativIE,
MirrativUserIE,
)
from .mit import TechTVMITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixch import MixchIE
from .mixcloud import (
MixcloudIE,
MixcloudUserIE,
MixcloudPlaylistIE,
)
from .mlb import (
MLBIE,
MLBVideoIE,
)
from .mlssoccer import MLSSoccerIE
from .mnet import MnetIE
from .moevideo import MoeVideoIE
from .mofosex import (
MofosexIE,
MofosexEmbedIE,
)
from .mojvideo import MojvideoIE
from .morningstar import MorningstarIE
from .motherless import (
MotherlessIE,
MotherlessGroupIE
)
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movingimage import MovingImageIE
from .msn import MSNIE
from .mtv import (
MTVIE,
MTVVideoIE,
MTVServicesEmbeddedIE,
MTVDEIE,
MTVJapanIE,
MTVItaliaIE,
MTVItaliaProgrammaIE,
)
from .muenchentv import MuenchenTVIE
from .musescore import MuseScoreIE
from .mwave import MwaveIE, MwaveMeetGreetIE
from .mxplayer import (
MxplayerIE,
MxplayerShowIE,
)
from .mychannels import MyChannelsIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvi import (
MyviIE,
MyviEmbedIE,
)
from .myvideoge import MyVideoGeIE
from .myvidster import MyVidsterIE
from .n1 import (
N1InfoAssetIE,
N1InfoIIE,
)
from .nate import (
NateIE,
NateProgramIE,
)
from .nationalgeographic import (
NationalGeographicVideoIE,
NationalGeographicTVIE,
)
from .naver import (
NaverIE,
NaverLiveIE,
)
from .nba import (
NBAWatchEmbedIE,
NBAWatchIE,
NBAWatchCollectionIE,
NBAEmbedIE,
NBAIE,
NBAChannelIE,
)
from .nbc import (
NBCIE,
NBCNewsIE,
NBCOlympicsIE,
NBCOlympicsStreamIE,
NBCSportsIE,
NBCSportsStreamIE,
NBCSportsVPlayerIE,
)
from .ndr import (
NDRIE,
NJoyIE,
NDREmbedBaseIE,
NDREmbedIE,
NJoyEmbedIE,
)
from .ndtv import NDTVIE
from .nebula import (
NebulaIE,
NebulaCollectionIE,
)
from .nerdcubed import NerdCubedFeedIE
from .netzkino import NetzkinoIE
from .neteasemusic import (
NetEaseMusicIE,
NetEaseMusicAlbumIE,
NetEaseMusicSingerIE,
NetEaseMusicListIE,
NetEaseMusicMvIE,
NetEaseMusicProgramIE,
NetEaseMusicDjRadioIE,
)
from .newgrounds import (
NewgroundsIE,
NewgroundsPlaylistIE,
NewgroundsUserIE,
)
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyIE,
NextTVIE,
)
from .nexx import (
NexxIE,
NexxEmbedIE,
)
from .nfhsnetwork import NFHSNetworkIE
from .nfl import (
NFLIE,
NFLArticleIE,
)
from .nhk import (
NhkVodIE,
NhkVodProgramIE,
)
from .nhl import NHLIE
from .nick import (
NickIE,
NickBrIE,
NickDeIE,
NickNightIE,
NickRuIE,
)
from .niconico import (
NiconicoIE,
NiconicoPlaylistIE,
NiconicoUserIE,
NicovideoSearchDateIE,
NicovideoSearchIE,
NicovideoSearchURLIE,
)
from .ninecninemedia import (
NineCNineMediaIE,
CPTwentyFourIE,
)
from .ninegag import NineGagIE
from .ninenow import NineNowIE
from .nintendo import NintendoIE
from .nitter import NitterIE
from .njpwworld import NJPWWorldIE
from .nobelprize import NobelPrizeIE
from .nonktube import NonkTubeIE
from .noovo import NoovoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .nova import (
NovaEmbedIE,
NovaIE,
)
from .novaplay import NovaPlayIE
from .nowness import (
NownessIE,
NownessPlaylistIE,
NownessSeriesIE,
)
from .noz import NozIE
from .npo import (
AndereTijdenIE,
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
SchoolTVIE,
HetKlokhuisIE,
VPROIE,
WNLIE,
)
from .npr import NprIE
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKSkoleIE,
NRKTVIE,
NRKTVDirekteIE,
NRKRadioPodkastIE,
NRKTVEpisodeIE,
NRKTVEpisodesIE,
NRKTVSeasonIE,
NRKTVSeriesIE,
)
from .nrl import NRLTVIE
from .ntvcojp import NTVCoJpCUIE
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import (
NYTimesIE,
NYTimesArticleIE,
NYTimesCookingIE,
)
from .nuvid import NuvidIE
from .nzherald import NZHeraldIE
from .nzz import NZZIE
from .odatv import OdaTVIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .olympics import OlympicsReplayIE
from .on24 import On24IE
from .ondemandkorea import OnDemandKoreaIE
from .onefootball import OneFootballIE
from .onet import (
OnetIE,
OnetChannelIE,
OnetMVPIE,
OnetPlIE,
)
from .onionstudios import OnionStudiosIE
from .ooyala import (
OoyalaIE,
OoyalaExternalIE,
)
from .openrec import (
OpenRecIE,
OpenRecCaptureIE,
)
from .ora import OraTVIE
from .orf import (
ORFTVthekIE,
ORFFM4IE,
ORFFM4StoryIE,
ORFOE1IE,
ORFOE3IE,
ORFNOEIE,
ORFWIEIE,
ORFBGLIE,
ORFOOEIE,
ORFSTMIE,
ORFKTNIE,
ORFSBGIE,
ORFTIRIE,
ORFVBGIE,
ORFIPTVIE,
)
from .outsidetv import OutsideTVIE
from .packtpub import (
PacktPubIE,
PacktPubCourseIE,
)
from .palcomp3 import (
PalcoMP3IE,
PalcoMP3ArtistIE,
PalcoMP3VideoIE,
)
from .pandoratv import PandoraTVIE
from .paramountplus import (
ParamountPlusIE,
ParamountPlusSeriesIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .parlview import ParlviewIE
from .patreon import (
PatreonIE,
PatreonUserIE
)
from .pbs import PBSIE
from .pearvideo import PearVideoIE
from .peertube import (
PeerTubeIE,
PeerTubePlaylistIE,
)
from .peertv import PeerTVIE
from .peloton import (
PelotonIE,
PelotonLiveIE
)
from .people import PeopleIE
from .performgroup import PerformGroupIE
from .periscope import (
PeriscopeIE,
PeriscopeUserIE,
)
from .philharmoniedeparis import PhilharmonieDeParisIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .picarto import (
PicartoIE,
PicartoVodIE,
)
from .piksel import PikselIE
from .pinkbike import PinkbikeIE
from .pinterest import (
PinterestIE,
PinterestCollectionIE,
)
from .pladform import PladformIE
from .planetmarathi import PlanetMarathiIE
from .platzi import (
PlatziIE,
PlatziCourseIE,
)
from .playfm import PlayFMIE
from .playplustv import PlayPlusTVIE
from .plays import PlaysTVIE
from .playstuff import PlayStuffIE
from .playtvak import PlaytvakIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .plutotv import PlutoTVIE
from .pluralsight import (
PluralsightIE,
PluralsightCourseIE,
)
from .podomatic import PodomaticIE
from .pokemon import (
PokemonIE,
PokemonWatchIE,
)
from .polsatgo import PolsatGoIE
from .polskieradio import (
PolskieRadioIE,
PolskieRadioCategoryIE,
PolskieRadioPlayerIE,
PolskieRadioPodcastIE,
PolskieRadioPodcastListIE,
PolskieRadioRadioKierowcowIE,
)
from .popcorntimes import PopcorntimesIE
from .popcorntv import PopcornTVIE
from .porn91 import Porn91IE
from .porncom import PornComIE
from .pornflip import PornFlipIE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubUserIE,
PornHubPlaylistIE,
PornHubPagedVideoListIE,
PornHubUserVideosUploadIE,
)
from .pornotube import PornotubeIE
from .pornovoisines import PornoVoisinesIE
from .pornoxo import PornoXOIE
from .puhutv import (
PuhuTVIE,
PuhuTVSerieIE,
)
from .presstv import PressTVIE
from .projectveritas import ProjectVeritasIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .qqmusic import (
QQMusicIE,
QQMusicSingerIE,
QQMusicAlbumIE,
QQMusicToplistIE,
QQMusicPlaylistIE,
)
from .r7 import (
R7IE,
R7ArticleIE,
)
from .radiko import RadikoIE, RadikoRadioIE
from .radiocanada import (
RadioCanadaIE,
RadioCanadaAudioVideoIE,
)
from .radiode import RadioDeIE
from .radiojavan import RadioJavanIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .radiozet import RadioZetPodcastIE
from .radiokapital import (
RadioKapitalIE,
RadioKapitalShowIE,
)
from .radlive import (
RadLiveIE,
RadLiveChannelIE,
RadLiveSeasonIE,
)
from .rai import (
RaiPlayIE,
RaiPlayLiveIE,
RaiPlayPlaylistIE,
RaiIE,
RaiPlayRadioIE,
RaiPlayRadioPlaylistIE,
)
from .raywenderlich import (
RayWenderlichIE,
RayWenderlichCourseIE,
)
from .rbmaradio import RBMARadioIE
from .rcs import (
RCSIE,
RCSEmbedsIE,
RCSVariousIE,
)
from .rcti import (
RCTIPlusIE,
RCTIPlusSeriesIE,
RCTIPlusTVIE,
)
from .rds import RDSIE
from .redbulltv import (
RedBullTVIE,
RedBullEmbedIE,
RedBullTVRrnContentIE,
RedBullIE,
)
from .reddit import RedditIE
from .redgifs import (
RedGifsIE,
RedGifsSearchIE,
RedGifsUserIE,
)
from .redtube import RedTubeIE
from .regiotv import RegioTVIE
from .rentv import (
RENTVIE,
RENTVArticleIE,
)
from .restudy import RestudyIE
from .reuters import ReutersIE
from .reverbnation import ReverbNationIE
from .rice import RICEIE
from .rmcdecouverte import RMCDecouverteIE
from .ro220 import Ro220IE
from .rockstargames import RockstarGamesIE
from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rozhlas import RozhlasIE
from .rtbf import RTBFIE
from .rte import RteIE, RteRadioIE
from .rtlnl import RtlNlIE
from .rtl2 import (
RTL2IE,
RTL2YouIE,
RTL2YouSeriesIE,
)
from .rtp import RTPIE
from .rtrfm import RTRFMIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVETelevisionIE
from .rtvnh import RTVNHIE
from .rtvs import RTVSIE
from .ruhd import RUHDIE
from .rumble import (
RumbleEmbedIE,
RumbleChannelIE,
)
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
RutubePlaylistIE,
RutubeTagsIE,
)
from .rutv import RUTVIE
from .ruutu import RuutuIE
from .ruv import RuvIE
from .safari import (
SafariIE,
SafariApiIE,
SafariCourseIE,
)
from .saitosan import SaitosanIE
from .samplefocus import SampleFocusIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .scrippsnetworks import (
ScrippsNetworksWatchIE,
ScrippsNetworksIE,
)
from .scte import (
SCTEIE,
SCTECourseIE,
)
from .seeker import SeekerIE
from .senategov import SenateISVPIE, SenateGovIE
from .sendtonews import SendtoNewsIE
from .servus import ServusIE
from .sevenplus import SevenPlusIE
from .sexu import SexuIE
from .seznamzpravy import (
SeznamZpravyIE,
SeznamZpravyArticleIE,
)
from .shahid import (
ShahidIE,
ShahidShowIE,
)
from .shared import (
SharedIE,
VivoIE,
)
from .shemaroome import ShemarooMeIE
from .showroomlive import ShowRoomLiveIE
from .simplecast import (
SimplecastIE,
SimplecastEpisodeIE,
SimplecastPodcastIE,
)
from .sina import SinaIE
from .sixplay import SixPlayIE
from .skeb import SkebIE
from .skyit import (
SkyItPlayerIE,
SkyItVideoIE,
SkyItVideoLiveIE,
SkyItIE,
SkyItAcademyIE,
SkyItArteIE,
CieloTVItIE,
TV8ItIE,
)
from .skylinewebcams import SkylineWebcamsIE
from .skynewsarabia import (
SkyNewsArabiaIE,
SkyNewsArabiaArticleIE,
)
from .skynewsau import SkyNewsAUIE
from .sky import (
SkyNewsIE,
SkyNewsStoryIE,
SkySportsIE,
SkySportsNewsIE,
)
from .slideshare import SlideshareIE
from .slideslive import SlidesLiveIE
from .slutload import SlutloadIE
from .snotr import SnotrIE
from .sohu import SohuIE
from .sonyliv import (
SonyLIVIE,
SonyLIVSeriesIE,
)
from .soundcloud import (
SoundcloudEmbedIE,
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudTrackStationIE,
SoundcloudPlaylistIE,
SoundcloudSearchIE,
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthParkDeIE,
SouthParkDkIE,
SouthParkEsIE,
SouthParkNlIE
)
from .sovietscloset import (
SovietsClosetIE,
SovietsClosetPlaylistIE
)
from .spankbang import (
SpankBangIE,
SpankBangPlaylistIE,
)
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE
from .spike import (
BellatorIE,
ParamountNetworkIE,
)
from .stitcher import (
StitcherIE,
StitcherShowIE,
)
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .spotify import (
SpotifyIE,
SpotifyShowIE,
)
from .spreaker import (
SpreakerIE,
SpreakerPageIE,
SpreakerShowIE,
SpreakerShowPageIE,
)
from .springboardplatform import SpringboardPlatformIE
from .sprout import SproutIE
from .srgssr import (
SRGSSRIE,
SRGSSRPlayIE,
)
from .srmediathek import SRMediathekIE
from .stanfordoc import StanfordOpenClassroomIE
from .startv import StarTVIE
from .steam import SteamIE
from .storyfire import (
StoryFireIE,
StoryFireUserIE,
StoryFireSeriesIE,
)
from .streamable import StreamableIE
from .streamanity import StreamanityIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streamff import StreamFFIE
from .streetvoice import StreetVoiceIE
from .stretchinternet import StretchInternetIE
from .stripchat import StripchatIE
from .stv import STVPlayerIE
from .sunporno import SunPornoIE
from .sverigesradio import (
SverigesRadioEpisodeIE,
SverigesRadioPublicationIE,
)
from .svt import (
SVTIE,
SVTPageIE,
SVTPlayIE,
SVTSeriesIE,
)
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tass import TassIE
from .tbs import TBSIE
from .tdslifeway import TDSLifewayIE
from .teachable import (
TeachableIE,
TeachableCourseIE,
)
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .teamtreehouse import TeamTreeHouseIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .tele5 import Tele5IE
from .tele13 import Tele13IE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telegraaf import TelegraafIE
from .telemb import TeleMBIE
from .telemundo import TelemundoIE
from .telequebec import (
TeleQuebecIE,
TeleQuebecSquatIE,
TeleQuebecEmissionIE,
TeleQuebecLiveIE,
TeleQuebecVideoIE,
)
from .teletask import TeleTaskIE
from .telewebion import TelewebionIE
from .tennistv import TennisTVIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .tf1 import TF1IE
from .tfo import TFOIE
from .theintercept import TheInterceptIE
from .theplatform import (
ThePlatformIE,
ThePlatformFeedIE,
)
from .thescene import TheSceneIE
from .thestar import TheStarIE
from .thesun import TheSunIE
from .theta import (
ThetaVideoIE,
ThetaStreamIE,
)
from .theweatherchannel import TheWeatherChannelIE
from .thisamericanlife import ThisAmericanLifeIE
from .thisav import ThisAVIE
from .thisoldhouse import ThisOldHouseIE
from .threespeak import (
ThreeSpeakIE,
ThreeSpeakUserIE,
)
from .threeqsdn import ThreeQSDNIE
from .tiktok import (
TikTokIE,
TikTokUserIE,
DouyinIE,
)
from .tinypic import TinyPicIE
from .tmz import TMZIE
from .tnaflix import (
TNAFlixNetworkEmbedIE,
TNAFlixIE,
EMPFlixIE,
MovieFapIE,
)
from .toggle import (
ToggleIE,
MeWatchIE,
)
from .toggo import (
ToggoIE,
)
from .tokentube import (
TokentubeIE,
TokentubeChannelIE
)
from .tonline import TOnlineIE
from .toongoggles import ToonGogglesIE
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trovo import (
TrovoIE,
TrovoVodIE,
TrovoChannelVodIE,
TrovoChannelClipIE,
)
from .trueid import TrueIDIE
from .trunews import TruNewsIE
from .trutv import TruTVIE
from .tube8 import Tube8IE
from .tubitv import (
TubiTvIE,
TubiTvShowIE,
)
from .tumblr import TumblrIE
from .tunein import (
TuneInClipIE,
TuneInStationIE,
TuneInProgramIE,
TuneInTopicIE,
TuneInShortenerIE,
)
from .tunepk import TunePkIE
from .turbo import TurboIE
from .tv2 import (
TV2IE,
TV2ArticleIE,
KatsomoIE,
MTVUutisetArticleIE,
)
from .tv2dk import (
TV2DKIE,
TV2DKBornholmPlayIE,
)
from .tv2hu import (
TV2HuIE,
TV2HuSeriesIE,
)
from .tv4 import TV4IE
from .tv5mondeplus import TV5MondePlusIE
from .tv5unis import (
TV5UnisVideoIE,
TV5UnisIE,
)
from .tva import (
TVAIE,
QubIE,
)
from .tvanouvelles import (
TVANouvellesIE,
TVANouvellesArticleIE,
)
from .tvc import (
TVCIE,
TVCArticleIE,
)
from .tver import TVerIE
from .tvigle import TvigleIE
from .tvland import TVLandIE
from .tvn24 import TVN24IE
from .tvnet import TVNetIE
from .tvnoe import TVNoeIE
from .tvnow import (
TVNowIE,
TVNowFilmIE,
TVNowNewIE,
TVNowSeasonIE,
TVNowAnnualIE,
TVNowShowIE,
)
from .tvp import (
TVPEmbedIE,
TVPIE,
TVPStreamIE,
TVPWebsiteIE,
)
from .tvplay import (
TVPlayIE,
ViafreeIE,
TVPlayHomeIE,
)
from .tvplayer import TVPlayerIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twentymin import TwentyMinutenIE
from .twentythreevideo import TwentyThreeVideoIE
from .twitcasting import (
TwitCastingIE,
TwitCastingLiveIE,
TwitCastingUserIE,
)
from .twitch import (
TwitchVodIE,
TwitchCollectionIE,
TwitchVideosIE,
TwitchVideosClipsIE,
TwitchVideosCollectionsIE,
TwitchStreamIE,
TwitchClipsIE,
)
from .twitter import (
TwitterCardIE,
TwitterIE,
TwitterAmplifyIE,
TwitterBroadcastIE,
TwitterShortenerIE,
)
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .udn import UDNEmbedIE
from .ufctv import (
UFCTVIE,
UFCArabiaIE,
)
from .ukcolumn import UkColumnIE
from .uktvplay import UKTVPlayIE
from .digiteka import DigitekaIE
from .dlive import (
DLiveVODIE,
DLiveStreamIE,
)
from .umg import UMGDeIE
from .unistra import UnistraIE
from .unity import UnityIE
from .uol import UOLIE
from .uplynk import (
UplynkIE,
UplynkPreplayIE,
)
from .urort import UrortIE
from .urplay import URPlayIE
from .usanetwork import USANetworkIE
from .usatoday import USATodayIE
from .ustream import UstreamIE, UstreamChannelIE
from .ustudio import (
UstudioIE,
UstudioEmbedIE,
)
from .utreon import UtreonIE
from .varzesh3 import Varzesh3IE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veo import VeoIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import (
VevoIE,
VevoPlaylistIE,
)
from .vgtv import (
BTArticleIE,
BTVestlendingenIE,
VGTVIE,
)
from .vh1 import VH1IE
from .vice import (
ViceIE,
ViceArticleIE,
ViceShowIE,
)
from .vidbit import VidbitIE
from .viddler import ViddlerIE
from .videa import VideaIE
from .videodetective import VideoDetectiveIE
from .videofyme import VideofyMeIE
from .videomore import (
VideomoreIE,
VideomoreVideoIE,
VideomoreSeasonIE,
)
from .videopress import VideoPressIE
from .vidio import (
VidioIE,
VidioPremierIE,
VidioLiveIE
)
from .vidlii import VidLiiIE
from .vier import VierIE, VierVideosIE
from .viewlift import (
ViewLiftIE,
ViewLiftEmbedIE,
)
from .viidea import ViideaIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoOndemandIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
VHXEmbedIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import (
VikiIE,
VikiChannelIE,
)
from .viqeo import ViqeoIE
from .viu import (
ViuIE,
ViuPlaylistIE,
ViuOTTIE,
)
from .vk import (
VKIE,
VKUserVideosIE,
VKWallPostIE,
)
from .vlive import (
VLiveIE,
VLivePostIE,
VLiveChannelIE,
)
from .vodlocker import VodlockerIE
from .vodpl import VODPlIE
from .vodplatform import VODPlatformIE
from .voicerepublic import VoiceRepublicIE
from .voicy import (
VoicyIE,
VoicyChannelIE,
)
from .voot import (
VootIE,
VootSeriesIE,
)
from .voxmedia import (
VoxMediaVolumeIE,
VoxMediaIE,
)
from .vrt import VRTIE
from .vrak import VrakIE
from .vrv import (
VRVIE,
VRVSeriesIE,
)
from .vshare import VShareIE
from .vtm import VTMIE
from .medialaan import MedialaanIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vupload import VuploadIE
from .vvvvid import (
VVVVIDIE,
VVVVIDShowIE,
)
from .vyborymos import VyboryMosIE
from .vzaar import VzaarIE
from .wakanim import WakanimIE
from .walla import WallaIE
from .washingtonpost import (
WashingtonPostIE,
WashingtonPostArticleIE,
)
from .wat import WatIE
from .watchbox import WatchBoxIE
from .watchindianporn import WatchIndianPornIE
from .wdr import (
WDRIE,
WDRPageIE,
WDRElefantIE,
WDRMobileIE,
)
from .webcaster import (
WebcasterIE,
WebcasterFeedIE,
)
from .webofstories import (
WebOfStoriesIE,
WebOfStoriesPlaylistIE,
)
from .weibo import (
WeiboIE,
WeiboMobileIE
)
from .weiqitv import WeiqiTVIE
from .willow import WillowIE
from .wimtv import WimTVIE
from .whowatch import WhoWatchIE
from .wistia import (
WistiaIE,
WistiaPlaylistIE,
)
from .worldstarhiphop import WorldStarHipHopIE
from .wppilot import (
WPPilotIE,
WPPilotChannelsIE,
)
from .wsj import (
WSJIE,
WSJArticleIE,
)
from .wwe import WWEIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xfileshare import XFileShareIE
from .xhamster import (
XHamsterIE,
XHamsterEmbedIE,
XHamsterUserIE,
)
from .xiami import (
XiamiSongIE,
XiamiAlbumIE,
XiamiArtistIE,
XiamiCollectionIE
)
from .ximalaya import (
XimalayaIE,
XimalayaAlbumIE
)
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xstream import XstreamIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xvideos import XVideosIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
YahooGyaOPlayerIE,
YahooGyaOIE,
YahooJapanNewsIE,
)
from .yandexdisk import YandexDiskIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
YandexMusicArtistTracksIE,
YandexMusicArtistAlbumsIE,
)
from .yandexvideo import (
YandexVideoIE,
ZenYandexIE,
ZenYandexChannelIE,
)
from .yapfiles import YapFilesIE
from .yesjapan import YesJapanIE
from .yinyuetai import YinYueTaiIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import (
YoukuIE,
YoukuShowIE,
)
from .younow import (
YouNowLiveIE,
YouNowChannelIE,
YouNowMomentIE,
)
from .youporn import YouPornIE
from .yourporn import YourPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeClipIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubeTabIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeYtBeIE,
YoutubeYtUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zattoo import (
BBVTVIE,
EinsUndEinsTVIE,
EWETVIE,
GlattvisionTVIE,
MNetTVIE,
MyVisionTVIE,
NetPlusIE,
OsnatelTVIE,
QuantumTVIE,
QuicklineIE,
QuicklineLiveIE,
SaltTVIE,
SAKTVIE,
VTXTVIE,
WalyTVIE,
ZattooIE,
ZattooLiveIE,
)
from .zdf import ZDFIE, ZDFChannelIE
from .zee5 import (
Zee5IE,
Zee5SeriesIE,
)
from .zhihu import ZhihuIE
from .zingmp3 import (
ZingMp3IE,
ZingMp3AlbumIE,
)
from .zoom import ZoomIE
from .zype import ZypeIE