hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4a1a1ef8f99f89b3757dd63b7c63fce1737020f4
| 2,343
|
py
|
Python
|
funcx_endpoint/funcx_endpoint/mock_broker/mock_broker.py
|
ravescovi/funcX
|
325b55248622ab455199492aafb0c01487f3c80c
|
[
"Apache-2.0"
] | 86
|
2019-05-16T21:48:37.000Z
|
2022-03-10T03:03:27.000Z
|
funcx_endpoint/funcx_endpoint/mock_broker/mock_broker.py
|
ravescovi/funcX
|
325b55248622ab455199492aafb0c01487f3c80c
|
[
"Apache-2.0"
] | 520
|
2019-06-05T15:42:59.000Z
|
2022-03-31T21:33:32.000Z
|
funcx_endpoint/funcx_endpoint/mock_broker/mock_broker.py
|
ravescovi/funcX
|
325b55248622ab455199492aafb0c01487f3c80c
|
[
"Apache-2.0"
] | 33
|
2019-07-21T20:26:14.000Z
|
2022-01-31T23:43:10.000Z
|
""" The broker service
This REST service fields incoming registration requests from endpoints,
creates an appropriate forwarder to which the endpoint can connect up.
"""
import bottle
from bottle import post, run, request, route
import argparse
import json
import uuid
import sys
from funcx_endpoint.mock_broker.forwarder import Forwarder, spawn_forwarder
@post('/register')
def register():
    """ Register an endpoint request

    1. Start an executor client object corresponding to the endpoint
    2. Pass connection info back as a json response.

    Returns:
        dict: the new 'endpoint_id' plus the forwarder's connection info.
    """
    print("Request: ", request)
    print("foo: ", request.app.ep_mapping)
    # Parse the request body exactly once and reuse the result.  The original
    # code called json.load(request.body) twice (once just to print it),
    # parsing the same payload redundantly.
    endpoint_details = json.load(request.body)
    print(endpoint_details)

    # Here we want to start an executor client.
    # Make sure to not put anything into the client, until after an interchange has
    # connected to avoid clogging up the pipe. Submits will block if the client has
    # no endpoint connected.
    endpoint_id = str(uuid.uuid4())
    fw = spawn_forwarder(request.app.address, endpoint_id=endpoint_id)
    connection_info = fw.connection_info
    ret_package = {'endpoint_id': endpoint_id}
    ret_package.update(connection_info)
    print("Ret_package : ", ret_package)
    print("Ep_id: ", endpoint_id)
    request.app.ep_mapping[endpoint_id] = ret_package
    return ret_package
@route('/list_mappings')
def list_mappings():
    """Return the endpoint_id -> connection-info mapping built up by /register."""
    return request.app.ep_mapping
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--port", default=8088,
                        help="Port at which the service will listen on")
    parser.add_argument("-a", "--address", default='127.0.0.1',
                        help="Address at which the service is running")
    parser.add_argument("-c", "--config", default=None,
                        help="Config file")
    parser.add_argument("-d", "--debug", action='store_true',
                        help="Enables debug logging")
    args = parser.parse_args()

    app = bottle.default_app()
    # The registration handler reads these attributes off the app object.
    app.address = args.address
    app.ep_mapping = {}

    try:
        # Honor the -d flag; the original hardcoded debug=True, leaving the
        # parsed args.debug unused.
        run(host='localhost', app=app, port=int(args.port), debug=args.debug)
    except Exception as e:
        print("Caught exception : {}".format(e))
        # Use sys.exit with a conventional positive status instead of exit(-1)
        # (the site builtin 'exit' is not guaranteed outside interactive use).
        sys.exit(1)
| 30.828947
| 83
| 0.675203
|
4a1a1f4510503f4d61063e433d683afc119d333e
| 3,283
|
py
|
Python
|
matchzoo/data_generator/callbacks/dynamic_pooling.py
|
baajur/MatchZoo
|
fe0ccdd82500d116a7f945539ed05566fce90434
|
[
"Apache-2.0"
] | 2,209
|
2018-10-15T08:31:35.000Z
|
2022-03-31T14:29:11.000Z
|
matchzoo/data_generator/callbacks/dynamic_pooling.py
|
dutyhong/MatchZoo
|
2547f9d1b302d0f166508ba39fa659dfa210a276
|
[
"Apache-2.0"
] | 398
|
2018-10-15T07:35:01.000Z
|
2022-03-13T21:31:26.000Z
|
matchzoo/data_generator/callbacks/dynamic_pooling.py
|
dutyhong/MatchZoo
|
2547f9d1b302d0f166508ba39fa659dfa210a276
|
[
"Apache-2.0"
] | 535
|
2018-10-16T09:29:02.000Z
|
2022-03-31T02:12:52.000Z
|
import numpy as np
from matchzoo.data_generator.callbacks import Callback
class DynamicPooling(Callback):
    """:class:`DPoolPairDataGenerator` constructor.

    Data-generator callback that attaches a dynamic-pooling index grid to
    each unpacked batch under the key ``dpool_index``.

    :param fixed_length_left: max length of left text.
    :param fixed_length_right: max length of right text.
    :param compress_ratio_left: the length change ratio,
        especially after normal pooling layers.
    :param compress_ratio_right: the length change ratio,
        especially after normal pooling layers.
    """

    def __init__(
        self,
        fixed_length_left: int,
        fixed_length_right: int,
        compress_ratio_left: float = 1,
        compress_ratio_right: float = 1,
    ):
        """Init."""
        self._fixed_length_left = fixed_length_left
        self._fixed_length_right = fixed_length_right
        self._compress_ratio_left = compress_ratio_left
        self._compress_ratio_right = compress_ratio_right

    def on_batch_unpacked(self, x, y):
        """
        Insert `dpool_index` into `x`.

        :param x: unpacked x.
        :param y: unpacked y.
        """
        # x carries per-sample 'length_left' / 'length_right' arrays; the
        # module-level helper turns them into per-sample index grids.
        x['dpool_index'] = _dynamic_pooling_index(
            x['length_left'],
            x['length_right'],
            self._fixed_length_left,
            self._fixed_length_right,
            self._compress_ratio_left,
            self._compress_ratio_right
        )
def _dynamic_pooling_index(length_left: np.array,
                           length_right: np.array,
                           fixed_length_left: int,
                           fixed_length_right: int,
                           compress_ratio_left: float,
                           compress_ratio_right: float) -> np.array:
    """Build a dynamic-pooling index grid for every sample in the batch.

    Each sample gets an integer grid of shape
    (pooled_right, pooled_left, 2) mapping every pooled cell back onto a
    position within the (possibly shorter) actual left/right texts.
    """

    def _single_index(cur_left, cur_right, max_left, max_right):
        # A zero-length text degenerates to stride == full length, so every
        # pooled position maps back to index 0.
        stride_left = max_left if cur_left == 0 else 1.0 * max_left / cur_left
        stride_right = max_right if cur_right == 0 else 1.0 * max_right / cur_right
        idx_left = [int(pos / stride_left) for pos in range(max_left)]
        idx_right = [int(pos / stride_right) for pos in range(max_right)]
        grid_l, grid_r = np.meshgrid(idx_left, idx_right)
        return np.transpose(np.stack([grid_l, grid_r]), (2, 1, 0))

    # Round the pooled sizes up when the compression ratio does not divide
    # the fixed lengths evenly.
    bias_left = int(fixed_length_left % compress_ratio_left != 0)
    bias_right = int(fixed_length_right % compress_ratio_right != 0)
    pooled_left = int(fixed_length_left // compress_ratio_left) + bias_left
    pooled_right = int(fixed_length_right // compress_ratio_right) + bias_right

    return np.array([
        _single_index(length_left[i] // compress_ratio_left,
                      length_right[i] // compress_ratio_right,
                      pooled_left,
                      pooled_right)
        for i in range(len(length_left))
    ])
| 35.301075
| 70
| 0.614682
|
4a1a1ff4dc3cd7c2340004feb86a36272f831801
| 38
|
py
|
Python
|
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | 1
|
2022-03-25T08:16:35.000Z
|
2022-03-25T08:16:35.000Z
|
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | null | null | null |
wp api/errors.py
|
aouwalitshikkha/wp-gutenberg
|
fc1f94ccaede1fd7520645d0c8922cdeaaa28279
|
[
"MIT"
] | null | null | null |
class WpApiError(Exception):
    """Base exception type for WP API errors."""
    pass
| 12.666667
| 28
| 0.736842
|
4a1a20b9e6eddd07f3e797493f68092cafb615de
| 915
|
py
|
Python
|
nextstop/nextstop/urls.py
|
ericmhuntley/now-boarding
|
3c7bcecbca3ebd2ef1076049cd239b9b33fd0ba1
|
[
"MIT"
] | null | null | null |
nextstop/nextstop/urls.py
|
ericmhuntley/now-boarding
|
3c7bcecbca3ebd2ef1076049cd239b9b33fd0ba1
|
[
"MIT"
] | null | null | null |
nextstop/nextstop/urls.py
|
ericmhuntley/now-boarding
|
3c7bcecbca3ebd2ef1076049cd239b9b33fd0ba1
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, re_path
from django.conf.urls import url, include
from django.views.generic import RedirectView
from django.conf import settings
from django.conf.urls.static import static
from rest_framework import routers
from survey import views
# DRF router wiring: each registration exposes a viewset under its prefix.
router = routers.DefaultRouter()
router.register(r'responses', views.ResponseViewSet)
# manually specifying basename is necessary because of the RandomCardViewSet class' custom get_queryset
router.register(r'random', views.RandomCardViewSet, basename='random-list')
router.register(r'qcount', views.QuestionCountViewSet)
router.register(r'qacount', views.AnswerCountViewSet, basename='qa-list')

urlpatterns = [
    path('admin/', admin.site.urls),
    path('', RedirectView.as_view(url='/admin/')),
    # re_path replaces the deprecated django.conf.urls.url (removed in
    # Django 4.0); re_path is already imported at the top of this file and
    # is an exact drop-in here.
    re_path(r'^', include(router.urls)),
]

# Serve static files from STATIC_ROOT at STATIC_URL (dev convenience).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 38.125
| 103
| 0.793443
|
4a1a2181531418d39850d9929ab6777911cdafe0
| 13,804
|
py
|
Python
|
sbibm/utils/nflows.py
|
rdgao/sbibm
|
9b453b600d92ba5c213f7bcf35a93c9b96729b67
|
[
"MIT"
] | null | null | null |
sbibm/utils/nflows.py
|
rdgao/sbibm
|
9b453b600d92ba5c213f7bcf35a93c9b96729b67
|
[
"MIT"
] | null | null | null |
sbibm/utils/nflows.py
|
rdgao/sbibm
|
9b453b600d92ba5c213f7bcf35a93c9b96729b67
|
[
"MIT"
] | null | null | null |
from copy import deepcopy
from typing import Optional
import numpy as np
import torch
from nflows import distributions as distributions_
from nflows import flows, transforms
from nflows.distributions.base import Distribution
from nflows.nn import nets
from nflows.utils import torchutils
from sbi.utils.torchutils import create_alternating_binary_mask
from torch import distributions as dist
from torch import optim
from torch.nn import functional as F
from torch.nn.utils import clip_grad_norm_
from torch.utils import data
from torch.utils.data.sampler import SubsetRandomSampler
from tqdm import tqdm # noqa
from sbibm.utils.torch import get_default_device
def get_flow(
    model: str,
    dim_distribution: int,
    dim_context: Optional[int] = None,
    embedding: Optional[torch.nn.Module] = None,
    hidden_features: int = 50,
    made_num_mixture_components: int = 10,
    made_num_blocks: int = 4,
    flow_num_transforms: int = 5,
    mean=0.0,
    std=1.0,
) -> torch.nn.Module:
    """Density estimator

    Args:
        model: Model, one of maf / made / nsf (plus "nsf_bounded", see below)
        dim_distribution: Dim of distribution
        dim_context: Dim of context
        embedding: Embedding network
        hidden_features: For all, number of hidden features
        made_num_mixture_components: For MADEs only, number of mixture components
        made_num_blocks: For MADEs only, number of blocks
        flow_num_transforms: For flows only, number of transforms
        mean: For normalization
        std: For normalization

    Returns:
        Neural network

    Raises:
        ValueError: if `model` is not one of the supported names.
    """
    # Affine z-scoring applied before the flow proper: x -> (x - mean) / std.
    standardizing_transform = transforms.AffineTransform(
        shift=-mean / std, scale=1 / std
    )
    features = dim_distribution
    context_features = dim_context
    if model == "made":
        # Mixture-of-Gaussians MADE: the transform is just standardization;
        # all expressiveness lives in the conditional MoG base distribution.
        transform = standardizing_transform
        distribution = distributions_.MADEMoG(
            features=features,
            hidden_features=hidden_features,
            context_features=context_features,
            num_blocks=made_num_blocks,
            num_mixture_components=made_num_mixture_components,
            use_residual_blocks=True,
            random_mask=False,
            activation=torch.relu,
            dropout_probability=0.0,
            use_batch_norm=False,
            custom_initialization=True,
        )
        neural_net = flows.Flow(transform, distribution, embedding)
    elif model == "maf":
        # Masked autoregressive flow: alternate affine autoregressive layers
        # with random permutations of the feature order.
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.MaskedAffineAutoregressiveTransform(
                            features=features,
                            hidden_features=hidden_features,
                            context_features=context_features,
                            num_blocks=2,
                            use_residual_blocks=False,
                            random_mask=False,
                            activation=torch.tanh,
                            dropout_probability=0.0,
                            use_batch_norm=True,
                        ),
                        transforms.RandomPermutation(features=features),
                    ]
                )
                for _ in range(flow_num_transforms)
            ]
        )
        transform = transforms.CompositeTransform(
            [
                standardizing_transform,
                transform,
            ]
        )
        distribution = distributions_.StandardNormal((features,))
        neural_net = flows.Flow(transform, distribution, embedding)
    elif model == "nsf":
        # Neural spline flow: rational-quadratic coupling layers over
        # alternating binary masks plus LU-decomposed linear mixing.
        # NOTE: the lambda closes only over hidden_features/context_features
        # (loop-invariant), not the loop variable, so late binding is safe.
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.PiecewiseRationalQuadraticCouplingTransform(
                            mask=create_alternating_binary_mask(
                                features=features, even=(i % 2 == 0)
                            ),
                            transform_net_create_fn=lambda in_features, out_features: nets.ResidualNet(
                                in_features=in_features,
                                out_features=out_features,
                                hidden_features=hidden_features,
                                context_features=context_features,
                                num_blocks=2,
                                activation=torch.relu,
                                dropout_probability=0.0,
                                use_batch_norm=False,
                            ),
                            num_bins=10,
                            tails="linear",
                            tail_bound=3.0,
                            apply_unconditional_transform=False,
                        ),
                        transforms.LULinear(features, identity_init=True),
                    ]
                )
                for i in range(flow_num_transforms)
            ]
        )
        transform = transforms.CompositeTransform([standardizing_transform, transform])
        distribution = distributions_.StandardNormal((features,))
        neural_net = flows.Flow(transform, distribution, embedding)
    elif model == "nsf_bounded":
        # Spline flow with a uniform base on [-sqrt(3), sqrt(3)]^d (the
        # StandardUniform class defined later in this module), i.e. a flow
        # whose base density has bounded support.
        transform = transforms.CompositeTransform(
            [
                transforms.CompositeTransform(
                    [
                        transforms.PiecewiseRationalQuadraticCouplingTransform(
                            mask=create_alternating_binary_mask(
                                features=dim_distribution, even=(i % 2 == 0)
                            ),
                            transform_net_create_fn=lambda in_features, out_features: nets.ResidualNet(
                                in_features=in_features,
                                out_features=out_features,
                                hidden_features=hidden_features,
                                context_features=context_features,
                                num_blocks=2,
                                activation=F.relu,
                                dropout_probability=0.0,
                                use_batch_norm=False,
                            ),
                            num_bins=10,
                            tails="linear",
                            tail_bound=np.sqrt(
                                3
                            ),  # uniform with sqrt(3) bounds has unit-variance
                            apply_unconditional_transform=False,
                        ),
                        transforms.RandomPermutation(features=dim_distribution),
                    ]
                )
                for i in range(flow_num_transforms)
            ]
        )
        transform = transforms.CompositeTransform([standardizing_transform, transform])
        distribution = StandardUniform(shape=(dim_distribution,))
        neural_net = flows.Flow(transform, distribution, embedding)
    else:
        raise ValueError
    return neural_net
def train_flow(
    flow,
    dataset,
    batch_size=100,
    learning_rate=5e-4,
    validation_fraction=0.1,
    stop_after_epochs=20,
    clip_grad_norm=True,
    transform=False,
):
    """
    Train a normalizing flow with maximum likelihood

    Args:
        flow: nflows.flows.Flow
        dataset: torch.tensor()
        batch_size: size of the minibatch
        learning_rate: learning rate
        validation_fraction: fraction of datapoints to be used for validation
        stop_after_epochs: stop training after validation loss has not decreased for this many epochs
        clip_grad_norm: whether to clip the norm of the gradient
        transform: Optional transformation added to output of flow

    Returns:
        Trained flow (wrapped in FlowWrapper together with the transform)
    """
    # A falsy transform (False/None) means "no transform": use the identity.
    if transform is None or not transform:
        transform = dist.transforms.identity_transform
    # Train in the transformed (unconstrained) space.
    dataset = transform(dataset)
    # Get total number of training examples
    num_examples = dataset.shape[0]
    # Select random test and validation splits from (parameter, observation) pairs
    permuted_indices = torch.randperm(num_examples)
    num_training_examples = int((1 - validation_fraction) * num_examples)
    num_validation_examples = num_examples - num_training_examples
    train_indices, val_indices = (
        permuted_indices[:num_training_examples],
        permuted_indices[num_training_examples:],
    )
    device = get_default_device()
    dataset = dataset.to(device)
    # Dataset is shared for training and validation loaders.
    dataset = data.TensorDataset(dataset)
    flow = flow.to(device)
    # Create neural_net and validation loaders using a subset sampler.
    train_loader = data.DataLoader(
        dataset,
        batch_size=batch_size,
        drop_last=True,
        sampler=SubsetRandomSampler(train_indices),
    )
    val_loader = data.DataLoader(
        dataset,
        batch_size=min(batch_size, num_validation_examples),
        shuffle=False,
        drop_last=True,
        sampler=SubsetRandomSampler(val_indices),
    )
    optimizer = optim.Adam(
        list(flow.parameters()),
        lr=learning_rate,
    )
    # Keep track of best_validation log_prob seen so far.
    best_validation_log_prob = -1e100
    # Keep track of number of epochs since last improvement.
    epochs_since_last_improvement = 0
    # Keep track of model with best validation performance.
    best_model_state_dict = None
    # Each run also has a dictionary of summary statistics which are populated
    # over the course of training.
    summary = {
        "epochs": [],
        "best-validation-log-probs": [],
    }
    epochs = 0
    converged = False
    while not converged:
        # Train for a single epoch.
        flow.train()
        for batch in train_loader:
            optimizer.zero_grad()
            inputs = (batch[0],)  # .to(device),  (dataset was already moved)
            # just do maximum likelihood
            log_prob = flow.log_prob(inputs[0])
            loss = -torch.mean(log_prob)
            loss.backward()
            if clip_grad_norm:
                clip_grad_norm_(flow.parameters(), max_norm=5.0)
            optimizer.step()
        epochs += 1
        # Calculate validation performance.
        flow.eval()
        log_prob_sum = 0
        with torch.no_grad():
            for batch in val_loader:
                inputs = (batch[0].to(device),)
                # just do maximum likelihood in the first round
                log_prob = flow.log_prob(inputs[0])
                log_prob_sum += log_prob.sum().item()
        validation_log_prob = log_prob_sum / num_validation_examples
        print("Epoch:", epochs, " -- validation loss", -validation_log_prob)
        # Check for improvement in validation performance over previous epochs.
        if validation_log_prob > best_validation_log_prob:
            best_validation_log_prob = validation_log_prob
            epochs_since_last_improvement = 0
            best_model_state_dict = deepcopy(flow.state_dict())
        else:
            epochs_since_last_improvement += 1
        # If no validation improvement over many epochs, stop training and
        # restore the best snapshot (early stopping).
        if epochs_since_last_improvement > stop_after_epochs - 1:
            flow.load_state_dict(best_model_state_dict)
            converged = True
        # Update summary.
        summary["epochs"].append(epochs)
        summary["best-validation-log-probs"].append(best_validation_log_prob)
    # Wrap so callers sample / score in the original (untransformed) space.
    flow = FlowWrapper(flow, transform)
    return flow
class FlowWrapper:
    """Pair a trained flow with the transform used during training so that
    sampling and density evaluation happen in the original space."""

    def __init__(self, flow, transform):
        self.flow = flow
        self.transform = transform

    def sample(self, *args, **kwargs):
        """Draw samples from the flow and map them back through the
        inverse transform."""
        raw = self.flow.sample(*args, **kwargs)
        return self.transform.inv(raw)

    def log_prob(self, parameters_constrained):
        """Log density in the original space, corrected by the transform's
        change of variables."""
        unconstrained = self.transform(parameters_constrained)
        log_probs = self.flow.log_prob(unconstrained)
        # torch's Transform.log_abs_det_jacobian already sums over event
        # dimensions, so no extra axis reduction is needed here.
        log_probs += self.transform.log_abs_det_jacobian(
            parameters_constrained, unconstrained
        )
        return log_probs
class StandardUniform(Distribution):
    """A multivariate uniform on [-sqrt(3), sqrt(3)]^d with zero mean and
    unit variance per dimension.

    NOTE(review): the original docstring said "Normal", but the sampling and
    log-prob code below is clearly uniform with half-width sqrt(3) (which
    yields unit variance).
    """

    def __init__(self, shape):
        super().__init__()
        self._shape = torch.Size(shape)
        # Log of the constant density 1 / (2*sqrt(3))^d over the support.
        self._log_z = np.log(1 / (2 * np.sqrt(3)) ** shape[0])

    def _log_prob(self, inputs, context):
        # Vectorized replacement for the original per-row Python loop with
        # repeated torch.cat (quadratic in batch size).  Behavior is the
        # same: rows fully inside the support get the constant log-density;
        # rows outside get -1e10 (a large negative stand-in for -inf).
        bound = torch.sqrt(torch.tensor(3.0))
        inside = (torch.abs(inputs) < bound).reshape(inputs.shape[0], -1).all(dim=1)
        all_probs = torch.full((inputs.shape[0],), -1e10)
        all_probs[inside] = float(self._log_z)
        return all_probs

    def _sample(self, num_samples, context):
        if context is None:
            return (torch.rand(num_samples, *self._shape) - 0.5) * 2 * np.sqrt(3)
        else:
            # The value of the context is ignored, only its size is taken into account.
            context_size = context.shape[0]
            samples = (
                (torch.rand(context_size * num_samples, *self._shape) - 0.5)
                * 2
                * np.sqrt(3)
            )
            return torchutils.split_leading_dim(samples, [context_size, num_samples])

    def _mean(self, context):
        if context is None:
            return torch.zeros(self._shape)
        else:
            # The value of the context is ignored, only its size is taken into account.
            return torch.zeros(context.shape[0], *self._shape)
| 35.947917
| 117
| 0.589249
|
4a1a21e5142d023181f7e7006bfdadff8a82e2a2
| 653
|
py
|
Python
|
scene/intro.py
|
wonknu/head_shot_adfab
|
7e85c483ef3d4c8044e24f0132c0a2b700108d29
|
[
"MIT"
] | null | null | null |
scene/intro.py
|
wonknu/head_shot_adfab
|
7e85c483ef3d4c8044e24f0132c0a2b700108d29
|
[
"MIT"
] | null | null | null |
scene/intro.py
|
wonknu/head_shot_adfab
|
7e85c483ef3d4c8044e24f0132c0a2b700108d29
|
[
"MIT"
] | null | null | null |
import pygame
from utils.index import *
from constantes.index import *
from components.ui.button.index import *
class Intro:
    """Intro scene: renders a single "Play !" button and invokes *callback*
    with SCENE_GAME when it is clicked."""

    def __init__(self, screen, callback):
        # screen: pygame display surface to draw on.
        # callback: scene-switch function taking a scene identifier.
        self.screen = screen
        self.callback = callback
        self.btnPlay = Button()

    def draw(self, events):
        """Process click events and (re)draw the Play button each frame."""
        # NOTE(review): MOUSEBUTTONDOWN / SCENE_GAME / WINDOW_* presumably
        # come from the wildcard imports at the top of the file — verify.
        for event in events:
            if event.type == MOUSEBUTTONDOWN:
                if self.btnPlay.pressed(pygame.mouse.get_pos()):
                    self.callback(SCENE_GAME)
        # self, surface, color, x, y, length, height, width, text, text_color
        self.btnPlay.create_button(self.screen, (0,0,0), WINDOW_WIDTH * 0.5 - 50, WINDOW_HEIGHT * 0.5 - 35, 100, 30, 0, "Play !", (255,255,255))
| 32.65
| 140
| 0.672282
|
4a1a21faaf7785fbefeecd56d5f9061e84c9a6e8
| 695
|
py
|
Python
|
CodeWars/7-Mumbling.py
|
SabariVig/python-programs
|
682f11df8157d4a070c294eded341f86845e38a8
|
[
"MIT"
] | null | null | null |
CodeWars/7-Mumbling.py
|
SabariVig/python-programs
|
682f11df8157d4a070c294eded341f86845e38a8
|
[
"MIT"
] | 1
|
2018-07-11T10:49:29.000Z
|
2018-07-11T10:51:25.000Z
|
CodeWars/7-Mumbling.py
|
SabariVig/python-programs
|
682f11df8157d4a070c294eded341f86845e38a8
|
[
"MIT"
] | null | null | null |
def accum(s):
    """Mumble the first whitespace-separated word of *s*.

    Each character is repeated once more than its index, capitalized, and
    joined with dashes, e.g. "abcd" -> "A-Bb-Ccc-Dddd".
    """
    word = s.split()[0]
    return "-".join((ch * (k + 1)).title() for k, ch in enumerate(word))
# Quick manual checks against the expected CodeWars outputs; the extra
# cases below were left commented out by the author.
print(accum("ZpglnRxqenU"), "\nZ-Pp-Ggg-Llll-Nnnnn-Rrrrrr-Xxxxxxx-Qqqqqqqq-Eeeeeeeee-Nnnnnnnnnn-Uuuuuuuuuuu")
# print(accum("NyffsGeyylB"), "N-Yy-Fff-Ffff-Sssss-Gggggg-Eeeeeee-Yyyyyyyy-Yyyyyyyyy-Llllllllll-Bbbbbbbbbbb")
# print(accum("MjtkuBovqrU"), "M-Jj-Ttt-Kkkk-Uuuuu-Bbbbbb-Ooooooo-Vvvvvvvv-Qqqqqqqqq-Rrrrrrrrrr-Uuuuuuuuuuu")
# print(accum("EvidjUnokmM"), "E-Vv-Iii-Dddd-Jjjjj-Uuuuuu-Nnnnnnn-Oooooooo-Kkkkkkkkk-Mmmmmmmmmm-Mmmmmmmmmmm")
# print(accum("HbideVbxncC"), "H-Bb-Iii-Dddd-Eeeee-Vvvvvv-Bbbbbbb-Xxxxxxxx-Nnnnnnnnn-Cccccccccc-Ccccccccccc")
| 49.642857
| 110
| 0.702158
|
4a1a222603f5698a28add3d30da1c35bc3e2343d
| 1,945
|
py
|
Python
|
tests/test_spaces.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_spaces.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_spaces.py
|
uwe-iben/torchphysics
|
f0a56539cff331d49caaa90bc2fdd0d238b298f8
|
[
"Apache-2.0"
] | null | null | null |
from collections import Counter, OrderedDict
from torchphysics.problem.spaces.space import Space, R1, R2, R3
# Unit tests for torchphysics Space: a Counter/OrderedDict subclass mapping
# variable names to their dimensionalities, supporting products, slicing,
# containment and pickling; R1/R2/R3 are fixed-dimension conveniences.
def test_create_space():
    s = Space({'x': 1})
    assert isinstance(s, Counter)
    assert isinstance(s, OrderedDict)


def test_product_of_spaces():
    # Product concatenates variables; total dim is the sum of dims.
    s1 = Space({'x': 1})
    s2 = Space({'t': 3})
    s = s1 * s2
    assert s.dim == 4


def test_space_contains_variable_name():
    s = Space({'x':1, 'y': 2})
    assert 'x' in s
    assert not 't' in s


def test_space_get_variable_dimension():
    s = Space({'x':1, 'y': 2})
    assert s['x'] == 1


def test_space_get_variable_dimension_for_list():
    # Indexing with a list of names projects onto a sub-space.
    s = Space({'x':1, 'y': 2, 't': 4})
    s2 = s[['x', 'y']]
    assert isinstance(s2, Space)
    assert 'x' in s2
    assert 'y' in s2
    assert not 't' in s2


def test_space_slice():
    # Slices respect insertion order: [:'y'] is everything before 'y',
    # ['y':] is 'y' and everything after.
    s = Space({'x':1, 'y': 2})
    s2 = s[:'y']
    assert isinstance(s2, Space)
    assert 'x' in s2
    assert not 'y' in s2
    s2 = s['y':]
    assert isinstance(s2, Space)
    assert 'y' in s2
    assert not 'x' in s2


def test_space_contains_other_space():
    s = Space({'x':1, 'y': 2})
    s2 = Space({'x': 1})
    assert s2 in s


def test_space_doe_not_contain_other_objects():
    s = Space({'x':1, 'y': 2})
    assert not 5 in s


def test_space_get_variables():
    s = Space({'x':1, 'y': 2})
    assert 'x' in s.variables
    assert 'y' in s.variables


def test_space_serialize():
    # __reduce__ must round-trip through an OrderedDict payload.
    s = Space({'x':1, 'y': 2})
    s_cls, s_dict = s.__reduce__()
    assert s_cls == Space
    assert isinstance(s_dict[0], OrderedDict)


def test_create_R1():
    r = R1('x')
    assert isinstance(r, Counter)
    assert isinstance(r, OrderedDict)
    assert r.dim == 1


def test_create_R2():
    r = R2('x')
    assert isinstance(r, Counter)
    assert isinstance(r, OrderedDict)
    assert r.dim == 2


def test_create_R3():
    r = R3('x')
    assert isinstance(r, Counter)
    assert isinstance(r, OrderedDict)
    assert r.dim == 3
| 20.913978
| 63
| 0.601028
|
4a1a23e113b02e0ab3d29428e97908bccb25af31
| 2,451
|
py
|
Python
|
lvt/utils/afwa/templates/submit_lvt_conrad_3hr_noah.py
|
jvgeiger/lisf-test
|
59e0206a0c8ddef4704b172dfd220e6285442478
|
[
"Apache-2.0"
] | null | null | null |
lvt/utils/afwa/templates/submit_lvt_conrad_3hr_noah.py
|
jvgeiger/lisf-test
|
59e0206a0c8ddef4704b172dfd220e6285442478
|
[
"Apache-2.0"
] | null | null | null |
lvt/utils/afwa/templates/submit_lvt_conrad_3hr_noah.py
|
jvgeiger/lisf-test
|
59e0206a0c8ddef4704b172dfd220e6285442478
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import subprocess
import sys
import time
# LVT variable names to post-process; one PBS batch job is submitted per
# entry by the loop at the bottom of this script.
vars = ['RelSMC_inst','SmLiqFrac_inst',
        'SoilMoist_inst', 'SoilMoist_tavg',
        'SoilTemp_inst', 'SoilTemp_tavg',
        'RHMin_inst',
        'Albedo_tavg', 'AvgSurfT_inst', 'AvgSurfT_tavg',
        'CanopInt_inst', 'Elevation_inst', 'Evap_tavg',
        'LWdown_f_inst', 'LWdown_f_tavg',
        'Landcover_inst', 'Landmask_inst', 'PotEvap_tavg',
        'Psurf_f_inst', 'Psurf_f_tavg',
        'Qair_f_inst', 'Qair_f_tavg',
        'Qg_tavg', 'Qh_tavg', 'Qle_tavg', 'Qs_acc',
        'Qsb_acc', 'SWE_inst',
        'SWdown_f_inst', 'SWdown_f_tavg',
        'SnowDepth_inst', 'Snowcover_inst',
        'Soiltype_inst',
        'Tair_f_inst', 'Tair_f_max',
        'Tair_f_tavg',
        'TotalPrecip_acc', 'Wind_f_inst', 'Wind_f_tavg']
# Handle command line
def usage():
    """Print command-line usage for this script."""
    # Parenthesized single-argument print behaves identically under
    # Python 2 and Python 3, unlike the original bare print statements.
    print("Usage: %s chargecode queue" % (sys.argv[0]))
    print(" where chargecode is PBS project_code")
    print(" and queue is PBS queue OR reservation number")
# Require exactly two positional arguments: the PBS charge code and the
# queue (or reservation number).
if len(sys.argv) != 3:
    print "ERROR, problem with command line arguments!"
    usage()
    sys.exit(1)
project_code = sys.argv[1]
reservation = sys.argv[2]
# Make sure LVT executable is in place before launching jobs
if not os.path.exists("LVT"):
    print "ERROR, LVT executable does not exist!"
    sys.exit(1)
# Loop through each invocation, create a batch script, and launch the
# batch script.
# For each LVT variable: write a per-variable PBS batch script, then submit
# it with qsub. The embedded heredoc is the script's literal content, so its
# lines intentionally start at column zero.
for var in vars:
    scriptname = "run_lvt.%s_3hr.sh" %(var)
    f = open(scriptname,"w")
    line = """#!/bin/sh
#PBS -A %s\n""" %(project_code)
    line += """#PBS -j oe
#PBS -l walltime=0:15:00
#PBS -l select=1:ncpus=32
#PBS -N %s.3hr\n""" %(var)
    line += """#PBS -q %s\n""" %(reservation)
    line +="""#PBS -W sandbox=PRIVATE
#PBS -V
module use --append ~jim/README
module load lis_7_intel_17_0_2_174
ulimit -c unlimited
ulimit -m unlimited
ulimit -s unlimited
cd "$PBS_O_WORKDIR" || exit 1
echo `pwd`
if [ ! -e ./LVT ] ; then
echo "ERROR, LVT does not exist!" && exit 1
fi
if [ ! -e lvt.config.%s.3hr ] ; then
echo "ERROR, lvt.config.%s.3hr does not exist!" && exit 1
fi
aprun -n 1 -j 1 ./LVT lvt.config.%s.3hr || exit 1
exit 0
""" %(var,var,var)
    f.write(line)
    f.close()
    # Submit and bail out on the first qsub failure.
    cmd = "qsub %s" %(scriptname)
    print cmd
    rc = subprocess.call(cmd,shell=True)
    if rc != 0:
        print "[ERR] Problem with qsub!"
        sys.exit(1)
    time.sleep(1) # Don't overwhelm PBS!
| 26.641304
| 69
| 0.624643
|
4a1a240e085692be755251b0cd92c5d70955d6e9
| 1,824
|
py
|
Python
|
examples/load_ESL.py
|
abkoesdw/ml-datasets
|
c8c7b85ba8ed9c0ea233b4092d499d5022952011
|
[
"MIT"
] | 1
|
2020-07-05T04:58:07.000Z
|
2020-07-05T04:58:07.000Z
|
examples/load_ESL.py
|
abkoesdw/ml-datasets
|
c8c7b85ba8ed9c0ea233b4092d499d5022952011
|
[
"MIT"
] | null | null | null |
examples/load_ESL.py
|
abkoesdw/ml-datasets
|
c8c7b85ba8ed9c0ea233b4092d499d5022952011
|
[
"MIT"
] | null | null | null |
from ml_datasets.esl import Mixture
from ml_datasets.esl import ProstateCancer
from ml_datasets.esl import EmailSpam
from ml_datasets.esl import HandwrittenDigit
from ml_datasets.esl import NCI
from ml_datasets.utils import plot_2D, plot_dna
def main(**kwargs):
    """Load one of the ESL example datasets and print/plot a quick preview.

    Keyword Args:
        dataset: one of "mixture", "prostate", "spam", "digit", "nci";
            any other value is silently ignored.
    """
    name = kwargs.get("dataset", None)

    if name == "mixture":
        loader = Mixture(verbose=False, force=False)
        x, y = loader.load()
        print("x: {}, y: {}".format(x.shape, y.shape))
        plot_2D(x, y, "ESL-Mixture Dataset")
    elif name == "prostate":
        loader = ProstateCancer(verbose=False, force=False)
        frame = loader.load()
        print("columns: {}".format(loader.meta))
        print(loader.info)
    elif name == "spam":
        loader = EmailSpam(verbose=False, force=False)
        frame, x_train, y_train, x_test, y_test = loader.load()
        print("x_train: {}, y_train: {}".format(x_train.shape, y_train.shape))
        print("x_test: {}, y_test: {}".format(x_test.shape, y_test.shape))
    elif name == "digit":
        loader = HandwrittenDigit(verbose=False, force=False)
        x_train, y_train, x_test, y_test = loader.load()
        print("x_train: {}, y_train: {}".format(x_train.shape, y_train.shape))
        print("x_test: {}, y_test: {}".format(x_test.shape, y_test.shape))
    elif name == "nci":
        loader = NCI(verbose=False, force=False)
        frame, label = loader.load()
        plot_dna(frame, label)
if __name__ == "__main__":
    # Pick one dataset from the available list; index 4 selects "nci".
    available_dataset = ["mixture", "prostate", "spam", "digit", "nci"]
    dataset = available_dataset[4]
    main(dataset=dataset)
| 27.223881
| 78
| 0.582237
|
4a1a24785ccd960b090598e7f00b1710f8968a9a
| 80
|
py
|
Python
|
setup.py
|
Cold-Bore-Capital/cbc-dbmanager
|
d863b298badf4e32964a0ae1f1b9aef6dd51fa68
|
[
"MIT"
] | 1
|
2021-11-17T13:29:32.000Z
|
2021-11-17T13:29:32.000Z
|
setup.py
|
Cold-Bore-Capital/cbc-dbmanager
|
d863b298badf4e32964a0ae1f1b9aef6dd51fa68
|
[
"MIT"
] | null | null | null |
setup.py
|
Cold-Bore-Capital/cbc-dbmanager
|
d863b298badf4e32964a0ae1f1b9aef6dd51fa68
|
[
"MIT"
] | null | null | null |
from setuptools import setup
# Minimal packaging metadata for the cbcdb package; version, description
# and dependencies are intentionally omitted here.
setup(
    name='cbcdb',
    packages=['cbcdb']
)
| 11.428571
| 28
| 0.65
|
4a1a279b97c1fe9056acc7df716cba43ad1ceda9
| 1,493
|
py
|
Python
|
classy/migrations/0012_auto_20180219_1401.py
|
Krocodial/DSC
|
91063b06b536e732e655ce7f1ad0b7c2caa61e0d
|
[
"Apache-2.0"
] | null | null | null |
classy/migrations/0012_auto_20180219_1401.py
|
Krocodial/DSC
|
91063b06b536e732e655ce7f1ad0b7c2caa61e0d
|
[
"Apache-2.0"
] | null | null | null |
classy/migrations/0012_auto_20180219_1401.py
|
Krocodial/DSC
|
91063b06b536e732e655ce7f1ad0b7c2caa61e0d
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.1 on 2018-02-19 22:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: allows NULLs on several classification text
    # columns and switches the timestamp fields to auto_now_add.

    dependencies = [
        ('classy', '0011_classification_logs_o_classification'),
    ]

    operations = [
        migrations.AlterField(
            model_name='classification',
            name='category',
            field=models.CharField(max_length=40, null=True),
        ),
        migrations.AlterField(
            model_name='classification',
            name='column_name',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='classification',
            name='creation_date',
            field=models.DateTimeField(auto_now_add=True),
        ),
        migrations.AlterField(
            model_name='classification',
            name='datasource_description',
            field=models.CharField(max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='classification',
            name='schema',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='classification',
            name='table_name',
            field=models.CharField(max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='classification_logs',
            name='action_time',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| 30.469388
| 64
| 0.584729
|
4a1a284c083d51c0163be5f1e8a0183f6c157213
| 4,241
|
py
|
Python
|
ARIMA Model.py
|
Wathminishari/solar-forecasting-using-time-series
|
c3bca342cc3d70a1cbf1d60a506b0aa36db0858f
|
[
"Apache-2.0"
] | null | null | null |
ARIMA Model.py
|
Wathminishari/solar-forecasting-using-time-series
|
c3bca342cc3d70a1cbf1d60a506b0aa36db0858f
|
[
"Apache-2.0"
] | null | null | null |
ARIMA Model.py
|
Wathminishari/solar-forecasting-using-time-series
|
c3bca342cc3d70a1cbf1d60a506b0aa36db0858f
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
import matplotlib
import pylab
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime
import seaborn as sns
from scipy.stats import pearsonr
from matplotlib import cm as cm
import calendar
import warnings
import itertools
from statsmodels.tsa.stattools import adfuller
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.tsa.api as smt
from sklearn.metrics import mean_squared_error
import pandas as pd
import seaborn as sb
import itertools
import numpy as np
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from numpy import loadtxt
import os
import json
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.externals import joblib
import warnings
warnings.filterwarnings("ignore")
# Load one raw logger CSV per day for 2014-2015 and concatenate the
# measurement columns into a single frame. A regular timestamp index is
# synthesised from the file date plus the detected sampling cadence
# (the raw 'Date/time' column is not used as the index directly).
new_data = pd.DataFrame()
sample_times = []
for y in range(2014, 2016, 1):
    print (y)
    for m in range(1, 13, 1):
        # BUG FIX: month length must come from the year being loaded,
        # not hard-coded 2014 (matters for leap-year Februaries).
        no_of_days = calendar.monthrange(y, m)[1]
        for d in range(1, no_of_days + 1, 1):
            data = pd.read_csv("C:\\Users\\WATHMINI SHARI\\Desktop\\Semester 8\\Research_sem8\\Data\\D120318_%d%02d%02d_0000.csv"%(y,m,d))
            # Detect cadence from the gap between the first two rows:
            # 600 s -> 10-minute files (144 rows/day),
            # 60 s  -> 1-minute files (1440 rows/day).
            gap_seconds = (pd.to_datetime(data['Date/time'][2]) - pd.to_datetime(data['Date/time'][1])).seconds
            if gap_seconds == 600:
                rows_per_day, step = 144, datetime.timedelta(minutes=10)
            elif gap_seconds == 60:
                rows_per_day, step = 1440, datetime.timedelta(minutes=1)
            else:
                # Unknown cadence: skip the file (previously an implicit no-op).
                continue
            new_data_temp = data[['Date/time','Anemometer;wind_speed;Avg','Wind Vane;wind_direction;Avg','Hygro/Thermo;humidity;Avg', 'Hygro/Thermo;temperature;Avg','Barometer;air_pressure;Avg','Pyranometer-Diffused;solar_irradiance;Avg', 'Pyranometer-Global;solar_irradiance;Avg', 'Silicon;voltage;Avg']][0:rows_per_day].copy()
            new_data = new_data.append(new_data_temp)
            # Reconstructed timestamps start at 06:00 -- presumably the
            # logger's first record of the day; TODO confirm against the data.
            for i in range(len(new_data_temp)):
                sample_times.append(datetime.datetime(y, m, d, 6, 0, 0) + i * step)
new_data.columns = ['time','wind_speed','wind_dir','humidity','temperature','pressure','dhi','ghi','voltage']
sample_times_series = pd.Series(sample_times)
new_data['time'] = sample_times_series.values
# In[2]:
# Regularise the combined frame onto a uniform 10-minute grid: 1-minute
# days are mean-downsampled and any gaps in coverage become NaN rows.
new_data = new_data.reset_index().set_index('time').resample('10min').mean()
import numpy as np
import pandas as pd
import pytz
# Diffuse horizontal irradiance (dhi) is the series modelled below.
solar_irra= new_data.dhi.values
# In[3]:
# Walk-forward one-step-ahead forecasting: after each prediction the true
# observation is appended to the fitting window and an ARIMA(2,1,0) model
# is re-estimated from scratch for the next step.
split = int(len(solar_irra) * 0.75)
train = np.nan_to_num(solar_irra[0:split])
test = np.nan_to_num(solar_irra[split:len(solar_irra)])
history = list(train)
predictions = []
for observed in test:
    fitted = ARIMA(history, order=(2, 1, 0)).fit(disp=0)
    yhat = fitted.forecast()[0]
    predictions.append(yhat[0])
    history.append(observed)
    print('predicted=%f, expected=%f' % (yhat, observed))
pred = np.array(predictions)
# In[4]:
# Report the root-mean-square error of the walk-forward forecasts.
import math
mse = mean_squared_error(test, predictions)
error = math.sqrt(mse)
print('Test RMSE: %.3f' % error)
# In[5]:
# Overlay predicted vs. actual irradiance for visual inspection of fit.
plt.figure(figsize=(15,5))
plt.plot(predictions,color='blue',label='Predicted')
plt.plot(test,color='red',label='Actual')
plt.legend()
plt.xlabel('index')
plt.ylabel('Solar irradiance')
plt.show()
# In[ ]:
| 30.510791
| 323
| 0.723178
|
4a1a294091a63e04302dcd70ea0ca1e7a211dda7
| 123,236
|
py
|
Python
|
src/transformers/models/longformer/modeling_tf_longformer.py
|
terrenceedmonds/transformers
|
1c19b423bf274a465f95725a79819bf82f71329e
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/longformer/modeling_tf_longformer.py
|
terrenceedmonds/transformers
|
1c19b423bf274a465f95725a79819bf82f71329e
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/models/longformer/modeling_tf_longformer.py
|
terrenceedmonds/transformers
|
1c19b423bf274a465f95725a79819bf82f71329e
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow Longformer model. """
from dataclasses import dataclass
from typing import Optional, Tuple
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_longformer import LongformerConfig
# Module-level logger for this modeling file.
logger = logging.get_logger(__name__)

# Names substituted into the auto-generated code-sample docstrings.
_CONFIG_FOR_DOC = "LongformerConfig"
_TOKENIZER_FOR_DOC = "LongformerTokenizer"

# Canonical pretrained checkpoints available on the Hugging Face Hub.
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "allenai/longformer-base-4096",
    "allenai/longformer-large-4096",
    "allenai/longformer-large-4096-finetuned-triviaqa",
    "allenai/longformer-base-4096-extra.pos.embd.only",
    "allenai/longformer-large-4096-extra.pos.embd.only",
    # See all Longformer models at https://huggingface.co/models?filter=longformer
]
@dataclass
class TFLongformerBaseModelOutput(ModelOutput):
    """
    Base class for Longformer's outputs, with potential hidden states, local and global attentions.

    Args:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Hidden states from the last model layer.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights, one tensor per layer of shape :obj:`(batch_size, num_heads,
            sequence_length, x + attention_window + 1)` where ``x`` is the number of tokens with global
            attention. The first ``x`` slots hold attention to the globally-attending tokens; the
            remaining ``attention_window + 1`` slots hold attention to the local window in relative
            order (token-to-self at index ``x + attention_window / 2``). Slots that coincide with a
            global token are zeroed; read those values from :obj:`global_attentions` instead.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights, one tensor per layer of shape :obj:`(batch_size, num_heads,
            sequence_length, x)`: attention from every token with global attention to every token in
            the sequence.
    """

    last_hidden_state: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerBaseModelOutputWithPooling(ModelOutput):
    """
    Base class for Longformer's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
            Hidden states from the last model layer.
        pooler_output (:obj:`tf.Tensor` of shape :obj:`(batch_size, hidden_size)`):
            First-token (classification token) hidden state passed through the Linear + Tanh pooler
            head, which was trained on the next-sentence-prediction objective during pretraining.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    last_hidden_state: tf.Tensor = None
    pooler_output: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerMaskedLMOutput(ModelOutput):
    """
    Base class for masked language models outputs.

    Args:
        loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Masked language modeling (MLM) loss.
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
            Per-position vocabulary scores before SoftMax.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    loss: Optional[tf.Tensor] = None
    logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering Longformer models.

    Args:
        loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Total span extraction loss: the sum of the cross-entropy losses for the start and end
            positions.
        start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    loss: Optional[tf.Tensor] = None
    start_logits: tf.Tensor = None
    end_logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerSequenceClassifierOutput(ModelOutput):
    """
    Base class for outputs of sentence classification models.

    Args:
        loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    loss: Optional[tf.Tensor] = None
    logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerMultipleChoiceModelOutput(ModelOutput):
    """
    Base class for outputs of multiple choice models.

    Args:
        loss (:obj:`tf.Tensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided):
            Classification loss.
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
            Classification scores (before SoftMax); ``num_choices`` is the second dimension of the
            input tensors.
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    loss: Optional[tf.Tensor] = None
    logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
@dataclass
class TFLongformerTokenClassifierOutput(ModelOutput):
    """
    Base class for outputs of token classification models.

    Args:
        loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
            Classification loss.
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`):
            Per-token classification scores (before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True``):
            One tensor per layer plus the embedding output, each of shape :obj:`(batch_size,
            sequence_length, hidden_size)`.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Local attention weights per layer; see :class:`TFLongformerBaseModelOutput` for the exact
            layout of the last dimension.
        global_attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True``):
            Global attention weights per layer; see :class:`TFLongformerBaseModelOutput`.
    """

    loss: Optional[tf.Tensor] = None
    logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None
    global_attentions: Optional[Tuple[tf.Tensor]] = None
def _compute_global_attention_mask(input_ids_shape, sep_token_indices, before_sep_token=True):
    """
    Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
    True` else after `sep_token_id`.
    """
    assert shape_list(sep_token_indices)[1] == 2, "`input_ids` should have two dimensions"
    # NOTE(review): the reshape to (batch, 3, 2) assumes exactly three sep
    # tokens per sequence (QA-style "<s> question </s></s> context </s>"
    # inputs); [:, 0, 1] selects the position of the first sep token.
    question_end_index = tf.reshape(sep_token_indices, (input_ids_shape[0], 3, 2))[:, 0, 1]
    question_end_index = tf.cast(question_end_index[:, None], tf.dtypes.int32)  # size: batch_size x 1
    # bool attention mask with True in locations of global attention
    attention_mask = tf.range(input_ids_shape[1])
    if before_sep_token is True:
        # 1 for every position strictly before the first sep token.
        attention_mask = tf.cast(
            tf.broadcast_to(attention_mask, input_ids_shape) < tf.broadcast_to(question_end_index, input_ids_shape),
            tf.dtypes.int32,
        )
    else:
        # last token is separation token and should not be counted and in the middle are two separation tokens
        # (so global attention starts two positions after the first sep token
        # and never covers the final position).
        attention_mask = (
            tf.cast(
                tf.broadcast_to(attention_mask, input_ids_shape)
                > tf.broadcast_to(question_end_index + 1, input_ids_shape),
                tf.dtypes.int32,
            )
            * tf.cast(tf.broadcast_to(attention_mask, input_ids_shape) < input_ids_shape[-1], tf.dtypes.int32)
        )
    return attention_mask
# Copied from transformers.models.roberta.modeling_tf_roberta.TFRobertaLMHead
class TFLongformerLMHead(tf.keras.layers.Layer):
    """Roberta Head for masked language modeling."""

    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = config.vocab_size
        # Dense -> gelu -> LayerNorm transform applied before projecting the
        # hidden states back onto the vocabulary.
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
        self.act = get_tf_activation("gelu")

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = input_embeddings

    def build(self, input_shape):
        # Per-token output bias, created lazily so Keras registers it on build.
        self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")

        super().build(input_shape)

    def call(self, hidden_states):
        # Returns vocabulary logits of shape (batch, seq_len, vocab_size).
        hidden_states = self.dense(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.layer_norm(hidden_states)

        # project back to size of vocabulary with bias; "linear" mode reuses
        # the tied input-embedding matrix as the decoder weights.
        hidden_states = self.decoder(hidden_states, mode="linear") + self.bias

        return hidden_states
class TFLongformerEmbeddings(tf.keras.layers.Layer):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
    def __init__(self, config, **kwargs):
        """Create embedding sub-layers from the model ``config``."""
        super().__init__(**kwargs)
        # RoBERTa-style padding token id; position ids start at padding_idx + 1.
        self.padding_idx = 1
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.initializer_range = config.initializer_range
        # Learned absolute position embeddings (one slot per position up to
        # max_position_embeddings).
        self.position_embeddings = tf.keras.layers.Embedding(
            config.max_position_embeddings,
            config.hidden_size,
            embeddings_initializer=get_initializer(self.initializer_range),
            name="position_embeddings",
        )
        # Segment (token type) embeddings.
        self.token_type_embeddings = tf.keras.layers.Embedding(
            config.type_vocab_size,
            config.hidden_size,
            embeddings_initializer=get_initializer(self.initializer_range),
            name="token_type_embeddings",
        )

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
    def build(self, input_shape):
        """Build shared word embedding layer """
        # The word-embedding matrix is created here (rather than as a Keras
        # Embedding layer) so its weights can be shared with the LM head.
        with tf.name_scope("word_embeddings"):
            # Create and initialize weights. The random normal initializer was chosen
            # arbitrarily, and works well.
            self.word_embeddings = self.add_weight(
                "weight",
                shape=[self.vocab_size, self.hidden_size],
                initializer=get_initializer(self.initializer_range),
            )

        super().build(input_shape)
def create_position_ids_from_input_ids(self, input_ids):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
symbols are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: tf.Tensor
Returns: tf.Tensor
"""
input_ids_shape = shape_list(input_ids)
# multiple choice has 3 dimensions
if len(input_ids_shape) == 3:
input_ids = tf.reshape(input_ids, (input_ids_shape[0] * input_ids_shape[1], input_ids_shape[2]))
mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=tf.int32)
incremental_indices = tf.math.cumsum(mask, axis=1) * mask
return incremental_indices + self.padding_idx
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: tf.Tensor
Returns: tf.Tensor
"""
seq_length = shape_list(inputs_embeds)[1]
position_ids = tf.range(self.padding_idx + 1, seq_length + self.padding_idx + 1, dtype=tf.int32)[tf.newaxis, :]
return position_ids
def call(
self,
input_ids=None,
position_ids=None,
token_type_ids=None,
inputs_embeds=None,
mode="embedding",
training=False,
):
"""
Get token embeddings of inputs.
Args:
inputs: list of three int64 tensors with shape [batch_size, length]: (input_ids, position_ids, token_type_ids)
mode: string, a valid value is one of "embedding" and "linear".
Returns:
outputs: If mode == "embedding", output embedding tensor, float32 with shape [batch_size, length,
embedding_size]; if mode == "linear", output linear tensor, float32 with shape [batch_size, length,
vocab_size].
Raises:
ValueError: if mode is not valid.
Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
if mode == "embedding":
return self._embedding(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
elif mode == "linear":
return self._linear(input_ids)
else:
raise ValueError("mode {} is not valid.".format(mode))
def _embedding(self, input_ids, position_ids, token_type_ids, inputs_embeds, training=False):
"""Applies embedding based on inputs tensor."""
assert not (input_ids is None and inputs_embeds is None)
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = self.create_position_ids_from_input_ids(input_ids)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = shape_list(input_ids)
else:
input_shape = shape_list(inputs_embeds)[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = tf.range(seq_length, dtype=tf.int32)[tf.newaxis, :]
if token_type_ids is None:
token_type_ids = tf.fill(input_shape, 0)
if inputs_embeds is None:
inputs_embeds = tf.gather(self.word_embeddings, input_ids)
position_embeddings = tf.cast(self.position_embeddings(position_ids), inputs_embeds.dtype)
token_type_embeddings = tf.cast(self.token_type_embeddings(token_type_ids), inputs_embeds.dtype)
embeddings = inputs_embeds + position_embeddings + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings, training=training)
return embeddings
def _linear(self, inputs):
"""
Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [batch_size, length, hidden_size]
Returns:
float32 tensor with shape [batch_size, length, vocab_size].
"""
batch_size = shape_list(inputs)[0]
length = shape_list(inputs)[1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.word_embeddings, transpose_b=True)
return tf.reshape(logits, [batch_size, length, self.vocab_size])
# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate
class TFLongformerIntermediate(tf.keras.layers.Layer):
    """Position-wise feed-forward expansion (hidden_size -> intermediate_size) with activation."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.experimental.EinsumDense(
            equation="abc,cd->abd",
            bias_axes="d",
            output_shape=(None, config.intermediate_size),
            kernel_initializer=get_initializer(initializer_range=config.initializer_range),
            name="dense",
        )
        # `config.hidden_act` may be an activation name or a callable already.
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(activation_string=config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act

    def call(self, hidden_states):
        projected = self.dense(inputs=hidden_states)
        activated = self.intermediate_act_fn(inputs=projected)
        return activated
# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput
class TFLongformerOutput(tf.keras.layers.Layer):
    """Projects back to hidden_size, then applies dropout, residual add, and LayerNorm."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.experimental.EinsumDense(
            equation="abc,cd->abd",
            output_shape=(None, config.hidden_size),
            bias_axes="d",
            kernel_initializer=get_initializer(config.initializer_range),
            name="dense",
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def call(self, hidden_states, input_tensor, training=False):
        projected = self.dense(inputs=hidden_states)
        projected = self.dropout(inputs=projected, training=training)
        # Residual connection followed by layer normalization.
        return self.LayerNorm(inputs=projected + input_tensor)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler
class TFLongformerPooler(tf.keras.layers.Layer):
    """Pools a sequence by transforming the first token's hidden state through a tanh dense layer."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )

    def call(self, hidden_states):
        # "Pooling" is simply taking the hidden state corresponding to the first
        # token and running it through the dense layer.
        return self.dense(hidden_states[:, 0])
class TFLongformerSelfOutput(tf.keras.layers.Layer):
    """Output projection of self-attention with dropout, residual add, and LayerNorm."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)

    def call(self, hidden_states, input_tensor, training=False):
        projected = self.dense(hidden_states)
        projected = self.dropout(projected, training=training)
        # Add the residual before normalizing.
        return self.LayerNorm(projected + input_tensor)
class TFLongformerSelfAttention(tf.keras.layers.Layer):
    """Longformer self-attention: sliding-window local attention plus optional global attention.

    Each token attends to a fixed-size window around itself (``attention_window``);
    tokens flagged for global attention additionally attend to, and are attended by,
    every position, using the separate ``*_global`` projection layers.
    """
    def __init__(self, config, layer_id, **kwargs):
        """Create projection layers and read this layer's attention window from *config*."""
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_heads = config.num_attention_heads
        self.head_dim = int(config.hidden_size / config.num_attention_heads)
        self.embed_dim = config.hidden_size
        self.query = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query",
        )
        self.key = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key",
        )
        self.value = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value",
        )
        # separate projection layers for tokens with global attention
        self.query_global = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="query_global",
        )
        self.key_global = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="key_global",
        )
        self.value_global = tf.keras.layers.Dense(
            self.embed_dim,
            kernel_initializer=get_initializer(config.initializer_range),
            name="value_global",
        )
        self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.global_dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.layer_id = layer_id
        # config.attention_window holds one (even, positive) window size per layer.
        attention_window = config.attention_window[self.layer_id]
        assert (
            attention_window % 2 == 0
        ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
        assert (
            attention_window > 0
        ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"
        self.one_sided_attn_window_size = attention_window // 2
    def call(
        self,
        inputs,
        training=False,
    ):
        """
        LongformerSelfAttention expects `len(hidden_states)` to be multiple of `attention_window`. Padding to
        `attention_window` happens in LongformerModel.forward to avoid redoing the padding on each layer.
        The `attention_mask` is changed in :meth:`LongformerModel.forward` from 0, 1, 2 to:

            * -10000: no attention
            * 0: local attention
            * +10000: global attention
        """
        # retrieve input args
        (
            hidden_states,
            attention_mask,
            is_index_masked,
            is_index_global_attn,
            is_global_attn,
        ) = inputs
        # project hidden states
        query_vectors = self.query(hidden_states)
        key_vectors = self.key(hidden_states)
        value_vectors = self.value(hidden_states)
        batch_size, seq_len, embed_dim = shape_list(hidden_states)
        tf.debugging.assert_equal(
            embed_dim,
            self.embed_dim,
            message=f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}",
        )
        # normalize query (scaled dot-product attention scaling, applied to the query up front)
        query_vectors /= tf.math.sqrt(tf.convert_to_tensor(self.head_dim, dtype=tf.dtypes.float32))
        query_vectors = tf.reshape(query_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
        key_vectors = tf.reshape(key_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
        # attn_probs = (batch_size, seq_len, num_heads, window*2+1)
        attn_scores = self._sliding_chunks_query_key_matmul(
            query_vectors, key_vectors, self.one_sided_attn_window_size
        )
        # diagonal mask with zeros everywhere and -inf inplace of padding
        diagonal_mask = self._sliding_chunks_query_key_matmul(
            tf.ones(shape_list(attention_mask), dtype=tf.float32),
            attention_mask,
            self.one_sided_attn_window_size,
        )
        # pad local attention probs
        attn_scores += diagonal_mask
        tf.debugging.assert_equal(
            shape_list(attn_scores),
            [batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1],
            message=f"attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {shape_list(attn_scores)}",
        )
        # compute global attn indices required through out forward fn
        (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        ) = self._get_global_attn_indices(is_index_global_attn)
        # this function is only relevant for global attention
        # (tf.cond keeps the graph valid whether or not any token uses global attention)
        attn_scores = tf.cond(
            is_global_attn,
            lambda: self._concat_with_global_key_attn_probs(
                attn_scores=attn_scores,
                query_vectors=query_vectors,
                key_vectors=key_vectors,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
            ),
            lambda: attn_scores,
        )
        attn_probs = tf.nn.softmax(attn_scores, axis=-1)
        # softmax sometimes inserts NaN if all positions are masked, replace them with 0
        attn_probs = tf.where(
            tf.broadcast_to(is_index_masked[:, :, None, None], shape_list(attn_probs)),
            0.0,
            attn_probs,
        )
        # apply dropout
        attn_probs = self.dropout(attn_probs, training=training)
        value_vectors = tf.reshape(value_vectors, (batch_size, seq_len, self.num_heads, self.head_dim))
        # if global attention, compute sum of global and local attn
        attn_output = tf.cond(
            is_global_attn,
            lambda: self._compute_attn_output_with_global_indices(
                value_vectors=value_vectors,
                attn_probs=attn_probs,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
            ),
            lambda: self._sliding_chunks_matmul_attn_probs_value(
                attn_probs, value_vectors, self.one_sided_attn_window_size
            ),
        )
        tf.debugging.assert_equal(
            shape_list(attn_output),
            [batch_size, seq_len, self.num_heads, self.head_dim],
            message="Unexpected size",
        )
        attn_output = tf.reshape(attn_output, (batch_size, seq_len, embed_dim))
        # compute value for global attention and overwrite to attention output
        # TODO: remove the redundant computation
        attn_output, global_attn_probs = tf.cond(
            is_global_attn,
            lambda: self._compute_global_attn_output_from_hidden(
                attn_output=attn_output,
                hidden_states=hidden_states,
                max_num_global_attn_indices=max_num_global_attn_indices,
                is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero,
                is_index_global_attn_nonzero=is_index_global_attn_nonzero,
                is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero,
                is_index_masked=is_index_masked,
                training=training,
            ),
            lambda: (attn_output, tf.zeros((batch_size, self.num_heads, max_num_global_attn_indices, seq_len))),
        )
        # make sure that local attention probabilities are set to 0 for indices of global attn
        attn_probs = tf.where(
            tf.broadcast_to(is_index_global_attn[:, :, None, None], shape_list(attn_probs)),
            tf.zeros(shape_list(attn_probs), dtype=tf.dtypes.float32),
            attn_probs,
        )
        outputs = (attn_output, attn_probs, global_attn_probs)
        return outputs
    def _sliding_chunks_query_key_matmul(self, query, key, window_overlap):
        """
        Matrix multiplication of query and key tensors using with a sliding window attention pattern. This
        implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an
        overlap of size window_overlap
        """
        batch_size, seq_len, num_heads, head_dim = shape_list(query)
        tf.debugging.assert_equal(
            seq_len % (window_overlap * 2),
            0,
            message=f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}",
        )
        tf.debugging.assert_equal(
            shape_list(query),
            shape_list(key),
            message=f"Shape of query and key should be equal, but got query: {shape_list(query)} and key: {shape_list(key)}",
        )
        chunks_count = seq_len // window_overlap - 1
        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2
        query = tf.reshape(
            tf.transpose(query, (0, 2, 1, 3)),
            (batch_size * num_heads, seq_len, head_dim),
        )
        key = tf.reshape(tf.transpose(key, (0, 2, 1, 3)), (batch_size * num_heads, seq_len, head_dim))
        chunked_query = self._chunk(query, window_overlap)
        chunked_key = self._chunk(key, window_overlap)
        # matrix multiplication
        # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim
        # bcxy: batch_size * num_heads x chunks x 2window_overlap x 2window_overlap
        chunked_attention_scores = tf.einsum("bcxd,bcyd->bcxy", chunked_query, chunked_key)  # multiply
        # convert diagonals into columns
        paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 1], [0, 0]], dtype=tf.dtypes.int32)
        diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims(chunked_attention_scores, paddings)
        # allocate space for the overall attention matrix where the chunks are combined. The last dimension
        # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to
        # window_overlap previous words). The following column is attention score from each word to itself, then
        # followed by window_overlap columns for the upper triangle.
        # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions
        # - copying the main diagonal and the upper triangle
        # TODO: This code is most likely not very efficient and should be improved
        diagonal_attn_scores_up_triang = tf.concat(
            [
                diagonal_chunked_attention_scores[:, :, :window_overlap, : window_overlap + 1],
                diagonal_chunked_attention_scores[:, -1:, window_overlap:, : window_overlap + 1],
            ],
            axis=1,
        )
        # - copying the lower triangle
        diagonal_attn_scores_low_triang = tf.concat(
            [
                tf.zeros((batch_size * num_heads, 1, window_overlap, window_overlap)),
                diagonal_chunked_attention_scores[:, :, -(window_overlap + 1) : -1, window_overlap + 1 :],
            ],
            axis=1,
        )
        diagonal_attn_scores_first_chunk = tf.concat(
            [
                tf.roll(
                    diagonal_chunked_attention_scores,
                    shift=[1, window_overlap],
                    axis=[2, 3],
                )[:, :, :window_overlap, :window_overlap],
                tf.zeros((batch_size * num_heads, 1, window_overlap, window_overlap)),
            ],
            axis=1,
        )
        first_chunk_mask = (
            tf.broadcast_to(
                tf.range(chunks_count + 1)[None, :, None, None],
                shape=(
                    batch_size * num_heads,
                    chunks_count + 1,
                    window_overlap,
                    window_overlap,
                ),
            )
            < 1
        )
        diagonal_attn_scores_low_triang = tf.where(
            first_chunk_mask,
            diagonal_attn_scores_first_chunk,
            diagonal_attn_scores_low_triang,
        )
        # merging upper and lower triangle
        diagonal_attention_scores = tf.concat(
            [diagonal_attn_scores_low_triang, diagonal_attn_scores_up_triang], axis=-1
        )
        # separate batch_size and num_heads dimensions again
        diagonal_attention_scores = tf.transpose(
            tf.reshape(
                diagonal_attention_scores,
                (batch_size, num_heads, seq_len, 2 * window_overlap + 1),
            ),
            (0, 2, 1, 3),
        )
        diagonal_attention_scores = self._mask_invalid_locations(diagonal_attention_scores, window_overlap)
        return diagonal_attention_scores
    @staticmethod
    def _mask_invalid_locations(input_tensor, window_overlap):
        """Set window positions that overhang the sequence boundaries to -inf."""
        # create correct upper triangle bool mask
        mask_2d_upper = tf.reverse(
            tf.linalg.band_part(tf.ones(shape=(window_overlap, window_overlap + 1)), -1, 0),
            axis=[0],
        )
        # pad to full matrix
        padding = tf.convert_to_tensor(
            [[0, shape_list(input_tensor)[1] - window_overlap], [0, shape_list(input_tensor)[3] - window_overlap - 1]]
        )
        # create lower mask
        mask_2d = tf.pad(mask_2d_upper, padding)
        # combine with upper mask
        mask_2d = mask_2d + tf.reverse(mask_2d, axis=[0, 1])
        # broadcast to full matrix
        mask_4d = tf.broadcast_to(mask_2d[None, :, None, :], shape_list(input_tensor))
        # inf tensor used for masking
        inf_tensor = -float("inf") * tf.ones_like(input_tensor, dtype=tf.dtypes.float32)
        # mask
        input_tensor = tf.where(tf.math.greater(mask_4d, 0), inf_tensor, input_tensor)
        return input_tensor
    def _sliding_chunks_matmul_attn_probs_value(self, attn_probs, value, window_overlap):
        """
        Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the
        same shape as `attn_probs`
        """
        batch_size, seq_len, num_heads, head_dim = shape_list(value)
        tf.debugging.assert_equal(
            seq_len % (window_overlap * 2),
            0,
            message="Seq_len has to be multiple of 2 * window_overlap",
        )
        tf.debugging.assert_equal(
            shape_list(attn_probs)[:3],
            shape_list(value)[:3],
            message="value and attn_probs must have same dims (except head_dim)",
        )
        tf.debugging.assert_equal(
            shape_list(attn_probs)[3],
            2 * window_overlap + 1,
            message="attn_probs last dim has to be 2 * window_overlap + 1",
        )
        chunks_count = seq_len // window_overlap - 1
        # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap
        chunked_attn_probs = tf.reshape(
            tf.transpose(attn_probs, (0, 2, 1, 3)),
            (
                batch_size * num_heads,
                seq_len // window_overlap,
                window_overlap,
                2 * window_overlap + 1,
            ),
        )
        # group batch_size and num_heads dimensions into one
        value = tf.reshape(
            tf.transpose(value, (0, 2, 1, 3)),
            (batch_size * num_heads, seq_len, head_dim),
        )
        # pad seq_len with w at the beginning of the sequence and another window overlap at the end
        paddings = tf.convert_to_tensor([[0, 0], [window_overlap, window_overlap], [0, 0]], dtype=tf.dtypes.int32)
        padded_value = tf.pad(value, paddings, constant_values=-1)
        # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap
        frame_size = 3 * window_overlap * head_dim
        frame_hop_size = (shape_list(padded_value)[1] * head_dim - frame_size) // chunks_count
        chunked_value = tf.signal.frame(
            tf.reshape(padded_value, (batch_size * num_heads, -1)),
            frame_size,
            frame_hop_size,
        )
        chunked_value = tf.reshape(
            chunked_value,
            (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim),
        )
        tf.debugging.assert_equal(
            shape_list(chunked_value),
            [batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim],
            message="Chunked value has the wrong shape",
        )
        chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs)
        context = tf.einsum("bcwd,bcdh->bcwh", chunked_attn_probs, chunked_value)
        context = tf.transpose(
            tf.reshape(context, (batch_size, num_heads, seq_len, head_dim)),
            (0, 2, 1, 3),
        )
        return context
    @staticmethod
    def _pad_and_transpose_last_two_dims(hidden_states_padded, paddings):
        """pads rows and then flips rows and columns"""
        hidden_states_padded = tf.pad(
            hidden_states_padded, paddings
        )  # padding value is not important because it will be overwritten
        batch_size, chunk_size, seq_length, hidden_dim = shape_list(hidden_states_padded)
        hidden_states_padded = tf.reshape(hidden_states_padded, (batch_size, chunk_size, hidden_dim, seq_length))
        return hidden_states_padded
    @staticmethod
    def _pad_and_diagonalize(chunked_hidden_states):
        """
        shift every row 1 step right, converting columns into diagonals.

        Example::

              chunked_hidden_states: [ 0.4983,  2.6918, -0.0071,  1.0492,
                                       -1.8348,  0.7672,  0.2986,  0.0285,
                                       -0.7584,  0.4206, -0.0405,  0.1599,
                                       2.0514, -1.1600,  0.5372,  0.2629 ]
              window_overlap = num_rows = 4
             (pad & diagonalize) =>
             [ 0.4983,  2.6918, -0.0071,  1.0492, 0.0000,  0.0000,  0.0000
               0.0000,  -1.8348,  0.7672,  0.2986,  0.0285, 0.0000,  0.0000
               0.0000,  0.0000, -0.7584,  0.4206, -0.0405,  0.1599, 0.0000
               0.0000,  0.0000,  0.0000, 2.0514, -1.1600,  0.5372,  0.2629 ]
        """
        total_num_heads, num_chunks, window_overlap, hidden_dim = shape_list(chunked_hidden_states)
        paddings = tf.convert_to_tensor([[0, 0], [0, 0], [0, 0], [0, window_overlap + 1]])
        chunked_hidden_states = tf.pad(
            chunked_hidden_states, paddings
        )  # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). Padding value is not important because it'll be overwritten
        chunked_hidden_states = tf.reshape(
            chunked_hidden_states, (total_num_heads, num_chunks, -1)
        )  # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap+window_overlap
        chunked_hidden_states = chunked_hidden_states[
            :, :, :-window_overlap
        ]  # total_num_heads x num_chunks x window_overlapL+window_overlapwindow_overlap
        chunked_hidden_states = tf.reshape(
            chunked_hidden_states,
            (total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim),
        )  # total_num_heads x num_chunks, window_overlap x hidden_dim+window_overlap
        chunked_hidden_states = chunked_hidden_states[:, :, :, :-1]
        return chunked_hidden_states
    @staticmethod
    def _chunk(hidden_states, window_overlap):
        """convert into overlapping chunks. Chunk size = 2w, overlap size = w"""
        batch_size, seq_length, hidden_dim = shape_list(hidden_states)
        num_output_chunks = 2 * (seq_length // (2 * window_overlap)) - 1
        # define frame size and frame stride (similar to convolution)
        frame_hop_size = window_overlap * hidden_dim
        frame_size = 2 * frame_hop_size
        hidden_states = tf.reshape(hidden_states, (batch_size, seq_length * hidden_dim))
        # chunk with overlap
        chunked_hidden_states = tf.signal.frame(hidden_states, frame_size, frame_hop_size)
        tf.debugging.assert_equal(
            shape_list(chunked_hidden_states),
            [batch_size, num_output_chunks, frame_size],
            message=f"Make sure chunking is correctly applied. `Chunked hidden states should have output  dimension {[batch_size, frame_size, num_output_chunks]}, but got {shape_list(chunked_hidden_states)}.",
        )
        chunked_hidden_states = tf.reshape(
            chunked_hidden_states,
            (batch_size, num_output_chunks, 2 * window_overlap, hidden_dim),
        )
        return chunked_hidden_states
    @staticmethod
    def _get_global_attn_indices(is_index_global_attn):
        """ compute global attn indices required throughout forward pass """
        # helper variable
        num_global_attn_indices = tf.reduce_sum(tf.cast(is_index_global_attn, dtype=tf.dtypes.int32), axis=1)
        # max number of global attn indices in batch
        max_num_global_attn_indices = tf.reduce_max(num_global_attn_indices)
        # indices of global attn
        is_index_global_attn_nonzero = tf.where(is_index_global_attn)
        # helper variable
        is_local_index_global_attn = tf.range(max_num_global_attn_indices) < tf.expand_dims(
            num_global_attn_indices, axis=-1
        )
        # location of the non-padding values within global attention indices
        is_local_index_global_attn_nonzero = tf.where(is_local_index_global_attn)
        # location of the padding values within global attention indices
        is_local_index_no_global_attn_nonzero = tf.where(tf.math.logical_not(is_local_index_global_attn))
        return (
            max_num_global_attn_indices,
            is_index_global_attn_nonzero,
            is_local_index_global_attn_nonzero,
            is_local_index_no_global_attn_nonzero,
        )
    def _concat_with_global_key_attn_probs(
        self,
        attn_scores,
        key_vectors,
        query_vectors,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero,
        is_local_index_global_attn_nonzero,
        is_local_index_no_global_attn_nonzero,
    ):
        """Score every query against the global key vectors and prepend those scores to the local window scores."""
        batch_size = shape_list(key_vectors)[0]
        # select global key vectors
        global_key_vectors = tf.gather_nd(key_vectors, is_index_global_attn_nonzero)
        # create only global key vectors
        key_vectors_only_global = tf.scatter_nd(
            is_local_index_global_attn_nonzero,
            global_key_vectors,
            shape=(
                batch_size,
                max_num_global_attn_indices,
                self.num_heads,
                self.head_dim,
            ),
        )
        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key = tf.einsum("blhd,bshd->blhs", query_vectors, key_vectors_only_global)
        # (batch_size, max_num_global_attn_indices, seq_len, num_heads)
        attn_probs_from_global_key_trans = tf.transpose(attn_probs_from_global_key, (0, 3, 1, 2))
        mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
            shape_list(attn_probs_from_global_key_trans)[-2:]
        )
        mask = tf.ones(mask_shape) * -10000.0
        # scatter mask
        attn_probs_from_global_key_trans = tf.tensor_scatter_nd_update(
            attn_probs_from_global_key_trans,
            is_local_index_no_global_attn_nonzero,
            mask,
        )
        # (batch_size, seq_len, num_heads, max_num_global_attn_indices)
        attn_probs_from_global_key = tf.transpose(attn_probs_from_global_key_trans, (0, 2, 3, 1))
        # concat to attn_probs
        # (batch_size, seq_len, num_heads, extra attention count + 2*window+1)
        attn_scores = tf.concat((attn_probs_from_global_key, attn_scores), axis=-1)
        return attn_scores
    def _compute_attn_output_with_global_indices(
        self,
        value_vectors,
        attn_probs,
        max_num_global_attn_indices,
        is_index_global_attn_nonzero,
        is_local_index_global_attn_nonzero,
    ):
        """Combine the attention output from global value vectors with the sliding-window contribution."""
        batch_size = shape_list(attn_probs)[0]
        # cut local attn probs to global only
        attn_probs_only_global = attn_probs[:, :, :, :max_num_global_attn_indices]
        # select global value vectors
        global_value_vectors = tf.gather_nd(value_vectors, is_index_global_attn_nonzero)
        # create only global value vectors
        value_vectors_only_global = tf.scatter_nd(
            is_local_index_global_attn_nonzero,
            global_value_vectors,
            shape=(
                batch_size,
                max_num_global_attn_indices,
                self.num_heads,
                self.head_dim,
            ),
        )
        # compute attn output only global
        attn_output_only_global = tf.einsum("blhs,bshd->blhd", attn_probs_only_global, value_vectors_only_global)
        # reshape attn probs
        attn_probs_without_global = attn_probs[:, :, :, max_num_global_attn_indices:]
        # compute attn output with global
        attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value(
            attn_probs_without_global, value_vectors, self.one_sided_attn_window_size
        )
        return attn_output_only_global + attn_output_without_global
    def _compute_global_attn_output_from_hidden(
        self,
        attn_output,
        hidden_states,
        max_num_global_attn_indices,
        is_local_index_global_attn_nonzero,
        is_index_global_attn_nonzero,
        is_local_index_no_global_attn_nonzero,
        is_index_masked,
        training,
    ):
        """Run full (quadratic) attention for the globally-attending tokens and
        overwrite their rows in `attn_output` with the result."""
        batch_size, seq_len = shape_list(hidden_states)[:2]
        # prepare global hidden states
        global_attn_hidden_states = tf.gather_nd(hidden_states, is_index_global_attn_nonzero)
        global_attn_hidden_states = tf.scatter_nd(
            is_local_index_global_attn_nonzero,
            global_attn_hidden_states,
            shape=(batch_size, max_num_global_attn_indices, self.embed_dim),
        )
        # global key, query, value
        global_query_vectors_only_global = self.query_global(global_attn_hidden_states)
        global_key_vectors = self.key_global(hidden_states)
        global_value_vectors = self.value_global(hidden_states)
        # normalize
        global_query_vectors_only_global /= tf.math.sqrt(tf.convert_to_tensor(self.head_dim, dtype=tf.dtypes.float32))
        global_query_vectors_only_global = self.reshape_and_transpose(global_query_vectors_only_global, batch_size)
        global_key_vectors = self.reshape_and_transpose(global_key_vectors, batch_size)
        global_value_vectors = self.reshape_and_transpose(global_value_vectors, batch_size)
        # compute attn scores
        global_attn_scores = tf.matmul(global_query_vectors_only_global, global_key_vectors, transpose_b=True)
        tf.debugging.assert_equal(
            shape_list(global_attn_scores),
            [batch_size * self.num_heads, max_num_global_attn_indices, seq_len],
            message=f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {shape_list(global_attn_scores)}.",
        )
        global_attn_scores = tf.reshape(
            global_attn_scores,
            (batch_size, self.num_heads, max_num_global_attn_indices, seq_len),
        )
        global_attn_scores_trans = tf.transpose(global_attn_scores, (0, 2, 1, 3))
        mask_shape = (shape_list(is_local_index_no_global_attn_nonzero)[0],) + tuple(
            shape_list(global_attn_scores_trans)[-2:]
        )
        global_attn_mask = tf.ones(mask_shape) * -10000.0
        # scatter mask
        global_attn_scores_trans = tf.tensor_scatter_nd_update(
            global_attn_scores_trans,
            is_local_index_no_global_attn_nonzero,
            global_attn_mask,
        )
        global_attn_scores = tf.transpose(global_attn_scores_trans, (0, 2, 1, 3))
        # mask global attn scores
        attn_mask = tf.broadcast_to(is_index_masked[:, None, None, :], shape_list(global_attn_scores))
        global_attn_scores = tf.where(attn_mask, -10000.0, global_attn_scores)
        global_attn_scores = tf.reshape(
            global_attn_scores,
            (batch_size * self.num_heads, max_num_global_attn_indices, seq_len),
        )
        # compute global attn probs
        global_attn_probs_float = tf.nn.softmax(global_attn_scores, axis=-1)
        # dropout
        global_attn_probs = self.global_dropout(global_attn_probs_float, training=training)
        # global attn output
        global_attn_output = tf.matmul(global_attn_probs, global_value_vectors)
        tf.debugging.assert_equal(
            shape_list(global_attn_output),
            [batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim],
            message=f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {shape_list(global_attn_output)}.",
        )
        global_attn_output = tf.reshape(
            global_attn_output,
            (batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim),
        )
        # get only non zero global attn output
        nonzero_global_attn_output = tf.gather_nd(
            tf.transpose(global_attn_output, (0, 2, 1, 3)),
            is_local_index_global_attn_nonzero,
        )
        nonzero_global_attn_output = tf.reshape(
            nonzero_global_attn_output,
            (shape_list(is_local_index_global_attn_nonzero)[0], -1),
        )
        # overwrite values with global attention
        attn_output = tf.tensor_scatter_nd_update(
            attn_output, is_index_global_attn_nonzero, nonzero_global_attn_output
        )
        global_attn_probs = tf.reshape(
            global_attn_probs, (batch_size, self.num_heads, max_num_global_attn_indices, seq_len)
        )
        return attn_output, global_attn_probs
    def reshape_and_transpose(self, vector, batch_size):
        """Reshape (batch, seq, embed_dim) into (batch * num_heads, seq, head_dim)."""
        return tf.reshape(
            tf.transpose(
                tf.reshape(vector, (batch_size, -1, self.num_heads, self.head_dim)),
                (0, 2, 1, 3),
            ),
            (batch_size * self.num_heads, -1, self.head_dim),
        )
class TFLongformerAttention(tf.keras.layers.Layer):
    """Full Longformer attention block: windowed/global self-attention followed by the output projection."""

    def __init__(self, config, layer_id=0, **kwargs):
        super().__init__(**kwargs)
        self.self_attention = TFLongformerSelfAttention(config, layer_id, name="self")
        self.dense_output = TFLongformerSelfOutput(config, name="output")

    def prune_heads(self, heads):
        # Head pruning is not supported for Longformer.
        raise NotImplementedError

    def call(self, inputs, training=False):
        hidden_states, attention_mask, is_index_masked, is_index_global_attn, is_global_attn = inputs
        self_outputs = self.self_attention(
            [hidden_states, attention_mask, is_index_masked, is_index_global_attn, is_global_attn],
            training=training,
        )
        attention_output = self.dense_output(self_outputs[0], hidden_states, training=training)
        # Re-attach the (local, global) attention probability tensors.
        return (attention_output,) + self_outputs[1:]
class TFLongformerLayer(tf.keras.layers.Layer):
    """A single Longformer encoder layer: attention block followed by the feed-forward block."""

    def __init__(self, config, layer_id=0, **kwargs):
        super().__init__(**kwargs)
        self.attention = TFLongformerAttention(config, layer_id, name="attention")
        self.intermediate = TFLongformerIntermediate(config, name="intermediate")
        self.longformer_output = TFLongformerOutput(config, name="output")

    def call(self, inputs, training=False):
        hidden_states, attention_mask, is_index_masked, is_index_global_attn, is_global_attn = inputs
        attention_outputs = self.attention(
            [hidden_states, attention_mask, is_index_masked, is_index_global_attn, is_global_attn],
            training=training,
        )
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.longformer_output(intermediate_output, attention_output, training=training)
        # Prepend the layer output; keep the attention tensors if they were returned.
        return (layer_output,) + attention_outputs[1:]
class TFLongformerEncoder(tf.keras.layers.Layer):
    """Stack of :obj:`TFLongformerLayer` blocks.

    Optionally collects per-layer hidden states (with window padding stripped)
    and per-layer local/global attention probabilities.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.output_hidden_states = config.output_hidden_states
        self.output_attentions = config.output_attentions
        self.layer = [
            TFLongformerLayer(config, i, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)
        ]

    def call(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,  # accepted for API compatibility; not used by this implementation
        padding_len=0,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """Run all encoder layers; `padding_len` is the right-side window padding to strip
        from any hidden states exposed to the caller."""
        all_hidden_states = () if output_hidden_states else None
        all_attentions = all_global_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                # Expose hidden states without the attention-window padding.
                hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
                all_hidden_states = all_hidden_states + (hidden_states_to_add,)
            layer_outputs = layer_module(
                [
                    hidden_states,
                    attention_mask,
                    is_index_masked,
                    is_index_global_attn,
                    is_global_attn,
                ],
                training=training,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1)
                all_attentions = all_attentions + (tf.transpose(layer_outputs[1], (0, 2, 1, 3)),)
                # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn
                # BUGFIX: the transposed tensor must be wrapped in a 1-tuple (trailing comma) before
                # concatenating — the original `tuple + Tensor` expression was not a tuple concatenation.
                all_global_attentions = all_global_attentions + (tf.transpose(layer_outputs[2], (0, 1, 3, 2)),)
        # Add last layer
        if output_hidden_states:
            hidden_states_to_add = hidden_states[:, :-padding_len] if padding_len > 0 else hidden_states
            all_hidden_states = all_hidden_states + (hidden_states_to_add,)
        if not return_dict:
            return tuple(
                v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None
            )
        return TFLongformerBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
            global_attentions=all_global_attentions,
        )
@keras_serializable
class TFLongformerMainLayer(tf.keras.layers.Layer):
    """Core Longformer transformer: embeddings + encoder (+ optional pooler).

    Shared by all task heads below. Responsible for merging the global attention
    mask into the regular attention mask (0 = masked, 1 = local, 2 = global) and
    for padding inputs to a multiple of the attention window.
    """
    config_class = LongformerConfig
    def __init__(self, config, add_pooling_layer=True, **kwargs):
        super().__init__(**kwargs)
        if isinstance(config.attention_window, int):
            assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
            assert config.attention_window > 0, "`config.attention_window` has to be positive"
            config.attention_window = [config.attention_window] * config.num_hidden_layers  # one value per layer
        else:
            assert len(config.attention_window) == config.num_hidden_layers, (
                "`len(config.attention_window)` should equal `config.num_hidden_layers`. "
                f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}"
            )
        self.config = config
        self.num_hidden_layers = config.num_hidden_layers
        self.initializer_range = config.initializer_range
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.return_dict = config.use_return_dict
        self.pad_token_id = config.pad_token_id
        self.attention_window = config.attention_window
        self.embeddings = TFLongformerEmbeddings(config, name="embeddings")
        self.encoder = TFLongformerEncoder(config, name="encoder")
        # Pooler is optional so token-level heads (MLM, QA, ...) can skip it.
        self.pooler = TFLongformerPooler(config, name="pooler") if add_pooling_layer else None
    def get_input_embeddings(self):
        # Returns the embeddings layer (its word embeddings act as input embeddings).
        return self.embeddings
    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value
        self.embeddings.vocab_size = value.shape[0]
    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        """Embed, pad to the attention window, encode, and (optionally) pool.

        `global_attention_mask` is folded into `attention_mask` so the merged mask
        holds 0 (masked), 1 (local attention) or 2 (global attention).
        """
        # Normalize positional/keyword/dict calling conventions into a single dict.
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        if inputs["attention_mask"] is None:
            inputs["attention_mask"] = tf.fill(input_shape, 1)
        if inputs["token_type_ids"] is None:
            inputs["token_type_ids"] = tf.fill(input_shape, 0)
        # merge `global_attention_mask` and `attention_mask`
        if inputs["global_attention_mask"] is not None:
            inputs["attention_mask"] = self._merge_to_attention_mask(
                inputs["attention_mask"], inputs["global_attention_mask"]
            )
        # Pad every input tensor on the right so seq_len % attention_window == 0.
        (
            padding_len,
            inputs["input_ids"],
            inputs["attention_mask"],
            inputs["token_type_ids"],
            inputs["position_ids"],
            inputs["inputs_embeds"],
        ) = self._pad_to_window_size(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            pad_token_id=self.pad_token_id,
        )
        # is index masked or global attention
        is_index_masked = tf.math.less(inputs["attention_mask"], 1)
        is_index_global_attn = tf.math.greater(inputs["attention_mask"], 1)
        is_global_attn = tf.math.reduce_any(is_index_global_attn)
        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, to_seq_length, 1, 1]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is more simple than the triangular masking of causal attention
        # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
        extended_attention_mask = inputs["attention_mask"][:, :, tf.newaxis, tf.newaxis]
        # The merged mask holds 0 (masked), 1 (local) and 2 (global); abs(1 - mask) maps
        # local -> 0 and both masked/global -> 1, so after scaling only locally-attended
        # positions receive a 0.0 additive bias while the rest get -10000.0 (global
        # positions are presumably handled separately via `is_index_global_attn`).
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = tf.cast(tf.math.abs(1 - extended_attention_mask), tf.dtypes.float32) * -10000.0
        embedding_output = self.embeddings(
            inputs["input_ids"],
            inputs["position_ids"],
            inputs["token_type_ids"],
            inputs["inputs_embeds"],
            training=inputs["training"],
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            padding_len=padding_len,
            is_index_masked=is_index_masked,
            is_index_global_attn=is_index_global_attn,
            is_global_attn=is_global_attn,
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
        # undo padding
        if padding_len > 0:
            # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1)
            sequence_output = sequence_output[:, :-padding_len]
        if not inputs["return_dict"]:
            return (
                sequence_output,
                pooled_output,
            ) + encoder_outputs[1:]
        return TFLongformerBaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            global_attentions=encoder_outputs.global_attentions,
        )
    def _pad_to_window_size(
        self,
        input_ids,
        attention_mask,
        token_type_ids,
        position_ids,
        inputs_embeds,
        pad_token_id,
    ):
        """Pad tokens and masks on the right so seq_len is a multiple of the (largest) attention window."""
        # padding
        attention_window = (
            self.attention_window if isinstance(self.attention_window, int) else max(self.attention_window)
        )
        assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}"
        input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)
        batch_size, seq_len = input_shape[:2]
        padding_len = (attention_window - seq_len % attention_window) % attention_window
        if padding_len > 0:
            logger.info(
                "Input ids are automatically padded from {} to {} to be a multiple of `config.attention_window`: {}".format(
                    seq_len, seq_len + padding_len, attention_window
                )
            )
        # A zero-length pad is a no-op, so the padding ops below run unconditionally.
        paddings = tf.convert_to_tensor([[0, 0], [0, padding_len]])
        if input_ids is not None:
            input_ids = tf.pad(input_ids, paddings, constant_values=pad_token_id)
        if position_ids is not None:
            # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings
            position_ids = tf.pad(position_ids, paddings, constant_values=pad_token_id)
        if inputs_embeds is not None:
            def pad_embeddings():
                # Embed pad tokens and append them along the sequence axis.
                input_ids_padding = tf.fill((batch_size, padding_len), self.pad_token_id)
                inputs_embeds_padding = self.embeddings(input_ids_padding)
                return tf.concat([inputs_embeds, inputs_embeds_padding], axis=-2)
            inputs_embeds = tf.cond(padding_len > 0, pad_embeddings, lambda: inputs_embeds)
        attention_mask = tf.pad(attention_mask, paddings, constant_values=False)  # no attention on the padding tokens
        token_type_ids = tf.pad(token_type_ids, paddings, constant_values=0)  # pad with token_type_id = 0
        return (
            padding_len,
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            inputs_embeds,
        )
    @staticmethod
    def _merge_to_attention_mask(attention_mask: tf.Tensor, global_attention_mask: tf.Tensor):
        # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn)
        # (global_attention_mask + 1) => 1 for local attention, 2 for global attention
        # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention
        if attention_mask is not None:
            attention_mask = attention_mask * (global_attention_mask + 1)
        else:
            # simply use `global_attention_mask` as `attention_mask`
            # if no `attention_mask` is given
            attention_mask = global_attention_mask + 1
        return attention_mask
class TFLongformerPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """
    config_class = LongformerConfig
    base_model_prefix = "longformer"
    @property
    def dummy_inputs(self):
        """Dummy inputs to build the network; each row sets at least one global-attention position."""
        input_ids = tf.convert_to_tensor([[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])
        # make sure global layers are initialized
        attention_mask = tf.convert_to_tensor([[1, 1, 0, 0, 1], [1, 1, 1, 0, 0], [1, 0, 0, 1, 1]])
        global_attention_mask = tf.convert_to_tensor([[0, 0, 0, 0, 1], [0, 0, 1, 0, 0], [0, 0, 0, 0, 1]])
        return {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "global_attention_mask": global_attention_mask,
        }
    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
            }
        ]
    )
    def serving(self, inputs):
        """SavedModel serving entry point; accepts only `input_ids` and `attention_mask`."""
        output = self.call(inputs)
        return self.serving_output(output)
LONGFORMER_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
TF 2.0 models accepts two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
the first positional argument :
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Parameters:
config (:class:`~transformers.LongformerConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
LONGFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.LongformerTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
global_attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to decide the attention given on each token, local attention or global attention. Tokens with global
attention attends to all other tokens, and all other tokens attend to them. This is important for
task-specific finetuning because it makes the model more flexible at representing the task. For example,
for classification, the <s> token should be given global attention. For QA, all question tokens should also
have global attention. Please refer to the `Longformer paper <https://arxiv.org/abs/2004.05150>`__ for more
details. Mask values selected in ``[0, 1]``:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
token_type_ids (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
    "The bare Longformer Model outputting raw hidden-states without any specific head on top.",
    LONGFORMER_START_DOCSTRING,
)
class TFLongformerModel(TFLongformerPreTrainedModel):
    """
    This class copies code from :class:`~transformers.TFRobertaModel` and overwrites standard self-attention with
    longformer self-attention to provide the ability to process long sequences following the self-attention approach
    described in `Longformer: the Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy,
    Matthew E. Peters, and Arman Cohan. Longformer self-attention combines a local (sliding window) and global
    attention to extend to long documents without the O(n^2) increase in memory and compute.
    The self-attention module :obj:`TFLongformerSelfAttention` implemented here supports the combination of local and
    global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and
    dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks.
    Future release will add support for autoregressive attention, but the support for dilated attention requires a
    custom CUDA kernel to be memory and compute efficient.
    """
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # All actual computation happens inside the shared main layer.
        self.longformer = TFLongformerMainLayer(config, name="longformer")
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        """Thin wrapper: normalize arguments, then delegate to the main layer."""
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )
        outputs = self.longformer(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            global_attention_mask=inputs["global_attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        return outputs
    def serving_output(self, output):
        """Convert per-layer output tuples into stacked tensors for the serving signature."""
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
        return TFLongformerBaseModelOutputWithPooling(
            last_hidden_state=output.last_hidden_state,
            pooler_output=output.pooler_output,
            hidden_states=hs,
            attentions=attns,
            global_attentions=g_attns,
        )
@add_start_docstrings(
    """Longformer Model with a `language modeling` head on top. """,
    LONGFORMER_START_DOCSTRING,
)
class TFLongformerForMaskedLM(TFLongformerPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        # No pooler: MLM predictions are made per token from the sequence output.
        self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
        # The LM head is built with the input embeddings (weight tying — see TFLongformerLMHead).
        self.lm_head = TFLongformerLMHead(config, self.longformer.embeddings, name="lm_head")
    def get_output_embeddings(self):
        # The LM head decoder serves as the output embedding layer.
        return self.lm_head.decoder
    def get_output_layer_with_bias(self):
        return self.lm_head
    def get_prefix_bias_name(self):
        return self.name + "/" + self.lm_head.name
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="allenai/longformer-base-4096",
        output_type=TFLongformerMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
        training=False,
        **kwargs,
    ):
        r"""
        labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            labels=labels,
            training=training,
            kwargs_call=kwargs,
        )
        outputs = self.longformer(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            global_attention_mask=inputs["global_attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output, training=inputs["training"])
        loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
        if not inputs["return_dict"]:
            # outputs[2:] skips sequence_output and the (disabled) pooled output slot.
            output = (prediction_scores,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFLongformerMaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            global_attentions=outputs.global_attentions,
        )
    def serving_output(self, output):
        """Convert per-layer output tuples into stacked tensors for the serving signature."""
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
        return TFLongformerMaskedLMOutput(
            loss=None,
            logits=output.logits,
            hidden_states=hs,
            attentions=attns,
            global_attentions=g_attns,
        )
@add_start_docstrings(
    """
    Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD /
    TriviaQA (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    LONGFORMER_START_DOCSTRING,
)
class TFLongformerForQuestionAnswering(TFLongformerPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
        # Projects each token's hidden state to (start, end) span logits.
        self.qa_outputs = tf.keras.layers.Dense(
            config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="qa_outputs",
        )
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="allenai/longformer-large-4096-finetuned-triviaqa",
        output_type=TFLongformerQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        global_attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        start_positions=None,
        end_positions=None,
        training=False,
        **kwargs,
    ):
        r"""
        start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
            are not taken into account for computing the loss.
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            start_positions=start_positions,
            end_positions=end_positions,
            training=training,
            kwargs_call=kwargs,
        )
        # set global attention on question tokens
        # NOTE(review): this only happens when `input_ids` is given — with `inputs_embeds`
        # alone no global attention is initialized; confirm that is intended.
        if inputs["global_attention_mask"] is None and inputs["input_ids"] is not None:
            if (
                shape_list(tf.where(inputs["input_ids"] == self.config.sep_token_id))[0]
                != 3 * shape_list(inputs["input_ids"])[0]
            ):
                logger.warning(
                    f"There should be exactly three separator tokens: {self.config.sep_token_id} in every sample for questions answering. You might also consider to set `global_attention_mask` manually in the forward function to avoid this. This is most likely an error. The global attention is disabled for this forward pass."
                )
                inputs["global_attention_mask"] = tf.fill(shape_list(inputs["input_ids"]), value=0)
            else:
                logger.info("Initializing global attention on question tokens...")
                # put global attention on all tokens until `config.sep_token_id` is reached
                sep_token_indices = tf.where(inputs["input_ids"] == self.config.sep_token_id)
                inputs["global_attention_mask"] = _compute_global_attention_mask(
                    shape_list(inputs["input_ids"]), sep_token_indices
                )
        outputs = self.longformer(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            global_attention_mask=inputs["global_attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        sequence_output = outputs[0]
        # Split the per-token projection into start/end span logits.
        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        loss = None
        if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
            labels = {"start_position": inputs["start_positions"]}
            labels["end_position"] = inputs["end_positions"]
            loss = self.compute_loss(labels, (start_logits, end_logits))
        if not inputs["return_dict"]:
            output = (start_logits, end_logits) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFLongformerQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            global_attentions=outputs.global_attentions,
        )
    def serving_output(self, output):
        """Convert per-layer output tuples into stacked tensors for the serving signature."""
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
        return TFLongformerQuestionAnsweringModelOutput(
            start_logits=output.start_logits,
            end_logits=output.end_logits,
            hidden_states=hs,
            attentions=attns,
            global_attentions=g_attns,
        )
class TFLongformerClassificationHead(tf.keras.layers.Layer):
    """Head for sentence-level classification tasks.

    Classifies from the first (<s>, equivalent to [CLS]) token representation:
    dropout -> tanh dense -> dropout -> output projection.
    """

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            activation="tanh",
            name="dense",
        )
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.out_proj = tf.keras.layers.Dense(
            config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="out_proj",
        )

    def call(self, hidden_states, training=False):
        # Use only the first token's state as the sentence representation.
        cls_state = hidden_states[:, 0, :]
        features = self.dropout(cls_state, training=training)
        features = self.dense(features)
        features = self.dropout(features, training=training)
        return self.out_proj(features)
@add_start_docstrings(
    """
    Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    LONGFORMER_START_DOCSTRING,
)
class TFLongformerForSequenceClassification(TFLongformerPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
    _keys_to_ignore_on_load_unexpected = [r"pooler"]
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        # No pooler: the classification head reads the first token's hidden state itself.
        self.longformer = TFLongformerMainLayer(config, add_pooling_layer=False, name="longformer")
        self.classifier = TFLongformerClassificationHead(config, name="classifier")
    @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint="allenai/longformer-base-4096",
        output_type=TFLongformerSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        global_attention_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
        training=False,
        **kwargs,
    ):
        """Classify/regress from the sequence output; defaults global attention to the CLS token."""
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            global_attention_mask=global_attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            labels=labels,
            training=training,
            kwargs_call=kwargs,
        )
        # NOTE(review): only happens when `input_ids` is given — with `inputs_embeds`
        # alone no CLS global attention is initialized; confirm that is intended.
        if inputs["global_attention_mask"] is None and inputs["input_ids"] is not None:
            logger.info("Initializing global attention on CLS token...")
            # global attention on cls token
            inputs["global_attention_mask"] = tf.zeros_like(inputs["input_ids"])
            # Set position 0 of every row to 1 (global attention on the first token).
            inputs["global_attention_mask"] = tf.tensor_scatter_nd_update(
                inputs["global_attention_mask"],
                [[i, 0] for i in range(shape_list(inputs["input_ids"])[0])],
                [1 for _ in range(shape_list(inputs["input_ids"])[0])],
            )
        outputs = self.longformer(
            input_ids=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            global_attention_mask=inputs["global_attention_mask"],
            token_type_ids=inputs["token_type_ids"],
            position_ids=inputs["position_ids"],
            inputs_embeds=inputs["inputs_embeds"],
            output_attentions=inputs["output_attentions"],
            output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)
        loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
        if not inputs["return_dict"]:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFLongformerSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            global_attentions=outputs.global_attentions,
        )
    def serving_output(self, output):
        """Convert per-layer output tuples into stacked tensors for the serving signature."""
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
        g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
        return TFLongformerSequenceClassifierOutput(
            logits=output.logits,
            hidden_states=hs,
            attentions=attns,
            global_attentions=g_attns,
        )
@add_start_docstrings(
"""
Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class TFLongformerForMultipleChoice(TFLongformerPreTrainedModel, TFMultipleChoiceLoss):
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.longformer = TFLongformerMainLayer(config, name="longformer")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
input_ids = tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)
# make sure global layers are initialized
global_attention_mask = tf.convert_to_tensor([[[0, 0, 0, 1], [0, 0, 0, 1]]] * 2)
return {"input_ids": input_ids, "global_attention_mask": global_attention_mask}
@add_start_docstrings_to_model_forward(
LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="allenai/longformer-base-4096",
output_type=TFLongformerMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
global_attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_global_attention_mask = (
tf.reshape(inputs["global_attention_mask"], (-1, shape_list(inputs["global_attention_mask"])[-1]))
if inputs["global_attention_mask"] is not None
else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.longformer(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
global_attention_mask=flat_global_attention_mask,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFLongformerMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
return TFLongformerMultipleChoiceModelOutput(
logits=output.logits,
hidden_states=hs,
attentions=attns,
global_attentions=g_attns,
)
@add_start_docstrings(
"""
Longformer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
LONGFORMER_START_DOCSTRING,
)
class TFLongformerForTokenClassification(TFLongformerPreTrainedModel, TFTokenClassificationLoss):
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.longformer = TFLongformerMainLayer(config=config, add_pooling_layer=False, name="longformer")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="allenai/longformer-base-4096",
output_type=TFLongformerTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
global_attention_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
global_attention_mask=global_attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.longformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
global_attention_mask=inputs["global_attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFLongformerTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
global_attentions=outputs.global_attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
g_attns = tf.convert_to_tensor(output.global_attentions) if self.config.output_attentions else None
return TFLongformerTokenClassifierOutput(
logits=output.logits,
hidden_states=hs,
attentions=attns,
global_attentions=g_attns,
)
| 45.949292
| 327
| 0.663443
|
4a1a29590bc1e3d2d8c14ece00b03914acaf8e05
| 637
|
py
|
Python
|
shop_mngr_project/manage.py
|
jtroussard/auto-shop-management-software
|
163494d83b3ab75aaf974ddb882902035a095770
|
[
"Apache-2.0"
] | null | null | null |
shop_mngr_project/manage.py
|
jtroussard/auto-shop-management-software
|
163494d83b3ab75aaf974ddb882902035a095770
|
[
"Apache-2.0"
] | null | null | null |
shop_mngr_project/manage.py
|
jtroussard/auto-shop-management-software
|
163494d83b3ab75aaf974ddb882902035a095770
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shop_mngr_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.954545
| 81
| 0.687598
|
4a1a296deb709e1ea3d954a5ba2565be0e97ddb8
| 8,496
|
py
|
Python
|
telemetry/telemetry/benchmark_unittest.py
|
chandakumari/catapult
|
7ae76b98cd134229b4008596d48d31f986cd0641
|
[
"BSD-3-Clause"
] | null | null | null |
telemetry/telemetry/benchmark_unittest.py
|
chandakumari/catapult
|
7ae76b98cd134229b4008596d48d31f986cd0641
|
[
"BSD-3-Clause"
] | null | null | null |
telemetry/telemetry/benchmark_unittest.py
|
chandakumari/catapult
|
7ae76b98cd134229b4008596d48d31f986cd0641
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import tempfile
import unittest
import mock
from telemetry import android
from telemetry import benchmark
from telemetry.testing import options_for_unittests
from telemetry.timeline import chrome_trace_category_filter
from telemetry import page
from telemetry.page import legacy_page_test
from telemetry.page import shared_page_state
from telemetry import story as story_module
from telemetry.web_perf import timeline_based_measurement
from telemetry.story import typ_expectations
class DummyPageTest(legacy_page_test.LegacyPageTest):
def ValidateAndMeasurePage(self, *_):
pass
class TestBenchmark(benchmark.Benchmark):
def __init__(self, story):
super(TestBenchmark, self).__init__()
self._story_set = story_module.StorySet()
self._story_set.AddStory(story)
def CreatePageTest(self, _):
return DummyPageTest()
def CreateStorySet(self, _):
return self._story_set
class BenchmarkTest(unittest.TestCase):
def setUp(self):
self.options = options_for_unittests.GetRunOptions(
output_dir=tempfile.mkdtemp())
def tearDown(self):
shutil.rmtree(self.options.output_dir)
def testNewTestExpectationsFormatIsUsed(self):
b = TestBenchmark(
story_module.Story(
name='test name',
shared_state_class=shared_page_state.SharedPageState))
b.AugmentExpectationsWithFile('# results: [ Skip ]\nb1 [ Skip ]\n')
self.assertIsInstance(
b.expectations, typ_expectations.StoryExpectations)
def testPageTestWithIncompatibleStory(self):
b = TestBenchmark(story_module.Story(
name='test story',
shared_state_class=shared_page_state.SharedPageState))
with self.assertRaisesRegexp(
Exception, 'containing only telemetry.page.Page stories'):
b.Run(self.options)
state_class = story_module.SharedState
b = TestBenchmark(story_module.Story(
name='test benchmark',
shared_state_class=state_class))
with self.assertRaisesRegexp(
Exception, 'containing only telemetry.page.Page stories'):
b.Run(self.options)
b = TestBenchmark(android.AndroidStory(
name='test benchmark', start_intent=None))
with self.assertRaisesRegexp(
Exception, 'containing only telemetry.page.Page stories'):
b.Run(self.options)
@mock.patch('telemetry.internal.story_runner.RunStorySet')
def testPageTestWithCompatibleStory(self, mock_run_story_set):
b = TestBenchmark(page.Page(url='about:blank', name='about:blank'))
b.Run(self.options)
self.assertTrue(mock_run_story_set.called)
def testBenchmarkMakesTbmTestByDefault(self):
class DefaultTbmBenchmark(benchmark.Benchmark):
pass
self.assertIsInstance(
DefaultTbmBenchmark().CreatePageTest(options=None),
timeline_based_measurement.TimelineBasedMeasurement)
def testUnknownTestTypeRaises(self):
class UnknownTestType(object):
pass
class UnknownTestTypeBenchmark(benchmark.Benchmark):
test = UnknownTestType
type_error_regex = (
'"UnknownTestType" is not a PageTest or a StoryTest')
with self.assertRaisesRegexp(TypeError, type_error_regex):
UnknownTestTypeBenchmark().CreatePageTest(options=None)
def testGetOwners(self):
@benchmark.Owner(emails=['alice@chromium.org'])
class FooBenchmark(benchmark.Benchmark):
@classmethod
def Name(cls):
return "foo"
@benchmark.Owner(emails=['bob@chromium.org', 'ben@chromium.org'],
component='xyzzyx')
class BarBenchmark(benchmark.Benchmark):
@classmethod
def Name(cls):
return "bar"
@benchmark.Owner(component='xyzzyx')
class BazBenchmark(benchmark.Benchmark):
@classmethod
def Name(cls):
return "baz"
foo_owners_diagnostic = FooBenchmark(None).GetOwners()
bar_owners_diagnostic = BarBenchmark(None).GetOwners()
baz_owners_diagnostic = BazBenchmark(None).GetOwners()
self.assertEqual(foo_owners_diagnostic, ['alice@chromium.org'])
self.assertEqual(bar_owners_diagnostic,
['bob@chromium.org', 'ben@chromium.org'])
self.assertIsNone(baz_owners_diagnostic)
def testGetBugComponents(self):
@benchmark.Owner(emails=['alice@chromium.org'])
class FooBenchmark(benchmark.Benchmark):
@classmethod
def Name(cls):
return "foo"
@benchmark.Owner(emails=['bob@chromium.org'], component='xyzzyx')
class BarBenchmark(benchmark.Benchmark):
@classmethod
def Name(cls):
return "bar"
foo_bug_components_diagnostic = FooBenchmark(None).GetBugComponents()
bar_bug_components_diagnostic = BarBenchmark(None).GetBugComponents()
self.assertIsNone(foo_bug_components_diagnostic)
self.assertEqual(bar_bug_components_diagnostic, 'xyzzyx')
def testChromeTraceOptionsUpdateFilterString(self):
class TbmBenchmark(benchmark.Benchmark):
def CreateCoreTimelineBasedMeasurementOptions(self):
tbm_options = timeline_based_measurement.Options(
chrome_trace_category_filter.ChromeTraceCategoryFilter(
filter_string='rail,toplevel'))
tbm_options.config.enable_chrome_trace = True
return tbm_options
self.options.extra_chrome_categories = 'toplevel,net'
b = TbmBenchmark(None)
tbm = b.CreatePageTest(self.options)
self.assertEqual(
'net,rail,toplevel',
tbm.tbm_options.category_filter.stable_filter_string)
def testAtraceOptionsTurnsOnAtrace(self):
class TbmBenchmark(benchmark.Benchmark):
def CreateCoreTimelineBasedMeasurementOptions(self):
tbm_options = timeline_based_measurement.Options()
tbm_options.config.atrace_config.categories = []
return tbm_options
self.options.extra_atrace_categories = 'foo,bar'
b = TbmBenchmark(None)
tbm = b.CreatePageTest(self.options)
self.assertTrue(tbm.tbm_options.config.enable_atrace_trace)
self.assertEqual(
['foo', 'bar'],
tbm.tbm_options.config.atrace_config.categories)
def testAdditionalAtraceCategories(self):
class TbmBenchmark(benchmark.Benchmark):
def CreateCoreTimelineBasedMeasurementOptions(self):
tbm_options = timeline_based_measurement.Options()
tbm_options.config.enable_atrace_trace = True
tbm_options.config.atrace_config.categories = 'string,foo,stuff'
return tbm_options
self.options.extra_atrace_categories = 'foo,bar'
b = TbmBenchmark(None)
tbm = b.CreatePageTest(self.options)
self.assertTrue(tbm.tbm_options.config.enable_atrace_trace)
self.assertEqual(
['string', 'foo', 'stuff', 'bar'],
tbm.tbm_options.config.atrace_config.categories)
def testEnableSystrace(self):
class TbmBenchmark(benchmark.Benchmark):
def CreateCoreTimelineBasedMeasurementOptions(self):
return timeline_based_measurement.Options()
self.options.enable_systrace = True
b = TbmBenchmark(None)
tbm = b.CreatePageTest(self.options)
self.assertTrue(
tbm.tbm_options.config.chrome_trace_config.enable_systrace)
def testCanRunOnPlatformReturnTrue(self):
b = TestBenchmark(story_module.Story(
name='test name',
shared_state_class=shared_page_state.SharedPageState))
# We can pass None for both arguments because it defaults to ALL for
# supported platforms, which always returns true.
self.assertTrue(b.CanRunOnPlatform(None, None))
def testCanRunOnPlatformReturnFalse(self):
b = TestBenchmark(story_module.Story(
name='test name',
shared_state_class=shared_page_state.SharedPageState))
b.SUPPORTED_PLATFORMS = [] # pylint: disable=invalid-name
# We can pass None for both arguments because we select no platforms as
# supported, which always returns false.
self.assertFalse(b.CanRunOnPlatform(None, None))
def testAugmentExpectationsWithFileData(self):
b = TestBenchmark(story_module.Story(
name='test_name',
shared_state_class=shared_page_state.SharedPageState))
data = ('# results: [ skip ]\n'
'crbug.com/123 benchmark_unittest.TestBenchmark/test_name [ Skip ]')
b.AugmentExpectationsWithFile(data)
story = mock.MagicMock()
story.name = 'test_name'
self.assertTrue(b.expectations.IsStoryDisabled(story))
| 34.962963
| 80
| 0.732227
|
4a1a29c6cfbbf0b3e954ab432e16d3dd525be883
| 1,366
|
py
|
Python
|
snorkel/parser/corpus_parser.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 30
|
2019-08-22T19:27:59.000Z
|
2022-03-13T22:03:15.000Z
|
snorkel/parser/corpus_parser.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 2
|
2019-08-22T16:51:58.000Z
|
2022-03-21T02:59:18.000Z
|
snorkel/parser/corpus_parser.py
|
Conengmo/snorkel
|
36868e8a84de19b94e1c4b8eceaa64969a61a46b
|
[
"Apache-2.0"
] | 31
|
2019-08-22T19:28:08.000Z
|
2022-03-23T12:50:49.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from snorkel.parser.spacy_parser import Spacy
from snorkel.models import Candidate, Context, Sentence
from snorkel.udf import UDF, UDFRunner
class CorpusParser(UDFRunner):
def __init__(self, parser=None, fn=None):
self.parser = parser or Spacy()
super(CorpusParser, self).__init__(CorpusParserUDF,
parser=self.parser,
fn=fn)
def clear(self, session, **kwargs):
session.query(Context).delete()
# We cannot cascade up from child contexts to parent Candidates,
# so we delete all Candidates too
session.query(Candidate).delete()
class CorpusParserUDF(UDF):
def __init__(self, parser, fn, **kwargs):
super(CorpusParserUDF, self).__init__(**kwargs)
self.parser = parser
self.req_handler = parser.connect()
self.fn = fn
def apply(self, x, **kwargs):
"""Given a Document object and its raw text, parse into Sentences"""
doc, text = x
for parts in self.req_handler.parse(doc, text):
parts = self.fn(parts) if self.fn is not None else parts
yield Sentence(**parts)
| 34.15
| 76
| 0.648609
|
4a1a2a74337a119a0da2d0e27838b1ca7ac7cf76
| 5,282
|
py
|
Python
|
qiskit/aqua/components/optimizers/tnc.py
|
chunfuchen/aqua
|
fde435203a2799433a4e50897554fa226c8ff1dc
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/components/optimizers/tnc.py
|
chunfuchen/aqua
|
fde435203a2799433a4e50897554fa226c8ff1dc
|
[
"Apache-2.0"
] | null | null | null |
qiskit/aqua/components/optimizers/tnc.py
|
chunfuchen/aqua
|
fde435203a2799433a4e50897554fa226c8ff1dc
|
[
"Apache-2.0"
] | 2
|
2020-02-13T02:17:58.000Z
|
2020-08-09T07:56:25.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Truncated Newton (TNC) algorithm. """
import logging
from scipy.optimize import minimize
from qiskit.aqua.components.optimizers import Optimizer
logger = logging.getLogger(__name__)
class TNC(Optimizer):
"""Truncated Newton (TNC) algorithm.
Uses scipy.optimize.minimize TNC
See https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
"""
CONFIGURATION = {
'name': 'TNC',
'description': 'TNC Optimizer',
'input_schema': {
'$schema': 'http://json-schema.org/draft-07/schema#',
'id': 'tnc_schema',
'type': 'object',
'properties': {
'maxiter': {
'type': 'integer',
'default': 100
},
'disp': {
'type': 'boolean',
'default': False
},
'accuracy': {
'type': 'number',
'default': 0
},
'ftol': {
'type': 'number',
'default': -1
},
'xtol': {
'type': 'number',
'default': -1
},
'gtol': {
'type': 'number',
'default': -1
},
'tol': {
'type': ['number', 'null'],
'default': None
},
'eps': {
'type': 'number',
'default': 1e-08
}
},
'additionalProperties': False
},
'support_level': {
'gradient': Optimizer.SupportLevel.supported,
'bounds': Optimizer.SupportLevel.supported,
'initial_point': Optimizer.SupportLevel.required
},
'options': ['maxiter', 'disp', 'accuracy', 'ftol', 'xtol', 'gtol', 'eps'],
'optimizer': ['local']
}
# pylint: disable=unused-argument
def __init__(self, maxiter=100, disp=False, accuracy=0, ftol=-1, xtol=-1,
gtol=-1, tol=None, eps=1e-08):
"""
Constructor.
For details, please refer to
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html.
Args:
maxiter (int): Maximum number of function evaluation.
disp (bool): Set to True to print convergence messages.
accuracy (float): Relative precision for finite difference calculations.
If <= machine_precision, set to sqrt(machine_precision).
Defaults to 0.
ftol (float): Precision goal for the value of f in the stopping criterion.
If ftol < 0.0, ftol is set to 0.0 defaults to -1.
xtol (float): Precision goal for the value of x in the stopping criterion
(after applying x scaling factors).
If xtol < 0.0, xtol is set to sqrt(machine_precision).
Defaults to -1.
gtol (float): Precision goal for the value of the projected gradient in
the stopping criterion (after applying x scaling factors).
If gtol < 0.0, gtol is set to 1e-2 * sqrt(accuracy).
Setting it to 0.0 is not recommended. Defaults to -1.
tol (float or None): Tolerance for termination.
eps (float): Step size used for numerical approximation of the jacobian.
"""
self.validate(locals())
super().__init__()
for k, v in locals().items():
if k in self._configuration['options']:
self._options[k] = v
self._tol = tol
def optimize(self, num_vars, objective_function, gradient_function=None,
variable_bounds=None, initial_point=None):
super().optimize(num_vars, objective_function, gradient_function,
variable_bounds, initial_point)
if gradient_function is None and self._max_evals_grouped > 1:
epsilon = self._options['eps']
gradient_function = Optimizer.wrap_function(Optimizer.gradient_num_diff,
(objective_function,
epsilon, self._max_evals_grouped))
res = minimize(objective_function, initial_point, jac=gradient_function, tol=self._tol,
bounds=variable_bounds, method="TNC", options=self._options)
# Note: nfev here seems to be iterations not function evaluations
return res.x, res.fun, res.nfev
| 39.125926
| 95
| 0.525937
|
4a1a2d12f60a7b8acb9ea194a5791190cd109fd6
| 648
|
py
|
Python
|
cauldron/docgen/conversions.py
|
DanMayhew/cauldron
|
ac41481830fc1a363c145f4b58ce785aac054d10
|
[
"MIT"
] | 90
|
2016-09-02T15:11:10.000Z
|
2022-01-02T11:37:57.000Z
|
cauldron/docgen/conversions.py
|
DanMayhew/cauldron
|
ac41481830fc1a363c145f4b58ce785aac054d10
|
[
"MIT"
] | 86
|
2016-09-23T16:52:22.000Z
|
2022-03-31T21:39:56.000Z
|
cauldron/docgen/conversions.py
|
DanMayhew/cauldron
|
ac41481830fc1a363c145f4b58ce785aac054d10
|
[
"MIT"
] | 261
|
2016-12-22T05:36:48.000Z
|
2021-11-26T12:40:42.000Z
|
def arg_type_to_string(arg_type) -> str:
"""
Converts the argument type to a string
:param arg_type:
:return:
String representation of the argument type. Multiple return types are
turned into a comma delimited list of type names
"""
union_params = (
getattr(arg_type, '__union_params__', None) or
getattr(arg_type, '__args__', None)
)
if union_params and isinstance(union_params, (list, tuple)):
return ', '.join([arg_type_to_string(item) for item in union_params])
try:
return arg_type.__name__
except AttributeError:
return '{}'.format(arg_type)
| 28.173913
| 77
| 0.654321
|
4a1a2d32104b3c29b5cf076aafa995ce8fc5708e
| 1,464
|
py
|
Python
|
setup.py
|
PythonCHB/lat_lon_parser
|
6b942061bca374238781e3cfa05dab7554c2cdd0
|
[
"CC0-1.0"
] | 8
|
2019-05-27T10:34:22.000Z
|
2022-03-29T13:26:53.000Z
|
setup.py
|
PythonCHB/lat_lon_parser
|
6b942061bca374238781e3cfa05dab7554c2cdd0
|
[
"CC0-1.0"
] | 7
|
2019-04-08T21:35:55.000Z
|
2022-01-23T08:10:46.000Z
|
setup.py
|
PythonCHB/lat_lon_parser
|
6b942061bca374238781e3cfa05dab7554c2cdd0
|
[
"CC0-1.0"
] | 8
|
2017-07-16T07:21:42.000Z
|
2022-01-23T08:18:24.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from setuptools import setup, find_packages
def get_version():
with open(os.path.join("lat_lon_parser", "__init__.py")) as initfile:
for line in initfile:
if line.strip().startswith("__version__"):
version = line.split('=')[1].strip().strip("'")
return version
raise ValueError("__version__ is not specified in __init__.py")
with open('README.rst') as readme_file:
readme = readme_file.read()
test_requirements = ['pytest']
setup(
name='lat_lon_parser',
version=get_version(),
description="Simple parser for latitude-longitude strings",
long_description=readme,
author="Christopher Barker",
author_email='Chris.Barker@noaa.gov',
url='https://github.com/NOAA-ORR-ERD/lat_lon_parser',
packages=find_packages(),
zip_safe=False,
keywords='lat_lon_parser',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
],
test_suite='tests',
tests_require=test_requirements
)
| 28.705882
| 74
| 0.673497
|
4a1a2d75c398bcfb936e546408fd5081f33780fb
| 4,376
|
py
|
Python
|
docs/source/examples/make_example_rst.py
|
ghanashyamchalla/cis_interface
|
7b59439276eacb66f1f6ea4177d3a85cc061eed5
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/examples/make_example_rst.py
|
ghanashyamchalla/cis_interface
|
7b59439276eacb66f1f6ea4177d3a85cc061eed5
|
[
"BSD-3-Clause"
] | null | null | null |
docs/source/examples/make_example_rst.py
|
ghanashyamchalla/cis_interface
|
7b59439276eacb66f1f6ea4177d3a85cc061eed5
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from yggdrasil.examples import source, yamls
rst_dir = os.path.dirname(os.path.abspath(__file__))
toc_file = os.path.join(rst_dir, 'examples_toc.rst')
lang2print = {'python': 'Python',
'matlab': 'Matlab',
'cmake': 'CMake',
'make': 'Make',
'r': 'R',
'R': 'R',
'c': 'C',
'cpp': 'C++',
'all': 'Mixed',
'all_nomatlab': 'Mixed w/o Matlab'}
_default_lang = 'python'
def get_file(fname, local=False):
if local:
return os.path.join(rst_dir, fname)
else:
return fname
def get_rst_file(k, local=False):
return get_file('%s.rst' % (k), local=local)
def get_html_file(k, local=False):
return get_file('%s.html' % (k), local=local)
def get_src_file(k, local=False):
return get_file('%s_src.rst' % (k), local=local)
def get_yml_file(k, local=False):
return get_file('%s_yml.rst' % (k), local=local)
def make_toc_file(key_list):
with open(toc_file, 'w') as fd:
write_toc_file(fd, key_list)
def make_rst_file(k):
fname = get_rst_file(k, local=True)
with open(fname, 'w') as fd:
write_rst(fd, k)
make_src_file(k)
make_yml_file(k)
def make_src_file(k):
fname = get_src_file(k, local=True)
with open(fname, 'w') as fd:
write_src_ref(fd, k)
def make_yml_file(k):
fname = get_yml_file(k, local=True)
with open(fname, 'w') as fd:
write_yml_ref(fd, k)
def get_rel_path(fname, upone=False):
if upone:
top_dir = os.path.dirname(rst_dir)
else:
top_dir = rst_dir
return os.path.relpath(fname, top_dir)
def write_src_ref(fd, k):
write_src(fd, k, _default_lang, upone=True)
fd.write('\n')
write_ref_link(fd, k)
def write_yml_ref(fd, k):
write_yml(fd, k, _default_lang, upone=True)
fd.write('\n')
write_ref_link(fd, k)
def write_ref_link(fd, k):
fd.write("(`%s <%s>`__)\n" % ('Example in other languages',
os.path.join('examples', get_html_file(k))))
def write_toc_file(fd, key_list):
head = "Examples"
fd.write(head + '\n')
fd.write(len(head)*'=' + '\n\n')
fd.write(".. toctree::\n\n")
for k in key_list:
fd.write(" %s\n" % get_rst_file(k).split('.rst')[0])
fd.write("\n")
def write_rst(fd, k):
head = '%s Example' % k
fd.write(head + '\n')
fd.write(len(head)*'=' + '\n\n')
for l in source[k]:
write_lang(fd, k, l)
fd.write('\n')
def write_lang(fd, k, l):
head = '%s Version' % lang2print[l]
fd.write(head + '\n')
fd.write(len(head)*'-' + '\n\n')
write_src(fd, k, l)
fd.write('\n')
write_yml(fd, k, l)
fd.write('\n')
def write_code_line(fd, s, upone=False, language=None):
p = os.path.sep + get_rel_path(s, upone=True)
ext2lang = {'.yml': 'yaml', '.py': 'python',
'.c': 'c', '.cpp': 'c++', '.m': 'matlab'}
if language is None:
ext = os.path.splitext(p)[-1]
language = ext2lang.get(ext, 'python')
fd.write(".. literalinclude:: %s\n" % p)
fd.write(" :language: %s\n" % language)
fd.write(" :linenos:\n")
# fd.write(".. include:: %s\n" % get_rel_path(s, upone=upone))
# fd.write(" :code: %s\n" % language)
# fd.write(" :number-lines:\n")
fd.write("\n")
def write_src(fd, k, l, upone=False):
fd.write("Model Code:\n\n")
if isinstance(source[k][l], list):
for s in source[k][l]:
write_code_line(fd, s, upone=upone)
else:
write_code_line(fd, source[k][l], upone=upone)
def write_yml(fd, k, l, upone=False):
fd.write("Model YAML:\n\n")
if isinstance(yamls[k][l], list):
for y in yamls[k][l]:
write_code_line(fd, y, upone=upone, language='yaml')
else:
write_code_line(fd, yamls[k][l], upone=upone, language='yaml')
# rst_examples = source.keys() # all examples
rst_examples = ['gs_lesson%d' % x for x in range(1, 5)]
rst_examples.append('gs_lesson4b') # Special case
rst_examples += ['formatted_io%d' % x for x in range(1, 10)]
rst_examples += ['rpc_lesson%d' % x for x in range(1, 3)]
rst_examples += ['model_function', 'conditional_io', 'transformed_io']
make_toc_file(rst_examples)
for k in rst_examples:
make_rst_file(k)
| 26.521212
| 78
| 0.573126
|
4a1a2e5d0dbc78ba4b060bc2efdf55e5db2bf46c
| 10,010
|
py
|
Python
|
src/kgtests/src/cleaning/test_export_predications.py
|
HermannKroll/KGExtractionToolbox
|
c17a55dd1fa098f5033b7765ed0f80d3abb44cb7
|
[
"MIT"
] | 6
|
2021-09-17T09:49:59.000Z
|
2021-12-06T10:07:01.000Z
|
src/kgtests/src/cleaning/test_export_predications.py
|
HermannKroll/KGExtractionToolbox
|
c17a55dd1fa098f5033b7765ed0f80d3abb44cb7
|
[
"MIT"
] | null | null | null |
src/kgtests/src/cleaning/test_export_predications.py
|
HermannKroll/KGExtractionToolbox
|
c17a55dd1fa098f5033b7765ed0f80d3abb44cb7
|
[
"MIT"
] | 1
|
2021-09-18T17:56:12.000Z
|
2021-09-18T17:56:12.000Z
|
import unittest
import rdflib
from kgextractiontoolbox.backend.database import Session
from kgextractiontoolbox.backend.models import Document, Sentence, Predication
from kgextractiontoolbox.extraction.export_predications import export_predications_as_tsv, export_predications_as_rdf
from kgtests import util
class ExportPredicationsTest(unittest.TestCase):
    """Integration tests for exporting Predication rows as TSV and RDF.

    setUp() seeds two documents, two sentences and five predications; the
    fifth predication (id=15) belongs to a different collection
    ("Test_Export_Not") and must therefore be excluded from every export
    over "Test_Export".
    """
    def setUp(self) -> None:
        """Insert the document/sentence/predication fixtures into the DB."""
        session = Session.get()
        documents = [dict(id=1, collection="Test_Export", title="ABC", abstract=""),
                     dict(id=2, collection="Test_Export", title="DEF", abstract="")]
        Document.bulk_insert_values_into_table(session, documents)
        sentences = [dict(id=11, document_collection="Test_Export", text="Hello", md5hash="1"),
                     dict(id=12, document_collection="Test_Export", text="World. Nice", md5hash="2")]
        Sentence.bulk_insert_values_into_table(session, sentences)
        predications = [dict(id=11,
                             document_id=1, document_collection="Test_Export",
                             subject_id="A", subject_type="Drug", subject_str="ab",
                             predicate="treat", relation="treats",
                             object_id="B", object_type="Disease", object_str="bc",
                             sentence_id=11, extraction_type="PathIE"),
                        dict(id=12,
                             document_id=1, document_collection="Test_Export",
                             subject_id="C", subject_type="Disease", subject_str="c a",
                             predicate="treat", relation="treats",
                             object_id="B", object_type="Disease", object_str="b a",
                             sentence_id=11, extraction_type="PathIE"),
                        dict(id=13,
                             document_id=2, document_collection="Test_Export",
                             subject_id="A", subject_type="Disease", subject_str="a",
                             predicate="induce", relation="induces",
                             object_id="B", object_type="Disease", object_str="b",
                             sentence_id=12, extraction_type="PathIE"),
                        dict(id=14,
                             document_id=2, document_collection="Test_Export",
                             subject_id="C", subject_type="Gene", subject_str="",
                             predicate="induce", relation="induces",
                             object_id="D", object_type="Gene", object_str="",
                             sentence_id=12, extraction_type="PathIE"),
                        dict(id=15,
                             document_id=2, document_collection="Test_Export_Not",
                             subject_id="C", subject_type="Gene", subject_str="",
                             predicate="induce", relation="induces",
                             object_id="D", object_type="Gene", object_str="",
                             sentence_id=12, extraction_type="PathIE")
                        ]
        Predication.bulk_insert_values_into_table(session, predications)
    def test_export_predications_as_tsv_without_metadata(self):
        """Plain TSV export: header plus one (subject, relation, object) row per fact."""
        output_file = util.tmp_rel_path("export_predications_without_metadata.tsv")
        export_predications_as_tsv(output_file, document_collection="Test_Export")
        tuples = set()
        with open(output_file, 'rt') as f:
            for line in f:
                tuples.add(tuple(line.strip().split('\t')))
        # 1 header row + 4 facts from "Test_Export" (id=15 is filtered out).
        self.assertEqual(5, len(tuples))
        self.assertIn(('subject_id', 'relation', 'object_id'), tuples)
        self.assertIn(('A', 'treats', 'B'), tuples)
        self.assertIn(('C', 'treats', 'B'), tuples)
        self.assertIn(('A', 'induces', 'B'), tuples)
        self.assertIn(('C', 'induces', 'D'), tuples)
    def test_export_predications_as_tsv_with_metadata(self):
        """TSV export with metadata: full provenance columns, sentence text inlined."""
        output_file = util.tmp_rel_path("export_predications_with_metadata.tsv")
        export_predications_as_tsv(output_file, document_collection="Test_Export", export_metadata=True)
        tuples = set()
        with open(output_file, 'rt') as f:
            for line in f:
                tuples.add(tuple(line.strip().split('\t')))
        self.assertEqual(5, len(tuples))
        self.assertIn(("document_id", "document_collection",
                       "subject_id", "subject_type", "subject_str",
                       "predicate", "relation",
                       "object_id", "object_type", "object_str",
                       "sentence_id", "extraction_type"), tuples)
        self.assertIn(
            ('1', 'Test_Export', 'A', 'Drug', 'ab', 'treat', 'treats', 'B', 'Disease', 'bc', 'Hello', 'PathIE'),
            tuples)
        self.assertIn(
            ('1', 'Test_Export', 'C', 'Disease', 'c a', 'treat', 'treats', 'B', 'Disease', 'b a', 'Hello', 'PathIE'),
            tuples)
        self.assertIn(('2', 'Test_Export', 'A', 'Disease', 'a', 'induce', 'induces', 'B', 'Disease', 'b', 'World. Nice',
                       'PathIE'),
                      tuples)
        self.assertIn(
            ('2', 'Test_Export', 'C', 'Gene', '', 'induce', 'induces', 'D', 'Gene', '', 'World. Nice', 'PathIE'),
            tuples)
    def test_export_predications_as_rdf(self):
        """Turtle export without metadata: exactly one triple per exported fact."""
        output_file = util.tmp_rel_path("export_predications.ttl")
        export_predications_as_rdf(output_file, document_collection="Test_Export")
        g = rdflib.Graph()
        g.parse(output_file, format="turtle")
        # Compare only the last URI path segment of each triple component.
        tuples = set([(s.split('/')[-1], p.split('/')[-1], o.split('/')[-1]) for s, p, o in g])
        self.assertEqual(4, len(tuples))
        self.assertIn(('A', 'treats', 'B'), tuples)
        self.assertIn(('C', 'treats', 'B'), tuples)
        self.assertIn(('A', 'induces', 'B'), tuples)
        self.assertIn(('C', 'induces', 'D'), tuples)
    def test_export_predications_as_rdf_with_metadata(self):
        """Turtle export with metadata: reified statements plus sentence nodes.

        Expects 12 provenance triples per exported statement (4 statements)
        plus two sentence-text triples; note that spaces inside URI segments
        appear percent-encoded ('c a' -> 'c%20a') and empty strings come back
        as rdflib Literals.
        """
        output_file = util.tmp_rel_path("export_predications_with_metadata.ttl")
        export_predications_as_rdf(output_file, document_collection="Test_Export", export_metadata=True)
        g = rdflib.Graph()
        g.parse(output_file, format="turtle")
        tuples = set([(s.split('/')[-1], p.split('/')[-1], o.split('/')[-1]) for s, p, o in g])
        self.assertEqual(4 * 12 + 2, len(tuples))
        self.assertIn(('sentence_id_11', 'text', 'Hello'), tuples)
        self.assertIn(('sentence_id_12', 'text', 'World. Nice'), tuples)
        self.assertIn(('statement_11', 'document_id', '1'), tuples)
        self.assertIn(('statement_11', 'document_collection', 'Test_Export'), tuples)
        self.assertIn(('statement_11', 'subject_id', 'A'), tuples)
        self.assertIn(('statement_11', 'subject_type', 'Drug'), tuples)
        self.assertIn(('statement_11', 'subject_str', 'ab'), tuples)
        self.assertIn(('statement_11', 'predicate', 'treat'), tuples)
        self.assertIn(('statement_11', 'relation', 'treats'), tuples)
        self.assertIn(('statement_11', 'object_id', 'B'), tuples)
        self.assertIn(('statement_11', 'object_type', 'Disease'), tuples)
        self.assertIn(('statement_11', 'object_str', 'bc'), tuples)
        self.assertIn(('statement_11', 'sentence_id', 'sentence_id_11'), tuples)
        self.assertIn(('statement_11', 'extraction_type', 'PathIE'), tuples)
        self.assertIn(('statement_12', 'document_id', '1'), tuples)
        self.assertIn(('statement_12', 'document_collection', 'Test_Export'), tuples)
        self.assertIn(('statement_12', 'subject_id', 'C'), tuples)
        self.assertIn(('statement_12', 'subject_type', 'Disease'), tuples)
        self.assertIn(('statement_12', 'subject_str', 'c%20a'), tuples)
        self.assertIn(('statement_12', 'predicate', 'treat'), tuples)
        self.assertIn(('statement_12', 'relation', 'treats'), tuples)
        self.assertIn(('statement_12', 'object_id', 'B'), tuples)
        self.assertIn(('statement_12', 'object_type', 'Disease'), tuples)
        self.assertIn(('statement_12', 'object_str', 'b%20a'), tuples)
        self.assertIn(('statement_12', 'sentence_id', 'sentence_id_11'), tuples)
        self.assertIn(('statement_12', 'extraction_type', 'PathIE'), tuples)
        self.assertIn(('statement_13', 'document_id', '2'), tuples)
        self.assertIn(('statement_13', 'document_collection', 'Test_Export'), tuples)
        self.assertIn(('statement_13', 'subject_id', 'A'), tuples)
        self.assertIn(('statement_13', 'subject_type', 'Disease'), tuples)
        self.assertIn(('statement_13', 'subject_str', 'a'), tuples)
        self.assertIn(('statement_13', 'predicate', 'induce'), tuples)
        self.assertIn(('statement_13', 'relation', 'induces'), tuples)
        self.assertIn(('statement_13', 'object_id', 'B'), tuples)
        self.assertIn(('statement_13', 'object_type', 'Disease'), tuples)
        self.assertIn(('statement_13', 'object_str', 'b'), tuples)
        self.assertIn(('statement_13', 'sentence_id', 'sentence_id_12'), tuples)
        self.assertIn(('statement_13', 'extraction_type', 'PathIE'), tuples)
        self.assertIn(('statement_14', 'document_id', '2'), tuples)
        self.assertIn(('statement_14', 'document_collection', 'Test_Export'), tuples)
        self.assertIn(('statement_14', 'subject_id', 'C'), tuples)
        self.assertIn(('statement_14', 'subject_type', 'Gene'), tuples)
        self.assertIn(('statement_14', 'subject_str', rdflib.term.Literal('')), tuples)
        self.assertIn(('statement_14', 'predicate', 'induce'), tuples)
        self.assertIn(('statement_14', 'relation', 'induces'), tuples)
        self.assertIn(('statement_14', 'object_id', 'D'), tuples)
        self.assertIn(('statement_14', 'object_type', 'Gene'), tuples)
        self.assertIn(('statement_14', 'object_str', rdflib.term.Literal('')), tuples)
        self.assertIn(('statement_14', 'sentence_id', 'sentence_id_12'), tuples)
        self.assertIn(('statement_14', 'extraction_type', 'PathIE'), tuples)
| 57.2
| 120
| 0.59001
|
4a1a2eb5209770759774c9fca6e87e1e5fdacdd5
| 2,946
|
py
|
Python
|
lib/modules/base_module.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
lib/modules/base_module.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
lib/modules/base_module.py
|
ace-ecosystem/faqueue
|
a53b5577892ace4ca918f76ef9e676e85e30c93f
|
[
"Apache-2.0"
] | null | null | null |
import threading
import logging
from threading import Lock
log = logging.getLogger()
class BaseModule(threading.Thread):
    """Base class for faqueue worker modules.

    An instance owns a shared ``crits_data`` dict holding the module status
    and (under the ``'indicators'`` key) the indicators the module tracks.
    Every read and write of ``crits_data`` goes through ``data_lock`` so the
    instance can safely be shared between threads.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared state; guarded by self.data_lock for all access.
        self.crits_data = {'module_status': 'not initialized'}
        self.data_lock = Lock()

    def run(self):
        """Thread entry point; subclasses implement the module's main loop."""
        pass

    def stop(self):
        """Request the module to shut down; subclasses implement."""
        pass

    def set_crits_data(self, crits_data):
        """Atomically replace the module's shared data dict."""
        with self.data_lock:
            self.crits_data = crits_data

    def get_valid_indicator_types(self):
        """Return the indicator types this module accepts (none by default)."""
        return []

    def poll(self):
        """Return a status snapshot; subclasses override."""
        return {'module_status': 'not initialized'}

    def get_module_status(self):
        """Return the current module status string."""
        # Fix: read under the lock -- set_crits_data() may swap the whole
        # dict from another thread.
        with self.data_lock:
            return self.crits_data['module_status']

    def _has_indicator_locked(self, indicator_objectid):
        """Membership test for an indicator; caller MUST hold data_lock."""
        if 'indicators' not in self.crits_data:
            log.error('indicators key not found in crits_data.')
            return False
        return indicator_objectid in self.crits_data['indicators']

    def add_indicator(self, indicator_objectid, indicator_type, indicator_value):
        """Register a new indicator with status 'New'.

        Returns True on success, False when the indicator already exists or
        its type is not accepted by get_valid_indicator_types().
        """
        # Fix: the duplicate check and the insert now happen under a single
        # lock acquisition; the previous has_indicator()-then-insert sequence
        # released the lock between check and write (TOCTOU race).
        with self.data_lock:
            # Robustness: create the indicators map on first use instead of
            # raising KeyError on insert.
            indicators = self.crits_data.setdefault('indicators', {})
            if indicator_objectid in indicators:
                log.warning('Tried to add an indicator {}, this module already '\
                            '{} has it!'.format(indicator_objectid, self.name))
                return False
            if indicator_type not in self.get_valid_indicator_types():
                log.warning('Tried to add indicator {} with type {} to module {}. \
                    This is not a valid type!'.format(indicator_objectid, indicator_type, self.name))
                return False
            indicators[indicator_objectid] = {
                'type': indicator_type,
                'value': indicator_value,
                'status': 'New',
                'completed': False,
                'processing_results': False,
                'results': []
            }
        return True

    def check_indicator_status(self, indicator_objectid):
        """Return the status string of an indicator, or False if unknown."""
        # Fix: existence check and read share one lock acquisition.
        with self.data_lock:
            if not self._has_indicator_locked(indicator_objectid):
                return False
            return self.crits_data['indicators'][indicator_objectid]['status']

    def get_indicator_data(self, indicator_objectid):
        """Return the stored dict for an indicator, or False if unknown."""
        # Fix: removed the redundant second membership check and made the
        # check-and-read atomic.
        with self.data_lock:
            if not self._has_indicator_locked(indicator_objectid):
                return False
            return self.crits_data['indicators'][indicator_objectid]

    def remove_indicator(self, indicator_objectid):
        """Remove and return an indicator's dict, or False if unknown."""
        with self.data_lock:
            if not self._has_indicator_locked(indicator_objectid):
                return False
            return self.crits_data['indicators'].pop(indicator_objectid, False)

    def has_indicator(self, indicator_objectid):
        """Thread-safe membership test for an indicator."""
        with self.data_lock:
            return self._has_indicator_locked(indicator_objectid)
| 30.371134
| 93
| 0.612695
|
4a1a2efd61b55d3260c74fa8bbc6a37e6d7d2c7b
| 7,660
|
py
|
Python
|
doc/source/conf.py
|
keszybz/pelita
|
081329340b61e73436fb860f66882ffa47d0009e
|
[
"BSD-2-Clause"
] | null | null | null |
doc/source/conf.py
|
keszybz/pelita
|
081329340b61e73436fb860f66882ffa47d0009e
|
[
"BSD-2-Clause"
] | null | null | null |
doc/source/conf.py
|
keszybz/pelita
|
081329340b61e73436fb860f66882ffa47d0009e
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Pelita documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 18 14:32:16 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import pelita
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
needs_sphinx='1.1'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.append('sphinxext')
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pelita'
copyright = u'2012, Valentin Haenel, Rike-Benjamin Shuppner et. al.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = pelita.version
# The full version, including alpha/beta/rc tags.
release = pelita.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_templates']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'py:obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_sidebars = {
'**': ['globaltoc.html',
'sourcelink.html',
'searchbox.html'] }
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pelitadoc'
# Style sheet to use (overrides default styles, they must be included there manually)
html_style = 'pelita.css'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Pelita.tex', u'Pelita Documentation',
u'Valentin Haenel, Rike-Benjamin Shuppner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
autosummary_generate = True
autodoc_default_flags = ['show-inheritance']
autoclass_content = 'both'
numpydoc_show_class_members = False
def _pelita_member_filter(parent_name, item_names):
    """Drop autodoc items that were merely imported from foreign modules.

    An item is kept when the module *parent_name* is unknown to the
    interpreter, when the item carries no ``__module__`` attribute, or when
    its ``__module__`` is *parent_name* itself or one of its submodules.
    """
    module = sys.modules.get(parent_name)
    if module is None:
        return item_names
    prefix = parent_name + "."

    def _is_local(name):
        origin = getattr(getattr(module, name, None), '__module__', None)
        return origin is None or (origin + ".").startswith(prefix)

    return [name for name in item_names if _is_local(name)]
# HACK: inject the filter into Jinja's global template namespace so the
# autosummary templates can call pelita_member_filter(); DEFAULT_NAMESPACE is
# an undocumented Jinja internal and may break on a Jinja upgrade.
from jinja2.defaults import DEFAULT_NAMESPACE
DEFAULT_NAMESPACE['pelita_member_filter'] = _pelita_member_filter
| 32.184874
| 85
| 0.720104
|
4a1a2f4a6a8629c5cdab899fa2b83e63e2716830
| 63,011
|
py
|
Python
|
synapse/storage/databases/main/events_worker.py
|
maxkratz/synapse
|
e46ac85d674d90fa01aa49aee9587093ab6d8677
|
[
"Apache-2.0"
] | 1
|
2021-10-16T07:33:46.000Z
|
2021-10-16T07:33:46.000Z
|
synapse/storage/databases/main/events_worker.py
|
maxkratz/synapse
|
e46ac85d674d90fa01aa49aee9587093ab6d8677
|
[
"Apache-2.0"
] | 1
|
2021-10-16T08:27:46.000Z
|
2021-10-16T08:27:46.000Z
|
synapse/storage/databases/main/events_worker.py
|
maxkratz/synapse
|
e46ac85d674d90fa01aa49aee9587093ab6d8677
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
from typing import (
Collection,
Container,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
overload,
)
import attr
from constantly import NamedConstant, Names
from typing_extensions import Literal
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.api.errors import NotFoundError, SynapseError
from synapse.api.room_versions import (
KNOWN_ROOM_VERSIONS,
EventFormatVersions,
RoomVersions,
)
from synapse.events import EventBase, make_event_from_dict
from synapse.events.snapshot import EventContext
from synapse.events.utils import prune_event
from synapse.logging.context import (
PreserveLoggingContext,
current_context,
make_deferred_yieldable,
)
from synapse.metrics.background_process_metrics import (
run_as_background_process,
wrap_as_background_process,
)
from synapse.replication.slave.storage._slaved_id_tracker import SlavedIdTracker
from synapse.replication.tcp.streams import BackfillStream
from synapse.replication.tcp.streams.events import EventsStream
from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_clause
from synapse.storage.database import DatabasePool
from synapse.storage.engines import PostgresEngine
from synapse.storage.util.id_generators import MultiWriterIdGenerator, StreamIdGenerator
from synapse.storage.util.sequence import build_sequence_generator
from synapse.types import JsonDict, get_domain_from_id
from synapse.util import unwrapFirstError
from synapse.util.async_helpers import ObservableDeferred
from synapse.util.caches.descriptors import cached, cachedList
from synapse.util.caches.lrucache import LruCache
from synapse.util.iterutils import batch_iter
from synapse.util.metrics import Measure
logger = logging.getLogger(__name__)
# These values are used in the `enqueus_event` and `_do_fetch` methods to
# control how we batch/bulk fetch events from the database.
# The values are plucked out of thing air to make initial sync run faster
# on jki.re
# TODO: Make these configurable.
EVENT_QUEUE_THREADS = 3 # Max number of threads that will fetch events
EVENT_QUEUE_ITERATIONS = 3 # No. times we block waiting for requests for events
EVENT_QUEUE_TIMEOUT_S = 0.1 # Timeout when waiting for requests for events
@attr.s(slots=True, auto_attribs=True)
class _EventCacheEntry:
    """Value stored in the event cache: an event plus its redacted form."""
    # The event as loaded from the database.
    event: EventBase
    # The pruned/redacted form of `event`, or None if it is not redacted.
    redacted_event: Optional[EventBase]
class EventRedactBehaviour(Names):
    """
    What to do when retrieving a redacted event from the database.
    """
    AS_IS = NamedConstant()  # return the full, un-redacted event body
    REDACT = NamedConstant()  # return the event with a redacted (pruned) body
    BLOCK = NamedConstant()  # do not return redacted events at all
class EventsWorkerStore(SQLBaseStore):
# Whether to use dedicated DB threads for event fetching. This is only used
# if there are multiple DB threads available. When used will lock the DB
# thread for periods of time (so unit tests want to disable this when they
# run DB transactions on the main thread). See EVENT_QUEUE_* for more
# options controlling this.
USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING = True
    def __init__(self, database: DatabasePool, db_conn, hs):
        """Set up stream ID generators, the event cache and fetch machinery.

        Args:
            database: the database pool to run queries against.
            db_conn: database connection handed to the ID generators and the
                sequence builder during setup.
            hs: the homeserver object, used for config and instance identity.
        """
        super().__init__(database, db_conn, hs)
        if isinstance(database.engine, PostgresEngine):
            # If we're using Postgres then we can use `MultiWriterIdGenerator`
            # regardless of whether this process writes to the streams or not.
            self._stream_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="events",
                instance_name=hs.get_instance_name(),
                tables=[("events", "instance_name", "stream_ordering")],
                sequence_name="events_stream_seq",
                writers=hs.config.worker.writers.events,
            )
            # Backfill positions grow in the negative direction
            # (positive=False below).
            self._backfill_id_gen = MultiWriterIdGenerator(
                db_conn=db_conn,
                db=database,
                stream_name="backfill",
                instance_name=hs.get_instance_name(),
                tables=[("events", "instance_name", "stream_ordering")],
                sequence_name="events_backfill_stream_seq",
                positive=False,
                writers=hs.config.worker.writers.events,
            )
        else:
            # We shouldn't be running in worker mode with SQLite, but it's
            # useful to support it for unit tests.
            #
            # If this process is the writer then we need to use
            # `StreamIdGenerator`, otherwise we use `SlavedIdTracker` which gets
            # updated over replication. (Multiple writers are not supported for
            # SQLite).
            if hs.get_instance_name() in hs.config.worker.writers.events:
                self._stream_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                )
                self._backfill_id_gen = StreamIdGenerator(
                    db_conn,
                    "events",
                    "stream_ordering",
                    step=-1,
                    extra_tables=[("ex_outlier_stream", "event_stream_ordering")],
                )
            else:
                self._stream_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering"
                )
                self._backfill_id_gen = SlavedIdTracker(
                    db_conn, "events", "stream_ordering", step=-1
                )
        if hs.config.worker.run_background_tasks:
            # We periodically clean out old transaction ID mappings
            # (every 5 minutes).
            self._clock.looping_call(
                self._cleanup_old_transaction_ids,
                5 * 60 * 1000,
            )
        self._get_event_cache = LruCache(
            cache_name="*getEvent*",
            max_size=hs.config.caches.event_cache_size,
        )
        # Map from event ID to a deferred that will result in a map from event
        # ID to cache entry. Note that the returned dict may not have the
        # requested event in it if the event isn't in the DB.
        self._current_event_fetches: Dict[
            str, ObservableDeferred[Dict[str, _EventCacheEntry]]
        ] = {}
        # Guards the fetch queue state below (see EVENT_QUEUE_* constants).
        self._event_fetch_lock = threading.Condition()
        # Pending fetch requests; presumably consumed by a _do_fetch worker
        # (not visible in this chunk) -- TODO confirm.
        self._event_fetch_list = []
        # Count of fetch workers currently active (by name; TODO confirm).
        self._event_fetch_ongoing = 0
        # We define this sequence here so that it can be referenced from both
        # the DataStore and PersistEventStore.
        def get_chain_id_txn(txn):
            # COALESCE handles the empty-table case by starting at 0.
            txn.execute("SELECT COALESCE(max(chain_id), 0) FROM event_auth_chains")
            return txn.fetchone()[0]
        self.event_chain_id_gen = build_sequence_generator(
            db_conn,
            database.engine,
            get_chain_id_txn,
            "event_auth_chain_id",
            table="event_auth_chains",
            id_column="chain_id",
        )
    def process_replication_rows(self, stream_name, instance_name, token, rows):
        """Advance the stream ID trackers for replicated event/backfill rows."""
        if stream_name == EventsStream.NAME:
            self._stream_id_gen.advance(instance_name, token)
        elif stream_name == BackfillStream.NAME:
            # Backfill positions are stored negated, hence -token.
            self._backfill_id_gen.advance(instance_name, -token)
        super().process_replication_rows(stream_name, instance_name, token, rows)
    async def get_received_ts(self, event_id: str) -> Optional[int]:
        """Get received_ts (when it was persisted) for the event.

        Raises an exception for unknown events.

        Args:
            event_id: The event ID to query.

        Returns:
            Timestamp in milliseconds, or None for events that were persisted
            before received_ts was implemented.
        """
        # Single-column lookup; presumably raises (allow_none not passed) when
        # no row matches -- TODO confirm simple_select_one_onecol's default.
        return await self.db_pool.simple_select_one_onecol(
            table="events",
            keyvalues={"event_id": event_id},
            retcol="received_ts",
            desc="get_received_ts",
        )
    # Inform mypy that if allow_none is False (the default) then get_event
    # always returns an EventBase.
    @overload
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: Literal[False] = False,
        check_room_id: Optional[str] = None,
    ) -> EventBase:
        ...
    @overload
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        # NOTE(review): default `False` contradicts Literal[True]; overloads
        # are erased at runtime so the default is never used -- confirm intent.
        allow_none: Literal[True] = False,
        check_room_id: Optional[str] = None,
    ) -> Optional[EventBase]:
        ...
    async def get_event(
        self,
        event_id: str,
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
        allow_none: bool = False,
        check_room_id: Optional[str] = None,
    ) -> Optional[EventBase]:
        """Get an event from the database by event_id.

        Args:
            event_id: The event_id of the event to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * BLOCK - Do not return redacted events (behave as per allow_none
                    if the event is redacted)

            get_prev_content: If True and event is a state event,
                include the previous states content in the unsigned field.

            allow_rejected: If True, return rejected events. Otherwise,
                behave as per allow_none.

            allow_none: If True, return None if no event found, if
                False throw a NotFoundError

            check_room_id: if not None, check the room of the found event.
                If there is a mismatch, behave as per allow_none.

        Returns:
            The event, or None if the event was not found.
        """
        # Fail fast on non-str event IDs (e.g. an event object passed by
        # mistake).
        if not isinstance(event_id, str):
            raise TypeError("Invalid event event_id %r" % (event_id,))
        events = await self.get_events_as_list(
            [event_id],
            redact_behaviour=redact_behaviour,
            get_prev_content=get_prev_content,
            allow_rejected=allow_rejected,
        )
        event = events[0] if events else None
        # A room mismatch is treated the same as "not found".
        if event is not None and check_room_id is not None:
            if event.room_id != check_room_id:
                event = None
        if event is None and not allow_none:
            raise NotFoundError("Could not find event %s" % (event_id,))
        return event
    async def get_events(
        self,
        event_ids: Iterable[str],
        redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
        get_prev_content: bool = False,
        allow_rejected: bool = False,
    ) -> Dict[str, EventBase]:
        """Get events from the database

        Args:
            event_ids: The event_ids of the events to fetch

            redact_behaviour: Determine what to do with a redacted event. Possible
                values:
                * AS_IS - Return the full event body with no redacted content
                * REDACT - Return the event but with a redacted body
                * BLOCK - Do not return redacted events (omit them from the response)

            get_prev_content: If True and event is a state event,
                include the previous states content in the unsigned field.

            allow_rejected: If True, return rejected events. Otherwise,
                omits rejected events from the response.

        Returns:
            A mapping from event_id to event. Unknown/omitted events are
            simply absent from the dict.
        """
        events = await self.get_events_as_list(
            event_ids,
            redact_behaviour=redact_behaviour,
            get_prev_content=get_prev_content,
            allow_rejected=allow_rejected,
        )
        # Re-key the ordered list by event ID for convenient lookup.
        return {e.event_id: e for e in events}
async def get_events_as_list(
    self,
    event_ids: Collection[str],
    redact_behaviour: EventRedactBehaviour = EventRedactBehaviour.REDACT,
    get_prev_content: bool = False,
    allow_rejected: bool = False,
) -> List[EventBase]:
    """Get events from the database and return in a list in the same order
    as given by `event_ids` arg.

    Unknown events will be omitted from the response.

    Args:
        event_ids: The event_ids of the events to fetch

        redact_behaviour: Determine what to do with a redacted event. Possible values:

            * AS_IS - Return the full event body with no redacted content
            * REDACT - Return the event but with a redacted body
            * DISALLOW - Do not return redacted events (omit them from the response)

        get_prev_content: If True and event is a state event,
            include the previous states content in the unsigned field.

        allow_rejected: If True, return rejected events. Otherwise,
            omits rejected events from the response.

    Returns:
        List of events fetched from the database. The events are in the same
        order as `event_ids` arg.

        Note that the returned list may be smaller than the list of event
        IDs if not all events could be fetched.
    """
    if not event_ids:
        return []
    # there may be duplicates so we cast the list to a set
    event_entry_map = await self._get_events_from_cache_or_db(
        set(event_ids), allow_rejected=allow_rejected
    )
    events = []
    for event_id in event_ids:
        entry = event_entry_map.get(event_id, None)
        if not entry:
            # Unknown event: silently omit it, per the documented contract.
            continue
        if not allow_rejected:
            assert not entry.event.rejected_reason, (
                "rejected event returned from _get_events_from_cache_or_db despite "
                "allow_rejected=False"
            )
        # We may not have had the original event when we received a redaction, so
        # we have to recheck auth now.
        if not allow_rejected and entry.event.type == EventTypes.Redaction:
            if entry.event.redacts is None:
                # A redacted redaction doesn't have a `redacts` key, in
                # which case lets just withhold the event.
                #
                # Note: Most of the time if the redactions has been
                # redacted we still have the un-redacted event in the DB
                # and so we'll still see the `redacts` key. However, this
                # isn't always true e.g. if we have censored the event.
                logger.debug(
                    "Withholding redaction event %s as we don't have redacts key",
                    event_id,
                )
                continue
            redacted_event_id = entry.event.redacts
            event_map = await self._get_events_from_cache_or_db([redacted_event_id])
            original_event_entry = event_map.get(redacted_event_id)
            if not original_event_entry:
                # we don't have the redacted event (or it was rejected).
                #
                # We assume that the redaction isn't authorized for now; if the
                # redacted event later turns up, the redaction will be re-checked,
                # and if it is found valid, the original will get redacted before it
                # is served to the client.
                logger.debug(
                    "Withholding redaction event %s since we don't (yet) have the "
                    "original %s",
                    event_id,
                    redacted_event_id,
                )
                continue
            original_event = original_event_entry.event
            if original_event.type == EventTypes.Create:
                # we never serve redactions of Creates to clients.
                logger.info(
                    "Withholding redaction %s of create event %s",
                    event_id,
                    redacted_event_id,
                )
                continue
            if original_event.room_id != entry.event.room_id:
                logger.info(
                    "Withholding redaction %s of event %s from a different room",
                    event_id,
                    redacted_event_id,
                )
                continue
            if entry.event.internal_metadata.need_to_check_redaction():
                original_domain = get_domain_from_id(original_event.sender)
                redaction_domain = get_domain_from_id(entry.event.sender)
                if original_domain != redaction_domain:
                    # the senders don't match, so this is forbidden
                    logger.info(
                        "Withholding redaction %s whose sender domain %s doesn't "
                        "match that of redacted event %s %s",
                        event_id,
                        redaction_domain,
                        redacted_event_id,
                        original_domain,
                    )
                    continue
                # Update the cache to save doing the checks again.
                entry.event.internal_metadata.recheck_redaction = False
        event = entry.event
        if entry.redacted_event:
            # NOTE(review): the docstring calls this option DISALLOW but the
            # enum member checked here is BLOCK — confirm they are the same
            # constant.
            if redact_behaviour == EventRedactBehaviour.BLOCK:
                # Skip this event
                continue
            elif redact_behaviour == EventRedactBehaviour.REDACT:
                event = entry.redacted_event
        events.append(event)
        if get_prev_content:
            if "replaces_state" in event.unsigned:
                prev = await self.get_event(
                    event.unsigned["replaces_state"],
                    get_prev_content=False,
                    allow_none=True,
                )
                if prev:
                    # Copy `unsigned` before mutating: the original dict may be
                    # shared with the cached event entry.
                    event.unsigned = dict(event.unsigned)
                    event.unsigned["prev_content"] = prev.content
                    event.unsigned["prev_sender"] = prev.sender
    return events
async def _get_events_from_cache_or_db(
    self, event_ids: Iterable[str], allow_rejected: bool = False
) -> Dict[str, _EventCacheEntry]:
    """Fetch a bunch of events from the cache or the database.

    If events are pulled from the database, they will be cached for future lookups.

    Unknown events are omitted from the response.

    Args:
        event_ids: The event_ids of the events to fetch

        allow_rejected: Whether to include rejected events. If False,
            rejected events are omitted from the response.

    Returns:
        map from event id to result
    """
    event_entry_map = self._get_events_from_cache(
        event_ids,
    )
    missing_events_ids = {e for e in event_ids if e not in event_entry_map}
    # We now look up if we're already fetching some of the events in the DB,
    # if so we wait for those lookups to finish instead of pulling the same
    # events out of the DB multiple times.
    #
    # Note: we might get the same `ObservableDeferred` back for multiple
    # events we're already fetching, so we deduplicate the deferreds to
    # avoid extraneous work (if we don't do this we can end up in a n^2 mode
    # when we wait on the same Deferred N times, then try and merge the
    # same dict into itself N times).
    already_fetching_ids: Set[str] = set()
    already_fetching_deferreds: Set[
        ObservableDeferred[Dict[str, _EventCacheEntry]]
    ] = set()
    for event_id in missing_events_ids:
        deferred = self._current_event_fetches.get(event_id)
        if deferred is not None:
            # We're already pulling the event out of the DB. Add the deferred
            # to the collection of deferreds to wait on.
            already_fetching_ids.add(event_id)
            already_fetching_deferreds.add(deferred)
    missing_events_ids.difference_update(already_fetching_ids)
    if missing_events_ids:
        log_ctx = current_context()
        log_ctx.record_event_fetch(len(missing_events_ids))
        # Add entries to `self._current_event_fetches` for each event we're
        # going to pull from the DB. We use a single deferred that resolves
        # to all the events we pulled from the DB (this will result in this
        # function returning more events than requested, but that can happen
        # already due to `_get_events_from_db`).
        fetching_deferred: ObservableDeferred[
            Dict[str, _EventCacheEntry]
        ] = ObservableDeferred(defer.Deferred())
        for event_id in missing_events_ids:
            self._current_event_fetches[event_id] = fetching_deferred
        # Note that _get_events_from_db is also responsible for turning db rows
        # into FrozenEvents (via _get_event_from_row), which involves seeing if
        # the events have been redacted, and if so pulling the redaction event out
        # of the database to check it.
        #
        try:
            missing_events = await self._get_events_from_db(
                missing_events_ids,
            )
            event_entry_map.update(missing_events)
        except Exception as e:
            # Propagate the failure to any concurrent waiters before
            # re-raising to our own caller.
            with PreserveLoggingContext():
                fetching_deferred.errback(e)
            raise e
        finally:
            # Ensure that we mark these events as no longer being fetched.
            for event_id in missing_events_ids:
                self._current_event_fetches.pop(event_id, None)
        with PreserveLoggingContext():
            fetching_deferred.callback(missing_events)
    if already_fetching_deferreds:
        # Wait for the other event requests to finish and add their results
        # to ours.
        results = await make_deferred_yieldable(
            defer.gatherResults(
                (d.observe() for d in already_fetching_deferreds),
                consumeErrors=True,
            )
        ).addErrback(unwrapFirstError)
        for result in results:
            # We filter out events that we haven't asked for as we might get
            # a *lot* of superfluous events back, and there is no point
            # going through and inserting them all (which can take time).
            event_entry_map.update(
                (event_id, entry)
                for event_id, entry in result.items()
                if event_id in already_fetching_ids
            )
    if not allow_rejected:
        # Strip rejected events out unless the caller explicitly wants them.
        event_entry_map = {
            event_id: entry
            for event_id, entry in event_entry_map.items()
            if not entry.event.rejected_reason
        }
    return event_entry_map
def _invalidate_get_event_cache(self, event_id):
    """Drop any cached entry for the given event ID from the event cache."""
    cache_key = (event_id,)
    self._get_event_cache.invalidate(cache_key)
def _get_events_from_cache(
    self, events: Iterable[str], update_metrics: bool = True
) -> Dict[str, _EventCacheEntry]:
    """Look up a batch of events in the in-memory event cache.

    May return rejected events.

    Args:
        events: list of event_ids to fetch
        update_metrics: Whether to update the cache hit ratio metrics

    Returns:
        A map from event ID to cache entry, containing only the IDs that
        were found in the cache.
    """
    hits: Dict[str, _EventCacheEntry] = {}
    # Hoist the bound method out of the loop.
    cache_lookup = self._get_event_cache.get
    for eid in events:
        entry = cache_lookup((eid,), None, update_metrics=update_metrics)
        if entry:
            hits[eid] = entry
    return hits
async def get_stripped_room_state_from_event_context(
    self,
    context: EventContext,
    state_types_to_include: Container[str],
    membership_user_id: Optional[str] = None,
) -> List[JsonDict]:
    """
    Build the "stripped" state of a room from an event context.

    "Stripped" state means that only the `type`, `state_key`, `content` and
    `sender` keys are included from each state event.

    Args:
        context: The event context to retrieve state of the room from.
        state_types_to_include: The type of state events to include.
        membership_user_id: An optional user ID whose membership events should
            also be included. This is useful when generating the stripped state
            of a room for invites: we want to send membership events of the
            inviter, so the invitee can display the inviter's profile
            information if the room lacks any.

    Returns:
        A list of dictionaries, each representing a stripped state event from the room.
    """
    current_state_ids = await context.get_current_state_ids()
    # This event is not an outlier, so the current state must be known.
    assert current_state_ids is not None
    member_key = (
        (EventTypes.Member, membership_user_id) if membership_user_id else None
    )
    # Pick out the event IDs whose state keys match the requested types (or
    # the requested user's membership event).
    selected_event_ids = []
    for state_pair, e_id in current_state_ids.items():
        if state_pair[0] in state_types_to_include:
            selected_event_ids.append(e_id)
        elif member_key is not None and state_pair == member_key:
            selected_event_ids.append(e_id)
    selected_events = await self.get_events(selected_event_ids)
    stripped = []
    for ev in selected_events.values():
        stripped.append(
            {
                "type": ev.type,
                "state_key": ev.state_key,
                "content": ev.content,
                "sender": ev.sender,
            }
        )
    return stripped
def _do_fetch(self, conn):
    """Takes a database connection and waits for requests for events from
    the _event_fetch_list queue.

    Runs on a database thread: loops pulling batches of fetch requests off
    the shared queue, and exits once the queue has stayed empty for long
    enough (so the thread can be released back to the pool).
    """
    i = 0
    while True:
        with self._event_fetch_lock:
            # Take ownership of the whole pending queue in one go.
            event_list = self._event_fetch_list
            self._event_fetch_list = []
            if not event_list:
                single_threaded = self.database_engine.single_threaded
                if (
                    not self.USE_DEDICATED_DB_THREADS_FOR_EVENT_FETCHING
                    or single_threaded
                    or i > EVENT_QUEUE_ITERATIONS
                ):
                    # Give up this fetcher: decrement the live-fetcher count
                    # so a new one can be started later if needed.
                    self._event_fetch_ongoing -= 1
                    return
                else:
                    # Nothing queued yet: wait (with timeout) for a producer
                    # to notify us, then loop to re-check.
                    self._event_fetch_lock.wait(EVENT_QUEUE_TIMEOUT_S)
                    i += 1
                    continue
        # We got work, so reset the idle-iteration counter.
        i = 0
        self._fetch_event_list(conn, event_list)
def _fetch_event_list(self, conn, event_list):
    """Handle a load of requests from the _event_fetch_list queue

    Runs on a database thread; deferreds are resolved via the reactor thread.

    Args:
        conn (twisted.enterprise.adbapi.Connection): database connection

        event_list (list[Tuple[list[str], Deferred]]):
            The fetch requests. Each entry consists of a list of event
            ids to be fetched, and a deferred to be completed once the
            events have been fetched.

            The deferreds are callbacked with a dictionary mapping from event id
            to event row. Note that it may well contain additional events that
            were not part of this request.
    """
    with Measure(self._clock, "_fetch_event_list"):
        try:
            # Union all the requested IDs so one transaction serves every
            # queued request.
            events_to_fetch = {
                event_id for events, _ in event_list for event_id in events
            }
            row_dict = self.db_pool.new_transaction(
                conn, "do_fetch", [], [], self._fetch_event_rows, events_to_fetch
            )

            # We only want to resolve deferreds from the main thread
            def fire():
                for _, d in event_list:
                    d.callback(row_dict)

            with PreserveLoggingContext():
                self.hs.get_reactor().callFromThread(fire)
        except Exception as e:
            logger.exception("do_fetch")

            # We only want to resolve deferreds from the main thread
            def fire(evs, exc):
                for _, d in evs:
                    if not d.called:
                        with PreserveLoggingContext():
                            d.errback(exc)

            with PreserveLoggingContext():
                self.hs.get_reactor().callFromThread(fire, event_list, e)
async def _get_events_from_db(
    self, event_ids: Iterable[str]
) -> Dict[str, _EventCacheEntry]:
    """Fetch a bunch of events from the database.

    May return rejected events.

    Returned events will be added to the cache for future lookups.

    Unknown events are omitted from the response.

    Args:
        event_ids: The event_ids of the events to fetch

    Returns:
        map from event id to result. May return extra events which
        weren't asked for.
    """
    fetched_events = {}
    events_to_fetch = event_ids
    # Each batch of rows may reference redaction events we haven't fetched
    # yet; keep looping until the set of referenced redactions is closed.
    while events_to_fetch:
        row_map = await self._enqueue_events(events_to_fetch)
        # we need to recursively fetch any redactions of those events
        redaction_ids = set()
        for event_id in events_to_fetch:
            row = row_map.get(event_id)
            fetched_events[event_id] = row
            if row:
                redaction_ids.update(row["redactions"])
        events_to_fetch = redaction_ids.difference(fetched_events.keys())
        if events_to_fetch:
            logger.debug("Also fetching redaction events %s", events_to_fetch)
    # build a map from event_id to EventBase
    event_map = {}
    for event_id, row in fetched_events.items():
        if not row:
            continue
        assert row["event_id"] == event_id
        rejected_reason = row["rejected_reason"]
        # If the event or metadata cannot be parsed, log the error and act
        # as if the event is unknown.
        try:
            d = db_to_json(row["json"])
        except ValueError:
            logger.error("Unable to parse json from event: %s", event_id)
            continue
        try:
            internal_metadata = db_to_json(row["internal_metadata"])
        except ValueError:
            logger.error(
                "Unable to parse internal_metadata from event: %s", event_id
            )
            continue
        format_version = row["format_version"]
        if format_version is None:
            # This means that we stored the event before we had the concept
            # of a event format version, so it must be a V1 event.
            format_version = EventFormatVersions.V1
        room_version_id = row["room_version_id"]
        if not room_version_id:
            # this should only happen for out-of-band membership events which
            # arrived before #6983 landed. For all other events, we should have
            # an entry in the 'rooms' table.
            #
            # However, the 'out_of_band_membership' flag is unreliable for older
            # invites, so just accept it for all membership events.
            #
            if d["type"] != EventTypes.Member:
                raise Exception(
                    "Room %s for event %s is unknown" % (d["room_id"], event_id)
                )
            # so, assuming this is an out-of-band-invite that arrived before #6983
            # landed, we know that the room version must be v5 or earlier (because
            # v6 hadn't been invented at that point, so invites from such rooms
            # would have been rejected.)
            #
            # The main reason we need to know the room version here (other than
            # choosing the right python Event class) is in case the event later has
            # to be redacted - and all the room versions up to v5 used the same
            # redaction algorithm.
            #
            # So, the following approximations should be adequate.
            if format_version == EventFormatVersions.V1:
                # if it's event format v1 then it must be room v1 or v2
                room_version = RoomVersions.V1
            elif format_version == EventFormatVersions.V2:
                # if it's event format v2 then it must be room v3
                room_version = RoomVersions.V3
            else:
                # if it's event format v3 then it must be room v4 or v5
                room_version = RoomVersions.V5
        else:
            room_version = KNOWN_ROOM_VERSIONS.get(room_version_id)
            if not room_version:
                logger.warning(
                    "Event %s in room %s has unknown room version %s",
                    event_id,
                    d["room_id"],
                    room_version_id,
                )
                continue
            if room_version.event_format != format_version:
                logger.error(
                    "Event %s in room %s with version %s has wrong format: "
                    "expected %s, was %s",
                    event_id,
                    d["room_id"],
                    room_version_id,
                    room_version.event_format,
                    format_version,
                )
                continue
        original_ev = make_event_from_dict(
            event_dict=d,
            room_version=room_version,
            internal_metadata_dict=internal_metadata,
            rejected_reason=rejected_reason,
        )
        original_ev.internal_metadata.stream_ordering = row["stream_ordering"]
        original_ev.internal_metadata.outlier = row["outlier"]
        event_map[event_id] = original_ev
    # finally, we can decide whether each one needs redacting, and build
    # the cache entries.
    result_map = {}
    for event_id, original_ev in event_map.items():
        redactions = fetched_events[event_id]["redactions"]
        redacted_event = self._maybe_redact_event_row(
            original_ev, redactions, event_map
        )
        cache_entry = _EventCacheEntry(
            event=original_ev, redacted_event=redacted_event
        )
        self._get_event_cache.set((event_id,), cache_entry)
        result_map[event_id] = cache_entry
    return result_map
async def _enqueue_events(self, events):
    """Fetches events from the database using the _event_fetch_list. This
    allows batch and bulk fetching of events - it allows us to fetch events
    without having to create a new transaction for each request for events.

    Args:
        events (Iterable[str]): events to be fetched.

    Returns:
        Dict[str, Dict]: map from event id to row data from the database.
            May contain events that weren't requested.
    """
    events_d = defer.Deferred()
    with self._event_fetch_lock:
        self._event_fetch_list.append((events, events_d))
        self._event_fetch_lock.notify()
        # Start a new fetcher thread only if we're below the cap; otherwise
        # an already-running fetcher will pick this request up.
        if self._event_fetch_ongoing < EVENT_QUEUE_THREADS:
            self._event_fetch_ongoing += 1
            should_start = True
        else:
            should_start = False
    if should_start:
        run_as_background_process(
            "fetch_events", self.db_pool.runWithConnection, self._do_fetch
        )
    logger.debug("Loading %d events: %s", len(events), events)
    with PreserveLoggingContext():
        row_map = await events_d
    logger.debug("Loaded %d events (%d rows)", len(events), len(row_map))
    return row_map
def _fetch_event_rows(self, txn, event_ids):
    """Fetch event rows from the database

    Events which are not found are omitted from the result.

    The returned per-event dicts contain the following keys:

     * event_id (str)

     * stream_ordering (int): stream ordering for this event

     * json (str): json-encoded event structure

     * internal_metadata (str): json-encoded internal metadata dict

     * format_version (int|None): The format of the event. Hopefully one
       of EventFormatVersions. 'None' means the event predates
       EventFormatVersions (so the event is format V1).

     * room_version_id (str|None): The version of the room which contains the event.
       Hopefully one of RoomVersions.

       Due to historical reasons, there may be a few events in the database which
       do not have an associated room; in this case None will be returned here.

     * rejected_reason (str|None): if the event was rejected, the reason
       why.

     * redactions (List[str]): a list of event-ids which (claim to) redact
       this event.

    Args:
        txn (twisted.enterprise.adbapi.Connection):
            NOTE(review): documented as a Connection but used like a DB-API
            cursor (`execute`, iteration) — confirm the intended type.
        event_ids (Iterable[str]): event IDs to fetch

    Returns:
        Dict[str, Dict]: a map from event id to event info.
    """
    event_dict = {}
    # Batch the IDs so each IN-list query stays a manageable size.
    for evs in batch_iter(event_ids, 200):
        sql = """\
            SELECT
              e.event_id,
              e.stream_ordering,
              ej.internal_metadata,
              ej.json,
              ej.format_version,
              r.room_version,
              rej.reason,
              e.outlier
            FROM events AS e
              JOIN event_json AS ej USING (event_id)
              LEFT JOIN rooms r ON r.room_id = e.room_id
              LEFT JOIN rejections as rej USING (event_id)
            WHERE """
        clause, args = make_in_list_sql_clause(
            txn.database_engine, "e.event_id", evs
        )
        txn.execute(sql + clause, args)
        for row in txn:
            event_id = row[0]
            event_dict[event_id] = {
                "event_id": event_id,
                "stream_ordering": row[1],
                "internal_metadata": row[2],
                "json": row[3],
                "format_version": row[4],
                "room_version_id": row[5],
                "rejected_reason": row[6],
                "redactions": [],
                "outlier": row[7],
            }
        # check for redactions
        redactions_sql = "SELECT event_id, redacts FROM redactions WHERE "
        clause, args = make_in_list_sql_clause(txn.database_engine, "redacts", evs)
        txn.execute(redactions_sql + clause, args)
        for (redacter, redacted) in txn:
            d = event_dict.get(redacted)
            if d:
                d["redactions"].append(redacter)
    return event_dict
def _maybe_redact_event_row(
    self,
    original_ev: EventBase,
    redactions: Iterable[str],
    event_map: Dict[str, EventBase],
) -> Optional[EventBase]:
    """Given an event object and a list of possible redacting event ids,
    determine whether to honour any of those redactions and if so return a redacted
    event.

    Args:
        original_ev: The original event.
        redactions: list of event ids of potential redaction events
        event_map: other events which have been fetched, in which we can
            look up the redaction events. Map from event id to event.

    Returns:
        If the event should be redacted, a pruned event object. Otherwise, None.
    """
    if original_ev.type == "m.room.create":
        # we choose to ignore redactions of m.room.create events.
        return None
    # Return the first redaction that passes all of the validity checks.
    for redaction_id in redactions:
        redaction_event = event_map.get(redaction_id)
        if not redaction_event or redaction_event.rejected_reason:
            # we don't have the redaction event, or the redaction event was not
            # authorized.
            logger.debug(
                "%s was redacted by %s but redaction not found/authed",
                original_ev.event_id,
                redaction_id,
            )
            continue
        if redaction_event.room_id != original_ev.room_id:
            logger.debug(
                "%s was redacted by %s but redaction was in a different room!",
                original_ev.event_id,
                redaction_id,
            )
            continue
        # Starting in room version v3, some redactions need to be
        # rechecked if we didn't have the redacted event at the
        # time, so we recheck on read instead.
        if redaction_event.internal_metadata.need_to_check_redaction():
            expected_domain = get_domain_from_id(original_ev.sender)
            if get_domain_from_id(redaction_event.sender) == expected_domain:
                # This redaction event is allowed. Mark as not needing a recheck.
                redaction_event.internal_metadata.recheck_redaction = False
            else:
                # Senders don't match, so the event isn't actually redacted
                logger.debug(
                    "%s was redacted by %s but the senders don't match",
                    original_ev.event_id,
                    redaction_id,
                )
                continue
        logger.debug("Redacting %s due to %s", original_ev.event_id, redaction_id)
        # we found a good redaction event. Redact!
        redacted_event = prune_event(original_ev)
        redacted_event.unsigned["redacted_by"] = redaction_id
        # It's fine to add the event directly, since get_pdu_json
        # will serialise this field correctly
        redacted_event.unsigned["redacted_because"] = redaction_event
        return redacted_event
    # no valid redaction found for this event
    return None
async def have_events_in_timeline(self, event_ids):
    """Return the subset of `event_ids` that we have already processed and
    stored as non-outlier events.
    """
    db_rows = await self.db_pool.simple_select_many_batch(
        table="events",
        retcols=("event_id",),
        column="event_id",
        iterable=list(event_ids),
        keyvalues={"outlier": False},
        desc="have_events_in_timeline",
    )
    found = set()
    for db_row in db_rows:
        found.add(db_row["event_id"])
    return found
async def have_seen_events(
    self, room_id: str, event_ids: Iterable[str]
) -> Set[str]:
    """Given a list of event ids, check if we have already processed them.

    The room_id is only used to structure the cache (so that it can later be
    invalidated by room_id) - there is no guarantee that the events are actually
    in the room in question.

    Args:
        room_id: Room we are polling
        event_ids: events we are looking for

    Returns:
        set[str]: The events we have already seen.
    """
    lookup_keys = ((room_id, event_id) for event_id in event_ids)
    seen_map = await self._have_seen_events_dict(lookup_keys)
    seen = set()
    for (_room, eid), was_seen in seen_map.items():
        if was_seen:
            seen.add(eid)
    return seen
@cachedList("have_seen_event", "keys")
async def _have_seen_events_dict(
    self, keys: Iterable[Tuple[str, str]]
) -> Dict[Tuple[str, str], bool]:
    """Helper for have_seen_events

    Returns:
         a dict {(room_id, event_id)-> bool}
    """
    # NOTE(review): `keys` is iterated twice (here and in the batch_iter
    # below), so this assumes it is not a one-shot iterator — confirm that
    # @cachedList always supplies a reusable collection.
    # if the event cache contains the event, obviously we've seen it.
    cache_results = {
        (rid, eid) for (rid, eid) in keys if self._get_event_cache.contains((eid,))
    }
    results = {x: True for x in cache_results}

    def have_seen_events_txn(txn, chunk: Tuple[Tuple[str, str], ...]):
        # we deliberately do *not* query the database for room_id, to make the
        # query an index-only lookup on `events_event_id_key`.
        #
        # We therefore pull the events from the database into a set...
        sql = "SELECT event_id FROM events AS e WHERE "
        clause, args = make_in_list_sql_clause(
            txn.database_engine, "e.event_id", [eid for (_rid, eid) in chunk]
        )
        txn.execute(sql + clause, args)
        found_events = {eid for eid, in txn}
        # ... and then we can update the results for each row in the batch
        results.update({(rid, eid): (eid in found_events) for (rid, eid) in chunk})

    # each batch requires its own index scan, so we make the batches as big as
    # possible.
    for chunk in batch_iter((k for k in keys if k not in cache_results), 500):
        await self.db_pool.runInteraction(
            "have_seen_events", have_seen_events_txn, chunk
        )
    return results
@cached(max_entries=100000, tree=True)
async def have_seen_event(self, room_id: str, event_id: str):
    """Stub: never called directly.

    Exists only so the @cachedList descriptor on _have_seen_events_dict has
    a cached method (and its cache) to attach to.
    """
    # this only exists for the benefit of the @cachedList descriptor on
    # _have_seen_events_dict
    raise NotImplementedError()
def _get_current_state_event_counts_txn(self, txn, room_id):
    """Transaction helper: count current_state_events rows for a room.

    See get_current_state_event_counts.
    """
    txn.execute(
        "SELECT COUNT(*) FROM current_state_events WHERE room_id=?", (room_id,)
    )
    result = txn.fetchone()
    if not result:
        return 0
    return result[0]
async def get_current_state_event_counts(self, room_id: str) -> int:
    """Get the current number of state events in a room.

    Args:
        room_id: The room ID to query.

    Returns:
        The current number of state events.
    """
    # The counting itself happens inside the transaction helper.
    count = await self.db_pool.runInteraction(
        "get_current_state_event_counts",
        self._get_current_state_event_counts_txn,
        room_id,
    )
    return count
async def get_room_complexity(self, room_id):
    """Get a rough approximation of the complexity of the room.

    Remote servers use this to decide whether they wish to join the room:
    a higher value indicates that being in the room will consume more
    resources.

    Args:
        room_id (str)

    Returns:
        dict[str, float] of complexity version to complexity.
    """
    state_event_count = await self.get_current_state_event_counts(room_id)
    # The metric is versioned ("v1") so new formulas can be introduced
    # alongside it later.
    return {"v1": round(state_event_count / 500, 2)}
def get_current_events_token(self):
    """Return the highest stream token that events have reached."""
    id_generator = self._stream_id_gen
    return id_generator.get_current_token()
async def get_all_new_forward_event_rows(
    self, instance_name: str, last_id: int, current_id: int, limit: int
) -> List[Tuple]:
    """Returns new events, for the Events replication stream

    Args:
        last_id: the last stream_id from the previous batch.
        current_id: the maximum stream_id to return up to
        limit: the maximum number of rows to return

    Returns:
        a list of events stream rows. Each tuple consists of a stream id as
        the first element, followed by fields suitable for casting into an
        EventsStreamRow.
    """

    def get_all_new_forward_event_rows(txn):
        # The LEFT JOINs pull in the optional per-event metadata (redaction,
        # state key, relation, membership, rejection) in one query.
        sql = (
            "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
            " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"
            " FROM events AS e"
            " LEFT JOIN redactions USING (event_id)"
            " LEFT JOIN state_events USING (event_id)"
            " LEFT JOIN event_relations USING (event_id)"
            " LEFT JOIN room_memberships USING (event_id)"
            " LEFT JOIN rejections USING (event_id)"
            " WHERE ? < stream_ordering AND stream_ordering <= ?"
            " AND instance_name = ?"
            " ORDER BY stream_ordering ASC"
            " LIMIT ?"
        )
        txn.execute(sql, (last_id, current_id, instance_name, limit))
        return txn.fetchall()

    return await self.db_pool.runInteraction(
        "get_all_new_forward_event_rows", get_all_new_forward_event_rows
    )
async def get_ex_outlier_stream_rows(
    self, instance_name: str, last_id: int, current_id: int
) -> List[Tuple]:
    """Returns de-outliered events, for the Events replication stream

    Args:
        last_id: the last stream_id from the previous batch.
        current_id: the maximum stream_id to return up to

    Returns:
        a list of events stream rows. Each tuple consists of a stream id as
        the first element, followed by fields suitable for casting into an
        EventsStreamRow.
    """

    def get_ex_outlier_stream_rows_txn(txn):
        # INNER JOIN against ex_outlier_stream restricts the result to
        # events that were once outliers and have since been de-outliered.
        sql = (
            "SELECT event_stream_ordering, e.event_id, e.room_id, e.type,"
            " state_key, redacts, relates_to_id, membership, rejections.reason IS NOT NULL"
            " FROM events AS e"
            " INNER JOIN ex_outlier_stream AS out USING (event_id)"
            " LEFT JOIN redactions USING (event_id)"
            " LEFT JOIN state_events USING (event_id)"
            " LEFT JOIN event_relations USING (event_id)"
            " LEFT JOIN room_memberships USING (event_id)"
            " LEFT JOIN rejections USING (event_id)"
            " WHERE ? < event_stream_ordering"
            " AND event_stream_ordering <= ?"
            " AND out.instance_name = ?"
            " ORDER BY event_stream_ordering ASC"
        )
        txn.execute(sql, (last_id, current_id, instance_name))
        return txn.fetchall()

    return await self.db_pool.runInteraction(
        "get_ex_outlier_stream_rows", get_ex_outlier_stream_rows_txn
    )
async def get_all_new_backfill_event_rows(
    self, instance_name: str, last_id: int, current_id: int, limit: int
) -> Tuple[List[Tuple[int, list]], int, bool]:
    """Get updates for backfill replication stream, including all new
    backfilled events and events that have gone from being outliers to not.

    NOTE: The IDs given here are from replication, and so should be
    *positive*.

    Args:
        instance_name: The writer we want to fetch updates from. Unused
            here since there is only ever one writer.
        last_id: The token to fetch updates from. Exclusive.
        current_id: The token to fetch updates up to. Inclusive.
        limit: The requested limit for the number of rows to return. The
            function may return more or fewer rows.

    Returns:
        A tuple consisting of: the updates, a token to use to fetch
        subsequent updates, and whether we returned fewer rows than exists
        between the requested tokens due to the limit.

        The token returned can be used in a subsequent call to this
        function to get further updatees.

        The updates are a list of 2-tuples of stream ID and the row data
    """
    if last_id == current_id:
        return [], current_id, False

    def get_all_new_backfill_event_rows(txn):
        # Backfilled events are stored with negative stream orderings, so
        # the tokens are negated in the query and the SELECT negates the
        # ordering again to present positive tokens to the caller.
        sql = (
            "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
            " state_key, redacts, relates_to_id"
            " FROM events AS e"
            " LEFT JOIN redactions USING (event_id)"
            " LEFT JOIN state_events USING (event_id)"
            " LEFT JOIN event_relations USING (event_id)"
            " WHERE ? > stream_ordering AND stream_ordering >= ?"
            " AND instance_name = ?"
            " ORDER BY stream_ordering ASC"
            " LIMIT ?"
        )
        txn.execute(sql, (-last_id, -current_id, instance_name, limit))
        new_event_updates = [(row[0], row[1:]) for row in txn]
        limited = False
        if len(new_event_updates) == limit:
            # We hit the row limit: tell the caller to page from the last
            # token we actually returned.
            upper_bound = new_event_updates[-1][0]
            limited = True
        else:
            upper_bound = current_id
        # Second query: events which have gone from outlier to non-outlier
        # within the same token window.
        sql = (
            "SELECT -event_stream_ordering, e.event_id, e.room_id, e.type,"
            " state_key, redacts, relates_to_id"
            " FROM events AS e"
            " INNER JOIN ex_outlier_stream AS out USING (event_id)"
            " LEFT JOIN redactions USING (event_id)"
            " LEFT JOIN state_events USING (event_id)"
            " LEFT JOIN event_relations USING (event_id)"
            " WHERE ? > event_stream_ordering"
            " AND event_stream_ordering >= ?"
            " AND out.instance_name = ?"
            " ORDER BY event_stream_ordering DESC"
        )
        txn.execute(sql, (-last_id, -upper_bound, instance_name))
        new_event_updates.extend((row[0], row[1:]) for row in txn)
        if len(new_event_updates) >= limit:
            upper_bound = new_event_updates[-1][0]
            limited = True
        return new_event_updates, upper_bound, limited

    return await self.db_pool.runInteraction(
        "get_all_new_backfill_event_rows", get_all_new_backfill_event_rows
    )
async def get_all_updated_current_state_deltas(
    self, instance_name: str, from_token: int, to_token: int, target_row_count: int
) -> Tuple[List[Tuple], int, bool]:
    """Fetch updates from current_state_delta_stream

    Args:
        from_token: The previous stream token. Updates from this stream id will
            be excluded.

        to_token: The current stream token (ie the upper limit). Updates up to this
            stream id will be included (modulo the 'limit' param)

        target_row_count: The number of rows to try to return. If more rows are
            available, we will set 'limited' in the result. In the event of a large
            batch, we may return more rows than this.

    Returns:
        A triplet `(updates, new_last_token, limited)`, where:
           * `updates` is a list of database tuples.
           * `new_last_token` is the new position in stream.
           * `limited` is whether there are more updates to fetch.
    """

    def get_all_updated_current_state_deltas_txn(txn):
        sql = """
            SELECT stream_id, room_id, type, state_key, event_id
            FROM current_state_delta_stream
            WHERE ? < stream_id AND stream_id <= ?
                AND instance_name = ?
            ORDER BY stream_id ASC LIMIT ?
        """
        txn.execute(sql, (from_token, to_token, instance_name, target_row_count))
        return txn.fetchall()

    def get_deltas_for_stream_id_txn(txn, stream_id):
        # Unlimited query for a single stream id, used as a fallback when a
        # single stream id has more rows than target_row_count.
        sql = """
            SELECT stream_id, room_id, type, state_key, event_id
            FROM current_state_delta_stream
            WHERE stream_id = ?
        """
        txn.execute(sql, [stream_id])
        return txn.fetchall()

    # we need to make sure that, for every stream id in the results, we get *all*
    # the rows with that stream id.

    rows: List[Tuple] = await self.db_pool.runInteraction(
        "get_all_updated_current_state_deltas",
        get_all_updated_current_state_deltas_txn,
    )

    # if we've got fewer rows than the limit, we're good
    if len(rows) < target_row_count:
        return rows, to_token, False

    # we hit the limit, so reduce the upper limit so that we exclude the stream id
    # of the last row in the result.
    assert rows[-1][0] <= to_token
    to_token = rows[-1][0] - 1

    # search backwards through the list for the point to truncate
    for idx in range(len(rows) - 1, 0, -1):
        if rows[idx - 1][0] <= to_token:
            return rows[:idx], to_token, True

    # bother. We didn't get a full set of changes for even a single
    # stream id. let's run the query again, without a row limit, but for
    # just one stream id.
    to_token += 1
    rows = await self.db_pool.runInteraction(
        "get_deltas_for_stream_id", get_deltas_for_stream_id_txn, to_token
    )
    return rows, to_token, True
async def is_event_after(self, event_id1, event_id2):
"""Returns True if event_id1 is after event_id2 in the stream"""
to_1, so_1 = await self.get_event_ordering(event_id1)
to_2, so_2 = await self.get_event_ordering(event_id2)
return (to_1, so_1) > (to_2, so_2)
    @cached(max_entries=5000)
    async def get_event_ordering(self, event_id):
        """Return the (topological_ordering, stream_ordering) pair for an event.

        Raises:
            SynapseError: 404 if the event is not found in the events table.
        """
        res = await self.db_pool.simple_select_one(
            table="events",
            retcols=["topological_ordering", "stream_ordering"],
            keyvalues={"event_id": event_id},
            allow_none=True,
        )
        # allow_none=True means a missing event yields None rather than raising
        # inside the storage layer, so we can raise a 404 ourselves.
        if not res:
            raise SynapseError(404, "Could not find event %s" % (event_id,))
        return int(res["topological_ordering"]), int(res["stream_ordering"])
    async def get_next_event_to_expire(self) -> Optional[Tuple[str, int]]:
        """Retrieve the entry with the lowest expiry timestamp in the event_expiry
        table, or None if there's no more event to expire.
        Returns:
            A tuple containing the event ID as its first element and an expiry timestamp
            as its second one, if there's at least one row in the event_expiry table.
            None otherwise.
        """
        def get_next_event_to_expire_txn(txn):
            txn.execute(
                """
                SELECT event_id, expiry_ts FROM event_expiry
                ORDER BY expiry_ts ASC LIMIT 1
                """
            )
            # fetchone() returns None when the table is empty, which matches
            # the documented return value.
            return txn.fetchone()
        return await self.db_pool.runInteraction(
            desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
        )
    async def get_event_id_from_transaction_id(
        self, room_id: str, user_id: str, token_id: int, txn_id: str
    ) -> Optional[str]:
        """Look up if we have already persisted an event for the transaction ID,
        returning the event ID if so.
        """
        # The mapping is keyed on (room, user, access token, client txn id);
        # allow_none=True turns a cache miss into None instead of an error.
        return await self.db_pool.simple_select_one_onecol(
            table="event_txn_id",
            keyvalues={
                "room_id": room_id,
                "user_id": user_id,
                "token_id": token_id,
                "txn_id": txn_id,
            },
            retcol="event_id",
            allow_none=True,
            desc="get_event_id_from_transaction_id",
        )
    async def get_already_persisted_events(
        self, events: Iterable[EventBase]
    ) -> Dict[str, str]:
        """Look up if we have already persisted an event for the transaction ID,
        returning a mapping from event ID in the given list to the event ID of
        an existing event.
        Also checks if there are duplicates in the given events, if there are
        will map duplicates to the *first* event.
        """
        # event_id (from `events`) -> event_id of the earlier duplicate.
        mapping = {}
        # (room_id, token_id, txn_id) -> first event_id seen for that key,
        # used to detect duplicates *within* this batch.
        txn_id_to_event: Dict[Tuple[str, int, str], str] = {}
        for event in events:
            # Events not sent via a client transaction have no token/txn ids.
            token_id = getattr(event.internal_metadata, "token_id", None)
            txn_id = getattr(event.internal_metadata, "txn_id", None)
            if token_id and txn_id:
                # Check if this is a duplicate of an event in the given events.
                existing = txn_id_to_event.get((event.room_id, token_id, txn_id))
                if existing:
                    mapping[event.event_id] = existing
                    continue
                # Check if this is a duplicate of an event we've already
                # persisted.
                existing = await self.get_event_id_from_transaction_id(
                    event.room_id, event.sender, token_id, txn_id
                )
                if existing:
                    mapping[event.event_id] = existing
                    txn_id_to_event[(event.room_id, token_id, txn_id)] = existing
                else:
                    txn_id_to_event[(event.room_id, token_id, txn_id)] = event.event_id
        return mapping
    @wrap_as_background_process("_cleanup_old_transaction_ids")
    async def _cleanup_old_transaction_ids(self):
        """Cleans out transaction id mappings older than 24hrs."""
        def _cleanup_old_transaction_ids_txn(txn):
            sql = """
                DELETE FROM event_txn_id
                WHERE inserted_ts < ?
            """
            # Anything inserted more than 24 hours ago is eligible for removal.
            one_day_ago = self._clock.time_msec() - 24 * 60 * 60 * 1000
            txn.execute(sql, (one_day_ago,))
        return await self.db_pool.runInteraction(
            "_cleanup_old_transaction_ids",
            _cleanup_old_transaction_ids_txn,
        )
| 39.455855
| 95
| 0.586183
|
4a1a2fae85f5df9c1569eb6ce35d1817acbb9ec0
| 1,661
|
py
|
Python
|
Calibration/HcalCalibAlgos/test/python/hcalHBHEMuon_cfg.py
|
malbouis/cmssw
|
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
Calibration/HcalCalibAlgos/test/python/hcalHBHEMuon_cfg.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
Calibration/HcalCalibAlgos/test/python/hcalHBHEMuon_cfg.py
|
gartung/cmssw
|
3072dde3ce94dcd1791d778988198a44cde02162
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017
# CMSSW configuration: runs the HCAL HB/HE muon calibration analyzer over a
# previously produced AlCa muon file, using 2017 Run-2 conditions.
process = cms.Process("RaddamMuon",Run2_2017)
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryDB_cff")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("RecoJets.Configuration.CaloTowersES_cfi")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
# Use the standard Run-2 data global tag.
process.GlobalTag.globaltag=autoCond['run2_data']
process.load("RecoLocalCalo.EcalRecAlgos.EcalSeverityLevelESProducer_cfi")
process.load("Calibration.HcalCalibAlgos.hcalHBHEMuon_cfi")
# Enable per-category logging for the HBHEMuon analyzer, if a logger exists.
if 'MessageLogger' in process.__dict__:
    process.MessageLogger.HBHEMuon=dict()
# -1 means: process all events in the input file.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
process.source = cms.Source("PoolSource",
                            fileNames = cms.untracked.vstring(
        'file:/afs/cern.ch/work/a/amkaur/public/ForSunandaDa/AlcaProducer_codecheck/old/OutputHBHEMuon_old_2017.root'
        )
                            )
# Output ntuple written by the TFileService.
process.TFileService = cms.Service("TFileService",
                                   fileName = cms.string("ValidationOld.root")
)
# Analyzer settings: uncorrected reco hits, charge info, HE corrections on,
# up to 7 HCAL depths, quiet output.
process.hcalHBHEMuon.useRaw = 0
process.hcalHBHEMuon.unCorrect = True
process.hcalHBHEMuon.getCharge = True
process.hcalHBHEMuon.ignoreHECorr = False
process.hcalHBHEMuon.maxDepth = 7
process.hcalHBHEMuon.verbosity = 0
process.hcalTopologyIdeal.MergePosition = False
process.p = cms.Path(process.hcalHBHEMuon)
| 37.75
| 141
| 0.768212
|
4a1a3101b93bf3eee1d6edaee4a2b33959442ebd
| 1,240
|
py
|
Python
|
shotglass/app/migrations/0001_initial.py
|
johntellsall/shotglass
|
0ad5311f92bd79cd5b3bafe4a91dd9cdcce947e8
|
[
"MIT"
] | 10
|
2015-08-23T14:39:13.000Z
|
2022-02-14T08:11:07.000Z
|
shotglass/app/migrations/0001_initial.py
|
johntellsall/shotglass
|
0ad5311f92bd79cd5b3bafe4a91dd9cdcce947e8
|
[
"MIT"
] | 13
|
2015-08-24T21:54:16.000Z
|
2022-02-04T04:34:48.000Z
|
shotglass/app/migrations/0001_initial.py
|
johntellsall/shotglass
|
0ad5311f92bd79cd5b3bafe4a91dd9cdcce947e8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Initial migration: creates the SourceLine table.

    A SourceLine records one tagged symbol occurrence (function, variable,
    struct, macro or member) at a (project, path, line_number) location.
    """
    dependencies = []
    operations = [
        migrations.CreateModel(
            name="SourceLine",
            fields=[
                (
                    "id",
                    models.AutoField(
                        verbose_name="ID",
                        serialize=False,
                        auto_created=True,
                        primary_key=True,
                    ),
                ),
                ("project", models.CharField(max_length=200)),
                # NOTE(review): max_length=20 looks short for a file path —
                # confirm against real data before relying on it.
                ("path", models.CharField(max_length=20)),
                ("line_number", models.IntegerField()),
                (
                    "kind",
                    # Bytes literals here are a Python-2-era artifact; they are
                    # part of the recorded migration state and must not change.
                    models.CharField(
                        max_length=2,
                        choices=[
                            (b"f", b"function"),
                            (b"v", b"variable"),
                            (b"s", b"struct"),
                            (b"c", b"macro"),
                            (b"m", b"member"),
                        ],
                    ),
                ),
            ],
        ),
    ]
| 28.837209
| 62
| 0.348387
|
4a1a321c7ceb7552463fd75b216cfcab624bd2d7
| 1,596
|
py
|
Python
|
easytext/metrics/acc_metric.py
|
cuilunan/easytext
|
95511a5a12d3af4a102ecb7a4d10e09a7d9b8680
|
[
"MIT"
] | 1
|
2020-07-20T06:40:19.000Z
|
2020-07-20T06:40:19.000Z
|
easytext/metrics/acc_metric.py
|
cuilunan/easytext
|
95511a5a12d3af4a102ecb7a4d10e09a7d9b8680
|
[
"MIT"
] | null | null | null |
easytext/metrics/acc_metric.py
|
cuilunan/easytext
|
95511a5a12d3af4a102ecb7a4d10e09a7d9b8680
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python 3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
#
"""
acc 指标
Authors: panxu(panxu@baidu.com)
Date: 2020/05/30 07:36:00
"""
from typing import Dict
import torch
from .metric import Metric
class AccMetric(Metric):
    """
    Accuracy metric: tracks running counts of correct predictions and
    total examples across batches.
    """
    ACC = "acc"
    def __init__(self):
        super().__init__()
        # Running totals across all __call__ invocations since last reset().
        self._num_true = 0
        self._num_total = 0
    def __call__(self,
                 prediction_labels: torch.Tensor,
                 gold_labels: torch.Tensor, mask: torch.LongTensor) -> Dict:
        """
        Compute accuracy for one batch and update the running totals.
        :param prediction_labels: predicted labels
        :param gold_labels: gold (reference) labels
        :param mask: must be None — accuracy does not support masking
        :return: dict mapping AccMetric.ACC to this batch's accuracy
        """
        if mask is not None:
            raise RuntimeError("对于 Acc 来说, mask 应该为 None")
        # Move to CPU and detach so the comparison does not keep autograd state.
        prediction_labels, gold_labels = prediction_labels.detach().cpu(), gold_labels.detach().cpu()
        num_true = (prediction_labels == gold_labels).sum().item()
        num_total = gold_labels.size(0)
        self._num_true += num_true
        self._num_total += num_total
        acc = AccMetric._compute(num_true=num_true, num_total=num_total)
        return {AccMetric.ACC: acc}
    @staticmethod
    def _compute(num_true: int, num_total: int):
        # Small epsilon guards against division by zero when num_total == 0.
        return num_true / (float(num_total) + 1e-10)
    @property
    def metric(self) -> Dict:
        # Accuracy accumulated over every batch since the last reset().
        acc = AccMetric._compute(self._num_true, self._num_total)
        return {AccMetric.ACC: acc}
    def reset(self):
        # Clear the running totals; returns self to allow chaining.
        self._num_true = 0
        self._num_total = 0
        return self
| 22.166667
| 101
| 0.599624
|
4a1a3353419a09928bca1797314e1a855ab1ab73
| 4,097
|
py
|
Python
|
tests/Modules/test_crds.py
|
hugs-cloud/hugs
|
65aef97d662746f1382bd03b15f46a6647c7b7f5
|
[
"Apache-2.0"
] | null | null | null |
tests/Modules/test_crds.py
|
hugs-cloud/hugs
|
65aef97d662746f1382bd03b15f46a6647c7b7f5
|
[
"Apache-2.0"
] | 5
|
2020-08-18T12:22:46.000Z
|
2020-09-30T14:30:11.000Z
|
tests/Modules/test_crds.py
|
hugs-cloud/hugs
|
65aef97d662746f1382bd03b15f46a6647c7b7f5
|
[
"Apache-2.0"
] | 2
|
2020-06-11T20:30:24.000Z
|
2020-10-29T17:30:21.000Z
|
import datetime
import logging
import os
import uuid
from pathlib import Path
import pandas as pd
import pytest
from Acquire.ObjectStore import datetime_to_datetime, datetime_to_string
from HUGS.Modules import CRDS, Datasource
from HUGS.ObjectStore import get_local_bucket
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
# @pytest.fixture(scope="session")
# def data():
# filename = "bsd.picarro.1minute.248m.dat"
# dir_path = os.path.dirname(__file__)
# test_data = "../data/proc_test_data/CRDS"
# filepath = os.path.join(dir_path, test_data, filename)
# return pd.read_csv(filepath, header=None, skiprows=1, sep=r"\s+")
def get_datapath(filename, data_type):
    """Return the absolute path to a test-data file.

    Args:
        filename: Name of the data file, e.g. "hfd.picarro.1minute.100m.min.dat".
        data_type: Sub-directory under proc_test_data, e.g. "CRDS".
    Returns:
        pathlib.Path to ../data/proc_test_data/<data_type>/<filename>,
        relative to this test module.
    """
    # Bug fix: the f-string previously ended in a literal placeholder instead
    # of {filename}, so the filename argument was silently ignored and every
    # call returned the same (non-existent) path.
    return Path(__file__).resolve(strict=True).parent.joinpath(
        f"../data/proc_test_data/{data_type}/{filename}"
    )
@pytest.fixture(autouse=True)
def hfd_filepath():
    """Fixture: path to the HFD 100m CRDS minute-averaged test file."""
    return get_datapath(filename="hfd.picarro.1minute.100m.min.dat", data_type="CRDS")
def test_read_file():
    """CRDS.read_file should split an HFD file into per-species datasets."""
    hfd_filepath = get_datapath(filename="hfd.picarro.1minute.100m.min.dat", data_type="CRDS")
    crds = CRDS()
    gas_data = crds.read_file(data_filepath=hfd_filepath)
    ch4_data = gas_data["ch4"]["data"]
    co2_data = gas_data["co2"]["data"]
    co_data = gas_data["co"]["data"]
    # First-row sanity checks for each species: value, stdev, n_meas.
    assert ch4_data["ch4"][0].values == pytest.approx(1993.83)
    assert ch4_data["ch4_stdev"][0].values == pytest.approx(1.555)
    assert ch4_data["ch4_n_meas"][0].values == pytest.approx(19.0)
    assert co2_data["co2"][0] == pytest.approx(414.21)
    assert co2_data["co2_stdev"][0] == pytest.approx(0.109)
    assert co2_data["co2_n_meas"][0] == pytest.approx(19.0)
    assert co_data["co"][0] == pytest.approx(214.28)
    assert co_data["co_stdev"][0] == pytest.approx(4.081)
    assert co_data["co_n_meas"][0] == pytest.approx(19.0)
def test_read_data():
    """CRDS.read_data should return per-species data + metadata for TAC."""
    crds = CRDS()
    tac_filepath = get_datapath(filename="tac.picarro.1minute.100m.test.dat", data_type="CRDS")
    combined = crds.read_data(data_filepath=tac_filepath, site="tac")
    # The test file contains exactly two species.
    assert len(combined) == 2
    assert list(combined.keys()) == ["ch4", "co2"]
    # Metadata is parsed from the filename and the file header.
    ch4_metadata = combined["ch4"]["metadata"]
    assert ch4_metadata["site"] == "tac"
    assert ch4_metadata["instrument"] == "picarro"
    assert ch4_metadata["time_resolution"] == "1_minute"
    assert ch4_metadata["inlet"] == "100m"
    assert ch4_metadata["port"] == "9"
    assert ch4_metadata["type"] == "air"
    assert ch4_metadata["species"] == "ch4"
    ch4_data = combined["ch4"]["data"]
    assert ch4_data.time[0] == pd.Timestamp("2012-07-31 14:50:30")
    assert ch4_data["ch4"][0] == pytest.approx(1905.28)
    assert ch4_data["ch4 stdev"][0] == pytest.approx(0.268)
    assert ch4_data["ch4 n_meas"][0] == pytest.approx(20)
def test_read_data_no_inlet_raises():
    """A filename without an inlet height should raise ValueError."""
    crds = CRDS()
    filepath = Path("tac.picarro.1minute.no_inlet.dat")
    with pytest.raises(ValueError):
        crds.read_data(data_filepath=filepath, site="tac")
def test_gas_info(hfd_filepath):
    """gas_info should count 3 gases with 3 data columns each in the HFD file."""
    crds = CRDS()
    data = pd.read_csv(
        hfd_filepath,
        header=None,
        skiprows=1,
        sep=r"\s+",
        index_col=["0_1"],
        parse_dates=[[0, 1]],
    )
    n_gases, n_cols = crds.gas_info(data=data)
    assert n_gases == 3
    assert n_cols == 3
def test_get_site_attributes():
    """Site attributes should include owner, contact, inlet height and comment."""
    crds = CRDS()
    attrs = crds.get_site_attributes(site="bsd", inlet="108m")
    assert attrs == {'data_owner': "Simon O'Doherty", 'data_owner_email': 's.odoherty@bristol.ac.uk',
                    'inlet_height_magl': '108m', 'comment': 'Cavity ring-down measurements. Output from GCWerks'}
    attrs = crds.get_site_attributes(site="tac", inlet="50m")
    assert attrs == {'data_owner': "Simon O'Doherty", 'data_owner_email': 's.odoherty@bristol.ac.uk',
                    'inlet_height_magl': '50m', 'comment': 'Cavity ring-down measurements. Output from GCWerks'}
def test_get_site_attributes_unknown_site_raises():
    """Requesting attributes for an unknown site should raise ValueError."""
    crds = CRDS()
    with pytest.raises(ValueError):
        _ = crds.get_site_attributes(site="jupiter", inlet="10008m")
| 29.688406
| 113
| 0.676104
|
4a1a33ae3f513aa8294abc01a7290186b1ce15c9
| 1,827
|
py
|
Python
|
oneflow/compatible_single_client_python/framework/op_util.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/framework/op_util.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
oneflow/compatible_single_client_python/framework/op_util.py
|
xcnick/oneflow
|
7b786b27069dec35d2493256011e773988c91f56
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from oneflow.compatible.single_client.core.operator.op_conf_pb2 import OperatorConf
import oneflow._oneflow_internal
def IsOpConfOnlyCpuSupported(op_conf):
    """Return True if the op described by `op_conf` supports only CPU.

    User-defined ops are checked by their op_type_name; built-in ops by the
    protobuf field number of their op_type oneof case.
    """
    assert isinstance(op_conf, OperatorConf)
    # The triple-quoted block below is the previous (cached-set) implementation
    # kept as dead code for reference; it is a bare string statement, not a
    # docstring, and is never executed.
    """
    global _cpu_only_op_type_cases
    if _cpu_only_op_type_cases == None:
        _cpu_only_op_type_cases = set()
        for field in OperatorConf.DESCRIPTOR.oneofs_by_name["op_type"].fields:
            if oneflow._oneflow_internal.IsOpTypeCaseCpuSupportOnly(field.number):
                _cpu_only_op_type_cases.add(field.number)
    op_type_field = op_conf.WhichOneof("op_type")
    return OperatorConf.DESCRIPTOR.fields_by_name[op_type_field].number in _cpu_only_op_type_cases
    """
    op_type_field = op_conf.WhichOneof("op_type")
    if op_type_field == "user_conf":
        return IsUserOpOnlyCpuSupported(op_conf.user_conf.op_type_name)
    else:
        # Built-in op: resolve the oneof case to its proto field number.
        field_number = OperatorConf.DESCRIPTOR.fields_by_name[op_type_field].number
        return oneflow._oneflow_internal.IsOpTypeCaseCpuSupportOnly(field_number)
def IsUserOpOnlyCpuSupported(op_type_name):
    """Return True if the named user op is registered as CPU-only."""
    return oneflow._oneflow_internal.IsOpTypeNameCpuSupportOnly(op_type_name)
# _cpu_only_op_type_cases = None
| 38.87234
| 98
| 0.782704
|
4a1a33d79684bfac0791df6e457f49efddc08456
| 1,810
|
py
|
Python
|
plotxy.py
|
peterneorr/plotbot
|
7e8422f1dc16768b50018bc7bbd8b45192c63e66
|
[
"MIT"
] | null | null | null |
plotxy.py
|
peterneorr/plotbot
|
7e8422f1dc16768b50018bc7bbd8b45192c63e66
|
[
"MIT"
] | null | null | null |
plotxy.py
|
peterneorr/plotbot
|
7e8422f1dc16768b50018bc7bbd8b45192c63e66
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
import threading
import sys
import time
import threading
import RPi.GPIO as GPIO
from pb.homing_motor import HomingMotor
import pb.plotbot as PB
def main():
    """Home the plotter, then draw an outward square spiral from the centre.

    Requires Raspberry Pi GPIO hardware; Ctrl-C homes the pen and cleans up.
    """
    GPIO.setmode(GPIO.BCM)
    config = PB.read_config()
    x, y, z = PB.init_motors(config)
    # Home all three axes before reading the calibrated named positions.
    z.go_home()
    x.go_home()
    y.go_home()
    x_home = PB.named_point(config, "x", "home")
    y_home = PB.named_point(config, "y", "home")
    up = PB.named_point(config, "z", "up")
    down = PB.named_point(config, "z", "down")
    # Rebind go_home on x/y so subsequent calls go to the *configured* home
    # position instead of re-running the hardware homing routine.
    x.go_home = lambda: x.goto_pos(x_home)
    y.go_home = lambda: y.goto_pos(y_home)
    pen_down = lambda: z.goto_pos(down)
    pen_up = lambda: z.goto_pos(up)
    def border():
        # draw bounding box
        pen_up()
        x.go_home()
        y.go_home()
        pen_down()
        x.goto_pos(x.get_max_steps())
        y.goto_pos(y.get_max_steps())
        x.go_home()
        y.go_home()
        pen_up()
    try:
        x.go_home()
        y.go_home()
        #border()
        # Start from the centre of the drawable area.
        y_center = y_home + (y.get_max_steps() - y_home) / 2
        x_center = x_home + (x.get_max_steps() - x_home) / 2
        x.goto_pos(x_center)
        y.goto_pos(y_center)
        px = x_center
        py = y_center
        i = 1
        pen_down()
        # Square spiral: alternate +/- moves whose length grows by 10 steps
        # each leg, until either axis would leave its travel range.
        while y.get_max_steps() > py > y_home and\
            x.get_max_steps() > px > x_home:
            px += i * 10
            x.goto_pos(px)
            py += i * 10
            y.goto_pos(py)
            i += 1
            px -= i * 10
            x.goto_pos(px)
            py -= i * 10
            y.goto_pos(py)
            i += 1
        pen_up()
        GPIO.cleanup()
    except KeyboardInterrupt:
        print("shutting down...")
        z.go_home()
        GPIO.cleanup()
if __name__ == '__main__':
    main()
| 22.345679
| 60
| 0.530387
|
4a1a342375c20a09de24bae32b2cfca889818519
| 457
|
py
|
Python
|
src/layouts/buttonbox/builder/buttonbox.py
|
webpedrovinicius/gui-python-gtk
|
eb85ecd99bfcded376190a2770aabd31d8db8fa5
|
[
"MIT"
] | 1
|
2021-02-14T19:00:31.000Z
|
2021-02-14T19:00:31.000Z
|
src/layouts/buttonbox/builder/buttonbox.py
|
webpedrovinicius/gui-python-gtk
|
eb85ecd99bfcded376190a2770aabd31d8db8fa5
|
[
"MIT"
] | null | null | null |
src/layouts/buttonbox/builder/buttonbox.py
|
webpedrovinicius/gui-python-gtk
|
eb85ecd99bfcded376190a2770aabd31d8db8fa5
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""ButtonBox layout container, built from a Glade template."""
import gi
gi.require_version(namespace='Gtk', version='3.0')
from gi.repository import Gtk
# @Gtk.Template(string, filename, resource_path)
@Gtk.Template(filename='./buttonbox.glade')
class MainWindow(Gtk.ApplicationWindow):
    """Application window whose layout is loaded from buttonbox.glade.

    __gtype_name__ must match the template's class id in the .glade file.
    """
    __gtype_name__ = 'MainWindow'
if __name__ == '__main__':
    win = MainWindow()
    win.connect('destroy', Gtk.main_quit)
    win.show_all()
    Gtk.main()
| 21.761905
| 50
| 0.698031
|
4a1a345cd2cee1c8c9c5fcd395346137c6eae613
| 1,387
|
py
|
Python
|
ematch/venv/Lib/site-packages/djangorave/tests/test_serializers.py
|
Faisal-Sey/Dreamnmatch.com
|
f69f8be8e825b37c6abda5e169cc794b5ea2bd4e
|
[
"MIT"
] | null | null | null |
ematch/venv/Lib/site-packages/djangorave/tests/test_serializers.py
|
Faisal-Sey/Dreamnmatch.com
|
f69f8be8e825b37c6abda5e169cc794b5ea2bd4e
|
[
"MIT"
] | null | null | null |
ematch/venv/Lib/site-packages/djangorave/tests/test_serializers.py
|
Faisal-Sey/Dreamnmatch.com
|
f69f8be8e825b37c6abda5e169cc794b5ea2bd4e
|
[
"MIT"
] | null | null | null |
# stdlib imports
# django imports
from django.test import TestCase
# 3rd party imports
from rest_framework.exceptions import ValidationError
# project imports
from djangorave.serializers import TransactionSerializer
from djangorave.tests.factories import PaymentTypeModelFactory, UserFactory
class TestTransactionSerializer(TestCase):
    """Test suite for the TransactionSerializer"""
    def test_validate_reference(self):
        """Ensure the serializer raises an exception for an invalid
        payment_type_id or user_id """
        payment_type = PaymentTypeModelFactory()
        user = UserFactory()
        # A reference has the form "<payment_type_id>__<tx>__<user_id>".
        expected_response = f"{payment_type.id}__test__{user.id}"
        # validate_reference does not use instance state, so self=None works.
        actual_response = TransactionSerializer.validate_reference(
            self=None, value=expected_response
        )
        assert actual_response == expected_response
        with self.assertRaises(ValidationError) as e:
            TransactionSerializer.validate_reference(
                self=None, value=f"123__test__{user.id}"
            )
        self.assertEqual(e.exception.detail[0], "Payment type does not exist")
        with self.assertRaises(ValidationError) as e:
            TransactionSerializer.validate_reference(
                self=None, value=f"{payment_type.id}__test__123"
            )
        self.assertEqual(e.exception.detail[0], "User does not exist")
| 34.675
| 78
| 0.708003
|
4a1a34b6742e1d607d88a61c025870a3c71f23c1
| 6,067
|
py
|
Python
|
main_eos_table.py
|
sotzee/quaryonic_eos
|
2f0f16040d111d8c9e75795a63a6a29d26ea9dca
|
[
"MIT"
] | 1
|
2021-02-25T06:58:14.000Z
|
2021-02-25T06:58:14.000Z
|
main_eos_table.py
|
sotzee/quaryonic_eos
|
2f0f16040d111d8c9e75795a63a6a29d26ea9dca
|
[
"MIT"
] | null | null | null |
main_eos_table.py
|
sotzee/quaryonic_eos
|
2f0f16040d111d8c9e75795a63a6a29d26ea9dca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 2 15:06:25 2020
@author: sotzee
"""
import numpy as np
import scipy.optimize as opt
from joblib import Parallel, delayed
import unitconvert
import eos_quarkyonic
import eos_potential
from config import saturation_density,u_max,N1,N2,path,args,eos_shape,L,BE,Sv,gamma,num_cores,nB_grid,args_Lattimer_pnm_init,s_k
import os
def ensure_dir(path, dir_name):
    """Create the directory ``path + dir_name`` if it does not already exist.

    Note: the two parts are joined by plain string concatenation (matching
    the existing call sites), so ``path`` must already include a trailing
    separator if one is required.

    Args:
        path: Base path prefix.
        dir_name: Directory name appended to ``path``.
    """
    # exist_ok=True removes the stat-then-mkdir race and the bare `except`
    # that previously swallowed unrelated errors (e.g. permission failures).
    os.makedirs(path + dir_name, exist_ok=True)
# Define PNM potential fixed by: BE+Sv, L, gamma
# For each symmetry-energy slope L_i, fit the Lattimer PNM potential
# parameters so the potential reproduces (BE+Sv, L_i, gamma) at saturation.
Potential_Lattimer_PNM_list=[]
for L_i in L:
    # NOTE(review): args_Lattimer is assigned but never used — confirm it can
    # be removed in a later cleanup.
    args_Lattimer=[]
    args_Lattimer_pnm=np.concatenate((opt.root(eos_potential.fit_lattimer_pnm,args_Lattimer_pnm_init,args=([unitconvert.m_n_MeV+BE+Sv,L_i,gamma],),method='hybr').x,[gamma]))
    Potential_Lattimer_pnm=eos_potential.Potential_single(args_Lattimer_pnm,eos_potential.syms_Lattimer,eos_potential.V_Lattimer_expr)
    Potential_Lattimer_PNM_list.append(Potential_Lattimer_pnm)
def main_parallel_unsave(Calculation_i,parameter_list,other_args=[],verbose=1):
Output=Parallel(n_jobs=num_cores,verbose=verbose)(delayed(Calculation_i)(parameter_i,other_args) for parameter_i in parameter_list)
return np.array(Output)
def Calculation_creat_EOS(eos_args_args_array, other):
    """Build one quarkyonic EOS from a (Lambda, nt) pair and a PNM potential.

    `other` is the fitted Potential object passed via main_parallel_unsave's
    other_args parameter.
    """
    return eos_quarkyonic.EOS_Quarkyonic_Potential(eos_args_args_array,potential=other,s_k=s_k,defaut_u_max=u_max)
# Define EOSs of all parameter sets.
eos_flat=[]
for Potential_Lattimer_PNM_i,L_i in zip(Potential_Lattimer_PNM_list,L):
    print('Calculating %d*%d configuration with L=%.2f MeV'%(eos_shape[1],eos_shape[2],L_i))
    eos_flat.append(main_parallel_unsave(Calculation_creat_EOS,args[1:,0].reshape((2,-1)).transpose(),other_args=Potential_Lattimer_PNM_i))
print('EOS calculated successfully!\n')
eos_flat=np.array(eos_flat).flatten()
eos=eos_flat.reshape(eos_shape)
# eos contains 3D array of EOS of shape (N_L, N_Lambda, N_nt).
# eos_flat is 1D array of all EOS of length N_L*N_Lambda*N_nt.
# e.g. if args = np.mgrid[20:80:4j,300:2500:23j,0.2:0.6:21j],
# eos[1,5,5]=eos_flat[1*23*21+5*21+5], for L=40 MeV, Lambda=800 MeV, nt=0.3 fm^-3
# Generate a grand EOS table with regular k_Fn grid (analytical solution).
print('\nGenerating a grand EOS table with regular k_Fn grid...')
table_dir_name='EOS_table_regular_kFn'
ensure_dir(path,table_dir_name)
# Crust EOS table: (nB, epsilon, p) plus a derived sound-speed-squared row.
eos_crust=eos_flat[0].crust_eos_array
eos_crust=np.concatenate((eos_crust,eos_flat[0].eosCs2(eos_crust[2])[np.newaxis]),axis=0)
eos_crust_header='baryon number density nB (fm-3), \t energy density epsilon (MeV fm-3), \t pressure p (MeV fm-3), \t sound speed square cs2/c2'
np.savetxt(table_dir_name+'/crust_eos.txt',eos_crust.transpose(),fmt='%.8g',header=eos_crust_header)
# Core table: rows are (k_Fn, nB, epsilon, p, cs2); one column per EOS.
kFn_n_e_p_cs2=np.zeros((5,N1+N2-1,np.prod(eos_shape)))
k0_kt_kmax=np.zeros((3,np.prod(eos_shape)))
for i in range(np.prod(eos_shape)):
    kFn_n_e_p_cs2[0,:,i]=eos_flat[i].k_Fn_array
    kFn_n_e_p_cs2[1:4,:,i]=eos_flat[i].eos_array_high
    # cs2 is evaluated at the pressure row (index 3) just filled in above.
    kFn_n_e_p_cs2[4,:,i]=eos_flat[i].eosCs2(kFn_n_e_p_cs2[3,:,i])
    k0_kt_kmax[:,i]=np.array([eos_flat[i].k_F0,eos_flat[i].k_Ft,eos_flat[i].k_Fmax])
kFn_n_e_p_cs2_name=['neutron_fermi_momentum','baryon_number_density','energy_density','pressure','sound_speed_square']
for i in range(len(kFn_n_e_p_cs2_name)):
    np.savetxt(table_dir_name+'/core_'+kFn_n_e_p_cs2_name[i]+'.txt',kFn_n_e_p_cs2[i].transpose(),fmt='%.8g')
k0_kt_kmax_header='neutron upper Fermi momentums (MeV) at: core-crust transition, quark drip transition, and 12*saturation density.'
np.savetxt(table_dir_name+'/k0_kt_kmax.txt',k0_kt_kmax.transpose(),fmt='%.8g',header=k0_kt_kmax_header)
print('Table saved in dir \'./'+table_dir_name+'\'')
# Generate a grand EOS table with regular baryon number density grid
# (values obtained by interpolation rather than the analytical k_Fn solution).
print('\nGenerating a grand EOS table with regular baryon number density grid...')
table_dir_name='EOS_table_regular_nB'
ensure_dir(path,table_dir_name)
# Same crust table as the k_Fn-grid directory, duplicated for convenience.
eos_crust=eos_flat[0].crust_eos_array
eos_crust=np.concatenate((eos_crust,eos_flat[0].eosCs2(eos_crust[2])[np.newaxis]),axis=0)
eos_crust_header='baryon number density nB (fm-3), \t energy density epsilon (MeV fm-3), \t pressure p (MeV fm-3), \t sound speed square cs2/c2'
np.savetxt(table_dir_name+'/crust_eos.txt',eos_crust.transpose(),fmt='%.8g',header=eos_crust_header)
# Rows: (epsilon, p, cs2) on the shared nB_grid; one column per EOS.
e_p_cs2=np.zeros((3,len(nB_grid),np.prod(eos_shape)))
for i in range(np.prod(eos_shape)):
    e_p_cs2[1,:,i]=eos_flat[i].eosPressure_frombaryon(nB_grid)
    e_p_cs2[0,:,i]=eos_flat[i].eosDensity(e_p_cs2[1,:,i])
    e_p_cs2[2,:,i]=eos_flat[i].eosCs2(e_p_cs2[1,:,i])
e_p_cs2_name=['energy_density','pressure','sound_speed_square']
for i in range(len(e_p_cs2)):
    np.savetxt(table_dir_name+'/core_'+e_p_cs2_name[i]+'.txt',e_p_cs2[i].transpose(),fmt='%.8g')
np.savetxt(table_dir_name+'/core_nB_grid.txt',nB_grid,fmt='%.8g')
print('Table saved in dir \'./'+table_dir_name+'\'')
# Worked example for a single EOS from the grid.
# e.g. if args = np.mgrid[20:80:4j,300:2500:23j,0.2:0.6:21j],
# eos[1,5,5]=eos_flat[1*23*21+5*21+5], for L=40 MeV, Lambda=800 MeV, nt=0.3 fm^-3
eos_to_check=eos[1,5,5]
# To get [n, epsilon, p] array of a EOS, modify N1, N2 to vary the size of the array
eos_array_test = eos_to_check.eos_array
n_array=eos_array_test[0]
epsilon_array=eos_array_test[1]
p_array=eos_array_test[2]
# Analytical solution for PNM quarkyonic matter is available given k_Fn as prime
# argument. If one needs e.g. pressure as a function of baryon density,
# interpolation of the array is needed. EOS has been interpolated already.
print('\nHere is the example: eos[1,5,5]')
ns=saturation_density
ps=eos_to_check.eosPressure_frombaryon(ns)  # pressure in MeV fm^-3
es=eos_to_check.eosDensity(ps)  # energy density in MeV fm^-3
cs2=eos_to_check.eosCs2(ps)  # sound speed squared
chempo=eos_to_check.eosChempo(ps)  # chemical potential
#ns=eos_to_check.eosBaryonDensity(ps)
print('at nB=%.6f fm-3f, e=%.3f MeV fm-3, p=%.6f, cs^2=%.6f'%(ns,es,ps,cs2))
print('L=%.6f MeV'%(3*ps/ns))
print('BE+Sv=%.6f MeV'%(es/ns-unitconvert.m_n_MeV))
|
4a1a350c0849e8c9e2ffde06ef0974f057b215af
| 210
|
py
|
Python
|
app/tests/common/test_exceptions.py
|
isayakhov/duty-schedule-bot
|
2515fe8e1ba71b183c31363d99d1c8f1d552826a
|
[
"MIT"
] | 6
|
2021-01-30T11:31:46.000Z
|
2022-02-15T10:09:07.000Z
|
app/tests/common/test_exceptions.py
|
isayakhov/duty-schedule-bot
|
2515fe8e1ba71b183c31363d99d1c8f1d552826a
|
[
"MIT"
] | 2
|
2021-02-04T19:45:07.000Z
|
2021-02-05T12:23:56.000Z
|
app/tests/common/test_exceptions.py
|
isayakhov/duty-schedule-bot
|
2515fe8e1ba71b183c31363d99d1c8f1d552826a
|
[
"MIT"
] | 2
|
2022-02-06T11:22:30.000Z
|
2022-02-06T18:46:14.000Z
|
from app.common.exceptions import DutyError
def test_duty_error():
    """DutyError's repr should include its class code and the custom message."""
    msg = "Custom message"
    exc = DutyError(message=msg)
    assert repr(exc) == f"DutyError(code='{DutyError.code}', message='{msg}',)"
| 23.333333
| 79
| 0.685714
|
4a1a36052246701f6ef629ef5466a5587970080b
| 204
|
py
|
Python
|
carla_ego_vehicle/setup.py
|
VILMA-LMA/ros-bridge
|
e635635c2657b81d914596bec289f49bbde910e5
|
[
"MIT"
] | 205
|
2018-05-14T16:37:06.000Z
|
2022-03-26T21:32:29.000Z
|
ros/src/ros_carla_bridge/carla_ego_vehicle/setup.py
|
xmeng17/ALVNS
|
8d891600af3d851add27a10ae45cf3c2108bb87c
|
[
"MIT"
] | 19
|
2018-05-27T21:06:52.000Z
|
2021-09-08T00:37:46.000Z
|
ros/src/ros_carla_bridge/carla_ego_vehicle/setup.py
|
xmeng17/ALVNS
|
8d891600af3d851add27a10ae45cf3c2108bb87c
|
[
"MIT"
] | 73
|
2018-09-13T05:04:47.000Z
|
2022-02-26T14:51:02.000Z
|
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Standard catkin/ROS package setup: package metadata is read from
# package.xml by generate_distutils_setup; sources live under src/.
d = generate_distutils_setup(
    packages=['carla_ego_vehicle'],
    package_dir={'': 'src'}
)
setup(**d)
| 18.545455
| 60
| 0.754902
|
4a1a361177ecd7f650f564e15a4591d21a7265ab
| 6,665
|
py
|
Python
|
data/convert_jigsaw.py
|
vilka-lab/JigsawRate
|
d4d70f6611209b5c9b5bcda9f9380d317ef1737e
|
[
"MIT"
] | null | null | null |
data/convert_jigsaw.py
|
vilka-lab/JigsawRate
|
d4d70f6611209b5c9b5bcda9f9380d317ef1737e
|
[
"MIT"
] | null | null | null |
data/convert_jigsaw.py
|
vilka-lab/JigsawRate
|
d4d70f6611209b5c9b5bcda9f9380d317ef1737e
|
[
"MIT"
] | null | null | null |
import pandas as pd
import click
from pathlib import Path
# from typing import List
import re
import emoji
from tqdm import tqdm
def read_toxic_data(folder_toxic: str) -> pd.DataFrame:
    """Load the Jigsaw toxic-comment train set and labelled test set.

    Args:
        folder_toxic: Folder containing train.csv.zip, test.csv.zip and
            test_labels.csv.zip.
    Returns:
        One DataFrame: train rows followed by the labelled test rows.
    """
    train_path = 'train.csv.zip'
    test_labels_path = 'test_labels.csv.zip'
    test_path = 'test.csv.zip'
    folder_toxic = Path(folder_toxic)
    train_toxic = pd.read_csv(folder_toxic.joinpath(train_path))
    print('Shape of train data:', train_toxic.shape)
    # Test texts and their labels ship as separate files; join on id.
    test_toxic = pd.read_csv(folder_toxic.joinpath(test_path))
    test_labels = pd.read_csv(folder_toxic.joinpath(test_labels_path))
    test_toxic = test_toxic.merge(test_labels, how='outer', left_on='id', right_on='id')
    print('Shape of test data:', test_toxic.shape)
    # -1 - unlabeled data
    test_toxic = test_toxic[test_toxic['toxic'] != -1]
    total = pd.concat([train_toxic, test_toxic])
    return total
def read_unintended_data_dense(folder_unintended: str) -> pd.DataFrame:
    """Load the Unintended-Bias data and build a continuous offensiveness score.

    The score is the mean of the per-category toxicity probabilities, with
    severe_toxicity double-weighted.

    Returns:
        DataFrame with columns ['id', 'comment_text', 'offensiveness_score'].
    """
    folder_unintended = Path(folder_unintended)
    df = pd.read_csv(folder_unintended.joinpath('all_data.csv'))
    cols = ['id', 'comment_text', 'toxicity', 'severe_toxicity', 'obscene',
           'identity_attack', 'sexual_explicit', 'insult', 'threat']
    # Double-weight severe toxicity before averaging.
    df['severe_toxicity'] *= 2
    df = df[cols]
    cols_with_scores = df.drop(['id', 'comment_text'], axis=1).columns
    df['offensiveness_score'] = df[cols_with_scores].mean(axis=1)
    df = df[['id', 'comment_text', 'offensiveness_score']]
    print('Shape of unintended data:', df.shape)
    return df
def read_unintended_data_sparse(folder_unintended: str, threshold: float) -> pd.DataFrame:
    """Load the Unintended-Bias data as binary labels (probability > threshold).

    Columns are renamed to match the toxic-comment-classification schema
    (toxic, severe_toxic, identity_hate, ...).

    Returns:
        DataFrame with id, comment_text, and 0/1 label columns.
    """
    folder_unintended = Path(folder_unintended)
    df = pd.read_csv(folder_unintended.joinpath('all_data.csv'))
    cols = ['id', 'comment_text', 'toxicity', 'severe_toxicity', 'obscene',
           'identity_attack', 'insult', 'threat']
    df = df[cols]
    df = df.rename({
        'toxicity': 'toxic',
        'severe_toxicity': 'severe_toxic',
        'identity_attack': 'identity_hate'},
        axis=1)
    cols_with_scores = df.drop(['id', 'comment_text'], axis=1).columns
    # Binarise: probability above threshold -> 1, else 0.
    df[cols_with_scores] = (df[cols_with_scores] > threshold).astype(int)
    print('Shape of unintended data:', df.shape)
    return df
def read_ruddit(folder_ruddit: str) -> pd.DataFrame:
    """Load the Ruddit dataset and normalise it to the Jigsaw schema.

    Renames txt -> comment_text and comment_id -> id, drops deleted comments,
    and clamps negative offensiveness scores to 0.

    Args:
        folder_ruddit: Full path to ruddit_with_text.csv.
    Returns:
        DataFrame with columns ['id', 'comment_text', 'offensiveness_score'].
    """
    folder_ruddit = Path(folder_ruddit)
    df = pd.read_csv(folder_ruddit)
    df = df.rename({'txt': 'comment_text', 'comment_id': 'id'}, axis=1)
    columns = ['id', 'comment_text', 'offensiveness_score']
    df = df.loc[:, columns]
    df = df[df['comment_text'] != '[deleted]']
    # Bug fix: `df.loc[mask] = 0` zeroed the WHOLE row (id and comment_text
    # became 0) for negative scores; only the score column should be clamped.
    df.loc[df['offensiveness_score'] < 0, 'offensiveness_score'] = 0
    print('Shape of ruddit data:', df.shape)
    return df
def make_sample(df: pd.DataFrame) -> pd.DataFrame:
    """Return a small QA sample: first 20 rows, 40 random rows, last 20 rows."""
    pieces = [df.head(20), df.sample(n=40), df.tail(20)]
    return pd.concat(pieces)
def calculate_score(df: pd.DataFrame) -> pd.DataFrame:
    """Collapse the six binary toxicity labels into one offensiveness_score.

    score = (toxic + obscene + threat + insult + identity_hate
             + 2 * severe_toxic) / 6   (severe_toxic is double-weighted).

    Returns a frame with columns [id, comment_text, offensiveness_score].
    """
    columns = ['toxic', 'obscene', 'threat', 'insult', 'identity_hate', 'severe_toxic']
    # Work on a copy: the in-place `*= 2` below would otherwise mutate the
    # caller's DataFrame as a side effect.
    df = df.copy()
    df['severe_toxic'] *= 2
    df['offensiveness_score'] = 0
    for col in columns:
        df['offensiveness_score'] += df[col]
    df['offensiveness_score'] = df['offensiveness_score'] / len(columns)
    df = df[['id', 'comment_text', 'offensiveness_score']]
    return df
def process_text(full_line: str, full_process: bool = False) -> str:
    """Normalise a raw comment string.

    - URLs (any token starting with 'http') are replaced with 'URL'
    - @mentions are replaced with '@USER'
    - with full_process: '#tag' -> 'tag', emojis are demojized and
      space-padded, and repeated spaces are collapsed
    """
    full_line = str(full_line)
    # Replace each URL token only. The previous patterns ('https.*[^ ]' /
    # 'http.*[^ ]') were greedy across spaces and deleted ALL text that
    # followed the first URL in the line.
    full_line = re.sub(r'http\S+', 'URL', full_line)
    full_line = re.sub(r'@([^ ]*)', '@USER', full_line)
    if full_process:
        full_line = re.sub(r'#([^ ]*)', r'\1', full_line)
        full_line = emoji.demojize(full_line)
        full_line = re.sub(r'(:.*?:)', r' \1 ', full_line)
        full_line = re.sub(' +', ' ', full_line)
    return full_line
@click.command()
@click.option('--folder_toxic', help='Path to folder of jigsaw toxic comment classification', default='jigsaw-toxic-comment-classification-challenge')
@click.option('--folder_unintended', help='Path to folder of jigsaw unintended bias', default='jigsaw-unintended-bias-in-toxicity-classification')
@click.option('--folder_ruddit', help='Path to ruddit dataset (full path to file)', default='ruddit/Dataset/ruddit_with_text.csv')
@click.option('--output', help='Output path', default='jigsaw_train.csv')
@click.option('--unintended_threshold', help='Threshold for unintended dataset classification', default=0.5)
@click.option('--text_process/--no-text_process', help='Full text preprocess', default=False)
@click.option('--preprocess_type', type=click.Choice(['sparse', 'dense'], case_sensitive=True),
              help='"dense" or "sparse", see documentation for details', default='dense')
def main(
        folder_toxic: str,
        folder_unintended: str,
        output: str,
        unintended_threshold: float,
        folder_ruddit: str,
        text_process: bool,
        preprocess_type: str
) -> None:
    """Tool to convert test and train dataset from
    https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/data?select=train.csv.zip
    and
    https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/data?select=all_data.csv
    to train dataset.

    Merges the Toxic Comment, Unintended Bias and Ruddit datasets into a
    single CSV with columns [id, comment_text, offensiveness_score], and
    writes a small `sample.csv` for manual quality checks.
    """
    print('Toxic Comment Classification Challenge')
    toxic_df = read_toxic_data(folder_toxic)
    print('Ruddit dataset')
    ruddit = read_ruddit(folder_ruddit)
    print('Jigsaw Unintended Bias in Toxicity Classification')
    if preprocess_type == 'dense':
        # Dense: each dataset is converted to a continuous score on its own,
        # then all three are concatenated.
        toxic_df = calculate_score(toxic_df)
        unintented_df = read_unintended_data_dense(folder_unintended)
        total = pd.concat([toxic_df, unintented_df, ruddit])
    else:
        # Sparse: binary labels are concatenated first, then collapsed into
        # one score; ruddit (already continuous) is appended afterwards.
        unintented_df = read_unintended_data_sparse(folder_unintended, unintended_threshold)
        total = pd.concat([toxic_df, unintented_df])
        total = calculate_score(total)
        total = pd.concat([total, ruddit])
    # total = unintented_df
    # Scores can exceed 1 after weighting; clip to [.., 1].
    total.loc[total['offensiveness_score'] > 1, 'offensiveness_score'] = 1
    print('Data preprocessing')
    tqdm.pandas()
    total['comment_text'] = total['comment_text'].progress_apply(process_text, full_process=text_process)
    num_duplicates = total.duplicated(subset='comment_text').sum()
    if num_duplicates > 0:
        print(f'Founded {num_duplicates} duplicated rows, will be deleted')
        total = total.drop_duplicates(subset='comment_text')
    print('Shape of united data after filtering:', total.shape)
    total.to_csv(output, index=False)
    print('Making sample dataset to check quality')
    sample = make_sample(total)
    sample.to_csv('sample.csv', index=False)


if __name__ == '__main__':
    main()
| 38.085714
| 150
| 0.67952
|
4a1a36e216121e19856726b7109d167557fa13c6
| 4,315
|
py
|
Python
|
baselines/customised_plotter.py
|
dkangin/baselines
|
91b1c35e8f60a09bb10e3f1e76a03eeb5682bfae
|
[
"MIT"
] | 5
|
2019-01-21T06:56:01.000Z
|
2020-09-06T16:11:32.000Z
|
baselines/customised_plotter.py
|
dkangin/baselines
|
91b1c35e8f60a09bb10e3f1e76a03eeb5682bfae
|
[
"MIT"
] | null | null | null |
baselines/customised_plotter.py
|
dkangin/baselines
|
91b1c35e8f60a09bb10e3f1e76a03eeb5682bfae
|
[
"MIT"
] | 2
|
2019-09-02T09:22:31.000Z
|
2019-12-22T09:11:44.000Z
|
import matplotlib
matplotlib.use('Agg') # Can change to 'Agg' for non-interactive mode
import matplotlib.pyplot as plt
import argparse
import os
from baselines.bench.monitor import load_results
import numpy as np
def ts2xy(ts):
    """Convert a monitor timeseries into (cumulative timesteps, episode rewards).

    ts.l holds per-episode lengths, ts.r per-episode rewards.
    """
    return np.cumsum(ts.l.values), ts.r.values
def rolling_window(a, window):
    """View `a` as overlapping windows of length `window` along the last axis.

    Zero-copy: builds a strided view of the original array.
    """
    view_shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
    view_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=view_shape, strides=view_strides)
def window_func(x, y, window, func):
    """Apply `func` over a sliding window of each series in `y`.

    Returns `x` trimmed so it aligns with the shortened smoothed series.
    """
    smoothed = [func(rolling_window(series, window), axis=-1) for series in y]
    return x[window - 1:], smoothed
def plot_single_directory (env_id, directory, method_name, num_folds, EPISODES_WINDOW=100):
    """Plot the mean +/- std reward curve over `num_folds` runs of one method.

    Expects per-fold monitor logs under '<directory>/logs_<env_id>_<i>'.
    Curves are resampled onto the union of all folds' x values, smoothed
    with a rolling mean of EPISODES_WINDOW episodes, then averaged.
    """
    directory_name = directory + '/' + 'logs_' + env_id + '_'
    if not os.path.isdir(directory_name+'0'):
        print ('Warning: directory ' + directory_name + '0' + 'does not exist, skipping...')
        return
    results_x = []
    results_y = []
    results_x_all = []
    for i in range (0, num_folds):
        directory_name_i = directory_name + str(i)
        current_results = load_results(directory_name_i)
        current_results_x, current_results_y = ts2xy (current_results)
        results_x.append (current_results_x)
        results_y.append (current_results_y)
        results_x_all.extend (current_results_x)
        #plt.plot (current_results_x, current_results_y)
    # Common x grid: every timestep value seen in any fold, sorted.
    results_x_all = np.sort (results_x_all)
    results_y_all = []
    for i in range (num_folds):
        # NOTE(review): np.append returns a new array; these two results are
        # discarded, so the lines are no-ops. Harmless in practice because
        # np.interp clamps beyond xp[-1] to fp[-1] anyway — confirm intent.
        np.append(results_x[i], results_x_all[-1])
        np.append(results_y[i], results_y[i][-1])
        # Resample this fold's curve onto the common grid.
        results_y_all.append (np.interp(results_x_all, results_x[i], results_y[i]))
    results_x_all, results_y_all = window_func(results_x_all, results_y_all, EPISODES_WINDOW, np.mean)
    plt.plot (results_x_all, np.mean (results_y_all, 0), label=method_name)
    # Shaded band: one standard deviation across folds.
    plt.fill_between (results_x_all, np.mean(results_y_all, 0) - np.std (results_y_all, 0), np.mean(results_y_all, 0) + np.std (results_y_all, 0), alpha = 0.3)
def plot_results (env_id, directories, method_names, num_folds, postfix=''):
    """Plot averaged curves for every method and save '<env_id><postfix>.png'."""
    plt.clf()
    for i, directory in enumerate(directories):
        plot_single_directory(env_id, directory, method_names[i], num_folds)
    axes = plt.gca()
    axes.set_xlabel('Steps')
    axes.set_ylabel('Rewards')
    plt.legend()
    plt.savefig(env_id + postfix + '.png', bbox_inches='tight', pad_inches=0)
def main():
    """Entry point: plot learning curves for the configured run directories.

    Edit `directories`, `method_names`, `env_ids` and `num_folds` below to
    select which experiments are plotted.
    """
    directories = ['trpo_replay']
    method_names = ['Proposed']
    env_ids = {'Ant-v2'}
    num_folds = 1
    for env_id in env_ids:
        plot_results(env_id, directories, method_names, num_folds,
                     postfix='_3_minus_cov_acktr')
# Script entry point: regenerate all configured plots.
if __name__ == '__main__':
    main()
| 48.483146
| 328
| 0.692932
|
4a1a3724616ad5e8de05d1ec75a8e0d683576db5
| 4,566
|
py
|
Python
|
controllers/fin.py
|
nursix/eden
|
61d5a947da20bbf4d6458c9be88ed37b1330518c
|
[
"MIT"
] | 4
|
2015-04-08T19:51:44.000Z
|
2016-08-06T07:05:35.000Z
|
controllers/fin.py
|
waidyanatha/eden
|
a275ed7d10c2bf8839de86b7ac7c549186fc94b7
|
[
"MIT"
] | 27
|
2015-02-18T23:38:23.000Z
|
2020-04-27T13:53:23.000Z
|
controllers/fin.py
|
waidyanatha/eden
|
a275ed7d10c2bf8839de86b7ac7c549186fc94b7
|
[
"MIT"
] | 5
|
2015-09-10T05:31:14.000Z
|
2017-06-07T11:06:27.000Z
|
# -*- coding: utf-8 -*-
"""
Finance
"""
# Abort with 404 when this module is disabled in deployment settings.
# NOTE(review): `c`, `settings` and `HTTP` come from the web2py controller
# execution environment (c = controller name) — not defined in this file.
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page: render the CMS landing page for this controller """
    return s3db.cms_index(c)
# -----------------------------------------------------------------------------
def expense():
    """ Expenses: RESTful CRUD controller (default S3 behaviour, no custom prep) """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def voucher_program():
    """ Voucher Programs: RESTful CRUD controller

        On the voucher_billing component tab, billing dates/status are only
        editable while a billing is new or still SCHEDULED, and the earliest
        permissible date is constrained by the program.
    """
    def prep(r):
        if r.component_name == "voucher_billing":
            program = s3db.fin_VoucherProgram(r.id)
            ctable = r.component.table
            # Configure date and status fields
            if not r.component_id:
                # New billing: date editable, constrained by the program.
                field = ctable.date
                field.writable = True
                program.earliest_billing_date(configure=field)
            else:
                # Existing billing: only editable while still SCHEDULED.
                query = (ctable.id == r.component_id)
                row = db(query).select(ctable.status, limitby=(0, 1)).first()
                if row and row.status == "SCHEDULED":
                    field = ctable.date
                    field.writable = True
                    program.earliest_billing_date(billing_id = r.component_id,
                                                  configure = field,
                                                  )
                    field = ctable.status
                    field.writable = True
        return True
    s3.prep = prep

    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def voucher():
    """ Vouchers: RESTful CRUD controller (default S3 behaviour) """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def voucher_debit():
    """ Voucher Debits: RESTful CRUD controller (default S3 behaviour) """
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def voucher_claim():
    """ Compensation Claims: RESTful CRUD controller

        Bank details and status are only editable while the claim is NEW
        (or not yet saved); afterwards they are locked down.
    """
    def prep(r):
        table = r.resource.table
        record = r.record
        if not record or record.status == "NEW":
            # Make additional fields writable in new claims
            writable = ("account_holder",
                        "account_number",
                        "bank_name",
                        "bank_address",
                        "status",
                        )
            for fn in writable:
                field = table[fn]
                # Only unlock fields that are visible in this context.
                if field.readable:
                    field.writable = True
        return True
    s3.prep = prep

    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def voucher_invoice():
    """ Voucher Invoice: RESTful CRUD controller

        Status and rejection reason stay editable until the invoice has
        been PAID; then the record is locked.
    """
    def prep(r):
        table = r.resource.table
        record = r.record
        if not record or record.status != "PAID":
            # Make additional fields writable in unpaid invoices
            writable = ("status",
                        "reason",
                        )
            for fn in writable:
                field = table[fn]
                # Only unlock fields that are visible in this context.
                if field.readable:
                    field.writable = True
        return True
    s3.prep = prep

    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def payment_service():
    """ Payment Services: RESTful CRUD controller with finance rheader """
    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def product():
    """ Billable Products/Services: RESTful CRUD controller """

    # TODO prep
    # - on product_service tab, limit service selector to services of owner org
    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def subscription_plan():
    """ Subscription Plans: RESTful CRUD controller with finance rheader """
    return s3_rest_controller(rheader = s3db.fin_rheader)
# -----------------------------------------------------------------------------
def subscription():
    """ Subscriptions: RESTful CRUD controller with finance rheader """
    return s3_rest_controller(rheader = s3db.fin_rheader)
# END =========================================================================
| 31.489655
| 84
| 0.438458
|
4a1a376bbcd6680c3bf56556fc5713080f7f6db7
| 4,945
|
py
|
Python
|
bin/texpost.py
|
jt14den/teachtogether.tech
|
e3f67366ef5d717167f83a0bd5bc5c8545531fb8
|
[
"CC-BY-4.0"
] | null | null | null |
bin/texpost.py
|
jt14den/teachtogether.tech
|
e3f67366ef5d717167f83a0bd5bc5c8545531fb8
|
[
"CC-BY-4.0"
] | 1
|
2018-09-14T10:12:27.000Z
|
2018-09-18T07:46:24.000Z
|
bin/texpost.py
|
jt14den/teachtogether.tech
|
e3f67366ef5d717167f83a0bd5bc5c8545531fb8
|
[
"CC-BY-4.0"
] | null | null | null |
#!/usr/bin/env python
'''
Post-process what Pandoc produces from the book's Markdown.
I'm not proud of this...
'''
import sys
import re
import yaml
# Plain (non-regex) string replacements applied first in main().
STRINGS = [(r'\/-', '-'),
           (r'\begin{center}\rule{0.5\linewidth}{\linethickness}\end{center}', ''),
           (r'\def\labelenumi{\arabic{enumi}.}', ''),
           (r'\tightlist', '')]
# TeX-escaping table used by untex(): raw character -> escaped form.
UNTEX = [('_', r'\_'), ('%', r'\%'), ('#', r'\#'), ('\n', ' ')]
# Deferred closing environments, emitted after the document (see glossdef/main).
STACK = []
def main(linksPath):
    """Read Pandoc LaTeX on stdin, apply all fix-up passes, write to stdout.

    linksPath: path to the markdown link-definition file used by hrefSub.
    """
    doc = sys.stdin.read()
    # Plain string replacements first (see STRINGS).
    for (src, dst) in STRINGS:
        doc = doc.replace(src, dst)
    # Only the FIRST verbatim block is the YAML front matter -> chapter header.
    doc = headerOnce.pattern.sub(headerOnce, doc, count=1)
    # Regex-driven passes; each callable carries its trigger in `.pattern`.
    for func in FUNCS:
        doc = func.pattern.sub(func, doc)
    doc = hrefSub(doc, linksPath)
    sys.stdout.write(doc)
    # Emit any environments deferred by earlier passes (e.g. \end{description}).
    while STACK:
        print(STACK.pop())
def rxc(r):
    """Compile `r` with DOTALL so '.' also matches newlines."""
    return re.compile(r, flags=re.DOTALL)
def headerOnce(m):
    """Turn the YAML front-matter verbatim block into a \\chapter header.

    Emits \\chapter{title}\\label{s:<permalink part>} plus an optional
    objectives environment. Returns '' if the block has no title.
    """
    # NOTE(review): yaml.load without an explicit Loader is deprecated and
    # unsafe on untrusted input — fine here for the book's own front matter.
    data = yaml.load(m.group(1))
    if 'title' not in data:
        return ''
    title = untex(data['title'])
    # permalink looks like '/xyz/...': second path component is the label.
    label = data['permalink'].strip('/').split('/')[1]
    result = r'\chapter{' + title + r'}\label{s:' + label + '}\n'
    if 'objectives' in data:
        result += '\\begin{objectives}\n'
        result += '\n'.join(r'\item ' + untex(obj) + '\n' for obj in data['objectives'])
        result += '\\end{objectives}\n'
    return result
headerOnce.pattern = rxc(r'\\begin{verbatim}(.+?)\\end{verbatim}')
def appref(m):
    """Rewrite an APPENDIX hyperlink into an \\appref macro."""
    return r'\appref{%s}' % m.group(1)
appref.pattern = re.compile(r'\\protect\\hyperlink{APPENDIX}{(.+?)}', re.DOTALL)
def chapref(m):
    """Rewrite a CHAPTER hyperlink into a \\chapref macro."""
    return r'\chapref{%s}' % m.group(1)
chapref.pattern = re.compile(r'\\protect\\hyperlink{CHAPTER}{(.+?)}', re.DOTALL)
def cite(m, subPat = re.compile(r'\\protect\\hyperlink{CITE}{(.+?)}')):
    """Collapse bracketed CITE hyperlinks into a single \\cite{a,b,...}."""
    keys = subPat.findall(m.group(1))
    return r'\cite{' + ','.join(keys) + '}'
cite.pattern = re.compile(r'{\[}(\\protect\\hyperlink{CITE}{.+?,?}+?){\]}', re.DOTALL)
def figref(m):
    """Rewrite a FIGURE hyperlink into a \\figref macro."""
    return r'\figref{%s}' % m.group(1)
figref.pattern = re.compile(r'\\protect\\hyperlink{FIGURE}{(f:.+?)}', re.DOTALL)
def figure(m):
    """Expand a verbatim 'FIGURE src=... title=... ident=...' stub into a figure env.

    '.svg' sources are swapped for their '.pdf' counterparts for LaTeX.
    """
    spec = m.group(1)
    path = figure.src.search(spec).group(1).replace('.svg', '.pdf')
    caption = figure.title.search(spec).group(1)
    label = figure.ident.search(spec).group(1)
    return figure.template.format(path, caption, label)
figure.pattern = re.compile(r'\\begin{verbatim}\s*FIGURE\s*(.+?)\\end{verbatim}', re.DOTALL)
figure.src = re.compile(r'src\s*=\s*"(.+?)"')
figure.title = re.compile(r'title\s*=\s*"(.+?)"')
figure.ident = re.compile(r'ident\s*=\s*"(.+?)"')
figure.template = '''
\\begin{{figure}}
\\centering
\\includegraphics{{{}}}
\\caption{{{}}}
\\label{{{}}}
\\end{{figure}}
'''
def glossdef(m):
    """Turn a '**term**{:#g:label}' glossary definition into \\glossdef.

    Stateful: on the first definition it opens a description environment
    and pushes the matching \\end{description} onto the module STACK,
    which main() emits after the document.
    """
    prefix = ''
    if glossdef.first:
        prefix = '\\begin{description}\n'
        STACK.append(r'\end{description}')
        glossdef.first = False
    return prefix + r'\glossdef{' + m.group(2) + '}{' + m.group(1) + '}'
glossdef.pattern = rxc(r'\\textbf{(.+?)}\\{:\\#(g:.+?)\\}')
glossdef.first = True  # one-shot flag: True until the first definition is seen
def glossref(m):
    """Rewrite a glossary hyperlink into \\glossref{label}{text}."""
    return r'\glossref{%s}{%s}' % (m.group(1), m.group(2))
glossref.pattern = re.compile(r'\\protect\\hyperlink{(g:.+?)}{(.+?)}', re.DOTALL)
def hyperlink(m):
    """Rewrite '{[}text{]}{[}url{]}' into \\href{url}{text}."""
    return r'\href{%s}{%s}' % (m.group(2), m.group(1))
hyperlink.pattern = re.compile(r'{\[}([^}]+?){\]}{\[}(.+?){\]}', re.DOTALL)
def secref(m):
    """Rewrite a SECTION hyperlink into a \\secref macro."""
    return r'\secref{%s}' % m.group(1)
secref.pattern = re.compile(r'\\protect\\hyperlink{SECTION}{(.+?)}', re.DOTALL)
def section(m):
    """Promote \\subsection to \\section; keep labels only for 's:' anchors."""
    title = untex(m.group(1))
    label = m.group(2)
    if not label.startswith('s:'):
        return r'\section*{' + title + r'}'
    return r'\section{' + title + r'}\label{' + label + '}'
section.pattern = re.compile(r'\\subsection{(.+?)}\\label{(.+?)}', re.DOTALL)
def subsection(m):
    """Promote \\subsubsection to \\subsection; keep labels only for 's:' anchors."""
    title = untex(m.group(1))
    label = m.group(2)
    if not label.startswith('s:'):
        return r'\subsection*{' + title + r'}'
    return r'\subsection{' + title + r'}\label{' + label + '}'
subsection.pattern = re.compile(r'\\subsubsection{(.+?)}\\label{(.+?)}', re.DOTALL)
# Substitution passes applied in order by main(); each callable carries its
# trigger regex in a `.pattern` attribute.
FUNCS = [appref, chapref, cite, figref, figure, glossdef, glossref,
         hyperlink, secref, section, subsection]
def hrefSub(doc, linksPath):
    """Resolve \\href{key}{text} references via the link-definition file.

    Keys not found in the definitions are left unchanged.
    """
    links = readLinks(linksPath)
    uses = re.compile(r'\\href{(.+?)}{(.+?)}', re.DOTALL)

    def substitute(m):
        target = links.get(m.group(1))
        if target is None:
            return m.group(0)
        return r'\href{' + target + '}{' + m.group(2) + '}'

    return uses.sub(substitute, doc)
def readLinks(linksPath):
    """Parse '[name]: url' definitions from the markdown links file.

    Returns a dict mapping link name to TeX-escaped URL.
    """
    defs = re.compile(r'\[(.+?)\]\s*:\s*(.+)')
    with open(linksPath, 'r') as reader:
        found = [defs.search(line) for line in reader.readlines()]
    return {m.group(1): untex(m.group(2)) for m in found if m}
def untex(x):
    """Escape characters that are special in TeX (per the UNTEX table)."""
    for (raw, escaped) in UNTEX:
        x = x.replace(raw, escaped)
    return x
# CLI entry point. Usage: texpost.py /path/to/links.md < src > dst
if __name__ == '__main__':
    if len(sys.argv) != 2:
        sys.stderr.write('Usage: texpost.py /path/to/links.md < src > dst\n')
        sys.exit(1)
    main(sys.argv[1])
| 27.320442
| 88
| 0.554702
|
4a1a37eb06e45664dfa9683d7f7f383e5444098f
| 613
|
py
|
Python
|
sevennotes/helpers/decoraters.py
|
sathaaaaaa/SevenNotesMusic
|
917f718eaab8a4b6a23aaaadfc3e61071fb24d6b
|
[
"MIT"
] | null | null | null |
sevennotes/helpers/decoraters.py
|
sathaaaaaa/SevenNotesMusic
|
917f718eaab8a4b6a23aaaadfc3e61071fb24d6b
|
[
"MIT"
] | null | null | null |
sevennotes/helpers/decoraters.py
|
sathaaaaaa/SevenNotesMusic
|
917f718eaab8a4b6a23aaaadfc3e61071fb24d6b
|
[
"MIT"
] | null | null | null |
from pyrogram import Client, filters
from pyrogram.types import Message
def admin_check(function):
    """Decorator: run the handler only if the sender can manage voice chats.

    Looks up the chat's administrators, keeps those with the
    can_manage_voice_chats permission, and either forwards the update to
    `function` or replies with a permission error.
    """
    async def wrapper(c, m, *args, **kwargs):
        chat = m.chat.id
        user = m.from_user.id
        admins = await c.get_chat_members(
            chat_id=chat,
            filter="administrators",
        )
        admin_ids = {
            admin.user.id for admin in admins if admin.can_manage_voice_chats
        }
        # Single membership check: the previous per-id loop replied with the
        # error message once for EVERY non-matching admin id, even when the
        # user was an admin appearing later in the list.
        if user in admin_ids:
            return await function(c, m, *args, **kwargs)
        await m.reply_text("Sorry! You don't have enough permissions!!")
    return wrapper
| 27.863636
| 72
| 0.644372
|
4a1a38a844ca6130ad47a54e6dd18d84232012f9
| 3,861
|
py
|
Python
|
stwfsapy/tests/thesaurus_features_test.py
|
mo-fu/stwfsapy
|
dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322
|
[
"Apache-2.0"
] | null | null | null |
stwfsapy/tests/thesaurus_features_test.py
|
mo-fu/stwfsapy
|
dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322
|
[
"Apache-2.0"
] | null | null | null |
stwfsapy/tests/thesaurus_features_test.py
|
mo-fu/stwfsapy
|
dd47c15e5b1b5422fd4ce6fe63ceb3e25ef15322
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Leibniz Information Centre for Economics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numpy import array
from rdflib.namespace import SKOS
from stwfsapy import thesaurus as t
from stwfsapy import thesaurus_features as tf
from stwfsapy.tests.thesaurus import common as tc
from stwfsapy.tests import common as c
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.exceptions import NotFittedError
import pytest
def test_collect_po_from_tuples():
    """_collect_po_from_tuples groups objects by subject into sets."""
    tuples = [
        (tc.concept_ref_printed, tc.concept_ref_media),
        (tc.concept_ref_media, tc.thsys_ref_media),
        (tc.concept_ref_printed, tc.thsys_ref_print)
    ]
    po = tf._collect_po_from_tuples(tuples)
    # Each subject maps to the set of every object it was paired with.
    assert po == {
        tc.concept_ref_printed: {tc.concept_ref_media, tc.thsys_ref_print},
        tc.concept_ref_media: {tc.thsys_ref_media}
    }
def test_unfitted_raises():
    """transform() before fit() must raise sklearn's NotFittedError."""
    feat = tf.ThesaurusFeatureTransformation(None, None, None, None)
    with pytest.raises(NotFittedError):
        feat.transform([])
def test_transform():
    """transform() looks each input up in mapping_ and stacks the rows."""
    trans = tf.ThesaurusFeatureTransformation(None, None, None, None)
    # Inject a fitted state directly instead of calling fit().
    trans.mapping_ = {
        'a': coo_matrix([[1]]), 'b': coo_matrix([[2]]), 'c': coo_matrix([[3]])}
    res = trans.transform(['c', 'c', 'a'])
    assert (res.toarray() == array([[3], [3], [1]])).all()
def test_fit(full_graph):
    """fit() builds one 6-dim feature row per concept in the test graph.

    `full_graph` is a fixture (presumably defined in a conftest — confirm).
    """
    concepts = set(t.extract_by_type_uri(
        full_graph,
        c.test_type_concept))
    thesauri = set(t.extract_by_type_uri(
        full_graph,
        c.test_type_thesaurus))
    trans = tf.ThesaurusFeatureTransformation(
        full_graph,
        concepts,
        thesauri,
        SKOS.broader)
    trans.fit()
    mapping = trans.mapping_
    assert len(mapping) == len(c.test_concepts)
    for x in mapping.values():
        assert x.shape[1] == 6
    # Can not test positions because retrieval from graph is not deterministic.
    # Therefore, test non zero entries only.
    assert mapping[c.test_concept_uri_0_0].getnnz() == 1
    assert mapping[c.test_concept_uri_01_0].getnnz() == 2
    assert mapping[c.test_concept_uri_01_00].getnnz() == 2
    assert mapping[c.test_concept_uri_10_0].getnnz() == 2
    assert mapping[c.test_concept_uri_10_1].getnnz() == 2
    assert mapping[c.test_concept_uri_100_0].getnnz() == 3
    assert mapping[c.test_concept_uri_100_00].getnnz() == 3
    assert mapping[c.test_concept_uri_100_01].getnnz() == 3
    assert mapping[c.test_concept_uri_100_02].getnnz() == 3
def test_transform_unknown():
    """Unknown inputs transform to an all-zero row of the fitted width."""
    trans = tf.ThesaurusFeatureTransformation(
        None,
        None,
        None,
        None,)
    feature_dim = 12
    # Inject a fitted state with a single known key.
    trans.feature_dim_ = feature_dim
    known = csr_matrix(([1], ([0], [4])), shape=(1, feature_dim))
    trans.mapping_ = {'key': known}
    random_results = trans.transform([
        'some random stuff edsfysdfhjsedf',
        'key'])
    assert random_results.shape == (2, feature_dim)
    # Unknown -> zero row; known -> the stored single-entry row.
    assert random_results.getrow(0).getnnz() == 0
    assert random_results.getrow(1).getnnz() == 1
def test_empty_relation(full_graph):
    """With no thesauri and an empty relation, features collapse to one zero column."""
    trans = tf.ThesaurusFeatureTransformation(
        full_graph,
        set(t.extract_by_type_uri(full_graph, c.test_type_concept)),
        set(),
        "",
    )
    # NOTE(review): fit is called with two positional args here but with
    # none in test_fit — presumably sklearn-style (X, y) ignored; confirm.
    trans.fit([], [])
    features = trans.transform(['empty'])
    assert features.shape == (1, 1)
    assert features.getnnz() == 0
| 33.868421
| 79
| 0.688164
|
4a1a3931bb3b077c37511818c876506191bd41a0
| 90,104
|
py
|
Python
|
test/functional/p2p_segwit.py
|
Chellit/Chellit
|
7d804cfc64b4e91234b68f14b82f12c752eb6aae
|
[
"MIT"
] | 2
|
2021-02-01T08:29:18.000Z
|
2021-06-28T23:45:28.000Z
|
test/functional/p2p_segwit.py
|
Chellit/Chellit
|
7d804cfc64b4e91234b68f14b82f12c752eb6aae
|
[
"MIT"
] | null | null | null |
test/functional/p2p_segwit.py
|
Chellit/Chellit
|
7d804cfc64b4e91234b68f14b82f12c752eb6aae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Chellit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import ChellitTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1          # versionbits bit signalling segwit
VB_PERIOD = 144             # versionbits retarget/signalling period (regtest)
VB_TOP_BITS = 0x20000000    # versionbits top-bits marker for BIP9 blocks
MAX_SIGOP_COST = 80000      # consensus maximum block sigop cost
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
    """Return the block's virtual size: (3*base + total + 3) // 4.

    base = serialization without witness data, total = with witness data;
    the +3 rounds the division up.
    """
    base_size = len(witness_block.serialize())
    total_size = len(witness_block.serialize(with_witness=True))
    return (3 * base_size + total_size + 3) // 4
class TestNode(NodeConnCB):
    """P2P test peer: announces/requests blocks and txs and checks acceptance."""

    def __init__(self):
        super().__init__()
        # Hashes of all inventory items the node has requested from us.
        self.getdataset = set()

    def on_getdata(self, conn, message):
        """Record every inventory hash the node asks for."""
        for inv in message.inv:
            self.getdataset.add(inv.hash)

    def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
        """inv-announce `tx` and block until the node requests it."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
        self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
        self.wait_for_getdata(timeout)

    def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
        """Announce `block` (via headers or inv) and wait for its getdata."""
        with mininode_lock:
            self.last_message.pop("getdata", None)
            self.last_message.pop("getheaders", None)
        msg = msg_headers()
        msg.headers = [ CBlockHeader(block) ]
        if use_header:
            self.send_message(msg)
        else:
            # inv path: node replies with getheaders first, then we send them.
            self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
            self.wait_for_getheaders()
            self.send_message(msg)
        self.wait_for_getdata()

    def request_block(self, blockhash, inv_type, timeout=60):
        """Fetch a block from the node by hash; returns the received block."""
        with mininode_lock:
            self.last_message.pop("block", None)
        self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
        self.wait_for_block(blockhash, timeout)
        return self.last_message["block"].block

    def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
        """Send `tx` (with or without witness) and assert mempool (non-)acceptance."""
        tx_message = msg_tx(tx)
        if with_witness:
            tx_message = msg_witness_tx(tx)
        self.send_message(tx_message)
        self.sync_with_ping()
        assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
        if (reason != None and not accepted):
            # Check the rejection reason as well.
            with mininode_lock:
                assert_equal(self.last_message["reject"].reason, reason)

    # Test whether a witness block had the correct effect on the tip
    def test_witness_block(self, block, accepted, with_witness=True):
        """Send `block` and assert whether it becomes the new best tip."""
        if with_witness:
            self.send_message(msg_witness_block(block))
        else:
            self.send_message(msg_block(block))
        self.sync_with_ping()
        assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO():
    """A spendable output tracked by the tests (txid, output index, amount)."""
    def __init__(self, sha256, n, nValue):
        self.sha256 = sha256  # txid as an integer
        self.n = n            # output index within the transaction
        self.nValue = nValue  # amount in satoshis

    def __repr__(self):
        # Debug aid: show the txid as hex alongside index and value.
        return "UTXO(sha256=%x, n=%d, nValue=%d)" % (self.sha256, self.n, self.nValue)
# Helper for getting the script associated with a P2PKH
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
    """Return the standard P2PKH scriptPubKey for `pubkeyhash` (20-byte hash160)."""
    return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
    """Sign input `inIdx` of `txTo` under BIP143 and set its witness stack.

    Mutates txTo in place: witness stack becomes [signature, script].
    `value` is the amount of the output being spent (committed by BIP143).
    """
    tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
    # Append the sighash type byte to the DER signature.
    signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
    txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
    txTo.rehash()
class SegWitTest(ChellitTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, chellitd delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, eg by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
    # This test can only be run after segwit has activated
    def test_witness_commitments(self):
        """Exercise the coinbase witness-commitment rules: a correct
        commitment is accepted, an incorrect one is rejected, the nonce
        malleates the commitment, and witness-free blocks may omit it."""
        self.log.info("Testing witness commitments")
        # First try a correct witness commitment.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Test the test -- witness serialization should be different
        assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
        # This empty block should be valid.
        self.test_node.test_witness_block(block, accepted=True)
        # Try to tweak the nonce
        block_2 = self.build_next_block()
        add_witness_commitment(block_2, nonce=28)
        block_2.solve()
        # The commitment should have changed!
        assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
        # This should also be valid.
        self.test_node.test_witness_block(block_2, accepted=True)
        # Now test commitments with actual transactions
        assert (len(self.utxo) > 0)
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        # Let's construct a witness program
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
        tx.rehash()
        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        block_3 = self.build_next_block()
        # nonce=1 here gives us a deliberately *incorrect* commitment below.
        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
        # Add an extra OP_RETURN output that matches the witness commitment template,
        # even though it has extra data after the incorrect commitment.
        # This block should fail.
        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        block_3.solve()
        self.test_node.test_witness_block(block_3, accepted=False)
        # Add a different commitment with different nonce, but in the
        # right location, and with some funds burned(!).
        # This should succeed (nValue shouldn't affect finding the
        # witness commitment).
        add_witness_commitment(block_3, nonce=0)
        block_3.vtx[0].vout[0].nValue -= 1
        block_3.vtx[0].vout[-1].nValue += 1
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
        block_3.solve()
        self.test_node.test_witness_block(block_3, accepted=True)
        # Finally test that a block with no witness transactions can
        # omit the commitment.
        block_4 = self.build_next_block()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
        tx3.rehash()
        block_4.vtx.append(tx3)
        block_4.hashMerkleRoot = block_4.calc_merkle_root()
        block_4.solve()
        self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
        # Update available utxo's for use in later test.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    def test_block_malleability(self):
        """Check that witness-only malleations (oversized coinbase witness,
        wrong witness nonce) don't permanently mark a block invalid, since
        the block hash itself is unchanged."""
        self.log.info("Testing witness block malleability")
        # Make sure that a block that has too big a virtual size
        # because of a too-large coinbase witness is not permanently
        # marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
        assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
        # We can't send over the p2p network, because this is too big to relay
        # TODO: repeat this test with a block that can be relayed
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        # Removing the stuffing restores a valid virtual size; resubmitting
        # the same block hash must now be accepted.
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
        assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() == block.hash)
        # Now make sure that malleating the witness nonce doesn't
        # result in a block permanently marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Change the nonce -- should not cause the block to be permanently
        # failed
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
        self.test_node.test_witness_block(block, accepted=False)
        # Changing the witness nonce doesn't change the block hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
        self.test_node.test_witness_block(block, accepted=True)
    def test_witness_block_size(self):
        """Build a block whose virtual size is exactly MAX_BLOCK_BASE_SIZE+1
        (rejected), then shrink one witness byte to hit the limit exactly
        (accepted)."""
        self.log.info("Testing witness block size limit")
        # TODO: Test that non-witness carrying blocks can't exceed 1MB
        # Skipping this test for now; this is covered in p2p_fullblock.py
        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
        block = self.build_next_block()
        assert(len(self.utxo) > 0)
        # Create a P2WSH transaction.
        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
        # This should give us plenty of room to tweak the spending tx's
        # virtual size.
        NUM_DROPS = 200 # 201 max ops per script!
        NUM_OUTPUTS = 50
        witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
        witness_hash = uint256_from_str(sha256(witness_program))
        scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
        value = self.utxo[0].nValue
        parent_tx = CTransaction()
        parent_tx.vin.append(CTxIn(prevout, b""))
        child_value = int(value/NUM_OUTPUTS)
        for i in range(NUM_OUTPUTS):
            parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
        parent_tx.vout[0].nValue -= 50000   # leave a fee
        assert(parent_tx.vout[0].nValue > 0)
        parent_tx.rehash()
        child_tx = CTransaction()
        for i in range(NUM_OUTPUTS):
            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
        for i in range(NUM_OUTPUTS):
            child_tx.wit.vtxinwit.append(CTxInWitness())
            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
        child_tx.rehash()
        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
        vsize = get_virtual_size(block)
        additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
        i = 0
        while additional_bytes > 0:
            # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
            extra_bytes = min(additional_bytes+1, 55)
            # Witness data counts 1/4 toward vsize, so pad stack items; each
            # stack item is capped well below the 520-byte push limit here.
            block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
            additional_bytes -= extra_bytes
            i += 1
        block.vtx[0].vout.pop()  # Remove old commitment
        add_witness_commitment(block)
        block.solve()
        vsize = get_virtual_size(block)
        assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
        # Make sure that our test case would exceed the old max-network-message
        # limit
        assert(len(block.serialize(True)) > 2*1024*1024)
        self.test_node.test_witness_block(block, accepted=False)
        # Now resize the second transaction to make the block fit.
        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
        block.vtx[0].vout.pop()
        add_witness_commitment(block)
        block.solve()
        assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
        self.test_node.test_witness_block(block, accepted=True)
        # Update available utxo's
        self.utxo.pop(0)
        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
    # submitblock will try to add the nonce automatically, so that mining
    # software doesn't need to worry about doing so itself.
    def test_submit_block(self):
        """Verify submitblock fills in the default witness nonce (0) when
        the coinbase witness is missing, but never fills in a missing
        commitment."""
        block = self.build_next_block()
        # Try using a custom nonce and then don't supply it.
        # This shouldn't possibly work.
        add_witness_commitment(block, nonce=1)
        block.vtx[0].wit = CTxWitness() # drop the nonce
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert(self.nodes[0].getbestblockhash() != block.hash)
        # Now redo commitment with the standard nonce, but let chellitd fill it in.
        add_witness_commitment(block, nonce=0)
        block.vtx[0].wit = CTxWitness()
        block.solve()
        self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        # This time, add a tx with non-empty witness, but don't supply
        # the commitment.
        block_2 = self.build_next_block()
        add_witness_commitment(block_2)
        block_2.solve()
        # Drop commitment and nonce -- submitblock should not fill in.
        block_2.vtx[0].vout.pop()
        block_2.vtx[0].wit = CTxWitness()
        self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
        # Tip should not advance!
        assert(self.nodes[0].getbestblockhash() != block_2.hash)
    # Consensus tests of extra witness data in a transaction.
    def test_extra_witness_data(self):
        """Consensus checks: superfluous witness data is invalid, and
        non-empty scriptSig on a native-witness input is invalid, while
        extra scriptSig data on a non-witness input stays acceptable."""
        self.log.info("Testing extra witness data in tx")
        assert(len(self.utxo) > 0)
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        # First try extra witness data on a tx that doesn't require a witness
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
        tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
        tx.rehash()
        self.update_witness_block_with_transactions(block, [tx])
        # Extra witness data should not be allowed.
        self.test_node.test_witness_block(block, accepted=False)
        # Try extra signature data.  Ok if we're not spending a witness output.
        block.vtx[1].wit.vtxinwit = []
        block.vtx[1].vin[0].scriptSig = CScript([OP_0])
        block.vtx[1].rehash()
        add_witness_commitment(block)
        block.solve()
        self.test_node.test_witness_block(block, accepted=True)
        # Now try extra witness/signature data on an input that DOES require a
        # witness
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
        # Stack has one extra item beyond what witness_program consumes.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
        tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        # This has extra witness data, so it should fail.
        self.test_node.test_witness_block(block, accepted=False)
        # Now get rid of the extra witness, but add extra scriptSig data
        tx2.vin[0].scriptSig = CScript([OP_TRUE])
        tx2.vin[1].scriptSig = CScript([OP_TRUE])
        tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
        tx2.wit.vtxinwit[1].scriptWitness.stack = []
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        # This has extra signature data for a witness input, so it should fail.
        self.test_node.test_witness_block(block, accepted=False)
        # Now get rid of the extra scriptsig on the witness input, and verify
        # success (even with extra scriptsig data in the non-witness input)
        tx2.vin[0].scriptSig = b""
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        self.test_node.test_witness_block(block, accepted=True)
        # Update utxo for later tests
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    def test_max_witness_push_length(self):
        ''' Should only allow up to 520 byte pushes in witness stack '''
        self.log.info("Testing maximum witness push size")
        MAX_SCRIPT_ELEMENT_SIZE = 520
        assert(len(self.utxo))
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        # Fund a P2WSH output from our utxo...
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
        tx.rehash()
        # ...and spend it with an oversized witness stack element.
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # First try a 521-byte stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Now reduce the length of the stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
        add_witness_commitment(block)
        block.solve()
        self.test_node.test_witness_block(block, accepted=True)
        # Update the utxo for later tests
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    def test_max_witness_program_length(self):
        # Can create witness outputs that are long, but can't be greater than
        # 10k bytes to successfully spend
        self.log.info("Testing maximum witness program length")
        assert(len(self.utxo))
        MAX_PROGRAM_LENGTH = 10000
        # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
        long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
        assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
        long_witness_hash = sha256(long_witness_program)
        long_scriptPubKey = CScript([OP_0, long_witness_hash])
        block = self.build_next_block()
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # 44 dummy pushes feed the 44 OP_DROP/OP_TRUE evaluations above the
        # program itself; over-length program should make the block invalid.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Try again with one less byte in the witness program
        witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
        assert(len(witness_program) == MAX_PROGRAM_LENGTH)
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
        tx.rehash()
        tx2.vin[0].prevout.hash = tx.sha256
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
        tx2.rehash()
        # Rebuild the block with only the coinbase before re-adding txs.
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx, tx2])
        self.test_node.test_witness_block(block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    def test_witness_input_length(self):
        ''' Ensure that vin length must match vtxinwit length '''
        self.log.info("Testing witness input length")
        assert(len(self.utxo))
        witness_program = CScript([OP_DROP, OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        # Create a transaction that splits our utxo into many outputs
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        nValue = self.utxo[0].nValue
        for i in range(10):
            tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
        tx.vout[0].nValue -= 1000
        assert(tx.vout[0].nValue >= 0)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        self.test_node.test_witness_block(block, accepted=True)
        # Try various ways to spend tx that should all break.
        # This "broken" transaction serializer will not normalize
        # the length of vtxinwit.
        class BrokenCTransaction(CTransaction):
            # Serialize with whatever vtxinwit length the caller set,
            # instead of padding/truncating it to match len(vin).
            def serialize_with_witness(self):
                flags = 0
                if not self.wit.is_null():
                    flags |= 1
                r = b""
                r += struct.pack("<i", self.nVersion)
                if flags:
                    dummy = []
                    r += ser_vector(dummy)
                    r += struct.pack("<B", flags)
                r += ser_vector(self.vin)
                r += ser_vector(self.vout)
                if flags & 1:
                    r += self.wit.serialize()
                r += struct.pack("<I", self.nLockTime)
                return r
        tx2 = BrokenCTransaction()
        for i in range(10):
            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
        # First try using a too long vtxinwit
        for i in range(11):
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Now try using a too short vtxinwit
        tx2.wit.vtxinwit.pop()
        tx2.wit.vtxinwit.pop()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Now make one of the intermediate witnesses be incorrect
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
        tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Fix the broken witness and the block should be accepted.
        tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    def test_witness_tx_relay_before_segwit_activation(self):
        """Before activation: a tx carrying an unnecessary witness is
        rejected (premature-witness) without entering the reject cache,
        and non-NODE_WITNESS peers only getdata the non-witness form."""
        self.log.info("Testing relay of witness transactions")
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness.  Should be rejected for premature-witness, but should
        # not be added to recently rejected list.
        assert(len(self.utxo))
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
        tx.rehash()
        tx_hash = tx.sha256
        tx_value = tx.vout[0].nValue
        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
        # the getdata is just for the non-witness portion.
        self.old_node.announce_tx_and_wait_for_getdata(tx)
        assert(self.old_node.last_message["getdata"].inv[0].type == 1)
        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness transaction ought not result in a getdata.
        try:
            self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
            self.log.error("Error: duplicate tx getdata!")
            assert(False)
        except AssertionError as e:
            # Expected: the wait timed out because no getdata arrived.
            pass
        # Delivering this transaction with witness should fail (no matter who
        # its from)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
        self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
        # But eliminating the witness should fix it
        self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
        # Cleanup: mine the first transaction and update utxo
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))
    # After segwit activates, verify that mempool:
    # - rejects transactions with unnecessary/extra witnesses
    # - accepts transactions with valid witnesses
    # and that witness transactions are relayed to non-upgraded peers.
    def test_tx_relay_after_segwit_activation(self):
        """Post-activation mempool/relay checks, including that an
        oversized witness does not poison the reject filter and that
        getrawtransaction reports witness hash/size/vsize correctly."""
        self.log.info("Testing relay of witness transactions")
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness.  Should be rejected because we can't use a witness
        # when spending a non-witness output.
        assert(len(self.utxo))
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
        tx.rehash()
        tx_hash = tx.sha256
        # Verify that unnecessary witnesses are rejected.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
        # Verify that removing the witness succeeds.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
        # Now try to add extra witness data to a valid witness tx.
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
        tx2.rehash()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        # Add too-large for IsStandard witness and check that it does not enter reject filter
        p2sh_program = CScript([OP_TRUE])
        p2sh_pubkey = hash160(p2sh_program)
        witness_program2 = CScript([b'a'*400000])
        tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
        tx3.rehash()
        # Node will not be blinded to the transaction: it re-requests and
        # re-rejects with the same policy reason both times.
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
        # Remove witness stuffing, instead add extra witness push on stack
        tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
        tx3.rehash()
        self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
        self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
        # Get rid of the extra witness, and verify acceptance.
        tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
        # Also check that old_node gets a tx announcement, even though this is
        # a witness transaction.
        self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
        self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
        self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
        # Test that getrawtransaction returns correct witness information
        # hash, size, vsize
        raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
        assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
        assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
        # vsize = ceil(weight / 4) where weight = base*3 + total.
        vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
        assert_equal(raw_tx["vsize"], vsize)
        assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
        assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
        assert(vsize != raw_tx["size"])
        # Cleanup: mine the transactions and update utxo for next test
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    # Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
    # This is true regardless of segwit activation.
    # Also test that we don't ask for blocks from unupgraded peers
    def test_block_relay(self, segwit_activated):
        """Check witness-flagged block getdata behavior, pre/post-activation
        block serialization equality/inequality, getblock size fields, and
        that upgraded nodes don't fetch blocks from unupgraded peers."""
        self.log.info("Testing block relay")
        blocktype = 2|MSG_WITNESS_FLAG
        # test_node has set NODE_WITNESS, so all getdata requests should be for
        # witness blocks.
        # Test announcing a block via inv results in a getdata, and that
        # announcing a version 4 or random VB block with a header results in a getdata
        block1 = self.build_next_block()
        block1.solve()
        self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        self.test_node.test_witness_block(block1, True)
        block2 = self.build_next_block(nVersion=4)
        block2.solve()
        self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        self.test_node.test_witness_block(block2, True)
        block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
        block3.solve()
        self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
        assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
        self.test_node.test_witness_block(block3, True)
        # Check that we can getdata for witness blocks or regular blocks,
        # and the right thing happens.
        if segwit_activated == False:
            # Before activation, we should be able to request old blocks with
            # or without witness, and they should be the same.
            chain_height = self.nodes[0].getblockcount()
            # Pick 10 random blocks on main chain, and verify that getdata's
            # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
            all_heights = list(range(chain_height+1))
            random.shuffle(all_heights)
            all_heights = all_heights[0:10]
            for height in all_heights:
                block_hash = self.nodes[0].getblockhash(height)
                rpc_block = self.nodes[0].getblock(block_hash, False)
                block_hash = int(block_hash, 16)
                block = self.test_node.request_block(block_hash, 2)
                wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
                assert_equal(block.serialize(True), wit_block.serialize(True))
                assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
        else:
            # After activation, witness blocks and non-witness blocks should
            # be different.  Verify rpc getblock() returns witness blocks, while
            # getdata respects the requested type.
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [])
            # This gives us a witness commitment.
            assert(len(block.vtx[0].wit.vtxinwit) == 1)
            assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
            self.test_node.test_witness_block(block, accepted=True)
            # Now try to retrieve it...
            rpc_block = self.nodes[0].getblock(block.hash, False)
            non_wit_block = self.test_node.request_block(block.sha256, 2)
            wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
            assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
            assert_equal(wit_block.serialize(False), non_wit_block.serialize())
            assert_equal(wit_block.serialize(True), block.serialize(True))
            # Test size, vsize, weight
            rpc_details = self.nodes[0].getblock(block.hash, True)
            assert_equal(rpc_details["size"], len(block.serialize(True)))
            assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
            # weight = 3*stripped_size + total_size
            weight = 3*len(block.serialize(False)) + len(block.serialize(True))
            assert_equal(rpc_details["weight"], weight)
            # Upgraded node should not ask for blocks from unupgraded
            block4 = self.build_next_block(nVersion=4)
            block4.solve()
            self.old_node.getdataset = set()
            # Blocks can be requested via direct-fetch (immediately upon processing the announcement)
            # or via parallel download (with an indeterminate delay from processing the announcement)
            # so to test that a block is NOT requested, we could guess a time period to sleep for,
            # and then check. We can avoid the sleep() by taking advantage of transaction getdata's
            # being processed after block getdata's, and announce a transaction as well,
            # and then check to see if that particular getdata has been received.
            # Since 0.14, inv's will only be responded to with a getheaders, so send a header
            # to announce this block.
            msg = msg_headers()
            msg.headers = [ CBlockHeader(block4) ]
            self.old_node.send_message(msg)
            self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
            assert(block4.sha256 not in self.old_node.getdataset)
    # V0 segwit outputs should be standard after activation, but not before.
    def test_standardness_v0(self, segwit_activated):
        """Check that v0 witness outputs pass IsStandard only after
        activation, then reclaim the funds either way so self.utxo stays
        usable for later tests."""
        self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
        assert(len(self.utxo))
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        p2sh_pubkey = hash160(witness_program)
        p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
        # First prepare a p2sh output (so that spending it will pass standardness)
        p2sh_tx = CTransaction()
        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
        p2sh_tx.rehash()
        # Mine it on test_node to create the confirmed output.
        self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Now test standardness of v0 P2WSH outputs.
        # Start by creating a transaction with two outputs.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
        tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
        tx.rehash()
        self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
        # Now create something that looks like a P2PKH output. This won't be spendable.
        scriptPubKey = CScript([OP_0, hash160(witness_hash)])
        tx2 = CTransaction()
        if segwit_activated:
            # if tx was accepted, then we spend the second output.
            tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
            tx2.vout = [CTxOut(7000, scriptPubKey)]
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        else:
            # if tx wasn't accepted, we just re-spend the p2sh output we started with.
            tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
            tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
        tx2.rehash()
        self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
        # Now update self.utxo for later tests.
        tx3 = CTransaction()
        if segwit_activated:
            # tx and tx2 were both accepted.  Don't bother trying to reclaim the
            # P2PKH output; just send tx's first output back to an anyone-can-spend.
            sync_mempools([self.nodes[0], self.nodes[1]])
            tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
            tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
            tx3.wit.vtxinwit.append(CTxInWitness())
            tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
            tx3.rehash()
            self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
        else:
            # tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
            tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
            tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
            tx3.rehash()
            self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
    def test_segwit_versions(self):
        """Test standardness/consensus of witness versions 0 through 16.

        Sends to each witness-version scriptPubKey (OP_1..OP_16, then
        OP_0).  The fRequireStandard node (std_node) must reject
        versions 1-16 as non-standard while test_node accepts them, and
        spending a version >0 output must be rejected by policy (reason
        "reserved for soft-fork upgrades") yet remain valid by consensus
        when included in a block.  Safe to run before and after segwit
        activation.
        """
        self.log.info("Testing standardness/consensus for segwit versions (0-16)")
        assert(len(self.utxo))
        NUM_TESTS = 17 # will test OP_0, OP1, ..., OP_16
        if (len(self.utxo) < NUM_TESTS):
            # Split our first utxo so each version test gets its own input.
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
            split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
            for i in range(NUM_TESTS):
                tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
            tx.rehash()
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [tx])
            self.test_node.test_witness_block(block, accepted=True)
            self.utxo.pop(0)
            for i in range(NUM_TESTS):
                self.utxo.append(UTXO(tx.sha256, i, split_value))
        sync_blocks(self.nodes)
        temp_utxo = []
        tx = CTransaction()
        count = 0
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        for version in list(range(OP_1, OP_16+1)) + [OP_0]:
            count += 1
            # First try to spend to a future version segwit scriptPubKey.
            scriptPubKey = CScript([CScriptOp(version), witness_hash])
            tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
            tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
            tx.rehash()
            # std_node rejects the output as non-standard; test_node accepts.
            self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
            self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
            self.utxo.pop(0)
            temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
        self.nodes[0].generate(1) # Mine all the transactions
        sync_blocks(self.nodes)
        assert(len(self.nodes[0].getrawmempool()) == 0)
        # Finally, verify that version 0 -> version 1 transactions
        # are non-standard
        scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
        tx2.rehash()
        # Gets accepted to test_node, because standardness of outputs isn't
        # checked with fRequireStandard
        self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
        self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
        temp_utxo.pop() # last entry in temp_utxo was the output we just spent
        temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
        # Spend everything in temp_utxo back to an OP_TRUE output.
        tx3 = CTransaction()
        total_value = 0
        for i in temp_utxo:
            tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
            tx3.wit.vtxinwit.append(CTxInWitness())
            total_value += i.nValue
            tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
        tx3.rehash()
        # Spending a higher version witness output is not allowed by policy,
        # even with fRequireStandard=false.
        self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
        self.test_node.sync_with_ping()
        # Check that the expected policy-rejection reason was reported.
        with mininode_lock:
            assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
        # Building a block with the transaction must be valid, however.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2, tx3])
        self.test_node.test_witness_block(block, accepted=True)
        sync_blocks(self.nodes)
        # Add utxo to our list
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
    def test_signature_version_1(self):
        """Test the segwit (BIP143-style) version-1 signature hash.

        Covers every SIGHASH type with and without ANYONECANPAY,
        verifies that the signed input amount is committed to (blocks
        with too-large or too-small signed values are rejected), fuzzes
        many random input/output/hashtype combinations, and finishes
        with witness-v0 P2WPKH spends where the signature must live in
        the witness rather than the scriptSig.
        """
        self.log.info("Testing segwit signature hash version 1")
        key = CECKey()
        key.set_secretbytes(b"9")
        pubkey = CPubKey(key.get_pubkey())
        # P2WSH program: a simple pay-to-pubkey inside the witness script.
        witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        # First create a witness output for use in the tests.
        assert(len(self.utxo))
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
        tx.rehash()
        self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
        # Mine this transaction in preparation for following tests.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        self.test_node.test_witness_block(block, accepted=True)
        sync_blocks(self.nodes)
        self.utxo.pop(0)
        # Test each hashtype
        prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
        for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
            for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
                hashtype |= sigflag
                block = self.build_next_block()
                tx = CTransaction()
                tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
                tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
                tx.wit.vtxinwit.append(CTxInWitness())
                # Too-large input value
                sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
                self.update_witness_block_with_transactions(block, [tx])
                self.test_node.test_witness_block(block, accepted=False)
                # Too-small input value
                sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
                block.vtx.pop() # remove last tx
                self.update_witness_block_with_transactions(block, [tx])
                self.test_node.test_witness_block(block, accepted=False)
                # Now try correct value
                sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
                block.vtx.pop()
                self.update_witness_block_with_transactions(block, [tx])
                self.test_node.test_witness_block(block, accepted=True)
                prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
        # Test combinations of signature hashes.
        # Split the utxo into a lot of outputs.
        # Randomly choose up to 10 to spend, sign with different hashtypes, and
        # output to a random number of outputs.  Repeat NUM_TESTS times.
        # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
        # an input index > number of outputs.
        NUM_TESTS = 500
        temp_utxos = []
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
        split_value = prev_utxo.nValue // NUM_TESTS
        for i in range(NUM_TESTS):
            tx.vout.append(CTxOut(split_value, scriptPubKey))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
        for i in range(NUM_TESTS):
            temp_utxos.append(UTXO(tx.sha256, i, split_value))
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        self.test_node.test_witness_block(block, accepted=True)
        block = self.build_next_block()
        used_sighash_single_out_of_bounds = False
        for i in range(NUM_TESTS):
            # Ping regularly to keep the connection alive
            if (not i % 100):
                self.test_node.sync_with_ping()
            # Choose random number of inputs to use.
            num_inputs = random.randint(1, 10)
            # Create a slight bias for producing more utxos
            num_outputs = random.randint(1, 11)
            random.shuffle(temp_utxos)
            assert(len(temp_utxos) > num_inputs)
            tx = CTransaction()
            total_value = 0
            for i in range(num_inputs):
                tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
                tx.wit.vtxinwit.append(CTxInWitness())
                total_value += temp_utxos[i].nValue
            split_value = total_value // num_outputs
            for i in range(num_outputs):
                tx.vout.append(CTxOut(split_value, scriptPubKey))
            for i in range(num_inputs):
                # Now try to sign each input, using a random hashtype.
                anyonecanpay = 0
                if random.randint(0, 1):
                    anyonecanpay = SIGHASH_ANYONECANPAY
                hashtype = random.randint(1, 3) | anyonecanpay
                sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
                if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
                    used_sighash_single_out_of_bounds = True
            tx.rehash()
            for i in range(num_outputs):
                temp_utxos.append(UTXO(tx.sha256, i, split_value))
            temp_utxos = temp_utxos[num_inputs:]
            block.vtx.append(tx)
            # Test the block periodically, if we're close to maxblocksize
            if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
                self.update_witness_block_with_transactions(block, [])
                self.test_node.test_witness_block(block, accepted=True)
                block = self.build_next_block()
        if (not used_sighash_single_out_of_bounds):
            self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
        # Test the transactions we've added to the block
        if (len(block.vtx) > 1):
            self.update_witness_block_with_transactions(block, [])
            self.test_node.test_witness_block(block, accepted=True)
        # Now test witness version 0 P2PKH transactions
        pubkeyhash = hash160(pubkey)
        scriptPKH = CScript([OP_0, pubkeyhash])
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
        tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
        script = GetP2PKHScript(pubkeyhash)
        sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
        signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
        # Check that we can't have a scriptSig
        tx2.vin[0].scriptSig = CScript([signature, pubkey])
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        self.test_node.test_witness_block(block, accepted=False)
        # Move the signature to the witness.
        block.vtx.pop()
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
        tx2.vin[0].scriptSig = b""
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=True)
        temp_utxos.pop(0)
        # Update self.utxos for later tests.  Just spend everything in
        # temp_utxos to a corresponding entry in self.utxos
        tx = CTransaction()
        index = 0
        for i in temp_utxos:
            # Just spend to our usual anyone-can-spend output
            # Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
            # the signatures as we go.
            tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
            tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
            tx.wit.vtxinwit.append(CTxInWitness())
            sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
            index += 1
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        self.test_node.test_witness_block(block, accepted=True)
        for i in range(len(tx.vout)):
            self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older chellitd's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-chellitd> to
# the test.
def test_upgrade_after_activation(self, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active chellitd
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
self.stop_node(node_id)
self.start_node(node_id, extra_args=[])
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(self.nodes[node_id], 'segwit')['status'] == "active")
# Make sure this peers blocks match those of node0.
height = self.nodes[node_id].getblockcount()
while height >= 0:
block_hash = self.nodes[node_id].getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), self.nodes[node_id].getblock(block_hash))
height -= 1
    def test_witness_sigops(self):
        '''Ensure sigop counting is correct inside witnesses.

        Builds witness programs whose CHECKSIG/CHECKMULTISIG counts are
        tuned so that spending them takes a block just over, or exactly
        up to, MAX_SIGOP_COST, and verifies the block is rejected or
        accepted accordingly (including legacy sigops in outputs).
        '''
        self.log.info("Testing sigops limit")
        assert(len(self.utxo))
        # Keep this under MAX_OPS_PER_SCRIPT (201)
        witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
        witness_hash = sha256(witness_program)
        scriptPubKey = CScript([OP_0, witness_hash])
        # Per the counting used below: 20 sigops per CHECKMULTISIG,
        # 1 per CHECKSIG.
        sigops_per_script = 20*5 + 193*1
        # We'll produce 2 extra outputs, one with a program that would take us
        # over max sig ops, and one with a program that would exactly reach max
        # sig ops
        outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
        extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
        # We chose the number of checkmultisigs/checksigs to make this work:
        assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
        # This script, when spent with the first
        # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
        # would push us just over the block sigop limit.
        witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
        witness_hash_toomany = sha256(witness_program_toomany)
        scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
        # If we spend this script instead, we would exactly reach our sigop
        # limit (for witness sigops).
        witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
        witness_hash_justright = sha256(witness_program_justright)
        scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
        # First split our available utxo into a bunch of outputs
        split_value = self.utxo[0].nValue // outputs
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        for i in range(outputs):
            tx.vout.append(CTxOut(split_value, scriptPubKey))
        # Repurpose the last two outputs for the boundary cases.
        tx.vout[-2].scriptPubKey = scriptPubKey_toomany
        tx.vout[-1].scriptPubKey = scriptPubKey_justright
        tx.rehash()
        block_1 = self.build_next_block()
        self.update_witness_block_with_transactions(block_1, [tx])
        self.test_node.test_witness_block(block_1, accepted=True)
        tx2 = CTransaction()
        # If we try to spend the first n-1 outputs from tx, that should be
        # too many sigops.
        total_value = 0
        for i in range(outputs-1):
            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
            total_value += tx.vout[i].nValue
        # The last input spends the "toomany" output; use its program.
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
        tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
        tx2.rehash()
        block_2 = self.build_next_block()
        self.update_witness_block_with_transactions(block_2, [tx2])
        self.test_node.test_witness_block(block_2, accepted=False)
        # Try dropping the last input in tx2, and add an output that has
        # too many sigops (contributing to legacy sigop count).
        checksig_count = (extra_sigops_available // 4) + 1
        scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
        tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
        tx2.vin.pop()
        tx2.wit.vtxinwit.pop()
        tx2.vout[0].nValue -= tx.vout[-2].nValue
        tx2.rehash()
        block_3 = self.build_next_block()
        self.update_witness_block_with_transactions(block_3, [tx2])
        self.test_node.test_witness_block(block_3, accepted=False)
        # If we drop the last checksig in this output, the tx should succeed.
        block_4 = self.build_next_block()
        tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
        tx2.rehash()
        self.update_witness_block_with_transactions(block_4, [tx2])
        self.test_node.test_witness_block(block_4, accepted=True)
        # Reset the tip back down for the next test
        sync_blocks(self.nodes)
        for x in self.nodes:
            x.invalidateblock(block_4.hash)
        # Try replacing the last input of tx2 to be spending the last
        # output of tx
        block_5 = self.build_next_block()
        tx2.vout.pop()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
        tx2.rehash()
        self.update_witness_block_with_transactions(block_5, [tx2])
        self.test_node.test_witness_block(block_5, accepted=True)
        # TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
    def test_uncompressed_pubkey(self):
        """Test segwit spends that use an uncompressed pubkey.

        For P2WPKH, P2WSH and P2SH(P2WSH), such spends must be rejected
        by policy (non-mandatory-script-verify-flag) but remain valid by
        consensus in blocks; plain pre-segwit P2PKH spends with an
        uncompressed key stay fully standard.
        """
        self.log.info("Testing uncompressed pubkeys")
        # Segwit transactions using uncompressed pubkeys are not accepted
        # under default policy, but should still pass consensus.
        key = CECKey()
        key.set_secretbytes(b"9")
        key.set_compressed(False)
        pubkey = CPubKey(key.get_pubkey())
        assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
        assert(len(self.utxo) > 0)
        utxo = self.utxo.pop(0)
        # Test 1: P2WPKH
        # First create a P2WPKH output that uses an uncompressed pubkey
        pubkeyhash = hash160(pubkey)
        scriptPKH = CScript([OP_0, pubkeyhash])
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
        tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
        tx.rehash()
        # Confirm it in a block.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        self.test_node.test_witness_block(block, accepted=True)
        # Now try to spend it. Send it to a P2WSH output, which we'll
        # use in the next test.
        witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
        witness_hash = sha256(witness_program)
        scriptWSH = CScript([OP_0, witness_hash])
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
        script = GetP2PKHScript(pubkeyhash)
        sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
        signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
        tx2.rehash()
        # Should fail policy test.
        self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
        # But passes consensus.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        self.test_node.test_witness_block(block, accepted=True)
        # Test 2: P2WSH
        # Try to spend the P2WSH output created in last test.
        # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
        p2sh_witness_hash = hash160(scriptWSH)
        scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
        scriptSig = CScript([scriptWSH])
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
        tx3.wit.vtxinwit.append(CTxInWitness())
        sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
        # Should fail policy test.
        self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
        # But passes consensus.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx3])
        self.test_node.test_witness_block(block, accepted=True)
        # Test 3: P2SH(P2WSH)
        # Try to spend the P2SH output created in the last test.
        # Send it to a P2PKH output, which we'll use in the next test.
        scriptPubKey = GetP2PKHScript(pubkeyhash)
        tx4 = CTransaction()
        tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
        tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
        tx4.wit.vtxinwit.append(CTxInWitness())
        sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
        # Should fail policy test.
        self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx4])
        self.test_node.test_witness_block(block, accepted=True)
        # Test 4: Uncompressed pubkeys should still be valid in non-segwit
        # transactions.
        tx5 = CTransaction()
        tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
        tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
        (sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
        signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
        tx5.vin[0].scriptSig = CScript([signature, pubkey])
        tx5.rehash()
        # Should pass policy and consensus.
        self.test_node.test_transaction_acceptance(tx5, True, True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx5])
        self.test_node.test_witness_block(block, accepted=True)
        self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
    def test_non_standard_witness(self):
        """Test standardness limits on P2WSH witnesses.

        Exercises the policy rules checked below — witness stack item
        count (101 items rejected), stack element size (81 bytes
        rejected, 80 accepted) and witnessScript size (3600 bytes
        standard, 3601 not) — against the fRequireStandard node
        (std_node), for both native P2WSH and P2SH-P2WSH.  The
        non-standard spends must still be accepted by test_node and,
        once mined, by the standard node.
        """
        self.log.info("Testing detection of non-standard P2WSH witness")
        # One-byte padding element used to build oversized witness items.
        pad = chr(1).encode('latin-1')
        # Create scripts for tests
        scripts = []
        scripts.append(CScript([OP_DROP] * 100))
        scripts.append(CScript([OP_DROP] * 99))
        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
        scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
        p2wsh_scripts = []
        assert(len(self.utxo))
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
        outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
        for i in scripts:
            p2wsh = CScript([OP_0, sha256(i)])
            p2sh = hash160(p2wsh)
            p2wsh_scripts.append(p2wsh)
            tx.vout.append(CTxOut(outputvalue, p2wsh))
            tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
        tx.rehash()
        txid = tx.sha256
        self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        # Creating transactions for tests
        p2wsh_txs = []
        p2sh_txs = []
        for i in range(len(scripts)):
            p2wsh_tx = CTransaction()
            p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
            p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
            p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
            p2wsh_tx.rehash()
            p2wsh_txs.append(p2wsh_tx)
            p2sh_tx = CTransaction()
            p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
            p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
            p2sh_tx.wit.vtxinwit.append(CTxInWitness())
            p2sh_tx.rehash()
            p2sh_txs.append(p2sh_tx)
        # Testing native P2WSH
        # Witness stack size, excluding witnessScript, over 100 is non-standard
        p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
        self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
        # Non-standard nodes should accept
        self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
        # Stack element size over 80 bytes is non-standard
        p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
        self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
        # Non-standard nodes should accept
        self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
        # Standard nodes should accept if element size is not over 80 bytes
        p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
        self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
        # witnessScript size at 3600 bytes is standard
        p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
        self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
        self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
        # witnessScript size at 3601 bytes is non-standard
        p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
        self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
        # Non-standard nodes should accept
        self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
        # Repeating the same tests with P2SH-P2WSH
        p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
        self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
        self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
        p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
        self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
        self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
        p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
        self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
        p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
        self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
        self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
        p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
        self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
        self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
        self.nodes[0].generate(1)  # Mine and clean up the mempool of non-standard node
        # Valid but non-standard transactions in a block should be accepted by standard node
        sync_blocks(self.nodes)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        self.utxo.pop(0)
def run_test(self):
    """Drive the full segwit functional-test sequence.

    Sets up three P2P test peers (two against node0, one against node1),
    then walks the segwit soft-fork lifecycle — pre-start, started,
    locked-in, active — running the stage-appropriate sub-tests at each
    step. Call order matters: later tests consume UTXOs and chain state
    produced by earlier ones.
    """
    # Setup the p2p connections and start up the network thread.
    self.test_node = TestNode()  # sets NODE_WITNESS|NODE_NETWORK
    self.old_node = TestNode()   # only NODE_NETWORK
    self.std_node = TestNode()   # for testing node1 (fRequireStandard=true)
    self.p2p_connections = [self.test_node, self.old_node]
    self.connections = []
    # node0 gets a witness-capable peer and a legacy (non-witness) peer;
    # node1 gets a witness-capable peer for standardness testing.
    self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
    self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
    self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
    self.test_node.add_connection(self.connections[0])
    self.old_node.add_connection(self.connections[1])
    self.std_node.add_connection(self.connections[2])
    NetworkThread().start()  # Start up network handling in another thread
    # Keep a place to store utxo's that can be used in later tests
    self.utxo = []
    # Test logic begins here
    self.test_node.wait_for_verack()
    self.log.info("Starting tests before segwit lock in:")
    self.test_witness_services()  # Verifies NODE_WITNESS
    self.test_non_witness_transaction()  # non-witness tx's are accepted
    self.test_unnecessary_witness_before_segwit_activation()
    self.test_block_relay(segwit_activated=False)
    # Advance to segwit being 'started'
    self.advance_to_segwit_started()
    sync_blocks(self.nodes)
    self.test_getblocktemplate_before_lockin()
    sync_blocks(self.nodes)
    # At lockin, nothing should change.
    self.log.info("Testing behavior post lockin, pre-activation")
    self.advance_to_segwit_lockin()
    # Retest unnecessary witnesses
    self.test_unnecessary_witness_before_segwit_activation()
    self.test_witness_tx_relay_before_segwit_activation()
    self.test_block_relay(segwit_activated=False)
    self.test_p2sh_witness(segwit_activated=False)
    self.test_standardness_v0(segwit_activated=False)
    sync_blocks(self.nodes)
    # Now activate segwit
    self.log.info("Testing behavior after segwit activation")
    self.advance_to_segwit_active()
    sync_blocks(self.nodes)
    # Test P2SH witness handling again
    self.test_p2sh_witness(segwit_activated=True)
    self.test_witness_commitments()
    self.test_block_malleability()
    self.test_witness_block_size()
    self.test_submit_block()
    self.test_extra_witness_data()
    self.test_max_witness_push_length()
    self.test_max_witness_program_length()
    self.test_witness_input_length()
    self.test_block_relay(segwit_activated=True)
    self.test_tx_relay_after_segwit_activation()
    self.test_standardness_v0(segwit_activated=True)
    self.test_segwit_versions()
    self.test_premature_coinbase_witness_spend()
    self.test_uncompressed_pubkey()
    self.test_signature_version_1()
    self.test_non_standard_witness()
    sync_blocks(self.nodes)
    # node2 starts without witness support; verify it can upgrade in place.
    self.test_upgrade_after_activation(node_id=2)
    self.test_witness_sigops()
if __name__ == '__main__':
    # Script entry point: run the segwit functional test suite.
    SegWitTest().main()
| 46.183496
| 143
| 0.658162
|
4a1a3c31cf1ac5aeccf2c305684f5b1eaf09db96
| 13,937
|
py
|
Python
|
Code/predict_tensorflow.py
|
yolsever/ML-in-equity-prediction
|
48aec0e1d26f7f715461cbcb76d78ab527ec096c
|
[
"MIT"
] | 31
|
2019-10-21T20:48:38.000Z
|
2022-03-26T02:52:34.000Z
|
Code/predict_tensorflow.py
|
yolsever/ML-in-equity-prediction
|
48aec0e1d26f7f715461cbcb76d78ab527ec096c
|
[
"MIT"
] | 1
|
2019-05-26T14:16:37.000Z
|
2021-06-20T14:44:28.000Z
|
Code/predict_tensorflow.py
|
yolsever/ML-in-equity-prediction
|
48aec0e1d26f7f715461cbcb76d78ab527ec096c
|
[
"MIT"
] | 11
|
2020-02-14T08:58:20.000Z
|
2022-03-21T15:21:40.000Z
|
# ---------------------------------------------------------------------------
# Data preparation and TF1 (tf.contrib era) LSTM graph definition.
# NOTE(review): this chunk uses names with no visible definition here
# (`stock`, `train_data`, `test_data`, `df`, `floor`, `mid_prices`) —
# presumably defined earlier in the full script; confirm before running.
# ---------------------------------------------------------------------------
# test_data = mid_prices[11000:]
vals = stock.values
n_train = floor(vals.shape[0]*0.7)           # first 70% -> training
n_val = floor(n_train + vals.shape[0]*0.15)  # next 15% -> validation
# NOTE(review): `scaler` is used here but only assigned a few lines below;
# as ordered this raises NameError — the MinMaxScaler() line likely belongs first.
vals[:n_val,:] = scaler.fit_transform(vals[:n_val,:])  # fit on train+val only
vals[n_val:,:] = scaler.transform(vals[n_val:,:])      # test reuses those params
train = vals[:n_train,:]
valid = vals[n_train:n_val,:]
test = vals[n_val:,:]
[n,d] = train.shape
# Scale the data to be between 0 and 1
# When scaling remember! You normalize both test and train data w.r.t training data
# Because you are not supposed to have access to test data
scaler = MinMaxScaler()
train_data = train_data.reshape(-1,1)
test_data = test_data.reshape(-1,1)
# Train the Scaler with training data and smooth data: each fixed-size window
# is min-max scaled to its own local range.
smoothing_window_size = 20  # 2500
for di in range(0,n-n%smoothing_window_size,smoothing_window_size):  # FIXX!!
    scaler.fit(train_data[di:di+smoothing_window_size,:])
    train_data[di:di+smoothing_window_size,:] = scaler.transform(train_data[di:di+smoothing_window_size,:])
# You normalize the last bit of remaining data
scaler.fit(train_data[di+smoothing_window_size:,:])
train_data[di+smoothing_window_size:,:] = scaler.transform(train_data[di+smoothing_window_size:,:])
# Reshape both train and test data back to 1-D
train_data = train_data.reshape(-1)
# Normalize test data (with the scaler fitted on the last training window)
test_data = scaler.transform(test_data).reshape(-1)
# Now perform exponential moving average smoothing
# So the data will have a smoother curve than the original ragged data
EMA = 0.0
gamma = 0.1  # EMA smoothing factor: new = 0.1*current + 0.9*previous
for ti in range(11000):
    EMA = gamma*train_data[ti] + (1-gamma)*EMA
    train_data[ti] = EMA
# Used for visualization and test purposes
all_mid_data = np.concatenate([train_data,test_data],axis=0)
# ----- Hyperparameters -----
D = d                      # Dimensionality of the data. Since our data is 1-D this would be 1
num_unrollings = 20        # Number of time steps you look into the future.
batch_size = 30            # Number of samples in a batch
num_nodes = [200,200,150]  # Number of hidden nodes in each layer of the deep LSTM stack we're using
n_layers = len(num_nodes)  # number of layers
dropout = 0.2              # dropout amount
tf.reset_default_graph()   # This is important in case you run this multiple times
# Input data: one placeholder per unrolled time step.
train_inputs, train_outputs = [],[]
for ui in range(num_unrollings):
    train_inputs.append(tf.placeholder(tf.float32, shape=[batch_size,D],name='train_inputs_%d'%ui))
    train_outputs.append(tf.placeholder(tf.float32, shape=[batch_size,1], name = 'train_outputs_%d'%ui))
# Stacked LSTM cells with Xavier-initialized weights.
lstm_cells = [
    tf.contrib.rnn.LSTMCell(num_units=num_nodes[li],
                            state_is_tuple=True,
                            initializer= tf.contrib.layers.xavier_initializer()
                           )
    for li in range(n_layers)]
# Dropout on outputs and state only (inputs left untouched).
drop_lstm_cells = [tf.contrib.rnn.DropoutWrapper(
    lstm, input_keep_prob=1.0,output_keep_prob=1.0-dropout, state_keep_prob=1.0-dropout
) for lstm in lstm_cells]
drop_multi_cell = tf.contrib.rnn.MultiRNNCell(drop_lstm_cells)  # training stack (dropout)
multi_cell = tf.contrib.rnn.MultiRNNCell(lstm_cells)            # inference stack (no dropout)
# Final linear regression layer mapping last LSTM layer output -> scalar price.
w = tf.get_variable('w',shape=[num_nodes[-1], 1], initializer=tf.contrib.layers.xavier_initializer())
b = tf.get_variable('b',initializer=tf.random_uniform([1],-0.1,0.1))
# Create cell state and hidden state variables to maintain the state of the LSTM
c, h = [],[]
initial_state = []
for li in range(n_layers):
    c.append(tf.Variable(tf.zeros([batch_size, num_nodes[li]]), trainable=False))
    h.append(tf.Variable(tf.zeros([batch_size, num_nodes[li]]), trainable=False))
    initial_state.append(tf.contrib.rnn.LSTMStateTuple(c[li], h[li]))
# Do several tensor transformations, because the function dynamic_rnn requires the output to be of
# a specific format. Read more at: https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn
all_inputs = tf.concat([tf.expand_dims(t,0) for t in train_inputs],axis=0)
# all_outputs is [seq_length, batch_size, num_nodes]
all_lstm_outputs, state = tf.nn.dynamic_rnn(
    drop_multi_cell, all_inputs, initial_state=tuple(initial_state),
    time_major = True, dtype=tf.float32)
all_lstm_outputs = tf.reshape(all_lstm_outputs, [batch_size*num_unrollings,num_nodes[-1]])
all_outputs = tf.nn.xw_plus_b(all_lstm_outputs,w,b)
# One tensor per unrolled step, each [batch_size, 1].
split_outputs = tf.split(all_outputs,num_unrollings,axis=0)
# When calculating the loss you need to be careful about the exact form, because you calculate
# loss of all the unrolled steps at the same time
# Therefore, take the mean error of each batch and get the sum of that over all the unrolled steps
# (removed a stray pair of backticks here that caused a SyntaxError)
print('Defining training Loss')
loss = 0.0
# The control dependency ensures the persistent LSTM cell/hidden state
# variables are updated with the final unrolled state before the loss op runs.
with tf.control_dependencies([tf.assign(c[li], state[li][0]) for li in range(n_layers)]+
                             [tf.assign(h[li], state[li][1]) for li in range(n_layers)]):
    # Sum of per-step mean squared errors over all unrolled time steps.
    for ui in range(num_unrollings):
        loss += tf.reduce_mean(0.5*(split_outputs[ui]-train_outputs[ui])**2)
# ---------------------------------------------------------------------------
# Learning-rate decay, optimizer, and single-step prediction ops.
# ---------------------------------------------------------------------------
print('Learning rate decay operations')
global_step = tf.Variable(0, trainable=False)
inc_gstep = tf.assign(global_step,global_step + 1)  # bumping this halves the LR below
tf_learning_rate = tf.placeholder(shape=None,dtype=tf.float32)
tf_min_learning_rate = tf.placeholder(shape=None,dtype=tf.float32)
# Exponential decay (rate 0.5 per global_step increment), floored at the min LR.
learning_rate = tf.maximum(
    tf.train.exponential_decay(tf_learning_rate, global_step, decay_steps=1, decay_rate=0.5, staircase=True),
    tf_min_learning_rate)
# Optimizer: Adam with global-norm gradient clipping at 5.0.
print('TF Optimization operations')
optimizer = tf.train.AdamOptimizer(learning_rate)
gradients, v = zip(*optimizer.compute_gradients(loss))
gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
optimizer = optimizer.apply_gradients(zip(gradients, v))
print('\tAll done')
print('Defining prediction related TF functions')
# Prediction path feeds one sample at a time, so batch dimension is 1.
sample_inputs = tf.placeholder(tf.float32, shape=[1,D])
# Maintaining LSTM state for prediction stage
sample_c, sample_h, initial_sample_state = [],[],[]
for li in range(n_layers):
    sample_c.append(tf.Variable(tf.zeros([1, num_nodes[li]]), trainable=False))
    sample_h.append(tf.Variable(tf.zeros([1, num_nodes[li]]), trainable=False))
    initial_sample_state.append(tf.contrib.rnn.LSTMStateTuple(sample_c[li],sample_h[li]))
# Op to zero the prediction-time LSTM state between test sequences.
reset_sample_states = tf.group(*[tf.assign(sample_c[li],tf.zeros([1, num_nodes[li]])) for li in range(n_layers)],
                               *[tf.assign(sample_h[li],tf.zeros([1, num_nodes[li]])) for li in range(n_layers)])
# Inference uses the dropout-free cell stack.
sample_outputs, sample_state = tf.nn.dynamic_rnn(multi_cell, tf.expand_dims(sample_inputs,0),
                                                 initial_state=tuple(initial_sample_state),
                                                 time_major = True,
                                                 dtype=tf.float32)
# Persist the LSTM state across successive single-step predictions.
with tf.control_dependencies([tf.assign(sample_c[li],sample_state[li][0]) for li in range(n_layers)]+
                             [tf.assign(sample_h[li],sample_state[li][1]) for li in range(n_layers)]):
    sample_prediction = tf.nn.xw_plus_b(tf.reshape(sample_outputs,[1,-1]), w, b)
print('\tAll done')
class DataGeneratorSeq(object):
    """Generates unrolled (input, label) batches from a 1-D price series.

    The usable series is split into ``batch_size`` equal segments and each
    batch element walks its own segment via a per-element cursor, so a
    single batch mixes samples from across the whole series. Labels are
    prices a random 0-4 steps ahead of the input (a noisy-target form of
    augmentation). Uses the global NumPy RNG; seed it for reproducibility.
    """

    def __init__(self, prices, batch_size, num_unroll):
        self._prices = prices
        # Leave headroom at the end so the random look-ahead label stays in range.
        self._prices_length = len(self._prices) - num_unroll
        self._batch_size = batch_size
        self._num_unroll = num_unroll
        # Size of the segment assigned to each batch element.
        self._segments = self._prices_length // self._batch_size
        # One cursor per batch element, starting at its segment boundary.
        self._cursor = [offset * self._segments for offset in range(self._batch_size)]

    def next_batch(self):
        """Return one (data, labels) pair, each a float32 array of shape (batch_size,)."""
        batch_data = np.zeros((self._batch_size), dtype=np.float32)
        batch_labels = np.zeros((self._batch_size), dtype=np.float32)
        for b in range(self._batch_size):
            # If a cursor ran off the end, respawn it at a random earlier index.
            if self._cursor[b] + 1 >= self._prices_length:
                self._cursor[b] = np.random.randint(0, (b + 1) * self._segments)
            batch_data[b] = self._prices[self._cursor[b]]
            # Label: the price a random 0-4 steps ahead of the input.
            batch_labels[b] = self._prices[self._cursor[b] + np.random.randint(0, 5)]
            self._cursor[b] = (self._cursor[b] + 1) % self._prices_length
        return batch_data, batch_labels

    def unroll_batches(self):
        """Return ``num_unroll`` consecutive batches as two parallel lists."""
        # (removed unused init_data/init_label locals from the original)
        unroll_data, unroll_labels = [], []
        for _ in range(self._num_unroll):
            data, labels = self.next_batch()
            unroll_data.append(data)
            unroll_labels.append(labels)
        return unroll_data, unroll_labels

    def reset_indices(self):
        """Randomize every cursor (e.g. between epochs)."""
        for b in range(self._batch_size):
            self._cursor[b] = np.random.randint(0, min((b + 1) * self._segments, self._prices_length - 1))
# Quick smoke test of the generator: print a few small unrolled batches.
# (removed unused dat_ind/lbl_ind locals from the original)
dg = DataGeneratorSeq(train_data, 5, 5)
u_data, u_labels = dg.unroll_batches()
for ui, (dat, lbl) in enumerate(zip(u_data, u_labels)):
    print('\n\nUnrolled index %d' % ui)
    print('\tInputs: ', dat)
    print('\n\tOutput:', lbl)
# ---------------------------------------------------------------------------
# Training / validation loop.
# NOTE(review): depends on `train_data`, `all_mid_data` and the graph ops
# defined above; test-prediction start points are hard-coded at index 11000.
# ---------------------------------------------------------------------------
epochs = 30
valid_summary = 1           # Interval you make test predictions
n_predict_once = 50         # Number of steps you continously predict for
train_seq_length = train_data.size  # Full length of the training data
train_mse_ot = []           # Accumulate Train losses
test_mse_ot = []            # Accumulate Test loss
predictions_over_time = []  # Accumulate predictions
session = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Used for decaying learning rate
loss_nondecrease_count = 0
loss_nondecrease_threshold = 2  # If the test error hasn't increased in this many steps, decrease learning rate
print('Initialized')
average_loss = 0
# Define data generator
data_gen = DataGeneratorSeq(train_data,batch_size,num_unrollings)
x_axis_seq = []
# Points you start our test predictions from
test_points_seq = np.arange(11000,12000,50).tolist()
for ep in range(epochs):
    # ========================= Training =====================================
    for step in range(train_seq_length//batch_size):
        u_data, u_labels = data_gen.unroll_batches()
        # Build the feed dict: one input/output placeholder pair per unrolled step.
        feed_dict = {}
        for ui,(dat,lbl) in enumerate(zip(u_data,u_labels)):
            feed_dict[train_inputs[ui]] = dat.reshape(-1,1)
            feed_dict[train_outputs[ui]] = lbl.reshape(-1,1)
        feed_dict.update({tf_learning_rate: 0.0001, tf_min_learning_rate:0.000001})
        _, l = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += l
    # ============================ Validation ==============================
    if (ep+1) % valid_summary == 0:
        average_loss = average_loss/(valid_summary*(train_seq_length//batch_size))
        # The average loss
        if (ep+1)%valid_summary==0:
            print('Average loss at step %d: %f' % (ep+1, average_loss))
        train_mse_ot.append(average_loss)
        average_loss = 0  # reset loss
        predictions_seq = []
        mse_test_loss_seq = []
        # ===================== Updating State and Making Predicitons ========================
        for w_i in test_points_seq:
            mse_test_loss = 0.0
            our_predictions = []
            if (ep+1)-valid_summary==0:
                # Only calculate x_axis values in the first validation epoch
                x_axis=[]
            # Feed in the recent past behavior of stock prices
            # to make predictions from that point onwards (warms up LSTM state)
            for tr_i in range(w_i-num_unrollings+1,w_i-1):
                current_price = all_mid_data[tr_i]
                feed_dict[sample_inputs] = np.array(current_price).reshape(1,1)
                _ = session.run(sample_prediction,feed_dict=feed_dict)
            feed_dict = {}
            current_price = all_mid_data[w_i-1]
            feed_dict[sample_inputs] = np.array(current_price).reshape(1,1)
            # Make predictions for this many steps
            # Each prediction uses previous prediciton as it's current input
            for pred_i in range(n_predict_once):
                pred = session.run(sample_prediction,feed_dict=feed_dict)
                our_predictions.append(np.asscalar(pred))
                feed_dict[sample_inputs] = np.asarray(pred).reshape(-1,1)
                if (ep+1)-valid_summary==0:
                    # Only calculate x_axis values in the first validation epoch
                    x_axis.append(w_i+pred_i)
                mse_test_loss += 0.5*(pred-all_mid_data[w_i+pred_i])**2
            # Zero the prediction-time LSTM state before the next start point.
            session.run(reset_sample_states)
            predictions_seq.append(np.array(our_predictions))
            mse_test_loss /= n_predict_once
            mse_test_loss_seq.append(mse_test_loss)
            if (ep+1)-valid_summary==0:
                x_axis_seq.append(x_axis)
        current_test_mse = np.mean(mse_test_loss_seq)
        # Learning rate decay logic: halve LR after `threshold` consecutive
        # epochs without improving on the best test MSE so far.
        if len(test_mse_ot)>0 and current_test_mse > min(test_mse_ot):
            loss_nondecrease_count += 1
        else:
            loss_nondecrease_count = 0
        if loss_nondecrease_count > loss_nondecrease_threshold :
            session.run(inc_gstep)
            loss_nondecrease_count = 0
            print('\tDecreasing learning rate by 0.5')
        test_mse_ot.append(current_test_mse)
        print('\tTest MSE: %.5f'%np.mean(mse_test_loss_seq))
        predictions_over_time.append(predictions_seq)
        print('\tFinished Predictions')
# ---------------------------------------------------------------------------
# Visualization: evolution of test predictions over epochs, plus the single
# best epoch's predictions. NOTE(review): depends on `df`, `all_mid_data`,
# `x_axis_seq`, `predictions_over_time` and `plt` (presumably
# matplotlib.pyplot) defined earlier in the script.
# ---------------------------------------------------------------------------
best_prediction_epoch = 16  # replace this with the epoch that you got the best results when running the plotting code
plt.figure(figsize = (18,18))
plt.subplot(2,1,1)
plt.plot(range(df.shape[0]),all_mid_data,color='b')
# Plotting how the predictions change over time
# Plot older predictions with low alpha and newer predictions with high alpha
start_alpha = 0.25
alpha = np.arange(start_alpha,1.1,(1.0-start_alpha)/len(predictions_over_time[::3]))
for p_i,p in enumerate(predictions_over_time[::3]):  # every 3rd epoch only
    for xval,yval in zip(x_axis_seq,p):
        plt.plot(xval,yval,color='r',alpha=alpha[p_i])
plt.title('Evolution of Test Predictions Over Time',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
plt.xlim(11000,12500)
plt.subplot(2,1,2)
# Predicting the best test prediction you got
plt.plot(range(df.shape[0]),all_mid_data,color='b')
for xval,yval in zip(x_axis_seq,predictions_over_time[best_prediction_epoch]):
    plt.plot(xval,yval,color='r')
plt.title('Best Test Predictions Over Time',fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
plt.xlim(11000,12500)
plt.show()
| 37.165333
| 121
| 0.695272
|
4a1a3c645379c8c98ed9e7f98e3f57571c58250b
| 80,089
|
py
|
Python
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/vport.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/vport.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/vport.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
from typing import List, Any, Union
class Vport(Base):
"""This is the virtual port hierarchy, which is used to configure IxNetwork.
The Vport class encapsulates a list of vport resources that are managed by the user.
A list of resources can be retrieved from the server using the Vport.find() method.
The list can be managed by using the Vport.add() and Vport.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'vport'
_SDM_ATT_MAP = {
'ActualSpeed': 'actualSpeed',
'AdminMode': 'adminMode',
'AssignedTo': 'assignedTo',
'AssignedToDisplayName': 'assignedToDisplayName',
'CaptureSupported': 'captureSupported',
'ConnectedTo': 'connectedTo',
'ConnectionInfo': 'connectionInfo',
'ConnectionState': 'connectionState',
'ConnectionStatus': 'connectionStatus',
'ConnectionStatusDisplayName': 'connectionStatusDisplayName',
'DpdkPerformanceAcceleration': 'dpdkPerformanceAcceleration',
'InternalId': 'internalId',
'IsAvailable': 'isAvailable',
'IsConnected': 'isConnected',
'IsFramePreemptionSupported': 'isFramePreemptionSupported',
'IsMapped': 'isMapped',
'IsPullOnly': 'isPullOnly',
'IsVMPort': 'isVMPort',
'IxnChassisVersion': 'ixnChassisVersion',
'IxnClientVersion': 'ixnClientVersion',
'IxosChassisVersion': 'ixosChassisVersion',
'Licenses': 'licenses',
'Location': 'location',
'Name': 'name',
'ResourceMode': 'resourceMode',
'RxMode': 'rxMode',
'State': 'state',
'StateDetail': 'stateDetail',
'TraceEnabled': 'traceEnabled',
'TraceLevel': 'traceLevel',
'TraceTag': 'traceTag',
'TransmitIgnoreLinkStatus': 'transmitIgnoreLinkStatus',
'TxGapControlMode': 'txGapControlMode',
'TxMode': 'txMode',
'Type': 'type',
'UseGlobalSettings': 'useGlobalSettings',
'ValidTxModes': 'validTxModes',
}
_SDM_ENUM_MAP = {
'captureSupported': ['data', 'control', 'dataAndControl', 'none'],
'connectionState': ['assignedInUseByOther', 'assignedUnconnected', 'connectedLinkDown', 'connectedLinkUp', 'connecting', 'unassigned'],
'rxMode': ['capture', 'measure', 'captureAndMeasure', 'packetImpairment'],
'state': ['busy', 'down', 'unassigned', 'up', 'versionMismatch'],
'stateDetail': ['busy', 'cpuNotReady', 'idle', 'inActive', 'l1ConfigFailed', 'protocolsNotSupported', 'versionMismatched', 'waitingForCPUStatus'],
'traceLevel': ['kCritical', 'kDebug', 'kError', 'kInfo', 'kNote', 'kTrace', 'kWarning'],
'txGapControlMode': ['fixedMode', 'averageMode'],
'txMode': ['sequential', 'interleaved', 'sequentialCoarse', 'interleavedCoarse', 'packetImpairment'],
'type': ['ethernet', 'ethernetvm', 'ethernetFcoe', 'atm', 'pos', 'tenGigLan', 'tenGigLanFcoe', 'fortyGigLan', 'fortyGigLanFcoe', 'tenGigWan', 'tenGigWanFcoe', 'hundredGigLan', 'hundredGigLanFcoe', 'tenFortyHundredGigLan', 'tenFortyHundredGigLanFcoe', 'fc', 'ethernetImpairment', 'novusHundredGigLan', 'novusHundredGigLanFcoe', 'novusTenGigLan', 'novusTenGigLanFcoe', 'krakenFourHundredGigLan', 'krakenFourHundredGigLanFcoe', 'aresOneFourHundredGigLan', 'aresOneFourHundredGigLanFcoe', 'uhdOneHundredGigLan'],
}
def __init__(self, parent, list_op=False):
    # Thin pass-through to Base; `list_op` is forwarded unchanged —
    # presumably flags list-operation semantics handled by the base class.
    super(Vport, self).__init__(parent, list_op)
@property
def Capture(self):
    """Child accessor: the Capture object under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.capture.Capture): An instance of the Capture class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.capture.capture import Capture
    cached = self._properties.get('Capture', None)
    if cached is not None:
        return cached
    return Capture(self)._select()
@property
def DiscoveredNeighbor(self):
    """Child accessor: DiscoveredNeighbor under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.discoveredneighbor.discoveredneighbor.DiscoveredNeighbor): An instance of the DiscoveredNeighbor class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.discoveredneighbor.discoveredneighbor import DiscoveredNeighbor
    cached = self._properties.get('DiscoveredNeighbor', None)
    if cached is not None:
        return cached
    return DiscoveredNeighbor(self)

@property
def Interface(self):
    """Child accessor: Interface under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.interface.interface.Interface): An instance of the Interface class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.interface.interface import Interface
    cached = self._properties.get('Interface', None)
    if cached is not None:
        return cached
    return Interface(self)

@property
def InterfaceDiscoveredAddress(self):
    """Child accessor: InterfaceDiscoveredAddress under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.interfacediscoveredaddress.interfacediscoveredaddress.InterfaceDiscoveredAddress): An instance of the InterfaceDiscoveredAddress class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.interfacediscoveredaddress.interfacediscoveredaddress import InterfaceDiscoveredAddress
    cached = self._properties.get('InterfaceDiscoveredAddress', None)
    if cached is not None:
        return cached
    return InterfaceDiscoveredAddress(self)._select()

@property
def L1Config(self):
    """Child accessor: L1Config under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.l1config.L1Config): An instance of the L1Config class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.l1config.l1config import L1Config
    cached = self._properties.get('L1Config', None)
    if cached is not None:
        return cached
    return L1Config(self)._select()

@property
def ProtocolStack(self):
    """Child accessor: ProtocolStack under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.protocolstack.ProtocolStack): An instance of the ProtocolStack class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocolstack.protocolstack import ProtocolStack
    cached = self._properties.get('ProtocolStack', None)
    if cached is not None:
        return cached
    return ProtocolStack(self)._select()

@property
def Protocols(self):
    """Child accessor: Protocols under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.protocols.Protocols): An instance of the Protocols class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.protocols import Protocols
    cached = self._properties.get('Protocols', None)
    if cached is not None:
        return cached
    return Protocols(self)

@property
def RateControlParameters(self):
    """Child accessor: RateControlParameters under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.ratecontrolparameters.ratecontrolparameters.RateControlParameters): An instance of the RateControlParameters class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.ratecontrolparameters.ratecontrolparameters import RateControlParameters
    cached = self._properties.get('RateControlParameters', None)
    if cached is not None:
        return cached
    return RateControlParameters(self)._select()

@property
def TapSettings(self):
    """Child accessor: TapSettings under this vport (cached after first fetch).

    Returns
    -------
    - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.tapsettings.tapsettings.TapSettings): An instance of the TapSettings class

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.tapsettings.tapsettings import TapSettings
    cached = self._properties.get('TapSettings', None)
    if cached is not None:
        return cached
    return TapSettings(self)
@property
def ActualSpeed(self):
# type: () -> int
"""
Returns
-------
- number: The actual speed.
"""
return self._get_attribute(self._SDM_ATT_MAP['ActualSpeed'])
@property
def AdminMode(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['AdminMode'])
@property
def AssignedTo(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str: (Read Only) A new port is assigned with this option.
"""
return self._get_attribute(self._SDM_ATT_MAP['AssignedTo'])
@property
def AssignedToDisplayName(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['AssignedToDisplayName'])
@property
def CaptureSupported(self):
# type: () -> str
"""
Returns
-------
- str(data | control | dataAndControl | none):
"""
return self._get_attribute(self._SDM_ATT_MAP['CaptureSupported'])
@property
def ConnectedTo(self):
# type: () -> str
"""DEPRECATED
Returns
-------
- str(None | /api/v1/sessions/1/ixnetwork/availableHardware/.../port): The physical port to which the unassigned port is assigned.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedTo'])
@ConnectedTo.setter
def ConnectedTo(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['ConnectedTo'], value)
@property
def ConnectionInfo(self):
# type: () -> str
"""
Returns
-------
- str: Detailed information about location of the physical port that is assigned to this port configuration.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectionInfo'])
@property
def ConnectionState(self):
# type: () -> str
"""
Returns
-------
- str(assignedInUseByOther | assignedUnconnected | connectedLinkDown | connectedLinkUp | connecting | unassigned): Consolidated state of the vport. This combines the connection state with link state.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectionState'])
@property
def ConnectionStatus(self):
# type: () -> str
"""
Returns
-------
- str: A string describing the status of the hardware connected to this vport
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectionStatus'])
@property
def ConnectionStatusDisplayName(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectionStatusDisplayName'])
@property
def DpdkPerformanceAcceleration(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['DpdkPerformanceAcceleration'])
@property
def InternalId(self):
# type: () -> int
"""
Returns
-------
- number: For internal use.
"""
return self._get_attribute(self._SDM_ATT_MAP['InternalId'])
@property
def IsAvailable(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, this virtual port is available for assigning to a physical port.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsAvailable'])
@property
def IsConnected(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, indicates that the port is connected.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsConnected'])
@property
def IsFramePreemptionSupported(self):
# type: () -> bool
"""
Returns
-------
- bool:
"""
return self._get_attribute(self._SDM_ATT_MAP['IsFramePreemptionSupported'])
@property
def IsMapped(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, this virtual port is mapped.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsMapped'])
@property
def IsPullOnly(self):
# type: () -> bool
"""
Returns
-------
- bool: (This action only affects assigned ports.) This action will temporarily set the port as an Unassigned Port. This function is used to pull the configuration set by a Tcl script or an IxExplorer port file into the IxNetwork configuration.
"""
return self._get_attribute(self._SDM_ATT_MAP['IsPullOnly'])
@IsPullOnly.setter
def IsPullOnly(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['IsPullOnly'], value)
@property
def IsVMPort(self):
# type: () -> bool
"""
Returns
-------
- bool: If true the hardware connected to this vport is a virtual machine port
"""
return self._get_attribute(self._SDM_ATT_MAP['IsVMPort'])
@property
def IxnChassisVersion(self):
# type: () -> str
"""
Returns
-------
- str: (Read Only) If true, the installer installs the same resources as installed by the IxNetwork Full installer/IxNetwork Chassis installer on chassis.
"""
return self._get_attribute(self._SDM_ATT_MAP['IxnChassisVersion'])
@property
def IxnClientVersion(self):
# type: () -> str
"""
Returns
-------
- str: (Read Only) If true, this installs full client side IxNetwork or IxNetwork-FT components.
"""
return self._get_attribute(self._SDM_ATT_MAP['IxnClientVersion'])
@property
def IxosChassisVersion(self):
# type: () -> str
"""
Returns
-------
- str: (Read Only) If true, the installer installs the same resources as installed by IxOS on a chassis.
"""
return self._get_attribute(self._SDM_ATT_MAP['IxosChassisVersion'])
@property
def Licenses(self):
# type: () -> str
"""
Returns
-------
- str: Number of licenses.
"""
return self._get_attribute(self._SDM_ATT_MAP['Licenses'])
@property
def Location(self):
# type: () -> str
"""
Returns
-------
- str: The current format is {chassisIp}/{frontPanelPort}.{fanoutPort} or {chassisIp};{cardId};{portId} for legacy systems.
"""
return self._get_attribute(self._SDM_ATT_MAP['Location'])
@Location.setter
def Location(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Location'], value)
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: The description of the port: (1) For an assigned port, the format is: (Port type) (card no.): (port no.) - (chassis name or IP). (2) For an (unassigned) port configuration, the format is: (Port type) Port 00x.
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def ResourceMode(self):
# type: () -> str
"""
Returns
-------
- str:
"""
return self._get_attribute(self._SDM_ATT_MAP['ResourceMode'])
@property
def RxMode(self):
    # type: () -> str
    """The receive mode of the virtual port.

    Returns
    -------
    - str(capture | measure | captureAndMeasure | packetImpairment)
    """
    attribute_key = self._SDM_ATT_MAP['RxMode']
    return self._get_attribute(attribute_key)
@RxMode.setter
def RxMode(self, value):
    # type: (str) -> None
    """Set the receive mode of the virtual port."""
    attribute_key = self._SDM_ATT_MAP['RxMode']
    self._set_attribute(attribute_key, value)
@property
def State(self):
    # type: () -> str
    """DEPRECATED The virtual port state.

    Returns
    -------
    - str(busy | down | unassigned | up | versionMismatch)
    """
    attribute_key = self._SDM_ATT_MAP['State']
    return self._get_attribute(attribute_key)
@property
def StateDetail(self):
    # type: () -> str
    """DEPRECATED Describes the state of the port.

    Returns
    -------
    - str(busy | cpuNotReady | idle | inActive | l1ConfigFailed | protocolsNotSupported | versionMismatched | waitingForCPUStatus)
    """
    attribute_key = self._SDM_ATT_MAP['StateDetail']
    return self._get_attribute(attribute_key)
@property
def TraceEnabled(self):
    # type: () -> bool
    """Enables/Disables rpf port trace for this port.

    Returns
    -------
    - bool
    """
    attribute_key = self._SDM_ATT_MAP['TraceEnabled']
    return self._get_attribute(attribute_key)
@TraceEnabled.setter
def TraceEnabled(self, value):
    # type: (bool) -> None
    """Enable or disable rpf port trace for this port."""
    attribute_key = self._SDM_ATT_MAP['TraceEnabled']
    self._set_attribute(attribute_key, value)
@property
def TraceLevel(self):
    # type: () -> str
    """PCPU Trace level.

    Returns
    -------
    - str(kCritical | kDebug | kError | kInfo | kNote | kTrace | kWarning)
    """
    attribute_key = self._SDM_ATT_MAP['TraceLevel']
    return self._get_attribute(attribute_key)
@TraceLevel.setter
def TraceLevel(self, value):
    # type: (str) -> None
    """Set the PCPU Trace level."""
    attribute_key = self._SDM_ATT_MAP['TraceLevel']
    self._set_attribute(attribute_key, value)
@property
def TraceTag(self):
    # type: () -> str
    """PCPU Trace Tag.

    Returns
    -------
    - str
    """
    attribute_key = self._SDM_ATT_MAP['TraceTag']
    return self._get_attribute(attribute_key)
@TraceTag.setter
def TraceTag(self, value):
    # type: (str) -> None
    """Set the PCPU Trace Tag."""
    attribute_key = self._SDM_ATT_MAP['TraceTag']
    self._set_attribute(attribute_key, value)
@property
def TransmitIgnoreLinkStatus(self):
    # type: () -> bool
    """If true, the port ignores the link status when transmitting data.

    Returns
    -------
    - bool
    """
    attribute_key = self._SDM_ATT_MAP['TransmitIgnoreLinkStatus']
    return self._get_attribute(attribute_key)
@TransmitIgnoreLinkStatus.setter
def TransmitIgnoreLinkStatus(self, value):
    # type: (bool) -> None
    """Set whether the port ignores link status when transmitting."""
    attribute_key = self._SDM_ATT_MAP['TransmitIgnoreLinkStatus']
    self._set_attribute(attribute_key, value)
@property
def TxGapControlMode(self):
    # type: () -> str
    """Controls the Gap Control mode of the port.

    Returns
    -------
    - str(fixedMode | averageMode)
    """
    attribute_key = self._SDM_ATT_MAP['TxGapControlMode']
    return self._get_attribute(attribute_key)
@TxGapControlMode.setter
def TxGapControlMode(self, value):
    # type: (str) -> None
    """Set the Gap Control mode of the port."""
    attribute_key = self._SDM_ATT_MAP['TxGapControlMode']
    self._set_attribute(attribute_key, value)
@property
def TxMode(self):
    # type: () -> str
    """The transmit mode.

    Returns
    -------
    - str(sequential | interleaved | sequentialCoarse | interleavedCoarse | packetImpairment)
    """
    attribute_key = self._SDM_ATT_MAP['TxMode']
    return self._get_attribute(attribute_key)
@TxMode.setter
def TxMode(self, value):
    # type: (str) -> None
    """Set the transmit mode."""
    attribute_key = self._SDM_ATT_MAP['TxMode']
    self._set_attribute(attribute_key, value)
@property
def Type(self):
    # type: () -> str
    """The type of port selection.

    Returns
    -------
    - str(ethernet | ethernetvm | ethernetFcoe | atm | pos | tenGigLan | tenGigLanFcoe | fortyGigLan | fortyGigLanFcoe | tenGigWan | tenGigWanFcoe | hundredGigLan | hundredGigLanFcoe | tenFortyHundredGigLan | tenFortyHundredGigLanFcoe | fc | ethernetImpairment | novusHundredGigLan | novusHundredGigLanFcoe | novusTenGigLan | novusTenGigLanFcoe | krakenFourHundredGigLan | krakenFourHundredGigLanFcoe | aresOneFourHundredGigLan | aresOneFourHundredGigLanFcoe | uhdOneHundredGigLan)
    """
    attribute_key = self._SDM_ATT_MAP['Type']
    return self._get_attribute(attribute_key)
@Type.setter
def Type(self, value):
    # type: (str) -> None
    """Set the type of port selection."""
    attribute_key = self._SDM_ATT_MAP['Type']
    self._set_attribute(attribute_key, value)
@property
def UseGlobalSettings(self):
    # type: () -> bool
    """Enables/Disables use of global settings instead of local settings on port.

    Returns
    -------
    - bool
    """
    attribute_key = self._SDM_ATT_MAP['UseGlobalSettings']
    return self._get_attribute(attribute_key)
@UseGlobalSettings.setter
def UseGlobalSettings(self, value):
    # type: (bool) -> None
    """Enable or disable use of global settings on this port."""
    attribute_key = self._SDM_ATT_MAP['UseGlobalSettings']
    self._set_attribute(attribute_key, value)
@property
def ValidTxModes(self):
    # type: () -> List[str]
    """Transmit modes valid for this port.

    Returns
    -------
    - list(str[interleaved | interleavedCoarse | packetImpairment | sequential | sequentialCoarse])
    """
    attribute_key = self._SDM_ATT_MAP['ValidTxModes']
    return self._get_attribute(attribute_key)
def update(self, ConnectedTo=None, IsPullOnly=None, Location=None, Name=None, RxMode=None, TraceEnabled=None, TraceLevel=None, TraceTag=None, TransmitIgnoreLinkStatus=None, TxGapControlMode=None, TxMode=None, Type=None, UseGlobalSettings=None):
    # type: (str, bool, str, str, str, bool, str, str, bool, str, str, str, bool) -> Vport
    """Pushes changed attributes of this vport resource to the server.

    Only the arguments that are supplied are sent; the rest stay untouched.

    Args
    ----
    - ConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/availableHardware/.../port)): The physical port to which the unassigned port is assigned.
    - IsPullOnly (bool): (This action only affects assigned ports.) Temporarily sets the port as an Unassigned Port; used to pull a Tcl-script or IxExplorer port file configuration into IxNetwork.
    - Location (str): The current format is {chassisIp}/{frontPanelPort}.{fanoutPort} or {chassisIp};{cardId};{portId} for legacy systems.
    - Name (str): The description of the port: (1) For an assigned port: (Port type) (card no.): (port no.) - (chassis name or IP). (2) For an (unassigned) port configuration: (Port type) Port 00x.
    - RxMode (str(capture | measure | captureAndMeasure | packetImpairment)): The receive mode of the virtual port.
    - TraceEnabled (bool): Enables/Disables rpf port trace for this port
    - TraceLevel (str(kCritical | kDebug | kError | kInfo | kNote | kTrace | kWarning)): PCPU Trace level
    - TraceTag (str): PCPU Trace Tag
    - TransmitIgnoreLinkStatus (bool): If true, the port ignores the link status when transmitting data.
    - TxGapControlMode (str(fixedMode | averageMode)): This object controls the Gap Control mode of the port.
    - TxMode (str(sequential | interleaved | sequentialCoarse | interleavedCoarse | packetImpairment)): The transmit mode.
    - Type (str(ethernet | ethernetvm | ethernetFcoe | atm | pos | tenGigLan | tenGigLanFcoe | fortyGigLan | fortyGigLanFcoe | tenGigWan | tenGigWanFcoe | hundredGigLan | hundredGigLanFcoe | tenFortyHundredGigLan | tenFortyHundredGigLanFcoe | fc | ethernetImpairment | novusHundredGigLan | novusHundredGigLanFcoe | novusTenGigLan | novusTenGigLanFcoe | krakenFourHundredGigLan | krakenFourHundredGigLanFcoe | aresOneFourHundredGigLan | aresOneFourHundredGigLanFcoe | uhdOneHundredGigLan)): The type of port selection.
    - UseGlobalSettings (bool): Enables/Disables use of global settings instead of local settings on port

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() must be captured before any other local is bound so that
    # only the method parameters are mapped against the SDM attribute map.
    mapped_attributes = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._update(mapped_attributes)
def add(self, ConnectedTo=None, IsPullOnly=None, Location=None, Name=None, RxMode=None, TraceEnabled=None, TraceLevel=None, TraceTag=None, TransmitIgnoreLinkStatus=None, TxGapControlMode=None, TxMode=None, Type=None, UseGlobalSettings=None):
    # type: (str, bool, str, str, str, bool, str, str, bool, str, str, str, bool) -> Vport
    """Creates a new vport resource on the server and adds it to this container.

    Args
    ----
    - ConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/availableHardware/.../port)): The physical port to which the unassigned port is assigned.
    - IsPullOnly (bool): (This action only affects assigned ports.) Temporarily sets the port as an Unassigned Port; used to pull a Tcl-script or IxExplorer port file configuration into IxNetwork.
    - Location (str): The current format is {chassisIp}/{frontPanelPort}.{fanoutPort} or {chassisIp};{cardId};{portId} for legacy systems.
    - Name (str): The description of the port: (1) For an assigned port: (Port type) (card no.): (port no.) - (chassis name or IP). (2) For an (unassigned) port configuration: (Port type) Port 00x.
    - RxMode (str(capture | measure | captureAndMeasure | packetImpairment)): The receive mode of the virtual port.
    - TraceEnabled (bool): Enables/Disables rpf port trace for this port
    - TraceLevel (str(kCritical | kDebug | kError | kInfo | kNote | kTrace | kWarning)): PCPU Trace level
    - TraceTag (str): PCPU Trace Tag
    - TransmitIgnoreLinkStatus (bool): If true, the port ignores the link status when transmitting data.
    - TxGapControlMode (str(fixedMode | averageMode)): This object controls the Gap Control mode of the port.
    - TxMode (str(sequential | interleaved | sequentialCoarse | interleavedCoarse | packetImpairment)): The transmit mode.
    - Type (str(ethernet | ethernetvm | ethernetFcoe | atm | pos | tenGigLan | tenGigLanFcoe | fortyGigLan | fortyGigLanFcoe | tenGigWan | tenGigWanFcoe | hundredGigLan | hundredGigLanFcoe | tenFortyHundredGigLan | tenFortyHundredGigLanFcoe | fc | ethernetImpairment | novusHundredGigLan | novusHundredGigLanFcoe | novusTenGigLan | novusTenGigLanFcoe | krakenFourHundredGigLan | krakenFourHundredGigLanFcoe | aresOneFourHundredGigLan | aresOneFourHundredGigLanFcoe | uhdOneHundredGigLan)): The type of port selection.
    - UseGlobalSettings (bool): Enables/Disables use of global settings instead of local settings on port

    Returns
    -------
    - self: This instance with all currently retrieved vport resources using find and the newly added vport resources available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() must be captured before any other local is bound so that
    # only the method parameters are mapped against the SDM attribute map.
    mapped_attributes = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._create(mapped_attributes)
def remove(self):
    """Removes every contained vport resource of this instance from the server.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    self._delete()
def find(self, ActualSpeed=None, AdminMode=None, AssignedTo=None, AssignedToDisplayName=None, CaptureSupported=None, ConnectedTo=None, ConnectionInfo=None, ConnectionState=None, ConnectionStatus=None, ConnectionStatusDisplayName=None, DpdkPerformanceAcceleration=None, InternalId=None, IsAvailable=None, IsConnected=None, IsFramePreemptionSupported=None, IsMapped=None, IsPullOnly=None, IsVMPort=None, IxnChassisVersion=None, IxnClientVersion=None, IxosChassisVersion=None, Licenses=None, Location=None, Name=None, ResourceMode=None, RxMode=None, State=None, StateDetail=None, TraceEnabled=None, TraceLevel=None, TraceTag=None, TransmitIgnoreLinkStatus=None, TxGapControlMode=None, TxMode=None, Type=None, UseGlobalSettings=None, ValidTxModes=None):
    # type: (int, str, str, str, str, str, str, str, str, str, str, int, bool, bool, bool, bool, bool, bool, str, str, str, str, str, str, str, str, str, str, bool, str, str, bool, str, str, str, bool, List[str]) -> Vport
    """Finds and retrieves vport resources from the server.

    Every named parameter is evaluated server-side as a regex and used to
    filter the result set. For an exact match make the value start with ^
    and end with $. Called with no parameters, all vport resources are
    retrieved.

    Args
    ----
    - ActualSpeed (number): The actual speed.
    - AdminMode (str):
    - AssignedTo (str): (Read Only) A new port is assigned with this option.
    - AssignedToDisplayName (str):
    - CaptureSupported (str(data | control | dataAndControl | none)):
    - ConnectedTo (str(None | /api/v1/sessions/1/ixnetwork/availableHardware/.../port)): The physical port to which the unassigned port is assigned.
    - ConnectionInfo (str): Detailed information about location of the physical port that is assigned to this port configuration.
    - ConnectionState (str(assignedInUseByOther | assignedUnconnected | connectedLinkDown | connectedLinkUp | connecting | unassigned)): Consolidated state of the vport. This combines the connection state with link state.
    - ConnectionStatus (str): A string describing the status of the hardware connected to this vport
    - ConnectionStatusDisplayName (str):
    - DpdkPerformanceAcceleration (str):
    - InternalId (number): For internal use.
    - IsAvailable (bool): If true, this virtual port is available for assigning to a physical port.
    - IsConnected (bool): If true, indicates that the port is connected.
    - IsFramePreemptionSupported (bool):
    - IsMapped (bool): If true, this virtual port is mapped.
    - IsPullOnly (bool): (This action only affects assigned ports.) Temporarily sets the port as an Unassigned Port; used to pull a Tcl-script or IxExplorer port file configuration into IxNetwork.
    - IsVMPort (bool): If true the hardware connected to this vport is a virtual machine port
    - IxnChassisVersion (str): (Read Only) If true, the installer installs the same resources as installed by the IxNetwork Full installer/IxNetwork Chassis installer on chassis.
    - IxnClientVersion (str): (Read Only) If true, this installs full client side IxNetwork or IxNetwork-FT components.
    - IxosChassisVersion (str): (Read Only) If true, the installer installs the same resources as installed by IxOS on a chassis.
    - Licenses (str): Number of licenses.
    - Location (str): The current format is {chassisIp}/{frontPanelPort}.{fanoutPort} or {chassisIp};{cardId};{portId} for legacy systems.
    - Name (str): The description of the port: (1) For an assigned port: (Port type) (card no.): (port no.) - (chassis name or IP). (2) For an (unassigned) port configuration: (Port type) Port 00x.
    - ResourceMode (str):
    - RxMode (str(capture | measure | captureAndMeasure | packetImpairment)): The receive mode of the virtual port.
    - State (str(busy | down | unassigned | up | versionMismatch)): The virtual port state.
    - StateDetail (str(busy | cpuNotReady | idle | inActive | l1ConfigFailed | protocolsNotSupported | versionMismatched | waitingForCPUStatus)): This attribute describes the state of the port.
    - TraceEnabled (bool): Enables/Disables rpf port trace for this port
    - TraceLevel (str(kCritical | kDebug | kError | kInfo | kNote | kTrace | kWarning)): PCPU Trace level
    - TraceTag (str): PCPU Trace Tag
    - TransmitIgnoreLinkStatus (bool): If true, the port ignores the link status when transmitting data.
    - TxGapControlMode (str(fixedMode | averageMode)): This object controls the Gap Control mode of the port.
    - TxMode (str(sequential | interleaved | sequentialCoarse | interleavedCoarse | packetImpairment)): The transmit mode.
    - Type (str(ethernet | ethernetvm | ethernetFcoe | atm | pos | tenGigLan | tenGigLanFcoe | fortyGigLan | fortyGigLanFcoe | tenGigWan | tenGigWanFcoe | hundredGigLan | hundredGigLanFcoe | tenFortyHundredGigLan | tenFortyHundredGigLanFcoe | fc | ethernetImpairment | novusHundredGigLan | novusHundredGigLanFcoe | novusTenGigLan | novusTenGigLanFcoe | krakenFourHundredGigLan | krakenFourHundredGigLanFcoe | aresOneFourHundredGigLan | aresOneFourHundredGigLanFcoe | uhdOneHundredGigLan)): The type of port selection.
    - UseGlobalSettings (bool): Enables/Disables use of global settings instead of local settings on port
    - ValidTxModes (list(str[interleaved | interleavedCoarse | packetImpairment | sequential | sequentialCoarse])):

    Returns
    -------
    - self: This instance with matching vport resources retrieved from the server available through an iterator or index

    Raises
    ------
    - ServerError: The server has encountered an uncategorized error condition
    """
    # locals() must be captured before any other local is bound so that
    # only the method parameters are mapped against the SDM attribute map.
    mapped_attributes = self._map_locals(self._SDM_ATT_MAP, locals())
    return self._select(mapped_attributes)
def read(self, href):
    """Fetches a single instance of vport data from the server.

    Args
    ----
    - href (str): An href to the instance to be retrieved

    Returns
    -------
    - self: This instance with the vport resources from the server available through an iterator or index

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    return self._read(href)
def AddQuickFlowGroups(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the addQuickFlowGroups operation on the server.

    Add quick flow traffic items to the configuration.

    addQuickFlowGroups(Arg2=number, async_operation=bool)
    -----------------------------------------------------
    - Arg2 (number): The number of quick flow groups to add.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('addQuickFlowGroups', payload=payload, response_object=None)
def AssignPorts(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[List[str], None]
    """Executes the assignPorts operation on the server.

    Assign hardware ports to virtual ports using port display names. Connects
    all the ports in the list provided using their location attribute. Takes a
    bool as input which says whether ClearOwnership is required.

    assignPorts(Arg2=bool, async_operation=bool)list
    ------------------------------------------------
    - Arg2 (bool): If true, it will clear ownership on the hardware ports which have location attribute set.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns list(str[None | /api/v1/sessions/1/ixnetwork/vport]): Returns a list of virtual port object references that were successfully connected.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('assignPorts', payload=payload, response_object=None)
def ClearNeighborSolicitation(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[bool, None]
    """Executes the clearNeighborSolicitation operation on the server.

    NOT DEFINED

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    clearNeighborSolicitation(async_operation=bool)
    -----------------------------------------------
    This function signature is used when there is a list of vports
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    clearNeighborSolicitation(async_operation=bool)bool
    ---------------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns bool: NOT DEFINED

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('clearNeighborSolicitation', payload=payload, response_object=None)
def ClearNeighborTable(self, *args, **kwargs):
    # type: (*Any, **Any) -> Union[bool, None]
    """Executes the clearNeighborTable operation on the server.

    This exec clears the learned neighbor table for the specified vport.

    clearNeighborTable(async_operation=bool)bool
    --------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
    - Returns bool: NOT DEFINED

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # This exec targets a single vport, so Arg1 is the href (not the object).
    payload = {'Arg1': self.href}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('clearNeighborTable', payload=payload, response_object=None)
def ClearPortTransmitDuration(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the clearPortTransmitDuration operation on the server.

    Clear the port transmit duration.

    clearPortTransmitDuration(async_operation=bool)
    -----------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('clearPortTransmitDuration', payload=payload, response_object=None)
def ConnectPort(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the connectPort operation on the server.

    Connect a list of ports.

    connectPort(async_operation=bool)
    ---------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('connectPort', payload=payload, response_object=None)
def ConnectPorts(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the connectPorts operation on the server.

    Connect a list of ports.

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    connectPorts(async_operation=bool)
    ----------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    connectPorts(Arg2=bool, async_operation=bool)
    ---------------------------------------------
    - Arg2 (bool): a boolean indicating if ownership should be taken forcefully
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('connectPorts', payload=payload, response_object=None)
def CopyTapSettings(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the copyTapSettings operation on the server.

    It will copy the values from a port to the given ports.

    copyTapSettings(Arg2=list, async_operation=bool)
    ------------------------------------------------
    - Arg2 (list(str[None | /api/v1/sessions/1/ixnetwork/vport])):
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # This exec targets a single vport, so Arg1 is the href (not the object).
    payload = {'Arg1': self.href}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('copyTapSettings', payload=payload, response_object=None)
def DeleteCustomDefaults(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the deleteCustomDefaults operation on the server.

    It will delete custom defaults for the given ports.

    deleteCustomDefaults(async_operation=bool)
    ------------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('deleteCustomDefaults', payload=payload, response_object=None)
def EnableOAM(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the enableOAM operation on the server.

    Enable/Disable OAM on a list of ports.

    enableOAM(Arg2=bool, async_operation=bool)
    ------------------------------------------
    - Arg2 (bool): If true, it will enable OAM. Otherwise, it will disable OAM.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('enableOAM', payload=payload, response_object=None)
def GetTapSettings(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the getTapSettings operation on the server.

    Get TAP Settings for the given ports.

    getTapSettings(async_operation=bool)
    ------------------------------------
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('getTapSettings', payload=payload, response_object=None)
def IgmpJoin(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the igmpJoin operation on the server.

    NOT DEFINED

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    igmpJoin(Arg2=string, async_operation=bool)
    -------------------------------------------
    - Arg2 (str): NOT DEFINED
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    igmpJoin(Arg2=string, Arg3=number, async_operation=bool)
    --------------------------------------------------------
    - Arg2 (str): NOT DEFINED
    - Arg3 (number): NOT DEFINED
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # This exec targets a single vport, so Arg1 is the href (not the object).
    payload = {'Arg1': self.href}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('igmpJoin', payload=payload, response_object=None)
def IgmpLeave(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the igmpLeave operation on the server.

    NOT DEFINED

    The IxNetwork model allows for multiple method Signatures with the same name while python does not.

    igmpLeave(Arg2=string, async_operation=bool)
    --------------------------------------------
    - Arg2 (str): NOT DEFINED
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    igmpLeave(Arg2=string, Arg3=number, async_operation=bool)
    ---------------------------------------------------------
    - Arg2 (str): NOT DEFINED
    - Arg3 (number): NOT DEFINED
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # This exec targets a single vport, so Arg1 is the href (not the object).
    payload = {'Arg1': self.href}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('igmpLeave', payload=payload, response_object=None)
def Import(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the import operation on the server.

    Imports the port file (also supports legacy port files).

    import(Arg2=href, async_operation=bool)
    ---------------------------------------
    - Arg2 (obj(ixnetwork_restpy.files.Files)): The file to be imported.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    # This exec targets a single vport, so Arg1 is the href (not the object).
    payload = {'Arg1': self.href}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('import', payload=payload, response_object=None)
def LinkUpDn(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the linkUpDn operation on the server.

    Simulate port link up/down.

    linkUpDn(Arg2=enum, async_operation=bool)
    -----------------------------------------
    - Arg2 (str(down | up)): A valid enum value as specified by the restriction.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('linkUpDn', payload=payload, response_object=None)
def PauseStatelessTraffic(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Executes the pauseStatelessTraffic operation on the server.

    Pause or Resume stateless traffic.

    pauseStatelessTraffic(Arg2=bool, async_operation=bool)
    ------------------------------------------------------
    - Arg2 (bool): If true, it will pause running traffic. If false, it will resume previously paused traffic.
    - async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.

    Raises
    ------
    - NotFoundError: The requested resource does not exist on the server
    - ServerError: The server has encountered an uncategorized error condition
    """
    payload = {'Arg1': self}
    # Positional args become Arg2, Arg3, ... in call order.
    for index, value in enumerate(args, start=2):
        payload['Arg%s' % index] = value
    # Keyword args (e.g. async_operation) are merged verbatim.
    payload.update(kwargs)
    return self._execute('pauseStatelessTraffic', payload=payload, response_object=None)
def PullPort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the pullPort operation on the server.
Pulls config onto vport or group of vports.
pullPort(async_operation=bool)
------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('pullPort', payload=payload, response_object=None)
def RefreshUnresolvedNeighbors(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the refreshUnresolvedNeighbors operation on the server.
Refresh unresolved neighbours.
refreshUnresolvedNeighbors(async_operation=bool)bool
----------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('refreshUnresolvedNeighbors', payload=payload, response_object=None)
def ReleaseCapturePorts(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the releaseCapturePorts operation on the server.
Release capture buffer from a list of ports.
releaseCapturePorts(async_operation=bool)
-----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('releaseCapturePorts', payload=payload, response_object=None)
def ReleasePort(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the releasePort operation on the server.
Release a hardware port.
releasePort(async_operation=bool)
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('releasePort', payload=payload, response_object=None)
def ResetPortCpu(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the resetPortCpu operation on the server.
Reboot port CPU.
resetPortCpu(async_operation=bool)
----------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resetPortCpu', payload=payload, response_object=None)
def ResetPortCpuAndFactoryDefault(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the resetPortCpuAndFactoryDefault operation on the server.
Reboots the port CPU and restores the default settings.
resetPortCpuAndFactoryDefault(async_operation=bool)
---------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('resetPortCpuAndFactoryDefault', payload=payload, response_object=None)
def RestartPppNegotiation(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restartPppNegotiation operation on the server.
Restarts the PPP negotiation on the port.
restartPppNegotiation(async_operation=bool)
-------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartPppNegotiation', payload=payload, response_object=None)
def RestoreCustomDefaults(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restoreCustomDefaults operation on the server.
It will restore custom defaults for the given ports.
restoreCustomDefaults(async_operation=bool)
-------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restoreCustomDefaults', payload=payload, response_object=None)
def RestoreDefaults(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the restoreDefaults operation on the server.
Restore the default values for the given ports.
restoreDefaults(async_operation=bool)
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restoreDefaults', payload=payload, response_object=None)
def SaveCustomDefaults(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the saveCustomDefaults operation on the server.
It will save custom defaults for the given ports.
saveCustomDefaults(async_operation=bool)
----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('saveCustomDefaults', payload=payload, response_object=None)
def SendArp(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the sendArp operation on the server.
NOT DEFINED
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
sendArp(async_operation=bool)bool
---------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
sendArp(Arg2=list, async_operation=bool)bool
--------------------------------------------
- Arg2 (list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../interface])): NOT DEFINED
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendArp', payload=payload, response_object=None)
def SendArpAll(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the sendArpAll operation on the server.
NOT DEFINED
sendArpAll(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendArpAll', payload=payload, response_object=None)
def SendNs(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the sendNs operation on the server.
NOT DEFINED
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
sendNs(async_operation=bool)bool
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
sendNs(Arg2=list, async_operation=bool)bool
-------------------------------------------
- Arg2 (list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../interface])): NOT DEFINED
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendNs', payload=payload, response_object=None)
def SendNsAll(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the sendNsAll operation on the server.
NOT DEFINED
sendNsAll(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendNsAll', payload=payload, response_object=None)
def SendRs(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the sendRs operation on the server.
NOT DEFINED
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
sendRs(async_operation=bool)bool
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
sendRs(Arg2=list, async_operation=bool)bool
-------------------------------------------
- Arg2 (list(str[None | /api/v1/sessions/1/ixnetwork/vport/.../interface])): NOT DEFINED
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool: NOT DEFINED
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendRs', payload=payload, response_object=None)
def SendRsAll(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the sendRsAll operation on the server.
NOT DEFINED
sendRsAll(async_operation=bool)
-------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('sendRsAll', payload=payload, response_object=None)
def SetFactoryDefaults(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the setFactoryDefaults operation on the server.
Set default values for port settings.
setFactoryDefaults(async_operation=bool)
----------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('setFactoryDefaults', payload=payload, response_object=None)
def SetTapSettings(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the setTapSettings operation on the server.
Send TAP Settings to IxServer for the given ports.
setTapSettings(async_operation=bool)
------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('setTapSettings', payload=payload, response_object=None)
def StartStatelessTraffic(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the startStatelessTraffic operation on the server.
Start the traffic configuration for stateless traffic items only.
startStatelessTraffic(async_operation=bool)
-------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('startStatelessTraffic', payload=payload, response_object=None)
def StartStatelessTrafficBlocking(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the startStatelessTrafficBlocking operation on the server.
Start the traffic configuration for stateless traffic items only. This will block until traffic is fully started.
startStatelessTrafficBlocking(async_operation=bool)
---------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('startStatelessTrafficBlocking', payload=payload, response_object=None)
def StopStatelessTraffic(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stopStatelessTraffic operation on the server.
Stop the stateless traffic items.
stopStatelessTraffic(async_operation=bool)
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopStatelessTraffic', payload=payload, response_object=None)
def StopStatelessTrafficBlocking(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stopStatelessTrafficBlocking operation on the server.
Stop the traffic configuration for stateless traffic items only. This will block until traffic is fully stopped.
stopStatelessTrafficBlocking(async_operation=bool)
--------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stopStatelessTrafficBlocking', payload=payload, response_object=None)
def SwitchMode(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the switchMode operation on the server.
Switches the port mode. Takes vports as input.
switchMode(Arg2=list, Arg3=bool, async_operation=bool)string
------------------------------------------------------------
- Arg2 (list(str)): List of valid Modes
- Arg3 (bool): If true, it will clear ownership on the hardware ports for which mode switch is being done.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: Warning Messages
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('switchMode', payload=payload, response_object=None)
def UnassignPorts(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the unassignPorts operation on the server.
Unassign hardware ports.
unassignPorts(Arg2=bool, async_operation=bool)
----------------------------------------------
- Arg2 (bool): If true, virtual ports will be deleted.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('unassignPorts', payload=payload, response_object=None)
| 47.672024
| 753
| 0.638465
|
4a1a3cbec08e6ba469abc1791a94cfcdd24aecc6
| 1,982
|
py
|
Python
|
app/utils/bangs.py
|
Y50ES/whoogle-search
|
1af4566991f5248fd0d3576ca4fbcc0a84293004
|
[
"MIT"
] | 1
|
2022-02-19T19:56:37.000Z
|
2022-02-19T19:56:37.000Z
|
app/utils/bangs.py
|
Y50ES/whoogle-search
|
1af4566991f5248fd0d3576ca4fbcc0a84293004
|
[
"MIT"
] | null | null | null |
app/utils/bangs.py
|
Y50ES/whoogle-search
|
1af4566991f5248fd0d3576ca4fbcc0a84293004
|
[
"MIT"
] | null | null | null |
import json
import requests
DDG_BANGS = 'https://duckduckgo.com/bang.v255.js'
def gen_bangs_json(bangs_file: str) -> None:
    """Generates a json file from the DDG bangs list

    Args:
        bangs_file: The str path to the new DDG bangs json file

    Returns:
        None

    Raises:
        SystemExit: if the DDG bangs list cannot be fetched
    """
    try:
        # Request full list from DDG
        r = requests.get(DDG_BANGS)
        r.raise_for_status()
    except requests.exceptions.HTTPError as err:
        raise SystemExit(err)

    # Convert to json
    data = json.loads(r.text)

    # Set up a json object (with better formatting) for all available bangs
    bangs_data = {}

    for row in data:
        # DDG stores the trigger without the leading '!'
        bang_command = '!' + row['t']
        bangs_data[bang_command] = {
            # DDG urls embed the query as '{{{s}}}'; normalize to '{}'
            'url': row['u'].replace('{{{s}}}', '{}'),
            'suggestion': bang_command + ' (' + row['s'] + ')'
        }

    # Bug fix: the previous json.dump(..., open(...)) leaked the file handle;
    # a context manager guarantees it is flushed and closed.
    with open(bangs_file, 'w') as outfile:
        json.dump(bangs_data, outfile)
    print('* Finished creating ddg bangs json')
def resolve_bang(query: str, bangs_dict: dict) -> str:
    """Transform's a user's query to a bang search, if an operator is found

    Args:
        query: The search query
        bangs_dict: The dict of available bang operators, with corresponding
                    format string search URLs
                    (i.e. "!w": "https://en.wikipedia.org...?search={}")

    Returns:
        str: A formatted redirect for a bang search, or an empty str if there
             wasn't a match or didn't contain a bang operator
    """
    # Bang matching is case insensitive
    normalized = query.lower()
    words = normalized.split(' ')

    for bang, config in bangs_dict.items():
        # Accept both prefix form ("!w") and suffix form ("w!")
        flipped = bang[1:] + bang[0]
        if bang in words:
            token = bang
        elif flipped in words:
            token = flipped
        else:
            continue

        # Strip the operator out of the query and substitute the remainder
        # into the bang's URL template (first '{}' only)
        search_terms = normalized.replace(token, '').strip()
        return config['url'].replace('{}', search_terms, 1)

    return ''
| 29.58209
| 77
| 0.58779
|
4a1a3d238f4d58e50b116bc0ea7884f10819c9dc
| 628
|
py
|
Python
|
src/repository.py
|
JoseTorquato/crypto-report
|
3e53bd630c66bd27b85c7da52292c03af672d9fe
|
[
"MIT"
] | null | null | null |
src/repository.py
|
JoseTorquato/crypto-report
|
3e53bd630c66bd27b85c7da52292c03af672d9fe
|
[
"MIT"
] | null | null | null |
src/repository.py
|
JoseTorquato/crypto-report
|
3e53bd630c66bd27b85c7da52292c03af672d9fe
|
[
"MIT"
] | null | null | null |
from src.config.client import ClientPancakeSwap
from src.services.token import Token
class Repository:
    """Builds a textual report for a list of PancakeSwap token holdings."""

    @classmethod
    def parse(cls, list_address):
        """Fetch each token in *list_address* and concatenate their summaries.

        Args:
            list_address: iterable of dicts, each with an 'address' and a
                'quantity' key.

        Returns:
            str: the concatenated string form of every successfully fetched
            token (empty string when none succeeded). Failed lookups are
            printed and skipped.
        """
        message = ""
        for obj in list_address:
            response = ClientPancakeSwap().get_token(obj["address"])
            if response["status"] == 200:
                token = Token(response["body"]["name"],
                              response["body"]["symbol"],
                              response["body"]["price"],
                              response["body"]["price_BNB"],
                              obj["quantity"])
                # str(token) is the idiomatic spelling of token.__str__()
                message += str(token)
            else:
                # Best-effort: report the error body and continue
                print(response["body"])
        print(message)
        return message
| 34.888889
| 158
| 0.590764
|
4a1a3d98c8536cfdb53fb681e7faafc091f3250f
| 2,697
|
py
|
Python
|
contact_test.py
|
petersoleeh/Contact-list
|
54606f860496a90b6d1c93ab501c6f4d39676c99
|
[
"MIT"
] | null | null | null |
contact_test.py
|
petersoleeh/Contact-list
|
54606f860496a90b6d1c93ab501c6f4d39676c99
|
[
"MIT"
] | null | null | null |
contact_test.py
|
petersoleeh/Contact-list
|
54606f860496a90b6d1c93ab501c6f4d39676c99
|
[
"MIT"
] | null | null | null |
import unittest # Importing the uniitest module
import pyperclip
from contact import Contact # import the Contact class
class TestContact(unittest.TestCase):
    """Test suite for the Contact class."""

    def setUp(self):
        # Fresh primary contact for every test
        self.new_contact = Contact(
            "Peter", "Maina", "072012131", "petersoleeh@yahoo.com")  # create contact object

    def tearDown(self):
        # Reset the class-level store so each test starts from a clean slate
        Contact.contact_list = []

    def _save_second_contact(self):
        """Save and return the secondary fixture contact shared by several tests.

        Extracted to remove the identical four-line construction that was
        duplicated across multiple test methods.
        """
        test_contact = Contact("Test", "user", "0712345678",
                               "test@user.com")  # new contact_list
        test_contact.save_contact()
        return test_contact

    def test_init(self):
        self.assertEqual(self.new_contact.first_name, "Peter")
        self.assertEqual(self.new_contact.last_name, "Maina")
        self.assertEqual(self.new_contact.phone_number, "072012131")
        self.assertEqual(self.new_contact.email, "petersoleeh@yahoo.com")

    def test_save_contact(self):
        self.new_contact.save_contact()
        self.assertEqual(len(Contact.contact_list), 1)

    def test_save_multiple_contact(self):
        self.new_contact.save_contact()
        self._save_second_contact()
        self.assertEqual(len(Contact.contact_list), 2)

    def test_delete_contact(self):
        self.new_contact.save_contact()
        self._save_second_contact()
        self.new_contact.delete_contact()  # delete contact
        self.assertEqual(len(Contact.contact_list), 1)

    def test_find_contact_by_number(self):
        # test to check if we can find a contact by phone number
        self.new_contact.save_contact()
        test_contact = self._save_second_contact()
        found_contact = Contact.find_by_number("0712345678")
        self.assertEqual(found_contact.email, test_contact.email)

    def test_contact_exists(self):
        self.new_contact.save_contact()
        self._save_second_contact()
        contact_exists = Contact.contact_exist("0712345678")
        self.assertTrue(contact_exists)

    def test_display_all_contacts(self):
        self.assertEqual(Contact.display_contacts(), Contact.contact_list)

    def test_copy_email(self):
        # test to confirm we are copying the email address from a found contact
        self.new_contact.save_contact()
        Contact.copy_email("072012131")
        self.assertEqual(self.new_contact.email, pyperclip.paste())
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 34.139241
| 92
| 0.651835
|
4a1a3dfcee5b95524324eba86f33d72496ff73ae
| 9,537
|
py
|
Python
|
sdk/tools/flash_writer/scripts/flash_writer.py
|
EthanZhao802738/spresense
|
5e8abfc8096283fcf6eaa2d574a29cbd2c6a88e6
|
[
"Apache-2.0"
] | 110
|
2018-07-12T16:04:50.000Z
|
2022-02-26T12:27:56.000Z
|
sdk/tools/flash_writer/scripts/flash_writer.py
|
EthanZhao802738/spresense
|
5e8abfc8096283fcf6eaa2d574a29cbd2c6a88e6
|
[
"Apache-2.0"
] | 37
|
2018-08-10T13:05:45.000Z
|
2022-03-18T20:33:18.000Z
|
sdk/tools/flash_writer/scripts/flash_writer.py
|
EthanZhao802738/spresense
|
5e8abfc8096283fcf6eaa2d574a29cbd2c6a88e6
|
[
"Apache-2.0"
] | 94
|
2018-07-13T03:48:34.000Z
|
2022-03-19T07:32:08.000Z
|
#! /usr/bin/env python3
__copyright__ = ['Copyright (C) 2018, 2021 Sony Semiconductor Solutions Corp.']
__license__ = 'LGPL v2.1'
import time
import sys
import os
import struct
import glob
import fnmatch
import errno
import argparse
import shutil
import subprocess
import re
import xmodem
# True when the pyserial module could be imported on this host; SerialDev
# checks this flag and prints install instructions when it is False.
import_serial_module = True

# When releasing the SDK, set SDK_RELEASE to True.
SDK_RELEASE = False

if SDK_RELEASE:
    PRINT_RAW_COMMAND = False
    REBOOT_AT_END = True
else:
    PRINT_RAW_COMMAND = True
    REBOOT_AT_END = True

try:
    import serial
except ImportError:
    # Bug fix: was a bare "except:", which also swallowed KeyboardInterrupt
    # and SystemExit; only a failed import should flip the flag.
    import_serial_module = False

# Transport selector value for serial (currently the only transport).
PROTOCOL_SERIAL = 0

# Width of the '#' progress bar printed during XMODEM transfer.
MAX_DOT_COUNT = 70
# configure parameters and default value
class ConfigArgs:
    # Class-level option holder: attributes are overwritten in place by
    # ConfigArgsLoader.update_config() from the parsed command line.
    PROTOCOL_TYPE = PROTOCOL_SERIAL   # transport used for binary transmission
    SERIAL_PORT = "COM1"              # default serial device name
    EOL = bytes([10])                 # line terminator sent to the target (LF)
    DTR_RESET = False                 # auto-reset the board via DTR when True
    XMODEM_BAUD = 0                   # non-zero: faster baudrate during XMODEM
    NO_SET_BOOTABLE = False           # skip marking the package bootable
    PACKAGE_NAME = []                 # package files to install
    ERASE_NAME = []                   # file names to erase on the target
# Boot ROM banner(s) that mark a target reset; matched against raw serial
# input by FlashWriter.cancel_autoboot().
ROM_MSG = [b"Welcome to nash"]
# Prompt the target prints when it is ready for an XMODEM transfer;
# suppressed from the echoed output in FlashWriter.recv().
XMDM_MSG = "Waiting for XMODEM (CRC or 1K) transfer. Ctrl-X to cancel."
class ConfigArgsLoader():
    """Parses the command line and stores the results into ConfigArgs."""

    def __init__(self):
        self.parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
        self.parser.add_argument("package_name", help="the name of the package to install", nargs='*')
        self.parser.add_argument("-e", "--erase", dest="erase_name", help="erase file", action='append')
        self.parser.add_argument("-d", "--dtr-reset", dest="dtr_reset",
                                 action="store_true", default=None,
                                 help="try to auto reset develop board if possible")
        self.parser.add_argument("-n", "--no-set-bootable", dest="no_set_bootable",
                                 action="store_true", default=None,
                                 help="not to set bootable")
        group = self.parser.add_argument_group()
        group.add_argument("-c", "--serial-port", dest="serial_port", help="the serial port")
        group.add_argument("-b", "--xmodem-baudrate", dest="xmodem_baud", help="Use the faster baudrate in xmodem")
        mutually_group = self.parser.add_mutually_exclusive_group()
        mutually_group.add_argument("-s", "--serial-protocol", dest="serial_protocol",
                                    action="store_true", default=None,
                                    help="use the serial port for binary transmission, default options")

    def update_config(self):
        """Parse sys.argv and copy the chosen options onto ConfigArgs."""
        args = self.parser.parse_args()

        ConfigArgs.PACKAGE_NAME = args.package_name
        ConfigArgs.ERASE_NAME = args.erase_name

        # store_true with default=None yields either True or None, so a plain
        # truthiness test is the idiomatic check (was "== True").
        if args.serial_protocol:
            ConfigArgs.PROTOCOL_TYPE = PROTOCOL_SERIAL

        # Default to the serial protocol when nothing selected one
        # ("is None" rather than "== None", per PEP 8).
        if ConfigArgs.PROTOCOL_TYPE is None:
            ConfigArgs.PROTOCOL_TYPE = PROTOCOL_SERIAL

        if ConfigArgs.PROTOCOL_TYPE == PROTOCOL_SERIAL:
            if args.serial_port is not None:
                ConfigArgs.SERIAL_PORT = args.serial_port
            if args.xmodem_baud is not None:
                ConfigArgs.XMODEM_BAUD = args.xmodem_baud

        if args.dtr_reset is not None:
            ConfigArgs.DTR_RESET = args.dtr_reset
        if args.no_set_bootable is not None:
            ConfigArgs.NO_SET_BOOTABLE = args.no_set_bootable
class SerialDev:
    """Thin wrapper around a pyserial port used to talk to the target board.

    Also implements the getc/putc interface expected by the xmodem module
    and a '#' progress bar for transfers.
    """

    def __init__(self):
        # Bail out with install instructions when pyserial is unavailable.
        if import_serial_module is False:
            print("Cannot import serial module, maybe it's not install yet.")
            print("\n", end="")
            print("Please install python-setuptool by Cygwin installer.")
            print("After that use easy_intall command to install serial module")
            print(" $ cd tool/")
            print(" $ python3 -m easy_install pyserial-2.7.tar.gz")
            quit()
        else:
            port = ConfigArgs.SERIAL_PORT
            try:
                # Short read timeout (30 ms) keeps the readline polling loops
                # responsive while waiting for the target.
                self.serial = serial.Serial(port, baudrate=115200,
                    parity=serial.PARITY_NONE, stopbits=serial.STOPBITS_ONE,
                    bytesize=serial.EIGHTBITS, timeout=0.03)
            except Exception as e:
                print("Cannot open port : " + port)
                sys.exit(e.args[0])

    def readline(self, size=None):
        # Read one line (or up to *size* bytes) from the target.
        return self.serial.readline(size)

    def write(self, buffer):
        # Write and flush so the target sees the bytes immediately.
        self.serial.write(buffer)
        self.serial.flush()

    def discard_inputs(self, timeout=1.0):
        # Let pending target output arrive, then drop whatever is buffered.
        time.sleep(timeout)
        self.serial.flushInput()

    def getc(self, size, timeout=1):
        # xmodem receive callback: temporarily widen the timeout for the read,
        # then restore the short polling timeout.
        self.serial.timeout = timeout
        c = self.serial.read(size)
        self.serial.timeout = 0.1
        return c

    def putc(self, buffer, timeout=1):
        # xmodem send callback; also advances the progress bar.
        self.serial.timeout = timeout
        self.serial.write(buffer)
        self.serial.flush()
        self.serial.timeout = 0.1
        self.show_progress(len(buffer))

    # Note: windows platform dependent code
    def putc_win(self, buffer, timeout=1):
        self.serial.write(buffer)
        self.show_progress(len(buffer))
        # Busy-wait until the OS transmit buffer drains.
        while True:
            if self.serial.out_waiting == 0:
                break

    def setBaudrate(self, baudrate):
        # self.serial.setBaudrate(baudrate)
        self.serial.baudrate = baudrate

    def reboot(self):
        # Target Reset by DTR
        self.serial.setDTR(False)
        self.serial.setDTR(True)
        self.serial.setDTR(False)

    def set_file_size(self, filesize):
        # Reset progress-bar state for a new transfer of *filesize* bytes.
        self.bytes_transfered = 0
        self.filesize = filesize
        self.count = 0

    def show_progress(self, sendsize):
        # Print '#' marks proportional to bytes sent, capped at MAX_DOT_COUNT.
        if PRINT_RAW_COMMAND:
            if self.count < MAX_DOT_COUNT:
                self.bytes_transfered = self.bytes_transfered + sendsize
                cur_count = int(self.bytes_transfered * MAX_DOT_COUNT / self.filesize)
                if MAX_DOT_COUNT < cur_count:
                    cur_count = MAX_DOT_COUNT
                for idx in range(cur_count - self.count):
                    print('#',end='')
                sys.stdout.flush()
                self.count = cur_count
                if self.count == MAX_DOT_COUNT:
                    print("\n")
class FlashWriter:
    """Drives the target bootloader's `updater` shell over a serial link:
    cancels autoboot, issues commands, and pushes binaries via XMODEM."""

    def __init__(self, protocol_sel=PROTOCOL_SERIAL):
        if protocol_sel == PROTOCOL_SERIAL:
            self.serial = SerialDev()

    def cancel_autoboot(self) :
        """Interrupt autoboot by sending 'r' until a ROM banner is seen, then
        wait for the `updater` prompt. Returns the decoded banner string."""
        boot_msg = ''
        retry = 0
        self.serial.reboot() # Target reboot before send 'r'
        while boot_msg == '' :
            if retry > 10:
                # If retry 10 times, reset spresense board
                self.serial.reboot()
                retry = 0
            rx = self.serial.readline().strip()
            self.serial.write(b"r") # Send "r" key to avoid auto boot
            # ROM_MSG entries are matched against the raw bytes of each line.
            for msg in ROM_MSG :
                if msg in rx :
                    boot_msg = msg
                    break
            retry = retry + 1
        while True :
            rx = self.serial.readline().decode(errors="replace").strip()
            if "updater" in rx :
                # Workaround : Sometime first character is dropped.
                # Send line feed as air shot before actual command.
                self.serial.write(b"\n") # Send line feed
                self.serial.discard_inputs()# Clear input buffer to sync
                return boot_msg.decode(errors="ignore")

    def recv(self):
        """Read one raw line (bytes), echoing it unless it is XMODEM chatter."""
        rx = self.serial.readline()
        if PRINT_RAW_COMMAND :
            serial_line = rx.decode(errors="replace")
            if serial_line.strip() != "" and not serial_line.startswith(XMDM_MSG):
                print(serial_line, end="")
        return rx

    def wait(self, string):
        """Block until a received line contains `string`."""
        while True:
            rx = self.recv()
            if string.encode() in rx:
                time.sleep(0.1)
                break

    def wait_for_prompt(self):
        """Block until the `updater` shell prompt is seen again."""
        prompt_pat = re.compile(b"updater")
        while True:
            rx = self.recv()
            if prompt_pat.search(rx):
                time.sleep(0.1)
                break

    def send(self, string):
        """Send one command line and consume its echo."""
        self.serial.write(str(string).encode() + b"\n")
        rx = self.serial.readline()
        if PRINT_RAW_COMMAND :
            print(rx.decode(errors="replace"), end="")

    def read_output(self, prompt_text) :
        """Collect decoded output lines until `prompt_text` appears."""
        output = []
        while True :
            rx = self.serial.readline()
            if prompt_text.encode() in rx :
                time.sleep(0.1)
                break
            if rx != "" :
                output.append(rx.decode(errors="ignore").rstrip())
        return output

    def install_files(self, files, command) :
        """Send each file with XMODEM-1k after issuing `command` per file."""
        # NOTE(review): string concatenation assumes XMODEM_BAUD is a string —
        # TODO confirm against the argparse option type.
        if ConfigArgs.XMODEM_BAUD:
            command += " -b " + ConfigArgs.XMODEM_BAUD
        if os.name == 'nt':
            modem = xmodem.XMODEM(self.serial.getc, self.serial.putc_win, 'xmodem1k')
        else:
            modem = xmodem.XMODEM(self.serial.getc, self.serial.putc, 'xmodem1k')
        for file in files:
            with open(file, "rb") as bin :
                self.send(command)
                print("Install " + file)
                self.wait(XMDM_MSG)
                # Progress-bar ruler: |0% ---- 50% ---- 100%|
                print("|0%" +
                      "-" * (int(MAX_DOT_COUNT / 2) - 6) +
                      "50%" +
                      "-" * (MAX_DOT_COUNT - int(MAX_DOT_COUNT / 2) - 5) +
                      "100%|")
                if ConfigArgs.XMODEM_BAUD:
                    # Switch to the faster transfer baudrate for the payload.
                    self.serial.setBaudrate(ConfigArgs.XMODEM_BAUD)
                    self.serial.discard_inputs() # Clear input buffer to sync
                self.serial.set_file_size(os.path.getsize(file))
                modem.send(bin)
                if ConfigArgs.XMODEM_BAUD:
                    # Restore the command-shell baudrate.
                    self.serial.setBaudrate(115200)
                self.wait_for_prompt()

    def delete_files(self, files) :
        """Delete each named binary from the target."""
        for file in files :
            self.delete_binary(file)

    def delete_binary(self, bin_name) :
        self.send("rm " + bin_name)
        self.wait_for_prompt()
def main():
    """Entry point for the flash tool.

    Parses the command line, synchronizes with the target's `updater` shell
    (resetting via DTR or asking the user to press RESET), then erases and/or
    installs files and optionally marks the image bootable.

    Returns:
        0 on success, ``errno.EINVAL`` if argument parsing failed.
    """
    try:
        config_loader = ConfigArgsLoader()
        config_loader.update_config()
    except:
        # Bare except is deliberate: argparse signals bad arguments by raising
        # SystemExit, which `except Exception` would not catch.
        return errno.EINVAL

    # Wait to reset the board
    writer = FlashWriter(ConfigArgs.PROTOCOL_TYPE)

    do_wait_reset = True
    if ConfigArgs.DTR_RESET:
        # DTR wiring lets us reset the board ourselves; no manual reset needed.
        do_wait_reset = False
        bootrom_msg = writer.cancel_autoboot()

    if do_wait_reset == True:
        rx = writer.recv()
        time.sleep(1)
        for i in range(3):
            writer.send("")
            rx = writer.recv()
            if "updater".encode() in rx:
                # Already at the updater prompt; no need to wait for reset.
                do_wait_reset = False
                break
            time.sleep(1)

    if do_wait_reset:
        # Wait to reset the board
        print('Please press RESET button on target board')
        sys.stdout.flush()
        bootrom_msg = writer.cancel_autoboot()

    # Remove files
    if ConfigArgs.ERASE_NAME:
        # Fixed typo in the user-facing message ("exisiting" -> "existing").
        print(">>> Remove existing files ...")
        writer.delete_files(ConfigArgs.ERASE_NAME)

    # Install files (the original checked PACKAGE_NAME twice; once suffices).
    if ConfigArgs.PACKAGE_NAME:
        print(">>> Install files ...")
        writer.install_files(ConfigArgs.PACKAGE_NAME, "install")

    # Set auto boot
    if not ConfigArgs.NO_SET_BOOTABLE:
        print(">>> Save Configuration to FlashROM ...")
        writer.send("set bootable M0P")
        writer.wait_for_prompt()

    # Sync all cached data to flash
    writer.send("sync")
    writer.wait_for_prompt()

    if REBOOT_AT_END:
        print("Restarting the board ...")
        writer.send("reboot")

    return 0
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
print("Canceled by keyboard interrupt.")
pass
| 27.016997
| 109
| 0.700849
|
4a1a3e2c6893239df24704738369b2576f43eb47
| 1,754
|
py
|
Python
|
Credential.py
|
Ruterana/password_locker
|
eadc92b506bea9607c395fe5027b8cc61722a6a3
|
[
"MIT"
] | null | null | null |
Credential.py
|
Ruterana/password_locker
|
eadc92b506bea9607c395fe5027b8cc61722a6a3
|
[
"MIT"
] | null | null | null |
Credential.py
|
Ruterana/password_locker
|
eadc92b506bea9607c395fe5027b8cc61722a6a3
|
[
"MIT"
] | null | null | null |
class Credential:
    """A stored login credential (account name, user name and password).

    Every saved credential is kept in the class-level ``Credential_list``
    registry, which the class methods search and display.
    """

    # Shared registry of every saved Credential instance.
    Credential_list = []

    def __init__(self, Account, user_name, password):
        """Create a credential for `Account` with `user_name`/`password`."""
        self.Account = Account
        self.user_name = user_name
        self.password = password

    def save_Credential(self):
        '''
        save_Credential method saves Credential objects into Credential_list
        '''
        Credential.Credential_list.append(self)

    def delete_Credential(self):
        '''
        delete_Credential method deletes a saved Credential from the Credential_list
        '''
        Credential.Credential_list.remove(self)

    @classmethod
    def find_by_Account(cls, Account):
        '''
        Method that takes in an account name and returns the first credential
        that matches it, or None if no match is saved.

        Args:
            Account: Account to search for
        Returns :
            Credential whose Account matches, or None.
        '''
        # The original loop variable shadowed the class name; a generator with
        # a distinct name avoids that and stops at the first match.
        return next(
            (cred for cred in cls.Credential_list if cred.Account == Account),
            None,
        )

    @classmethod
    def Credential_exist(cls, Account):
        '''
        Method that checks if a Account exists from the Credential list.

        Args:
            Account: ACCOUNT to search if it exists
        Returns :
            Boolean: True or false depending if the Account exists
        '''
        return any(cred.Account == Account for cred in cls.Credential_list)

    @classmethod
    def display_Credentials(cls):
        '''
        method that returns the Credential list
        '''
        return cls.Credential_list
| 27.84127
| 92
| 0.611745
|
4a1a3e84d395b2d3dd3a3793ed13b04ff29fa1e8
| 386
|
py
|
Python
|
Ejercicios/b. Listas/Reto 8. 16-04.py
|
lufe089/material-intro-prog
|
c5b653587aa128e957d31b7f818795303da216d5
|
[
"MIT"
] | 1
|
2022-01-30T17:19:08.000Z
|
2022-01-30T17:19:08.000Z
|
Ejercicios/b. Listas/Reto 8. 16-04.py
|
lufe089/material-intro-prog
|
c5b653587aa128e957d31b7f818795303da216d5
|
[
"MIT"
] | null | null | null |
Ejercicios/b. Listas/Reto 8. 16-04.py
|
lufe089/material-intro-prog
|
c5b653587aa128e957d31b7f818795303da216d5
|
[
"MIT"
] | 1
|
2021-12-27T15:47:04.000Z
|
2021-12-27T15:47:04.000Z
|
# Haga un función que le pida la nota final de cada alumno de un curso de N estudiantes, imprima el promedio y retorne una lista con las notas del curso.
# Haga un procedimiento que reciba N cantidad de cursos para usar la función anterior.
# A partir de los vectores, crear una función que cuente cuales estudiantes superaron 3.5
# La función main tiene que coordinar todo el proceso.
| 96.5
| 154
| 0.790155
|
4a1a3f3740efd3b4f98945c29225283d1a900bec
| 2,813
|
py
|
Python
|
tools/server-side/svnpubsub/commit-hook.py
|
ruchirarya/svn
|
81502a213251c2af21361a942bd9a8cd7d3adb9f
|
[
"Apache-2.0"
] | 7
|
2018-01-18T06:13:21.000Z
|
2020-07-09T03:46:16.000Z
|
depe/subversion/tools/server-side/svnpubsub/commit-hook.py
|
louis-tru/TouchCode2
|
91c182aeaa37fba16e381ea749d32906dab1aeea
|
[
"BSD-3-Clause-Clear"
] | 4
|
2015-01-12T22:23:41.000Z
|
2015-01-12T22:33:52.000Z
|
src/subversion/tools/server-side/svnpubsub/commit-hook.py
|
schwern/alien-svn
|
7423b08f9bc4fdf0ac0d7ea53495269b21b3e8f9
|
[
"Apache-2.0"
] | 1
|
2020-11-04T07:25:22.000Z
|
2020-11-04T07:25:22.000Z
|
#!/usr/local/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
SVNLOOK="/usr/local/svn-install/current/bin/svnlook"
#SVNLOOK="/usr/local/bin/svnlook"
HOST="127.0.0.1"
PORT=2069
import sys
import subprocess
try:
import simplejson as json
except ImportError:
import json
import urllib2
def svncmd(cmd):
    # Run an svnlook command line; caller reads the piped stdout.
    # NOTE(review): shell=True with an interpolated repository path is
    # shell-injection prone if the path contains metacharacters — consider
    # an argument list with shell=False.
    return subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
def svncmd_uuid(repo):
    """Return the repository UUID reported by `svnlook uuid`."""
    proc = svncmd("%s uuid %s" % (SVNLOOK, repo))
    return proc.stdout.read().strip()
def svncmd_info(repo, revision):
    """Return {'author', 'date', 'log'} for a revision via `svnlook info`."""
    proc = svncmd("%s info -r %s %s" % (SVNLOOK, revision, repo))
    lines = proc.stdout.read().split("\n")
    # svnlook info layout: author, date, log-size, then the log message.
    return {
        'author': lines[0].strip(),
        'date': lines[1].strip(),
        'log': "\n".join(lines[3:]).strip(),
    }
def svncmd_changed(repo, revision):
    """Return {path: {'flags': flags}} for every path touched in a revision."""
    proc = svncmd("%s changed -r %s %s" % (SVNLOOK, revision, repo))
    changed = {}
    while True:
        raw = proc.stdout.readline()
        if not raw:
            break
        raw = raw.strip()
        # svnlook prints three flag characters, a space, then the path.
        changed[raw[4:]] = {'flags': raw[0:3]}
    return changed
def do_put(body):
    # HTTP PUT the JSON commit payload to the svnpubsub server.
    # NOTE(review): urllib2 makes this a Python 2 script.
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request("http://%s:%d/commits" %(HOST, PORT), data=body)
    request.add_header('Content-Type', 'application/json')
    # urllib2 has no native PUT support; override the method selector.
    request.get_method = lambda: 'PUT'
    url = opener.open(request)
def main(repo, revision):
    """Collect metadata for `revision` of `repo` and PUT it to svnpubsub."""
    revision = revision.lstrip('r')
    info = svncmd_info(repo, revision)
    commit = {
        'type': 'svn',
        'format': 1,
        'id': int(revision),
        'changed': svncmd_changed(repo, revision),
        'repository': svncmd_uuid(repo),
        'committer': info['author'],
        'log': info['log'],
        'date': info['date'],
    }
    do_put(json.dumps(commit))
if __name__ == "__main__":
if len(sys.argv) not in (3, 4):
sys.stderr.write("invalid args\n")
sys.exit(0)
main(*sys.argv[1:3])
| 29.925532
| 78
| 0.636331
|
4a1a3f86977432a5afe5281ac35538d23317fd30
| 11,502
|
py
|
Python
|
pvmismatch/contrib/xlsio/xlsio.py
|
adambgnr/PVMismatch
|
66d8ef38319cbd8c36e032ae29f92d679bae7da9
|
[
"BSD-3-Clause"
] | null | null | null |
pvmismatch/contrib/xlsio/xlsio.py
|
adambgnr/PVMismatch
|
66d8ef38319cbd8c36e032ae29f92d679bae7da9
|
[
"BSD-3-Clause"
] | null | null | null |
pvmismatch/contrib/xlsio/xlsio.py
|
adambgnr/PVMismatch
|
66d8ef38319cbd8c36e032ae29f92d679bae7da9
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from scipy.interpolate import interp1d
# Pandas is an optional dependency only used by xlsio, therefore
# not installed with PVMismatch:
try:
import pandas as pd
except ImportError:
print("Pandas module not found. You need to install it before proceeding:")
print("https://pandas.pydata.org/pandas-docs/stable/install.html")
raise
def _create_cell_pos_df(pv_mod, nr_string, nr_mod):
"""Create cell position dataframe of a module in the PV system"""
cell_pos = pv_mod.cell_pos
nrows = int(pv_mod.numberCells / sum(pv_mod.subStrCells))
cell_pos_df = pd.DataFrame(index=['{}_{}'.format(nr_mod, nr)
for nr
in range(nrows)])
for b, bypass in enumerate(cell_pos):
for c, col in enumerate(bypass):
cell_pos_df['{}_{}_{}'.format(nr_string, b, c)] = [i['idx']
for i
in col]
return cell_pos_df
def _create_nan_df(pv_mod, nr_string, nr_mod):
"""Create an "nan" dataframe of a module in the PV system for the case
when the bypass diode activation is not calculated yet"""
cell_pos = pv_mod.cell_pos
nrows = int(pv_mod.numberCells / sum(pv_mod.subStrCells))
nan_df = pd.DataFrame(index=['{}_{}'.format(nr_mod, nr)
for nr
in range(nrows)])
for b, bypass in enumerate(cell_pos):
for c, col in enumerate(bypass):
nan_df['{}_{}_{}'.format(nr_string, b, c)] = ['nan'] * len(col)
return nan_df
def _create_irrad_df(pv_mod, cell_pos_df):
"""Create irradiance dataframe of a module in the PV system"""
irrad = pd.Series(pv_mod.Ee.flatten())
irrad_df = pd.DataFrame(index=cell_pos_df.index,
columns=cell_pos_df.columns)
for column in cell_pos_df.columns:
for row in cell_pos_df.index:
cell_index = cell_pos_df.loc[row, column]
irrad_df.loc[row, column] = irrad[cell_index]
return irrad_df
def _create_temp_df(pv_mod, cell_pos_df):
"""Create temperature dataframe of a module in the PV system"""
temp = pd.Series(pv_mod.Tcell.flatten())
temp_df = pd.DataFrame(index=cell_pos_df.index,
columns=cell_pos_df.columns)
for column in cell_pos_df.columns:
for row in cell_pos_df.index:
cell_index = cell_pos_df.loc[row, column]
temp_df.loc[row, column] = temp[cell_index]
return temp_df
# NOTE(review): resolved an unmerged conflict that was left in the file
# (<<<<<<< HEAD ... >>>>>>> markers — a syntax error in Python). The incoming
# branch only added a stray debug helper `testfunc` printing 'hey'; the HEAD
# side was empty, so the conflict is resolved by dropping the leftover code.
def system_layout_to_xls(output_xls_name, pv_sys, write_bpd_act):
    """Write an xls with worksheets of irradiance, cell temperature
    and cell index. If "write_bpd_act" is True, bypass diode activation is
    checked and on the ActiveBpd tab bypassed cells are represented with 1 and
    non-bypassed cells with 0."""
    # NOTE(review): the docstring predates the BpdAndRbc encoding used below,
    # where 2 marks bypassed cells and 1 marks reverse-biased cells.
    writer = pd.ExcelWriter(output_xls_name, engine='xlsxwriter')
    workbook = writer.book
    writer.sheets['CellIndexes'] = workbook.add_worksheet('CellIndexes')
    writer.sheets['Irradiance'] = workbook.add_worksheet('Irradiance')
    writer.sheets['CellTemp'] = workbook.add_worksheet('CellTemp')
    writer.sheets['BpdAndRbc'] = workbook.add_worksheet('BpdAndRbc')
    if write_bpd_act:
        pv_sys_vmp = pv_sys.Vmp
        print(pv_sys.Pmp)
    for s, string in enumerate(pv_sys.pvstrs):
        if write_bpd_act:
            # String current at the system's max-power voltage, interpolated
            # from the string I-V curve.
            interp_string_iv = interp1d(string.Vstring, string.Istring)
            string_imp = interp_string_iv(pv_sys_vmp)
        for m, module in enumerate(string.pvmods):
            cell_pos_df = _create_cell_pos_df(pv_mod=module, nr_string=s,
                                              nr_mod=m)
            ncols = sum(module.subStrCells)
            nrows = int(module.numberCells / ncols)
            v_bpd_trig = module.Vbypass
            if write_bpd_act:
                cols_per_substr = module.subStrCells
                bpd = []
                cis = []
                rbc = []
                # checking for bypass diode activation and reverse bised cells
                for ss in range(module.numSubStr):
                    interp_substring_vi = interp1d(module.Isubstr[ss],
                                                   module.Vsubstr[ss])
                    substring_vmp = interp_substring_vi(string_imp)
                    # Substring voltage below zero at the operating current
                    # means its bypass diode conducts: mark every column of
                    # the substring.
                    if substring_vmp < 0: # doublecheck if we should compare to 0 here
                        [bpd.append(2) for nss in range(cols_per_substr[ss])]
                    else:
                        [bpd.append(0) for nss in range(cols_per_substr[ss])]
                    # Flat cell indexes of this substring, column by column.
                    cis_inss = []
                    for col in range(cols_per_substr[ss]):
                        cis_inss += [i['idx'] for i in module.cell_pos[ss][col]]
                    cells_inss = [module.pvcells[ci] for ci in cis_inss]
                    # A negative cell voltage at the operating current flags a
                    # reverse-biased cell.
                    for cell in cells_inss:
                        interp_cell_vi = interp1d(cell.Icell.flatten(),
                                                  cell.Vcell.flatten())
                        cell_vmp = interp_cell_vi(string_imp)
                        if cell_vmp < 0:
                            rbc.append(1)
                        else:
                            rbc.append(0)
                    cis += cis_inss
                cis_series = pd.Series(index=cis, data=rbc)
                bpd_df = pd.DataFrame(index=cell_pos_df.index,
                                      columns=cell_pos_df.columns)
                bpdcols = [[c] * len(bpd_df) for c in bpd]
                rbc_df = pd.DataFrame(index=cell_pos_df.index,
                                      columns=cell_pos_df.columns)
                for c, column in enumerate(cell_pos_df.columns):
                    bpd_df[column] = bpdcols[c]
                    for row in cell_pos_df.index:
                        ci = cell_pos_df.loc[row, column]
                        rbc_df.loc[row, column] = cis_series[ci]
                # merging bpd and rbc dataframes into one dataframe, where
                # 2 = bypassed cells and 1 = reverse biased cells
                bpdrbc_df = (bpd_df * 2 + rbc_df).clip(upper=2)
            # writing xls files
            if not write_bpd_act:
                bpdrbc_df = _create_nan_df(pv_mod=module, nr_string=s, nr_mod=m)
            # Each module occupies an (nrows+1) x (ncols+1) sub-table: one
            # extra row/column for the pandas header and index.
            startcol = 0 if s == 0 else s*(ncols+1)
            startrow = 0 if m == 0 else m*(nrows+1)
            cell_pos_df.to_excel(writer, sheet_name='CellIndexes',
                                 startrow=startrow , startcol=startcol)
            irrad_df = _create_irrad_df(pv_mod=module, cell_pos_df=cell_pos_df)
            irrad_df.to_excel(writer, sheet_name='Irradiance',
                              startrow=startrow, startcol=startcol)
            temp_df = _create_temp_df(pv_mod=module, cell_pos_df=cell_pos_df)
            temp_df.to_excel(writer, sheet_name='CellTemp', startrow=startrow,
                             startcol=startcol)
            bpdrbc_df.to_excel(writer, sheet_name='BpdAndRbc',
                               startrow=startrow, startcol=startcol)
    # formatting the Irradiance worksheet
    writer.sheets['Irradiance'].conditional_format(0, 0,
                               writer.sheets['Irradiance'].dim_rowmax,
                               writer.sheets['Irradiance'].dim_colmax,
                               {'type': '2_color_scale',
                                'min_type': 'num',
                                'max_type': 'num',
                                'min_value':0,
                                'max_value':1,
                                'min_color':'#808080',
                                'max_color':'#FFD700'})
    # formatting the CellTemp worksheet
    writer.sheets['CellTemp'].conditional_format(0, 0,
                             writer.sheets['CellTemp'].dim_rowmax,
                             writer.sheets['CellTemp'].dim_colmax,
                             {'type': '3_color_scale',
                              'min_type': 'num',
                              'mid_type': 'num',
                              'max_type': 'num',
                              'min_value':273.15,
                              'mid_value':273.15 + 25,
                              'max_value':273.15 + 85,
                              'min_color':'#85C1E9',
                              'mid_color':'#E5E7E9',
                              'max_color':'#E74C3C'})
    # formatting BpdAndRbc worksheet
    writer.sheets['BpdAndRbc'].conditional_format(0, 0,
                              writer.sheets['BpdAndRbc'].dim_rowmax,
                              writer.sheets['BpdAndRbc'].dim_colmax,
                              {'type': '3_color_scale',
                               'min_type': 'num',
                               'mid_type': 'num',
                               'max_type': 'num',
                               'min_value':0,
                               'mid_value':1,
                               'max_value':2,
                               'min_color':'#FFFFFF',
                               'mid_color':'#FF6347',
                               'max_color':'#36C1FF'})
    writer.save()
    writer.close()
def set_input_from_xls(input_xls_name, pv_sys, str_num, str_len):
    """Set cell temperatures of a PVMM PV system from an xls"""
    # Reads back the Irradiance / CellTemp / CellIndexes sheets written by
    # system_layout_to_xls: each module occupies an (nrows+1) x (ncols+1)
    # sub-table (one extra row/column for the pandas index and header), and
    # both suns and temperatures are pushed into `pv_sys` per module.
    for string in list(range(str_num)):
        for module in list(range(str_len)):
            ncols = sum(pv_sys.pvstrs[string].pvmods[module].subStrCells)
            nrows = int(pv_sys.pvstrs[string].pvmods[module].numberCells/ncols)
            irrad = pd.read_excel(input_xls_name, sheet_name='Irradiance',
                                  skiprows=module*(nrows+1),nrows=nrows,
                                  usecols=range(string*(ncols+1),
                                                (string+1)*(ncols+1)),
                                  index_col=0, header=0)
            cell_temp = pd.read_excel(input_xls_name, sheet_name='CellTemp',
                                      skiprows=module*(nrows+1), nrows=nrows,
                                      usecols=range(string*(ncols+1),
                                                    (string+1)*(ncols+1)),
                                      index_col=0, header=0)
            cell_pos = pd.read_excel(input_xls_name, sheet_name='CellIndexes',
                                     skiprows=module*(nrows+1), nrows=nrows,
                                     usecols=range(string*(ncols+1),
                                                   (string+1)*(ncols+1)),
                                     index_col=0, header=0)
            # Flatten the three layout tables into parallel per-cell lists.
            Ee = []
            Tc = []
            mod_cell_idxs = []
            for column in cell_pos.columns:
                for row in cell_pos.index:
                    Ee.append(irrad.loc[row, column])
                    Tc.append(cell_temp.loc[row, column])
                    mod_cell_idxs.append(cell_pos.loc[row, column])
            pv_sys.setTemps({string:{module:[Tc, mod_cell_idxs]}})
            pv_sys.setSuns({string:{module:[Ee, mod_cell_idxs]}})
| 51.810811
| 86
| 0.505477
|
4a1a3fa7ab1b142e5e894f1aed407e5db0c238dc
| 42
|
py
|
Python
|
tests/test_underdevelopment.py
|
GoodManWEN/aiohttp-debugmode
|
1c49167165c59a981e38ec0174deb0e674c61f48
|
[
"MIT"
] | 2
|
2020-06-15T15:52:22.000Z
|
2021-05-05T22:59:35.000Z
|
tests/test_underdevelopment.py
|
GoodManWEN/aiohttp-debugmode
|
1c49167165c59a981e38ec0174deb0e674c61f48
|
[
"MIT"
] | 1
|
2021-07-21T11:51:45.000Z
|
2021-07-21T11:51:45.000Z
|
tests/test_underdevelopment.py
|
GoodManWEN/aiohttp-debugmode
|
1c49167165c59a981e38ec0174deb0e674c61f48
|
[
"MIT"
] | 1
|
2021-05-05T21:04:49.000Z
|
2021-05-05T21:04:49.000Z
|
'''
'''
def test_item():
    """Placeholder smoke test that always succeeds (module under development)."""
    assert True
| 7
| 16
| 0.52381
|
4a1a3ff9f49b9b947fb6f4c93f6603d56e77ae54
| 33,881
|
py
|
Python
|
src/python/pants/jvm/resolve/coursier_fetch.py
|
alexey-tereshenkov-oxb/pants
|
920591edc6e0fb372930253c821b712a74b0dec8
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/jvm/resolve/coursier_fetch.py
|
alexey-tereshenkov-oxb/pants
|
920591edc6e0fb372930253c821b712a74b0dec8
|
[
"Apache-2.0"
] | 8
|
2022-02-06T12:14:38.000Z
|
2022-02-16T11:22:05.000Z
|
src/python/pants/jvm/resolve/coursier_fetch.py
|
alexey-tereshenkov-oxb/pants
|
920591edc6e0fb372930253c821b712a74b0dec8
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import dataclasses
import json
import logging
import os
import re
from dataclasses import dataclass
from itertools import chain
from typing import Any, FrozenSet, Iterable, Iterator, List, Tuple
from urllib.parse import quote_plus as url_quote_plus
from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior
from pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest
from pants.engine.addresses import UnparsedAddressInputs
from pants.engine.collection import Collection, DeduplicatedCollection
from pants.engine.fs import (
AddPrefix,
Digest,
DigestContents,
DigestSubset,
FileDigest,
MergeDigests,
PathGlobs,
RemovePrefix,
Snapshot,
)
from pants.engine.process import BashBinary, Process, ProcessResult
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import CoarsenedTargets, Target, Targets
from pants.engine.unions import UnionRule
from pants.jvm.compile import (
ClasspathEntry,
ClasspathEntryRequest,
CompileResult,
FallibleClasspathEntry,
)
from pants.jvm.resolve import coursier_setup
from pants.jvm.resolve.coursier_setup import Coursier
from pants.jvm.resolve.key import CoursierResolveKey
from pants.jvm.subsystems import JvmSubsystem
from pants.jvm.target_types import (
JvmArtifactArtifactField,
JvmArtifactFieldSet,
JvmArtifactGroupField,
JvmArtifactJarSourceField,
JvmArtifactTarget,
JvmArtifactUrlField,
JvmArtifactVersionField,
)
from pants.jvm.util_rules import ExtractFileDigest
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
from pants.util.strutil import bullet_list, pluralize
logger = logging.getLogger(__name__)
class CoursierFetchRequest(ClasspathEntryRequest):
    """Classpath entry request handled by Coursier for `jvm_artifact` targets."""

    field_sets = (JvmArtifactFieldSet,)
# Base error type for Coursier invocation/parsing failures in this module.
class CoursierError(Exception):
    """An exception relating to invoking Coursier or processing its output."""
class NoCompatibleResolve(Exception):
    """No compatible resolve could be found for a set of targets."""

    def __init__(self, jvm: JvmSubsystem, msg_prefix: str, incompatible_targets: Iterable[Target]):
        # One bullet per target: "<address>\t<its resolves>" so the user can
        # see which targets disagree.
        targets_and_resolves_str = bullet_list(
            f"{t.address.spec}\t{jvm.resolves_for_target(t)}" for t in incompatible_targets
        )
        super().__init__(
            f"{msg_prefix}:\n"
            f"{targets_and_resolves_str}\n"
            "Targets which will be merged onto the same classpath must have at least one compatible "
            f"resolve (from the [resolve]({doc_url('reference-deploy_jar#coderesolvecode')}) or "
            f"[compatible_resolves]({doc_url('reference-java_sources#codecompatible_resolvescode')}) "
            "fields) in common."
        )
class InvalidCoordinateString(Exception):
    """The coordinate string being passed is invalid or malformed."""

    def __init__(self, coords: str) -> None:
        message = f"Received invalid artifact coordinates: {coords}"
        super().__init__(message)
@dataclass(frozen=True)
class Coordinate:
    """A single Maven-style coordinate for a JVM dependency.

    Coursier uses at least two string serializations of coordinates:
    1. A format that is accepted by the Coursier CLI which uses trailing attributes to specify
       optional fields like `packaging`/`type`, `classifier`, `url`, etc. See `to_coord_arg_str`.
    2. A format in the JSON report, which uses token counts to specify optional fields. We
       additionally use this format in our own lockfile. See `to_coord_str` and `from_coord_str`.
    """

    # Match groups: 1=group, 2=artifact, 4=packaging (optional),
    # 6=classifier (optional), 7=version.
    REGEX = re.compile("([^: ]+):([^: ]+)(:([^: ]*)(:([^: ]+))?)?:([^: ]+)")

    group: str
    artifact: str
    version: str
    packaging: str = "jar"
    classifier: str | None = None

    # True to enforce that the exact declared version of a coordinate is fetched, rather than
    # allowing dependency resolution to adjust the version when conflicts occur.
    strict: bool = True

    @staticmethod
    def from_json_dict(data: dict) -> Coordinate:
        # Inverse of `to_json_dict`; `packaging`/`classifier` are optional keys.
        return Coordinate(
            group=data["group"],
            artifact=data["artifact"],
            version=data["version"],
            packaging=data.get("packaging", "jar"),
            classifier=data.get("classifier", None),
        )

    def to_json_dict(self) -> dict:
        # NOTE: `classifier` may be serialized as None; `strict` is not
        # serialized at all.
        ret = {
            "group": self.group,
            "artifact": self.artifact,
            "version": self.version,
            "packaging": self.packaging,
            "classifier": self.classifier,
        }
        return ret

    @classmethod
    def from_coord_str(cls, s: str) -> Coordinate:
        """Parses from a coordinate string with optional `packaging` and `classifier` coordinates.

        See the classdoc for more information on the format.

        Using Aether's implementation as reference
        http://www.javased.com/index.php?source_dir=aether-core/aether-api/src/main/java/org/eclipse/aether/artifact/DefaultArtifact.java

        ${organisation}:${artifact}[:${packaging}[:${classifier}]]:${version}

        See also: `to_coord_str`.
        """
        parts = Coordinate.REGEX.match(s)
        if parts is not None:
            packaging_part = parts.group(4)
            return cls(
                group=parts.group(1),
                artifact=parts.group(2),
                packaging=packaging_part if packaging_part is not None else "jar",
                classifier=parts.group(6),
                version=parts.group(7),
            )
        else:
            raise InvalidCoordinateString(s)

    def as_requirement(self) -> ArtifactRequirement:
        """Creates a `RequirementCoordinate` from a `Coordinate`."""
        return ArtifactRequirement(coordinate=self)

    def to_coord_str(self, versioned: bool = True) -> str:
        """Renders the coordinate in Coursier's JSON-report format, which does not use attributes.

        See also: `from_coord_str`.
        """
        unversioned = f"{self.group}:{self.artifact}"
        # Token-count format: a classifier forces the packaging token to be
        # present; a bare non-"jar" packaging is emitted on its own.
        if self.classifier is not None:
            unversioned += f":{self.packaging}:{self.classifier}"
        elif self.packaging != "jar":
            unversioned += f":{self.packaging}"

        version_suffix = ""
        if versioned:
            version_suffix = f":{self.version}"
        return f"{unversioned}{version_suffix}"

    def to_coord_arg_str(self, extra_attrs: dict[str, str] | None = None) -> str:
        """Renders the coordinate in Coursier's CLI input format.

        The CLI input format uses trailing key-val attributes to specify `packaging`, `url`, etc.

        See https://github.com/coursier/coursier/blob/b5d5429a909426f4465a9599d25c678189a54549/modules/coursier/shared/src/test/scala/coursier/parse/DependencyParserTests.scala#L7
        """
        attrs = dict(extra_attrs or {})
        if self.packaging != "jar":
            # NB: Coursier refers to `packaging` as `type` internally.
            attrs["type"] = self.packaging
        if self.classifier:
            attrs["classifier"] = self.classifier
        attrs_sep_str = "," if attrs else ""
        attrs_str = ",".join((f"{k}={v}" for k, v in attrs.items()))
        return f"{self.group}:{self.artifact}:{self.version}{attrs_sep_str}{attrs_str}"
# Deduplicated, ordered container of Maven coordinates.
class Coordinates(DeduplicatedCollection[Coordinate]):
    """An ordered list of `Coordinate`s."""
@dataclass(frozen=True)
class ArtifactRequirement:
    """A single Maven-style coordinate for a JVM dependency, along with information of how to fetch
    the dependency if it is not to be fetched from a Maven repository."""

    coordinate: Coordinate

    # Optional direct-download URL overriding Maven repository lookup.
    url: str | None = None
    # Optional in-repo jar source field (from a `jvm_artifact` with `jar=`).
    jar: JvmArtifactJarSourceField | None = None

    @classmethod
    def from_jvm_artifact_target(cls, target: Target) -> ArtifactRequirement:
        if not JvmArtifactFieldSet.is_applicable(target):
            raise AssertionError(
                "`ArtifactRequirement.from_jvm_artifact_target()` only works on targets with "
                "`JvmArtifactFieldSet` fields present."
            )

        return ArtifactRequirement(
            coordinate=Coordinate(
                group=target[JvmArtifactGroupField].value,
                artifact=target[JvmArtifactArtifactField].value,
                version=target[JvmArtifactVersionField].value,
            ),
            url=target[JvmArtifactUrlField].value,
            # Only propagate the jar field when it actually has a value.
            jar=(
                target[JvmArtifactJarSourceField]
                if target[JvmArtifactJarSourceField].value
                else None
            ),
        )

    def to_coord_arg_str(self) -> str:
        # The URL is percent-encoded because Coursier's attribute syntax is
        # comma/equals-delimited.
        return self.coordinate.to_coord_arg_str(
            {"url": url_quote_plus(self.url)} if self.url else {}
        )
# TODO: Consider whether to carry classpath scope in some fashion via ArtifactRequirements.
class ArtifactRequirements(DeduplicatedCollection[ArtifactRequirement]):
    """An ordered list of Coordinates used as requirements."""

    @classmethod
    def from_coordinates(cls, coordinates: Iterable[Coordinate]) -> ArtifactRequirements:
        # Convenience constructor: wrap plain coordinates with no url/jar info.
        return ArtifactRequirements(coord.as_requirement() for coord in coordinates)
@dataclass(frozen=True)
class CoursierLockfileEntry:
    """A single artifact entry from a Coursier-resolved lockfile.

    These fields are nearly identical to the JSON objects from the
    "dependencies" entries in Coursier's --json-output-file format.
    But unlike Coursier's JSON report, a CoursierLockfileEntry
    includes the content-address of the artifact fetched by Coursier
    and ingested by Pants.

    For example, a Coursier JSON report dependency entry might look like this:

    ```
    {
      "coord": "com.chuusai:shapeless_2.13:2.3.3",
      "file": "/home/USER/.cache/coursier/v1/https/repo1.maven.org/maven2/com/chuusai/shapeless_2.13/2.3.3/shapeless_2.13-2.3.3.jar",
      "directDependencies": [
        "org.scala-lang:scala-library:2.13.0"
      ],
      "dependencies": [
        "org.scala-lang:scala-library:2.13.0"
      ]
    }
    ```

    The equivalent CoursierLockfileEntry would look like this:

    ```
    CoursierLockfileEntry(
        coord="com.chuusai:shapeless_2.13:2.3.3", # identical
        file_name="shapeless_2.13-2.3.3.jar" # PurePath(entry["file"].name)
        direct_dependencies=(Coordinate.from_coord_str("org.scala-lang:scala-library:2.13.0"),),
        dependencies=(Coordinate.from_coord_str("org.scala-lang:scala-library:2.13.0"),),
        file_digest=FileDigest(fingerprint=<sha256 of the jar>, ...),
    )
    ```

    The fields `remote_url` and `pants_address` are set by Pants if the `coord` field matches a
    `jvm_artifact` that had either the `url` or `jar` fields set.
    """

    coord: Coordinate
    file_name: str
    direct_dependencies: Coordinates
    dependencies: Coordinates
    file_digest: FileDigest
    remote_url: str | None = None
    pants_address: str | None = None

    @classmethod
    def from_json_dict(cls, entry) -> CoursierLockfileEntry:
        """Construct a CoursierLockfileEntry from its JSON dictionary representation."""
        # NB: dependency lists use camelCase keys in the serialized form
        # ("directDependencies"/"dependencies"), mirroring Coursier's report.
        return cls(
            coord=Coordinate.from_json_dict(entry["coord"]),
            file_name=entry["file_name"],
            direct_dependencies=Coordinates(
                Coordinate.from_json_dict(d) for d in entry["directDependencies"]
            ),
            dependencies=Coordinates(Coordinate.from_json_dict(d) for d in entry["dependencies"]),
            file_digest=FileDigest(
                fingerprint=entry["file_digest"]["fingerprint"],
                serialized_bytes_length=entry["file_digest"]["serialized_bytes_length"],
            ),
            # Optional: only present for url=/jar= artifacts (see classdoc).
            remote_url=entry.get("remote_url"),
            pants_address=entry.get("pants_address"),
        )

    def to_json_dict(self) -> dict[str, Any]:
        """Export this CoursierLockfileEntry to a JSON object."""
        return dict(
            coord=self.coord.to_json_dict(),
            directDependencies=[coord.to_json_dict() for coord in self.direct_dependencies],
            dependencies=[coord.to_json_dict() for coord in self.dependencies],
            file_name=self.file_name,
            file_digest=dict(
                fingerprint=self.file_digest.fingerprint,
                serialized_bytes_length=self.file_digest.serialized_bytes_length,
            ),
            remote_url=self.remote_url,
            pants_address=self.pants_address,
        )
@dataclass(frozen=True)
class CoursierResolvedLockfile:
    """An in-memory representation of Pants' Coursier lockfile format.

    All coordinates in the resolved lockfile will be compatible, so we do not need to do version
    testing when looking up coordinates.
    """

    entries: tuple[CoursierLockfileEntry, ...]

    @classmethod
    def _coordinate_not_found(cls, key: CoursierResolveKey, coord: Coordinate) -> CoursierError:
        # TODO: After fixing https://github.com/pantsbuild/pants/issues/13496, coordinate matches
        # should become exact, and this error message will capture all cases of stale lockfiles.
        return CoursierError(
            f"{coord} was not present in resolve `{key.name}` at `{key.path}`.\n"
            f"If you have recently added new `{JvmArtifactTarget.alias}` targets, you might "
            f"need to update your lockfile by running `coursier-resolve --names={key.name}`."
        )

    def _entry_with_deps(
        self, key: CoursierResolveKey, coord: Coordinate, *, transitive: bool
    ) -> tuple[CoursierLockfileEntry, tuple[CoursierLockfileEntry, ...]]:
        """Look up `coord` (keyed by group/artifact, ignoring version) along with
        either its direct or transitive dependency entries.

        Shared implementation of `direct_dependencies` and `dependencies`,
        which previously duplicated this lookup logic.
        """
        entries = {(i.coord.group, i.coord.artifact): i for i in self.entries}
        entry = entries.get((coord.group, coord.artifact))
        if entry is None:
            raise self._coordinate_not_found(key, coord)
        deps = entry.dependencies if transitive else entry.direct_dependencies
        return (entry, tuple(entries[(i.group, i.artifact)] for i in deps))

    def direct_dependencies(
        self, key: CoursierResolveKey, coord: Coordinate
    ) -> tuple[CoursierLockfileEntry, tuple[CoursierLockfileEntry, ...]]:
        """Return the entry for the given Coordinate, and for its direct dependencies."""
        return self._entry_with_deps(key, coord, transitive=False)

    def dependencies(
        self, key: CoursierResolveKey, coord: Coordinate
    ) -> tuple[CoursierLockfileEntry, tuple[CoursierLockfileEntry, ...]]:
        """Return the entry for the given Coordinate, and for its transitive dependencies."""
        return self._entry_with_deps(key, coord, transitive=True)

    @classmethod
    def from_json_dict(cls, lockfile) -> CoursierResolvedLockfile:
        """Construct a CoursierResolvedLockfile from its JSON dictionary representation."""
        return cls(entries=tuple(CoursierLockfileEntry.from_json_dict(dep) for dep in lockfile))

    def to_json(self) -> bytes:
        """Export this CoursierResolvedLockfile to human-readable JSON.

        This JSON is intended to be checked in to the user's repo as a hermetic snapshot of a
        Coursier resolved JVM classpath.
        """
        return json.dumps([entry.to_json_dict() for entry in self.entries], indent=4).encode(
            "utf-8"
        )
def classpath_dest_filename(coord: str, src_filename: str) -> str:
    """Calculates the destination filename on the classpath for the given source filename and coord.

    TODO: This is duplicated in `COURSIER_POST_PROCESSING_SCRIPT`.
    """
    extension = os.path.splitext(src_filename)[1]
    return coord.replace(":", "_") + extension
@dataclass(frozen=True)
class CoursierResolveInfo:
    """Pre-processed inputs for a `coursier fetch` invocation."""

    # Coordinate argument strings to pass on the `coursier` command line, one per requirement.
    coord_arg_strings: FrozenSet[str]
    # Input digest containing any local JAR files referenced via `file:` URLs.
    digest: Digest
@rule
async def prepare_coursier_resolve_info(
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolveInfo:
    """Convert artifact requirements into Coursier CLI coordinate args plus an input digest.

    Requirements backed by local JAR targets are rewritten to `file:` URLs under Coursier's
    working directory, and the JAR files themselves are captured into the returned digest so
    that the fetch process can read them.
    """
    # Transform requirements that correspond to local JAR files into coordinates with `file:/`
    # URLs, and put the files in the place specified by the URLs.
    no_jars: List[ArtifactRequirement] = []
    jars: List[Tuple[ArtifactRequirement, JvmArtifactJarSourceField]] = []
    # Partition requirements by whether they carry a local JAR source field.
    for req in artifact_requirements:
        jar = req.jar
        if not jar:
            no_jars.append(req)
        else:
            jars.append((req, jar))
    jar_files = await Get(SourceFiles, SourceFilesRequest(i[1] for i in jars))
    jar_file_paths = jar_files.snapshot.files
    # NOTE(review): this zip assumes `SourceFiles` yields paths in the same order as the
    # input fields so paths line up with their requirements — confirm.
    resolvable_jar_requirements = [
        dataclasses.replace(
            req, jar=None, url=f"file:{Coursier.working_directory_placeholder}/{path}"
        )
        for req, path in zip((i[0] for i in jars), jar_file_paths)
    ]
    to_resolve = chain(no_jars, resolvable_jar_requirements)
    return CoursierResolveInfo(
        coord_arg_strings=frozenset(req.to_coord_arg_str() for req in to_resolve),
        digest=jar_files.snapshot.digest,
    )
@rule(level=LogLevel.DEBUG)
async def coursier_resolve_lockfile(
    bash: BashBinary,
    coursier: Coursier,
    artifact_requirements: ArtifactRequirements,
) -> CoursierResolvedLockfile:
    """Run `coursier fetch ...` against a list of Maven coordinates and capture the result.

    This rule does two things in a single Process invocation:

        * Runs `coursier fetch` to let Coursier do the heavy lifting of resolving
          dependencies and downloading resolved artifacts (jars, etc).
        * Copies the resolved artifacts into the Process output directory, capturing
          the artifacts as content-addressed `Digest`s.

    It's important that this happens in the same process, since the process isn't
    guaranteed to run on the same machine as the rule, nor is a subsequent process
    invocation.  This guarantees that whatever Coursier resolved, it was fully
    captured into Pants' content addressed artifact storage.

    Note however that we still get the benefit of Coursier's "global" cache if it
    had already been run on the machine where the `coursier fetch` runs, so rerunning
    `coursier fetch` tends to be fast in practice.

    Finally, this rule bundles up the result into a `CoursierResolvedLockfile`.  This
    data structure encapsulates everything necessary to either materialize the
    resolved dependencies to a classpath for Java invocations, or to write the
    lockfile out to the workspace to hermetically freeze the result of the resolve.
    """
    # An empty resolve needs no process invocation at all.
    if len(artifact_requirements) == 0:
        return CoursierResolvedLockfile(entries=())
    coursier_resolve_info = await Get(
        CoursierResolveInfo, ArtifactRequirements, artifact_requirements
    )
    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        Process(
            argv=coursier.args(
                [
                    coursier_report_file_name,
                    *coursier_resolve_info.coord_arg_strings,
                    # TODO(#13496): Disable --strict-include to work around Coursier issue
                    # https://github.com/coursier/coursier/issues/1364 which erroneously rejects underscores in
                    # artifact rules as malformed.
                    # *(
                    #     f"--strict-include={req.to_coord_str(versioned=False)}"
                    #     for req in artifact_requirements
                    #     if req.strict
                    # ),
                ],
                wrapper=[bash.path, coursier.wrapper_script],
            ),
            input_digest=coursier_resolve_info.digest,
            immutable_input_digests=coursier.immutable_input_digests,
            output_directories=("classpath",),
            output_files=(coursier_report_file_name,),
            append_only_caches=coursier.append_only_caches,
            env=coursier.env,
            description=(
                "Running `coursier fetch` against "
                f"{pluralize(len(artifact_requirements), 'requirement')}: "
                f"{', '.join(req.to_coord_arg_str() for req in artifact_requirements)}"
            ),
            level=LogLevel.DEBUG,
        ),
    )
    # Extract and parse the JSON report that the wrapper script wrote.
    report_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
    )
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)
    # Subset each fetched artifact out of the `classpath/` output directory and compute
    # its content digest individually, so entries are independently addressable.
    artifact_file_names = tuple(
        classpath_dest_filename(dep["coord"], dep["file"]) for dep in report["dependencies"]
    )
    artifact_output_paths = tuple(f"classpath/{file_name}" for file_name in artifact_file_names)
    artifact_digests = await MultiGet(
        Get(Digest, DigestSubset(process_result.output_digest, PathGlobs([output_path])))
        for output_path in artifact_output_paths
    )
    stripped_artifact_digests = await MultiGet(
        Get(Digest, RemovePrefix(artifact_digest, "classpath"))
        for artifact_digest in artifact_digests
    )
    artifact_file_digests = await MultiGet(
        Get(FileDigest, ExtractFileDigest(stripped_artifact_digest, file_name))
        for stripped_artifact_digest, file_name in zip(
            stripped_artifact_digests, artifact_file_names
        )
    )
    # First pass: build entries straight from Coursier's report, one per dependency.
    first_pass_lockfile = CoursierResolvedLockfile(
        entries=tuple(
            CoursierLockfileEntry(
                coord=Coordinate.from_coord_str(dep["coord"]),
                direct_dependencies=Coordinates(
                    Coordinate.from_coord_str(dd) for dd in dep["directDependencies"]
                ),
                dependencies=Coordinates(Coordinate.from_coord_str(d) for d in dep["dependencies"]),
                file_name=file_name,
                file_digest=artifact_file_digest,
            )
            for dep, file_name, artifact_file_digest in zip(
                report["dependencies"], artifact_file_names, artifact_file_digests
            )
        )
    )
    # Second pass: re-attach the requirement-level metadata (remote URL / owning target
    # address) that Coursier's report does not carry, keyed by coordinate.
    inverted_artifacts = {req.coordinate: req for req in artifact_requirements}
    new_entries = []
    for entry in first_pass_lockfile.entries:
        req = inverted_artifacts.get(entry.coord)
        if req:
            address = req.jar.address if req.jar else None
            address_spec = address.spec if address else None
            entry = dataclasses.replace(entry, remote_url=req.url, pants_address=address_spec)
        new_entries.append(entry)
    return CoursierResolvedLockfile(entries=tuple(new_entries))
@rule(desc="Fetch with coursier")
async def fetch_with_coursier(request: CoursierFetchRequest) -> FallibleClasspathEntry:
    """Fetch a single `jvm_artifact` target's entry (and its transitive deps) from the lockfile."""
    # TODO: Loading this per JvmArtifact.
    lockfile = await Get(CoursierResolvedLockfile, CoursierResolveKey, request.resolve)
    # All of the transitive dependencies are exported.
    # TODO: Expose an option to control whether this exports only the root, direct dependencies,
    # transitive dependencies, etc.
    assert len(request.component.members) == 1, "JvmArtifact does not have dependencies."
    root_entry, transitive_entries = lockfile.dependencies(
        request.resolve,
        ArtifactRequirement.from_jvm_artifact_target(request.component.representative).coordinate,
    )
    # Fetch each lockfile entry individually to maximize cache hits (see coursier_fetch_one_coord).
    classpath_entries = await MultiGet(
        Get(ClasspathEntry, CoursierLockfileEntry, entry)
        for entry in (root_entry, *transitive_entries)
    )
    exported_digest = await Get(Digest, MergeDigests(cpe.digest for cpe in classpath_entries))
    # Fetching never fails at this layer, so the result is always SUCCEEDED with exit code 0.
    return FallibleClasspathEntry(
        description=str(request.component),
        result=CompileResult.SUCCEEDED,
        output=ClasspathEntry.merge(exported_digest, classpath_entries),
        exit_code=0,
    )
# Marker collection type so the engine can distinguish "all fetched entries of a lockfile"
# from other ClasspathEntry collections.
class ResolvedClasspathEntries(Collection[ClasspathEntry]):
    """A collection of resolved classpath entries."""
@rule
async def coursier_fetch_one_coord(
    bash: BashBinary,
    coursier: Coursier,
    request: CoursierLockfileEntry,
) -> ClasspathEntry:
    """Run `coursier fetch --intransitive` to fetch a single artifact.

    This rule exists to permit efficient subsetting of a "global" classpath
    in the form of a lockfile.  Callers can determine what subset of dependencies
    from the lockfile are needed for a given target, then request those
    lockfile entries individually.

    By fetching only one entry at a time, we maximize our cache efficiency.  If instead
    we fetched the entire subset that the caller wanted, there would be a different cache
    key for every possible subset.

    This rule also guarantees exact reproducibility.  If all caches have been
    removed, `coursier fetch` will re-download the artifact, and this rule will
    confirm that what was downloaded matches exactly (by content digest) what
    was specified in the lockfile (what Coursier originally downloaded).
    """
    # Prepare any URL- or JAR-specifying entries for use with Coursier
    req: ArtifactRequirement
    if request.pants_address:
        # The entry is backed by a local JAR target: re-resolve that target to get the jar field.
        targets = await Get(
            Targets, UnparsedAddressInputs([request.pants_address], owning_address=None)
        )
        req = ArtifactRequirement(request.coord, jar=targets[0][JvmArtifactJarSourceField])
    else:
        req = ArtifactRequirement(request.coord, url=request.remote_url)
    coursier_resolve_info = await Get(
        CoursierResolveInfo,
        ArtifactRequirements([req]),
    )
    coursier_report_file_name = "coursier_report.json"
    process_result = await Get(
        ProcessResult,
        Process(
            argv=coursier.args(
                [
                    coursier_report_file_name,
                    # `--intransitive`: fetch exactly this coordinate, not its dependencies.
                    "--intransitive",
                    *coursier_resolve_info.coord_arg_strings,
                ],
                wrapper=[bash.path, coursier.wrapper_script],
            ),
            input_digest=coursier_resolve_info.digest,
            immutable_input_digests=coursier.immutable_input_digests,
            output_directories=("classpath",),
            output_files=(coursier_report_file_name,),
            append_only_caches=coursier.append_only_caches,
            env=coursier.env,
            description=f"Fetching with coursier: {request.coord.to_coord_str()}",
            level=LogLevel.DEBUG,
        ),
    )
    report_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([coursier_report_file_name]))
    )
    report_contents = await Get(DigestContents, Digest, report_digest)
    report = json.loads(report_contents[0].content)
    report_deps = report["dependencies"]
    # An intransitive fetch must produce exactly one dependency in the report.
    if len(report_deps) == 0:
        raise CoursierError("Coursier fetch report has no dependencies (i.e. nothing was fetched).")
    elif len(report_deps) > 1:
        raise CoursierError(
            "Coursier fetch report has multiple dependencies, but exactly 1 was expected."
        )
    dep = report_deps[0]
    # Verify that what Coursier fetched is the coordinate we asked for...
    resolved_coord = Coordinate.from_coord_str(dep["coord"])
    if resolved_coord != request.coord:
        raise CoursierError(
            f'Coursier resolved coord "{resolved_coord.to_coord_str()}" does not match requested coord "{request.coord.to_coord_str()}".'
        )
    classpath_dest_name = classpath_dest_filename(dep["coord"], dep["file"])
    classpath_dest = f"classpath/{classpath_dest_name}"
    resolved_file_digest = await Get(
        Digest, DigestSubset(process_result.output_digest, PathGlobs([classpath_dest]))
    )
    stripped_digest = await Get(Digest, RemovePrefix(resolved_file_digest, "classpath"))
    file_digest = await Get(
        FileDigest,
        ExtractFileDigest(stripped_digest, classpath_dest_name),
    )
    # ...and that its content digest matches the lockfile exactly (reproducibility guarantee).
    if file_digest != request.file_digest:
        raise CoursierError(
            f"Coursier fetch for '{resolved_coord}' succeeded, but fetched artifact {file_digest} did not match the expected artifact: {request.file_digest}."
        )
    return ClasspathEntry(digest=stripped_digest, filenames=(classpath_dest_name,))
@rule(level=LogLevel.DEBUG)
async def coursier_fetch_lockfile(lockfile: CoursierResolvedLockfile) -> ResolvedClasspathEntries:
    """Fetch every artifact in a lockfile."""
    # Fan out one fetch per entry so each artifact is cached independently.
    classpath_entries = await MultiGet(
        Get(ClasspathEntry, CoursierLockfileEntry, entry) for entry in lockfile.entries
    )
    return ResolvedClasspathEntries(classpath_entries)
@rule
async def select_coursier_resolve_for_targets(
    coarsened_targets: CoarsenedTargets, jvm: JvmSubsystem
) -> CoursierResolveKey:
    """Selects and validates (transitively) a single resolve for a set of roots in a compile graph.

    In most cases, a `CoursierResolveKey` should be requested for a single `CoarsenedTarget` root,
    which avoids coupling un-related roots unnecessarily. But in other cases, a single compatible
    resolve is required for multiple roots (such as when running a `repl` over unrelated code), and
    in that case there might be multiple CoarsenedTargets.
    """
    root_targets = [t for ct in coarsened_targets for t in ct.members]
    # Find the set of resolves that are compatible with all roots by ANDing them all together.
    compatible_resolves: set[str] | None = None
    for tgt in root_targets:
        current_resolves = set(jvm.resolves_for_target(tgt))
        if compatible_resolves is None:
            # First target: seed the intersection.
            compatible_resolves = current_resolves
        else:
            compatible_resolves &= current_resolves
    # Select a resolve from the compatible set.
    if not compatible_resolves:
        raise NoCompatibleResolve(
            jvm, "The selected targets did not have a resolve in common", root_targets
        )
    # Take the first compatible resolve.
    # `min` gives a deterministic choice regardless of set iteration order.
    resolve = min(compatible_resolves)
    # Validate that the selected resolve is compatible with all transitive dependencies.
    incompatible_targets = []
    for ct in coarsened_targets.closure():
        for t in ct.members:
            target_resolves = jvm.resolves_for_target(t)
            if target_resolves is not None and resolve not in target_resolves:
                incompatible_targets.append(t)
    if incompatible_targets:
        raise NoCompatibleResolve(
            jvm,
            f"The resolve chosen for the root targets was {resolve}, but some of their "
            "dependencies were not compatible with that resolve",
            incompatible_targets,
        )
    # Load the resolve.
    resolve_path = jvm.resolves[resolve]
    lockfile_source = PathGlobs(
        [resolve_path],
        # A missing lockfile is a hard error, with a pointer back to the option that named it.
        glob_match_error_behavior=GlobMatchErrorBehavior.error,
        description_of_origin=f"The resolve `{resolve}` from `[jvm].resolves`",
    )
    resolve_digest = await Get(Digest, PathGlobs, lockfile_source)
    return CoursierResolveKey(resolve, resolve_path, resolve_digest)
@rule
async def get_coursier_lockfile_for_resolve(
    coursier_resolve: CoursierResolveKey,
) -> CoursierResolvedLockfile:
    """Read and parse the lockfile file captured by a resolve key."""
    lockfile_digest_contents = await Get(DigestContents, Digest, coursier_resolve.digest)
    # The resolve key's digest holds exactly one file: the lockfile itself.
    lockfile_contents = lockfile_digest_contents[0].content
    return CoursierResolvedLockfile.from_json_dict(json.loads(lockfile_contents))
@dataclass(frozen=True)
class MaterializedClasspathRequest:
    """A helper to merge various classpath elements.

    :param prefix: if set, should be a relative directory that will
        be prepended to every classpath element.  This is useful for
        keeping all classpath elements isolated under a single directory
        in a process invocation, where other inputs on the process's
        root directory might interfere with un-prefixed classpath
        entries (or vice versa).
    """

    prefix: str | None = None
    # Already-resolved lockfiles to materialize as-is.
    lockfiles: tuple[CoursierResolvedLockfile, ...] = ()
    # Requirement groups that still need to be resolved before materialization.
    artifact_requirements: tuple[ArtifactRequirements, ...] = ()
@dataclass(frozen=True)
class MaterializedClasspath:
    """A fully fetched and merged classpath, ready to hand to a JVM process invocation.

    TODO: Consider renaming to reflect the fact that this is always a 3rdparty classpath.
    """

    content: Snapshot

    @property
    def digest(self) -> Digest:
        """The digest of the merged classpath snapshot."""
        return self.content.digest

    def classpath_entries(self, root: str | None = None) -> Iterator[str]:
        """Returns optionally prefixed classpath entry filenames.

        :param prefix: if set, will be prepended to all entries.  This is useful
            if the process working directory is not the same as the root
            directory for the process input `Digest`.
        """
        filenames = self.content.files
        if root is None:
            yield from filenames
        else:
            for filename in filenames:
                yield os.path.join(root, filename)
@rule(level=LogLevel.DEBUG)
async def materialize_classpath(request: MaterializedClasspathRequest) -> MaterializedClasspath:
    """Resolve, fetch, and merge various classpath types to a single `Digest` and metadata."""
    # Resolve the unresolved requirement groups into lockfiles first.
    artifact_requirements_lockfiles = await MultiGet(
        Get(CoursierResolvedLockfile, ArtifactRequirements, artifact_requirements)
        for artifact_requirements in request.artifact_requirements
    )
    # Fetch every entry of every lockfile (both pre-resolved and just-resolved ones).
    lockfile_and_requirements_classpath_entries = await MultiGet(
        Get(
            ResolvedClasspathEntries,
            CoursierResolvedLockfile,
            lockfile,
        )
        for lockfile in (*request.lockfiles, *artifact_requirements_lockfiles)
    )
    # Merge all fetched artifacts into one snapshot.
    merged_snapshot = await Get(
        Snapshot,
        MergeDigests(
            classpath_entry.digest
            for classpath_entries in lockfile_and_requirements_classpath_entries
            for classpath_entry in classpath_entries
        ),
    )
    # Optionally nest everything under the requested prefix directory.
    if request.prefix is not None:
        merged_snapshot = await Get(Snapshot, AddPrefix(merged_snapshot.digest, request.prefix))
    return MaterializedClasspath(content=merged_snapshot)
def rules():
    """Rules for resolving and fetching JVM artifacts via Coursier."""
    return [
        *collect_rules(),
        *coursier_setup.rules(),
        # Register this backend as a provider of classpath entries for jvm_artifact targets.
        UnionRule(ClasspathEntryRequest, CoursierFetchRequest),
    ]
| 39.580607
| 179
| 0.684071
|
4a1a40c657d820864c828e0fcfb905bc1d0dd87c
| 3,251
|
py
|
Python
|
venv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py
|
997Yi/Flask-web
|
6b5e5d274bfa25fbd3db5af02723a5671f1e901d
|
[
"MIT"
] | 3
|
2020-08-04T20:29:41.000Z
|
2020-11-09T09:28:19.000Z
|
venv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py
|
997Yi/Flask-web
|
6b5e5d274bfa25fbd3db5af02723a5671f1e901d
|
[
"MIT"
] | 30
|
2020-04-15T19:37:40.000Z
|
2020-04-22T21:19:35.000Z
|
venv/Lib/site-packages/networkx/readwrite/json_graph/tests/test_node_link.py
|
997Yi/Flask-web
|
6b5e5d274bfa25fbd3db5af02723a5671f1e901d
|
[
"MIT"
] | 2
|
2020-03-12T23:20:22.000Z
|
2021-02-15T21:54:02.000Z
|
# -*- coding: utf-8 -*-
import json
import pytest
import networkx as nx
from networkx.readwrite.json_graph import *
class TestNodeLink:
    """Round-trip tests for node_link_data / node_link_graph."""

    def test_graph(self):
        G = nx.path_graph(4)
        H = node_link_graph(node_link_data(G))
        assert nx.is_isomorphic(G, H)

    def test_graph_attributes(self):
        G = nx.path_graph(4)
        G.add_node(1, color='red')
        G.add_edge(1, 2, width=7)
        G.graph[1] = 'one'
        G.graph['foo'] = 'bar'

        H = node_link_graph(node_link_data(G))
        assert H.graph['foo'] == 'bar'
        assert H.nodes[1]['color'] == 'red'
        assert H[1][2]['width'] == 7

        # A full JSON round trip stringifies non-string graph keys (1 -> '1').
        d = json.dumps(node_link_data(G))
        H = node_link_graph(json.loads(d))
        assert H.graph['foo'] == 'bar'
        assert H.graph['1'] == 'one'
        assert H.nodes[1]['color'] == 'red'
        assert H[1][2]['width'] == 7

    def test_digraph(self):
        G = nx.DiGraph()
        H = node_link_graph(node_link_data(G))
        assert H.is_directed()

    def test_multigraph(self):
        G = nx.MultiGraph()
        G.add_edge(1, 2, key='first')
        G.add_edge(1, 2, key='second', color='blue')
        H = node_link_graph(node_link_data(G))
        # Bug fix: the original discarded the is_isomorphic result instead of asserting it.
        assert nx.is_isomorphic(G, H)
        assert H[1][2]['second']['color'] == 'blue'

    def test_graph_with_tuple_nodes(self):
        G = nx.Graph()
        G.add_edge((0, 0), (1, 0), color=[255, 255, 0])
        d = node_link_data(G)
        dumped_d = json.dumps(d)
        dd = json.loads(dumped_d)
        H = node_link_graph(dd)
        assert H.nodes[(0, 0)] == G.nodes[(0, 0)]
        assert H[(0, 0)][(1, 0)]['color'] == [255, 255, 0]

    def test_unicode_keys(self):
        # Python-2 `unicode(...)` fallback removed: on Python 3 a str literal is already Unicode.
        q = "qualité"
        G = nx.Graph()
        G.add_node(1, **{q: q})
        s = node_link_data(G)
        output = json.dumps(s, ensure_ascii=False)
        data = json.loads(output)
        H = node_link_graph(data)
        assert H.nodes[1][q] == q

    def test_exception(self):
        with pytest.raises(nx.NetworkXError):
            G = nx.MultiDiGraph()
            # `key` colliding with `name`/`source`/`target` must be rejected.
            attrs = dict(name='node', source='node', target='node', key='node')
            node_link_data(G, attrs)

    def test_string_ids(self):
        # Python-2 `unicode(...)` fallback removed: on Python 3 a str literal is already Unicode.
        q = "qualité"
        G = nx.DiGraph()
        G.add_node('A')
        G.add_node(q)
        G.add_edge('A', q)
        data = node_link_data(G)
        assert data['links'][0]['source'] == 'A'
        assert data['links'][0]['target'] == q
        H = node_link_graph(data)
        assert nx.is_isomorphic(G, H)

    def test_custom_attrs(self):
        G = nx.path_graph(4)
        G.add_node(1, color='red')
        G.add_edge(1, 2, width=7)
        G.graph[1] = 'one'
        G.graph['foo'] = 'bar'

        attrs = dict(source='c_source', target='c_target', name='c_id', key='c_key', link='c_links')
        H = node_link_graph(node_link_data(G, attrs=attrs), multigraph=False, attrs=attrs)
        assert nx.is_isomorphic(G, H)
        assert H.graph['foo'] == 'bar'
        assert H.nodes[1]['color'] == 'red'
        assert H[1][2]['width'] == 7
| 30.669811
| 100
| 0.538604
|
4a1a418e74726271e3c0920a04ff9d32248986f8
| 953
|
py
|
Python
|
src/visions/backends/python/types/date_time.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 142
|
2020-01-07T21:17:10.000Z
|
2022-03-30T13:10:14.000Z
|
src/visions/backends/python/types/date_time.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 121
|
2020-01-07T02:26:38.000Z
|
2022-03-29T17:18:19.000Z
|
src/visions/backends/python/types/date_time.py
|
bhumikapahariapuresoftware/visions
|
8838d89b4f02e401112378b4662a779227ead9f8
|
[
"BSD-4-Clause"
] | 18
|
2020-02-17T03:17:37.000Z
|
2022-02-20T14:01:11.000Z
|
from datetime import datetime
from typing import Sequence
from visions.backends.python.series_utils import sequence_not_empty
from visions.types.date_time import DateTime
from visions.types.string import String
@DateTime.register_relationship(String, Sequence)
def string_is_datetime(sequence: Sequence, state: dict) -> bool:
    """Whether every string in ``sequence`` parses as a datetime (via the transformer)."""
    try:
        list(string_to_datetime(sequence, state))
    except (OverflowError, TypeError, ValueError):
        return False
    return True
@DateTime.register_transformer(String, Sequence)
def string_to_datetime(sequence: Sequence, state: dict) -> Sequence:
    """
    Python 3.7+
        return map(datetime.fromisoformat, sequence)
    """
    return tuple(datetime.strptime(value, "%Y-%m-%d %H:%M:%S") for value in sequence)
@DateTime.contains_op.register
@sequence_not_empty
def datetime_contains(sequence: Sequence, state: dict) -> bool:
    """True when the (non-empty) sequence holds only ``datetime`` instances."""
    for value in sequence:
        if not isinstance(value, datetime):
            return False
    return True
| 30.741935
| 84
| 0.748164
|
4a1a424cf02abd0d673b692d675ef7d22775dcde
| 1,814
|
py
|
Python
|
MolRep/Models/graph_based/MorganFP.py
|
UsedToBe97/MolRep
|
b39c427f3600056dd67b89bfbdce45cdc91a1e10
|
[
"MIT"
] | null | null | null |
MolRep/Models/graph_based/MorganFP.py
|
UsedToBe97/MolRep
|
b39c427f3600056dd67b89bfbdce45cdc91a1e10
|
[
"MIT"
] | null | null | null |
MolRep/Models/graph_based/MorganFP.py
|
UsedToBe97/MolRep
|
b39c427f3600056dd67b89bfbdce45cdc91a1e10
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch_geometric.nn import global_add_pool
class MorganFP(torch.nn.Module):
    """Two-layer MLP over a fixed-length (2048-bit) Morgan fingerprint.

    :param dim_features: ignored — Morgan fingerprints are fixed at 2048 bits.
    :param dim_target: output dimension (number of targets / classes).
    :param model_configs: must provide 'hidden_units' (hidden layer width).
    :param dataset_configs: must provide 'task_type'; 'multiclass_num_classes'
        is read only for the multiclass task.
    """

    def __init__(self, dim_features, dim_target, model_configs, dataset_configs):
        super(MorganFP, self).__init__()
        hidden_dim = model_configs['hidden_units']
        # Morgan fingerprints are fixed-length bit vectors, so the incoming
        # dim_features is intentionally overridden.
        dim_features = 2048
        self.mlp = torch.nn.Sequential(torch.nn.Linear(dim_features, hidden_dim), nn.ReLU(),
                                       torch.nn.Linear(hidden_dim, dim_target))

        self.task_type = dataset_configs["task_type"]
        # NOTE(review): 'Multi-Classification' here vs 'Multiclass-Classification'
        # below — with the spelling below, multiclass_num_classes stays None for a
        # multiclass task. One of these strings is likely wrong; confirm against
        # the dataset configs actually used.
        self.multiclass_num_classes = dataset_configs["multiclass_num_classes"] if self.task_type == 'Multi-Classification' else None

        self.classification = self.task_type == 'Classification'
        if self.classification:
            self.sigmoid = nn.Sigmoid()
        self.multiclass = self.task_type == 'Multiclass-Classification'
        if self.multiclass:
            self.multiclass_softmax = nn.Softmax(dim=2)
        self.regression = self.task_type == 'Regression'
        if self.regression:
            self.relu = nn.ReLU()
        # Bug fix: at most ONE task mode may be active. The original
        # `assert not (a and b and c)` only rejected all three being true at once.
        assert [self.classification, self.multiclass, self.regression].count(True) <= 1

    def forward(self, data):
        """Run the MLP on ``data.morgan_fp`` and apply the task-specific head."""
        x = self.mlp(data.morgan_fp)
        # Don't apply sigmoid during training b/c using BCEWithLogitsLoss
        if self.classification and not self.training:
            x = self.sigmoid(x)
        if self.multiclass:
            x = x.reshape((x.size(0), -1, self.multiclass_num_classes))  # batch size x num targets x num classes per target
            if not self.training:
                x = self.multiclass_softmax(x)  # to get probabilities during evaluation, but not during training as we're using CrossEntropyLoss
        return x
| 42.186047
| 144
| 0.656009
|
4a1a4350e6878b97a317c0df1443d8b14654212e
| 403
|
py
|
Python
|
company/company_type.py
|
thinkstack-co/ConnectPyse
|
ded8b426250aee352598f33ad08b7bcc3c6a3017
|
[
"MIT"
] | 23
|
2017-01-24T05:44:05.000Z
|
2021-11-26T17:08:01.000Z
|
company/company_type.py
|
thinkstack-co/ConnectPyse
|
ded8b426250aee352598f33ad08b7bcc3c6a3017
|
[
"MIT"
] | 10
|
2017-01-14T21:11:10.000Z
|
2019-06-16T21:10:29.000Z
|
company/company_type.py
|
thinkstack-co/ConnectPyse
|
ded8b426250aee352598f33ad08b7bcc3c6a3017
|
[
"MIT"
] | 16
|
2017-01-24T02:28:19.000Z
|
2021-07-13T17:23:22.000Z
|
from ..cw_model import CWModel
class CompanyType(CWModel):
    """Data model for a ConnectWise company type record.

    Field comments preserve the API-side types; `*` marks a field the original
    author flagged (presumably required — confirm against the ConnectWise schema).
    """

    def __init__(self, json_dict=None):
        self.id = None  # (Integer)
        self.name = None  # *(String(50))
        self.defaultFlag = None  # (Boolean)
        self.vendorFlag = None  # (Boolean)
        self._info = None  # (Metadata)

        # initialize object with json dict
        super().__init__(json_dict)
| 26.866667
| 45
| 0.583127
|
4a1a43f88ad95bc8d6ba62c7a845f38b341137af
| 3,349
|
py
|
Python
|
cvxpy/transforms/indicator.py
|
gabrielhartmann/cvxpy
|
8197e683b6e7b5cc9118505a5c95f102007747f1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-04-15T14:01:03.000Z
|
2019-04-15T14:01:03.000Z
|
cvxpy/transforms/indicator.py
|
gabrielhartmann/cvxpy
|
8197e683b6e7b5cc9118505a5c95f102007747f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cvxpy/transforms/indicator.py
|
gabrielhartmann/cvxpy
|
8197e683b6e7b5cc9118505a5c95f102007747f1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""
Copyright 2017 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.expressions.expression import Expression
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class indicator(Expression):
    """An expression representing the convex function I(constraints) = 0
    if constraints hold, +infty otherwise.

    Parameters
    ----------
    constraints : list
        A list of constraint objects.
    err_tol:
        A numeric tolerance for determining whether the constraints hold.
    """

    def __init__(self, constraints, err_tol=1e-3):
        self.args = constraints
        self.err_tol = err_tol
        super(indicator, self).__init__()

    def is_convex(self):
        """Is the expression convex?
        """
        # An indicator of a constraint set is always convex.
        return True

    def is_concave(self):
        """Is the expression concave?
        """
        return False

    def is_nonneg(self):
        """Is the expression positive?
        """
        # The indicator takes values in {0, +inf}, so it is always nonnegative.
        return True

    def is_nonpos(self):
        """Is the expression negative?
        """
        return False

    def is_imag(self):
        """Is the Leaf imaginary?
        """
        return False

    def is_complex(self):
        """Is the Leaf complex valued?
        """
        return False

    def get_data(self):
        """Returns info needed to reconstruct the expression besides the args.
        """
        return [self.err_tol]

    @property
    def shape(self):
        """Returns the (row, col) dimensions of the expression.
        """
        # The indicator is always scalar-valued.
        return ()

    def name(self):
        """Returns the string representation of the expression.
        """
        return "Indicator(%s)" % str(self.args)

    def domain(self):
        """A list of constraints describing the closure of the region
        where the expression is finite.
        """
        return self.args

    @property
    def value(self):
        """Returns the numeric value of the expression.

        Returns:
            A numpy matrix or a scalar.
        """
        if all(cons.value() for cons in self.args):
            return 0
        else:
            # Bug fix: `np.infty` is a deprecated alias removed in NumPy 2.0;
            # use the canonical `np.inf`.
            return np.inf

    @property
    def grad(self):
        """Gives the (sub/super)gradient of the expression w.r.t. each variable.

        Matrix expressions are vectorized, so the gradient is a matrix.
        None indicates variable values unknown or outside domain.

        Returns:
            A map of variable to SciPy CSC sparse matrix or None.
        """
        # TODO
        return NotImplemented

    def canonicalize(self):
        """Returns the graph implementation of the object.

        Returns:
            A tuple of (affine expression, [constraints]).
        """
        # The indicator itself contributes the constant 0; its constraints are
        # hoisted into the problem's constraint list.
        constraints = []
        for cons in self.args:
            constraints += cons.canonical_form[1]
        return (lu.create_const(0, (1, 1)), constraints)
| 26.579365
| 80
| 0.616303
|
4a1a4436ad48eeefa451ad9cc850714a19ceefef
| 869
|
py
|
Python
|
tests/test_examples.py
|
vberlier/tokenstream
|
70fca6405cca8f2d08186c118cb6322d9c7c176e
|
[
"MIT"
] | 5
|
2021-06-17T07:57:28.000Z
|
2022-01-05T16:20:08.000Z
|
tests/test_examples.py
|
vberlier/tokenstream
|
70fca6405cca8f2d08186c118cb6322d9c7c176e
|
[
"MIT"
] | 66
|
2021-06-22T13:18:48.000Z
|
2022-03-30T13:14:38.000Z
|
tests/test_examples.py
|
vberlier/tokenstream
|
70fca6405cca8f2d08186c118cb6322d9c7c176e
|
[
"MIT"
] | null | null | null |
import json
import pytest
from examples.calculator import calculate_sum
from examples.json import parse_json
from tokenstream import TokenStream
@pytest.mark.parametrize(
    "source",
    [
        "123",
        "1 + 2 + 3",
        "1 * 2 * 3",
        "1 + 2 * 3",
        "1 * 2 + 3",
        "1 + 2 - 3 * 4 / 5",
        "(1 + 2 - 3) * 4 / 5",
        "1 + (2 - 3) * 4 / 5",
        "((1) + (2 - (3)) * 4 / 5)",
    ],
)
def test_calculator(source: str):
    """The example calculator must agree with Python's own arithmetic (via eval) on each input."""
    # NOTE: eval is safe here because the inputs are fixed literals above, not user input.
    assert calculate_sum(TokenStream(source)) == eval(source)
@pytest.mark.parametrize(
    "source",
    [
        r'{"hello": "world"}',
        r'{"hello": [1, 2, 3, "thing"]}',
        r'{"hello": [1, 2, 3, "thing"], "other": {}}',
        r"123",
        r"{}",
        r"[]",
        r'"foo"',
    ],
)
def test_json(source: str):
    """The example JSON parser must agree with the stdlib json module on each document."""
    assert parse_json(TokenStream(source)) == json.loads(source)
| 20.690476
| 64
| 0.474108
|
4a1a452fef4236efba5d2332fa233683c1d4b7b9
| 13,673
|
py
|
Python
|
lisa/parameter_parser/runbook.py
|
tyhicks/lisa
|
50d07cbd13e4e777eaa211b01387721fe2d2094f
|
[
"MIT"
] | 65
|
2020-12-15T13:42:29.000Z
|
2022-03-03T13:14:16.000Z
|
lisa/parameter_parser/runbook.py
|
acidburn0zzz/lisa
|
3934d0546592d3ff71bc3e2c4aab5d4bc646a3b9
|
[
"MIT"
] | 236
|
2020-11-24T18:28:26.000Z
|
2022-03-30T19:19:25.000Z
|
lisa/parameter_parser/runbook.py
|
acidburn0zzz/lisa
|
3934d0546592d3ff71bc3e2c4aab5d4bc646a3b9
|
[
"MIT"
] | 52
|
2020-12-08T17:40:46.000Z
|
2022-03-31T18:24:14.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Union, cast
import yaml
from marshmallow import Schema
from lisa import schema
from lisa.util import LisaException, constants
from lisa.util.logger import get_logger
from lisa.util.package import import_package
from lisa.variable import VariableEntry, load_variables, replace_variables
# Module-level cache slot for the runbook's marshmallow schema.
# NOTE(review): populated elsewhere in this module (not visible here) — presumably lazily
# on first validation; confirm.
_schema: Optional[Schema] = None

# Logger factory pre-bound to the "init"/"runbook" component names.
_get_init_logger = partial(get_logger, "init", "runbook")
class RunbookBuilder:
    def __init__(
        self,
        path: Path,
        cmd_args: Optional[List[str]] = None,
    ) -> None:
        """Initialize a builder for the runbook at `path`.

        :param path: path to the runbook file; its parent becomes RUNBOOK_PATH.
        :param cmd_args: variable overrides from the command line (highest precedence).
        """
        if cmd_args is None:
            cmd_args = []
        self._log = _get_init_logger()
        self._path = path
        self._cmd_args = cmd_args
        # Populated later (see from_path): merged runbook data and resolved variables.
        self._raw_data: Any = None
        self._variables: Dict[str, VariableEntry] = {}
        # Side effect: record the runbook location in process-wide constants.
        constants.RUNBOOK_PATH = self._path.parent
        constants.RUNBOOK_FILE = self._path
    @property
    def variables(self) -> Dict[str, VariableEntry]:
        """Variables loaded from the runbook data plus command-line overrides."""
        return self._variables

    @property
    def raw_data(self) -> Any:
        """The merged runbook data, before variable substitution."""
        return self._raw_data

    @property
    def runbook(self) -> schema.Runbook:
        """Convenience accessor: performs a full resolve on every access."""
        return self.resolve()
    @staticmethod
    def from_path(
        path: Path,
        cmd_args: Optional[List[str]] = None,
    ) -> "RunbookBuilder":
        """
        Loads a runbook given a user-supplied path and set of variables.
        """
        builder = RunbookBuilder(path=path, cmd_args=cmd_args)

        # load lisa itself modules, it's for subclasses, and other dynamic loading.
        base_module_path = Path(__file__).parent.parent
        import_package(base_module_path, enable_log=False)

        # merge all parameters
        builder._log.info(f"loading runbook: {builder._path}")
        data = builder._load_data(
            builder._path.absolute(), set(), higher_level_variables=builder._cmd_args
        )
        builder._raw_data = data

        # load final variables
        variables = load_variables(
            runbook_data=data, higher_level_variables=builder._cmd_args
        )
        builder._variables = variables

        # Extensions may register new schema/classes, so load them before any resolve.
        builder._import_extensions()

        # remove variables and extensions from data, since it's not used, and may be
        # confusing in log.
        if constants.VARIABLE in data:
            del data[constants.VARIABLE]

        # Derive the run name from the runbook's (variable-resolved) name field.
        runbook_name = builder.partial_resolve(constants.NAME)
        constants.RUN_NAME = f"lisa_{runbook_name}_{constants.RUN_ID}"
        builder._log.info(f"run name is '{constants.RUN_NAME}'")
        return builder
def resolve(
self, variables: Optional[Dict[str, VariableEntry]] = None
) -> schema.Runbook:
parsed_data = self._internal_resolve(self.raw_data, variables)
# validate runbook, after extensions loaded
runbook = self._validate_and_load(parsed_data)
return runbook
def partial_resolve(
self, partial_name: str, variables: Optional[Dict[str, VariableEntry]] = None
) -> Any:
result: Any = None
if partial_name in self.raw_data:
raw_data = copy.deepcopy(self.raw_data[partial_name])
result = self._internal_resolve(raw_data, variables)
return result
def derive(
self, variables: Optional[Dict[str, VariableEntry]] = None
) -> "RunbookBuilder":
"""
create a new instance with a copy of variables. If the variables is not
given, it copies current variables
"""
result = RunbookBuilder(self._path, self._cmd_args)
if variables is None:
variables = {key: value.copy() for key, value in self.variables.items()}
result._variables = variables
result._raw_data = self._raw_data
return result
def dump_variables(self) -> None:
variables = self.variables
# log message for unused variables, it's helpful to see which variable
# is not used.
unused_keys = [key for key, value in variables.items() if not value.is_used]
if unused_keys:
self._log.debug(f"variables {unused_keys} are not used.")
# print runbook later, after __post_init__ executed, so secrets are handled.
for key, value in variables.items():
self._log.debug(f"variable '{key}': {value.data}")
def _internal_resolve(
self, raw_data: Any, variables: Optional[Dict[str, VariableEntry]] = None
) -> Any:
raw_data = copy.deepcopy(raw_data)
if variables is None:
variables = self.variables
try:
parsed_data = replace_variables(raw_data, variables)
except Exception as identifier:
# log current data for troubleshooting.
self._log.debug(f"parsed raw data: {raw_data}")
raise identifier
return parsed_data
def _import_extensions(self) -> None:
# load extended modules
if constants.EXTENSION in self._raw_data:
raw_extensions = self._load_extensions(
constants.RUNBOOK_PATH, self.raw_data, self.variables
)
extensions = schema.Extension.from_raw(raw_extensions)
for index, extension in enumerate(extensions):
if not extension.name:
extension.name = f"lisa_ext_{index}"
import_package(Path(extension.path), package_name=extension.name)
del self._raw_data[constants.EXTENSION]
@staticmethod
def _validate_and_load(data: Any) -> schema.Runbook:
global _schema
if not _schema:
_schema = schema.Runbook.schema() # type: ignore
assert _schema
runbook = cast(schema.Runbook, _schema.load(data))
log = _get_init_logger()
log.debug(f"parsed runbook: {runbook.to_dict()}") # type: ignore
return runbook
def _load_extensions(
self,
current_path: Path,
data: Any,
variables: Optional[Dict[str, VariableEntry]] = None,
) -> List[schema.Extension]:
results: List[schema.Extension] = []
if constants.EXTENSION in data:
raw_extensions: Any = data[constants.EXTENSION]
# replace variables in extensions names
if variables:
raw_extensions = replace_variables(raw_extensions, variables=variables)
# this is the first place to normalize extensions
extensions = schema.Extension.from_raw(raw_extensions)
for extension in extensions:
assert extension.path, "extension path must be specified"
# resolving to real path, it needs to compare for merging later.
if variables:
extension.path = replace_variables(
extension.path, variables=variables
)
extension.path = str(
current_path.joinpath(extension.path).absolute().resolve()
)
results.append(extension)
return results
def _merge_variables(
self,
merged_path: Path,
data_from_include: Dict[str, Any],
data_from_current: Dict[str, Any],
) -> List[Any]:
variables_from_include: List[schema.Variable] = []
if (
constants.VARIABLE in data_from_include
and data_from_include[constants.VARIABLE]
):
variables_from_include = [
schema.load_by_type(schema.Variable, variable)
for variable in data_from_include[constants.VARIABLE]
]
# resolve to absolute path
for included_var in variables_from_include:
if included_var.file:
included_var.file = str((merged_path / included_var.file).resolve())
if (
constants.VARIABLE in data_from_current
and data_from_current[constants.VARIABLE]
):
variables_from_current: List[schema.Variable] = [
schema.load_by_type(schema.Variable, variable)
for variable in data_from_current[constants.VARIABLE]
]
# remove duplicate items
for current_variable in variables_from_current:
for included_var in variables_from_include:
if (
included_var.name and included_var.name == current_variable.name
) or (
included_var.file and included_var.file == current_variable.file
):
variables_from_include.remove(included_var)
break
variables_from_include.extend(variables_from_current)
# serialize back for loading together
return [
variable.to_dict() for variable in variables_from_include # type: ignore
]
def _merge_extensions(
self,
merged_path: Path,
data_from_include: Dict[str, Any],
data_from_current: Dict[str, Any],
) -> List[Any]:
old_extensions = self._load_extensions(merged_path, data_from_include)
extensions = self._load_extensions(merged_path, data_from_current)
# remove duplicate paths
for old_extension in old_extensions:
for extension in extensions:
if extension.path == old_extension.path:
if not old_extension.name:
# specify name as possible
old_extension.name = extension.name
extensions.remove(extension)
break
if extensions or old_extensions:
# don't change the order, old ones should be imported earlier.
old_extensions.extend(extensions)
extensions = old_extensions
return extensions
def _merge_data(
self,
merged_path: Path,
data_from_include: Dict[str, Any],
data_from_current: Dict[str, Any],
) -> Dict[str, Any]:
"""
Merge included data to data_from_current. The current data has
higher precedence.
"""
result = data_from_include.copy()
# merge others
result.update(data_from_current)
# merge variables, latest should be effective last
variables = self._merge_variables(
merged_path, data_from_include, data_from_current
)
if variables:
result[constants.VARIABLE] = variables
# merge extensions
extensions = self._merge_extensions(
merged_path, data_from_include, data_from_current
)
if extensions:
result[constants.EXTENSION] = extensions
return result
def _load_data(
self,
path: Path,
used_path: Set[str],
higher_level_variables: Union[List[str], Dict[str, VariableEntry]],
) -> Any:
"""
Load runbook, but not to validate. It will be validated after
extensions are imported. To support partial runbooks, it loads
recursively.
"""
with open(path, "r") as file:
data_from_current = yaml.safe_load(file)
if not data_from_current:
raise LisaException(f"file '{path}' cannot be empty.")
variables = load_variables(
data_from_current, higher_level_variables=higher_level_variables
)
if (
constants.INCLUDE in data_from_current
and data_from_current[constants.INCLUDE]
):
includes = data_from_current[constants.INCLUDE]
log = _get_init_logger()
indent = len(used_path) * 4 * " "
data_from_include: Dict[str, Any] = {}
for include_raw in includes:
try:
include: schema.Include
include = schema.load_by_type(schema.Include, include_raw)
except Exception as identifer:
raise LisaException(
f"error on loading include node [{include_raw}]: {identifer}"
)
if include.strategy:
raise NotImplementedError(
"include runbook entry doesn't implement Strategy"
)
raw_path = include.path
if variables:
raw_path = replace_variables(raw_path, variables)
if raw_path in used_path:
raise LisaException(
f"circular reference on runbook includes detected: {raw_path}"
)
# use relative path to included runbook
include_path = (path.parent / raw_path).resolve().absolute()
log.debug(f"{indent}loading include: {raw_path}")
# clone a set to support same path is used in different tree.
new_used_path = used_path.copy()
new_used_path.add(raw_path)
include_data = self._load_data(
include_path,
used_path=new_used_path,
higher_level_variables=variables,
)
data_from_include = self._merge_data(
include_path.parent, include_data, data_from_include
)
data_from_current = self._merge_data(
path.parent, data_from_include, data_from_current
)
return data_from_current
| 35.239691
| 88
| 0.601185
|
4a1a46e0f79ffa8c7b88672f06b0928efe8d0621
| 3,870
|
py
|
Python
|
crawling/samsung_vd_request.py
|
jungchulha/Python-study
|
6fbeb6f449bbee24245e2cadce6df105dfc07f0b
|
[
"MIT"
] | null | null | null |
crawling/samsung_vd_request.py
|
jungchulha/Python-study
|
6fbeb6f449bbee24245e2cadce6df105dfc07f0b
|
[
"MIT"
] | null | null | null |
crawling/samsung_vd_request.py
|
jungchulha/Python-study
|
6fbeb6f449bbee24245e2cadce6df105dfc07f0b
|
[
"MIT"
] | null | null | null |
import sys
from concurrent.futures import ThreadPoolExecutor
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from queue import Queue, Empty
import pandas as pd
import json
import re
import time
import json
from multiprocessing import Pool # Pool import하기
from pymongo import MongoClient
import pprint
import os
def total_url() -> list:
    """Return all page URLs stored in the ``data`` collection.

    Fix: the original annotation said ``-> tuple`` but the function has
    always returned a list.
    Relies on the module-global ``db`` bound in ``__main__``.
    """
    # Project only the 'page' field; suppress MongoDB's '_id'.
    cursor = db.data.find({}, {'_id': 0, 'page': 1})
    return [doc['page'] for doc in cursor]
def crawled_url() -> list:
    """Return the URLs already crawled (stored in the ``uk_181125`` collection)."""
    # Only the 'url' field is needed; drop MongoDB's '_id'.
    cursor = db.uk_181125.find({}, {'_id': 0, 'url': 1})
    return [doc['url'] for doc in cursor]
def to_crawl_url(total_url, crawled_url) -> list:
    """Return the URLs that still need to be crawled.

    Fix: the original returned a ``set`` when ``crawled_url`` was non-empty
    and a ``list`` otherwise, contradicting its ``-> list`` annotation.
    Now always returns a list. Ordering is not guaranteed for the
    set-difference case; the caller only uses the result in a ``$in`` query.
    """
    if not crawled_url:
        return total_url
    return list(set(total_url) - set(crawled_url))
def to_crawl_data() -> list:
    """Fetch the full records for every URL that still needs crawling.

    Each record is returned as a tuple of its field values, in document order.
    """
    pending = list(to_crawl_url(total_url(), crawled_url()))
    cursor = db.data.find({"page": {"$in": pending}}, {'_id': 0})
    return [tuple(doc.values()) for doc in cursor]
def get_contents(data):
    """Fetch one URL, extract basic SEO metadata, and store the result in MongoDB.

    ``data`` is a tuple of (site, url, clicks, impressions, ctr, position,
    number) as produced by ``to_crawl_data()``. Each extraction step is
    individually guarded so one failure does not lose the whole record.

    Fixes vs. original:
    - ``requests.get`` now has a timeout; without one a dead host hangs a
      worker in the 96-process pool forever.
    - ``soup`` is pre-initialized and guarded, so the title step no longer
      relies on a swallowed NameError when HTML parsing failed.
    - removed the dead, commented-out selenium/webdriver leftovers.
    """
    (site, url, clicks, impressions, ctr, position, number) = data
    status_code = current_url = page_source = description = description_len = title = title_len = None
    soup = None
    print("수집중 URL : ", url)
    try:
        r = requests.get(url, timeout=30)
        trigger = "success"
    except Exception as e:
        print("request 에러 : ", e)
        trigger = "fail"
    if trigger == "success":
        try:
            status_code = r.status_code
        except Exception:
            # sentinel for "response object exists but status unreadable"
            status_code = 999
        try:
            current_url = r.url
        except Exception as e:
            print("current_url 에러 : ", e)
        try:
            # NOTE: the character class also strips literal '|' characters;
            # kept as-is to preserve the original output format.
            page_source = re.sub('[\r|\n|\t]', '', r.text)
        except Exception as e:
            print("soup 에러 : ", e)
        try:
            soup = BeautifulSoup(r.text, 'html.parser')
            description = soup.find("meta", {"name": "description"})['content']
            description_len = len(description)
        except Exception as e:
            print("description 에러 : ", e)
        try:
            if soup is not None:
                title = soup.find("title").text
                title_len = len(title)
        except Exception as e:
            print("title 에러 : ", e)
    data = {
        'url': url,
        'redirect': current_url,
        'time': time.strftime('%D %H:%M:%S'),
        'page_source': page_source,
        'status_code': status_code,
        'description': description,
        'description_len': description_len,
        'title': title,
        'title_len': title_len,
        "site": site,
        "clicks": clicks,
        "impressions": impressions,
        "ctr": ctr,
        "position": position,
        "index": number,
        "trigger": trigger
    }
    db.uk_181125.insert_one(data)
if __name__=='__main__':
    # Script entry: connect to MongoDB, compute the pending work set, and
    # fan the crawl out over a process pool.
    client = MongoClient('112.222.29.147', 27017)
    db = client.get_database('samsung')
    data = to_crawl_data()
    print("크롤링할 데이터 : ", len(data))
    print("크롤링할 데이터 : ", type(data))
    print(data[0])
    # Fix: use the pool as a context manager so workers are terminated and
    # joined on exit (the original never called close()/join()).
    with Pool(processes=96) as pool:
        pool.map(get_contents, data)
| 27.446809
| 102
| 0.573643
|
4a1a46f6c8d9e69fc6d804e8442962b30248ee19
| 15,843
|
py
|
Python
|
PyInstaller/depend/dylib.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/depend/dylib.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/depend/dylib.py
|
BearerPipelineTest/pyinstaller
|
0de9d6cf1701689c53161610acdab143a76d40b5
|
[
"Apache-2.0"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Manipulating with dynamic libraries.
"""
import os.path
from PyInstaller.utils.win32 import winutils
__all__ = ['exclude_list', 'include_list', 'include_library']
import os
import re
import PyInstaller.log as logging
from PyInstaller import compat
logger = logging.getLogger(__name__)
_BOOTLOADER_FNAMES = {'run', 'run_d', 'runw', 'runw_d'}
# Ignoring some system libraries speeds up packaging process
_excludes = {
# Ignore annoying warnings with Windows system DLLs.
#
# 'W: library kernel32.dll required via ctypes not found'
# 'W: library coredll.dll required via ctypes not found'
#
    # These DLLs have to be ignored for all operating systems, because they might
    # be resolved when scanning code for ctypes dependencies.
r'advapi32\.dll',
r'ws2_32\.dll',
r'gdi32\.dll',
r'oleaut32\.dll',
r'shell32\.dll',
r'ole32\.dll',
r'coredll\.dll',
r'crypt32\.dll',
r'kernel32',
r'kernel32\.dll',
r'msvcrt\.dll',
r'rpcrt4\.dll',
r'user32\.dll',
# Some modules tries to import the Python library. e.g. pyreadline.console.console
r'python\%s\%s',
}
# Regex includes - overrides excludes. Include list is used only to override specific libraries from exclude list.
_includes = set()
_win_includes = {
# We need to allow collection of Visual Studio C++ (VC) runtime DLLs from system directories in order to avoid
# missing DLL errors when the frozen application is run on a system that does not have the corresponding VC
# runtime installed. The VC runtime DLLs may be dependencies of python shared library itself or of extension
# modules provided by 3rd party packages.
# Visual Studio 2010 (VC10) runtime
# http://msdn.microsoft.com/en-us/library/8kche8ah(v=vs.100).aspx
r'atl100\.dll',
r'msvcr100\.dll',
r'msvcp100\.dll',
r'mfc100\.dll',
r'mfc100u\.dll',
r'mfcmifc80\.dll',
r'mfcm100\.dll',
r'mfcm100u\.dll',
# Visual Studio 2012 (VC11) runtime
# https://docs.microsoft.com/en-us/visualstudio/releases/2013/2012-redistribution-vs
#
# VC110.ATL
r'atl110\.dll',
# VC110.CRT
r'msvcp110\.dll',
r'msvcr110\.dll',
r'vccorlib110\.dll',
# VC110.CXXAMP
r'vcamp110\.dll',
# VC110.MFC
r'mfc110\.dll',
r'mfc110u\.dll',
r'mfcm110\.dll',
r'mfcm110u\.dll',
# VC110.MFCLOC
r'mfc110chs\.dll',
r'mfc110cht\.dll',
r'mfc110enu\.dll',
r'mfc110esn\.dll',
r'mfc110deu\.dll',
r'mfc110fra\.dll',
r'mfc110ita\.dll',
r'mfc110jpn\.dll',
r'mfc110kor\.dll',
r'mfc110rus\.dll',
# VC110.OpenMP
r'vcomp110\.dll',
# DIA SDK
r'msdia110\.dll',
# Visual Studio 2013 (VC12) runtime
# https://docs.microsoft.com/en-us/visualstudio/releases/2013/2013-redistribution-vs
#
# VC120.CRT
r'msvcp120\.dll',
r'msvcr120\.dll',
r'vccorlib120\.dll',
# VC120.CXXAMP
r'vcamp120\.dll',
# VC120.MFC
r'mfc120\.dll',
r'mfc120u\.dll',
r'mfcm120\.dll',
r'mfcm120u\.dll',
# VC120.MFCLOC
r'mfc120chs\.dll',
r'mfc120cht\.dll',
r'mfc120deu\.dll',
r'mfc120enu\.dll',
r'mfc120esn\.dll',
r'mfc120fra\.dll',
r'mfc120ita\.dll',
r'mfc120jpn\.dll',
r'mfc120kor\.dll',
r'mfc120rus\.dll',
# VC120.OPENMP
r'vcomp120\.dll',
# DIA SDK
r'msdia120\.dll',
# Cpp REST Windows SDK
r'casablanca120.winrt\.dll',
# Mobile Services Cpp Client
r'zumosdk120.winrt\.dll',
# Cpp REST SDK
r'casablanca120\.dll',
# Universal C Runtime Library (since Visual Studio 2015)
#
# NOTE: these should be put under a switch, as they need not to be bundled if deployment target is Windows 10
# and later, as "UCRT is now a system component in Windows 10 and later, managed by Windows Update".
# (https://docs.microsoft.com/en-us/cpp/windows/determining-which-dlls-to-redistribute?view=msvc-170)
# And as discovered in #6326, Windows prefers system-installed version over the bundled one, anyway
# (see https://docs.microsoft.com/en-us/cpp/windows/universal-crt-deployment?view=msvc-170#local-deployment).
r'api-ms-win-core.*',
r'api-ms-win-crt.*',
r'ucrtbase\.dll',
# Visual Studio 2015/2017/2019/2022 (VC14) runtime
# https://docs.microsoft.com/en-us/visualstudio/releases/2022/redistribution
#
# VC141.CRT/VC142.CRT/VC143.CRT
r'concrt140\.dll',
r'msvcp140\.dll',
r'msvcp140_1\.dll',
r'msvcp140_2\.dll',
r'msvcp140_atomic_wait\.dll',
r'msvcp140_codecvt_ids\.dll',
r'vccorlib140\.dll',
r'vcruntime140\.dll',
r'vcruntime140_1\.dll',
# VC141.CXXAMP/VC142.CXXAMP/VC143.CXXAMP
r'vcamp140\.dll',
# VC141.OpenMP/VC142.OpenMP/VC143.OpenMP
r'vcomp140\.dll',
# DIA SDK
r'msdia140\.dll',
# Allow pythonNN.dll, pythoncomNN.dll, pywintypesNN.dll
r'py(?:thon(?:com(?:loader)?)?|wintypes)\d+\.dll',
}
_win_excludes = {
# On Windows, only .dll files can be loaded.
r'.*\.so',
r'.*\.dylib',
# MS assembly excludes
r'Microsoft\.Windows\.Common-Controls',
}
_unix_excludes = {
r'libc\.so(\..*)?',
r'libdl\.so(\..*)?',
r'libm\.so(\..*)?',
r'libpthread\.so(\..*)?',
r'librt\.so(\..*)?',
r'libthread_db\.so(\..*)?',
# glibc regex excludes.
r'ld-linux\.so(\..*)?',
r'libBrokenLocale\.so(\..*)?',
r'libanl\.so(\..*)?',
r'libcidn\.so(\..*)?',
r'libcrypt\.so(\..*)?',
r'libnsl\.so(\..*)?',
r'libnss_compat.*\.so(\..*)?',
r'libnss_dns.*\.so(\..*)?',
r'libnss_files.*\.so(\..*)?',
r'libnss_hesiod.*\.so(\..*)?',
r'libnss_nis.*\.so(\..*)?',
r'libnss_nisplus.*\.so(\..*)?',
r'libresolv\.so(\..*)?',
r'libutil\.so(\..*)?',
# graphical interface libraries come with graphical stack (see libglvnd)
r'libE?(Open)?GLX?(ESv1_CM|ESv2)?(dispatch)?\.so(\..*)?',
r'libdrm\.so(\..*)?',
# libxcb-dri changes ABI frequently (e.g.: between Ubuntu LTS releases) and is usually installed as dependency of
# the graphics stack anyway. No need to bundle it.
r'libxcb\.so(\..*)?',
r'libxcb-dri.*\.so(\..*)?',
}
_aix_excludes = {
r'libbz2\.a',
r'libc\.a',
r'libC\.a',
r'libcrypt\.a',
r'libdl\.a',
r'libintl\.a',
r'libpthreads\.a',
r'librt\\.a',
r'librtl\.a',
r'libz\.a',
}
if compat.is_win:
_includes |= _win_includes
_excludes |= _win_excludes
elif compat.is_aix:
# The exclude list for AIX differs from other *nix platforms.
_excludes |= _aix_excludes
elif compat.is_unix:
# Common excludes for *nix platforms -- except AIX.
_excludes |= _unix_excludes
class ExcludeList:
    """Matches library base names against the platform's exclude patterns."""

    def __init__(self):
        # Fold every exclude pattern into one case-insensitive alternation.
        self.regex = re.compile('|'.join(_excludes), re.I)

    def search(self, libname):
        # An empty pattern set would compile to the regex '', which matches
        # everything; guard on the set itself instead.
        if not _excludes:
            return False
        return self.regex.match(os.path.basename(libname))
class IncludeList:
    """Matches library base names against the include-override patterns."""

    def __init__(self):
        # Fold every include pattern into one case-insensitive alternation.
        self.regex = re.compile('|'.join(_includes), re.I)

    def search(self, libname):
        # An empty pattern set would compile to the regex '', which matches
        # everything; guard on the set itself instead.
        if not _includes:
            return False
        return self.regex.match(os.path.basename(libname))
exclude_list = ExcludeList()
include_list = IncludeList()
if compat.is_darwin:
# On Mac use macholib to decide if a binary is a system one.
from macholib import util
class MacExcludeList:
def __init__(self, global_exclude_list):
# Wraps the global 'exclude_list' before it is overridden by this class.
self._exclude_list = global_exclude_list
def search(self, libname):
# First try global exclude list. If it matches, return its result; otherwise continue with other check.
result = self._exclude_list.search(libname)
if result:
return result
else:
return util.in_system_path(libname)
exclude_list = MacExcludeList(exclude_list)
elif compat.is_win:
class WinExcludeList:
def __init__(self, global_exclude_list):
self._exclude_list = global_exclude_list
# use normpath because msys2 uses / instead of \
self._windows_dir = os.path.normpath(winutils.get_windows_dir().lower())
def search(self, libname):
libname = libname.lower()
result = self._exclude_list.search(libname)
if result:
return result
else:
# Exclude everything from the Windows directory by default.
# .. sometimes realpath changes the case of libname, lower it
# .. use normpath because msys2 uses / instead of \
fn = os.path.normpath(os.path.realpath(libname).lower())
return fn.startswith(self._windows_dir)
exclude_list = WinExcludeList(exclude_list)
_seen_wine_dlls = set() # Used for warning tracking in include_library()
def include_library(libname):
    """
    Check if the dynamic library should be included with application or not.
    """
    if exclude_list:
        excluded = exclude_list.search(libname)
        overridden = include_list.search(libname)
        if excluded and not overridden:
            # On the exclude list and not rescued by the include list.
            return False
    # Wine built-in DLLs must never be collected: they are Wine's own
    # implementations of low-level Windows functionality (e.g. its VC runtime
    # stand-ins) and cannot run on real Windows, where they would shadow the
    # genuine system libraries. Excluding them turns a subtle breakage into
    # the ordinary "missing DLL" problem, and does not hurt running under
    # Wine itself, where those DLLs are always available.
    if compat.is_win_wine and compat.is_wine_dll(libname):
        if libname not in _seen_wine_dlls:
            # Warn only once per DLL, and only when it would have been included.
            logger.warning("Excluding Wine built-in DLL: %s", libname)
            _seen_wine_dlls.add(libname)
        return False
    return True
# Patterns for suppressing warnings about missing dynamically linked libraries
_warning_suppressions = [
# We fail to discover shiboken2 (PySide2) and shiboken6 (PySide6) shared libraries due to the way the packages set
# up the search path to the library, which is located in a separate package. Suppress the harmless warnings to avoid
# confusion.
r'(lib)?shiboken.*',
]
# On some systems (e.g., openwrt), libc.so might point to ldd. Suppress warnings about it.
if compat.is_linux:
_warning_suppressions.append(r'ldd')
# Suppress false warnings on win 10 and UCRT (see issue #1566).
if compat.is_win_10:
_warning_suppressions.append(r'api-ms-win-crt.*')
_warning_suppressions.append(r'api-ms-win-core.*')
class MissingLibWarningSuppressionList:
    """Matches library base names whose missing-library warnings are suppressed."""

    def __init__(self):
        # Fold all suppression patterns into one case-insensitive alternation.
        self.regex = re.compile('|'.join(_warning_suppressions), re.I)

    def search(self, libname):
        # An empty pattern list would compile to the regex '', which matches
        # everything; guard on the list itself instead.
        if not _warning_suppressions:
            return False
        return self.regex.match(os.path.basename(libname))
missing_lib_warning_suppression_list = MissingLibWarningSuppressionList()
def warn_missing_lib(libname):
    """
    Check if a missing-library warning should be displayed for the given library name (or full path).
    """
    suppressed = missing_lib_warning_suppression_list.search(libname)
    return not suppressed
def mac_set_relative_dylib_deps(libname, distname):
    """
    On Mac OS set relative paths to dynamic library dependencies of `libname`.

    Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH.
    There are some known issues with DYLD_LIBRARY_PATH; relative paths are a
    more flexible mechanism.

    Current location of dependent libraries is derived from the location of
    the library path (paths start with '@loader_path').

    'distname' is the path of the library relative to dist directory of frozen
    executable. We need this to determine the directory level for
    @loader_path of binaries not found in dist directory.

    For example, Qt5 plugins are not in the same directory as Qt*.dylib files.
    Without using '@loader_path/../..' for Qt plugins, Mac OS would not be
    able to resolve shared library dependencies, and Qt plugins will not be
    loaded.
    """
    from macholib import util
    from macholib.MachO import MachO
    # Ignore bootloader; otherwise PyInstaller fails with exception like
    # 'ValueError: total_size > low_offset (288 > 0)'
    if os.path.basename(libname) in _BOOTLOADER_FNAMES:
        return
    # Determine how many directories up ('../') is the directory with shared dynamic libraries.
    # E.g., ./qt4_plugins/images/ -> ./../../
    parent_dir = ''
    # Check if distname is not only base filename.
    if os.path.dirname(distname):
        parent_level = len(os.path.dirname(distname).split(os.sep))
        parent_dir = parent_level * (os.pardir + os.sep)
    def match_func(pth):
        """
        Return the rewritten load path for `pth`, or None to leave it alone.
        System libraries keep their absolute path, unchanged.
        """
        # Leave system dynamic libraries unchanged.
        if util.in_system_path(pth):
            return None
        # The older python.org builds that use system Tcl/Tk framework have their _tkinter.cpython-*-darwin.so
        # library linked against /Library/Frameworks/Tcl.framework/Versions/8.5/Tcl and
        # /Library/Frameworks/Tk.framework/Versions/8.5/Tk, although the actual frameworks are located in
        # /System/Library/Frameworks. Therefore, they slip through the above in_system_path() check, and we need to
        # exempt them manually.
        _exemptions = [
            '/Library/Frameworks/Tcl.framework/',
            '/Library/Frameworks/Tk.framework/',
        ]
        if any([x in pth for x in _exemptions]):
            return None
        # Use relative path to dependent dynamic libraries based on the location of the executable.
        return os.path.join('@loader_path', parent_dir, os.path.basename(pth))
    # Rewrite mach headers with @loader_path.
    dll = MachO(libname)
    dll.rewriteLoadCommands(match_func)
    # Write changes into file. Write code is based on macholib example.
    # NOTE(review): any failure while writing the rewritten headers is
    # silently ignored here — deliberate best-effort, kept as-is.
    try:
        with open(dll.filename, 'rb+') as f:
            for header in dll.headers:
                f.seek(0)
                dll.write(f)
            f.seek(0, 2)
            f.flush()
    except Exception:
        pass
| 34.896476
| 120
| 0.650761
|
4a1a474766bfccc8f331d5e0094873185b34f709
| 7,161
|
py
|
Python
|
common-python/oc_provisioning/oc_provision_wrappers/wls/v12_2_1/weblogic_helper.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 28
|
2016-11-07T14:03:25.000Z
|
2022-02-01T08:46:52.000Z
|
common-python/oc_provisioning/oc_provision_wrappers/wls/v12_2_1/weblogic_helper.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 3
|
2016-11-09T13:23:03.000Z
|
2018-04-05T15:49:22.000Z
|
common-python/oc_provisioning/oc_provision_wrappers/wls/v12_2_1/weblogic_helper.py
|
LaudateCorpus1/atg-commerce-iaas
|
f1ae31657fc0111a5c019d46a28a3c81aae1acb2
|
[
"MIT"
] | 13
|
2016-10-27T17:59:38.000Z
|
2022-02-18T04:38:38.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2016 Oracle
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = "Michael Shanley (Oracle A-Team)"
__copyright__ = "Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved."
__version__ = "1.0.0.0"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
from oc_provision_wrappers import commerce_setup_helper
import os
import platform
import shutil
import logging
logger = logging.getLogger(__name__)
json_key = 'WEBLOGIC_common'
service_name = "WebLogic"
def install_weblogic(configData, full_path):
    """Silently install WebLogic 12.2.1 from the bundled installer jar.

    configData -- parsed provisioning JSON; must contain the WEBLOGIC_common key.
    full_path  -- root directory holding binaries/ and responseFiles/ trees.

    Returns False when prerequisites are missing; otherwise runs the
    installer as the configured owner, writes WebLogic settings to the
    owner's .bashrc, and applies any configured patches.

    Fixes vs. original:
    - octal literal 0664 (Python-2-only syntax) replaced with 0o664, which is
      valid on Python 2.6+ and Python 3;
    - uses the module-level ``logger`` instead of the root ``logging`` module,
      consistent with the logger this module defines;
    - missing-config early exit now returns False like the missing-binary one.
    """
    if json_key in configData:
        jsonData = configData[json_key]
    else:
        logger.error(json_key + " config data missing from json. will not install")
        return False
    logger.info("installing " + service_name)
    binary_path = full_path + "/binaries/wls-12.2.1"
    response_files_path = full_path + "/responseFiles/wls-12.2.1"
    install_exec = "fmw_12.2.1.2.0_wls.jar"
    full_exec_path = binary_path + "/" + install_exec
    if not os.path.exists(full_exec_path):
        logger.error("Binary " + full_exec_path + " does not exist - will not install")
        return False
    requiredFields = ['middlewareHome', 'installOwner', 'installGroup', 'oraInventoryDir']
    commerce_setup_helper.check_required_fields(jsonData, requiredFields)
    INSTALL_DIR = jsonData['middlewareHome']
    INSTALL_OWNER = jsonData['installOwner']
    INSTALL_GROUP = jsonData['installGroup']
    ORACLE_INVENTORY_DIR = jsonData['oraInventoryDir']
    ORA_INST = "/etc/oraInst.loc"
    oraInst_replacements = {'ORACLE_INVENTORY_DIR':ORACLE_INVENTORY_DIR, 'ORACLE_INVENTORY_GROUP':INSTALL_GROUP}
    # if oraInst.loc doesn't already exist, we need to make one
    if not os.path.isfile(ORA_INST):
        commerce_setup_helper.substitute_file_fields(response_files_path + '/oraInst.loc.master', response_files_path + '/oraInst.loc', oraInst_replacements)
        shutil.copyfile(response_files_path + "/oraInst.loc" , ORA_INST)
        commerce_setup_helper.change_file_owner(ORA_INST, INSTALL_OWNER, INSTALL_GROUP)
        os.chmod(ORA_INST, 0o664)
    wl_replacements = {'INSTALL_DIR':INSTALL_DIR}
    commerce_setup_helper.substitute_file_fields(response_files_path + '/install.rsp.master', response_files_path + '/install.rsp', wl_replacements)
    # make the install tree with correct owner if needed
    commerce_setup_helper.mkdir_with_perms(INSTALL_DIR, INSTALL_OWNER, INSTALL_GROUP)
    # install wl; Solaris needs the explicit 64-bit JVM flag
    if (platform.system() == 'SunOS'):
        installCommand = "\"" + "java -d64 -jar "
    else:
        installCommand = "\"" + "java -jar "
    installCommand = installCommand + full_exec_path + " -silent -invPtrLoc " + ORA_INST + " -responseFile " + response_files_path + "/install.rsp" + "\""
    commerce_setup_helper.exec_as_user(INSTALL_OWNER, installCommand)
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, "##################### \n")
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, "#WebLogic Settings \n")
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, "##################### \n")
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, "export MW_HOME=" + INSTALL_DIR + "\n\n")
    JAVA_RAND = ""
    # if linux/Solaris, change random. This is faster in some implementations.
    if (platform.system() == "SunOS"):
        JAVA_RAND = "-Djava.security.egd=file:///dev/urandom"
    else:
        JAVA_RAND = "-Djava.security.egd=file:/dev/./urandom"
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, 'export CONFIG_JVM_ARGS=\"' + JAVA_RAND + ' \" \n')
    commerce_setup_helper.add_to_bashrc(INSTALL_OWNER, 'export JAVA_OPTIONS=\"' + JAVA_RAND + ' \" \n')
    # install patches if any were listed
    patch_weblogic(configData, full_path)
def patch_weblogic(configData, full_path):
    """Apply any configured WebLogic patches with OPatch.

    :param configData: parsed JSON config; patches are read from the section
        named by the module-level ``json_key``.
    :param full_path: base path of the install media; patch zips are expected
        under ``<full_path>/binaries/wls-12.2.1/patches``.
    Patch filenames are assumed to look like ``p<patchNum>_<rest>.zip`` —
    the leading character is stripped to get the patch number.
    """
    if json_key in configData:
        jsonData = configData[json_key]
    else:
        logging.error(json_key + " config data missing from json. will not install")
        return
    binary_path = full_path + "/binaries/wls-12.2.1"
    patches_path = binary_path + "/patches"
    # json key containing patch files
    patchKey = "wl_patches";
    requiredFields = ['middlewareHome', 'installOwner', 'installGroup']
    commerce_setup_helper.check_required_fields(jsonData, requiredFields)
    INSTALL_DIR = jsonData['middlewareHome']
    INSTALL_OWNER = jsonData['installOwner']
    PATCH_FILES = None
    # if the patches key was provided, get the list of patches to apply
    if patchKey in jsonData:
        PATCH_FILES = jsonData['wl_patches']
    if PATCH_FILES:
        logging.info("patching " + service_name)
        patches = PATCH_FILES.split(',')
        patchList = []
        patchScript = INSTALL_DIR + "/OPatch/opatch"
        tmpPatchDir = "/tmp/wlpatches"
        for patch in patches:
            # get list of patches - comma separated
            patchParts = patch.split('_')
            # get just the patch number (drop the leading "p")
            patchNum = patchParts[0][1:]
            # keep a running list of all patch numbers for the -id argument
            patchList.append(patchNum)
            if not os.path.exists(patches_path + "/" + patch):
                logging.error("patch file " + patches_path + "/" + patch + " missing - will not install")
                return
            # unzip patch to /tmp. This will create a dir with the patchNum as the name
            unzipCommand = "\"" + "unzip " + patches_path + "/" + patch + " -d " + tmpPatchDir + "\""
            commerce_setup_helper.exec_as_user(INSTALL_OWNER, unzipCommand)
        # apply all unpacked patches in one "opatch napply" invocation
        patchCommand = "\"" + patchScript + " napply " + tmpPatchDir + " -jre /usr/java/latest" + " -silent -id " + ','.join(patchList) + "\""
        commerce_setup_helper.exec_as_user(INSTALL_OWNER, patchCommand)
        # cleanup our files from /tmp
        shutil.rmtree(tmpPatchDir)
| 45.611465
| 157
| 0.664153
|
4a1a4882be6fe1bad11373390bf9fe623de65f0a
| 2,729
|
py
|
Python
|
hostchecker/__main__.py
|
SimoneCorazza/HostChecker
|
56bc9d24f7b783570925b41cb883974b21128c29
|
[
"CC0-1.0"
] | null | null | null |
hostchecker/__main__.py
|
SimoneCorazza/HostChecker
|
56bc9d24f7b783570925b41cb883974b21128c29
|
[
"CC0-1.0"
] | 1
|
2021-03-31T19:52:17.000Z
|
2021-03-31T19:52:17.000Z
|
hostchecker/__main__.py
|
SimoneCorazza/HostChecker
|
56bc9d24f7b783570925b41cb883974b21128c29
|
[
"CC0-1.0"
] | 1
|
2020-10-01T17:56:26.000Z
|
2020-10-01T17:56:26.000Z
|
import datetime
import chrome_bookmarks
import argparse
from hostingInfo import HostInfo
from hostStorage import HostStorage
def printInfoUrl(domain, hostStorage, useCache, storeOnCache):
    """
    Print the hosting info for the given domain.

    :param domain: String of the domain to get the hosting info from
    :param hostStorage: HostStorage where to store the info
    :param useCache: True enable the loading of cached results
    :param storeOnCache: True enable the caching of the result
    """
    # Load info from cache
    hostInfo = None
    if useCache:
        hostInfo = hostStorage.getHostInfo(domain)
    time_str = "Loaded from cache"

    # Get info from the web
    if hostInfo is None:
        start = datetime.datetime.now()
        hostInfo = HostInfo.hostingInfo(domain)
        end = datetime.datetime.now()
        delta = end - start
        # BUG FIX: the original applied the ":.02F" float format spec to
        # `time` unconditionally; on a cache hit `time` was the string
        # "Loaded from cache" and the f-string raised ValueError.
        time_str = f"{delta.total_seconds():.02F}s"

        # Store the result
        if storeOnCache:
            hostStorage.cache([hostInfo])

    print(f"{hostInfo.domain:40} {str(hostInfo.datacenter):40} {time_str}")
def urls(args):
    """Return the list of domains to check.

    If a url was passed on the command line, check only that one;
    otherwise collect the unique domains of all Chrome bookmarks.
    """
    if args.url is not None:
        return [args.url]
    unique_domains = {HostInfo.getDomain(bookmark.url) for bookmark in chrome_bookmarks.urls}
    return list(unique_domains)
if __name__ == "__main__":
    # Command-line entry point: check one URL or all Chrome bookmarks,
    # optionally bypassing or forcing the local sqlite cache.
    parser = argparse.ArgumentParser(description="Check who is hosting websites")
    parser.add_argument('url', type=str, nargs='?',
                        # Typo fix: "esample" -> "example" in the help text
                        help="The URL to check (example: 'www.google.com')")
    parser.add_argument('--bookmarks', action='store_true', default=True,
                        help='Check the host of all bookmarks [default true]')
    parser.add_argument('--stats', action='store_true', default=False,
                        help='Print the stats of the used website [default false]')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--no-cache', action='store_true', default=False,
                       help='The cache will not be used to get and store the results [default false]')
    group.add_argument('--force-cache', action='store_true', default=False,
                       help='The cached result will not be used [default false]')
    args = parser.parse_args()

    hostStorage = HostStorage("host_info.sqlite3")

    if args.stats:
        # Report aggregate datacenter usage built from previously cached results
        stats = hostStorage.stats()
        for s in stats:
            datacenter = "<Unknown>" if s.datacenter is None else s.datacenter
            print(f"{datacenter:40} {s.count:02} {s.percentage:05.1%}")
    else:
        # --no-cache disables both reading and writing; --force-cache only reading
        useCache = not (args.force_cache or args.no_cache)
        storeOnCache = not args.no_cache
        domains = urls(args)
        for d in domains:
            printInfoUrl(d, hostStorage, useCache, storeOnCache)
| 33.280488
| 87
| 0.655551
|
4a1a48aaea66c1f43e2a4272211b868dbd1ac486
| 3,137
|
py
|
Python
|
examples/python/example-03-slice.py
|
lukasm91/serialbox2
|
3a8dba366ef160df684c957e59c0a5f6b1b17244
|
[
"BSD-2-Clause"
] | 1
|
2020-09-04T00:43:52.000Z
|
2020-09-04T00:43:52.000Z
|
examples/python/example-03-slice.py
|
mlange05/serialbox2
|
fa72d8a39f62c7c0b76536680f7a9953957f59f2
|
[
"BSD-2-Clause"
] | null | null | null |
examples/python/example-03-slice.py
|
mlange05/serialbox2
|
fa72d8a39f62c7c0b76536680f7a9953957f59f2
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
##===-----------------------------------------------------------------------------*- Python -*-===##
##
## S E R I A L B O X
##
## This file is distributed under terms of BSD license.
## See LICENSE.txt for more information.
##
##===------------------------------------------------------------------------------------------===##
##
## This example demonstrates how to only load parts (slices) of a serialized field. This can
## significantly improve performance if one is only interested in a small part of the data.
##
## This example is also available in all other languages supported by Serialbox.
##
##===------------------------------------------------------------------------------------------===##
#
# First, we have to make sure Python finds the Serialbox module. Alternatively, you can also set the
# environment variable PYTHONPATH.
#
import os
import sys
import time
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../python')
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src/serialbox-python')
#
# Import Serialbox and numpy
#
import serialbox as ser
import numpy as np
def main():
    """Demonstrate Serialbox sliced reads: write a random 3D field, then read
    back only one k-layer with read_slice and compare against a full read."""
    #
    # Initialize the serializer. At the moment sliced loading is only supported by the Binary
    # archive
    #
    serializer_write = ser.Serializer(ser.OpenModeKind.Write, "./slice", "field", "Binary")

    #
    # Allocate 3D numpy arrays
    #
    field_in = np.random.rand(512, 512, 80)
    field_out = np.zeros((512, 512, 80))

    #
    # Write the numpy array to disk at savepoint `sp`
    #
    start = time.time()
    savepoint = ser.Savepoint('sp')
    serializer_write.write('field', savepoint, field_in)
    print("Serializer.write : %8.2f s" % (time.time() - start))

    #
    # Initialize a serializer for reading.
    #
    serializer_read = ser.Serializer(ser.OpenModeKind.Read, "./slice", "field", "Binary")

    #
    # Assume we are only interested in a certain layer of the data (k = 50), we can use the slice
    # object (ser.Slice) to encode this information and instruct the serializer to only load
    # the desired data. Note that you still need to allocate memory for the whole field!
    #
    start = time.time()
    serializer_read.read_slice('field', savepoint, ser.Slice[:, :, 50], field_out)
    print("Serializer.read_slice : %8.2f s" % (time.time() - start))
    assert(np.allclose(field_in[:, :, 50], field_out[:, :, 50]))

    #
    # You can of course load the full data and slice it afterwards with numpy which yields the same
    # result, though is most likely slower.
    #
    start = time.time()
    serializer_read.read('field', savepoint, field_out)
    print("Serializer.read : %8.2f s" % (time.time() - start))
    assert(np.allclose(field_in[:, :, 50], field_out[:, :, 50]))

    #
    # Remove directory
    #
    import shutil
    shutil.rmtree("./slice")

if __name__ == '__main__':
    main()
| 32.677083
| 102
| 0.561683
|
4a1a48d28541093d2df3217ef6815a5ba01724e6
| 10,264
|
py
|
Python
|
lisa/features/gpu.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
lisa/features/gpu.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
lisa/features/gpu.py
|
srveniga/lisa
|
0b5bcf028ed4211d79ff90b9f915981c426baab4
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import re
from enum import Enum
from typing import Any, List, Set
from lisa.base_tools import Wget
from lisa.feature import Feature
from lisa.operating_system import Redhat, Ubuntu
from lisa.sut_orchestrator.azure.tools import LisDriver
from lisa.tools import Lsmod, Lspci, Lsvmbus
from lisa.util import LisaException, constants
FEATURE_NAME_GPU = "Gpu"
# Link to the latest GRID driver
# The DIR link is
# https://download.microsoft.com/download/9/5/c/95c667ff-ab95-4c56-89e0-e13e9a76782d/NVIDIA-Linux-x86_64-460.32.03-grid-azure.run
DEFAULT_GRID_DRIVER_URL = "https://go.microsoft.com/fwlink/?linkid=874272"
DEFAULT_CUDA_DRIVER_VERSION = "10.1.105-1"
class ComputeSDK(Enum):
    """Kinds of NVIDIA driver stacks a GPU node can install."""
    # GRID Driver
    GRID = 1
    # CUDA Driver
    CUDA = 2
class Gpu(Feature):
    """Node feature that installs NVIDIA GPU drivers (GRID or CUDA) and
    counts the GPUs visible on the node via lsvmbus, lspci or nvidia-smi."""

    # Packages that must be present before a driver install can run.
    _redhat_gpu_dependencies = [
        "kernel-devel-$(uname -r)",
        "kernel-headers-$(uname -r)",
        "mesa-libGL",
        "mesa-libEGL",
        "libglvnd-devel",
        "dkms",
    ]

    _ubuntu_gpu_dependencies = [
        "build-essential",
        "libelf-dev",
        "linux-tools-$(uname -r)",
        "linux-cloud-tools-$(uname -r)",
        "python",
        "libglvnd-dev",
        "ubuntu-desktop",
    ]

    # tuple of gpu device names and their device id pattern
    # e.g. Tesla GPU device has device id "47505500-0001-0000-3130-444531303244"
    # Third element: bridge-device count subtracted in get_gpu_count_with_lsvmbus.
    gpu_devices = (("Tesla", "47505500", 0), ("A100", "44450000", 6))

    @classmethod
    def name(cls) -> str:
        """Feature name used for registration/lookup."""
        return FEATURE_NAME_GPU

    @classmethod
    def enabled(cls) -> bool:
        return True

    @classmethod
    def can_disable(cls) -> bool:
        return True

    def is_supported(self) -> bool:
        # Platform-specific subclasses must implement the support check.
        raise NotImplementedError

    def _initialize(self, *args: Any, **kwargs: Any) -> None:
        # Vendors whose kernel modules were installed (filled by install_compute_sdk).
        self.gpu_vendor: Set[str] = set()

    def _get_supported_driver(self) -> List[ComputeSDK]:
        # Platform-specific subclasses decide which ComputeSDK(s) apply.
        raise NotImplementedError

    # download and install NVIDIA grid driver
    def _install_grid_driver(self, driver_url: str) -> None:
        """Download the GRID driver .run installer and execute it silently."""
        self._log.debug("Starting GRID driver installation")
        # download and install the NVIDIA GRID driver
        wget_tool = self._node.tools[Wget]
        grid_file_path = wget_tool.get(
            driver_url,
            str(self._node.working_path),
            "NVIDIA-Linux-x86_64-grid.run",
            executable=True,
        )
        result = self._node.execute(
            f"{grid_file_path} --no-nouveau-check --silent --no-cc-version-check"
        )
        if result.exit_code != 0:
            raise LisaException(
                "Failed to install the GRID driver! "
                f"exit-code: {result.exit_code} stderr: {result.stderr}"
            )
        self._log.debug("Successfully installed the GRID driver")

    # download and install CUDA Driver
    def _install_cuda_driver(self, version: str) -> None:
        """Install the CUDA driver from NVIDIA's distro-specific repository.

        :param version: repo package version string, e.g. "10.1.105-1".
        Raises LisaException for unsupported distros.
        """
        self._log.debug("Starting CUDA driver installation")
        cuda_repo = ""
        os_information = self._node.os.information

        if isinstance(self._node.os, Redhat):
            release = os_information.release.split(".")[0]
            cuda_repo_pkg = f"cuda-repo-rhel{release}-{version}.x86_64.rpm"
            cuda_repo = (
                "http://developer.download.nvidia.com/"
                f"compute/cuda/repos/rhel{release}/x86_64/{cuda_repo_pkg}"
            )
            # download and install the cuda driver package from the repo
            self._node.os._install_package_from_url(
                f"{cuda_repo}", package_name="cuda-drivers.rpm", signed=False
            )
        elif isinstance(self._node.os, Ubuntu):
            release = re.sub("[^0-9]+", "", os_information.release)
            # there is no ubuntu2110 and ubuntu2104 folder under nvidia site
            if release in ["2110", "2104"]:
                release = "2004"
            # Public CUDA GPG key is needed to be installed for Ubuntu
            self._node.execute(
                "apt-key adv --fetch-keys http://developer.download.nvidia.com/compute/"
                f"cuda/repos/ubuntu{release}/x86_64/7fa2af80.pub",
                sudo=True,
            )
            if "1804" == release:
                cuda_repo_pkg = f"cuda-repo-ubuntu{release}_{version}_amd64.deb"
                cuda_repo = (
                    "http://developer.download.nvidia.com/compute/"
                    f"cuda/repos/ubuntu{release}/x86_64/{cuda_repo_pkg}"
                )
                # download and install the cuda driver package from the repo
                self._node.os._install_package_from_url(
                    f"{cuda_repo}", package_name="cuda-drivers.deb", signed=False
                )
            else:
                # Newer Ubuntu releases: pin the NVIDIA repo and add it via apt.
                self._node.tools[Wget].get(
                    f"https://developer.download.nvidia.com/compute/cuda/repos/"
                    f"ubuntu{release}/x86_64/cuda-ubuntu{release}.pin",
                    "/etc/apt/preferences.d",
                    "cuda-repository-pin-600",
                    sudo=True,
                    overwrite=False,
                )
                repo_entry = (
                    f"deb http://developer.download.nvidia.com/compute/cuda/repos/"
                    f"ubuntu{release}/x86_64/ /"
                )
                self._node.execute(
                    f'add-apt-repository -y "{repo_entry}"',
                    sudo=True,
                    expected_exit_code=0,
                    expected_exit_code_failure_message=(
                        f"failed to add repo {repo_entry}"
                    ),
                )
                # the latest version cuda-drivers-510 has issues
                # nvidia-smi
                #   No devices were found
                # dmesg
                #   NVRM: GPU 0001:00:00.0: RmInitAdapter failed! (0x63:0x55:2344)
                #   NVRM: GPU 0001:00:00.0: rm_init_adapter failed, device minor number 0
                # switch to use 495
                self._node.os.install_packages("cuda-drivers-495")
        else:
            # NOTE(review): adjacent string literals concatenate without a
            # space, producing "...name}not supported..." — likely missing a space.
            raise LisaException(
                f"Distro {self._node.os.name}" "not supported to install CUDA driver."
            )

    def _install_gpu_dep(self) -> None:
        """Install the distro-specific dependency packages for GPU drivers."""
        # install dependency libraries for distros
        if isinstance(self._node.os, Redhat):
            self._node.os.install_packages(
                list(self._redhat_gpu_dependencies), signed=False
            )
        elif isinstance(self._node.os, Ubuntu):
            self._node.os.install_packages(
                list(self._ubuntu_gpu_dependencies), timeout=2000
            )
        else:
            raise LisaException(
                f"Distro {self._node.os.name} is not supported for GPU."
            )

    def is_module_loaded(self) -> bool:
        """True when kernel modules for every recorded GPU vendor are loaded."""
        lsmod_tool = self._node.tools[Lsmod]
        if (len(self.gpu_vendor) > 0) and all(
            lsmod_tool.module_exists(vendor) for vendor in self.gpu_vendor
        ):
            return True

        return False

    def install_compute_sdk(self, version: str = "") -> None:
        """Install the driver stack(s) reported by _get_supported_driver.

        :param version: driver URL (GRID) or repo version (CUDA); defaults
            to the module-level constants when empty.
        """
        # install GPU dependencies before installing driver
        self._install_gpu_dep()

        try:
            # install LIS driver if required and not already installed.
            # NOTE(review): `can_install` is referenced but not called here;
            # if it is a method this condition is always truthy — confirm intent.
            if LisDriver.can_install:
                self._node.tools[LisDriver]
        except Exception as identifier:
            self._log.debug(
                "LisDriver is not installed. It might not be required. " f"{identifier}"
            )
        # install the driver
        supported_driver = self._get_supported_driver()
        for driver in supported_driver:
            if driver == ComputeSDK.GRID:
                if not version:
                    version = DEFAULT_GRID_DRIVER_URL
                self._install_grid_driver(version)
                self.gpu_vendor.add("nvidia")
            elif driver == ComputeSDK.CUDA:
                if not version:
                    version = DEFAULT_CUDA_DRIVER_VERSION
                self._install_cuda_driver(version)
                self.gpu_vendor.add("nvidia")
            else:
                raise LisaException(f"{driver} is not a valid value of ComputeSDK")

        if not self.gpu_vendor:
            raise LisaException("No supported gpu driver/vendor found for this node.")

    def get_gpu_count_with_lsvmbus(self) -> int:
        """Count GPU devices listed by lsvmbus, subtracting bridge devices.

        NOTE(review): bridge_device_count keeps only the value from the last
        matched device type — confirm this is intended for mixed-GPU nodes.
        """
        lsvmbus_device_count = 0
        bridge_device_count = 0

        lsvmbus_tool = self._node.tools[Lsvmbus]
        device_list = lsvmbus_tool.get_device_channels()
        for device in device_list:
            # `id` here shadows the builtin; it is the device-id prefix from gpu_devices.
            for name, id, bridge_count in self.gpu_devices:
                if id in device.device_id:
                    lsvmbus_device_count += 1
                    bridge_device_count = bridge_count
                    self._log.debug(f"GPU device {name} found!")
                    break

        return lsvmbus_device_count - bridge_device_count

    def get_gpu_count_with_lspci(self) -> int:
        """Count GPU devices visible on the PCI bus."""
        lspci_tool = self._node.tools[Lspci]
        device_list = lspci_tool.get_device_list_per_device_type(
            constants.DEVICE_TYPE_GPU
        )

        return len(device_list)

    def get_gpu_count_with_vendor_cmd(self) -> int:
        """Count GPUs by parsing the vendor tool output (nvidia-smi -L)."""
        device_count = 0
        if "nvidia" in self.gpu_vendor:
            # sample output
            # GPU 0: Tesla P100-PCIE-16GB (UUID: GPU-0609318e-4920-44d8-a9fd-7bae639f7c5d)# noqa: E501
            # GPU 1: Tesla P100-PCIE-16GB (UUID: GPU-ede45443-35ad-8d4e-f40d-988423bc6c0b)# noqa: E501
            # GPU 2: Tesla P100-PCIE-16GB (UUID: GPU-ccd6174e-b288-b73c-682e-054c83ef3a3e)# noqa: E501
            # GPU 3: Tesla P100-PCIE-16GB (UUID: GPU-225b4607-ceba-5806-d41a-49ccbcf9794d)# noqa: E501
            result = self._node.execute("nvidia-smi -L", shell=True)
            # Treat both a non-zero exit and empty output as failure.
            if result.exit_code != 0 or (result.exit_code == 0 and result.stdout == ""):
                raise LisaException(
                    f"nvidia-smi command exited with exit_code {result.exit_code}"
                )
            # Count occurrences of each known GPU model name in the listing.
            gpu_types = [x[0] for x in self.gpu_devices]
            for gpu_type in gpu_types:
                device_count += result.stdout.count(gpu_type)

        return device_count
| 38.014815
| 129
| 0.582424
|
4a1a492ff20f25097050bb49ff5aeb5042e00c3a
| 685
|
py
|
Python
|
boundaries/ocd-division/country:ca/csd:3506008/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
boundaries/ocd-division/country:ca/csd:3506008/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
boundaries/ocd-division/country:ca/csd:3506008/definition.py
|
imhangoo/represent-canada-data
|
0d9cc818b343079f81a00c15438d79c079a10c9b
|
[
"OML"
] | null | null | null |
from datetime import date

import boundaries

# Register the 2014 City of Ottawa ward boundaries with the boundaries
# loader: names come from the WARD_EN attribute, ids from WARD_NUM.
boundaries.register('Ottawa wards',
    domain='Ottawa, ON',
    last_updated=date(2018, 4, 24),
    name_func=boundaries.clean_attr('WARD_EN'),
    id_func=boundaries.attr('WARD_NUM'),
    authority='City of Ottawa',
    source_url='http://data.ottawa.ca/en/dataset/wards-2014',
    licence_url='https://ottawa.ca/en/city-hall/get-know-your-city/open-data#open-data-licence-version-2-0',
    data_url='http://data.ottawa.ca/dataset/8321248d-0b86-47cd-9c83-c848a1bc0098/resource/8152e707-fc28-409c-9bbe-8cb9f88dca86/download/wards-2014shp.shp.zip',
    encoding='utf-8',
    extra={'division_id': 'ocd-division/country:ca/csd:3506008'},
)
| 40.294118
| 159
| 0.732847
|
4a1a49b9df3ca0ad98aa9ceb1c87748ed1a9a6f7
| 6,511
|
py
|
Python
|
app/test/stubs/empresa.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 1
|
2019-07-25T21:15:05.000Z
|
2019-07-25T21:15:05.000Z
|
app/test/stubs/empresa.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 44
|
2019-08-05T15:24:00.000Z
|
2022-01-31T23:11:31.000Z
|
app/test/stubs/empresa.py
|
smartlab-br/datahub-api
|
193e71172bb4891a5bbffc902da07ef57df9ab07
|
[
"MIT"
] | 1
|
2021-05-11T07:49:51.000Z
|
2021-05-11T07:49:51.000Z
|
""" Stubs for model testing """
import pandas as pd
from datetime import datetime
from model.empresa.empresa import Empresa
from model.thematic import Thematic
from repository.empresa.pessoadatasets import PessoaDatasetsRepository
from repository.empresa.datasets import DatasetsRepository
from repository.empresa.empresa import EmpresaRepository
from test.stubs.repository import StubThematicRepository
class StubThematicModel(Thematic):
    """ Class to return a constant dataset when find_dataset is invoked """
    def load_and_prepare(self):
        """ Avoids the application context """
    def get_repo(self):
        """ Avoids the application context """
        return StubThematicRepository()
    def find_dataset(self, options):
        """ Method to return a fixed collection.

        The fixture rows change column names depending on options['theme']
        ('rais' or 'catweb_c'). Returns a dict wrapper when both 'as_pandas'
        and 'no_wrap' are falsy, otherwise a pandas DataFrame.
        """
        dataframe = [
            {'cnpj': '12345678000101', 'compet': 2047, 'agr_count': 100},
            {'cnpj': '12345678000202', 'compet': 2099, 'agr_count': 200}
        ]
        if (options is not None and 'theme' in options and
                options.get('theme') == 'rais'):
            dataframe = [
                {'nu_cnpj_cei': '12345678000101', 'nu_ano_rais': 2047, 'agr_count': 100},
                {'nu_cnpj_cei': '12345678000202', 'nu_ano_rais': 2099, 'agr_count': 200}
            ]
        if (options is not None and 'theme' in options and
                options.get('theme') in ['catweb_c']):
            dataframe = [
                {'cnpj_raiz': '12345678', 'cnpj': '12345678000101', 'nu_cnpj_empregador': '12345678000101', 'compet': 2047, 'agr_count': 100, "tp_tomador": 0},
                {'cnpj_raiz': '12345678', 'cnpj': '12345678000202', 'nu_cnpj_empregador': '12345678000202', 'compet': 2047, 'agr_count': 200, "tp_tomador": 0}
            ]
        if not options.get('as_pandas', True) and not options.get('no_wrap', True):
            return {
                "metadata": {"fonte": "Fonte"},
                "dataset": dataframe
            }
        return pd.DataFrame(dataframe)
    def get_persp_columns(self, dataframe):
        """ Returns a fixed perspective column for testing """
        return 'persp_column'
    # def get_column_defs(self, table_name):
    #     ''' Get the column definitions from a dataframe '''
    #     return {
    #         'cnpj_raiz': 'cnpj_raiz',
    #         'cnpj': 'cnpj',
    #         'pf': 'cpf',
    #         'persp': None,
    #         'persp_options': None,
    #         'compet': None
    #     }
class StubDatasetRepository(DatasetsRepository):
    """ Class to unlock Empresa model testing that uses REDIS data """
    # Canned dataset-name -> comma-separated competencies mapping used by tests.
    DATASETS = {
        'skip': '2016',
        'test': '2017,2018',
        'failed_status': '2017,2099',
        'expired': '2017,2018',
        'another': '2019'
    }
    def __init__(self):
        """ Overrides constructor to avoid application context """
    def load_and_prepare(self):
        """ Avoids the application context """
class StubEmpresaRepository(EmpresaRepository):
    """ Class to unlock Empresa model testing that uses REDIS data """
    # Column-name lookup tables mirroring the real repository's per-theme schema.
    DEFAULT_GROUPING = 'nu_competencia, cd_indicador'
    DEFAULT_PARTITIONING = 'cd_indicador'
    CNPJ_RAIZ_COLUMNS = {
        "rais": "nu_cnpj_raiz",
        "rfb" : "nu_cnpj_raiz",
    }
    CNPJ_COLUMNS = {
        'rais': 'nu_cnpj_cei',
        'rfb': 'nu_cnpj',
    }
    COMPET_COLUMNS = {
        'rais': 'nu_ano_rais',
    }
    PF_COLUMNS = {
        'rais': 'nu_cpf',
        'rfb': 'nu_cpf_responsavel',
    }
    PERSP_COLUMNS = {
        'catweb': 'origem_busca'
    }
    def __init__(self):
        """ Overrides constructor to avoid application context """
    def load_and_prepare(self):
        """ Avoids the application context """
class StubPessoaDatasetRepository(PessoaDatasetsRepository):
    """ Class to unlock Empresa model testing that uses REDIS data """
    def __init__(self):
        """ Overrides constructor to avoid application context """
    def load_and_prepare(self):
        """ Avoids the application context """
    def retrieve(self, _id_pfpj, dataframe, _pfpj='pj'):
        """ Fakes a REDIS call and return static data """
        str_now = datetime.strftime(datetime.now(), "%Y-%m-%d")
        # Canned responses keyed by the requested dataframe name; anything
        # not listed falls through to the "valid" response.
        canned = {
            'unavailable': {
                "2017": f"INGESTED|{str_now}",
                "2099": f"INGESTED|{str_now}",
                "when": f"{str_now}"
            },
            'failed_status': {
                "2017": f"FAILED|{str_now}",
                "2018": f"INGESTED|{str_now}",
                "when": f"{str_now}"
            },
            'expired': {
                "2017": "INGESTED|2000-01-01",
                "2018": "INGESTED|2000-01-01",
                "when": "2000-01-01"
            },
        }
        valid = {
            "2017": f"INGESTED|{str_now}",
            "2018": f"INGESTED|{str_now}",
            "when": f"{str_now}"
        }
        return canned.get(dataframe, valid)
class StubEmpresa(Empresa):
    """ Class to enable model testing without repository access """
    # Expected aggregation results keyed by grouping mode, used as assertions
    # in the test suite.
    EXPECTED_GROUPED_STATS = {
        'stats_estab': {
            '12345678000101': {'agr_count': 100, 'compet': 2047},
            '12345678000202': {'agr_count': 200, 'compet': 2099}
        },
        'stats_compet': {
            '2047': {'agr_count': 100, 'cnpj': '12345678000101'},
            '2099': {'agr_count': 200, 'cnpj': '12345678000202'}
        },
        'stats_estab_compet': {
            '2047_12345678000101': {
                'agr_count': 100, 'cnpj': '12345678000101', 'compet': 2047
            },
            '2099_12345678000202': {
                'agr_count': 200, 'cnpj': '12345678000202', 'compet': 2099
            }
        }
    }
    def __init__(self):
        ''' Constructor: wires the stub repositories instead of real ones '''
        self.repo = None
        self.dataset_repo = None
        self.pessoa_dataset_repo = None
        self.thematic_handler = None
        self.__set_repo()
    def get_thematic_handler(self):
        """ Gets the stub thematic model instead of the real one """
        return StubThematicModel()
    def get_dataset_repo(self):
        """ Gets the stub dataset repo instead of the real one """
        return StubDatasetRepository()
    def get_pessoa_dataset_repo(self):
        """ Gets the stub pessoa_dataset repo instead of the real one """
        return StubPessoaDatasetRepository()
    def __set_repo(self):
        """ Setter invoked in Constructor """
        self.repo = StubEmpresaRepository()
| 35.38587
| 159
| 0.574105
|
4a1a49d2eba38d14ce3e3831bec5aa3ca2d07126
| 356
|
py
|
Python
|
test_data/himanis/convert_guerin.py
|
matgille/boudams
|
d253ff2ee124e13186b5782ebb3c2d9b4bedd942
|
[
"MIT"
] | 5
|
2019-10-12T22:12:42.000Z
|
2022-02-17T14:59:02.000Z
|
test_data/himanis/convert_guerin.py
|
matgille/boudams
|
d253ff2ee124e13186b5782ebb3c2d9b4bedd942
|
[
"MIT"
] | 10
|
2019-06-08T09:31:19.000Z
|
2021-05-16T09:27:14.000Z
|
test_data/himanis/convert_guerin.py
|
matgille/boudams
|
d253ff2ee124e13186b5782ebb3c2d9b4bedd942
|
[
"MIT"
] | 2
|
2020-01-10T09:46:09.000Z
|
2021-02-26T17:04:36.000Z
|
import os

directory = os.path.dirname(__file__)

# Split the Guerin corpus into one file per non-empty line, grouped into a
# per-language directory named by the line's 3-letter language prefix.
with open(os.path.join(directory, "txt", "guerin.txt")) as f:
    for line_index, line in enumerate(f.readlines()):
        if not line.strip():
            continue
        # Columns 0-2 hold the language code; the text starts at column 7.
        # assumes the <lang> output directories already exist — TODO confirm
        lang, text = line[:3], line[7:].strip()
        with open(os.path.join(directory, lang, "guerin-%s.txt"% line_index), "w") as out_io:
            out_io.write(text)
| 25.428571
| 87
| 0.668539
|
4a1a49d815a6b246ff76cb14bc580ee86f1ee9e4
| 2,534
|
py
|
Python
|
nni/experiment/config/training_services/remote.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 9,680
|
2019-05-07T01:42:30.000Z
|
2022-03-31T16:48:33.000Z
|
nni/experiment/config/training_services/remote.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,957
|
2019-05-06T21:44:21.000Z
|
2022-03-31T09:21:53.000Z
|
nni/experiment/config/training_services/remote.py
|
dutxubo/nni
|
c16f4e1c89b54b8b80661ef0072433d255ad2d24
|
[
"MIT"
] | 1,571
|
2019-05-07T06:42:55.000Z
|
2022-03-31T03:19:24.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Configuration for remote training service.
Check the reference_ for explanation of each field.
You may also want to check `remote training service doc`_.
.. _reference: https://nni.readthedocs.io/en/stable/reference/experiment_config.html
.. _remote training service doc: https://nni.readthedocs.io/en/stable/TrainingService/RemoteMachineMode.html
"""
__all__ = ['RemoteConfig', 'RemoteMachineConfig']
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Union
import warnings
from ..base import ConfigBase
from ..training_service import TrainingServiceConfig
from .. import utils
@dataclass(init=False)
class RemoteMachineConfig(ConfigBase):
    """Configuration of a single SSH-reachable training machine.

    Either ``password`` or an existing ``ssh_key_file`` must be provided;
    when a password is given, the key file setting is discarded.
    (Non-default fields after defaulted ones are legal here because
    ``init=False`` suppresses the generated ``__init__``.)
    """
    host: str
    port: int = 22
    user: str
    password: Optional[str] = None
    ssh_key_file: Optional[utils.PathLike] = '~/.ssh/id_rsa'
    ssh_passphrase: Optional[str] = None
    use_active_gpu: bool = False
    max_trial_number_per_gpu: int = 1
    gpu_indices: Union[List[int], int, str, None] = None
    python_path: Optional[str] = None

    def _canonicalize(self, parents):
        """Normalize fields: password auth wins over key file; gpu_indices
        is converted to its canonical list form."""
        super()._canonicalize(parents)
        if self.password is not None:
            self.ssh_key_file = None
        self.gpu_indices = utils.canonical_gpu_indices(self.gpu_indices)

    def _validate_canonical(self):
        """Validate port range, per-GPU trial limit, gpu indices, and the
        authentication settings; warn when plain-text passwords are used."""
        super()._validate_canonical()
        assert 0 < self.port < 65536
        assert self.max_trial_number_per_gpu > 0
        utils.validate_gpu_indices(self.gpu_indices)
        if self.password is not None:
            warnings.warn('SSH password will be exposed in web UI as plain text. We recommend to use SSH key file.')
        elif not Path(self.ssh_key_file).is_file():
            raise ValueError(
                f'RemoteMachineConfig: You must either provide password or a valid SSH key file "{self.ssh_key_file}"'
            )
@dataclass(init=False)
class RemoteConfig(TrainingServiceConfig):
    """Top-level configuration for the remote training service.

    Requires at least one machine in ``machine_list``; using
    ``max_trial_number_per_gpu`` on any machine requires
    ``trial_gpu_number`` to be set on the experiment.
    """
    platform: str = 'remote'
    machine_list: List[RemoteMachineConfig]
    reuse_mode: bool = True

    def _validate_canonical(self):
        super()._validate_canonical()
        if not self.machine_list:
            # Fix: removed a stray "f" prefix on a literal with no
            # placeholders (lint F541); the message text is unchanged.
            raise ValueError('RemoteConfig: must provide at least one machine in machine_list')
        if not self.trial_gpu_number and any(machine.max_trial_number_per_gpu != 1 for machine in self.machine_list):
            raise ValueError('RemoteConfig: max_trial_number_per_gpu does not work without trial_gpu_number')
| 34.712329
| 118
| 0.718232
|
4a1a4aec104bd4afe5bf0e271d46bbe758ceb236
| 32,315
|
py
|
Python
|
mars/dataframe/indexing/reindex.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/indexing/reindex.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
mars/dataframe/indexing/reindex.py
|
chineking/mars
|
660098c65bcb389c6bbebc26b2502a9b3af43cf9
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
try:
import scipy.sparse as sps
except ImportError: # pragma: no cover
sps = None
from ... import opcodes
from ...core import ENTITY_TYPE, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import (
KeyField,
AnyField,
StringField,
Int64Field,
BoolField,
)
from ...tensor import tensor as astensor
from ...utils import lazy_import, pd_release_version
from ..core import Index as DataFrameIndexType, INDEX_TYPE
from ..initializer import Index as asindex
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import validate_axis_style_args, parse_index
from .index_lib import DataFrameReindexHandler
cudf = lazy_import("cudf")
# under pandas<1.1, SparseArray ignores zeros on creation
_pd_sparse_miss_zero = pd_release_version[:2] < (1, 1)
class DataFrameReindex(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.REINDEX
_input = KeyField("input")
_index = AnyField("index")
_index_freq = AnyField("index_freq")
_columns = AnyField("columns")
_method = StringField("method")
_level = AnyField("level")
_fill_value = AnyField("fill_value")
_limit = Int64Field("limit")
_enable_sparse = BoolField("enable_sparse")
    def __init__(
        self,
        index=None,
        index_freq=None,
        columns=None,
        method=None,
        level=None,
        fill_value=None,
        limit=None,
        enable_sparse=None,
        **kw,
    ):
        """Forward the reindex parameters to the serializable-field machinery
        (each field is declared with a leading underscore on the class)."""
        super().__init__(
            _index=index,
            _index_freq=index_freq,
            _columns=columns,
            _method=method,
            _level=level,
            _fill_value=fill_value,
            _limit=limit,
            _enable_sparse=enable_sparse,
            **kw,
        )
    # Read-only accessors over the serializable fields declared on the class.
    @property
    def input(self):
        return self._input

    @property
    def index(self):
        return self._index

    @property
    def index_freq(self):
        return self._index_freq

    @property
    def columns(self):
        return self._columns

    @property
    def method(self):
        return self._method

    @property
    def level(self):
        return self._level

    @property
    def fill_value(self):
        return self._fill_value

    @property
    def limit(self):
        return self._limit

    @property
    def enable_sparse(self):
        return self._enable_sparse
    @property
    def _indexes(self):
        # used for index_lib: one indexer per axis of the input, with
        # slice(None) standing in for axes that are not being reindexed.
        indexes = []
        names = ("index", "columns")
        for ax in range(self.input.ndim):
            index = names[ax]
            val = getattr(self, index)
            if val is not None:
                indexes.append(val)
            else:
                indexes.append(slice(None))
        return indexes

    @_indexes.setter
    def _indexes(self, new_indexes):
        # Write the per-axis indexers back into the underlying fields.
        for index_field, new_index in zip(["_index", "_columns"], new_indexes):
            setattr(self, index_field, new_index)

    @property
    def indexes(self):
        # Public alias of _indexes, as expected by the index handler.
        return self._indexes

    @property
    def can_index_miss(self):
        # Reindex tolerates labels that are absent from the source index.
        return True
    def _new_chunks(self, inputs, kws=None, **kw):
        """Create chunks, patching the input list per operand stage:
        map-stage chunks receive the fill_value chunk as an extra input,
        agg-stage chunks drop fill_value entirely."""
        if self.stage == OperandStage.map and len(inputs) < len(self._inputs):
            assert len(inputs) == len(self._inputs) - 1
            inputs.append(self._fill_value.chunks[0])
        if self.stage == OperandStage.agg and self._fill_value is not None:
            # fill_value is not required
            self._fill_value = None
        return super()._new_chunks(inputs, kws=kws, **kw)
    def _set_inputs(self, inputs):
        """Rebind entity-valued fields (index, fill_value) to the refreshed
        input entities, in the same order they were appended in __call__."""
        super()._set_inputs(inputs)
        inputs_iter = iter(self._inputs)
        self._input = next(inputs_iter)
        if self._index is not None and isinstance(self._index, ENTITY_TYPE):
            self._index = next(inputs_iter)
        if self._fill_value is not None and isinstance(self._fill_value, ENTITY_TYPE):
            self._fill_value = next(inputs_iter)
    def __call__(self, df_or_series):
        """Create the reindexed output tileable for *df_or_series*.

        Computes the output shape, index_value and (for DataFrames) dtypes /
        columns_value from the target labels, then builds a new Series or
        DataFrame tileable with this operand.
        """
        inputs = [df_or_series]
        shape = list(df_or_series.shape)
        index_value = df_or_series.index_value
        columns_value = dtypes = None
        if df_or_series.ndim == 2:
            columns_value = df_or_series.columns_value
            dtypes = df_or_series.dtypes
        if self._index is not None:
            shape[0] = self._index.shape[0]
            index_value = asindex(self._index).index_value
            self._index = astensor(self._index)
            if isinstance(self._index, ENTITY_TYPE):
                inputs.append(self._index)
        if self._columns is not None:
            shape[1] = self._columns.shape[0]
            # columns absent from the source become NaN-filled float64 columns
            dtypes = df_or_series.dtypes.reindex(index=self._columns).fillna(
                np.dtype(np.float64)
            )
            columns_value = parse_index(dtypes.index, store_data=True)
        if self._fill_value is not None and isinstance(self._fill_value, ENTITY_TYPE):
            inputs.append(self._fill_value)
        if df_or_series.ndim == 1:
            return self.new_series(
                inputs,
                shape=tuple(shape),
                dtype=df_or_series.dtype,
                index_value=index_value,
                name=df_or_series.name,
            )
        else:
            return self.new_dataframe(
                inputs,
                shape=tuple(shape),
                dtypes=dtypes,
                index_value=index_value,
                columns_value=columns_value,
            )
@classmethod
def tile(cls, op):
if all(len(inp.chunks) == 1 for inp in op.inputs):
# tile one chunk
out = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_params = out.params.copy()
chunk_params["index"] = (0,) * out.ndim
out_chunk = chunk_op.new_chunk(
[inp.chunks[0] for inp in op.inputs], kws=[chunk_params]
)
params = out.params.copy()
params["nsplits"] = ((s,) for s in out.shape)
params["chunks"] = [out_chunk]
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[params])
handler = DataFrameReindexHandler()
result = yield from handler.handle(op)
if op.method is None and op.fill_value is None:
return [result]
else:
axis = 1 if op.columns is not None and op.index is None else 0
result = result.fillna(
value=op.fill_value, method=op.method, axis=axis, limit=op.limit
)
return [(yield from recursive_tile(result))]
@classmethod
def _get_value(cls, ctx, obj):
if obj is not None and hasattr(obj, "key"):
return ctx[obj.key]
return obj
@classmethod
def _convert_to_writable(cls, obj):
if isinstance(obj, np.ndarray) and not obj.flags.writeable:
return obj.copy()
return obj
    @classmethod
    def _sparse_reindex(cls, inp, index=None, columns=None):
        """Reindex via scipy sparse matrices to avoid densifying large gaps.

        Builds, column by column, a pandas sparse-backed DataFrame/Series
        whose fill value is NaN, so labels missing from the source cost no
        dense memory.  Used by the map stage only (see _reindex).
        """
        if inp.ndim == 2:
            columns = inp.columns if columns is None else columns
            index_shape = len(index) if index is not None else len(inp)
            i_to_columns = dict()
            for i, col in enumerate(columns):
                if col in inp.dtypes:
                    if index is None:
                        # column exists and rows are untouched: reuse as-is
                        i_to_columns[i] = inp[col]
                    else:
                        # positions of the new labels in the old index; -1 = miss
                        indexer = inp.index.reindex(index)[1]
                        cond = indexer >= 0
                        available_indexer = indexer[cond]
                        del indexer
                        data = inp[col].iloc[available_indexer].to_numpy()
                        ind = cond.nonzero()[0]
                        spmatrix = sps.csc_matrix(
                            (data, (ind, np.zeros_like(ind))),
                            shape=(index_shape, 1),
                            dtype=inp[col].dtype,
                        )
                        # convert to SparseDtype(xxx, np.nan)
                        # to ensure 0 in sparse_array not converted to np.nan
                        if not _pd_sparse_miss_zero:
                            sparse_array = pd.arrays.SparseArray.from_spmatrix(spmatrix)
                            sparse_array = pd.arrays.SparseArray(
                                sparse_array.sp_values,
                                sparse_index=sparse_array.sp_index,
                                fill_value=np.nan,
                                dtype=pd.SparseDtype(sparse_array.dtype, np.nan),
                            )
                        else:
                            from pandas._libs.sparse import IntIndex
                            sparse_array = pd.arrays.SparseArray(
                                data,
                                sparse_index=IntIndex(index_shape, ind),
                                fill_value=np.nan,
                                dtype=pd.SparseDtype(data.dtype, np.nan),
                            )
                        series = pd.Series(sparse_array, index=index)
                        i_to_columns[i] = series
                else:
                    # brand-new column: an all-NaN sparse float64 column
                    ind = index if index is not None else inp.index
                    i_to_columns[i] = pd.DataFrame.sparse.from_spmatrix(
                        sps.coo_matrix((index_shape, 1), dtype=np.float64), index=ind
                    ).iloc[:, 0]
            df = pd.DataFrame(i_to_columns)
            df.columns = columns
            return df
        else:
            # Series path: same construction as the per-column branch above
            indexer = inp.index.reindex(index)[1]
            cond = indexer >= 0
            available_indexer = indexer[cond]
            del indexer
            data = inp.iloc[available_indexer].to_numpy()
            ind = cond.nonzero()[0]
            spmatrix = sps.csc_matrix(
                (data, (ind, np.zeros_like(ind))),
                shape=(len(index), 1),
                dtype=inp.dtype,
            )
            sparse_array = pd.arrays.SparseArray.from_spmatrix(spmatrix)
            # convert to SparseDtype(xxx, np.nan)
            # to ensure 0 in sparse_array not converted to np.nan
            sparse_array = pd.arrays.SparseArray(
                sparse_array.sp_values,
                sparse_index=sparse_array.sp_index,
                fill_value=np.nan,
                dtype=pd.SparseDtype(sparse_array.dtype, np.nan),
            )
            series = pd.Series(sparse_array, index=index, name=inp.name)
            return series
    @classmethod
    def _reindex(cls, ctx, op, fill=True, try_sparse=None):
        """Run the actual pandas reindex for one chunk.

        With ``fill=False`` (map stage) no hole filling is applied; with
        *try_sparse* set, a memory-saving sparse path may be taken when the
        reindexed result is estimated to be much larger than the input.
        """
        inp = cls._convert_to_writable(ctx[op.input.key])
        index = cls._get_value(ctx, op.index)
        if op.index_freq is not None:
            # restore the frequency lost when labels travelled as a tensor
            index = pd.Index(index, freq=op.index_freq)
        columns = cls._get_value(ctx, op.columns)
        kw = {"level": op.level}
        if index is not None and not isinstance(index, slice):
            kw["index"] = cls._convert_to_writable(index)
        if columns is not None and not isinstance(columns, slice):
            kw["columns"] = cls._convert_to_writable(columns)
        if fill:
            kw["method"] = op.method
            kw["fill_value"] = cls._get_value(ctx, op.fill_value)
            kw["limit"] = op.limit
        if (
            try_sparse
            and not fill
            and op.level is None
            and isinstance(inp, (pd.DataFrame, pd.Series))
            and sps is not None
        ):
            # 1. sparse is used in map only
            # 2. for MultiIndex, sparse is not needed as well
            # 3. only consider cpu
            # 4. scipy is installed
            if op.enable_sparse is None:
                # try to use sparse if estimated size > 2 * input_size
                cur_size = inp.memory_usage(deep=True)
                if inp.ndim == 2:
                    cur_size = cur_size.sum()
                element_size = cur_size / inp.size
                shape = list(inp.shape)
                if "index" in kw:
                    shape[0] = len(kw["index"])
                if "columns" in kw:
                    shape[1] = len(kw["columns"])
                estimate_size = np.prod(shape) * element_size
                fitted = estimate_size > cur_size * 2
            else:
                # specified when op.enable_sparse == True
                fitted = True
            if fitted:
                # use sparse instead
                return cls._sparse_reindex(
                    inp, index=kw.get("index"), columns=kw.get("columns")
                )
        return inp.reindex(**kw)
    @classmethod
    def _execute_reindex(cls, ctx, op):
        """Non-staged execution: reindex one chunk with filling applied."""
        ctx[op.outputs[0].key] = cls._reindex(ctx, op)
@classmethod
def _execute_map(cls, ctx, op):
if op.enable_sparse is not None:
try_sparse = op.enable_sparse
else:
try_sparse = True
ctx[op.outputs[0].key] = cls._reindex(
ctx, op, fill=False, try_sparse=try_sparse
)
@classmethod
def _convert_to_dense(cls, series):
if isinstance(series.dtype, pd.SparseDtype):
return series.astype(
pd.SparseDtype(series.dtype.subtype, np.nan)
).sparse.to_dense()
return series
    @classmethod
    def _merge_chunks(cls, inputs):
        """Merge partial reindex results by taking non-NaN values per slot.

        Every chunk in *inputs* covers the same target labels, with values
        present only where its source chunk had matching rows.  Overlapping
        non-NaN positions mean the source had duplicate labels and raise.
        """
        xdf = cls._get_xdf(inputs[0])
        ndim = inputs[0].ndim
        if ndim == 2:
            columns = inputs[0].columns
            # start from an all-NaN frame, then fill column by column
            result = xdf.DataFrame(
                np.full(inputs[0].shape, np.nan), columns=columns, index=inputs[0].index
            )
        else:
            columns = [inputs[0].name]
            result = None
        for i in range(len(columns)):
            if ndim == 1:
                curr = cls._convert_to_dense(inputs[0]).copy()
            else:
                curr = cls._convert_to_dense(inputs[0].iloc[:, i]).copy()
            for j in range(len(inputs) - 1):
                if ndim == 2:
                    left = cls._convert_to_dense(inputs[j].iloc[:, i])
                    right = cls._convert_to_dense(inputs[j + 1].iloc[:, i])
                else:
                    left = cls._convert_to_dense(inputs[j])
                    right = cls._convert_to_dense(inputs[j + 1])
                left_notna = left.notna()
                right_notna = right.notna()
                # same label filled from two chunks => duplicated in source
                if (left_notna & right_notna).sum() > 0:
                    raise ValueError("cannot reindex from a duplicate axis")
                curr.loc[left_notna] = left.loc[left_notna]
                curr.loc[right_notna] = right.loc[right_notna]
            if ndim == 1:
                result = curr
            else:
                result.iloc[:, i] = curr
        return result
    @classmethod
    def _get_xdf(cls, obj):
        # Pick the dataframe backend module: pandas for pandas objects or
        # when cudf is unavailable; otherwise cudf (GPU execution).
        return (
            pd if isinstance(obj, (pd.DataFrame, pd.Series)) or cudf is None else cudf
        )
    @classmethod
    def _execute_agg(cls, ctx, op):
        """Aggregation stage: combine map-stage pieces into the output chunk."""
        out = op.outputs[0]
        if op.index is None and op.columns is None:
            # index is tensor
            inputs = [ctx[inp.key] for inp in op.inputs]
            xdf = cls._get_xdf(inputs[0])
            if inputs[0].index.nlevels > 1 and op.level is not None:
                # multi index
                result = xdf.concat(inputs)
            else:
                result = cls._merge_chunks(inputs) if len(inputs) > 1 else inputs[0]
            ctx[out.key] = result
        else:
            # ndarray index or columns
            # figure out which axis carries the target labels: slice(None)
            # on the row axis means only columns are being reindexed
            if isinstance(op.index, slice) and op.index == slice(None):
                axis = 1
                labels = op.columns
            else:
                assert op.columns is None or (
                    isinstance(op.columns, slice) and op.columns == slice(None)
                )
                axis = 0
                labels = op.index
            inp = ctx[op.inputs[0].key]
            if inp.index.nlevels > 1 and op.level is not None:
                new_inp = inp
            else:
                # split input
                # the concatenated input holds several equally-sized pieces
                # along `axis`; slice them apart and merge their values
                size = out.shape[axis]
                assert inp.shape[axis] % size == 0
                inputs = []
                for i in range(inp.shape[axis] // size):
                    slc = [slice(None)] * inp.ndim
                    slc[axis] = slice(size * i, size * (i + 1))
                    inputs.append(inp.iloc[tuple(slc)])
                new_inp = cls._merge_chunks(inputs)
            labels = cls._convert_to_writable(labels)
            if out.ndim == 2:
                result = new_inp.reindex(labels=labels, axis=axis, level=op.level)
            else:
                result = new_inp.reindex(index=labels, level=op.level)
            ctx[out.key] = result
@classmethod
def execute(cls, ctx, op):
if op.stage == OperandStage.map:
return cls._execute_map(ctx, op)
elif op.stage == OperandStage.agg:
return cls._execute_agg(ctx, op)
else:
assert op.stage is None
return cls._execute_reindex(ctx, op)
def reindex(df_or_series, *args, **kwargs):
    """
    Conform Series/DataFrame to new index with optional filling logic.
    Places NA/NaN in locations having no value in the previous index. A new object
    is produced unless the new index is equivalent to the current one and
    ``copy=False``.
    Parameters
    ----------
    labels : array-like, optional
        New labels / index to conform the axis specified by 'axis' to.
    index, columns : array-like, optional
        New labels / index to conform to, should be specified using
        keywords. Preferably an Index object to avoid duplicating data.
    axis : int or str, optional
        Axis to target. Can be either the axis name ('index', 'columns')
        or number (0, 1).
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.
        * None (default): don't fill gaps
        * pad / ffill: Propagate last valid observation forward to next
          valid.
        * backfill / bfill: Use next valid observation to fill gap.
        * nearest: Use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    level : int or name
        Broadcast across a level, matching Index values on the
        passed MultiIndex level.
    fill_value : scalar, default np.NaN
        Value to use for missing values. Defaults to NaN, but can be any
        "compatible" value.
    limit : int, default None
        Maximum number of consecutive elements to forward or backward fill.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations most
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.
    Returns
    -------
    Series/DataFrame with changed index.
    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex_like : Change to same indices as other DataFrame.
    Examples
    --------
    ``DataFrame.reindex`` supports two calling conventions
    * ``(index=index_labels, columns=column_labels, ...)``
    * ``(labels, axis={'index', 'columns'}, ...)``
    We *highly* recommend using keyword arguments to clarify your
    intent.
    Create a dataframe with some fictional data.
    >>> import mars.dataframe as md
    >>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
    >>> df = md.DataFrame({'http_status': [200, 200, 404, 404, 301],
    ...                   'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
    ...                   index=index)
    >>> df.execute()
               http_status  response_time
    Firefox            200           0.04
    Chrome             200           0.02
    Safari             404           0.07
    IE10               404           0.08
    Konqueror          301           1.00
    Create a new index and reindex the dataframe. By default
    values in the new index that do not have corresponding
    records in the dataframe are assigned ``NaN``.
    >>> new_index = ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
    ...              'Chrome']
    >>> df.reindex(new_index).execute()
                   http_status  response_time
    Safari               404.0           0.07
    Iceweasel              NaN            NaN
    Comodo Dragon          NaN            NaN
    IE10                 404.0           0.08
    Chrome               200.0           0.02
    We can fill in the missing values by passing a value to
    the keyword ``fill_value``. Because the index is not monotonically
    increasing or decreasing, we cannot use arguments to the keyword
    ``method`` to fill the ``NaN`` values.
    >>> df.reindex(new_index, fill_value=0).execute()
                   http_status  response_time
    Safari                 404           0.07
    Iceweasel                0           0.00
    Comodo Dragon            0           0.00
    IE10                   404           0.08
    Chrome                 200           0.02
    >>> df.reindex(new_index, fill_value='missing').execute()
                  http_status response_time
    Safari                404          0.07
    Iceweasel         missing       missing
    Comodo Dragon     missing       missing
    IE10                  404          0.08
    Chrome                200          0.02
    We can also reindex the columns.
    >>> df.reindex(columns=['http_status', 'user_agent']).execute()
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN
    Or we can use "axis-style" keyword arguments
    >>> df.reindex(['http_status', 'user_agent'], axis="columns").execute()
               http_status  user_agent
    Firefox            200         NaN
    Chrome             200         NaN
    Safari             404         NaN
    IE10               404         NaN
    Konqueror          301         NaN
    To further illustrate the filling functionality in
    ``reindex``, we will create a dataframe with a
    monotonically increasing index (for example, a sequence
    of dates).
    >>> date_index = md.date_range('1/1/2010', periods=6, freq='D')
    >>> df2 = md.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
    ...                    index=date_index)
    >>> df2.execute()
                prices
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    Suppose we decide to expand the dataframe to cover a wider
    date range.
    >>> date_index2 = md.date_range('12/29/2009', periods=10, freq='D')
    >>> df2.reindex(date_index2).execute()
                prices
    2009-12-29     NaN
    2009-12-30     NaN
    2009-12-31     NaN
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN
    The index entries that did not have a value in the original data frame
    (for example, '2009-12-29') are by default filled with ``NaN``.
    If desired, we can fill in the missing values using one of several
    options.
    For example, to back-propagate the last valid value to fill the ``NaN``
    values, pass ``bfill`` as an argument to the ``method`` keyword.
    >>> df2.reindex(date_index2, method='bfill').execute()
                prices
    2009-12-29   100.0
    2009-12-30   100.0
    2009-12-31   100.0
    2010-01-01   100.0
    2010-01-02   101.0
    2010-01-03     NaN
    2010-01-04   100.0
    2010-01-05    89.0
    2010-01-06    88.0
    2010-01-07     NaN
    Please note that the ``NaN`` value present in the original dataframe
    (at index value 2010-01-03) will not be filled by any of the
    value propagation schemes. This is because filling while reindexing
    does not look at dataframe values, but only compares the original and
    desired indexes. If you do want to fill in the ``NaN`` values present
    in the original dataframe, use the ``fillna()`` method.
    See the :ref:`user guide <basics.reindexing>` for more.
    """
    # normalize positional/axis-style arguments into {'index': ..., 'columns': ...}
    axes = validate_axis_style_args(df_or_series, args, kwargs, "labels", "reindex")
    # Pop these, since the values are in `kwargs` under different names
    kwargs.pop("index", None)
    if df_or_series.ndim > 1:
        kwargs.pop("columns", None)
        kwargs.pop("axis", None)
        kwargs.pop("labels", None)
    method = kwargs.pop("method", None)
    level = kwargs.pop("level", None)
    copy = kwargs.pop("copy", True)
    limit = kwargs.pop("limit", None)
    tolerance = kwargs.pop("tolerance", None)
    fill_value = kwargs.pop("fill_value", None)
    enable_sparse = kwargs.pop("enable_sparse", None)
    if kwargs:
        raise TypeError(
            "reindex() got an unexpected keyword "
            f'argument "{list(kwargs.keys())[0]}"'
        )
    if tolerance is not None: # pragma: no cover
        raise NotImplementedError("`tolerance` is not supported yet")
    if method == "nearest": # pragma: no cover
        raise NotImplementedError("method=nearest is not supported yet")
    index = axes.get("index")
    index_freq = None
    if isinstance(index, ENTITY_TYPE):
        # Mars entity: remember datetime frequency before converting to tensor
        if isinstance(index, DataFrameIndexType):
            index_freq = getattr(index.index_value.value, "freq", None)
        if not isinstance(index, INDEX_TYPE):
            index = astensor(index)
    elif index is not None:
        index = np.asarray(index)
        index_freq = getattr(index, "freq", None)
    columns = axes.get("columns")
    if isinstance(columns, ENTITY_TYPE): # pragma: no cover
        try:
            columns = columns.fetch()
        except ValueError:
            raise NotImplementedError(
                "`columns` need to be executed first if it's a Mars object"
            )
    elif columns is not None:
        columns = np.asarray(columns)
    if isinstance(fill_value, ENTITY_TYPE) and getattr(fill_value, "ndim", 0) != 0:
        raise ValueError("fill_value must be a scalar")
    op = DataFrameReindex(
        index=index,
        index_freq=index_freq,
        columns=columns,
        method=method,
        level=level,
        fill_value=fill_value,
        limit=limit,
        enable_sparse=enable_sparse,
    )
    ret = op(df_or_series)
    if copy:
        return ret.copy()
    return ret
def reindex_like(
    df_or_series, other, method=None, copy=True, limit=None, tolerance=None
):
    """
    Return an object with matching indices as other object.
    Conform the object to the same index on all axes. Optional
    filling logic, placing NaN in locations having no value
    in the previous index. A new object is produced unless the
    new index is equivalent to the current one and copy=False.
    Parameters
    ----------
    other : Object of the same data type
        Its row and column indices are used to define the new indices
        of this object.
    method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
        Method to use for filling holes in reindexed DataFrame.
        Please note: this is only applicable to DataFrames/Series with a
        monotonically increasing/decreasing index.
        * None (default): don't fill gaps
        * pad / ffill: propagate last valid observation forward to next
          valid
        * backfill / bfill: use next valid observation to fill gap
        * nearest: use nearest valid observations to fill gap.
    copy : bool, default True
        Return a new object, even if the passed indexes are the same.
    limit : int, default None
        Maximum number of consecutive labels to fill for inexact matches.
    tolerance : optional
        Maximum distance between original and new labels for inexact
        matches. The values of the index at the matching locations must
        satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
        Tolerance may be a scalar value, which applies the same tolerance
        to all values, or list-like, which applies variable tolerance per
        element. List-like includes list, tuple, array, Series, and must be
        the same size as the index and its dtype must exactly match the
        index's type.
    Returns
    -------
    Series or DataFrame
        Same type as caller, but with changed indices on each axis.
    See Also
    --------
    DataFrame.set_index : Set row labels.
    DataFrame.reset_index : Remove row labels or move them to new columns.
    DataFrame.reindex : Change to new indices or expand indices.
    Notes
    -----
    Same as calling
    ``.reindex(index=other.index, columns=other.columns,...)``.
    Examples
    --------
    >>> import pandas as pd
    >>> import mars.dataframe as md
    >>> df1 = md.DataFrame([[24.3, 75.7, 'high'],
    ...                     [31, 87.8, 'high'],
    ...                     [22, 71.6, 'medium'],
    ...                     [35, 95, 'medium']],
    ...                    columns=['temp_celsius', 'temp_fahrenheit',
    ...                             'windspeed'],
    ...                    index=md.date_range(start='2014-02-12',
    ...                                        end='2014-02-15', freq='D'))
    >>> df1.execute()
                temp_celsius temp_fahrenheit windspeed
    2014-02-12          24.3            75.7      high
    2014-02-13            31            87.8      high
    2014-02-14            22            71.6    medium
    2014-02-15            35              95    medium
    >>> df2 = md.DataFrame([[28, 'low'],
    ...                     [30, 'low'],
    ...                     [35.1, 'medium']],
    ...                    columns=['temp_celsius', 'windspeed'],
    ...                    index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
    ...                                            '2014-02-15']))
    >>> df2.execute()
                temp_celsius windspeed
    2014-02-12          28.0       low
    2014-02-13          30.0       low
    2014-02-15          35.1    medium
    >>> df2.reindex_like(df1).execute()
                temp_celsius  temp_fahrenheit windspeed
    2014-02-12          28.0              NaN       low
    2014-02-13          30.0              NaN       low
    2014-02-14           NaN              NaN       NaN
    2014-02-15          35.1              NaN    medium
    """
    # matching index_value/columns_value keys mean the axes are already
    # identical; with copy=False we can return the object unchanged
    cond = df_or_series.index_value.key == other.index_value.key
    if df_or_series.ndim == 2:
        cond &= df_or_series.columns_value.key == other.columns_value.key
    if cond and not copy:
        return df_or_series
    kw = {
        "index": other.index,
        "method": method,
        "limit": limit,
        "tolerance": tolerance,
    }
    if df_or_series.ndim == 2:
        kw["columns"] = other.dtypes.index
    return reindex(df_or_series, **kw)
| 35.865705
| 88
| 0.558564
|
4a1a4b66b929e809ab9ffc70f4f62e22e1d277f5
| 825
|
py
|
Python
|
xlib/avecl/_internal/op/__init__.py
|
szywind/DeepFaceLive
|
60d867843c323a810352b1a8ac209912dec0acf2
|
[
"MIT"
] | 3
|
2021-12-08T08:59:50.000Z
|
2022-02-08T02:54:27.000Z
|
xlib/avecl/_internal/op/__init__.py
|
SandUhrGucker/DeepFaceLive
|
a897cbb06ee3511c63f10d9fbf2ccb66c3ee9659
|
[
"MIT"
] | 1
|
2022-02-08T01:29:03.000Z
|
2022-02-08T01:29:03.000Z
|
xlib/avecl/_internal/op/__init__.py
|
SandUhrGucker/DeepFaceLive
|
a897cbb06ee3511c63f10d9fbf2ccb66c3ee9659
|
[
"MIT"
] | 1
|
2021-12-14T09:18:15.000Z
|
2021-12-14T09:18:15.000Z
|
from .any_wise import add, any_wise, div, max_, min_, mul, sqrt, square, sub
from .binary_dilate_circle import binary_dilate_circle
from .binary_erode_circle import binary_erode_circle
from .binary_morph import binary_morph
from .cast import cast
from .concat import concat
from .depthwise_conv2D import depthwise_conv2D
from .gaussian_blur import gaussian_blur
from .matmul import matmul, matmulc
from .pad import pad
from .reduce import (moments, reduce_max, reduce_mean, reduce_min, reduce_std,
reduce_sum, reduce_variance)
from .remap import remap
from .remap_np_affine import remap_np_affine
from .reshape import reshape
from .slice_ import slice_
from .slice_set import slice_set
from .stack import stack
from .tile import tile
from .transpose import transpose
from .warp_affine import warp_affine
| 37.5
| 78
| 0.814545
|
4a1a4c612c49384f3ec250cc307f560e4b14dde2
| 13,227
|
py
|
Python
|
google/appengine/tools/devappserver2/inotify_file_watcher_test.py
|
theosp/google_appengine
|
9ce87a20684dc99cf5968e6f488c060e1530c159
|
[
"Apache-2.0"
] | 3
|
2019-01-28T03:57:20.000Z
|
2020-02-20T01:37:33.000Z
|
google/appengine/tools/devappserver2/inotify_file_watcher_test.py
|
theosp/google_appengine
|
9ce87a20684dc99cf5968e6f488c060e1530c159
|
[
"Apache-2.0"
] | null | null | null |
google/appengine/tools/devappserver2/inotify_file_watcher_test.py
|
theosp/google_appengine
|
9ce87a20684dc99cf5968e6f488c060e1530c159
|
[
"Apache-2.0"
] | 3
|
2019-01-18T11:33:56.000Z
|
2020-01-05T10:44:05.000Z
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.inotify_file_watcher."""
import logging
import os
import os.path
import shutil
import sys
import tempfile
import unittest
from google.appengine.tools.devappserver2 import inotify_file_watcher
@unittest.skipUnless(sys.platform.startswith('linux'), 'requires linux')
class TestInotifyFileWatcher(unittest.TestCase):
  """Tests for inotify_file_watcher.InotifyFileWatcher."""

  def setUp(self):
    self._directory = tempfile.mkdtemp()  # The watched directory
    self._junk_directory = tempfile.mkdtemp()  # A scrap directory.
    self._watcher = inotify_file_watcher.InotifyFileWatcher([self._directory])
    logging.debug('watched directory=%r, junk directory=%r',
                  self._directory, self._junk_directory)

  def tearDown(self):
    self._watcher.quit()
    shutil.rmtree(self._directory)
    shutil.rmtree(self._junk_directory)

  def _create_file(self, relative_path):
    """Create an empty file under the watched directory; return its realpath."""
    realpath = os.path.realpath(os.path.join(self._directory, relative_path))
    with open(realpath, 'w'):
      pass
    return realpath

  def _create_directory(self, relative_path):
    """Create a directory under the watched directory; return its realpath."""
    realpath = os.path.realpath(os.path.join(self._directory, relative_path))
    os.makedirs(realpath)
    return realpath

  def _create_directory_tree(self, path, num_directories):
    """Create exactly num_directories subdirectories in path."""
    assert num_directories >= 0
    if not num_directories:
      return
    self._create_directory(path)
    num_directories -= 1
    # Divide the remaining number of directories to create among 4
    # subdirectories in an approximate even fashion.
    for i in range(4, 0, -1):
      # Use floor division: true division would produce floats under
      # Python 3 and break the exact-count invariant of the recursion.
      sub_dir_size = num_directories // i
      self._create_directory_tree(os.path.join(path, 'dir%d' % i), sub_dir_size)
      num_directories -= sub_dir_size

  def test_file_created(self):
    self._watcher.start()
    path = self._create_file('test')
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_file_modified(self):
    path = self._create_file('test')
    self._watcher.start()
    with open(path, 'w') as f:
      f.write('testing')
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_file_read(self):
    path = self._create_file('test')
    with open(path, 'w') as f:
      f.write('testing')
    self._watcher.start()
    with open(path, 'r') as f:
      f.read()
    # Reads should not trigger updates.
    self.assertEqual(
        set(),
        self._watcher._get_changed_paths())

  def test_file_deleted(self):
    path = self._create_file('test')
    self._watcher.start()
    os.remove(path)
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_file_renamed(self):
    source = self._create_file('test')
    target = os.path.join(os.path.dirname(source), 'test2')
    self._watcher.start()
    os.rename(source, target)
    # Both the old and the new name are reported as changed.
    self.assertEqual(
        set([source, target]),
        self._watcher._get_changed_paths())

  def test_create_directory(self):
    self._watcher.start()
    directory = self._create_directory('test')
    self.assertEqual(
        set([directory]),
        self._watcher._get_changed_paths())

  def test_file_created_in_directory(self):
    directory = self._create_directory('test')
    self._watcher.start()
    path = self._create_file('test/file')
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_move_directory(self):
    source = self._create_directory('test')
    target = os.path.join(os.path.dirname(source), 'test2')
    self._watcher.start()
    os.rename(source, target)
    self.assertEqual(
        set([source, target]),
        self._watcher._get_changed_paths())

  def test_move_directory_out_of_watched(self):
    source = self._create_directory('test')
    target = os.path.join(self._junk_directory, 'test')
    self._watcher.start()
    os.rename(source, target)
    self.assertEqual(
        set([source]),
        self._watcher._get_changed_paths())
    with open(os.path.join(target, 'file'), 'w'):
      pass
    # Changes to files in subdirectories that have been moved should be ignored.
    self.assertEqual(
        set([]),
        self._watcher._get_changed_paths())

  def test_move_directory_into_watched(self):
    source = os.path.join(self._junk_directory, 'source')
    target = os.path.join(self._directory, 'target')
    os.mkdir(source)
    self._watcher.start()
    os.rename(source, target)
    self.assertEqual(
        set([target]),
        self._watcher._get_changed_paths())
    # The moved-in directory must now be watched itself.
    file_path = os.path.join(target, 'file')
    with open(file_path, 'w+'):
      pass
    self.assertEqual(
        set([file_path]),
        self._watcher._get_changed_paths())

  def test_directory_deleted(self):
    path = self._create_directory('test')
    self._watcher.start()
    os.rmdir(path)
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_subdirectory_deleted(self):
    """Tests that internal _directory_to_subdirs is updated on delete."""
    path = self._create_directory('test')
    sub_path = self._create_directory('test/test2')
    self._watcher.start()
    self.assertEqual(
        set([sub_path]),
        self._watcher._directory_to_subdirs[path])
    os.rmdir(sub_path)
    self.assertEqual(
        set([sub_path]),
        self._watcher._get_changed_paths())
    self.assertEqual(
        set(),
        self._watcher._directory_to_subdirs[path])
    os.rmdir(path)
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_symlink_directory(self):
    sym_target = os.path.join(self._directory, 'test')
    os.mkdir(os.path.join(self._junk_directory, 'subdir'))
    self._watcher.start()
    # Check that an added symlinked directory is reported.
    os.symlink(self._junk_directory, sym_target)
    self.assertEqual(
        set([sym_target]),
        self._watcher._get_changed_paths())
    # Check that a file added to the symlinked directory is reported.
    with open(os.path.join(self._junk_directory, 'file1'), 'w'):
      pass
    self.assertEqual(
        set([os.path.join(self._directory, 'test', 'file1')]),
        self._watcher._get_changed_paths())
    # Check that modifying the file in the symlinked directory is reported.
    with open(os.path.join(self._junk_directory, 'file1'), 'w') as fp:
      fp.write('some data')
    self.assertEqual(
        set([os.path.join(self._directory, 'test', 'file1')]),
        self._watcher._get_changed_paths())
    # Check that a removed symlinked directory is reported.
    os.remove(sym_target)
    self.assertEqual(
        set([sym_target]),
        self._watcher._get_changed_paths())
    # Check that a file added to the removed symlinked directory is *not*
    # reported.
    with open(os.path.join(self._junk_directory, 'subdir', 'file2'), 'w'):
      pass
    self.assertEqual(
        set(),
        self._watcher._get_changed_paths())

  @unittest.skip('b/11896748')
  def test_symlink_file(self):
    actual_file = os.path.join(self._junk_directory, 'moo')
    with open(actual_file, 'w'):
      pass
    symbolic_link = os.path.join(self._directory, 'moo')
    self._watcher.start()
    # Check that symlinking a file into watched directory is reported.
    os.symlink(actual_file, symbolic_link)
    self.assertEqual(
        set([symbolic_link]),
        self._watcher._get_changed_paths())
    # Check that modifying the source file is reported.
    with open(actual_file, 'w') as fp:
      fp.write('some data')
    self.assertEqual(
        set([symbolic_link]),
        self._watcher._get_changed_paths())
    # Check that deleting the source file is reported.
    os.unlink(actual_file)
    self.assertEqual(
        set([symbolic_link]),
        self._watcher._get_changed_paths())

  def test_many_directories(self):
    # Linux supports a limited number of watches per file descriptor. The
    # default is 8192 (i.e. 2^13).
    self._create_directory_tree('bigdir', num_directories=10000)
    self._watcher.start()
    path = self._create_file('bigdir/dir4/dir4/file')
    self.assertEqual(
        set([path]),
        self._watcher._get_changed_paths())

  def test_internal_symlinks_relative(self):
    dir_a_b = self._create_directory('a/b')
    dir_p = self._create_directory('p')
    os.symlink('../../p', os.path.join(dir_a_b, 'p'))
    self._create_directory('p/q/r')
    self._watcher.start()
    shutil.rmtree(dir_p)
    self._watcher._get_changed_paths()
    # TODO: validate the value returned from _get_changed_paths once
    # a solution is designed.

  def test_internal_symlinks_absolute(self):
    dir_a_b = self._create_directory('a/b')
    dir_p = self._create_directory('p')
    os.symlink(dir_p, os.path.join(dir_a_b, 'p'))
    self._create_directory('p/q/r')
    self._watcher.start()
    shutil.rmtree(dir_p)
    self._watcher._get_changed_paths()
    # TODO: validate the value returned from _get_changed_paths once
    # a solution is designed.

  @unittest.skip('b/14583335')
  def test_multiple_symlinks_same_directory(self):
    # Create a file inside the junk directory (the important point is it's
    # outside the watched directory).
    junk_file = os.path.join(self._junk_directory, 'file')
    with open(junk_file, 'w'):
      pass
    # Add a symlink from the watched directory to the junk directory. This
    # causes the file inside the junk directory to be watched.
    symlink_junkdir_1 = os.path.join(self._directory, 'junk1')
    os.symlink(self._junk_directory, symlink_junkdir_1)
    watched_junk_file = os.path.join(symlink_junkdir_1, 'file')
    self._watcher.start()
    # Make sure changes to the file are reported via the symlinked directory.
    with open(junk_file, 'w') as f:
      f.write('change1')
    self.assertEqual(
        set([watched_junk_file]),
        self._watcher._get_changed_paths())
    # Temporarily create a second symlink to the junk directory. We don't
    # care about changed paths are reported, we just need to make sure the
    # inotify internals are updated both when the second symlink is added and
    # when it is removed.
    symlink_junkdir_2 = os.path.join(self._directory, 'junk2')
    os.symlink(self._junk_directory, symlink_junkdir_2)
    self._watcher._get_changed_paths()
    os.unlink(symlink_junkdir_2)
    self._watcher._get_changed_paths()
    # And make sure changes to the file are still reported.
    with open(junk_file, 'w') as f:
      f.write('change2')
    self.assertEqual(
        set([watched_junk_file]),
        self._watcher._get_changed_paths())
@unittest.skipUnless(sys.platform.startswith('linux'), 'requires linux')
class TestInotifyFileWatcherMultipleDirectories(unittest.TestCase):
"""Tests for inotify_file_watcher.InotifyFileWatcher."""
def setUp(self):
self._directories = [tempfile.mkdtemp() for _ in range(4)]
self._watcher = inotify_file_watcher.InotifyFileWatcher(self._directories)
self._watcher.start()
def tearDown(self):
self._watcher.quit()
for directory in self._directories:
shutil.rmtree(directory)
@staticmethod
def _create_file(*paths):
realpath = os.path.realpath(os.path.join(*paths))
with open(realpath, 'w'):
pass
return realpath
def testInDir0(self):
path = self._create_file(self._directories[0], 'moo')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def testInDir2(self):
path = self._create_file(self._directories[2], 'moo')
self.assertEqual(
set([path]),
self._watcher._get_changed_paths())
def testInDir1And3(self):
path1 = self._create_file(self._directories[1], 'moo')
path3 = self._create_file(self._directories[3], 'moo')
self.assertEqual(
set([path1, path3]),
self._watcher._get_changed_paths())
class TestBitStr(unittest.TestCase):
    """Tests for inotify_file_watcher._bit_str bitmask formatting."""

    # Names for the known mask bits; 0x4 and 0x10 are deliberately unnamed
    # so testExtraBits can exercise the fallback hex rendering.
    _MASK_NAMES = {
        0x1: 'one',
        0x2: 'two',
        0x8: 'eight',
    }

    def testSingleBit(self):
        # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual throughout.
        self.assertEqual(
            'one (0x1)',
            inotify_file_watcher._bit_str(0x1, self._MASK_NAMES))

    def testMultipleBits(self):
        self.assertEqual(
            'one|two|eight (0xb)',
            inotify_file_watcher._bit_str(0x1 | 0x2 | 0x8, self._MASK_NAMES))

    def testExtraBits(self):
        # Unknown bits are rendered as bare parenthesized hex groups.
        self.assertEqual(
            'one|two|(0x4)|eight|(0x10) (0x1f)',
            inotify_file_watcher._bit_str(0x1 | 0x2 | 0x4 | 0x8 | 0x10,
                                          self._MASK_NAMES))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 32.419118
| 80
| 0.683451
|
4a1a4db7f214ac566d070c1e9d3ba85565522ff1
| 10,736
|
py
|
Python
|
src/mp_api/routes/materials/query_operators.py
|
materialsproject/api
|
e32114e83bc1c54b392723a1951f749acea6b9c3
|
[
"BSD-3-Clause-LBNL"
] | 18
|
2021-04-07T14:22:15.000Z
|
2022-03-29T13:21:20.000Z
|
src/mp_api/routes/materials/query_operators.py
|
materialsproject/api
|
e32114e83bc1c54b392723a1951f749acea6b9c3
|
[
"BSD-3-Clause-LBNL"
] | 411
|
2020-06-01T19:23:15.000Z
|
2022-03-31T19:15:25.000Z
|
src/mp_api/routes/materials/query_operators.py
|
materialsproject/api
|
e32114e83bc1c54b392723a1951f749acea6b9c3
|
[
"BSD-3-Clause-LBNL"
] | 7
|
2020-01-29T22:19:59.000Z
|
2021-11-18T16:48:45.000Z
|
from itertools import permutations
from typing import Optional
from emmet.core.symmetry import CrystalSystem
from fastapi import Body, HTTPException, Query
from maggma.api.query_operator import QueryOperator
from maggma.api.utils import STORE_PARAMS
from mp_api.routes.materials.utils import formula_to_criteria, chemsys_to_criteria
from pymatgen.analysis.structure_matcher import ElementComparator, StructureMatcher
from pymatgen.core.composition import Composition, CompositionError
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure
class FormulaQuery(QueryOperator):
    """Query operator matching materials by (possibly anonymized or
    wildcarded) chemical formula."""

    def query(
        self,
        formula: Optional[str] = Query(
            None,
            description="Query by formula including anonymized formula or by including wild cards",
        ),
    ) -> STORE_PARAMS:
        criteria = dict(formula_to_criteria(formula)) if formula else {}
        return {"criteria": criteria}

    def ensure_indexes(self):  # pragma: no cover
        return [
            (field, False)
            for field in ("formula_pretty", "formula_anonymous", "composition_reduced")
        ]
class ChemsysQuery(QueryOperator):
    """Query operator matching materials by chemical system, with wild
    card support."""

    def query(
        self,
        chemsys: Optional[str] = Query(
            None, description="Query by chemsys including wild cards",
        ),
    ) -> STORE_PARAMS:
        criteria = dict(chemsys_to_criteria(chemsys)) if chemsys else {}
        return {"criteria": criteria}

    def ensure_indexes(self):  # pragma: no cover
        return [(field, False) for field in ("chemsys", "elements", "nelements")]
class ElementsQuery(QueryOperator):
    """Query operator for inclusion/exclusion of elements in the
    material composition."""

    def query(
        self,
        elements: Optional[str] = Query(
            None,
            description="Query by elements in the material composition as a comma-separated list",
        ),
        exclude_elements: Optional[str] = Query(
            None,
            description="Query by excluded elements in the material composition as a comma-separated list",
        ),
    ) -> STORE_PARAMS:
        crit = {}  # type: dict
        element_crit = {}  # type: dict

        # Element(...) validates each symbol before it reaches the query.
        if elements:
            element_crit["$all"] = [
                str(Element(symbol)) for symbol in elements.strip().split(",")
            ]
        if exclude_elements:
            element_crit["$nin"] = [
                str(Element(symbol)) for symbol in exclude_elements.strip().split(",")
            ]
        if elements or exclude_elements:
            crit["elements"] = element_crit

        return {"criteria": crit}

    def ensure_indexes(self):  # pragma: no cover
        return [("elements", False)]
class DeprecationQuery(QueryOperator):
    """Query operator filtering on the material deprecation flag."""

    def query(
        self,
        deprecated: Optional[bool] = Query(
            False, description="Whether the material is marked as deprecated",
        ),
    ) -> STORE_PARAMS:
        # The default is False (not None), so the criterion is applied
        # unless the parameter explicitly resolves to None.
        if deprecated is None:
            return {"criteria": {}}
        return {"criteria": {"deprecated": deprecated}}
class SymmetryQuery(QueryOperator):
    """Query operator for symmetry metadata: crystal system and space
    group number/symbol."""

    def query(
        self,
        crystal_system: Optional[CrystalSystem] = Query(
            None, description="Crystal system of the material",
        ),
        spacegroup_number: Optional[int] = Query(
            None, description="Space group number of the material",
        ),
        spacegroup_symbol: Optional[str] = Query(
            None, description="Space group symbol of the material",
        ),
    ) -> STORE_PARAMS:
        crit = {}  # type: dict
        if crystal_system:
            crit["symmetry.crystal_system"] = str(crystal_system.value)
        if spacegroup_number:
            crit["symmetry.number"] = spacegroup_number
        if spacegroup_symbol:
            crit["symmetry.symbol"] = spacegroup_symbol
        return {"criteria": crit}

    def ensure_indexes(self):  # pragma: no cover
        return [
            (key, False)
            for key in ("symmetry.crystal_system", "symmetry.number", "symmetry.symbol")
        ]
class MultiTaskIDQuery(QueryOperator):
    """Query operator matching documents containing any of the supplied
    task_ids."""

    def query(
        self,
        task_ids: Optional[str] = Query(
            None, description="Comma-separated list of task_ids to query on"
        ),
    ) -> STORE_PARAMS:
        crit = {}
        if task_ids:
            requested = [task_id.strip() for task_id in task_ids.split(",")]
            crit["task_ids"] = {"$in": requested}
        return {"criteria": crit}

    def ensure_indexes(self):  # pragma: no cover
        return [("task_ids", False)]
class MultiMaterialIDQuery(QueryOperator):
    """
    Method to generate a query for different root-level material_id values
    """

    def query(
        self,
        material_ids: Optional[str] = Query(
            None, description="Comma-separated list of material_id values to query on"
        ),
    ) -> STORE_PARAMS:
        crit = {}

        if material_ids:
            crit.update(
                {
                    "material_id": {
                        "$in": [
                            material_id.strip()
                            for material_id in material_ids.split(",")
                        ]
                    }
                }
            )

        return {"criteria": crit}

    def ensure_indexes(self):  # pragma: no cover
        # Added for consistency with the other operators in this module,
        # which each declare an index on the field they query.
        return [("material_id", False)]
class FindStructureQuery(QueryOperator):
    """
    Method to generate a find structure query.

    Candidates are pre-filtered in the database by reduced composition,
    then post-processed with pymatgen's StructureMatcher to rank true
    structural matches.
    """

    def query(
        self,
        structure: Structure = Body(
            ..., description="Pymatgen structure object to query with",
        ),
        ltol: float = Query(
            0.2, description="Fractional length tolerance. Default is 0.2.",
        ),
        stol: float = Query(
            0.3,
            description="Site tolerance. Defined as the fraction of the average free \
                length per atom := ( V / Nsites ) ** (1/3). Default is 0.3.",
        ),
        angle_tol: float = Query(
            5, description="Angle tolerance in degrees. Default is 5 degrees.",
        ),
        limit: int = Query(
            1,
            description="Maximum number of matches to show. Defaults to 1, only showing the best match.",
        ),
    ) -> STORE_PARAMS:
        # Stash parameters for post_process, which runs after the DB query.
        self.ltol = ltol
        self.stol = stol
        self.angle_tol = angle_tol
        self.limit = limit
        self.structure = structure

        crit = {}

        try:
            s = Structure.from_dict(structure)
        except Exception:
            raise HTTPException(
                status_code=404,
                detail="Body cannot be converted to a pymatgen structure object.",
            )

        # Composition is a cheap pre-filter; exact structural matching
        # happens in post_process.
        crit.update({"composition_reduced": dict(s.composition.to_reduced_dict)})

        return {"criteria": crit}

    def post_process(self, docs):
        """Rank composition candidates by structural similarity.

        Returns at most ``self.limit`` matches, best (lowest RMS
        displacement) first.
        """
        s1 = Structure.from_dict(self.structure)

        m = StructureMatcher(
            ltol=self.ltol,
            stol=self.stol,
            angle_tol=self.angle_tol,
            primitive_cell=True,
            scale=True,
            attempt_supercell=False,
            comparator=ElementComparator(),
        )

        matches = []

        for doc in docs:
            s2 = Structure.from_dict(doc["structure"])
            matched = m.fit(s1, s2)
            if matched:
                rms = m.get_rms_dist(s1, s2)
                matches.append(
                    {
                        "material_id": doc["material_id"],
                        "normalized_rms_displacement": rms[0],
                        "max_distance_paired_sites": rms[1],
                    }
                )

        # Fixed: sort ALL matches before truncating to the limit. The
        # original sliced first and sorted after, which could discard the
        # best match whenever more than `limit` candidates fit.
        response = sorted(
            matches,
            key=lambda x: (
                x["normalized_rms_displacement"],
                x["max_distance_paired_sites"],
            ),
        )[: self.limit]

        return response

    def ensure_indexes(self):  # pragma: no cover
        return [("composition_reduced", False)]
class FormulaAutoCompleteQuery(QueryOperator):
    """
    Method to generate a formula autocomplete query.

    Builds a MongoDB Atlas Search aggregation pipeline over the
    ``formula_autocomplete`` index, matching any element-order permutation
    of the reduced formula typed so far.
    """

    def query(
        self,
        formula: str = Query(..., description="Human readable chemical formula.",),
        limit: int = Query(
            10, description="Maximum number of matches to show. Defaults to 10.",
        ),
    ) -> STORE_PARAMS:
        self.formula = formula
        self.limit = limit

        # Validate the user input with pymatgen before building the pipeline.
        try:
            comp = Composition(formula)
        except CompositionError:
            raise HTTPException(
                status_code=400, detail="Invalid formula provided.",
            )

        # ind_str: per-element "SymbolCount" fragments (count omitted when 1);
        # eles: the bare element symbols used for the $match filter below.
        ind_str = []
        eles = []

        if len(comp) == 1:
            # Single-element formula: use the integer formula directly.
            d = comp.get_integer_formula_and_factor()
            s = d[0] + str(int(d[1])) if d[1] != 1 else d[0]
            ind_str.append(s)
            eles.append(d[0])
        else:
            comp_red = comp.reduced_composition.items()
            for (i, j) in comp_red:
                if j != 1:
                    ind_str.append(i.name + str(int(j)))
                else:
                    ind_str.append(i.name)
                eles.append(i.name)

        # Every ordering of the fragments, so e.g. "O2Si" also matches "SiO2".
        final_terms = ["".join(entry) for entry in permutations(ind_str)]

        pipeline = [
            {
                "$search": {
                    "index": "formula_autocomplete",
                    "text": {"path": "formula_pretty", "query": final_terms},
                }
            },
            {
                "$project": {
                    "_id": 0,
                    "formula_pretty": 1,
                    "elements": 1,
                    "length": {"$strLenCP": "$formula_pretty"},
                }
            },
            {
                # Keep only hits at least as long as the typed prefix that
                # contain all requested elements.
                "$match": {
                    "length": {"$gte": len(final_terms[0])},
                    "elements": {"$all": eles},
                }
            },
            # NOTE(review): $limit runs before $sort here, so the pipeline
            # truncates to `limit` documents in search-score order and only
            # then sorts those by length — confirm this ordering is intended.
            {"$limit": limit},
            {"$sort": {"length": 1}},
            {"$project": {"elements": 0, "length": 0}},
        ]

        return {"pipeline": pipeline}

    def ensure_indexes(self):  # pragma: no cover
        return [("formula_pretty", False)]
| 27.741602
| 107
| 0.538562
|
4a1a4f15547afa589fe8d95c6bc51a6c342f2f1d
| 1,979
|
py
|
Python
|
jeopardy_backend/jeopardy/serializers.py
|
daniel-weisdorf/jeopardy
|
509e7fe23276c7311492e23690e9b9ee9c7083ff
|
[
"Apache-2.0"
] | null | null | null |
jeopardy_backend/jeopardy/serializers.py
|
daniel-weisdorf/jeopardy
|
509e7fe23276c7311492e23690e9b9ee9c7083ff
|
[
"Apache-2.0"
] | null | null | null |
jeopardy_backend/jeopardy/serializers.py
|
daniel-weisdorf/jeopardy
|
509e7fe23276c7311492e23690e9b9ee9c7083ff
|
[
"Apache-2.0"
] | 1
|
2021-04-04T04:56:34.000Z
|
2021-04-04T04:56:34.000Z
|
from rest_framework import serializers
from jeopardy.models import Game, Team, Player, Category, Question, Host
class QuestionSerializer(serializers.ModelSerializer):
    """Board-view serializer for Question: exposes metadata and the category
    name while hiding the question/answer text and the special-question flags."""

    category_name = serializers.SerializerMethodField()

    def get_category_name(self, obj):
        # Flatten the related category to its display name.
        return obj.category.name

    class Meta:
        model = Question
        # Hide the playable content; the raw category FK is replaced by
        # category_name above.
        exclude = [
            'category',
            'question',
            'answer',
            'is_daily_double',
            'is_picture_question'
        ]
class QuestionDetailSerializer(serializers.ModelSerializer):
    """Detail serializer for Question: includes the question text but never
    the answer; the raw category FK is flattened to its name."""

    category_name = serializers.SerializerMethodField()

    def get_category_name(self, obj):
        # Flatten the related category to its display name.
        return obj.category.name

    class Meta:
        model = Question
        exclude = ['category', 'answer']
class HostSerializer(serializers.ModelSerializer):
    """Host serializer; drops the back-reference to its game."""

    class Meta:
        model = Host
        exclude = ['game']
class CategorySerializer(serializers.ModelSerializer):
    """Category with its nested questions (board form, via QuestionSerializer);
    drops the back-reference to its game."""

    questions = QuestionSerializer(many=True)

    class Meta:
        model = Category
        exclude = ['game']
class PlayerSerializer(serializers.ModelSerializer):
    """Serializes every field of a Player."""

    class Meta:
        model = Player
        fields = '__all__'
class TeamSerializer(serializers.ModelSerializer):
    """Team with its nested players; drops the back-reference to its game."""

    players = PlayerSerializer(many=True)

    class Meta:
        model = Team
        exclude = ['game']
class GameSerializer(serializers.ModelSerializer):
    """Full game state: nested categories, teams and host, plus derived
    fields for the current question and answering player."""

    categories = CategorySerializer(many=True)
    teams = TeamSerializer(many=True)
    host = HostSerializer()
    player_answering = serializers.SerializerMethodField()
    picking_team_id = serializers.IntegerField()
    selected_question = serializers.SerializerMethodField()

    def get_selected_question(self, obj):
        """Serialize the currently selected question, or None.

        Uses the detail serializer (which includes the question text) only
        when the game's show_full_question flag is set; otherwise the
        spoiler-free board serializer.
        """
        selected_question = obj.selected_question()
        if selected_question:
            if obj.show_full_question:
                return QuestionDetailSerializer(selected_question).data
            else:
                return QuestionSerializer(selected_question).data
        return None

    def get_player_answering(self, obj):
        # The player currently answering, serialized, or None when nobody is.
        player_answering = obj.player_answering()
        if player_answering:
            return PlayerSerializer(player_answering).data
        return None

    class Meta:
        model = Game
        fields = '__all__'
| 26.039474
| 72
| 0.775139
|
4a1a50e73542a011948d4e2ea0bc94ccff8c3d3f
| 1,521
|
py
|
Python
|
pyservice/organizer.py
|
adomokos/pyservice
|
e79b4aeacec84bea31b1f3492cdef7bec47d9f1a
|
[
"MIT"
] | 1
|
2021-01-06T01:50:44.000Z
|
2021-01-06T01:50:44.000Z
|
pyservice/organizer.py
|
adomokos/pyservice
|
e79b4aeacec84bea31b1f3492cdef7bec47d9f1a
|
[
"MIT"
] | null | null | null |
pyservice/organizer.py
|
adomokos/pyservice
|
e79b4aeacec84bea31b1f3492cdef7bec47d9f1a
|
[
"MIT"
] | null | null | null |
import functools
from pyservice.action import Action
from pyservice.context import Context
from itertools import takewhile
from typing import List
class Organizer:
    """Executes a pipeline of actions against a context, undoing the
    already-completed actions (in reverse order) when one of them fails."""

    class ContextFailed(Exception):
        """Raised internally when an action leaves the context failed."""

        def __init__(self, action: Action):
            self.action = action

    def __init__(self, actions: List[Action]):
        self.actions = actions

    @staticmethod
    def _call_action(f: Action, ctx: Context) -> Context:
        """Apply one action; raise ContextFailed if it marks the context failed."""
        ctx = f(ctx)
        if ctx.is_failure:
            raise Organizer.ContextFailed(f)
        return ctx

    def run(self, ctx: Context) -> Context:
        """Run every action in order; on failure, roll back and return a context."""
        current = ctx
        try:
            for action in self.actions:
                current = self._call_action(action, current)
            return current
        except Organizer.ContextFailed as failure:
            # Undo only the actions that completed, most recent first,
            # starting from the originally supplied context.
            result = ctx
            for action in self._find_actions_to_roll_back(failure.action, self.actions):
                result = self._execute_rollback(result, action)
            return ctx if result is None else result

    @staticmethod
    def _execute_rollback(ctx: Context, action: Action):
        return action(ctx)

    @staticmethod
    def _find_actions_to_roll_back(
        action: Action, actions: List[Action]
    ) -> List[Action]:
        """Actions that ran before *action*, in reverse (rollback) order."""
        completed = list(
            takewhile(lambda a, x=action: a != x, actions)  # type: ignore
        )
        completed.reverse()
        return completed
| 28.166667
| 88
| 0.631821
|
4a1a50ef7fa6e0969b024477cfe0f9d016d7f48c
| 817
|
py
|
Python
|
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/065.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | 1
|
2020-07-03T13:54:18.000Z
|
2020-07-03T13:54:18.000Z
|
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/065.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | null | null | null |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/02-Aluno/Edno/065.py
|
moacirsouza/nadas
|
ad98d73b4281d1581fd2b2a9d29001acb426ee56
|
[
"MIT"
] | null | null | null |
# Exercise 065: read integers repeatedly; at the end, report the average of
# all values and the largest and smallest values read. The program asks the
# user after each number whether to continue entering values.

continuar = 1
maior = None  # largest value seen so far (None until the first number)
menor = None  # smallest value seen so far
cont = 0      # how many numbers were entered
soma = 0      # running sum for the average
while continuar == 1:
    numero = int(input('Digite qualquer número inteiro\t: '))
    # Fixed: track the extremes from the first input instead of starting at
    # 0 / 999999999, which gave wrong answers for all-negative inputs or
    # values above the magic sentinel.
    if maior is None or numero > maior:
        maior = numero
    if menor is None or numero < menor:
        menor = numero
    soma = soma + numero
    cont += 1
    print('')
    continuar = int(input('Continuar a digitar? [1] - SIM. Para não, digite qualquer outro número.'))
print('O maior número é {}, o menor número é {}. {} números foram digitados e a média é {}'.format(maior, menor, cont, round(soma/cont,2)))
| 31.423077
| 139
| 0.674419
|
4a1a51dd7800f0928e20f4725e4d3055584887a8
| 1,117
|
py
|
Python
|
ajaximage/image.py
|
trailhawks/django-ajaximage
|
ab4ceca0bed93216d317d5576be9ce41c45e657b
|
[
"MIT"
] | null | null | null |
ajaximage/image.py
|
trailhawks/django-ajaximage
|
ab4ceca0bed93216d317d5576be9ce41c45e657b
|
[
"MIT"
] | null | null | null |
ajaximage/image.py
|
trailhawks/django-ajaximage
|
ab4ceca0bed93216d317d5576be9ce41c45e657b
|
[
"MIT"
] | null | null | null |
import os
from PIL import Image, ImageOps
try:
from StringIO import StringIO as IO
except ImportError:
from io import BytesIO as IO
from django.core.files.base import ContentFile
from django.core.files.uploadedfile import SimpleUploadedFile
def resize(file_, max_width=0, max_height=0, crop=0):
    """Resize or crop an uploaded image and return it as a JPEG upload.

    A dimension of 0 means "unconstrained"; if both are 0 the file is
    returned untouched. crop=1 crops to exactly (max_width, max_height),
    otherwise the image is thumbnailed to fit within that box.
    """
    max_width = int(max_width)
    max_height = int(max_height)
    crop = int(crop)

    # Fixed: compare ints with == rather than `is`. Identity comparison on
    # integers only works for CPython's small-int cache and raises a
    # SyntaxWarning on modern Python.
    if max_width == 0 and max_height == 0:
        return file_

    # 9999 acts as "effectively unbounded" for the unconstrained dimension.
    max_width = 9999 if max_width == 0 else max_width
    max_height = 9999 if max_height == 0 else max_height

    size = (max_width, max_height)
    image = Image.open(file_)

    # JPEG has no alpha channel: composite RGBA images onto a white background.
    if image.mode == "RGBA":
        image.load()
        background = Image.new("RGB", image.size, (255, 255, 255))
        background.paste(image, mask=image.split()[3])
        image = background

    temp = IO()
    if crop == 1:
        image = ImageOps.fit(image, size, Image.ANTIALIAS)
    else:
        image.thumbnail(size, Image.ANTIALIAS)
    image.save(temp, "jpeg")
    temp.seek(0)

    return SimpleUploadedFile(file_.name, temp.read(), content_type="image/jpeg")
| 25.386364
| 81
| 0.675022
|
4a1a5205e2ba73b5fa86b9a31cb6be78d3a2fa3c
| 1,664
|
py
|
Python
|
models_all_solvable2/st_test4.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | 7
|
2019-05-08T19:14:34.000Z
|
2021-12-24T00:00:40.000Z
|
models_all_solvable2/st_test4.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | null | null | null |
models_all_solvable2/st_test4.py
|
grossmann-group/pyomo-MINLP-benchmarking
|
714f0a0dffd61675649a805683c0627af6b4929e
|
[
"MIT"
] | 2
|
2020-05-21T22:15:51.000Z
|
2020-06-02T23:02:08.000Z
|
# MINLP written by GAMS Convert at 05/15/20 00:51:23
#
# Equation counts
# Total E G L N X C B
# 6 1 0 5 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 7 1 0 6 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 37 35 2 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.i1 = Var(within=Integers,bounds=(0,1E15),initialize=0)
m.i2 = Var(within=Integers,bounds=(0,1E15),initialize=0)
m.i3 = Var(within=Integers,bounds=(0,1E15),initialize=0)
m.i4 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i5 = Var(within=Integers,bounds=(0,1),initialize=0)
m.i6 = Var(within=Integers,bounds=(0,2),initialize=0)
m.obj = Objective(expr=0.5*m.i1*m.i1 + 6.5*m.i1 + 7*m.i6*m.i6 - m.i6 - m.i2 - 2*m.i3 + 3*m.i4 - 2*m.i5, sense=minimize)
m.c1 = Constraint(expr= m.i1 + 2*m.i2 + 8*m.i3 + m.i4 + 3*m.i5 + 5*m.i6 <= 16)
m.c2 = Constraint(expr= - 8*m.i1 - 4*m.i2 - 2*m.i3 + 2*m.i4 + 4*m.i5 - m.i6 <= -1)
m.c3 = Constraint(expr= 2*m.i1 + 0.5*m.i2 + 0.2*m.i3 - 3*m.i4 - m.i5 - 4*m.i6 <= 24)
m.c4 = Constraint(expr= 0.2*m.i1 + 2*m.i2 + 0.1*m.i3 - 4*m.i4 + 2*m.i5 + 2*m.i6 <= 12)
m.c5 = Constraint(expr= - 0.1*m.i1 - 0.5*m.i2 + 2*m.i3 + 5*m.i4 - 5*m.i5 + 3*m.i6 <= 3)
| 38.697674
| 119
| 0.481971
|
4a1a52dfdca2cc530a3d5ef2f4ce476dd6906106
| 4,597
|
py
|
Python
|
src/jindex.py
|
cerad95/job-scraper
|
3841c34d0ead57089fce192bd285faccfcf8f5da
|
[
"MIT"
] | null | null | null |
src/jindex.py
|
cerad95/job-scraper
|
3841c34d0ead57089fce192bd285faccfcf8f5da
|
[
"MIT"
] | null | null | null |
src/jindex.py
|
cerad95/job-scraper
|
3841c34d0ead57089fce192bd285faccfcf8f5da
|
[
"MIT"
] | null | null | null |
import asyncio
import re
from datetime import datetime
import aiohttp
import bs4
from tqdm import tqdm
from job import Job
from website import Website
async def fetch(session, url):
    """GET *url* with *session* and return the raw response bytes."""
    async with session.get(url) as response:
        body = await response.read()
    return body
class jindex(Website):
    """Scraper for jobindex.dk search result pages: fetches pages
    concurrently with aiohttp and parses job postings with BeautifulSoup."""

    async def fetch_all_urls(self, weburls):
        """Fetch all result pages concurrently; store raw HTML in self.html_pages."""
        tasks = []
        async with aiohttp.ClientSession() as session:
            for url in weburls:
                tasks.append(asyncio.tasks.create_task((fetch(session, url))))
            self.html_pages = [await f for f in tqdm(asyncio.as_completed(tasks), total=len(tasks), desc="Fetching:", bar_format=self.printstring)]

    async def get_max_page(self):
        """Read the pagination widget of the first page to set self.max_page."""
        async with aiohttp.ClientSession() as session:
            task = asyncio.tasks.create_task(fetch(session, self.url))
            page = await asyncio.gather(task)
            for one in tqdm(page, desc="Fetching max page", bar_format=self.printstring):
                # Parse only the pagination div to keep the soup small.
                strainer = bs4.SoupStrainer("div", {'class': ['jix_pagination_pages']})
                soup = bs4.BeautifulSoup(one.decode('utf-8'), "lxml", parse_only=strainer)
                pages = soup.find("div", {'class': ['jix_pagination_pages']})
                hrefs = []
                for element in pages:
                    if 'href' in str(element):
                        try:
                            hrefs.append(int(element.contents[0]))
                        except Exception as e:
                            # Non-numeric pagination links ("Next" etc.) are skipped.
                            pass
                self.max_page = max(hrefs) + 1

    def generate_urls_for_pages(self):
        """Return the search URLs for pages 1 .. max_page - 1."""
        urls = []
        loop = asyncio.get_event_loop()
        loop.run_until_complete(self.get_max_page())
        for i in tqdm(range(1, self.max_page), desc="Generating URLS", bar_format=self.printstring):
            urls.append(self.url + "&page={}".format(i))
        return urls

    def jobs_unique_and_sorted(self):
        # Deduplicate, then order jobs by (location, title).
        self.jobs = list(set(self.jobs))
        self.jobs.sort(key=lambda x: (x.location, x.title), reverse=False)

    def scrape_all_pages(self):
        """Parse every fetched page, feeding only the PaidJob divs to scrape_page."""
        for job_page in tqdm(self.html_pages, desc="Scraping:", bar_format=self.printstring):
            strainer = bs4.SoupStrainer("div", {'class': ['PaidJob']})
            soup = bs4.BeautifulSoup(job_page.decode('utf-8'), 'lxml', parse_only=strainer)
            self.scrape_page(soup)

    def scrape_page(self, job_page):
        """Extract Job records from the PaidJob divs of one result page."""
        paidjobs = job_page.find_all("div", {'class': ['PaidJob']})
        for paidjob in paidjobs:
            title_div = paidjob.find("b")
            title = str(title_div.contents[0])
            publishdate = datetime.strptime(
                str(paidjob.find("li", {'class': ['toolbar-pubdate']}).contents[1].attrs['datetime']), '%Y-%m-%d')
            joblink = str(paidjob.find("b").parent.attrs['href']).replace("'", "")
            # Remove the toolbar so it does not pollute the description text.
            paidjob.find("div", {'class': ['jix_toolbar', 'jix_appetizer_toolbar']}).decompose()
            paragraphs = paidjob.find_all(["p", 'li'], recursive=True)
            for paragraph in paragraphs:
                # NOTE(review): the `or "<p>\n<a"` operand is a non-empty
                # string and therefore always truthy, so this condition is
                # always True; and `del paragraph` only unbinds the loop
                # variable — nothing is removed from `paragraphs`. The loop
                # is effectively a no-op. Confirm intent before "fixing":
                # actually removing elements would shift the indices used
                # below for the company lookup.
                if str(paragraph) == "<p></p>" or "<p>\n<a":
                    del paragraph
            # Company name lives in the first non-empty leading paragraph.
            if len(paragraphs[0].contents) != 0:
                company = paragraphs[0].contents[1].contents[0].contents[0]
            else:
                company = paragraphs[1].contents[1].contents[0].contents[0]
            description = []
            for paragraph in paragraphs:
                for content in paragraph.contents:
                    if type(content) == bs4.NavigableString:
                        description.append(str(content))
                    elif type(content) == bs4.Tag and len(content.contents) > 0:
                        description.append(str(content.contents[0]))
            # Drop the leading title/company fragments from the description.
            del (description[0:3])
            descriptionstring = ""
            for paragraph in description:
                if paragraph == " ":
                    # NOTE(review): this `del` is also a no-op; whitespace-only
                    # fragments are simply skipped.
                    del paragraph
                else:
                    descriptionstring += self.clean_string(paragraph)
            # NOTE(review): this reads from `job_page` (the whole page), not
            # `paidjob`, so every job on the page appears to get the same
            # location — confirm whether that is intended.
            if len(job_page.find("p").contents) > 2:
                location = str(job_page.find("p").contents[2]).split(' ')[-1]
            else:
                location = "N/A"
            newjob = Job(title=title, location=location, company=company, joblink=joblink, description=descriptionstring, publishdate=publishdate, urlname="jobindex")
            self.clean_object(newjob)
            # Keep only jobs whose location and description match the
            # configured place and area filters (case-insensitive).
            if re.compile('|'.join(self.steder), re.IGNORECASE).search(location) and re.compile('|'.join(self.areas), re.IGNORECASE).search(descriptionstring):
                self.jobs.append(newjob)
| 37.991736
| 166
| 0.580379
|
4a1a5369567c07c93f01e105e25e26119bbcd425
| 722
|
py
|
Python
|
async/async19fixed.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | 1
|
2017-04-27T19:58:41.000Z
|
2017-04-27T19:58:41.000Z
|
async/async19fixed.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | 8
|
2016-10-30T17:16:45.000Z
|
2018-05-15T15:01:45.000Z
|
async/async19fixed.py
|
showa-yojyo/bin
|
8ddd29b3c629634212b3708904cf615c42a6eaf5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""async19fixed.py: Chain coroutines (fixed)
Usage:
async19fixed.py
"""
import asyncio
async def create():
    """Simulate creating a file (the slowest step, 3 s)."""
    await asyncio.sleep(3.0)
    print("(1) create file")
async def write():
    """Simulate writing into the file (1 s)."""
    await asyncio.sleep(1.0)
    print("(2) write into file")
async def close():
    """Simulate closing the file (no delay)."""
    print("(3) close file")
async def test(loop):
    """Run the three steps strictly in order, then stop the event loop."""
    await create()  # == await asyncio.ensure_future(create())
    await write()
    await close()
    await asyncio.sleep(2.0)
    loop.stop()
def main():
    """Drive the coroutine chain on a manually managed event loop."""
    loop = asyncio.get_event_loop()
    asyncio.ensure_future(test(loop))
    loop.run_forever()  # test() calls loop.stop() when the chain completes
    # Fixed: asyncio.Task.all_tasks was deprecated in 3.7 and removed in
    # 3.9; the module-level asyncio.all_tasks is the supported spelling.
    print(f"Pending tasks at exit: {asyncio.all_tasks(loop)}")
    loop.close()
# Run the demo when executed as a script.
if __name__ == '__main__':
    main()
| 19.513514
| 67
| 0.648199
|
4a1a55696cb9baed4e136e83306b246d22a54cf5
| 4,483
|
py
|
Python
|
python/chicago_crime_server.py
|
thekingofkings/chicago-crime
|
30550697402aa3a5a074096a0032b0c1e1264313
|
[
"MIT"
] | 10
|
2016-11-08T04:31:06.000Z
|
2021-07-28T15:17:52.000Z
|
python/chicago_crime_server.py
|
thekingofkings/chicago-crime
|
30550697402aa3a5a074096a0032b0c1e1264313
|
[
"MIT"
] | 24
|
2016-04-19T15:07:52.000Z
|
2017-05-20T02:29:23.000Z
|
python/chicago_crime_server.py
|
thekingofkings/urban-flow-analysis
|
30550697402aa3a5a074096a0032b0c1e1264313
|
[
"MIT"
] | 5
|
2016-09-13T21:13:46.000Z
|
2019-12-04T11:40:02.000Z
|
from flask import Flask, request, jsonify, redirect, make_response
from flask import render_template, send_from_directory
from NBRegression import *
import os
here = os.path.dirname(os.path.abspath(__file__))
import glob
# Flask application serving the Chicago-crime regression experiments.
app = Flask(__name__)
app.debug = True

# Candidate model features selectable from the web form.
features = ['density', 'disadvantage', 'ethnic', 'pctblack', 'pctship',
            'population', 'poverty', 'residential', 'sociallag', 'spatiallag',
            'temporallag']
@app.route('/')
def input_parameter():
    """Serve the parameter-entry form for the regression run."""
    return render_template('nb-parameter-setting.html')
@app.route('/set-parameter', methods=['GET'])
def set_parameter():
a = request.args
features = list(a.keys())
crimeT = a.get('crimeT')
if crimeT == 'total':
crimeT = ['total']
elif crimeT == 'violent':
crimeT = ['HOMICIDE', 'CRIM SEXUAL ASSAULT', 'BATTERY', 'ROBBERY',
'ARSON', 'DOMESTIC VIOLENCE', 'ASSAULT']
flowT = int(a.get('flowT'))
iters = int(a.get('iters'))
year = int(a.get('year'))
features.remove('crimeT')
features.remove('flowT')
features.remove('iters')
features.remove('year')
logF = []
for k in a.keys():
if a.get(k) == 'log':
logF.append(k)
elif a.get(k) == 'none':
features.remove(k)
import sys
import time
import os
fname = 'file-{0}'.format(time.strftime('%m-%d-%y-%H-%M-%S'))
sys.stdout = open(os.path.join(here, 'templates', fname), 'w')
# every print is redirected to the file
print 'Selected features', features
print 'Features take log', logF
print 'Year', year
print 'Flow type:', flowT, '(0 - total, 4 - low income)'
print 'crime type', crimeT
print 'number of iterations', iters, '\n'
permutationTest_onChicagoCrimeData(year=year, features=features,
logFeatures=logF, crimeType=crimeT, flowType=flowT, iters=iters)
# print redirection ends
sys.stdout.close()
s = None
return redirect('result/' + fname)
@app.route('/history')
def list_previous_results():
    """Render summaries (first six lines) of all saved result files."""
    items = []
    for path in glob.glob(here + '/templates/file*'):
        with open(path, 'r') as fin:
            head = [next(fin) for _ in range(6)]
        items.append({'head': head, 'name': os.path.basename(path)})
    return render_template('history.html', items=items)
@app.route('/result/<fname>')
def format_result(fname):
    """Parse a saved result file and render it as an HTML table.

    Expected file layout: six header lines, one separator line, then
    repeated pairs of <key line> / <space-separated values line> until an
    empty key line terminates the sequence.
    """
    fn = here + '/templates/' + fname
    with open(fn, 'r') as fin:
        head = [fin.readline() for x in range(6)]
        fin.readline()  # skip the separator line
        key = fin.readline().strip()
        rows = []
        while (key != ''):
            values = fin.readline().split(" ")
            row = {'key': key, 'values': values}
            rows.append(row)
            key = fin.readline().strip()
    return render_template('result.html', head=head, rows = rows)
@app.route('/nb-permute')
def nb_permute():
    """Serve the configuration form for the GLMM permutation run."""
    return render_template('nb-permute.html')
@app.route('/new-permute')
def new_permute():
a = request.args
year = int(a.get('year')) if a.get('year') != '' else '2010'
iters = a.get('iters') if a.get('iters') != '' else 10
lags = []
lags.append( "1" if "social-lag-crime" in a else "0" )
lags.append( "1" if "spatial-lag-crime" in a else "0" )
lags.append( "1" if "social-lag-disadv" in a else "0" )
lags.append( "1" if "spatial-lag-disadv" in a else "0" )
lagsFlag = "".join( lags )
ep = 'exposure' if 'exposure' in a else 'noexposure'
tl = 'templag' if 'templag' in a else 'notemplag'
sf = 'selfflow' if 'selfflow' in a else 'noselfflow'
print a
print lagsFlag, iters, ep, year
fname = "glmmadmb--totallehd-totalcrime-bysource-{0}-logpop-{1}-{2}-logpopdensty-{3}-{4}-.out".format(
ep, lagsFlag, iters, tl, sf)
coefficients_pvalue(lagsFlag, tempflag=tl, selfflow=sf, itersN=iters, exposure=ep, year=year)
return redirect('download/' + fname)
@app.route('/download/<fname>')
def download_result(fname):
    """Serve a generated output file from the R directory, uncached."""
    directory = here + '/../R/'
    response = make_response(send_from_directory(directory, fname))
    # Force revalidation so reruns with the same file name are not stale.
    response.cache_control.max_age = 0
    return response
# Listen on all interfaces so the dev server is reachable externally.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0')
| 29.11039
| 107
| 0.571269
|
4a1a5573ecade10bf79ac29c3286c599a5b3109d
| 12,682
|
py
|
Python
|
src/lms-ds-loader/tests/test_loader_facade.py
|
Ed-Fi-Alliance-OSS/LMS-Toolkit
|
22eb5e77465c7d0f179ebf192f6c8560fa396418
|
[
"Apache-2.0"
] | 3
|
2021-06-10T16:27:18.000Z
|
2022-01-25T18:41:35.000Z
|
src/lms-ds-loader/tests/test_loader_facade.py
|
Ed-Fi-Alliance-OSS/LMS-Toolkit
|
22eb5e77465c7d0f179ebf192f6c8560fa396418
|
[
"Apache-2.0"
] | 30
|
2021-03-19T17:50:11.000Z
|
2022-02-01T21:37:56.000Z
|
src/lms-ds-loader/tests/test_loader_facade.py
|
Ed-Fi-Exchange-OSS/LMS-Toolkit
|
22eb5e77465c7d0f179ebf192f6c8560fa396418
|
[
"Apache-2.0"
] | 10
|
2021-06-10T16:27:27.000Z
|
2021-12-27T12:31:57.000Z
|
# SPDX-License-Identifier: Apache-2.0
# Licensed to the Ed-Fi Alliance under one or more agreements.
# The Ed-Fi Alliance licenses this file to you under the Apache License, Version 2.0.
# See the LICENSE and NOTICES files in the project root for more information.
from typing import Dict, Tuple
import pandas as pd
import pytest
from unittest.mock import MagicMock, Mock
from edfi_lms_ds_loader import migrator
from edfi_lms_ds_loader.helpers.argparser import MainArguments
from edfi_lms_ds_loader.loader_facade import run_loader
from edfi_lms_ds_loader.helpers.constants import DbEngine
def describe_when_uploading_extractor_files() -> None:
    """pytest-describe suite for run_loader().

    Verifies that the facade runs migrations, reads each extractor CSV type,
    and dispatches the resulting DataFrames to the matching df_to_db upload
    function — all collaborators are mocked/patched.
    """
    def describe_given_no_errors_occur() -> None:
        @pytest.fixture
        def fixture(mocker) -> Tuple[Dict[str, MagicMock], Dict[str, pd.DataFrame]]:
            """Patch every collaborator, run the loader once, and return
            (mocks, dataframes) so each test can inspect one interaction."""
            # Arrange
            args_mock = MagicMock(spec=MainArguments)
            args_mock.engine = DbEngine.MSSQL
            db_engine_mock = MagicMock()
            args_mock.get_adapter.return_value = db_engine_mock
            args_mock.csv_path = "/some/path"
            db_adapter_mock = Mock()
            # Pretend one file was already processed so the loader's
            # already-seen filtering path is exercised.
            db_adapter_mock.get_processed_files = Mock(
                return_value=set(["FullPathOne"])
            )
            db_adapter_mock.engine = DbEngine.MSSQL
            args_mock.get_db_operations_adapter.return_value = db_adapter_mock
            migrator_mock = MagicMock(spec=migrator.migrate)
            migrator_mock.engine = DbEngine.MSSQL
            mocker.patch("edfi_lms_ds_loader.migrator.migrate", migrator_mock)
            # --- fake DataFrames returned by the file readers ---
            fake_df_users = pd.DataFrame({"generic_df": [1, 2, 3]})
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_users_file",
                return_value=fake_df_users,
            )
            fake_df_sections = pd.DataFrame([{"SourceSystemIdentifier": "a"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_sections_file",
                return_value=fake_df_sections,
            )
            mocker.patch(
                "edfi_lms_ds_loader.loader_facade._get_sections_df",
                return_value=fake_df_sections,
            )
            fake_df_assignments = pd.DataFrame(
                [
                    {"LMSSectionSourceSystemIdentifier": "a"},
                    {"SourceSystemIdentifier": "b"},
                ]
            )
            mocker.patch(
                "edfi_lms_ds_loader.loader_facade._get_assignments_df",
                return_value=fake_df_assignments,
            )
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_assignments_file",
                return_value=fake_df_assignments,
            )
            fake_df_section_associations = pd.DataFrame([{"associations": "b"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_section_associations_file",
                return_value=fake_df_section_associations,
            )
            fake_df_section_activities = pd.DataFrame([{"activities": "b"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_section_activities_file",
                return_value=fake_df_section_activities,
            )
            fake_df_system_activities = pd.DataFrame([{"activities": "b"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_system_activities_file",
                return_value=fake_df_system_activities,
            )
            fake_df_assignment_submissions = pd.DataFrame([{"submissions": "b"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_submissions_file",
                return_value=fake_df_assignment_submissions,
            )
            fake_df_attendance_events = pd.DataFrame([{"date": "c"}])
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_attendance_events_file",
                return_value=fake_df_attendance_events
            )
            # --- spies on the df_to_db upload entry points ---
            mock_upload_file = mocker.patch("edfi_lms_ds_loader.df_to_db.upload_file")
            mock_upload_assignments_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_assignments"
            )
            mock_upload_section_associations_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_section_associations"
            )
            mock_upload_section_activities_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_section_activities"
            )
            mock_upload_system_activities_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_system_activities"
            )
            mock_upload_assignment_submissions_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_assignment_submissions"
            )
            mock_upload_attendance_events_file = mocker.patch(
                "edfi_lms_ds_loader.df_to_db.upload_attendance_events"
            )
            mocks = {
                "migrate": migrator_mock,
                "get_db_operations_adapter": db_adapter_mock,
                "get_adapter": db_engine_mock,
                "upload_file": mock_upload_file,
                "upload_assignments_file": mock_upload_assignments_file,
                "upload_section_associations_file": mock_upload_section_associations_file,
                "upload_section_activities_file": mock_upload_section_activities_file,
                "upload_system_activities_file": mock_upload_system_activities_file,
                "upload_assignment_submissions_file": mock_upload_assignment_submissions_file,
                "upload_attendance_events_file": mock_upload_attendance_events_file,
            }
            dfs = {
                "users": fake_df_users,
                "sections": fake_df_sections,
                "assignments": fake_df_assignments,
                "section_associations": fake_df_section_associations,
                "section_activities": fake_df_section_activities,
                "system_activities": fake_df_system_activities,
                "assignment_submissions": fake_df_assignment_submissions,
                "attendance_events": fake_df_attendance_events
            }
            # --- fake file listings returned by file_repository ---
            # NOTE(review): the same variable name is reused for the sections
            # and users path mocks below; harmless since neither is asserted
            # on afterwards, but worth renaming for clarity.
            file_repository_users_mock = Mock(return_value=["fileOne", "fileTwo"])
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_sections_file_paths",
                file_repository_users_mock,
            )
            file_repository_users_mock = Mock(return_value=["fileFour", "fileSix"])
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_users_file_paths",
                file_repository_users_mock,
            )
            file_repository_assignments_mock = Mock(
                return_value=["fileSeven", "fileEighth"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_assignments_file_paths",
                file_repository_assignments_mock,
            )
            file_repository_section_associations_mock = Mock(
                return_value=["fileNine", "fileTen"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_section_associations_file_paths",
                file_repository_section_associations_mock,
            )
            file_repository_assignment_submissions_mock = Mock(
                return_value=["fileEleven", "fileTwelve"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_submissions_file_paths",
                file_repository_assignment_submissions_mock,
            )
            file_repository_section_activities_mock = Mock(
                return_value=["file13", "file14"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_section_activities_file_paths",
                file_repository_section_activities_mock,
            )
            file_repository_system_activities_mock = Mock(
                return_value=["file15", "file16"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_system_activities_file_paths",
                file_repository_system_activities_mock,
            )
            file_repository_attendance_mock = Mock(
                return_value=["file17", "file18"]
            )
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_attendance_events_paths",
                file_repository_attendance_mock
            )
            # Act
            run_loader(args_mock)
            # Return the mock objects for examination
            return (mocks, dfs)
        def it_runs_migrations(mocker, fixture) -> None:
            mocks, _ = fixture
            mocks["migrate"].assert_called_once_with(mocks["get_adapter"], 'mssql')
        def it_uploads_users(mocker, fixture) -> None:
            # Users are expected to be the first upload_file call.
            mocks, dfs = fixture
            sections_call = mocks["upload_file"].call_args_list[0][0]
            assert sections_call[0] is mocks["get_db_operations_adapter"]
            assert sections_call[1] is dfs["users"]
            assert sections_call[2] == "LMSUser"
        def it_uploads_sections(mocker, fixture) -> None:
            mocks, dfs = fixture
            # call_args_list[2] is the third upload_file call — presumably the
            # sections upload; verify against run_loader's upload order. The
            # print below looks like leftover debugging output.
            print(mocks["upload_file"].call_args_list)
            sections_call = mocks["upload_file"].call_args_list[2][0]
            assert sections_call[0] is mocks["get_db_operations_adapter"]
            assert sections_call[1] is dfs["sections"]
            assert sections_call[2] == "LMSSection"
        def it_uploads_assignments(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_assignments_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["assignments"]
            )
        def it_uploads_section_associations(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_section_associations_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["section_associations"]
            )
        def it_uploads_section_activities(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_section_activities_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["section_activities"]
            )
        def it_uploads_system_activities(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_system_activities_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["system_activities"]
            )
        def it_uploads_assignment_submissions(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_assignment_submissions_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["assignment_submissions"]
            )
        def it_uploads_attendance_events(mocker, fixture) -> None:
            mocks, dfs = fixture
            mocks["upload_attendance_events_file"].assert_called_with(
                mocks["get_db_operations_adapter"], dfs["attendance_events"]
            )
    def describe_given_users_file_read_fails() -> None:
        def it_bubbles_up_the_error(mocker) -> None:
            """An exception raised while reading the users CSV must propagate
            out of run_loader rather than being swallowed."""
            # Arrange
            args_mock = MagicMock(spec=MainArguments)
            db_engine_mock = Mock()
            args_mock.get_adapter.return_value = db_engine_mock
            args_mock.csv_path = "/some/path"
            db_adapter_mock = Mock()
            db_adapter_mock.get_processed_files = Mock(return_value=set(["fileOne"]))
            args_mock.get_db_operations_adapter.return_value = db_adapter_mock
            migrator_mock = MagicMock(spec=migrator.migrate)
            mocker.patch("edfi_lms_ds_loader.migrator.migrate", migrator_mock)
            def __raise(csv_path) -> None:
                raise Exception("bad things")
            mocker.patch(
                "edfi_lms_file_utils.file_reader.read_users_file", side_effect=__raise
            )
            file_repository_mock = Mock(return_value=["fileOne", "fileThree"])
            mocker.patch(
                "edfi_lms_file_utils.file_repository.get_users_file_paths",
                file_repository_mock,
            )
            # Act
            with pytest.raises(Exception):
                run_loader(args_mock)
            # Since we're not doing anything special to have that error bubble up,
            # additional tests for exceptions on other methods would not add much value
            # here.
| 40.006309
| 94
| 0.620722
|
4a1a563f7229b382d4782f689c41c4687f344433
| 1,973
|
py
|
Python
|
aries_cloudagent/messaging/routing/messages/tests/test_route_query_request.py
|
DibbsZA/aries-cloudagent-python
|
a094dd7697023721ac2a2fd4e58b04d4b37d1f44
|
[
"Apache-2.0"
] | 7
|
2020-07-07T15:44:41.000Z
|
2022-03-26T21:20:41.000Z
|
aries_cloudagent/messaging/routing/messages/tests/test_route_query_request.py
|
totemprotocol/aries-fl
|
dd78dcebc771971abfee301b80cdd5d246c14840
|
[
"Apache-2.0"
] | null | null | null |
aries_cloudagent/messaging/routing/messages/tests/test_route_query_request.py
|
totemprotocol/aries-fl
|
dd78dcebc771971abfee301b80cdd5d246c14840
|
[
"Apache-2.0"
] | 2
|
2019-12-02T18:59:07.000Z
|
2020-06-03T18:58:20.000Z
|
from ..route_query_request import RouteQueryRequest
from ...message_types import ROUTE_QUERY_REQUEST
from ...models.paginate import Paginate, PaginateSchema
from unittest import mock, TestCase
class TestRouteQueryRequest(TestCase):
    """Unit tests for the RouteQueryRequest agent message."""
    test_limit = 100
    test_offset = 10
    # NOTE(review): test_verkey is defined but never referenced in this class.
    test_verkey = "3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx"
    test_filter = {"recipient_key": ["3Dn1SJNPaCXcvvJvSbsFWP2xaCjMom3can8CQNhWrTRx"]}
    def setUp(self):
        # A fresh message with pagination is built for every test.
        self.paginate = Paginate(limit=self.test_limit, offset=self.test_offset)
        self.message = RouteQueryRequest(
            filter=self.test_filter, paginate=self.paginate
        )
    def test_init(self):
        """Constructor stores the filter and pagination values unchanged."""
        assert self.message.filter == self.test_filter
        assert self.message.paginate.limit == self.test_limit
        assert self.message.paginate.offset == self.test_offset
    def test_type(self):
        """Message advertises the registered ROUTE_QUERY_REQUEST type."""
        assert self.message._type == ROUTE_QUERY_REQUEST
    @mock.patch(
        "aries_cloudagent.messaging.routing.messages.route_query_request.RouteQueryRequestSchema.load"
    )
    def test_deserialize(self, message_schema_load):
        """deserialize() delegates to the schema's load()."""
        obj = {"obj": "obj"}
        message = RouteQueryRequest.deserialize(obj)
        message_schema_load.assert_called_once_with(obj)
        assert message is message_schema_load.return_value
    @mock.patch(
        "aries_cloudagent.messaging.routing.messages.route_query_request.RouteQueryRequestSchema.dump"
    )
    def test_serialize(self, message_schema_dump):
        """serialize() delegates to the schema's dump()."""
        message_dict = self.message.serialize()
        message_schema_dump.assert_called_once_with(self.message)
        assert message_dict is message_schema_dump.return_value
class TestRouteQueryRequestSchema(TestCase):
    """Round-trip test for the RouteQueryRequest schema."""
    def test_make_model(self):
        """Serializing and then deserializing yields a RouteQueryRequest."""
        original = RouteQueryRequest(filter={}, paginate=Paginate())
        round_tripped = RouteQueryRequest.deserialize(original.serialize())
        assert isinstance(round_tripped, RouteQueryRequest)
| 35.872727
| 102
| 0.737962
|
4a1a568bbc677728fb311c7bfe0d66c764102916
| 4,233
|
py
|
Python
|
release/stubs.min/System/ComponentModel/__init___parts/PropertyTabAttribute.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/ComponentModel/__init___parts/PropertyTabAttribute.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs.min/System/ComponentModel/__init___parts/PropertyTabAttribute.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
# Auto-generated IronPython stub for System.ComponentModel.PropertyTabAttribute.
# Bodies are placeholders (`pass`); only signatures and docs matter here.
class PropertyTabAttribute(Attribute, _Attribute):
    """
    Identifies the property tab or tabs to display for the specified class or classes.
    PropertyTabAttribute()
    PropertyTabAttribute(tabClass: Type)
    PropertyTabAttribute(tabClassName: str)
    PropertyTabAttribute(tabClass: Type,tabScope: PropertyTabScope)
    PropertyTabAttribute(tabClassName: str,tabScope: PropertyTabScope)
    """
    def Equals(self, other):
        """
        Equals(self: PropertyTabAttribute,other: PropertyTabAttribute) -> bool
        Returns a value indicating whether this instance is equal to a specified attribute.
        other: A System.ComponentModel.PropertyTabAttribute to compare to this instance,or null.
        Returns: true if the System.ComponentModel.PropertyTabAttribute instances are equal; otherwise,false.
        Equals(self: PropertyTabAttribute,other: object) -> bool
        Returns a value indicating whether this instance is equal to a specified object.
        other: An object to compare to this instance,or null.
        Returns: true if other refers to the same System.ComponentModel.PropertyTabAttribute instance; otherwise,
        false.
        """
        pass
    def GetHashCode(self):
        """
        GetHashCode(self: PropertyTabAttribute) -> int
        Gets the hash code for this object.
        Returns: The hash code for the object the attribute belongs to.
        """
        pass
    def InitializeArrays(self, *args):
        """
        InitializeArrays(self: PropertyTabAttribute,tabClasses: Array[Type],tabScopes: Array[PropertyTabScope])
        Initializes the attribute using the specified names of tab classes and array of tab scopes.
        tabClasses: The types of tabs to create.
        tabScopes: The scope of each tab. If the scope is System.ComponentModel.PropertyTabScope.Component,it is
        shown only for components with the corresponding System.ComponentModel.PropertyTabAttribute. If
        it is System.ComponentModel.PropertyTabScope.Document,it is shown for all components on the
        document.
        InitializeArrays(self: PropertyTabAttribute,tabClassNames: Array[str],tabScopes: Array[PropertyTabScope])
        Initializes the attribute using the specified names of tab classes and array of tab scopes.
        tabClassNames: An array of fully qualified type names of the types to create for tabs on the Properties window.
        tabScopes: The scope of each tab. If the scope is System.ComponentModel.PropertyTabScope.Component,it is
        shown only for components with the corresponding System.ComponentModel.PropertyTabAttribute. If
        it is System.ComponentModel.PropertyTabScope.Document,it is shown for all components on the
        document.
        """
        pass
    def __eq__(self, *args):
        """ x.__eq__(y) <==> x==y """
        pass
    def __init__(self, *args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    @staticmethod
    def __new__(self, *__args):
        """
        __new__(cls: type)
        __new__(cls: type,tabClass: Type)
        __new__(cls: type,tabClassName: str)
        __new__(cls: type,tabClass: Type,tabScope: PropertyTabScope)
        __new__(cls: type,tabClassName: str,tabScope: PropertyTabScope)
        """
        pass
    def __ne__(self, *args):
        """ x.__ne__(y) <==> x!=y """
        pass
    # The stub generator emits read-only-ish placeholder properties; the real
    # .NET members are getter-only.
    TabClasses = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Gets the types of tabs that this attribute uses.
    Get: TabClasses(self: PropertyTabAttribute) -> Array[Type]
    """
    TabClassNames = property(
        lambda self: object(), lambda self, v: None, lambda self: None
    )
    """Gets the names of the tab classes that this attribute uses.
    """
    TabScopes = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets an array of tab scopes of each tab of this System.ComponentModel.PropertyTabAttribute.
    Get: TabScopes(self: PropertyTabAttribute) -> Array[PropertyTabScope]
    """
| 27.134615
| 221
| 0.690999
|
4a1a56ebde53ef8b628e74cf9c90e6bd94975bbb
| 5,956
|
py
|
Python
|
frappe/www/list.py
|
chaitraliw/mws_frappe
|
52182a27f3b9a61d080e741e349a308be3e94582
|
[
"MIT"
] | 1
|
2020-08-12T23:07:02.000Z
|
2020-08-12T23:07:02.000Z
|
frappe/www/list.py
|
chaitraliw/mws_frappe
|
52182a27f3b9a61d080e741e349a308be3e94582
|
[
"MIT"
] | null | null | null |
frappe/www/list.py
|
chaitraliw/mws_frappe
|
52182a27f3b9a61d080e741e349a308be3e94582
|
[
"MIT"
] | 1
|
2018-03-21T18:41:05.000Z
|
2018-03-21T18:41:05.000Z
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cint, quoted
from frappe.website.render import resolve_path
from frappe.model.document import get_controller, Document
from frappe import _
# Module-level flags — presumably read by frappe's website renderer to skip
# response caching and sitemap inclusion for this dynamic page; verify.
no_cache = 1
no_sitemap = 1
def get_context(context):
    """Build the rendering context for a standard list page.

    Also merges in `get_list_context` from the doctype's module file and the
    first page of rendered rows from :func:`get`.
    """
    form_dict = frappe.local.form_dict
    doctype = form_dict.doctype
    context.parents = [{"route": "me", "title": _("My Account")}]
    context.update(get_list_context(context, doctype) or {})
    context.doctype = doctype
    context.txt = form_dict.txt
    context.update(get(**form_dict))
@frappe.whitelist(allow_guest=True)
def get(doctype, txt=None, limit_start=0, limit=20, **kwargs):
    """Returns processed HTML page for a standard listing.

    Renders one page of rows (``limit`` rows starting at ``limit_start``) and
    reports whether another page exists.

    Returns a dict with ``result`` (list of rendered row HTML), ``show_more``
    and ``next_start``.
    """
    limit_start = cint(limit_start)
    # NOTE(review): `limit` is used in integer arithmetic below but is not
    # passed through cint() — confirm callers never send it as a string.
    limit_page_length = limit
    next_start = limit_start + limit_page_length
    # a bare "search" form value acts as the free-text filter
    if not txt and frappe.form_dict.search:
        txt = frappe.form_dict.search
        del frappe.form_dict['search']
    controller = get_controller(doctype)
    meta = frappe.get_meta(doctype)
    filters = prepare_filters(doctype, controller, kwargs)
    list_context = get_list_context(frappe._dict(), doctype)
    list_context.title_field = getattr(controller, 'website',
        {}).get('page_title_field', meta.title_field or 'name')
    if list_context.filters:
        filters.update(list_context.filters)
    # a doctype module may supply its own get_list implementation
    _get_list = list_context.get_list or get_list
    # fetch one extra row so we can tell whether a "next page" exists
    kwargs = dict(doctype=doctype, txt=txt, filters=filters,
        limit_start=limit_start, limit_page_length=limit_page_length + 1,
        order_by = list_context.order_by or 'modified desc')
    # allow guest if flag is set
    if not list_context.get_list and (list_context.allow_guest or meta.allow_guest_to_view):
        kwargs['ignore_permissions'] = True
    raw_result = _get_list(**kwargs)
    if not raw_result: return {"result": []}
    show_more = len(raw_result) > limit_page_length
    if show_more:
        # drop the sentinel extra row before rendering
        raw_result = raw_result[:-1]
    if txt:
        list_context.default_subtitle = _('Filtered by "{0}"').format(txt)
    result = []
    row_template = list_context.row_template or "templates/includes/list/row_template.html"
    for doc in raw_result:
        doc.doctype = doctype
        new_context = frappe._dict(doc=doc, meta=meta)
        if not list_context.get_list and not isinstance(new_context.doc, Document):
            # re-fetch as a full Document so the row template sees all fields
            new_context.doc = frappe.get_doc(doc.doctype, doc.name)
            new_context.update(new_context.doc.as_dict())
        if not frappe.flags.in_test:
            new_context["pathname"] = frappe.local.request.path.strip("/ ")
        new_context.update(list_context)
        set_route(new_context)
        rendered_row = frappe.render_template(row_template, new_context, is_path=True)
        result.append(rendered_row)
    return {
        "result": result,
        "show_more": show_more,
        "next_start": next_start
    }
def set_route(context):
    """Assign ``context.route`` — the link target for this list item."""
    if context.web_form_name:
        # web form item: link back to the same page with the name as a query arg
        context.route = "{0}?name={1}".format(context.pathname, quoted(context.doc.name))
        return
    # a document may carry its own published route
    doc_route = getattr(context.doc, 'route', None) if context.doc else None
    if doc_route:
        context.route = doc_route
        return
    # default: <pathname or doctype>/<docname>
    context.route = "{0}/{1}".format(context.pathname or quoted(context.doc.doctype),
        quoted(context.doc.name))
def prepare_filters(doctype, controller, kwargs):
    """Build the filters dict for a website listing of ``doctype``.

    Starts from ``kwargs``, adds the controller's publish-condition field,
    folds in extra filters resolved from the request path, and finally keeps
    only keys that are real fields of the doctype.
    """
    filters = frappe._dict(kwargs)
    meta = frappe.get_meta(doctype)
    # only show published documents when the controller declares a flag field
    if hasattr(controller, 'website') and controller.website.get('condition_field'):
        filters[controller.website['condition_field']] = 1
    if filters.pathname:
        # resolve additional filters from path
        resolve_path(filters.pathname)
        for key, val in frappe.local.form_dict.items():
            if key not in filters and key != 'flags':
                filters[key] = val
    # filter the filters to include valid fields only.
    # Iterate over a snapshot of the keys: deleting from a dict while
    # iterating .items() directly raises RuntimeError on Python 3.
    for fieldname in list(filters):
        if not meta.has_field(fieldname):
            del filters[fieldname]
    return filters
def get_list_context(context, doctype):
    """Return the list context for ``doctype``, letting the doctype's module
    file and (for web forms) the Web Form document customize it."""
    # local imports — presumably to avoid circular imports at module load
    from frappe.modules import load_doctype_module
    from frappe.website.doctype.web_form.web_form import get_web_form_list
    list_context = context or frappe._dict()
    meta = frappe.get_meta(doctype)
    if not meta.custom:
        # custom doctypes don't have modules
        module = load_doctype_module(doctype)
        if hasattr(module, "get_list_context"):
            # the module hook may mutate the context in place or return a
            # replacement dict; a returned dict wins
            out = frappe._dict(module.get_list_context(list_context) or {})
            if out:
                list_context = out
    # get path from '/templates/' folder of the doctype
    if not list_context.row_template:
        list_context.row_template = meta.get_row_template()
    # is web form, show the default web form filters
    # which is only the owner
    if frappe.form_dict.web_form_name:
        list_context.web_form_name = frappe.form_dict.web_form_name
        if not list_context.get("get_list"):
            list_context.get_list = get_web_form_list
        if not frappe.flags.web_form:
            # update list context from web_form; cached on frappe.flags for
            # the duration of the request
            frappe.flags.web_form = frappe.get_doc('Web Form', frappe.form_dict.web_form_name)
            if frappe.flags.web_form.is_standard:
                frappe.flags.web_form.update_list_context(list_context)
    return list_context
def get_list(doctype, txt, filters, limit_start, limit_page_length=20, ignore_permissions=False,
    fields=None, order_by=None):
    """Default fetcher for website listings: wraps ``frappe.get_list`` with
    free-text search over the doctype's configured search fields.

    NOTE: when ``txt`` is given and the doctype has no search fields, this
    mutates the caller's ``filters`` (dict entry or appended list condition).
    """
    meta = frappe.get_meta(doctype)
    if not filters:
        filters = []
    if not fields:
        fields = "distinct *"
    or_filters = []
    if txt:
        if meta.search_fields:
            # search across text-like fields; any one match qualifies the row
            for f in meta.get_search_fields():
                if f == 'name' or meta.get_field(f).fieldtype in ('Data', 'Text', 'Small Text', 'Text Editor'):
                    or_filters.append([doctype, f, "like", "%" + txt + "%"])
        else:
            # no search fields configured: fall back to matching the name
            if isinstance(filters, dict):
                filters["name"] = ("like", "%" + txt + "%")
            else:
                filters.append([doctype, "name", "like", "%" + txt + "%"])
    return frappe.get_list(doctype, fields = fields,
        filters=filters, or_filters=or_filters, limit_start=limit_start,
        limit_page_length = limit_page_length, ignore_permissions=ignore_permissions,
        order_by=order_by)
| 32.546448
| 99
| 0.749161
|
4a1a57256faa46cc2bcf64bb3865c166c4dba713
| 10,094
|
py
|
Python
|
scripts/pyqtgraph-develop/pyqtgraph/colormap.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/colormap.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
scripts/pyqtgraph-develop/pyqtgraph/colormap.py
|
kuldeepaman/tf-pose
|
8050912c52a7b4f3c8a2656f267d47ba21d093f6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from .Qt import QtGui, QtCore
from .python2_3 import basestring
class ColorMap(object):
    """
    A ColorMap defines a relationship between a scalar value and a range of colors.
    ColorMaps are commonly used for false-coloring monochromatic images, coloring
    scatter-plot points, and coloring surface plots by height.

    Each color map is defined by a set of colors, each corresponding to a
    particular scalar value. For example:

        | 0.0 -> black
        | 0.2 -> red
        | 0.6 -> yellow
        | 1.0 -> white

    The colors for intermediate values are determined by interpolating between
    the two nearest colors in either RGB or HSV color space.

    To provide user-defined color mappings, see :class:`GradientWidget <pyqtgraph.GradientWidget>`.
    """

    ## color interpolation modes
    RGB = 1
    HSV_POS = 2
    HSV_NEG = 3

    ## boundary modes
    CLIP = 1
    REPEAT = 2
    MIRROR = 3

    ## return types
    BYTE = 1
    FLOAT = 2
    QCOLOR = 3

    # string aliases accepted anywhere a mode constant is expected
    enumMap = {
        'rgb': RGB,
        'hsv+': HSV_POS,
        'hsv-': HSV_NEG,
        'clip': CLIP,
        'repeat': REPEAT,
        'mirror': MIRROR,
        'byte': BYTE,
        'float': FLOAT,
        'qcolor': QCOLOR,
    }

    def __init__(self, pos, color, mode=None):
        """
        =============== ==============================================================
        **Arguments:**
        pos             Array of positions where each color is defined
        color           Array of RGBA colors.
                        Integer data types are interpreted as 0-255; float data types
                        are interpreted as 0.0-1.0
        mode            Array of color modes (ColorMap.RGB, HSV_POS, or HSV_NEG)
                        indicating the color space that should be used when
                        interpolating between stops. Note that the last mode value is
                        ignored. By default, the mode is entirely RGB.
        =============== ==============================================================
        """
        # stops are stored sorted by position
        self.pos = np.array(pos)
        order = np.argsort(self.pos)
        self.pos = self.pos[order]
        self.color = np.array(color)[order]
        if mode is None:
            mode = np.ones(len(pos))
        # NOTE(review): a caller-supplied `mode` array is not reordered along
        # with pos/color — confirm callers pass stops already sorted.
        self.mode = mode
        self.stopsCache = {}

    def map(self, data, mode='byte'):
        """
        Return an array of colors corresponding to the values in *data*.
        Data must be either a scalar position or an array (any shape) of positions.

        The *mode* argument determines the type of data returned:

        =========== ===============================================================
        byte        (default) Values are returned as 0-255 unsigned bytes.
        float       Values are returned as 0.0-1.0 floats.
        qcolor      Values are returned as an array of QColor objects.
        =========== ===============================================================
        """
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        # QColor output is produced by interpolating in byte space first
        if mode == self.QCOLOR:
            pos, color = self.getStops(self.BYTE)
        else:
            pos, color = self.getStops(mode)

        # interpolate each channel independently; np.interp clamps values
        # outside [pos.min(), pos.max()] automatically
        if np.isscalar(data):
            interp = np.empty((color.shape[1],), dtype=color.dtype)
        else:
            if not isinstance(data, np.ndarray):
                data = np.array(data)
            interp = np.empty(data.shape + (color.shape[1],), dtype=color.dtype)
        for i in range(color.shape[1]):
            interp[...,i] = np.interp(data, pos, color[:,i])

        # Convert to QColor if requested
        if mode == self.QCOLOR:
            if np.isscalar(data):
                return QtGui.QColor(*interp)
            else:
                return [QtGui.QColor(*x) for x in interp]
        else:
            return interp

    def mapToQColor(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.QCOLOR)

    def mapToByte(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.BYTE)

    def mapToFloat(self, data):
        """Convenience function; see :func:`map() <pyqtgraph.ColorMap.map>`."""
        return self.map(data, mode=self.FLOAT)

    def getGradient(self, p1=None, p2=None):
        """Return a QLinearGradient object spanning from QPoints p1 to p2."""
        # `is None` rather than `== None`: equality on QPointF goes through
        # Qt's rich comparison and is not a reliable null test (also PEP 8).
        if p1 is None:
            p1 = QtCore.QPointF(0,0)
        if p2 is None:
            p2 = QtCore.QPointF(self.pos.max()-self.pos.min(),0)
        g = QtGui.QLinearGradient(p1, p2)

        pos, color = self.getStops(mode=self.BYTE)
        color = [QtGui.QColor(*x) for x in color]
        # setStops() needs a real sequence; zip() is a lazy iterator on Python 3
        g.setStops(list(zip(pos, color)))
        return g

    def getColors(self, mode=None):
        """Return list of all color stops converted to the specified mode.
        If mode is None, then no conversion is done."""
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        color = self.color
        if mode in [self.BYTE, self.QCOLOR] and color.dtype.kind == 'f':
            color = (color * 255).astype(np.ubyte)
        elif mode == self.FLOAT and color.dtype.kind != 'f':
            color = color.astype(float) / 255.

        if mode == self.QCOLOR:
            color = [QtGui.QColor(*x) for x in color]
        return color

    def getStops(self, mode):
        ## Get fully-expanded set of RGBA stops in either float or byte mode.
        ## Results are cached per mode; HSV interpolation modes are not yet
        ## expanded here (TODO upstream).
        if mode not in self.stopsCache:
            color = self.color
            if mode == self.BYTE and color.dtype.kind == 'f':
                color = (color * 255).astype(np.ubyte)
            elif mode == self.FLOAT and color.dtype.kind != 'f':
                color = color.astype(float) / 255.
            self.stopsCache[mode] = (self.pos, color)
        return self.stopsCache[mode]

    def getLookupTable(self, start=0.0, stop=1.0, nPts=512, alpha=None, mode='byte'):
        """
        Return an RGB(A) lookup table (ndarray).

        =============== =============================================================================
        **Arguments:**
        start           The starting value in the lookup table (default=0.0)
        stop            The final value in the lookup table (default=1.0)
        nPts            The number of points in the returned lookup table.
        alpha           True, False, or None - Specifies whether or not alpha values are included
                        in the table. If alpha is None, it will be automatically determined.
        mode            Determines return type: 'byte' (0-255), 'float' (0.0-1.0), or 'qcolor'.
                        See :func:`map() <pyqtgraph.ColorMap.map>`.
        =============== =============================================================================
        """
        if isinstance(mode, basestring):
            mode = self.enumMap[mode.lower()]

        if alpha is None:
            alpha = self.usesAlpha()

        x = np.linspace(start, stop, nPts)
        table = self.map(x, mode)

        if not alpha:
            # drop the alpha channel when no stop is translucent
            return table[:,:3]
        else:
            return table

    def usesAlpha(self):
        """Return True if any stops have an alpha < 255"""
        # fully-opaque value depends on storage dtype (renamed from `max`
        # to avoid shadowing the builtin)
        opaque = 1.0 if self.color.dtype.kind == 'f' else 255
        return np.any(self.color[:,3] != opaque)

    def isMapTrivial(self):
        """
        Return True if the gradient has exactly two stops in it: black at 0.0 and white at 1.0.
        """
        if len(self.pos) != 2:
            return False
        if self.pos[0] != 0.0 or self.pos[1] != 1.0:
            return False
        if self.color.dtype.kind == 'f':
            return np.all(self.color == np.array([[0.,0.,0.,1.], [1.,1.,1.,1.]]))
        else:
            return np.all(self.color == np.array([[0,0,0,255], [255,255,255,255]]))

    def __repr__(self):
        pos = repr(self.pos).replace('\n', '')
        color = repr(self.color).replace('\n', '')
        return "ColorMap(%s, %s)" % (pos, color)
| 39.584314
| 125
| 0.482267
|
4a1a57cdd23807dce4ade8e9afe6a51652599a23
| 881
|
py
|
Python
|
src/SimilarNeuron/utils/dicttool.py
|
luxuncang/similar-neuron
|
b0bab30270e768ec70551d26d709e692d00f62a9
|
[
"MIT"
] | 4
|
2021-12-02T15:54:09.000Z
|
2021-12-09T14:22:05.000Z
|
src/SimilarNeuron/utils/dicttool.py
|
luxuncang/similar-neuron
|
b0bab30270e768ec70551d26d709e692d00f62a9
|
[
"MIT"
] | 13
|
2021-12-03T07:04:39.000Z
|
2022-03-09T08:38:39.000Z
|
src/SimilarNeuron/utils/dicttool.py
|
luxuncang/similar-neuron
|
b0bab30270e768ec70551d26d709e692d00f62a9
|
[
"MIT"
] | 1
|
2021-12-13T14:39:12.000Z
|
2021-12-13T14:39:12.000Z
|
from typing import Any, Iterable, Iterator, Hashable
def dictDFS(d: dict) -> Iterator:
    """Depth-first (pre-order) traversal of a nested dict.

    Yields every (key, value) pair; when a value is itself a dict, its
    contents are yielded immediately after the pair itself.
    """
    for key, value in d.items():
        yield (key, value)
        if isinstance(value, dict):
            yield from dictDFS(value)
def dictBSF(d: dict) -> Iterator:
    """Breadth-first (level-order) traversal of a nested dict.

    Yields (key, value) pairs level by level. Within each dict the
    dict-valued pairs are yielded first (and queued for the next level),
    then the leaf pairs — matching the original per-dict ordering. The
    previous implementation recursed into a subtree before finishing the
    current level, so it was not actually breadth-first for depth >= 2.
    """
    # simple FIFO via an advancing index (avoids O(n) list.pop(0))
    pending = [d]
    index = 0
    while index < len(pending):
        current = pending[index]
        index += 1
        for key, value in current.items():
            if isinstance(value, dict):
                yield (key, value)
                pending.append(value)
        for key, value in current.items():
            if not isinstance(value, dict):
                yield (key, value)
def dictfilter(d: dict, *, filterKey: Iterable[Hashable] = (), filterValue: Iterable[Any] = ()) -> dict:
    """Return a copy of ``d`` without the given keys or values.

    A pair survives only if its key is not in ``filterKey`` AND its value is
    not in ``filterValue``. Defaults are empty tuples rather than mutable
    list literals (mutable default arguments are a classic Python pitfall);
    behavior for callers is unchanged.
    """
    return {k: v for k, v in d.items() if (k not in filterKey) and (v not in filterValue)}
| 35.24
| 105
| 0.553916
|
4a1a589a5abfd6b8ece2c1c7017bde9492c6fe82
| 567
|
py
|
Python
|
vc_assistance/indico_vc_assistance/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
vc_assistance/indico_vc_assistance/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
vc_assistance/indico_vc_assistance/blueprint.py
|
pferreir/indico-plugins-cern
|
0fc2eb6b1aa3c3083a813477886a6632f148a4d9
|
[
"MIT"
] | null | null | null |
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2019 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.plugins import IndicoPluginBlueprint
from indico_vc_assistance.controllers import RHRequestList
# Single-route blueprint: the VC-assistance request list is served at the
# root of the '/service/vc-assistance' URL prefix.
blueprint = IndicoPluginBlueprint('vc_assistance', __name__, url_prefix='/service/vc-assistance')
blueprint.add_url_rule('/', 'request_list', RHRequestList)
| 33.352941
| 97
| 0.798942
|
4a1a58af4359c1a6762af3867af4070eaa25cde6
| 2,931
|
py
|
Python
|
Twitter/Model/Place/Place.py
|
david00medina/cyberattack-forecasting
|
e8a18e474d9eec0800d06e3f21c9cd25cb54e831
|
[
"MIT"
] | null | null | null |
Twitter/Model/Place/Place.py
|
david00medina/cyberattack-forecasting
|
e8a18e474d9eec0800d06e3f21c9cd25cb54e831
|
[
"MIT"
] | null | null | null |
Twitter/Model/Place/Place.py
|
david00medina/cyberattack-forecasting
|
e8a18e474d9eec0800d06e3f21c9cd25cb54e831
|
[
"MIT"
] | null | null | null |
# ############################################################################################################
# Copyright (c) 2022 David Alberto Medina Medina. #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, publish, distribute, #
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software #
# is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or substantial #
# portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, #
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A #
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR #
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN #
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ############################################################################################################
import json
from dataclasses import dataclass, field
from datetime import datetime
from typing import List
from Twitter.Model.Geolocation.Geolocation import Geolocation
@dataclass
class Place:
    """Dataclass mirroring a Twitter "place" object.

    Only ``full_name`` and ``id`` are mandatory; every other attribute
    defaults to None. A raw ``geo`` mapping passed to the constructor is
    promoted to a Geolocation instance after initialisation.
    """
    full_name: str
    id: str
    country: str = None
    country_code: str = None
    geo: Geolocation = None
    contained_within: list = None
    name: str = None
    place_type: str = None

    def __post_init__(self):
        raw_geo = self.geo
        if raw_geo:
            # Promote the raw mapping received from the API to a Geolocation.
            self.geo = Geolocation(**raw_geo)

    def toJSON(self):
        """Serialise this place (recursively) to plain JSON-compatible data."""
        def _encode(obj):
            # datetimes are not JSON-serialisable; everything else is dumped
            # via its attribute dict.
            if isinstance(obj, datetime):
                return obj.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            return obj.__dict__
        return json.loads(json.dumps(self, default=_encode, sort_keys=True, indent=4))
| 62.361702
| 110
| 0.479359
|
4a1a58b4876e2586f72fc3ef6510a0184a9fd5f2
| 1,176
|
py
|
Python
|
models/disposable_booking.py
|
ndalilav/Textdump
|
7fef26b88903f40ee0588b309d2cb08ea6d9b062
|
[
"MIT"
] | null | null | null |
models/disposable_booking.py
|
ndalilav/Textdump
|
7fef26b88903f40ee0588b309d2cb08ea6d9b062
|
[
"MIT"
] | null | null | null |
models/disposable_booking.py
|
ndalilav/Textdump
|
7fef26b88903f40ee0588b309d2cb08ea6d9b062
|
[
"MIT"
] | null | null | null |
from main import db
from sqlalchemy import func
class DisposableBooking(db.Model):
    """SQLAlchemy model recording one booking made against a disposable."""
    __tablename__ = "disposable_bookings"
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    email = db.Column(db.String(255), nullable=False)
    phone_number = db.Column(db.String(255), nullable=False)
    full_name = db.Column(db.String(255), nullable=False)
    disposable_id = db.Column(db.Integer, db.ForeignKey("disposable.id"))
    # Timestamps are assigned by the database server, not the application.
    created = db.Column(db.DateTime(timezone=True), server_default=func.now())
    # NOTE(review): the trailing underscore in "updated_" looks accidental,
    # but the attribute/column name is part of the schema, so it is kept.
    updated_ = db.Column(db.DateTime(timezone=True), onupdate=func.now())
    def save(self):
        """Persist this instance and return it for chaining."""
        db.session.add(self)
        db.session.commit()
        return self
    @classmethod
    def all(cls):
        """Return every booking row."""
        return cls.query.all()
    @classmethod
    def check_email_exists(cls, email):
        """Return the first booking with this email, or None."""
        return cls.query.filter_by(email=email).first()
    @classmethod
    def check_email_exists_in_disposable(cls, email, disposable_id):
        """Return the first booking with this email for the given disposable, or None."""
        return cls.query.filter_by(email=email, disposable_id=disposable_id).first()
    @classmethod
    def fetch_by_disposable_id(cls, id):
        """Return all bookings attached to the given disposable id."""
        r = cls.query.filter_by(disposable_id=id).all()
        return r
| 33.6
| 84
| 0.694728
|
4a1a59706b6097f300ac28a823bcb23e862a4fb7
| 318
|
py
|
Python
|
bims/templatetags/td_biblio.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | 7
|
2017-03-16T20:50:29.000Z
|
2019-11-07T01:00:10.000Z
|
bims/templatetags/td_biblio.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | 25
|
2017-03-23T14:12:25.000Z
|
2019-06-14T07:53:17.000Z
|
bims/templatetags/td_biblio.py
|
Christiaanvdm/django-bims
|
f92a63156c711b2d53c5f8ea06867cd64cee9eb9
|
[
"MIT"
] | 6
|
2017-09-05T06:55:20.000Z
|
2019-06-09T14:04:00.000Z
|
# -*- coding: utf-8 -*-
from django import template
register = template.Library()
@register.filter
def publication_date(entry):
    """Format an entry's publication date for display in a template.

    Partial dates (year only) render with "%Y"; full dates with "%F %Y".
    NOTE(review): "%F" is a glibc strftime extension expanding to
    "YYYY-MM-DD", so a full date renders like "2019-06-14 2019" — possibly
    "%B %Y" (month name + year) was intended; confirm before changing.
    """
    if entry.is_partial_publication_date:
        fmt = "%Y"
    else:
        fmt = "%F %Y"
    return entry.publication_date.strftime(fmt)
| 21.2
| 47
| 0.657233
|
4a1a59d7d340f69a01b005f138e304714fe35641
| 855
|
py
|
Python
|
src/chapter3_ros2_basics/chapter3_ros2_basics/topic_subscriber.py
|
slowrunner/handsonros2
|
8434414bba54e0bea06cda23f58ae62ee3c38b20
|
[
"Apache-2.0"
] | 1
|
2021-09-21T19:53:29.000Z
|
2021-09-21T19:53:29.000Z
|
src/chapter3_ros2_basics/chapter3_ros2_basics/topic_subscriber.py
|
slowrunner/handsonros2
|
8434414bba54e0bea06cda23f58ae62ee3c38b20
|
[
"Apache-2.0"
] | null | null | null |
src/chapter3_ros2_basics/chapter3_ros2_basics/topic_subscriber.py
|
slowrunner/handsonros2
|
8434414bba54e0bea06cda23f58ae62ee3c38b20
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# BEGIN ALL
# import rospy
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int32
import sys
# BEGIN CALLBACK
def callback(msg):
    """Handle one incoming Int32 message: echo it to stdout and the node log."""
    # The node is a module global set up by main() before spinning.
    global node
    payload = msg.data
    print(payload)
    node.get_logger().info('I heard: {}'.format(payload))
# END CALLBACK
def main():
    """Initialise rclpy, subscribe to the 'counter' topic and spin forever."""
    # callback() reaches the node's logger through this module global.
    global node
    rclpy.init(args=sys.argv)
    node = rclpy.create_node('topic_subscriber')
    # The handle is kept in a local; presumably holding a reference keeps the
    # subscription alive while spinning — confirm against rclpy docs.
    subscription = node.create_subscription(Int32, 'counter', callback, qos_profile=1)
    rclpy.spin(node)
if __name__ == '__main__':
    main()
| 21.923077
| 83
| 0.679532
|
4a1a5b9e84d463c8428cc9cc662dc2deb373eaf3
| 21,262
|
py
|
Python
|
stafd.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
stafd.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
stafd.py
|
charles-rose/nvme-stas
|
7af1f489d98dad53d7bbb697eccc53e5c08dbfcc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2021, Dell Inc. or its subsidiaries. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
# See the LICENSE file for details.
#
# This file is part of NVMe STorage Appliance Services (nvme-stas).
#
# Authors: Martin Belanger <Martin.Belanger@dell.com>
#
''' STorage Appliance Finder Daemon
'''
import sys
from argparse import ArgumentParser
from staslib import defs
DBUS_IDL = '''
<node>
<interface name="%s.debug">
<property name="tron" type="b" access="readwrite"/>
<property name="log_level" type="s" access="read"/>
<method name="process_info">
<arg direction="out" type="s" name="info_json"/>
</method>
<method name="controller_info">
<arg direction="in" type="s" name="transport"/>
<arg direction="in" type="s" name="traddr"/>
<arg direction="in" type="s" name="trsvcid"/>
<arg direction="in" type="s" name="host_traddr"/>
<arg direction="in" type="s" name="host_iface"/>
<arg direction="in" type="s" name="subsysnqn"/>
<arg direction="out" type="s" name="info_json"/>
</method>
</interface>
<interface name="%s">
<method name="list_controllers">
<arg direction="in" type="b" name="detailed"/>
<arg direction="out" type="s" name="controller_list_json"/>
</method>
<method name="get_log_pages">
<arg direction="in" type="s" name="transport"/>
<arg direction="in" type="s" name="traddr"/>
<arg direction="in" type="s" name="trsvcid"/>
<arg direction="in" type="s" name="host_traddr"/>
<arg direction="in" type="s" name="host_iface"/>
<arg direction="in" type="s" name="subsysnqn"/>
<arg direction="out" type="s" name="log_pages_json"/>
</method>
<method name="get_all_log_pages">
<arg direction="in" type="b" name="detailed"/>
<arg direction="out" type="s" name="log_pages_json"/>
</method>
<signal name="log_pages_changed">
<arg direction="out" type="s" name="transport"/>
<arg direction="out" type="s" name="traddr"/>
<arg direction="out" type="s" name="trsvcid"/>
<arg direction="out" type="s" name="host_traddr"/>
<arg direction="out" type="s" name="host_iface"/>
<arg direction="out" type="s" name="subsysnqn"/>
<arg direction="out" type="s" name="device"/>
</signal>
</interface>
</node>
''' % (defs.STAFD_DBUS_NAME, defs.STAFD_DBUS_NAME)
def parse_args(conf_file:str):
    """Parse stafd's command-line options.

    Args:
        conf_file: default path advertised and used for --conf-file.

    Returns:
        The argparse Namespace with conf_file, syslog, tron, version, idl.
    """
    parser = ArgumentParser(
        description=f'{defs.STAF_DESCRIPTION} ({defs.STAF_ACRONYM}). Must be root to run this program.')
    parser.add_argument('-f', '--conf-file', action='store',
                        help='Configuration file (default: %(default)s)',
                        default=conf_file, type=str, metavar='FILE')
    parser.add_argument('-s', '--syslog', action='store_true',
                        help='Send messages to syslog instead of stdout. Use this when running %(prog)s as a daemon. (default: %(default)s)',
                        default=False)
    parser.add_argument('--tron', action='store_true',
                        help='Trace ON. (default: %(default)s)', default=False)
    parser.add_argument('-v', '--version', action='store_true',
                        help='Print version, then exit', default=False)
    parser.add_argument('--idl', action='store',
                        help='Print D-Bus IDL, then exit', type=str, metavar='FILE')
    return parser.parse_args()
# Parse the command line once at import time; --version and --idl exit
# before the heavier imports below are pulled in.
ARGS = parse_args(defs.STAFD_CONFIG_FILE)
if ARGS.version:
    print(f'{defs.PROJECT_NAME} {defs.VERSION}')
    sys.exit(0)
if ARGS.idl:
    with open(ARGS.idl, 'w') as f:
        print(f'{DBUS_IDL}', file=f)
    sys.exit(0)
# There is a reason for having this import here and not at the top of the file.
# We want to allow running stafd with the --version and --idl options and exit
# without having to import stas and avahi.
from staslib import stas, avahi # pylint: disable=wrong-import-position
# Before going any further, make sure the script is allowed to run.
stas.check_if_allowed_to_continue()
################################################################################
# Preliminary checks have passed. Let her rip!
# pylint: disable=wrong-import-position
# pylint: disable=wrong-import-order
import json
import dasbus.server.interface
import systemd.daemon
from libnvme import nvme
from gi.repository import GLib
# AEN code identifying a "discovery log page changed" asynchronous event.
DLP_CHANGED = ((nvme.NVME_LOG_LID_DISCOVER << 16) |
               (nvme.NVME_AER_NOTICE_DISC_CHANGED << 8) | nvme.NVME_AER_NOTICE) # 0x70f002
LOG = stas.get_logger(ARGS.syslog, defs.STAFD_PROCNAME)
CNF = stas.get_configuration(ARGS.conf_file)
# Tracing is enabled if requested on the command line OR in the config file.
stas.trace_control(ARGS.tron or CNF.tron)
SYS_CNF = stas.get_sysconf() # Singleton
NVME_ROOT = nvme.root() # Singleton
NVME_ROOT.log_level("debug" if (ARGS.tron or CNF.tron) else "err")
NVME_HOST = nvme.host(NVME_ROOT, SYS_CNF.hostnqn, SYS_CNF.hostid, SYS_CNF.hostsymname) # Singleton
def set_loglevel(tron):
    """Propagate the trace-ON flag to both the stas tracer and libnvme's logger."""
    stas.trace_control(tron)
    NVME_ROOT.log_level("debug" if tron else "err")
#*******************************************************************************
class Dc(stas.Controller):
    ''' @brief This object establishes a connection to one Discover Controller (DC).
               It retrieves the discovery log pages and caches them.
               It also monitors udev events associated with that DC and updates
               the cached discovery log pages accordingly.
    '''
    # Retry periods in seconds for the two async operations below.
    # NOTE(review): "RERIOD" is presumably a typo for "PERIOD"; kept as-is
    # because renaming a class attribute could break external references.
    GET_LOG_PAGE_RETRY_RERIOD_SEC = 20
    REGISTRATION_RETRY_RERIOD_SEC = 10
    def __init__(self, tid:stas.TransportId):
        super().__init__(NVME_ROOT, NVME_HOST, tid, discovery_ctrl=True)
        self._register_op = None
        self._get_log_op = None
        self._log_pages = list() # Log pages cache
    def _release_resources(self):
        # Drop the cached log pages in addition to the base-class resources.
        LOG.debug('Dc._release_resources() - %s | %s', self.id, self.device)
        super()._release_resources()
        self._log_pages = list()
    def _kill_ops(self):
        # Kill the in-flight async operations (if any) on top of the base ones.
        super()._kill_ops()
        if self._get_log_op:
            self._get_log_op.kill()
            self._get_log_op = None
        if self._register_op:
            self._register_op.kill()
            self._register_op = None
    def info(self) -> dict:
        ''' @brief Get the controller info for this object
        '''
        info = super().info()
        if self._get_log_op:
            info['get log page operation'] = self._get_log_op.as_dict()
        if self._register_op:
            info['register operation'] = self._register_op.as_dict()
        return info
    def cancel(self):
        ''' @brief Used to cancel pending operations.
        '''
        super().cancel()
        if self._get_log_op:
            self._get_log_op.cancel()
        if self._register_op:
            self._register_op.cancel()
    def disconnect(self, disconnected_cb):
        ''' @brief Disconnect from the DC (unless connections are persistent)
                   and invoke disconnected_cb(tid) when done.
        '''
        LOG.debug('Dc.disconnect() - %s | %s', self.id, self.device)
        self._kill_ops()
        if self._ctrl and self._ctrl.connected() and not CNF.persistent_connections:
            LOG.info('%s | %s - Disconnect initiated', self.id, self.device)
            op = stas.AsyncOperationWithRetry(self._on_disconnected_success, self._on_disconnected_fail, self._ctrl.disconnect)
            op.run_async(disconnected_cb)
        else:
            # Defer callback to the next main loop's idle period.
            GLib.idle_add(disconnected_cb, self.tid)
    def _on_disconnected_success(self, op_obj, data, disconnected_cb): # pylint: disable=unused-argument
        LOG.debug('Dc._on_disconnected_success() - %s | %s', self.id, self.device)
        op_obj.kill()
        disconnected_cb(self.tid)
    def _on_disconnected_fail(self, op_obj, err, fail_cnt, disconnected_cb): # pylint: disable=unused-argument
        # The callback is invoked even on failure so the caller can proceed.
        LOG.debug('Dc._on_disconnected_fail() - %s | %s: %s', self.id, self.device, err)
        op_obj.kill()
        disconnected_cb(self.tid)
    def log_pages(self) -> list:
        ''' @brief Get the cached log pages for this object
        '''
        return self._log_pages
    def referrals(self) -> list:
        ''' @brief Return the list of referrals
        '''
        return [ page for page in self._log_pages if page['subtype'] == 'referral' ]
    def _on_aen(self, udev, aen:int):
        # A "discovery log page changed" AEN triggers a log-page refresh.
        super()._on_aen(udev, aen)
        if aen == DLP_CHANGED and self._get_log_op:
            self._get_log_op.run_async()
    def _on_nvme_event(self, udev, nvme_event:str):
        # Re-register with the DC when the device (re)connects.
        super()._on_nvme_event(udev, nvme_event)
        if nvme_event == 'connected' and self._register_op:
            self._register_op.run_async()
    def _on_udev_remove(self, udev):
        super()._on_udev_remove(udev)
        # Defer attempt to connect to the next main loop's idle period.
        GLib.idle_add(self._try_to_connect)
    def _find_existing_connection(self):
        return stas.UDEV.find_nvme_dc_device(self.tid)
    #--------------------------------------------------------------------------
    def _on_connect_success(self, op_obj, data):
        ''' @brief Function called when we successfully connect to the
                   Discovery Controller. Starts either the registration or
                   the log-page retrieval, depending on DC capabilities.
        '''
        super()._on_connect_success(op_obj, data)
        if self._alive():
            if self._ctrl.is_registration_supported():
                self._register_op = stas.AsyncOperationWithRetry(self._on_registration_success, self._on_registration_fail, self._ctrl.registration_ctlr, nvme.NVMF_DIM_TAS_REGISTER)
                self._register_op.run_async()
            else:
                self._get_log_op = stas.AsyncOperationWithRetry(self._on_get_log_success, self._on_get_log_fail, self._ctrl.discover)
                self._get_log_op.run_async()
    #--------------------------------------------------------------------------
    def _on_registration_success(self, op_obj, data):
        ''' @brief Function called when we successfully register with the
                   Discovery Controller. See self._register_op object
                   for details.
        '''
        if self._alive():
            if data is not None:
                LOG.warning('%s | %s - Registration error. %s.', self.id, self.device, data)
            else:
                LOG.debug('Dc._on_registration_success() - %s | %s %s', self.id, self.device, data if data else 'success')
            # Whether or not registration reported an error, proceed to
            # retrieve the discovery log pages.
            self._get_log_op = stas.AsyncOperationWithRetry(self._on_get_log_success, self._on_get_log_fail, self._ctrl.discover)
            self._get_log_op.run_async()
        else:
            LOG.debug('Dc._on_registration_success() - %s | %s Received event on dead object.', self.id, self.device)
    def _on_registration_fail(self, op_obj, err, fail_cnt):
        ''' @brief Function called when we fail to register with the
                   Discovery Controller. See self._register_op object
                   for details.
        '''
        if self._alive():
            LOG.debug('Dc._on_registration_fail() - %s | %s: %s. Retry in %s sec', self.id, self.device, err, Dc.REGISTRATION_RETRY_RERIOD_SEC)
            if fail_cnt == 1: # Throttle the logs. Only print the first time we fail to connect
                LOG.error('%s | %s - Failed to register with Discovery Controller. %s', self.id, self.device, err)
            # NOTE(review): the retry below is commented out, so a failed
            # registration is never retried even though the log message above
            # announces a retry — confirm whether this is intentional.
            #op_obj.retry(Dc.REGISTRATION_RETRY_RERIOD_SEC)
        else:
            LOG.debug('Dc._on_registration_fail() - %s | %s Received event on dead object. %s', self.id, self.device, err)
            op_obj.kill()
    #--------------------------------------------------------------------------
    def _on_get_log_success(self, op_obj, data): # pylint: disable=unused-argument
        ''' @brief Function called when we successfully retrieve the log pages
                   from the Discovery Controller. See self._get_log_op object
                   for details.
        '''
        if self._alive():
            # Note that for historical reasons too long to explain, the CDC may
            # return invalid addresses ("0.0.0.0", "::", or ""). Those need to be
            # filtered out.
            referrals_before = self.referrals()
            self._log_pages = [ { k: str(v) for k,v in dictionary.items() } for dictionary in data if dictionary.get('traddr') not in ('0.0.0.0', '::', '') ] if data else list()
            LOG.info('%s | %s - Received discovery log pages (num records=%s).', self.id, self.device, len(self._log_pages))
            referrals_after = self.referrals()
            STAF.log_pages_changed(self, self.device)
            if referrals_after != referrals_before:
                # A referral change means the set of known DCs may have changed.
                LOG.debug('Dc._on_get_log_success() - %s | %s Referrals before = %s', self.id, self.device, referrals_before)
                LOG.debug('Dc._on_get_log_success() - %s | %s Referrals after = %s', self.id, self.device, referrals_after)
                STAF.referrals_changed()
        else:
            LOG.debug('Dc._on_get_log_success() - %s | %s Received event on dead object.', self.id, self.device)
    def _on_get_log_fail(self, op_obj, err, fail_cnt):
        ''' @brief Function called when we fail to retrieve the log pages
                   from the Discovery Controller. See self._get_log_op object
                   for details.
        '''
        if self._alive():
            LOG.debug('Dc._on_get_log_fail() - %s | %s: %s. Retry in %s sec', self.id, self.device, err, Dc.GET_LOG_PAGE_RETRY_RERIOD_SEC)
            if fail_cnt == 1: # Throttle the logs. Only print the first time we fail to connect
                LOG.error('%s | %s - Failed to retrieve log pages. %s', self.id, self.device, err)
            op_obj.retry(Dc.GET_LOG_PAGE_RETRY_RERIOD_SEC)
        else:
            LOG.debug('Dc._on_get_log_fail() - %s | %s Received event on dead object. %s', self.id, self.device, err)
            op_obj.kill()
#*******************************************************************************
class Staf(stas.Service):
    ''' STorage Appliance Finder (STAF)

        Owns the set of Dc (discovery controller) objects, the Avahi
        (mDNS) browser, and the D-Bus interface exposed to clients.
    '''
    # Seconds of quiescence before configuration changes are applied.
    CONF_STABILITY_SOAK_TIME_SEC = 1.5
    class Dbus:
        ''' This is the DBus interface that external programs can use to
            communicate with stafd.
        '''
        __dbus_xml__ = DBUS_IDL
        @dasbus.server.interface.dbus_signal
        def log_pages_changed(self, transport:str, traddr:str, trsvcid:str, host_traddr:str, host_iface:str, subsysnqn:str, device:str):
            ''' @brief Signal sent when log pages have changed.
            '''
            pass
        @property
        def tron(self):
            ''' @brief Get Trace ON property '''
            return stas.TRON
        @tron.setter
        def tron(self, value): # pylint: disable=no-self-use
            ''' @brief Set Trace ON property '''
            set_loglevel(value)
        @property
        def log_level(self) -> str:
            ''' @brief Get Log Level property '''
            return stas.log_level()
        def process_info(self) -> str:
            ''' @brief Get status info (for debug)
                @return A string representation of a json object.
            '''
            info = {
                'tron': stas.TRON,
                'log-level': self.log_level,
            }
            info.update(STAF.info())
            return json.dumps(info)
        def controller_info(self, transport, traddr, trsvcid, host_traddr, host_iface, subsysnqn) -> str: # pylint: disable=no-self-use,too-many-arguments
            # Empty JSON object when the controller is unknown.
            controller = STAF.get_controller(transport, traddr, trsvcid, host_traddr, host_iface, subsysnqn)
            return json.dumps(controller.info()) if controller else '{}'
        def get_log_pages(self, transport, traddr, trsvcid, host_traddr, host_iface, subsysnqn) -> str: # pylint: disable=no-self-use,too-many-arguments
            # Empty JSON list when the controller is unknown.
            controller = STAF.get_controller(transport, traddr, trsvcid, host_traddr, host_iface, subsysnqn)
            return json.dumps(controller.log_pages()) if controller else '[]'
        def get_all_log_pages(self, detailed) -> str: # pylint: disable=no-self-use
            # One entry per known controller, each with its cached log pages.
            log_pages = list()
            for controller in STAF.get_controllers():
                log_pages.append({'discovery-controller': controller.details() if detailed else controller.controller_id_dict(),
                                  'log-pages': controller.log_pages()})
            return json.dumps(log_pages)
        def list_controllers(self, detailed) -> str: # pylint: disable=no-self-use
            ''' @brief Return the list of discovery controller IDs
            '''
            return json.dumps([ controller.details() if detailed else controller.controller_id_dict() for controller in STAF.get_controllers() ])
    #===========================================================================
    def __init__(self):
        super().__init__(self._reload_hdlr)
        self._avahi = avahi.Avahi(LOG, self._sysbus, self._avahi_change)
        self._avahi.config_stypes(CNF.get_stypes())
        # We don't want to apply configuration changes to nvme-cli right away.
        # Often, multiple changes will occur in a short amount of time (sub-second).
        # We want to wait until there are no more changes before applying them
        # to the system. The following timer acts as a "soak period". Changes
        # will be applied by calling self._on_config_ctrls() at the end of
        # the soak period.
        self._cfg_soak_tmr = stas.GTimer(Staf.CONF_STABILITY_SOAK_TIME_SEC, self._on_config_ctrls)
        self._cfg_soak_tmr.start()
        # Create the D-Bus instance.
        self._config_dbus(Staf.Dbus(), defs.STAFD_DBUS_NAME, defs.STAFD_DBUS_PATH)
    def info(self) -> dict:
        ''' @brief Get the status info for this object (used for debug)
        '''
        info = super().info()
        info['avahi'] = self._avahi.info()
        return info
    def _release_resources(self):
        LOG.debug('Staf._release_resources()')
        super()._release_resources()
        self._avahi.kill()
        self._avahi = None
    def _reload_hdlr(self):
        ''' @brief Reload configuration file. This is triggered by the SIGHUP
                   signal, which can be sent with "systemctl reload stafd".
        '''
        systemd.daemon.notify('RELOADING=1')
        CNF.reload()
        set_loglevel(CNF.tron)
        self._avahi.config_stypes(CNF.get_stypes())
        self._cfg_soak_tmr.start()
        systemd.daemon.notify('READY=1')
        return GLib.SOURCE_CONTINUE
    def log_pages_changed(self, controller, device):
        # Re-emit the change as a D-Bus signal for external listeners.
        self._dbus_iface.log_pages_changed.emit(controller.tid.transport, controller.tid.traddr, controller.tid.trsvcid,
                                                controller.tid.host_traddr, controller.tid.host_iface, controller.tid.subsysnqn, device)
    def referrals_changed(self):
        # Restart the soak timer so the new referral set gets applied.
        LOG.debug('Staf.referrals_changed()')
        self._cfg_soak_tmr.start()
    def _referrals(self) -> list:
        # Flatten every controller's referral log pages into connection ids.
        return [ stas.cid_from_dlpe(dlpe, controller.tid.host_traddr, controller.tid.host_iface)
                 for controller in self.get_controllers() for dlpe in controller.referrals() ]
    def _config_ctrls_finish(self, configured_ctrl_list):
        ''' @brief Finish discovery controllers configuration after
                   hostnames (if any) have been resolved.
        '''
        # Keep only entries with a traddr; default the subsysnqn to the
        # well-known discovery NQN.
        configured_ctrl_list = [ ctrl_dict for ctrl_dict in configured_ctrl_list if 'traddr' in ctrl_dict
                                 and ctrl_dict.setdefault('subsysnqn', 'nqn.2014-08.org.nvmexpress.discovery') ]
        discovered_ctrl_list = self._avahi.get_controllers()
        referral_ctrl_list = self._referrals()
        LOG.debug('Staf._config_ctrls_finish() - configured_ctrl_list = %s', configured_ctrl_list)
        LOG.debug('Staf._config_ctrls_finish() - discovered_ctrl_list = %s', discovered_ctrl_list)
        LOG.debug('Staf._config_ctrls_finish() - referral_ctrl_list = %s', referral_ctrl_list)
        controllers = stas.remove_blacklisted(configured_ctrl_list + discovered_ctrl_list + referral_ctrl_list)
        controllers = stas.remove_invalid_addresses(controllers)
        # Diff the desired controller set against the current one.
        new_controller_ids = { stas.TransportId(controller) for controller in controllers }
        cur_controller_ids = set(self._controllers.keys())
        controllers_to_add = new_controller_ids - cur_controller_ids
        controllers_to_rm = cur_controller_ids - new_controller_ids
        LOG.debug('Staf._config_ctrls_finish() - controllers_to_add = %s', list(controllers_to_add))
        LOG.debug('Staf._config_ctrls_finish() - controllers_to_rm = %s', list(controllers_to_rm))
        for tid in controllers_to_rm:
            controller = self._controllers.pop(tid, None)
            if controller is not None:
                controller.kill()
        for tid in controllers_to_add:
            self._controllers[tid] = Dc(tid)
    def _avahi_change(self):
        # mDNS change: restart the soak timer instead of reacting immediately.
        self._cfg_soak_tmr.start()
#*******************************************************************************
# Instantiate the service and block in its main loop until shutdown.
STAF = Staf()
STAF.run()
# Drop the module-level references once the main loop has exited.
STAF = None
CNF = None
LOG = None
ARGS = None
| 45.528908
| 195
| 0.611796
|
4a1a5ba35c88b9dfdbbcb55463cda70da0625b51
| 3,166
|
py
|
Python
|
workers/data_refinery_workers/processors/tximport.py
|
cgreene/refinebio
|
fe75e42f2963d60c4307806cba11520754547190
|
[
"BSD-3-Clause"
] | null | null | null |
workers/data_refinery_workers/processors/tximport.py
|
cgreene/refinebio
|
fe75e42f2963d60c4307806cba11520754547190
|
[
"BSD-3-Clause"
] | null | null | null |
workers/data_refinery_workers/processors/tximport.py
|
cgreene/refinebio
|
fe75e42f2963d60c4307806cba11520754547190
|
[
"BSD-3-Clause"
] | null | null | null |
import boto3
import glob
import io
import json
import multiprocessing
import os
import re
import shutil
import subprocess
import tarfile
from botocore.client import Config
from django.conf import settings
from django.db import transaction
from django.utils import timezone
from typing import Dict, List
import numpy as np
import pandas as pd
from data_refinery_common.job_lookup import Downloaders, PipelineEnum
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
ComputationalResult,
ComputationalResultAnnotation,
ComputedFile,
Experiment,
ExperimentSampleAssociation,
OrganismIndex,
Pipeline,
Processor,
Sample,
SampleComputedFileAssociation,
SampleResultAssociation,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.processors import utils, salmon
# We have to set the signature_version to v4 since us-east-1 buckets require
# v4 authentication.
S3 = boto3.client('s3', config=Config(signature_version='s3v4'))
logger = get_and_configure_logger(__name__)
# Per-job working directories are created under LOCAL_ROOT_DIR with this prefix.
JOB_DIR_PREFIX = "processor_job_"
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
S3_BUCKET_NAME = get_env_variable("S3_BUCKET_NAME", "data-refinery")
def _set_job_prefix(job_context: Dict) -> Dict:
""" Sets the `job_dir_prefix` value in the job context object."""
job_context["job_dir_prefix"] = JOB_DIR_PREFIX + str(job_context["job_id"])
return job_context
def _prepare_files(job_context: Dict) -> Dict:
"""Moves the file(s) from the raw directory to the temp directory.
"""
logger.debug("Preparing files..")
# Create a directory specific to this processor job.
# (A single sample could belong to multiple experiments, meaning
# that it could be run more than once, potentially even at the
# same time.)
job_context["work_dir"] = os.path.join(LOCAL_ROOT_DIR,
job_context["job_dir_prefix"]) + "/"
os.makedirs(job_context["work_dir"], exist_ok=True)
# Technically unsafe, but if either of these objects don't exist we need to fail anyway.
sample = job_context["job"].original_files.first().samples.first()
job_context['sample'] = sample
job_context['samples'] = []
job_context['organism'] = sample.organism
job_context["success"] = True
job_context["is_tximport_only"] = True
job_context["computed_files"] = []
job_context["smashable_files"] = []
return job_context
def tximport(job_id: int) -> Dict:
    """Main processor function for the Tximport Processor.

    Runs tximport command line tool on an experiment.

    Args:
        job_id: primary key of the processor job to run.

    Returns:
        The final job context produced by utils.run_pipeline. (The previous
        ``-> None`` annotation was wrong: the context has always been
        returned.)
    """
    pipeline = Pipeline(name=PipelineEnum.TXIMPORT.value)
    final_context = utils.run_pipeline({"job_id": job_id, "pipeline": pipeline},
                                       [utils.start_job,
                                        _set_job_prefix,
                                        _prepare_files,
                                        salmon.get_tximport_inputs,
                                        salmon._find_or_download_index,
                                        salmon.tximport,
                                        utils.end_job])
    return final_context
| 33.680851
| 92
| 0.708781
|
4a1a5bf6ba168a098f1d76f96235cdab0a4bb9c7
| 1,722
|
py
|
Python
|
anvil/rig_templates/insect.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 3
|
2019-11-22T04:38:06.000Z
|
2022-01-19T08:27:18.000Z
|
anvil/rig_templates/insect.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 28
|
2018-02-01T20:39:42.000Z
|
2018-04-26T17:25:23.000Z
|
anvil/rig_templates/insect.py
|
AndresMWeber/Anvil
|
9cd202183ac998983c2bf6e55cc46bbc0ca1a78e
|
[
"Apache-2.0"
] | 1
|
2018-03-11T06:47:26.000Z
|
2018-03-11T06:47:26.000Z
|
import anvil.node_types as nt
import anvil.config as cfg
import anvil.sub_rig_templates as sub_rig_templates
class Diptera(nt.Rig):
    """Rig template assembling standard sub-rigs (spine, neck, head, and
    left/right arm, hand, leg, foot)."""
    # Maps sub-rig key -> [sub-rig template class, constructor kwargs].
    # NOTE(review): "SUB_RIG_BUIlD_TABLE" contains a lowercase 'l' (BUIlD);
    # the misspelling is used consistently below, so it is left unchanged.
    SUB_RIG_BUIlD_TABLE = {
        cfg.LEFT + '_' + cfg.ARM: [sub_rig_templates.BipedArm, {cfg.NAME: cfg.ARM, cfg.SIDE: cfg.LEFT}],
        cfg.RIGHT + '_' + cfg.ARM: [sub_rig_templates.BipedArm, {cfg.NAME: cfg.ARM, cfg.SIDE: cfg.RIGHT}],
        cfg.LEFT + '_' + cfg.HAND: [sub_rig_templates.Hand, {cfg.NAME: cfg.HAND, cfg.SIDE: cfg.LEFT}],
        cfg.RIGHT + '_' + cfg.HAND: [sub_rig_templates.Hand, {cfg.NAME: cfg.HAND, cfg.SIDE: cfg.RIGHT}],
        cfg.LEFT + '_' + cfg.LEG: [sub_rig_templates.BipedLeg, {cfg.NAME: cfg.LEG, cfg.SIDE: cfg.LEFT}],
        cfg.RIGHT + '_' + cfg.LEG: [sub_rig_templates.BipedLeg, {cfg.NAME: cfg.LEG, cfg.SIDE: cfg.RIGHT}],
        cfg.LEFT + '_' + cfg.FOOT: [sub_rig_templates.BipedFoot, {cfg.NAME: cfg.FOOT, cfg.SIDE: cfg.LEFT}],
        cfg.RIGHT + '_' + cfg.FOOT: [sub_rig_templates.BipedFoot, {cfg.NAME: cfg.FOOT, cfg.SIDE: cfg.RIGHT}],
        cfg.SPINE: [sub_rig_templates.Spine, {cfg.NAME: cfg.SPINE}],
        cfg.NECK: [sub_rig_templates.Neck, {cfg.NAME: cfg.NECK}],
        cfg.HEAD: [sub_rig_templates.Head, {cfg.NAME: cfg.HEAD}],
    }
    # Sub-rig names that come in mirrored left/right pairs.
    REFLECTABLE_SUB_RIGS = [cfg.ARM, cfg.HAND, cfg.LEG, cfg.FOOT, cfg.DIGITS]
    SUB_RIG_BUILD_ORDER = [cfg.SPINE, cfg.NECK, cfg.HEAD, cfg.ARM, cfg.LEG, cfg.HAND, cfg.FOOT, cfg.DIGITS]
    # Build-table keys grouped by build order: one sub-list per sub-rig name,
    # containing every table key that embeds that name (e.g. left_* / right_*).
    ORDERED_SUB_RIG_KEYS = [[key for key in SUB_RIG_BUIlD_TABLE if sub_rig_name in key] for sub_rig_name in
                            SUB_RIG_BUILD_ORDER]
    def setup_sub_rig_connections(self):
        # Intentionally a no-op for this template.
        pass
    def rename(self, input_dicts, **kwargs):
        # Delegates entirely to the base Rig implementation.
        super(Diptera, self).rename(input_dicts, **kwargs)
| 57.4
| 109
| 0.660859
|
4a1a5de8e9c10795660b18dbbccdc4c417d05993
| 731
|
py
|
Python
|
zvt/domain/meta/block_meta.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
zvt/domain/meta/block_meta.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
zvt/domain/meta/block_meta.py
|
aaron8tang/zvt
|
568cf0d42577eb05b96e1a07ec512aed34245b2d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from sqlalchemy import Column, String
from sqlalchemy.orm import declarative_base
from zvt.contract import Portfolio, PortfolioStock
from zvt.contract.register import register_schema, register_entity
BlockMetaBase = declarative_base()
@register_entity(entity_type='block')
class Block(BlockMetaBase, Portfolio):
    """
    Stock block (a sector/concept grouping of stocks).
    """
    __tablename__ = 'block'
    # Block category: industry ("industry") or concept ("concept")
    category = Column(String(length=64))
class BlockStock(BlockMetaBase, PortfolioStock):
    """Membership relation linking an individual stock to a block."""
    __tablename__ = 'block_stock'
# Register both schemas under the 'block_meta' database for these providers.
register_schema(providers=['eastmoney', 'sina'], db_name='block_meta',
                schema_base=BlockMetaBase)
# the __all__ is generated
__all__ = ['Block', 'BlockStock']
| 24.366667
| 70
| 0.730506
|
4a1a5eac51ec4e416742313543ea3def598f145a
| 17,468
|
py
|
Python
|
test/functional/test_framework/test_node.py
|
mdvenka/FTHcoin
|
335d026521402e8452f5e1dc570366bd18a0fdf7
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
mdvenka/FTHcoin
|
335d026521402e8452f5e1dc570366bd18a0fdf7
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_node.py
|
mdvenka/FTHcoin
|
335d026521402e8452f5e1dc570366bd18a0fdf7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for faithcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a faithcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
if extra_conf != None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
# adress , privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("faithcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the faithcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'faithcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to faithcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to faithcoind
expected_msg: regex that stderr should match when faithcoind fails
Will throw if faithcoind starts without an error.
Will throw if an expected_msg is provided and it does not match faithcoind's stdout."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('faithcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "faithcoind should have exited with an error"
else:
assert_msg = "faithcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes faithcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to faithcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run faithcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same faithcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running faithcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
| 42.193237
| 158
| 0.631383
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.