hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2eda01290cb12964d055438179684520182283e8 | 578 | py | Python | LeetCode/Mock Interviews/Google/Phone Interview 10/Print Immutable Linked List in Reverse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 13 | 2021-09-02T07:30:02.000Z | 2022-03-22T19:32:03.000Z | LeetCode/Mock Interviews/Google/Phone Interview 10/Print Immutable Linked List in Reverse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | null | null | null | LeetCode/Mock Interviews/Google/Phone Interview 10/Print Immutable Linked List in Reverse.py | UtkarshPathrabe/Competitive-Coding | ba322fbb1b88682d56a9b80bdd92a853f1caa84e | [
"MIT"
] | 3 | 2021-08-24T16:06:22.000Z | 2021-09-17T15:39:53.000Z | # """
# This is the ImmutableListNode's API interface.
# You should not implement it, or speculate about its implementation.
# """
# class ImmutableListNode:
# def printValue(self) -> None: # print the value of this node.
# def getNext(self) -> 'ImmutableListNode': # return the next node.
class Solution:
    def printLinkedListInReverse(self, head: 'ImmutableListNode') -> None:
        """Print the values of an immutable linked list in reverse order.

        The original recursive implementation used one call-stack frame per
        node and raised ``RecursionError`` once the list exceeded Python's
        recursion limit (~1000 nodes). This version walks the list once,
        stacking the nodes, then prints them back-to-front — O(n) time,
        O(n) auxiliary space, no recursion-depth limit.

        Parameters
        ----------
        head : ImmutableListNode or None
            First node of the list. Only ``getNext()`` and ``printValue()``
            may be called on nodes.
        """
        # Collect nodes front-to-back.
        stack = []
        node = head
        while node is not None:
            stack.append(node)
            node = node.getNext()
        # Print back-to-front.
        while stack:
            stack.pop().printValue()
1453c4c383a2dbfdd6dc5ab396f0d9987fa78a84 | 32,063 | py | Python | nilearn/tests/test_signal.py | RaphaelMeudec/nilearn | 186c47c34c036c092a8d735baf25541d510078ec | [
"BSD-2-Clause"
] | 827 | 2015-01-30T23:11:42.000Z | 2022-03-29T21:21:05.000Z | nilearn/tests/test_signal.py | RaphaelMeudec/nilearn | 186c47c34c036c092a8d735baf25541d510078ec | [
"BSD-2-Clause"
] | 2,845 | 2015-01-04T22:14:41.000Z | 2022-03-31T20:28:09.000Z | nilearn/tests/test_signal.py | RaphaelMeudec/nilearn | 186c47c34c036c092a8d735baf25541d510078ec | [
"BSD-2-Clause"
] | 484 | 2015-02-03T10:58:19.000Z | 2022-03-29T21:57:16.000Z | """
Test the signals module
"""
# Author: Gael Varoquaux, Alexandre Abraham
# License: simplified BSD
import os.path
import warnings
from distutils.version import LooseVersion
import numpy as np
import pytest
# Use nisignal here to avoid name collisions (using nilearn.signal is
# not possible)
from nilearn import signal as nisignal
from nilearn.signal import clean
from pandas import read_csv
import scipy.signal
def generate_signals(n_features=17, n_confounds=5, length=41,
same_variance=True, order="C"):
"""Generate test signals.
All returned signals have no trends at all (to machine precision).
Parameters
----------
n_features, n_confounds : int, optional
respectively number of features to generate, and number of confounds
to use for generating noise signals.
length : int, optional
number of samples for every signal.
same_variance : bool, optional
if True, every column of "signals" have a unit variance. Otherwise,
a random amplitude is applied.
order : "C" or "F"
gives the contiguousness of the output arrays.
Returns
-------
signals : numpy.ndarray, shape (length, n_features)
unperturbed signals.
noises : numpy.ndarray, shape (length, n_features)
confound-based noises. Each column is a signal obtained by linear
combination of all confounds signals (below). The coefficients in
the linear combination are also random.
confounds : numpy.ndarray, shape (length, n_confounds)
random signals used as confounds.
"""
rng = np.random.RandomState(42)
# Generate random confounds
confounds_shape = (length, n_confounds)
confounds = np.ndarray(confounds_shape, order=order)
confounds[...] = rng.standard_normal(size=confounds_shape)
confounds[...] = scipy.signal.detrend(confounds, axis=0)
# Compute noise based on confounds, with random factors
factors = rng.standard_normal(size=(n_confounds, n_features))
noises_shape = (length, n_features)
noises = np.ndarray(noises_shape, order=order)
noises[...] = np.dot(confounds, factors)
noises[...] = scipy.signal.detrend(noises, axis=0)
# Generate random signals with random amplitudes
signals_shape = noises_shape
signals = np.ndarray(signals_shape, order=order)
if same_variance:
signals[...] = rng.standard_normal(size=signals_shape)
else:
signals[...] = (
4.0 * abs(rng.standard_normal(size=signals_shape[1])) + 0.5
) * rng.standard_normal(size=signals_shape)
signals[...] = scipy.signal.detrend(signals, axis=0)
return signals, noises, confounds
def generate_trends(n_features=17, length=41):
"""Generate linearly-varying signals, with zero mean.
Parameters
----------
n_features, length : int
respectively number of signals and number of samples to generate.
Returns
-------
trends : numpy.ndarray, shape (length, n_features)
output signals, one per column.
"""
rng = np.random.RandomState(42)
trends = scipy.signal.detrend(np.linspace(0, 1.0, length), type="constant")
trends = np.repeat(np.atleast_2d(trends).T, n_features, axis=1)
factors = rng.standard_normal(size=n_features)
return trends * factors
def generate_signals_plus_trends(n_features=17, n_samples=41):
    """Convenience wrapper: random test signals with linear trends added."""
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    return signals + generate_trends(n_features=n_features,
                                     length=n_samples)
def test_butterworth():
    """Exercise nisignal.butterworth.

    Checks copy semantics (copy=True leaves the input untouched,
    copy=False filters in place), equivalence of single- and
    multi-timeseries filtering, and clipping of a low_pass cutoff above
    the Nyquist frequency (issue #482).
    """
    rng = np.random.RandomState(42)
    n_features = 20000
    n_samples = 100
    sampling = 100
    low_pass = 30
    high_pass = 10
    # Compare output for different options.
    # single timeseries
    data = rng.standard_normal(size=n_samples)
    data_original = data.copy()
    '''
    May be only on py3.5:
    Bug in scipy 1.1.0 generates an unavoidable FutureWarning.
    (More info: https://github.com/scipy/scipy/issues/9086)
    The number of warnings generated is overwhelming TravisCI's log limit,
    causing it to fail tests.
    This hack prevents that and will be removed in future.
    '''
    buggy_scipy = (LooseVersion(scipy.__version__) < LooseVersion('1.2')
                   and LooseVersion(scipy.__version__) > LooseVersion('1.0')
                   )
    if buggy_scipy:
        warnings.simplefilter('ignore')
    ''' END HACK '''
    # copy=True must not touch `data`, and must return a distinct array.
    out_single = nisignal.butterworth(data, sampling,
                                      low_pass=low_pass, high_pass=high_pass,
                                      copy=True)
    np.testing.assert_almost_equal(data, data_original)
    # copy=False filters in place: `data` now equals the filtered output.
    nisignal.butterworth(data, sampling,
                         low_pass=low_pass, high_pass=high_pass,
                         copy=False)
    np.testing.assert_almost_equal(out_single, data)
    np.testing.assert_(id(out_single) != id(data))
    # multiple timeseries
    data = rng.standard_normal(size=(n_samples, n_features))
    data[:, 0] = data_original  # set first timeseries to previous data
    data_original = data.copy()
    out1 = nisignal.butterworth(data, sampling,
                                low_pass=low_pass, high_pass=high_pass,
                                copy=True)
    np.testing.assert_almost_equal(data, data_original)
    np.testing.assert_(id(out1) != id(data_original))
    # check that multiple- and single-timeseries filtering do the same thing.
    np.testing.assert_almost_equal(out1[:, 0], out_single)
    nisignal.butterworth(data, sampling,
                         low_pass=low_pass, high_pass=high_pass,
                         copy=False)
    np.testing.assert_almost_equal(out1, data)
    # Test nyquist frequency clipping, issue #482
    # (sampling=100 gives a Nyquist of 50; 80 must be clipped down to it,
    # so both calls must produce the same output)
    out1 = nisignal.butterworth(data, sampling,
                                low_pass=50.,
                                copy=True)
    out2 = nisignal.butterworth(data, sampling,
                                low_pass=80.,  # Greater than nyq frequency
                                copy=True)
    np.testing.assert_almost_equal(out1, out2)
    np.testing.assert_(id(out1) != id(out2))
def test_standardize():
    """Check nisignal._standardize.

    zscore standardization must yield unit std and zero sum per column;
    detrend alone must zero out a purely linear signal; a length-1 signal
    must pass through unchanged (nothing to scale).
    """
    rng = np.random.RandomState(42)
    n_features = 10
    n_samples = 17
    # Create random signals with offsets
    a = rng.random_sample((n_samples, n_features))
    a += np.linspace(0, 2., n_features)
    # transpose array to fit _standardize input.
    # Without trend removal
    b = nisignal._standardize(a, standardize='zscore')
    stds = np.std(b)
    np.testing.assert_almost_equal(stds, np.ones(n_features))
    np.testing.assert_almost_equal(b.sum(axis=0), np.zeros(n_features))
    # With trend removal
    # (a single linear column: detrending must leave only zeros)
    a = np.atleast_2d(np.linspace(0, 2., n_features)).T
    b = nisignal._standardize(a, detrend=True, standardize=False)
    np.testing.assert_almost_equal(b, np.zeros(b.shape))
    # A 1-sample signal cannot be standardized: it must be returned as-is.
    length_1_signal = np.atleast_2d(np.linspace(0, 2., n_features))
    np.testing.assert_array_equal(length_1_signal,
                                  nisignal._standardize(length_1_signal,
                                                        standardize='zscore'))
def test_detrend():
    """Test custom detrend implementation.

    nisignal._detrend is compared against scipy.signal.detrend as the
    reference, both out-of-place (input untouched) and in-place. Also
    checks constant (mean-only) removal, length-1 pass-through, and mean
    removal on integer input.
    """
    point_number = 703
    features = 17
    signals, _, _ = generate_signals(n_features=features,
                                     length=point_number,
                                     same_variance=True)
    trends = generate_trends(n_features=features, length=point_number)
    x = signals + trends + 1
    original = x.copy()
    # Mean removal only (out-of-place)
    detrended = nisignal._detrend(x, inplace=False, type="constant")
    assert (abs(detrended.mean(axis=0)).max()
            < 15. * np.finfo(np.float64).eps)
    # out-of-place detrending. Use scipy as a reference implementation
    detrended = nisignal._detrend(x, inplace=False)
    detrended_scipy = scipy.signal.detrend(x, axis=0)
    # "x" must be left untouched
    np.testing.assert_almost_equal(original, x, decimal=14)
    assert abs(detrended.mean(axis=0)).max() < 15. * np.finfo(np.float64).eps
    np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14)
    # for this to work, there must be no trends at all in "signals"
    np.testing.assert_almost_equal(detrended, signals, decimal=14)
    # inplace detrending
    nisignal._detrend(x, inplace=True)
    assert abs(x.mean(axis=0)).max() < 15. * np.finfo(np.float64).eps
    # for this to work, there must be no trends at all in "signals"
    np.testing.assert_almost_equal(detrended_scipy, detrended, decimal=14)
    np.testing.assert_almost_equal(x, signals, decimal=14)
    # A single-sample (length-1) signal must pass through unchanged.
    length_1_signal = x[0]
    length_1_signal = length_1_signal[np.newaxis, :]
    np.testing.assert_array_equal(length_1_signal,
                                  nisignal._detrend(length_1_signal))
    # Mean removal on integers
    detrended = nisignal._detrend(x.astype(np.int64), inplace=True,
                                  type="constant")
    assert (abs(detrended.mean(axis=0)).max() <
            20. * np.finfo(np.float64).eps)
def test_mean_of_squares():
    """Check _mean_of_squares against a direct reference computation."""
    n_samples = 11
    n_features = 501  # Higher than 500 required
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples,
                                     same_variance=True)
    # Reference: mean over time of the squared signals, computed directly.
    expected = (signals ** 2).mean(axis=0)
    np.testing.assert_almost_equal(expected,
                                   nisignal._mean_of_squares(signals))
def test_row_sum_of_squares():
    """Check _row_sum_of_squares against a direct reference computation."""
    n_samples = 11
    n_features = 501  # Higher than 500 required
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples,
                                     same_variance=True)
    # Reference: sum over time of the squared signals, computed directly.
    expected = (signals ** 2).sum(axis=0)
    np.testing.assert_almost_equal(expected,
                                   nisignal._row_sum_of_squares(signals))
# This test is inspired from Scipy docstring of detrend function
def test_clean_detrending():
    """Check detrending and non-finite handling in nisignal.clean.

    With ensure_finite=True the output must contain no NaN/inf at all;
    detrend=True must recover the trend-free signals; and clean must never
    modify its input array.
    """
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    trends = generate_trends(n_features=n_features,
                             length=n_samples)
    x = signals + trends
    x_orig = x.copy()
    # if NANs, data out should be False with ensure_finite=True
    y = signals + trends
    y[20, 150] = np.nan
    y[5, 500] = np.nan
    y[15, 14] = np.inf
    y_orig = y.copy()
    y_clean = nisignal.clean(y, ensure_finite=True)
    # Fixed: was `assert np.any(np.isfinite(y_clean)), True`, which used a
    # constant (always-truthy) assert message and only required *some*
    # finite value. ensure_finite=True must leave *no* non-finite values.
    assert np.all(np.isfinite(y_clean))
    # clean should not modify inputs
    # using assert_almost_equal instead of array_equal due to NaNs
    np.testing.assert_almost_equal(y_orig, y, decimal=13)
    # test boolean is not given to signal.clean
    pytest.raises(TypeError, nisignal.clean, x, low_pass=False)
    pytest.raises(TypeError, nisignal.clean, x, high_pass=False)
    # This should remove trends
    x_detrended = nisignal.clean(x, standardize=False, detrend=True,
                                 low_pass=None, high_pass=None)
    np.testing.assert_almost_equal(x_detrended, signals, decimal=13)
    # clean should not modify inputs
    assert np.array_equal(x_orig, x)
    # This should do nothing
    x_undetrended = nisignal.clean(x, standardize=False, detrend=False,
                                   low_pass=None, high_pass=None)
    assert not abs(x_undetrended - signals).max() < 0.06
    # clean should not modify inputs
    assert np.array_equal(x_orig, x)
def test_clean_t_r():
    """Different TRs must produce different results after butterworth
    filtering.

    Pairs of random TRs are drawn; for each pair the same cutoffs are
    applied with each TR, and the outputs must differ whenever the two
    TRs are not (nearly) equal.
    """
    rng = np.random.RandomState(42)
    n_samples = 34
    # n_features Must be higher than 500
    n_features = 501
    x_orig = generate_signals_plus_trends(n_features=n_features,
                                          n_samples=n_samples)
    random_tr_list1 = np.round(rng.uniform(size=3) * 10, decimals=2)
    random_tr_list2 = np.round(rng.uniform(size=3) * 10, decimals=2)
    for tr1, tr2 in zip(random_tr_list1, random_tr_list2):
        # Cutoffs are scaled by tr1 so they stay valid for that TR.
        low_pass_freq_list = tr1 * np.array([1.0 / 100, 1.0 / 110])
        high_pass_freq_list = tr1 * np.array([1.0 / 210, 1.0 / 190])
        for low_cutoff, high_cutoff in zip(low_pass_freq_list,
                                           high_pass_freq_list):
            det_one_tr = nisignal.clean(x_orig, t_r=tr1, low_pass=low_cutoff,
                                        high_pass=high_cutoff)
            det_diff_tr = nisignal.clean(x_orig, t_r=tr2, low_pass=low_cutoff,
                                         high_pass=high_cutoff)
            # Nearly-equal TRs may legitimately give identical output, so
            # only assert a difference when the TRs are clearly distinct.
            if not np.isclose(tr1, tr2, atol=0.3):
                msg = ('results do not differ for different TRs: {} and {} '
                       'at cutoffs: low_pass={}, high_pass={} '
                       'n_samples={}, n_features={}'.format(
                           tr1, tr2, low_cutoff, high_cutoff,
                           n_samples, n_features))
                np.testing.assert_(np.any(np.not_equal(det_one_tr,
                                                       det_diff_tr)),
                                   msg)
                del det_one_tr, det_diff_tr
def test_clean_frequencies():
    '''Using butterworth method.

    A high-pass cutoff above the sine's frequency must suppress it, a
    high-pass below (or a low-pass above) must keep it, and incompatible
    cutoffs (low_pass < high_pass) must raise. clean must not modify its
    input.
    '''
    sx1 = np.sin(np.linspace(0, 100, 2000))
    sx2 = np.sin(np.linspace(0, 100, 2000))
    sx = np.vstack((sx1, sx2)).T
    sx_orig = sx.copy()
    assert clean(sx, standardize=False, high_pass=0.002, low_pass=None,
                 t_r=2.5).max() > 0.1
    assert clean(sx, standardize=False, high_pass=0.2, low_pass=None,
                 t_r=2.5).max() < 0.01
    assert clean(sx, standardize=False, low_pass=0.01, t_r=2.5).max() > 0.9
    pytest.raises(ValueError, clean, sx, low_pass=0.4, high_pass=0.5, t_r=2.5)
    # clean should not modify inputs.
    # Fixed: the returned value was bound to an unused variable
    # (`sx_cleaned`); only the no-modification property is checked here.
    clean(sx, standardize=False, detrend=False, low_pass=0.2, t_r=2.5)
    assert np.array_equal(sx_orig, sx)
def test_clean_runs():
    """Smoke-test nisignal.clean with a ``runs`` vector splitting the
    samples into two runs, and check the input is not modified."""
    n_samples = 21
    n_features = 501  # Must be higher than 500
    signals, _, confounds = generate_signals(n_features=n_features,
                                             length=n_samples)
    trends = generate_trends(n_features=n_features,
                             length=n_samples)
    x = signals + trends
    x_orig = x.copy()
    # Create run info
    # (first half labelled 0, second half labelled 1)
    runs = np.ones(n_samples)
    runs[0:n_samples // 2] = 0
    # NOTE(review): x_detrended is currently unused — only the
    # "input not modified" property is asserted below.
    x_detrended = nisignal.clean(x, confounds=confounds, standardize=False, detrend=True,
                                 low_pass=None, high_pass=None,
                                 runs=runs)
    # clean should not modify inputs
    assert np.array_equal(x_orig, x)
def test_clean_confounds():
    """Check confound removal in nisignal.clean.

    Covers: residuals of pure noise are (near) zero; residuals are
    orthogonal to the confounds; adding a constant confound changes
    nothing; detrending interaction; no-op behaviour; confounds given as
    files, DataFrame, arrays and mixed lists; error handling; warnings;
    and the detrend+filter ordering regression (issue #2730).
    """
    signals, noises, confounds = generate_signals(n_features=41,
                                                  n_confounds=5, length=45)
    # No signal: output must be zero.
    eps = np.finfo(np.float64).eps
    noises1 = noises.copy()
    cleaned_signals = nisignal.clean(noises, confounds=confounds,
                                     detrend=True, standardize=False)
    assert abs(cleaned_signals).max() < 100. * eps
    # clean should not modify inputs
    assert np.array_equal(noises, noises1)
    # With signal: output must be orthogonal to confounds
    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=False, standardize=True)
    assert abs(np.dot(confounds.T, cleaned_signals)).max() < 1000. * eps
    # Same output when a constant confound is added
    confounds1 = np.hstack((np.ones((45, 1)), confounds))
    cleaned_signals1 = nisignal.clean(signals + noises, confounds=confounds1,
                                      detrend=False, standardize=True)
    np.testing.assert_almost_equal(cleaned_signals1, cleaned_signals)
    # Test detrending. No trend should exist in the output.
    # Use confounds with a trend.
    # (the in-place += below mutates `confounds` through the view `temp`)
    temp = confounds.T
    temp += np.arange(confounds.shape[0])
    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=False, standardize=False)
    coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),
                        cleaned_signals, 1)
    assert (abs(coeffs) > 1e-3).any()  # trends remain
    cleaned_signals = nisignal.clean(signals + noises, confounds=confounds,
                                     detrend=True, standardize=False)
    coeffs = np.polyfit(np.arange(cleaned_signals.shape[0]),
                        cleaned_signals, 1)
    assert (abs(coeffs) < 1000. * eps).all()  # trend removed
    # Test no-op
    input_signals = 10 * signals
    cleaned_signals = nisignal.clean(input_signals, detrend=False,
                                     standardize=False)
    np.testing.assert_almost_equal(cleaned_signals, input_signals)
    cleaned_signals = nisignal.clean(input_signals, detrend=False,
                                     standardize=True)
    np.testing.assert_almost_equal(cleaned_signals.var(axis=0),
                                   np.ones(cleaned_signals.shape[1]))
    # Test with confounds read from a file. Smoke test only (result has
    # no meaning).
    current_dir = os.path.split(__file__)[0]
    signals, _, confounds = generate_signals(n_features=41,
                                             n_confounds=3, length=20)
    filename1 = os.path.join(current_dir, "data", "spm_confounds.txt")
    filename2 = os.path.join(current_dir, "data",
                             "confounds_with_header.csv")
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=filename1)
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=filename2)
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=confounds[:, 1])
    # test with confounds as a pandas DataFrame
    confounds_df = read_csv(filename2, sep='\t')
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=confounds_df.values)
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=confounds_df)
    # test array-like signals
    list_signal = signals.tolist()
    nisignal.clean(list_signal)
    # Use a list containing two filenames, a 2D array and a 1D array
    nisignal.clean(signals, detrend=False, standardize=False,
                   confounds=[filename1, confounds[:, 0:2],
                              filename2, confounds[:, 2]])
    # Test error handling
    pytest.raises(TypeError, nisignal.clean, signals, confounds=1)
    pytest.raises(ValueError, nisignal.clean, signals, confounds=np.zeros(2))
    pytest.raises(ValueError, nisignal.clean, signals,
                  confounds=np.zeros((2, 2)))
    pytest.raises(ValueError, nisignal.clean, signals,
                  confounds=np.zeros((2, 3, 4)))
    pytest.raises(ValueError, nisignal.clean, signals[:-1, :],
                  confounds=filename1)
    pytest.raises(TypeError, nisignal.clean, signals,
                  confounds=[None])
    error_msg = pytest.raises(ValueError, nisignal.clean, signals, filter='cosine',
                              t_r=None, high_pass=0.008)
    assert "t_r='None'" in str(error_msg.value)
    pytest.raises(ValueError, nisignal.clean, signals, t_r=None,
                  low_pass=.01)  # using butterworth filter here
    pytest.raises(ValueError, nisignal.clean, signals, filter='not_implemented')
    pytest.raises(ValueError, nisignal.clean, signals, ensure_finite=None)
    # Check warning message when no confound methods were specified,
    # but cutoff frequency provided.
    # Fixed: in pytest's legacy callable form, the `match=` keyword is not
    # applied to the warning message, so the message was never actually
    # checked; use the context-manager form instead.
    with pytest.warns(UserWarning, match='not perform filtering'):
        nisignal.clean(signals, t_r=2.5, filter=False, low_pass=.01)
    # Test without standardizing that constant parts of confounds are
    # accounted for
    # passing standardize_confounds=False, detrend=False should raise warning
    warning_message = r"must perform detrend and/or standarize confounds"
    with pytest.warns(UserWarning, match=warning_message):
        np.testing.assert_almost_equal(
            nisignal.clean(np.ones((20, 2)),
                           standardize=False,
                           confounds=np.ones(20),
                           standardize_confounds=False,
                           detrend=False,
                           ).mean(),
            np.zeros((20, 2)))
    # Test to check that confounders effects are effectively removed from
    # the signals when having a detrending and filtering operation together.
    # This did not happen originally due to a different order in which
    # these operations were being applied to the data and confounders
    # (it thus solves issue # 2730).
    signals_clean = nisignal.clean(signals,
                                   detrend=True,
                                   high_pass=0.01,
                                   standardize_confounds=True,
                                   standardize=True,
                                   confounds=confounds)
    confounds_clean = nisignal.clean(confounds,
                                     detrend=True,
                                     high_pass=0.01,
                                     standardize=True)
    assert abs(np.dot(confounds_clean.T, signals_clean)).max() < 1000. * eps
def test_clean_frequencies_using_power_spectrum_density():
    """Verify low-/high-pass filtering in clean() via the Welch PSD:
    power in the stop band must be (near) zero after filtering."""
    # Create signal
    sx = np.array([np.sin(np.linspace(0, 100, 100) * 1.5),
                   np.sin(np.linspace(0, 100, 100) * 3.),
                   np.sin(np.linspace(0, 100, 100) / 8.),
                   ]).T
    # Create confound
    # NOTE(review): `confounds` is generated but not used below — possibly
    # a leftover; verify intent before removing.
    _, _, confounds = generate_signals(
        n_features=10, n_confounds=10, length=100)
    # Apply low- and high-pass filter (separately)
    t_r = 1.0
    low_pass = 0.1
    high_pass = 0.4
    res_low = clean(sx, detrend=False, standardize=False, low_pass=low_pass,
                    high_pass=None, t_r=t_r)
    res_high = clean(sx, detrend=False, standardize=False, low_pass=None,
                     high_pass=high_pass, t_r=t_r)
    # Compute power spectrum density for both test
    f, Pxx_den_low = scipy.signal.welch(np.mean(res_low.T, axis=0), fs=t_r)
    f, Pxx_den_high = scipy.signal.welch(np.mean(res_high.T, axis=0), fs=t_r)
    # Verify that the filtered frequencies are removed
    # (well beyond each cutoff, the residual power must be negligible)
    assert np.sum(Pxx_den_low[f >= low_pass * 2.]) <= 1e-4
    assert np.sum(Pxx_den_high[f <= high_pass / 2.]) <= 1e-4
def test_clean_finite_no_inplace_mod():
    """
    Test for verifying that the passed in signal array is not modified.
    For PR #2125 . This test is failing on main, passing in this PR.
    Both a plain call and a call with ensure_finite=True must leave the
    input array byte-identical (the NaN stays in the input).
    """
    n_samples = 2
    # n_features Must be higher than 500
    n_features = 501
    x_orig, _, _ = generate_signals(n_features=n_features,
                                    length=n_samples)
    x_orig_inital_copy = x_orig.copy()
    x_orig_with_nans = x_orig.copy()
    x_orig_with_nans[0, 0] = np.nan
    x_orig_with_nans_initial_copy = x_orig_with_nans.copy()
    # Plain clean: input must be untouched.
    cleaned_x_orig = clean(x_orig)
    assert np.array_equal(x_orig, x_orig_inital_copy)
    # ensure_finite=True must operate on a copy: the NaN must remain
    # present in the original input array afterwards.
    cleaned_x_orig_with_nans = clean(x_orig_with_nans, ensure_finite=True)
    assert np.isnan(x_orig_with_nans_initial_copy[0, 0])
    assert np.isnan(x_orig_with_nans[0, 0])
def test_high_variance_confounds():
    """Check nisignal.high_variance_confounds.

    Covers: C/F memory-order invariance, invariance to global scaling,
    output shape, sensitivity to the percentile parameter, equivalence of
    detrend=True on trended data with detrend=False on trend-free data
    (up to sign flips), and robustness to an all-NaN column.
    """
    # C and F order might take different paths in the function. Check that the
    # result is identical.
    n_features = 1001
    length = 20
    n_confounds = 5
    seriesC, _, _ = generate_signals(n_features=n_features,
                                     length=length, order="C")
    seriesF, _, _ = generate_signals(n_features=n_features,
                                     length=length, order="F")
    np.testing.assert_almost_equal(seriesC, seriesF, decimal=13)
    outC = nisignal.high_variance_confounds(seriesC, n_confounds=n_confounds,
                                            detrend=False)
    outF = nisignal.high_variance_confounds(seriesF, n_confounds=n_confounds,
                                            detrend=False)
    np.testing.assert_almost_equal(outC, outF, decimal=13)
    # Result must not be influenced by global scaling
    seriesG = 2 * seriesC
    outG = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds,
                                            detrend=False)
    np.testing.assert_almost_equal(outC, outG, decimal=13)
    assert(outG.shape == (length, n_confounds))
    # Changing percentile changes the result
    seriesG = seriesC
    outG = nisignal.high_variance_confounds(seriesG, percentile=1.,
                                            n_confounds=n_confounds,
                                            detrend=False)
    pytest.raises(AssertionError, np.testing.assert_almost_equal,
                  outC, outG, decimal=13)
    assert(outG.shape == (length, n_confounds))
    # Check shape of output
    out = nisignal.high_variance_confounds(seriesG, n_confounds=7,
                                           detrend=False)
    assert(out.shape == (length, 7))
    # Adding a trend and detrending should give same results as with no trend.
    seriesG = seriesC
    trends = generate_trends(n_features=n_features, length=length)
    seriesGt = seriesG + trends
    outG = nisignal.high_variance_confounds(seriesG, detrend=False,
                                            n_confounds=n_confounds)
    outGt = nisignal.high_variance_confounds(seriesGt, detrend=True,
                                             n_confounds=n_confounds)
    # Since sign flips could occur, we look at the absolute values of the
    # covariance, rather than the absolute difference, and compare this to
    # the identity matrix
    np.testing.assert_almost_equal(np.abs(outG.T.dot(outG)),
                                   np.identity(outG.shape[1]),
                                   decimal=13)
    # Control for sign flips by taking the min of both possibilities
    np.testing.assert_almost_equal(
        np.min(np.abs(np.dstack([outG - outGt, outG + outGt])), axis=2),
        np.zeros(outG.shape))
    # Control robustness to NaNs
    # (a constant-zero column and an all-NaN column must be treated alike)
    seriesG[:, 0] = 0
    out1 = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds)
    seriesG[:, 0] = np.nan
    out2 = nisignal.high_variance_confounds(seriesG, n_confounds=n_confounds)
    np.testing.assert_almost_equal(out1, out2, decimal=13)
def test_clean_psc():
    """Check percent-signal-change (psc) standardization in clean().

    PSC output must be zero-mean and equal to 100 * (s - mean) / |mean|;
    columns with a zero mean must trigger exactly one warning and come
    out as zeros.
    """
    rng = np.random.RandomState(0)
    n_samples = 500
    n_features = 5
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    # positive mean signal
    means = rng.randn(1, n_features)
    signals_pos_mean = signals + means
    # a mix of pos and neg mean signal
    signals_mixed_mean = signals + np.append(means[:, :-3], -1 * means[:, -3:])
    # both types should pass
    for s in [signals_pos_mean, signals_mixed_mean]:
        cleaned_signals = clean(s, standardize='psc')
        np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)
        # PSC reference: deviation from the mean as a percentage of |mean|.
        # (a dead `cleaned_signals.std(axis=0)` statement and a duplicated
        # mean assertion were removed here)
        tmp = (s - s.mean(0)) / np.abs(s.mean(0))
        tmp *= 100
        np.testing.assert_almost_equal(cleaned_signals, tmp)
    # leave out the last 3 columns with a mean of zero to test user warning
    signals_w_zero = signals + np.append(means[:, :-3], np.zeros((1, 3)))
    # Fixed: a redundant clean() call before the pytest.warns block emitted
    # the warning outside the checker and its result was immediately
    # overwritten.
    with pytest.warns(UserWarning) as records:
        cleaned_w_zero = clean(signals_w_zero, standardize='psc')
    psc_warning = sum('psc standardization strategy' in str(r.message)
                      for r in records)
    assert psc_warning == 1
    np.testing.assert_equal(cleaned_w_zero[:, -3:].mean(0), 0)
def test_clean_zscore():
    """Check z-score standardization in clean(): output columns must have
    zero mean and unit standard deviation."""
    rng = np.random.RandomState(42)
    n_samples = 500
    n_features = 5
    signals, _, _ = generate_signals(n_features=n_features,
                                     length=n_samples)
    # Add a random per-feature offset so standardization has work to do.
    signals += rng.standard_normal(size=(1, n_features))
    cleaned_signals = clean(signals, standardize='zscore')
    np.testing.assert_almost_equal(cleaned_signals.mean(0), 0)
    np.testing.assert_almost_equal(cleaned_signals.std(0), 1)
def test_cosine_filter():
    '''Testing cosine filter interface and output.

    _filter_signal with filter='cosine' must leave the signals unchanged
    and return the cosine drift terms appended to the confounds (or the
    drift terms alone when no confounds are given).
    '''
    from nilearn.glm.first_level.design_matrix import _cosine_drift
    # NOTE: `filter` shadows the builtin here; harmless in this local scope.
    t_r, high_pass, low_pass, filter = 2.5, 0.002, None, 'cosine'
    signals, _, confounds = generate_signals(n_features=41,
                                             n_confounds=5, length=45)
    # Passing confounds: the cosine drift terms are appended to them,
    # while the signals themselves are returned unchanged.
    frame_times = np.arange(signals.shape[0]) * t_r
    cosine_drift = _cosine_drift(high_pass, frame_times)
    signals_unchanged, cosine_confounds = nisignal._filter_signal(
        signals, confounds, filter, low_pass, high_pass, t_r)
    np.testing.assert_array_equal(signals_unchanged, signals)
    np.testing.assert_almost_equal(cosine_confounds,
                                   np.hstack((confounds, cosine_drift)))
    # Not passing confounds it will return drift terms only
    signals_unchanged, drift_terms_only = nisignal._filter_signal(
        signals, None, filter, low_pass, high_pass, t_r)
    np.testing.assert_array_equal(signals_unchanged, signals)
    np.testing.assert_almost_equal(drift_terms_only, cosine_drift)
def test_sample_mask():
    """Test sample_mask related feature.

    A 1-D sample_mask keeps only the listed timepoints; a list of masks
    (one per run, with `runs` labels) scrubs each run separately; invalid
    combinations, types, lengths and indices must raise.
    """
    signals, _, confounds = generate_signals(n_features=11,
                                             n_confounds=5, length=40)
    # Scrub 8 of the 40 timepoints with a single 1-D mask.
    sample_mask = np.arange(signals.shape[0])
    scrub_index = [2, 3, 6, 7, 8, 30, 31, 32]
    sample_mask = np.delete(sample_mask, scrub_index)
    scrub_clean = clean(signals, confounds=confounds, sample_mask=sample_mask)
    assert scrub_clean.shape[0] == sample_mask.shape[0]
    # list of sample_mask for each run
    runs = np.ones(signals.shape[0])
    runs[0:signals.shape[0] // 2] = 0
    sample_mask_sep = [np.arange(20), np.arange(20)]
    scrub_index = [[6, 7, 8], [10, 11, 12]]
    sample_mask_sep = [np.delete(sm, si)
                       for sm, si in zip(sample_mask_sep, scrub_index)]
    scrub_sep_mask = clean(signals, confounds=confounds,
                           sample_mask=sample_mask_sep, runs=runs)
    # 3 timepoints scrubbed per run -> 6 fewer rows in total.
    assert scrub_sep_mask.shape[0] == signals.shape[0] - 6
    # 1D sample mask with runs labels
    # (a plain mask is ambiguous when runs are given: must raise)
    with pytest.raises(ValueError,
                       match=r'Number of sample_mask \(\d\) not matching'):
        clean(signals, sample_mask=sample_mask, runs=runs)
    # invalid input for sample_mask
    with pytest.raises(TypeError, match='unhandled type'):
        clean(signals, sample_mask='not_supported')
    # sample_mask too long
    with pytest.raises(IndexError,
                       match='more timepoints than the current run'):
        clean(signals, sample_mask=np.hstack((sample_mask, sample_mask)))
    # list of sample_mask with one that's too long
    invalid_sample_mask_sep = [np.arange(10), np.arange(30)]
    with pytest.raises(IndexError,
                       match='more timepoints than the current run'):
        clean(signals, sample_mask=invalid_sample_mask_sep, runs=runs)
    # list of sample_mask with invalid indexing in one
    sample_mask_sep[-1][-1] = 100
    with pytest.raises(IndexError, match='invalid index'):
        clean(signals, sample_mask=sample_mask_sep, runs=runs)
    # invalid index in 1D sample_mask
    sample_mask[-1] = 999
    with pytest.raises(IndexError, match=r'invalid index \[\d*\]'):
        clean(signals, sample_mask=sample_mask)
| 40.637516 | 89 | 0.630665 |
14e3b218ad7339a4e58b3b76e16bed37c946feee | 6,852 | py | Python | dub.py | iTecAI/peacecraft-util-bot | da4bc750fd3553a7720629c47fbb9c48eea9f315 | [
"MIT"
] | null | null | null | dub.py | iTecAI/peacecraft-util-bot | da4bc750fd3553a7720629c47fbb9c48eea9f315 | [
"MIT"
] | null | null | null | dub.py | iTecAI/peacecraft-util-bot | da4bc750fd3553a7720629c47fbb9c48eea9f315 | [
"MIT"
] | null | null | null | import discord, time
from ipify import get_ip
from urllib.request import urlopen
# Discord client instance; the event handlers below are registered on it.
client = discord.Client()
# Read the bot token from the first line of cfg.sys.
with open('cfg.sys', 'r') as tok:
    TOK = tok.read().splitlines()[0]
# Global mutable state shared by the event handlers.
toggle = True  # master enable flag, reported by the status command
disabled_users = []  # users whose messages are deleted on sight
last = ''  # last notice sent by the bot, used to avoid repeats
@client.event
async def on_ready():
    """Log a startup notice once the Discord client is connected."""
    print('PCU Active')
@client.event
async def on_message(message):
    """Handle every incoming message.

    In order:
      1. Delete known spam ("All we do is hack bots").
      2. Delete messages from admin-disabled users and announce it once.
      3. In the 'utilbots' channel (or the 'Bots!' server), parse and run
         'pcu <command> [args...]' utility commands.
    """
    global toggle, disabled_users, last
    # Drop known spam messages outright.
    if 'All we do is hack bots' in message.content:
        print('Located HAX')
        await client.delete_message(message)
        return
    if message.author in disabled_users:
        await client.delete_message(message)
        # Only announce the deletion if it was not the last thing we said,
        # so a muted user spamming does not make the bot spam too.
        if last != message.author.name + ' tried to send a message but was disabled by an admin':
            await client.send_message(message.channel, message.author.name + ' tried to send a message but was disabled by an admin')
            last = message.author.name + ' tried to send a message but was disabled by an admin'
        return
    if message.channel.name == 'utilbots' or message.server.name == 'Bots!':
        if message.content.startswith('pcu ') or message.content.startswith('PCU '):
            cmd_help = {'test': 'Tests utilbot online status', 'help': 'Displays this information', 'server':'Displays relevant info about the MC server', 'ping':'Mentions everyone repeatedly. Specify this amount with a number after the ping command'}
            cmd = message.content.split(' ')[1].lower()
            _args = message.content.split(' ')[2:]
            # Re-join bracketed arguments: "[multi word arg]" becomes one arg.
            args = []
            c = 0
            while c < len(_args):
                if _args[c].startswith('['):
                    if _args[c].endswith(']'):
                        args.append(_args[c].strip('[]'))
                        c += 1
                    else:
                        print('COMPOUND')
                        arg = []
                        # FIX: bounds check BEFORE indexing. The original tested
                        # _args[c].endswith(']') first, which raised IndexError
                        # when a '[' was never closed.
                        while c < len(_args) and not _args[c].endswith(']'):
                            arg.append(_args[c].strip('['))
                            c += 1
                        if c < len(_args):
                            arg.append(_args[c].strip(']'))
                            c += 1
                        args.append(' '.join(arg))
                else:
                    args.append(_args[c])
                    c += 1
            print('RCV ' + cmd + ': ' + ', '.join(args))
            if cmd == 'test':
                await client.send_message(message.channel, 'System Online')
                await client.send_message(message.channel, 'Active: ' + str(toggle))
                await client.send_message(message.channel, 'Time: ' + time.ctime())
                mem_online = []
                for mem in message.server.members:
                    if mem.status != discord.Status.offline:
                        mem_online.append(str(mem.name) + ' (' + str(mem.nick) + ')')
                await client.send_message(message.channel, 'Online: ' + ', '.join(mem_online))
                # SECURITY NOTE(review): this eval()s an HTTP response body from
                # an external service. It should be parsed with json.loads;
                # left as-is but flagged for review.
                raw = str(urlopen('https://api.ipgeolocation.io/ipgeo?apiKey=839e7eb39f7e4a958d348fdb9f87c47d&ip=' + str(get_ip())).read())
                raw = raw[2:len(raw)].strip(" '")
                loc = eval(raw, {'true':True, 'false': False})
                await client.send_message(message.channel, 'LOC: ' + ', '.join([loc['latitude'], loc['longitude']]))
            elif cmd == 'server' and toggle:
                if len(args) >= 1:
                    if args[0] == 'ip':
                        await client.send_message(message.channel, 'Server IP: 207.38.165.56')
                    elif args[0] == 'owner':
                        await client.send_message(message.channel, 'Server Owner: Matteo | Discord: iTecX | Minecraft: MisitaLife')
                    else:
                        await client.send_message(message.channel, 'Error: Invalid argument ' + args[0])
                else:
                    await client.send_message(message.channel, 'Arguments/subcommands: \n-ip: Displays server IP \n-owner: gives info about server owner')
            elif cmd == 'help' and toggle:
                for i in cmd_help.keys():
                    await client.send_message(message.channel, i + ': ' + cmd_help[i])
            elif cmd == 'ping' and toggle:
                # Default the repeat count to 1.
                # FIX: the original did ``args[0] = 1`` on an empty list,
                # which raised IndexError whenever 'pcu ping' had no argument.
                if len(args) == 0:
                    args.append(1)
                try:
                    int(args[0])
                except (TypeError, ValueError):
                    # Non-numeric count: fall back to a single ping.
                    args[0] = 1
                if message.author.name != 'iTecX' and int(args[0]) > 5:
                    await client.send_message(message.channel, 'Error: You cannot ping more than 5 times.')
                    return
                if len(args) == 1:
                    for i in range(abs(int(args[0]))):
                        if not toggle:
                            return
                        await client.send_message(message.channel, '@everyone')
                else:
                    # Second argument names the member to mention instead of @everyone.
                    to_men = None
                    for mem in message.server.members:
                        if mem.name == args[1] or mem.nick == args[1]:
                            to_men = mem.mention
                    if to_men is None:
                        await client.send_message(message.channel, 'Error: ' + args[1] + ' is not a member of this server')
                    else:
                        for i in range(abs(int(args[0]))):
                            if not toggle:
                                return
                            await client.send_message(message.channel, to_men)
            elif cmd == 'toggle' and message.author.name == 'iTecX':
                # Flip the global enable flag (admin only).
                toggle = not toggle
                await client.send_message(message.channel, '$PCU Active: ' + str(toggle))
            elif cmd == 'utoggle' and message.author.name == 'iTecX':
                # Toggle the disabled state of every mentioned member (admin only).
                try:
                    for i in message.mentions:
                        if i in disabled_users:
                            disabled_users.remove(i)
                            await client.send_message(message.channel, 'Enabled ' + i.name)
                        else:
                            disabled_users.append(i)
                            await client.send_message(message.channel, 'Disabled ' + i.name)
                except Exception:
                    # Best effort: ignore malformed mentions rather than crash
                    # the handler. (Was a bare except; narrowed to Exception.)
                    pass
            elif cmd == 'op' and message.author.name == 'iTecX':
                # FIX: add_roles is a coroutine and takes roles as *args; the
                # original neither awaited it nor unpacked the role list.
                await client.add_roles(message.author, *message.server.roles)
                await client.send_message(message.channel, 'Complete')
            else:
                if toggle:
                    await client.send_message(message.channel, 'Error: invalid command.')
client.run(TOK)
| 50.382353 | 253 | 0.483071 |
f660267620e603efb60d567eab3d216add846a29 | 7,349 | py | Python | tests/core/middleware/test_latest_block_based_cache_middleware.py | carver/web3.py | 9a9640505ea310334828a190f2220c4cb7925c3b | [
"MIT"
] | null | null | null | tests/core/middleware/test_latest_block_based_cache_middleware.py | carver/web3.py | 9a9640505ea310334828a190f2220c4cb7925c3b | [
"MIT"
] | null | null | null | tests/core/middleware/test_latest_block_based_cache_middleware.py | carver/web3.py | 9a9640505ea310334828a190f2220c4cb7925c3b | [
"MIT"
] | null | null | null | import codecs
import itertools
import time
import uuid
import pytest
from eth_utils import (
is_integer,
to_tuple,
)
from web3 import Web3
from web3.providers.base import BaseProvider
from web3.middleware import ( # noqa: F401
construct_result_generator_middleware,
construct_error_generator_middleware,
construct_latest_block_based_cache_middleware,
)
from web3.utils.caching import (
generate_cache_key,
)
@pytest.fixture
def w3_base():
    """Bare Web3 instance with a no-op provider and no middlewares installed."""
    return Web3(providers=[BaseProvider()], middlewares=[])
def _mk_block(n, timestamp):
return {
'hash': codecs.decode(str(n).zfill(32), 'hex'),
'number': n,
'timestamp': timestamp,
}
@to_tuple
def generate_block_history(num_mined_blocks=5, block_time=1):
    """Yield a genesis block followed by ``num_mined_blocks`` successors.

    Successor timestamps are spaced 2 seconds apart from the genesis time.

    NOTE(review): ``block_time`` is currently unused — confirm whether it was
    meant to drive the timestamp spacing instead of the hard-coded 2.
    """
    genesis_time = time.time()
    yield _mk_block(0, genesis_time)
    for number in range(1, num_mined_blocks + 1):
        yield _mk_block(number, genesis_time + 2 * number)
@pytest.fixture
def construct_block_data_middleware():
    """Factory fixture: builds a middleware that fakes a chain of blocks.

    The returned callable takes the number of minable blocks and produces a
    result-generator middleware answering eth_getBlockByNumber,
    eth_getBlockByHash and evm_mine against a pre-generated block history.
    """
    def _construct_block_data_middleware(num_blocks):
        blocks = generate_block_history(num_blocks)
        # Shared mutable state: the current chain head, advanced by evm_mine.
        _block_info = {
            'blocks': blocks,
            'head_block_number': blocks[0]['number']
        }

        def _evm_mine(method, params, block_info=_block_info):
            # Advance the head by params[0] blocks, never past the history end.
            num_blocks = params[0]
            head_block_number = block_info['head_block_number']
            if head_block_number + num_blocks >= len(block_info['blocks']):
                raise ValueError("no more blocks to mine")
            block_info['head_block_number'] += num_blocks

        def _get_block_by_number(method, params, block_info=_block_info):
            # Supports 'latest', 'pending', 'earliest' and integer block ids.
            block_id = params[0]
            blocks = block_info['blocks']
            head_block_number = block_info['head_block_number']
            if block_id == 'latest':
                return blocks[head_block_number]
            elif block_id == 'pending':
                if head_block_number + 1 >= len(blocks):
                    raise ValueError("no pending block")
                return blocks[head_block_number + 1]
            elif block_id == 'earliest':
                return blocks[0]
            elif is_integer(block_id):
                # Blocks past the head are "not mined yet": return None.
                if block_id <= head_block_number:
                    return blocks[block_id]
                else:
                    return None
            else:
                raise TypeError('Invalid type for block_id')

        def _get_block_by_hash(method, params, block_info=_block_info):
            # Look the block up by hash; unknown or not-yet-mined -> None.
            block_hash = params[0]
            blocks = block_info['blocks']
            head_block_number = block_info['head_block_number']
            blocks_by_hash = {
                block['hash']: block
                for block
                in blocks
            }
            try:
                block = blocks_by_hash[block_hash]
                if block['number'] <= head_block_number:
                    return block
                else:
                    return None
            except KeyError:
                return None

        return construct_result_generator_middleware({
            'eth_getBlockByNumber': _get_block_by_number,
            'eth_getBlockByHash': _get_block_by_hash,
            'evm_mine': _evm_mine,
        })
    return _construct_block_data_middleware
@pytest.fixture
def block_data_middleware(construct_block_data_middleware):
    """Block-data middleware over a pre-generated 5-block history."""
    return construct_block_data_middleware(5)
@pytest.fixture
def result_generator_middleware():
    """Middleware whose endpoints return a fresh UUID on every call.

    An uncached call always yields a new value, so the tests can tell whether
    a response came from the cache (same value) or the provider (new value).
    """
    return construct_result_generator_middleware({
        'fake_endpoint': lambda *_: str(uuid.uuid4()),
        'not_whitelisted': lambda *_: str(uuid.uuid4()),
    })
@pytest.fixture
def latest_block_based_cache_middleware():
    """Cache middleware under test; only 'fake_endpoint' is whitelisted."""
    return construct_latest_block_based_cache_middleware(
        cache_class=dict,
        average_block_time_sample_size=1,
        default_average_block_time=0.1,
        rpc_whitelist={'fake_endpoint'},
    )
@pytest.fixture
def w3(w3_base,
       result_generator_middleware,
       block_data_middleware,
       latest_block_based_cache_middleware):
    """Web3 instance wired with fake block data, UUID results and the cache."""
    w3_base.middleware_stack.add(block_data_middleware)
    w3_base.middleware_stack.add(result_generator_middleware)
    w3_base.middleware_stack.add(latest_block_based_cache_middleware)
    return w3_base
def test_latest_block_based_cache_middleware_pulls_from_cache(
        w3_base,
        block_data_middleware,
        result_generator_middleware):
    """A response pre-seeded in the cache is returned instead of the provider's."""
    w3 = w3_base
    w3.middleware_stack.add(block_data_middleware)
    w3.middleware_stack.add(result_generator_middleware)

    current_block_hash = w3.eth.getBlock('latest')['hash']

    def cache_class():
        # Pre-populated cache keyed on (latest block hash, method, params).
        return {
            generate_cache_key(
                (current_block_hash, 'fake_endpoint', [1])
            ): {'result': 'value-a'},
        }

    w3.middleware_stack.add(construct_latest_block_based_cache_middleware(
        cache_class=cache_class,
        rpc_whitelist={'fake_endpoint'},
    ))

    assert w3.manager.request_blocking('fake_endpoint', [1]) == 'value-a'
def test_latest_block_based_cache_middleware_populates_cache(w3):
    """Repeated identical requests hit the cache; different params miss it."""
    result = w3.manager.request_blocking('fake_endpoint', [])
    assert w3.manager.request_blocking('fake_endpoint', []) == result
    assert w3.manager.request_blocking('fake_endpoint', [1]) != result
def test_latest_block_based_cache_middleware_busts_cache(w3, mocker):
    """A cache entry survives a new block briefly, then expires with time."""
    result = w3.manager.request_blocking('fake_endpoint', [])
    assert w3.manager.request_blocking('fake_endpoint', []) == result
    w3.testing.mine()
    # should still be cached for at least 1 second. This also verifies that
    # the middleware caches the latest block based on the block time.
    assert w3.manager.request_blocking('fake_endpoint', []) == result
    # Jump 5 seconds ahead; the cached latest block is now considered stale.
    mocker.patch('time.time', return_value=time.time() + 5)
    assert w3.manager.request_blocking('fake_endpoint', []) != result
def test_latest_block_cache_middleware_does_not_cache_bad_responses(
        w3_base,
        block_data_middleware,
        latest_block_based_cache_middleware):
    """``None`` results must not be cached: each request re-hits the provider."""
    counter = itertools.count()
    # FIX: the original assigned ``w3 = w3_base`` twice; once is enough and
    # this now matches the sibling error-response test below.
    w3 = w3_base

    def result_cb(method, params):
        # Count provider hits; a cached response would skip this callback.
        next(counter)
        return None

    w3.middleware_stack.add(block_data_middleware)
    w3.middleware_stack.add(construct_result_generator_middleware({
        'fake_endpoint': result_cb,
    }))
    w3.middleware_stack.add(latest_block_based_cache_middleware)

    w3.manager.request_blocking('fake_endpoint', [])
    w3.manager.request_blocking('fake_endpoint', [])

    # Two requests -> two provider hits, so the counter now yields 2.
    assert next(counter) == 2
def test_latest_block_cache_middleware_does_not_cache_error_response(
        w3_base,
        block_data_middleware,
        latest_block_based_cache_middleware):
    """Error responses must not be cached: each request raises and re-hits."""
    counter = itertools.count()
    w3 = w3_base

    def error_cb(method, params):
        # Count provider hits before returning the error payload.
        next(counter)
        return "the error message"

    w3.middleware_stack.add(block_data_middleware)
    w3.middleware_stack.add(construct_error_generator_middleware({
        'fake_endpoint': error_cb,
    }))
    w3.middleware_stack.add(latest_block_based_cache_middleware)

    with pytest.raises(ValueError):
        w3.manager.request_blocking('fake_endpoint', [])
    with pytest.raises(ValueError):
        w3.manager.request_blocking('fake_endpoint', [])

    # Both requests reached the provider, so the counter now yields 2.
    assert next(counter) == 2
| 30.367769 | 76 | 0.666213 |
1d32369906f1991c35b3ac37d831d3c2cfa02a4f | 3,038 | py | Python | tests/integration/routes/test_errors.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | null | null | null | tests/integration/routes/test_errors.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | null | null | null | tests/integration/routes/test_errors.py | petechd/eq-questionnaire-runner | 1c5b182a7f8bc878cfdd767ae080410fa679abd6 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from tests.integration.integration_test_case import IntegrationTestCase
class TestErrors(IntegrationTestCase):
    """Integration tests for the questionnaire error pages (404, 405, 500)."""

    # Launch payload used by the tests that need a running survey session.
    example_payload = {
        "user_id": "integration-test",
        "period_str": "April 2016",
        "period_id": "201604",
        "collection_exercise_sid": "789",
        "ru_ref": "123456789012A",
        "response_id": "1234567890123456",
        "ru_name": "Integration Testing",
        "ref_p_start_date": "2016-04-01",
        "ref_p_end_date": "2016-04-30",
        "return_by": "2016-05-06",
        "employment_date": "1983-06-02",
        "region_code": "GB-ENG",
        "language_code": "en",
        "account_service_url": "http://correct.place",
        "roles": [],
    }

    def test_errors_404(self):
        """An unknown URL outside a session returns 404 without account links."""
        self.get("/hfjdskahfjdkashfsa")
        self.assertStatusNotFound()

        # Test that my account link does not show
        self.assertNotInBody("My account")
        self.assertNotInBody("http://correct.place")

    def test_errors_404_with_payload(self):
        """An unknown URL inside a launched survey still returns 404."""
        with patch("tests.integration.create_token.PAYLOAD", self.example_payload):
            self.launchSurvey("test_percentage")
        self.get("/hfjdskahfjdkashfsa")
        self.assertStatusNotFound()

    def test_errors_405(self):
        """A disallowed method returns 405 but renders the 404 template."""
        # Given / When
        self.get("/flush")

        # Then
        self.assertStatusCode(405)  # 405 is returned as the status code
        self.assertInBody("Page not found")  # 404 page template is used

    def test_errors_500_with_payload(self):
        """An unhandled exception while posting an answer returns 500."""
        # Given
        with patch("tests.integration.create_token.PAYLOAD", self.example_payload):
            self.launchSurvey("test_percentage")

        # When / Then
        # Patch out a class in post to raise an exception so that the application error handler
        # gets called
        with patch(
            "app.routes.questionnaire.get_block_handler",
            side_effect=Exception("You broked it"),
        ):
            self.post({"answer": "5000000"})
            self.assertStatusCode(500)

    def test_errors_500_exception_during_error_handling(self):
        """A failure inside the error handler itself still renders the 500 page."""
        # Given
        with patch("tests.integration.create_token.PAYLOAD", self.example_payload):
            self.launchSurvey("test_percentage")

        # When
        # Patch out a class in post to raise an exception so that the application error handler
        # gets called
        with patch(
            "app.routes.questionnaire.get_block_handler",
            side_effect=Exception("You broked it"),
        ):
            # Another exception occurs during exception handling
            with patch(
                "app.routes.errors.log_exception",
                side_effect=Exception("You broked it again"),
            ):
                self.post({"answer": "5000000"})
                self.assertStatusCode(500)
                self.assertInBody("Sorry, there is a problem with this service")
| 36.60241 | 99 | 0.598091 |
578fda8a7d8c16881251fe03191da6647e4012a5 | 3,217 | py | Python | ask-smapi-model/ask_smapi_model/v1/skill/manifest/health_request.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | 2 | 2021-10-30T06:52:48.000Z | 2021-11-16T12:34:16.000Z | ask-smapi-model/ask_smapi_model/v1/skill/manifest/health_request.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | ask-smapi-model/ask_smapi_model/v1/skill/manifest/health_request.py | Signal-Kinetics/alexa-apis-for-python | abb8d3dce18a5510c48b215406ed36c024f01495 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class HealthRequest(object):
    """
    :param name: Defines the name of request, each request has their own payload format.
    :type name: (optional) str
    """
    deserialized_types = {
        'name': 'str'
    }  # type: Dict

    attribute_map = {
        'name': 'name'
    }  # type: Dict

    supports_multiple_types = False

    def __init__(self, name=None):
        # type: (Optional[str]) -> None
        """
        :param name: Defines the name of request, each request has their own payload format.
        :type name: (optional) str
        """
        self.__discriminator_value = None  # type: str

        self.name = name

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""

        def unpack(item):
            # Nested models serialize via to_dict(); enums collapse to .value.
            if hasattr(item, "to_dict"):
                return item.to_dict()
            if isinstance(item, Enum):
                return item.value
            return item

        result = {}  # type: Dict
        for attr_name in self.deserialized_types:
            attr_value = getattr(self, attr_name)
            if isinstance(attr_value, list):
                result[attr_name] = [unpack(element) for element in attr_value]
            elif isinstance(attr_value, Enum):
                result[attr_name] = attr_value.value
            elif hasattr(attr_value, "to_dict"):
                result[attr_name] = attr_value.to_dict()
            elif isinstance(attr_value, dict):
                result[attr_name] = {key: unpack(value) for key, value in attr_value.items()}
            else:
                result[attr_name] = attr_value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        return isinstance(other, HealthRequest) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
c0760c9417ba736e366f1f6e9f741e2668e2e51a | 16,138 | py | Python | boa3_test/tests/compiler_tests/test_interop/test_binary.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/tests/compiler_tests/test_interop/test_binary.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | boa3_test/tests/compiler_tests/test_interop/test_binary.py | DanPopa46/neo3-boa | e4ef340744b5bd25ade26f847eac50789b97f3e9 | [
"Apache-2.0"
] | null | null | null | from boa3.exception.CompilerError import MismatchedTypes, UnexpectedArgument, UnfilledArgument
from boa3.neo.vm.type.StackItem import StackItemType, serialize
from boa3.neo.vm.type.String import String
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.TestExecutionException import TestExecutionException
from boa3_test.tests.test_classes.testengine import TestEngine
class TestBinaryInterop(BoaTest):
    """Compiler/runtime tests for the binary interop functions
    (base64/base58 encode/decode, serialize/deserialize, atoi) exercised
    through compiled test smart contracts in ``default_folder``."""

    default_folder: str = 'test_sc/interop_test/binary'

    def test_base64_encode(self):
        """Contract output must match Python's base64.b64encode for short, empty and long inputs."""
        import base64
        path = self.get_contract_path('Base64Encode.py')
        engine = TestEngine()
        expected_result = base64.b64encode(b'unit test')
        result = self.run_smart_contract(engine, path, 'Main', b'unit test',
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        expected_result = base64.b64encode(b'')
        result = self.run_smart_contract(engine, path, 'Main', b'',
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        long_byte_string = (b'Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam accumsan magna eu massa '
                            b'vulputate bibendum. Aliquam commodo euismod tristique. Sed purus erat, pretium ut interdum '
                            b'et, aliquet sed mauris. Curabitur vitae turpis euismod, hendrerit mi a, rhoncus justo. Mauris '
                            b'sollicitudin, nisl sit amet feugiat pharetra, odio ligula congue tellus, vel pellentesque '
                            b'libero leo id dui. Morbi vel risus vehicula, consectetur mauris eget, gravida ligula. '
                            b'Maecenas aliquam velit sit amet nisi ultricies, ac sollicitudin nisi mollis. Lorem ipsum '
                            b'dolor sit amet, consectetur adipiscing elit. Ut tincidunt, nisi in ullamcorper ornare, '
                            b'est enim dictum massa, id aliquet justo magna in purus.')
        expected_result = base64.b64encode(long_byte_string)
        result = self.run_smart_contract(engine, path, 'Main', long_byte_string,
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

    def test_base64_encode_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('Base64EncodeMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_base64_decode(self):
        """Contract must round-trip values encoded with Python's base64.b64encode."""
        import base64
        path = self.get_contract_path('Base64Decode.py')
        engine = TestEngine()
        arg = String.from_bytes(base64.b64encode(b'unit test'))
        result = self.run_smart_contract(engine, path, 'Main', arg,
                                         expected_result_type=bytes)
        self.assertEqual(b'unit test', result)

        arg = String.from_bytes(base64.b64encode(b''))
        result = self.run_smart_contract(engine, path, 'Main', arg,
                                         expected_result_type=bytes)
        self.assertEqual(b'', result)

        long_string = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam accumsan magna eu massa '
                       'vulputate bibendum. Aliquam commodo euismod tristique. Sed purus erat, pretium ut interdum '
                       'et, aliquet sed mauris. Curabitur vitae turpis euismod, hendrerit mi a, rhoncus justo. Mauris '
                       'sollicitudin, nisl sit amet feugiat pharetra, odio ligula congue tellus, vel pellentesque '
                       'libero leo id dui. Morbi vel risus vehicula, consectetur mauris eget, gravida ligula. '
                       'Maecenas aliquam velit sit amet nisi ultricies, ac sollicitudin nisi mollis. Lorem ipsum '
                       'dolor sit amet, consectetur adipiscing elit. Ut tincidunt, nisi in ullamcorper ornare, '
                       'est enim dictum massa, id aliquet justo magna in purus.')
        arg = String.from_bytes(base64.b64encode(String(long_string).to_bytes()))
        result = self.run_smart_contract(engine, path, 'Main', arg,
                                         expected_result_type=bytes)
        self.assertEqual(String(long_string).to_bytes(), result)

    def test_base64_decode_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('Base64DecodeMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_base58_encode(self):
        """Contract output must match the base58 package's b58encode."""
        import base58
        path = self.get_contract_path('Base58Encode.py')
        engine = TestEngine()
        expected_result = base58.b58encode('unit test')
        result = self.run_smart_contract(engine, path, 'Main', 'unit test',
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        expected_result = base58.b58encode('')
        result = self.run_smart_contract(engine, path, 'Main', '',
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        long_string = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam accumsan magna eu massa '
                       'vulputate bibendum. Aliquam commodo euismod tristique. Sed purus erat, pretium ut interdum '
                       'et, aliquet sed mauris. Curabitur vitae turpis euismod, hendrerit mi a, rhoncus justo. Mauris '
                       'sollicitudin, nisl sit amet feugiat pharetra, odio ligula congue tellus, vel pellentesque '
                       'libero leo id dui. Morbi vel risus vehicula, consectetur mauris eget, gravida ligula. '
                       'Maecenas aliquam velit sit amet nisi ultricies, ac sollicitudin nisi mollis. Lorem ipsum '
                       'dolor sit amet, consectetur adipiscing elit. Ut tincidunt, nisi in ullamcorper ornare, '
                       'est enim dictum massa, id aliquet justo magna in purus.')
        expected_result = base58.b58encode(long_string)
        result = self.run_smart_contract(engine, path, 'Main', long_string,
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

    def test_base58_encode_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('Base58EncodeMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_base58_decode(self):
        """Contract must round-trip values encoded with the base58 package."""
        import base58
        path = self.get_contract_path('Base58Decode.py')
        engine = TestEngine()
        arg = base58.b58encode('unit test')
        result = self.run_smart_contract(engine, path, 'Main', arg)
        self.assertEqual('unit test', result)

        arg = base58.b58encode('')
        result = self.run_smart_contract(engine, path, 'Main', arg)
        self.assertEqual('', result)

        long_string = ('Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nullam accumsan magna eu massa '
                       'vulputate bibendum. Aliquam commodo euismod tristique. Sed purus erat, pretium ut interdum '
                       'et, aliquet sed mauris. Curabitur vitae turpis euismod, hendrerit mi a, rhoncus justo. Mauris '
                       'sollicitudin, nisl sit amet feugiat pharetra, odio ligula congue tellus, vel pellentesque '
                       'libero leo id dui. Morbi vel risus vehicula, consectetur mauris eget, gravida ligula. '
                       'Maecenas aliquam velit sit amet nisi ultricies, ac sollicitudin nisi mollis. Lorem ipsum '
                       'dolor sit amet, consectetur adipiscing elit. Ut tincidunt, nisi in ullamcorper ornare, '
                       'est enim dictum massa, id aliquet justo magna in purus.')
        arg = base58.b58encode(long_string)
        result = self.run_smart_contract(engine, path, 'Main', arg)
        self.assertEqual(long_string, result)

    def test_base58_decode_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('Base58DecodeMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_serialize_int(self):
        """Serializing an int in-contract must match the local serialize() helper."""
        path = self.get_contract_path('SerializeInt.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'serialize_int',
                                         expected_result_type=bytes)
        expected_result = serialize(42)
        self.assertEqual(expected_result, result)

    def test_serialize_bool(self):
        """Serializing a bool in-contract must match the local serialize() helper."""
        path = self.get_contract_path('SerializeBool.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'serialize_bool',
                                         expected_result_type=bytes)
        expected_result = serialize(True)
        self.assertEqual(expected_result, result)

    def test_serialize_str(self):
        """Serializing a str in-contract must match the local serialize() helper."""
        path = self.get_contract_path('SerializeStr.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'serialize_str',
                                         expected_result_type=bytes)
        expected_result = serialize('42')
        self.assertEqual(expected_result, result)

    def test_serialize_sequence(self):
        """Serializing a sequence in-contract must match the local serialize() helper."""
        path = self.get_contract_path('SerializeSequence.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'serialize_sequence',
                                         expected_result_type=bytes)
        expected_result = serialize([2, 3, 5, 7])
        self.assertEqual(expected_result, result)

    def test_serialize_dict(self):
        """Serializing a dict in-contract must match the local serialize() helper."""
        path = self.get_contract_path('SerializeDict.py')
        self.compile_and_save(path)

        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'serialize_dict',
                                         expected_result_type=bytes)
        expected_result = serialize({1: 1, 2: 1, 3: 2})
        self.assertEqual(expected_result, result)

    def test_deserialize(self):
        """Deserializing in-contract must reconstruct ints, bools, strings, bytes, lists and dicts."""
        path = self.get_contract_path('Deserialize.py')
        self.compile_and_save(path)

        engine = TestEngine()
        expected_result = 42
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value,
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        expected_result = True
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value)
        # it shouldn't be equal to the conversion, because it converts as an int instead of a boolean
        self.assertEqual(expected_result, result)
        self.assertNotEqual(type(expected_result), type(result))

        # Force the serialized payload to carry the Boolean stack item marker.
        value = StackItemType.Boolean + value[1:]
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value,
                                         expected_result_type=bool)
        self.assertEqual(expected_result, result)
        self.assertEqual(type(expected_result), type(result))

        expected_result = '42'
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value)
        self.assertEqual(expected_result, result)

        expected_result = b'42'
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value,
                                         expected_result_type=bytes)
        self.assertEqual(expected_result, result)

        expected_result = [1, '2', b'3']
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value)
        # bytes elements come back as strings from the engine
        expected_result[2] = String.from_bytes(expected_result[2])
        self.assertEqual(expected_result, result)

        expected_result = {'int': 1, 'str': '2', 'bytes': b'3'}
        value = serialize(expected_result)
        result = self.run_smart_contract(engine, path, 'deserialize_arg', value)
        # bytes values come back as strings from the engine
        expected_result['bytes'] = String.from_bytes(expected_result['bytes'])
        self.assertEqual(expected_result, result)

    def test_deserialize_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('DeserializeMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)

    def test_boa2_serialization_test1(self):
        """Ported boa2 serialization test, case 1: serialized nested list."""
        path = self.get_contract_path('SerializationBoa2Test.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 1, expected_result_type=bytes)
        expected_result = serialize(['a', 3, ['j', 3, 5], 'jk', 'lmnopqr'])
        self.assertEqual(expected_result, result)

    def test_boa2_serialization_test2(self):
        """Ported boa2 serialization test, case 2: serialized nested list."""
        path = self.get_contract_path('SerializationBoa2Test.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 2, expected_result_type=bytes)
        expected_result = serialize(['a', 3, ['j', 3, 5], 'jk', 'lmnopqr'])
        self.assertEqual(expected_result, result)

    def test_boa2_serialization_test3(self):
        """Ported boa2 serialization test, case 3: full round-trip of the list."""
        path = self.get_contract_path('SerializationBoa2Test.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 3)
        self.assertEqual(['a', 3, ['j', 3, 5], 'jk', 'lmnopqr'], result)

    def test_boa2_serialization_test4(self):
        """Ported boa2 serialization test, case 4: round-trip of a nested element."""
        path = self.get_contract_path('SerializationBoa2Test.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', 4)
        self.assertEqual(['j', 3, 5], result)

    def test_atoi(self):
        """atoi must parse base-10 and base-16 strings and fault on bad input or bases."""
        path = self.get_contract_path('Atoi.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', '10', 10)
        self.assertEqual(10, result)

        result = self.run_smart_contract(engine, path, 'main', '10', 16)
        self.assertEqual(16, result)

        result = self.run_smart_contract(engine, path, 'main', '123', 10)
        self.assertEqual(123, result)

        result = self.run_smart_contract(engine, path, 'main', '123', 16)
        self.assertEqual(291, result)

        result = self.run_smart_contract(engine, path, 'main', '1f', 16)
        self.assertEqual(31, result)

        # 'ff' is interpreted as a signed byte, hence -1
        result = self.run_smart_contract(engine, path, 'main', 'ff', 16)
        self.assertEqual(-1, result)

        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'main', 'string', 10)

        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'main', 'string', 16)

        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'main', 'abc', 10)

        # only bases 10 and 16 are accepted
        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'main', '10', 2)

    def test_atoi_default(self):
        """atoi without an explicit base must default to base 10."""
        path = self.get_contract_path('AtoiDefault.py')
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'main', '10')
        self.assertEqual(10, result)

        result = self.run_smart_contract(engine, path, 'main', '123')
        self.assertEqual(123, result)

        with self.assertRaises(TestExecutionException, msg=self.ASSERT_RESULTED_FALSE_MSG):
            self.run_smart_contract(engine, path, 'main', 'string')

    def test_atoi_too_few_parameters(self):
        """Calling atoi with too few arguments must fail compilation."""
        path = self.get_contract_path('AtoiTooFewArguments.py')
        self.assertCompilerLogs(UnfilledArgument, path)

    def test_atoi_too_many_parameters(self):
        """Calling atoi with too many arguments must fail compilation."""
        path = self.get_contract_path('AtoiTooManyArguments.py')
        self.assertCompilerLogs(UnexpectedArgument, path)

    def test_atoi_mismatched_type(self):
        """Passing a wrongly-typed argument must be rejected at compile time."""
        path = self.get_contract_path('AtoiMismatchedType.py')
        self.assertCompilerLogs(MismatchedTypes, path)
| 50.274143 | 125 | 0.650886 |
cd8f8e1135d2d5d0259f4f7c17a92f0cddf39c40 | 1,767 | py | Python | arviz/tests/base_tests/test_stats_numba.py | FabioRosado/arviz | 6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee | [
"Apache-2.0"
] | 1 | 2021-07-10T19:08:49.000Z | 2021-07-10T19:08:49.000Z | arviz/tests/base_tests/test_stats_numba.py | FabioRosado/arviz | 6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee | [
"Apache-2.0"
] | null | null | null | arviz/tests/base_tests/test_stats_numba.py | FabioRosado/arviz | 6b958cc5abeb0cc3a1186f4a3dbeeaba73b899ee | [
"Apache-2.0"
] | 1 | 2019-03-02T03:23:12.000Z | 2019-03-02T03:23:12.000Z | # pylint: disable=redefined-outer-name, no-member
import importlib
import numpy as np
import pytest
from ...rcparams import rcParams
from ...stats import r2_score, summary
from ...utils import Numba
from ..helpers import ( # pylint: disable=unused-import
check_multiple_attrs,
multidim_models,
running_on_ci,
)
from .test_stats import centered_eight, non_centered_eight # pylint: disable=unused-import
# Skip this whole module when numba is absent — except on CI, where the
# dependency is expected and a silent skip would hide a real failure.
pytestmark = pytest.mark.skipif(  # pylint: disable=invalid-name
    (importlib.util.find_spec("numba") is None) and not running_on_ci(),
    reason="test requires numba which is not installed",
)

# Load data eagerly so fixtures hand concrete values to the tests.
rcParams["data.load"] = "eager"
@pytest.mark.parametrize("circ_var_names", [["mu"], None])
def test_summary_circ_vars(centered_eight, circ_var_names):
    """summary() must handle circular variables with numba enabled and disabled.

    Bug fix: the second assertion previously compared the result with the
    ``NotImplementedError`` class via ``is not``, which is vacuously true
    for any return value. It now checks the result is not None, mirroring
    the first assertion.
    """
    assert summary(centered_eight, circ_var_names=circ_var_names) is not None
    state = Numba.numba_flag  # remember so we can verify it is restored
    Numba.disable_numba()
    assert summary(centered_eight, circ_var_names=circ_var_names) is not None
    Numba.enable_numba()
    assert state == Numba.numba_flag
def test_numba_stats():
    """r2_score must agree with and without numba, and leave the flag intact."""
    initial_flag = Numba.numba_flag  # snapshot so we can verify restoration
    matrix_a = np.random.randn(100, 100)
    matrix_b = np.random.randn(100, 100)
    vector_a = np.random.rand(100)
    vector_b = np.random.rand(100)
    Numba.disable_numba()
    plain_2d = r2_score(matrix_a, matrix_b)
    plain_1d = r2_score(vector_a, vector_b)
    Numba.enable_numba()
    jit_2d = r2_score(matrix_a, matrix_b)
    jit_1d = r2_score(vector_a, vector_b)
    assert initial_flag == Numba.numba_flag  # ensure initial state = final state
    assert np.allclose(plain_2d, jit_2d)
    assert np.allclose(plain_1d, jit_1d)
7afe7c5c9815e6ab15a0c557fa61aa68a1707124 | 10,411 | py | Python | src/rosjava_build_tools/create_rosjava_project.py | CentralLabFacilities/rosjava_build_tools | 5a8fd3018756f3f87b23104553e3d17be53b69ee | [
"Apache-2.0"
] | 1 | 2021-07-09T08:11:06.000Z | 2021-07-09T08:11:06.000Z | src/rosjava_build_tools/create_rosjava_project.py | CentralLabFacilities/rosjava_build_tools | 5a8fd3018756f3f87b23104553e3d17be53b69ee | [
"Apache-2.0"
] | null | null | null | src/rosjava_build_tools/create_rosjava_project.py | CentralLabFacilities/rosjava_build_tools | 5a8fd3018756f3f87b23104553e3d17be53b69ee | [
"Apache-2.0"
] | 2 | 2020-08-10T13:57:51.000Z | 2021-07-08T12:46:45.000Z | #!/usr/bin/env python
##############################################################################
# Imports
##############################################################################
from __future__ import print_function
import os
import re
import sys
import argparse
import xml.etree.ElementTree as ElementTree
# local imports
from rosjava_build_tools import utils
from rosjava_build_tools import console
##############################################################################
# Methods
##############################################################################
def parse_arguments():
    """Parse the command line and return the resulting namespace.

    Accepts a single positional package name and an optional -a/--author
    (defaulting to the configured author name).
    """
    parser = argparse.ArgumentParser(
        description='Creates a new rosjava package based on catkin and gradle. \n')
    parser.add_argument('name', nargs=1, help='The name for the package')
    parser.add_argument('-a', '--author',
                        action='store',
                        default=utils.author_name(),
                        help='A single author, may be used multiple times')
    return parser.parse_args(sys.argv[1:])
##############################################################################
# Methods acting on classes
##############################################################################
# This inserts the labelled variables into the template wherever the corresponding
# %package, %brief, %description and %depends is found.
def instantiate_template(template, project_name, author):
    """Fill a %(name)s-style template with the project name and author.

    :param template: template text containing ``%(project_name)s`` /
        ``%(author)s`` placeholders
    :param project_name: value substituted for ``%(project_name)s``
    :param author: value substituted for ``%(author)s``
    :returns: the instantiated template string

    Previously this used ``template % locals()``, which silently exposed
    every local (including ``template`` itself) to substitution; an
    explicit mapping makes the contract visible and robust.
    """
    return template % {'project_name': project_name, 'author': author}
def instantiate_code_template(template, package_name, project_name, author):
    """Fill a %(name)s-style code template with package, project and author.

    :param template: template text containing ``%(package_name)s``,
        ``%(project_name)s`` and/or ``%(author)s`` placeholders
    :returns: the instantiated template string

    Uses an explicit mapping instead of the previous ``% locals()`` so the
    substitutable names are part of the visible contract.
    """
    return template % {
        'package_name': package_name,
        'project_name': project_name,
        'author': author,
    }
def create_gradle_package_files(args, template_directory):
    '''
    Instantiate and write the gradle build files for a new package.

    This is almost a direct copy from catkin_create_pkg.

    Bug fix: the old ``try: f = open(...) ... finally: f.close()`` raised a
    spurious NameError when ``open()`` itself failed (``f`` was never
    bound); ``with`` closes the file reliably without that hazard. The
    no-op ``except Exception: raise`` wrapper has also been dropped.
    '''
    project_name = args.name[0].lower()
    package_path = os.path.abspath(os.path.join(os.getcwd(), project_name))
    for template_name in ['build.gradle']:  # 'CMakeLists.txt']:
        filename = os.path.join(package_path, template_name)
        template = utils.read_template_file(template_directory, template_name)
        contents = instantiate_template(template, project_name, args.author)
        with open(filename, 'w') as f:
            f.write(contents)
        console.pretty_print(' File : ', console.cyan)
        console.pretty_println(template_name, console.yellow)
def create_talker_listener_classes(project_name, template_directory, author):
    """Generate Talker.java and Listener.java into the project's source tree.

    :param project_name: project (sub)directory the classes are written under
    :param template_directory: directory holding the java templates
    :param author: author name substituted into the templates

    Bug fix: replaced the ``try/finally: f.close()`` pattern (which raised
    NameError if ``open()`` failed before ``f`` was bound) with ``with``,
    and removed the no-op ``except Exception: raise`` wrapper.
    """
    path = os.path.join(os.getcwd(), project_name.lower())
    package_name = os.path.basename(os.getcwd())
    java_package_path = os.path.join(path, 'src', 'main', 'java', 'com', 'github', package_name, project_name)
    utils.mkdir_p(java_package_path)
    for template_name in ['Talker.java', 'Listener.java']:
        filename = os.path.join(java_package_path, template_name)
        template = utils.read_template_file(template_directory, template_name)
        contents = instantiate_code_template(template, package_name, project_name, author)
        with open(filename, 'w') as f:
            f.write(contents)
        console.pretty_print(' File : ', console.cyan)
        console.pretty_println(template_name, console.yellow)
def add_to_root_gradle_settings(name):
    '''
    Adds project name to the root level settings.gradle file.
    '''
    candidates = (os.path.join(os.getcwd(), rel, 'settings.gradle') for rel in ('.', '..'))
    settings_gradle_path = next((p for p in candidates if os.path.isfile(p)), None)
    if settings_gradle_path is None:
        console.pretty_println("\nCouldn't find the root level settings.gradle file - not adding to the superproject.")
        return
    with open(settings_gradle_path, 'a') as settings_gradle:
        console.pretty_print(' File : ', console.cyan)
        console.pretty_println('settings.gradle', console.yellow)
        settings_gradle.write("include '%s'\n" % name)
def add_catkin_generate_tree_command():
    """Insert ``project.catkin.tree.generate()`` after the catkin plugin line
    in the root build.gradle, if one can be found in '.' or '..'."""
    candidates = (os.path.join(os.getcwd(), rel, 'build.gradle') for rel in ('.', '..'))
    build_gradle_path = next((p for p in candidates if os.path.isfile(p)), None)
    if build_gradle_path is None:
        console.pretty_println("\nCouldn't find the root level build.gradle file - not adding to the superproject.")
        return
    with open(build_gradle_path, 'r') as build_gradle:
        console.pretty_print(' File : ', console.cyan)
        console.pretty_println('build.gradle (catkin_generate_tree update)', console.yellow)
        new_contents = build_gradle.read().replace("apply plugin: 'catkin'", "apply plugin: 'catkin'\nproject.catkin.tree.generate()\n")
    with open(build_gradle_path, 'w') as build_gradle:
        build_gradle.write(new_contents)
def add_to_package_xml(name):
    '''
    Adds project name to build_depends in package.xml (should be same name as the ros msg package name).
    '''
    candidates = (os.path.join(os.getcwd(), rel, 'package.xml') for rel in ('.', '..'))
    package_xml_path = next((p for p in candidates if os.path.isfile(p)), None)
    if package_xml_path is None:
        console.pretty_println("\nCouldn't find the root level package.xml file - not adding to the superproject.")
        return
    with open(package_xml_path, 'r') as package_xml:
        console.pretty_print(' File : ', console.cyan)
        console.pretty_println('package.xml (dependency update)', console.yellow)
        new_contents = package_xml.read().replace("</package>", "<build_depend>%s</build_depend>\n</package>" % name)
    with open(package_xml_path, 'w') as package_xml:
        package_xml.write(new_contents)
def add_tasks_to_cmake_setup(tasks):
    """Ensure the ``catkin_rosjava_setup(...)`` call in the root CMakeLists.txt
    lists the given gradle tasks (plus 'publish'/'install' when missing).

    :param tasks: iterable of gradle task names to add

    Bug fix: the task list used to be accumulated in a ``set`` and joined
    with ``' '.join``, so the generated CMakeLists.txt task order varied
    from run to run. An order-preserving, de-duplicating list makes the
    output deterministic while keeping the same membership semantics.
    """
    candidates = (os.path.join(os.getcwd(), rel, 'CMakeLists.txt') for rel in ('.', '..'))
    cmakelists_txt_path = next((p for p in candidates if os.path.isfile(p)), None)
    if cmakelists_txt_path is None:
        console.pretty_println("\nCouldn't find the root level CMakeLists.txt - not adding to the superproject.")
        return
    with open(cmakelists_txt_path, 'r') as cmakelists_txt:
        old_contents = cmakelists_txt.read()
    result = re.search(r'^catkin_rosjava_setup\(.*\)', old_contents, re.MULTILINE)
    if result is None:
        console.pretty_println("\nCouldn't find a catkin_rosjava_setup entry in the CMakeLists.txt - not adding tasks.")
        return
    rosjava_setup_string = result.group(0)
    gradle_tasks = []
    # Substring check (find) preserved from the original: e.g. an existing
    # 'installDist' also suppresses adding plain 'install'.
    for default_task in ['publish', 'install']:
        if rosjava_setup_string.find(default_task) == -1:
            gradle_tasks.append(default_task)
    for task in tasks:
        if task not in gradle_tasks:
            gradle_tasks.append(task)
    console.pretty_print(' File : ', console.cyan)
    console.pretty_println('CMakeLists.txt (gradle task update)', console.yellow)
    new_text = 'catkin_rosjava_setup(' + ' '.join(gradle_tasks) + ')'
    new_contents = old_contents.replace(rosjava_setup_string, new_text)
    with open(cmakelists_txt_path, 'w') as cmakelists_txt:
        cmakelists_txt.write(new_contents)
def create_dummy_java_class(project_name):
    """Write a placeholder Dude.java into the project's java source tree."""
    path = os.path.join(os.getcwd(), project_name.lower())
    package_name = os.path.basename(os.getcwd())
    java_package_path = os.path.join(path, 'src', 'main', 'java', 'com', 'github', package_name, project_name)
    utils.mkdir_p(java_package_path)
    filename = os.path.join(java_package_path, 'Dude.java')
    pieces = [
        "package com.github.%s.%s;\n" % (package_name, project_name),
        "\n",
        "public class Dude {\n",
        "}\n",
    ]
    console.pretty_print(' File : ', console.cyan)
    console.pretty_println('Dude.class', console.yellow)
    with open(filename, 'w') as dude_class:
        dude_class.write("".join(pieces))
def ros_package_name():
    """Return the <name> declared in the nearest package.xml ('.' or '..').

    Returns None (after printing a notice) when no package.xml is found.
    """
    candidates = (os.path.join(os.getcwd(), rel, 'package.xml') for rel in ('.', '..'))
    package_xml_path = next((p for p in candidates if os.path.isfile(p)), None)
    if package_xml_path is None:
        console.pretty_println("\nCouldn't find the root level package.xml file - not adding to the superproject.")
        return
    root = ElementTree.parse(package_xml_path).getroot()
    return root.find('name').text
def create_rosjava_project_common(args, template_directory):
    """Shared scaffolding: announce the project, create its directory,
    write the gradle files and register it in the root settings.gradle."""
    name = args.name[0]
    console.pretty_println("\nCreating rosjava project ", console.bold)
    console.pretty_print(" Name : ", console.cyan)
    console.pretty_println("%s" % name, console.yellow)
    utils.mkdir_p(os.path.join(os.getcwd(), name.lower()))
    create_gradle_package_files(args, template_directory)
    add_to_root_gradle_settings(name)
def create_rosjava_project():
    """Entry point: create a talker/listener rosjava project."""
    args = parse_arguments()
    create_rosjava_project_common(args, 'rosjava_project')
    create_talker_listener_classes(args.name[0], 'rosjava_project', args.author)
    add_tasks_to_cmake_setup(['installDist', 'publish'])
def create_rosjava_library_project():
    """Entry point: create a plain rosjava library project."""
    args = parse_arguments()
    create_rosjava_project_common(args, 'rosjava_library_project')
    create_dummy_java_class(args.name[0])
    add_tasks_to_cmake_setup(['publish'])
def create_rosjava_msg_project():
    """Entry point: create a rosjava message project."""
    args = parse_arguments()
    name = args.name[0]
    create_rosjava_project_common(args, 'rosjava_msg_project')
    add_catkin_generate_tree_command()
    add_to_package_xml(name)
    add_tasks_to_cmake_setup(['publish'])
| 40.509728 | 136 | 0.635386 |
67e78537e50af8000b4e82580749f1b8a8d36ef6 | 4,490 | py | Python | test/integration/ggrc/converters/test_snapshot_block.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-01-04T10:55:14.000Z | 2019-01-04T10:55:14.000Z | test/integration/ggrc/converters/test_snapshot_block.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | test/integration/ggrc/converters/test_snapshot_block.py | j0gurt/ggrc-core | 84662dc85aa8864c907eabe70b8efccf92298a1f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Tests for Snapshot block converter class."""
import mock
from ggrc import db
from ggrc.converters.snapshot_block import SnapshotBlockConverter
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.models import factories
class TestSnapshotBlockConverter(TestCase):
    """Tests for Snapshot block converter."""
    # Removing protected access checks because we wish to tests even the
    # protected functions.
    # pylint: disable=protected-access

    def test_empty_snapshots(self):
        """Test snapshots property for empty ids list."""
        converter = mock.MagicMock()
        block = SnapshotBlockConverter(converter, [])
        self.assertEqual(block.snapshots, [])

    def test_snapshots_property(self):
        """Test snapshots property and snapshot content."""
        with factories.single_commit():
            snapshots = self._create_snapshots(
                factories.AuditFactory(),
                [factories.ControlFactory()],
            )
        converter = mock.MagicMock()
        ids = [s.id for s in snapshots]
        block = SnapshotBlockConverter(converter, ids)
        self.assertEqual(block.snapshots, snapshots)
        # Every snapshot's content must carry the audit it was taken under.
        for snapshot in snapshots:
            self.assertIn("audit", snapshot.content)

    def test_valid_child_types(self):
        """Test child_type property with valid snapshots list."""
        # All snapshots share the same child type, so child_type is defined.
        with factories.single_commit():
            snapshots = self._create_snapshots(
                factories.AuditFactory(),
                [factories.ControlFactory(), factories.ControlFactory()],
            )
        converter = mock.MagicMock()
        ids = [s.id for s in snapshots]
        block = SnapshotBlockConverter(converter, ids)
        self.assertEqual(block.child_type, "Control")

    def test_empty_child_type(self):
        """Test child_type property with empty snapshots list."""
        converter = mock.MagicMock()
        block = SnapshotBlockConverter(converter, [])
        self.assertEqual(block.child_type, "")

    def test_invalid_child_types(self):
        """Test child_type property with invalid snapshots list."""
        # Mixing Control and Policy snapshots in one block is not allowed,
        # so accessing child_type must trip its internal assertion.
        with factories.single_commit():
            snapshots = self._create_snapshots(
                factories.AuditFactory(),
                [factories.ControlFactory(), factories.PolicyFactory()],
            )
        converter = mock.MagicMock()
        ids = [s.id for s in snapshots]
        block = SnapshotBlockConverter(converter, ids)
        with self.assertRaises(AssertionError):
            block.child_type = block.child_type

    def test_attribute_name_map(self):
        """Test Control snapshot name map and order."""
        converter = mock.MagicMock()
        block = SnapshotBlockConverter(converter, [])
        block.child_type = "Control"
        # Fixed columns in their expected export order.
        expected_attrs = [
            ('slug', 'Code'),
            ('audit', 'Audit'),  # inserted attribute
            ('revision_date', 'Revision Date'),  # inserted attribute
            ('title', 'Title'),
            ('description', 'Description'),
            ('notes', 'Notes'),
            ('test_plan', 'Assessment Procedure'),
            ('start_date', 'Effective Date'),
            ('end_date', 'Last Deprecated Date'),
            ('archived', 'Archived'),
            ('status', 'State'),
            ('review_status', 'Review State'),
            ('reviewers', 'Reviewers'),
            ('assertions', 'Assertions'),
            ('categories', 'Categories'),
            ('fraud_related', 'Fraud Related'),
            ('key_control', 'Significance'),
            ('kind', 'Kind/Nature'),
            ('means', 'Type/Means'),
            ('verify_frequency', 'Frequency'),
            ('recipients', 'Recipients'),
            ('send_by_default', 'Send by default'),
            ('documents_file', 'Document File'),
            ('documents_reference_url', 'Reference URL'),
            ('updated_at', 'Last Updated Date'),
            ('modified_by', 'Last Updated By'),
            ('created_at', 'Created Date'),
            ('folder', "Folder"),
        ]
        # Non-internal access-control roles for Control are appended
        # alphabetically after the fixed columns.
        ac_roles = db.session.query(all_models.AccessControlRole.name).filter(
            all_models.AccessControlRole.object_type == "Control",
            all_models.AccessControlRole.internal == 0,
        ).all()
        expected_attrs += sorted(
            ("__acl__:{}".format(role[0]), role[0]) for role in ac_roles
        )
        # last_assessment_date and comments should be in the end
        # according to current order
        expected_attrs.append(('comments', 'Comments'))
        expected_attrs.append(('last_assessment_date', 'Last Assessment Date'))
        self.assertEqual(
            block._attribute_name_map.items(),
            expected_attrs
        )
| 36.209677 | 78 | 0.661693 |
efe07bbef91e7259a1a8d7f80d1923f35c8bb9e2 | 2,649 | py | Python | apps/Products/migrations/0001_initial.py | ulibn/BlueXolo | 2560593fdb5a26fe90c65aa711a8eb6db96d03f1 | [
"Apache-2.0"
] | 21 | 2018-12-05T16:16:58.000Z | 2021-10-07T22:51:26.000Z | apps/Products/migrations/0001_initial.py | ulibn/BlueXolo | 2560593fdb5a26fe90c65aa711a8eb6db96d03f1 | [
"Apache-2.0"
] | 437 | 2018-12-11T19:36:38.000Z | 2022-02-04T16:28:14.000Z | apps/Products/migrations/0001_initial.py | ulibn/BlueXolo | 2560593fdb5a26fe90c65aa711a8eb6db96d03f1 | [
"Apache-2.0"
] | 29 | 2019-01-24T16:46:33.000Z | 2021-12-21T06:45:50.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-22 01:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration for the Products app: creates the
    # arguments, commands and sources tables, then wires commands to sources.

    initial = True

    dependencies = [
    ]

    operations = [
        # arguments: named flags/options a command may take.
        migrations.CreateModel(
            name='Argument',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='name')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('requirement', models.BooleanField(default=False)),
                ('needs_value', models.BooleanField(default=False)),
            ],
            options={
                'verbose_name': 'argument',
                'verbose_name_plural': 'arguments',
                'db_table': 'arguments',
            },
        ),
        # commands: each command may reference many arguments.
        migrations.CreateModel(
            name='Command',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, verbose_name='command')),
                ('description', models.TextField(blank=True, verbose_name='description')),
                ('arguments', models.ManyToManyField(blank=True, to='Products.Argument')),
            ],
            options={
                'verbose_name': 'command',
                'verbose_name_plural': 'commands',
                'db_table': 'commands',
            },
        ),
        # sources: categorized origins; may depend on other sources.
        migrations.CreateModel(
            name='Source',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30, verbose_name='source name')),
                ('version', models.CharField(max_length=30, verbose_name='version')),
                ('category', models.IntegerField(choices=[(1, 'Flow Sentences'), (2, 'OS'), (3, 'Product'), (4, 'Robot Framework'), (5, 'External Libraries')], verbose_name='category')),
                ('depends', models.ManyToManyField(blank=True, to='Products.Source')),
            ],
            options={
                'verbose_name': 'source',
                'verbose_name_plural': 'sources',
                'db_table': 'sources',
            },
        ),
        # Added after Source exists so the M2M can resolve the model.
        migrations.AddField(
            model_name='command',
            name='source',
            field=models.ManyToManyField(blank=True, to='Products.Source'),
        ),
    ]
| 40.136364 | 186 | 0.547376 |
2bbfe4ad1b56fed16f7b9b7acce1374d789de0a0 | 1,727 | py | Python | tests/test_graph.py | stoyan3d/python-algorithms | 1d18647c821d4dac04b7d7c98443a3218f6348a6 | [
"MIT"
] | null | null | null | tests/test_graph.py | stoyan3d/python-algorithms | 1d18647c821d4dac04b7d7c98443a3218f6348a6 | [
"MIT"
] | null | null | null | tests/test_graph.py | stoyan3d/python-algorithms | 1d18647c821d4dac04b7d7c98443a3218f6348a6 | [
"MIT"
] | null | null | null | import unittest
from datastructures.graph import GraphVertex, WeightedGraphVertex
from algorithms.graph_search import depth_first_search, breadth_first_search, dijkstra_shortest_path
class TestGraphVertex(unittest.TestCase):
    """Exercise DFS and BFS over a small three-node friendship graph."""

    def setUp(self):
        # Graph shape: Mohammad -> {Felicia, Zei}, Felicia -> {Zei}.
        self.root = GraphVertex("Mohammad")
        friend_a = GraphVertex("Felicia")
        friend_b = GraphVertex("Zei")
        self.root.add_adjacent(friend_a)
        self.root.add_adjacent(friend_b)
        friend_a.add_adjacent(friend_b)

    def test_dfs(self):
        for target in ("Zei", "Felicia"):
            self.assertEqual(target, depth_first_search(self.root, target).value)
        self.assertIsNone(depth_first_search(self.root, "Tom"))

    def test_bfs(self):
        for target in ("Zei", "Felicia"):
            self.assertEqual(target, breadth_first_search(self.root, target).value)
        self.assertIsNone(breadth_first_search(self.root, "Tom"))
class TestWeightedGraphVertex(unittest.TestCase):
    """Exercise Dijkstra's shortest path over a weighted city graph."""

    def test_dijkstra_shortest_path(self):
        names = ("Atlanta", "Boston", "Chicago", "Denver", "El Paso")
        cities = {name: WeightedGraphVertex(name) for name in names}
        # (source, destination, weight) — insertion order preserved per vertex.
        edges = [
            ("Atlanta", "Boston", 100),
            ("Atlanta", "Denver", 160),
            ("Boston", "Chicago", 120),
            ("Boston", "Denver", 180),
            ("Chicago", "El Paso", 80),
            ("Denver", "Chicago", 40),
            ("Denver", "El Paso", 140),
        ]
        for src, dst, weight in edges:
            cities[src].add_adjacent(cities[dst], weight)
        self.assertListEqual(
            ["Atlanta", "Denver", "Chicago", "El Paso"],
            dijkstra_shortest_path(cities["Atlanta"], cities["El Paso"]),
        )
| 39.25 | 115 | 0.70469 |
119f70a9332ef4ab6268daba30f1431daeeb22e7 | 7,512 | py | Python | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | fooka03/python | 073cf4d89e532f92b57e8955b4efc3d5d5eb80cf | [
"Apache-2.0"
] | 2 | 2020-07-02T05:47:41.000Z | 2020-07-02T05:50:34.000Z | kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | fooka03/python | 073cf4d89e532f92b57e8955b4efc3d5d5eb80cf | [
"Apache-2.0"
] | 1 | 2021-03-25T23:44:49.000Z | 2021-03-25T23:44:49.000Z | k8sdeployment/k8sstat/python/kubernetes/client/models/v1_gce_persistent_disk_volume_source.py | JeffYFHuang/gpuaccounting | afa934350ebbd0634beb60b9df4a147426ea0006 | [
"MIT"
] | 1 | 2021-10-13T17:45:37.000Z | 2021-10-13T17:45:37.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.15.6
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class V1GCEPersistentDiskVolumeSource(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared OpenAPI type; consumed by to_dict().
    openapi_types = {
        'fs_type': 'str',
        'partition': 'int',
        'pd_name': 'str',
        'read_only': 'bool'
    }

    # Python attribute name -> JSON key in the API payload.
    attribute_map = {
        'fs_type': 'fsType',
        'partition': 'partition',
        'pd_name': 'pdName',
        'read_only': 'readOnly'
    }

    def __init__(self, fs_type=None, partition=None, pd_name=None, read_only=None):  # noqa: E501
        """V1GCEPersistentDiskVolumeSource - a model defined in OpenAPI"""  # noqa: E501

        self._fs_type = None
        self._partition = None
        self._pd_name = None
        self._read_only = None
        self.discriminator = None

        if fs_type is not None:
            self.fs_type = fs_type
        if partition is not None:
            self.partition = partition
        # pd_name is required: assigning unconditionally routes through the
        # setter, which raises ValueError when the value is None.
        self.pd_name = pd_name
        if read_only is not None:
            self.read_only = read_only

    @property
    def fs_type(self):
        """Gets the fs_type of this V1GCEPersistentDiskVolumeSource.  # noqa: E501

        Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :return: The fs_type of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._fs_type

    @fs_type.setter
    def fs_type(self, fs_type):
        """Sets the fs_type of this V1GCEPersistentDiskVolumeSource.

        Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :param fs_type: The fs_type of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :type: str
        """

        self._fs_type = fs_type

    @property
    def partition(self):
        """Gets the partition of this V1GCEPersistentDiskVolumeSource.  # noqa: E501

        The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :return: The partition of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :rtype: int
        """
        return self._partition

    @partition.setter
    def partition(self, partition):
        """Sets the partition of this V1GCEPersistentDiskVolumeSource.

        The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :param partition: The partition of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :type: int
        """

        self._partition = partition

    @property
    def pd_name(self):
        """Gets the pd_name of this V1GCEPersistentDiskVolumeSource.  # noqa: E501

        Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :return: The pd_name of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :rtype: str
        """
        return self._pd_name

    @pd_name.setter
    def pd_name(self, pd_name):
        """Sets the pd_name of this V1GCEPersistentDiskVolumeSource.

        Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :param pd_name: The pd_name of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :type: str
        """
        # Required field in the OpenAPI spec — reject None explicitly.
        if pd_name is None:
            raise ValueError("Invalid value for `pd_name`, must not be `None`")  # noqa: E501

        self._pd_name = pd_name

    @property
    def read_only(self):
        """Gets the read_only of this V1GCEPersistentDiskVolumeSource.  # noqa: E501

        ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :return: The read_only of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :rtype: bool
        """
        return self._read_only

    @read_only.setter
    def read_only(self, read_only):
        """Sets the read_only of this V1GCEPersistentDiskVolumeSource.

        ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk  # noqa: E501

        :param read_only: The read_only of this V1GCEPersistentDiskVolumeSource.  # noqa: E501
        :type: bool
        """

        self._read_only = read_only

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, V1GCEPersistentDiskVolumeSource):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 37.56 | 367 | 0.638845 |
55e8bc2df1210a67382cce04e96ac33c04962da0 | 450 | py | Python | e2elink/synthetic/fakers/dategenerator.py | ersilia-os/cidrz-e2e-linkage | 840581cdb90617f3ceb1be898992f0a8df71f9e3 | [
"MIT"
] | null | null | null | e2elink/synthetic/fakers/dategenerator.py | ersilia-os/cidrz-e2e-linkage | 840581cdb90617f3ceb1be898992f0a8df71f9e3 | [
"MIT"
] | null | null | null | e2elink/synthetic/fakers/dategenerator.py | ersilia-os/cidrz-e2e-linkage | 840581cdb90617f3ceb1be898992f0a8df71f9e3 | [
"MIT"
] | null | null | null | from random import randrange
from datetime import timedelta, datetime
class DateGenerator(object):
def __init__(self):
pass
def sample(self, start, end):
start = datetime.strptime(start, "%Y-%m-%d")
end = datetime.strptime(end, "%Y-%m-%d")
delta = end - start
int_delta = delta.days * 24 * 60 * 60
random_second = randrange(int_delta)
return start + timedelta(seconds=random_second)
| 28.125 | 55 | 0.635556 |
cee31a8783365bbff4a46ece74901c86292c254e | 2,838 | py | Python | tests/kyu_3_tests/test_finding_an_appointment.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | tests/kyu_3_tests/test_finding_an_appointment.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | tests/kyu_3_tests/test_finding_an_appointment.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | import unittest
from katas.kyu_3.finding_an_appointment import get_start_time
class GetStartTimeTestCase(unittest.TestCase):
    """Tests for get_start_time.

    Each schedule is a list of per-person busy intervals ['HH:MM', 'HH:MM'];
    get_start_time is expected to return the earliest start ('HH:MM') of a
    slot of the requested duration (minutes) free for everyone, or None.
    # NOTE(review): the working day appears to be 09:00-19:00 based on the
    # expected values below — confirm against the implementation.
    """

    def setUp(self):
        # Three people, few busy blocks each.
        self.schedule_1 = [
            [['09:00', '11:30'], ['13:30', '16:00'], ['16:00', '17:30'],
             ['17:45', '19:00']],
            [['09:15', '12:00'], ['14:00', '16:30'], ['17:00', '17:30']],
            [['11:30', '12:15'], ['15:00', '16:30'], ['17:45', '19:00']]
        ]
        # Five people with dense, irregular busy blocks.
        self.schedule_2 = [
            [['10:07', '10:39'], ['10:41', '11:03'], ['12:21', '12:22'],
             ['15:49', '16:11'], ['17:29', '17:54']],
            [['09:37', '11:19'], ['11:27', '13:37'], ['16:29', '17:41']],
            [['09:48', '12:26'], ['15:41', '15:59'], ['18:50', '18:57']],
            [['09:41', '09:57'], ['10:03', '10:14'], ['10:32', '10:39'],
             ['10:56', '11:17'], ['11:23', '11:41'], ['11:59', '12:03'],
             ['12:28', '12:45'], ['17:19', '17:27'], ['18:56', '18:57']],
            [['11:21', '12:42'], ['12:51', '13:20'], ['17:51', '17:53'],
             ['18:07', '18:11']]
        ]
        # Two people; narrow common gaps for boundary testing.
        self.schedule_3 = [
            [['09:09', '11:27'], ['12:14', '13:41'], ['15:16', '17:17'],
             ['17:32', '18:50']],
            [['10:38', '12:06'], ['13:39', '15:08'], ['17:23', '17:26'],
             ['18:02', '18:26']]
        ]

    def test_equal_1(self):
        self.assertEqual(get_start_time(self.schedule_1, 60), '12:15')

    def test_equal_2(self):
        # 75 minutes still fits in the same 12:15 gap.
        self.assertEqual(get_start_time(self.schedule_1, 75), '12:15')

    def test_equal_3(self):
        # Everyone is free at the start of the day.
        self.assertEqual(get_start_time([
            [['10:00', '13:00'], ['14:00', '17:00'], ['18:00', '19:00']],
            [['10:00', '11:00'], ['12:00', '13:00'], ['14:00', '15:00'],
             ['16:00', '17:00'], ['18:00', '19:00']]
        ], 60), '09:00')

    def test_equal_4(self):
        self.assertEqual(get_start_time(self.schedule_2, 37), '09:00')

    def test_equal_5(self):
        # One minute more than the opening gap pushes to the next slot.
        self.assertEqual(get_start_time(self.schedule_2, 38), '13:37')

    def test_equal_6(self):
        self.assertEqual(get_start_time(self.schedule_2, 124), '13:37')

    def test_equal_7(self):
        self.assertEqual(get_start_time(self.schedule_3, 9), '09:00')

    def test_equal_8(self):
        self.assertEqual(get_start_time(self.schedule_3, 10), '18:50')

    def test_is_none_1(self):
        # No common gap of 76 minutes exists.
        self.assertIsNone(get_start_time(self.schedule_1, 76))

    def test_is_none_2(self):
        self.assertIsNone(get_start_time(self.schedule_1, 90))

    def test_is_none_3(self):
        # One person is busy all day, so even one minute cannot be scheduled.
        self.assertIsNone(get_start_time(
            [[['09:00', '19:00']], [], [], []], 1
        ))

    def test_is_none_4(self):
        self.assertIsNone(get_start_time(self.schedule_2, 125))

    def test_is_none_5(self):
        self.assertIsNone(get_start_time(self.schedule_3, 11))
| 37.342105 | 73 | 0.501762 |
05978404cba7df2997f324f7909485be2ca3c44b | 388 | py | Python | main.py | cls1991/12306-ticket-accelerator | b17fff9c998b851fdfd9c37eb78a9c15bfb4f7eb | [
"Apache-2.0"
] | null | null | null | main.py | cls1991/12306-ticket-accelerator | b17fff9c998b851fdfd9c37eb78a9c15bfb4f7eb | [
"Apache-2.0"
] | null | null | null | main.py | cls1991/12306-ticket-accelerator | b17fff9c998b851fdfd9c37eb78a9c15bfb4f7eb | [
"Apache-2.0"
] | null | null | null | # coding: utf8
import os
from core.query import query_tickets
# Switch the working directory to the project root (the directory
# containing this file), so relative paths resolve consistently.
project = os.path.split(os.path.realpath(__file__))[0]
os.chdir(project)
def main():
    """Demo entry point: query Beijing -> Wuhan tickets for a fixed date
    and subscribe to updates."""
    departure = '北京'
    destination = '武汉'
    travel_date = '2017-12-16'
    query_tickets(departure, destination, travel_date, is_subscribe=True)
if __name__ == '__main__':
    # Script entry point.
    main()
| 16.166667 | 74 | 0.667526 |
06f81183fa4711af7e639b4ac758ebf690c24ea1 | 668 | py | Python | src/factiva/pipelines/const/__init__.py | dowjones/factiva-pipelines-python | 053aa10bba195f15a039e1974cb2f2357b67b311 | [
"MIT"
] | 1 | 2021-06-04T10:18:38.000Z | 2021-06-04T10:18:38.000Z | src/factiva/pipelines/const/__init__.py | wizeline/factiva-pipelines-python | 053aa10bba195f15a039e1974cb2f2357b67b311 | [
"MIT"
] | null | null | null | src/factiva/pipelines/const/__init__.py | wizeline/factiva-pipelines-python | 053aa10bba195f15a039e1974cb2f2357b67b311 | [
"MIT"
] | 3 | 2021-03-27T11:36:48.000Z | 2022-02-16T12:55:43.000Z | # Fields to be used for statistical purposes, no content is loaded
# Metadata-only columns retained for statistical analysis (no article body).
ARTICLE_STATS_FIELDS = ['an', 'company_codes', 'company_codes_about',
                        'company_codes_occur', 'industry_codes', 'ingestion_datetime',
                        'language_code', 'modification_datetime',
                        'publication_datetime', 'publisher_name', 'region_codes',
                        'region_of_origin', 'source_code', 'source_name',
                        'subject_codes', 'title', 'word_count']
# Fields that commonly are empty, or that are deprecated
ARTICLE_DELETE_FIELDS = ['art', 'credit', 'document_type', 'publication_date', 'modification_date']
| 60.727273 | 99 | 0.636228 |
ccef969fd1f84d45fab64d85110c07787dc83b65 | 2,843 | py | Python | terroroftinytown/test/random_result.py | Flashfire42/terroroftinytown | c52be7ac0f7abc37f4c90955e5c96b91f935903a | [
"MIT"
] | 59 | 2015-03-05T21:30:06.000Z | 2022-01-31T05:50:34.000Z | terroroftinytown/test/random_result.py | Flashfire42/terroroftinytown | c52be7ac0f7abc37f4c90955e5c96b91f935903a | [
"MIT"
] | 33 | 2015-01-10T02:27:08.000Z | 2022-02-08T18:26:56.000Z | terroroftinytown/test/random_result.py | Flashfire42/terroroftinytown | c52be7ac0f7abc37f4c90955e5c96b91f935903a | [
"MIT"
] | 16 | 2015-03-06T19:51:53.000Z | 2021-11-16T03:50:52.000Z | import random, hashlib, datetime
from sqlalchemy.sql.expression import insert
from terroroftinytown.tracker.bootstrap import Bootstrap
from terroroftinytown.tracker.database import Database
from terroroftinytown.tracker.model import new_session, Result, Project
class MockProject(Bootstrap):
    """Bootstrap variant that seeds the tracker database with mock projects.

    Creates ``--count`` projects named ``test_1`` .. ``test_N`` for local
    testing.  With ``delete_everything=True`` the database is wiped first.
    """

    def __init__(self, delete_everything=False):
        super().__init__()
        # Passed through to Database: wipe existing data before seeding.
        self.delete_everything = delete_everything

    def setup_database(self):
        """Open the tracker database, optionally dropping existing data."""
        self.database = Database(
            path=self.config['database']['path'],
            delete_everything=self.delete_everything
        )

    def start(self, args=None):
        """Run the normal bootstrap, then insert the mock data."""
        super().start(args=args)
        self.generate_mock()

    def setup_args(self):
        super().setup_args()
        self.arg_parser.add_argument('--count', help='Number of projects to generate', default=1, type=int)

    def generate_mock(self):
        """Insert ``--count`` mock projects within a single session."""
        with new_session() as session:
            # Log once for the whole batch (previously this printed inside
            # the loop, once per project — inconsistent with MockResult).
            print('Running insertion')
            for project_num in range(1, self.args.count + 1):
                project_id = 'test_{}'.format(project_num)
                project = Project(name=project_id)

                if project_num == 2:
                    # Only project 2 gets a custom URL template — presumably
                    # to exercise that code path; confirm before relying on it.
                    project.url_template = 'http://example.com/{shortcode}/slash/'

                session.add(project)
class MockResult(Bootstrap):
    """Bootstrap variant that floods the tracker database with random results."""

    def start(self, args=None):
        """Run the normal bootstrap, then insert the mock rows."""
        super().start(args=args)
        self.generate_mock()

    def setup_args(self):
        super().setup_args()
        self.arg_parser.add_argument('--count', help='Number of items to generate', default=int(1E6), type=int)
        self.arg_parser.add_argument('--projects', help='Number of projects to generate', default=1, type=int)

    def generate_mock(self):
        """Build ``--count`` random result rows and bulk-insert them."""
        with new_session() as session:
            rows = []
            for index in range(self.args.count):
                # Progress indicator for long runs.
                if index % 100 == 0:
                    print(index)

                project_id = 'test' if self.args.projects == 1 \
                    else 'test_{}'.format(random.randint(1, self.args.projects))

                rows.append({
                    'project_id': project_id,
                    'shortcode': self.generate_shortcode(),
                    'url': self.generate_url(),
                    'encoding': 'ascii',
                    'datetime': datetime.datetime.utcnow()
                })

            print('Running insertion')
            session.execute(insert(Result), rows)

    def generate_shortcode(self):
        """Return a pseudo-random shortcode of 1-8 hex characters.

        todo: non duplicated (uniqueness is not guaranteed).
        """
        digest = hashlib.md5(str(random.random()).encode('ascii')).hexdigest()
        return digest[:random.randrange(1, 9)]

    def generate_url(self):
        """Return a fake URL assembled from two random shortcodes."""
        return 'http://' + self.generate_shortcode() + '.com/' + self.generate_shortcode()
if __name__ == '__main__':
    # Entry point: seed the tracker database with random results.
    MockResult().start()
| 32.306818 | 111 | 0.592684 |
b85f0bb744d85dbf051f861a3491466a13c8c5bf | 4,135 | py | Python | scripts/setup/generate_secrets.py | my-name-here/zulip | 4ad6582982d05e169afb1fffe5e648a7843333f0 | [
"Apache-2.0"
] | 1 | 2019-01-13T20:47:29.000Z | 2019-01-13T20:47:29.000Z | scripts/setup/generate_secrets.py | alex784004/patient | a6510c4626392b9a8385cbac82698d9e23df0a55 | [
"Apache-2.0"
] | 1 | 2021-11-15T17:53:42.000Z | 2021-11-15T17:53:42.000Z | scripts/setup/generate_secrets.py | alex784004/patient | a6510c4626392b9a8385cbac82698d9e23df0a55 | [
"Apache-2.0"
] | 1 | 2020-10-26T08:49:45.000Z | 2020-10-26T08:49:45.000Z | #!/usr/bin/env python3
# This tools generates /etc/zulip/zulip-secrets.conf
import sys
import os
# Typing names are only needed for the py2-style ``# type:`` comments used
# below; ``if False`` keeps the import from ever executing at runtime.
if False:
    from typing import Dict, List, Optional, Text
# Make the repository root importable (three directory levels up from here).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
# Point Django at the project settings module before the Django import below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import argparse
import uuid
import configparser
from zerver.lib.str_utils import force_str
from zerver.lib.utils import generate_random_token
# Run from the repository root so relative paths (e.g. the dev secrets file
# written by generate_secrets) resolve correctly.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
# Camo image-proxy config written in production mode (see below).
CAMO_CONFIG_FILENAME = '/etc/default/camo'
# Standard, 64-bit tokens
# (these keys are filled in via generate_random_token(64) in generate_secrets)
AUTOGENERATED_SETTINGS = [
    'avatar_salt',
    'initial_password_salt',
    'local_database_password',
    'rabbitmq_password',
    'shared_secret',
    'thumbor_key',
]
# TODO: We can eliminate this function if we refactor the install
# script to run generate_secrets before zulip-puppet-apply.
def generate_camo_config_file(camo_key):
    # type: (Text) -> None
    """Write the Camo image-proxy configuration to CAMO_CONFIG_FILENAME."""
    contents = "ENABLED=yes\nPORT=9292\nCAMO_KEY=%s\n" % (camo_key,)
    with open(CAMO_CONFIG_FILENAME, 'w') as config_file:
        config_file.write(contents)
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    # type: () -> Text
    """Return a 50-character Django SECRET_KEY.

    Same recipe (alphabet and length) as Django's startproject.
    """
    allowed_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, allowed_chars)
def get_old_conf(output_filename):
    # type: (str) -> Dict[str, Text]
    """Return the existing secrets in *output_filename* as a plain dict.

    A missing file yields an empty dict, so callers can treat a fresh
    install and an upgrade uniformly.
    """
    if not os.path.exists(output_filename):
        return {}
    parser = configparser.RawConfigParser()
    parser.read(output_filename)
    return dict(parser.items("secrets"))
def generate_secrets(development=False):
    # type: (bool) -> None
    """Generate any missing secrets and append them to the secrets file.

    Existing secrets are always preserved: only keys that are absent from
    the current config get generated.  In production mode the Camo config
    file is (re)written from the current camo_key as well.

    :param development: write to the in-tree dev secrets file instead of
        the system-wide /etc/zulip location.
    """
    if development:
        OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
    else:
        OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
    current_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)

    lines = []  # type: List[Text]
    if len(current_conf) == 0:
        # Brand-new file: start it with the section header.
        lines = [u'[secrets]\n']

    def need_secret(name):
        # type: (str) -> bool
        """True when *name* is not yet present in the secrets file."""
        return name not in current_conf

    def add_secret(name, value):
        # type: (str, Text) -> None
        """Queue a new secret for writing and record it in current_conf."""
        lines.append("%s = %s\n" % (name, value))
        current_conf[name] = value

    for name in AUTOGENERATED_SETTINGS:
        if need_secret(name):
            add_secret(name, generate_random_token(64))

    if need_secret('secret_key'):
        add_secret('secret_key', generate_django_secretkey())

    if need_secret('camo_key'):
        add_secret('camo_key', get_random_string(64))

    # zulip_org_key is generated using os.urandom().
    # zulip_org_id does not require a secure CPRNG,
    # it only needs to be unique.
    if need_secret('zulip_org_key'):
        add_secret('zulip_org_key', get_random_string(64))
    if need_secret('zulip_org_id'):
        add_secret('zulip_org_id', str(uuid.uuid4()))

    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(current_conf['camo_key'])

    if len(lines) == 0:
        print("generate_secrets: No new secrets to generate.")
        return

    # Use a context manager so the handle is closed even if a write fails
    # (previously the file was opened and closed manually).
    with open(OUTPUT_SETTINGS_FILENAME, 'a') as out:
        # Write a newline at the start, in case there was no newline at
        # the end of the file due to human editing.
        out.write("\n" + force_str("".join(lines)))

    print("Generated new secrets in %s." % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Exactly one of --development / --production is required; both set the
    # same ``development`` flag (True and False respectively).
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--development', action='store_true', dest='development',
                       help='For setting up the developer env for zulip')
    group.add_argument('--production', action='store_false', dest='development',
                       help='For setting up the production env for zulip')
    results = parser.parse_args()
    generate_secrets(results.development)
| 31.807692 | 87 | 0.689238 |
11db0d8a78994823ed947299e05d21a4d27145a6 | 338 | py | Python | decentmark/migrations/0007_remove_assignment_attempts.py | DecentMark/decentmark | 52cc80f2b13b13a249326f953615e8386f9d4155 | [
"MIT"
] | null | null | null | decentmark/migrations/0007_remove_assignment_attempts.py | DecentMark/decentmark | 52cc80f2b13b13a249326f953615e8386f9d4155 | [
"MIT"
] | 1 | 2018-09-05T12:07:44.000Z | 2018-09-05T12:07:44.000Z | decentmark/migrations/0007_remove_assignment_attempts.py | DecentMark/decentmark | 52cc80f2b13b13a249326f953615e8386f9d4155 | [
"MIT"
] | 3 | 2018-08-28T06:02:47.000Z | 2018-09-03T10:53:07.000Z | # Generated by Django 2.1 on 2018-10-04 11:22
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the ``attempts`` field from ``assignment``."""
    dependencies = [
        ('decentmark', '0006_submission_autostatus'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='assignment',
            name='attempts',
        ),
    ]
| 18.777778 | 53 | 0.606509 |
912e62bcfc08d27d786cb2dedb00fffa1ca560fa | 2,480 | py | Python | analysis/scripts/project_functions.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-02-09T02:13:23.000Z | 2021-02-09T02:13:23.000Z | analysis/scripts/project_functions.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 31 | 2021-02-02T17:03:39.000Z | 2021-04-13T03:22:16.000Z | analysis/scripts/project_functions.py | data301-2020-winter2/course-project-group_1039 | 26d661a543ce9dcea61f579f9edbcde88543e7c3 | [
"MIT"
] | 1 | 2021-03-14T05:56:16.000Z | 2021-03-14T05:56:16.000Z | import collections
from typing import Collection, Container
import pandas as pd
import os
# CONSTANT FOR US REGION
# Region groupings keyed by two-letter state/territory codes.
# NOTE(review): 'DE' and 'MD' appear in both NORTH_EAST and SOUTH, so these
# lists are not disjoint — confirm which grouping is intended before relying
# on a state mapping to exactly one region.
NORTH_EAST = ['CT', 'DE', 'MA', 'MD', 'ME', 'NH', 'NJ', 'NY', 'PA', 'RI', 'VT']
MID_WEST = ['IA', 'IL', 'IN', 'KS', 'MI', 'MN', 'MO', 'ND', 'NE', 'OH', 'SD', 'WI']
WEST = ['AK', 'AZ', 'CA', 'CO', 'HI', 'ID', 'MT', 'NM', 'NV', 'OR', 'UT', 'WA', 'WY']
SOUTH = ['AL', 'AR', 'DC', 'DE', 'FL', 'GA', 'KY', 'LA', 'MD', 'MS', 'NC', 'OK', 'SC', 'TN', 'TX', 'VA', 'WV']
def load_and_process_one(source: str):
    """Load a single CSV file and run it through ``process``.

    Raises:
        FileNotFoundError: if *source* does not exist (previously this
            raised ``FileExistsError``, which denotes the opposite
            condition; both are ``OSError`` subclasses).
        IsADirectoryError: if *source* is a directory rather than a file
            (previously ``FileNotFoundError``, confusingly, since the path
            does exist).
    """
    if not os.path.exists(source):
        raise FileNotFoundError("Path does not exist.")
    elif os.path.isdir(source):
        raise IsADirectoryError("Expected file path.")
    return process(pd.read_csv(source, delimiter=','))
def load_and_process_many(source: str):
    """Load every ``*.csv`` in directory *source*, concatenate, and process.

    Raises:
        FileNotFoundError: if *source* does not exist (previously this
            raised ``FileExistsError``, which denotes the opposite
            condition; both are ``OSError`` subclasses).
        TypeError: if *source* is not a directory (kept for backward
            compatibility, although an ``OSError`` subclass such as
            ``NotADirectoryError`` would be more conventional).
    """
    if not os.path.exists(source):
        raise FileNotFoundError("Path does not exist.")
    elif not os.path.isdir(source):
        raise TypeError("Expected directory path.")
    # Sort for a deterministic concatenation order: os.listdir order is
    # arbitrary, and ``process`` deduplicates with keep='first', so the
    # row order affects which duplicate survives.
    parts = [pd.read_csv(os.path.join(source, file), delimiter=",")
             for file in sorted(os.listdir(source)) if file.endswith(".csv")]
    return process(pd.concat(parts, axis=0, ignore_index=True))
def process(dataframe: pd.DataFrame):
    """Clean a raw names DataFrame.

    Steps: drop the ``Id`` column, drop rows with missing values, drop
    duplicate (Name, Year, Gender[, State]) rows keeping the first, keep
    only years >= 1910, and reset the index.

    :param dataframe: raw frame with at least Id/Name/Year/Gender columns
        (and optionally State).
    :return: cleaned frame with a fresh 0..n-1 index.
    """
    subset = ["Name", "Year", "Gender"]
    if "State" in dataframe.columns:
        subset.append("State")

    cleaned = (dataframe
               .drop(columns=['Id'])
               .dropna()
               .drop_duplicates(subset=subset, keep='first'))
    # Filter on the *cleaned* frame.  The original applied a boolean mask
    # computed from the raw frame to the already-reduced frame, which raises
    # an unalignable-indexer error whenever dropna/drop_duplicates removed
    # any rows.
    cleaned = cleaned.loc[cleaned["Year"] >= 1910]
    return cleaned.reset_index(drop=True)
def export_processed_data(data_dict: dict, buffer=None, remove=True):
    """Export each DataFrame in *data_dict* to ``<name>.csv`` inside *buffer*.

    Args:
        data_dict: mapping of output file stem -> DataFrame.
        buffer: target directory.  Defaults to the current working directory
            at *call* time (the original evaluated ``os.getcwd()`` once at
            import time, freezing whatever directory the module was loaded
            from).
        remove: when True (the default) every pre-existing ``.csv`` in
            *buffer* is deleted first, so the directory ends up containing
            exactly the exported files.

    Raises:
        FileNotFoundError: if *buffer* does not exist.
        TypeError: if *buffer* is not a directory.
    """
    if buffer is None:
        buffer = os.getcwd()
    if not os.path.exists(buffer):
        raise FileNotFoundError("Path does not exist.")
    elif not os.path.isdir(buffer):
        raise TypeError("Expected directory path.")

    # Warning: this method will overwrite all existing csv in buffer by default
    if remove:
        # Accumulate across the whole loop.  Previously ``removed`` was
        # re-initialised on every iteration (so the report only showed the
        # last file) and was undefined — NameError — when the directory
        # was empty.
        removed = []
        for file in os.listdir(buffer):
            if file.endswith(".csv"):
                csv = os.path.join(buffer, file)
                os.remove(csv)
                removed.append(csv)
        print("{0}\nThe following csv files have been removed:\n{1}\n{0}".format("="*50, removed))

    for name, data in data_dict.items():
        name = name + ".csv"
        data.to_csv(os.path.join(buffer, name), index=True)
        print(f"=> Exported {name} to {os.path.abspath(buffer)}")
| 37.014925 | 110 | 0.575806 |
436547d109330580c5a9c0aba31dc1a0b795ec7b | 3,232 | py | Python | app/app/settings.py | eshwar00001/Rest-API-Python | c1d6198b9340bab8b453839ecd76453e3020e4ae | [
"MIT"
] | null | null | null | app/app/settings.py | eshwar00001/Rest-API-Python | c1d6198b9340bab8b453839ecd76453e3020e4ae | [
"MIT"
] | null | null | null | app/app/settings.py | eshwar00001/Rest-API-Python | c1d6198b9340bab8b453839ecd76453e3020e4ae | [
"MIT"
] | null | null | null | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is hard-coded and committed to source control; for a
# real deployment it should be loaded from the environment like the DB
# credentials below.
SECRET_KEY = '8k22^ecdwci6n++m9fl$%vc%(#olodb19ne7(he7piu1ikldo='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames allowed to serve this app; must be populated before deploying
# with DEBUG = False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app; also provides the custom user model (see AUTH_USER_MODEL).
    'core',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
# Default startproject template configuration (app-directory templates on).
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# All connection parameters come from environment variables; os.environ.get
# returns None for any that are unset, so DB_HOST/DB_NAME/DB_USER/DB_PASS
# must be exported before Django starts.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'HOST': os.environ.get('DB_HOST'),
        'NAME':os.environ.get('DB_NAME'),
        'USER':os.environ.get('DB_USER'),
        'PASSWORD':os.environ.get('DB_PASS'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# Store timezone-aware datetimes (UTC) in the database.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL='core.User' | 25.650794 | 91 | 0.691832 |
c4f4ca3eab41aec18c235e5dcb731c945ddd22ea | 6,384 | py | Python | options/base_options.py | ChristopherLu/pytorch-CycleGAN-and-pix2pix | 3603cfb9614307eb63df0e48f7c608c71a0a87b1 | [
"BSD-3-Clause"
] | null | null | null | options/base_options.py | ChristopherLu/pytorch-CycleGAN-and-pix2pix | 3603cfb9614307eb63df0e48f7c608c71a0a87b1 | [
"BSD-3-Clause"
] | null | null | null | options/base_options.py | ChristopherLu/pytorch-CycleGAN-and-pix2pix | 3603cfb9614307eb63df0e48f7c608c71a0a87b1 | [
"BSD-3-Clause"
] | null | null | null | import argparse
import os
from util import util
import torch
import models
import data
class BaseOptions():
    """Defines the command-line options used during both training and test.

    Subclasses are expected to set ``self.isTrain`` and may extend
    :meth:`initialize` with phase-specific options.
    """

    def __init__(self):
        # Guard so that ``initialize`` only registers its arguments once.
        self.initialized = False

    def initialize(self, parser):
        """Register the options shared by all models/datasets on *parser*."""
        parser.add_argument('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
        parser.add_argument('--batch_size', type=int, default=1, help='input batch size')
        parser.add_argument('--loadSize', type=int, default=256, help='scale images to this size')
        parser.add_argument('--fineSize', type=int, default=256, help='then crop to this size')
        parser.add_argument('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
        parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--netD', type=str, default='basic', help='selects model to use for netD')
        parser.add_argument('--netG', type=str, default='resnet_9blocks', help='selects model to use for netG')
        parser.add_argument('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
        parser.add_argument('--gpu_ids', type=str, default='1', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single]')
        parser.add_argument('--model', type=str, default='cycle_gan',
                            help='chooses which model to use. cycle_gan, pix2pix, test')
        parser.add_argument('--direction', type=str, default='AtoB', help='AtoB or BtoA')
        parser.add_argument('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        parser.add_argument('--num_threads', default=4, type=int, help='# threads for loading data')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm', type=str, default='instance', help='instance normalization or batch normalization')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_dropout', action='store_true', help='no dropout for the generator')
        parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--resize_or_crop', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop|none]')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
        parser.add_argument('--init_type', type=str, default='normal', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
        parser.add_argument('--verbose', action='store_true', help='if specified, print more debugging information')
        parser.add_argument('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{loadSize}')
        self.initialized = True
        return parser

    def gather_options(self):
        """Build the final parser (base + model + dataset options) and parse.

        The chosen model and dataset classes can register extra options, so
        parsing happens in stages via ``parse_known_args``.
        """
        # initialize parser with basic options
        if not self.initialized:
            parser = argparse.ArgumentParser(
                formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        else:
            # Fix: previously ``parser`` was unbound here (NameError) if
            # gather_options was called after initialization; reuse the
            # parser stored by the first call.
            parser = self.parser

        # get the basic options
        opt, _ = parser.parse_known_args()

        # modify model-related parser options
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        opt, _ = parser.parse_known_args()  # parse again with the new defaults

        # modify dataset-related parser options
        dataset_name = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_name)
        parser = dataset_option_setter(parser, self.isTrain)

        self.parser = parser
        return parser.parse_args()

    def print_options(self, opt):
        """Pretty-print all options (marking non-default values) and save
        them to ``<checkpoints_dir>/<name>/opt.txt``."""
        message = ''
        message += '----------------- Options ---------------\n'
        for k, v in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if v != default:
                comment = '\t[default: %s]' % str(default)
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += '----------------- End -------------------'
        print(message)

        # save to the disk
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt.txt')
        with open(file_name, 'wt') as opt_file:
            opt_file.write(message)
            opt_file.write('\n')

    def parse(self):
        """Parse options, print/save them, configure the GPU device, and
        return the options namespace (also stored on ``self.opt``)."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain   # train or test

        # process opt.suffix: append a formatted suffix to the experiment name
        if opt.suffix:
            suffix = ('_' + opt.suffix.format(**vars(opt))) if opt.suffix != '' else ''
            opt.name = opt.name + suffix

        self.print_options(opt)

        # set gpu ids: parse the comma-separated string into a list of ints,
        # dropping negative ids (-1 means CPU).
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            gpu_id = int(str_id)  # renamed from ``id`` (shadowed a builtin)
            if gpu_id >= 0:
                opt.gpu_ids.append(gpu_id)
        if len(opt.gpu_ids) > 0:
            torch.cuda.set_device(opt.gpu_ids[0])

        self.opt = opt
        return self.opt
| 54.564103 | 223 | 0.64646 |
4cf513dc9118e334e88cff6361a00cf82616c153 | 11,559 | py | Python | websauna/system/user/social.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | null | null | null | websauna/system/user/social.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | null | null | null | websauna/system/user/social.py | maikroeder/websauna | fd266cf5e4761cd4c1f3e33be47ad8358b4c6afa | [
"CNRI-Python"
] | 1 | 2021-04-15T17:35:57.000Z | 2021-04-15T17:35:57.000Z | from abc import abstractmethod, ABC
import authomatic
from authomatic.core import LoginResult
from pyramid.registry import Registry
from pyramid.request import Request
from sqlalchemy.orm.attributes import flag_modified
from websauna.system.user.events import UserCreated
from websauna.utils.time import now
from websauna.system.user.interfaces import IUserModel, ISocialLoginMapper
from zope.interface import implementer
class NotSatisfiedWithData(Exception):
    """Raised when a social media login cannot proceed due to incomplete provided information.

    E.g. we need email to map the user, but Facebook doesn't give us email because the user doesn't grant the permission.
    """
@implementer(ISocialLoginMapper)
class SocialLoginMapper(ABC):
    """Map Authomatic LoginResult objects (social network logins) to our internal database users."""
    def __init__(self, registry:Registry, provider_id:str):
        """Create a mapper.

        :param registry: Pyramid configuration used to drive this mapper. Subclasses might want to query this when they create users.

        :param provider_id: String id we use to refer to this authentication source in the configuration file and in the database.
        """
        #: String id of this authentication source; used to look up provider
        #: configuration and stored on users (e.g. as registration_source).
        self.provider_id = provider_id
        # Pyramid registry, used by subclasses e.g. to look up the user model.
        self.registry = registry
    @abstractmethod
    def capture_social_media_user(self, request:Request, result:LoginResult) -> IUserModel:
        """Extract social media information from the Authomatic login result in order to associate the user account.

        :return: the internal user mapped to this social login.
        """
class EmailSocialLoginMapper(SocialLoginMapper):
    """Base class for mapping internal users to social network (OAuth) users.

    Users are matched by email address; concrete subclasses normalize the
    provider payload in :meth:`import_social_media_user`.
    """

    def activate_user(self, dbsession, user):
        """Checks to perform when the user becomes a valid user for the first time.

        If this user has already started the sign up process through email we
        need to cancel that pending activation.

        .. note:: The first parameter was previously (mis)named ``request``;
           the only call site is ``self.activate_user(dbsession, user)``, so
           it actually receives the mapper instance and is really ``self``.
        """
        user.activated_at = now()

        # Cancel any pending email activations if the user chooses the option
        # to use social media login.
        if user.activation:
            dbsession.delete(user.activation)

    def update_first_login_social_data(self, user:object, data:dict):
        """Set the initial data on the user model.

        When the user logs in from a social network for the first time (no prior logins with this email before) we fill in blanks in the user model with incoming data.

        Default action is not to set any items.

        :param data: Normalized data
        """
        pass

    def update_every_login_social_data(self, user:IUserModel, data:dict):
        """Update internal user data on every login.

        By default, sets user.user_data["social"]["facebook"] or user.user_data["social"]["yoursocialnetwork"] to reflect the raw data given us by ``import_social_media_user()``.
        """
        # Non-destructive update - don't remove values which might not be present in the new data
        user.user_data["social"][self.provider_id] = user.user_data["social"].get(self.provider_id) or {}
        user.user_data["social"][self.provider_id].update(data)

        # We mutated the JSON blob in place; SQLAlchemy does not detect that,
        # so mark the attribute dirty explicitly to get it persisted.
        flag_modified(user, "user_data")

    @abstractmethod
    def import_social_media_user(self, user:authomatic.core.User) -> dict:
        """Map incoming social network data to internal data structure.

        Sometimes social networks change how the data is presented over API and you might need to do some wiggling to get it a proper shape you wish to have.

        The resulting dict must be JSON serializable as it is persisted as is.
        """

    def create_blank_user(self, user_model, dbsession, email) -> IUserModel:
        """Create a new blank user instance as we could not find matching user with the existing details."""
        user = user_model(email=email)
        dbsession.add(user)
        dbsession.flush()
        user.username = user.generate_username()
        user.registration_source = self.provider_id
        user.activated_at = now()
        return user

    def get_existing_user(self, user_model, dbsession, email):
        """Check if we have a matching user for the email already."""
        user = dbsession.query(user_model).filter_by(email=email).first()
        return user

    def get_or_create_user_by_social_medial_email(self, request:Request, user:authomatic.core.User) -> IUserModel:
        """Fetch the internal user matching this social login's email, creating one on first login.

        Also sets ``user.first_login`` and refreshes the stored social data.
        """
        User = self.registry.queryUtility(IUserModel)
        dbsession = request.dbsession

        imported_data = self.import_social_media_user(user)
        email = imported_data["email"]

        user = self.get_existing_user(User, dbsession, email)
        if not user:
            user = self.create_blank_user(User, dbsession, email)
            request.registry.notify(UserCreated(request, user))
            self.update_first_login_social_data(user, imported_data)
            user.first_login = True
        else:
            user.first_login = False

        self.activate_user(dbsession, user)
        self.update_every_login_social_data(user, imported_data)
        return user
class FacebookMapper(EmailSocialLoginMapper):
    """Map and login Facebook OAuth users to internal users.

    You must have an application created in developers.facebook.com.

    The application must have its consumer_key and consumer_secret configured in the secrets config file.

    For testing: the application must have one Web site platform configured in developers.facebook.com, pointing to http://localhost:8521/ and Valid OAuth redirect URLs to http://localhost:8521/login/facebook
    """
    def import_social_media_user(self, user):
        """Normalize the Authomatic Facebook user object to a plain, JSON-serializable dict."""
        return {
            "country": user.country,
            "timezone": user.timezone,
            "gender": user.gender,
            "first_name": user.first_name,
            "last_name": user.last_name,
            "full_name": user.name,
            "link": user.link,
            "birth_date": user.birth_date,
            "city": user.city,
            "postal_code": user.postal_code,
            "email": user.email,
            "id": user.id,
            "nickname": user.nickname,
            # "address": user.address,
        }
    def update_first_login_social_data(self, user:IUserModel, data:dict):
        """On first login, also fill in the user's full name from Facebook if not set."""
        super(FacebookMapper, self).update_first_login_social_data(user, data)
        if not user.full_name and data.get("full_name"):
            user.full_name = data["full_name"]
    def capture_social_media_user(self, request:Request, result:LoginResult) -> IUserModel:
        """Extract social media information from the Authomatic login result in order to associate the user account."""
        assert not result.error
        # Facebook specific Authomatic call to fetch more user data from the Facebook provider
        # https://github.com/peterhudec/authomatic/issues/112
        result.user.provider.user_info_url = 'https://graph.facebook.com/me?fields=id,email,name,first_name,last_name,gender,link,timezone,verified'
        result.user.update()
        # Make sure the Facebook user looks somewhat legit.
        # NOTE(review): these ``assert`` statements are stripped under
        # ``python -O``; consider explicit checks if that mode is ever used.
        assert result.user.credentials
        assert result.user.id
        if not result.user.email:
            # We cannot log in if Facebook doesn't give us an email, as we use it for the user mapping.
            # This can also happen when the Facebook app is not configured properly in developers.facebook.com.
            raise NotSatisfiedWithData("Email address is needed in order to user this service and we could not get one from your social media provider. Please try to sign up with your email instead.")
        user = self.get_or_create_user_by_social_medial_email(request, result.user)
        return user
class GoogleMapper(EmailSocialLoginMapper):
    """Map and login Google OAuth users to internal users.

    See :ref:`google-auth`.
    """
    def import_social_media_user(self, user):
        """Normalize the Authomatic Google user object to a plain, JSON-serializable dict."""
        # http://peterhudec.github.io/authomatic/reference/providers.html#authomatic.providers.oauth2.Google
        return {
            "email": user.email,
            "first_name": user.first_name,
            "last_name": user.last_name,
            "full_name": user.name,
            "locale": user.locale,
            "picture": user.picture,
            "email_verified": user.email_verified,
        }
    def update_first_login_social_data(self, user:IUserModel, data:dict):
        """On first login, also fill in the user's full name from Google if not set."""
        super(GoogleMapper, self).update_first_login_social_data(user, data)
        if not user.full_name and data.get("full_name"):
            user.full_name = data["full_name"]
    def capture_social_media_user(self, request:Request, result:LoginResult) -> IUserModel:
        """Extract social media information from the Authomatic login result in order to associate the user account."""
        assert not result.error
        result.user.update()
        # Make sure we got some meaningful input from the user_info_url.
        # NOTE(review): ``assert`` is stripped under ``python -O``.
        assert result.user.credentials
        if not result.user.email_verified:
            raise NotSatisfiedWithData("User account email is not verified.")
        if not result.user.email:
            # We cannot log in if the provider doesn't give us an email, as we use it for the user mapping.
            # (Comment previously copy-pasted from the Facebook mapper; for Google this can happen when the app is misconfigured in the Google developer console.)
            raise NotSatisfiedWithData("Email address is needed in order to user this service and we could not get one from your social media provider. Please try to sign up with your email instead.")
        user = self.get_or_create_user_by_social_medial_email(request, result.user)
        return user
class TwitterMapper(EmailSocialLoginMapper):
    """Map Twitter OAuth users to internal users.

    See :ref:`twitter-auth`.
    """
    @staticmethod
    def _x_user_parser(user, data):
        """Monkey patched authomatic.providers.oauth1.Twitter._x_user_parser.

        Stores the raw provider payload on the user object unchanged.
        """
        user.data = data
        # TODO: Fetch user email
        # TODO: Pending Twitter authentication request
        return user
    def import_social_media_user(self, user):
        """Pass-through Twitter auth data to user_data['social']['twitter']"""
        return user.data
    def update_first_login_social_data(self, user:IUserModel, data:dict):
        """On first login, also fill in the user's full name from Twitter if not set."""
        super(TwitterMapper, self).update_first_login_social_data(user, data)
        if not user.full_name and data.get("name"):
            user.full_name = data["name"]
    def capture_social_media_user(self, request:Request, result:LoginResult) -> IUserModel:
        """Extract social media information from the Authomatic login result in order to associate the user account."""
        assert not result.error
        # Monkey patch user data fetcher
        result.provider._x_user_parser = TwitterMapper._x_user_parser
        result.user.update()
        # Make sure we got some meaningful input from the user_info_url.
        # NOTE(review): ``assert`` is stripped under ``python -O``.
        assert result.user.credentials
        if not result.user.email:
            # We cannot log in if the provider doesn't give us an email, as we use it for the user mapping.
            # (Comment previously copy-pasted from the Facebook mapper; per the
            # TODOs above, the email fetch for Twitter is not implemented yet,
            # so this branch may trigger for every Twitter login — confirm.)
            raise NotSatisfiedWithData("Email address is needed in order to user this service and we could not get one from your social media provider. Please try to sign up with your email instead.")
        user = self.get_or_create_user_by_social_medial_email(request, result.user)
        return user
| 41.579137 | 208 | 0.695302 |
8c9c8854327078ee86f81ae13541e627ba144dbf | 6,098 | py | Python | portal/courses.py | Thommond/tsct-portal | 726cfcf86a15985093fd9002a2636478a2495e9e | [
"MIT"
] | 2 | 2020-04-16T00:44:44.000Z | 2020-04-21T19:14:30.000Z | portal/courses.py | Thommond/tsct-portal | 726cfcf86a15985093fd9002a2636478a2495e9e | [
"MIT"
] | 16 | 2020-04-14T17:41:11.000Z | 2020-10-30T18:42:33.000Z | portal/courses.py | Thommond/tsct-portal | 726cfcf86a15985093fd9002a2636478a2495e9e | [
"MIT"
] | 1 | 2020-04-07T18:08:54.000Z | 2020-04-07T18:08:54.000Z | from flask import redirect, g, url_for, render_template, session, request, Blueprint, flash, abort
import functools
from . import db
from portal.auth import login_required, teacher_required
bp = Blueprint("courses", __name__)
@bp.route("/courses", methods=('GET', 'POST'))  # Management Page
@login_required
@teacher_required
def course_manage():
    """Render the course management page.

    Lists every course so teachers can pick one to edit or create a new one.
    """
    # Use the same context-managed connection/cursor pattern as the other
    # helpers in this module, so the cursor is released even if the query
    # raises (the old raw cursor leaked in that case).
    with db.get_db() as con:
        with con.cursor() as cur:
            cur.execute('SELECT * FROM courses')
            courses = cur.fetchall()
    return render_template("courses/courseMan.html", courses=courses)
@bp.route("/courses/create", methods=('GET', 'POST'))  # Course Create
@login_required
@teacher_required
def course_create():
    """Allow a teacher to create a course and fill out its specifics.

    GET renders the creation form; POST validates the submitted fields,
    inserts the new course owned by the current teacher, and redirects back
    to the management page.  Validation failures are flashed and the form
    is re-rendered.
    """
    all_majors = get_majors()
    if request.method == 'POST':
        course_number = request.form['courseNumber']
        course_title = request.form['courseTitle']
        course_description = request.form['description']
        course_credit = request.form['courseCredits']
        course_major = request.form['major_name']
        error = None
        # Checks if course_number is a number.  (Removed the dead
        # `result = isinstance(course_major, int)` — form values are
        # always strings, so it was always False and never read.)
        try:
            int(course_number)
        except ValueError:
            error = 'Course number needs to be a number'
        # Checks if course_credit is a number
        try:
            int(course_credit)
        except ValueError:
            error = 'Credit amount needs to be a number'
        # Checks if course_major is a number
        try:
            int(course_major)
        except ValueError:
            error = 'Major not found'
        # Checks if the selected major is in the database
        if not error:
            with db.get_db() as con:
                with con.cursor() as cur:
                    cur.execute('SELECT * FROM majors WHERE id = %s', (course_major,))
                    check_major = cur.fetchone()
                    if check_major is None:
                        error = 'Major not found'
        # Checks if the course number or name is taken
        if not error:
            with db.get_db() as con:
                with con.cursor() as cur:
                    cur.execute('SELECT * FROM courses WHERE course_title = %s', (course_title, ))
                    existing_course_name = cur.fetchone()
                    if existing_course_name is not None:
                        error = "Course Name already exists"
                    cur.execute(
                        "SELECT * FROM courses WHERE course_num = %s;", (course_number, ))
                    existing_course_num = cur.fetchone()
                    if existing_course_num is not None:
                        error = "Course Number already exists"
        # Required-field checks run last so their messages take precedence,
        # matching the original validation order.
        if not course_number:
            error = 'Course number is required'
        if not course_title:
            error = 'Title of course is required'
        if not course_credit:
            error = 'Credit amount is required'
        if not course_major:
            error = 'Major is required'
        if error is None:
            with db.get_db() as con:
                with con.cursor() as cur:
                    # Adds info to courses table
                    cur.execute("""INSERT INTO courses (course_num, course_title, description,
                                credits, major_id, teacher_id)
                                VALUES (%s, %s, %s, %s, %s, %s)""",
                                (course_number, course_title, course_description,
                                 course_credit, course_major, g.user['id'], )
                                )
                    con.commit()
            return redirect(url_for("courses.course_manage"))
        flash(error)
    return render_template('courses/courseCreate.html', all_majors=all_majors)
# Needs new template
@bp.route("/courses/<int:course_id>/edit", methods=('GET', 'POST'))
@login_required
@teacher_required
def course_edit(course_id):
    """Allows teachers to edit the course"""
    # get_course() aborts with 404 when the course does not exist.
    course = get_course(course_id)
    # Only the teacher who owns the course may edit it.
    if g.user['id'] != course['teacher_id']:
        abort(403)
    if request.method == "POST":
        credit = request.form['editCredit']
        title = request.form['editTitle']
        desc = request.form['editDesc']
        error = None
        # Checks if course_credit is a number
        try:
            int(credit)
        except ValueError:
            error = 'Credit amount needs to be a number'
        if not credit:
            error = 'Credit amount is required'
        if not title:
            error = 'Title of course is required'
        if error is None:
            with db.get_db() as con:
                with con.cursor() as cur:
                    # Update title/description/credits for this course number.
                    cur.execute("""UPDATE courses SET
                                course_title = %s,
                                description = %s,
                                credits = %s
                                WHERE course_num = %s
                                """,
                                (title, desc, credit, course_id,)
                                )
                    con.commit()
            return redirect(url_for("courses.course_manage"))
        # Validation failed: surface the message and fall through to re-render.
        flash(error)
    return render_template("courses/courseEdit.html", course=course)
def get_course(course_id):
    """Fetch one course row by its course number, aborting with 404 if absent."""
    query = (
        'SELECT course_num, credits, description, course_title, teacher_id'
        ' FROM courses WHERE course_num = %s'
    )
    with db.get_db() as con:
        with con.cursor() as cur:
            cur.execute(query, (course_id,))
            row = cur.fetchone()
            if row is None:
                abort(404)
            return row
def get_majors():
    """Return every (name, id) row from the majors table."""
    with db.get_db() as con:
        with con.cursor() as cur:
            cur.execute('SELECT name, id FROM majors')
            return cur.fetchall()
| 28.764151 | 98 | 0.547229 |
02897fca41617a6293740987933a0d7c4689f831 | 392 | py | Python | patientapp/migrations/0005_auto_20200113_1458.py | AbhiyantrikTechnology/DentalHub-Backend | 89802b3e7671ffe8b3d287a998c3c4f375b58f03 | [
"MIT"
] | 1 | 2021-04-03T19:57:32.000Z | 2021-04-03T19:57:32.000Z | patientapp/migrations/0005_auto_20200113_1458.py | AbhiyantrikTechnology/DentalHub-Backend | 89802b3e7671ffe8b3d287a998c3c4f375b58f03 | [
"MIT"
] | null | null | null | patientapp/migrations/0005_auto_20200113_1458.py | AbhiyantrikTechnology/DentalHub-Backend | 89802b3e7671ffe8b3d287a998c3c4f375b58f03 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2020-01-13 14:58
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django (2.1) schema migration: alters
    # Patient.created_at to an indexed DateField.  Applied migrations are
    # normally left untouched.

    dependencies = [
        ('patientapp', '0004_auto_20190925_0855'),
    ]

    operations = [
        migrations.AlterField(
            model_name='patient',
            name='created_at',
            # db_index=True creates a database index on the column.
            field=models.DateField(db_index=True),
        ),
    ]
| 20.631579 | 50 | 0.607143 |
6303d7467519ca3adb79fdc7cb87a1b13522cced | 906 | py | Python | getting_small_piece2csv.py | Shemka/SteamTags | a35463b7f30e112d8c18a73cd4abe54b102c0b4b | [
"MIT"
] | null | null | null | getting_small_piece2csv.py | Shemka/SteamTags | a35463b7f30e112d8c18a73cd4abe54b102c0b4b | [
"MIT"
] | null | null | null | getting_small_piece2csv.py | Shemka/SteamTags | a35463b7f30e112d8c18a73cd4abe54b102c0b4b | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import os
import sys
import json
import pymysql as mysql
import time
# Export a slice of the SteamTags play-time data to a compressed CSV.
# Prompts the operator for MySQL connection details and the number of users.
host, login, password, db, n_users = input('Enter host, login, password, database name, users_number:\n').split()
connection = mysql.connect(host, login, password, db)
try:
    with connection.cursor() as cur:
        # BUG FIX: MySQL spells this DISTINCT — `SELECT UNIQUE` is Oracle
        # syntax and raises a syntax error under pymysql/MySQL.  The limit
        # is bound as a parameter instead of f-string interpolation.
        cur.execute(
            'SELECT DISTINCT steamid FROM games_2 WHERE playtime_forever > 0 LIMIT %s;',
            (int(n_users),))
        steamids = [str(row[0]) for row in cur.fetchall()]
        if steamids:
            # Build one placeholder per id so the values are escaped by the
            # driver rather than pasted into the SQL text.
            placeholders = ', '.join(['%s'] * len(steamids))
            cur.execute(
                'SELECT steamid, appid, playtime_forever FROM games_2'
                ' WHERE steamid IN ({}) AND playtime_forever > 0;'.format(placeholders),
                steamids)
            data = cur.fetchall()
        else:
            data = []
finally:
    # Always release the connection, even if a query fails.
    connection.close()
print('Dataset contains', len(data), 'rows')
df = pd.DataFrame(data, columns=['steamid', 'appid', 'playtime_forever'])
df.to_csv(f'{n_users}_users_slice.csv.gz', index=False, compression='gzip')
| 39.391304 | 126 | 0.708609 |
d4584cbc3f7323267f960a90f3625b05024108b6 | 60 | py | Python | simple_operations_and_calculations/usd_to_bgn.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | simple_operations_and_calculations/usd_to_bgn.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | null | null | null | simple_operations_and_calculations/usd_to_bgn.py | MaggieIllustrations/softuni-github-programming | f5695cb14602f3d2974359f6d8734332acc650d3 | [
"MIT"
] | 1 | 2022-01-14T17:12:44.000Z | 2022-01-14T17:12:44.000Z | usd = float(input())
# Convert the USD amount read above to BGN at the fixed peg rate and print
# it with two decimal places.  (Removed dataset-extraction junk that was
# fused onto the print line; named the magic conversion constant.)
USD_TO_BGN_RATE = 1.79549
bgn = usd * USD_TO_BGN_RATE
print(f"{bgn:.2f}")
0503e1294512d5b4a949888fb1b256522facda52 | 6,010 | py | Python | modules/seq2seq_decoder.py | naacl2021anonymous/seq2emo | 6fb4f938cd623de1941072b3ab7b462615e000a3 | [
"MIT"
] | 7 | 2021-03-20T17:06:13.000Z | 2022-02-25T08:08:49.000Z | modules/seq2seq_decoder.py | naacl2021anonymous/seq2emo | 6fb4f938cd623de1941072b3ab7b462615e000a3 | [
"MIT"
] | null | null | null | modules/seq2seq_decoder.py | naacl2021anonymous/seq2emo | 6fb4f938cd623de1941072b3ab7b462615e000a3 | [
"MIT"
] | 1 | 2022-03-04T01:49:07.000Z | 2022-03-04T01:49:07.000Z | import torch
import torch.nn as nn
from modules.luong_attention import Attention
class Seq2SeqDecoder(nn.Module):
    r"""Bi-directional label decoder: an LSTM cell with Luong attention.

    The decoder is stepped once per emotion class.  At each step the
    embedding of the class index is fed as input (optionally concatenated
    with the previous attentional state when "input feeding" is enabled),
    the LSTM output is attended over the encoder context, and per-class
    binary classifiers map the forward (+ optional backward) attentional
    states to 2-way logits.
    """

    def __init__(self, emb_dim, hidden_size, num_class, batch_first=True,
                 dropout=0.2, args=None):
        """Initialize params.

        Args:
            emb_dim: dimensionality of the label-signal embeddings.
            hidden_size: LSTM hidden size (must match the encoder context).
            num_class: number of emotion labels, i.e. decoding steps.
            batch_first: kept for API compatibility; the LSTMs are always
                constructed with batch_first=True.
            dropout: dropout probability for the signal embeddings.  (It is
                also passed to the single-layer LSTMs, where PyTorch ignores
                it and only emits a warning.)
            args: namespace providing the ``input_feeding``,
                ``concat_signal``, ``single_direction`` and
                ``unify_decoder`` flags.
        """
        super(Seq2SeqDecoder, self).__init__()
        self.args = args
        self.num_class = num_class
        self.emb_dim = emb_dim
        self.hidden_size = hidden_size
        self.num_layers = 1
        self.batch_first = batch_first
        self.dropout = nn.Dropout(dropout)
        self.input_feed = self.args.input_feeding
        self.concat_signal = self.args.concat_signal
        # forward decoder parameters
        if self.args.input_feeding:
            # Input feeding: previous attentional state h~ is appended to
            # the signal embedding at every step.
            lstm_input_size = emb_dim + hidden_size
        else:
            lstm_input_size = emb_dim
        self.forward_signal_embedding = nn.Embedding(num_class, emb_dim)
        self.forward_attention_layer = Attention(hidden_size, args=args)
        self.forward_decoder_lstm = nn.LSTM(lstm_input_size, hidden_size, num_layers=1, batch_first=True, dropout=dropout)
        # The backward decoder shares the signal embedding with the forward
        # one.  (BUG FIX: the original allocated a separate nn.Embedding and
        # immediately overwrote it; the dead allocation is removed — the
        # resulting module tree is identical.)
        self.backward_signal_embedding = self.forward_signal_embedding
        # backward decoder parameters
        if self.args.single_direction is False:
            if not self.args.unify_decoder:
                self.backward_attention_layer = Attention(hidden_size, args=args)
                self.backward_decoder_lstm = nn.LSTM(lstm_input_size, hidden_size, num_layers=1, batch_first=True, dropout=dropout)
            else:
                # Unified decoder: forward and backward passes share weights.
                self.backward_attention_layer = self.forward_attention_layer
                self.backward_decoder_lstm = self.forward_decoder_lstm
            # Bi-directional: each classifier sees forward+backward states,
            # optionally concatenated with the signal embedding.
            if not self.concat_signal:
                self.binary_hidden2label_list = nn.ModuleList([nn.Linear(hidden_size * 2, 2) for _ in range(num_class)])
            else:
                self.binary_hidden2label_list = nn.ModuleList([nn.Linear(hidden_size * 2 + emb_dim, 2) for _ in range(num_class)])
        else:
            if not self.concat_signal:
                self.binary_hidden2label_list = nn.ModuleList([nn.Linear(hidden_size, 2) for _ in range(num_class)])
            else:
                self.binary_hidden2label_list = nn.ModuleList(
                    [nn.Linear(hidden_size + emb_dim, 2) for _ in range(num_class)])

    def forward(self, hidden, ctx, src_len):
        """Decode one 2-way logit per class.

        Args:
            hidden: initial decoder state tuple ``(h, c)`` from the encoder.
            ctx: encoder outputs to attend over.
            src_len: per-example source lengths used for attention masking.

        Returns:
            Tensor of shape (batch, num_class, 2) with binary logits.
        """

        def recurrence(_trg_emb_i, _hidden, _h_tilde, _decoder_lstm, _attention_layer):
            # One decoding step: optional input feeding, LSTM, attention.
            if self.input_feed:
                if len(_h_tilde.size()) > 2:
                    _h_tilde = _h_tilde.squeeze(0)
                _lstm_input = torch.cat((_trg_emb_i, _h_tilde), dim=1)
            else:
                _lstm_input = _trg_emb_i
            lstm_out, _hidden = _decoder_lstm(_lstm_input.unsqueeze(1), _hidden)
            _h_tilde, alpha = _attention_layer(lstm_out, ctx, src_len.view(-1))
            return _h_tilde.squeeze(0), _hidden  # squeeze out the trg_len dimension

        b_size = src_len.size(0)
        src_len = src_len.view(-1)
        init_hidden = hidden
        # ---- forward pass over the classes (0 .. num_class-1) ----
        hs_forward = []
        h_tilde = init_hidden[0]
        hidden = init_hidden
        if len(hidden[0].size()) == 2:
            # Add the (num_layers * num_directions) axis expected by nn.LSTM.
            hidden = (hidden[0].unsqueeze(0), hidden[1].unsqueeze(0))
        for i in range(self.num_class):
            # NOTE(review): .cuda() hard-codes GPU execution.
            emo_signal = torch.LongTensor([i] * b_size).cuda()
            emo_signal_input = self.forward_signal_embedding(emo_signal)
            emo_signal_input = self.dropout(emo_signal_input)
            h_tilde, hidden = recurrence(emo_signal_input, hidden, h_tilde, self.forward_decoder_lstm,
                                         self.forward_attention_layer)
            if not self.concat_signal:
                hs_forward.append(h_tilde)
            else:
                hs_forward.append(torch.cat((emo_signal_input, h_tilde), dim=1))
        if self.args.single_direction is False:
            # ---- backward pass over the classes (num_class-1 .. 0) ----
            hs_backward = []
            h_tilde = init_hidden[0]
            hidden = init_hidden
            if len(hidden[0].size()) == 2:
                hidden = (hidden[0].unsqueeze(0), hidden[1].unsqueeze(0))
            for i in range(self.num_class - 1, -1, -1):
                emo_signal = torch.LongTensor([i] * b_size).cuda()
                emo_signal_input = self.backward_signal_embedding(emo_signal)
                emo_signal_input = self.dropout(emo_signal_input)
                h_tilde, hidden = recurrence(emo_signal_input, hidden, h_tilde, self.backward_decoder_lstm, self.backward_attention_layer)
                hs_backward.append(h_tilde)
            # Pair forward state i with the backward state for the same class;
            # hs_backward was filled in reverse order.  (Removed the dead
            # `decoder_output` list whose only uses were commented out.)
            h_list = []
            for i in range(self.num_class):
                h_bidirection = torch.cat((hs_forward[i], hs_backward[self.num_class - i - 1]), dim=1)
                h_list.append(h_bidirection)
            pred_list = [self.binary_hidden2label_list[i](h_list[i]) for i in range(self.num_class)]
        else:
            pred_list = [self.binary_hidden2label_list[i](hs_forward[i]) for i in range(self.num_class)]
        return torch.stack(pred_list, dim=1)
4297ad7c8635a305154b5abb634bcb98f929abbb | 18,433 | py | Python | Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/compute_pilot.py | radical-experiments/iceberg_escience | e5c230a23395a71a4adf554730ea3d77f923166c | [
"MIT"
] | 1 | 2019-05-24T02:19:29.000Z | 2019-05-24T02:19:29.000Z | Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/compute_pilot.py | radical-experiments/iceberg_escience | e5c230a23395a71a4adf554730ea3d77f923166c | [
"MIT"
] | null | null | null | Geolocation/Data/Design2a/design2a_11k_test5/pilot.0000/rp_install/lib/python2.7/site-packages/radical/pilot/compute_pilot.py | radical-experiments/iceberg_escience | e5c230a23395a71a4adf554730ea3d77f923166c | [
"MIT"
] | null | null | null |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import copy
import time
import threading
import radical.utils as ru
from . import states as rps
from . import constants as rpc
# ------------------------------------------------------------------------------
#
class ComputePilot(object):
'''
A ComputePilot represent a resource overlay on a local or remote resource.
.. note:: A ComputePilot cannot be created directly. The factory method
:meth:`radical.pilot.PilotManager.submit_pilots` has to be
used instead.
**Example**::
pm = radical.pilot.PilotManager(session=s)
pd = radical.pilot.ComputePilotDescription()
pd.resource = "local.localhost"
pd.cores = 2
pd.runtime = 5 # minutes
pilot = pm.submit_pilots(pd)
'''
# --------------------------------------------------------------------------
# In terms of implementation, a Pilot is not much more than a dict whose
# content are dynamically updated to reflect the state progression through
# the PMGR components. As a Pilot is always created via a PMGR, it is
# considered to *belong* to that PMGR, and all activities are actually
# implemented by that PMGR.
#
# Note that this implies that we could create Pilots before submitting them
# to a PMGR, w/o any problems. (FIXME?)
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
#
def __init__(self, pmgr, descr):
# 'static' members
self._descr = descr.as_dict()
# sanity checks on description
for check in ['resource', 'cores', 'runtime']:
if not self._descr.get(check):
raise ValueError("ComputePilotDescription needs '%s'" % check)
# initialize state
self._pmgr = pmgr
self._session = self._pmgr.session
self._prof = self._session._prof
self._uid = ru.generate_id('pilot.%(item_counter)04d',
ru.ID_CUSTOM,
namespace=self._session.uid)
self._state = rps.NEW
self._log = pmgr._log
self._pilot_dict = dict()
self._callbacks = dict()
self._cache = dict() # cache of SAGA dir handles
self._cb_lock = threading.RLock()
# pilot failures can trigger app termination
self._exit_on_error = self._descr.get('exit_on_error')
for m in rpc.PMGR_METRICS:
self._callbacks[m] = dict()
# we always invoke the default state cb
self._callbacks[rpc.PILOT_STATE][self._default_state_cb.__name__] = {
'cb' : self._default_state_cb,
'cb_data' : None}
# `as_dict()` needs `pilot_dict` and other attributes. Those should all
# be available at this point (apart from the sandboxes), so we now
# query for those sandboxes.
self._pilot_jsurl = ru.Url()
self._pilot_jshop = ru.Url()
self._resource_sandbox = ru.Url()
self._session_sandbox = ru.Url()
self._pilot_sandbox = ru.Url()
self._client_sandbox = ru.Url()
pilot = self.as_dict()
self._pilot_jsurl, self._pilot_jshop \
= self._session._get_jsurl (pilot)
self._resource_sandbox = self._session._get_resource_sandbox(pilot)
self._session_sandbox = self._session._get_session_sandbox (pilot)
self._pilot_sandbox = self._session._get_pilot_sandbox (pilot)
self._client_sandbox = self._session._get_client_sandbox()
# we need to expand plaaceholders in the sandboxes
# FIXME: this code is a duplication from the pilot launcher code
expand = dict()
for k,v in pilot['description'].iteritems():
if v is None:
v = ''
expand['pd.%s' % k] = v
if isinstance(v, basestring):
expand['pd.%s' % k.upper()] = v.upper()
expand['pd.%s' % k.lower()] = v.lower()
else:
expand['pd.%s' % k.upper()] = v
expand['pd.%s' % k.lower()] = v
self._resource_sandbox.path = self._resource_sandbox.path % expand
self._session_sandbox .path = self._session_sandbox .path % expand
self._pilot_sandbox .path = self._pilot_sandbox .path % expand
# --------------------------------------------------------------------------
#
def __repr__(self):
return str(self)
# --------------------------------------------------------------------------
#
def __str__(self):
return str([self.uid, self.resource, self.state])
# --------------------------------------------------------------------------
#
def _default_state_cb(self, pilot, state):
self._log.info("[Callback]: pilot %s state: %s.", self.uid, self.state)
if self.state == rps.FAILED and self._exit_on_error:
self._log.error("[Callback]: pilot '%s' failed - exit", self.uid)
# There are different ways to tell main...
ru.cancel_main_thread('int')
# raise RuntimeError('pilot %s failed - fatal!' % self.uid)
# import sys
# sys.exit()
# --------------------------------------------------------------------------
#
def _update(self, pilot_dict):
'''
This will update the facade object after state changes etc, and is
invoked by whatever component receiving that updated information.
Return True if state changed, False otherwise
'''
if pilot_dict['uid'] != self.uid:
self._log.error('invalid uid: %s / %s', pilot_dict['uid'], self.uid)
assert(pilot_dict['uid'] == self.uid), 'update called on wrong instance'
# NOTE: this method relies on state updates to arrive in order and
# without gaps.
current = self.state
target = pilot_dict['state']
if target not in [rps.FAILED, rps.CANCELED]:
try:
cur_state_val = rps._pilot_state_value(current)
tgt_state_val = rps._pilot_state_value(target)
assert(tgt_state_val - cur_state_val), 'invalid state transition'
except:
self._log.error('%s: invalid state transition %s -> %s',
self.uid, current, target)
raise
self._state = target
# keep all information around
self._pilot_dict = copy.deepcopy(pilot_dict)
# invoke pilot specific callbacks
# FIXME: this iteration needs to be thread-locked!
for _,cb_val in self._callbacks[rpc.PILOT_STATE].iteritems():
cb = cb_val['cb']
cb_data = cb_val['cb_data']
self._log.debug('%s calls cb %s', self.uid, cb)
if cb_data: cb(self, self.state, cb_data)
else : cb(self, self.state)
# ask pmgr to invoke any global callbacks
self._pmgr._call_pilot_callbacks(self, self.state)
# --------------------------------------------------------------------------
#
def as_dict(self):
'''
Returns a Python dictionary representation of the object.
'''
ret = {'session': self.session.uid,
'pmgr': self.pmgr.uid,
'uid': self.uid,
'type': 'pilot',
'state': self.state,
'log': self.log,
'stdout': self.stdout,
'stderr': self.stderr,
'resource': self.resource,
'resource_sandbox': str(self._resource_sandbox),
'session_sandbox': str(self._session_sandbox),
'pilot_sandbox': str(self._pilot_sandbox),
'client_sandbox': str(self._client_sandbox),
'js_url': str(self._pilot_jsurl),
'js_hop': str(self._pilot_jshop),
'description': self.description, # this is a deep copy
'resource_details': self.resource_details
}
return ret
# --------------------------------------------------------------------------
#
@property
def session(self):
'''
Returns the pilot's session.
**Returns:**
* A :class:`Session`.
'''
return self._session
# --------------------------------------------------------------------------
#
@property
def pmgr(self):
'''
Returns the pilot's manager.
**Returns:**
* A :class:`PilotManager`.
'''
return self._pmgr
# -------------------------------------------------------------------------
#
@property
def resource_details(self):
'''
Returns agent level resource information
'''
return self._pilot_dict.get('resource_details')
# --------------------------------------------------------------------------
#
@property
def uid(self):
'''
Returns the pilot's unique identifier.
The uid identifies the pilot within a :class:`PilotManager`.
**Returns:**
* A unique identifier (string).
'''
return self._uid
# --------------------------------------------------------------------------
#
@property
def state(self):
'''
Returns the current state of the pilot.
**Returns:**
* state (string enum)
'''
return self._state
# --------------------------------------------------------------------------
#
@property
def log(self):
'''
Returns a list of human readable [timestamp, string] tuples describing
various events during the pilot's lifetime. Those strings are not
normative, only informative!
**Returns:**
* log (list of [timestamp, string] tuples)
'''
return self._pilot_dict.get('log')
# --------------------------------------------------------------------------
#
@property
def stdout(self):
'''
Returns a snapshot of the pilot's STDOUT stream.
If this property is queried before the pilot has reached
'DONE' or 'FAILED' state it will return None.
.. warning: This can be inefficient. Output may be incomplete and/or
filtered.
**Returns:**
* stdout (string)
'''
return self._pilot_dict.get('stdout')
# --------------------------------------------------------------------------
#
@property
def stderr(self):
'''
Returns a snapshot of the pilot's STDERR stream.
If this property is queried before the pilot has reached
'DONE' or 'FAILED' state it will return None.
.. warning: This can be inefficient. Output may be incomplete and/or
filtered.
**Returns:**
* stderr (string)
'''
return self._pilot_dict.get('stderr')
# --------------------------------------------------------------------------
#
@property
def resource(self):
'''
Returns the resource tag of this pilot.
**Returns:**
* A resource tag (string)
'''
return self._descr.get('resource')
# --------------------------------------------------------------------------
#
@property
def pilot_sandbox(self):
'''
Returns the full sandbox URL of this pilot, if that is already
known, or 'None' otherwise.
**Returns:**
* A string
'''
# NOTE: The pilot has a sandbox property, containing the full sandbox
# path, which is used by the pmgr to stage data back and forth.
# However, the full path as visible from the pmgr side might not
# be what the agent is seeing, specifically in the case of
# non-shared filesystems (OSG). The agent thus uses
# `$PWD` as sandbox, with the assumption that this will
# get mapped to whatever is here returned as sandbox URL.
#
# There is thus implicit knowledge shared between the RP client
# and the RP agent that `$PWD` *is* the sandbox! The same
# implicitly also holds for the staging area, which is relative
# to the pilot sandbox.
if self._pilot_sandbox:
return str(self._pilot_sandbox)
@property
def resource_sandbox(self):
return self._resource_sandbox
@property
def session_sandbox(self):
return self._session_sandbox
@property
def client_sandbox(self):
return self._client_sandbox
# --------------------------------------------------------------------------
#
@property
def description(self):
'''
Returns the description the pilot was started with, as a dictionary.
**Returns:**
* description (dict)
'''
return copy.deepcopy(self._descr)
# --------------------------------------------------------------------------
#
def register_callback(self, cb, metric=rpc.PILOT_STATE, cb_data=None):
'''
Registers a callback function that is triggered every time the
pilot's state changes.
All callback functions need to have the same signature::
def cb(obj, state)
where ``object`` is a handle to the object that triggered the callback
and ``state`` is the new state of that object. If 'cb_data' is given,
then the 'cb' signature changes to
def cb(obj, state, cb_data)
and 'cb_data' are passed along.
'''
if metric not in rpc.PMGR_METRICS :
raise ValueError ("Metric '%s' not available on pmgr" % metric)
with self._cb_lock:
cb_name = cb.__name__
self._callbacks[metric][cb_name] = {'cb' : cb,
'cb_data' : cb_data}
# --------------------------------------------------------------------------
#
def unregister_callback(self, cb, metric=rpc.PILOT_STATE):
if metric and metric not in rpc.UMGR_METRICS :
raise ValueError ("Metric '%s' not available on pmgr" % metric)
if not metric : metrics = rpc.PMGR_METRICS
elif not isinstance(metric, list): metrics = [metric]
else : metrics = metric
with self._cb_lock:
for metric in metrics:
if cb: to_delete = [cb.__name__]
else : to_delete = self._callbacks[metric].keys()
for cb_name in to_delete:
if cb_name not in self._callbacks[metric]:
raise ValueError("Callback '%s' is not registered" % cb_name)
del(self._callbacks[metric][cb_name])
# --------------------------------------------------------------------------
#
def wait(self, state=None, timeout=None):
'''
Returns when the pilot reaches a specific state or
when an optional timeout is reached.
**Arguments:**
* **state** [`list of strings`]
The state(s) that pilot has to reach in order for the
call to return.
By default `wait` waits for the pilot to reach a **final**
state, which can be one of the following:
* :data:`radical.pilot.states.DONE`
* :data:`radical.pilot.states.FAILED`
* :data:`radical.pilot.states.CANCELED`
* **timeout** [`float`]
Optional timeout in seconds before the call returns regardless
whether the pilot has reached the desired state or not. The
default value **None** never times out. '''
if not state : states = rps.FINAL
elif not isinstance(state, list): states = [state]
else : states = state
if self.state in rps.FINAL:
# we will never see another state progression. Raise an error
# (unless we waited for this)
if self.state in states:
return
# FIXME: do we want a raise here, really? This introduces a race,
# really, on application level
# raise RuntimeError("can't wait on a pilot in final state")
return self.state
start_wait = time.time()
while self.state not in states:
time.sleep(0.1)
if timeout and (timeout <= (time.time() - start_wait)):
break
if self._pmgr._terminate.is_set():
break
return self.state
# --------------------------------------------------------------------------
#
def cancel(self):
'''
Cancel the pilot.
'''
# clean connection cache
try:
for key in self._cache:
self._cache[key].close()
self._cache = dict()
except:
pass
self._pmgr.cancel_pilots(self.uid)
# --------------------------------------------------------------------------
#
def stage_in(self, directives):
'''
Stages the content of the staging directive into the pilot's
staging area
'''
# This staging request is actually served by the pmgr *launching*
# component, because that already has a channel open to the target
# resource which we can reuse. We might eventually implement or
# interface to a dedicated data movement service though.
# send the staging request to the pmg launcher
self._pmgr._pilot_staging_input(self.as_dict(), directives)
# ------------------------------------------------------------------------------
| 31.781034 | 85 | 0.493029 |
9ad63bcb3e75092d0064409f6fbf287d83e9792a | 572 | py | Python | friday/hackernews.py | gutku10/Friday-Zulip-Bot | f6c22b8f718b37cbe807d159b54e68f5c7906de4 | [
"Apache-2.0"
] | null | null | null | friday/hackernews.py | gutku10/Friday-Zulip-Bot | f6c22b8f718b37cbe807d159b54e68f5c7906de4 | [
"Apache-2.0"
] | null | null | null | friday/hackernews.py | gutku10/Friday-Zulip-Bot | f6c22b8f718b37cbe807d159b54e68f5c7906de4 | [
"Apache-2.0"
] | null | null | null | from newsapi import NewsApiClient
# Init
newsapi = NewsApiClient(api_key='d34d3205f0794f85af841f72c04abd06')
#d34d3205f0794f85af841f72c04abd06
class Hackernews(object):
def __init__(self):
pass
def get_hackernews(self, topic):
response = ''
if(topic == 'coronavirus'):
x = newsapi.get_top_headlines(q=topic)
elif topic == 'coronavirus india':
x = newsapi.get_top_headlines(q=topic, sources='the-hindu, the-times-of-india, google-news-in')
for i in x['articles']:
response +=i['description'] + "\n" + i['url'] + '\n\n'
return response | 23.833333 | 98 | 0.702797 |
c099912f3324e1ec1126b75dc273688bb945d4a7 | 17,129 | py | Python | selfdrive/car/hyundai/spdctrlRelaxed.py | Saeed59sa/OPKR084 | 5b087bf325ec8e692513df2f523eaa50062b55c6 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/spdctrlRelaxed.py | Saeed59sa/OPKR084 | 5b087bf325ec8e692513df2f523eaa50062b55c6 | [
"MIT"
] | null | null | null | selfdrive/car/hyundai/spdctrlRelaxed.py | Saeed59sa/OPKR084 | 5b087bf325ec8e692513df2f523eaa50062b55c6 | [
"MIT"
] | null | null | null | #this was initiated by atom(conan)
#partially modified by opkr
import os
import math
import numpy as np
from cereal import car, log
from common.params import Params
from selfdrive.car.hyundai.spdcontroller import SpdController
import common.log as trace1
from selfdrive.controls.lib.events import Events
EventName = car.CarEvent.EventName
class SpdctrlRelaxed(SpdController):
def __init__(self, CP=None):
super().__init__( CP )
self.cv_Raio = 0.45
self.cv_Dist = -5
self.steer_mode = ""
self.cruise_gap = 0.0
self.cut_in = False
self.map_enable = False
self.map_spdlimit_offset = 0
self.target_speed = 0
self.target_speed_camera = 0
self.target_speed_map = 0.0
self.target_speed_map_counter = 0
self.target_speed_map_counter1 = 0
self.target_speed_map_counter2 = 0
self.hesitant_status = False
self.hesitant_timer = 0
self.map_decel_only = Params().get_bool("OpkrMapDecelOnly")
self.map_spdlimit_offset = int(Params().get("OpkrSpeedLimitOffset"))
def update_lead(self, sm, CS, dRel, yRel, vRel):
plan = sm['longitudinalPlan']
dRele = plan.dRel1 #EON Lead
yRele = plan.yRel1 #EON Lead
vRele = plan.vRel1 * 3.6 + 0.5 #EON Lead
dRelef = plan.dRel2 #EON Lead
yRelef = plan.yRel2 #EON Lead
vRelef = plan.vRel2 * 3.6 + 0.5 #EON Lead
lead2_status = plan.status2
self.target_speed_camera = plan.targetSpeedCamera + round(plan.targetSpeedCamera*0.01*self.map_spdlimit_offset)
if self.target_speed_camera <= 29:
self.map_enable = False
self.target_speed = 0
elif self.target_speed_camera > 29:
self.target_speed = self.target_speed_camera
self.map_enable = True
else:
self.target_speed = 0
lead_set_speed = int(round(self.cruise_set_speed_kph))
lead_wait_cmd = 300
dRel = 150
vRel = 0
dRel2 = 140
vRel2 = 0
#dRel, yRel, vRel = self.get_lead( sm, CS )
if 1 < dRele < 149:
dRel = int(dRele) # dRele(이온 차간간격)값 사용
vRel = int(vRele)
elif 1 < CS.lead_distance < 149:
dRel = int(CS.lead_distance) # CS.lead_distance(레이더 차간간격)값 사용
vRel = int(CS.lead_objspd)
else:
dRel = 150
vRel = 0
if 1 < dRelef < 140:
dRel2 = int(dRelef)
vRel2 = int(vRelef) # for cut-in detection??
dst_lead_distance = int(CS.clu_Vanz*self.cv_Raio) # 기준 유지 거리
dst_lead_distance2 = int(CS.clu_Vanz*0.45) # 기준 유지 거리
if dst_lead_distance > 100:
dst_lead_distance = 100
#elif dst_lead_distance < 15:
#dst_lead_distance = 15
if 1 < dRel < 149: #앞차와의 간격이 150미터 미만이면, 즉 앞차가 인식되면,
self.time_no_lean = 0
d_delta = dRel - dst_lead_distance # d_delta = 앞차간격(이온값) - 유지거리
lead_objspd = vRel # 선행차량 상대속도.
else:
d_delta = 0
lead_objspd = 0
if 1 < dRel2 < 140:
d_delta2 = dRel2 - dst_lead_distance2
else:
d_delta2 = 0
if CS.driverAcc_time and not self.map_decel_only: #운전자가 가속페달 밟으면 크루즈 설정속도를 현재속도+1로 동기화
if int(CS.VSetDis) < int(round(CS.clu_Vanz)):
lead_set_speed = int(round(CS.clu_Vanz)) + 1
self.seq_step_debug = "운전자가속"
lead_wait_cmd = 8
elif int(round(self.target_speed)) < int(CS.VSetDis) and self.map_enable and ((int(round(self.target_speed)) < int(round(self.cruise_set_speed_kph))) and self.target_speed != 0):
self.seq_step_debug = "맵기반감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
# 거리 유지 조건
elif d_delta < 0 or d_delta2 < 0 and not self.map_decel_only: # 기준유지거리(현재속도*0.4)보다 가까이 있게 된 상황
if (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and dRele - dRelef > 3 and lead2_status:
self.seq_step_debug = "끼어들기감지"
#lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 15, -5)
self.cut_in = True
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-7) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.3 and int(CS.clu_Vanz) > 80:
self.seq_step_debug = "거리확보3"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-5) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.35 and int(CS.clu_Vanz) > 50:
self.seq_step_debug = "거리확보2"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < 0 and self.cut_in == True and (int(CS.clu_Vanz)-3) <= int(CS.VSetDis) and dRele < int(CS.clu_Vanz)*0.4 and int(CS.clu_Vanz) > 20:
self.seq_step_debug = "거리확보1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif self.cut_in == True and (int(CS.clu_Vanz)-4) <= int(CS.VSetDis):
self.seq_step_debug = "끼어들기감속중"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -1)
elif lead_objspd < -30 or (dRel < 60 and CS.clu_Vanz > 60 and lead_objspd < -5) and (int(CS.clu_Vanz)-6) <= int(CS.VSetDis): # 끼어든 차가 급감속 하는 경우
self.seq_step_debug = "기준내,-5"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -5)
self.cut_in = False
elif lead_objspd < -20 or (dRel < 80 and CS.clu_Vanz > 80 and lead_objspd < -5) and (int(CS.clu_Vanz)-5) <= int(CS.VSetDis): # 끼어든 차가 급감속 하는 경우
self.seq_step_debug = "기준내,-4"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -4)
self.cut_in = False
elif lead_objspd < 0 and int(CS.clu_Vanz)//abs(lead_objspd) <= int(CS.VSetDis)//abs(lead_objspd):
self.seq_step_debug = "기준내-가변"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, max(8, 120-(abs(lead_objspd**3))), -2)
self.cut_in = False
elif lead_objspd >= 0 and int(CS.clu_Vanz) <= int(CS.VSetDis):
self.seq_step_debug = "기준내,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, min(250, 70*(1+lead_objspd/2)), -1)
self.cut_in = False
else:
self.seq_step_debug = "거리유지"
self.cut_in = False
# 선행차량이 멀리 있는 상태에서 감속 조건
elif 20 <= dRel < 149 and lead_objspd < -40 and not self.map_decel_only: #정지 차량 및 급감속 차량 발견 시
self.cut_in = False
self.seq_step_debug = "정차차량 급감속-40"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -40)
elif 20 <= dRel < 149 and lead_objspd < -30 and not self.map_decel_only: #정지 차량 및 급감속 차량 발견 시
self.cut_in = False
self.seq_step_debug = "정차차량 급감속-30"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -30)
elif 20 <= dRel < 149 and lead_objspd < -20 and not self.map_decel_only: #정지 차량 및 급감속 차량 발견 시
self.cut_in = False
self.seq_step_debug = "정차차량 급감속-20"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -20)
elif 20 <= dRel < 149 and lead_objspd < -15 and not self.map_decel_only: #정지 차량 및 급감속 차량 발견 시
self.cut_in = False
if dRel >= 80:
self.seq_step_debug = "정차차량 감속-5"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -5)
if dRel >= 50:
self.seq_step_debug = "정차차량 감속-15"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 8, -15)
elif dRel >= 30:
self.seq_step_debug = "정차차량 감속-15"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, 20, -15)
elif self.cruise_set_speed_kph > int(round((CS.clu_Vanz))) and not self.map_decel_only: #이온설정속도가 차량속도보다 큰경우
self.cut_in = False
if 10 > dRel > 3 and lead_objspd <= 0 and 1 < int(CS.clu_Vanz) <= 7 and CS.VSetDis < 45 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "출발속도조정+5"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 5)
elif 20 > dRel > 3 and lead_objspd > 5 and CS.clu_Vanz <= 25 and CS.VSetDis < 55 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,출발+1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 100, 3)
elif lead_objspd > 9 and CS.clu_Vanz > 20 and CS.VSetDis < 45: # 처음출발시 선행차량 급가속할 때 설정속도 많이 업
self.seq_step_debug = "SS>VS,초가"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 10, 5)
elif lead_objspd > 8 and CS.clu_Vanz > 45 and CS.VSetDis < 60: # 중간속도에서 선행차량 급가속할 때 설정속도 많이 업
self.seq_step_debug = "SS>VS,중가"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
elif lead_objspd > 7 and CS.clu_Vanz > 65 and CS.VSetDis < 80:
self.seq_step_debug = "SS>VS,종가"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 15, 5)
elif lead_objspd > 0 and int(CS.clu_Vanz//lead_objspd) >= int(CS.VSetDis//lead_objspd) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,++1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
elif lead_objspd > 0 and int(CS.clu_Vanz)+lead_objspd >= int(CS.VSetDis) and int(CS.clu_Vanz*0.4) < dRel < 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "SS>VS,+1"
if int(CS.VSetDis) > int(CS.clu_Vanz)+14:
self.hesitant_status = True
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
elif CS.clu_Vanz > 80 and lead_objspd < 0 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.6 and 1 < dRel < 149: # 유지거리 범위 외 감속 조건 앞차 감속중 현재속도/2 아래로 거리 좁혀졌을 때 상대속도에 따라 점진적 감소
self.seq_step_debug = "SS>VS,v>80,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
elif CS.clu_Vanz > 60 and lead_objspd < 0 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*1.8 and 1 < dRel < 149: # 유지거리 범위 외 감속 조건 앞차 감속중 현재속도/2 아래로 거리 좁혀졌을 때 상대속도에 따라 점진적 감소
self.seq_step_debug = "SS>VS,v>60,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
elif CS.clu_Vanz > 40 and lead_objspd < 0 and (int(CS.clu_Vanz)-1) <= int(CS.VSetDis) and int(CS.clu_Vanz) >= dRel*2.1 and 1 < dRel < 149: # 유지거리 범위 외 감속 조건 앞차 감속중 현재속도/2 아래로 거리 좁혀졌을 때 상대속도에 따라 점진적 감소
self.seq_step_debug = "SS>VS,v>40,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 50+(lead_objspd*2)), -1)
elif 70 > CS.clu_Vanz > 30 and lead_objspd < 0 and int(CS.clu_Vanz)//abs(lead_objspd*2.2) <= int(CS.VSetDis)//abs(lead_objspd*2.2) and int(CS.clu_Vanz) >= dRel*0.8 and 1 < dRel < 149:
self.seq_step_debug = "SS>VS,70>v>30,-1"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, max(8, 120-(abs(lead_objspd**3))), -2)
elif 7 < int(CS.clu_Vanz) < 30 and lead_objspd < 0 and CS.VSetDis > 30:
self.seq_step_debug = "SS>VS,30이하"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, -5)
elif lead_objspd == 0 and int(CS.clu_Vanz)+3 <= int(CS.VSetDis) and int(CS.clu_Vanz) > 40 and 1 < dRel < 149: # 앞차와 속도 같을 시 현재속도+5로 크루즈설정속도 유지
self.seq_step_debug = "SS>VS,vRel=0"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, -1)
elif d_delta == 0 and lead_objspd == 0 and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "선행차없음"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 5)
elif d_delta == 0 and lead_objspd == 0 and self.cruise_set_speed_kph > int(CS.VSetDis) and int(CS.clu_Vanz//10) >= int(CS.VSetDis//10) and dRel > 149 and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "점진가속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
elif lead_objspd == 0 and int(CS.clu_Vanz) == 0 and dRel <= 6:
self.seq_step_debug = "출발대기"
else:
self.seq_step_debug = "SS>VS,거리유지"
if self.hesitant_status and self.hesitant_timer > 150:
self.hesitant_status = False
self.hesitant_timer = 0
elif self.hesitant_status:
self.hesitant_timer += 1
# 유지거리 범위 박 점진 감속
elif 20 <= dRel < int(CS.clu_Vanz*0.75) and lead_objspd < 0 and not self.map_decel_only:
self.cut_in = False
if int(CS.clu_Vanz//abs(lead_objspd)) <= int(CS.VSetDis//abs(lead_objspd)):
self.seq_step_debug = "점진감속"
lead_wait_cmd, lead_set_speed = self.get_tm_speed(CS, max(8, 200-(abs(lead_objspd**3))), -1)
elif lead_objspd >= 0 and CS.clu_Vanz >= int(CS.VSetDis) and int(CS.clu_Vanz * 0.5) < dRel < 149 and not self.map_decel_only:
self.cut_in = False
self.seq_step_debug = "속도유지"
elif self.map_decel_only and self.cruise_set_speed_kph > int(round(CS.VSetDis)) and ((int(round(self.target_speed)) > int(CS.VSetDis) and self.target_speed != 0) or self.target_speed == 0):
self.seq_step_debug = "속도원복"
lead_wait_cmd, lead_set_speed = self.get_tm_speed( CS, 8, 1)
else:
self.cut_in = False
self.seq_step_debug = "속도유지"
return lead_wait_cmd, lead_set_speed
def update_curv(self, CS, sm, model_speed):
wait_time_cmd = 0
set_speed = self.cruise_set_speed_kph
# 2. 커브 감속.
#if self.cruise_set_speed_kph >= 100:
if CS.out.cruiseState.modeSel == 1 and Events().names not in [EventName.laneChangeManual, EventName.laneChange] and not (CS.left_blinker_flash or CS.right_blinker_flash)and not self.map_decel_only:
if model_speed < 45 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(45, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.3))
self.seq_step_debug = "커브감속-5"
wait_time_cmd = 8
elif model_speed < 55 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(55, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.25))
self.seq_step_debug = "커브감속-4"
wait_time_cmd = 8
elif model_speed < 65 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(65, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.2))
self.seq_step_debug = "커브감속-3"
wait_time_cmd = 8
elif model_speed < 75 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(75, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.15))
self.seq_step_debug = "커브감속-2"
wait_time_cmd = 8
elif model_speed < 90 and int(CS.clu_Vanz) >= 40 and CS.lead_distance >= 15:
set_speed = min(85, self.cruise_set_speed_kph - int(CS.clu_Vanz * 0.1))
self.seq_step_debug = "커브감속-1"
wait_time_cmd = 8
return wait_time_cmd, set_speed
def update_log(self, CS, set_speed, target_set_speed, long_wait_cmd ):
if CS.out.cruiseState.modeSel == 0:
self.steer_mode = "오파모드"
elif CS.out.cruiseState.modeSel == 1:
self.steer_mode = "차간+커브"
elif CS.out.cruiseState.modeSel == 2:
self.steer_mode = "차간ONLY"
elif CS.out.cruiseState.modeSel == 3:
self.steer_mode = "편도1차선"
if self.cruise_gap != CS.cruiseGapSet:
self.cruise_gap = CS.cruiseGapSet
str3 = 'MODE={:s} VL={:03.0f}/{:03.0f} TM={:03.0f}/{:03.0f} TS={:03.0f}'.format( self.steer_mode, set_speed, CS.VSetDis, long_wait_cmd, self.long_curv_timer, int(round(self.target_speed)) )
str4 = ' RD=D:{:03.0f}/V:{:03.0f} CG={:1.0f} DG={:s}'.format( CS.lead_distance, CS.lead_objspd, self.cruise_gap, self.seq_step_debug )
str5 = str3 + str4
trace1.printf2( str5 )
| 57.287625 | 271 | 0.598984 |
6b8659697af1582dbb1f27f9ba87e4d41a14a4dc | 1,405 | py | Python | fairseq/models/fairseq_decoder.py | jhcross/fairseq | af38ed48bb88cc489d1251e16519c7eef3a2fba7 | [
"BSD-3-Clause"
] | 3 | 2018-11-16T00:51:20.000Z | 2021-03-17T14:56:05.000Z | fairseq/models/fairseq_decoder.py | jhcross/fairseq | af38ed48bb88cc489d1251e16519c7eef3a2fba7 | [
"BSD-3-Clause"
] | null | null | null | fairseq/models/fairseq_decoder.py | jhcross/fairseq | af38ed48bb88cc489d1251e16519c7eef3a2fba7 | [
"BSD-3-Clause"
] | 1 | 2019-08-28T17:58:45.000Z | 2019-08-28T17:58:45.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.nn as nn
import torch.nn.functional as F
class FairseqDecoder(nn.Module):
    """Abstract base class for fairseq decoders.

    Subclasses must implement :meth:`forward` and :meth:`max_positions`;
    :meth:`get_normalized_probs` provides the shared softmax / adaptive
    softmax normalization logic.
    """

    def __init__(self, dictionary):
        super().__init__()
        self.dictionary = dictionary

    def forward(self, prev_output_tokens, encoder_out):
        raise NotImplementedError

    def get_normalized_probs(self, net_output, log_probs, sample):
        """Get normalized probabilities (or log probs) from a net's output.

        When an adaptive softmax is attached, probabilities come from it and
        require ``sample['target']``; otherwise a plain (log-)softmax over
        the final dimension of the logits is returned.
        """
        adaptive = getattr(self, 'adaptive_softmax', None)
        if adaptive is not None:
            assert sample is not None and 'target' in sample
            log_prob = adaptive.get_log_prob(net_output[0], sample['target'])
            return log_prob if log_probs else log_prob.exp_()

        logits = net_output[0].float()
        if log_probs:
            return F.log_softmax(logits, dim=-1)
        return F.softmax(logits, dim=-1)

    def max_positions(self):
        """Maximum input length supported by the decoder."""
        raise NotImplementedError

    def upgrade_state_dict(self, state_dict):
        """Hook for migrating old checkpoints; the base class is a no-op."""
        return state_dict
| 33.452381 | 85 | 0.679004 |
99aa51b54d56cf1dd5276a6d013d4830620fd5db | 634 | py | Python | app/migrations/0006_auto_20171021_1344.py | fossabot/fermentrack | 3070bc14791b1482ec661607005ebda961ca3a8f | [
"MIT"
] | 114 | 2017-03-19T22:51:45.000Z | 2022-01-18T06:00:23.000Z | app/migrations/0006_auto_20171021_1344.py | fossabot/fermentrack | 3070bc14791b1482ec661607005ebda961ca3a8f | [
"MIT"
] | 392 | 2017-03-12T17:09:16.000Z | 2022-03-31T22:08:45.000Z | app/migrations/0006_auto_20171021_1344.py | fossabot/fermentrack | 3070bc14791b1482ec661607005ebda961ca3a8f | [
"MIT"
] | 67 | 2017-03-19T18:11:54.000Z | 2022-01-31T12:12:17.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-10-21 13:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.2. Re-declares the tempFormat field on
    # OldControlConstants (choices F/C, default 'F'); no table change beyond
    # the field alteration.

    dependencies = [
        ('app', '0005_Add Profile Type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='oldcontrolconstants',
            name='tempFormat',
            field=models.CharField(choices=[('F', 'Fahrenheit'), ('C', 'Celsius')], default='F', help_text='This is the temperature format that will be used by the device', max_length=1, verbose_name='Temperature format'),
        ),
    ]
| 30.190476 | 222 | 0.64511 |
4601ef3627278f0039c9777c6c9a489f2a110ca3 | 2,529 | py | Python | run_webcam.py | labibsharrarrayat/activity_clothes_detection | e876ed1e60efa495bfc0f47dfd695f9991336c71 | [
"Apache-2.0"
] | 1 | 2021-10-14T09:37:20.000Z | 2021-10-14T09:37:20.000Z | run_webcam.py | labibsharrarrayat/activity_clothes_detection | e876ed1e60efa495bfc0f47dfd695f9991336c71 | [
"Apache-2.0"
] | null | null | null | run_webcam.py | labibsharrarrayat/activity_clothes_detection | e876ed1e60efa495bfc0f47dfd695f9991336c71 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import time
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
# Module-level logger: everything at DEBUG to the console with timestamps.
logger = logging.getLogger('TfPoseEstimator-WebCam')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)

# Timestamp of the previously displayed frame; used for the FPS overlay.
fps_time = 0
if __name__ == '__main__':
    # Realtime pose estimation from a webcam: grab a frame, run inference,
    # draw the skeletons and FPS overlay, repeat until ESC is pressed.
    parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
    parser.add_argument('--camera', type=int, default=0)
    parser.add_argument('--resize', type=str, default='0x0',
                        help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
    parser.add_argument('--resize-out-ratio', type=float, default=4.0,
                        help='if provided, resize heatmaps before they are post-processed. default=1.0')
    parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
    parser.add_argument('--show-process', type=bool, default=False,
                        help='for debug purpose, if enabled, speed for inference is dropped.')
    args = parser.parse_args()

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resize)
    # Fall back to the model's native input size when no resize is requested.
    if w > 0 and h > 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
    logger.debug('cam read+')
    cam = cv2.VideoCapture(args.camera)
    ret_val, image = cam.read()
    if not ret_val:
        # Fix: the original never checked ret_val and crashed with an
        # unhelpful AttributeError when the camera could not be opened.
        logger.error('cannot read from camera %s', args.camera)
        raise SystemExit(1)
    logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))

    try:
        while True:
            ret_val, image = cam.read()
            if not ret_val:
                # Camera unplugged / stream ended mid-run.
                logger.warning('camera frame grab failed, stopping')
                break

            logger.debug('image process+')
            humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)

            logger.debug('postprocess+')
            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

            logger.debug('show+')
            # Fix: guard against ZeroDivisionError when two frames arrive
            # within the clock resolution.
            elapsed = time.time() - fps_time
            fps = 1.0 / elapsed if elapsed > 0 else 0.0
            cv2.putText(image,
                        "FPS: %f" % fps,
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                        (0, 255, 0), 2)
            cv2.imshow('tf-pose-estimation result', image)
            fps_time = time.time()
            if cv2.waitKey(1) == 27:  # ESC quits
                break
            logger.debug('finished+')
    finally:
        # Fix: release the capture device; the original leaked it.
        cam.release()
        cv2.destroyAllWindows()
ee5e94fdff2cbf172129684253c2b05ec3b717af | 4,380 | py | Python | stable_baselines/ddpg/memory.py | BruceK4t1qbit/stable-baselines | d997d659de54bd14129d0af8df07e7c875cba7e5 | [
"MIT"
] | 49 | 2020-07-24T18:17:12.000Z | 2022-01-04T15:30:52.000Z | stable_baselines/ddpg/memory.py | BruceK4t1qbit/stable-baselines | d997d659de54bd14129d0af8df07e7c875cba7e5 | [
"MIT"
] | 14 | 2020-07-21T20:21:08.000Z | 2022-03-12T00:42:18.000Z | stable_baselines/ddpg/memory.py | BruceK4t1qbit/stable-baselines | d997d659de54bd14129d0af8df07e7c875cba7e5 | [
"MIT"
] | 6 | 2020-01-07T02:23:52.000Z | 2020-10-11T15:42:43.000Z | import numpy as np
class RingBuffer(object):
    """Fixed-capacity circular buffer backed by a preallocated numpy array.

    Once full, each append overwrites the oldest entry; index 0 always
    refers to the oldest element currently stored.
    """

    def __init__(self, maxlen, shape, dtype='float32'):
        """
        :param maxlen: (int) maximum number of entries to keep
        :param shape: (tuple) shape of each stored entry
        :param dtype: (str) numpy dtype name for the backing storage
        """
        self.maxlen = maxlen
        self.start = 0      # physical index of the logically-first element
        self.length = 0     # number of valid entries
        self.data = np.zeros((maxlen,) + shape).astype(dtype)

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Reject out-of-range logical indices before mapping them to the
        # physical ring position.
        if not 0 <= idx < self.length:
            raise KeyError()
        return self.data[(self.start + idx) % self.maxlen]

    def get_batch(self, idxs):
        """Return the stored entries at the given logical indices.

        :param idxs: (int or numpy int array) logical indices
        :return: (np.ndarray) the corresponding entries
        """
        return self.data[(self.start + idxs) % self.maxlen]

    def append(self, var):
        """Store *var*, evicting the oldest entry once the buffer is full.

        :param var: (np.ndarray) the entry to add
        """
        if self.length < self.maxlen:
            # Still growing: just extend the logical length.
            self.length += 1
        elif self.length == self.maxlen:
            # Full: advance the start so the oldest slot gets reused.
            self.start = (self.start + 1) % self.maxlen
        else:
            # Unreachable unless internal state was corrupted.
            raise RuntimeError()
        write_at = (self.start + self.length - 1) % self.maxlen
        self.data[write_at] = var
def array_min2d(arr):
    """Coerce *arr* to a numpy array with at least two dimensions.

    Inputs of dimension < 2 are reshaped into a column vector; anything
    already 2-D or higher is returned unchanged.

    :param arr: ([Any]) array-like input
    :return: (np.ndarray) array of ndim >= 2
    """
    result = np.array(arr)
    if result.ndim < 2:
        return result.reshape(-1, 1)
    return result
class Memory(object):
    """Uniform-sampling replay buffer for DDPG transitions.

    Stores (obs0, action, reward, obs1, terminal1) tuples in parallel ring
    buffers of capacity *limit*.
    """

    def __init__(self, limit, action_shape, observation_shape):
        """
        :param limit: (int) maximum number of transitions to keep
        :param action_shape: (tuple) shape of a single action
        :param observation_shape: (tuple) shape of a single observation
        """
        self.limit = limit
        self.observations0 = RingBuffer(limit, shape=observation_shape)
        self.actions = RingBuffer(limit, shape=action_shape)
        self.rewards = RingBuffer(limit, shape=(1,))
        self.terminals1 = RingBuffer(limit, shape=(1,))
        self.observations1 = RingBuffer(limit, shape=observation_shape)

    def sample(self, batch_size):
        """Draw a uniformly random batch of stored transitions.

        :param batch_size: (int) number of transitions to sample
        :return: (dict) batch arrays keyed by field name
        """
        # Indices are drawn in [1, nb_entries - 2] so every sampled index
        # always has a preceding element.
        idxs = np.random.randint(low=1, high=self.nb_entries - 1, size=batch_size)
        return {
            'obs0': array_min2d(self.observations0.get_batch(idxs)),
            'obs1': array_min2d(self.observations1.get_batch(idxs)),
            'rewards': array_min2d(self.rewards.get_batch(idxs)),
            'actions': array_min2d(self.actions.get_batch(idxs)),
            'terminals1': array_min2d(self.terminals1.get_batch(idxs)),
        }

    def append(self, obs0, action, reward, obs1, terminal1, training=True):
        """Store one transition; a no-op when *training* is False.

        :param obs0: ([float] or [int]) previous observation
        :param action: ([float]) action taken
        :param reward: (float) reward received
        :param obs1: ([float] or [int]) resulting observation
        :param terminal1: (bool) whether the episode ended
        :param training: (bool) whether the model is training
        """
        if not training:
            return
        for buf, value in ((self.observations0, obs0),
                           (self.actions, action),
                           (self.rewards, reward),
                           (self.observations1, obs1),
                           (self.terminals1, terminal1)):
            buf.append(value)

    @property
    def nb_entries(self):
        """Number of transitions currently stored."""
        return len(self.observations0)
| 33.435115 | 89 | 0.611187 |
4024228be1953615310b97b2563f0558359c4f45 | 6,201 | py | Python | benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_software_termination/11-2Nested_false-termination_34.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
import pysmt.typing as types
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
                                              FNode]:
    """Build the symbolic transition system of the nested-loop benchmark.

    Encodes a 3-location program over integers x, y with an error location
    pc = -1. Returns (symbols, init, trans, fairness) where `fairness` is
    the condition pc != -1 that must hold infinitely often.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    # Primed (next-state) copies of the state variables.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)
    symbols = frozenset([pc, x, y])

    m_1 = mgr.Int(-1)

    n_locs = 3
    max_int = n_locs
    ints = []
    pcs = []
    x_pcs = []
    # Integer constants and pc = k / pc' = k predicates for each location.
    for idx in range(n_locs):
        num = mgr.Int(idx)
        ints.append(num)
        pcs.append(mgr.Equals(pc, num))
        x_pcs.append(mgr.Equals(x_pc, num))
    # Extra integer constants beyond the locations (empty here: max_int == n_locs).
    for idx in range(n_locs, max_int):
        num = mgr.Int(idx)
        ints.append(num)

    pcend = mgr.Equals(pc, m_1)
    x_pcend = mgr.Equals(x_pc, m_1)

    init = pcs[0]

    # Control-flow graph of the program counter.
    cfg = []
    # pc = 0 & (x >= 0) -> pc' = 1
    cond = mgr.GE(x, ints[0])
    cfg.append(mgr.Implies(mgr.And(pcs[0], cond), x_pcs[1]))
    # pc = 0 & !(x >= 0) -> pc' = -1
    cfg.append(mgr.Implies(mgr.And(pcs[0], mgr.Not(cond)), x_pcend))
    # pc = 1 -> pc' = 2
    cfg.append(mgr.Implies(pcs[1], x_pcs[2]))
    # pc = 2 -> pc' = 0
    cfg.append(mgr.Implies(pcs[2], x_pcs[0]))
    # pc = -1 -> pc' = -1
    cfg.append(mgr.Implies(pcend, x_pcend))

    # Data updates at each location.
    trans = []
    same_x = mgr.Equals(x_x, x)
    same_y = mgr.Equals(x_y, y)
    same = mgr.And(same_x, same_y)
    # pc = 0 -> same
    trans.append(mgr.Implies(pcs[0], same))
    # pc = 1 -> x' = x + y & same_y
    trans.append(mgr.Implies(pcs[1],
                             mgr.And(mgr.Equals(x_x, mgr.Plus(x, y)),
                                     same_y)))
    # pc = 2 -> same_x & y' = y + 1
    trans.append(mgr.Implies(pcs[2],
                             mgr.And(same_x,
                                     mgr.Equals(x_y, mgr.Plus(y, ints[1])))))
    # pc = end -> same
    trans.append(mgr.Implies(pcend, same))

    trans = mgr.And(*cfg, *trans)

    # Non-termination requires staying out of the error location forever.
    fairness = mgr.Not(mgr.Equals(pc, m_1))

    return symbols, init, trans, fairness
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the candidate (deliberately imperfect) hints for the benchmark.

    Each Hint is an automaton of Locations over a subset of the state
    variables; this benchmark family intentionally mixes useful and wrong
    hints to stress the search.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    pc = mgr.Symbol("pc", types.INT)
    x = mgr.Symbol("x", types.INT)
    y = mgr.Symbol("y", types.INT)
    symbs = frozenset([pc, x, y])

    # Shared integer constants.
    m_100 = mgr.Int(-100)
    m_1 = mgr.Int(-1)
    i_0 = mgr.Int(0)
    i_1 = mgr.Int(1)
    i_2 = mgr.Int(2)
    i_4 = mgr.Int(4)
    i_20 = mgr.Int(20)

    # Primed (next-state) copies of the state variables.
    x_pc = symb_to_next(mgr, pc)
    x_x = symb_to_next(mgr, x)
    x_y = symb_to_next(mgr, y)

    res = []

    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GE(x, i_20), mgr.GE(y, i_1), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
    h_x = Hint("h_x0", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)

    stutter = mgr.Equals(x_y, y)
    loc = Location(env, mgr.TRUE(), mgr.LE(x, i_20), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_y, mgr.Plus(x, y)))
    h_y = Hint("h_y1", env, frozenset([y]), symbs)
    h_y.set_locs([loc])
    res.append(h_y)

    stutter = mgr.Equals(x_x, x)
    loc = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_x, mgr.Plus(x, y)))
    h_x = Hint("h_x1", env, frozenset([x]), symbs)
    h_x.set_locs([loc])
    res.append(h_x)

    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(0, mgr.Equals(x_y, m_100))
    h_y = Hint("h_y3", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)

    loc0 = Location(env, mgr.Equals(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, mgr.Plus(pc, i_1)))
    loc1 = Location(env, mgr.GT(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, i_1))
    h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)

    loc0 = Location(env, mgr.GE(y, m_100), mgr.LE(x, i_20))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(x, y)))
    loc1 = Location(env, mgr.TRUE(), mgr.GE(x, m_100))
    loc1.set_progress(2, mgr.GE(x_y, i_20))
    loc2 = Location(env, mgr.TRUE())
    loc2.set_progress(0, mgr.And(mgr.GE(x_y, m_100), mgr.LE(x_y, i_0)))
    h_y = Hint("h_y4", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1, loc2])
    res.append(h_y)

    loc0 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc0.set_progress(1, mgr.Equals(x_x, mgr.Times(x, y)))
    loc1 = Location(env, mgr.GE(x, i_1), mgr.GE(y, i_1))
    loc1.set_progress(2, mgr.GT(x_x, y))
    loc2 = Location(env, mgr.GE(x, i_2))
    loc2.set_progress(0, mgr.GE(x_x, i_20))
    h_x = Hint("h_x4", env, frozenset([x]), symbs)
    h_x.set_locs([loc0, loc1, loc2])
    res.append(h_x)

    # Trivial self-loop hint on pc.
    loc0 = Location(env, mgr.TRUE())
    loc0.set_progress(0, mgr.TRUE())
    h_pc = Hint("h_pc1", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0])
    res.append(h_pc)

    loc0 = Location(env, mgr.GE(y, m_100))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Times(y, y)))
    loc1 = Location(env, mgr.GE(y, i_0))
    loc1.set_progress(0, mgr.GE(x_y, mgr.Plus(y, i_1)))
    h_y = Hint("h_y6", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)

    loc0 = Location(env, mgr.LE(pc, i_1))
    loc0.set_progress(1, mgr.GT(x_pc, pc))
    loc1 = Location(env, mgr.LE(pc, i_2))
    loc1.set_progress(0, mgr.Equals(x_pc, mgr.Div(pc, pc)))
    h_pc = Hint("h_pc3", env, frozenset([pc]), symbs)
    h_pc.set_locs([loc0, loc1])
    res.append(h_pc)

    loc0 = Location(env, mgr.GE(y, i_0), mgr.GE(pc, i_1))
    loc0.set_progress(1, mgr.Equals(x_y, mgr.Plus(y, pc)))
    loc1 = Location(env, mgr.GE(y, i_1))
    loc1.set_progress(0, mgr.Equals(x_y, y))
    h_y = Hint("h_y7", env, frozenset([y]), symbs)
    h_y.set_locs([loc0, loc1])
    res.append(h_y)

    return frozenset(res)
| 30.397059 | 77 | 0.583132 |
269aea0a91330b1d96a370f976b0ad94445c1f1d | 2,822 | py | Python | jarviscli/packages/timeIn.py | KrishnaSai2020/Jarvis | e6b184358833c33febf877e71ed0a0e15d46c05f | [
"MIT"
] | null | null | null | jarviscli/packages/timeIn.py | KrishnaSai2020/Jarvis | e6b184358833c33febf877e71ed0a0e15d46c05f | [
"MIT"
] | null | null | null | jarviscli/packages/timeIn.py | KrishnaSai2020/Jarvis | e6b184358833c33febf877e71ed0a0e15d46c05f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import shutil
import json
import requests
from colorama import Fore
# Resolve the package's data directory relative to this module's location,
# not relative to the caller's working directory.
module_path = os.path.dirname(__file__)
module_path = module_path + '/../data/'
def main(self, s):
    """Print the current local date and time for the place named in *s*.

    *s* is the raw user command (e.g. "time in berlin"); the leading
    "time "/"in " words are stripped before geocoding. Requires a Google
    Geocoding API key in jarviscli/data/key_timein.json; when the key file
    is missing a sample is copied into place and instructions are printed.
    """
    # Trim the command words so only the city/region name remains.
    place = s.replace('time ', '').replace('in ', '')

    key_file = module_path + 'key_timein.json'
    if not os.path.isfile(key_file):
        # First run: install the sample key file and tell the user how to
        # obtain a real key.
        shutil.copy2(module_path + 'samplekey_timein.json', key_file)
        print(
            Fore.RED
            + "Generate api key here: https://developers.google.com/maps/documentation/geocoding/start?hl=en_US")
        print(
            Fore.RED
            + "and add it to jarviscli/data/key_timein.json"
            + Fore.RESET)
        return

    # City/region name -> coordinates via the Google Maps Geocoding API.
    loc = getLocation(place)
    if loc is None:
        return

    # Coordinates -> local date/time via the TimeZoneDB API.
    send_url = (
        "http://api.timezonedb.com/v2/get-time-zone?"
        "key=BFA6XBCZ8AL5&format=json"
        "&by=position&lat={:.6f}&lng={:.6f}".format(*loc)
    )
    payload = json.loads(requests.get(send_url).text)
    time = payload['formatted']
    self.dst = payload['dst']

    # Current date and time as YYYY-MM-DD HH:MM:SS.
    print("{COLOR}The current date and time in {LOC} is: {TIME}{COLOR_RESET}"
          .format(COLOR=Fore.MAGENTA, COLOR_RESET=Fore.RESET,
                  LOC=str(place).title(), TIME=str(time)))
def getLocation(s):
    """Resolve a place name to (latitude, longitude) via Google Geocoding.

    Returns None (after printing setup instructions) when no API key has
    been configured. Raises Exception carrying the raw response body when
    the geocoding response cannot be interpreted.
    """
    file_path = module_path + 'key_timein.json'
    with open(file_path) as json_file:
        data = json.load(json_file)

    # Refuse to call the API with the placeholder key from the sample file.
    if 'timein' not in data or data['timein'] == 'insertyourkeyhere':
        print(Fore.RED + "API key not added")
        print(
            Fore.RED
            + "Generate api key here: https://developers.google.com/maps/documentation/geocoding/start?hl=en_US")
        print(
            Fore.RED
            + "and add it to jarviscli/data/key_timein.json"
            + Fore.RESET)
        return None

    key = data['timein']
    send_url = (
        "https://maps.googleapis.com/maps/api/geocode/json?address={0}&key={1}".format(s, key))
    r = requests.get(send_url)
    j = json.loads(r.text)
    try:
        lat = j['results'][0]['geometry']['location']['lat']  # Latitude
        lng = j['results'][0]['geometry']['location']['lng']  # Longitude
        return lat, lng
    except (KeyError, IndexError):
        # Fix: error responses have no 'results' key, which previously
        # escaped as a raw KeyError instead of reaching the informative
        # Exception below; both failure shapes now fall through.
        pass
    raise Exception(r.text)
abe4bd28d3a249c9a71b96e1238277ebdb8b9d48 | 806 | py | Python | hawkdet/models/detor/retinaface.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | hawkdet/models/detor/retinaface.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | hawkdet/models/detor/retinaface.py | itisianlee/hawk-facedet | 55774ac5619f9a4c76a3a872ff11940a874b32d1 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from ..build import detor_registry
from hawkdet.models.utils import IntermediateLayerGetter
class RetinaFace(nn.Module):
    """Face detector assembled from three pluggable parts.

    The backbone extracts intermediate feature maps, the stem (neck)
    transforms them, and the head produces the final detection outputs.
    """

    def __init__(self, backbone, stem, head, backbone_return_layers):
        """
        :param backbone: feature-extraction network
        :param stem: neck module applied to the backbone features
        :param head: detection head producing the final outputs
        :param backbone_return_layers: mapping of backbone layer names whose
            outputs should be captured by ``IntermediateLayerGetter``
        """
        super().__init__()
        # Wrap the backbone so it yields the named intermediate layers
        # instead of only its final output.
        self.backbone = IntermediateLayerGetter(backbone, backbone_return_layers)
        self.stem = stem
        self.head = head

    def forward(self, x):
        """Run backbone -> stem -> head and return the head's output."""
        features = self.backbone(x)
        features = self.stem(features)
        return self.head(features)
@detor_registry.register()
def retinaface(backbone, stem, head, backbone_return_layers):
return RetinaFace(backbone, stem, head, backbone_return_layers) | 27.793103 | 81 | 0.662531 |
6f11e64f4d22fe77c3e513a2870b78d09e908896 | 1,000 | py | Python | flightradar24/helpers.py | carlocorradini/flightradar24 | 6409a4c2bfcc51ed502707768c85d5c4868ba295 | [
"MIT"
] | null | null | null | flightradar24/helpers.py | carlocorradini/flightradar24 | 6409a4c2bfcc51ed502707768c85d5c4868ba295 | [
"MIT"
] | null | null | null | flightradar24/helpers.py | carlocorradini/flightradar24 | 6409a4c2bfcc51ed502707768c85d5c4868ba295 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import requests
def api_request(end_point, proxies=None):
    """Perform a GET request against a Flightradar24 endpoint and return parsed JSON.

    :param end_point: full URL of the API endpoint to query
    :param proxies: optional proxies dict passed straight to ``requests.get``
    :returns: decoded JSON response body
    :raises RuntimeError: for 402, 403, 404 and 500 responses
    """
    # Browser-like headers; the API rejects requests without a plausible
    # user-agent/origin.
    request_base_headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0) Gecko/20100101 Firefox/47.0",
        "accept": "application/jsoN",
        "accept-language": "en-EN",
        "cache-control": "max-age=0",
        "origin": "https://www.flightradar24.com",
        "referer": "https://www.flightradar24.com/"
    }
    r = requests.get(end_point, headers=request_base_headers, proxies=proxies)
    # NOTE(review): debug output left in by the original author — consider
    # removing or routing through logging.
    print(end_point, r.headers, r.text)
    # BUG FIX: the original compared ``r.status_code is 402`` etc.  ``is``
    # tests object identity and CPython only caches small ints (-5..256), so
    # those identity checks were always False and the error branches could
    # never fire.  Use equality via a membership test instead.
    error_suffixes = {
        402: " requires payment",
        403: " is Forbidden",
        404: " is NotFound",
        500: " returns InternalServerError",
    }
    if r.status_code in error_suffixes:
        raise RuntimeError("Request to " + end_point + error_suffixes[r.status_code])
    return r.json()
| 37.037037 | 102 | 0.64 |
ad6e5dbc1add021f46214617e84a717ee5b16e87 | 648 | py | Python | Basics/splitmergeColor.py | itahirmasood/open_cv | 70233d2c1d76e266f990a1a762f37b049be8ad39 | [
"MIT"
] | null | null | null | Basics/splitmergeColor.py | itahirmasood/open_cv | 70233d2c1d76e266f990a1a762f37b049be8ad39 | [
"MIT"
] | null | null | null | Basics/splitmergeColor.py | itahirmasood/open_cv | 70233d2c1d76e266f990a1a762f37b049be8ad39 | [
"MIT"
] | null | null | null | import cv2 as cv
import numpy as np
# Demo script: split a BGR image into its color channels, visualise each
# channel both as a colored image and (commented out) as grayscale, then
# merge the channels back into the original image.
img=cv.imread('bhola.jpg')
cv.imshow('MachoMan',img)
# Single-channel zero image with the same height/width, used as the "off"
# channels when re-colorising a single split channel.
blank=np.zeros(img.shape[:2],dtype='uint8')
# OpenCV loads images in B, G, R channel order.
b,g,r=cv.split(img)
# To see each color channel separately, merge it with blank planes so the
# result is displayed in that channel's actual color.
blue=cv.merge([b,blank,blank])
green=cv.merge([blank,g,blank])
red=cv.merge([blank,blank,r])
cv.imshow('Blue',blue)
cv.imshow('Green',green)
cv.imshow('Red',red)
# Showing a raw split channel directly would render in grayscale:
# cv.imshow('Blue',b)
# cv.imshow('Green',g)
# cv.imshow('Red',r)
# The original has 3 channels; each split channel is 2-D (H, W).
print(img.shape)
print(b.shape)
print(g.shape)
print(r.shape)
# Merge the channels back to reconstruct the original image.
merged=cv.merge([b,g,r])
cv.imshow('merged',merged)
cv.waitKey(0) | 17.052632 | 44 | 0.655864 |
c82fbdd4d5d920d449086511ce787b618912d5f1 | 592 | py | Python | codes/src/util/svnversion.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | 2 | 2018-05-26T22:09:32.000Z | 2018-06-25T21:46:32.000Z | codes/src/util/svnversion.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | 16 | 2018-05-17T21:38:44.000Z | 2022-03-11T23:21:25.000Z | codes/src/util/svnversion.py | CorbinFoucart/FEMexperiment | 9bad34d9ed7cbdd740e3a4b67f433779dd53b264 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""@file svnversion.py
Produces a compact version identifier for the working copy
@author Chris Mirabito (mirabito@mit.edu)
"""
import subprocess
def svnversion():
    """Produce a compact version identifier for the working copy.

    Runs the ``svnversion`` command through the shell and returns its raw
    stdout as bytes; stderr is captured and discarded.

    @return SVN version identifier (raw stdout bytes of the subprocess)
    """
    proc = subprocess.Popen(
        'svnversion',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    out, _err = proc.communicate()
    return out
| 28.190476 | 69 | 0.60473 |
58c6f04a856372311d82a998b83ad5ede9521910 | 3,173 | py | Python | st2common/tests/unit/test_ip_utils.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 1 | 2020-11-09T21:05:33.000Z | 2020-11-09T21:05:33.000Z | st2common/tests/unit/test_ip_utils.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | 3 | 2021-03-25T23:57:10.000Z | 2021-03-26T00:01:05.000Z | st2common/tests/unit/test_ip_utils.py | machao19902/st2 | 6768a529af1b3c12109cbfeae19d3cf7fdb71bb7 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest2
from st2common.util.ip_utils import split_host_port
class IPUtilsTests(unittest2.TestCase):
    """Tests for ``split_host_port`` covering IPv4, IPv6, bracketed forms
    and plain hostnames."""

    def test_host_port_split(self):
        # (input, expected host, expected port) triples.
        valid_cases = [
            ('1.2.3.4', '1.2.3.4', None),             # simple IPv4
            ('1.2.3.4:55', '1.2.3.4', 55),            # IPv4 with port
            ('fec2::10', 'fec2::10', None),           # simple IPv6
            ('[fec2::10]', 'fec2::10', None),         # bracketed IPv6, no port
            ('[fec2::10]:55', 'fec2::10', 55),        # bracketed IPv6 with port
            ('[1.2.3.4]', '1.2.3.4', None),           # IPv4 inside brackets
            ('[1.2.3.4]:55', '1.2.3.4', 55),          # bracketed IPv4 with port
            ('[st2build001]:55', 'st2build001', 55),  # hostname inside brackets
            ('st2build001', 'st2build001', None),     # simple hostname
            ('st2build001:55', 'st2build001', 55),    # hostname with port
        ]
        for host_str, expected_host, expected_port in valid_cases:
            host, port = split_host_port(host_str)
            self.assertEqual(host, expected_host)
            self.assertEqual(port, expected_port)

        # Non-numeric ports must raise, bracketed or not.
        for host_str in ('st2build001:abc', '[fec2::10]:abc'):
            self.assertRaises(Exception, split_host_port, host_str)
| 34.11828 | 74 | 0.655216 |
a50c02899736e8c201534084f4b23f947331b33a | 60,338 | py | Python | lib/galaxy/managers/workflows.py | KyleL1998/galaxy | 10be2cd8ac05680f8291eea7996f4d3fc76197de | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/managers/workflows.py | KyleL1998/galaxy | 10be2cd8ac05680f8291eea7996f4d3fc76197de | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/managers/workflows.py | KyleL1998/galaxy | 10be2cd8ac05680f8291eea7996f4d3fc76197de | [
"CC-BY-3.0"
] | null | null | null | from __future__ import absolute_import
import json
import logging
import os
import uuid
from collections import namedtuple
from gxformat2 import (
from_galaxy_native,
ImporterGalaxyInterface,
ImportOptions,
python_to_workflow,
)
from gxformat2.converter import ordered_load
from six import string_types
from sqlalchemy import and_
from sqlalchemy.orm import joinedload, subqueryload
from galaxy import (
exceptions,
model,
util
)
from galaxy.jobs.actions.post import ActionBox
from galaxy.model.item_attrs import UsesAnnotations
from galaxy.tools.parameters import (
params_to_incoming,
visit_input_values
)
from galaxy.tools.parameters.basic import (
DataCollectionToolParameter,
DataToolParameter,
RuntimeValue,
workflow_building_modes
)
from galaxy.util.json import safe_loads
from galaxy.util.sanitize_html import sanitize_html
from galaxy.web import url_for
from galaxy.workflow.modules import (
is_tool_module_type,
module_factory,
ToolModule,
WorkflowModuleInjector
)
from galaxy.workflow.resources import get_resource_mapper_function
from galaxy.workflow.steps import attach_ordered_steps
from .base import decode_id
log = logging.getLogger(__name__)
class WorkflowsManager(object):
    """ Handle CRUD type operations related to workflows. More interesting
    stuff regarding workflow execution, step sorting, etc... can be found in
    the galaxy.workflow module.
    """

    def __init__(self, app):
        # app: the Galaxy application object; used for id decoding and model access.
        self.app = app

    def get_stored_workflow(self, trans, workflow_id):
        """ Use a supplied ID (UUID or encoded stored workflow ID) to find
        a workflow.

        Raises ObjectNotFound if no StoredWorkflow matches.  No security
        check is performed here — see get_stored_accessible_workflow.
        """
        if util.is_uuid(workflow_id):
            # see if they have passed in the UUID for a workflow that is attached to a stored workflow
            workflow_uuid = uuid.UUID(workflow_id)
            workflow_query = trans.sa_session.query(trans.app.model.StoredWorkflow).filter(and_(
                trans.app.model.StoredWorkflow.latest_workflow_id == trans.app.model.Workflow.id,
                trans.app.model.Workflow.uuid == workflow_uuid
            ))
        else:
            workflow_id = decode_id(self.app, workflow_id)
            workflow_query = trans.sa_session.query(trans.app.model.StoredWorkflow).\
                filter(trans.app.model.StoredWorkflow.id == workflow_id)
        # Eager-load annotations/tags and the latest workflow's steps to avoid
        # N+1 queries when the result is serialized.
        stored_workflow = workflow_query.options(joinedload('annotations'),
                                                 joinedload('tags'),
                                                 subqueryload('latest_workflow').joinedload('steps').joinedload('*')).first()
        if stored_workflow is None:
            raise exceptions.ObjectNotFound("No such workflow found.")
        return stored_workflow

    def get_stored_accessible_workflow(self, trans, workflow_id):
        """ Get a stored workflow from a encoded stored workflow id and
        make sure it accessible to the user.

        Access is granted to the owner, admins, published workflows, and
        users the workflow was explicitly shared with.
        """
        stored_workflow = self.get_stored_workflow(trans, workflow_id)
        # check to see if user has permissions to selected workflow
        if stored_workflow.user != trans.user and not trans.user_is_admin and not stored_workflow.published:
            if trans.sa_session.query(trans.app.model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
                message = "Workflow is not owned by or shared with current user"
                raise exceptions.ItemAccessibilityException(message)
        return stored_workflow

    def get_owned_workflow(self, trans, encoded_workflow_id):
        """ Get a workflow (non-stored) from a encoded workflow id and
        make sure it accessible to the user.
        """
        workflow_id = decode_id(self.app, encoded_workflow_id)
        workflow = trans.sa_session.query(model.Workflow).get(workflow_id)
        self.check_security(trans, workflow, check_ownership=True)
        return workflow

    def check_security(self, trans, has_workflow, check_ownership=True, check_accessible=True):
        """ check accessibility or ownership of workflows, storedworkflows, and
        workflowinvocations. Throw an exception or returns True if user has
        needed level of access.
        """
        if not check_ownership and not check_accessible:
            return True
        # If given an invocation verify ownership of invocation
        if isinstance(has_workflow, model.WorkflowInvocation):
            # We use the owner of the history that is associated to the invocation as a proxy
            # for the owner of the invocation.
            if trans.user != has_workflow.history.user and not trans.user_is_admin:
                raise exceptions.ItemOwnershipException()
            else:
                return True
        # stored workflow contains security stuff - follow that workflow to
        # that unless given a stored workflow.
        if isinstance(has_workflow, model.Workflow):
            stored_workflow = has_workflow.top_level_stored_workflow
        else:
            stored_workflow = has_workflow
        if stored_workflow.user != trans.user and not trans.user_is_admin:
            if check_ownership:
                raise exceptions.ItemOwnershipException()
            # else check_accessible...
            if trans.sa_session.query(model.StoredWorkflowUserShareAssociation).filter_by(user=trans.user, stored_workflow=stored_workflow).count() == 0:
                raise exceptions.ItemAccessibilityException()
        return True

    def get_invocation(self, trans, decoded_invocation_id):
        """Fetch a WorkflowInvocation by decoded id, verifying ownership.

        Raises ObjectNotFound (with the re-encoded id in the message) when
        no invocation exists.
        """
        workflow_invocation = trans.sa_session.query(
            self.app.model.WorkflowInvocation
        ).get(decoded_invocation_id)
        if not workflow_invocation:
            encoded_wfi_id = trans.security.encode_id(decoded_invocation_id)
            message = "'%s' is not a valid workflow invocation id" % encoded_wfi_id
            raise exceptions.ObjectNotFound(message)
        self.check_security(trans, workflow_invocation, check_ownership=True, check_accessible=False)
        return workflow_invocation

    def cancel_invocation(self, trans, decoded_invocation_id):
        """Cancel an active invocation owned by the current user and flush.

        Raises MessageException when the invocation is no longer active.
        """
        workflow_invocation = self.get_invocation(trans, decoded_invocation_id)
        cancelled = workflow_invocation.cancel()
        if cancelled:
            trans.sa_session.add(workflow_invocation)
            trans.sa_session.flush()
        else:
            # TODO: More specific exception?
            raise exceptions.MessageException("Cannot cancel an inactive workflow invocation.")
        return workflow_invocation

    def get_invocation_step(self, trans, decoded_workflow_invocation_step_id):
        """Fetch a WorkflowInvocationStep by decoded id, verifying ownership
        of the parent invocation."""
        try:
            workflow_invocation_step = trans.sa_session.query(
                model.WorkflowInvocationStep
            ).get(decoded_workflow_invocation_step_id)
        except Exception:
            raise exceptions.ObjectNotFound()
        self.check_security(trans, workflow_invocation_step.workflow_invocation, check_ownership=True, check_accessible=False)
        return workflow_invocation_step

    def update_invocation_step(self, trans, decoded_workflow_invocation_step_id, action):
        """Apply ``action`` to a step of an active invocation (e.g. a pause
        step decision) and persist the performed action."""
        if action is None:
            raise exceptions.RequestParameterMissingException("Updating workflow invocation step requires an action parameter. ")
        workflow_invocation_step = self.get_invocation_step(trans, decoded_workflow_invocation_step_id)
        workflow_invocation = workflow_invocation_step.workflow_invocation
        if not workflow_invocation.active:
            raise exceptions.RequestParameterInvalidException("Attempting to modify the state of an completed workflow invocation.")
        step = workflow_invocation_step.workflow_step
        module = module_factory.from_workflow_step(trans, step)
        performed_action = module.do_invocation_step_action(step, action)
        workflow_invocation_step.action = performed_action
        trans.sa_session.add(workflow_invocation_step)
        trans.sa_session.flush()
        return workflow_invocation_step

    def build_invocations_query(self, trans, stored_workflow_id=None, history_id=None, user_id=None):
        """Get invocations owned by the current user.

        Despite the name, this returns a *list* of WorkflowInvocation
        objects (the query is executed and filtered through check_security),
        optionally narrowed by workflow, history and/or user.
        """
        sa_session = trans.sa_session
        invocations_query = sa_session.query(model.WorkflowInvocation)
        if stored_workflow_id is not None:
            stored_workflow = sa_session.query(model.StoredWorkflow).get(stored_workflow_id)
            if not stored_workflow:
                raise exceptions.ObjectNotFound()
            invocations_query = invocations_query.join(
                model.Workflow
            ).filter(
                model.Workflow.table.c.stored_workflow_id == stored_workflow_id
            )
        if user_id is not None:
            invocations_query = invocations_query.join(
                model.History
            ).filter(
                model.History.table.c.user_id == user_id
            )
        if history_id is not None:
            invocations_query = invocations_query.filter(
                model.WorkflowInvocation.table.c.history_id == history_id
            )
        # check_security raises for non-owned invocations rather than
        # returning False, so this comprehension either returns all rows or
        # propagates the ownership error.
        return [inv for inv in invocations_query if self.check_security(trans,
                                                                        inv,
                                                                        check_ownership=True,
                                                                        check_accessible=False)]

    def serialize_workflow_invocation(self, invocation, **kwd):
        """Serialize a single invocation to a dict with all ids encoded.

        Recognized kwd options: ``view`` (default "element"),
        ``step_details`` and ``legacy_job_state`` (both string-as-bool).
        """
        app = self.app
        view = kwd.get("view", "element")
        step_details = util.string_as_bool(kwd.get('step_details', False))
        legacy_job_state = util.string_as_bool(kwd.get('legacy_job_state', False))
        as_dict = invocation.to_dict(view, step_details=step_details, legacy_job_state=legacy_job_state)
        return app.security.encode_all_ids(as_dict, recursive=True)

    def serialize_workflow_invocations(self, invocations, **kwd):
        """Serialize a list of invocations, defaulting to the terser
        "collection" view."""
        if "view" not in kwd:
            kwd["view"] = "collection"
        return list(map(lambda i: self.serialize_workflow_invocation(i, **kwd), invocations))
# Result of WorkflowContentsManager.build_workflow_from_raw_description:
# the persisted StoredWorkflow (None when create_stored_workflow=False), the
# Workflow model object, and the list of missing-tool tuples
# (tool_id, tool_name, tool_version, step external id).
CreatedWorkflow = namedtuple("CreatedWorkflow", ["stored_workflow", "workflow", "missing_tools"])
class WorkflowContentsManager(UsesAnnotations):
    def __init__(self, app):
        # app: the Galaxy application object.
        self.app = app
        # Callable resolving workflow scheduling resource parameters for a
        # (trans, stored_workflow, workflow) triple; built once from config.
        self._resource_mapper_function = get_resource_mapper_function(app)
def ensure_raw_description(self, dict_or_raw_description):
if not isinstance(dict_or_raw_description, RawWorkflowDescription):
dict_or_raw_description = RawWorkflowDescription(dict_or_raw_description)
return dict_or_raw_description
    def normalize_workflow_format(self, trans, as_dict):
        """Process incoming workflow descriptions for consumption by other methods.

        Currently this mostly means converting format 2 workflows into standard Galaxy
        workflow JSON for consumption for the rest of this module. In the future we will
        want to be a lot more precise about this - preserve the original description
        alongside the data model and apply updates in a way that largely preserves YAML
        structure so workflows can be extracted.

        Returns a RawWorkflowDescription wrapping the normalized dict (and the
        on-disk path when loaded via src == "from_path").
        """
        workflow_directory = None
        workflow_path = None
        if as_dict.get("src", None) == "from_path":
            # Loading arbitrary server-side paths is admin-only.
            if not trans.user_is_admin:
                raise exceptions.AdminRequiredException()
            workflow_path = as_dict.get("path")
            with open(workflow_path, "r") as f:
                as_dict = ordered_load(f)
            workflow_directory = os.path.normpath(os.path.dirname(workflow_path))
        workflow_class = as_dict.get("class", None)
        if workflow_class == "GalaxyWorkflow" or "$graph" in as_dict or "yaml_content" in as_dict:
            # Format 2 Galaxy workflow - convert to native JSON via gxformat2.
            galaxy_interface = Format2ConverterGalaxyInterface()
            import_options = ImportOptions()
            import_options.deduplicate_subworkflows = True
            as_dict = python_to_workflow(as_dict, galaxy_interface, workflow_directory=workflow_directory, import_options=import_options)
        return RawWorkflowDescription(as_dict, workflow_path)
    def build_workflow_from_raw_description(
        self,
        trans,
        raw_workflow_description,
        source=None,
        add_to_menu=False,
        publish=False,
        create_stored_workflow=True,
        exact_tools=True,
        fill_defaults=False,
    ):
        """Build (and optionally persist) a Workflow from a raw description.

        :param source: human-readable origin; appended to the workflow name
        :param add_to_menu: also add the stored workflow to the user's menu
        :param publish: mark the new stored workflow as published
        :param create_stored_workflow: when False, only a bare Workflow is
            persisted and the returned stored_workflow is None
        :param exact_tools: passed through to step module resolution
        :param fill_defaults: passed through to step module resolution
        :returns: CreatedWorkflow(stored_workflow, workflow, missing_tools)
        """
        data = raw_workflow_description.as_dict
        # Put parameters in workflow mode
        trans.workflow_building_mode = workflow_building_modes.ENABLED
        # If there's a source, put it in the workflow name.
        if source:
            name = "%s (imported from %s)" % (data['name'], source)
        else:
            name = data['name']
        workflow, missing_tool_tups = self._workflow_from_raw_description(
            trans,
            raw_workflow_description,
            name=name,
            exact_tools=exact_tools,
            fill_defaults=fill_defaults,
        )
        if 'uuid' in data:
            workflow.uuid = data['uuid']
        if create_stored_workflow:
            # Connect up the Workflow to a new StoredWorkflow owned by the
            # current user.
            stored = model.StoredWorkflow()
            stored.from_path = raw_workflow_description.workflow_path
            stored.name = workflow.name
            workflow.stored_workflow = stored
            stored.latest_workflow = workflow
            stored.user = trans.user
            stored.published = publish
            if data['annotation']:
                annotation = sanitize_html(data['annotation'])
                self.add_item_annotation(trans.sa_session, stored.user, stored, annotation)
            workflow_tags = data.get('tags', [])
            trans.app.tag_handler.set_tags_from_list(user=trans.user, item=stored, new_tags_list=workflow_tags)
            # Persist
            trans.sa_session.add(stored)
            if add_to_menu:
                if trans.user.stored_workflow_menu_entries is None:
                    trans.user.stored_workflow_menu_entries = []
                menuEntry = model.StoredWorkflowMenuEntry()
                menuEntry.stored_workflow = stored
                trans.user.stored_workflow_menu_entries.append(menuEntry)
        else:
            stored = None
            # Persist
            trans.sa_session.add(workflow)
        trans.sa_session.flush()
        return CreatedWorkflow(
            stored_workflow=stored,
            workflow=workflow,
            missing_tools=missing_tool_tups
        )
    def update_workflow_from_raw_description(self, trans, stored_workflow, raw_workflow_description, **kwds):
        """Create a new Workflow revision for ``stored_workflow`` from a raw
        description and make it the latest.

        Raises MissingToolsException listing per-step errors when any tool the
        description requires is unavailable.  Returns ``(workflow, errors)``
        where errors is a list of human-readable validation warnings.
        """
        raw_workflow_description = self.ensure_raw_description(raw_workflow_description)
        # Put parameters in workflow mode
        trans.workflow_building_mode = workflow_building_modes.ENABLED
        workflow, missing_tool_tups = self._workflow_from_raw_description(
            trans,
            raw_workflow_description,
            name=stored_workflow.name,
            **kwds
        )
        if missing_tool_tups:
            errors = []
            for missing_tool_tup in missing_tool_tups:
                # missing_tool_tup is (tool_id, name, version, step external id).
                errors.append("Step %i: Requires tool '%s'." % (int(missing_tool_tup[3]) + 1, missing_tool_tup[0]))
            raise MissingToolsException(workflow, errors)
        # Connect up
        workflow.stored_workflow = stored_workflow
        stored_workflow.latest_workflow = workflow
        # Persist
        trans.sa_session.flush()
        # Keep the on-disk copy in sync for path-backed workflows.
        if stored_workflow.from_path:
            self._sync_stored_workflow(trans, stored_workflow)
        # Return something informative
        errors = []
        if workflow.has_errors:
            errors.append("Some steps in this workflow have validation errors")
        if workflow.has_cycles:
            errors.append("This workflow contains cycles")
        return workflow, errors
    def _workflow_from_raw_description(self, trans, raw_workflow_description, name, **kwds):
        """Build an (unpersisted) Workflow model from a raw description.

        Returns ``(workflow, missing_tool_tups)`` where missing_tool_tups
        lists tools referenced by steps but absent from this instance.
        """
        data = raw_workflow_description.as_dict
        if isinstance(data, string_types):
            data = json.loads(data)
        # Create new workflow from source data
        workflow = model.Workflow()
        workflow.name = name
        # Assume no errors until we find a step that has some
        workflow.has_errors = False
        # Create each step
        steps = []
        # The editor will provide ids for each step that we don't need to save,
        # but do need to use to make connections
        steps_by_external_id = {}
        # Preload dependent workflows with locally defined content_ids.
        subworkflows = data.get("subworkflows")
        subworkflow_id_map = None
        if subworkflows:
            subworkflow_id_map = {}
            for key, subworkflow_dict in subworkflows.items():
                subworkflow = self.__build_embedded_subworkflow(trans, subworkflow_dict, **kwds)
                subworkflow_id_map[key] = subworkflow
        # Keep track of tools required by the workflow that are not available in
        # the local Galaxy instance. Each tuple in the list of missing_tool_tups
        # will be ( tool_id, tool_name, tool_version, step external id ).
        missing_tool_tups = []
        # First pass resolves subworkflow references on each step dict.
        for step_dict in self.__walk_step_dicts(data):
            self.__load_subworkflows(trans, step_dict, subworkflow_id_map, **kwds)
        for step_dict in self.__walk_step_dicts(data):
            module, step = self.__module_from_dict(trans, steps, steps_by_external_id, step_dict, **kwds)
            is_tool = is_tool_module_type(module.type)
            if is_tool and module.tool is None:
                missing_tool_tup = (module.tool_id, module.get_name(), module.tool_version, step_dict['id'])
                if missing_tool_tup not in missing_tool_tups:
                    missing_tool_tups.append(missing_tool_tup)
            if module.get_errors():
                workflow.has_errors = True
        # Second pass to deal with connections between steps
        self.__connect_workflow_steps(steps, steps_by_external_id)
        # Order the steps if possible
        attach_ordered_steps(workflow, steps)
        return workflow, missing_tool_tups
    def workflow_to_dict(self, trans, stored, style="export", version=None):
        """ Export the workflow contents to a dictionary ready for JSON-ification and to be
        sent out via API for instance. There are three styles of export allowed 'export', 'instance', and
        'editor'. The Galaxy team will do its best to preserve the backward compatibility of the
        'export' style - this is the export method meant to be portable across Galaxy instances and over
        time. The 'editor' style is subject to rapid and unannounced changes. The 'instance' export
        option describes the workflow in a context more tied to the current Galaxy instance and includes
        fields like 'url' and actual unencoded step ids instead of 'order_index'.

        Additional styles handled below: 'legacy', 'run', 'format2',
        'format2_wrapped_yaml' and 'ga'.  ``version`` selects a historical
        revision of the stored workflow (latest when None).
        """
        def to_format_2(wf_dict, **kwds):
            # Convert native Galaxy JSON into gxformat2's "format 2" form.
            return from_galaxy_native(wf_dict, None, **kwds)
        if version == '':
            version = None
        if version is not None:
            version = int(version)
        workflow = stored.get_internal_version(version)
        if style == "export":
            # Export workflows as GA format through 19.05, in 19.09 this will become format2.
            style = "ga"
            if self.app.config.enable_beta_export_format2_default:
                style = "format2"
        if style == "editor":
            wf_dict = self._workflow_to_dict_editor(trans, stored, workflow)
        elif style == "legacy":
            wf_dict = self._workflow_to_dict_instance(stored, workflow=workflow, legacy=True)
        elif style == "instance":
            wf_dict = self._workflow_to_dict_instance(stored, workflow=workflow, legacy=False)
        elif style == "run":
            wf_dict = self._workflow_to_dict_run(trans, stored, workflow=workflow)
        elif style == "format2":
            wf_dict = self._workflow_to_dict_export(trans, stored, workflow=workflow)
            wf_dict = to_format_2(wf_dict)
        elif style == "format2_wrapped_yaml":
            wf_dict = self._workflow_to_dict_export(trans, stored, workflow=workflow)
            wf_dict = to_format_2(wf_dict, json_wrapper=True)
        elif style == "ga":
            wf_dict = self._workflow_to_dict_export(trans, stored, workflow=workflow)
        else:
            raise exceptions.RequestParameterInvalidException('Unknown workflow style [%s]' % style)
        if version:
            wf_dict['version'] = version
        else:
            wf_dict['version'] = len(stored.workflows) - 1
        return wf_dict
def _sync_stored_workflow(self, trans, stored_workflow):
workflow_path = stored_workflow.from_path
workflow = stored_workflow.latest_workflow
with open(workflow_path, "w") as f:
if workflow_path.endswith(".ga"):
wf_dict = self._workflow_to_dict_export(trans, stored_workflow, workflow=workflow)
json.dump(wf_dict, f, indent=4)
else:
wf_dict = self._workflow_to_dict_export(trans, stored_workflow, workflow=workflow)
wf_dict = from_galaxy_native(wf_dict, None, json_wrapper=True)
f.write(wf_dict["yaml_content"])
    def _workflow_to_dict_run(self, trans, stored, workflow):
        """
        Builds workflow dictionary used by run workflow form.

        Raises MessageException when the workflow has no steps, contains
        cycles, or requires tools missing from this instance.
        """
        if len(workflow.steps) == 0:
            raise exceptions.MessageException('Workflow cannot be run because it does not have any steps.')
        if attach_ordered_steps(workflow, workflow.steps):
            raise exceptions.MessageException('Workflow cannot be run because it contains cycles.')
        trans.workflow_building_mode = workflow_building_modes.USE_HISTORY
        module_injector = WorkflowModuleInjector(trans)
        has_upgrade_messages = False
        step_version_changes = []
        missing_tools = []
        # NOTE(review): ``errors`` is populated below but never used after the
        # loop — confirm whether it should feed into the returned dict.
        errors = {}
        # First pass: attach runtime modules to every step and collect
        # upgrade messages / missing tools.
        for step in workflow.steps:
            try:
                module_injector.inject(step, steps=workflow.steps, exact_tools=False)
            except exceptions.ToolMissingException as e:
                # FIXME: if a subworkflow lacks multiple tools we report only the first missing tool
                if e.tool_id not in missing_tools:
                    missing_tools.append(e.tool_id)
                continue
            if step.upgrade_messages:
                has_upgrade_messages = True
            if step.type == 'tool' or step.type is None:
                if step.module.version_changes:
                    step_version_changes.extend(step.module.version_changes)
                step_errors = step.module.get_errors()
                if step_errors:
                    errors[step.id] = step_errors
        if missing_tools:
            workflow.annotation = self.get_item_annotation_str(trans.sa_session, trans.user, workflow)
            raise exceptions.MessageException('Following tools missing: %s' % ', '.join(missing_tools))
        workflow.annotation = self.get_item_annotation_str(trans.sa_session, trans.user, workflow)
        # Map database step ids to their order_index so connections can be
        # expressed positionally in the output.
        step_order_indices = {}
        for step in workflow.steps:
            step_order_indices[step.id] = step.order_index
        # Second pass: build the per-step model consumed by the run form.
        step_models = []
        for step in workflow.steps:
            step_model = None
            if step.type == 'tool':
                incoming = {}
                tool = trans.app.toolbox.get_tool(step.tool_id, tool_version=step.tool_version, tool_uuid=step.tool_uuid)
                params_to_incoming(incoming, tool.inputs, step.state.inputs, trans.app)
                step_model = tool.to_json(trans, incoming, workflow_building_mode=workflow_building_modes.USE_HISTORY)
                step_model['post_job_actions'] = [{
                    'short_str' : ActionBox.get_short_str(pja),
                    'action_type' : pja.action_type,
                    'output_name' : pja.output_name,
                    'action_arguments' : pja.action_arguments
                } for pja in step.post_job_actions]
            else:
                inputs = step.module.get_runtime_inputs(connections=step.output_connections)
                step_model = {
                    'inputs' : [input.to_dict(trans) for input in inputs.values()]
                }
            step_model['replacement_parameters'] = step.module.get_replacement_parameters(step)
            step_model['step_type'] = step.type
            step_model['step_label'] = step.label
            step_model['step_name'] = step.module.get_name()
            step_model['step_version'] = step.module.get_version()
            step_model['step_index'] = step.order_index
            step_model['output_connections'] = [{
                'input_step_index' : step_order_indices.get(oc.input_step_id),
                'output_step_index' : step_order_indices.get(oc.output_step_id),
                'input_name' : oc.input_name,
                'output_name' : oc.output_name
            } for oc in step.output_connections]
            if step.annotations:
                step_model['annotation'] = step.annotations[0].annotation
            if step.upgrade_messages:
                step_model['messages'] = step.upgrade_messages
            step_models.append(step_model)
        return {
            'id': trans.app.security.encode_id(stored.id),
            'history_id': trans.app.security.encode_id(trans.history.id) if trans.history else None,
            'name': stored.name,
            'steps': step_models,
            'step_version_changes': step_version_changes,
            'has_upgrade_messages': has_upgrade_messages,
            'workflow_resource_parameters': self._workflow_resource_parameters(trans, stored, workflow),
        }
    def _workflow_resource_parameters(self, trans, stored, workflow):
        """Get workflow scheduling resource parameters for this user and workflow or None if not configured.
        """
        # Delegates to the mapper callable resolved at construction time.
        return self._resource_mapper_function(trans=trans, stored_workflow=stored, workflow=workflow)
    def _workflow_to_dict_editor(self, trans, stored, workflow, tooltip=True, is_subworkflow=False):
        """Build the workflow dictionary consumed by the graphical editor.

        :param tooltip: include per-step tooltip HTML
        :param is_subworkflow: post-process steps to resolve collection types
            for embedded subworkflow rendering
        """
        # Pack workflow data into a dictionary and return
        data = {}
        data['name'] = workflow.name
        data['steps'] = {}
        data['upgrade_messages'] = {}
        input_step_types = set(workflow.input_step_types)
        # For each step, rebuild the form and encode the state
        for step in workflow.steps:
            # Load from database representation
            module = module_factory.from_workflow_step(trans, step, exact_tools=False)
            if not module:
                raise exceptions.MessageException('Unrecognized step type: %s' % step.type)
            # Load label from state of data input modules, necessary for backward compatibility
            self.__set_default_label(step, module, step.tool_inputs)
            # Fix any missing parameters
            upgrade_message = module.check_and_update_state()
            if upgrade_message:
                data['upgrade_messages'][step.order_index] = upgrade_message
            if (hasattr(module, "version_changes")) and (module.version_changes):
                if step.order_index in data['upgrade_messages']:
                    data['upgrade_messages'][step.order_index][module.tool.name] = "\n".join(module.version_changes)
                else:
                    data['upgrade_messages'][step.order_index] = {module.tool.name: "\n".join(module.version_changes)}
            # Get user annotation.
            annotation_str = self.get_item_annotation_str(trans.sa_session, trans.user, step) or ''
            config_form = module.get_config_form(step=step)
            # Pack attributes into plain dictionary
            step_dict = {
                'id': step.order_index,
                'type': module.type,
                'label': module.label,
                'content_id': module.get_content_id(),
                'name': module.get_name(),
                'tool_state': module.get_state(),
                'errors': module.get_errors(),
                'inputs': module.get_all_inputs(connectable_only=True),
                'outputs': module.get_all_outputs(),
                'config_form': config_form,
                'annotation': annotation_str,
                'post_job_actions': {},
                'uuid': str(step.uuid) if step.uuid else None,
                'workflow_outputs': []
            }
            if tooltip:
                step_dict['tooltip'] = module.get_tooltip(static_path=url_for('/static'))
            # Connections
            input_connections = step.input_connections
            input_connections_type = {}
            multiple_input = {}  # Boolean value indicating if this can be multiple
            if (step.type is None or step.type == 'tool') and module.tool:
                # Determine full (prefixed) names of valid input datasets
                data_input_names = {}

                def callback(input, prefixed_name, **kwargs):
                    # Record, for every data(-collection) parameter, its
                    # prefixed name, multiplicity and connection type.
                    if isinstance(input, DataToolParameter) or isinstance(input, DataCollectionToolParameter):
                        data_input_names[prefixed_name] = True
                        multiple_input[prefixed_name] = input.multiple
                        if isinstance(input, DataToolParameter):
                            input_connections_type[input.name] = "dataset"
                        if isinstance(input, DataCollectionToolParameter):
                            input_connections_type[input.name] = "dataset_collection"
                visit_input_values(module.tool.inputs, module.state.inputs, callback)
                # post_job_actions
                pja_dict = {}
                for pja in step.post_job_actions:
                    pja_dict[pja.action_type + pja.output_name] = dict(
                        action_type=pja.action_type,
                        output_name=pja.output_name,
                        action_arguments=pja.action_arguments
                    )
                step_dict['post_job_actions'] = pja_dict
            # workflow outputs (input-type steps are excluded)
            outputs = []
            for output in step.unique_workflow_outputs:
                if output.workflow_step.type not in input_step_types:
                    output_label = output.label
                    output_name = output.output_name
                    output_uuid = str(output.uuid) if output.uuid else None
                    outputs.append({"output_name": output_name,
                                    "uuid": output_uuid,
                                    "label": output_label})
            step_dict['workflow_outputs'] = outputs
            # Encode input connections as dictionary; multi-input parameters
            # map to a list of connection dicts, single inputs to one dict.
            input_conn_dict = {}
            for conn in input_connections:
                input_type = "dataset"
                if conn.input_name in input_connections_type:
                    input_type = input_connections_type[conn.input_name]
                conn_dict = dict(id=conn.output_step.order_index, output_name=conn.output_name, input_type=input_type)
                if conn.input_name in multiple_input:
                    if conn.input_name in input_conn_dict:
                        input_conn_dict[conn.input_name].append(conn_dict)
                    else:
                        input_conn_dict[conn.input_name] = [conn_dict]
                else:
                    input_conn_dict[conn.input_name] = conn_dict
            step_dict['input_connections'] = input_conn_dict
            # Position
            step_dict['position'] = step.position
            # Add to return value
            data['steps'][step.order_index] = step_dict
        if is_subworkflow:
            data['steps'] = self._resolve_collection_type(data['steps'])
        return data
@staticmethod
def get_step_map_over(current_step, steps):
    """
    Given a tool step and its input steps guess that maximum level of mapping over.
    All data outputs of a step need to be mapped over to this level.

    ``current_step`` and the values of ``steps`` are editor-style step dicts
    (as built by the editor serialization: 'input_connections', 'inputs',
    'outputs' keys). Returns the deepest collection type string (e.g.
    ``'list:paired'``) that the step's outputs are mapped over, or ``None``
    when no mapping over is detected.
    """
    # '' is the "no map-over yet" sentinel; ''.split(':') has length 1, so any
    # real single-level collection type (also length 1) replaces it via >=.
    max_map_over = ''
    for input_name, input_connections in current_step['input_connections'].items():
        if isinstance(input_connections, dict):
            # if input does not accept multiple inputs
            input_connections = [input_connections]
        for input_value in input_connections:
            current_data_input = None
            for current_input in current_step['inputs']:
                if current_input['name'] == input_name:
                    current_data_input = current_input
                    # we've got one of the tools' input data definitions
                    break
            # NOTE(review): if no entry in current_step['inputs'] matches
            # input_name, current_data_input stays None and the .get() calls
            # below would raise AttributeError — presumably 'inputs' always
            # covers every connected name; confirm.
            input_step = steps[input_value['id']]
            for input_step_data_output in input_step['outputs']:
                if input_step_data_output['name'] == input_value['output_name']:
                    collection_type = input_step_data_output.get('collection_type')
                    # This is the defined incoming collection type, in reality there may be additional
                    # mapping over of the workflows' data input, but this should be taken care of by the workflow editor /
                    # outer workflow.
                    if collection_type:
                        if current_data_input.get('input_type') == 'dataset' and current_data_input.get('multiple'):
                            # We reduce the innermost collection
                            if ':' in collection_type:
                                # more than one layer of nesting and multiple="true" input,
                                # we consume the innermost collection
                                collection_type = ":".join(collection_type.rsplit(':')[:-1])
                            else:
                                # We've reduced a list or a pair
                                collection_type = None
                        elif current_data_input.get('input_type') == 'dataset_collection':
                            current_collection_types = current_data_input['collection_types']
                            if not current_collection_types:
                                # Accepts any input dataset collection, no mapping
                                collection_type = None
                            elif collection_type in current_collection_types:
                                # incoming collection type is an exact match, no mapping over
                                collection_type = None
                            else:
                                outer_map_over = collection_type
                                for accepted_collection_type in current_data_input['collection_types']:
                                    # need to find the lowest level of mapping over,
                                    # for collection_type = 'list:list:list' and accepted_collection_type = ['list:list', 'list']
                                    # it'd be outer_map_over == 'list'
                                    if collection_type.endswith(accepted_collection_type):
                                        _outer_map_over = collection_type[:-(len(accepted_collection_type) + 1)]
                                        if len(_outer_map_over.split(':')) < len(outer_map_over.split(':')):
                                            outer_map_over = _outer_map_over
                                collection_type = outer_map_over
                    # If there is mapping over, we're going to assume it is linked, everything else is (probably)
                    # too hard to display in the workflow editor. With this assumption we should be able to
                    # set the maximum mapping over level to the most deeply nested map_over
                    if collection_type and len(collection_type.split(':')) >= len(max_map_over.split(':')):
                        max_map_over = collection_type
    if max_map_over:
        return max_map_over
    return None
def _resolve_collection_type(self, steps):
    """
    Fill in collection type for step outputs.

    This can either be via collection_type_source and / or "inherited" from
    the step's input. This information is only needed in the workflow editor.

    ``steps`` maps order_index -> editor-style step dict; it is mutated in
    place and also returned.
    """
    # Iterate in order_index order so upstream steps are resolved before
    # the steps that inherit their collection types.
    for order_index in sorted(steps):
        step = steps[order_index]
        if step['type'] == 'tool' and not step.get('errors'):
            map_over = self.get_step_map_over(step, steps)
            for step_data_output in step['outputs']:
                if step_data_output.get('collection_type_source') and step_data_output['collection_type'] is None:
                    collection_type_source = step_data_output['collection_type_source']
                    source_connections = step['input_connections'].get(collection_type_source, [])
                    # Fix: singleton connections are stored as a bare dict in
                    # the editor encoding; normalize to a list (mirroring
                    # get_step_map_over) instead of iterating the dict's keys.
                    if isinstance(source_connections, dict):
                        source_connections = [source_connections]
                    for input_connection in source_connections:
                        input_step = steps[input_connection['id']]
                        for input_step_data_output in input_step['outputs']:
                            if input_step_data_output['name'] == input_connection['output_name']:
                                # Inherit the collection type of the connected output.
                                step_data_output['collection_type'] = input_step_data_output.get('collection_type')
                if map_over:
                    collection_type = map_over
                    step_data_output['collection'] = True
                    if step_data_output.get('collection_type'):
                        # Nest the output's own collection type inside the map-over level.
                        collection_type = "%s:%s" % (map_over, step_data_output['collection_type'])
                    step_data_output['collection_type'] = collection_type
    return steps
def _workflow_to_dict_export(self, trans, stored=None, workflow=None):
    """ Export the workflow contents to a dictionary ready for JSON-ification and export.

    ``stored`` (if given) supplies the workflow-level annotation and tags;
    ``workflow`` is the revision actually serialized. Recurses into
    subworkflow steps via this same method.
    """
    annotation_str = ""
    tag_str = ""
    if stored is not None:
        annotation_str = self.get_item_annotation_str(trans.sa_session, trans.user, stored) or ''
        tag_str = stored.make_tag_string_list()
    # Pack workflow data into a dictionary and return
    data = {}
    data['a_galaxy_workflow'] = 'true'  # Placeholder for identifying galaxy workflow
    data['format-version'] = "0.1"
    data['name'] = workflow.name
    data['annotation'] = annotation_str
    data['tags'] = tag_str
    if workflow.uuid is not None:
        data['uuid'] = str(workflow.uuid)
    data['steps'] = {}
    # For each step, rebuild the form and encode the state
    for step in workflow.steps:
        # Load from database representation
        module = module_factory.from_workflow_step(trans, step)
        if not module:
            raise exceptions.MessageException('Unrecognized step type: %s' % step.type)
        # Get user annotation.
        annotation_str = self.get_item_annotation_str(trans.sa_session, trans.user, step) or ''
        content_id = module.get_content_id()
        # Export differences for backward compatibility
        if module.type == 'tool':
            tool_state = module.get_state(nested=False)
        else:
            tool_state = module.state.inputs
    # Step info
        step_dict = {
            'id': step.order_index,
            'type': module.type,
            'content_id': content_id,
            'tool_id': content_id,  # For workflows exported to older Galaxies,
                                    # eliminate after a few years...
            'tool_version': step.tool_version,
            'name': module.get_name(),
            'tool_state': json.dumps(tool_state),
            'errors': module.get_errors(),
            'uuid': str(step.uuid),
            'label': step.label or None,
            'annotation': annotation_str
        }
        # Add tool shed repository information and post-job actions to step dict.
        if module.type == 'tool':
            if module.tool and module.tool.tool_shed:
                step_dict["tool_shed_repository"] = {
                    'name': module.tool.repository_name,
                    'owner': module.tool.repository_owner,
                    'changeset_revision': module.tool.changeset_revision,
                    'tool_shed': module.tool.tool_shed
                }
            # Dynamic tools carry their full representation in the export.
            tool_representation = None
            dynamic_tool = step.dynamic_tool
            if dynamic_tool:
                tool_representation = dynamic_tool.value
            step_dict['tool_representation'] = tool_representation
            # A uuid content_id identifies a dynamic tool; blank these out so
            # older importers do not treat the uuid as a tool shed tool id.
            if util.is_uuid(step_dict['content_id']):
                step_dict['content_id'] = None
                step_dict['tool_id'] = None
            pja_dict = {}
            for pja in step.post_job_actions:
                pja_dict[pja.action_type + pja.output_name] = dict(
                    action_type=pja.action_type,
                    output_name=pja.output_name,
                    action_arguments=pja.action_arguments)
            step_dict['post_job_actions'] = pja_dict
        if module.type == 'subworkflow':
            # Subworkflows embed a full nested export instead of tool fields.
            del step_dict['content_id']
            del step_dict['errors']
            del step_dict['tool_version']
            del step_dict['tool_state']
            subworkflow = step.subworkflow
            subworkflow_as_dict = self._workflow_to_dict_export(
                trans,
                stored=None,
                workflow=subworkflow
            )
            step_dict['subworkflow'] = subworkflow_as_dict
        # Data inputs, legacy section not used anywhere within core
        input_dicts = []
        step_state = module.state.inputs or {}
        if "name" in step_state and module.type != 'tool':
            name = step_state.get("name")
            input_dicts.append({"name": name, "description": annotation_str})
        for name, val in step_state.items():
            input_type = type(val)
            if input_type == RuntimeValue:
                input_dicts.append({"name": name, "description": "runtime parameter for tool %s" % module.get_name()})
            elif input_type == dict:
                # Input type is described by a dict, e.g. indexed parameters.
                for partval in val.values():
                    if type(partval) == RuntimeValue:
                        input_dicts.append({"name": name, "description": "runtime parameter for tool %s" % module.get_name()})
        step_dict['inputs'] = input_dicts
        # User outputs
        workflow_outputs_dicts = []
        for workflow_output in step.unique_workflow_outputs:
            workflow_output_dict = dict(
                output_name=workflow_output.output_name,
                label=workflow_output.label,
                uuid=str(workflow_output.uuid) if workflow_output.uuid is not None else None,
            )
            workflow_outputs_dicts.append(workflow_output_dict)
        step_dict['workflow_outputs'] = workflow_outputs_dicts
        # All step outputs
        step_dict['outputs'] = []
        if type(module) is ToolModule:
            for output in module.get_data_outputs():
                step_dict['outputs'].append({'name': output['name'], 'type': output['extensions'][0]})
        # Explicit default values for step inputs (Format 2 "in" section).
        step_in = {}
        for step_input in step.inputs:
            if step_input.default_value_set:
                step_in[step_input.name] = {"default": step_input.default_value}
        if step_in:
            step_dict["in"] = step_in
        # Connections
        input_connections = step.input_connections
        if step.type is None or step.type == 'tool':
            # Determine full (prefixed) names of valid input datasets
            data_input_names = {}

            def callback(input, prefixed_name, **kwargs):
                if isinstance(input, DataToolParameter) or isinstance(input, DataCollectionToolParameter):
                    data_input_names[prefixed_name] = True
            # FIXME: this updates modules silently right now; messages from updates should be provided.
            module.check_and_update_state()
            if module.tool:
                # If the tool is installed we attempt to verify input values
                # and connections, otherwise the last known state will be dumped without modifications.
                visit_input_values(module.tool.inputs, module.state.inputs, callback)
        # Encode input connections as dictionary
        input_conn_dict = {}
        unique_input_names = set([conn.input_name for conn in input_connections])
        for input_name in unique_input_names:
            input_conn_dicts = []
            for conn in input_connections:
                if conn.input_name != input_name:
                    continue
                input_conn = dict(
                    id=conn.output_step.order_index,
                    output_name=conn.output_name
                )
                if conn.input_subworkflow_step is not None:
                    subworkflow_step_id = conn.input_subworkflow_step.order_index
                    input_conn["input_subworkflow_step_id"] = subworkflow_step_id
                input_conn_dicts.append(input_conn)
            input_conn_dict[input_name] = input_conn_dicts
        # Preserve backward compatibility. Previously Galaxy
        # assumed input connections would be dictionaries not
        # lists of dictionaries, so replace any singleton list
        # with just the dictionary so that workflows exported from
        # newer Galaxy instances can be used with older Galaxy
        # instances if they do no include multiple input
        # tools. This should be removed at some point. Mirrored
        # hack in _workflow_from_raw_description should never be removed so
        # existing workflow exports continue to function.
        for input_name, input_conn in dict(input_conn_dict).items():
            if len(input_conn) == 1:
                input_conn_dict[input_name] = input_conn[0]
        step_dict['input_connections'] = input_conn_dict
        # Position
        step_dict['position'] = step.position
        # Add to return value
        data['steps'][step.order_index] = step_dict
    return data
def _workflow_to_dict_instance(self, stored, workflow, legacy=True):
    """Serialize *workflow* (a revision of *stored*) into the 'instance' view.

    Returns a plain dict with identifying info, the input steps keyed by
    step id (or order index when ``legacy`` is false), and every step with
    its translated upstream connections.
    """
    encode = self.app.security.encode_id
    sa_session = self.app.model.context
    item = stored.to_dict(view='element', value_mapper={'id': encode})
    item['url'] = url_for('workflow', id=item['id'])
    item['owner'] = stored.user.username
    # Display labels used for unlabeled input steps, keyed by step type.
    fallback_labels = {
        "data_input": "Input Dataset",
        "data_collection_input": "Input Dataset Collection",
        "parameter_input": "Input Parameter",
    }
    inputs = {}
    for step in workflow.input_steps:
        explicit_label = step.label or step.tool_inputs.get('name')
        if explicit_label:
            label = explicit_label
        elif step.type in fallback_labels:
            label = fallback_labels[step.type]
        else:
            raise ValueError("Invalid step_type %s" % step.type)
        index = step.id if legacy else step.order_index
        inputs[index] = {
            'label': label,
            'value': '',
            'uuid': str(step.uuid) if step.uuid else None,
        }
    item['inputs'] = inputs
    item['annotation'] = self.get_item_annotation_str(sa_session, stored.user, stored)
    # Map database ids to order indices so connection sources can be
    # translated when the non-legacy keying is requested.
    order_index_by_id = {step.id: step.order_index for step in workflow.steps}
    steps = {}
    for step in workflow.steps:
        key = step.id if legacy else step.order_index
        step_dict = {
            'id': key,
            'type': step.type,
            'tool_id': step.tool_id,
            'tool_version': step.tool_version,
            'annotation': self.get_item_annotation_str(sa_session, stored.user, step),
            'tool_inputs': step.tool_inputs,
            'input_steps': {},
        }
        if step.type == 'subworkflow':
            # Subworkflow steps carry a nested workflow id instead of tool info.
            del step_dict['tool_id']
            del step_dict['tool_version']
            del step_dict['tool_inputs']
            step_dict['workflow_id'] = encode(step.subworkflow.id)
        for conn in step.input_connections:
            source_id = conn.output_step_id
            step_dict['input_steps'][conn.input_name] = {
                'source_step': source_id if legacy else order_index_by_id[source_id],
                'step_output': conn.output_name,
            }
        steps[key] = step_dict
    item['steps'] = steps
    return item
def __walk_step_dicts(self, data):
    """ Walk over the supplied step dictionaries and return them in a way
    designed to preserve step order when possible.

    Yields each step dict after checking that step UUIDs, step labels,
    workflow output labels, and workflow output UUIDs are each unique across
    the request; raises ``DuplicatedIdentifierException`` otherwise.
    """
    supplied_steps = data['steps']
    # Try to iterate through imported workflow in such a way as to
    # preserve step order.
    step_indices = list(supplied_steps.keys())
    try:
        step_indices = sorted(step_indices, key=int)
    except ValueError:
        # too defensive, were these ever or will they ever not be integers?
        pass
    discovered_labels = set()
    discovered_uuids = set()
    discovered_output_labels = set()
    discovered_output_uuids = set()
    # First pass to build step objects and populate basic values
    for step_index in step_indices:
        step_dict = supplied_steps[step_index]
        uuid = step_dict.get("uuid", None)
        if uuid and uuid != "None":
            if uuid in discovered_uuids:
                raise exceptions.DuplicatedIdentifierException("Duplicate step UUID in request.")
            discovered_uuids.add(uuid)
        label = step_dict.get("label", None)
        if label:
            if label in discovered_labels:
                raise exceptions.DuplicatedIdentifierException("Duplicated step label in request.")
            discovered_labels.add(label)
        if 'workflow_outputs' in step_dict:
            outputs = step_dict['workflow_outputs']
            # outputs may be list of name (deprecated legacy behavior)
            # or dictionary of names to {uuid: <uuid>, label: <label>}
            if isinstance(outputs, dict):
                for output_name in outputs:
                    output_dict = outputs[output_name]
                    output_label = output_dict.get("label", None)
                    if output_label:
                        # Fix: previously the *step* label was checked and
                        # recorded here, so duplicate output labels were
                        # never detected.
                        if output_label in discovered_output_labels:
                            raise exceptions.DuplicatedIdentifierException("Duplicated workflow output label in request.")
                        discovered_output_labels.add(output_label)
                    # Fix: the uuid belongs to the per-output dict (see the
                    # shape comment above); previously it was read from
                    # step_dict and the *step* uuid was recorded, so output
                    # UUID dedupe never worked.
                    output_uuid = output_dict.get("uuid", None)
                    if output_uuid:
                        if output_uuid in discovered_output_uuids:
                            raise exceptions.DuplicatedIdentifierException("Duplicate workflow output UUID in request.")
                        discovered_output_uuids.add(output_uuid)
        yield step_dict
def __load_subworkflows(self, trans, step_dict, subworkflow_id_map, **kwds):
    """Resolve and attach the subworkflow object for a 'subworkflow' step.

    No-op for every other step type; mutates ``step_dict`` in place.
    """
    if step_dict.get("type", None) != "subworkflow":
        return
    step_dict["subworkflow"] = self.__load_subworkflow_from_step_dict(
        trans, step_dict, subworkflow_id_map, **kwds
    )
def __module_from_dict(self, trans, steps, steps_by_external_id, step_dict, **kwds):
    """ Create a WorkflowStep model object and corresponding module
    representing type-specific functionality from the incoming dictionary.

    Appends the new step to ``steps`` and registers it in
    ``steps_by_external_id`` under the dict's 'id'. Returns
    ``(module, step)``.
    """
    step = model.WorkflowStep()
    # TODO: Consider handling position inside module.
    step.position = step_dict['position']
    # "None" (string) can appear in serialized exports; treat it as unset.
    if step_dict.get("uuid", None) and step_dict['uuid'] != "None":
        step.uuid = step_dict["uuid"]
    if "label" in step_dict:
        step.label = step_dict["label"]
    module = module_factory.from_dict(trans, step_dict, **kwds)
    # Legacy data-input steps stored their label in tool_state['name'].
    self.__set_default_label(step, module, step_dict.get('tool_state'))
    module.save_to_step(step)
    annotation = step_dict['annotation']
    if annotation:
        annotation = sanitize_html(annotation)
        self.add_item_annotation(trans.sa_session, trans.get_user(), step, annotation)
    # Stick this in the step temporarily
    step.temp_input_connections = step_dict['input_connections']
    # Create the model class for the step
    steps.append(step)
    external_id = step_dict["id"]
    steps_by_external_id[external_id] = step
    if 'workflow_outputs' in step_dict:
        workflow_outputs = step_dict['workflow_outputs']
        found_output_names = set([])
        for workflow_output in workflow_outputs:
            # Allow workflow outputs as list of output_names for backward compatibility.
            if not isinstance(workflow_output, dict):
                workflow_output = {"output_name": workflow_output}
            output_name = workflow_output["output_name"]
            if output_name in found_output_names:
                raise exceptions.ObjectAttributeInvalidException("Duplicate workflow outputs with name [%s] found." % output_name)
            if not output_name:
                raise exceptions.ObjectAttributeInvalidException("Workflow output with empty name encountered.")
            found_output_names.add(output_name)
            uuid = workflow_output.get("uuid", None)
            label = workflow_output.get("label", None)
            m = step.create_or_update_workflow_output(
                output_name=output_name,
                uuid=uuid,
                label=label,
            )
            trans.sa_session.add(m)
    if "in" in step_dict:
        for input_name, input_dict in step_dict["in"].items():
            step_input = step.get_or_add_input(input_name)
            # Sentinel distinguishes "no default supplied" from an explicit
            # default of None/False/0.
            NO_DEFAULT_DEFINED = object()
            default = input_dict.get("default", NO_DEFAULT_DEFINED)
            if default is not NO_DEFAULT_DEFINED:
                step_input.default_value = default
                step_input.default_value_set = True
    return module, step
def __load_subworkflow_from_step_dict(self, trans, step_dict, subworkflow_id_map, **kwds):
    """Return the subworkflow for a step dict.

    The step must supply exactly one of an embedded 'subworkflow'
    description or a 'content_id' reference; otherwise an error is raised.
    """
    embedded = step_dict.get("subworkflow", None)
    content_id = step_dict.get("content_id", None)
    if embedded and content_id:
        raise Exception("Subworkflow step defines both subworkflow and content_id, only one may be specified.")
    if not embedded and not content_id:
        raise Exception("Subworkflow step must define either subworkflow or content_id.")
    if embedded:
        return self.__build_embedded_subworkflow(trans, embedded, **kwds)
    if subworkflow_id_map is not None:
        # Interpret content_id as a workflow local thing.
        return subworkflow_id_map[content_id[1:]]
    workflow_manager = WorkflowsManager(self.app)
    return workflow_manager.get_owned_workflow(trans, content_id)
def __build_embedded_subworkflow(self, trans, data, **kwds):
    """Build (but do not store) a workflow from an embedded description."""
    raw_description = self.ensure_raw_description(data)
    built = self.build_workflow_from_raw_description(
        trans,
        raw_description,
        create_stored_workflow=False,
        fill_defaults=kwds.get("fill_defaults", False),
    )
    return built.workflow
def __connect_workflow_steps(self, steps, steps_by_external_id):
    """ Second pass to deal with connections between steps.

    Create workflow connection objects using externally specified ids
    using during creation or update.
    """
    for step in steps:
        # Input connections
        for input_name, connections in step.temp_input_connections.items():
            if not connections:
                continue
            # Older style exports store a single connection as a bare dict.
            if not isinstance(connections, list):
                connections = [connections]
            for connection in connections:
                if 'output_name' not in connection or 'id' not in connection:
                    template = "Invalid connection [%s] - must be dict with output_name and id fields."
                    message = template % connection
                    raise exceptions.MessageException(message)
                external_id = connection['id']
                if external_id not in steps_by_external_id:
                    raise KeyError("Failed to find external id %s in %s" % (external_id, steps_by_external_id.keys()))
                step.add_connection(
                    input_name,
                    connection["output_name"],
                    steps_by_external_id[external_id],
                    connection.get('input_subworkflow_step_id', None),
                )
        del step.temp_input_connections
def __set_default_label(self, step, module, state):
    """ Previously data input modules had a `name` attribute to rename individual steps. Here, this value is transferred
    to the actual `label` attribute which is available for all module types, unique, and mapped to its own database column.
    """
    if module.label or module.type not in ('data_input', 'data_collection_input'):
        return
    legacy_name = safe_loads(state).get('name')
    if not legacy_name:
        return
    # Skip the generic placeholder names; they carry no information.
    if util.unicodify(legacy_name).lower() in ('input dataset', 'input dataset collection'):
        return
    step.label = module.label = legacy_name
class MissingToolsException(exceptions.MessageException):
    """Raised when a workflow cannot be fully built because tools are missing.

    Carries the (partially built) ``workflow`` and the per-step ``errors``
    so callers can report which tools were unavailable.
    """

    def __init__(self, workflow, errors):
        self.workflow = workflow
        self.errors = errors
class RawWorkflowDescription(object):
    """Value object pairing a raw workflow description dict with its source.

    ``workflow_path`` is the optional path the description was loaded from,
    or None when it did not come from a file.
    """

    def __init__(self, as_dict, workflow_path=None):
        self.as_dict = as_dict
        self.workflow_path = workflow_path
class Format2ConverterGalaxyInterface(ImporterGalaxyInterface):
    """ImporterGalaxyInterface used during Format 2 conversion.

    Direct import of nested workflows is intentionally unsupported here.
    """

    def import_workflow(self, workflow, **kwds):
        raise NotImplementedError("Direct format 2 import of nested workflows is not yet implemented, use bioblend client.")
| 48.738288 | 163 | 0.610776 |
6f627b04d172bd6717bac7be30a8dd36c653b71b | 379 | py | Python | guilanche/pedido/urls.py | evton/Emissor-pedidos-lanchonete | 87869c3eb6860ba4486d069ffc4759648f044783 | [
"MIT"
] | null | null | null | guilanche/pedido/urls.py | evton/Emissor-pedidos-lanchonete | 87869c3eb6860ba4486d069ffc4759648f044783 | [
"MIT"
] | null | null | null | guilanche/pedido/urls.py | evton/Emissor-pedidos-lanchonete | 87869c3eb6860ba4486d069ffc4759648f044783 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the pedido (order) app; each route maps to a view in views.py.
# (Reconstructed: the closing bracket was fused with non-source residue.)
urlpatterns = [
    path('', views.inicio, name='inicio'),
    path('novopedido/', views.novopedido, name='novopedido'),
    path('atualizapedido/<str:pk>/', views.atualizapedido, name='atualizapedido'),
    path('apagapedido/<str:pk>/', views.apagapedido, name='apagapedido'),
    path('recibo/', views.recibo, name='recibo'),
]
94a7d8d39e0d055a738437ff2b2fbb3dbc891e4a | 15,736 | py | Python | proxyprotocol/tlv.py | tommyvn/proxy-protocol | 07c63fb930c126c3e034fc6503071bbcbaf8b361 | [
"MIT"
] | null | null | null | proxyprotocol/tlv.py | tommyvn/proxy-protocol | 07c63fb930c126c3e034fc6503071bbcbaf8b361 | [
"MIT"
] | null | null | null | proxyprotocol/tlv.py | tommyvn/proxy-protocol | 07c63fb930c126c3e034fc6503071bbcbaf8b361 | [
"MIT"
] | null | null | null |
from __future__ import annotations
import json
import zlib
from enum import IntEnum, IntFlag
from struct import Struct, error as struct_error
from typing import ClassVar, Any, Hashable, Optional, Union, Iterator, \
Mapping, Dict, List
from .typing import PeerCert
# Public names exported by ``import *`` from this module.
__all__ = ['Type', 'SSLClient', 'TLV', 'ProxyProtocolTLV',
           'ProxyProtocolSSLTLV', 'ProxyProtocolExtTLV']
class Type(IntEnum):
    """The PROXY protocol TLV type values."""

    # Standard TLV type bytes from the PROXY protocol v2 specification.
    PP2_TYPE_ALPN = 0x01
    PP2_TYPE_AUTHORITY = 0x02
    PP2_TYPE_CRC32C = 0x03
    PP2_TYPE_NOOP = 0x04
    PP2_TYPE_UNIQUE_ID = 0x05
    PP2_TYPE_SSL = 0x20
    PP2_TYPE_NETNS = 0x30
    # Sub-type bytes carried inside the PP2_TYPE_SSL TLV payload.
    PP2_SUBTYPE_SSL_VERSION = 0x21
    PP2_SUBTYPE_SSL_CN = 0x22
    PP2_SUBTYPE_SSL_CIPHER = 0x23
    PP2_SUBTYPE_SSL_SIG_ALG = 0x24
    PP2_SUBTYPE_SSL_KEY_ALG = 0x25
    # Boundaries of the custom / experimental / future reserved ranges.
    PP2_TYPE_MIN_CUSTOM = 0xE0
    PP2_TYPE_MAX_CUSTOM = 0xEF
    PP2_TYPE_MIN_EXPERIMENT = 0xF0
    PP2_TYPE_MAX_EXPERIMENT = 0xF7
    PP2_TYPE_MIN_FUTURE = 0xF8
    PP2_TYPE_MAX_FUTURE = 0xFF
    # Extension TLV sub-types
    # NOTE: these values duplicate the standard types above, so Python's Enum
    # machinery makes them *aliases* (e.g. PP2_SUBTYPE_EXT_COMPRESSION is
    # PP2_TYPE_ALPN); they are only meaningful inside an extension TLV.
    PP2_SUBTYPE_EXT_COMPRESSION = 0x01
    PP2_SUBTYPE_EXT_SECRET_BITS = 0x02
    PP2_SUBTYPE_EXT_PEERCERT = 0x03
    PP2_SUBTYPE_EXT_DNSBL = 0x04
class SSLClient(IntFlag):
    """The PROXY protocol ``PP2_TYPE_SSL`` client flags.

    These bits are combined in the one-byte client field that prefixes the
    ``PP2_TYPE_SSL`` TLV payload (see :class:`ProxyProtocolSSLTLV`).
    """

    PP2_CLIENT_SSL = 0x01
    PP2_CLIENT_CERT_CONN = 0x02
    PP2_CLIENT_CERT_SESS = 0x04
class TLV(Mapping[int, bytes], Hashable):
    """Defines the basic parsing and structure of a PROXY protocol TLV vector.

    The unpacked TLV values are available as dict-style keys of this object,
    e.g. ``tlv[0xE2]``. To serialize back to a bytestring, use ``bytes(tlv)``.

    Args:
        data: TLV data to parse.
        init: A mapping of types to values to initialize the TLV, such as
            another :class:`TLV`.

    """

    # Each vector is a one-byte type followed by a two-byte big-endian length.
    _fmt = Struct('!BH')

    def __init__(self, data: bytes = b'',
                 init: Optional[Mapping[int, bytes]] = None) -> None:
        super().__init__()
        self._tlv = self._unpack(data)
        if init is not None:
            self._tlv.update(init)
        self._frozen = self._freeze()

    def _freeze(self) -> Hashable:
        # Checksum the values so the frozen form is hashable even when the
        # stored values are memoryview slices rather than bytes.
        return frozenset((type_num, zlib.adler32(value))
                         for type_num, value in self._tlv.items())

    def _unpack(self, data: bytes) -> Dict[int, bytes]:
        header = self._fmt
        view = memoryview(data)
        parsed: Dict[int, bytes] = {}
        offset = 0
        # Walk type/length headers until the remaining data is too short.
        while offset + header.size <= len(data):
            type_num, length = header.unpack_from(view, offset)
            offset += header.size
            parsed[type_num] = view[offset:offset + length]
            offset += length
        return parsed

    def _pack(self) -> bytes:
        header = self._fmt
        chunks: List[bytes] = []
        # Emit vectors in ascending type order; types outside 0x00-0xFF
        # cannot be encoded and are skipped.
        for type_num in range(0x00, 0x100):
            value = self.get(type_num)
            if value is None:
                continue
            chunks.append(header.pack(type_num, len(value)))
            chunks.append(value)
        return b''.join(chunks)

    def __bytes__(self) -> bytes:
        return self._pack()

    def __getitem__(self, type_num: int) -> bytes:
        return self._tlv[type_num]

    def __iter__(self) -> Iterator[int]:
        return iter(self._tlv)

    def __len__(self) -> int:
        return len(self._tlv)

    def __hash__(self) -> int:
        return hash(self._frozen)

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, type(self)):
            return self._frozen == other._frozen
        return super().__eq__(other)

    def __repr__(self) -> str:
        return f'{type(self).__name__}({bytes(self)!r})'
class ProxyProtocolTLV(TLV):
    """Defines the TLV values that may be appended to a PROXY protocol header.

    These values can provide additional information not stored in the address
    data. Refer to the PROXY protocol spec for more information about each TLV.

    Args:
        data: TLV data to parse.
        init: A mapping of types to values to initialize the TLV, such as
            another :class:`TLV`.

    """

    __slots__ = ['_ssl']

    # The PP2_TYPE_CRC32C payload is a single network-order 32-bit integer.
    _crc32c_fmt = Struct('!L')

    def __init__(self, data: bytes = b'',
                 init: Optional[Mapping[int, bytes]] = None, *,
                 alpn: Optional[bytes] = None,
                 authority: Optional[str] = None,
                 crc32c: Optional[int] = None,
                 unique_id: Optional[bytes] = None,
                 ssl: Optional[ProxyProtocolSSLTLV] = None,
                 netns: Optional[str] = None,
                 ext: Optional[ProxyProtocolExtTLV] = None) -> None:
        merged: Dict[int, bytes] = dict(init or {})
        # Encode each supplied keyword into its TLV wire representation.
        if alpn is not None:
            merged[Type.PP2_TYPE_ALPN] = alpn
        if authority is not None:
            merged[Type.PP2_TYPE_AUTHORITY] = authority.encode('utf-8')
        if crc32c is not None:
            merged[Type.PP2_TYPE_CRC32C] = self._crc32c_fmt.pack(crc32c)
        if unique_id is not None:
            merged[Type.PP2_TYPE_UNIQUE_ID] = unique_id
        if ssl is not None:
            merged[Type.PP2_TYPE_SSL] = bytes(ssl)
        if netns is not None:
            merged[Type.PP2_TYPE_NETNS] = netns.encode('ascii')
        if ext is not None:
            # The extension TLV hides inside a NOOP vector.
            merged[Type.PP2_TYPE_NOOP] = bytes(ext)
        super().__init__(data, merged)

    @property
    def alpn(self) -> Optional[bytes]:
        """The ``PP2_TYPE_ALPN`` value."""
        raw = self.get(Type.PP2_TYPE_ALPN)
        return None if raw is None else bytes(raw)

    @property
    def authority(self) -> Optional[str]:
        """The ``PP2_TYPE_AUTHORITY`` value."""
        raw = self.get(Type.PP2_TYPE_AUTHORITY)
        return None if raw is None else str(raw, 'utf-8')

    @property
    def crc32c(self) -> Optional[int]:
        """The ``PP2_TYPE_CRC32C`` value."""
        raw = self.get(Type.PP2_TYPE_CRC32C)
        if raw is None:
            return None
        return int(self._crc32c_fmt.unpack(raw)[0])

    @property
    def unique_id(self) -> bytes:
        """The ``PP2_TYPE_UNIQUE_ID`` value."""
        raw = self.get(Type.PP2_TYPE_UNIQUE_ID)
        return b'' if raw is None else bytes(raw)

    @property
    def ssl(self) -> ProxyProtocolSSLTLV:
        """The ``PP2_TYPE_SSL`` value."""
        raw = self.get(Type.PP2_TYPE_SSL)
        return ProxyProtocolSSLTLV(b'' if raw is None else raw)

    @property
    def netns(self) -> Optional[str]:
        """The ``PP2_TYPE_NETNS`` value."""
        raw = self.get(Type.PP2_TYPE_NETNS)
        return None if raw is None else str(raw, 'ascii')

    @property
    def ext(self) -> ProxyProtocolExtTLV:
        """The ``PP2_TYPE_NOOP`` value, possibly parsed as an extension TLV."""
        raw = self.get(Type.PP2_TYPE_NOOP)
        return ProxyProtocolExtTLV(b'' if raw is None else raw)
class ProxyProtocolSSLTLV(TLV):
"""The ``PP2_TYPE_SSL`` TLV, which is prefixed with a struct containing
*client* and *verify* values, then follows with ``PP2_SUBTYPE_SSL_*`` TLVs.
Args:
data: TLV data to parse.
init: A mapping of types to values to initialize the TLV, such as
another :class:`TLV`.
"""
_prefix_fmt = Struct('!BL')
def __init__(self, data: bytes = b'',
init: Optional[Mapping[int, bytes]] = None, *,
has_ssl: Optional[bool] = None,
has_cert_conn: Optional[bool] = None,
has_cert_sess: Optional[bool] = None,
verify: Union[None, int, bool] = None,
version: Optional[str] = None,
cn: Optional[str] = None,
cipher: Optional[str] = None,
sig_alg: Optional[str] = None,
key_alg: Optional[str] = None) -> None:
self._client = 0
self._verify = 1
results = dict(init or {})
if version is not None:
results[Type.PP2_SUBTYPE_SSL_VERSION] = version.encode('ascii')
if cn is not None:
results[Type.PP2_SUBTYPE_SSL_CN] = cn.encode('utf-8')
if cipher is not None:
results[Type.PP2_SUBTYPE_SSL_CIPHER] = cipher.encode('ascii')
if sig_alg is not None:
results[Type.PP2_SUBTYPE_SSL_SIG_ALG] = sig_alg.encode('ascii')
if key_alg is not None:
results[Type.PP2_SUBTYPE_SSL_KEY_ALG] = key_alg.encode('ascii')
super().__init__(data, results)
if has_ssl is True:
self._client |= SSLClient.PP2_CLIENT_SSL
elif has_ssl is False:
self._client &= ~SSLClient.PP2_CLIENT_SSL
if has_cert_conn is True:
self._client |= SSLClient.PP2_CLIENT_CERT_CONN
elif has_cert_conn is False:
self._client &= ~SSLClient.PP2_CLIENT_CERT_CONN
if has_cert_sess is True:
self._client |= SSLClient.PP2_CLIENT_CERT_SESS
elif has_cert_sess is False:
self._client &= ~SSLClient.PP2_CLIENT_CERT_SESS
if verify is not None:
self._verify = int(verify)
def _unpack(self, data: bytes) -> Dict[int, bytes]:
view = memoryview(data)
try:
self._client, self._verify = self._prefix_fmt.unpack_from(data, 0)
except struct_error:
pass
view = view[self._prefix_fmt.size:]
return super()._unpack(view)
def _pack(self) -> bytes:
prefix = self._prefix_fmt.pack(self.client, self.verify)
return prefix + super()._pack()
def __hash__(self) -> int:
return hash((self._frozen, self._client, self._verify))
def __eq__(self, other: Any) -> bool:
if isinstance(other, type(self)):
self_cmp = (self._frozen, self._client, self._verify)
other_cmp = (self._frozen, self._client, self._verify)
return self_cmp == other_cmp
return super().__eq__(other)
    @property
    def client(self) -> int:
        """The client field in the ``PP2_TYPE_SSL`` value."""
        return self._client

    @property
    def verify(self) -> int:
        """The verify field in the ``PP2_TYPE_SSL`` value."""
        return self._verify

    @property
    def has_ssl(self) -> bool:
        """True if the ``PP2_CLIENT_SSL`` flag was set."""
        # Bit test against the client flags bitmask.
        return self.client & SSLClient.PP2_CLIENT_SSL != 0

    @property
    def has_cert_conn(self) -> bool:
        """True if the ``PP2_CLIENT_CERT_CONN`` flag was set."""
        return self.client & SSLClient.PP2_CLIENT_CERT_CONN != 0

    @property
    def has_cert_sess(self) -> bool:
        """True if the ``PP2_CLIENT_CERT_SESS`` flag was set."""
        return self.client & SSLClient.PP2_CLIENT_CERT_SESS != 0

    @property
    def verified(self) -> bool:
        """True if the client provided a certificate that was successfully
        verified.
        """
        # A verify value of zero denotes successful verification.
        return self.verify == 0
    @property
    def version(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_SSL_VERSION`` value."""
        # Stored on the wire as ASCII bytes; None when the subtype is absent.
        val = self.get(Type.PP2_SUBTYPE_SSL_VERSION)
        if val is not None:
            return str(val, 'ascii')
        return None

    @property
    def cn(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_SSL_CN`` value."""
        # The common name may contain non-ASCII characters, hence UTF-8.
        val = self.get(Type.PP2_SUBTYPE_SSL_CN)
        if val is not None:
            return str(val, 'utf-8')
        return None

    @property
    def cipher(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_SSL_CIPHER`` value."""
        val = self.get(Type.PP2_SUBTYPE_SSL_CIPHER)
        if val is not None:
            return str(val, 'ascii')
        return None

    @property
    def sig_alg(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_SSL_SIG_ALG`` value."""
        val = self.get(Type.PP2_SUBTYPE_SSL_SIG_ALG)
        if val is not None:
            return str(val, 'ascii')
        return None

    @property
    def key_alg(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_SSL_KEY_ALG`` value."""
        val = self.get(Type.PP2_SUBTYPE_SSL_KEY_ALG)
        if val is not None:
            return str(val, 'ascii')
        return None
class ProxyProtocolExtTLV(TLV):
    """Non-standard extension TLV, which is hidden inside a ``PP2_TYPE_NOOP``
    and must start with :attr:`.MAGIC_PREFIX`.

    Args:
        data: TLV data to parse.
        init: A mapping of types to values to initialize the TLV, such as
            another :class:`TLV`.
    """

    #: The ``PP2_TYPE_NOOP`` value must begin with this byte sequence to be
    #: parsed as a :class:`ProxyProtocolExtTLV`.
    MAGIC_PREFIX: ClassVar[bytes] = b'\x88\x1b\x79\xc1\xce\x96\x85\xb0'

    # Network-order unsigned short used to encode the secret-bits subtype.
    _secret_bits_fmt = Struct('!H')

    def __init__(self, data: bytes = b'',
                 init: Optional[Mapping[int, bytes]] = None, *,
                 compression: Optional[str] = None,
                 secret_bits: Optional[int] = None,
                 peercert: Optional[PeerCert] = None,
                 dnsbl: Optional[str] = None) -> None:
        # Keyword values override anything carried over from *init*.
        results = dict(init or {})
        if compression is not None:
            val = compression.encode('ascii')
            results[Type.PP2_SUBTYPE_EXT_COMPRESSION] = val
        if secret_bits is not None:
            val = self._secret_bits_fmt.pack(secret_bits)
            results[Type.PP2_SUBTYPE_EXT_SECRET_BITS] = val
        if peercert is not None:
            # JSON-encode and zlib-compress the certificate dict to keep the
            # TLV payload small.
            val = zlib.compress(json.dumps(peercert).encode('ascii'))
            results[Type.PP2_SUBTYPE_EXT_PEERCERT] = val
        if dnsbl is not None:
            val = dnsbl.encode('utf-8')
            results[Type.PP2_SUBTYPE_EXT_DNSBL] = val
        super().__init__(data, results)

    def _unpack(self, data: bytes) -> Dict[int, bytes]:
        """Verify and strip the magic prefix, then parse the inner TLVs.

        Data that does not start with :attr:`.MAGIC_PREFIX` parses as empty.
        """
        view = memoryview(data)
        magic_prefix = self.MAGIC_PREFIX
        if view[0:len(magic_prefix)] != magic_prefix:
            return {}
        view = view[len(magic_prefix):]
        return super()._unpack(view)

    def _pack(self) -> bytes:
        """Serialize the inner TLVs with the magic prefix prepended."""
        return self.MAGIC_PREFIX + super()._pack()

    @property
    def compression(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_EXT_COMPRESSION`` value. This is used by the
        :attr:`~proxyprotocol.sock.SocketInfo.compression` value.
        """
        val = self.get(Type.PP2_SUBTYPE_EXT_COMPRESSION)
        if val is not None:
            return str(val, 'ascii')
        return None

    @property
    def secret_bits(self) -> Optional[int]:
        """The ``PP2_SUBTYPE_EXT_SECRET_BITS`` value. This is used to populate
        the third member of the: attr:`~proxyprotocol.sock.SocketInfo.cipher`
        tuple.
        """
        val = self.get(Type.PP2_SUBTYPE_EXT_SECRET_BITS)
        if val is not None:
            secret_bits, = self._secret_bits_fmt.unpack(val)
            return int(secret_bits)
        return None

    @property
    def peercert(self) -> Optional[PeerCert]:
        """The ``PP2_SUBTYPE_EXT_PEERCERT`` value. This is used by the
        :attr:`~proxyprotocol.sock.SocketInfo.peercert` value.
        """
        val = self.get(Type.PP2_SUBTYPE_EXT_PEERCERT)
        if val is not None:
            # Inverse of the compression applied in __init__.
            decompressed = zlib.decompress(val)
            ret: PeerCert = json.loads(decompressed)
            return ret
        return None

    @property
    def dnsbl(self) -> Optional[str]:
        """The ``PP2_SUBTYPE_EXT_DNSBL`` value. This is the hostname or other
        identifier that reports a status or reputation of the connecting IP
        address.
        """
        val = self.get(Type.PP2_SUBTYPE_EXT_DNSBL)
        if val is not None:
            return str(val, 'utf-8')
        return None
| 33.268499 | 79 | 0.600025 |
d3bc20feef91fca4e6f18955907be84379b24487 | 926 | py | Python | DTPython/client.py | CPSuperstore/DTPython | 52dd9e0415fe136ab368d70189a15d7cb229e93d | [
"MIT"
] | null | null | null | DTPython/client.py | CPSuperstore/DTPython | 52dd9e0415fe136ab368d70189a15d7cb229e93d | [
"MIT"
] | null | null | null | DTPython/client.py | CPSuperstore/DTPython | 52dd9e0415fe136ab368d70189a15d7cb229e93d | [
"MIT"
] | null | null | null | from socket import *
import DTPython
import DTPython.coder as coder
class DTPClient:
    """UDP broadcast client for the DTP protocol.

    Datagrams are tagged with this client's numeric identifier and sent to
    the configured broadcast address/port.
    """

    def __init__(self, identifier: int, broadcast_address: str = "255.255.255.255", port: int = None):
        if port is None:
            port = DTPython.PORT
        self.port = port
        self.identifier = identifier
        self.broadcast_address = broadcast_address
        # UDP socket with broadcasting enabled.
        self.s = socket(AF_INET, SOCK_DGRAM)
        self.s.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)

    def broadcast_raw(self, message: bytes):
        """Send raw bytes to the broadcast address."""
        destination = (self.broadcast_address, self.port)
        self.s.sendto(message, destination)

    def broadcast(self, message: str):
        """Tag *message* with our identifier, encode it and broadcast it."""
        tagged = "{}|{}".format(str(self.identifier), message)
        self.broadcast_raw(coder.encode_message(tagged))

    def await_response(self):
        """Block until a datagram arrives; return (data, address)."""
        return self.s.recvfrom(1024)

    def broadcast_await(self, message: str):
        """Broadcast *message* and wait for the first reply."""
        self.broadcast(message)
        return self.await_response()
10f9d37383c16f7bbfed6e751ce54843d028d1da | 1,710 | py | Python | conanfile.py | tt4g/conan-getting-started | e4c5f4b3eace47c111b1ff0f2dab38b0b4f79ad7 | [
"MIT"
] | null | null | null | conanfile.py | tt4g/conan-getting-started | e4c5f4b3eace47c111b1ff0f2dab38b0b4f79ad7 | [
"MIT"
] | null | null | null | conanfile.py | tt4g/conan-getting-started | e4c5f4b3eace47c111b1ff0f2dab38b0b4f79ad7 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake, tools
class HelloworldConan(ConanFile):
    """Conan recipe packaging the memsharded/hello demo library."""
    name = "HelloWorld"
    version = "0.1"
    license = "MIT"
    author = "tt4g"
    url = "https://github.com/tt4g/conan-getting-started"
    description = "Hello World package"
    topics = ("conan", "hello-world")
    settings = "os", "compiler", "build_type", "arch"
    options = {"shared": [True, False]}
    default_options = "shared=False"
    generators = "cmake"

    def source(self):
        # Fetch the upstream sources at the branch this recipe builds.
        self.run("git clone https://github.com/memsharded/hello.git")
        self.run("cd hello && git checkout static_shared")
        # This small hack might be useful to guarantee proper /MT /MD linkage
        # in MSVC if the packaged project doesn't have variables to set it
        # properly
        tools.replace_in_file("hello/CMakeLists.txt", "PROJECT(MyHello)",
                              '''PROJECT(MyHello)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()''')

    def build(self):
        # Configure and build with the Conan CMake helper.
        cmake = CMake(self)
        cmake.configure(source_folder="hello")
        cmake.build()
        # Explicit way:
        # self.run('cmake %s/hello %s'
        #          % (self.source_folder, cmake.command_line))
        # self.run("cmake --build . %s" % cmake.build_config)

    def package(self):
        # Collect headers plus every platform's library/binary artifacts.
        self.copy("*.h", dst="include", src="hello")
        self.copy("*hello.lib", dst="lib", keep_path=False)
        self.copy("*.dll", dst="bin", keep_path=False)
        self.copy("*.so", dst="lib", keep_path=False)
        self.copy("*.dylib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)

    def package_info(self):
        # Consumers link against the packaged "hello" library.
        self.cpp_info.libs = ["hello"]
6dc05e2de986fdb15e86ae8799e68c677be78ab1 | 875 | py | Python | main.py | RamtinAlami/inspirational-quote-generator | 950357dd302624082baffef1e40364b91346d535 | [
"MIT"
] | 27 | 2018-01-01T18:49:24.000Z | 2021-09-30T11:48:11.000Z | main.py | RamtinAlami/inspirational-quote-generator | 950357dd302624082baffef1e40364b91346d535 | [
"MIT"
] | 1 | 2021-09-30T11:48:08.000Z | 2021-09-30T11:48:08.000Z | main.py | RamtinAlami/inspirational-quote-generator | 950357dd302624082baffef1e40364b91346d535 | [
"MIT"
] | 14 | 2018-01-02T04:53:23.000Z | 2021-08-08T17:44:23.000Z | #!/usr/bin/env python3
import mkchain
import shelve
def train_text(input_file, model_file='data'):
    """Train a Markov-chain model on ``<input_file>.txt`` and persist it.

    The trained model is stored under the 'Model' key of a shelve database
    named *model_file*.
    """
    # Seed the chain so the model is never empty.
    model = mkchain.train('Life is Love.'.lower().split())
    path = input_file + '.txt'
    with open(path) as dataset:
        for line in dataset:
            model = mkchain.train(line.lower().split(), model)
    with shelve.open(model_file) as data:
        data['Model'] = model
def generate_text(model_file='data', output_file='generated', amount=100):
    """Load the trained model and write *amount* generated quotes.

    Quotes shorter than 20 tokens are re-rolled; each accepted quote is
    written to ``<output_file>.txt`` followed by a blank line.
    """
    with shelve.open(model_file) as data:
        model = data['Model']
    with open(output_file + '.txt', '+w') as file:
        for _ in range(amount):
            txt = mkchain.generate(model, length=50)
            # Re-roll until the quote is long enough to be interesting.
            while len(txt) <= 20:
                txt = mkchain.generate(model, length=50)
            file.write(' '.join(txt) + '\n\n')
# Script entry: train on dataset.txt, then emit quotes to generated.txt.
train_text('dataset')
generate_text()
6d4e938657bb4f631e780e7ceacbc2ec911d2de5 | 5,549 | py | Python | lcm/ns/biz/ns_manual_scale.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 4 | 2018-08-29T02:51:38.000Z | 2021-11-16T11:36:11.000Z | lcm/ns/biz/ns_manual_scale.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | null | null | null | lcm/ns/biz/ns_manual_scale.py | onap/vfc-nfvo-lcm | b7d4d015fa96a246d73d863092d3362afcedc284 | [
"Apache-2.0"
] | 1 | 2019-05-12T08:21:19.000Z | 2019-05-12T08:21:19.000Z | # Copyright 2017 ZTE Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import threading
import time
import traceback
from lcm.ns.biz.scale_aspect import get_scale_vnf_data_info_list
from lcm.ns.enum import NS_INST_STATUS
from lcm.pub.database.models import JobModel, NSInstModel
from lcm.pub.exceptions import NSLCMException
from lcm.pub.utils.jobutil import JobUtil
from lcm.jobs.enum import JOB_MODEL_STATUS, JOB_PROGRESS
from lcm.pub.utils.values import ignore_case_get
from lcm.ns_vnfs.biz.scale_vnfs import NFManualScaleService
from lcm.ns.biz.ns_lcm_op_occ import NsLcmOpOcc
# Legacy job error code; current code reports via JOB_PROGRESS.ERROR instead.
JOB_ERROR = 255
# Accepted values for the request's scaleType field; only SCALE_NS is handled.
SCALE_TYPE = ("SCALE_NS", "SCALE_VNF")
logger = logging.getLogger(__name__)
class NSManualScaleService(threading.Thread):
    """Worker thread performing a manual SCALE_NS operation for one NS
    instance.

    The work is fanned out to one NFManualScaleService job per VNF, with
    overall progress tracked in the job table and the LCM operation
    occurrence record created in ``__init__``.
    """

    def __init__(self, ns_instance_id, request_data, job_id):
        super(NSManualScaleService, self).__init__()
        self.ns_instance_id = ns_instance_id
        self.request_data = request_data
        self.job_id = job_id
        # Create the occurrence record up front so failures can be reported
        # against it.
        self.occ_id = NsLcmOpOcc.create(ns_instance_id, "SCALE", "PROCESSING", False, request_data)
        self.scale_vnf_data = ''

    def run(self):
        """Thread entry point: run the scale and translate any exception into
        an ERROR job status plus a FAILED occurrence record."""
        try:
            self.do_biz()
        except NSLCMException as e:
            JobUtil.add_job_status(self.job_id, JOB_PROGRESS.ERROR, e.args[0])
            NsLcmOpOcc.update(self.occ_id, operationState="FAILED", error=e.args[0])
        except Exception as e:
            # NOTE(review): e.args may be empty for some exception types,
            # which would raise IndexError here — confirm all raisers attach
            # a message.
            logger.error(e.args[0])
            logger.error(traceback.format_exc())
            JobUtil.add_job_status(self.job_id, JOB_PROGRESS.ERROR, 'ns scale fail')
            NsLcmOpOcc.update(self.occ_id, operationState="FAILED", error=e.args[0])
        finally:
            # The NS instance always returns to ACTIVE, whatever the outcome.
            self.update_ns_status(NS_INST_STATUS.ACTIVE)

    def do_biz(self):
        """Main workflow: mark scaling, validate the request, scale each VNF,
        then mark the job and occurrence complete."""
        self.update_job(JOB_PROGRESS.STARTED, desc='ns scale start')
        self.update_ns_status(NS_INST_STATUS.SCALING)
        self.check_and_set_params()
        self.do_vnfs_scale()
        self.update_job(JOB_PROGRESS.FINISHED, desc='ns scale success')
        NsLcmOpOcc.update(self.occ_id, "COMPLETED")

    def check_and_set_params(self):
        """Validate scaleType and resolve the per-VNF scale data list.

        Raises NSLCMException when the request is not SCALE_NS or no
        scaleVnfData can be derived.
        """
        scale_type = ignore_case_get(self.request_data, 'scaleType')
        if scale_type != SCALE_TYPE[0]:
            raise NSLCMException('scaleType should be SCALE_NS.')
        scale_ns_data = ignore_case_get(self.request_data, 'scaleNsData')
        self.scale_vnf_data = get_scale_vnf_data_info_list(
            scale_ns_data, self.ns_instance_id)
        logger.debug('scale_vnf_data = %s' % self.scale_vnf_data)
        if not self.scale_vnf_data:
            raise NSLCMException('Failed to get scaleVnfData parameter')

    def do_vnfs_scale(self):
        """Scale every VNF in turn; each VNF maps onto an equal slice of the
        11%-90% progress range. A failed VNF aborts the whole operation."""
        for i in range(len(self.scale_vnf_data)):
            vnf_scale_params = self.prepare_vnf_scale_params(
                self.scale_vnf_data[i])
            count = len(self.scale_vnf_data)
            # NOTE(review): under true division these are float progress
            # values; presumably acceptable to JobUtil — confirm.
            progress_range = [11 + 80 / count * i, 10 + 80 / count * (i + 1)]
            status = self.do_vnf_scale(vnf_scale_params, progress_range)
            if status is JOB_MODEL_STATUS.FINISHED:
                logger.info(
                    'nf[%s] scale handle end' %
                    vnf_scale_params.get('vnfInstanceId'))
                self.update_job(
                    progress_range[1],
                    desc='nf[%s] scale handle end' %
                    vnf_scale_params.get('vnfInstanceId'))
            else:
                raise NSLCMException('VNF scale failed')

    def prepare_vnf_scale_params(self, vnf_data):
        """Build the parameter dict handed to NFManualScaleService."""
        return {
            "vnfInstanceId": ignore_case_get(vnf_data, 'vnfInstanceId'),
            "scaleVnfData": ignore_case_get(vnf_data, 'scaleByStepData'),
            "nsInstanceId": self.ns_instance_id
        }

    def do_vnf_scale(self, vnf_scale_params, progress_range):
        """Start one VNF scale job and block until it completes.

        Returns the JOB_MODEL_STATUS of the sub-job.
        """
        nf_inst_id = vnf_scale_params.get('vnfInstanceId')
        nf_service = NFManualScaleService(nf_inst_id, vnf_scale_params)
        nf_service.start()
        self.update_job(
            progress_range[0],
            desc='nf[%s] scale handle start' %
            nf_inst_id)
        status = self.wait_job_finish(nf_service.job_id)
        return status

    @staticmethod
    def wait_job_finish(sub_job_id, timeout=3600):
        """Poll the sub-job every 2 seconds until it finishes, errors, or the
        timeout (in seconds) elapses."""
        query_interval = 2
        start_time = end_time = datetime.datetime.now()
        while (end_time - start_time).seconds < timeout:
            job_result = JobModel.objects.get(jobid=sub_job_id)
            time.sleep(query_interval)
            end_time = datetime.datetime.now()
            if job_result.progress == JOB_PROGRESS.FINISHED:
                return JOB_MODEL_STATUS.FINISHED
            # Any progress value beyond FINISHED encodes an error state.
            if job_result.progress > JOB_PROGRESS.FINISHED:
                return JOB_MODEL_STATUS.ERROR
        return JOB_MODEL_STATUS.TIMEOUT

    def update_job(self, progress, desc=''):
        """Record job progress/description for the NS-level job."""
        JobUtil.add_job_status(self.job_id, progress, desc)

    def update_ns_status(self, status):
        """Persist the NS instance status to the database."""
        NSInstModel.objects.filter(
            id=self.ns_instance_id).update(
            status=status)
6595d0f882ff5650d2cd8bdbeac3fca3c97db7dc | 614 | py | Python | AI.py | NotVirtualism/Uno-vs-AI | a6e6f737a85430bcbbef37303aa1116697b9d3f6 | [
"MIT"
] | null | null | null | AI.py | NotVirtualism/Uno-vs-AI | a6e6f737a85430bcbbef37303aa1116697b9d3f6 | [
"MIT"
] | null | null | null | AI.py | NotVirtualism/Uno-vs-AI | a6e6f737a85430bcbbef37303aa1116697b9d3f6 | [
"MIT"
] | null | null | null | from Deck import *
import random
class AI:
    """Simple Uno opponent that plays a random legal card."""

    def __init__(self):
        # Cards currently held by the AI.
        self.hand = []

    def turn(self, card):
        """Play one turn against *card*, the top of the discard pile.

        Returns the card played (wild cards get a random colour assigned),
        or the string "draw" when nothing in the hand is playable.
        """
        playable = [
            c for c in self.hand
            if c.color == card.color or c.value == card.value or c.color == "Black"
        ]
        if not playable:
            return "draw"
        pick = random.choice(playable)
        self.hand.remove(pick)
        if pick.color == "Black":
            # Wild card: commit to a concrete colour before playing it.
            pick.color = random.choice(["Red", "Green", "Blue", "Yellow"])
        return pick
cee94b612bf2ba3948e4694a2e6502c5b4877ed2 | 2,127 | py | Python | imperative/python/test/unit/core/test_interpreter.py | googol-lab/MegEngine | e0193cc4431371719a6ddb0fa85f910c5583bfc8 | [
"Apache-2.0"
] | 1 | 2021-03-25T01:13:24.000Z | 2021-03-25T01:13:24.000Z | imperative/python/test/unit/core/test_interpreter.py | googol-lab/MegEngine | e0193cc4431371719a6ddb0fa85f910c5583bfc8 | [
"Apache-2.0"
] | 1 | 2021-05-27T08:55:38.000Z | 2021-05-27T08:55:38.000Z | imperative/python/test/unit/core/test_interpreter.py | googol-lab/MegEngine | e0193cc4431371719a6ddb0fa85f910c5583bfc8 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pytest
import megengine as mge
import megengine.functional as F
from megengine.core._imperative_rt.core2 import (
_set_drop_flag,
_set_swap_flag,
config_async_level,
get_async_level,
)
def test_basic():
    """config_async_level accepts levels up to 2 and rejects higher values."""
    config_async_level(2)
    assert get_async_level() == 2
    with pytest.raises(RuntimeError):
        config_async_level(3)


def test_level1_infer_value():
    """At async level 1, an op whose value inference fails must raise eagerly."""
    config_async_level(1)
    a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")
    b = mge.tensor([1, 1], dtype="float32")
    identity = mge.tensor(np.array([[1, 0], [0, 1]]), dtype="float32")
    # make DepType::VALUE unknown
    c = F.matmul(b, identity)
    with pytest.raises(RuntimeError):
        d = F.reshape(a, c)
    # Restore the default level so later tests are unaffected.
    config_async_level(2)


def test_level1_infer_shape_with_unknown():
    """Level-1 ops on tensors with unknown shape must raise at call time."""
    config_async_level(2)
    a = mge.tensor([[1, 2, 2, 3]], dtype="float32")
    b = mge.tensor([1, 1])
    multi2 = mge.tensor(np.array([[2, 0], [0, 2]]), dtype="float32")
    c = F.matmul(b, multi2)
    # make DepType::SHAPE unknown
    d = F.reshape(a, c)
    e = mge.tensor([[1, 2]], dtype="float32")
    config_async_level(1)
    # test src no shape, throw in level1
    with pytest.raises(RuntimeError):
        f = F.reshape(d, b)
    with pytest.raises(RuntimeError):
        g = F.matmul(d, e)
    # Restore the default level for subsequent tests.
    config_async_level(2)
def test_host_compute_elemwise():
    """Host-computed elementwise results keep DepType::VALUE known, so a
    shape mismatch in reshape still raises."""
    a = mge.tensor([[1, 2], [2, 3], [3, 4]], dtype="float32")
    b = mge.tensor([1, 1], dtype="int32")
    # check DepType::VALUE is still known
    c = b * 2
    with pytest.raises(RuntimeError):
        d = F.reshape(a, c)


def test_swap_drop_basic():
    """Swapped-out / dropped tensors must still be recomputable on access."""
    _set_swap_flag(True)
    _set_drop_flag(True)
    # test xpu compute
    x = mge.tensor(np.ones((3, 3)), dtype=np.float32)
    y = mge.tensor(np.ones((3, 3)), dtype=np.float32)
    z = x + y
    x._swap_out()
    z._drop()
    # Forcing numpy() must transparently restore the dropped value.
    z.numpy()
    # test host value compute
    x = mge.tensor(np.ones((2, 2)), dtype=np.float32)
    y = mge.tensor(np.ones((2, 2)), dtype=np.float32)
    z = x + y
    x._swap_out()
    z._drop()
    z.numpy()
    # Reset the global flags so other tests run with defaults.
    _set_swap_flag(False)
    _set_drop_flag(False)
755d675d1dca8000305ef76cf8c728e37807c866 | 644 | py | Python | old/camera.py | langulski/Pygame-TransilvanianHunger | 0fd89d73d5f48891cbea84da76eec83025f4e52e | [
"MIT"
] | null | null | null | old/camera.py | langulski/Pygame-TransilvanianHunger | 0fd89d73d5f48891cbea84da76eec83025f4e52e | [
"MIT"
] | null | null | null | old/camera.py | langulski/Pygame-TransilvanianHunger | 0fd89d73d5f48891cbea84da76eec83025f4e52e | [
"MIT"
] | null | null | null | import pygame as pg
class Camera:
    """Scrolling camera that keeps a target centred, clamped to the map.

    Bug fixes: the constructor previously ignored its ``width``/``height``
    arguments (hard-coding 800x600), and ``update`` referenced undefined
    globals ``WIDTH``/``HEIGHT``, raising NameError. The viewport size is now
    an explicit, defaulted parameter (backward-compatible).
    """

    def __init__(self, width, height, viewport_width=800, viewport_height=600):
        self.camera = pg.Rect(0, 0, width, height)
        # Map dimensions in pixels.
        self.width = width
        self.height = height
        # Visible screen size, used to clamp scrolling at the map edges.
        self.viewport_width = viewport_width
        self.viewport_height = viewport_height

    def apply(self, entity):
        """Return *entity*'s rect shifted into camera (screen) space."""
        return entity.rect.move(self.camera.topleft)

    def update(self, target):
        """Re-centre the camera offset on *target*, clamped to map bounds."""
        x = -target.rect.x + int(200 / 2)
        y = -target.rect.y + int(300 / 2)
        # limit scrolling to map size
        x = min(0, x)  # left
        y = min(0, y)  # top
        x = max(-(self.width - self.viewport_width), x)    # right
        y = max(-(self.height - self.viewport_height), y)  # bottom
        self.camera = pg.Rect(x, y, self.width, self.height)
c112d4b84056f6142b8ba6c4a28a95848c3de635 | 2,355 | py | Python | tests/unit/test_IceOptions.py | ylipacbio/pbtranscript | 6b4ef164f191ffd4201feb62b951d9eeac3315b6 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_IceOptions.py | ylipacbio/pbtranscript | 6b4ef164f191ffd4201feb62b951d9eeac3315b6 | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_IceOptions.py | ylipacbio/pbtranscript | 6b4ef164f191ffd4201feb62b951d9eeac3315b6 | [
"BSD-3-Clause"
] | 1 | 2021-02-26T10:08:09.000Z | 2021-02-26T10:08:09.000Z | #!/usr/bin/env python
import unittest
import os.path as op
import filecmp
from pbtranscript.ClusterOptions import IceOptions
from pbtranscript.Utils import mknewdir, execute
from test_setpath import DATA_DIR, OUT_DIR, SIV_DATA_DIR, SIV_STD_DIR
def copy_in_fasta_to_out(in_dir, out_dir, filename):
    """Recreate *out_dir*, copy *filename* from *in_dir* into it, and return
    the path of the copy."""
    mknewdir(out_dir)
    src = op.join(in_dir, filename)
    dst = op.join(out_dir, filename)
    execute(cmd="cp %s %s" % (src, dst))
    return dst
class TestIceOptions(unittest.TestCase):
    """Test pbtranscript.ClusterOptions.IceOptions"""

    def setUp(self):
        """Initialize the shared input FASTA file name."""
        self.filename = "reads_of_insert.fasta"

    def test_read_write_config(self):
        """_write_config persists detected sizes; _read_config restores them."""
        out_dir = op.join(OUT_DIR, "test_ice_opts_read_write_config")
        fasta_filename = copy_in_fasta_to_out(DATA_DIR, out_dir, self.filename)
        ice_opts = IceOptions(cDNA_size="above5k")
        ice_opts._write_config(fasta_filename)
        self.assertTrue(op.exists(ice_opts._config_filename(
            fasta_filename=fasta_filename)))
        self.assertEqual(ice_opts.low_cDNA_size, 739)
        self.assertEqual(ice_opts.high_cDNA_size, 4175)
        self.assertEqual(ice_opts.sensitive_mode, False)

        # Reading the config back must reproduce the same values.
        ice_opts._read_config(fasta_filename)
        self.assertEqual(ice_opts.low_cDNA_size, 739)
        self.assertEqual(ice_opts.high_cDNA_size, 4175)
        self.assertEqual(ice_opts.sensitive_mode, False)

    def test_detect_cDNA_size(self):
        """detect_cDNA_size writes a config file and fills in size fields."""
        # BUG FIX: the module only binds `op` (via `import os.path as op`),
        # so the bare name `os` was undefined and os.remove() raised
        # NameError whenever a stale config file existed.
        import os
        out_dir = op.join(OUT_DIR, "test_ice_opts_detect_cDNA_size")
        fasta_filename = copy_in_fasta_to_out(DATA_DIR, out_dir, self.filename)
        ice_opts = IceOptions(cDNA_size="above5k")
        config_filename = ice_opts._config_filename(fasta_filename=fasta_filename)
        if op.exists(config_filename):
            os.remove(config_filename)
        ice_opts.detect_cDNA_size(fasta_filename)
        self.assertTrue(op.exists(config_filename))
        self.assertEqual(ice_opts.low_cDNA_size, 739)
        self.assertEqual(ice_opts.high_cDNA_size, 4175)
        self.assertEqual(ice_opts.cDNA_size, "under1k")
        self.assertEqual(ice_opts.sensitive_mode, False)
| 35.149254 | 82 | 0.70913 |
839e9469b1a49e5032fd0ff11cf9e9e1edf36c66 | 1,140 | py | Python | configuration/test_IConfiguration.py | jameshi16/TypeSound | 238d019ed22ed5b41df533ac5ec43cbf28428fa6 | [
"MIT"
] | null | null | null | configuration/test_IConfiguration.py | jameshi16/TypeSound | 238d019ed22ed5b41df533ac5ec43cbf28428fa6 | [
"MIT"
] | null | null | null | configuration/test_IConfiguration.py | jameshi16/TypeSound | 238d019ed22ed5b41df533ac5ec43cbf28428fa6 | [
"MIT"
] | null | null | null | import unittest
from .IConfiguration import IConfiguration, ConfigurationNotLoaded
class TestIConfiguration(unittest.TestCase):
    """Exercise the IConfiguration base-class contract."""

    def setUp(self):
        self.instance = IConfiguration()

    def test_LoadedOrRaise(self):
        """_loaded_or_raise raises only while the configuration is unloaded."""
        self.instance._loaded = False
        with self.assertRaises(ConfigurationNotLoaded):
            self.instance._loaded_or_raise()
        self.instance._loaded = True
        self.instance._loaded_or_raise()
        self.instance._loaded = False

    def test_Unimplemented(self):
        """Every abstract entry point raises NotImplementedError.

        BUG FIX: previously all five calls shared a single assertRaises
        block; the context manager exits at the first raise, so everything
        after get_version() was never executed. Each call now gets its own
        assertRaises so all five are actually checked.
        """
        with self.assertRaises(NotImplementedError):
            self.instance.get_version()
        with self.assertRaises(NotImplementedError):
            self.instance.load_config_from_file('no matter')
        with self.assertRaises(NotImplementedError):
            self.instance.load_config_from_string('no matter')
        with self.assertRaises(NotImplementedError):
            self.instance.save_config_to_file('no matter')
        with self.assertRaises(NotImplementedError):
            self.instance.save_config_to_string()

    def test_LoadedData(self):
        """loaded_data raises when unloaded, otherwise returns the data dict."""
        self.instance._loaded = False
        with self.assertRaises(ConfigurationNotLoaded):
            self.instance.loaded_data
        self.instance._loaded = True
        self.assertEqual(self.instance.loaded_data, {})
        self.instance._loaded = False
663e858634aff274bac0348948da6ec8c81e8bb8 | 37,147 | py | Python | tests/unit/modules/boto_lambda_test.py | ahammond/salt | 945b21b70dbe708716d7b009a2005ef0acf76e6b | [
"Apache-2.0"
] | 1 | 2016-05-20T09:15:57.000Z | 2016-05-20T09:15:57.000Z | tests/unit/modules/boto_lambda_test.py | ahammond/salt | 945b21b70dbe708716d7b009a2005ef0acf76e6b | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/boto_lambda_test.py | ahammond/salt | 945b21b70dbe708716d7b009a2005ef0acf76e6b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import platform
import random
import string
# Import Salt Testing libs
from salttesting.unit import skipIf, TestCase
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, patch
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import salt.config
import salt.loader
from salt.modules import boto_lambda
from salt.exceptions import SaltInvocationError
# Import 3rd-party libs
from tempfile import NamedTemporaryFile
import logging
import os
# Import Mock libraries
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
# pylint: disable=import-error,no-name-in-module
try:
import boto3
from botocore.exceptions import ClientError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
# Whether the test host is a SuSE distribution (affects some salt behavior).
ON_SUSE = False
if 'SuSE' in platform.dist():
    ON_SUSE = True

# pylint: enable=import-error,no-name-in-module

# the boto_lambda module relies on the connect_to_region() method
# which was added in boto 2.8.0
# https://github.com/boto/boto/commit/33ac26b416fbb48a60602542b4ce15dcc7029f12
required_boto3_version = '1.2.1'

# Fake AWS connection parameters; the key is re-randomized per test in
# BotoLambdaTestCaseBase.setUp() to defeat connection caching.
region = 'us-east-1'
access_key = 'GKTADJGHEIQSXMKKRBJ08H'
secret_key = 'askdjghsdfjkghWupUjasdflkdfklgjsdfjajkghs'
conn_parameters = {'region': region, 'key': access_key, 'keyid': secret_key, 'profile': {}}

# Canned botocore error used to simulate API failures.
error_message = 'An error occurred (101) when calling the {0} operation: Test-defined error'
error_content = {
  'Error': {
    'Code': 101,
    'Message': "Test-defined error"
  }
}

# Canned API payloads returned by the mocked boto3 client.
function_ret = dict(FunctionName='testfunction',
                    Runtime='python2.7',
                    Role=None,
                    Handler='handler',
                    Description='abcdefg',
                    Timeout=5,
                    MemorySize=128,
                    CodeSha256='abcdef',
                    CodeSize=199,
                    FunctionArn='arn:lambda:us-east-1:1234:Something',
                    LastModified='yes',
                    VpcConfig=None)
alias_ret = dict(AliasArn='arn:lambda:us-east-1:1234:Something',
                 Name='testalias',
                 FunctionVersion='3',
                 Description='Alias description')
event_source_mapping_ret = dict(UUID='1234-1-123',
                                BatchSize=123,
                                EventSourceArn='arn:lambda:us-east-1:1234:Something',
                                FunctionArn='arn:lambda:us-east-1:1234:Something',
                                LastModified='yes',
                                LastProcessingResult='SUCCESS',
                                State='Enabled',
                                StateTransitionReason='Random')

log = logging.getLogger(__name__)

# Wire the module under test into a minimal salt loader environment.
opts = salt.config.DEFAULT_MINION_OPTS
context = {}
utils = salt.loader.utils(opts, whitelist=['boto3'], context=context)

boto_lambda.__utils__ = utils
boto_lambda.__init__(opts)
boto_lambda.__salt__ = {}
def _has_required_boto():
    '''
    Returns True/False boolean depending on if Boto is installed and correct
    version.
    '''
    if not HAS_BOTO:
        return False
    installed = LooseVersion(boto3.__version__)
    minimum = LooseVersion(required_boto3_version)
    return installed >= minimum
class BotoLambdaTestCaseBase(TestCase):
    """Shared fixture: patches boto3.session.Session with a MagicMock so no
    real AWS calls are made; the fake client is exposed as ``self.conn``."""
    conn = None

    # Set up MagicMock to replace the boto3 session
    def setUp(self):
        boto_lambda.__context__ = {}
        context.clear()
        # connections keep getting cached from prior tests, can't find the
        # correct context object to clear it. So randomize the cache key, to prevent any
        # cache hits
        conn_parameters['key'] = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(50))

        self.patcher = patch('boto3.session.Session')
        # Ensure the patch is undone even if a test errors out.
        self.addCleanup(self.patcher.stop)
        mock_session = self.patcher.start()

        session_instance = mock_session.return_value
        self.conn = MagicMock()
        session_instance.client.return_value = self.conn
class TempZipFile(object):
    """Context manager yielding the path of a throwaway ``.zip`` file.

    The file is created with ``delete=False`` so it survives the handle
    being closed inside ``__enter__``, and is removed explicitly on exit.

    Bug fix: ``NamedTemporaryFile`` defaults to binary mode ('w+b'), so the
    ``str`` write failed with TypeError under Python 3; text mode is now
    requested explicitly. ``__exit__`` also no longer shadows the builtin
    ``type``.
    """

    def __enter__(self):
        with NamedTemporaryFile(mode='w', suffix='.zip', prefix='salt_test_',
                                delete=False) as tmp:
            tmp.write('###\n')
            self.zipfile = tmp.name
        return self.zipfile

    def __exit__(self, exc_type, exc_value, exc_tb):
        os.remove(self.zipfile)
class BotoLambdaTestCaseMixin(object):
    # Placeholder mixin for helpers shared across the boto_lambda test cases;
    # intentionally empty for now.
    pass
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
' or equal to version {0}'
.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaFunctionTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
    '''
    TestCase for salt.modules.boto_lambda module

    Each test stubs the mocked boto3 client (``self.conn``) and then
    exercises one boto_lambda module function against it.
    '''

    def test_that_when_checking_if_a_function_exists_and_a_function_exists_the_function_exists_method_returns_true(self):
        '''
        Tests checking lambda function existence when the lambda function already exists
        '''
        self.conn.list_functions.return_value = {'Functions': [function_ret]}
        func_exists_result = boto_lambda.function_exists(FunctionName=function_ret['FunctionName'], **conn_parameters)

        self.assertTrue(func_exists_result['exists'])

    def test_that_when_checking_if_a_function_exists_and_a_function_does_not_exist_the_function_exists_method_returns_false(self):
        '''
        Tests checking lambda function existence when the lambda function does not exist
        '''
        # The listed function name differs from the one queried below.
        self.conn.list_functions.return_value = {'Functions': [function_ret]}
        func_exists_result = boto_lambda.function_exists(FunctionName='myfunc', **conn_parameters)

        self.assertFalse(func_exists_result['exists'])

    def test_that_when_checking_if_a_function_exists_and_boto3_returns_an_error_the_function_exists_method_returns_error(self):
        '''
        Tests checking lambda function existence when boto returns an error
        '''
        self.conn.list_functions.side_effect = ClientError(error_content, 'list_functions')
        func_exists_result = boto_lambda.function_exists(FunctionName='myfunc', **conn_parameters)

        self.assertEqual(func_exists_result.get('error', {}).get('message'), error_message.format('list_functions'))
    def test_that_when_creating_a_function_from_zipfile_succeeds_the_create_function_method_returns_true(self):
        '''
        tests True function created (code supplied as a local zip file).
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            with TempZipFile() as zipfile:
                self.conn.create_function.return_value = function_ret
                lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
                                                                     Runtime='python2.7',
                                                                     Role='myrole',
                                                                     Handler='file.method',
                                                                     ZipFile=zipfile,
                                                                     **conn_parameters)

                self.assertTrue(lambda_creation_result['created'])

    def test_that_when_creating_a_function_from_s3_succeeds_the_create_function_method_returns_true(self):
        '''
        tests True function created (code supplied via S3 bucket/key).
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            self.conn.create_function.return_value = function_ret
            lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
                                                                 Runtime='python2.7',
                                                                 Role='myrole',
                                                                 Handler='file.method',
                                                                 S3Bucket='bucket',
                                                                 S3Key='key',
                                                                 **conn_parameters)

            self.assertTrue(lambda_creation_result['created'])

    def test_that_when_creating_a_function_without_code_raises_a_salt_invocation_error(self):
        '''
        tests Creating a function with neither ZipFile nor S3 code source.
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            with self.assertRaisesRegexp(SaltInvocationError,
                                         'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
                lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
                                                                     Runtime='python2.7',
                                                                     Role='myrole',
                                                                     Handler='file.method',
                                                                     **conn_parameters)

    def test_that_when_creating_a_function_with_zipfile_and_s3_raises_a_salt_invocation_error(self):
        '''
        tests Creating a function with both ZipFile and S3 code sources.
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            with self.assertRaisesRegexp(SaltInvocationError,
                                         'Either ZipFile must be specified, or S3Bucket and S3Key must be provided.'):
                with TempZipFile() as zipfile:
                    lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
                                                                         Runtime='python2.7',
                                                                         Role='myrole',
                                                                         Handler='file.method',
                                                                         ZipFile=zipfile,
                                                                         S3Bucket='bucket',
                                                                         S3Key='key',
                                                                         **conn_parameters)

    def test_that_when_creating_a_function_fails_the_create_function_method_returns_error(self):
        '''
        tests False function not created: the API error is surfaced in the result.
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            self.conn.create_function.side_effect = ClientError(error_content, 'create_function')
            with TempZipFile() as zipfile:
                lambda_creation_result = boto_lambda.create_function(FunctionName='testfunction',
                                                                     Runtime='python2.7',
                                                                     Role='myrole',
                                                                     Handler='file.method',
                                                                     ZipFile=zipfile,
                                                                     **conn_parameters)
            self.assertEqual(lambda_creation_result.get('error', {}).get('message'), error_message.format('create_function'))
    def test_that_when_deleting_a_function_succeeds_the_delete_function_method_returns_true(self):
        '''
        tests True function deleted.
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            result = boto_lambda.delete_function(FunctionName='testfunction',
                                                 Qualifier=1,
                                                 **conn_parameters)

            self.assertTrue(result['deleted'])

    def test_that_when_deleting_a_function_fails_the_delete_function_method_returns_false(self):
        '''
        tests False function not deleted when the API raises.
        '''
        with patch.dict(boto_lambda.__salt__, {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
            self.conn.delete_function.side_effect = ClientError(error_content, 'delete_function')
            result = boto_lambda.delete_function(FunctionName='testfunction',
                                                 **conn_parameters)
            self.assertFalse(result['deleted'])
def test_that_when_describing_function_it_returns_the_dict_of_properties_returns_true(self):
    '''
    describe_function returns the function's properties when it exists.
    '''
    self.conn.list_functions.return_value = {'Functions': [function_ret]}
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        res = boto_lambda.describe_function(
            FunctionName=function_ret['FunctionName'], **conn_parameters)
        self.assertEqual(res, {'function': function_ret})
def test_that_when_describing_function_it_returns_the_dict_of_properties_returns_false(self):
    '''
    describe_function returns a falsy 'function' entry when it does not exist.
    '''
    self.conn.list_functions.return_value = {'Functions': []}
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        res = boto_lambda.describe_function(FunctionName='testfunction',
                                            **conn_parameters)
        self.assertFalse(res['function'])
def test_that_when_describing_lambda_on_client_error_it_returns_error(self):
    '''
    describe_function propagates a boto ClientError as an 'error' entry.
    '''
    self.conn.list_functions.side_effect = ClientError(error_content,
                                                       'list_functions')
    res = boto_lambda.describe_function(FunctionName='testfunction',
                                        **conn_parameters)
    self.assertTrue('error' in res)
def test_that_when_updating_a_function_succeeds_the_update_function_method_returns_true(self):
    '''
    tests True function updated.

    Mocks ``update_function_configuration`` -- the boto3 client method
    actually invoked by ``boto_lambda.update_function_config`` and the one
    patched by the matching failure test below.  The original test set a
    return value on a non-existent ``update_function_config`` client
    attribute, so the configured value was never consulted.
    '''
    with patch.dict(boto_lambda.__salt__,
                    {'boto_iam.get_account_id': MagicMock(return_value='1234')}):
        self.conn.update_function_configuration.return_value = function_ret
        result = boto_lambda.update_function_config(
            FunctionName=function_ret['FunctionName'], Role='myrole',
            **conn_parameters)
        self.assertTrue(result['updated'])
def test_that_when_updating_a_function_fails_the_update_function_method_returns_error(self):
    '''
    update_function_config surfaces a boto ClientError as an error message.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.update_function_configuration.side_effect = ClientError(
            error_content, 'update_function')
        res = boto_lambda.update_function_config(FunctionName='testfunction',
                                                 Role='myrole',
                                                 **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('update_function'))
def test_that_when_updating_function_code_from_zipfile_succeeds_the_update_function_method_returns_true(self):
    '''
    update_function_code from a local zipfile reports {'updated': True}.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        with TempZipFile() as zipfile:
            self.conn.update_function_code.return_value = function_ret
            res = boto_lambda.update_function_code(
                FunctionName=function_ret['FunctionName'], ZipFile=zipfile,
                **conn_parameters)
            self.assertTrue(res['updated'])
def test_that_when_updating_function_code_from_s3_succeeds_the_update_function_method_returns_true(self):
    '''
    update_function_code from an S3 object reports {'updated': True}.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.update_function_code.return_value = function_ret
        res = boto_lambda.update_function_code(FunctionName='testfunction',
                                               S3Bucket='bucket', S3Key='key',
                                               **conn_parameters)
        self.assertTrue(res['updated'])
def test_that_when_updating_function_code_without_code_raises_a_salt_invocation_error(self):
    '''
    update_function_code with neither ZipFile nor an S3 source must raise
    SaltInvocationError.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        with self.assertRaisesRegexp(
                SaltInvocationError,
                'Either ZipFile must be specified, or S3Bucket '
                'and S3Key must be provided.'):
            boto_lambda.update_function_code(FunctionName='testfunction',
                                             **conn_parameters)
def test_that_when_updating_function_code_fails_the_update_function_method_returns_error(self):
    '''
    update_function_code surfaces a boto ClientError as an error message.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.update_function_code.side_effect = ClientError(
            error_content, 'update_function_code')
        res = boto_lambda.update_function_code(FunctionName='testfunction',
                                               S3Bucket='bucket', S3Key='key',
                                               **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('update_function_code'))
def test_that_when_listing_function_versions_succeeds_the_list_function_versions_method_returns_true(self):
    '''
    list_function_versions returns a truthy 'Versions' list when present.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.list_versions_by_function.return_value = {
            'Versions': [function_ret]}
        res = boto_lambda.list_function_versions(FunctionName='testfunction',
                                                 **conn_parameters)
        self.assertTrue(res['Versions'])
def test_that_when_listing_function_versions_fails_the_list_function_versions_method_returns_false(self):
    '''
    list_function_versions returns a falsy 'Versions' list when none exist.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.list_versions_by_function.return_value = {'Versions': []}
        res = boto_lambda.list_function_versions(FunctionName='testfunction',
                                                 **conn_parameters)
        self.assertFalse(res['Versions'])
def test_that_when_listing_function_versions_fails_the_list_function_versions_method_returns_error(self):
    '''
    list_function_versions surfaces a boto ClientError as an error message.
    '''
    acct = {'boto_iam.get_account_id': MagicMock(return_value='1234')}
    with patch.dict(boto_lambda.__salt__, acct):
        self.conn.list_versions_by_function.side_effect = ClientError(
            error_content, 'list_versions_by_function')
        res = boto_lambda.list_function_versions(FunctionName='testfunction',
                                                 **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('list_versions_by_function'))
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
        ' or equal to version {0}'.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaAliasTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
    '''
    TestCase for salt.modules.boto_lambda module aliases
    '''

    def test_that_when_creating_an_alias_succeeds_the_create_alias_method_returns_true(self):
        '''
        create_alias reports {'created': True} on success.
        '''
        self.conn.create_alias.return_value = alias_ret
        res = boto_lambda.create_alias(
            FunctionName='testfunction', Name=alias_ret['Name'],
            FunctionVersion=alias_ret['FunctionVersion'], **conn_parameters)
        self.assertTrue(res['created'])

    def test_that_when_creating_an_alias_fails_the_create_alias_method_returns_error(self):
        '''
        create_alias surfaces a boto ClientError as an error message.
        '''
        self.conn.create_alias.side_effect = ClientError(error_content,
                                                         'create_alias')
        res = boto_lambda.create_alias(
            FunctionName='testfunction', Name=alias_ret['Name'],
            FunctionVersion=alias_ret['FunctionVersion'], **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('create_alias'))

    def test_that_when_deleting_an_alias_succeeds_the_delete_alias_method_returns_true(self):
        '''
        delete_alias reports {'deleted': True} on success.
        '''
        res = boto_lambda.delete_alias(FunctionName='testfunction',
                                       Name=alias_ret['Name'],
                                       **conn_parameters)
        self.assertTrue(res['deleted'])

    def test_that_when_deleting_an_alias_fails_the_delete_alias_method_returns_false(self):
        '''
        delete_alias reports {'deleted': False} when boto raises ClientError.
        '''
        self.conn.delete_alias.side_effect = ClientError(error_content,
                                                         'delete_alias')
        res = boto_lambda.delete_alias(FunctionName='testfunction',
                                       Name=alias_ret['Name'],
                                       **conn_parameters)
        self.assertFalse(res['deleted'])

    def test_that_when_checking_if_an_alias_exists_and_the_alias_exists_the_alias_exists_method_returns_true(self):
        '''
        alias_exists reports {'exists': True} when the alias is listed.
        '''
        self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
        res = boto_lambda.alias_exists(FunctionName='testfunction',
                                       Name=alias_ret['Name'],
                                       **conn_parameters)
        self.assertTrue(res['exists'])

    def test_that_when_checking_if_an_alias_exists_and_the_alias_does_not_exist_the_alias_exists_method_returns_false(self):
        '''
        alias_exists reports {'exists': False} for an unknown alias name.
        '''
        self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
        res = boto_lambda.alias_exists(FunctionName='testfunction',
                                       Name='otheralias', **conn_parameters)
        self.assertFalse(res['exists'])

    def test_that_when_checking_if_an_alias_exists_and_boto3_returns_an_error_the_alias_exists_method_returns_error(self):
        '''
        alias_exists surfaces a boto ClientError as an error message.
        '''
        self.conn.list_aliases.side_effect = ClientError(error_content,
                                                         'list_aliases')
        res = boto_lambda.alias_exists(FunctionName='testfunction',
                                       Name=alias_ret['Name'],
                                       **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('list_aliases'))

    def test_that_when_describing_alias_it_returns_the_dict_of_properties_returns_true(self):
        '''
        describe_alias returns the alias properties when it exists.
        '''
        self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
        res = boto_lambda.describe_alias(FunctionName='testfunction',
                                         Name=alias_ret['Name'],
                                         **conn_parameters)
        self.assertEqual(res, {'alias': alias_ret})

    def test_that_when_describing_alias_it_returns_the_dict_of_properties_returns_false(self):
        '''
        describe_alias returns a falsy 'alias' entry for an unknown name.
        '''
        self.conn.list_aliases.return_value = {'Aliases': [alias_ret]}
        res = boto_lambda.describe_alias(FunctionName='testfunction',
                                         Name='othername', **conn_parameters)
        self.assertFalse(res['alias'])

    def test_that_when_describing_lambda_on_client_error_it_returns_error(self):
        '''
        describe_alias surfaces a boto ClientError as an 'error' entry.
        '''
        self.conn.list_aliases.side_effect = ClientError(error_content,
                                                         'list_aliases')
        res = boto_lambda.describe_alias(FunctionName='testfunction',
                                         Name=alias_ret['Name'],
                                         **conn_parameters)
        self.assertTrue('error' in res)

    def test_that_when_updating_an_alias_succeeds_the_update_alias_method_returns_true(self):
        '''
        update_alias reports {'updated': True} on success.
        '''
        self.conn.update_alias.return_value = alias_ret
        # NOTE(review): 'testfunctoin' [sic] preserved from the original;
        # the name only reaches a mock, so the typo does not affect the test.
        res = boto_lambda.update_alias(FunctionName='testfunctoin',
                                       Name=alias_ret['Name'],
                                       Description=alias_ret['Description'],
                                       **conn_parameters)
        self.assertTrue(res['updated'])

    def test_that_when_updating_an_alias_fails_the_update_alias_method_returns_error(self):
        '''
        update_alias surfaces a boto ClientError as an error message.
        '''
        self.conn.update_alias.side_effect = ClientError(error_content,
                                                         'update_alias')
        res = boto_lambda.update_alias(FunctionName='testfunction',
                                       Name=alias_ret['Name'],
                                       **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('update_alias'))
@skipIf(HAS_BOTO is False, 'The boto module must be installed.')
@skipIf(_has_required_boto() is False, 'The boto3 module must be greater than'
        ' or equal to version {0}'.format(required_boto3_version))
@skipIf(NO_MOCK, NO_MOCK_REASON)
class BotoLambdaEventSourceMappingTestCase(BotoLambdaTestCaseBase, BotoLambdaTestCaseMixin):
    '''
    TestCase for salt.modules.boto_lambda module mappings
    '''

    def test_that_when_creating_a_mapping_succeeds_the_create_event_source_mapping_method_returns_true(self):
        '''
        create_event_source_mapping reports {'created': True} on success.
        '''
        self.conn.create_event_source_mapping.return_value = event_source_mapping_ret
        res = boto_lambda.create_event_source_mapping(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            StartingPosition='LATEST', **conn_parameters)
        self.assertTrue(res['created'])

    def test_that_when_creating_an_event_source_mapping_fails_the_create_event_source_mapping_method_returns_error(self):
        '''
        create_event_source_mapping surfaces a ClientError as an error message.
        '''
        self.conn.create_event_source_mapping.side_effect = ClientError(
            error_content, 'create_event_source_mapping')
        res = boto_lambda.create_event_source_mapping(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            StartingPosition='LATEST', **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('create_event_source_mapping'))

    def test_that_when_listing_mapping_ids_succeeds_the_get_event_source_mapping_ids_method_returns_true(self):
        '''
        get_event_source_mapping_ids returns a truthy result when present.
        '''
        self.conn.list_event_source_mappings.return_value = {
            'EventSourceMappings': [event_source_mapping_ret]}
        res = boto_lambda.get_event_source_mapping_ids(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertTrue(res)

    def test_that_when_listing_event_source_mapping_ids_fails_the_get_event_source_mapping_ids_versions_method_returns_false(self):
        '''
        get_event_source_mapping_ids returns a falsy result when none exist.
        '''
        self.conn.list_event_source_mappings.return_value = {
            'EventSourceMappings': []}
        res = boto_lambda.get_event_source_mapping_ids(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertFalse(res)

    def test_that_when_listing_event_source_mapping_ids_fails_the_get_event_source_mapping_ids_method_returns_error(self):
        '''
        get_event_source_mapping_ids surfaces a ClientError as an error message.
        '''
        self.conn.list_event_source_mappings.side_effect = ClientError(
            error_content, 'list_event_source_mappings')
        res = boto_lambda.get_event_source_mapping_ids(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('list_event_source_mappings'))

    def test_that_when_deleting_an_event_source_mapping_by_UUID_succeeds_the_delete_event_source_mapping_method_returns_true(self):
        '''
        delete_event_source_mapping by UUID reports {'deleted': True}.
        '''
        res = boto_lambda.delete_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertTrue(res['deleted'])

    @skipIf(ON_SUSE, 'Skipping while debugging why the test suite hangs and bails on this test on opensuse')
    def test_that_when_deleting_an_event_source_mapping_by_name_succeeds_the_delete_event_source_mapping_method_returns_true(self):
        '''
        delete_event_source_mapping by ARN/function name reports deleted=True.
        '''
        self.conn.list_event_source_mappings.return_value = {
            'EventSourceMappings': [event_source_mapping_ret]}
        res = boto_lambda.delete_event_source_mapping(
            EventSourceArn=event_source_mapping_ret['EventSourceArn'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertTrue(res['deleted'])

    def test_that_when_deleting_an_event_source_mapping_without_identifier_the_delete_event_source_mapping_method_raises_saltinvocationexception(self):
        '''
        delete_event_source_mapping with no identifier must raise
        SaltInvocationError.
        '''
        with self.assertRaisesRegexp(
                SaltInvocationError,
                'Either UUID must be specified, or EventSourceArn '
                'and FunctionName must be provided.'):
            boto_lambda.delete_event_source_mapping(**conn_parameters)

    def test_that_when_deleting_an_event_source_mapping_fails_the_delete_event_source_mapping_method_returns_false(self):
        '''
        delete_event_source_mapping reports deleted=False on ClientError.
        '''
        self.conn.delete_event_source_mapping.side_effect = ClientError(
            error_content, 'delete_event_source_mapping')
        res = boto_lambda.delete_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertFalse(res['deleted'])

    def test_that_when_checking_if_an_event_source_mapping_exists_and_the_event_source_mapping_exists_the_event_source_mapping_exists_method_returns_true(self):
        '''
        event_source_mapping_exists reports exists=True when found.
        '''
        self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
        res = boto_lambda.event_source_mapping_exists(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertTrue(res['exists'])

    def test_that_when_checking_if_an_event_source_mapping_exists_and_the_event_source_mapping_does_not_exist_the_event_source_mapping_exists_method_returns_false(self):
        '''
        event_source_mapping_exists reports exists=False for an unknown UUID.
        '''
        self.conn.get_event_source_mapping.return_value = None
        res = boto_lambda.event_source_mapping_exists(
            UUID='other_UUID', **conn_parameters)
        self.assertFalse(res['exists'])

    def test_that_when_checking_if_an_event_source_mapping_exists_and_boto3_returns_an_error_the_event_source_mapping_exists_method_returns_error(self):
        '''
        event_source_mapping_exists surfaces a ClientError as an error message.
        '''
        self.conn.get_event_source_mapping.side_effect = ClientError(
            error_content, 'list_event_source_mappings')
        res = boto_lambda.event_source_mapping_exists(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('list_event_source_mappings'))

    def test_that_when_describing_event_source_mapping_it_returns_the_dict_of_properties_returns_true(self):
        '''
        describe_event_source_mapping returns the mapping when it exists.
        '''
        self.conn.get_event_source_mapping.return_value = event_source_mapping_ret
        res = boto_lambda.describe_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertEqual(res,
                         {'event_source_mapping': event_source_mapping_ret})

    def test_that_when_describing_event_source_mapping_it_returns_the_dict_of_properties_returns_false(self):
        '''
        describe_event_source_mapping returns a falsy entry when not found.
        '''
        self.conn.get_event_source_mapping.return_value = None
        res = boto_lambda.describe_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertFalse(res['event_source_mapping'])

    def test_that_when_describing_event_source_mapping_on_client_error_it_returns_error(self):
        '''
        describe_event_source_mapping surfaces a ClientError as 'error'.
        '''
        self.conn.get_event_source_mapping.side_effect = ClientError(
            error_content, 'get_event_source_mapping')
        res = boto_lambda.describe_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'], **conn_parameters)
        self.assertTrue('error' in res)

    def test_that_when_updating_an_event_source_mapping_succeeds_the_update_event_source_mapping_method_returns_true(self):
        '''
        update_event_source_mapping reports updated=True on success.
        '''
        self.conn.update_event_source_mapping.return_value = event_source_mapping_ret
        res = boto_lambda.update_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertTrue(res['updated'])

    def test_that_when_updating_an_event_source_mapping_fails_the_update_event_source_mapping_method_returns_error(self):
        '''
        update_event_source_mapping surfaces a ClientError as an error message.
        '''
        self.conn.update_event_source_mapping.side_effect = ClientError(
            error_content, 'update_event_source_mapping')
        res = boto_lambda.update_event_source_mapping(
            UUID=event_source_mapping_ret['UUID'],
            FunctionName=event_source_mapping_ret['FunctionArn'],
            **conn_parameters)
        self.assertEqual(res.get('error', {}).get('message'),
                         error_message.format('update_event_source_mapping'))
if __name__ == '__main__':
    # Run this module's function tests through Salt's integration runner.
    from integration import run_tests  # pylint: disable=import-error
    run_tests(BotoLambdaFunctionTestCase, needs_daemon=False)
| 49.861745 | 169 | 0.631518 |
81993e91a4f807db1cb9ab0f6c3c1c82a9d152e5 | 14,207 | py | Python | component_contribution/compound.py | biosustain/component-contribution | ea57303b4ca6d9e6b0708200a1b30bed6d5c36a3 | [
"MIT"
] | 1 | 2018-01-31T13:44:03.000Z | 2018-01-31T13:44:03.000Z | component_contribution/compound.py | biosustain/component-contribution | ea57303b4ca6d9e6b0708200a1b30bed6d5c36a3 | [
"MIT"
] | 19 | 2017-06-07T06:28:55.000Z | 2018-06-05T13:14:17.000Z | component_contribution/compound.py | biosustain/component-contribution | ea57303b4ca6d9e6b0708200a1b30bed6d5c36a3 | [
"MIT"
] | 1 | 2016-12-12T14:33:25.000Z | 2016-12-12T14:33:25.000Z | import openbabel, urllib, logging
import chemaxon
import numpy as np
from thermodynamic_constants import R, debye_huckel
from scipy.misc import logsumexp
MIN_PH = 0.0
MAX_PH = 14.0
class Compound(object):
def __init__(self, database, compound_id, inchi,
atom_bag, pKas, smiles_pH7, majorMSpH7, nHs, zs, molfile=None):
self.database = database
self.compound_id = compound_id
self.inchi = inchi
self.atom_bag = atom_bag
self.pKas = pKas
self.smiles_pH7 = smiles_pH7
self.majorMSpH7 = majorMSpH7
self.nHs = nHs
self.zs = zs
self.molfile = molfile
@staticmethod
def from_kegg(compound_id):
return Compound.from_inchi_with_keggID('KEGG', compound_id, Compound.get_inchi_from_kegg(compound_id))
@staticmethod
def from_inchi_with_keggID(database, compound_id, inchi):
if compound_id == 'C00080':
# We add an exception for H+ (and put nH = 0) in order to eliminate
# its effect of the Legendre transform
return Compound(database, compound_id, inchi,
{'H' : 1}, [], None, 0, [0], [0])
elif compound_id == 'C00087':
# ChemAxon gets confused with the structure of sulfur
# (returns a protonated form, [SH-], at pH 7).
# So we implement it manually here.
return Compound(database, compound_id, inchi,
{'S' : 1, 'e-': 16}, [], 'S', 0, [0], [0])
elif compound_id == 'C00237':
# ChemAxon gets confused with the structure of carbon monoxide
# (returns a protonated form, [CH]#[O+], at pH 7).
# So we implement it manually here.
return Compound(database, compound_id, inchi,
{'C' : 1, 'O': 1, 'e-': 14}, [], '[C-]#[O+]', 0, [0], [0])
elif compound_id == 'C00282':
# ChemAxon gets confused with the structure of hydrogen
# So we implement it manually here.
return Compound(database, compound_id, inchi,
{'H' : 2, 'e-': 2}, [], None, 0, [2], [0])
elif compound_id == 'C01353':
# When given the structure of carbonic acid, ChemAxon returns the
# pKas for CO2(tot), i.e. it assumes the non-hydrated CO2 species is
# one of the pseudoisomers, and the lower pKa value is 6.05 instead of
# 3.78. Here, we introduce a new "KEGG" compound that will represent
# pure bicarbonate (without CO2(sp)) and therefore plug in the pKa
# values from Alberty's book.
return Compound(database, compound_id, inchi,
{'C': 1, 'H': 1, 'O': 3, 'e-': 32}, [10.33, 3.43],
'OC(=O)[O-]', 1, [0, 1, 2], [-2, -1, 0])
# Metal Cations get multiple pKa values from ChemAxon, which is
# obviously a bug. We override the important ones here:
elif compound_id == 'C00076': # Ca2+
return Compound(database, compound_id, inchi,
{'Ca' : 1, 'e-': 18}, [], '[Ca++]', 0, [0], [2])
elif compound_id == 'C00238': # K+
return Compound(database, compound_id, inchi,
{'K' : 1, 'e-': 18}, [], '[K+]', 0, [0], [1])
elif compound_id == 'C00305': # Mg2+
return Compound(database, compound_id, inchi,
{'Mg' : 1, 'e-': 10}, [], '[Mg++]', 0, [0], [2])
elif compound_id == 'C14818': # Fe2+
return Compound(database, compound_id, inchi,
{'Fe' : 1, 'e-': 24}, [], '[Fe++]', 0, [0], [2])
elif compound_id == 'C14819': # Fe3+
return Compound(database, compound_id, inchi,
{'Fe' : 1, 'e-': 23}, [], '[Fe+++]', 0, [0], [3])
elif compound_id == 'C00138': # ferredoxin(red)
return Compound(database, compound_id, inchi,
{'Fe' : 1, 'e-': 26}, [], None, 0, [0], [0])
elif compound_id == 'C00139': # ferredoxin(ox)
return Compound(database, compound_id, inchi,
{'Fe' : 1, 'e-': 25}, [], None, 0, [0], [1])
elif inchi is None:
# If the compound has no explicit structure, we assume that it has
# no proton dissociations in the relevant pH range
return Compound(database, compound_id, inchi,
{}, [], None, 0, [0], [0])
# Otherwise, we use ChemAxon's software to get the pKas and the
# properties of all microspecies
try:
pKas, major_ms_smiles = chemaxon.GetDissociationConstants(inchi)
major_ms_smiles = Compound.smiles2smiles(major_ms_smiles)
pKas = sorted([pka for pka in pKas if pka > MIN_PH and pka < MAX_PH], reverse=True)
except chemaxon.ChemAxonError:
logging.warning('chemaxon failed to find pKas for this molecule: ' + inchi)
# use the original InChI to get the parameters (i.e. assume it
# represents the major microspecies at pH 7)
major_ms_smiles = Compound.inchi2smiles(inchi)
pKas = []
if major_ms_smiles:
atom_bag, major_ms_charge = chemaxon.GetAtomBagAndCharge(major_ms_smiles)
major_ms_nH = atom_bag.get('H', 0)
else:
atom_bag = {}
major_ms_charge = 0
major_ms_nH = 0
n_species = len(pKas) + 1
if pKas == []:
majorMSpH7 = 0
else:
majorMSpH7 = len([1 for pka in pKas if pka > 7])
nHs = []
zs = []
for i in xrange(n_species):
zs.append((i - majorMSpH7) + major_ms_charge)
nHs.append((i - majorMSpH7) + major_ms_nH)
return Compound(database, compound_id, inchi,
atom_bag, pKas, major_ms_smiles, majorMSpH7, nHs, zs)
def to_json_dict(self):
return {'database' : self.database,
'compound_id' : self.compound_id,
'inchi' : self.inchi,
'atom_bag' : self.atom_bag,
'pKas' : self.pKas,
'smiles_pH7' : self.smiles_pH7,
'majorMSpH7' : self.majorMSpH7,
'nHs' : self.nHs,
'zs' : self.zs}
@staticmethod
def from_json_dict(d):
return Compound(d['database'], d['compound_id'], d['inchi'], d['atom_bag'],
d['pKas'], d['smiles_pH7'], d['majorMSpH7'],
d['nHs'], d['zs'])
@staticmethod
def get_inchi_from_kegg(compound_id):
s_mol = urllib.urlopen('http://rest.kegg.jp/get/cpd:%s/mol' % compound_id).read()
return Compound.mol2inchi(s_mol)
@staticmethod
def mol2inchi(s):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('mol', 'inchi')
conv.AddOption("F", conv.OUTOPTIONS)
conv.AddOption("T", conv.OUTOPTIONS)
conv.AddOption("x", conv.OUTOPTIONS, "noiso")
conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
if not conv.ReadString(obmol, str(s)):
return None
inchi = conv.WriteString(obmol, True) # second argument is trimWhitespace
if inchi == '':
return None
else:
return inchi
@staticmethod
def inchi2smiles(inchi):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('inchi', 'smiles')
#conv.AddOption("F", conv.OUTOPTIONS)
#conv.AddOption("T", conv.OUTOPTIONS)
#conv.AddOption("x", conv.OUTOPTIONS, "noiso")
#conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
conv.ReadString(obmol, str(inchi))
smiles = conv.WriteString(obmol, True) # second argument is trimWhitespace
if smiles == '':
return None
else:
return smiles
@staticmethod
def smiles2smiles(smiles_in):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('smiles', 'smiles')
#conv.AddOption("F", conv.OUTOPTIONS)
#conv.AddOption("T", conv.OUTOPTIONS)
#conv.AddOption("x", conv.OUTOPTIONS, "noiso")
#conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
conv.ReadString(obmol, str(smiles_in))
smiles_out = conv.WriteString(obmol, True) # second argument is trimWhitespace
if smiles_out == '':
return None
else:
return smiles_out
@staticmethod
def smiles2inchi(smiles):
openbabel.obErrorLog.SetOutputLevel(-1)
conv = openbabel.OBConversion()
conv.SetInAndOutFormats('smiles', 'inchi')
conv.AddOption("F", conv.OUTOPTIONS)
conv.AddOption("T", conv.OUTOPTIONS)
conv.AddOption("x", conv.OUTOPTIONS, "noiso")
conv.AddOption("w", conv.OUTOPTIONS)
obmol = openbabel.OBMol()
conv.ReadString(obmol, str(smiles))
inchi = conv.WriteString(obmol, True) # second argument is trimWhitespace
if inchi == '':
return None
else:
return inchi
def __str__(self):
return "%s\nInChI: %s\npKas: %s\nmajor MS: nH = %d, charge = %d" % \
(self.compound_id, self.inchi, ', '.join(['%.2f' % p for p in self.pKas]),
self.nHs[self.majorMSpH7], self.zs[self.majorMSpH7])
def _dG0_prime_vector(self, pH, I, T):
"""
Calculates the difference in kJ/mol between dG'0 and
the dG0 of the MS with the least hydrogens (dG0[0])
Returns:
dG'0 - dG0[0]
"""
if self.inchi is None:
return 0
elif self.pKas == []:
dG0s = np.zeros((1, 1))
else:
dG0s = -np.cumsum([0] + self.pKas) * R * T * np.log(10)
dG0s = dG0s
DH = debye_huckel((I, T))
# dG0' = dG0 + nH * (R T ln(10) pH + DH) - charge^2 * DH
pseudoisomers = np.vstack([dG0s, np.array(self.nHs), np.array(self.zs)]).T
dG0_prime_vector = pseudoisomers[:, 0] + \
pseudoisomers[:, 1] * (R * T * np.log(10) * pH + DH) - \
pseudoisomers[:, 2]**2 * DH
return dG0_prime_vector
def _transform(self, pH, I, T):
return -R * T * logsumexp(self._dG0_prime_vector(pH, I, T) / (-R * T))
def _ddG(self, i_from, i_to, T):
"""
Calculates the difference in kJ/mol between two MSs.
Returns:
dG0[i_to] - dG0[i_from]
"""
if not (0 <= i_from <= len(self.pKas)):
raise ValueError('MS index is out of bounds: 0 <= %d <= %d' % (i_from, len(self.pKas)))
if not (0 <= i_to <= len(self.pKas)):
raise ValueError('MS index is out of bounds: 0 <= %d <= %d' % (i_to, len(self.pKas)))
if i_from == i_to:
return 0
elif i_from < i_to:
return sum(self.pKas[i_from:i_to]) * R * T * np.log(10)
else:
return -sum(self.pKas[i_to:i_from]) * R * T * np.log(10)
def get_transform(self, pH, I, T):
"""
Returns the difference in kJ/mol between dG'0 and the dG0 of the
MS with index 'i'.
Returns:
(molelcule dG'0 - sp. dG0[0])
"""
return self._transform(pH, I, T)
def transform(self, i, pH, I, T):
"""
Returns the difference in kJ/mol between dG'0 and the dG0 of the
MS with index 'i'.
Returns:
(molelcule dG'0 - sp. dG0[0]) + (sp. dG0[0] - sp. dG0[i]) = molecule dG'0 - sp. dG0[i]
"""
return self._transform(pH, I, T) + self._ddG(0, i, T)
def transform_pH7(self, pH, I, T):
"""
Returns the transform for the major MS in pH 7
"""
return self.transform(self.majorMSpH7, pH, I, T)
def transform_neutral(self, pH, I, T):
"""
Returns the transform for the MS with no charge
"""
try:
return self.transform(self.zs.index(0), pH, I, T)
except ValueError:
raise ValueError("The compound (%s) does not have a microspecies with 0 charge"
% self.compound_id)
def get_species(self, major_ms_dG0_f, T):
"""
Given the chemical formation energy of the major microspecies,
uses the pKa values to calculate the chemical formation energies
of all other species, and returns a list of dictionaries with
all the relevant data: dG0_f, nH, nMg, z (charge)
"""
for i, (nH, z) in enumerate(zip(self.nHs, self.zs)):
dG0_f = major_ms_dG0_f + self._ddG(i, self.majorMSpH7, T)
d = {'phase': 'aqueous', 'dG0_f': np.round(dG0_f, 2),
'nH': nH, 'z': z, 'nMg': 0}
yield d
if __name__ == '__main__':
import sys, json
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
from python.compound_cacher import CompoundCacher, CompoundEncoder
from python.molecule import Molecule, OpenBabelError
ccache = CompoundCacher(cache_fname=None)
for compound_id in ['C00087', 'C00282', 'C00237']:
comp = Compound.from_kegg(compound_id)
try:
mol = Molecule.FromInChI(str(comp.inchi))
sys.stderr.write('%s : formula = %s, nE = %s' %
(str(comp.inchi), mol.GetFormula(), mol.GetNumElerctons()))
except OpenBabelError:
pass
ccache.add(comp)
sys.stderr.write('\ncompound id = %s, nH = %s, z = %s, pKa = %s, bag = %s\n\n\n' %
(compound_id, str(comp.nHs), str(comp.zs), str(comp.pKas), str(comp.atom_bag)))
ccache.dump()
| 40.824713 | 110 | 0.538819 |
925d34793248f7202d9e6391b57dfd8f61661969 | 1,704 | py | Python | tests/test_poisson_1d.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | 1 | 2021-10-21T17:15:26.000Z | 2021-10-21T17:15:26.000Z | tests/test_poisson_1d.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | tests/test_poisson_1d.py | NegriLuca/pigasus | d5057b771f81cfa05bb08ea4b0fd99088150cd7a | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
#! /usr/bin/python
from pigasus.utils.manager import context
# ...
try:
from matplotlib import pyplot as plt
PLOT=True
except ImportError:
PLOT=False
# ...
import numpy as np
from caid.cad_geometry import line as domain
from pigasus.gallery.poisson import *
import sys
import inspect
filename = inspect.getfile(inspect.currentframe()) # script filename (usually with path)
sys.stdout = open(filename.split('.py')[0]+'.txt', 'w')
# ...
sin = np.sin ; pi = np.pi
# ...
# ...
kx = 2. * pi
# ... exact solution
u = lambda x : [sin ( kx * x )]
# ... rhs
f = lambda x : [( kx**2) * sin ( kx * x )]
#-----------------------------------
nx = 63
px = 2
AllDirichlet = True
geo = domain(n=[nx],p=[px])
#-----------------------------------
# ...
try:
bc_dirichlet
except NameError:
bc_dirichlet = None
else:
pass
try:
bc_neumann
except NameError:
bc_neumann = None
else:
pass
try:
AllDirichlet
except NameError:
AllDirichlet = None
else:
pass
try:
Dirichlet
except NameError:
Dirichlet = None
else:
pass
try:
Metric
except NameError:
Metric = None
else:
pass
# ...
with context():
# ...
PDE = poisson(geometry=geo, bc_dirichlet=bc_dirichlet, bc_neumann=bc_neumann,
AllDirichlet=AllDirichlet, Dirichlet=Dirichlet,metric=Metric)
# ...
# ...
PDE.assembly(f=f)
PDE.solve()
# ...
# ...
normU = PDE.norm(exact=u)
print("norm U = ", normU)
# ...
# ...
if PLOT:
PDE.plot() ; plt.title('$u_h$')
plt.savefig(filename.split('.py')[0]+'.png', format='png')
plt.clf()
# ...
PDE.free()
| 16.705882 | 88 | 0.555164 |
1ba6f5396f0c49a9d90dd2e199fa0c71987e6543 | 7,310 | py | Python | stylegan2/dnnlib/tflib/custom_ops.py | chenqiguo/GAN_replication | 18e71914164f0d735354afb0134ce00570080ecd | [
"OLDAP-2.3"
] | 2 | 2021-11-11T00:18:28.000Z | 2021-12-28T01:10:25.000Z | stylegan2/dnnlib/tflib/custom_ops.py | chenqiguo/GAN_replication | 18e71914164f0d735354afb0134ce00570080ecd | [
"OLDAP-2.3"
] | null | null | null | stylegan2/dnnlib/tflib/custom_ops.py | chenqiguo/GAN_replication | 18e71914164f0d735354afb0134ce00570080ecd | [
"OLDAP-2.3"
] | null | null | null | # Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
"""TensorFlow custom ops builder.
"""
import os
import re
import uuid
import hashlib
import tempfile
import shutil
import tensorflow as tf
from tensorflow.python.client import device_lib # pylint: disable=no-name-in-module
#----------------------------------------------------------------------------
# Global options.
cuda_cache_path = os.path.join(os.path.dirname(__file__), '_cudacache')
cuda_cache_version_tag = 'v1'
do_not_hash_included_headers = False # Speed up compilation by assuming that headers included by the CUDA code never change. Unsafe!
verbose = True # Print status messages to stdout.
compiler_bindir_search_path = [
'C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Tools/MSVC/14.14.26428/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/2019/Community/VC/Tools/MSVC/14.23.28105/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio 14.0/vc/bin',
]
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
    """Return the first existing directory from compiler_bindir_search_path, or None."""
    return next(
        (candidate for candidate in compiler_bindir_search_path if os.path.isdir(candidate)),
        None,
    )
def _get_compute_cap(device):
    """Parse 'compute capability: X.Y' out of device.physical_device_desc.

    Returns (major, minor) as a pair of strings.  The dot in the pattern
    is now escaped; previously it matched any character, which was overly
    permissive (though harmless for well-formed descriptors).
    """
    caps_str = device.physical_device_desc
    m = re.search(r'compute capability: (\d+)\.(\d+)', caps_str)
    major = m.group(1)
    minor = m.group(2)
    return (major, minor)
def _get_cuda_gpu_arch_string():
    """Return an nvcc architecture flag (e.g. 'sm_75') for the first visible GPU."""
    gpus = [dev for dev in device_lib.list_local_devices() if dev.device_type == 'GPU']
    if not gpus:
        raise RuntimeError('No GPU devices found')
    major, minor = _get_compute_cap(gpus[0])
    return 'sm_%s%s' % (major, minor)
def _run_cmd(cmd):
    """Execute *cmd* via the shell; raise RuntimeError (with the captured
    output) if the child exits with an error.

    os.popen()'s close() returns None on success and the exit status
    otherwise, which is what the check below relies on.
    """
    with os.popen(cmd) as pipe:
        output = pipe.read()
        status = pipe.close()
    if status is None:
        return
    message = 'NVCC returned an error. See below for full command line and output log:\n\n%s\n\n%s' % (cmd, output)
    raise RuntimeError(message)
def _prepare_nvcc_cli(opts):
    """Build the full nvcc command line around the caller-supplied *opts*.

    Adds the TensorFlow include paths, an optional --compiler-bindir (on
    Windows a missing compiler is fatal), and redirects stderr to stdout.
    (The '--std=c++11 -DNDEBUG' prefix is a local modification to the
    original upstream command.)
    """
    include_root = tf.sysconfig.get_include()
    parts = ['nvcc --std=c++11 -DNDEBUG ' + opts.strip()]
    parts.append('--disable-warnings')
    parts.append('--include-path "%s"' % include_root)
    parts.append('--include-path "%s"' % os.path.join(include_root, 'external', 'protobuf_archive', 'src'))
    parts.append('--include-path "%s"' % os.path.join(include_root, 'external', 'com_google_absl'))
    parts.append('--include-path "%s"' % os.path.join(include_root, 'external', 'eigen_archive'))
    compiler_bindir = _find_compiler_bindir()
    if compiler_bindir is not None:
        parts.append('--compiler-bindir "%s"' % compiler_bindir)
    elif os.name == 'nt':
        # Require that _find_compiler_bindir succeeds on Windows. Allow
        # nvcc to use whatever is the default on Linux.
        raise RuntimeError('Could not find MSVC/GCC/CLANG installation on this computer. Check compiler_bindir_search_path list in "%s".' % __file__)
    parts.append('2>&1')
    return ' '.join(parts)
#----------------------------------------------------------------------------
# Main entry point.
_plugin_cache = dict()
def get_plugin(cuda_file):
    """Compile *cuda_file* into a TensorFlow op library and load it.

    The compiled binary is cached on disk under cuda_cache_path, keyed by
    an MD5 hash of the CUDA source (and, unless disabled, of every header
    it includes), the exact nvcc command line, the TensorFlow version and
    cuda_cache_version_tag -- so recompilation only happens when any of
    those change.  Loaded plugins are additionally memoized in-process
    in _plugin_cache.
    """
    cuda_file_base = os.path.basename(cuda_file)
    cuda_file_name, cuda_file_ext = os.path.splitext(cuda_file_base)
    # Already in cache?
    if cuda_file in _plugin_cache:
        return _plugin_cache[cuda_file]
    # Setup plugin.
    if verbose:
        print('Setting up TensorFlow plugin "%s": ' % cuda_file_base, end='', flush=True)
    try:
        # Hash CUDA source.
        md5 = hashlib.md5()
        with open(cuda_file, 'rb') as f:
            md5.update(f.read())
        md5.update(b'\n')
        # Hash headers included by the CUDA code by running it through the preprocessor.
        if not do_not_hash_included_headers:
            if verbose:
                print('Preprocessing... ', end='', flush=True)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + cuda_file_ext)
                _run_cmd(_prepare_nvcc_cli('"%s" --preprocess -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir)))
                with open(tmp_file, 'rb') as f:
                    # Normalize the absolute source path embedded by the
                    # preprocessor so the hash is machine-independent.
                    bad_file_str = ('"' + cuda_file.replace('\\', '/') + '"').encode('utf-8') # __FILE__ in error check macros
                    good_file_str = ('"' + cuda_file_base + '"').encode('utf-8')
                    for ln in f:
                        if not ln.startswith(b'# ') and not ln.startswith(b'#line '): # ignore line number pragmas
                            ln = ln.replace(bad_file_str, good_file_str)
                            md5.update(ln)
                    md5.update(b'\n')
        # Select compiler options.
        compile_opts = ''
        if os.name == 'nt':
            compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.lib')
        elif os.name == 'posix':
            compile_opts += '"%s"' % os.path.join(tf.sysconfig.get_lib(), 'python', '_pywrap_tensorflow_internal.so')
            compile_opts += ' --compiler-options \'-fPIC -D_GLIBCXX_USE_CXX11_ABI=0\''
        else:
            assert False # not Windows or Linux, w00t?
        compile_opts += ' --gpu-architecture=%s' % _get_cuda_gpu_arch_string()
        compile_opts += ' --use_fast_math'
        nvcc_cmd = _prepare_nvcc_cli(compile_opts)
        # Hash build configuration.
        md5.update(('nvcc_cmd: ' + nvcc_cmd).encode('utf-8') + b'\n')
        md5.update(('tf.VERSION: ' + tf.VERSION).encode('utf-8') + b'\n')
        md5.update(('cuda_cache_version_tag: ' + cuda_cache_version_tag).encode('utf-8') + b'\n')
        # Compile if not already compiled.
        bin_file_ext = '.dll' if os.name == 'nt' else '.so'
        bin_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + md5.hexdigest() + bin_file_ext)
        if not os.path.isfile(bin_file):
            if verbose:
                print('Compiling... ', end='', flush=True)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tmp_file = os.path.join(tmp_dir, cuda_file_name + '_tmp' + bin_file_ext)
                _run_cmd(nvcc_cmd + ' "%s" --shared -o "%s" --keep --keep-dir "%s"' % (cuda_file, tmp_file, tmp_dir))
                os.makedirs(cuda_cache_path, exist_ok=True)
                # Publish via copy-to-unique-name followed by rename, so a
                # concurrent process never observes a half-written binary.
                intermediate_file = os.path.join(cuda_cache_path, cuda_file_name + '_' + uuid.uuid4().hex + '_tmp' + bin_file_ext)
                shutil.copyfile(tmp_file, intermediate_file)
                os.rename(intermediate_file, bin_file) # atomic
        # Load.
        if verbose:
            print('Loading... ', end='', flush=True)
        plugin = tf.load_op_library(bin_file)
        # Add to cache.
        _plugin_cache[cuda_file] = plugin
        if verbose:
            print('Done.', flush=True)
        return plugin
    except:
        if verbose:
            print('Failed!', flush=True)
        raise
| 42.254335 | 153 | 0.597674 |
af52d680b9bf311f1e0f6ca28ae89f7653331f49 | 831 | py | Python | samples/py3_func/call_f.py | shresnis000/abaco | 547a30f3690b891d439dc923d3e88986ce742693 | [
"BSD-3-Clause"
] | 27 | 2015-07-24T16:54:38.000Z | 2022-01-07T04:36:47.000Z | samples/py3_func/call_f.py | shresnis000/abaco | 547a30f3690b891d439dc923d3e88986ce742693 | [
"BSD-3-Clause"
] | 68 | 2015-10-05T16:08:58.000Z | 2022-02-07T15:59:27.000Z | samples/py3_func/call_f.py | shresnis000/abaco | 547a30f3690b891d439dc923d3e88986ce742693 | [
"BSD-3-Clause"
] | 13 | 2015-10-01T21:38:34.000Z | 2021-05-26T00:19:07.000Z | import os
import cloudpickle
from agavepy.actors import get_binary_message, send_python_result
def main():
raw_message = get_binary_message()
try:
m = cloudpickle.loads(raw_message)
except Exception as e:
print(f"Got exception: {e} trying to loads raw_message: {raw_message}")
raise e
print("Was able to execute cloudpickle.loads: {m}")
f = m.get('func')
if not f:
print("Error - function attribute required. Got: {}".format(m))
raise Exception
args = m.get('args')
kwargs = m.get('kwargs')
try:
result = f(*args, **kwargs)
except Exception as e:
print(f"Got exception trying to call f: {f}. Exception: {e}")
raise e
send_python_result(result)
print("result: {}".format(result))
if __name__ == '__main__':
main()
| 28.655172 | 79 | 0.630566 |
321d3b01c724a4c75958111f6565104ac12adee1 | 11,606 | py | Python | docs/conf.py | groupserver/-gs.group.groups.list | b92141467106a10478caa350de3d21450373a02b | [
"ZPL-2.1"
] | null | null | null | docs/conf.py | groupserver/-gs.group.groups.list | b92141467106a10478caa350de3d21450373a02b | [
"ZPL-2.1"
] | null | null | null | docs/conf.py | groupserver/-gs.group.groups.list | b92141467106a10478caa350de3d21450373a02b | [
"ZPL-2.1"
] | null | null | null | # -*- coding: utf-8 -*-
#
# gs.group.groups.list documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 30 16:46:40 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gs.group.groups.list'
copyright = u'2015, GroupServer.org'
author = u'GroupServer.org'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'gsgroupgroupslistdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'gsgroupgroupslist.tex', u'gs.group.groups.list Documentation',
u'GroupServer.org', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'gsgroupgroupslist', u'gs.group.groups.list Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'gsgroupgroupslist', u'gs.group.groups.list Documentation',
author, 'gsgroupgroupslist', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
| 31.710383 | 80 | 0.720231 |
2442462a423f1a3c4898d9c18a455d07795f3fca | 27,801 | py | Python | names.py | DaveMitHut/npc-creator-python-headless | 35a3e02c8c27cef8f78045bde0dec3d2834ed550 | [
"MIT"
] | null | null | null | names.py | DaveMitHut/npc-creator-python-headless | 35a3e02c8c27cef8f78045bde0dec3d2834ed550 | [
"MIT"
] | null | null | null | names.py | DaveMitHut/npc-creator-python-headless | 35a3e02c8c27cef8f78045bde0dec3d2834ed550 | [
"MIT"
] | null | null | null | # Submodule for npc_creator.py-script to randomly generate
# names for NPC's for the 'Rise of the Darkness' setting.
# Written by David Huseyin on June 4th, 2019
import random
# Uses the D&D Players Handbook's name tables as reference for all names except the standard human ones
# standard human names are taken from scottish gaelic and are give with a translation
def generateName(profession, sex, race):  # noqa: function names follow this file's camelCase convention
    """Top-level dispatcher: pick the right name generator for the given
    race, and for humans also the profession; mixed ancestries and
    tieflings roll dice to choose between naming traditions."""
    if race == "Human":
        human_generators = {
            "Herbalist": herbalistNamesHuman,
            "Smith": smithNamesHuman,
            "Shopkeep": shopkeepNamesHuman,
            "Innkeep": shopkeepNamesHuman,
            "Officer": officerNamesHuman,
            "Guard": officerNamesHuman,
            "Councilmember": councilmemberNamesHuman,
        }
        if profession == "Foreigner":
            # equal chance of a Shou, Mulan or Turami name
            roll = random.randint(1, 3)
            if roll == 1:
                return foreignNamesShou(sex)
            if roll == 2:
                return foreignNamesMulan(sex)
            return foreignNamesTurami(sex)
        # unknown professions fall back to shopkeeper-style names
        return human_generators.get(profession, shopkeepNamesHuman)(sex)
    if race == "Elf":
        return elfNames(sex)
    if race == "Dwarf":
        return dwarfNames(sex)
    if race == "Halfelf":
        # 50/50 chance of a human or an elven name
        return officerNamesHuman(sex) if random.randint(1, 2) == 1 else elfNames(sex)
    if race == "Halforc":
        # 50/50 chance of a human or an orcish name
        return officerNamesHuman(sex) if random.randint(1, 2) == 1 else orcNames(sex)
    if race == "Halfling":
        return halflingNames(sex)
    if race == "Gnome":
        return gnomeNames(sex)
    if race == "Tiefling":
        # 2-in-4 human/elven upbringing, otherwise infernal or virtue name
        roll = random.randint(1, 4)
        if roll == 1:
            return officerNamesHuman(sex)
        if roll == 2:
            return elfNames(sex)
        if roll == 3:
            return tieflingNames(sex)
        return tieflingVirtueNames()
    if race == "Dragonborn":
        return dragonbornNames(sex)
    if race == "Aasimar":
        return officerNamesHuman(sex)
def herbalistNamesHuman(sex):
    """Random gaelic herbalist name, formatted 'Given (translation) Surname'."""
    male_pool = {
        "Crannog": "lake dweller",
        "Daileass": "from the waterfall",
        "Dhoire": "from the grove",
        "Erskine": "from the top of the cliff",
        "Faing": "from the sheep pen",
        "Frasier": "of the forest men",
        "Gair": "Short",
        "Goraidh": "Peaceful",
        "Iain": "gift from the gods",
        "Kenneth": "born of fire",
        "Kenzie": "fair",
        "Leathan": "river",
        "Machair": "plain",
        "Matheson": "bear's son",
        "Ogilvie": "from the high peak",
        "Quarrie": "proud",
        "Reade": "red haired",
        "Scot": "wanderer",
        "Sim": "listener",
        "Todd": "fox"
    }
    female_pool = {
        "Aileene": "giver of life",
        "Beathas": "wise",
        "Cadha": "from the steep place",
        "Cora": "seething pool",
        "Daracha": "from the oak",
        "Elspeth": "god's oath",
        "Forbia": "headstrong",
        "Gilbarta": "pledge, vow",
        "Kenzie": "the fair one",
        "Lorna": "from the place of laurel trees, honor",
        "Maisie": "child of light",
        "Marcail": "pearl",
        "Nessia": "from the headland",
        "Rose": "rose",
        "Sileas": "youthful",
        "Siusan": "lily",
        "Tira": "Land",
        "Vanora": "white wave",
        "Vika": "from the creek",
        "Wynda": "from the narrow passage"
    }
    pool = male_pool if sex == "Male" else female_pool
    given, meaning = random.choice(list(pool.items()))
    return "%s (%s) %s" % (given, meaning, humanLastNames())
def smithNamesHuman(sex):
    """Random gaelic smith name, formatted 'Given (translation) Surname'."""
    male_pool = {
        "Alistair": "protector of mankind, helper",
        "Bearnard": "strong as a bear",
        "Beiste": "beast",
        "Bhaltair": "strong warrior",
        "Calder": "rough waters",
        "Clach": "stone",
        "Damh": "ox",
        "Fergus": "man of strength",
        "Frasier": "of the forest men",
        "Gobha": "smith",
        "Gowan": "smith",
        "Kendrew": "manly, courageous",
        "Kenneth": "born of fire",
        "Matheson": "bear's son",
        "Montgomery": "mountain",
        "Ogilvie": "from the high peak",
        "Oliphant": "great strength",
        "Quarrie": "proud",
        "Reade": "red haired",
        "Struan": "stream"
    }
    female_pool = {
        "Adairia": "from the oak tree ford",
        "Anice": "grace",
        "Blaire": "from the field of battle",
        "Caroline": "strong",
        "Daracha": "from the oak",
        "Evanna": "right handed",
        "Forbia": "headstrong",
        "Gilbarta": "pledge, vow",
        "Kenzie": "the fair one",
        "Lorna": "from the place of laurel trees, honor",
        "Maisie": "child of light",
        "Marcail": "pearl",
        "Muira": "from the moors",
        "Robena": "robin",
        "Siubhan": "praised",
        "Struana": "from the stream",
        "Tira": "land",
        "Torey": "triumphant",
        "Vika": "from the creek",
        "Wynda": "from the narrow passage"
    }
    pool = male_pool if sex == "Male" else female_pool
    given, meaning = random.choice(list(pool.items()))
    return "%s (%s) %s" % (given, meaning, humanLastNames())
def shopkeepNamesHuman(sex): # also used for generating the innkeep names
    """Random gaelic shopkeeper/innkeeper name, 'Given (translation) Surname'."""
    male_pool = {
        "Alistair": "protector of mankind, helper",
        "Baen": "fair skinned",
        "Balmoral": "from the majestic city/capital",
        "Bearnard": "strong as a bear",
        "Calder": "rough waters",
        "Cambeul": "crooked mouth",
        "Cameron": "bent nose",
        "Dalyell": "from the little field",
        "Fletcher": "maker of arrows",
        "Gair": "short",
        "Irving": "from the city",
        "Kinnon": "fair born",
        "Laurence": "from the place of laurel trees, honor, victory",
        "Maolmuire": "dark skinned",
        "Monro": "wheelwright",
        "Mufidy": "man of the sea",
        "Norval": "from the north valley",
        "Ogilvie": "from the high peak",
        "Parlan": "farmer",
        "Quarrie": "proud"
    }
    female_pool = {
        "Adairia": "from the oak tree ford",
        "Aila": "from the strong fortress",
        "Bonnie": "sweet, good",
        "Bradana": "salmon",
        "Caitrin": "pure",
        "Coira": "seething pool",
        "Evina": "right handed",
        "Fenella": "fair shoulders",
        "Forbia": "headstrong",
        "Grear": "watchful",
        "Iona": "violet",
        "Kenzie": "the fair one",
        "Linsey": "from the place of linden trees",
        "Maggie": "pearl",
        "Mairi": "bitter",
        "Manda": "whiskey",
        "Sileas": "youthful",
        "Siubhan": "praised",
        "Tavia & Teva": "twins",
        "Tira": "land"
    }
    pool = male_pool if sex == "Male" else female_pool
    given, meaning = random.choice(list(pool.items()))
    return "%s (%s) %s" % (given, meaning, humanLastNames())
def officerNamesHuman(sex): # also used for generating the guard, halfelf, halforc & aasimar names
    """Random gaelic officer/guard name, formatted 'Given (translation) Surname'."""
    male_pool = {
        "Alistair": "protector of mankind, defender, helper",
        "Banner": "flag bearer",
        "Balmoral": "from the majestic city/capital",
        "Bearnard": "strong as a bear",
        "Bhaltair": "strong warrior",
        "Callum": "bald dove",
        "Clach": "stone",
        "Colin": "people's victory",
        "Daimh": "ox",
        "Duncan": "brown warrior",
        "Fynn": "fair hero",
        "Gawyn": "white hawk",
        "Gilleasbuig": "brave",
        "Gilmore": "sword bearer",
        "Harailt": "leader",
        "Iver": "archer",
        "Keith": "from the battlefield",
        "Kerr": "man of strength",
        "Lamont": "man of law",
        "Lyel": "loyal",
        "Mathe": "bear",
        "Montgomery": "mountain",
        "Neilan": "champion",
        "Oidhche": "night",
        "Sandy": "protector of mankind",
        "Sclymgeour": "fighter",
        "Sloan": "warrior",
        "Tearlach": "strong"
    }
    female_pool = {
        "Adairia": "from the oak tree ford",
        "Aila": "from the strong fortress",
        "Alison": "renowned warrior",
        "Blaire": "field of battle",
        "Cadha": "from the steep place",
        "Caroline": "strong",
        "Colina": "people's victory",
        "Elspeth": "god's oath",
        "Fiona": "fair",
        "Gilbarta": "pledge, vow",
        "Gordana": "heroic",
        "Grear": "watchful",
        "Lorna": "from the place of laurel trees, honor, victory",
        "Maisie": "child of light",
        "Muira": "from the moors",
        "Nathara": "snake",
        "Siubhan": "praised",
        "Struana": "stream",
        "Torey": "triumphant",
        "Tyra": "land",
        "Vanora": "white wave"
    }
    pool = male_pool if sex == "Male" else female_pool
    given, meaning = random.choice(list(pool.items()))
    return "%s (%s) %s" % (given, meaning, humanLastNames())
def councilmemberNamesHuman(sex):
    """Random gaelic council-member name, formatted 'Given (translation) Surname'.

    sex: "Male" selects the male table; any other value the female one.
    """
    male_pool = {
        "Alistair": "protector of mankind, helper",
        "Baen": "fair skinned",
        "Bram": "father of many",
        "Camhlaidh": "relic",
        "Cawley": "relic",
        "Chalmer": "rules the home",
        "Cleit": "rocky eminence",
        "Domhnull": "all ruler",
        "Donnel": "ruler",
        "Eanruig": "rules the home",
        "Erroll": "nobleman",
        "Fergus": "first choice, man of strength",
        "Gillivray": "servant of judgement",
        "Gordain": "hero",
        "Harailt": "leader",
        "Kendric": "royal chieftain",
        "Laird": "lord",
        "Lamont": "man of law",
        "Lyall": "loyal",
        "Mitchell": "like god",
        "Murdoch": "protector of the sea",
        "Neakail": "people's victory",
        "Payton": "royal",
        "Robert": "bright, famous"
    }
    female_pool = {
        "Aileene": "giver of life",
        "Alison": "renowned warrior",
        "Beathas": "wise",
        "Colina": "people's victory",
        "Drew": "valiant, courageous",  # fixed typo: was "valian"
        "Eirica": "ruler",
        "Elspeth": "god's oath",
        "Forbia": "headstrong",
        "Gordania": "heroic",
        "Grizela": "gray haired",
        "Lorna": "from the place of laurel trees, honor, victory",
        "May": "pearl",
        "Minette": "mother",
        "Moireach": "a lady",
        "Mysie": "child of light",
        "Sima": "listener",
        "Siubhan": "praised",
        "Tara": "a hill where kings met",
        "Torey": "from the castle",
        "Tyra": "land",
        "Vanora": "white wave"
    }
    pool = male_pool if sex == "Male" else female_pool
    given, meaning = random.choice(list(pool.items()))
    return "%s (%s) %s" % (given, meaning, humanLastNames())
def humanLastNames():
    """Pick a random scottish-gaelic surname (shared by all human generators)."""
    human_last = [
        "Aileanach",
        "Ambarsan",
        "Ariss",
        "Blàrach",
        "Bochanan",
        "Buideach",
        "Camran",
        "Ceannaideach",
        "Càidh",
        "Deòireach",
        "Druiminn",
        "Dùghlas",
        "Eabarcrombaigh",
        "Flimean",
        "Flachnàn",
        "Fòlais",
        "Gilios",
        "Gill'losa",
        "Greum",
        "Ìomharach",
        "Lathurna",
        "Lobhdain",
        "Lìos",
        "Mac a' Bhàird",
        "Mac a' Charraige",
        "Mac a' Ghoill",
        "Mac a' Leòra",
        "Mac a' Phersain",
        "Mac an Deòir",
        "Mac an Fhilidh",
        "Mac an Lèigh",
        "Mac an Ruaidh",
        "Mac an Tuairneir",
        "Mac na Ceàrdaich",
        "MacÀidh",
        "MacAilein",
        "MacCaluim",
        "MacCullach",
        "MacEachainn",
        "MacGhille",
        "MacLùcais",
        "MacNèill",
        "MacPhòil",
        "MacRath",
        "MacRoibeirt",
        "Morgan",
        "O' Cain",
        "Robasan",
        "Rothach",
        "Smios",
        "Tolmach",
        "Tàileach",
        "Ualas"
    ]
    return random.choice(human_last)
def foreignNamesShou(sex):
if sex == "Male":
shou_male = [
"An",
"Chen",
"Chi",
"Fai",
"Jiang",
"Jun",
"Lian",
"Long",
"Meng",
"On",
"Shan",
"Shui",
"Wen"
]
return random.choice(shou_male) + " " + foreignLastNamesShou()
else:
shou_female = [
"Bai",
"Chao",
"Jia",
"Lei",
"Mei",
"Qiao",
"Shui",
"Tai"
]
return random.choice(shou_female) + " " + foreignLastNamesShou()
def foreignLastNamesShou():
    """Pick a random Shou family name."""
    surnames = ["Chien", "Huang", "Kao", "Kung", "Lao", "Ling",
                "Mei", "Pin", "Shin", "Sum", "Tan", "Wan"]
    return random.choice(surnames)
def foreignNamesMulan(sex):
    """Compose a random Mulan given name plus family name.

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = ["Aoth", "Bareris", "Ehput-Ki", "Kethoth", "Mumed",
            "Ramas", "So-Kehur", "Thazar-De", "Urhur"]
    female = ["Arizima", "Chathi", "Nephis", "Nulara", "Murithi",
              "Sefris", "Thola", "Umara", "Zolis"]
    first = random.choice(male if sex == "Male" else female)
    return first + " " + foreignLastNamesMulan()
def foreignLastNamesMulan():
    """Pick a random Mulan family name."""
    surnames = ["Ankhalab", "Anskuld", "Fezim", "Hahpet",
                "Nathandem", "Sepret", "Uuthrakt"]
    return random.choice(surnames)
def foreignNamesTurami(sex):
    """Compose a random Turami given name plus family name.

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = ["Anton", "Diero", "Marcon", "Pieron", "Rimardo",
            "Romero", "Salazar", "Umbero"]
    female = ["Balama", "Dona", "Faila", "Jalana", "Luisa",
              "Marta", "Quara", "Selise", "Vonda"]
    first = random.choice(male if sex == "Male" else female)
    return first + " " + foreignLastNamesTurami()
def foreignLastNamesTurami():
    """Pick a random Turami family name."""
    surnames = ["Agosto", "Astorio", "Calabra", "Domine",
                "Falone", "Marivaldi", "Pisacar", "Ramondo"]
    return random.choice(surnames)
def elfNames(sex):
    """Compose a random elf name: First Surname "Translation".

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = [
        "Adran", "Aelar", "Aramil", "Arannis", "Aust", "Beiro",
        "Berrian", "Carric", "Enialis", "Erdan", "Erevan",
        "Galinndan", "Hadarai", "Heian", "Himo", "Immeral",
        "Ivellios", "Laucian", "Mindartis", "Paelias", "Peren",
        "Quarion", "Riardon", "Rolen", "Soveliss", "Thamior",
        "Tharivol", "Theren", "Varis",
    ]
    female = [
        "Adrie", "Althaea", "Anastrianna", "Andraste", "Antinua",
        "Bethrynna", "Birel", "Caelynn", "Drusilia", "Enna",
        "Felosial", "Ielenia", "Jelenneth", "Keyleth", "Leshanna",
        "Lia", "Meriele", "Mialee", "Naivara", "Quelenna",
        "Quillathe", "Sariel", "Shanairra", "Shava", "Silaqui",
        "Theirastra", "Thia", "Vadania", "Valanthe", "Xanaphia",
    ]
    # Keep the original order of random draws: surname first, then given name.
    surname, meaning = elfLastNames()
    first = random.choice(male if sex == "Male" else female)
    return f'{first} {surname} "{meaning}"'
def elfLastNames():
    """Pick a random elf surname as a (elvish, translation) pair."""
    surnames = {
        "Amakiir": "Gemflower", "Amastacia": "Starflower",
        "Galanodel": "Moonwhisper", "Holimion": "Diamonddew",
        "Ilphelkiir": "Gemblossom", "Liadon": "Silverfrond",
        "Meliamne": "Oakenheel", "Nailo": "Nightbreeze",
        "Siannodel": "Moonbrook", "Xiloscient": "Goldpetal",
    }
    return random.choice(list(surnames.items()))
def dwarfNames(sex):
    """Compose a random dwarf given name plus clan name.

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = [
        "Adrik", "Alberich", "Baern", "Barendd", "Brottor",
        "Bruenor", "Dain", "Darrak", "Delg", "Eberk", "Einkil",
        "Fargrim", "Flint", "Gardain", "Harbek", "Kildrak",
        "Morgran", "Orsik", "Oskar", "Rangrim", "Rurik", "Taklinn",
        "Thoradin", "Thorin", "Tordek", "Traubon", "Travok",
        "Ulfgar", "Veit", "Vondal",
    ]
    female = [
        "Amber", "Artin", "Audhild", "Bardryn", "Dagnal", "Diesa",
        "Eldeth", "Falkrunn", "Finellen", "Gunnloda", "Gurdis",
        "Helja", "Hlin", "Kathra", "Kristryd", "Ilde", "Liftrasa",
        "Mardred", "Riswynn", "Sannl", "Torbera", "Torgga", "Vistra",
    ]
    first = random.choice(male if sex == "Male" else female)
    return first + " " + dwarfLastNames()
def dwarfLastNames():
    """Pick a random dwarf clan name."""
    surnames = ["Balderk", "Battlehammer", "Brawnanvil", "Dankil",
                "Fireforge", "Frostbeard", "Gorunn", "Holderhek",
                "Ironfist", "Loderr", "Lutgehr", "Rumnaheim",
                "Strakeln", "Torunn", "Ungart"]
    return random.choice(surnames)
def halflingNames(sex):
    """Compose a random halfling given name plus family name.

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = [
        "Alton", "Ander", "Cade", "Corrin", "Eldon", "Errich",
        "Finnan", "Garret", "Lindal", "Lyle", "Merric", "Milo",
        "Osborn", "Perrin", "Reed", "Roscoe", "Wellby",
    ]
    female = [
        "Andry", "Bree", "Callie", "Cora", "Euphemia", "Jillian",
        "Kithri", "Lavinia", "Lidda", "Merla", "Nedda", "Paela",
        "Portia", "Seraphina", "Shaena", "Trym", "Vani", "Verna",
    ]
    first = random.choice(male if sex == "Male" else female)
    return first + " " + halflingLastNames()
def halflingLastNames():
    """Pick a random halfling family name."""
    surnames = ["Brushgather", "Goodbarrel", "Greenbottle",
                "High-hill", "Hilltopple", "Leagallow", "Tealeaf",
                "Thorngage", "Tosscobble", "Underbough"]
    return random.choice(surnames)
def orcNames(sex):
    """Pick a random orc name (orcs use no surname here).

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = ["Dench", "Feng", "Gell", "Henk", "Holg", "Imsh",
            "Keth", "Krusk", "Mhurren", "Ront", "Shump", "Thokk"]
    female = ["Baggi", "Emen", "Engong", "Kansif", "Myev", "Neega",
              "Ovak", "Ownka", "Shautha", "Sutha", "Vola", "Volen",
              "Yevelda"]
    return random.choice(male if sex == "Male" else female)
def gnomeNames(sex):
    """Compose a random gnome full name.

    `sex` is "Male" for a male name; any other value yields a female name.
    Only the female branch inserts a nickname, matching the original tables.
    """
    male = [
        "Alston", "Alvyn", "Boddynock", "Brocc", "Burgell", "Dimble",
        "Eldon", "Erky", "Fonkin", "Frug", "Gerbo", "Gimble", "Glim",
        "Jebeddo", "Kellen", "Namfoodle", "Orryn", "Roondar", "Seebo",
        "Sindri", "Warryn", "Wrenn", "Zook",
    ]
    female = [
        "Bimpnottin", "Breena", "Caramip", "Carlin", "Donella",
        "Duvamil", "Ella", "Ellyjobell", "Ellywick", "Lilli",
        "Loopmottin", "Lorilla", "Mardnab", "Nissa", "Nyx", "Oda",
        "Orla", "Roywyn", "Shamil", "Tana", "Waywocket", "Zanna",
    ]
    if sex == "Male":
        return random.choice(male) + " " + gnomeLastNames()
    # Preserve the original order of random draws: first, nickname, surname.
    first = random.choice(female)
    nick = gnomeNickName()
    return f'{first} "{nick}" {gnomeLastNames()}'
def gnomeLastNames():
    """Pick a random gnome clan name."""
    surnames = ["Beren", "Daergel", "Folkor", "Garrick", "Nackle",
                "Murnig", "Ningel", "Raulnor", "Scheppen", "Timbers",
                "Turen"]
    return random.choice(surnames)
def gnomeNickName():
    """Pick a random gnome nickname."""
    nicknames = ["Aleslosh", "Ashhearth", "Badger", "Cloak",
                 "Doublelock", "Filchbatter", "Fnipper", "Ku", "Nim",
                 "Oneshoe", "Pock", "Sparklegem", "Stumbleduck"]
    return random.choice(nicknames)
def tieflingNames(sex):
    """Pick a random tiefling name (tieflings use no surname here).

    `sex` is "Male" for a male name; any other value yields a female name.
    """
    male = ["Akmenos", "Amnon", "Barakas", "Damakos", "Ekemon",
            "Iados", "Kairon", "Leucis", "Melech", "Mordai",
            "Morthos", "Pelaios", "Skamos", "Therai"]
    female = ["Akta", "Anakis", "Bryseis", "Criella", "Damaia", "Ea",
              "Kallista", "Lerissa", "Makaria", "Nemeia", "Orianna",
              "Phelaia", "Rieta"]
    return random.choice(male if sex == "Male" else female)
def tieflingVirtueNames():
    """Pick a random tiefling "virtue" name."""
    virtues = ["Art", "Carrion", "Chant", "Creed", "Despair",
               "Excellence", "Fear", "Glory", "Hope", "Ideal",
               "Music", "Nowhere", "Open", "Poetry", "Quest",
               "Random", "Reverence", "Sorrow", "Temerity",
               "Torment", "Weary"]
    return random.choice(virtues)
def dragonbornNames(sex):
    """Compose a random dragonborn full name.

    `sex` is "Male" for a male name; any other value yields a female name.
    Only the male branch inserts a nickname, matching the original tables.
    """
    male = [
        "Arjhan", "Balasar", "Bharash", "Donaar", "Ghesh", "Heskan",
        "Kriv", "Medrash", "Mehen", "Nadarr", "Pandjed", "Patrin",
        "Rhogar", "Shamash", "Shedinn", "Tarhun", "Torinn",
    ]
    female = [
        "Akra", "Biri", "Daar", "Farideh", "Harann", "Flavilar",
        "Jheri", "Kava", "Korinn", "Mishann", "Nala", "Perra",
        "Raiann", "Sora", "Surina", "Thava", "Uadjit",
    ]
    if sex == "Male":
        # Preserve the original order of random draws: first, nickname, clan.
        first = random.choice(male)
        nick = dragonbornNickNames()
        return f'{first} "{nick}" {dragonbornLastNames()}'
    return random.choice(female) + " " + dragonbornLastNames()
def dragonbornLastNames():
    """Pick a random dragonborn clan name."""
    surnames = ["Clethtinthiallor", "Daardendrian", "Delmirev",
                "Drachedandion", "Fenkenkabradon", "Kepeshkmolik",
                "Kerrhylon", "Kimbatuul", "Linxakasendalor",
                "Myastan", "Nemmonis", "Norixius", "Ophinshtalajiir",
                "Prexijandilin", "Shestendeliath", "Turnuroth",
                "Verthisathurgiesh", "Yarjerit"]
    return random.choice(surnames)
def dragonbornNickNames():
    """Pick a random dragonborn nickname.

    BUGFIX: the original last line had dataset-export junk fused onto the
    return statement (``| 26.757459 | 135 | ...``), which made the module
    unparseable; this emits the clean statement.
    """
    nicknames = ["Climber", "Earbender", "Leaper", "Pious",
                 "Shieldbiter", "Zealous", "Fighter", "Restless",
                 "Trouble", "Defender", "Runner", "Sleeper",
                 "Peaceful"]
    return random.choice(nicknames)
f95b6c2a455456bc6bbd05c586ea17c21ed3ba0f | 5,019 | py | Python | pyplus/tools/file_cleaner.py | tornadoyi/pyplus | 4b98f0eff9c2f027a601ade220c2aaf1769850fd | [
"Apache-2.0"
] | 2 | 2018-09-26T14:01:12.000Z | 2020-04-02T13:44:53.000Z | pyplus/tools/file_cleaner.py | tornadoyi/pyplus | 4b98f0eff9c2f027a601ade220c2aaf1769850fd | [
"Apache-2.0"
] | null | null | null | pyplus/tools/file_cleaner.py | tornadoyi/pyplus | 4b98f0eff9c2f027a601ade220c2aaf1769850fd | [
"Apache-2.0"
] | null | null | null |
import os
import time
import fnmatch
def match(paths, atimeout=None, ctimeout=None, mtimeout=None, seed=None, patterns=None, verbose=False):
    '''
    Scan paths and return the files selected by the patterns and timeouts.

    :param paths: path (str) or list of paths to scan
    :param atimeout: only match files last accessed more than this many seconds ago
    :param ctimeout: only match files created more than this many seconds ago
    :param mtimeout: only match files modified more than this many seconds ago
    :param seed: reference timestamp; defaults to the current time
    :param patterns: list of ('i', pattern) include / ('e', pattern) exclude
                     tuples (fnmatch syntax); defaults to include everything
    :param verbose: print each matched file
    :return: list of matching file paths
    '''
    if isinstance(paths, str):
        paths = [paths]
    assert isinstance(paths, (tuple, list))
    if seed is None:
        seed = time.time()
    if patterns is None:
        # BUGFIX: the default used to be ['i', '*'], which crashed with
        # "ValueError: not enough values to unpack" when iterated below.
        patterns = [('i', '*')]

    def check_include(file_path):
        # Pattern filtering: a file is kept when it matches at least one
        # include pattern (if any are given) and no exclude pattern.
        # BUGFIX: the original fell through for files matching no include
        # pattern, so include patterns never filtered anything.
        has_include = False
        included = False
        for tag, pattern in patterns:
            matched = fnmatch.fnmatch(file_path, pattern)
            if tag == 'i':
                has_include = True
                included = included or matched
            elif matched:
                # exclude patterns always win over includes
                return False
        if has_include and not included:
            return False
        # Timeout filtering: keep only files older than every given timeout.
        at = os.path.getatime(file_path)
        ct = os.path.getctime(file_path)
        mt = os.path.getmtime(file_path)
        if atimeout is not None and seed - at < atimeout:
            return False
        if ctimeout is not None and seed - ct < ctimeout:
            return False
        if mtimeout is not None and seed - mt < mtimeout:
            return False
        return True

    include_files = []
    for path in paths:
        for root, dirs, files in os.walk(path):
            for f in files:
                file_path = os.path.join(root, f)
                if not check_include(file_path):
                    continue
                include_files.append(file_path)
                if verbose:
                    print(file_path)
    return include_files
def remove_empty_dirs(paths):
    """
    Recursively delete empty subdirectories under each given path.

    The given root paths themselves are never removed, only their
    (transitively) empty subdirectories.

    BUGFIX: the original computed which directories were empty but never
    called os.rmdir, so nothing was ever removed.
    """
    def _prune(path, is_root):
        # Returns True when `path` is empty after pruning its children.
        empty = True
        for name in os.listdir(path):
            entry = os.path.join(path, name)
            if os.path.isfile(entry):
                empty = False
            elif not _prune(entry, False):
                empty = False
        if empty and not is_root:
            try:
                os.rmdir(path)
            except OSError:
                # e.g. concurrent writes or permission problems: keep it
                return False
        return empty

    for p in paths:
        _prune(p, True)
def clean(paths, atimeout=None, ctimeout=None, mtimeout=None, seed=None, patterns=None, remove_empty_dir=True, verbose=False):
    '''
    Delete the files selected by match() and optionally prune empty directories.

    :param paths: path (str) or list of paths to clean
    :param atimeout: see match()
    :param ctimeout: see match()
    :param mtimeout: see match()
    :param seed: see match()
    :param patterns: see match()
    :param remove_empty_dir: also remove directories left empty
    :param verbose: print each removed file
    :return: None
    '''
    # find all files to delete
    files = match(paths, atimeout, ctimeout, mtimeout, seed, patterns, False)
    # remove them, best effort: skip files that vanished or cannot be removed
    for f in files:
        try:
            os.remove(f)
        except OSError:
            continue
        if verbose:
            print(f)
    # clear directories that are now empty
    if remove_empty_dir:
        remove_empty_dirs(paths)
def main():
    """Command-line entry point for the file cleaner."""
    import argparse

    class PatternAction(argparse.Action):
        """Collect -i/-e options, in command-line order, as (tag, pattern) tuples."""
        def __call__(self, parser, namespace, values, option_string=None):
            if getattr(namespace, 'patterns', None) is None:
                namespace.patterns = []
            tag = 'i' if self.dest == 'include' else 'e'
            namespace.patterns.append((tag, values))

    parser = argparse.ArgumentParser(prog='fclean', description="A clean tool for remove timeout files and path")
    parser.add_argument('-p', '--path', type=str, required=True, action='append', help='Path for clean')
    parser.add_argument('-t', '--timeout', type=int, help='File will be deleted after timeout')
    parser.add_argument('-at', '--access-timeout', type=int, help='File will be deleted after last access timeout')
    parser.add_argument('-ct', '--creation-timeout', type=int, help='File will be deleted after creation timeout')
    parser.add_argument('-mt', '--modification-timeout', type=int, help='File will be deleted after modification timeout')
    parser.add_argument('-s', '--seed', type=float, default=None, help='Base line of current time')
    parser.add_argument('-i', '--include', type=str, action=PatternAction, help='Include files matching PATTERN')
    parser.add_argument('-e', '--exclude', type=str, action=PatternAction, help='Exclude files matching PATTERN')
    parser.add_argument('-m', '--match', action='store_true', default=False, help='Only execute match instead of remove files')
    parser.add_argument('-v', '--verbose', action='store_true', default=False, help='Increase verbosity')
    parser.add_argument('-k', '--keep', action='store_true', default=False, help='Keep empty directories')
    args = parser.parse_args()

    # BUGFIX: args.patterns only exists when -i/-e was actually given;
    # accessing it unconditionally raised AttributeError.
    patterns = getattr(args, 'patterns', None)

    # -t is a shorthand for -at when no explicit access timeout is given
    if args.timeout is not None and args.access_timeout is None:
        args.access_timeout = args.timeout

    if args.match:
        match(args.path, args.access_timeout, args.creation_timeout, args.modification_timeout,
              args.seed, patterns, args.verbose)
    else:
        clean(args.path, args.access_timeout, args.creation_timeout, args.modification_timeout,
              args.seed, patterns, not args.keep, args.verbose)
# standard script entry point: delegate to the CLI handler
if __name__ == '__main__':
    main()
| 32.803922 | 127 | 0.627615 |
baa7a9183b9bbffa3199b8f13828e8e756e85bf3 | 2,323 | py | Python | carnival_contrib/ssh.py | carnival-org/carnival-contrib | f9275a3be75f3de50f576ee0e3fe19a8f7488309 | [
"MIT"
] | 2 | 2022-02-23T02:31:48.000Z | 2022-02-23T11:00:09.000Z | carnival_contrib/ssh.py | carnival-org/carnival-contrib | f9275a3be75f3de50f576ee0e3fe19a8f7488309 | [
"MIT"
] | null | null | null | carnival_contrib/ssh.py | carnival-org/carnival-contrib | f9275a3be75f3de50f576ee0e3fe19a8f7488309 | [
"MIT"
] | null | null | null | import os
import re
from carnival import Step
from carnival import Connection
def _escape_for_regex(text: str) -> str:
"""
Tnx to https://stackoverflow.com/questions/280435/escaping-regex-string
:param text:
:return:
"""
regex = re.escape(text)
# double escaping for \
regex = regex.replace("\\\\", "\\\\\\")
# triple-escaping for $ signs
regex = regex.replace(r"\$", r"\\\$")
# single quotes should not be escaped
regex = regex.replace(r"\'", "'")
return regex
def _is_file_contains(c: Connection, filename: str, text: str, escape: bool = True) -> bool:
    """
    Check whether a remote file contains the given text (via egrep).

    :param c: host connection
    :param filename: path of the file to search
    :param text: text to look for
    :param escape: escape the text for regex use before searching
    :return: True when egrep finds a match
    """
    pattern = _escape_for_regex(text) if escape else text
    command = 'egrep "{}" "{}"'.format(pattern, filename)
    return c.run(command, hide=True, warn=True).ok
class AddAuthorizedKey(Step):
    """
    Append an ssh public key to `authorized_keys` unless it is already there.
    """

    def __init__(self, ssh_key: str, keys_file: str = ".ssh/authorized_keys") -> None:
        """
        :param ssh_key: the public key to authorize
        :param keys_file: path of the `authorized_keys` file
        """
        self.ssh_key = ssh_key.strip()
        self.keys_file = keys_file

    def run(self, c: Connection) -> bool:
        """
        :return: True if the key was appended, False if it was already present
        """
        c.run("mkdir -p ~/.ssh")
        c.run("chmod 700 ~/.ssh")
        c.run(f"touch {self.keys_file}")
        if _is_file_contains(c, self.keys_file, self.ssh_key, escape=True):
            return False
        c.run(f"echo '{self.ssh_key}' >> {self.keys_file}")
        return True
class CopyId(Step):
    """
    Authorize the current user's public ssh key on the remote host.
    """

    def __init__(self, pubkey_file: str = "~/.ssh/id_rsa.pub") -> None:
        """
        :param pubkey_file: path of the local public key file
        """
        self.pubkey_file = pubkey_file

    def run(self, c: Connection) -> bool:
        """
        :return: True if the key was appended, False if it was already present
        """
        # BUGFIX: the original opened the key file without closing it;
        # use a context manager so the handle is always released.
        with open(os.path.expanduser(self.pubkey_file)) as fh:
            key = fh.read().strip()
        return AddAuthorizedKey(key).run(c=c)
| 29.405063 | 92 | 0.615583 |
aba1d666b5be389d34a5dc658d9020d654d20587 | 5,441 | py | Python | satchmo/apps/payment/modules/sermepa/config.py | funwhilelost/satchmo | 589a5d797533ea15dfde9af7f36e304092d22a94 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/payment/modules/sermepa/config.py | funwhilelost/satchmo | 589a5d797533ea15dfde9af7f36e304092d22a94 | [
"BSD-3-Clause"
] | null | null | null | satchmo/apps/payment/modules/sermepa/config.py | funwhilelost/satchmo | 589a5d797533ea15dfde9af7f36e304092d22a94 | [
"BSD-3-Clause"
] | null | null | null | #
# SERMEPA / ServiRed payments module for Satchmo
#
# Author: Michal Salaban <michal (at) salaban.info>
# with a great help of Fluendo S.A., Barcelona
#
# Based on "Guia de comercios TPV Virtual SIS" ver. 5.18, 15/11/2008, SERMEPA
# For more information about integration look at http://www.sermepa.es/
#
# TODO: SERMEPA interface provides possibility of recurring payments, which
# could be probably used for SubscriptionProducts. This module doesn't support it.
#
from livesettings import *
from django.utils.translation import ugettext_lazy as _
# Livesettings configuration group for the SERMEPA payment module;
# all settings below are registered under this group.
PAYMENT_GROUP = ConfigurationGroup('PAYMENT_SERMEPA',
    _('SERMEPA (ServiRed) Payment Module Settings'))

config_register_list(

    # module wiring (hidden from the settings UI)
    ModuleValue(PAYMENT_GROUP,
        'MODULE',
        description=_('Implementation module'),
        hidden=True,
        default = 'payment.modules.sermepa'
        ),

    StringValue(PAYMENT_GROUP,
        'KEY',
        description=_("Module key"),
        hidden=True,
        default = 'SERMEPA'
        ),

    StringValue(PAYMENT_GROUP,
        'LABEL',
        description=_('English name for this group on the checkout screens'),
        default = 'Credit Card (via SERMEPA)',
        dummy = _('Credit Card (via SERMEPA)'), # Force this to appear on po-files
        help_text = _('This will be passed to the translation utility'),
        ordering=10
        ),

    StringValue(PAYMENT_GROUP,
        'URL_BASE',
        description=_('The url base used for constructing urlpatterns which will use this module'),
        default = '^sermepa/',
        ordering=20
        ),

    BooleanValue(
        PAYMENT_GROUP,
        'LIVE',
        description=_("Accept real payments"),
        help_text=_("False if you want to be in test mode"),
        default=False,
        ordering=30
        ),

    # ISO 4217 numeric currency codes accepted by the gateway
    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_CURRENCY',
        description=_('Currency'),
        default='978',
        choices=[
            ('978', _("EUR (Euro)")),
            ('840', _("USD (US Dollar)")),
            ('826', _("GBP (British Pound)")),
            ('392', _("JPY (Japanese Yen)")),
            ],
        ordering=40
        ),

    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_FUC',
        description=_('Merchant FUC'),
        help_text=_('Your FUC code'),
        ordering=50
        ),

    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_TITULAR',
        description=_('Merchant title'),
        help_text=_('Description of your shop which will be visible on payment confirmation screen'),
        ordering=60,
        ),

    # signature
    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_SIGNATURE_CODE',
        description=_('Signature code'),
        help_text=_('Your secret code used to sign transaction data'),
        ordering=100,
        ),

    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_TEST_SIGNATURE_CODE',
        description=_('Test signature code'),
        help_text=_('Your secret code used to sign transaction data in test payments'),
        ordering=200,
        ),

    BooleanValue(
        PAYMENT_GROUP,
        'EXTENDED_SIGNATURE',
        description=_("Extended signature calculation"),
        help_text=_("Consult your Sermepa documentation to know the differences between normal and extended signature calculation, and ask your provider which type of signature expects you to use. If possible, use extended signature."),
        default=False,
        ordering=105,
        ),

    # terminal
    IntegerValue(
        PAYMENT_GROUP,
        'MERCHANT_TERMINAL',
        description=_('Terminal number'),
        default=1,
        ordering=110
        ),

    IntegerValue(
        PAYMENT_GROUP,
        'MERCHANT_TEST_TERMINAL',
        description=_('Test terminal number'),
        default=1,
        help_text=_('Terminal number used for test payments'),
        ordering=210
        ),

    # post url
    StringValue(
        PAYMENT_GROUP,
        'POST_URL',
        description=_('Post URL'),
        help_text=_('The SERMEPA URL for real transaction posting.'),
        default="https://sis.sermepa.es/sis/realizarPago",
        ordering=120
        ),

    StringValue(
        PAYMENT_GROUP,
        'POST_TEST_URL',
        description=_('Test Post URL'),
        help_text=_('The SERMEPA URL for test transaction posting.'),
        default="https://sis-t.sermepa.es:25443/sis/realizarPago",
        ordering=220
        ),

    # return/notification URLs: named url patterns resolved by satchmo
    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_URL_CALLBACK',
        description=_('Callback URL'),
        help_text=_('Callback URL for on-line notifications about payment progress'),
        default='SERMEPA_satchmo_checkout-notify_callback',
        ordering=300
        ),

    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_URL_OK',
        description=_('OK URL'),
        help_text=_('URL for customer to return after successful payment'),
        default='SERMEPA_satchmo_checkout-success',
        ordering=310
        ),

    StringValue(
        PAYMENT_GROUP,
        'MERCHANT_URL_KO',
        description=_('Failure URL'),
        help_text=_('URL for customer to return after payment failure'),
        default='SERMEPA_satchmo_checkout-failure',
        ordering=320
        ),

    BooleanValue(PAYMENT_GROUP,
        'EXTRA_LOGGING',
        description=_("Verbose logs"),
        help_text=_("Add extensive logs during post."),
        default=False)
)
| 32.195266 | 236 | 0.61055 |
8ee53413966aadbe0741dbd5bc277993179cf863 | 2,075 | py | Python | medium/1448_count_good_nodes_in_binary_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | medium/1448_count_good_nodes_in_binary_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | medium/1448_count_good_nodes_in_binary_tree.py | niki4/leetcode_py3 | 794f560a09a8950da21bd58ea222e0c74449ffa6 | [
"MIT"
] | null | null | null | """
Given a binary tree root, a node X in the tree is named good if in the path from root to X there are no nodes with a value greater than X.
Return the number of good nodes in the binary tree.
Example 1:
(3)
/ \
1 (4)
/ / \
(3) 1 (5)
Input: root = [3,1,4,3,null,1,5]
Output: 4
Explanation: Nodes in parentheses are good.
Root Node (3) is always a good node.
Node 4 -> (3,4) is the maximum value in the path starting from the root.
Node 5 -> (3,4,5) is the maximum value in the path
Node 3 -> (3,1,3) is the maximum value in the path.
Example 2:
(3)
/
(3)
/ \
(4) 2
Input: root = [3,3,null,4,2]
Output: 3
Explanation: Node 2 -> (3, 3, 2) is not good, because "3" is higher than it.
Example 3:
Input: root = [1]
Output: 1
Explanation: Root is considered as good.
Constraints:
The number of nodes in the binary tree is in the range [1, 10^5].
Each node's value is between [-10^4, 10^4].
"""
from tools.binary_tree import TreeNode
class Solution:
    """
    Count "good" nodes: nodes whose value is >= every value on the path
    from the root down to them.

    DFS keeps the running maximum of the current root-to-node path; a node
    is good exactly when its value is at least that maximum.

    Runtime: 256 ms, faster than 52.11% of Python3
    Memory Usage: 33.6 MB, less than 11.91% of Python3

    Time complexity: O(n) where n is the number of nodes in the tree
    Space complexity: O(h) where h is the height of the tree (num of levels)
    to keep recursion stack.
    """

    def __init__(self):
        # running counter of good nodes for the current traversal
        self.good_nodes = 0

    def dfs(self, node: "TreeNode", max_val: int):
        """Traverse the subtree at `node`; `max_val` is the path maximum so far."""
        if node:
            if node.val >= max_val:
                self.good_nodes += 1
            max_val = max(max_val, node.val)
            self.dfs(node.left, max_val)
            self.dfs(node.right, max_val)

    def goodNodes(self, root: "TreeNode") -> int:
        """Return the number of good nodes in the tree rooted at `root`."""
        # BUGFIX: reset the counter so the same Solution instance can be
        # reused for several trees without accumulating stale counts.
        self.good_nodes = 0
        self.dfs(root, root.val)
        return self.good_nodes
| 28.424658 | 138 | 0.61494 |
b61dab5a5f246c1b229e38549ae7c18e03572428 | 240 | py | Python | filedrop.py | nng68/StarPakBrowser | 065e1652d7b1e5bfe384921c789a1167422d62b0 | [
"MIT"
] | 11 | 2019-05-04T11:21:17.000Z | 2021-11-20T04:50:10.000Z | filedrop.py | nng68/StarPakBrowser | 065e1652d7b1e5bfe384921c789a1167422d62b0 | [
"MIT"
] | null | null | null | filedrop.py | nng68/StarPakBrowser | 065e1652d7b1e5bfe384921c789a1167422d62b0 | [
"MIT"
] | null | null | null | import wx
class FileDrop(wx.FileDropTarget):
    """Drop target that forwards dropped file paths to the owning window."""

    def __init__(self, window):
        wx.FileDropTarget.__init__(self)
        # window must implement OnDragFile(filenames)
        self.window = window

    def OnDropFiles(self, x, y, filenames):
        # BUGFIX: the original last line carried fused dataset-export junk
        # and returned None; wx.FileDropTarget.OnDropFiles must return a
        # bool (True to accept the dropped files).
        self.window.OnDragFile(filenames)
        return True
fc4d71b9e1dc47c7737a4fb626aa9381c865d1ae | 11,571 | py | Python | setup.py | giosh94mhz/oletools | 2aa07c1204670b3b42d9b3b566b8e82938f0b897 | [
"BSD-2-Clause"
] | null | null | null | setup.py | giosh94mhz/oletools | 2aa07c1204670b3b42d9b3b566b8e82938f0b897 | [
"BSD-2-Clause"
] | null | null | null | setup.py | giosh94mhz/oletools | 2aa07c1204670b3b42d9b3b566b8e82938f0b897 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
Installs oletools using distutils
Run:
python setup.py install
to install this package.
(setup script partly borrowed from cherrypy)
"""
#--- CHANGELOG ----------------------------------------------------------------
# 2014-08-27 v0.06 PL: - added doc subfolder
# 2015-01-05 v0.07 PL: - added xglob, prettytable
# 2015-02-08 v0.08 PL: - added DridexUrlDecoder
# 2015-03-23 v0.09 PL: - updated description and classifiers, added shebang line
# 2015-06-16 v0.10 PL: - added pyparsing
# 2016-02-08 v0.42 PL: - added colorclass, tablestream
# 2016-07-19 v0.50 PL: - create CLI scripts using entry points (by 2*yo)
# 2016-07-29 PL: - use setuptools if available
# 2016-09-05 PL: - added more entry points
# 2017-01-18 v0.51 PL: - added package zipfile27 (issue #121)
# 2017-10-18 v0.52 PL: - added msodde
# 2018-03-19 v0.52.3 PL: - added install_requires, removed thirdparty.pyparsing
# 2018-09-11 v0.54 PL: - olefile is now a dependency
# 2018-09-15 PL: - easygui is now a dependency
# 2018-09-22 PL: - colorclass is now a dependency
# 2018-10-27 PL: - fixed issue #359 (bug when importing log_helper)
# 2019-02-26 CH: - add optional dependency msoffcrypto for decryption
# 2019-05-22 PL: - 'msoffcrypto-tool' is now a required dependency
# 2019-05-23 v0.55 PL: - added pcodedmp as dependency
# 2019-09-24 PL: - removed oletools.thirdparty.DridexUrlDecoder
# 2019-11-10 PL: - changed pyparsing from 2.2.0 to 2.1.0 for issue #481
#--- TODO ---------------------------------------------------------------------
#--- IMPORTS ------------------------------------------------------------------
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
#from distutils.command.install import INSTALL_SCHEMES
import os, fnmatch
#--- METADATA -----------------------------------------------------------------
# Package metadata passed to setup() in main() below.
name = "oletools"
version = '0.55.2'
desc = "Python tools to analyze security characteristics of MS Office and OLE files (also called Structured Storage, Compound File Binary Format or Compound Document File Format), for Malware Analysis and Incident Response #DFIR"
# BUGFIX: read the README with a context manager instead of leaking the
# file handle returned by open().
with open('oletools/README.rst') as _readme_file:
    long_desc = _readme_file.read()
author = "Philippe Lagadec"
author_email = "nospam@decalage.info"
url = "http://www.decalage.info/python/oletools"
license = "BSD"
download_url = "https://github.com/decalage2/oletools/releases"

# see https://pypi.org/pypi?%3Aaction=list_classifiers
classifiers=[
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "Intended Audience :: Information Technology",
    "Intended Audience :: Science/Research",
    "Intended Audience :: System Administrators",
    "License :: OSI Approved :: BSD License",
    "Natural Language :: English",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Topic :: Security",
    "Topic :: Software Development :: Libraries :: Python Modules",
    ]
#--- PACKAGES -----------------------------------------------------------------
# Python packages shipped with the distribution (passed to setup()).
packages=[
    "oletools",
    "oletools.common",
    "oletools.common.log_helper",
    'oletools.thirdparty',
    'oletools.thirdparty.xxxswf',
    'oletools.thirdparty.prettytable',
    'oletools.thirdparty.xglob',
    'oletools.thirdparty.tablestream',
    'oletools.thirdparty.oledump',
]
##setupdir = '.'
##package_dir={'': setupdir}
#--- PACKAGE DATA -------------------------------------------------------------
## Often, additional files need to be installed into a package. These files are
## often data that's closely related to the package's implementation, or text
## files containing documentation that might be of interest to programmers using
## the package. These files are called package data.
##
## Package data can be added to packages using the package_data keyword argument
## to the setup() function. The value must be a mapping from package name to a
## list of relative path names that should be copied into the package. The paths
## are interpreted as relative to the directory containing the package
## (information from the package_dir mapping is used if appropriate); that is,
## the files are expected to be part of the package in the source directories.
## They may contain glob patterns as well.
##
## The path names may contain directory portions; any necessary directories will
## be created in the installation.
# the following functions are used to dynamically include package data without
# listing every file here:
def riglob(top, prefix='', pattern='*'):
    """
    Recursive iterator glob.

    - top: directory to start searching from
    - prefix: path used instead of top when building the yielded paths
      (lets the caller choose the root of relative paths)
    - pattern: wildcard to select files (same syntax as fnmatch)

    Yields each matching file found in top and its subdirectories,
    files of a directory first, then its subdirectories in listing order.
    """
    subdirs = []
    for entry in os.listdir(top):
        full = os.path.join(top, entry)
        if os.path.isdir(full):
            subdirs.append(entry)
        elif os.path.isfile(full) and fnmatch.fnmatch(entry, pattern):
            yield os.path.join(prefix, entry)
    for sub in subdirs:
        sub_top = os.path.join(top, sub)
        sub_prefix = os.path.join(prefix, sub)
        for found in riglob(sub_top, sub_prefix, pattern):
            yield found
def rglob(top, prefix='', pattern='*'):
    """
    Recursive glob.

    Same as riglob, but materializes the result as a list.
    """
    return list(riglob(top, prefix, pattern))
# Non-Python files to install alongside each package (passed to setup());
# the doc folder contents are collected dynamically with rglob() above.
package_data={
        'oletools': [
            'README.rst',
            'README.html',
            'LICENSE.txt',
            ]
            # doc folder: md, html, png
            + rglob('oletools/doc', 'doc', '*.html')
            + rglob('oletools/doc', 'doc', '*.md')
            + rglob('oletools/doc', 'doc', '*.png'),
        'oletools.thirdparty.xglob': [
            'LICENSE.txt',
            ],
        'oletools.thirdparty.xxxswf': [
            'LICENSE.txt',
            ],
        'oletools.thirdparty.prettytable': [
            'CHANGELOG', 'COPYING', 'README'
            ],
        'oletools.thirdparty.DridexUrlDecoder': [
            'LICENSE.txt',
            ],
        # 'oletools.thirdparty.tablestream': [
        #     'LICENSE', 'README',
        #     ],
        }
#--- data files ---------------------------------------------------------------
# not used for now.
## The data_files option can be used to specify additional files needed by the
## module distribution: configuration files, message catalogs, data files,
## anything which doesn?t fit in the previous categories.
##
## data_files specifies a sequence of (directory, files) pairs in the following way:
##
## setup(...,
## data_files=[('bitmaps', ['bm/b1.gif', 'bm/b2.gif']),
## ('config', ['cfg/data.cfg']),
## ('/etc/init.d', ['init-script'])]
## )
##
## Note that you can specify the directory names where the data files will be
## installed, but you cannot rename the data files themselves.
##
## Each (directory, files) pair in the sequence specifies the installation
## directory and the files to install there. If directory is a relative path,
## it is interpreted relative to the installation prefix (Python's sys.prefix for
## pure-Python packages, sys.exec_prefix for packages that contain extension
## modules). Each file name in files is interpreted relative to the setup.py
## script at the top of the package source distribution. No directory information
## from files is used to determine the final location of the installed file;
## only the name of the file is used.
##
## You can specify the data_files options as a simple sequence of files without
## specifying a target directory, but this is not recommended, and the install
## command will print a warning in this case. To install data files directly in
## the target directory, an empty string should be given as the directory.
##data_files=[
## ('balbuzard', [
## 'balbuzard/README.txt',
## ]),
##]
##if sys.version_info >= (3, 0):
## required_python_version = '3.0'
## setupdir = 'py3'
##else:
## required_python_version = '2.3'
## setupdir = 'py2'
##data_files = [(install_dir, ['%s/%s' % (setupdir, f) for f in files])
## for install_dir, files in data_files]
##def fix_data_files(data_files):
## """
## bdist_wininst seems to have a bug about where it installs data files.
## I found a fix the django team used to work around the problem at
## http://code.djangoproject.com/changeset/8313 . This function
## re-implements that solution.
## Also see http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
## for more info.
## """
## def fix_dest_path(path):
## return '\\PURELIB\\%(path)s' % vars()
##
## if not 'bdist_wininst' in sys.argv: return
##
## data_files[:] = [
## (fix_dest_path(path), files)
## for path, files in data_files]
##fix_data_files(data_files)
# --- SCRIPTS ------------------------------------------------------------------
# Entry points to create convenient scripts automatically
# Console scripts generated at install time, one per oletools CLI tool
# (passed to setup() via the entry_points argument).
entry_points = {
    'console_scripts': [
        'ezhexviewer=oletools.ezhexviewer:main',
        'mraptor=oletools.mraptor:main',
        'mraptor3=oletools.mraptor3:main',
        'olebrowse=oletools.olebrowse:main',
        'oledir=oletools.oledir:main',
        'oleid=oletools.oleid:main',
        'olemap=oletools.olemap:main',
        'olemeta=oletools.olemeta:main',
        'oletimes=oletools.oletimes:main',
        'olevba=oletools.olevba:main',
        'olevba3=oletools.olevba3:main',
        'pyxswf=oletools.pyxswf:main',
        'rtfobj=oletools.rtfobj:main',
        'oleobj=oletools.oleobj:main',
        'msodde=oletools.msodde:main',
        'olefile=olefile.olefile:main',
    ],
}
# scripts=['oletools/olevba.py', 'oletools/mraptor.py']
# === MAIN =====================================================================
def main():
    """Run the setuptools setup() for this package.

    Uses the module-level metadata variables (name, version, desc,
    long_desc, classifiers, ...) defined earlier in this file.
    """
    # TODO: warning about Python 2.6
##    # set default location for "data_files" to
##    # platform specific "site-packages" location
##    for scheme in list(INSTALL_SCHEMES.values()):
##        scheme['data'] = scheme['purelib']
    # NOTE(review): the returned Distribution object is never used.
    dist = setup(
        name=name,
        version=version,
        description=desc,
        long_description=long_desc,
        classifiers=classifiers,
        author=author,
        author_email=author_email,
        url=url,
        license=license,
        # package_dir=package_dir,
        packages=packages,
        package_data = package_data,
        download_url=download_url,
        # data_files=data_files,
        entry_points=entry_points,
        test_suite="tests",
        # scripts=scripts,
        install_requires=[
            "pyparsing>=2.1.0", # changed from 2.2.0 to 2.1.0 for issue #481
            "olefile>=0.46",
            "easygui",
            'colorclass',
            'msoffcrypto-tool',
            'pcodedmp>=1.2.5',
        ],
    )
# Run the package setup when this file is executed as a script.
if __name__ == "__main__":
    main()
| 34.747748 | 237 | 0.616369 |
c37c94bbda24c3e3c3e3c507d3e2d57383cdb671 | 561 | py | Python | backend/app/protocol.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | 2 | 2020-11-23T13:38:49.000Z | 2021-08-17T15:37:04.000Z | backend/app/protocol.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | backend/app/protocol.py | williamsyb/StockTick | 1dd10101d44fa3a0584f849b022fc8254c2e66c7 | [
"MIT"
] | null | null | null | from flask import jsonify
class Resp:
    """Response envelope carrying an error number, a message and a payload.

    ``__slots__`` keeps instances small and forbids extra attributes.
    """

    __slots__ = 'error_no', 'msg', 'data'

    def __init__(self, error_no, msg, data):
        # Store the three envelope fields as-is.
        self.error_no = error_no
        self.msg = msg
        self.data = data

    def to_json(self):
        """Serialise the envelope to a Flask JSON response."""
        payload = {
            'error_no': self.error_no,
            'msg': self.msg,
            'data': self.data,
        }
        return jsonify(payload)
# TODO: DataFrame -> dict conversion helper.
def serialize(data):
    """Convert a pandas DataFrame into a column-oriented dict of lists."""
    return data.to_dict(orient='list')
class ModuleException(Exception):
    """Error carrying a trace/error detail and a human-readable message.

    ``Exception.__init__`` is intentionally not invoked, matching the
    original behaviour (``args`` stays empty).
    """

    def __init__(self, trace_error, msg):
        # Keep both pieces so callers can log the trace and show the message.
        self.trace_error = trace_error
        self.msg = msg
| 21.576923 | 87 | 0.639929 |
e6a989748ae9223a34fdafe30affc1f8a76e4585 | 5,249 | py | Python | spectacles/migrations/0002_auto_20180531_2047.py | hyperstudio/spectacles | 25456c84fd4d338520847d0535854d4c3c2c242b | [
"MIT"
] | 1 | 2020-01-11T14:16:31.000Z | 2020-01-11T14:16:31.000Z | spectacles/migrations/0002_auto_20180531_2047.py | hyperstudio/spectacles | 25456c84fd4d338520847d0535854d4c3c2c242b | [
"MIT"
] | null | null | null | spectacles/migrations/0002_auto_20180531_2047.py | hyperstudio/spectacles | 25456c84fd4d338520847d0535854d4c3c2c242b | [
"MIT"
] | 1 | 2019-05-13T14:32:39.000Z | 2019-05-13T14:32:39.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-31 20:47
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
import enumfields.fields
import spectacles.models
import spectacles.utils
import uuid
class Migration(migrations.Migration):
    """Auto-generated migration (Django 1.11): creates the Annotation,
    Archive, Bookmark, Document and Upload models, then adds the foreign
    keys that could only be wired up once all models existed.

    NOTE(review): generated code -- do not hand-edit operations; add a new
    migration instead.
    """
    dependencies = [
        ('spectacles', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Annotation',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('uuid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('vector', models.BinaryField(null=True)),
                ('vector_needs_synch', models.BooleanField(default=True)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(spectacles.utils.VectorModel, spectacles.utils.DictModel, models.Model),
        ),
        migrations.CreateModel(
            name='Archive',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('title', models.TextField()),
                ('description', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('members', models.ManyToManyField(related_name='archives', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Bookmark',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('annotation', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to='spectacles.Annotation')),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(spectacles.utils.DictModel, models.Model),
        ),
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('state', enumfields.fields.EnumIntegerField(enum=spectacles.models.DocumentState)),
                ('title', models.TextField()),
                ('text', models.TextField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('author', models.TextField()),
                ('vector', models.BinaryField(null=True)),
                ('vector_needs_synch', models.BooleanField(default=True)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='documents', to=settings.AUTH_USER_MODEL)),
            ],
            bases=(spectacles.utils.VectorModel, spectacles.utils.DictModel, models.Model),
            managers=[
                ('slim', django.db.models.manager.Manager()),
            ],
        ),
        migrations.CreateModel(
            name='Upload',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('state', enumfields.fields.EnumIntegerField(enum=spectacles.models.UploadState)),
                ('source_file', models.FileField(max_length=256, upload_to='uploads/')),
                ('content_type', models.CharField(max_length=256)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('creator', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='uploads', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Foreign keys added after all models above exist.
        migrations.AddField(
            model_name='document',
            name='upload',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='documents', to='spectacles.Upload'),
        ),
        migrations.AddField(
            model_name='bookmark',
            name='document',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bookmarks', to='spectacles.Document'),
        ),
        migrations.AddField(
            model_name='annotation',
            name='document',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='annotations', to='spectacles.Document'),
        ),
    ]
| 49.056075 | 160 | 0.612117 |
82311ae12cda2de1934abb0baba6238c9fe6d78f | 9,553 | py | Python | targets/pipistrello/base.py | niklasnisbeth/litex-buildenv | 846fc6193972692f9de8d99146f29702d6ff80e3 | [
"BSD-2-Clause"
] | 87 | 2017-03-31T05:58:30.000Z | 2022-03-03T20:26:07.000Z | targets/pipistrello/base.py | niklasnisbeth/litex-buildenv | 846fc6193972692f9de8d99146f29702d6ff80e3 | [
"BSD-2-Clause"
] | 247 | 2015-07-02T13:23:17.000Z | 2017-03-16T23:43:39.000Z | targets/pipistrello/base.py | niklasnisbeth/litex-buildenv | 846fc6193972692f9de8d99146f29702d6ff80e3 | [
"BSD-2-Clause"
] | 49 | 2017-03-17T11:25:53.000Z | 2021-01-26T03:08:58.000Z | # Support for the Pipistrello - http://pipistrello.saanlima.com/
from fractions import Fraction
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litedram.modules import MT46H32M16
from litedram.phy import s6ddrphy
from litedram.core import ControllerSettings
#from gateware import i2c_hack
from gateware import info
from gateware import spi_flash
from targets.utils import csr_map_update
class _CRG(Module):
    """Clock/reset generator for the Pipistrello board (Spartan-6).

    Derives the system clock, the DDR full/half-rate clocks and a 50 MHz
    peripheral clock from the board's 50 MHz input via a PLL_ADV and a
    DCM_CLKGEN, and generates a power-on reset.
    """
    def __init__(self, platform, clk_freq):
        # Clock domains for the system (soft CPU and related components run at).
        self.clock_domains.cd_sys = ClockDomain()
        # Clock domains for the DDR interface.
        self.clock_domains.cd_sdram_half = ClockDomain()
        self.clock_domains.cd_sdram_full_wr = ClockDomain()
        self.clock_domains.cd_sdram_full_rd = ClockDomain()
        # Clock domain for peripherals (such as HDMI output).
        self.clock_domains.cd_base50 = ClockDomain()
        self.reset = Signal()
        # Input 50MHz clock
        f0 = 50*1000000
        clk50 = platform.request("clk50")
        clk50a = Signal()
        # Input 50MHz clock (buffered)
        self.specials += Instance("IBUFG", i_I=clk50, o_O=clk50a)
        clk50b = Signal()
        self.specials += Instance(
            "BUFIO2", p_DIVIDE=1,
            p_DIVIDE_BYPASS="TRUE", p_I_INVERT="FALSE",
            i_I=clk50a, o_DIVCLK=clk50b)
        # PLL multiplier/divider: f_sys = f0 * n / (d * p); the asserts keep
        # the phase-frequency detector and VCO inside their legal ranges.
        p = 12
        f = Fraction(clk_freq*p, f0)
        n, d = f.numerator, f.denominator
        assert 19e6 <= f0/d <= 500e6 # pfd
        assert 400e6 <= f0*n/d <= 1080e6 # vco
        # Unbuffered output signals from the PLL. They need to be buffered
        # before feeding into the fabric.
        unbuf_sdram_full = Signal()
        unbuf_sdram_half_a = Signal()
        unbuf_sdram_half_b = Signal()
        unbuf_unused = Signal()
        unbuf_sys = Signal()
        unbuf_periph = Signal()
        # PLL signals
        pll_lckd = Signal()
        pll_fb = Signal()
        self.specials.pll = Instance(
            "PLL_ADV",
            name="crg_pll_adv",
            p_SIM_DEVICE="SPARTAN6", p_BANDWIDTH="OPTIMIZED", p_COMPENSATION="INTERNAL",
            p_REF_JITTER=.01,
            i_DADDR=0, i_DCLK=0, i_DEN=0, i_DI=0, i_DWE=0, i_RST=0, i_REL=0,
            p_DIVCLK_DIVIDE=d,
            # Input Clocks (50MHz)
            i_CLKIN1=clk50b,
            p_CLKIN1_PERIOD=1e9/f0,
            i_CLKIN2=0,
            p_CLKIN2_PERIOD=0.,
            i_CLKINSEL=1,
            # Feedback
            i_CLKFBIN=pll_fb, o_CLKFBOUT=pll_fb, o_LOCKED=pll_lckd,
            p_CLK_FEEDBACK="CLKFBOUT",
            p_CLKFBOUT_MULT=n, p_CLKFBOUT_PHASE=0.,
            # (333MHz) sdram wr rd
            o_CLKOUT0=unbuf_sdram_full, p_CLKOUT0_DUTY_CYCLE=.5,
            p_CLKOUT0_PHASE=0., p_CLKOUT0_DIVIDE=p//4,
            # unused?
            o_CLKOUT1=unbuf_unused, p_CLKOUT1_DUTY_CYCLE=.5,
            p_CLKOUT1_PHASE=0., p_CLKOUT1_DIVIDE=15,
            # (166MHz) sdram_half - sdram dqs adr ctrl
            o_CLKOUT2=unbuf_sdram_half_a, p_CLKOUT2_DUTY_CYCLE=.5,
            p_CLKOUT2_PHASE=270., p_CLKOUT2_DIVIDE=p//2,
            # (166MHz) off-chip ddr
            o_CLKOUT3=unbuf_sdram_half_b, p_CLKOUT3_DUTY_CYCLE=.5,
            p_CLKOUT3_PHASE=250., p_CLKOUT3_DIVIDE=p//2,
            # ( 50MHz) periph
            o_CLKOUT4=unbuf_periph, p_CLKOUT4_DUTY_CYCLE=.5,
            p_CLKOUT4_PHASE=0., p_CLKOUT4_DIVIDE=20,
            # ( 83MHz) sysclk
            o_CLKOUT5=unbuf_sys, p_CLKOUT5_DUTY_CYCLE=.5,
            p_CLKOUT5_PHASE=0., p_CLKOUT5_DIVIDE=p//1,
        )
        # power on reset?
        reset = platform.request("user_btn") | self.reset
        self.clock_domains.cd_por = ClockDomain()
        # 2^11-cycle power-on countdown; sys reset is held while por > 0.
        por = Signal(max=1 << 11, reset=(1 << 11) - 1)
        self.sync.por += If(por != 0, por.eq(por - 1))
        self.specials += AsyncResetSynchronizer(self.cd_por, reset)
        # System clock - 75MHz
        self.specials += Instance("BUFG", i_I=unbuf_sys, o_O=self.cd_sys.clk)
        self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
        self.specials += AsyncResetSynchronizer(self.cd_sys, ~pll_lckd | (por > 0))
        # SDRAM clocks
        # ------------------------------------------------------------------------------
        self.clk4x_wr_strb = Signal()
        self.clk4x_rd_strb = Signal()
        # sdram_full
        self.specials += Instance("BUFPLL", name="sdram_full_bufpll",
                                  p_DIVIDE=4,
                                  i_PLLIN=unbuf_sdram_full, i_GCLK=self.cd_sys.clk,
                                  i_LOCKED=pll_lckd,
                                  o_IOCLK=self.cd_sdram_full_wr.clk,
                                  o_SERDESSTROBE=self.clk4x_wr_strb)
        # Read side shares the write-side clock and strobe.
        self.comb += [
            self.cd_sdram_full_rd.clk.eq(self.cd_sdram_full_wr.clk),
            self.clk4x_rd_strb.eq(self.clk4x_wr_strb),
        ]
        # sdram_half
        self.specials += Instance("BUFG", name="sdram_half_a_bufpll", i_I=unbuf_sdram_half_a, o_O=self.cd_sdram_half.clk)
        clk_sdram_half_shifted = Signal()
        self.specials += Instance("BUFG", name="sdram_half_b_bufpll", i_I=unbuf_sdram_half_b, o_O=clk_sdram_half_shifted)
        # Forward the (phase-shifted) half-rate clock to the DDR chip as a
        # differential pair using two ODDR2 output registers.
        clk = platform.request("ddram_clock")
        self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
                                  p_INIT=0, p_SRTYPE="SYNC",
                                  i_D0=1, i_D1=0, i_S=0, i_R=0, i_CE=1,
                                  i_C0=clk_sdram_half_shifted,
                                  i_C1=~clk_sdram_half_shifted,
                                  o_Q=clk.p)
        self.specials += Instance("ODDR2", p_DDR_ALIGNMENT="NONE",
                                  p_INIT=0, p_SRTYPE="SYNC",
                                  i_D0=0, i_D1=1, i_S=0, i_R=0, i_CE=1,
                                  i_C0=clk_sdram_half_shifted, i_C1=~clk_sdram_half_shifted,
                                  o_Q=clk.n)
        # Peripheral clock - 50MHz
        # ------------------------------------------------------------------------------
        # The peripheral clock is kept separate from the system clock to allow
        # the system clock to be increased in the future.
        dcm_base50_locked = Signal()
        self.specials += [
            Instance("DCM_CLKGEN", name="crg_periph_dcm_clkgen",
                     p_CLKIN_PERIOD=20.0,
                     p_CLKFX_MULTIPLY=2,
                     p_CLKFX_DIVIDE=2,
                     p_CLKFX_MD_MAX=1.0, # CLKFX_MULTIPLY/CLKFX_DIVIDE
                     p_CLKFXDV_DIVIDE=2,
                     p_SPREAD_SPECTRUM="NONE",
                     p_STARTUP_WAIT="FALSE",
                     i_CLKIN=clk50a,
                     o_CLKFX=self.cd_base50.clk,
                     o_LOCKED=dcm_base50_locked,
                     i_FREEZEDCM=0,
                     i_RST=ResetSignal(),
                     ),
            AsyncResetSynchronizer(self.cd_base50,
                                   self.cd_sys.rst | ~dcm_base50_locked)
        ]
        platform.add_period_constraint(self.cd_base50.clk, 20)
class BaseSoC(SoCSDRAM):
    """Base SoC for the Pipistrello: CRG, info block, SPI flash and DDR SDRAM.

    Runs at 83.33 MHz (83 + 1/3 MHz) and boots from SPI flash
    (FLASH_BOOT_ADDRESS = flash base + gateware size + BIOS size).
    """
    csr_peripherals = (
        "spiflash",
        "front_panel",
        "ddrphy",
        "info",
    )
    csr_map_update(SoCSDRAM.csr_map, csr_peripherals)
    mem_map = {
        "spiflash": 0x20000000, # (default shadow @0xa0000000)
    }
    mem_map.update(SoCSDRAM.mem_map)
    def __init__(self, platform, **kwargs):
        # Default ROM/SRAM sizes unless the caller overrides them.
        if 'integrated_rom_size' not in kwargs:
            kwargs['integrated_rom_size']=0x8000
        if 'integrated_sram_size' not in kwargs:
            kwargs['integrated_sram_size']=0x4000
        # 83.333... MHz system clock (see _CRG for how it is synthesised).
        clk_freq = (83 + Fraction(1, 3))*1000*1000
        SoCSDRAM.__init__(self, platform, clk_freq, **kwargs)
        self.submodules.crg = _CRG(platform, clk_freq)
        self.platform.add_period_constraint(self.crg.cd_sys.clk, 1e9/clk_freq)
        self.submodules.info = info.Info(platform, self.__class__.__name__)
        # SPI flash (4-bit mode) mapped into the SoC address space.
        self.submodules.spiflash = spi_flash.SpiFlash(
            platform.request("spiflash4x"),
            dummy=platform.spiflash_read_dummy_bits,
            div=platform.spiflash_clock_div)
        self.add_constant("SPIFLASH_PAGE_SIZE", platform.spiflash_page_size)
        self.add_constant("SPIFLASH_SECTOR_SIZE", platform.spiflash_sector_size)
        self.register_mem("spiflash", self.mem_map["spiflash"],
            self.spiflash.bus, size=platform.spiflash_total_size)
        # Firmware lives in flash right after the gateware and the BIOS.
        bios_size = 0x8000
        self.flash_boot_address = self.mem_map["spiflash"]+platform.gateware_size+bios_size
        self.add_constant("FLASH_BOOT_ADDRESS", self.flash_boot_address)
        # sdram
        sdram_module = MT46H32M16(self.clk_freq, "1:2")
        self.submodules.ddrphy = s6ddrphy.S6HalfRateDDRPHY(
            platform.request("ddram"),
            sdram_module.memtype,
            rd_bitslip=1,
            wr_bitslip=3,
            dqs_ddr_alignment="C1")
        controller_settings = ControllerSettings(with_bandwidth=True)
        self.register_sdram(self.ddrphy,
                            sdram_module.geom_settings,
                            sdram_module.timing_settings,
                            controller_settings=controller_settings)
        # Connect the 4x serdes strobes generated by the CRG to the PHY.
        self.comb += [
            self.ddrphy.clk4x_wr_strb.eq(self.crg.clk4x_wr_strb),
            self.ddrphy.clk4x_rd_strb.eq(self.crg.clk4x_rd_strb),
        ]
# Module-level alias so callers can refer to this target's SoC class generically.
SoC = BaseSoC
| 40.308017 | 121 | 0.583482 |
5b2a6cf5c9b5a31d09848072c815184d5ef33e49 | 2,238 | py | Python | tsfuse/construction/__init__.py | vishalbelsare/tsfuse | 5b80e0d1ef0a9d902f8a7e7ea8207e9921e26289 | [
"Apache-2.0"
] | 23 | 2019-09-23T11:55:36.000Z | 2021-12-22T10:54:49.000Z | tsfuse/construction/__init__.py | vishalbelsare/tsfuse | 5b80e0d1ef0a9d902f8a7e7ea8207e9921e26289 | [
"Apache-2.0"
] | null | null | null | tsfuse/construction/__init__.py | vishalbelsare/tsfuse | 5b80e0d1ef0a9d902f8a7e7ea8207e9921e26289 | [
"Apache-2.0"
] | 4 | 2020-07-21T09:33:08.000Z | 2021-12-02T18:08:23.000Z | from .autods19 import construct as construct_autods19
from .unsupervised import construct as construct_unsupervised
__all__ = ['construct']
def construct(X, y=None, task='classification', transformers='full', return_graph=False, **kwargs):
    """Construct features for a multi-view time series dataset ``X, y``.

    Implements the feature construction method of De Brabandere et al.,
    presented at the ECML/PKDD Workshop on Automating Data Science 2019
    ("Automating Feature Construction for Multi-View Time Series Data").
    When ``y`` is given the supervised construction is used; otherwise the
    unsupervised variant runs. The construction method will change in
    future versions of TSFuse.

    Parameters
    ----------
    X : dict(str, Collection)
        Multi-view time series data.
    y : array-like, optional
        Target data. Not required for unsupervised feature construction.
    task : {'classification', 'regression'}, optional
        Machine learning task (supervised construction only).
        Default: `classification`
    transformers : {'minimal', 'fast', 'full'}, optional
        Feature construction settings. Default: `full` (the most
        extensive set of transformers)
    return_graph : bool, optional
        Also return the computation graph. Default: `False`

    Returns
    -------
    features : pandas.DataFrame
        Tabular representation of the constructed features.
    graph : Graph
        Constructed computation graph. Only returned if
        ``return_graph == True``
    """
    if y is None:
        graph, data = construct_unsupervised(
            X, transformers=transformers, return_data=True, **kwargs)
    else:
        graph, data = construct_autods19(
            X, y, task=task, transformers=transformers, return_data=True,
            **kwargs)
    return (data, graph) if return_graph else data
| 37.3 | 305 | 0.663986 |
d1025a4cef51f6a2b5f0e58370ca862b1c6e18e9 | 4,198 | py | Python | calculator.py | kumaraditya303/General-Programs | bb1365dd7fa0a45809301ba1c7090726756a0bed | [
"MIT"
] | 2 | 2020-07-13T20:43:59.000Z | 2021-02-26T03:04:56.000Z | calculator.py | kumaraditya303/General-Programs | bb1365dd7fa0a45809301ba1c7090726756a0bed | [
"MIT"
] | null | null | null | calculator.py | kumaraditya303/General-Programs | bb1365dd7fa0a45809301ba1c7090726756a0bed | [
"MIT"
] | null | null | null | # Simple GUI Calculator BY Kumar Aditya
import tkinter as tk
from tkinter import *
import time
# Infix expression accumulated from the pressed keys (mirrored on the display).
expression = ""
def press(num):
    """Append the pressed key's text to the expression and refresh the display."""
    global expression
    expression = expression + str(num)
    equation.set(expression)
def equalpress():
    """Evaluate the accumulated expression and display the result.

    On any evaluation error (syntax error, division by zero, ...) the
    display shows " Error " and the expression is reset.
    """
    global expression
    try:
        # eval() is only ever fed characters produced by the calculator
        # buttons, so arbitrary-code injection is not a concern here.
        total = str(eval(expression))
        equation.set(total)
        expression = total
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        equation.set(" Error ")
        expression = ""
def clear():
    """Reset the calculator: wipe the expression and blank the display."""
    global expression
    expression = ""
    equation.set(expression)
# --- GUI construction ---------------------------------------------------
gui = Tk()
gui.configure(background="light blue")
gui.title("Simple GUI Calculator")

# The label at the top mirrors the running expression via `equation`.
equation = StringVar()
expression_block = Label(gui, textvariable=equation,
                         font="Cascadia", bg="#FFFFFF", fg="#000000")
expression_block.grid(columnspan=4, ipadx=100, ipady=30)


def _add_key(text, command, row, column):
    """Create one calculator key with the shared look and place it on the grid."""
    key = Button(gui, text=text, fg='black', bg='red', command=command,
                 height=3, width=7, font="Cascadia")
    key.grid(row=row, column=column)
    return key


# One (label, callback, row, column) tuple per key. A table avoids 20
# near-identical Button(...) blocks and fixes the previous shadowing where
# the name `clear` was rebound from the clear() function to a Button widget.
for _text, _command, _row, _column in [
        (' 1 ', lambda: press(1), 2, 0),
        (' 2 ', lambda: press(2), 2, 1),
        (' 3 ', lambda: press(3), 2, 2),
        (' + ', lambda: press("+"), 2, 3),
        (' 4 ', lambda: press(4), 3, 0),
        (' 5 ', lambda: press(5), 3, 1),
        (' 6 ', lambda: press(6), 3, 2),
        (' - ', lambda: press("-"), 3, 3),
        (' 7 ', lambda: press(7), 4, 0),
        (' 8 ', lambda: press(8), 4, 1),
        (' 9 ', lambda: press(9), 4, 2),
        (' * ', lambda: press("*"), 4, 3),
        (' 0 ', lambda: press(0), 5, 0),
        (' . ', lambda: press("."), 5, 1),
        (' = ', equalpress, 5, 2),
        (' / ', lambda: press("/"), 5, 3),
        (' ^ ', lambda: press("**"), 6, 0),
        # "%" appends "*0.01", i.e. percent of the running value.
        (' % ', lambda: press("*0.01"), 6, 1),
        # NOTE(review): 22/7 is only a rough approximation of pi.
        (' π ', lambda: press("22/7"), 6, 2),
        ('Clear', clear, 6, 3),
]:
    _add_key(_text, _command, _row, _column)
gui.mainloop() | 35.880342 | 85 | 0.584088 |
ac3f31cd199c28eb13266a4d9a6882aa7b8967ed | 2,561 | py | Python | luxon/core/session/sessionredis.py | HieronymusCrouse/luxon | b0b08c103936adcbb3dd03b1701d44a65de8f61e | [
"BSD-3-Clause"
] | 7 | 2018-02-27T00:18:02.000Z | 2019-05-16T16:57:00.000Z | luxon/core/session/sessionredis.py | HieronymusCrouse/luxon | b0b08c103936adcbb3dd03b1701d44a65de8f61e | [
"BSD-3-Clause"
] | 47 | 2018-01-23T13:49:28.000Z | 2019-06-06T13:14:59.000Z | luxon/core/session/sessionredis.py | HieronymusCrouse/luxon | b0b08c103936adcbb3dd03b1701d44a65de8f61e | [
"BSD-3-Clause"
] | 14 | 2018-01-15T08:47:11.000Z | 2019-12-27T12:05:41.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2020 Christiaan Frans Rademan <chris@fwiw.co.za>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
from luxon.helpers.rd import Redis
class SessionRedis(object):
    """Session Redis Interface.
    Used for storing session data in Redis. Helpful when running multiple
    instances of luxon which requires a shared session state.
    Please refer to Session.
    """
    def __init__(self, expire, session_id, session):
        # NOTE(review): `_redis` is assigned but never used again; every
        # method below opens its own `with Redis()` context instead.
        self._redis = Redis()
        # Expiry passed to redis.set() when the session is persisted.
        self._expire = expire
        # Mutable mapping holding the session data.
        self._session = session
        # Redis key under which this session is stored.
        self._name = "session:%s" % str(session_id)
    def load(self):
        """Merge previously stored session data (if any) into the session."""
        with Redis() as redis:
            if self._name in redis:
                self._session.update(redis.get(self._name))
    def save(self):
        """Persist the session to Redis; empty sessions are not written."""
        if len(self._session) > 0:
            with Redis() as redis:
                redis.set(self._name, self._session, self._expire)
    def clear(self):
        """Empty the session and delete its Redis key (best effort)."""
        self._session.clear()
        try:
            with Redis() as redis:
                redis.delete(self._name)
        except Exception:
            # Best-effort delete: failures are ignored on purpose.
            pass
| 40.015625 | 79 | 0.709879 |
f4b4f8d21283f407e08b9b9d07f07e3223bb3fd9 | 774 | py | Python | day21/puzzle1.py | amurciegorico/advent_of_code_2020 | d5ef6647359334e9de1c3082620a05dfb9c133ba | [
"CC0-1.0"
] | null | null | null | day21/puzzle1.py | amurciegorico/advent_of_code_2020 | d5ef6647359334e9de1c3082620a05dfb9c133ba | [
"CC0-1.0"
] | null | null | null | day21/puzzle1.py | amurciegorico/advent_of_code_2020 | d5ef6647359334e9de1c3082620a05dfb9c133ba | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python3
# Advent of Code 2020 day 21 part 1: count occurrences of ingredients that
# cannot possibly contain any allergen.
with open("./data/data1") as file:
    # allergen -> set of candidate ingredients that may contain it.
    allergens_ingredients = {}
    # Every ingredient occurrence across all food lines (with duplicates).
    all_ingredients = []
    for line in file.readlines():
        # Each line looks like: "a b c (contains dairy, fish)".
        split_line = line.split(' (contains ')
        ingredients = set(split_line[0].split(' '))
        for ingredient in ingredients:
            all_ingredients.append(ingredient)
        # [:-2] drops the closing ")" plus newline (assumes every line ends
        # with "\n" -- TODO confirm for the last line of the file).
        for allergen in split_line[1][:-2].split(', '):
            allergens_ingredients[allergen] = allergens_ingredients[allergen].intersection(ingredients) if allergen in allergens_ingredients else ingredients
    # Remove every occurrence of any possible allergen carrier, then count
    # the remaining (allergen-free) ingredient occurrences.
    for ingredients in allergens_ingredients.values():
        for ingredient in ingredients:
            while ingredient in all_ingredients:
                all_ingredients.remove(ingredient)
    print(len(all_ingredients))
| 43 | 157 | 0.678295 |
3b2296c128936170076afa139aab495c7a45360c | 83 | py | Python | apps/dashboard/apps.py | EltonARodrigues/NOIRr-Server | e73e6b9de763b073295adb78ff7844f8ed8c832c | [
"BSD-2-Clause"
] | null | null | null | apps/dashboard/apps.py | EltonARodrigues/NOIRr-Server | e73e6b9de763b073295adb78ff7844f8ed8c832c | [
"BSD-2-Clause"
] | 3 | 2019-07-03T20:48:40.000Z | 2021-04-08T20:12:00.000Z | apps/dashboard/apps.py | EltonARodrigues/NOIR-Server | e73e6b9de763b073295adb78ff7844f8ed8c832c | [
"BSD-2-Clause"
] | null | null | null | from django.apps import AppConfig
class NoirConfig(AppConfig):
    """Django application configuration for the 'noir' app."""
    # Importable name of the application module.
    name = 'noir'
| 13.833333 | 33 | 0.73494 |
7cb1847121ad801e7c35ad92d71df29941c7e4c8 | 917 | py | Python | esp8266/clock/main.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | 1 | 2019-07-23T07:14:07.000Z | 2019-07-23T07:14:07.000Z | esp8266/clock/main.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | null | null | null | esp8266/clock/main.py | JiangYangJie/Embedded | 70dba3a1e5c1fb7b9a7d8b633a5fc05138894456 | [
"MIT"
] | 2 | 2019-07-22T11:42:55.000Z | 2019-12-15T01:43:19.000Z | from machine import Pin, I2C
import time
from get_time import TIME
from printf import Printf
from ssd1306 import SSD1306_I2C
from wlan import Network
i2c = I2C(scl=Pin(5), sda=Pin(4))  # I2C bus on pins 5 (SCL) / 4 (SDA)
oled= SSD1306_I2C(128, 64, i2c)  # init the 128x64 OLED display
Time=TIME()  # init the custom TIME helper class
printf=Printf(oled)  # init the custom text-output helper class
network=Network()  # init the custom networking helper class
variable_x=variable_y=0  # current (x, y) position of the animated text
def variable_():  # simple animation effect for the "waiting" text
    """Advance the global (x, y) text position, wrapping past the display edges."""
    global variable_x, variable_y
    # Step right/down by 10 px; restart once past 128 (width) / 64 (height).
    variable_x = 0 if variable_x > 128 else variable_x + 10
    variable_y = 0 if variable_y > 64 else variable_y + 10
# Block until the network helper reports a connection, moving a waiting
# message around the display in the meantime.
while network.is_connected()==False:
    printf.clear()
    variable_()
    printf.en('wating...',variable_x,variable_y)  # NOTE(review): "wating" typo kept (runtime string)
    time.sleep(3)
printf.en('connection successful',1,30)
# Main loop: redraw the time (large) and date once a minute.
while True:
    year, month, day, hours, minute=Time.Now_time()
    printf.clear()
    printf.printf(hours+':'+minute,5,0)
    printf.en(year + '.' + month+'.'+day, 40, 40)
    time.sleep(60)
| 24.131579 | 51 | 0.685932 |
99b334251d6bb1cbd073e9e6b73e050b47f4c2f9 | 33,894 | py | Python | components/policy/tools/generate_policy_source.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-01-25T10:18:18.000Z | 2021-01-23T15:29:56.000Z | components/policy/tools/generate_policy_source.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | components/policy/tools/generate_policy_source.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-04T07:24:13.000Z | 2020-11-04T07:24:13.000Z | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''python %prog [options] platform chromium_os_flag template
platform specifies which platform source is being generated for
and can be one of (win, mac, linux)
chromium_os_flag should be 1 if this is a Chromium OS build
template is the path to a .json policy template file.'''
from __future__ import with_statement
from functools import partial
import json
from optparse import OptionParser
import re
import sys
import textwrap
import types
CHROME_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Google\\\\Chrome'
CHROMIUM_POLICY_KEY = 'SOFTWARE\\\\Policies\\\\Chromium'
class PolicyDetails:
  """Parses a policy template and caches all its details."""
  # Maps policy types to a tuple with 3 other types:
  # - the equivalent base::Value::Type or 'TYPE_EXTERNAL' if the policy
  #   references external data
  # - the equivalent Protobuf field type
  # - the name of one of the protobufs for shared policy types
  # TODO(joaodasilva): refactor the 'dict' type into a more generic 'json' type
  #     that can also be used to represent lists of other JSON objects.
  TYPE_MAP = {
    'dict': ('TYPE_DICTIONARY', 'string', 'String'),
    'external': ('TYPE_EXTERNAL', 'string', 'String'),
    'int': ('TYPE_INTEGER', 'int64', 'Integer'),
    'int-enum': ('TYPE_INTEGER', 'int64', 'Integer'),
    'list': ('TYPE_LIST', 'StringList', 'StringList'),
    'main': ('TYPE_BOOLEAN', 'bool', 'Boolean'),
    'string': ('TYPE_STRING', 'string', 'String'),
    'string-enum': ('TYPE_STRING', 'string', 'String'),
  }
  # One entry of an enum policy's 'items' list (caption + value).
  class EnumItem:
    def __init__(self, item):
      self.caption = PolicyDetails._RemovePlaceholders(item['caption'])
      self.value = item['value']
  def __init__(self, policy, os, is_chromium_os):
    """Cache the details of one policy dict from the template file."""
    self.id = policy['id']
    self.name = policy['name']
    self.is_deprecated = policy.get('deprecated', False)
    self.is_device_only = policy.get('device_only', False)
    self.schema = policy.get('schema', {})
    expected_platform = 'chrome_os' if is_chromium_os else os.lower()
    self.platforms = []
    # 'supported_on' entries look like "platform:version"; only open-ended
    # version ranges (ending in '-') count as currently supported.
    for platform, version in [ p.split(':') for p in policy['supported_on'] ]:
      if not version.endswith('-'):
        continue
      if platform.startswith('chrome.'):
        platform_sub = platform[7:]  # strip the 'chrome.' prefix (7 chars)
        if platform_sub == '*':
          self.platforms.extend(['win', 'mac', 'linux'])
        else:
          self.platforms.append(platform_sub)
      else:
        self.platforms.append(platform)
    self.platforms.sort()
    self.is_supported = expected_platform in self.platforms
    # NOTE: dict.has_key() (and the print statement in main) make this file
    # Python 2 only.
    if not PolicyDetails.TYPE_MAP.has_key(policy['type']):
      raise NotImplementedError('Unknown policy type for %s: %s' %
                                (policy['name'], policy['type']))
    self.policy_type, self.protobuf_type, self.policy_protobuf_type = \
        PolicyDetails.TYPE_MAP[policy['type']]
    self.schema = policy['schema']
    self.desc = '\n'.join(
        map(str.strip,
            PolicyDetails._RemovePlaceholders(policy['desc']).splitlines()))
    self.caption = PolicyDetails._RemovePlaceholders(policy['caption'])
    self.max_size = policy.get('max_size', 0)
    items = policy.get('items')
    if items is None:
      self.items = None
    else:
      self.items = [ PolicyDetails.EnumItem(entry) for entry in items ]
  # Matches a <ph> placeholder, capturing its body and an optional <ex> example.
  PH_PATTERN = re.compile('<ph[^>]*>([^<]*|[^<]*<ex>([^<]*)</ex>[^<]*)</ph>')
  # Simplistic grit placeholder stripper.
  @staticmethod
  def _RemovePlaceholders(text):
    """Replace each <ph> placeholder with its <ex> example when present,
    otherwise with the placeholder's raw body."""
    result = ''
    pos = 0
    for m in PolicyDetails.PH_PATTERN.finditer(text):
      result += text[pos:m.start(0)]
      result += m.group(2) or m.group(1)
      pos = m.end(0)
    result += text[pos:]
    return result
def main():
  """Parses command-line options and writes the requested generated files.

  Expects three positional arguments: platform, chromium_os flag ('1' for
  Chrome OS) and the template input file. Returns a process exit code.
  """
  parser = OptionParser(usage=__doc__)
  parser.add_option('--pch', '--policy-constants-header', dest='header_path',
                    help='generate header file of policy constants',
                    metavar='FILE')
  parser.add_option('--pcc', '--policy-constants-source', dest='source_path',
                    help='generate source file of policy constants',
                    metavar='FILE')
  parser.add_option('--cpp', '--cloud-policy-protobuf',
                    dest='cloud_policy_proto_path',
                    help='generate cloud policy protobuf file',
                    metavar='FILE')
  parser.add_option('--csp', '--chrome-settings-protobuf',
                    dest='chrome_settings_proto_path',
                    help='generate chrome settings protobuf file',
                    metavar='FILE')
  parser.add_option('--cpd', '--cloud-policy-decoder',
                    dest='cloud_policy_decoder_path',
                    help='generate C++ code decoding the cloud policy protobuf',
                    metavar='FILE')

  (opts, args) = parser.parse_args()

  if len(args) != 3:
    # Function-call form of print: identical behavior with a single argument
    # on Python 2, and forward-compatible with Python 3.
    print('exactly platform, chromium_os flag and input file must be '
          'specified.')
    parser.print_help()
    return 2

  os = args[0]
  is_chromium_os = args[1] == '1'
  template_file_name = args[2]

  template_file_contents = _LoadJSONFile(template_file_name)
  policy_details = [PolicyDetails(policy, os, is_chromium_os)
                    for policy in _Flatten(template_file_contents)]
  sorted_policy_details = sorted(policy_details, key=lambda policy: policy.name)

  def GenerateFile(path, writer, is_sorted=False):
    # |is_sorted| selects the name-sorted policy list for writers that rely
    # on ordering. (Renamed from |sorted|, which shadowed the builtin; the
    # old 'sorted and a or b' idiom is replaced by a conditional expression.)
    if path:
      with open(path, 'w') as f:
        _OutputGeneratedWarningHeader(f, template_file_name)
        writer(sorted_policy_details if is_sorted else policy_details, os, f)

  GenerateFile(opts.header_path, _WritePolicyConstantHeader, is_sorted=True)
  GenerateFile(opts.source_path, _WritePolicyConstantSource, is_sorted=True)
  GenerateFile(opts.cloud_policy_proto_path, _WriteCloudPolicyProtobuf)
  GenerateFile(opts.chrome_settings_proto_path, _WriteChromeSettingsProtobuf)
  GenerateFile(opts.cloud_policy_decoder_path, _WriteCloudPolicyDecoder)

  return 0
#------------------ shared helpers ---------------------------------#
def _OutputGeneratedWarningHeader(f, template_file_path):
f.write('//\n'
'// DO NOT MODIFY THIS FILE DIRECTLY!\n'
'// IT IS GENERATED BY generate_policy_source.py\n'
'// FROM ' + template_file_path + '\n'
'//\n\n')
# Shared wrapper used by _OutputComment(): prefixes every emitted line with
# '// ' and wraps at 80 columns. replace_whitespace is disabled so embedded
# formatting (e.g. indented JSON in schema comments) survives wrapping.
COMMENT_WRAPPER = textwrap.TextWrapper()
COMMENT_WRAPPER.width = 80
COMMENT_WRAPPER.initial_indent = '// '
COMMENT_WRAPPER.subsequent_indent = '// '
COMMENT_WRAPPER.replace_whitespace = False
# Writes a comment, each line prefixed by // and wrapped to 80 spaces.
def _OutputComment(f, comment):
  """Emits |comment| to |f| as C++ //-prefixed lines wrapped at 80 columns."""
  for line in comment.splitlines():
    # TextWrapper.fill() returns '' for empty input, so write the bare
    # prefix ourselves to keep blank comment lines visible.
    f.write('//' if not line else COMMENT_WRAPPER.fill(line))
    f.write('\n')
# Returns an iterator over all the policies in |template_file_contents|.
def _Flatten(template_file_contents):
for policy in template_file_contents['policy_definitions']:
if policy['type'] == 'group':
for sub_policy in policy['policies']:
yield sub_policy
else:
yield policy
def _LoadJSONFile(json_file):
  """Reads |json_file| and evaluates its contents as a Python literal.

  NOTE(review): eval() executes arbitrary Python. This is presumably only
  safe because the input is a trusted in-tree template file (the templates
  are not strict JSON, which would explain why json.load() is not used) —
  confirm before reusing this on untrusted input.
  """
  with open(json_file, 'r') as f:
    text = f.read()
  return eval(text)
#------------------ policy constants header ------------------------#
def _WritePolicyConstantHeader(policies, os, f):
  """Writes policy_constants.h: key-name constant declarations plus the
  schema accessor and PolicyDetails lookup declarations.

  |policies|: list of PolicyDetails; |os|: target platform name; |f|: open
  output file.
  """
  f.write('#ifndef CHROME_COMMON_POLICY_CONSTANTS_H_\n'
          '#define CHROME_COMMON_POLICY_CONSTANTS_H_\n'
          '\n'
          '#include <string>\n'
          '\n'
          '#include "base/basictypes.h"\n'
          '#include "base/values.h"\n'
          '#include "components/policy/core/common/policy_details.h"\n'
          '\n'
          'namespace policy {\n'
          '\n'
          'namespace internal {\n'
          'struct SchemaData;\n'
          '}\n\n')
  # The registry key is only meaningful on Windows builds.
  if os == 'win':
    f.write('// The windows registry path where Chrome policy '
            'configuration resides.\n'
            'extern const wchar_t kRegistryChromePolicyKey[];\n')
  f.write('// Returns the PolicyDetails for |policy| if |policy| is a known\n'
          '// Chrome policy, otherwise returns NULL.\n'
          'const PolicyDetails* GetChromePolicyDetails('
          'const std::string& policy);\n'
          '\n'
          '// Returns the schema data of the Chrome policy schema.\n'
          'const internal::SchemaData* GetChromeSchemaData();\n'
          '\n')
  f.write('// Key names for the policy settings.\n'
          'namespace key {\n\n')
  for policy in policies:
    # TODO(joaodasilva): Include only supported policies in
    # configuration_policy_handler.cc and configuration_policy_handler_list.cc
    # so that these names can be conditional on 'policy.is_supported'.
    # http://crbug.com/223616
    f.write('extern const char k' + policy.name + '[];\n')
  f.write('\n} // namespace key\n\n'
          '} // namespace policy\n\n'
          '#endif // CHROME_COMMON_POLICY_CONSTANTS_H_\n')
#------------------ policy constants source ------------------------#
# A mapping of the simple schema types to base::Value::Types.
# Used by SchemaNodesGenerator.GetSimpleType() when emitting shared nodes.
SIMPLE_SCHEMA_NAME_MAP = {
  'boolean': 'TYPE_BOOLEAN',
  'integer': 'TYPE_INTEGER',
  'null' : 'TYPE_NULL',
  'number' : 'TYPE_DOUBLE',
  'string' : 'TYPE_STRING',
}
class SchemaNodesGenerator:
  """Builds the internal structs to represent a JSON schema."""

  def __init__(self, shared_strings):
    """Creates a new generator.

    |shared_strings| is a map of strings to a C expression that evaluates to
    that string at runtime. This mapping can be used to reuse existing string
    constants."""
    self.shared_strings = shared_strings
    self.schema_nodes = []
    self.property_nodes = []
    self.properties_nodes = []
    self.restriction_nodes = []
    self.int_enums = []
    self.string_enums = []
    # Lazily-created shared nodes for each simple type; see GetSimpleType().
    self.simple_types = {
      'boolean': None,
      'integer': None,
      'null': None,
      'number': None,
      'string': None,
    }
    self.stringlist_type = None
    # Maps (first, second) restriction pairs to their node index, for reuse.
    self.ranges = {}
    # Maps schema 'id' attributes to resolved node indices; see
    # GenerateAndCollectID() and ResolveReferences().
    self.id_map = {}

  def GetString(self, s):
    """Returns a C expression (shared constant or literal) for string |s|."""
    if s in self.shared_strings:
      return self.shared_strings[s]
    # Generate JSON escaped string, which is slightly different from desired
    # C/C++ escaped string. Known differences includes unicode escaping format.
    return json.dumps(s)

  def AppendSchema(self, type, extra, comment=''):
    """Appends a new schema node and returns its index."""
    index = len(self.schema_nodes)
    self.schema_nodes.append((type, extra, comment))
    return index

  def AppendRestriction(self, first, second):
    """Appends a restriction node (or reuses an identical one) and returns
    its index."""
    r = (str(first), str(second))
    if r not in self.ranges:
      self.ranges[r] = len(self.restriction_nodes)
      self.restriction_nodes.append(r)
    return self.ranges[r]

  def GetSimpleType(self, name):
    """Returns the index of the shared node for unrestricted type |name|."""
    if self.simple_types[name] is None:
      self.simple_types[name] = self.AppendSchema(
          SIMPLE_SCHEMA_NAME_MAP[name],
          -1,
          'simple type: ' + name)
    return self.simple_types[name]

  def GetStringList(self):
    """Returns the index of the shared node for a list of strings."""
    if self.stringlist_type is None:
      self.stringlist_type = self.AppendSchema(
          'TYPE_LIST',
          self.GetSimpleType('string'),
          'simple type: stringlist')
    return self.stringlist_type

  def SchemaHaveRestriction(self, schema):
    """Whether |schema| carries any restriction keyword."""
    return any(keyword in schema for keyword in
               ['minimum', 'maximum', 'enum', 'pattern'])

  def IsConsecutiveInterval(self, seq):
    """Whether the integers in |seq| form a gap-free interval."""
    sortedSeq = sorted(seq)
    # range() instead of the Python-2-only xrange(); identical here.
    return all(sortedSeq[i] + 1 == sortedSeq[i + 1]
               for i in range(len(sortedSeq) - 1))

  def GetEnumIntegerType(self, schema, name):
    """Returns the index for an integer enum schema. Consecutive values are
    stored as a min/max range; otherwise the values go into kIntegerEnumerations
    and the restriction stores the [begin, end) offsets."""
    assert all(type(x) == int for x in schema['enum'])
    possible_values = schema['enum']
    if self.IsConsecutiveInterval(possible_values):
      index = self.AppendRestriction(max(possible_values), min(possible_values))
      return self.AppendSchema('TYPE_INTEGER', index,
          'integer with enumeration restriction (use range instead): %s' % name)
    offset_begin = len(self.int_enums)
    self.int_enums += possible_values
    offset_end = len(self.int_enums)
    return self.AppendSchema('TYPE_INTEGER',
        self.AppendRestriction(offset_begin, offset_end),
        'integer with enumeration restriction: %s' % name)

  def GetEnumStringType(self, schema, name):
    """Returns the index for a string enum schema; the values go into
    kStringEnumerations and the restriction stores [begin, end) offsets."""
    assert all(type(x) == str for x in schema['enum'])
    offset_begin = len(self.string_enums)
    self.string_enums += schema['enum']
    offset_end = len(self.string_enums)
    return self.AppendSchema('TYPE_STRING',
        self.AppendRestriction(offset_begin, offset_end),
        'string with enumeration restriction: %s' % name)

  def GetEnumType(self, schema, name):
    """Dispatches an 'enum' schema to the integer or string handler."""
    if len(schema['enum']) == 0:
      raise RuntimeError('Empty enumeration in %s' % name)
    elif schema['type'] == 'integer':
      return self.GetEnumIntegerType(schema, name)
    elif schema['type'] == 'string':
      return self.GetEnumStringType(schema, name)
    else:
      raise RuntimeError('Unknown enumeration type in %s' % name)

  def GetPatternType(self, schema, name):
    """Returns the index for a string schema restricted by a regex pattern.
    The pattern is stored in kStringEnumerations and referenced by a
    restriction whose begin == end == its index."""
    if schema['type'] != 'string':
      raise RuntimeError('Unknown pattern type in %s' % name)
    pattern = schema['pattern']
    # Try to compile the pattern to validate it, note that the syntax used
    # here might be slightly different from re2.
    # TODO(binjin): Add a python wrapper of re2 and use it here.
    re.compile(pattern)
    index = len(self.string_enums)
    self.string_enums.append(pattern)
    return self.AppendSchema('TYPE_STRING',
        self.AppendRestriction(index, index),
        'string with pattern restriction: %s' % name)

  def GetRangedType(self, schema, name):
    """Returns the index for an integer schema with minimum/maximum bounds.
    Missing bounds are stored as INT_MAX / INT_MIN sentinels."""
    if schema['type'] != 'integer':
      raise RuntimeError('Unknown ranged type in %s' % name)
    min_value_set, max_value_set = False, False
    if 'minimum' in schema:
      min_value = int(schema['minimum'])
      min_value_set = True
    if 'maximum' in schema:
      # BUG FIX: this previously read schema['minimum'], so every schema with
      # both bounds got max == min (and min-only schemas raised KeyError when
      # the validity check below compared the bounds).
      max_value = int(schema['maximum'])
      max_value_set = True
    if min_value_set and max_value_set and min_value > max_value:
      raise RuntimeError('Invalid ranged type in %s' % name)
    index = self.AppendRestriction(
        str(max_value) if max_value_set else 'INT_MAX',
        str(min_value) if min_value_set else 'INT_MIN')
    return self.AppendSchema('TYPE_INTEGER',
        index,
        'integer with ranged restriction: %s' % name)

  def Generate(self, schema, name):
    """Generates the structs for the given schema.

    |schema|: a valid JSON schema in a dictionary.
    |name|: the name of the current node, for the generated comments."""
    # 'in' instead of the deprecated dict.has_key() throughout.
    if '$ref' in schema:
      if 'id' in schema:
        raise RuntimeError("Schemas with a $ref can't have an id")
      if not isinstance(schema['$ref'], types.StringTypes):
        raise RuntimeError("$ref attribute must be a string")
      # Unresolved reference: returned as a string and fixed up later by
      # ResolveReferences().
      return schema['$ref']
    if schema['type'] in self.simple_types:
      if not self.SchemaHaveRestriction(schema):
        # Simple types use shared nodes.
        return self.GetSimpleType(schema['type'])
      elif 'enum' in schema:
        return self.GetEnumType(schema, name)
      elif 'pattern' in schema:
        return self.GetPatternType(schema, name)
      else:
        return self.GetRangedType(schema, name)

    if schema['type'] == 'array':
      # Special case for lists of strings, which is a common policy type.
      # The 'type' may be missing if the schema has a '$ref' attribute.
      if schema['items'].get('type', '') == 'string':
        return self.GetStringList()
      return self.AppendSchema('TYPE_LIST',
          self.GenerateAndCollectID(schema['items'], 'items of ' + name))
    elif schema['type'] == 'object':
      # Reserve an index first, so that dictionaries come before their
      # properties. This makes sure that the root node is the first in the
      # SchemaNodes array.
      index = self.AppendSchema('TYPE_DICTIONARY', -1)

      if 'additionalProperties' in schema:
        additionalProperties = self.GenerateAndCollectID(
            schema['additionalProperties'],
            'additionalProperties of ' + name)
      else:
        additionalProperties = -1

      # Properties must be sorted by name, for the binary search lookup.
      # Note that |properties| must be evaluated immediately, so that all the
      # recursive calls to Generate() append the necessary child nodes; if
      # |properties| were a generator then this wouldn't work.
      sorted_properties = sorted(schema.get('properties', {}).items())
      properties = [
          (self.GetString(key), self.GenerateAndCollectID(subschema, key))
          for key, subschema in sorted_properties ]

      pattern_properties = []
      for pattern, subschema in schema.get('patternProperties', {}).items():
        pattern_properties.append((self.GetString(pattern),
            self.GenerateAndCollectID(subschema, pattern)))

      begin = len(self.property_nodes)
      self.property_nodes += properties
      end = len(self.property_nodes)
      self.property_nodes += pattern_properties
      pattern_end = len(self.property_nodes)

      if index == 0:
        self.root_properties_begin = begin
        self.root_properties_end = end

      extra = len(self.properties_nodes)
      self.properties_nodes.append((begin, end, pattern_end,
          additionalProperties, name))

      # Set the right data at |index| now.
      self.schema_nodes[index] = ('TYPE_DICTIONARY', extra, name)
      return index
    else:
      assert False

  def GenerateAndCollectID(self, schema, name):
    """A wrapper of Generate(), will take the return value, check and add 'id'
    attribute to self.id_map. The wrapper needs to be used for every call to
    Generate().
    """
    index = self.Generate(schema, name)
    if 'id' not in schema:
      return index
    id_str = schema['id']
    if id_str in self.id_map:
      raise RuntimeError('Duplicated id: ' + id_str)
    self.id_map[id_str] = index
    return index

  def Write(self, f):
    """Writes the generated structs to the given file.

    |f| an open file to write to."""
    f.write('const internal::SchemaNode kSchemas[] = {\n'
            '// Type Extra\n')
    # |node_type| renamed from |type|, which shadowed the builtin.
    for node_type, extra, comment in self.schema_nodes:
      node_type += ','
      f.write(' { base::Value::%-18s %3d }, // %s\n' %
              (node_type, extra, comment))
    f.write('};\n\n')

    if self.property_nodes:
      f.write('const internal::PropertyNode kPropertyNodes[] = {\n'
              '// Property #Schema\n')
      for key, schema in self.property_nodes:
        key += ','
        f.write(' { %-50s %6d },\n' % (key, schema))
      f.write('};\n\n')

    if self.properties_nodes:
      f.write('const internal::PropertiesNode kProperties[] = {\n'
              '// Begin End PatternEnd Additional Properties\n')
      for node in self.properties_nodes:
        f.write(' { %5d, %5d, %10d, %5d }, // %s\n' % node)
      f.write('};\n\n')

    if self.restriction_nodes:
      f.write('const internal::RestrictionNode kRestrictionNodes[] = {\n')
      f.write('// FIRST, SECOND\n')
      for first, second in self.restriction_nodes:
        f.write(' {{ %-8s %4s}},\n' % (first + ',', second))
      f.write('};\n\n')

    if self.int_enums:
      f.write('const int kIntegerEnumerations[] = {\n')
      for possible_values in self.int_enums:
        f.write(' %d,\n' % possible_values)
      f.write('};\n\n')

    if self.string_enums:
      f.write('const char* kStringEnumerations[] = {\n')
      for possible_values in self.string_enums:
        f.write(' %s,\n' % self.GetString(possible_values))
      f.write('};\n\n')

    f.write('const internal::SchemaData kChromeSchemaData = {\n'
            ' kSchemas,\n')
    f.write(' kPropertyNodes,\n' if self.property_nodes else ' NULL,\n')
    f.write(' kProperties,\n' if self.properties_nodes else ' NULL,\n')
    f.write(' kRestrictionNodes,\n' if self.restriction_nodes else ' NULL,\n')
    f.write(' kIntegerEnumerations,\n' if self.int_enums else ' NULL,\n')
    f.write(' kStringEnumerations,\n' if self.string_enums else ' NULL,\n')
    f.write('};\n\n')

  def GetByID(self, id_str):
    """Maps a string $ref id to its resolved index; ints pass through."""
    if not isinstance(id_str, types.StringTypes):
      return id_str
    if id_str not in self.id_map:
      raise RuntimeError('Invalid $ref: ' + id_str)
    return self.id_map[id_str]

  def ResolveID(self, index, params):
    """Returns |params| with the element at |index| resolved via GetByID()."""
    return params[:index] + (self.GetByID(params[index]),) + params[index+1:]

  def ResolveReferences(self):
    """Resolve reference mapping, required to be called after Generate()

    After calling Generate(), the type of indices used in schema structures
    might be either int or string. An int type suggests that it's a resolved
    index, but for string type it's unresolved. Resolving a reference is as
    simple as looking up for corresponding ID in self.id_map, and replace the
    old index with the mapped index.
    """
    # List comprehensions instead of map(partial(...)): same result on
    # Python 2 and unambiguously a list on Python 3.
    self.schema_nodes = [self.ResolveID(1, n) for n in self.schema_nodes]
    self.property_nodes = [self.ResolveID(1, n) for n in self.property_nodes]
    self.properties_nodes = [self.ResolveID(3, n)
                             for n in self.properties_nodes]
def _WritePolicyConstantSource(policies, os, f):
  """Writes policy_constants.cc: key-name strings, the PolicyDetails table,
  the generated Chrome schema data and GetChromePolicyDetails().

  |policies| must be sorted by name: the binary search in the generated
  GetChromePolicyDetails() depends on it (see comments below).
  """
  f.write('#include "policy/policy_constants.h"\n'
          '\n'
          '#include <algorithm>\n'
          '#include <climits>\n'
          '\n'
          '#include "base/logging.h"\n'
          '#include "components/policy/core/common/schema_internal.h"\n'
          '\n'
          'namespace policy {\n'
          '\n'
          'namespace {\n'
          '\n')

  # Generate the Chrome schema.
  chrome_schema = {
    'type': 'object',
    'properties': {},
  }
  # Reuse the key::k<Name> constants for policy-name strings in the schema.
  shared_strings = {}
  for policy in policies:
    shared_strings[policy.name] = "key::k%s" % policy.name
    if policy.is_supported:
      chrome_schema['properties'][policy.name] = policy.schema

  # Note: this list must be kept in sync with the known property list of the
  # Chrome schema, so that binary searching in the PropertyNode array gets the
  # right index on this array as well. See the implementation of
  # GetChromePolicyDetails() below.
  f.write('const PolicyDetails kChromePolicyDetails[] = {\n'
          '// is_deprecated is_device_policy id max_external_data_size\n')
  for policy in policies:
    if policy.is_supported:
      f.write(' { %-14s %-16s %3s, %24s },\n' % (
                  'true,' if policy.is_deprecated else 'false,',
                  'true,' if policy.is_device_only else 'false,',
                  policy.id,
                  policy.max_size))
  f.write('};\n\n')

  schema_generator = SchemaNodesGenerator(shared_strings)
  schema_generator.GenerateAndCollectID(chrome_schema, 'root node')
  schema_generator.ResolveReferences()
  schema_generator.Write(f)

  f.write('bool CompareKeys(const internal::PropertyNode& node,\n'
          ' const std::string& key) {\n'
          ' return node.key < key;\n'
          '}\n\n')

  f.write('} // namespace\n\n')

  # The registry key is only emitted for Windows builds.
  if os == 'win':
    f.write('#if defined(GOOGLE_CHROME_BUILD)\n'
            'const wchar_t kRegistryChromePolicyKey[] = '
            'L"' + CHROME_POLICY_KEY + '";\n'
            '#else\n'
            'const wchar_t kRegistryChromePolicyKey[] = '
            'L"' + CHROMIUM_POLICY_KEY + '";\n'
            '#endif\n\n')

  f.write('const internal::SchemaData* GetChromeSchemaData() {\n'
          ' return &kChromeSchemaData;\n'
          '}\n\n')

  f.write('const PolicyDetails* GetChromePolicyDetails('
          'const std::string& policy) {\n'
          ' // First index in kPropertyNodes of the Chrome policies.\n'
          ' static const int begin_index = %s;\n'
          ' // One-past-the-end of the Chrome policies in kPropertyNodes.\n'
          ' static const int end_index = %s;\n' %
          (schema_generator.root_properties_begin,
           schema_generator.root_properties_end))
  f.write(' const internal::PropertyNode* begin =\n'
          ' kPropertyNodes + begin_index;\n'
          ' const internal::PropertyNode* end = kPropertyNodes + end_index;\n'
          ' const internal::PropertyNode* it =\n'
          ' std::lower_bound(begin, end, policy, CompareKeys);\n'
          ' if (it == end || it->key != policy)\n'
          ' return NULL;\n'
          ' // This relies on kPropertyNodes from begin_index to end_index\n'
          ' // having exactly the same policies (and in the same order) as\n'
          ' // kChromePolicyDetails, so that binary searching on the first\n'
          ' // gets the same results as a binary search on the second would.\n'
          ' // However, kPropertyNodes has the policy names and\n'
          ' // kChromePolicyDetails doesn\'t, so we obtain the index into\n'
          ' // the second array by searching the first to avoid duplicating\n'
          ' // the policy name pointers.\n'
          ' // Offsetting |it| from |begin| here obtains the index we\'re\n'
          ' // looking for.\n'
          ' size_t index = it - begin;\n'
          ' CHECK_LT(index, arraysize(kChromePolicyDetails));\n'
          ' return kChromePolicyDetails + index;\n'
          '}\n\n')

  f.write('namespace key {\n\n')
  for policy in policies:
    # TODO(joaodasilva): Include only supported policies in
    # configuration_policy_handler.cc and configuration_policy_handler_list.cc
    # so that these names can be conditional on 'policy.is_supported'.
    # http://crbug.com/223616
    f.write('const char k{name}[] = "{name}";\n'.format(name=policy.name))
  f.write('\n} // namespace key\n\n'
          '} // namespace policy\n')
#------------------ policy protobufs --------------------------------#
# Boilerplate at the top of the generated chrome_settings.proto; it imports
# cloud_policy.proto for the StringList and PolicyOptions messages.
CHROME_SETTINGS_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
// For StringList and PolicyOptions.
import "cloud_policy.proto";
'''

# Boilerplate at the top of the generated cloud_policy.proto, defining the
# shared StringList message, the PolicyOptions (mandatory/recommended/unset)
# wrapper and one *PolicyProto message per value type.
CLOUD_POLICY_PROTO_HEAD = '''
syntax = "proto2";
option optimize_for = LITE_RUNTIME;
package enterprise_management;
message StringList {
repeated string entries = 1;
}
message PolicyOptions {
enum PolicyMode {
// The given settings are applied regardless of user choice.
MANDATORY = 0;
// The user may choose to override the given settings.
RECOMMENDED = 1;
// No policy value is present and the policy should be ignored.
UNSET = 2;
}
optional PolicyMode mode = 1 [default = MANDATORY];
}
message BooleanPolicyProto {
optional PolicyOptions policy_options = 1;
optional bool value = 2;
}
message IntegerPolicyProto {
optional PolicyOptions policy_options = 1;
optional int64 value = 2;
}
message StringPolicyProto {
optional PolicyOptions policy_options = 1;
optional string value = 2;
}
message StringListPolicyProto {
optional PolicyOptions policy_options = 1;
optional StringList value = 2;
}
'''
# Field IDs [1..RESERVED_IDS] will not be used in the wrapping protobuf.
RESERVED_IDS = 2

def _WritePolicyProto(f, policy, fields):
  """Writes one per-policy message for chrome_settings.proto and appends the
  matching wrapper-field declaration to |fields|.

  |fields| is an output parameter: the caller later joins it into the big
  ChromeSettingsProto wrapper message.
  """
  _OutputComment(f, policy.caption + '\n\n' + policy.desc)
  if policy.items is not None:
    _OutputComment(f, '\nValid values:')
    for item in policy.items:
      _OutputComment(f, ' %s: %s' % (str(item.value), item.caption))
  if policy.policy_type == 'TYPE_DICTIONARY':
    # Include the JSON schema as documentation for dictionary policies.
    _OutputComment(f, '\nValue schema:\n%s' %
                   json.dumps(policy.schema, sort_keys=True, indent=4,
                              separators=(',', ': ')))
  _OutputComment(f, '\nSupported on: %s' % ', '.join(policy.platforms))
  f.write('message %sProto {\n' % policy.name)
  f.write(' optional PolicyOptions policy_options = 1;\n')
  f.write(' optional %s %s = 2;\n' % (policy.protobuf_type, policy.name))
  f.write('}\n\n')
  # Field numbers are offset by RESERVED_IDS in the wrapper message.
  fields += [ ' optional %sProto %s = %s;\n' %
              (policy.name, policy.name, policy.id + RESERVED_IDS) ]
def _WriteChromeSettingsProtobuf(policies, os, f):
  """Writes chrome_settings.proto: one message per policy plus the wrapper
  ChromeSettingsProto containing them all."""
  f.write(CHROME_SETTINGS_PROTO_HEAD)

  fields = []
  f.write('// PBs for individual settings.\n\n')
  for policy in policies:
    # Note: this protobuf also gets the unsupported policies, since it's an
    # exhaustive list of all the supported user policies on any platform.
    if not policy.is_device_only:
      # Collects the wrapper-field declarations into |fields| as it writes.
      _WritePolicyProto(f, policy, fields)

  f.write('// --------------------------------------------------\n'
          '// Big wrapper PB containing the above groups.\n\n'
          'message ChromeSettingsProto {\n')
  f.write(''.join(fields))
  f.write('}\n\n')
def _WriteCloudPolicyProtobuf(policies, os, f):
  """Writes cloud_policy.proto: one typed field per supported user policy
  inside the CloudPolicySettings message."""
  f.write(CLOUD_POLICY_PROTO_HEAD)
  f.write('message CloudPolicySettings {\n')
  for policy in policies:
    # Device-only policies are not delivered through the user cloud policy.
    if policy.is_supported and not policy.is_device_only:
      f.write(' optional %sPolicyProto %s = %s;\n' %
              (policy.policy_protobuf_type, policy.name,
               policy.id + RESERVED_IDS))
  f.write('}\n\n')
#------------------ protobuf decoder -------------------------------#
# C++ boilerplate emitted before the per-policy decoding code: includes,
# helper decoders (integer range check, string list, JSON) and the opening
# of DecodePolicy().
CPP_HEAD = '''
#include <limits>
#include <string>
#include "base/basictypes.h"
#include "base/callback.h"
#include "base/json/json_reader.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/weak_ptr.h"
#include "base/values.h"
#include "components/policy/core/common/cloud/cloud_external_data_manager.h"
#include "components/policy/core/common/external_data_fetcher.h"
#include "components/policy/core/common/policy_map.h"
#include "policy/policy_constants.h"
#include "policy/proto/cloud_policy.pb.h"
using google::protobuf::RepeatedPtrField;
namespace policy {
namespace em = enterprise_management;
base::Value* DecodeIntegerValue(google::protobuf::int64 value) {
if (value < std::numeric_limits<int>::min() ||
value > std::numeric_limits<int>::max()) {
LOG(WARNING) << "Integer value " << value
<< " out of numeric limits, ignoring.";
return NULL;
}
return base::Value::CreateIntegerValue(static_cast<int>(value));
}
base::ListValue* DecodeStringList(const em::StringList& string_list) {
base::ListValue* list_value = new base::ListValue;
RepeatedPtrField<std::string>::const_iterator entry;
for (entry = string_list.entries().begin();
entry != string_list.entries().end(); ++entry) {
list_value->Append(base::Value::CreateStringValue(*entry));
}
return list_value;
}
base::Value* DecodeJson(const std::string& json) {
scoped_ptr<base::Value> root(
base::JSONReader::Read(json, base::JSON_ALLOW_TRAILING_COMMAS));
if (!root)
LOG(WARNING) << "Invalid JSON string, ignoring: " << json;
// Accept any Value type that parsed as JSON, and leave it to the handler to
// convert and check the concrete type.
return root.release();
}
void DecodePolicy(const em::CloudPolicySettings& policy,
base::WeakPtr<CloudExternalDataManager> external_data_manager,
PolicyMap* map) {
'''

# Closes DecodePolicy() and the policy namespace opened in CPP_HEAD.
CPP_FOOT = '''}
} // namespace policy
'''
def _CreateValue(type, arg):
if type == 'TYPE_BOOLEAN':
return 'base::Value::CreateBooleanValue(%s)' % arg
elif type == 'TYPE_INTEGER':
return 'DecodeIntegerValue(%s)' % arg
elif type == 'TYPE_STRING':
return 'base::Value::CreateStringValue(%s)' % arg
elif type == 'TYPE_LIST':
return 'DecodeStringList(%s)' % arg
elif type == 'TYPE_DICTIONARY' or type == 'TYPE_EXTERNAL':
return 'DecodeJson(%s)' % arg
else:
raise NotImplementedError('Unknown type %s' % type)
def _CreateExternalDataFetcher(type, name):
if type == 'TYPE_EXTERNAL':
return 'new ExternalDataFetcher(external_data_manager, key::k%s)' % name
return 'NULL'
def _WritePolicyCode(f, policy):
  """Writes the C++ snippet that decodes one policy from the cloud policy
  protobuf into the PolicyMap, honoring its PolicyOptions mode."""
  membername = policy.name.lower()
  proto_type = '%sPolicyProto' % policy.policy_protobuf_type
  f.write(' if (policy.has_%s()) {\n' % membername)
  f.write(' const em::%s& policy_proto = policy.%s();\n' %
          (proto_type, membername))
  f.write(' if (policy_proto.has_value()) {\n')
  # Map PolicyOptions to a PolicyLevel; UNSET leaves do_set false and the
  # value is ignored.
  f.write(' PolicyLevel level = POLICY_LEVEL_MANDATORY;\n'
          ' bool do_set = true;\n'
          ' if (policy_proto.has_policy_options()) {\n'
          ' do_set = false;\n'
          ' switch(policy_proto.policy_options().mode()) {\n'
          ' case em::PolicyOptions::MANDATORY:\n'
          ' do_set = true;\n'
          ' level = POLICY_LEVEL_MANDATORY;\n'
          ' break;\n'
          ' case em::PolicyOptions::RECOMMENDED:\n'
          ' do_set = true;\n'
          ' level = POLICY_LEVEL_RECOMMENDED;\n'
          ' break;\n'
          ' case em::PolicyOptions::UNSET:\n'
          ' break;\n'
          ' }\n'
          ' }\n'
          ' if (do_set) {\n')
  f.write(' base::Value* value = %s;\n' %
          (_CreateValue(policy.policy_type, 'policy_proto.value()')))
  # TODO(bartfab): |value| == NULL indicates that the policy value could not be
  # parsed successfully. Surface such errors in the UI.
  f.write(' if (value) {\n')
  f.write(' ExternalDataFetcher* external_data_fetcher = %s;\n' %
          _CreateExternalDataFetcher(policy.policy_type, policy.name))
  f.write(' map->Set(key::k%s, level, POLICY_SCOPE_USER,\n' %
          policy.name)
  f.write(' value, external_data_fetcher);\n'
          ' }\n'
          ' }\n'
          ' }\n'
          ' }\n')
def _WriteCloudPolicyDecoder(policies, os, f):
  """Writes the C++ file implementing DecodePolicy() for all supported user
  policies."""
  f.write(CPP_HEAD)
  for policy in policies:
    # Device-only policies are not part of the user cloud policy proto.
    if policy.is_supported and not policy.is_device_only:
      _WritePolicyCode(f, policy)
  f.write(CPP_FOOT)
if __name__ == '__main__':
  # Propagate main()'s return code as the process exit status.
  sys.exit(main())
| 36.523707 | 80 | 0.634508 |
277ef75b0b8b20f5650fa66a778b872d5b71e013 | 6,567 | py | Python | setup.py | kiranvizru/psutil | 3b0a3419d93f0094abef2bc61315974958906001 | [
"BSD-3-Clause"
] | 2 | 2016-09-19T05:25:09.000Z | 2016-11-24T10:17:51.000Z | setup.py | grrrrrrrrr/psutil | 930d22ba69bb3e1e97f3a5dcb6916f520e97469e | [
"BSD-3-Clause"
] | null | null | null | setup.py | grrrrrrrrr/psutil | 930d22ba69bb3e1e97f3a5dcb6916f520e97469e | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2009 Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
import os
import sys
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup, Extension
# Directory containing this setup.py; used to locate README and package files.
HERE = os.path.abspath(os.path.dirname(__file__))
def get_version():
    """Extracts __version__ from psutil/__init__.py without importing the
    package (which would require the C extensions to be built)."""
    INIT = os.path.join(HERE, 'psutil/__init__.py')
    f = open(INIT, 'r')
    # try/finally rather than a 'with' block — presumably to stay importable
    # on Python 2.4/2.5, which the classifiers below claim to support;
    # confirm before modernizing.
    try:
        for line in f:
            if line.startswith('__version__'):
                # eval() of the quoted literal is trusted: the input is this
                # project's own source file.
                ret = eval(line.strip().split(' = ')[1])
                # Sanity-check the 'x.y.z' format.
                assert ret.count('.') == 2, ret
                for num in ret.split('.'):
                    assert num.isdigit(), ret
                return ret
        else:
            # for/else: only reached when no __version__ line was found.
            raise ValueError("couldn't find version string")
    finally:
        f.close()
def get_description():
    """Returns the contents of README.rst, used as the long description."""
    README = os.path.join(HERE, 'README.rst')
    f = open(README, 'r')
    # try/finally instead of 'with' — presumably for Python 2.4/2.5
    # compatibility, mirroring get_version(); confirm before changing.
    try:
        return f.read()
    finally:
        f.close()
# POSIX
if os.name == 'posix':
    # Extension shared by all POSIX platforms, built in addition to the
    # platform-specific extension selected below.
    posix_extension = Extension(
        '_psutil_posix',
        sources=['psutil/_psutil_posix.c'],
    )
# Platform-specific C extension selection. Exactly one branch runs; each
# binds |extensions| to the list of Extension objects passed to setup().
# Windows
if sys.platform.startswith("win32"):

    def get_winver():
        """Returns the Windows version in the hex form _WIN32_WINNT expects,
        e.g. (6, 1) -> '0x0601'."""
        # Renamed from maj/min: 'min' shadowed the builtin min().
        major, minor = sys.getwindowsversion()[0:2]
        return '0x0%s' % ((major * 100) + minor)

    extensions = [Extension(
        '_psutil_windows',
        sources=[
            'psutil/_psutil_windows.c',
            'psutil/_psutil_common.c',
            'psutil/arch/windows/process_info.c',
            'psutil/arch/windows/process_handles.c',
            'psutil/arch/windows/security.c',
        ],
        define_macros=[
            # be nice to mingw, see:
            # http://www.mingw.org/wiki/Use_more_recent_defined_functions
            ('_WIN32_WINNT', get_winver()),
            ('_AVAIL_WINVER_', get_winver()),
            # see: https://github.com/giampaolo/psutil/issues/348
            ('PSAPI_VERSION', 1),
        ],
        libraries=[
            "psapi", "kernel32", "advapi32", "shell32", "netapi32", "iphlpapi",
            "wtsapi32",
        ],
        # extra_compile_args=["/Z7"],
        # extra_link_args=["/DEBUG"]
    )]
# OS X
elif sys.platform.startswith("darwin"):
    extensions = [Extension(
        '_psutil_osx',
        sources=[
            'psutil/_psutil_osx.c',
            'psutil/_psutil_common.c',
            'psutil/arch/osx/process_info.c'
        ],
        extra_link_args=[
            '-framework', 'CoreFoundation', '-framework', 'IOKit'
        ],
    ),
        posix_extension,
    ]
# FreeBSD
elif sys.platform.startswith("freebsd"):
    extensions = [Extension(
        '_psutil_bsd',
        sources=[
            'psutil/_psutil_bsd.c',
            'psutil/_psutil_common.c',
            'psutil/arch/bsd/process_info.c'
        ],
        libraries=["devstat"]),
        posix_extension,
    ]
# Linux
elif sys.platform.startswith("linux"):
    extensions = [Extension(
        '_psutil_linux',
        sources=['psutil/_psutil_linux.c']),
        posix_extension,
    ]
# Solaris
elif sys.platform.lower().startswith('sunos'):
    extensions = [Extension(
        '_psutil_sunos',
        sources=['psutil/_psutil_sunos.c'],
        libraries=['kstat', 'nsl'],),
        posix_extension,
    ]
else:
    # Unsupported platform: fail early with a clear message.
    sys.exit('platform %s is not supported' % sys.platform)
def main():
    """Builds the setup() argument dict and runs setuptools/distutils."""
    setup_args = dict(
        name='psutil',
        version=get_version(),
        # __doc__ is this module's docstring; collapse it to one line.
        description=__doc__.replace('\n', '').strip(),
        long_description=get_description(),
        keywords=[
            'ps', 'top', 'kill', 'free', 'lsof', 'netstat', 'nice',
            'tty', 'ionice', 'uptime', 'taskmgr', 'process', 'df',
            'iotop', 'iostat', 'ifconfig', 'taskset', 'who', 'pidof',
            'pmap', 'smem', 'monitoring', 'ulimit', 'prlimit',
        ],
        author='Giampaolo Rodola',
        author_email='g.rodola <at> gmail <dot> com',
        url='https://github.com/giampaolo/psutil',
        platforms='Platform Independent',
        license='BSD',
        packages=['psutil'],
        # see: python setup.py register --list-classifiers
        classifiers=[
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Environment :: Win32 (MS Windows)',
            'Intended Audience :: Developers',
            'Intended Audience :: Information Technology',
            'Intended Audience :: System Administrators',
            'License :: OSI Approved :: BSD License',
            'Operating System :: MacOS :: MacOS X',
            'Operating System :: Microsoft :: Windows :: Windows NT/2000',
            'Operating System :: Microsoft',
            'Operating System :: OS Independent',
            'Operating System :: POSIX :: BSD :: FreeBSD',
            'Operating System :: POSIX :: Linux',
            'Operating System :: POSIX :: SunOS/Solaris',
            'Operating System :: POSIX',
            'Programming Language :: C',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.4',
            'Programming Language :: Python :: 2.5',
            'Programming Language :: Python :: 2.6',
            'Programming Language :: Python :: 2.7',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.0',
            'Programming Language :: Python :: 3.1',
            'Programming Language :: Python :: 3.2',
            'Programming Language :: Python :: 3.3',
            'Programming Language :: Python :: 3.4',
            'Programming Language :: Python :: Implementation :: CPython',
            'Programming Language :: Python :: Implementation :: PyPy',
            'Programming Language :: Python',
            'Topic :: Software Development :: Libraries :: Python Modules',
            'Topic :: Software Development :: Libraries',
            'Topic :: System :: Benchmark',
            'Topic :: System :: Hardware',
            'Topic :: System :: Monitoring',
            'Topic :: System :: Networking :: Monitoring',
            'Topic :: System :: Networking',
            'Topic :: System :: Systems Administration',
            'Topic :: Utilities',
        ],
    )
    # |extensions| is set by the platform-selection code above (or the
    # process exited with an unsupported-platform error).
    if extensions is not None:
        setup_args["ext_modules"] = extensions
    setup(**setup_args)
if __name__ == '__main__':
    # Script entry point; setup() handles its own exit codes.
    main()
| 33 | 79 | 0.554439 |
efa60157a887e1f7e16711ed830f4fa4e99e6a6f | 2,749 | py | Python | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/mongo_db_collection_create_update_parameters_py3.py | limingu/azure-cli-extensions | 1bc29f089f4da42ab8905e440f2f46d6b5b0aa97 | [
"MIT"
] | 2 | 2021-06-05T17:51:26.000Z | 2021-11-17T11:17:56.000Z | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/mongo_db_collection_create_update_parameters_py3.py | limingu/azure-cli-extensions | 1bc29f089f4da42ab8905e440f2f46d6b5b0aa97 | [
"MIT"
] | 1 | 2020-06-12T01:39:40.000Z | 2020-06-12T01:39:40.000Z | src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/models/mongo_db_collection_create_update_parameters_py3.py | anpaz-msft/azure-cli-extensions | 847fd487fe61e83f2a4163a9393edc9555267bc2 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .arm_resource_properties_py3 import ARMResourceProperties
class MongoDBCollectionCreateUpdateParameters(ARMResourceProperties):
    """Parameters to create and update Cosmos DB MongoDB collection.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    All required parameters must be populated in order to send to Azure.
    :ivar id: The unique resource identifier of the ARM resource.
    :vartype id: str
    :ivar name: The name of the ARM resource.
    :vartype name: str
    :ivar type: The type of Azure resource.
    :vartype type: str
    :param location: The location of the resource group to which the resource
    belongs.
    :type location: str
    :param tags:
    :type tags: dict[str, str]
    :param identity:
    :type identity: ~azure.mgmt.cosmosdb.models.ManagedServiceIdentity
    :param resource: Required. The standard JSON format of a MongoDB
    collection
    :type resource: ~azure.mgmt.cosmosdb.models.MongoDBCollectionResource
    :param options: Required. A key-value pair of options to be applied for
    the request. This corresponds to the headers sent with the request.
    :type options: ~azure.mgmt.cosmosdb.models.CreateUpdateOptions
    """
    # Constraints for each attribute: 'readonly' entries are server-populated
    # (see :ivar: docs above), 'required' entries must be supplied by callers.
    # NOTE(review): presumably consumed by the AutoRest/msrest serializer — confirm.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'resource': {'required': True},
        'options': {'required': True},
    }
    # Maps Python attribute names to wire-format JSON keys and declared types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'identity': {'key': 'identity', 'type': 'ManagedServiceIdentity'},
        'resource': {'key': 'properties.resource', 'type': 'MongoDBCollectionResource'},
        'options': {'key': 'properties.options', 'type': 'CreateUpdateOptions'},
    }
    def __init__(self, *, resource, options, location: str=None, tags=None, identity=None, **kwargs) -> None:
        # Delegate the common ARM fields to the base class, then attach the
        # MongoDB-collection-specific payload (resource + request options).
        super(MongoDBCollectionCreateUpdateParameters, self).__init__(location=location, tags=tags, identity=identity, **kwargs)
        self.resource = resource
        self.options = options
| 41.029851 | 128 | 0.631139 |
3ae62a9f06573318e341ba8412ebb82c34c4a9ca | 42,065 | py | Python | xframes/prettytable.py | cchayden/xframes | 1656cc69c814bda8132362b3a22f7cdf8a24637f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xframes/prettytable.py | cchayden/xframes | 1656cc69c814bda8132362b3a22f7cdf8a24637f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | xframes/prettytable.py | cchayden/xframes | 1656cc69c814bda8132362b3a22f7cdf8a24637f | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2009, Luke Maurits <luke@maurits.id.au>
# All rights reserved.
# With contributions from:
# * Chris Clark
# * Klein Stephane
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = "TRUNK"
import copy
import csv
import random
import textwrap
import itertools
import unicodedata
# Python 2 aliases: lazy map/zip and the wide-unicode chr builtin.
itermap = itertools.imap
iterzip = itertools.izip
uni_chr = unichr
from cgi import escape
# hrule styles: where horizontal rules are drawn in text output
FRAME = 0
ALL = 1
NONE = 2
# Table styles accepted by PrettyTable.set_style()
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
def _get_size(text):
    """Return (width, height) of *text*: the widest line's display width
    (as measured by _str_block_width) and the number of lines."""
    rows = text.split('\n')
    widest = max(_str_block_width(row) for row in rows)
    return widest, len(rows)
class PrettyTable(object):
def __init__(self, field_names=None, **kwargs):
"""
Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ('cap', 'title', 'upper', 'lower' or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order"""
if 'encoding' in kwargs:
self.encoding = kwargs['encoding']
else:
self.encoding = 'UTF-8'
# Data
self._field_names = []
self._align = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
self._rows = []
# Options
self._options = 'start end fields header border sortby reversesort sort_key attributes format hrules'.split()
self._options.extend('int_format float_format padding_width left_padding_width right_padding_width'.split())
self._options.extend('vertical_char horizontal_char junction_char header_style'.split())
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs['start'] or 0
self._end = kwargs['end'] or None
self._fields = kwargs['fields'] or None
self._header = kwargs['header'] or True
self._header_style = kwargs['header_style'] or None
self._border = kwargs['border'] or True
self._hrules = kwargs['hrules'] or FRAME
self._sortby = kwargs['sortby'] or None
self._reversesort = kwargs['reversesort'] or False
self._sort_key = kwargs['sort_key'] or (lambda x: x)
self._int_format = kwargs['int_format'] or {}
self._float_format = kwargs['float_format'] or {}
self._padding_width = kwargs['padding_width'] or 1
self._left_padding_width = kwargs['left_padding_width'] or None
self._right_padding_width = kwargs['right_padding_width'] or None
self._vertical_char = kwargs['vertical_char'] or self._unicode('|')
self._horizontal_char = kwargs['horizontal_char'] or self._unicode('-')
self._junction_char = kwargs['junction_char'] or self._unicode('+')
self._format = kwargs['format'] or False
self._attributes = kwargs['attributes'] or {}
self._hrule = None
def _unicode(self, value):
if not isinstance(value, basestring):
value = str(value)
if not isinstance(value, unicode):
value = unicode(value, self.encoding, 'strict')
return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == 'l':
return text + excess * ' '
elif align == 'r':
return excess * ' ' + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess // 2) * ' ' + text + (excess // 2 + 1) * ' '
# and more space on left if text is of even length
else:
return (excess // 2 + 1) * ' ' + text + (excess // 2) * ' '
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess // 2) * ' ' + text + (excess // 2) * ' '
def __getattr__(self, name):
if name == 'rowcount':
return len(self._rows)
elif name == 'colcount':
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
newtable = copy.deepcopy(self)
if isinstance(index, slice):
newtable._rows = self._rows[index]
elif isinstance(index, int):
newtable._rows = [self._rows[index], ]
else:
raise Exception('Index %s is invalid, must be an integer or slice' % str(index))
return newtable
    def __str__(self):
        # Python 2 str(): render the table and encode with the configured encoding.
        return self.__unicode__().encode(self.encoding)
    def __unicode__(self):
        # Full plain-text rendering using the table's current options.
        return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
# Firstly, in the property setters defined in the ATTRIBUTE MANAGMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
def _validate_option(self, option, val):
if option == 'field_names':
self._validate_field_names(val)
elif option in ('start', 'end', 'max_width', 'padding_width', 'left_padding_width',
'right_padding_width', 'format'):
self._validate_nonnegative_int(option, val)
elif option == 'sortby':
self._validate_field_name(option, val)
elif option == 'sort_key':
self._validate_function(option, val)
elif option == 'hrules':
self._validate_hrules(option, val)
elif option == 'fields':
self._validate_all_field_names(option, val)
elif option in ('header', 'border', 'reversesort'):
self._validate_true_or_false(option, val)
elif option == 'header_style':
self._validate_header_style(val)
elif option in ('vertical_char', 'horizontal_char', 'junction_char'):
self._validate_single_char(option, val)
elif option == 'attributes':
self._validate_attributes(option, val)
else:
raise Exception('Unrecognised option: %s!' % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' %
(len(val), len(self._field_names)))
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception('Field name list has incorrect number of values, (actual) %d!=%d (expected)' %
(len(val), len(self._rows[0])))
# Check for uniqueness
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception('Field names must be unique!')
def _validate_header_style(self, val):
try:
assert val in ('cap', 'title', 'upper', 'lower', None)
except AssertionError:
raise Exception('Invalid header style, use cap, title, upper, lower or None!')
def _validate_align(self, val):
try:
assert val in ['l', 'c', 'r']
except AssertionError:
raise Exception('Alignment %s is invalid, use l, c or r!' % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception('Invalid value for %s: %s!' % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception('Invalid value for %s! Must be True or False.' % name)
def _validate_int_format(self, name, val):
if val == '':
return
try:
assert type(val) in (str, unicode)
assert val.isdigit()
except AssertionError:
raise Exception('Invalid value for %s! Must be an integer format string.' % name)
def _validate_float_format(self, name, val):
if val == '':
return
try:
assert type(val) in (str, unicode)
assert '.' in val
bits = val.split('.')
assert len(bits) <= 2
assert bits[0] == '' or bits[0].isdigit()
assert bits[1] == '' or bits[1].isdigit()
except AssertionError:
raise Exception('Invalid value for %s! Must be a float format string.' % name)
def _validate_function(self, name, val):
try:
assert hasattr(val, '__call__')
except AssertionError:
raise Exception('Invalid value for %s! Must be a function.' % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception('Invalid value for %s! Must be ALL, FRAME or NONE.' % name)
def _validate_field_name(self, name, val):
try:
assert val in self._field_names
except AssertionError:
raise Exception('Invalid field name: %s!' % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception('fields must be a sequence of field names!')
def _validate_single_char(self, name, val):
try:
assert _str_block_width(val) == 1
except AssertionError:
raise Exception('Invalid value for %s! Must be a string of length 1.' % name)
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception('attributes must be a dictionary of name/value pairs!')
##############################
# ATTRIBUTE MANAGEMENT #
##############################
    def _get_field_names(self):
        """The names of the fields."""
        return self._field_names
    def _set_field_names(self, val):
        # Coerce every name to unicode, validate, then carry any existing
        # per-field alignments over from the old names to the new ones
        # (positionally); otherwise default every field to centred.
        val = [self._unicode(x) for x in val]
        self._validate_option('field_names', val)
        old_names = self._field_names[:] if self._field_names else None
        self._field_names = val
        if self._align and old_names:
            for old_name, new_name in zip(old_names, val):
                self._align[new_name] = self._align[old_name]
            for old_name in old_names:
                self._align.pop(old_name)
        else:
            for field in self._field_names:
                self._align[field] = 'c'
    field_names = property(_get_field_names, _set_field_names)
    def _get_align(self):
        """Mapping of field name to alignment ('l', 'c' or 'r')."""
        return self._align
    def _set_align(self, val):
        # Setting assigns one alignment value to every current field.
        self._validate_align(val)
        for field in self._field_names:
            self._align[field] = val
    align = property(_get_align, _set_align)
    def _get_max_width(self):
        """Mapping of field name to maximum column width."""
        return self._max_width
    def _set_max_width(self, val):
        # Setting assigns one maximum width to every current field.
        self._validate_option('max_width', val)
        for field in self._field_names:
            self._max_width[field] = val
    max_width = property(_get_max_width, _set_max_width)
    def _get_start(self):
        """Start index of the range of rows to print"""
        return self._start
    def _set_start(self, val):
        self._validate_option('start', val)
        self._start = val
    start = property(_get_start, _set_start)
    def _get_end(self):
        """End index of the range of rows to print"""
        return self._end
    def _set_end(self, val):
        self._validate_option('end', val)
        self._end = val
    end = property(_get_end, _set_end)
    def _get_sortby(self):
        """Name of field by which to sort rows"""
        return self._sortby
    def _set_sortby(self, val):
        self._validate_option('sortby', val)
        self._sortby = val
    sortby = property(_get_sortby, _set_sortby)
    def _get_reversesort(self):
        """Controls direction of sorting (ascending vs descending)"""
        return self._reversesort
    def _set_reversesort(self, val):
        self._validate_option('reversesort', val)
        self._reversesort = val
    reversesort = property(_get_reversesort, _set_reversesort)
    def _get_sort_key(self):
        """Sorting key function, applied to data points before sorting"""
        return self._sort_key
    def _set_sort_key(self, val):
        self._validate_option('sort_key', val)
        self._sort_key = val
    sort_key = property(_get_sort_key, _set_sort_key)
    def _get_header(self):
        """Controls printing of table header with field names"""
        return self._header
    def _set_header(self, val):
        self._validate_option('header', val)
        self._header = val
    header = property(_get_header, _set_header)
    def _get_header_style(self):
        """Controls stylization applied to field names in header"""
        return self._header_style
    def _set_header_style(self, val):
        self._validate_header_style(val)
        self._header_style = val
    header_style = property(_get_header_style, _set_header_style)
    def _get_border(self):
        """Controls printing of border around table"""
        return self._border
    def _set_border(self, val):
        self._validate_option('border', val)
        self._border = val
    border = property(_get_border, _set_border)
    def _get_hrules(self):
        """Controls printing of horizontal rules after rows"""
        return self._hrules
    def _set_hrules(self, val):
        self._validate_option('hrules', val)
        self._hrules = val
    hrules = property(_get_hrules, _set_hrules)
    def _get_int_format(self):
        """Controls formatting of integer data"""
        return self._int_format
    def _set_int_format(self, val):
        # NOTE(review): unlike the other setters, no validation is performed
        # here; the value is applied per existing field.
        for field in self._field_names:
            self._int_format[field] = val
    int_format = property(_get_int_format, _set_int_format)
    def _get_float_format(self):
        """Controls formatting of floating point data"""
        return self._float_format
    def _set_float_format(self, val):
        # NOTE(review): no validation here either (see _set_int_format).
        for field in self._field_names:
            self._float_format[field] = val
    float_format = property(_get_float_format, _set_float_format)
    def _get_padding_width(self):
        """The number of empty spaces between a column's edge and its content"""
        return self._padding_width
    def _set_padding_width(self, val):
        self._validate_option('padding_width', val)
        self._padding_width = val
    padding_width = property(_get_padding_width, _set_padding_width)
    def _get_left_padding_width(self):
        """The number of empty spaces between a column's left edge and its content"""
        return self._left_padding_width
    def _set_left_padding_width(self, val):
        self._validate_option("left_padding_width", val)
        self._left_padding_width = val
    left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
    def _get_right_padding_width(self):
        """The number of empty spaces between a column's right edge and its content"""
        return self._right_padding_width
    def _set_right_padding_width(self, val):
        self._validate_option("right_padding_width", val)
        self._right_padding_width = val
    right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
    def _get_vertical_char(self):
        """The character used when printing table borders to draw vertical lines"""
        return self._vertical_char
    def _set_vertical_char(self, val):
        val = self._unicode(val)
        self._validate_option('vertical_char', val)
        self._vertical_char = val
    vertical_char = property(_get_vertical_char, _set_vertical_char)
    def _get_horizontal_char(self):
        """The character used when printing table borders to draw horizontal lines"""
        return self._horizontal_char
    def _set_horizontal_char(self, val):
        val = self._unicode(val)
        self._validate_option('horizontal_char', val)
        self._horizontal_char = val
    horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
def _get_junction_char(self):
"""The charcter used when printing table borders to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
self._validate_option('vertical_char', val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
    def _get_format(self):
        """Controls whether or not HTML tables are formatted to match styling options"""
        return self._format
    def _set_format(self, val):
        self._validate_option('format', val)
        self._format = val
    format = property(_get_format, _set_format)
    def _get_attributes(self):
        """A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML"""
        return self._attributes
    def _set_attributes(self, val):
        self._validate_option('attributes', val)
        self._attributes = val
    attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, '_' + option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception('Invalid pre-set style!')
    def _set_default_style(self):
        # Classic ASCII look: full frame drawn with '+', '-' and '|'.
        self.header = True
        self.border = True
        self._hrules = FRAME
        self.padding_width = 1
        self.left_padding_width = 1
        self.right_padding_width = 1
        self.vertical_char = '|'
        self.horizontal_char = '-'
        self.junction_char = '+'
    def _set_msword_style(self):
        # Bordered but without horizontal rules (pastes cleanly into MS Word).
        self.header = True
        self.border = True
        self._hrules = NONE
        self.padding_width = 1
        self.left_padding_width = 1
        self.right_padding_width = 1
        self.vertical_char = '|'
    def _set_columns_style(self):
        # Borderless columns separated by wide right padding.
        self.header = True
        self.border = False
        self.padding_width = 1
        self.left_padding_width = 0
        self.right_padding_width = 8
    def _set_random_style(self):
        # Just for fun!
        self.header = random.choice((True, False))
        self.border = random.choice((True, False))
        self._hrules = random.choice((ALL, FRAME, NONE))
        self.left_padding_width = random.randint(0, 5)
        self.right_padding_width = random.randint(0, 5)
        self.vertical_char = random.choice('~!@#$%^&*()_+|-=\{}[];\':",./;<>?')
        self.horizontal_char = random.choice('~!@#$%^&*()_+|-=\{}[];\':",./;<>?')
        self.junction_char = random.choice('~!@#$%^&*()_+|-=\{}[];\':",./;<>?')
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
"""
Add a row to the table
Parameters
----------
row : list
row of data, should be a list with as many elements as the table has fields
"""
if self._field_names and len(row) != len(self._field_names):
raise Exception('Row has incorrect number of values, (actual) %d!=%d (expected)' %
(len(row), len(self._field_names)))
if not self._field_names:
self.field_names = [('Field %d' % (n + 1)) for n in range(0, len(row))]
self._rows.append(list(row))
def del_row(self, row_index):
"""
Delete a row to the table.
Parameters
----------
row_index : int
The index of the row you want to delete. Indexing starts at 0.
"""
if row_index > len(self._rows) - 1:
raise Exception('Cant delete row at index %d, table only has %d rows!' % (row_index, len(self._rows)))
del self._rows[row_index]
def add_column(self, fieldname, column, align='c'):
"""
Add a column to the table.
Parameters
----------
fieldname : str
name of the field to contain the new column of data
column : list
column of data, should be a list with as many elements as the table has rows
align : str
desired alignment for this column - 'l' for left, 'c' for centre and 'r' for right
"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._field_names.append(fieldname)
self._align[fieldname] = align
for i in range(0, len(column)):
if len(self._rows) < i + 1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception('Column length %d does not match number of rows %d!' % (len(column), len(self._rows)))
    def clear_rows(self):
        """Delete all rows from the table but keep the current field names"""
        # Rebinds (rather than mutates) the list, so external aliases keep the
        # old rows.
        self._rows = []
    def clear(self):
        """Delete all rows and field names from the table, maintaining nothing but styling options"""
        self._rows = []
        self._field_names = []
        self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
    def copy(self):
        """Return a deep copy of this table (rows, field names and options)."""
        return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(('{0:' + self._int_format[field] + '}').format(value))
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(('{0:' + self._float_format[field] + '}').format(value))
return self._unicode(value)
def _compute_widths(self, rows, options):
if options['header']:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(widths[index], min(_get_size(value)[0], self.max_width[fieldname]))
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options['start']:options['end']])
# Sort if necessary
if options['sortby']:
sortindex = self._field_names.index(options['sortby'])
# Decorate
rows = [[row[sortindex]] + row for row in rows]
# Sort
rows.sort(reverse=options['reversesort'], key=options['sort_key'])
# Undecorate
rows = [row[1:] for row in rows]
return rows
    def _format_row(self, row, options):
        """Return *row* with each value run through _format_value for its field."""
        return [self._format_value(field, value) for (field, value) in zip(self._field_names, row)]
    def _format_rows(self, rows, options):
        """Format every row in *rows* (the *options* argument is unused here)."""
        return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""
Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
"""
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0:
return ''
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options['header']:
lines.append(self._stringify_header(options))
elif options['border'] and options['hrules'] != NONE:
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options['border'] and not options['hrules']:
lines.append(self._hrule)
return self._unicode('\n').join(lines)
def _stringify_hrule(self, options):
if not options['border']:
return ''
lpad, rpad = self._get_padding_widths(options)
bits = [options['junction_char']]
for field, width in zip(self._field_names, self._widths):
if options['fields'] and field not in options['fields']:
continue
bits.append((width + lpad + rpad) * options['horizontal_char'])
bits.append(options['junction_char'])
return ''.join(bits)
    def _stringify_header(self, options):
        """Build the header section: an optional top rule, the styled field
        names padded/justified per column, and an optional rule underneath.
        Columns excluded by the 'fields' option are skipped."""
        bits = []
        lpad, rpad = self._get_padding_widths(options)
        if options['border']:
            # Top rule precedes the header row unless hrules is NONE.
            if options['hrules'] != NONE:
                bits.append(self._hrule)
                bits.append('\n')
            bits.append(options['vertical_char'])
        for field, width, in zip(self._field_names, self._widths):
            if options['fields'] and field not in options['fields']:
                continue
            # Apply the configured header_style transformation to the name.
            if self._header_style == 'cap':
                fieldname = field.capitalize()
            elif self._header_style == 'title':
                fieldname = field.title()
            elif self._header_style == 'upper':
                fieldname = field.upper()
            elif self._header_style == 'lower':
                fieldname = field.lower()
            else:
                fieldname = field
            bits.append(' ' * lpad + self._justify(fieldname, width, self._align[field]) + ' ' * rpad)
            if options['border']:
                bits.append(options['vertical_char'])
        # Rule below the header unless hrules is NONE.
        if options['border'] and options['hrules'] != NONE:
            bits.append('\n')
            bits.append(self._hrule)
        return ''.join(bits)
    def _stringify_row(self, row, options):
        """Render one (possibly multi-line) data row as text.
        NOTE: mutates *row* in place, rewrapping any cell line wider than its
        column via textwrap.fill before rendering."""
        for index, field, value, width, in zip(range(0, len(row)), self._field_names, row, self._widths):
            # Enforce max widths
            lines = value.split('\n')
            new_lines = []
            for line in lines:
                if _str_block_width(line) > width:
                    line = textwrap.fill(line, width)
                new_lines.append(line)
            lines = new_lines
            value = '\n'.join(lines)
            row[index] = value
        # Row height = tallest cell (in physical lines) after rewrapping.
        row_height = 0
        for c in row:
            h = _get_size(c)[1]
            if h > row_height:
                row_height = h
        # bits[y] collects the pieces of physical output line y.
        bits = []
        lpad, rpad = self._get_padding_widths(options)
        for y in range(0, row_height):
            bits.append([])
            if options['border']:
                bits[y].append(self.vertical_char)
        for field, value, width, in zip(self._field_names, row, self._widths):
            lines = value.split('\n')
            # Pad short cells with empty lines so every column spans row_height.
            if len(lines) < row_height:
                lines = lines + ([''] * (row_height - len(lines)))
            y = 0
            for l in lines:
                if options['fields'] and field not in options['fields']:
                    continue
                bits[y].append(' ' * lpad + self._justify(l, width, self._align[field]) + ' ' * rpad)
                if options['border']:
                    bits[y].append(self.vertical_char)
                y += 1
        # With hrules == ALL, each row is followed by a horizontal rule.
        if options['border'] and options['hrules'] == ALL:
            bits[row_height - 1].append('\n')
            bits[row_height - 1].append(self._hrule)
        for y in range(0, row_height):
            bits[y] = ''.join(bits[y])
        return '\n'.join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""
Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
"""
options = self._get_options(kwargs)
if options['format']:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
def _get_simple_html_string(self, options):
    """Render the table as minimal HTML with no inline styling."""
    markup = []
    # Assemble the opening <table ...> tag from its optional pieces.
    opening = ['<table']
    if options['border']:
        opening.append(' border="1"')
    if options['attributes']:
        for attr_name in options['attributes']:
            opening.append(' %s="%s"' % (attr_name, options['attributes'][attr_name]))
    opening.append('>')
    markup.append(''.join(opening))
    # Header row (optional).
    if options['header']:
        markup.append(' <tr>')
        for field in self._field_names:
            if options['fields'] and field not in options['fields']:
                continue
            markup.append(' <th>%s</th>' % escape(field).replace('\n', '<br />'))
        markup.append(' </tr>')
    # Data rows.
    formatted_rows = self._format_rows(self._get_rows(options), options)
    for row in formatted_rows:
        markup.append(' <tr>')
        for field, datum in zip(self._field_names, row):
            if options['fields'] and field not in options['fields']:
                continue
            markup.append(' <td>%s</td>' % escape(datum).replace('\n', '<br />'))
        markup.append(' </tr>')
    markup.append('</table>')
    return self._unicode('\n').join(markup)
def _get_formatted_html_string(self, options):
    """Render the table as HTML with inline padding and alignment styles."""
    markup = []
    lpad, rpad = self._get_padding_widths(options)
    # Assemble the opening <table ...> tag from its optional pieces.
    opening = ['<table']
    if options['border']:
        opening.append(' border="1"')
    if options['hrules'] == NONE:
        opening.append(' frame="vsides" rules="cols"')
    if options['attributes']:
        for attr_name in options['attributes']:
            opening.append(' %s="%s"' % (attr_name, options['attributes'][attr_name]))
    opening.append('>')
    markup.append(''.join(opening))
    # Header row (optional).
    if options['header']:
        markup.append(' <tr>')
        for field in self._field_names:
            if options['fields'] and field not in options['fields']:
                continue
            markup.append(' <th style="padding-left:' +
                          ' %dem; padding-right: %dem; text-align: center">%s</th>' %
                          (lpad, rpad, escape(field).replace('\n', '<br />')))
        markup.append(' </tr>')
    # Data rows.
    rows = self._get_rows(options)
    formatted_rows = self._format_rows(rows, options)
    # Translate the one-letter alignment codes to CSS keywords once, up front.
    css_aligns = [{'l': 'left', 'r': 'right', 'c': 'center'}[self._align[f]]
                  for f in self._field_names]
    for row in formatted_rows:
        markup.append(' <tr>')
        for field, datum, align in zip(self._field_names, row, css_aligns):
            if options['fields'] and field not in options['fields']:
                continue
            markup.append(' <td style="padding-left: %dem; padding-right: %dem; text-align: %s">%s</td>' %
                          (lpad, rpad, align, escape(datum).replace('\n', '<br />')))
        markup.append(' </tr>')
    markup.append('</table>')
    return self._unicode('\n').join(markup)
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
    """Return the terminal display width, in cells, of code point *char*.

    Returns 1 for ordinary narrow characters, 2 for wide (CJK) characters,
    0 for zero-width (combining / control) characters and -1 for backspace
    and delete.
    """
    # Basic Latin, which is probably the most common case
    if 0x0021 <= char <= 0x007e:
        return 1
    # Backspace and delete move the cursor back one cell.
    # (Hoisted above the combining-character test: these never combine, and
    # this avoids a needless unicodedata lookup for them.)
    if char in (0x0008, 0x007f):
        return -1
    # Other control characters occupy no cells.
    # Bug fix: this previously tested membership in the two-element tuple
    # (0x0000, 0x001f), so control characters 0x01-0x1e fell through and
    # were counted as width 1; the comment clearly intends the whole range.
    if 0x0000 <= char <= 0x001f:
        return 0
    # Chinese, Japanese, Korean (common)
    if 0x4e00 <= char <= 0x9fff:
        return 2
    # Hangul
    if 0xac00 <= char <= 0xd7af:
        return 2
    # Combining characters render inside the preceding cell.
    if unicodedata.combining(uni_chr(char)):
        return 0
    # Hiragana and Katakana
    if 0x3040 <= char <= 0x309f or 0x30a0 <= char <= 0x30ff:
        return 2
    # Full-width Latin characters
    if 0xff01 <= char <= 0xff60:
        return 2
    # CJK punctuation
    if 0x3000 <= char <= 0x303e:
        return 2
    # Take a guess
    return 1
def _str_block_width(val):
    """Return the total display width of string *val* in terminal cells."""
    return sum(_char_block_width(ord(char)) for char in val)
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names=None):
    """Build a PrettyTable from a CSV file-like object.

    If *field_names* is not given, the first CSV row is used as the header.
    """
    # Sniff the dialect from a sample, then rewind so the reader sees it all.
    sample = fp.read(1024)
    fp.seek(0)
    dialect = csv.Sniffer().sniff(sample)
    reader = csv.reader(fp, dialect)
    table = PrettyTable()
    if field_names:
        table.field_names = field_names
    else:
        table.field_names = [name.strip() for name in next(reader)]
    for record in reader:
        table.add_row([cell.strip() for cell in record])
    return table
def from_db_cursor(cursor):
    """Build a PrettyTable from an already-executed DB-API cursor."""
    table = PrettyTable()
    # Column names come from the first element of each description tuple.
    table.field_names = [description[0] for description in cursor.description]
    for record in cursor.fetchall():
        table.add_row(record)
    return table
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
    """Demo: build and print a small table of Australian cities."""
    table = PrettyTable(['City name', 'Area', 'Population', 'Annual Rainfall'])
    table.sortby = 'Population'
    table.reversesort = True
    table.int_format['Area'] = '04d'
    table.float_format = '6.1f'
    table.align['City name'] = 'l'  # Left align city names
    demo_rows = [
        ['Adelaide', 1295, 1158259, 600.5],
        ['Brisbane', 5905, 1857594, 1146.4],
        ['Darwin', 112, 120900, 1714.7],
        ['Hobart', 1357, 205556, 619.5],
        ['Sydney', 2058, 4336374, 1214.8],
        ['Melbourne', 1566, 3806092, 646.9],
        ['Perth', 5386, 1554769, 869.4],
    ]
    for row in demo_rows:
        table.add_row(row)
    print(table)
# Run the demo table when this module is executed as a script.
if __name__ == '__main__':
    main()
| 37.225664 | 118 | 0.59477 |
8446b38334e649304ec01d84f9a0c7646e7f8f82 | 7,658 | py | Python | git-mirror.py | ruben-rodriguez/git-mirroring-py | 84ac4cc53a8827c1ce2f9b2c679dcd3e3b735064 | [
"MIT"
] | null | null | null | git-mirror.py | ruben-rodriguez/git-mirroring-py | 84ac4cc53a8827c1ce2f9b2c679dcd3e3b735064 | [
"MIT"
] | 1 | 2021-06-01T22:24:18.000Z | 2021-06-01T22:24:18.000Z | git-mirror.py | ruben-rodriguez/git-mirroring-py | 84ac4cc53a8827c1ce2f9b2c679dcd3e3b735064 | [
"MIT"
] | null | null | null | from __future__ import print_function
from builtins import input
import time
import json
import os
import requests
import signal
import subprocess
import sys
def signal_handler(signum, frame):
    """Handle SIGINT: report which signal arrived, then exit with status 0."""
    received = signal.Signals(signum).name
    print("\t\n\nSignal", received, "received, exiting...\n")
    sys.exit(0)
def check_git():
    """Return True if a working ``git`` executable is available.

    Prints the detected git version on success, or the failure reason
    otherwise. Never raises.
    """
    print("\n\tChecking git version installed...")
    try:
        # Fix: use an argument list instead of shell=True — no shell parsing
        # is needed for a fixed command, and it avoids an extra process.
        output = subprocess.check_output(["git", "--version"],
                                         stderr=subprocess.STDOUT)
        print("\t", output.decode('ascii'))
        return True
    except (subprocess.CalledProcessError, OSError) as e:
        # OSError covers a missing git binary now that no shell is involved
        # (with shell=True that case surfaced as a CalledProcessError).
        print("\n\tCould not find git command on the system...")
        print("\t", e)
        return False
def check_dir(path):
    """Return True when *path* is an existing directory, printing the outcome."""
    print("\tChecking if ", path, "exists...")
    exists = os.path.isdir(path)
    if exists:
        print("\tDirectory '", path, "' exists")
    else:
        print("\tDirectory '", path, "' does not exist")
    return exists
def get_number_of_repos(username):
    """Return the number of public GitHub repositories owned by *username*.

    Side effects: caches the raw API response in ``user_data.json``.
    Exits the process on connection failure, non-200 response, or when the
    cache file cannot be written.
    """
    try:
        # Bug fix: the URL previously hard-coded the user 'ermus19' and
        # silently ignored the *username* argument.
        r = requests.get('https://api.github.com/users/' + username, timeout=10)
    except requests.exceptions.RequestException as e:
        print("\n\tException raised during connection:")
        print("\t", e)
        print("\tCheck System's connectivity and try again, exiting...\n\n")
        sys.exit(1)
    if r.status_code == 200:
        print("\tUser ", username, " data successfully collected!")
        try:
            with open('user_data.json', 'w') as response_data:
                json.dump(r.json(), response_data, indent=4)
        except IOError as e:
            print("Could not open the file to store data:")
            print(e)
            sys.exit(0)
        # Simplification: the parsed response is used directly; the old
        # json.dumps/json.loads round-trip was redundant.
        user_data = r.json()
        repos_len = user_data['public_repos']
        print("\tNumber of public repos found:", repos_len)
        return repos_len
    else:
        print("Could not collect user information...")
        sys.exit(0)
def get_repos_url(username, repos_len):
    """Return the HTML URLs of the first *repos_len* public repositories of
    *username*, printing them as it goes.

    Side effects: caches the raw API response in ``repos_data.json``.
    Exits the process on connection failure or a non-200 response.

    NOTE(review): the GitHub repos endpoint paginates (30 items per page by
    default), so repos_len > 30 would raise IndexError here — TODO confirm
    and add pagination if needed.
    """
    try:
        # Bug fix: the URL previously hard-coded the user 'ermus19' and
        # ignored *username*. A timeout was also added for consistency with
        # get_number_of_repos().
        r = requests.get('https://api.github.com/users/' + username + '/repos',
                         timeout=10)
    except requests.exceptions.RequestException as e:
        print("\n\tException raised during connection:")
        print("\t", e)
        print("\tCheck System's connectivity and try again\n\n")
        sys.exit(1)
    if r.status_code == 200:
        print("\tGitHub API data successfully collected for user", username, "!")
        try:
            with open('repos_data.json', 'w') as response_data:
                json.dump(r.json(), response_data, indent=4)
        except IOError:
            print("\t\nCould not open the file to store data...")
            sys.exit(0)
        # Simplification: use the parsed response directly instead of the
        # old json.dumps/json.loads round-trip.
        repositories = r.json()
        urls = []
        print("\n\tList of repositories urls: \n")
        for index in range(0, repos_len):
            url = repositories[index]['html_url']
            print("\t", url)
            urls.append(url)
        print("\n")
        return urls
    else:
        print("Could not connect to GitHub API server...")
        sys.exit(0)
def do_mirror(username, path):
    """Clones/pulls the remote repositories to the local path"""
    # Each repository under `path` is either updated (git pull --all) when a
    # clone already exists, or freshly cloned (git clone --recursive).
    # NOTE(review): repository names from the GitHub API are interpolated
    # into shell commands executed with shell=True; a crafted repo name
    # could inject shell syntax — consider list-form subprocess args.
    start_time = time.time()
    is_valid_dir = check_dir(path)
    if(is_valid_dir):
        repos_len = get_number_of_repos(username)
        if(repos_len > 0):
            repos_url = get_repos_url(username, repos_len)
            for repo in repos_url:
                # Last URL segment is the repository name.
                repo_split = repo.split('/')
                repo_name = repo_split[len(repo_split) - 1]
                # `git rev-parse` succeeds only inside an existing clone.
                git_check = 'git -C ' + path + '/' + repo_name + ' rev-parse'
                git_clone = 'git -C ' + path + ' clone --recursive ' + repo
                git_pull = 'git -C ' + path + '/' + repo_name + ' pull --all'
                print("\tWorking on " + repo_name + " repository...")
                try:
                    output = subprocess.check_output(git_check, shell=True, stderr=subprocess.STDOUT)
                    # Clone already present: refresh all branches.
                    print("\t" + repo_name + " repository already cloned, pulling...")
                    try:
                        output = subprocess.check_output(git_pull, shell=True, stderr=subprocess.STDOUT)
                        print("\t" + output.decode('UTF-8').replace('\n', '\n\t'))
                    except subprocess.CalledProcessError as e:
                        print("\tCould not execute git pull...")
                        print("\t", e)
                except subprocess.CalledProcessError as e:
                    # rev-parse failed: no clone yet, so create one.
                    print("\t" + repo_name + " repository not present, cloning...")
                    try:
                        output = subprocess.check_output(git_clone, shell=True, stderr=subprocess.STDOUT)
                        print("\t" + output.decode('UTF-8').replace('\n', '\n\t'))
                    except subprocess.CalledProcessError as e:
                        print("\tCould not execute git clone...")
                        print("\t", e)
                print("\n")
            end_time = time.time()
            elapsed_time = end_time - start_time
            print("\tExecuted in %.2f seconds\n" % elapsed_time )
        else:
            print("Found 0 public repositories for username", username,", exiting...")
            sys.exit(0)
    else:
        print("\n\t> Run the script with a valid path...")
        sys.exit(0)
if __name__ == '__main__':
    """Main function: args parsing and validation"""
    # Accepted interactive confirmations; '' means bare Enter counts as yes.
    yes_answer = {'yes', 'y', 'ys', 'ye', ''}
    no_answer = {'no', 'n'}
    # Install the Ctrl-C handler so long clone/pull runs exit cleanly.
    signal.signal(signal.SIGINT, signal_handler)
    is_git_installed = check_git()
    if(not(is_git_installed)):
        print("\n\tGit is not installed, exiting...")
        sys.exit(0)
    #Check for arguments length and content
    # Expected usage: git-mirror.py <GitHub username> <mirror path>
    if (len(sys.argv) == 3):
        github_username = sys.argv[1]
        system_mirror_path = sys.argv[2]
        # Echo the parameters back and ask for confirmation before mirroring.
        print("\n\tAre the following parameters correct?\n")
        print("\t\tGitHub username: ", github_username)
        print("\t\tSystem path to store mirrors:", system_mirror_path)
        print("\n\tAnswer[Yes/y/Y, No/n/N]: ", end='')
        answer = input().lower()
        if answer in yes_answer:
            print("\n\tProceeding to clone/pull public repositories of", github_username)
            do_mirror(
                github_username,
                system_mirror_path
            )
        elif answer in no_answer:
            print("\n\tRun again with proper arguments:")
            print("\n\tpython git-mirror.py <GitHub username> <System path to store mirror>\n\n")
            sys.exit(0)
        else:
            print("Please answer with 'yes' or 'no'")
            sys.exit(0)
    else:
        print("\n\tRun the script with more arguments!")
        print("\n\tpython git-mirror.py <GitHub username> <System path to store mirror>\n\n")
        sys.exit(0)
c998db13ae317c4d774ae191f207198ab6de6f46 | 988 | py | Python | catalogue/serializers.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | catalogue/serializers.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | 15 | 2021-01-02T17:43:37.000Z | 2021-02-13T12:02:11.000Z | catalogue/serializers.py | ezekieltech/eduTech-backend | 33b82f57add98285b73d89bc9d97f499cdb3f1e4 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from catalogue.models import Book, Author, BookInstance
class BookSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Book model."""
    class Meta:
        model = Book
        fields = '__all__'
class AuthorSerializer(serializers.ModelSerializer):
    """Serializes all fields of the Author model."""
    class Meta:
        model = Author
        fields = '__all__'
# class RenewBookFormSerializer(serializers.Serializer):
# date = serializers.DateField()
# def create(self, validated_data):
# return RenewBookForm(**validated_data)
# def update(self, instance, validated_data):
# """
# Update and return an existing `Snippet` instance, given the validated data.
# """
# instance.date = validated_data.get('date', instance.date)
# instance.save()
# return instance
class RenewBookFormSerializer(serializers.ModelSerializer):
    """Serializes the BookInstance fields needed for a loan-renewal form."""
    class Meta:
        model = BookInstance
        # is_overdue is presumably a model property rather than a DB column —
        # TODO confirm it is read-only on this serializer.
        fields = ['id','book', 'status','borrower','due_back', 'is_overdue']
6a2d2f548e7bfb36954f0ec0ef6f275ccfc9aa16 | 1,966 | py | Python | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 6 | 2017-10-03T15:10:14.000Z | 2020-08-06T06:39:14.000Z | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T21:13:13.000Z | 2017-02-09T21:13:13.000Z | resources/PTZgrid/calcInitialCond.py | sebalander/sebaPhD | 0260094bd5143843ef372ce52aceb568834f90f4 | [
"BSD-3-Clause"
] | 4 | 2017-02-09T19:46:00.000Z | 2019-11-21T12:47:55.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 20 20:21:33 2016
generate the camera's pose conditions by hand
@author: sebalander
"""
# %%
import cv2
import numpy as np
import numpy.linalg as lin
from scipy.linalg import sqrtm, inv
import matplotlib.pyplot as plt
# %%
tVecFile = "PTZsheetTvecInitial.npy"
rVecFile = "PTZsheetRvecInitial.npy"
# %% Initial TRASLATION VECTOR
tVec = np.array([[0], [0], [2.5]])
# %% ROTATION MATRIX
# center of image points to grid point:
center = np.array([3*0.21, 5*0.297, 0])
z = center - tVec[:,0]
z /= lin.norm(z)
# la tercera coordenada no la se, la dejo en cero
x = np.array([6*21, -1*29.7, 0])
y = np.array([-1*21, -7*29.7, 0])
# hacer que x,y sean perp a z, agregar la tercera componente
x = x - z * np.dot(x,z) # hago perpendicular a z
x /= lin.norm(x)
y = y - z * np.dot(y,z) # hago perpendicular a z
y /= lin.norm(y)
# %% test ortogonal
np.dot(x,z)
np.dot(y,z)
np.dot(x,y) # ok if not perfectly 0
# %% make into versor matrix
rMatrix = np.array([x,y,z])
# find nearest ortogonal matrix
# http://stackoverflow.com/questions/13940056/orthogonalize-matrix-numpy
rMatrix = rMatrix.dot(inv(sqrtm(rMatrix.T.dot(rMatrix))))
# %% SAVE PARAMETERS
# convert to rodrigues vector
rVec, _ = cv2.Rodrigues(rMatrix)
np.save(tVecFile, tVec)
np.save(rVecFile, rVec)
# %% PLOT VECTORS
[x,y,z] = rMatrix # get from ortogonal matrix
tvec = tVec[:,0]
fig = plt.figure()
from mpl_toolkits.mplot3d import Axes3D
ax = fig.gca(projection='3d')
ax.plot([0, tvec[0]],
[0, tvec[1]],
[0, tvec[2]])
ax.plot([tvec[0], tvec[0] + x[0]],
[tvec[1], tvec[1] + x[1]],
[tvec[2], tvec[2] + x[2]])
ax.plot([tvec[0], tvec[0] + y[0]],
[tvec[1], tvec[1] + y[1]],
[tvec[2], tvec[2] + y[2]])
ax.plot([tvec[0], tvec[0] + z[0]],
[tvec[1], tvec[1] + z[1]],
[tvec[2], tvec[2] + z[2]])
#ax.legend()
#ax.set_xlim3d(0, 1)
#ax.set_ylim3d(0, 1)
#ax.set_zlim3d(0, 1)
plt.show() | 21.844444 | 72 | 0.61648 |
f5db04cda83acf05b95f49c6589e5ad2c025edea | 12,816 | py | Python | src/parsing/ast_visitor.py | junzew/yinyang | 15f1460780fe34ab62b4d38b8a8ce0735f77c435 | [
"MIT"
] | null | null | null | src/parsing/ast_visitor.py | junzew/yinyang | 15f1460780fe34ab62b4d38b8a8ce0735f77c435 | [
"MIT"
] | null | null | null | src/parsing/ast_visitor.py | junzew/yinyang | 15f1460780fe34ab62b4d38b8a8ce0735f77c435 | [
"MIT"
] | null | null | null | from antlr4 import *
from .ast import *
from .SMTLIBv2Parser import SMTLIBv2Parser
from .SMTLIBv2Visitor import *
from .util import *
class ASTVisitor(SMTLIBv2Visitor):
    """Builds this project's AST (Script / command / term nodes) from an
    ANTLR SMT-LIBv2 parse tree.

    Declared global symbols are recorded in ``self.global_vars`` (name ->
    sort string) so identifiers occurring in later terms can be resolved to
    typed ``Var`` nodes.
    """

    def __init__(self, strict=True):
        # strict=False: commands that fail to translate are wrapped verbatim
        # in an SMTLIBCommand instead of raising.
        self.strict = strict
        self.global_vars = {}

    def visitStart(self, ctx: SMTLIBv2Parser.StartContext):
        """Entry point: the start rule wraps a single script."""
        return self.visitScript(ctx.script())

    def visitScript(self, ctx: SMTLIBv2Parser.ScriptContext):
        """Translate every command and collect them into a Script node."""
        cmds = []
        for c in ctx.command():
            cmds.append(self.visitCommand(c))
        return Script(cmds, self.global_vars)

    def add_to_globals(self, identifier, input_sorts, output_sort):
        """Record a declared symbol's sort signature in global_vars."""
        if len(input_sorts) == 0:
            self.global_vars[identifier] = output_sort
        else:
            self.global_vars[identifier] = input_sorts + " " + output_sort

    def handleCommand(self, ctx: SMTLIBv2Parser.CommandContext):
        """Translate one SMT-LIB command; returns None for unsupported kinds."""
        if ctx.cmd_assert():
            return Assert(self.visitTerm(ctx.term()[0]))
        if ctx.cmd_assertSoft():
            attr = []
            for a in ctx.attribute():
                attr.append(self.visitAttribute(a))
            return AssertSoft(self.visitTerm(ctx.term()[0]), attr)
        if ctx.cmd_simplify():
            attr = []
            for a in ctx.attribute():
                attr.append(self.visitAttribute(a))
            return Simplify(self.visitTerm(ctx.term()[0]), attr)
        if ctx.cmd_minimize():
            return Minimize(self.visitTerm(ctx.term()[0]))
        if ctx.cmd_maximize():
            return Maximize(self.visitTerm(ctx.term()[0]))
        if ctx.cmd_display():
            return Display(self.visitTerm(ctx.term()[0]))
        if ctx.cmd_eval():
            return Eval(self.visitTerm(ctx.term()[0]))
        if ctx.cmd_declareConst():
            self.global_vars[self.visitSymbol(ctx.symbol()[0])] = self.visitSort(ctx.sort()[0])
            decl = DeclareConst(self.visitSymbol(ctx.symbol()[0]), self.visitSort(ctx.sort()[0]))
            return decl
        if ctx.cmd_declareFun():
            # All sorts but the last are argument sorts; the last is the range.
            input_sorts = []
            for sort in ctx.sort()[:-1]:
                input_sorts.append(self.visitSort(sort))
            output_sort = self.visitSort(ctx.sort()[-1])
            input_sorts = " ".join(input_sorts)
            identifier = self.visitSymbol(ctx.symbol()[0])
            self.add_to_globals(identifier, input_sorts, output_sort)
            return DeclareFun(identifier, input_sorts, output_sort)
        if ctx.cmd_define():
            return Define(self.visitSymbol(ctx.symbol()[0]), self.visitTerm(ctx.term()[0]))
        if ctx.cmd_defineConst():
            return DefineConst(self.visitSymbol(ctx.symbol()[0]),
                               self.visitSort(ctx.sort()[0]),
                               self.visitTerm(ctx.term()[0]))
        if ctx.cmd_defineFun():
            sorted_vars = []
            for var in ctx.function_def().sorted_var():
                sorted_vars.append(self.visitSorted_var(var))
            identifier = self.visitSymbol(ctx.function_def().symbol())
            sorted_vars = " ".join(sorted_vars)
            self.add_to_globals(identifier, sorted_vars, self.visitSort(ctx.function_def().sort()))
            return DefineFun(identifier,
                             sorted_vars,
                             self.visitSort(ctx.function_def().sort()),
                             self.visitTerm(ctx.function_def().term()))
        if ctx.cmd_defineFunRec():
            sorted_vars = []
            for var in ctx.function_def().sorted_var():
                sorted_vars.append(self.visitSorted_var(var))
            return DefineFunRec(self.visitSymbol(ctx.function_def().symbol()),
                                sorted_vars,
                                self.visitSort(ctx.function_def().sort()),
                                self.visitTerm(ctx.function_def().term()))
        if ctx.cmd_defineFunsRec():
            decls = []
            for decl in ctx.function_dec():
                decls.append(self.visitFunction_dec(decl))
            terms = []
            for term in ctx.term():
                terms.append(self.visitTerm(term))
            return DefineFunsRec(decls, terms)
        if ctx.cmd_checkSat():
            terms = []
            for t in ctx.term():
                terms.append(self.visitTerm(t))
            if len(terms) > 0:
                return CheckSat(terms)
            return CheckSat()
        if ctx.cmd_checkSatAssuming():
            terms = []
            for t in ctx.term():
                terms.append(self.visitTerm(t))
            return CheckSatAssuming(terms)
        if ctx.cmd_getValue():
            terms = []
            for t in ctx.term():
                terms.append(self.visitTerm(t))
            return GetValue(terms)

    def visitFunction_dec(self, ctx: SMTLIBv2Parser.Function_decContext):
        """Translate a (symbol (sorted_var*) sort) function declaration."""
        sorted_vars = []
        for var in ctx.sorted_var():
            sorted_vars.append(self.visitSorted_var(var))
        return FunDecl(self.visitSymbol(ctx.symbol()),
                       sorted_vars,
                       self.visitSort(ctx.sort()))

    def visitSorted_var(self, ctx: SMTLIBv2Parser.Sorted_varContext):
        """Render a sorted variable back to '(name Sort)' text."""
        return "(" + self.visitSymbol(ctx.symbol()) + " " + self.visitSort(ctx.sort()) + ")"

    def getString(self, ctx):
        """Return the exact source text spanned by *ctx*."""
        start, stop = ctx.start.start, ctx.stop.stop
        return ctx.start.getInputStream().getText(start, stop)

    def visitCommand(self, ctx: SMTLIBv2Parser.CommandContext):
        """Translate a command; in non-strict mode fall back to a verbatim
        SMTLIBCommand whenever translation fails or is unsupported."""
        if not self.strict:
            try:
                cmd = self.handleCommand(ctx)
            except Exception:
                # Bug fix: was a bare ``except:`` (also caught SystemExit /
                # KeyboardInterrupt); unused cmd_str temporaries removed.
                return SMTLIBCommand(self.getString(ctx))
            if not cmd:
                return SMTLIBCommand(self.getString(ctx))
            return cmd
        cmd = self.handleCommand(ctx)
        if not cmd:
            return SMTLIBCommand(self.getString(ctx))
        return cmd

    def visitAttribute(self, ctx: SMTLIBv2Parser.AttributeContext):
        """Return an attribute as a (keyword, value) text pair."""
        return (ctx.keyword().getText(), ctx.attribute_value().getText())

    def handle_quantifier(self, ctx: SMTLIBv2Parser.TermContext, quant, local_vars=None):
        """Build a Quantifier node, extending *local_vars* with the bound
        variables before visiting the subterms."""
        # Bug fix: mutable default argument ({}) leaked bindings across calls.
        if local_vars is None:
            local_vars = {}
        subterms = []
        qvars = []
        qtypes = []
        for i in range(len(ctx.sorted_var())):
            qvar = self.visitSymbol(ctx.sorted_var()[i].symbol())
            qtype = self.visitSort(ctx.sorted_var()[i].sort())
            local_vars[qvar] = qtype
            qvars.append(qvar)
            qtypes.append(qtype)
        for t in ctx.term():
            subterms.append(self.visitTerm(t, local_vars))
        return Quantifier(quant, (qvars, qtypes), subterms)

    """
    spec_constant
        : numeral
        | decimal
        | hexadecimal
        | binary
        | string
        | b_value
        | ParOpen GRW_Underscore ' bv' numeral numeral ParClose
        ;
    """
    def visitSpec_constant(self, ctx: SMTLIBv2Parser.Spec_constantContext):
        """Render a constant; indexed bit-vector literals become '(_ bvX n)'."""
        if ctx.ParOpen():
            X, n = ctx.numeral()[0].getText(), ctx.numeral()[1].getText()
            return "(_ bv" + X + " " + n + ")"
        return ctx.getText()

    """
    term
        : spec_constant
        | qual_identifier
        | ParOpen qual_identifier term+ ParClose
        | ParOpen GRW_Let ParOpen var_binding+ ParClose term ParClose
        | ParOpen GRW_Forall ParOpen sorted_var+ ParClose term ParClose
        | ParOpen GRW_Exists ParOpen sorted_var+ ParClose term ParClose
        | ParOpen GRW_Match term ParOpen match_case+ ParClose ParClose
        | ParOpen GRW_Exclamation term attribute+ ParClose
        ;
    """
    def visitTerm(self, ctx: SMTLIBv2Parser.TermContext, local_vars=None):
        """Translate a term, resolving identifiers through *local_vars* and
        the global declarations."""
        # Bug fix: the mutable default ({}) was mutated by the let-binding
        # and quantifier branches, leaking bindings across unrelated
        # top-level terms.
        if local_vars is None:
            local_vars = {}
        if ctx.ParOpen() and ctx.GRW_Exclamation() and ctx.term()\
                and len(ctx.attribute()) >= 1 and ctx.ParClose():
            term, label = self.visitTerm(ctx.term()[0]), self.visitAttribute(ctx.attribute()[0])
            return LabeledTerm(label, [term])
        if len(ctx.ParOpen()) == 2 and ctx.GRW_Match() and ctx.term() and len(ctx.match_case()) >= 1 and\
                len(ctx.ParClose()) == 2:
            raise ASTException("ParOpen GRW_Match term ParOpen match_case+ ParClose ParClose")
        if len(ctx.ParOpen()) == 2 and ctx.GRW_Exists() and len(ctx.sorted_var()) >= 1 and\
                len(ctx.ParClose()) == 2 and ctx.term():
            return self.handle_quantifier(ctx, "exists", local_vars)
        if len(ctx.ParOpen()) == 2 and ctx.GRW_Forall() and len(ctx.sorted_var()) >= 1 and\
                len(ctx.ParClose()) == 2 and ctx.term():
            return self.handle_quantifier(ctx, "forall", local_vars)
        if len(ctx.ParOpen()) == 2 and ctx.GRW_Let() and ctx.var_binding() and\
                len(ctx.ParClose()) == 2 and ctx.term():
            terms = []
            var_list = []
            for b in ctx.var_binding():
                # Bound-variable sorts are not inferred here; mark Unknown.
                local_vars[self.visitSymbol(b.symbol())] = "Unknown"
                var_list.append(self.visitSymbol(b.symbol()))
                terms.append(self.visitTerm(b.term(), local_vars))
            subterms = []
            for sub in ctx.term():
                subterms.append(self.visitTerm(sub, local_vars=local_vars))
            return LetBinding(var_list, terms, subterms=subterms)
        if ctx.ParOpen() and ctx.qual_identifier() and len(ctx.term()) >= 1 and ctx.ParClose():
            op = self.visitQual_identifier(ctx.qual_identifier())
            subterms = []
            for term in ctx.term():
                subterms.append(self.visitTerm(term, local_vars))
            return Expr(op=op, subterms=subterms)
        if ctx.spec_constant():
            name = self.visitSpec_constant(ctx.spec_constant())
            return Const(name=name)
        if ctx.qual_identifier():
            return self.visitQual_identifier(ctx.qual_identifier(), local_vars)
        raise ASTException("No match for term : ... |... |... ")

    """
    qual_identifier
        : identifier
        | ParOpen GRW_As identifier sort ParClose [OK]
        ;
    """
    def visitQual_identifier(self, ctx: SMTLIBv2Parser.Qual_identifierContext, local_vars=None):
        # Bug fix: mutable default argument replaced by a None sentinel.
        if local_vars is None:
            local_vars = {}
        if ctx.ParOpen() and ctx.GRW_As() and ctx.identifier() and ctx.sort() and\
                ctx.ParClose():
            raise ASTException("ParOpen GRW_As identifier sort ParClose")
        if ctx.identifier():
            return self.visitIdentifier(ctx.identifier(), local_vars)
        raise ASTException("No match for qual_identifier: ... |... |... ")

    def visitSimpleSymbol(self, ctx: SMTLIBv2Parser.SimpleSymbolContext):
        return ctx.getText()

    def visitQuotedSymbol(self, ctx: SMTLIBv2Parser.QuotedSymbolContext):
        return ctx.getText()

    """
    symbol
        : simpleSymbol
        | quotedSymbol
        ;
    """
    def visitSymbol(self, ctx: SMTLIBv2Parser.SymbolContext):
        if ctx.simpleSymbol():
            return self.visitSimpleSymbol(ctx.simpleSymbol())
        if ctx.quotedSymbol():
            return self.visitQuotedSymbol(ctx.quotedSymbol())

    """
    identifier
        : symbol
        | ParOpen GRW_Underscore symbol index+ ParClose
        ;
    """
    def visitIdentifier(self, ctx: SMTLIBv2Parser.IdentifierContext, local_vars=None):
        """Resolve an identifier to a typed Var when it is a known local or
        global variable; otherwise return its plain text."""
        # Bug fix: the default was a mutable *list* ([]); use a fresh dict so
        # membership tests and lookups behave consistently.
        if local_vars is None:
            local_vars = {}
        if ctx.ParOpen() and ctx.GRW_Underscore() and ctx.symbol() and len(ctx.index()) >= 1 and\
                ctx.ParClose():
            symbol = self.visitSymbol(ctx.symbol())
            index = ctx.index()[0].getText()
            for ind in ctx.index()[1:]:
                index += " " + ind.getText()
            name = "(_ " + symbol + " " + index + ")"
            if name in local_vars:
                return Var(name=name, type=local_vars[name], is_indexed_id=True)
            elif name in self.global_vars:
                return Var(name=name, type=self.global_vars[name], is_indexed_id=True)
            else:
                return name
        if ctx.symbol():
            name = self.visitSymbol(ctx.symbol())
            if name in local_vars:
                return Var(name=name, type=local_vars[name])
            elif name in self.global_vars:
                return Var(name=name, type=self.global_vars[name])
            else:
                return self.visitSymbol(ctx.symbol())
        raise ASTException("No match for identifier: ... |... |... ")

    def visitTerminal(self, ctx):
        return ctx.getText()

    def visitSort(self, ctx: SMTLIBv2Parser.SortContext):
        """Render a (possibly parametric) sort back to its textual form."""
        if len(ctx.sort()) >= 1:
            s = "(" + self.visitIdentifier(ctx.identifier())
            for sort in ctx.sort():
                s += " " + self.visitSort(sort)
            return s + ")"
        return self.visitIdentifier(ctx.identifier())
| 38.719033 | 105 | 0.575218 |
30924c554f8e8f8083e8b2e70ba3e56c975e515a | 4,056 | py | Python | disentanglement_lib/methods/shared/losses.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/methods/shared/losses.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | disentanglement_lib/methods/shared/losses.py | erow/disentanglement_lib | c875207fdeadc44880277542447544941bc0bd0a | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The DisentanglementLib Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library of commonly used losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn.functional as F
import gin
epsilon = 1e-3
@gin.configurable("bernoulli_loss", allowlist=["subtract_true_image_entropy"])
def bernoulli_loss(true_images,
reconstructed_images,
activation,
subtract_true_image_entropy=False):
"""Computes the Bernoulli loss. A vector on the batch."""
img_size = np.prod(true_images.shape[1:])
reconstructed_images = reconstructed_images.reshape(-1, img_size)
true_images = true_images.reshape(-1, img_size)
# Because true images are not binary, the lower bound in the xent is not zero:
# the lower bound in the xent is the entropy of the true images.?
if subtract_true_image_entropy:
dist = torch.distributions.Bernoulli(
probs=torch.clamp(true_images, epsilon, 1 - epsilon))
loss_lower_bound = dist.entropy().sum(1)
else:
loss_lower_bound = 0
if activation == "logits":
loss = F.binary_cross_entropy_with_logits(reconstructed_images,
true_images,
reduction="none").sum(1)
elif activation == "tanh":
reconstructed_images = torch.clamp(
F.tanh(reconstructed_images) / 2 + 0.5, epsilon, 1 - epsilon)
loss = -torch.sum(
true_images * torch.log(reconstructed_images) +
(1 - true_images) * torch.log(1 - reconstructed_images),
dim=1)
else:
raise NotImplementedError("Activation not supported.")
return loss - loss_lower_bound
@gin.configurable("l2_loss", allowlist=[])
def l2_loss(true_images, reconstructed_images, activation):
    """Computes the per-sample squared reconstruction error.

    The reconstruction is first mapped into [0, 1] according to
    *activation* ("logits" -> sigmoid, "tanh" -> tanh rescaled), then the
    squared difference is summed over the non-batch dimensions.
    """
    if activation == "logits":
        predicted = torch.sigmoid(reconstructed_images)
    elif activation == "tanh":
        predicted = torch.tanh(reconstructed_images) / 2 + 0.5
    else:
        raise NotImplementedError("Activation not supported.")
    return torch.sum(torch.square(true_images - predicted), [1, 2, 3])
@gin.configurable(
    "reconstruction_loss", denylist=["true_images", "reconstructed_images"])
def make_reconstruction_loss(true_images,
                             reconstructed_images,
                             loss_fn=bernoulli_loss,
                             activation="logits"):
    """Apply the configured reconstruction loss and return per-sample values."""
    return loss_fn(true_images, reconstructed_images, activation)
def kl_normal_loss(mean, logvar, mean_dim=None):
    """KL divergence between N(mean, diag(exp(logvar))) and the unit normal.

    Parameters
    ----------
    mean : torch.Tensor
        Mean of the normal distribution, shape (batch_size, num_latent).
    logvar : torch.Tensor
        Diagonal log variance, same shape as *mean*.
    mean_dim : list of int, optional
        Dimensions to average over; defaults to [0] (the batch dimension).

    Returns
    -------
    torch.Tensor
        Per-latent KL values averaged over *mean_dim*.
    """
    dims = [0] if mean_dim is None else mean_dim
    # 0.5 * (mu^2 + sigma^2 - log sigma^2 - 1), elementwise.
    per_element = 0.5 * (mean.pow(2) + logvar.exp() - logvar - 1)
    return per_element.mean(dim=dims)
| 38.264151 | 87 | 0.66642 |
4522f8a07a009649958451d3a62a4f434efe1dad | 1,666 | py | Python | src/dstools/testing/sql.py | edublancas/python-ds-tools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | 1 | 2021-11-02T05:48:00.000Z | 2021-11-02T05:48:00.000Z | src/dstools/testing/sql.py | edublancas/python-ds-tools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | 8 | 2016-05-25T01:50:14.000Z | 2021-03-03T14:52:36.000Z | src/dstools/testing/sql.py | edublancas/dstools | 1da2337961db9c50562349c28c9115d3a7cc6c0c | [
"MIT"
] | null | null | null | """
Testing SQL relations
"""
from jinja2 import Template
def nulls_in_columns(client, cols, product):
    """Check if any column has NULL values, returns bool

    :param client: object exposing a DB-API connection at ``client.connection``
    :param cols: iterable of column names to test
    :param product: table (relation) name
    """
    # NOTE(review): identifiers are templated straight into the SQL, so
    # *cols* and *product* must come from trusted code, never user input.
    sql = Template("""
    SELECT EXISTS(
        SELECT * FROM {{product}}
        WHERE {{cols | join(' is null or ') }} is null
    )
    """).render(cols=cols, product=product)
    cur = client.connection.cursor()
    try:
        # Fix: close the cursor even when execute/fetch raises.
        cur.execute(sql)
        output = bool(cur.fetchone()[0])
    finally:
        cur.close()
    return output
def distinct_values_in_column(client, col, product):
    """Get distinct values in a column, returns a set

    :param client: object exposing a DB-API connection at ``client.connection``
    :param col: column name (trusted identifier — templated into the SQL)
    :param product: table (relation) name
    """
    sql = Template("""
    SELECT DISTINCT {{col}} FROM {{product}}
    """).render(col=col, product=product)
    cur = client.connection.cursor()
    try:
        # Fix: close the cursor even when execute/fetch raises.
        cur.execute(sql)
        rows = cur.fetchall()
    finally:
        cur.close()
    return set(row[0] for row in rows)
def duplicates_in_column(client, col, product):
    """Check if a column has duplicated values, returns bool

    :param client: object exposing a DB-API connection at ``client.connection``
    :param col: column name (trusted identifier — templated into the SQL)
    :param product: table (relation) name
    """
    sql = Template("""
    SELECT EXISTS(
        SELECT {{col}}, COUNT(*)
        FROM {{product}}
        GROUP BY {{col}}
        HAVING COUNT(*) > 1
    )
    """).render(col=col, product=product)
    cur = client.connection.cursor()
    try:
        # Fix: close the cursor even when execute/fetch raises.
        cur.execute(sql)
        output = bool(cur.fetchone()[0])
    finally:
        cur.close()
    return output
def range_in_column(client, col, product):
    """Get range for a column, returns a (min_value, max_value) tuple

    :param client: object exposing a DB-API connection at ``client.connection``
    :param col: column name (trusted identifier — templated into the SQL)
    :param product: table (relation) name
    """
    sql = Template("""
    SELECT MIN({{col}}), MAX({{col}}) FROM {{product}}
    """).render(col=col, product=product)
    cur = client.connection.cursor()
    try:
        # Fix: close the cursor even when execute/fetch raises.
        cur.execute(sql)
        output = cur.fetchone()
    finally:
        cur.close()
    return output
| 22.821918 | 69 | 0.605642 |
05b5d83a8d3ea7eacc4c17b61606c0c2b64ff728 | 7,971 | py | Python | sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py | paikend/azure-sdk-for-python | 5772d14728569fce7b40552a0f20795d12ecd797 | [
"MIT"
] | null | null | null | sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py | paikend/azure-sdk-for-python | 5772d14728569fce7b40552a0f20795d12ecd797 | [
"MIT"
] | 2 | 2021-08-24T15:32:30.000Z | 2021-08-24T23:21:34.000Z | sdk/monitor/azure-monitor-query/azure/monitor/query/_logs_query_client.py | paikend/azure-sdk-for-python | 5772d14728569fce7b40552a0f20795d12ecd797 | [
"MIT"
] | null | null | null | #
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING, Any, Union, Sequence, Dict, Optional
from azure.core.exceptions import HttpResponseError
from ._generated._monitor_query_client import MonitorQueryClient
from ._generated.models import BatchRequest, QueryBody as LogsQueryBody
from ._helpers import get_authentication_policy, process_error, construct_iso8601, order_results
from ._models import LogsQueryResult, LogsBatchQuery, LogsBatchQueryResult
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
from datetime import timedelta
class LogsQueryClient(object):
    """LogsQueryClient

    Thin wrapper around the generated ``MonitorQueryClient`` that exposes the
    Log Analytics query operations with friendlier argument handling.

    .. admonition:: Example:

        .. literalinclude:: ../samples/sample_log_query_client.py
            :start-after: [START client_auth_with_token_cred]
            :end-before: [END client_auth_with_token_cred]
            :language: python
            :dedent: 0
            :caption: Creating the LogsQueryClient with a TokenCredential.

    :param credential: The credential to authenticate the client.
    :type credential: ~azure.core.credentials.TokenCredential
    :keyword endpoint: The endpoint to connect to. Defaults to 'https://api.loganalytics.io'.
    :paramtype endpoint: str
    """

    def __init__(self, credential, **kwargs):
        # type: (TokenCredential, Any) -> None
        # Remaining kwargs are forwarded to the generated client unchanged.
        self._endpoint = kwargs.pop('endpoint', 'https://api.loganalytics.io/v1')
        self._client = MonitorQueryClient(
            credential=credential,
            authentication_policy=get_authentication_policy(credential),
            base_url=self._endpoint,
            **kwargs
        )
        # Shortcut to the generated query operations group.
        self._query_op = self._client.query

    def query(self, workspace_id, query, duration=None, **kwargs):
        # type: (str, str, Optional[timedelta], Any) -> LogsQueryResult
        """Execute an Analytics query.

        Executes an Analytics query for data.

        **Note**: Although the start_time, end_time, duration are optional parameters, it is highly
        recommended to specify the timespan. If not, the entire dataset is queried.

        :param workspace_id: ID of the workspace. This is Workspace ID from the Properties blade in the
         Azure portal.
        :type workspace_id: str
        :param query: The Analytics query. Learn more about the `Analytics query syntax
         <https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/>`_.
        :type query: str
        :param ~datetime.timedelta duration: The duration for which to query the data. This can also be accompanied
         with either start_time or end_time. If start_time or end_time is not provided, the current time is
         taken as the end time.
        :keyword datetime start_time: The start time from which to query the data. This should be accompanied
         with either end_time or duration.
        :keyword datetime end_time: The end time till which to query the data. This should be accompanied
         with either start_time or duration.
        :keyword int server_timeout: the server timeout in seconds. The default timeout is 3 minutes,
         and the maximum timeout is 10 minutes.
        :keyword bool include_statistics: To get information about query statistics.
        :keyword bool include_render: In the query language, it is possible to specify different render options.
         By default, the API does not return information regarding the type of visualization to show.
         If your client requires this information, specify the preference
        :keyword additional_workspaces: A list of workspaces that are included in the query.
         These can be qualified workspace names, workspace Ids, or Azure resource Ids.
        :paramtype additional_workspaces: list[str]
        :return: QueryResults, or the result of cls(response)
        :rtype: ~azure.monitor.query.LogsQueryResult
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

            .. literalinclude:: ../samples/sample_log_query_client.py
                :start-after: [START send_logs_query]
                :end-before: [END send_logs_query]
                :language: python
                :dedent: 0
                :caption: Get a response for a single Log Query
        """
        # Timespan is sent as a single ISO8601 string built from the three
        # optional time arguments.
        start = kwargs.pop('start_time', None)
        end = kwargs.pop('end_time', None)
        timespan = construct_iso8601(start, end, duration)
        include_statistics = kwargs.pop("include_statistics", False)
        include_render = kwargs.pop("include_render", False)
        server_timeout = kwargs.pop("server_timeout", None)
        workspaces = kwargs.pop("additional_workspaces", None)
        # Server-side options travel in the HTTP "Prefer" header as a
        # space-separated list of directives.
        prefer = ""
        if server_timeout:
            prefer += "wait=" + str(server_timeout)
        if include_statistics:
            if len(prefer) > 0:
                prefer += " "
            prefer += "include-statistics=true"
        if include_render:
            if len(prefer) > 0:
                prefer += " "
            prefer += "include-render=true"

        body = LogsQueryBody(
            query=query,
            timespan=timespan,
            workspaces=workspaces,
            **kwargs
        )

        try:
            return LogsQueryResult._from_generated(self._query_op.execute( # pylint: disable=protected-access
                workspace_id=workspace_id,
                body=body,
                prefer=prefer,
                **kwargs
            ))
        except HttpResponseError as e:
            # Re-raises with a friendlier error message extracted from the response.
            process_error(e)

    def query_batch(self, queries, **kwargs):
        # type: (Union[Sequence[Dict], Sequence[LogsBatchQuery]], Any) -> Sequence[LogsBatchQueryResult]
        """Execute a list of analytics queries. Each request can be either a LogQueryRequest
        object or an equivalent serialized model.

        The response is returned in the same order as that of the requests sent.

        :param queries: The list of queries that should be processed
        :type queries: list[dict] or list[~azure.monitor.query.LogsBatchQuery]
        :return: List of LogsBatchQueryResult, or the result of cls(response)
        :rtype: ~list[~azure.monitor.query.LogsBatchQueryResult]
        :raises: ~azure.core.exceptions.HttpResponseError

        .. admonition:: Example:

            .. literalinclude:: ../samples/sample_batch_query.py
                :start-after: [START send_query_batch]
                :end-before: [END send_query_batch]
                :language: python
                :dedent: 0
                :caption: Get a response for multiple Log Queries.
        """
        # Accept plain dicts as well as LogsBatchQuery objects: if the items
        # are already model instances, the ** expansion raises TypeError and
        # the list is kept as-is.
        try:
            queries = [LogsBatchQuery(**q) for q in queries]
        except (KeyError, TypeError):
            pass
        queries = [q._to_generated() for q in queries] # pylint: disable=protected-access
        # Remember the request ids so responses can be re-ordered to match
        # the caller's input order (the service may answer out of order).
        try:
            request_order = [req.id for req in queries]
        except AttributeError:
            request_order = [req['id'] for req in queries]
        batch = BatchRequest(requests=queries)
        generated = self._query_op.batch(batch, **kwargs)
        return order_results(
            request_order,
            [
                LogsBatchQueryResult._from_generated(rsp) for rsp in generated.responses # pylint: disable=protected-access
            ])

    def close(self):
        # type: () -> None
        """Close the :class:`~azure.monitor.query.LogsQueryClient` session."""
        return self._client.close()

    def __enter__(self):
        # type: () -> LogsQueryClient
        self._client.__enter__()  # pylint:disable=no-member
        return self

    def __exit__(self, *args):
        # type: (*Any) -> None
        self._client.__exit__(*args)  # pylint:disable=no-member
085934b87cd8e73b4f20a2312f5083491d6870a7 | 1,751 | py | Python | fixture/login_page.py | litovsky2/shop_test | a618f23debfb2efa6f1fceb7eff6443a10f0b11f | [
"Apache-2.0"
] | 1 | 2020-05-03T21:32:49.000Z | 2020-05-03T21:32:49.000Z | fixture/login_page.py | litovsky2/shop_test | a618f23debfb2efa6f1fceb7eff6443a10f0b11f | [
"Apache-2.0"
] | 1 | 2020-07-01T11:12:02.000Z | 2020-07-01T11:12:02.000Z | fixture/login_page.py | litovsky2/shop_test | a618f23debfb2efa6f1fceb7eff6443a10f0b11f | [
"Apache-2.0"
] | 2 | 2021-03-08T14:46:09.000Z | 2021-08-30T13:12:21.000Z | import logging
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
from locators.login import Authorization
from model.login import UserData
logger = logging.getLogger()
class LoginPage:
    """Page-object wrapper around the shop's login/authorization form."""

    def __init__(self, app):
        self.app = app

    def _password_input(self):
        driver = self.app.wd
        return driver.find_element(*Authorization.PASSWORD_INPUT)

    def _login_input(self):
        # The login field appears asynchronously, so wait for it explicitly.
        waiter = WebDriverWait(self.app.wd, 10)
        return waiter.until(
            EC.presence_of_element_located(Authorization.LOGIN_INPUT))

    def _login_button(self):
        driver = self.app.wd
        return driver.find_element(*Authorization.LOGIN_BUTTON)

    def sign_button_click(self):
        """Open the login form."""
        self._login_button().click()

    def _submit_login(self):
        driver = self.app.wd
        return driver.find_element(*Authorization.SUBMIT_BUTTON)

    def authentication(self, user: UserData, submit=True):
        """Open the login form, fill in *user*'s credentials and optionally submit."""
        logger.info(f'Try to login with login: {user.login} and password: '
                    f'{user.password}')
        self.sign_button_click()
        if user.login is not None:
            self._login_input().send_keys(user.login)
        if user.password is not None:
            self._password_input().send_keys(user.password)
        if submit:
            self._submit_login().click()

    def error_auth_text(self):
        """Text of the authentication-error message."""
        driver = self.app.wd
        return driver.find_element(*Authorization.ERROR_AUTH_TEXT).text

    def _login_form(self):
        driver = self.app.wd
        return driver.find_element(*Authorization.LOGIN_FORM)

    def login_form_text(self):
        """Full visible text of the login form."""
        return self._login_form().text

    def helper_login(self):
        """All helper/hint elements shown on the login form."""
        driver = self.app.wd
        return driver.find_elements(*Authorization.LOGIN_HELPER_TEXT)
| 30.189655 | 76 | 0.676185 |
3a8330399c48fd883441055c4135407694a049de | 26,343 | py | Python | cellpose/io.py | thosoo/cellpose | 2e175677bdceb139f769fa3ab59331b0c06ad2ec | [
"BSD-3-Clause"
] | null | null | null | cellpose/io.py | thosoo/cellpose | 2e175677bdceb139f769fa3ab59331b0c06ad2ec | [
"BSD-3-Clause"
] | null | null | null | cellpose/io.py | thosoo/cellpose | 2e175677bdceb139f769fa3ab59331b0c06ad2ec | [
"BSD-3-Clause"
] | 1 | 2021-06-02T12:56:06.000Z | 2021-06-02T12:56:06.000Z | import os, datetime, gc, warnings, glob
from natsort import natsorted
import numpy as np
import cv2
import tifffile
from . import utils, plot, transforms
try:
from PyQt5 import QtGui, QtCore, Qt, QtWidgets
GUI = True
except:
GUI = False
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
print('matplotlib not installed')
try:
from google.cloud import storage
SERVER_UPLOAD = True
except:
SERVER_UPLOAD = False
def outlines_to_text(base, outlines):
    """Write each outline as one comma-separated x,y line to ``<base>_cp_outlines.txt``."""
    with open(base + '_cp_outlines.txt', 'w') as txt:
        for outline in outlines:
            coords = outline.flatten()
            txt.write(','.join(str(v) for v in coords))
            txt.write('\n')
def imread(filename):
    """Read an image from disk.

    ``.tif``/``.tiff`` files are read with tifffile (preserves multi-page
    stacks and bit depth); everything else goes through OpenCV and is
    converted from BGR to RGB channel order.

    Returns the image array, or ``None`` if the file could not be read.
    """
    # BUG FIX: os.path.splitext keeps the leading dot, so the old check
    # (ext == 'tiff') never matched and .tiff files fell through to cv2.
    # .lower() additionally accepts upper-case extensions (.TIF etc.).
    ext = os.path.splitext(filename)[-1].lower()
    if ext in ('.tif', '.tiff'):
        return tifffile.imread(filename)
    try:
        img = cv2.imread(filename, -1)  # -1 == IMREAD_UNCHANGED (keep depth/alpha)
        if img.ndim > 2:
            img = img[..., [2, 1, 0]]  # OpenCV loads BGR; reorder to RGB
        return img
    except Exception as e:
        print('ERROR: could not read file, %s' % e)
        return None
def imsave(filename, arr):
    """Save array *arr* to *filename*; tif/tiff via tifffile, everything else via cv2."""
    # BUG FIX: os.path.splitext keeps the leading dot, so the old check
    # (ext == 'tiff') never matched and .tiff files were written with cv2.
    ext = os.path.splitext(filename)[-1].lower()
    if ext in ('.tif', '.tiff'):
        tifffile.imsave(filename, arr)
    else:
        cv2.imwrite(filename, arr)
def get_image_files(folder, mask_filter, imf=None):
    """Collect image files in *folder*, excluding mask/flow/output companion files.

    Files whose stem ends with any of '_cp_masks', '_cp_output', '_flows' or
    *mask_filter* are filtered out. If *imf* is given, only stems ending with
    *imf* are kept.
    """
    mask_filters = ['_cp_masks', '_cp_output', '_flows', mask_filter]
    if imf is None:
        imf = ''
    candidates = []
    for ext in ('png', 'jpg', 'jpeg', 'tif', 'tiff'):
        candidates.extend(glob.glob(folder + '/*%s.%s' % (imf, ext)))
    candidates = natsorted(candidates)

    kept = []
    for name in candidates:
        stem = os.path.splitext(name)[0]
        igood = all((len(stem) > len(mf) and stem[-len(mf):] != mf) or len(stem) < len(mf)
                    for mf in mask_filters)
        if len(imf) > 0:
            igood &= stem[-len(imf):] == imf
        if igood:
            kept.append(name)
    return kept
def get_label_files(image_names, mask_filter, imf=None):
    """Locate mask (and optional flow) files matching *image_names*.

    Returns ``(label_names, flow_names)`` where *flow_names* is ``None`` when
    no precomputed flow files exist. Raises ValueError when mask files are
    missing or incomplete.
    """
    stems = [os.path.splitext(name)[0] for name in image_names]
    # strip the image filter suffix (if any) to get the shared base names
    if imf:
        bases = [stem[:-len(imf)] for stem in stems]
    else:
        bases = stems

    # flows may be named after either the full stem or the trimmed base
    if os.path.exists(stems[0] + '_flows.tif'):
        flow_names = [stem + '_flows.tif' for stem in stems]
    else:
        flow_names = [base + '_flows.tif' for base in bases]
        if not all(os.path.exists(flow) for flow in flow_names):
            flow_names = None

    # masks may be stored as tif or png
    if os.path.exists(bases[0] + mask_filter + '.tif'):
        label_names = [base + mask_filter + '.tif' for base in bases]
    elif os.path.exists(bases[0] + mask_filter + '.png'):
        label_names = [base + mask_filter + '.png' for base in bases]
    else:
        raise ValueError('labels not provided with correct --mask_filter')
    if not all(os.path.exists(label) for label in label_names):
        raise ValueError('labels not provided for all images in train and/or test set')
    return label_names, flow_names
def load_train_test_data(train_dir, test_dir=None, image_filter=None, mask_filter='_masks', unet=False):
    """Load images and labels for a training folder and an optional test folder.

    Returns ``(images, labels, image_names, test_images, test_labels,
    image_names_test)``; the test entries are ``None`` when *test_dir* is not
    given. When precomputed flow files exist (and *unet* is False), the flows
    are stacked onto the mask labels.
    """
    def _load_folder(folder):
        # gather images and their mask/flow companions for one folder
        names = get_image_files(folder, mask_filter, imf=image_filter)
        label_names, flow_names = get_label_files(names, mask_filter, imf=image_filter)
        imgs = [imread(name) for name in names]
        lbls = [imread(name) for name in label_names]
        if flow_names is not None and not unet:
            for n, flow_name in enumerate(flow_names):
                flows = imread(flow_name)
                if flows.shape[0] < 4:
                    # masks saved separately from flows: stack them together
                    lbls[n] = np.concatenate((lbls[n][np.newaxis, :, :], flows), axis=0)
                else:
                    lbls[n] = flows
        return imgs, lbls, names

    images, labels, image_names = _load_folder(train_dir)
    test_images, test_labels, image_names_test = None, None, None
    if test_dir is not None:
        test_images, test_labels, image_names_test = _load_folder(test_dir)
    return images, labels, image_names, test_images, test_labels, image_names_test
def masks_flows_to_seg(images, masks, flows, diams, file_names, channels=None):
    """ save output of model eval to be loaded in GUI

    can be list output (run on multiple images) or single output (run on single image)

    saved to file_names[k]+'_seg.npy'

    Parameters
    -------------

    images: (list of) 2D or 3D arrays
        images input into cellpose

    masks: (list of) 2D arrays, int
        masks output from Cellpose.eval, where 0=NO masks; 1,2,...=mask labels

    flows: (list of) list of ND arrays
        flows output from Cellpose.eval

    diams: float array
        diameters used to run Cellpose

    file_names: (list of) str
        names of files of images

    channels: list of int (optional, default None)
        channels used to run Cellpose

    """
    if channels is None:
        channels = [0,0]

    # list input: recurse over items, picking per-image channels when given
    if isinstance(masks, list):
        for k, [image, mask, flow, diam, file_name] in enumerate(zip(images, masks, flows, diams, file_names)):
            channels_img = channels
            if channels_img is not None and len(channels) > 2:
                channels_img = channels[k]
            masks_flows_to_seg(image, mask, flow, diam, file_name, channels_img)
        return

    if len(channels)==1:
        channels = channels[0]

    # assemble the flow representations the GUI expects
    flowi = []
    if flows[0].ndim==3:
        flowi.append(flows[0][np.newaxis,...])
    else:
        flowi.append(flows[0])
    if flows[0].ndim==3:
        flowi.append((np.clip(transforms.normalize99(flows[2]),0,1) * 255).astype(np.uint8)[np.newaxis,...])
        flowi.append(np.zeros(flows[0].shape, dtype=np.uint8))
        flowi[-1] = flowi[-1][np.newaxis,...]
    else:
        flowi.append((np.clip(transforms.normalize99(flows[2]),0,1) * 255).astype(np.uint8))
        flowi.append((flows[1][0]/10 * 127 + 127).astype(np.uint8))
    if len(flows)>2:
        flowi.append(flows[3])
        flowi.append(np.concatenate((flows[1], flows[2][np.newaxis,...]), axis=0))
    outlines = masks * utils.masks_to_outlines(masks)
    base = os.path.splitext(file_names)[0]
    if masks.ndim==3:
        np.save(base+ '_seg.npy',
                    {'outlines': outlines.astype(np.uint16),
                     'masks': masks.astype(np.uint16),
                     'chan_choose': channels,
                     'img': images,
                     # BUG FIX: np.bool was deprecated and removed (NumPy 1.24);
                     # the builtin bool is the documented replacement.
                     'ismanual': np.zeros(masks.max(), bool),
                     'filename': file_names,
                     'flows': flowi,
                     'est_diam': diams})
    else:
        if images.shape[0]<8:
            # BUG FIX: np.transpose returns a new array; the result was
            # previously discarded. Move channels-first images to
            # channels-last (the value is local, so this is safe).
            images = np.transpose(images, (1,2,0))
        np.save(base+ '_seg.npy',
                    {'outlines': outlines.astype(np.uint16),
                     'masks': masks.astype(np.uint16),
                     'chan_choose': channels,
                     'ismanual': np.zeros(masks.max(), bool),
                     'filename': file_names,
                     'flows': flowi,
                     'est_diam': diams})
def save_to_png(images, masks, flows, file_names):
    """Deprecated alias: forwards to :func:`save_masks` with ``png=True``.

    Does not work for 3D images.
    """
    save_masks(images, masks, flows, file_names, png=True)
def save_masks(images, masks, flows, file_names, png=True, tif=False):
    """ save masks + nicely plotted segmentation image to png and/or tiff

    if png, masks[k] for images[k] are saved to file_names[k]+'_cp_masks.png'

    if tif, masks[k] for images[k] are saved to file_names[k]+'_cp_masks.tif'

    if png and matplotlib installed, full segmentation figure is saved to file_names[k]+'_cp.png'

    only tif option works for 3D data

    Parameters
    -------------

    images: (list of) 2D, 3D or 4D arrays
        images input into cellpose

    masks: (list of) 2D arrays, int
        masks output from Cellpose.eval, where 0=NO masks; 1,2,...=mask labels

    flows: (list of) list of ND arrays
        flows output from Cellpose.eval

    file_names: (list of) str
        names of files of images

    """
    # list input: recurse over items
    if isinstance(masks, list):
        for image, mask, flow, file_name in zip(images, masks, flows, file_names):
            save_masks(image, mask, flow, file_name, png=png, tif=tif)
        return

    if masks.ndim > 2 and not tif:
        raise ValueError('cannot save 3D outputs as PNG, use tif option instead')
    base = os.path.splitext(file_names)[0]
    exts = []
    if masks.ndim > 2:
        png = False
    if png:
        exts.append('.png')
    if tif:
        exts.append('.tif')

    # save masks (suppress low-contrast / dtype warnings from the writers)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        for ext in exts:
            imsave(base + '_cp_masks' + ext, masks.astype(np.uint16))

    if png and MATPLOTLIB and not min(images.shape) > 3:
        img = images.copy()
        if img.ndim<3:
            img = img[:,:,np.newaxis]
        elif img.shape[0]<8:
            # BUG FIX: np.transpose returns a new array; the result was
            # previously discarded, so channels-first images were plotted
            # with the wrong axis order.
            img = np.transpose(img, (1,2,0))

        fig = plt.figure(figsize=(12,3))
        # can save images (set save_dir=None if not)
        plot.show_segmentation(fig, img, masks, flows[0])
        fig.savefig(base + '_cp_output.png', dpi=300)
        plt.close(fig)

    if masks.ndim < 3:
        outlines = utils.outlines_list(masks)
        outlines_to_text(base, outlines)
def save_server(parent=None, filename=None):
    """ Uploads a *_seg.npy file to the bucket.

    Parameters
    ----------------

    parent: PyQt.MainWindow (optional, default None)
        GUI window to grab file info from

    filename: str (optional, default None)
        if no GUI, send this file to server

    """
    if parent is not None:
        # ask for explicit confirmation before uploading user data
        q = QtGui.QMessageBox.question(
            parent,
            "Send to server",
            "Are you sure? Only send complete and fully manually segmented data.\n (do not send partially automated segmentations)",
            QtGui.QMessageBox.Yes | QtGui.QMessageBox.No
        )
        if q != QtGui.QMessageBox.Yes:
            return
        else:
            filename = parent.filename

    if filename is not None:
        # credentials file is bundled next to this module
        os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                                    'key/cellpose-data-writer.json')
        bucket_name = 'cellpose_data'
        base = os.path.splitext(filename)[0]
        source_file_name = base + '_seg.npy'
        print(source_file_name)
        # destination blob is named by upload timestamp to avoid collisions
        time = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S.%f")
        filestring = time + '.npy'
        print(filestring)
        destination_blob_name = filestring
        storage_client = storage.Client()
        bucket = storage_client.bucket(bucket_name)
        blob = bucket.blob(destination_blob_name)

        blob.upload_from_filename(source_file_name)

        print(
            "File {} uploaded to {}.".format(
                source_file_name, destination_blob_name
            )
        )
def _load_image(parent, filename=None):
    """ load image with filename; if None, open QFileDialog """
    if filename is None:
        name = QtGui.QFileDialog.getOpenFileName(
            parent, "Load image"
            )
        filename = name[0]
    # if a previously saved segmentation exists next to the image, load it
    # instead (it carries the image plus masks/outlines)
    manual_file = os.path.splitext(filename)[0]+'_seg.npy'
    if os.path.isfile(manual_file):
        print(manual_file)
        _load_seg(parent, manual_file, image=imread(filename), image_file=filename)
        return
    elif os.path.isfile(os.path.splitext(filename)[0]+'_manual.npy'):
        # legacy name for manually-created segmentations
        manual_file = os.path.splitext(filename)[0]+'_manual.npy'
        _load_seg(parent, manual_file, image=imread(filename), image_file=filename)
        return
    try:
        image = imread(filename)
        image.shape  # raises AttributeError if imread returned None
        parent.loaded = True
    except:
        print('images not compatible')

    if parent.loaded:
        parent.reset()
        parent.filename = filename
        print(filename)
        filename = os.path.split(parent.filename)[-1]
        _initialize_images(parent, image, resize=parent.resize, X2=0)
        parent.clear_all()
        parent.loaded = True
        parent.enable_buttons()
        # flow-threshold sliders only apply once a model has been run
        parent.threshslider.setEnabled(False)
        parent.probslider.setEnabled(False)
def _initialize_images(parent, image, resize, X2):
    """ format image for GUI

    Normalizes any input layout to a stack of Z x Ly x Lx x 3 (RGB) planes,
    rescales intensities to 0-255 when needed, applies optional resizing and
    zoom, and populates parent.stack / parent.layers plus the view widgets.
    """
    parent.onechan=False
    if image.ndim > 3:
        # make tiff Z x channels x W x H
        if image.shape[0]<4:
            # tiff is channels x Z x W x H
            image = np.transpose(image, (1,0,2,3))
        elif image.shape[-1]<4:
            # tiff is Z x W x H x channels
            image = np.transpose(image, (0,3,1,2))
        # fill in with blank channels to make 3 channels
        if image.shape[1] < 3:
            shape = image.shape
            image = np.concatenate((image,
                            np.zeros((shape[0], 3-shape[1], shape[2], shape[3]), dtype=np.uint8)), axis=1)
            if 3-shape[1]>1:
                parent.onechan=True
        # back to channels-last for display
        image = np.transpose(image, (0,2,3,1))
    elif image.ndim==3:
        if image.shape[0] < 5:
            # channels-first single plane -> channels-last
            image = np.transpose(image, (1,2,0))
        if image.shape[-1] < 3:
            # pad missing channels with zeros to get RGB
            shape = image.shape
            image = np.concatenate((image,
                                       np.zeros((shape[0], shape[1], 3-shape[2]),
                                        dtype=type(image[0,0,0]))), axis=-1)
            if 3-shape[2]>1:
                parent.onechan=True
            image = image[np.newaxis,...]
        elif image.shape[-1]<5 and image.shape[-1]>2:
            # drop alpha / extra channels beyond RGB
            image = image[:,:,:3]
            image = image[np.newaxis,...]
        else:
            image = image[np.newaxis,...]
    parent.stack = image
    parent.NZ = len(parent.stack)
    parent.scroll.setMaximum(parent.NZ-1)
    # normalize intensities into 0..255 when outside (or far below) that range
    if parent.stack.max()>255 or parent.stack.min()<0.0 or parent.stack.max()<=50.0:
        parent.stack = parent.stack.astype(np.float32)
        parent.stack -= parent.stack.min()
        parent.stack /= parent.stack.max()
        parent.stack *= 255
    del image
    gc.collect()

    parent.stack = list(parent.stack)
    for k,img in enumerate(parent.stack):
        # if grayscale make 3D
        if resize != -1:
            img = transforms._image_resizer(img, resize=resize, to_uint8=False)
        if img.ndim==2:
            img = np.tile(img[:,:,np.newaxis], (1,1,3))
            parent.onechan=True
        if X2!=0:
            img = transforms._X2zoom(img, X2=X2)
        parent.stack[k] = img

    parent.imask=0
    print(parent.NZ, parent.stack[0].shape)
    parent.Ly, parent.Lx = img.shape[0], img.shape[1]
    parent.stack = np.array(parent.stack)
    # RGBA overlay layer for drawing masks on top of the image
    parent.layers = 0*np.ones((parent.NZ,parent.Ly,parent.Lx,4), np.uint8)
    if parent.autobtn.isChecked() or len(parent.saturation)!=parent.NZ:
        parent.compute_saturation()
    parent.compute_scale()
    # start the viewer at the middle plane of the stack
    parent.currentZ = int(np.floor(parent.NZ/2))
    parent.scroll.setValue(parent.currentZ)
    parent.zpos.setText(str(parent.currentZ))
def _load_seg(parent, filename=None, image=None, image_file=None):
    """ load *_seg.npy with filename; if None, open QFileDialog

    Restores a previously saved segmentation (image reference, masks,
    outlines, colors, flows and GUI state) into the main window *parent*.
    """
    if filename is None:
        name = QtGui.QFileDialog.getOpenFileName(
            parent, "Load labelled data", filter="*.npy"
            )
        filename = name[0]
    try:
        dat = np.load(filename, allow_pickle=True).item()
        dat['outlines']
        parent.loaded = True
    except:
        parent.loaded = False
        print('not NPY')
        return

    parent.reset()
    if image is None:
        # no image supplied: try the path recorded in the npy, then the same
        # file name next to the npy, then fall back to the embedded image
        found_image = False
        if 'filename' in dat:
            parent.filename = dat['filename']
            if os.path.isfile(parent.filename):
                parent.filename = dat['filename']
                found_image = True
            else:
                imgname = os.path.split(parent.filename)[1]
                root = os.path.split(filename)[0]
                parent.filename = root+'/'+imgname
                if os.path.isfile(parent.filename):
                    found_image = True
        if found_image:
            try:
                image = imread(parent.filename)
            except:
                parent.loaded = False
                found_image = False
                print('ERROR: cannot find image file, loading from npy')
        if not found_image:
            parent.filename = filename[:-11]
            if 'img' in dat:
                image = dat['img']
            else:
                print('ERROR: no image file found and no image in npy')
                return
    else:
        parent.filename = image_file
    print(parent.filename)

    if 'X2' in dat:
        parent.X2 = dat['X2']
    else:
        parent.X2 = 0
    if 'resize' in dat:
        parent.resize = dat['resize']
    elif 'img' in dat:
        if max(image.shape) > max(dat['img'].shape):
            parent.resize = max(dat['img'].shape)
    else:
        parent.resize = -1
    _initialize_images(parent, image, resize=parent.resize, X2=parent.X2)
    if 'chan_choose' in dat:
        parent.ChannelChoose[0].setCurrentIndex(dat['chan_choose'][0])
        parent.ChannelChoose[1].setCurrentIndex(dat['chan_choose'][1])
    if 'outlines' in dat:
        if isinstance(dat['outlines'], list):
            # old way of saving files: one outline point-list per cell
            dat['outlines'] = dat['outlines'][::-1]
            for k, outline in enumerate(dat['outlines']):
                if 'colors' in dat:
                    color = dat['colors'][k]
                else:
                    col_rand = np.random.randint(1000)
                    color = parent.colormap[col_rand,:3]
                median = parent.add_mask(points=outline, color=color)
                if median is not None:
                    parent.cellcolors.append(color)
                    parent.ncells+=1
        else:
            # new format: dense label arrays for masks and outlines
            if dat['masks'].ndim==2:
                dat['masks'] = dat['masks'][np.newaxis,:,:]
                dat['outlines'] = dat['outlines'][np.newaxis,:,:]
            if dat['masks'].min()==-1:
                dat['masks'] += 1
                dat['outlines'] += 1
            if 'colors' in dat:
                colors = dat['colors']
            else:
                col_rand = np.random.randint(0, 1000, (dat['masks'].max(),))
                colors = parent.colormap[col_rand,:3]
            parent.cellpix = dat['masks']
            parent.outpix = dat['outlines']
            parent.cellcolors.extend(colors)
            parent.ncells = np.uint16(parent.cellpix.max())
            parent.draw_masks()
            if 'est_diam' in dat:
                parent.Diameter.setText('%0.1f'%dat['est_diam'])
                parent.diameter = dat['est_diam']
                parent.compute_scale()
            if parent.masksOn or parent.outlinesOn and not (parent.masksOn and parent.outlinesOn):
                parent.redraw_masks(masks=parent.masksOn, outlines=parent.outlinesOn)
        if 'zdraw' in dat:
            parent.zdraw = dat['zdraw']
        else:
            parent.zdraw = [None for n in range(parent.ncells)]
        parent.loaded = True
        print('%d masks found'%(parent.ncells))
    else:
        parent.clear_all()

    # BUG FIX: np.bool was deprecated and removed (NumPy 1.24); use the
    # builtin bool instead.
    parent.ismanual = np.zeros(parent.ncells, bool)
    if 'ismanual' in dat:
        if len(dat['ismanual']) == parent.ncells:
            parent.ismanual = dat['ismanual']

    if 'current_channel' in dat:
        parent.color = (dat['current_channel']+2)%5
        parent.RGBDropDown.setCurrentIndex(parent.color)

    if 'flows' in dat:
        parent.flows = dat['flows']
        try:
            print(parent.flows[0].shape)
            # thresholds can only be re-applied on single-plane data
            if parent.NZ==1:
                parent.threshslider.setEnabled(True)
                parent.probslider.setEnabled(True)
            else:
                parent.threshslider.setEnabled(False)
                parent.probslider.setEnabled(False)
        except:
            # flows stored in an older nested layout, or missing entirely
            try:
                if len(parent.flows[0])>0:
                    parent.flows = parent.flows[0]
            except:
                parent.flows = [[],[],[],[],[[]]]
            parent.threshslider.setEnabled(False)
            parent.probslider.setEnabled(False)

    parent.enable_buttons()
    del dat
    gc.collect()
def _load_masks(parent, filename=None):
    """ load zeros-based masks (0=no cell, 1=cell 1, ...) """
    if filename is None:
        name = QtGui.QFileDialog.getOpenFileName(
            parent, "Load masks (PNG or TIFF)"
            )
        filename = name[0]
    masks = imread(filename)
    outlines = None
    if masks.ndim>3:
        # Z x nchannels x Ly x Lx
        if masks.shape[-1]>5:
            # extra channels carry flows (and channel 1 carries outlines)
            parent.flows = list(np.transpose(masks[:,:,:,2:], (3,0,1,2)))
            outlines = masks[...,1]
            masks = masks[...,0]
        else:
            parent.flows = list(np.transpose(masks[:,:,:,1:], (3,0,1,2)))
            masks = masks[...,0]
    elif masks.ndim==3:
        if masks.shape[-1]<5:
            # single plane with a channel axis: keep the label channel only
            masks = masks[np.newaxis,:,:,0]
    elif masks.ndim<3:
        masks = masks[np.newaxis,:,:]
    # masks should be Z x Ly x Lx
    if masks.shape[0]!=parent.NZ:
        print('ERROR: masks are not same depth (number of planes) as image stack')
        return
    print('%d masks found'%(len(np.unique(masks))-1))

    _masks_to_gui(parent, masks, outlines)

    parent.update_plot()
def _masks_to_gui(parent, masks, outlines=None):
    """ masks loaded into GUI

    Relabels *masks* to consecutive integers, computes (or relabels) the
    outlines, assigns random colors and pushes everything into *parent*'s
    display state.
    """
    # get unique values -- relabel masks with consecutive integers
    shape = masks.shape
    _, masks = np.unique(masks, return_inverse=True)
    masks = np.reshape(masks, shape)
    parent.cellpix = masks.astype(np.uint16)

    # get outlines
    if outlines is None:
        parent.outpix = np.zeros(masks.shape, np.uint16)
        for z in range(parent.NZ):
            outlines = utils.masks_to_outlines(masks[z])
            parent.outpix[z] = ((outlines * masks[z])).astype(np.uint16)
            if z%50==0:
                print('plane %d outlines processed'%z)
    else:
        # outlines were supplied: relabel them consistently with the masks
        parent.outpix = outlines
        shape = parent.outpix.shape
        _,parent.outpix = np.unique(parent.outpix, return_inverse=True)
        parent.outpix = np.reshape(parent.outpix, shape)

    parent.ncells = np.uint16(parent.cellpix.max())
    colors = parent.colormap[np.random.randint(0,1000,size=parent.ncells), :3]
    # index 0 (background) is white; cells get the random colors
    parent.cellcolors = list(np.concatenate((np.array([[255,255,255]]), colors), axis=0).astype(np.uint8))
    parent.draw_masks()
    if parent.ncells>0:
        parent.toggle_mask_ops()
    # BUG FIX: np.bool was deprecated and removed (NumPy 1.24); use the
    # builtin bool instead.
    parent.ismanual = np.zeros(parent.ncells, bool)
    parent.zdraw = list(-1*np.ones(parent.ncells, np.int16))
    parent.update_plot()
def _save_png(parent):
    """Save the current masks next to the image: PNG for 2D, TIFF for 3D stacks."""
    base = os.path.splitext(parent.filename)[0]
    if parent.NZ == 1:
        print('saving 2D masks to png')
        imsave(base + '_cp_masks.png', parent.cellpix[0])
    else:
        print('saving 3D masks to tiff')
        imsave(base + '_cp_masks.tif', parent.cellpix)
def _save_outlines(parent):
    """Export the 2D mask outlines to a text file (loadable into ImageJ)."""
    base = os.path.splitext(parent.filename)[0]
    if parent.NZ != 1:
        print('ERROR: cannot save 3D outlines')
        return
    print('saving 2D outlines to text file, see docs for info to load into ImageJ')
    outlines_to_text(base, utils.outlines_list(parent.cellpix[0]))
def _save_sets(parent):
    """ save masks to *_seg.npy

    3D stacks save the dense outlines/masks plus draw state; single planes
    additionally embed the (channel-chosen) image and manual-edit flags.
    """
    filename = parent.filename
    base = os.path.splitext(filename)[0]
    if parent.NZ > 1 and parent.is_stack:
        np.save(base + '_seg.npy',
                {'outlines': parent.outpix,
                 'colors': parent.cellcolors[1:],
                 'masks': parent.cellpix,
                 'current_channel': (parent.color-2)%5,
                 'filename': parent.filename,
                 'flows': parent.flows,
                 'zdraw': parent.zdraw})
    else:
        image = parent.chanchoose(parent.stack[parent.currentZ].copy())
        if image.ndim < 4:
            image = image[np.newaxis,...]
        np.save(base + '_seg.npy',
                {'outlines': parent.outpix.squeeze(),
                 'colors': parent.cellcolors[1:],
                 'masks': parent.cellpix.squeeze(),
                 'chan_choose': [parent.ChannelChoose[0].currentIndex(),
                                 parent.ChannelChoose[1].currentIndex()],
                 'img': image.squeeze(),
                 'ismanual': parent.ismanual,
                 'X2': parent.X2,
                 'filename': parent.filename,
                 'flows': parent.flows})
    # FIX: the closing line of this statement was garbled in the source
    # (extraction residue fused onto it); reconstructed from the format string.
    print('--- %d ROIs saved chan1 %s, chan2 %s'%(parent.ncells,
                                                  parent.ChannelChoose[0].currentText(),
                                                  parent.ChannelChoose[1].currentText()))
1e8c276812e09e6125d7ac8876fc395d4ddfc2e3 | 4,067 | py | Python | spack/package.meson.py | blue42u/hpctoolk | 5af594b5d2f5a83439d766ef379de13a437a2896 | [
"BSD-3-Clause"
] | null | null | null | spack/package.meson.py | blue42u/hpctoolk | 5af594b5d2f5a83439d766ef379de13a437a2896 | [
"BSD-3-Clause"
] | 1 | 2021-08-22T05:04:05.000Z | 2021-09-19T13:27:57.000Z | spack/package.meson.py | blue42u/hpctoolkit | 5af594b5d2f5a83439d766ef379de13a437a2896 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class HpctoolkitMeson(MesonPackage):
    """HPCToolkit is an integrated suite of tools for measurement and analysis
    of program performance on computers ranging from multicore desktop systems
    to the nation's largest supercomputers. By using statistical sampling of
    timers and hardware performance counters, HPCToolkit collects accurate
    measurements of a program's work, resource consumption, and inefficiency
    and attributes them to the full calling context in which they occur."""

    homepage = "http://hpctoolkit.org"
    git = "https://github.com/HPCToolkit/hpctoolkit.git"
    maintainers = ['mwkrentel']

    # Tracks the experimental meson-based build branch from a fork.
    version('master', branch='new-buildsys', git='https://github.com/blue42u/hpctoolkit.git')

    # Options for MPI and hpcprof-mpi.  We always support profiling
    # MPI applications.  These options add hpcprof-mpi, the MPI
    # version of hpcprof.  Cray and Blue Gene need separate options
    # because an MPI module in packages.yaml doesn't work on these
    # systems.
    variant('cray', default=False,
            description='Build for Cray compute nodes, including '
            'hpcprof-mpi.')

    variant('mpi', default=False,
            description='Build hpcprof-mpi, the MPI version of hpcprof.')

    # We can't build with both PAPI and perfmon for risk of segfault
    # from mismatched header files (unless PAPI installs the perfmon
    # headers).
    variant('papi', default=True,
            description='Use PAPI instead of perfmon for access to '
            'the hardware performance counters.')

    variant('all-static', default=False,
            description='Needed when MPICXX builds static binaries '
            'for the compute nodes.')

    variant('cuda', default=False,
            description='Support CUDA on NVIDIA GPUs (2020.03.01 or later).')

    # Boost components hpctoolkit links against.
    boost_libs = (
        '+atomic +chrono +date_time +filesystem +system +thread +timer'
        ' +graph +regex +shared +multithreaded visibility=global'
    )

    # Build/link dependencies; version-conditioned entries track changes in
    # hpctoolkit's own requirements over release history.
    depends_on('binutils+libiberty', type='link', when='@master:')
    depends_on('binutils+libiberty~nls', type='link', when='@2020.04.00:trunk')
    depends_on('binutils@:2.33.1+libiberty~nls', type='link', when='@:2020.03.99')
    depends_on('boost' + boost_libs)
    depends_on('bzip2+shared', type='link')
    depends_on('cmake', type='build')
    depends_on('dyninst@9.3.2:')
    depends_on('elfutils+bzip2+xz~nls', type='link')
    depends_on('gotcha@1.0.3:')
    depends_on('intel-tbb+shared')
    depends_on('libdwarf')
    depends_on('libmonitor+hpctoolkit')
    depends_on('libunwind@1.4: +xz+pic', when='@2020.09.00:')
    depends_on('libunwind@1.4: +xz', when='@:2020.08.99')
    depends_on('mbedtls+pic')
    depends_on('meson@0.57:')
    depends_on('pkgconf', type='build')
    depends_on('xerces-c transcoder=iconv')
    depends_on('xz+pic', type='link', when='@2020.09.00:')
    depends_on('xz', type='link', when='@:2020.08.99')
    depends_on('zlib+shared')

    # Variant-conditional dependencies.
    depends_on('cuda', when='+cuda')
    depends_on('intel-xed', when='target=x86_64:')
    depends_on('papi', when='+papi')
    depends_on('libpfm4', when='~papi')
    depends_on('mpi', when='+mpi')

    conflicts('%gcc@:4.7.99', when='^dyninst@10.0.0:',
              msg='hpctoolkit requires gnu gcc 4.8.x or later')

    conflicts('%gcc@:4.99.99', when='@2020.03.01:',
              msg='hpctoolkit requires gnu gcc 5.x or later')

    conflicts('+cuda', when='@2018.0.0:2019.99.99',
              msg='cuda requires 2020.03.01 or later')

    def meson_args(self):
        """Return the meson setup options derived from the spec's variants."""
        # NOTE(review): '-Dhpcstruct=enabled' appears twice in this list;
        # one occurrence is likely meant to enable a different component
        # (e.g. hpcprof) -- confirm against the meson_options of the branch.
        args = [
            '-Dhpcrun=enabled', '-Dhpclink=enabled', '-Dhpcstruct=enabled',
            '-Dhpcstruct=enabled',
            '-Dcuda-monitoring=%s' % ('enabled' if '+cuda' in self.spec else 'disabled'),
            '-Dmpi=%s' % ('enabled' if '+mpi' in self.spec else 'disabled'),
        ]
        return args
| 41.080808 | 93 | 0.660684 |
206c778b8da43ff1e67f1df6e634965b5c74be61 | 1,217 | py | Python | test/fake_data_loaders.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | 1 | 2020-11-09T12:00:59.000Z | 2020-11-09T12:00:59.000Z | test/fake_data_loaders.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | null | null | null | test/fake_data_loaders.py | Weilando/bachelor_playground | e1455029384f05a48ea5d792f76aa5d232fc1ddc | [
"Apache-2.0"
] | null | null | null | import torch
def generate_fake_mnist_data_loaders():
"""" Generate fake-DataLoaders with fake batches, i.e. a list with sub-lists of samples and labels.
Each batch holds three pairs of samples and labels. """
torch.manual_seed(123)
samples1 = torch.rand((3, 28, 28))
samples2 = torch.rand((3, 28, 28))
labels1 = torch.tensor([0, 0, 1])
labels2 = torch.tensor([1, 1, 0])
train = [[samples1, labels1], [samples1, labels2], [samples2, labels1], [samples2, labels2]]
val = [[samples2, labels1]]
test = [[samples1, labels2], [samples2, labels1]]
return train, val, test
def generate_fake_cifar10_data_loaders():
"""" Generate fake-DataLoaders with fake batches, i.e. a list with sub-lists of samples and labels.
Each batch holds three pairs of samples and labels. """
torch.manual_seed(123)
samples1 = torch.rand((3, 3, 32, 32))
samples2 = torch.rand((3, 3, 32, 32))
labels1 = torch.tensor([0, 0, 1])
labels2 = torch.tensor([1, 1, 0])
train = [[samples1, labels1], [samples1, labels2], [samples2, labels1], [samples2, labels2]]
val = [[samples2, labels1]]
test = [[samples1, labels2], [samples2, labels1]]
return train, val, test
| 40.566667 | 103 | 0.656532 |
a23dc68ccfb1399f0cbec2346c32c5c62857d9b7 | 1,265 | py | Python | xlsxwriter/test/comparison/test_chart_column07.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_column07.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_chart_column07.py | timgates42/XlsxWriter | 129044ed821de67895b4562c6b71f90eba5be6b4 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_column07.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [68810240, 68811776]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=(Sheet1!$A$1:$A$2,Sheet1!$A$4:$A$5)',
'values_data': [1, 2, 4, 5],
})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 23.867925 | 79 | 0.545455 |
e384feb612f6d76bb17ab9f35e09015091a98945 | 1,858 | py | Python | server/config.py | yl0937/yesnet | 24a3c086affd7a5f125b51f16a6dc1c8d1ed4c4a | [
"MIT"
] | null | null | null | server/config.py | yl0937/yesnet | 24a3c086affd7a5f125b51f16a6dc1c8d1ed4c4a | [
"MIT"
] | 7 | 2020-02-17T05:58:36.000Z | 2022-02-18T20:20:37.000Z | server/config.py | yl0937/yesnet | 24a3c086affd7a5f125b51f16a6dc1c8d1ed4c4a | [
"MIT"
] | 1 | 2020-02-27T06:32:13.000Z | 2020-02-27T06:32:13.000Z | import os
import enum
CONFIG = {
'REDIS-IP' : 'redis',
'R-CHANNEL': 'orderChannel', # Receiving Channel
'S-CHANNEL': 'resChannel', # Sending Channel
'RPC-URL': 'http://210.114.89.52:8545', # RPC URL, 실제 적용 시에 적절한 값으로 바뀌어야 함.
'ADAM': '0xb3b4ef17ba517e75b79169354fd9dfff51b9d592'
}
CMD = {
'GET_BALANCE': 'getBalance', # query balance
'CALL_FUNCTION': 'callFunction', # DApp의 메서드를 호출
'CALL_TX': 'callTx', # Call Tx of DApp
'CREATE_ACCOUNT': 'createAccount', # 계정을 생성
'DEPLOY_DAPP': 'deployDApp', # DApp을 Deployment
'GET_BLOCK': 'getBlock', # Block에 대한 정보를 보여줌.
'GET_TX': 'getTx', # TX에 대한 정보를 보여줌.
'FILL_ETH': 'fillEth', # ether를 채우는 명령
'PING': 'ping' # ping method
}
class ResCode(enum.Enum):
OK = 200
SERVER_FAIL = 500
TIME_OUT = 501
# Error code
ERR_SERVER_FAIL = 500
POSTGRES = {
'user': os.getenv('POSTGRES_USER', 'postgres'),
'pw': os.getenv('POSTGRES_PASSWORD', 'root'),
'db': os.getenv('POSTGRES_DB', 'postgres'),
'host': os.getenv('POSTGRES_HOST', 'localhost'),
'port': os.getenv('POSTGRES_PORT', 5432),
}
TEST_POSTGRES = {
'user': os.getenv('POSTGRES_USER', 'postgres'),
'pw': os.getenv('POSTGRES_PASSWORD', 'root'),
'db': os.getenv('POSTGRES_DB', 'postgres'),
'host': os.getenv('POSTGRES_HOST', 'localhost'),
'port': os.getenv('POSTGRES_PORT', 5432),
}
class Config:
ERROR_404_HELP = False
SECRET_KEY = os.getenv('APP_SECRET', 'Myljadf09832908uflkjasdDSDS(S&SI*S')
MONGO_URI = 'mongodb://mj:16900/yesnet'
class DevConfig(Config):
DEBUG = True
class TestConfig(Config):
TESTING = True
DEBUG = True
class ProdConfig(Config):
DEBUG = False
config = {
'development': DevConfig,
'testing': TestConfig,
'production': ProdConfig
}
| 24.12987 | 79 | 0.619483 |
209fa1f978dead0fa47c98d42fa260b8511c606e | 2,082 | py | Python | bot/bot.py | debjit-bw/Reddit-Bot | 7f8d3aa12dbc59780c8ed5048ba40f95b32fcbe8 | [
"MIT"
] | null | null | null | bot/bot.py | debjit-bw/Reddit-Bot | 7f8d3aa12dbc59780c8ed5048ba40f95b32fcbe8 | [
"MIT"
] | null | null | null | bot/bot.py | debjit-bw/Reddit-Bot | 7f8d3aa12dbc59780c8ed5048ba40f95b32fcbe8 | [
"MIT"
] | null | null | null | import praw
import time
import os
import requests
import json
import bot_login
import re
from com_reply import reply_to_comment
target = "!q"
def run_bot(r, created_utc):
try:
comment_url = f"""https://api.pushshift.io/reddit/search/comment/?q={target}&sort=desc&size=50&fields=author,body,created_utc,id,subreddit&after=""" + created_utc
parsed_comment_json = requests.get(comment_url).json()
if (len(parsed_comment_json["data"]) > 0):
created_utc = parsed_comment_json["data"][0]["created_utc"]
for comment in parsed_comment_json["data"]:
comment_author = comment["author"]
comment_body = comment["body"]
comment_id = comment["id"]
comment_subreddit = comment["subreddit"]
if (target in comment_body.lower() and comment_author != "queybot"):
print ("\n\nFound a comment!")
# action here
comment_reply += "\n\n\n\n---\n\n^(Beep boop. I am a bot. If there are any issues, contact my) [^Main ](https://www.reddit.com/message/compose/?to=queybot&subject=/u/q)\n\n^(Want to make a similar reddit bot? Check out: ) [^GitHub ](https://github.com/kylelobo/Reddit-Bot)"
reply_to_comment(r, comment_id, comment_reply, comment_subreddit, comment_author, comment_body)
print ("\nFetching comments..")
except Exception as e:
print (str(e.__class__.__name__) + ": " + str(e))
return str(created_utc)
if __name__ == "__main__":
while True:
try:
r = bot_login.bot_login()
created_utc = ""
print ("\nFetching comments..")
while True:
# Fetching all new comments that were created after created_utc time
created_utc = run_bot(r, created_utc)
time.sleep(10)
except Exception as e:
print (str(e.__class__.__name__) + ": " + str(e))
cur.close()
conn.close()
time.sleep(15)
| 34.131148 | 293 | 0.591739 |
2d2e90c339ec63167a6b4e94ad64f88c4b86fd58 | 351 | py | Python | classes/response.py | Vio-Eli/CelestAI_Discord | d99248393a5467cf1cb37243fd708ed8cc91b6ea | [
"MIT"
] | null | null | null | classes/response.py | Vio-Eli/CelestAI_Discord | d99248393a5467cf1cb37243fd708ed8cc91b6ea | [
"MIT"
] | null | null | null | classes/response.py | Vio-Eli/CelestAI_Discord | d99248393a5467cf1cb37243fd708ed8cc91b6ea | [
"MIT"
] | null | null | null | """
The "Response" class represents a cached audio response
"""
import os.path as path
from celestai import settings
class Response():
def __init__(self, key, text):
self.key = key
self.file = path.join(settings.RESPONSES_DIR, self.key+'.mp3')
self.text = text
if not path.isfile(self.file):
pass
| 18.473684 | 70 | 0.62963 |
f12585acf4a3d297db04595efeac284ecd0f0461 | 277 | py | Python | src/infi/django_rest_utils/admin.py | Infinidat/infi.django_rest_utils | 99bfc02978533572874e82bd2199ec8945e3985c | [
"BSD-3-Clause"
] | 4 | 2016-10-19T15:29:41.000Z | 2018-05-11T08:00:29.000Z | src/infi/django_rest_utils/admin.py | Infinidat/infi.django_rest_utils | 99bfc02978533572874e82bd2199ec8945e3985c | [
"BSD-3-Clause"
] | null | null | null | src/infi/django_rest_utils/admin.py | Infinidat/infi.django_rest_utils | 99bfc02978533572874e82bd2199ec8945e3985c | [
"BSD-3-Clause"
] | 2 | 2015-09-27T13:11:41.000Z | 2020-04-14T03:44:37.000Z | from __future__ import absolute_import
from django.contrib import admin
from .models import APIToken
class APITokenAdmin(admin.ModelAdmin):
list_display = ('user', 'token')
search_fields = ('user__username', 'token')
admin.site.register(APIToken, APITokenAdmin) | 18.466667 | 47 | 0.761733 |
00243933574966bf31bb0d27057c3d779d76be5b | 316 | py | Python | problem_4/parameters.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | 2 | 2020-12-07T09:51:40.000Z | 2021-05-03T18:29:23.000Z | problem_4/parameters.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | null | null | null | problem_4/parameters.py | vineeths96/SVM-and-Neural-Networks | 84d734542d4f7fc718c49a8d63db07b0597ccbc7 | [
"MIT"
] | 4 | 2021-02-22T16:36:50.000Z | 2021-09-14T12:50:36.000Z | TIME_TICKS = 140
AAL_BRAIN_REGIONS = 116
POWER_BRAIN_REGION = 264
NUM_PATIENTS = 34
NUM_NORMALS = 47
TEST_SPLIT = 0.2
NUM_DENSE_1 = 64
DROPOUT = 0.2
BATCH_SIZE = 16
TRAINING_EPOCHS = 25
VALIDATION_SPLIT = 0.2
NUM_CLASSES = 2
ALPHA = 0.1
RHO = 0.9
EPSILON = 1e-08
DECAY = 0.0
LEARNING_RATE = 0.0005
PATIENCE = 5
| 13.166667 | 24 | 0.731013 |
05d9a5de39db3c9d7a8f10e9a6c7f24f1a9d9ab3 | 3,496 | py | Python | FRG Hardware/frgpl/frgpl/tec.py | fenning-research-group/Instruments | c4e5f854fed1cce20f25076a38842bfbb5396917 | [
"MIT"
] | null | null | null | FRG Hardware/frgpl/frgpl/tec.py | fenning-research-group/Instruments | c4e5f854fed1cce20f25076a38842bfbb5396917 | [
"MIT"
] | null | null | null | FRG Hardware/frgpl/frgpl/tec.py | fenning-research-group/Instruments | c4e5f854fed1cce20f25076a38842bfbb5396917 | [
"MIT"
] | 1 | 2019-06-03T16:09:33.000Z | 2019-06-03T16:09:33.000Z | ## module for communication with Omega temperature controller
import serial
import numpy as np
import codecs
import time
import sys
class omega:
def __init__(self, port = 'COM15', address = 1):
self.connect(port = port, address = address)
@property
def setpoint(self):
return self.__setpoint
@setpoint.setter
def setpoint(self, x):
if self.setSetPoint(x):
self.__setpoint = x
return True
else:
print('Error changing set point - set point is still {0} C'.format(self.__setpoint))
return False
def connect(self, port, address = 1):
self.__handle = serial.Serial()
self.__handle.port = port
self.__handle.timeout = 2
self.__handle.parity = 'E'
self.__handle.bytesize = 7
self.__handle.baudrate = 9600
self.__handle.open()
#configure communication bits
self.__address = self.numtohex(address) #convert to hex, for use in communication <addr>
self.__end = b'\r\n' #end bit <etx>
#read current setpoint
# self.__setpoint = self.getSetPoint()
# self.__setpoint = None
return True
def disconnect(self):
self.__handle.close()
return True
def getTemperature(self):
numWords = 1
payload = self.buildPayload(
command = 3,
dataAddress = 1000,
content = numWords
)
self.__handle.write(payload)
response = self.__handle.readline()
data = int(response[7:-4], 16) * 0.1 #response given in 0.1 C
return round(data, 2) #only give two decimals, rounding error gives ~8 decimal places of 0's sometimes
def getSetPoint(self):
numWords = 1
payload = self.buildPayload(
command = 3,
dataAddress = 1001,
content = numWords
)
self.__handle.write(payload)
response = self.__handle.readline()
try:
data = int(response[7:-4], 16) * 0.1 #response given in 0.1 C
except Exception as e:
print('\nError in tec.py, method getSetPoint: ')
print(e)
sys.exit("\n************************************\nError: Make sure the temperature controller switch is turned on.\n****************************************")
return data
def setSetPoint(self, setpoint):
setpoint = round(setpoint * 10) #need to give integer values of 0.1 C
payload = self.buildPayload(
command = 6,
dataAddress = 1001,
content = setpoint
)
self.__handle.write(payload)
response = self.__handle.readline()
# time.sleep(0.2)
if self.getSetPoint()*10 == setpoint:
return True
self.__setpoint = setpoint
else:
return False
### helper methods
def numtohex(self, num):
# return codecs.encode(str.encode('{0:02d}'.format(num)), 'hex_codec')
return '{0:02X}'.format(num).encode()
def buildPayload(self, command, dataAddress, content):
def calculateChecksum(payload):
numHexValues = int(len(payload)/2)
hexValues = [int(payload[2*i : (2*i)+2], 16) for i in range(numHexValues)]
checksum_int = 256 - sum(hexValues)%256 #drop the 0x convention at front, we only want the last two characters
checksum = '{0:02X}'.format(checksum_int)
return str.upper(checksum).encode()
payload = self.__address
payload = payload + self.numtohex(command)
payload = payload + str.encode(str(dataAddress))
payload = payload + '{0:04X}'.format(content).encode()
# calculate checksum from current payload
chksum =calculateChecksum(payload)
# complete the payload
payload = payload + chksum
payload = payload + self.__end
payload = b':' + payload #should start with ":", just held til the end to not interfere with checksum calculation
return payload
| 26.484848 | 161 | 0.681064 |
d333c9c5b352be7cd497e119e8abe3b326f6a71b | 35,157 | py | Python | plugin/core/types.py | Daggy1234/LSP | 65a40d6d28fd8ed03e483ed2bb9d43d049ed29d0 | [
"MIT"
] | null | null | null | plugin/core/types.py | Daggy1234/LSP | 65a40d6d28fd8ed03e483ed2bb9d43d049ed29d0 | [
"MIT"
] | null | null | null | plugin/core/types.py | Daggy1234/LSP | 65a40d6d28fd8ed03e483ed2bb9d43d049ed29d0 | [
"MIT"
] | null | null | null | from .collections import DottedDict
from .logging import debug, set_debug_logging
from .protocol import TextDocumentSyncKindNone
from .typing import Any, Optional, List, Dict, Generator, Callable, Iterable, Union, Set, Tuple, TypeVar
from .url import filename_to_uri
from .url import uri_to_filename
from threading import RLock
from wcmatch.glob import BRACE
from wcmatch.glob import globmatch
from wcmatch.glob import GLOBSTAR
import contextlib
import os
import socket
import sublime
import time
TCP_CONNECT_TIMEOUT = 5  # seconds
FEATURES_TIMEOUT = 300  # milliseconds
# Regexes for entries in output panels: a file-path line ending in a colon (the negative
# lookahead excludes indented "row:col" lines), and an indented "row:col" line capturing
# row and column. NOTE(review): presumably consumed by the diagnostics/references panel
# code elsewhere in the plugin — confirm against the panel implementation.
PANEL_FILE_REGEX = r"^(?!\s+\d+:\d+)(.*)(:)$"
PANEL_LINE_REGEX = r"^\s+(\d+):(\d+)"
def basescope2languageid(base_scope: str) -> str:
    """Translate a Sublime Text base scope into an LSP language ID.

    The mapping lives in language-ids.sublime-settings; when the scope is not
    listed there, the last dotted component of the scope is used as a fallback.
    Returns an empty string when the mapped value is not a string.
    """
    # This is the connection between Language IDs and ST selectors.
    mapping = sublime.load_settings("language-ids.sublime-settings")
    fallback = base_scope.split(".")[-1]
    language_id = mapping.get(base_scope, fallback)
    return language_id if isinstance(language_id, str) else ""
@contextlib.contextmanager
def runtime(token: str) -> Generator[None, None, None]:
    """Context manager that logs the running time of the ``with`` body, in microseconds,
    prefixed with *token*, via the debug logger."""
    # perf_counter() is monotonic and has higher resolution than time(), which can
    # jump (even backwards) when the system clock is adjusted mid-measurement.
    start = time.perf_counter()
    yield
    debug(token, "running time:", int((time.perf_counter() - start) * 1000000), "μs")
T = TypeVar("T")


def diff(old: Iterable[T], new: Iterable[T]) -> Tuple[Set[T], Set[T]]:
    """
    Return a tuple of (added, removed) items
    """
    # Avoid re-materializing inputs that are already sets.
    previous = old if isinstance(old, set) else set(old)
    current = new if isinstance(new, set) else set(new)
    return current - previous, previous - current
def debounced(f: Callable[[], Any], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
              async_thread: bool = False) -> None:
    """
    Possibly run a function at a later point in time, either on the async thread or on the main thread.

    :param f: The function to possibly run. Its return value is discarded.
    :param timeout_ms: The time in milliseconds after which to possibly run the function
    :param condition: The condition that must evaluate to True in order to run the function
    :param async_thread: If true, run the function on the async worker thread, otherwise run the function on the
                         main thread
    """

    def check_and_run() -> None:
        if condition():
            f()

    if async_thread:
        sublime.set_timeout_async(check_and_run, timeout_ms)
    else:
        sublime.set_timeout(check_and_run, timeout_ms)
def _settings_style_to_add_regions_flag(style: str) -> int:
    """Map a highlight style name from the settings to sublime.View.add_regions() flags.

    "fill" keeps the fill, "box" keeps the outline; any other value suppresses both, and
    the underline styles additionally OR in the matching underline draw flag.
    """
    if style == "fill":
        base = sublime.DRAW_NO_OUTLINE
    elif style == "box":
        base = sublime.DRAW_NO_FILL
    else:
        base = sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE
    underline = {
        "underline": sublime.DRAW_SOLID_UNDERLINE,
        "stippled": sublime.DRAW_STIPPLED_UNDERLINE,
        "squiggly": sublime.DRAW_SQUIGGLY_UNDERLINE,
    }
    return base | underline.get(style, 0)
class SettingsRegistration:
    """Ties an "LSP" on-change callback to the lifetime of this object.

    Registers *on_change* on the given settings object under the "LSP" key on
    construction, and unregisters it again when this wrapper is destroyed.
    """

    __slots__ = ("_settings",)

    def __init__(self, settings: sublime.Settings, on_change: Callable[[], None]) -> None:
        self._settings = settings
        settings.add_on_change("LSP", on_change)

    def __del__(self) -> None:
        # Cleanup happens when the last reference drops (prompt under CPython refcounting).
        self._settings.clear_on_change("LSP")
class Debouncer:
    """Schedules callbacks such that only the most recently scheduled one is allowed to run."""

    def __init__(self) -> None:
        self._current_id = -1
        self._next_id = 0
        self._current_id_lock = RLock()

    def debounce(self, f: Callable[[], None], timeout_ms: int = 0, condition: Callable[[], bool] = lambda: True,
                 async_thread: bool = False) -> None:
        """
        Possibly run a function at a later point in time, either on the async thread or on the main thread.

        :param f: The function to possibly run
        :param timeout_ms: The time in milliseconds after which to possibly run the function
        :param condition: The condition that must evaluate to True in order to run the function
        :param async_thread: If true, run the function on the async worker thread, otherwise run
                             the function on the main thread
        """
        with self._current_id_lock:
            my_id = self._current_id = self._next_id
            self._next_id += 1

        def attempt_run() -> None:
            # A newer debounce() call (or cancel_pending()) invalidates this scheduled run.
            with self._current_id_lock:
                if my_id != self._current_id:
                    return
            if condition():
                f()

        if async_thread:
            sublime.set_timeout_async(attempt_run, timeout_ms)
        else:
            sublime.set_timeout(attempt_run, timeout_ms)

    def cancel_pending(self) -> None:
        """Invalidate every callback scheduled so far."""
        with self._current_id_lock:
            self._current_id = -1
def read_dict_setting(settings_obj: sublime.Settings, key: str, default: dict) -> dict:
    """Return the value stored under *key* if it is a dict, otherwise *default*."""
    value = settings_obj.get(key)
    if isinstance(value, dict):
        return value
    return default
def read_list_setting(settings_obj: sublime.Settings, key: str, default: list) -> list:
    """Return the value stored under *key* if it is a list, otherwise *default*."""
    value = settings_obj.get(key)
    if isinstance(value, list):
        return value
    return default
class Settings:
    """Typed accessor for the global LSP.sublime-settings values.

    The class-level ``None`` assignments below exist only so that mypy knows the
    instance attribute types; ``update()`` assigns the real values at runtime.
    """

    # This is only for mypy
    code_action_on_save_timeout_ms = None  # type: int
    diagnostics_additional_delay_auto_complete_ms = None  # type: int
    diagnostics_delay_ms = None  # type: int
    diagnostics_gutter_marker = None  # type: str
    diagnostics_highlight_style = None  # type: str
    diagnostics_panel_include_severity_level = None  # type: int
    disabled_capabilities = None  # type: List[str]
    document_highlight_style = None  # type: str
    inhibit_snippet_completions = None  # type: bool
    inhibit_word_completions = None  # type: bool
    log_debug = None  # type: bool
    log_max_size = None  # type: int
    log_server = None  # type: List[str]
    lsp_code_actions_on_save = None  # type: Dict[str, bool]
    lsp_format_on_save = None  # type: bool
    only_show_lsp_completions = None  # type: bool
    popup_max_characters_height = None  # type: int
    popup_max_characters_width = None  # type: int
    show_code_actions = None  # type: str
    show_diagnostics_count_in_view_status = None  # type: bool
    show_diagnostics_in_view_status = None  # type: bool
    show_diagnostics_panel_on_save = None  # type: int
    show_diagnostics_severity_level = None  # type: int
    show_references_in_quick_panel = None  # type: bool
    show_symbol_action_links = None  # type: bool
    show_view_status = None  # type: bool

    def __init__(self, s: sublime.Settings) -> None:
        self.update(s)

    def update(self, s: sublime.Settings) -> None:
        """(Re-)read all known settings from *s*, applying defaults for missing or
        mistyped values, and honoring several legacy (backwards-compatible) settings."""

        def r(name: str, default: Union[bool, int, str, list, dict]) -> None:
            # Only accept a value of the same type as the default; otherwise fall back.
            val = s.get(name)
            setattr(self, name, val if isinstance(val, default.__class__) else default)

        r("code_action_on_save_timeout_ms", 2000)
        r("diagnostics_additional_delay_auto_complete_ms", 0)
        r("diagnostics_delay_ms", 0)
        r("diagnostics_gutter_marker", "dot")
        r("diagnostics_highlight_style", "underline")
        r("diagnostics_panel_include_severity_level", 4)
        r("disabled_capabilities", [])
        r("document_highlight_style", "stippled")
        r("log_debug", False)
        r("log_max_size", 8 * 1024)
        r("lsp_code_actions_on_save", {})
        r("lsp_format_on_save", False)
        r("only_show_lsp_completions", False)
        r("popup_max_characters_height", 1000)
        r("popup_max_characters_width", 120)
        r("show_code_actions", "annotation")
        r("show_diagnostics_count_in_view_status", False)
        r("show_diagnostics_in_view_status", True)
        r("show_diagnostics_panel_on_save", 2)
        r("show_diagnostics_severity_level", 2)
        r("show_references_in_quick_panel", False)
        r("show_symbol_action_links", False)
        r("show_view_status", True)

        # Backwards-compatible with the bool setting: True used to mean "log to the panel".
        log_server = s.get("log_server")
        if isinstance(log_server, bool):
            self.log_server = ["panel"] if log_server else []
        elif isinstance(log_server, list):
            self.log_server = log_server
        else:
            self.log_server = []

        # Backwards-compatible with the bool setting; may override the value read above.
        auto_show_diagnostics_panel = s.get("auto_show_diagnostics_panel")
        if isinstance(auto_show_diagnostics_panel, bool):
            if not auto_show_diagnostics_panel:
                self.show_diagnostics_panel_on_save = 0
        elif isinstance(auto_show_diagnostics_panel, str):
            if auto_show_diagnostics_panel == "never":
                self.show_diagnostics_panel_on_save = 0

        # Backwards-compatible with "only_show_lsp_completions": when set as a bool it
        # drives both of the newer inhibit_* settings.
        only_show_lsp_completions = s.get("only_show_lsp_completions")
        if isinstance(only_show_lsp_completions, bool):
            self.inhibit_snippet_completions = only_show_lsp_completions
            self.inhibit_word_completions = only_show_lsp_completions
        else:
            r("inhibit_snippet_completions", False)
            r("inhibit_word_completions", True)

        set_debug_logging(self.log_debug)

    def document_highlight_style_to_add_regions_flags(self) -> int:
        """Translate document_highlight_style into sublime add_regions() flags."""
        return _settings_style_to_add_regions_flag(self.document_highlight_style)

    def diagnostics_highlight_style_to_add_regions_flag(self) -> int:
        """Translate diagnostics_highlight_style into sublime add_regions() flags."""
        return _settings_style_to_add_regions_flag(self.diagnostics_highlight_style)
class ClientStates:
    """Integer constants describing the lifecycle state of a language server session."""
    STARTING = 0
    READY = 1
    STOPPING = 2
class DocumentFilter:
    """
    A document filter denotes a document through properties like language, scheme or pattern. An example is a filter
    that applies to TypeScript files on disk. Another example is a filter that applies to JSON files with name
    package.json:

        { "language": "typescript", scheme: "file" }
        { "language": "json", "pattern": "**/package.json" }

    Sublime Text doesn't understand what a language ID is, so we have to maintain a global translation map from
    language IDs to selectors. Sublime Text also has no support for patterns; the wcmatch library is used for that.
    """

    __slots__ = ("language", "scheme", "pattern")

    def __init__(
        self,
        language: Optional[str] = None,
        scheme: Optional[str] = None,
        pattern: Optional[str] = None
    ) -> None:
        self.language = language
        self.scheme = scheme
        self.pattern = pattern

    def __call__(self, view: sublime.View) -> bool:
        """Does this filter match the view? An empty filter matches any view."""
        if self.language:
            syntax = view.syntax()
            if not syntax:
                return False
            if basescope2languageid(syntax.scope) != self.language:
                return False
        if self.scheme:
            # Can be "file" or "untitled"?
            pass
        if self.pattern and not globmatch(view.file_name() or "", self.pattern, flags=GLOBSTAR | BRACE):
            return False
        return True
class DocumentSelector:
    """
    A DocumentSelector is a list of DocumentFilters. A view matches a DocumentSelector if and only if at least one
    of its filters matches the view.
    """

    __slots__ = ("filters",)

    def __init__(self, document_selector: List[Dict[str, Any]]) -> None:
        self.filters = [DocumentFilter(**entry) for entry in document_selector]

    def __bool__(self) -> bool:
        return len(self.filters) > 0

    def matches(self, view: sublime.View) -> bool:
        """Does this selector match the view? A selector with no filters matches all views."""
        if not self.filters:
            return True
        return any(document_filter(view) for document_filter in self.filters)
# method -> (capability dotted path, optional registration dotted path)
# these are the EXCEPTIONS. The general rule is: method foo/bar --> (barProvider, barProvider.id)
_METHOD_TO_CAPABILITY_EXCEPTIONS = {
    'workspace/symbol': ('workspaceSymbolProvider', None),
    'workspace/didChangeWorkspaceFolders': ('workspace.workspaceFolders',
                                            'workspace.workspaceFolders.changeNotifications'),
    'textDocument/didOpen': ('textDocumentSync.didOpen', None),
    'textDocument/didClose': ('textDocumentSync.didClose', None),
    'textDocument/didChange': ('textDocumentSync.change', None),
    'textDocument/didSave': ('textDocumentSync.save', None),
    'textDocument/willSave': ('textDocumentSync.willSave', None),
    'textDocument/willSaveWaitUntil': ('textDocumentSync.willSaveWaitUntil', None),
    'textDocument/formatting': ('documentFormattingProvider', None),
    'textDocument/documentColor': ('colorProvider', None)
}  # type: Dict[str, Tuple[str, Optional[str]]]


def method_to_capability(method: str) -> Tuple[str, str]:
    """
    Given a method, returns the corresponding capability path, and the associated path to stash the registration key.

    Examples:

        textDocument/definition --> (definitionProvider, definitionProvider.id)
        textDocument/references --> (referencesProvider, referencesProvider.id)
        textDocument/didOpen --> (textDocumentSync.didOpen, textDocumentSync.didOpen.id)
    """
    capability_path, registration_path = _METHOD_TO_CAPABILITY_EXCEPTIONS.get(method, (None, None))
    if capability_path is None:
        # The general rule: "foo/bar" --> "barProvider"
        capability_path = "{}Provider".format(method.split('/')[1])
    if registration_path is None:
        # This path happens to coincide with the StaticRegistrationOptions' id, which is on purpose. As a consequence,
        # if a server made a "registration" via the initialize response, it can call client/unregisterCapability at
        # a later date, and the capability will pop from the capabilities dict.
        registration_path = "{}.id".format(capability_path)
    return capability_path, registration_path
def normalize_text_sync(textsync: Union[None, int, Dict[str, Any]]) -> Dict[str, Any]:
    """
    Brings legacy text sync capabilities to the most modern format
    """
    result = {}  # type: Dict[str, Any]
    if isinstance(textsync, int):
        # Oldest style: a bare TextDocumentSyncKind number implies everything else.
        result["textDocumentSync"] = {
            "didOpen": {},
            "save": {},
            "didClose": {},
            "change": {"syncKind": textsync}
        }
    elif isinstance(textsync, dict):
        normalized = {}

        change = textsync.get("change")
        if isinstance(change, int):
            normalized["change"] = {"syncKind": change}
        elif isinstance(change, dict):
            normalized["change"] = change

        def carry_over(key: str) -> None:
            # True becomes an empty options object; dicts are kept; everything else drops.
            value = textsync.get(key)
            if isinstance(value, bool):
                if value:
                    normalized[key] = {}
            elif isinstance(value, dict):
                normalized[key] = value

        open_close = textsync.get("openClose")
        if isinstance(open_close, bool):
            if open_close:
                normalized["didOpen"] = {}
                normalized["didClose"] = {}
        else:
            carry_over("didOpen")
            carry_over("didClose")
        carry_over("willSave")
        carry_over("willSaveWaitUntil")
        carry_over("save")
        result["textDocumentSync"] = normalized
    return result
class Capabilities(DottedDict):
    """
    Holds the server capabilities, both static and dynamic.

    Static capabilities arrive in the response to the initialize request (Client -> Server). Dynamic capabilities
    may be registered and unregistered at any time via client/registerCapability and client/unregisterCapability
    (Server -> Client).
    """

    def register(
        self,
        registration_id: str,
        capability_path: str,
        registration_path: str,
        options: Dict[str, Any]
    ) -> None:
        """Store a dynamic registration, warning (but proceeding) when it clobbers an existing one."""
        existing_id = self.get(registration_path)
        if isinstance(existing_id, str):
            msg = "{} is already registered at {} with ID {}, overwriting"
            debug(msg.format(capability_path, registration_path, existing_id))
        self.set(capability_path, options)
        self.set(registration_path, registration_id)

    def unregister(
        self,
        registration_id: str,
        capability_path: str,
        registration_path: str
    ) -> Optional[Dict[str, Any]]:
        """Remove a dynamic registration; returns the discarded options, or None on a mismatch."""
        stored_id = self.get(registration_path)
        if not isinstance(stored_id, str):
            debug("stored registration ID at", registration_path, "is not a string")
            return None
        if stored_id != registration_id:
            msg = "stored registration ID ({}) is not the same as the provided registration ID ({})"
            debug(msg.format(stored_id, registration_id))
            return None
        discarded = self.get(capability_path)
        self.remove(capability_path)
        self.remove(registration_path)
        return discarded

    def assign(self, d: Dict[str, Any]) -> None:
        # Legacy textDocumentSync shapes are normalized before merging.
        textsync = normalize_text_sync(d.pop("textDocumentSync", None))
        super().assign(d)
        if textsync:
            self.update(textsync)

    def should_notify_did_open(self) -> bool:
        return "textDocumentSync.didOpen" in self

    def text_sync_kind(self) -> int:
        kind = self.get("textDocumentSync.change.syncKind")
        return kind if isinstance(kind, int) else TextDocumentSyncKindNone

    def should_notify_did_change_workspace_folders(self) -> bool:
        return "workspace.workspaceFolders.changeNotifications" in self

    def should_notify_will_save(self) -> bool:
        return "textDocumentSync.willSave" in self

    def should_notify_did_save(self) -> Tuple[bool, bool]:
        """Returns (should notify, should include the document text in the notification)."""
        save = self.get("textDocumentSync.save")
        if isinstance(save, dict):
            return True, bool(save.get("includeText"))
        if isinstance(save, bool):
            return save, False
        return False, False

    def should_notify_did_close(self) -> bool:
        return "textDocumentSync.didClose" in self
def _translate_path(path: str, source: str, destination: str) -> Tuple[str, bool]:
# TODO: Case-insensitive file systems. Maybe this problem needs a much larger refactor. Even Sublime Text doesn't
# handle case-insensitive file systems correctly. There are a few other places where case-sensitivity matters, for
# example when looking up the correct view for diagnostics, and when finding a view for goto-def.
if path.startswith(source) and len(path) > len(source) and path[len(source)] in ("/", "\\"):
return path.replace(source, destination, 1), True
return path, False
class PathMap:
    """A single local<->remote path prefix substitution (for servers running e.g. in a container)."""

    __slots__ = ("_local", "_remote")

    def __init__(self, local: str, remote: str) -> None:
        self._local = local
        self._remote = remote

    @classmethod
    def parse(cls, json: Any) -> "Optional[List[PathMap]]":
        """Parse a "path_maps" settings value; malformed entries are logged and skipped."""
        if not isinstance(json, list):
            return None
        path_maps = []  # type: List[PathMap]
        for entry in json:
            if not isinstance(entry, dict):
                debug('path map entry is not an object')
                continue
            local = entry.get("local")
            if not isinstance(local, str):
                debug('missing "local" key for path map entry')
                continue
            remote = entry.get("remote")
            if not isinstance(remote, str):
                debug('missing "remote" key for path map entry')
                continue
            path_maps.append(PathMap(local, remote))
        return path_maps

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, PathMap) and self._local == other._local and self._remote == other._remote

    def map_from_local_to_remote(self, uri: str) -> Tuple[str, bool]:
        return _translate_path(uri, self._local, self._remote)

    def map_from_remote_to_local(self, uri: str) -> Tuple[str, bool]:
        return _translate_path(uri, self._remote, self._local)
class TransportConfig:
    """How to reach a language server process: either spawn it (command) or connect to it (tcp_port)."""

    __slots__ = ("name", "command", "tcp_port", "env", "listener_socket")

    def __init__(
        self,
        name: str,
        command: List[str],
        tcp_port: Optional[int],
        env: Dict[str, str],
        listener_socket: Optional[socket.socket]
    ) -> None:
        # At least one way of reaching the server must be provided.
        if not command and not tcp_port:
            raise ValueError('neither "command" nor "tcp_port" is provided; cannot start a language server')
        self.name = name
        self.listener_socket = listener_socket
        self.env = env
        self.tcp_port = tcp_port
        self.command = command
class ClientConfig:
    """User-facing configuration of a single language server.

    Instances are assembled from Sublime settings (from_sublime_settings),
    plugin-supplied dicts (from_dict), or by overlaying overrides onto an
    existing config (from_config).
    """

    def __init__(self,
                 name: str,
                 selector: str,
                 priority_selector: Optional[str] = None,
                 command: Optional[List[str]] = None,
                 binary_args: Optional[List[str]] = None,  # DEPRECATED
                 tcp_port: Optional[int] = None,
                 auto_complete_selector: Optional[str] = None,
                 enabled: bool = True,
                 init_options: Optional[DottedDict] = None,
                 settings: Optional[DottedDict] = None,
                 env: Optional[Dict[str, str]] = None,
                 experimental_capabilities: Optional[Dict[str, Any]] = None,
                 disabled_capabilities: Optional[DottedDict] = None,
                 path_maps: Optional[List[PathMap]] = None) -> None:
        self.name = name
        self.selector = selector
        # An empty or missing priority_selector falls back to the base selector.
        self.priority_selector = priority_selector if priority_selector else self.selector
        if isinstance(command, list):
            self.command = command
        else:
            # Fall back to the deprecated "binary_args" key.
            assert isinstance(binary_args, list)
            self.command = binary_args
        self.tcp_port = tcp_port
        self.auto_complete_selector = auto_complete_selector
        self.enabled = enabled
        # Create fresh containers when none were given. Previously DottedDict()
        # and {} were used directly as parameter defaults, which shares ONE
        # mutable instance across every ClientConfig constructed without an
        # explicit value -- mutating one config would leak into all others
        # (classic mutable-default-argument pitfall).
        self.init_options = init_options if init_options is not None else DottedDict()
        self.settings = settings if settings is not None else DottedDict()
        self.env = env if env is not None else {}
        self.experimental_capabilities = experimental_capabilities
        self.disabled_capabilities = disabled_capabilities if disabled_capabilities is not None else DottedDict()
        self.path_maps = path_maps
        # Key under which per-view status messages are stored.
        self.status_key = "lsp_{}".format(self.name)

    @classmethod
    def from_sublime_settings(cls, name: str, s: sublime.Settings, file: str) -> "ClientConfig":
        """Build a config from user settings layered over the packaged defaults in *file*."""
        base = sublime.decode_value(sublime.load_resource(file))
        settings = DottedDict(base.get("settings", {}))  # defined by the plugin author
        settings.update(read_dict_setting(s, "settings", {}))  # overrides from the user
        init_options = DottedDict(base.get("initializationOptions", {}))
        init_options.update(read_dict_setting(s, "initializationOptions", {}))
        disabled_capabilities = s.get("disabled_capabilities")
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = DottedDict()
        return ClientConfig(
            name=name,
            selector=_read_selector(s),
            priority_selector=_read_priority_selector(s),
            command=read_list_setting(s, "command", []),
            tcp_port=s.get("tcp_port"),
            auto_complete_selector=s.get("auto_complete_selector"),
            # Default to True, because an LSP plugin is enabled iff it is enabled as a Sublime package.
            enabled=bool(s.get("enabled", True)),
            init_options=init_options,
            settings=settings,
            env=read_dict_setting(s, "env", {}),
            experimental_capabilities=s.get("experimental_capabilities"),
            disabled_capabilities=disabled_capabilities,
            path_maps=PathMap.parse(s.get("path_maps"))
        )

    @classmethod
    def from_dict(cls, name: str, d: Dict[str, Any]) -> "ClientConfig":
        """Build a config from a plain dict (e.g. supplied by a plugin)."""
        disabled_capabilities = d.get("disabled_capabilities")
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = DottedDict()
        return ClientConfig(
            name=name,
            selector=_read_selector(d),
            priority_selector=_read_priority_selector(d),
            command=d.get("command", []),
            tcp_port=d.get("tcp_port"),
            auto_complete_selector=d.get("auto_complete_selector"),
            # Dict-based configs default to disabled, unlike settings-based ones.
            enabled=d.get("enabled", False),
            init_options=DottedDict(d.get("initializationOptions")),
            settings=DottedDict(d.get("settings")),
            env=d.get("env", dict()),
            experimental_capabilities=d.get("experimental_capabilities"),
            disabled_capabilities=disabled_capabilities,
            path_maps=PathMap.parse(d.get("path_maps"))
        )

    @classmethod
    def from_config(cls, src_config: "ClientConfig", override: Dict[str, Any]) -> "ClientConfig":
        """Return a new config equal to *src_config* with *override* keys applied on top."""
        path_map_override = PathMap.parse(override.get("path_maps"))
        disabled_capabilities = override.get("disabled_capabilities")
        if isinstance(disabled_capabilities, dict):
            disabled_capabilities = DottedDict(disabled_capabilities)
        else:
            disabled_capabilities = src_config.disabled_capabilities
        return ClientConfig(
            name=src_config.name,
            selector=_read_selector(override) or src_config.selector,
            priority_selector=_read_priority_selector(override) or src_config.priority_selector,
            command=override.get("command", src_config.command),
            tcp_port=override.get("tcp_port", src_config.tcp_port),
            auto_complete_selector=override.get("auto_complete_selector", src_config.auto_complete_selector),
            enabled=override.get("enabled", src_config.enabled),
            init_options=DottedDict.from_base_and_override(
                src_config.init_options, override.get("initializationOptions")),
            settings=DottedDict.from_base_and_override(src_config.settings, override.get("settings")),
            env=override.get("env", src_config.env),
            experimental_capabilities=override.get(
                "experimental_capabilities", src_config.experimental_capabilities),
            disabled_capabilities=disabled_capabilities,
            path_maps=path_map_override if path_map_override else src_config.path_maps
        )

    def resolve_transport_config(self, variables: Dict[str, str]) -> TransportConfig:
        """Expand *variables* in command/env and determine the TCP setup.

        Raises ValueError (via TransportConfig) when neither a command nor
        a TCP port is configured.
        """
        tcp_port = None  # type: Optional[int]
        listener_socket = None  # type: Optional[socket.socket]
        if self.tcp_port is not None:
            # < 0 means we're hosting a TCP server
            if self.tcp_port < 0:
                # -1 means pick any free port
                if self.tcp_port < -1:
                    tcp_port = -self.tcp_port
                # Create a listener socket for incoming connections
                listener_socket = _start_tcp_listener(tcp_port)
                tcp_port = int(listener_socket.getsockname()[1])
            else:
                tcp_port = _find_free_port() if self.tcp_port == 0 else self.tcp_port
        if tcp_port is not None:
            variables["port"] = str(tcp_port)
        command = sublime.expand_variables(self.command, variables)
        command = [os.path.expanduser(arg) for arg in command]
        if tcp_port is not None:
            # DEPRECATED -- replace {port} with $port or ${port} in your client config
            command = [a.replace('{port}', str(tcp_port)) for a in command]
        env = os.environ.copy()
        for var, value in self.env.items():
            env[var] = sublime.expand_variables(value, variables)
        return TransportConfig(self.name, command, tcp_port, env, listener_socket)

    def set_view_status(self, view: sublime.View, message: str) -> None:
        """Show this server's status *message* in the view's status bar (if enabled)."""
        if sublime.load_settings("LSP.sublime-settings").get("show_view_status"):
            status = "{}: {}".format(self.name, message) if message else self.name
            view.set_status(self.status_key, status)

    def erase_view_status(self, view: sublime.View) -> None:
        """Remove this server's status message from the view's status bar."""
        view.erase_status(self.status_key)

    def match_view(self, view: sublime.View) -> bool:
        """Return True when this config's selector applies to the view's syntax."""
        syntax = view.syntax()
        if syntax:
            # Every part of a x.y.z scope seems to contribute 8.
            # An empty selector result in a score of 1.
            # A non-matching non-empty selector results in a score of 0.
            # We want to match at least one part of an x.y.z, and we don't want to match on empty selectors.
            return sublime.score_selector(syntax.scope, self.selector) >= 8
        return False

    def map_client_path_to_server_uri(self, path: str) -> str:
        """Translate a local file path through path_maps and return it as a URI."""
        if self.path_maps:
            for path_map in self.path_maps:
                path, mapped = path_map.map_from_local_to_remote(path)
                if mapped:
                    # First matching path map wins.
                    break
        return filename_to_uri(path)

    def map_server_uri_to_client_path(self, uri: str) -> str:
        """Translate a server URI back to a local file path through path_maps."""
        path = uri_to_filename(uri)
        if self.path_maps:
            for path_map in self.path_maps:
                path, mapped = path_map.map_from_remote_to_local(path)
                if mapped:
                    # First matching path map wins.
                    break
        return path

    def is_disabled_capability(self, capability_path: str) -> bool:
        """Return True when the dotted *capability_path* is disabled by this config."""
        for value in self.disabled_capabilities.walk(capability_path):
            if isinstance(value, bool):
                return value
            elif isinstance(value, dict):
                if value:
                    # If it's not empty we'll continue the walk
                    continue
                else:
                    # This might be a leaf node
                    return True
        return False

    def filter_out_disabled_capabilities(self, capability_path: str, options: Dict[str, Any]) -> Dict[str, Any]:
        """Return *options* without the entries disabled under *capability_path*."""
        result = {}  # type: Dict[str, Any]
        for k, v in options.items():
            if not self.is_disabled_capability("{}.{}".format(capability_path, k)):
                result[k] = v
        return result

    def __repr__(self) -> str:
        items = []  # type: List[str]
        for k, v in self.__dict__.items():
            if not k.startswith("_"):
                items.append("{}={}".format(k, repr(v)))
        return "{}({})".format(self.__class__.__name__, ", ".join(items))

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, ClientConfig):
            return False
        for k, v in self.__dict__.items():
            if not k.startswith("_") and v != getattr(other, k):
                return False
        return True
def syntax2scope(syntax_path: str) -> Optional[str]:
    """Resolve a syntax file path to its base scope, or None if not found."""
    syntax = sublime.syntax_from_path(syntax_path)
    if syntax:
        return syntax.scope
    return None
def view2scope(view: sublime.View) -> str:
    """Return the base scope of the view, or '' for an empty scope name."""
    scopes = view.scope_name(0).split()
    if scopes:
        return scopes[0]
    return ''
def _read_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
    """Compute the document selector for a config, supporting legacy keys.

    Precedence: "selector" > legacy "languages" list > "document_selector"
    > "syntaxes" > "languageId". Returns "" when nothing applies.
    """
    # Best case: a modern config specifies "selector" directly.
    selector = config.get("selector")
    if isinstance(selector, str):
        return selector
    # Legacy: "languages": [...] with per-language keys.
    languages = config.get("languages")
    if isinstance(languages, list):
        parts = []  # type: List[str]
        for language in languages:
            # Highest priority within a language entry: document_selector.
            document_selector = language.get("document_selector")
            if isinstance(document_selector, str):
                parts.append(document_selector)
                continue
            # Next: a list of syntax file paths.
            syntaxes = language.get("syntaxes")
            if isinstance(syntaxes, list):
                for syntax_path in syntaxes:
                    syntax = sublime.syntax_from_path(syntax_path)
                    if syntax:
                        parts.append(syntax.scope)
                continue
            # Last resort: a languageId.
            language_id = language.get("languageId")
            if isinstance(language_id, str):
                parts.append("source.{}".format(language_id))
        return "|".join("({})".format(part) for part in parts)
    # Legacy: top-level "document_selector".
    document_selector = config.get("document_selector")
    if isinstance(document_selector, str):
        return document_selector
    # Legacy: top-level "syntaxes": [...].
    syntaxes = config.get("syntaxes")
    if isinstance(syntaxes, list):
        scopes = []
        for syntax_path in syntaxes:
            syntax = sublime.syntax_from_path(syntax_path)
            if syntax:
                scopes.append(syntax.scope)
        return "|".join(scopes)
    # Legacy: top-level "languageId".
    language_id = config.get("languageId")
    if language_id:
        return "source.{}".format(language_id)
    return ""
def _read_priority_selector(config: Union[sublime.Settings, Dict[str, Any]]) -> str:
    """Compute the priority (feature) selector, supporting legacy keys.

    Precedence: "priority_selector" > legacy "languages" list >
    "feature_selector" > "scopes" > "languageId". Returns "" when nothing applies.
    """
    # Best case: a modern config specifies "priority_selector" directly.
    selector = config.get("priority_selector")
    if isinstance(selector, str):
        return selector
    # Legacy: "languages": [...] with per-language keys.
    languages = config.get("languages")
    if isinstance(languages, list):
        parts = []  # type: List[str]
        for language in languages:
            # Highest priority within a language entry: feature_selector.
            feature_selector = language.get("feature_selector")
            if isinstance(feature_selector, str):
                parts.append(feature_selector)
                continue
            # Next: a list of scopes.
            scopes = language.get("scopes")
            if isinstance(scopes, list):
                parts.extend(scopes)
                continue
            # Last resort: a languageId.
            language_id = language.get("languageId")
            if isinstance(language_id, str):
                parts.append("source.{}".format(language_id))
        return "|".join("({})".format(part) for part in parts)
    # Legacy: top-level "feature_selector".
    feature_selector = config.get("feature_selector")
    if isinstance(feature_selector, str):
        return feature_selector
    # Legacy: top-level "scopes": [...].
    scopes = config.get("scopes")
    if isinstance(scopes, list):
        return "|".join("({})".format(scope) for scope in scopes)
    # Legacy: top-level "languageId".
    language_id = config.get("languageId")
    if language_id:
        return "source.{}".format(language_id)
    return ""
def _find_free_port() -> int:
with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
def _start_tcp_listener(tcp_port: Optional[int]) -> socket.socket:
    """Create a socket listening on localhost; None/0 lets the OS pick a free port.

    The accept timeout is TCP_CONNECT_TIMEOUT; the caller owns the socket.
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind(('localhost', tcp_port if tcp_port else 0))
    listener.settimeout(TCP_CONNECT_TIMEOUT)
    listener.listen(1)
    return listener
| 41.119298 | 120 | 0.640754 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.